Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Enable more linters and fix reported issues #283

Merged
merged 5 commits into from
Jan 3, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
38 changes: 38 additions & 0 deletions .golangci.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,38 @@
# .golangci.yml for github.com/onflow/atree
# Configuration for golangci-lint: runs the default linter set plus a few
# extra checks, with project-specific tweaks below under linters-settings.

linters:
  # Start from an empty set and opt in explicitly, so upgrades to
  # golangci-lint don't silently enable new linters.
  disable-all: true
  enable:
    # enable default linters
    - errcheck
    - gosimple
    - govet
    - ineffassign
    - staticcheck
    - typecheck
    - unused

    # enable extra linters
    - exportloopref
    - gocritic
    - gofmt
    - goimports
    - misspell
    - nilerr
    - unconvert

issues:
  # 0 means unlimited: report every issue rather than truncating output.
  max-issues-per-linter: 0
  max-same-issues: 0

linters-settings:
  gocritic:
    disabled-checks:
      - ifElseChain # style
      - singleCaseSwitch # style
      - unslice # false positives
      - commentFormatting # does not detect commented out code
      - exitAfterDefer

  goimports:
    # Group imports of this module separately from stdlib/third-party.
    local-prefixes: github.com/onflow/atree
2 changes: 1 addition & 1 deletion array.go
Original file line number Diff line number Diff line change
Expand Up @@ -892,7 +892,7 @@ func newArrayMetaDataSlabFromData(
totalCount += count

childrenHeaders[i] = ArraySlabHeader{
id: StorageID(storageID),
id: storageID,
count: count,
size: size,
}
Expand Down
2 changes: 1 addition & 1 deletion blake3_regression_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -110,7 +110,7 @@ func TestBLAKE3Regression(t *testing.T) {
data := nonUniformBytes64KiB()

// Verify BLAKE3 digests produced from hashing portions of
// data. Input sizes vary from 1 to 64KiB bytes by varing
// data. Input sizes vary from 1 to 64KiB bytes by varying
// starting pos and ending pos.
// We use 64KiB because BLAKE3 implementations can have
// special optimizations for large data sizes and we
Expand Down
2 changes: 1 addition & 1 deletion circlehash64_regression_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -72,7 +72,7 @@ func TestCircleHash64Regression(t *testing.T) {

// Verify CircleHash64 digests produced from hashing portions of
// data using different seed values. Input sizes vary from
// 1 to 16384 bytes by varing starting pos and ending pos.
// 1 to 16384 bytes by varying starting pos and ending pos.

testCases := []struct {
name string
Expand Down
3 changes: 2 additions & 1 deletion cmd/main/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -22,8 +22,9 @@ import (
"flag"
"fmt"

"github.com/fxamacker/cbor/v2"
"github.com/onflow/atree"

"github.com/fxamacker/cbor/v2"
)

const cborTagUInt64Value = 164
Expand Down
7 changes: 4 additions & 3 deletions cmd/stress/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -29,8 +29,9 @@ import (
"syscall"
"time"

"github.com/fxamacker/cbor/v2"
"github.com/onflow/atree"

"github.com/fxamacker/cbor/v2"
)

const maxStatusLength = 128
Expand Down Expand Up @@ -87,7 +88,7 @@ func main() {
var seed int64
if len(seedHex) != 0 {
var err error
seed, err = strconv.ParseInt(strings.Replace(seedHex, "0x", "", -1), 16, 64)
seed, err = strconv.ParseInt(strings.ReplaceAll(seedHex, "0x", ""), 16, 64)
if err != nil {
panic("Failed to parse seed flag (hex string)")
}
Expand All @@ -98,7 +99,7 @@ func main() {
typ = strings.ToLower(typ)

if typ != "array" && typ != "map" {
fmt.Fprintf(os.Stderr, "Please specifiy type as either \"array\" or \"map\"")
fmt.Fprintf(os.Stderr, "Please specify type as either \"array\" or \"map\"")
return
}

Expand Down
3 changes: 2 additions & 1 deletion cmd/stress/storable.go
Original file line number Diff line number Diff line change
Expand Up @@ -23,8 +23,9 @@ import (
"fmt"
"math"

"github.com/fxamacker/cbor/v2"
"github.com/onflow/atree"

"github.com/fxamacker/cbor/v2"
)

// This file is mostly from github.com/onflow/atree/storable_test.go
Expand Down
3 changes: 2 additions & 1 deletion cmd/stress/typeinfo.go
Original file line number Diff line number Diff line change
Expand Up @@ -19,8 +19,9 @@
package main

import (
"github.com/fxamacker/cbor/v2"
"github.com/onflow/atree"

"github.com/fxamacker/cbor/v2"
)

type testTypeInfo struct {
Expand Down
2 changes: 1 addition & 1 deletion cmd/stress/utils.go
Original file line number Diff line number Diff line change
Expand Up @@ -53,7 +53,7 @@ var (

func newRand(seed int64) *rand.Rand {
if seed == 0 {
seed = int64(time.Now().UnixNano())
seed = time.Now().UnixNano()
}

fmt.Printf("rand seed 0x%x\n", seed)
Expand Down
4 changes: 2 additions & 2 deletions errors.go
Original file line number Diff line number Diff line change
Expand Up @@ -246,7 +246,7 @@ func (e *SlabNotFoundError) Error() string {
// Unwrap returns the wrapped err
func (e *SlabNotFoundError) Unwrap() error { return e.err }

// SlabSplitError is alwyas a fatal error returned when splitting an slab has failed
// SlabSplitError is always a fatal error returned when splitting an slab has failed
type SlabSplitError struct {
err error
}
Expand Down Expand Up @@ -288,7 +288,7 @@ func (e *SlabMergeError) Error() string {

func (e *SlabMergeError) Unwrap() error { return e.err }

// SlabRebalanceError is alwyas a fatal error returned when rebalancing a slab has failed
// SlabRebalanceError is always a fatal error returned when rebalancing a slab has failed
type SlabRebalanceError struct {
err error
}
Expand Down
4 changes: 2 additions & 2 deletions map.go
Original file line number Diff line number Diff line change
Expand Up @@ -715,7 +715,7 @@ func (e *inlineCollisionGroup) Set(storage SlabStorage, address Address, b Diges
}

if level == 1 {
// Export oversized inline collision group to separete slab (external collision group)
// Export oversized inline collision group to separate slab (external collision group)
// for first level collision.
if e.Size() > uint32(maxInlineMapElementSize) {

Expand Down Expand Up @@ -2503,7 +2503,7 @@ func newMapMetaDataSlabFromData(
size := binary.BigEndian.Uint32(data[sizeOffset:])

childrenHeaders[i] = MapSlabHeader{
id: StorageID(storageID),
id: storageID,
size: size,
firstKey: Digest(firstKey),
}
Expand Down
2 changes: 1 addition & 1 deletion map_debug.go
Original file line number Diff line number Diff line change
Expand Up @@ -581,7 +581,7 @@ func validMapHkeyElements(

elementSize += e.Size()

elementCount += uint64(count)
elementCount += count

} else {

Expand Down
6 changes: 0 additions & 6 deletions map_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -1734,7 +1734,6 @@ func TestMapEncodeDecode(t *testing.T) {
// extra data (CBOR encoded array of 3 elements)
0x83,
// type info: "map"
//0x63, 0x6d, 0x61, 0x70,
0x18, 0x2A,
// count: 8
0x08,
Expand Down Expand Up @@ -1951,7 +1950,6 @@ func TestMapEncodeDecode(t *testing.T) {
// extra data (CBOR encoded array of 3 elements)
0x83,
// type info: "map"
//0x63, 0x6d, 0x61, 0x70,
0x18, 0x2A,
// count: 8
0x08,
Expand Down Expand Up @@ -2145,7 +2143,6 @@ func TestMapEncodeDecode(t *testing.T) {
// extra data (CBOR encoded array of 3 elements)
0x83,
// type info: "map"
//0x63, 0x6d, 0x61, 0x70,
0x18, 0x2A,
// count: 8
0x08,
Expand Down Expand Up @@ -2391,7 +2388,6 @@ func TestMapEncodeDecode(t *testing.T) {
// extra data (CBOR encoded array of 3 elements)
0x83,
// type info: "map"
//0x63, 0x6d, 0x61, 0x70,
0x18, 0x2A,
// count: 10
0x14,
Expand Down Expand Up @@ -2616,7 +2612,6 @@ func TestMapEncodeDecode(t *testing.T) {
// extra data (CBOR encoded array of 3 elements)
0x83,
// type info: "map"
//0x63, 0x6d, 0x61, 0x70,
0x18, 0x2A,
// count: 10
0x01,
Expand Down Expand Up @@ -2672,7 +2667,6 @@ func TestMapEncodeDecode(t *testing.T) {
// extra data (CBOR encoded array of 3 elements)
0x83,
// type info: "map"
//0x63, 0x6d, 0x61, 0x70,
0x18, 0x2A,
// count: 10
0x01,
Expand Down
8 changes: 4 additions & 4 deletions settings.go
Original file line number Diff line number Diff line change
Expand Up @@ -49,12 +49,12 @@ func SetThreshold(threshold uint64) (uint64, uint64, uint64, uint64) {
}

targetThreshold = threshold
minThreshold = uint64(targetThreshold / 2)
minThreshold = targetThreshold / 2
maxThreshold = uint64(float64(targetThreshold) * 1.5)

// Total slab size available for array elements, excluding slab encoding overhead
availableArrayElementsSize := targetThreshold - arrayDataSlabPrefixSize
MaxInlineArrayElementSize = uint64(availableArrayElementsSize / minElementCountInSlab)
MaxInlineArrayElementSize = availableArrayElementsSize / minElementCountInSlab

// Total slab size available for map elements, excluding slab encoding overhead
availableMapElementsSize := targetThreshold - mapDataSlabPrefixSize - hkeyElementsPrefixSize
Expand All @@ -63,10 +63,10 @@ func SetThreshold(threshold uint64) (uint64, uint64, uint64, uint64) {
mapElementOverheadSize := uint64(digestSize)

// Max inline size for a map's element
maxInlineMapElementSize = uint64(availableMapElementsSize/minElementCountInSlab) - mapElementOverheadSize
maxInlineMapElementSize = availableMapElementsSize/minElementCountInSlab - mapElementOverheadSize

// Max inline size for a map's key or value, excluding element encoding overhead
MaxInlineMapKeyOrValueSize = uint64((maxInlineMapElementSize - singleElementPrefixSize) / 2)
MaxInlineMapKeyOrValueSize = (maxInlineMapElementSize - singleElementPrefixSize) / 2

return minThreshold, maxThreshold, MaxInlineArrayElementSize, MaxInlineMapKeyOrValueSize
}
2 changes: 1 addition & 1 deletion storage.go
Original file line number Diff line number Diff line change
Expand Up @@ -914,7 +914,7 @@ func (s *PersistentSlabStorage) Remove(id StorageID) error {
return nil
}

// Warning Counts doesn't consider new segments in the deltas and only returns commited values
// Warning Counts doesn't consider new segments in the deltas and only returns committed values
func (s *PersistentSlabStorage) Count() int {
return s.baseStorage.SegmentCounts()
}
Expand Down
9 changes: 4 additions & 5 deletions storage_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -226,10 +226,9 @@ func TestLedgerBaseStorageStore(t *testing.T) {

// Overwrite stored values
for id := range values {
value := append(values[id], []byte{1, 2, 3}...)
values[id] = value
bytesStored += len(value)
err := baseStorage.Store(id, value)
values[id] = append(values[id], []byte{1, 2, 3}...)
bytesStored += len(values[id])
err := baseStorage.Store(id, values[id])
require.NoError(t, err)
}

Expand Down Expand Up @@ -675,7 +674,7 @@ func TestPersistentStorage(t *testing.T) {
storageWithFastCommit := NewPersistentSlabStorage(baseStorage2, encMode, decMode, nil, nil)

simpleMap := make(map[StorageID][]byte)
// test random updates apply commit and check the order of commited values
// test random updates apply commit and check the order of committed values
for i := 0; i < numberOfAccounts; i++ {
for j := 0; j < numberOfSlabsPerAccount; j++ {
addr := generateRandomAddress(r)
Expand Down