diff --git a/array.go b/array.go
index f8a3ae3..1f3772b 100644
--- a/array.go
+++ b/array.go
@@ -32,9 +32,6 @@ import (
 // NOTE: we use encoding size (in bytes) instead of Go type size for slab operations,
 // such as merge and split, so size constants here are related to encoding size.
 const (
-	slabAddressSize = 8
-	slabIndexSize   = 8
-	slabIDSize      = slabAddressSize + slabIndexSize

 	// version and flag size: version (1 byte) + flag (1 byte)
 	versionAndFlagSize = 2
@@ -42,18 +39,18 @@ const (
 	// slab header size: slab index (8 bytes) + count (4 bytes) + size (2 bytes)
 	// Support up to 4,294,967,295 elements in each array.
 	// Support up to 65,535 bytes for slab size limit (default limit is 1536 max bytes).
-	arraySlabHeaderSize = slabIndexSize + 4 + 2
+	arraySlabHeaderSize = SlabIndexLength + 4 + 2

 	// meta data slab prefix size: version (1 byte) + flag (1 byte) + address (8 bytes) + child header count (2 bytes)
 	// Support up to 65,535 children per metadata slab.
-	arrayMetaDataSlabPrefixSize = versionAndFlagSize + slabAddressSize + 2
+	arrayMetaDataSlabPrefixSize = versionAndFlagSize + SlabAddressLength + 2

 	// Encoded element head in array data slab (fixed-size for easy computation).
 	arrayDataSlabElementHeadSize = 3

 	// non-root data slab prefix size: version (1 byte) + flag (1 byte) + next id (16 bytes) + element array head (3 bytes)
 	// Support up to 65,535 elements in the array per data slab.
-	arrayDataSlabPrefixSize = versionAndFlagSize + slabIDSize + arrayDataSlabElementHeadSize
+	arrayDataSlabPrefixSize = versionAndFlagSize + SlabIDLength + arrayDataSlabElementHeadSize

 	// root data slab prefix size: version (1 byte) + flag (1 byte) + element array head (3 bytes)
 	// Support up to 65,535 elements in the array per data slab.
@@ -420,7 +417,7 @@ func newArrayDataSlabFromDataV0(
 	var next SlabID
 	if !h.isRoot() {
 		// Check data length for next slab ID
-		if len(data) < slabIDSize {
+		if len(data) < SlabIDLength {
 			return nil, NewDecodingErrorf("data is too short for array data slab")
 		}

@@ -431,7 +428,7 @@ func newArrayDataSlabFromDataV0(
 			return nil, err
 		}

-		data = data[slabIDSize:]
+		data = data[SlabIDLength:]
 	}

 	// Check data length for array element head
@@ -539,7 +536,7 @@ func newArrayDataSlabFromDataV1(
 			return nil, err
 		}

-		data = data[slabIDSize:]
+		data = data[SlabIDLength:]
 	}

 	// Check minimum data length after header
@@ -651,11 +648,11 @@ func DecodeInlinedArrayStorable(
 	if err != nil {
 		return nil, NewDecodingError(err)
 	}
-	if len(b) != slabIndexSize {
+	if len(b) != SlabIndexLength {
 		return nil, NewDecodingError(
 			fmt.Errorf(
 				"failed to decode inlined array data slab: expect %d bytes for slab index, got %d bytes",
-				slabIndexSize,
+				SlabIndexLength,
 				len(b)))
 	}

@@ -1494,7 +1491,7 @@ func newArrayMetaDataSlabFromDataV0(
 		arrayMetaDataArrayHeadSizeV0 = 2

 		// slab header size: slab id (16 bytes) + count (4 bytes) + size (4 bytes)
-		arraySlabHeaderSizeV0 = slabIDSize + 4 + 4
+		arraySlabHeaderSizeV0 = SlabIDLength + 4 + 4
 	)

 	var err error
@@ -1547,7 +1544,7 @@ func newArrayMetaDataSlabFromDataV0(
 			return nil, err
 		}

-		countOffset := offset + slabIDSize
+		countOffset := offset + SlabIDLength
 		count := binary.BigEndian.Uint32(data[countOffset:])

 		sizeOffset := countOffset + 4
@@ -1633,7 +1630,7 @@ func newArrayMetaDataSlabFromDataV1(
 	// Decode shared address of headers
 	var address Address
 	copy(address[:], data[offset:])
-	offset += slabAddressSize
+	offset += SlabAddressLength

 	// Decode number of child headers
 	const arrayHeaderSize = 2
@@ -1660,7 +1657,7 @@ func newArrayMetaDataSlabFromDataV1(
 		copy(index[:], data[offset:])
 		slabID := SlabID{address, index}

-		offset += slabIndexSize
+		offset += SlabIndexLength

 		// Decode count
 		count := binary.BigEndian.Uint32(data[offset:])
@@ -1748,7 +1745,7 @@ func (a *ArrayMetaDataSlab) Encode(enc *Encoder) error {
 	copy(enc.Scratch[:], a.header.slabID.address[:])

 	// Encode child header count to scratch
-	const childHeaderCountOffset = slabAddressSize
+	const childHeaderCountOffset = SlabAddressLength
 	binary.BigEndian.PutUint16(
 		enc.Scratch[childHeaderCountOffset:],
 		uint16(len(a.childrenHeaders)),
@@ -1767,7 +1764,7 @@ func (a *ArrayMetaDataSlab) Encode(enc *Encoder) error {
 		copy(enc.Scratch[:], h.slabID.index[:])

 		// Encode count
-		const countOffset = slabIndexSize
+		const countOffset = SlabIndexLength
 		binary.BigEndian.PutUint32(enc.Scratch[countOffset:], h.count)

 		// Encode size
diff --git a/array_debug.go b/array_debug.go
index 89a1702..f090e04 100644
--- a/array_debug.go
+++ b/array_debug.go
@@ -826,7 +826,7 @@ func computeSize(data []byte) (int, error) {
 		size -= inlinedSlabExtrDataSize

 		if !h.isRoot() && isDataSlab && !h.hasNextSlabID() {
-			size += slabIDSize
+			size += SlabIDLength
 		}

 		return size, nil
@@ -954,22 +954,22 @@ func verifyArrayValueID(a *Array) error {

 	vid := a.ValueID()

-	if !bytes.Equal(vid[:slabAddressSize], rootSlabID.address[:]) {
+	if !bytes.Equal(vid[:SlabAddressLength], rootSlabID.address[:]) {
 		return NewFatalError(
 			fmt.Errorf(
 				"expect first %d bytes of array value ID as %v, got %v",
-				slabAddressSize,
+				SlabAddressLength,
 				rootSlabID.address[:],
-				vid[:slabAddressSize]))
+				vid[:SlabAddressLength]))
 	}

-	if !bytes.Equal(vid[slabAddressSize:], rootSlabID.index[:]) {
+	if !bytes.Equal(vid[SlabAddressLength:], rootSlabID.index[:]) {
 		return NewFatalError(
 			fmt.Errorf(
 				"expect second %d bytes of array value ID as %v, got %v",
-				slabIndexSize,
+				SlabIndexLength,
 				rootSlabID.index[:],
-				vid[slabAddressSize:]))
+				vid[SlabAddressLength:]))
 	}

 	return nil
diff --git a/array_test.go b/array_test.go
index 172d02c..60670e9 100644
--- a/array_test.go
+++ b/array_test.go
@@ -5027,7 +5027,7 @@ func TestArrayMaxInlineElement(t *testing.T) {

 	// Size of root data slab with two elements of max inlined size is target slab size minus
 	// slab id size (next slab id is omitted in root slab), and minus 1 byte
 	// (for rounding when computing max inline array element size).
-	require.Equal(t, targetThreshold-slabIDSize-1, uint64(array.root.Header().size))
+	require.Equal(t, targetThreshold-SlabIDLength-1, uint64(array.root.Header().size))

 	testArray(t, storage, typeInfo, address, array, values, false)
 }
@@ -6025,8 +6025,8 @@ func TestArrayID(t *testing.T) {
 	sid := array.SlabID()
 	id := array.ValueID()

-	require.Equal(t, sid.address[:], id[:8])
-	require.Equal(t, sid.index[:], id[8:])
+	require.Equal(t, sid.address[:], id[:SlabAddressLength])
+	require.Equal(t, sid.index[:], id[SlabAddressLength:])
 }

 func TestSlabSizeWhenResettingMutableStorable(t *testing.T) {
@@ -6118,8 +6118,8 @@ func TestChildArrayInlinabilityInParentArray(t *testing.T) {
 		require.Equal(t, SlabIDUndefined, childArray.SlabID())

 		valueID := childArray.ValueID()
-		require.Equal(t, address[:], valueID[:slabAddressSize])
-		require.NotEqual(t, SlabIndexUndefined[:], valueID[slabAddressSize:])
+		require.Equal(t, address[:], valueID[:SlabAddressLength])
+		require.NotEqual(t, SlabIndexUndefined[:], valueID[SlabAddressLength:])

 		v := NewStringValue(strings.Repeat("a", 9))
 		vSize := v.size
@@ -6245,8 +6245,8 @@ func TestChildArrayInlinabilityInParentArray(t *testing.T) {
 		require.Equal(t, SlabIDUndefined, childArray.SlabID())

 		valueID := childArray.ValueID()
-		require.Equal(t, address[:], valueID[:slabAddressSize])
-		require.NotEqual(t, SlabIndexUndefined[:], valueID[slabAddressSize:])
+		require.Equal(t, address[:], valueID[:SlabAddressLength])
+		require.NotEqual(t, SlabIndexUndefined[:], valueID[SlabAddressLength:])

 		children[i].array = childArray
 		children[i].valueID = valueID
@@ -6445,8 +6445,8 @@ func TestChildArrayInlinabilityInParentArray(t *testing.T) {
 		require.Equal(t, SlabIDUndefined, childArray.SlabID())

 		valueID := childArray.ValueID()
-		require.Equal(t, address[:], valueID[:slabAddressSize])
-		require.NotEqual(t, SlabIndexUndefined[:], valueID[slabAddressSize:])
+		require.Equal(t, address[:], valueID[:SlabAddressLength])
+		require.NotEqual(t, SlabIndexUndefined[:], valueID[SlabAddressLength:])

 		children[i].array = childArray
 		children[i].valueID = valueID
@@ -6638,8 +6638,8 @@ func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) {
 		require.Equal(t, SlabIDUndefined, childArray.SlabID())

 		valueID := childArray.ValueID()
-		require.Equal(t, address[:], valueID[:slabAddressSize])
-		require.NotEqual(t, SlabIndexUndefined[:], valueID[slabAddressSize:])
+		require.Equal(t, address[:], valueID[:SlabAddressLength])
+		require.NotEqual(t, SlabIndexUndefined[:], valueID[SlabAddressLength:])

 		// Get inlined grand child array
 		e, err = childArray.Get(0)
 		require.NoError(t, err)
@@ -6652,9 +6652,9 @@ func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) {
 		require.Equal(t, SlabIDUndefined, gchildArray.SlabID())

 		gValueID := gchildArray.ValueID()
-		require.Equal(t, address[:], gValueID[:slabAddressSize])
-		require.NotEqual(t, SlabIndexUndefined[:], gValueID[slabAddressSize:])
-		require.NotEqual(t, valueID[slabAddressSize:], gValueID[slabAddressSize:])
+		require.Equal(t, address[:], gValueID[:SlabAddressLength])
+		require.NotEqual(t, SlabIndexUndefined[:], gValueID[SlabAddressLength:])
+		require.NotEqual(t, valueID[SlabAddressLength:], gValueID[SlabAddressLength:])

 		v := NewStringValue(strings.Repeat("a", 9))
 		vSize := v.size
@@ -6833,8 +6833,8 @@ func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) {
 		require.Equal(t, SlabIDUndefined, childArray.SlabID())

 		valueID := childArray.ValueID()
-		require.Equal(t, address[:], valueID[:slabAddressSize])
-		require.NotEqual(t, SlabIndexUndefined[:], valueID[slabAddressSize:])
+		require.Equal(t, address[:], valueID[:SlabAddressLength])
+		require.NotEqual(t, SlabIndexUndefined[:], valueID[SlabAddressLength:])

 		// Get inlined grand child array
 		e, err = childArray.Get(0)
@@ -6847,9 +6847,9 @@ func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) {
 		require.Equal(t, SlabIDUndefined, gchildArray.SlabID())

 		gValueID := gchildArray.ValueID()
-		require.Equal(t, address[:], gValueID[:slabAddressSize])
-		require.NotEqual(t, SlabIndexUndefined[:], gValueID[slabAddressSize:])
-		require.NotEqual(t, valueID[slabAddressSize:], gValueID[slabAddressSize:])
+		require.Equal(t, address[:], gValueID[:SlabAddressLength])
+		require.NotEqual(t, SlabIndexUndefined[:], gValueID[SlabAddressLength:])
+		require.NotEqual(t, valueID[SlabAddressLength:], gValueID[SlabAddressLength:])

 		v := NewStringValue(strings.Repeat("a", 9))
 		vSize := v.size
@@ -7066,8 +7066,8 @@ func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) {
 		require.Equal(t, SlabIDUndefined, childArray.SlabID())

 		valueID := childArray.ValueID()
-		require.Equal(t, address[:], valueID[:slabAddressSize])
-		require.NotEqual(t, SlabIndexUndefined[:], valueID[slabAddressSize:])
+		require.Equal(t, address[:], valueID[:SlabAddressLength])
+		require.NotEqual(t, SlabIndexUndefined[:], valueID[SlabAddressLength:])

 		e, err = childArray.Get(0)
 		require.NoError(t, err)
@@ -7079,9 +7079,9 @@ func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) {
 		require.Equal(t, SlabIDUndefined, gchildArray.SlabID())

 		gValueID := gchildArray.ValueID()
-		require.Equal(t, address[:], gValueID[:slabAddressSize])
-		require.NotEqual(t, SlabIndexUndefined[:], gValueID[slabAddressSize:])
-		require.NotEqual(t, valueID[slabAddressSize:], gValueID[slabAddressSize:])
+		require.Equal(t, address[:], gValueID[:SlabAddressLength])
+		require.NotEqual(t, SlabIndexUndefined[:], gValueID[SlabAddressLength:])
+		require.NotEqual(t, valueID[SlabAddressLength:], gValueID[SlabAddressLength:])

 		children[i] = arrayInfo{
 			array: childArray,
@@ -7362,8 +7362,8 @@ func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) {
 		require.Equal(t, SlabIDUndefined, childArray.SlabID())

 		valueID := childArray.ValueID()
-		require.Equal(t, address[:], valueID[:slabAddressSize])
-		require.NotEqual(t, SlabIndexUndefined[:], valueID[slabAddressSize:])
+		require.Equal(t, address[:], valueID[:SlabAddressLength])
+		require.NotEqual(t, SlabIndexUndefined[:], valueID[SlabAddressLength:])

 		e, err = childArray.Get(0)
 		require.NoError(t, err)
@@ -7375,9 +7375,9 @@ func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) {
 		require.Equal(t, SlabIDUndefined, gchildArray.SlabID())

 		gValueID := gchildArray.ValueID()
-		require.Equal(t, address[:], gValueID[:slabAddressSize])
-		require.NotEqual(t, SlabIndexUndefined[:], gValueID[slabAddressSize:])
-		require.NotEqual(t, valueID[slabAddressSize:], gValueID[slabAddressSize:])
+		require.Equal(t, address[:], gValueID[:SlabAddressLength])
+		require.NotEqual(t, SlabIndexUndefined[:], gValueID[SlabAddressLength:])
+		require.NotEqual(t, valueID[SlabAddressLength:], gValueID[SlabAddressLength:])

 		children[i] = arrayInfo{
 			array: childArray,
@@ -7695,8 +7695,8 @@ func TestChildArrayWhenParentArrayIsModified(t *testing.T) {
 		require.Equal(t, SlabIDUndefined, childArray.SlabID())

 		valueID := childArray.ValueID()
-		require.Equal(t, address[:], valueID[:slabAddressSize])
-		require.NotEqual(t, SlabIndexUndefined[:], valueID[slabAddressSize:])
+		require.Equal(t, address[:], valueID[:SlabAddressLength])
+		require.NotEqual(t, SlabIndexUndefined[:], valueID[SlabAddressLength:])

 		children[i] = &struct {
 			array *Array
diff --git a/map.go b/map.go
index 51493c3..298ee7c 100644
--- a/map.go
+++ b/map.go
@@ -61,14 +61,14 @@ const (
 	// slab header size: slab index (8 bytes) + size (2 bytes) + first digest (8 bytes)
 	// Support up to 65,535 bytes for slab size limit (default limit is 1536 max bytes).
-	mapSlabHeaderSize = slabIndexSize + 2 + digestSize
+	mapSlabHeaderSize = SlabIndexLength + 2 + digestSize

 	// meta data slab prefix size: version (1 byte) + flag (1 byte) + address (8 bytes) + child header count (2 bytes)
 	// Support up to 65,535 children per metadata slab.
-	mapMetaDataSlabPrefixSize = versionAndFlagSize + slabAddressSize + 2
+	mapMetaDataSlabPrefixSize = versionAndFlagSize + SlabAddressLength + 2

 	// version (1 byte) + flag (1 byte) + next id (16 bytes)
-	mapDataSlabPrefixSize = versionAndFlagSize + slabIDSize
+	mapDataSlabPrefixSize = versionAndFlagSize + SlabIDLength

 	// version (1 byte) + flag (1 byte)
 	mapRootDataSlabPrefixSize = versionAndFlagSize
@@ -2456,7 +2456,7 @@ func newMapDataSlabFromDataV0(
 	if !h.isRoot() {
 		// Check data length for next slab ID
-		if len(data) < slabIDSize {
+		if len(data) < SlabIDLength {
 			return nil, NewDecodingErrorf("data is too short for map data slab")
 		}

@@ -2468,7 +2468,7 @@ func newMapDataSlabFromDataV0(
 			return nil, err
 		}

-		data = data[slabIDSize:]
+		data = data[SlabIDLength:]
 	}

 	// Decode elements
@@ -2482,7 +2482,7 @@ func newMapDataSlabFromDataV0(
 	// Compute slab size for version 1.
 	slabSize := versionAndFlagSize + elements.Size()
 	if !h.isRoot() {
-		slabSize += slabIDSize
+		slabSize += SlabIDLength
 	}

 	header := MapSlabHeader{
@@ -2557,7 +2557,7 @@ func newMapDataSlabFromDataV1(
 	// Decode next slab ID for non-root slab
 	if h.hasNextSlabID() {
-		if len(data) < slabIDSize {
+		if len(data) < SlabIDLength {
 			return nil, NewDecodingErrorf("data is too short for map data slab")
 		}

@@ -2567,7 +2567,7 @@ func newMapDataSlabFromDataV1(
 			return nil, err
 		}

-		data = data[slabIDSize:]
+		data = data[SlabIDLength:]
 	}

 	// Decode elements
@@ -2581,7 +2581,7 @@ func newMapDataSlabFromDataV1(
 	// Compute slab size.
 	slabSize := versionAndFlagSize + elements.Size()
 	if !h.isRoot() {
-		slabSize += slabIDSize
+		slabSize += SlabIDLength
 	}

 	header := MapSlabHeader{
@@ -2660,11 +2660,11 @@ func DecodeInlinedCompactMapStorable(
 	if err != nil {
 		return nil, NewDecodingError(err)
 	}
-	if len(b) != slabIndexSize {
+	if len(b) != SlabIndexLength {
 		return nil, NewDecodingError(
 			fmt.Errorf(
 				"failed to decode inlined compact map data: expect %d bytes for slab index, got %d bytes",
-				slabIndexSize,
+				SlabIndexLength,
 				len(b)))
 	}

@@ -2799,11 +2799,11 @@ func DecodeInlinedMapStorable(
 	if err != nil {
 		return nil, NewDecodingError(err)
 	}
-	if len(b) != slabIndexSize {
+	if len(b) != SlabIndexLength {
 		return nil, NewDecodingError(
 			fmt.Errorf(
 				"failed to decode inlined compact map data: expect %d bytes for slab index, got %d bytes",
-				slabIndexSize,
+				SlabIndexLength,
 				len(b)))
 	}

@@ -3680,7 +3680,7 @@ func newMapMetaDataSlabFromDataV0(
 ) (*MapMetaDataSlab, error) {
 	const (
 		mapMetaDataArrayHeadSizeV0 = 2
-		mapSlabHeaderSizeV0        = slabIDSize + 4 + digestSize
+		mapSlabHeaderSizeV0        = SlabIDLength + 4 + digestSize
 	)

 	var err error
@@ -3732,7 +3732,7 @@ func newMapMetaDataSlabFromDataV0(
 			return nil, err
 		}

-		firstKeyOffset := offset + slabIDSize
+		firstKeyOffset := offset + SlabIDLength
 		firstKey := binary.BigEndian.Uint64(data[firstKeyOffset:])

 		sizeOffset := firstKeyOffset + digestSize
@@ -3818,7 +3818,7 @@ func newMapMetaDataSlabFromDataV1(
 	// Decode shared address of headers
 	var address Address
 	copy(address[:], data[offset:])
-	offset += slabAddressSize
+	offset += SlabAddressLength

 	// Decode number of child headers
 	const arrayHeaderSize = 2
@@ -3841,7 +3841,7 @@ func newMapMetaDataSlabFromDataV1(
 		// Decode slab index
 		var index SlabIndex
 		copy(index[:], data[offset:])
-		offset += slabIndexSize
+		offset += SlabIndexLength

 		// Decode first key
 		firstKey := binary.BigEndian.Uint64(data[offset:])
@@ -3930,7 +3930,7 @@ func (m *MapMetaDataSlab) Encode(enc *Encoder) error {
 	copy(enc.Scratch[:], m.header.slabID.address[:])

 	// Encode child header count to scratch
-	const childHeaderCountOffset = slabAddressSize
+	const childHeaderCountOffset = SlabAddressLength
 	binary.BigEndian.PutUint16(
 		enc.Scratch[childHeaderCountOffset:],
 		uint16(len(m.childrenHeaders)),
@@ -3948,7 +3948,7 @@ func (m *MapMetaDataSlab) Encode(enc *Encoder) error {
 		// Encode slab index to scratch
 		copy(enc.Scratch[:], h.slabID.index[:])

-		const firstKeyOffset = slabIndexSize
+		const firstKeyOffset = SlabIndexLength
 		binary.BigEndian.PutUint64(enc.Scratch[firstKeyOffset:], uint64(h.firstKey))

 		const sizeOffset = firstKeyOffset + digestSize
diff --git a/map_debug.go b/map_debug.go
index 7e5aea8..bdf4231 100644
--- a/map_debug.go
+++ b/map_debug.go
@@ -1407,22 +1407,22 @@ func verifyMapValueID(m *OrderedMap) error {

 	vid := m.ValueID()

-	if !bytes.Equal(vid[:slabAddressSize], rootSlabID.address[:]) {
+	if !bytes.Equal(vid[:SlabAddressLength], rootSlabID.address[:]) {
 		return NewFatalError(
 			fmt.Errorf(
 				"expect first %d bytes of array value ID as %v, got %v",
-				slabAddressSize,
+				SlabAddressLength,
 				rootSlabID.address[:],
-				vid[:slabAddressSize]))
+				vid[:SlabAddressLength]))
 	}

-	if !bytes.Equal(vid[slabAddressSize:], rootSlabID.index[:]) {
+	if !bytes.Equal(vid[SlabAddressLength:], rootSlabID.index[:]) {
 		return NewFatalError(
 			fmt.Errorf(
 				"expect second %d bytes of array value ID as %v, got %v",
-				slabIndexSize,
+				SlabIndexLength,
 				rootSlabID.index[:],
-				vid[slabAddressSize:]))
+				vid[SlabAddressLength:]))
 	}

 	return nil
diff --git a/map_test.go b/map_test.go
index e783811..bdff27e 100644
--- a/map_test.go
+++ b/map_test.go
@@ -8610,7 +8610,7 @@ func TestMapEncodeDecode(t *testing.T) {
 		require.Equal(t, uint32(len(stored[id2])), meta.childrenHeaders[0].size)

 		const inlinedExtraDataSize = 8
-		require.Equal(t, uint32(len(stored[id3])-inlinedExtraDataSize+slabIDSize), meta.childrenHeaders[1].size)
+		require.Equal(t, uint32(len(stored[id3])-inlinedExtraDataSize+SlabIDLength), meta.childrenHeaders[1].size)

 		// Decode data to new storage
 		storage2 := newTestPersistentStorageWithData(t, stored)
@@ -11798,7 +11798,7 @@ func TestMapEncodeDecode(t *testing.T) {
 		require.True(t, ok)
 		require.Equal(t, 2, len(meta.childrenHeaders))
 		require.Equal(t, uint32(len(stored[id2])), meta.childrenHeaders[0].size)
-		require.Equal(t, uint32(len(stored[id3])+slabIDSize), meta.childrenHeaders[1].size)
+		require.Equal(t, uint32(len(stored[id3])+SlabIDLength), meta.childrenHeaders[1].size)

 		// Decode data to new storage
 		storage2 := newTestPersistentStorageWithData(t, stored)
@@ -14338,7 +14338,7 @@ func TestMapMaxInlineElement(t *testing.T) {
 	// Size of root data slab with two elements (key+value pairs) of
 	// max inlined size is target slab size minus
 	// slab id size (next slab id is omitted in root slab)
-	require.Equal(t, targetThreshold-slabIDSize, uint64(m.root.Header().size))
+	require.Equal(t, targetThreshold-SlabIDLength, uint64(m.root.Header().size))

 	testMap(t, storage, typeInfo, address, m, keyValues, nil, false)
 }
@@ -16492,8 +16492,8 @@ func TestMapID(t *testing.T) {
 	sid := m.SlabID()
 	id := m.ValueID()

-	require.Equal(t, sid.address[:], id[:8])
-	require.Equal(t, sid.index[:], id[8:])
+	require.Equal(t, sid.address[:], id[:SlabAddressLength])
+	require.Equal(t, sid.index[:], id[SlabAddressLength:])
 }

 func TestSlabSizeWhenResettingMutableStorableInMap(t *testing.T) {
diff --git a/storable.go b/storable.go
index bd49ef3..7b8c179 100644
--- a/storable.go
+++ b/storable.go
@@ -180,7 +180,7 @@ func (v SlabIDStorable) Encode(enc *Encoder) error {
 	copy(enc.Scratch[:], v.address[:])
 	copy(enc.Scratch[8:], v.index[:])

-	err = enc.CBOR.EncodeBytes(enc.Scratch[:slabIDSize])
+	err = enc.CBOR.EncodeBytes(enc.Scratch[:SlabIDLength])
 	if err != nil {
 		return NewEncodingError(err)
 	}
@@ -190,7 +190,7 @@ func (v SlabIDStorable) Encode(enc *Encoder) error {

 func (v SlabIDStorable) ByteSize() uint32 {
 	// tag number (2 bytes) + byte string header (1 byte) + slab id (16 bytes)
-	return 2 + 1 + slabIDSize
+	return 2 + 1 + SlabIDLength
 }

 func (v SlabIDStorable) String() string {
diff --git a/storage.go b/storage.go
index f562ae8..01d709d 100644
--- a/storage.go
+++ b/storage.go
@@ -26,20 +26,26 @@ import (
 	"sort"
 	"strings"
 	"sync"
-	"unsafe"

 	"github.com/fxamacker/cbor/v2"
 )

 const LedgerBaseStorageSlabPrefix = "$"

+const (
+	SlabAddressLength = 8
+	SlabIndexLength   = 8
+	SlabIDLength      = SlabAddressLength + SlabIndexLength
+	ValueIDLength     = SlabIDLength
+)
+
 // ValueID identifies an Array or OrderedMap. ValueID is consistent
 // independent of inlining status, while ValueID and SlabID are used
 // differently despite having the same size and content under the hood.
 // By contrast, SlabID is affected by inlining because it identifies
 // a slab in storage. Given this, ValueID should be used for
 // resource tracking, etc.
-type ValueID [unsafe.Sizeof(Address{}) + unsafe.Sizeof(SlabIndex{})]byte
+type ValueID [ValueIDLength]byte

 var emptyValueID = ValueID{}
@@ -58,16 +64,16 @@ func (vid ValueID) equal(sid SlabID) bool {
 func (vid ValueID) String() string {
 	return fmt.Sprintf(
 		"0x%x.%d",
-		binary.BigEndian.Uint64(vid[:8]),
-		binary.BigEndian.Uint64(vid[8:]),
+		binary.BigEndian.Uint64(vid[:SlabAddressLength]),
+		binary.BigEndian.Uint64(vid[SlabAddressLength:]),
 	)
 }

 // WARNING: Any changes to SlabID or its components (Address and SlabIndex)
 // require updates to ValueID definition and functions.
 type (
-	Address   [8]byte
-	SlabIndex [8]byte
+	Address   [SlabAddressLength]byte
+	SlabIndex [SlabIndexLength]byte

 	// SlabID identifies slab in storage.
 	// SlabID should only be used to retrieve,
@@ -102,7 +108,7 @@ func NewSlabID(address Address, index SlabIndex) SlabID {
 }

 func NewSlabIDFromRawBytes(b []byte) (SlabID, error) {
-	if len(b) < slabIDSize {
+	if len(b) < SlabIDLength {
 		return SlabID{}, NewSlabIDErrorf("incorrect slab ID buffer length %d", len(b))
 	}

@@ -110,18 +116,18 @@ func NewSlabIDFromRawBytes(b []byte) (SlabID, error) {
 	copy(address[:], b)

 	var index SlabIndex
-	copy(index[:], b[8:])
+	copy(index[:], b[SlabAddressLength:])

 	return SlabID{address, index}, nil
 }

 func (id SlabID) ToRawBytes(b []byte) (int, error) {
-	if len(b) < slabIDSize {
+	if len(b) < SlabIDLength {
 		return 0, NewSlabIDErrorf("incorrect slab ID buffer length %d", len(b))
 	}

 	copy(b, id.address[:])
-	copy(b[8:], id.index[:])
-	return slabIDSize, nil
+	copy(b[SlabAddressLength:], id.index[:])
+	return SlabIDLength, nil
 }

 func (id SlabID) String() string {
diff --git a/storage_test.go b/storage_test.go
index 59906fe..95096b2 100644
--- a/storage_test.go
+++ b/storage_test.go
@@ -116,46 +116,46 @@ func TestSlabIDToRawBytes(t *testing.T) {
 	})

 	t.Run("undefined", func(t *testing.T) {
-		b := make([]byte, slabIDSize)
+		b := make([]byte, SlabIDLength)
 		size, err := SlabIDUndefined.ToRawBytes(b)
 		require.NoError(t, err)

 		want := []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
 		require.Equal(t, want, b)
-		require.Equal(t, slabIDSize, size)
+		require.Equal(t, SlabIDLength, size)
 	})

 	t.Run("temp address", func(t *testing.T) {
 		id := NewSlabID(Address{0, 0, 0, 0, 0, 0, 0, 0}, SlabIndex{0, 0, 0, 0, 0, 0, 0, 1})
-		b := make([]byte, slabIDSize)
+		b := make([]byte, SlabIDLength)
 		size, err := id.ToRawBytes(b)
 		require.NoError(t, err)

 		want := []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}
 		require.Equal(t, want, b)
-		require.Equal(t, slabIDSize, size)
+		require.Equal(t, SlabIDLength, size)
 	})

 	t.Run("temp index", func(t *testing.T) {
 		id := NewSlabID(Address{0, 0, 0, 0, 0, 0, 0, 1}, SlabIndex{0, 0, 0, 0, 0, 0, 0, 0})
-		b := make([]byte, slabIDSize)
+		b := make([]byte, SlabIDLength)
 		size, err := id.ToRawBytes(b)
 		require.NoError(t, err)

 		want := []byte{0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0}
 		require.Equal(t, want, b)
-		require.Equal(t, slabIDSize, size)
+		require.Equal(t, SlabIDLength, size)
 	})

 	t.Run("perm", func(t *testing.T) {
 		id := NewSlabID(Address{0, 0, 0, 0, 0, 0, 0, 1}, SlabIndex{0, 0, 0, 0, 0, 0, 0, 2})
-		b := make([]byte, slabIDSize)
+		b := make([]byte, SlabIDLength)
 		size, err := id.ToRawBytes(b)
 		require.NoError(t, err)

 		want := []byte{0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2}
 		require.Equal(t, want, b)
-		require.Equal(t, slabIDSize, size)
+		require.Equal(t, SlabIDLength, size)
 	})
 }
diff --git a/utils_test.go b/utils_test.go
index 1e5b980..4302b12 100644
--- a/utils_test.go
+++ b/utils_test.go
@@ -395,8 +395,8 @@ func mapEqual(t *testing.T, expected mapValue, actual *OrderedMap) {
 func valueIDToSlabID(vid ValueID) SlabID {
 	var id SlabID
-	copy(id.address[:], vid[:slabAddressSize])
-	copy(id.index[:], vid[slabAddressSize:])
+	copy(id.address[:], vid[:SlabAddressLength])
+	copy(id.index[:], vid[SlabAddressLength:])
 	return id
 }
@@ -411,16 +411,16 @@ func testNotInlinedMapIDs(t *testing.T, address Address, m *OrderedMap) {

 func testInlinedSlabIDAndValueID(t *testing.T, expectedAddress Address, slabID SlabID, valueID ValueID) {
 	require.Equal(t, SlabIDUndefined, slabID)

-	require.Equal(t, expectedAddress[:], valueID[:slabAddressSize])
-	require.NotEqual(t, SlabIndexUndefined[:], valueID[slabAddressSize:])
+	require.Equal(t, expectedAddress[:], valueID[:SlabAddressLength])
+	require.NotEqual(t, SlabIndexUndefined[:], valueID[SlabAddressLength:])
 }

 func testNotInlinedSlabIDAndValueID(t *testing.T, expectedAddress Address, slabID SlabID, valueID ValueID) {
 	require.Equal(t, expectedAddress, slabID.address)
 	require.NotEqual(t, SlabIndexUndefined, slabID.index)

-	require.Equal(t, slabID.address[:], valueID[:slabAddressSize])
-	require.Equal(t, slabID.index[:], valueID[slabAddressSize:])
+	require.Equal(t, slabID.address[:], valueID[:SlabAddressLength])
+	require.Equal(t, slabID.index[:], valueID[SlabAddressLength:])
 }

 type arrayValue []Value
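
Usage sketch (not part of the diff above): a minimal illustration of how downstream code can use the newly exported length constants, assuming the usual atree import path; the setup values are hypothetical.

// illustrative_usage.go (sketch, not included in this change)
package main

import (
	"fmt"

	"github.com/onflow/atree" // import path assumed for illustration
)

func main() {
	// Compose a SlabID and round-trip it through its raw-byte form,
	// sizing the buffer with the exported SlabIDLength constant
	// (SlabAddressLength + SlabIndexLength = 16 bytes).
	id := atree.NewSlabID(
		atree.Address{0, 0, 0, 0, 0, 0, 0, 1},
		atree.SlabIndex{0, 0, 0, 0, 0, 0, 0, 2},
	)

	buf := make([]byte, atree.SlabIDLength)
	if _, err := id.ToRawBytes(buf); err != nil {
		panic(err)
	}

	decoded, err := atree.NewSlabIDFromRawBytes(buf)
	if err != nil {
		panic(err)
	}
	fmt.Println(decoded == id) // true

	// A ValueID shares the same layout: the first SlabAddressLength bytes
	// hold the address and the remaining SlabIndexLength bytes hold the index.
	var vid atree.ValueID
	copy(vid[:atree.SlabAddressLength], buf[:atree.SlabAddressLength])
	copy(vid[atree.SlabAddressLength:], buf[atree.SlabAddressLength:])
	fmt.Println(len(vid) == atree.ValueIDLength) // true
}

The exported constants replace the previous unexported slabAddressSize, slabIndexSize, and slabIDSize values throughout the package, so internal size arithmetic and external callers now share a single definition.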