diff --git a/gen.go b/gen.go index 2c57612..de6fe1a 100644 --- a/gen.go +++ b/gen.go @@ -35,6 +35,15 @@ type Variant struct { StructSuffix string ExtraFileds string + // For fast string only. + // These fields are empty for other types. + FSHashResult string // hash := strhash(key) + FSHashResultS string // hash = strhash(key) + FSHashField string // hash uint64 + FSHashFieldAssign string // hash : 0, + FSHashParameter string // hash uint64, + FSHashArgument string // hash, + // Basic key and value type. KeyType string ValueType string @@ -178,6 +187,50 @@ func main() { generate(baseType) generate(baseTypeDesc) } + + // For NewStringFast. + fv := &Variant{ + Package: "skipmap", + Name: "stringfast", + Path: "gen_stringfast.go", + Imports: "\"sync\"\n\"sync/atomic\"\n\"unsafe\"\n", + KeyType: "string", + ValueType: "valueT", + TypeArgument: "[valueT]", + TypeParam: "[valueT any]", + StructPrefix: "String", + StructPrefixLow: "string", + StructSuffix: "Fast", + + FSHashResult: "hash := strhash(key)", + FSHashResultS: "hash = strhash(key)", + FSHashField: "hash uint64", + FSHashFieldAssign: "hash : hash,", + FSHashParameter: "hash uint64,", + FSHashArgument: "hash,", + + Funcs: template.FuncMap{ + "Less": func(i, j string) string { + // succ.key < key + // => + // succ.hash < hash + i = strings.ReplaceAll(i, "key", "hash") + j = strings.ReplaceAll(j, "key", "hash") + return fmt.Sprintf("(%s < %s)", i, j) + }, + "Equal": func(i, j string) string { + // succ.key == key + // => + // succ.hash == hash && succ.key == key + cond2 := fmt.Sprintf("%s == %s", i, j) + i = strings.ReplaceAll(i, "key", "hash") + j = strings.ReplaceAll(j, "key", "hash") + cond1 := fmt.Sprintf("%s == %s", i, j) + return fmt.Sprintf("%s && %s", cond1, cond2) + }, + }, + } + generate(fv) } // generate generates the code for variant `v` into a file named by `v.Path`. 
diff --git a/gen_func.go b/gen_func.go index f68a454..820d2ad 100644 --- a/gen_func.go +++ b/gen_func.go @@ -18,16 +18,17 @@ type FuncMap[keyT any, valueT any] struct { } type funcnode[keyT any, valueT any] struct { + key keyT value unsafe.Pointer // *any flags bitflag - key keyT - next optionalArray // [level]*funcnode - mu sync.Mutex level uint32 + mu sync.Mutex + next optionalArray // [level]*funcnode } func newFuncNode[keyT any, valueT any](key keyT, value valueT, level int) *funcnode[keyT, valueT] { node := &funcnode[keyT, valueT]{ + key: key, level: uint32(level), } @@ -89,6 +90,7 @@ func (s *FuncMap[keyT, valueT]) findNode(key keyT, preds *[maxLevel]*funcnode[ke func (s *FuncMap[keyT, valueT]) findNodeDelete(key keyT, preds *[maxLevel]*funcnode[keyT, valueT], succs *[maxLevel]*funcnode[keyT, valueT]) int { // lFound represents the index of the first layer at which it found a node. lFound, x := -1, s.header + for i := int(atomic.LoadUint64(&s.highestLevel)) - 1; i >= 0; i-- { succ := x.atomicLoadNext(i) for succ != nil && s.less(succ.key, key) { @@ -119,6 +121,7 @@ func unlockfunc[keyT any, valueT any](preds [maxLevel]*funcnode[keyT, valueT], h // Store sets the value for a key. func (s *FuncMap[keyT, valueT]) Store(key keyT, value valueT) { level := s.randomlevel() + var preds, succs [maxLevel]*funcnode[keyT, valueT] for { nodeFound := s.findNode(key, &preds, &succs) @@ -193,6 +196,7 @@ func (s *FuncMap[keyT, valueT]) randomlevel() int { // The ok result indicates whether value was found in the map. 
func (s *FuncMap[keyT, valueT]) Load(key keyT) (value valueT, ok bool) { x := s.header + for i := int(atomic.LoadUint64(&s.highestLevel)) - 1; i >= 0; i-- { nex := x.atomicLoadNext(i) for nex != nil && s.less(nex.key, key) { diff --git a/gen_int.go b/gen_int.go index af045fd..43688b1 100644 --- a/gen_int.go +++ b/gen_int.go @@ -16,16 +16,17 @@ type IntMap[valueT any] struct { } type intnode[valueT any] struct { + key int value unsafe.Pointer // *any flags bitflag - key int - next optionalArray // [level]*intnode - mu sync.Mutex level uint32 + mu sync.Mutex + next optionalArray // [level]*intnode } func newIntNode[valueT any](key int, value valueT, level int) *intnode[valueT] { node := &intnode[valueT]{ + key: key, level: uint32(level), } @@ -87,6 +88,7 @@ func (s *IntMap[valueT]) findNode(key int, preds *[maxLevel]*intnode[valueT], su func (s *IntMap[valueT]) findNodeDelete(key int, preds *[maxLevel]*intnode[valueT], succs *[maxLevel]*intnode[valueT]) int { // lFound represents the index of the first layer at which it found a node. lFound, x := -1, s.header + for i := int(atomic.LoadUint64(&s.highestLevel)) - 1; i >= 0; i-- { succ := x.atomicLoadNext(i) for succ != nil && (succ.key < key) { @@ -117,6 +119,7 @@ func unlockint[valueT any](preds [maxLevel]*intnode[valueT], highestLevel int) { // Store sets the value for a key. func (s *IntMap[valueT]) Store(key int, value valueT) { level := s.randomlevel() + var preds, succs [maxLevel]*intnode[valueT] for { nodeFound := s.findNode(key, &preds, &succs) @@ -191,6 +194,7 @@ func (s *IntMap[valueT]) randomlevel() int { // The ok result indicates whether value was found in the map. 
func (s *IntMap[valueT]) Load(key int) (value valueT, ok bool) { x := s.header + for i := int(atomic.LoadUint64(&s.highestLevel)) - 1; i >= 0; i-- { nex := x.atomicLoadNext(i) for nex != nil && (nex.key < key) { diff --git a/gen_int32.go b/gen_int32.go index fef7a96..e36b494 100644 --- a/gen_int32.go +++ b/gen_int32.go @@ -16,16 +16,17 @@ type Int32Map[valueT any] struct { } type int32node[valueT any] struct { + key int32 value unsafe.Pointer // *any flags bitflag - key int32 - next optionalArray // [level]*int32node - mu sync.Mutex level uint32 + mu sync.Mutex + next optionalArray // [level]*int32node } func newInt32Node[valueT any](key int32, value valueT, level int) *int32node[valueT] { node := &int32node[valueT]{ + key: key, level: uint32(level), } @@ -87,6 +88,7 @@ func (s *Int32Map[valueT]) findNode(key int32, preds *[maxLevel]*int32node[value func (s *Int32Map[valueT]) findNodeDelete(key int32, preds *[maxLevel]*int32node[valueT], succs *[maxLevel]*int32node[valueT]) int { // lFound represents the index of the first layer at which it found a node. lFound, x := -1, s.header + for i := int(atomic.LoadUint64(&s.highestLevel)) - 1; i >= 0; i-- { succ := x.atomicLoadNext(i) for succ != nil && (succ.key < key) { @@ -117,6 +119,7 @@ func unlockint32[valueT any](preds [maxLevel]*int32node[valueT], highestLevel in // Store sets the value for a key. func (s *Int32Map[valueT]) Store(key int32, value valueT) { level := s.randomlevel() + var preds, succs [maxLevel]*int32node[valueT] for { nodeFound := s.findNode(key, &preds, &succs) @@ -191,6 +194,7 @@ func (s *Int32Map[valueT]) randomlevel() int { // The ok result indicates whether value was found in the map. 
func (s *Int32Map[valueT]) Load(key int32) (value valueT, ok bool) { x := s.header + for i := int(atomic.LoadUint64(&s.highestLevel)) - 1; i >= 0; i-- { nex := x.atomicLoadNext(i) for nex != nil && (nex.key < key) { diff --git a/gen_int32desc.go b/gen_int32desc.go index de08ca5..5cc4dbd 100644 --- a/gen_int32desc.go +++ b/gen_int32desc.go @@ -16,16 +16,17 @@ type Int32MapDesc[valueT any] struct { } type int32nodeDesc[valueT any] struct { + key int32 value unsafe.Pointer // *any flags bitflag - key int32 - next optionalArray // [level]*int32nodeDesc - mu sync.Mutex level uint32 + mu sync.Mutex + next optionalArray // [level]*int32nodeDesc } func newInt32NodeDesc[valueT any](key int32, value valueT, level int) *int32nodeDesc[valueT] { node := &int32nodeDesc[valueT]{ + key: key, level: uint32(level), } @@ -87,6 +88,7 @@ func (s *Int32MapDesc[valueT]) findNode(key int32, preds *[maxLevel]*int32nodeDe func (s *Int32MapDesc[valueT]) findNodeDelete(key int32, preds *[maxLevel]*int32nodeDesc[valueT], succs *[maxLevel]*int32nodeDesc[valueT]) int { // lFound represents the index of the first layer at which it found a node. lFound, x := -1, s.header + for i := int(atomic.LoadUint64(&s.highestLevel)) - 1; i >= 0; i-- { succ := x.atomicLoadNext(i) for succ != nil && (succ.key > key) { @@ -117,6 +119,7 @@ func unlockint32Desc[valueT any](preds [maxLevel]*int32nodeDesc[valueT], highest // Store sets the value for a key. func (s *Int32MapDesc[valueT]) Store(key int32, value valueT) { level := s.randomlevel() + var preds, succs [maxLevel]*int32nodeDesc[valueT] for { nodeFound := s.findNode(key, &preds, &succs) @@ -191,6 +194,7 @@ func (s *Int32MapDesc[valueT]) randomlevel() int { // The ok result indicates whether value was found in the map. 
func (s *Int32MapDesc[valueT]) Load(key int32) (value valueT, ok bool) { x := s.header + for i := int(atomic.LoadUint64(&s.highestLevel)) - 1; i >= 0; i-- { nex := x.atomicLoadNext(i) for nex != nil && (nex.key > key) { diff --git a/gen_int64.go b/gen_int64.go index 8f8711a..0b98b8a 100644 --- a/gen_int64.go +++ b/gen_int64.go @@ -16,16 +16,17 @@ type Int64Map[valueT any] struct { } type int64node[valueT any] struct { + key int64 value unsafe.Pointer // *any flags bitflag - key int64 - next optionalArray // [level]*int64node - mu sync.Mutex level uint32 + mu sync.Mutex + next optionalArray // [level]*int64node } func newInt64Node[valueT any](key int64, value valueT, level int) *int64node[valueT] { node := &int64node[valueT]{ + key: key, level: uint32(level), } @@ -87,6 +88,7 @@ func (s *Int64Map[valueT]) findNode(key int64, preds *[maxLevel]*int64node[value func (s *Int64Map[valueT]) findNodeDelete(key int64, preds *[maxLevel]*int64node[valueT], succs *[maxLevel]*int64node[valueT]) int { // lFound represents the index of the first layer at which it found a node. lFound, x := -1, s.header + for i := int(atomic.LoadUint64(&s.highestLevel)) - 1; i >= 0; i-- { succ := x.atomicLoadNext(i) for succ != nil && (succ.key < key) { @@ -117,6 +119,7 @@ func unlockint64[valueT any](preds [maxLevel]*int64node[valueT], highestLevel in // Store sets the value for a key. func (s *Int64Map[valueT]) Store(key int64, value valueT) { level := s.randomlevel() + var preds, succs [maxLevel]*int64node[valueT] for { nodeFound := s.findNode(key, &preds, &succs) @@ -191,6 +194,7 @@ func (s *Int64Map[valueT]) randomlevel() int { // The ok result indicates whether value was found in the map. 
func (s *Int64Map[valueT]) Load(key int64) (value valueT, ok bool) { x := s.header + for i := int(atomic.LoadUint64(&s.highestLevel)) - 1; i >= 0; i-- { nex := x.atomicLoadNext(i) for nex != nil && (nex.key < key) { diff --git a/gen_int64desc.go b/gen_int64desc.go index c1f1e46..7a95e51 100644 --- a/gen_int64desc.go +++ b/gen_int64desc.go @@ -16,16 +16,17 @@ type Int64MapDesc[valueT any] struct { } type int64nodeDesc[valueT any] struct { + key int64 value unsafe.Pointer // *any flags bitflag - key int64 - next optionalArray // [level]*int64nodeDesc - mu sync.Mutex level uint32 + mu sync.Mutex + next optionalArray // [level]*int64nodeDesc } func newInt64NodeDesc[valueT any](key int64, value valueT, level int) *int64nodeDesc[valueT] { node := &int64nodeDesc[valueT]{ + key: key, level: uint32(level), } @@ -87,6 +88,7 @@ func (s *Int64MapDesc[valueT]) findNode(key int64, preds *[maxLevel]*int64nodeDe func (s *Int64MapDesc[valueT]) findNodeDelete(key int64, preds *[maxLevel]*int64nodeDesc[valueT], succs *[maxLevel]*int64nodeDesc[valueT]) int { // lFound represents the index of the first layer at which it found a node. lFound, x := -1, s.header + for i := int(atomic.LoadUint64(&s.highestLevel)) - 1; i >= 0; i-- { succ := x.atomicLoadNext(i) for succ != nil && (succ.key > key) { @@ -117,6 +119,7 @@ func unlockint64Desc[valueT any](preds [maxLevel]*int64nodeDesc[valueT], highest // Store sets the value for a key. func (s *Int64MapDesc[valueT]) Store(key int64, value valueT) { level := s.randomlevel() + var preds, succs [maxLevel]*int64nodeDesc[valueT] for { nodeFound := s.findNode(key, &preds, &succs) @@ -191,6 +194,7 @@ func (s *Int64MapDesc[valueT]) randomlevel() int { // The ok result indicates whether value was found in the map. 
func (s *Int64MapDesc[valueT]) Load(key int64) (value valueT, ok bool) { x := s.header + for i := int(atomic.LoadUint64(&s.highestLevel)) - 1; i >= 0; i-- { nex := x.atomicLoadNext(i) for nex != nil && (nex.key > key) { diff --git a/gen_intdesc.go b/gen_intdesc.go index d02db36..493e964 100644 --- a/gen_intdesc.go +++ b/gen_intdesc.go @@ -16,16 +16,17 @@ type IntMapDesc[valueT any] struct { } type intnodeDesc[valueT any] struct { + key int value unsafe.Pointer // *any flags bitflag - key int - next optionalArray // [level]*intnodeDesc - mu sync.Mutex level uint32 + mu sync.Mutex + next optionalArray // [level]*intnodeDesc } func newIntNodeDesc[valueT any](key int, value valueT, level int) *intnodeDesc[valueT] { node := &intnodeDesc[valueT]{ + key: key, level: uint32(level), } @@ -87,6 +88,7 @@ func (s *IntMapDesc[valueT]) findNode(key int, preds *[maxLevel]*intnodeDesc[val func (s *IntMapDesc[valueT]) findNodeDelete(key int, preds *[maxLevel]*intnodeDesc[valueT], succs *[maxLevel]*intnodeDesc[valueT]) int { // lFound represents the index of the first layer at which it found a node. lFound, x := -1, s.header + for i := int(atomic.LoadUint64(&s.highestLevel)) - 1; i >= 0; i-- { succ := x.atomicLoadNext(i) for succ != nil && (succ.key > key) { @@ -117,6 +119,7 @@ func unlockintDesc[valueT any](preds [maxLevel]*intnodeDesc[valueT], highestLeve // Store sets the value for a key. func (s *IntMapDesc[valueT]) Store(key int, value valueT) { level := s.randomlevel() + var preds, succs [maxLevel]*intnodeDesc[valueT] for { nodeFound := s.findNode(key, &preds, &succs) @@ -191,6 +194,7 @@ func (s *IntMapDesc[valueT]) randomlevel() int { // The ok result indicates whether value was found in the map. 
func (s *IntMapDesc[valueT]) Load(key int) (value valueT, ok bool) { x := s.header + for i := int(atomic.LoadUint64(&s.highestLevel)) - 1; i >= 0; i-- { nex := x.atomicLoadNext(i) for nex != nil && (nex.key > key) { diff --git a/gen_ordered.go b/gen_ordered.go index f35b331..c27dfcc 100644 --- a/gen_ordered.go +++ b/gen_ordered.go @@ -16,16 +16,17 @@ type OrderedMap[keyT ordered, valueT any] struct { } type orderednode[keyT ordered, valueT any] struct { + key keyT value unsafe.Pointer // *any flags bitflag - key keyT - next optionalArray // [level]*orderednode - mu sync.Mutex level uint32 + mu sync.Mutex + next optionalArray // [level]*orderednode } func newOrderedNode[keyT ordered, valueT any](key keyT, value valueT, level int) *orderednode[keyT, valueT] { node := &orderednode[keyT, valueT]{ + key: key, level: uint32(level), } @@ -87,6 +88,7 @@ func (s *OrderedMap[keyT, valueT]) findNode(key keyT, preds *[maxLevel]*orderedn func (s *OrderedMap[keyT, valueT]) findNodeDelete(key keyT, preds *[maxLevel]*orderednode[keyT, valueT], succs *[maxLevel]*orderednode[keyT, valueT]) int { // lFound represents the index of the first layer at which it found a node. lFound, x := -1, s.header + for i := int(atomic.LoadUint64(&s.highestLevel)) - 1; i >= 0; i-- { succ := x.atomicLoadNext(i) for succ != nil && (succ.key < key) { @@ -117,6 +119,7 @@ func unlockordered[keyT ordered, valueT any](preds [maxLevel]*orderednode[keyT, // Store sets the value for a key. func (s *OrderedMap[keyT, valueT]) Store(key keyT, value valueT) { level := s.randomlevel() + var preds, succs [maxLevel]*orderednode[keyT, valueT] for { nodeFound := s.findNode(key, &preds, &succs) @@ -191,6 +194,7 @@ func (s *OrderedMap[keyT, valueT]) randomlevel() int { // The ok result indicates whether value was found in the map. 
func (s *OrderedMap[keyT, valueT]) Load(key keyT) (value valueT, ok bool) { x := s.header + for i := int(atomic.LoadUint64(&s.highestLevel)) - 1; i >= 0; i-- { nex := x.atomicLoadNext(i) for nex != nil && (nex.key < key) { diff --git a/gen_ordereddesc.go b/gen_ordereddesc.go index 5923d84..e760ff8 100644 --- a/gen_ordereddesc.go +++ b/gen_ordereddesc.go @@ -16,16 +16,17 @@ type OrderedMapDesc[keyT ordered, valueT any] struct { } type orderednodeDesc[keyT ordered, valueT any] struct { + key keyT value unsafe.Pointer // *any flags bitflag - key keyT - next optionalArray // [level]*orderednodeDesc - mu sync.Mutex level uint32 + mu sync.Mutex + next optionalArray // [level]*orderednodeDesc } func newOrderedNodeDesc[keyT ordered, valueT any](key keyT, value valueT, level int) *orderednodeDesc[keyT, valueT] { node := &orderednodeDesc[keyT, valueT]{ + key: key, level: uint32(level), } @@ -87,6 +88,7 @@ func (s *OrderedMapDesc[keyT, valueT]) findNode(key keyT, preds *[maxLevel]*orde func (s *OrderedMapDesc[keyT, valueT]) findNodeDelete(key keyT, preds *[maxLevel]*orderednodeDesc[keyT, valueT], succs *[maxLevel]*orderednodeDesc[keyT, valueT]) int { // lFound represents the index of the first layer at which it found a node. lFound, x := -1, s.header + for i := int(atomic.LoadUint64(&s.highestLevel)) - 1; i >= 0; i-- { succ := x.atomicLoadNext(i) for succ != nil && (succ.key > key) { @@ -117,6 +119,7 @@ func unlockorderedDesc[keyT ordered, valueT any](preds [maxLevel]*orderednodeDes // Store sets the value for a key. func (s *OrderedMapDesc[keyT, valueT]) Store(key keyT, value valueT) { level := s.randomlevel() + var preds, succs [maxLevel]*orderednodeDesc[keyT, valueT] for { nodeFound := s.findNode(key, &preds, &succs) @@ -191,6 +194,7 @@ func (s *OrderedMapDesc[keyT, valueT]) randomlevel() int { // The ok result indicates whether value was found in the map. 
func (s *OrderedMapDesc[keyT, valueT]) Load(key keyT) (value valueT, ok bool) { x := s.header + for i := int(atomic.LoadUint64(&s.highestLevel)) - 1; i >= 0; i-- { nex := x.atomicLoadNext(i) for nex != nil && (nex.key > key) { diff --git a/gen_string.go b/gen_string.go index 3f26be7..6bfef8a 100644 --- a/gen_string.go +++ b/gen_string.go @@ -16,16 +16,17 @@ type StringMap[valueT any] struct { } type stringnode[valueT any] struct { + key string value unsafe.Pointer // *any flags bitflag - key string - next optionalArray // [level]*stringnode - mu sync.Mutex level uint32 + mu sync.Mutex + next optionalArray // [level]*stringnode } func newStringNode[valueT any](key string, value valueT, level int) *stringnode[valueT] { node := &stringnode[valueT]{ + key: key, level: uint32(level), } @@ -87,6 +88,7 @@ func (s *StringMap[valueT]) findNode(key string, preds *[maxLevel]*stringnode[va func (s *StringMap[valueT]) findNodeDelete(key string, preds *[maxLevel]*stringnode[valueT], succs *[maxLevel]*stringnode[valueT]) int { // lFound represents the index of the first layer at which it found a node. lFound, x := -1, s.header + for i := int(atomic.LoadUint64(&s.highestLevel)) - 1; i >= 0; i-- { succ := x.atomicLoadNext(i) for succ != nil && (succ.key < key) { @@ -117,6 +119,7 @@ func unlockstring[valueT any](preds [maxLevel]*stringnode[valueT], highestLevel // Store sets the value for a key. func (s *StringMap[valueT]) Store(key string, value valueT) { level := s.randomlevel() + var preds, succs [maxLevel]*stringnode[valueT] for { nodeFound := s.findNode(key, &preds, &succs) @@ -191,6 +194,7 @@ func (s *StringMap[valueT]) randomlevel() int { // The ok result indicates whether value was found in the map. 
func (s *StringMap[valueT]) Load(key string) (value valueT, ok bool) { x := s.header + for i := int(atomic.LoadUint64(&s.highestLevel)) - 1; i >= 0; i-- { nex := x.atomicLoadNext(i) for nex != nil && (nex.key < key) { diff --git a/gen_string2.go b/gen_string2.go new file mode 100644 index 0000000..92c9bf3 --- /dev/null +++ b/gen_string2.go @@ -0,0 +1,520 @@ +package skipmap + +import ( + "sync" + "sync/atomic" + "unsafe" +) + +// NewString2 returns an empty skipmap in ascending order. +func NewString2[valueT any]() *StringMap2[valueT] { + var t valueT + h := newStringNode2(0, "", t, maxLevel) + h.flags.SetTrue(fullyLinked) + return &StringMap2[valueT]{ + header: h, + highestLevel: defaultHighestLevel, + } +} + +// StringMap2 represents a map based on skip list. +type StringMap2[valueT any] struct { + length int64 + highestLevel uint64 // highest level for now + header *stringnode2[valueT] +} + +type stringnode2[valueT any] struct { + hash uint64 + key string + value unsafe.Pointer // *any + flags bitflag + level uint32 + mu sync.Mutex + next optionalArray // [level]*stringnode +} + +func newStringNode2[valueT any](hash uint64, key string, value valueT, level int) *stringnode2[valueT] { + node := &stringnode2[valueT]{ + hash: hash, + key: key, + level: uint32(level), + } + node.storeVal(value) + if level > op1 { + node.next.extra = new([op2]unsafe.Pointer) + } + return node +} + +func (n *stringnode2[valueT]) storeVal(value valueT) { + atomic.StorePointer(&n.value, unsafe.Pointer(&value)) +} + +func (n *stringnode2[valueT]) loadVal() valueT { + return *(*valueT)(atomic.LoadPointer(&n.value)) +} + +func (n *stringnode2[valueT]) loadNext(i int) *stringnode2[valueT] { + return (*stringnode2[valueT])(n.next.load(i)) +} + +func (n *stringnode2[valueT]) storeNext(i int, node *stringnode2[valueT]) { + n.next.store(i, unsafe.Pointer(node)) +} + +func (n *stringnode2[valueT]) atomicLoadNext(i int) *stringnode2[valueT] { + return (*stringnode2[valueT])(n.next.atomicLoad(i)) +} 
+ +func (n *stringnode2[valueT]) atomicStoreNext(i int, node *stringnode2[valueT]) { + n.next.atomicStore(i, unsafe.Pointer(node)) +} + +// findNode takes a key and two maximal-height arrays then searches exactly as in a sequential skipmap. +// The returned preds and succs always satisfy preds[i] > key >= succs[i]. +// (without fullpath, if find the node will return immediately) +func (s *StringMap2[valueT]) findNode(hash uint64, key string, preds *[maxLevel]*stringnode2[valueT], succs *[maxLevel]*stringnode2[valueT]) *stringnode2[valueT] { + x := s.header + for i := int(atomic.LoadUint64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && (succ.hash < hash) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the key already in the skipmap. + if succ != nil && succ.hash == hash && succ.key == key { + return succ + } + } + return nil +} + +// findNodeDelete takes a key and two maximal-height arrays then searches exactly as in a sequential skip-list. +// The returned preds and succs always satisfy preds[i] > key >= succs[i]. +func (s *StringMap2[valueT]) findNodeDelete(key string, preds *[maxLevel]*stringnode2[valueT], succs *[maxLevel]*stringnode2[valueT]) int { + // lFound represents the index of the first layer at which it found a node. + lFound, x := -1, s.header + hash := strhash(key) + for i := int(atomic.LoadUint64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && (succ.hash < hash) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the key already in the skip list. 
+ if lFound == -1 && succ != nil && succ.hash == hash && succ.key == key { + lFound = i + } + } + return lFound +} + +func unlockstring2[valueT any](preds [maxLevel]*stringnode2[valueT], highestLevel int) { + var prevPred *stringnode2[valueT] + for i := highestLevel; i >= 0; i-- { + if preds[i] != prevPred { // the node could be unlocked by previous loop + preds[i].mu.Unlock() + prevPred = preds[i] + } + } +} + +// Store sets the value for a key. +func (s *StringMap2[valueT]) Store(key string, value valueT) { + level := s.randomlevel() + hash := strhash(key) + var preds, succs [maxLevel]*stringnode2[valueT] + for { + nodeFound := s.findNode(hash, key, &preds, &succs) + if nodeFound != nil { // indicating the key is already in the skip-list + if !nodeFound.flags.Get(marked) { + // We don't need to care about whether or not the node is fully linked, + // just replace the value. + nodeFound.storeVal(value) + return + } + // If the node is marked, represents some other goroutines is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + + // Add this node into skip list. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *stringnode2[valueT] + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. 
+ valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ + } + if !valid { + unlockstring2(preds, highestLocked) + continue + } + + nn := newStringNode2(hash, key, value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockstring2(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + return + } +} + +// randomlevel returns a random level and update the highest level if needed. +func (s *StringMap2[valueT]) randomlevel() int { + // Generate random level. + level := randomLevel() + // Update highest level if possible. + for { + hl := atomic.LoadUint64(&s.highestLevel) + if uint64(level) <= hl { + break + } + if atomic.CompareAndSwapUint64(&s.highestLevel, hl, uint64(level)) { + break + } + } + return level +} + +// Load returns the value stored in the map for a key, or nil if no +// value is present. +// The ok result indicates whether value was found in the map. +func (s *StringMap2[valueT]) Load(key string) (value valueT, ok bool) { + x := s.header + hash := strhash(key) + for i := int(atomic.LoadUint64(&s.highestLevel)) - 1; i >= 0; i-- { + nex := x.atomicLoadNext(i) + for nex != nil && (nex.hash < hash) { + x = nex + nex = x.atomicLoadNext(i) + } + + // Check if the key already in the skip list. + if nex != nil && nex.hash == hash && nex.key == key { + if nex.flags.MGet(fullyLinked|marked, fullyLinked) { + return nex.loadVal(), true + } + return + } + } + return +} + +// LoadAndDelete deletes the value for a key, returning the previous value if any. +// The loaded result reports whether the key was present. 
+// (Modified from Delete) +func (s *StringMap2[valueT]) LoadAndDelete(key string) (value valueT, loaded bool) { + var ( + nodeToDelete *stringnode2[valueT] + isMarked bool // represents if this operation mark the node + topLayer = -1 + preds, succs [maxLevel]*stringnode2[valueT] + ) + for { + lFound := s.findNodeDelete(key, &preds, &succs) + if isMarked || // this process mark this node or we can find this node in the skip list + lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound { + if !isMarked { // we don't mark this node for now + nodeToDelete = succs[lFound] + topLayer = lFound + nodeToDelete.mu.Lock() + if nodeToDelete.flags.Get(marked) { + // The node is marked by another process, + // the physical deletion will be accomplished by another process. + nodeToDelete.mu.Unlock() + return + } + nodeToDelete.flags.SetTrue(marked) + isMarked = true + } + // Accomplish the physical deletion. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *stringnode2[valueT] + ) + for layer := 0; valid && (layer <= topLayer); layer++ { + pred, succ = preds[layer], succs[layer] + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer + // during this process, or the previous is deleted by another process. + // It is valid if: + // 1. the previous node exists. + // 2. no another node has inserted into the skip list in this layer. + valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ + } + if !valid { + unlockstring2(preds, highestLocked) + continue + } + for i := topLayer; i >= 0; i-- { + // Now we own the `nodeToDelete`, no other goroutine will modify it. 
+ // So we don't need `nodeToDelete.loadNext` + preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i)) + } + nodeToDelete.mu.Unlock() + unlockstring2(preds, highestLocked) + atomic.AddInt64(&s.length, -1) + return nodeToDelete.loadVal(), true + } + return + } +} + +// LoadOrStore returns the existing value for the key if present. +// Otherwise, it stores and returns the given value. +// The loaded result is true if the value was loaded, false if stored. +// (Modified from Store) +func (s *StringMap2[valueT]) LoadOrStore(key string, value valueT) (actual valueT, loaded bool) { + var ( + level int + preds, succs [maxLevel]*stringnode2[valueT] + hl = int(atomic.LoadUint64(&s.highestLevel)) + hash = strhash(key) + ) + for { + nodeFound := s.findNode(hash, key, &preds, &succs) + if nodeFound != nil { // indicating the key is already in the skip-list + if !nodeFound.flags.Get(marked) { + // We don't need to care about whether or not the node is fully linked, + // just return the value. + return nodeFound.loadVal(), true + } + // If the node is marked, represents some other goroutines is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + + // Add this node into skip list. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *stringnode2[valueT] + ) + if level == 0 { + level = s.randomlevel() + if level > hl { + // If the highest level is updated, usually means that many goroutines + // are inserting items. Hopefully we can find a better path in next loop. + // TODO(zyh): consider filling the preds if s.header[level].next == nil, + // but this strategy's performance is almost the same as the existing method. 
+ continue + } + } + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ + } + if !valid { + unlockstring2(preds, highestLocked) + continue + } + + nn := newStringNode2(hash, key, value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockstring2(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + return value, false + } +} + +// LoadOrStoreLazy returns the existing value for the key if present. +// Otherwise, it stores and returns the given value from f, f will only be called once. +// The loaded result is true if the value was loaded, false if stored. +// (Modified from LoadOrStore) +func (s *StringMap2[valueT]) LoadOrStoreLazy(key string, f func() valueT) (actual valueT, loaded bool) { + var ( + level int + preds, succs [maxLevel]*stringnode2[valueT] + hl = int(atomic.LoadUint64(&s.highestLevel)) + hash = strhash(key) + ) + for { + nodeFound := s.findNode(hash, key, &preds, &succs) + if nodeFound != nil { // indicating the key is already in the skip-list + if !nodeFound.flags.Get(marked) { + // We don't need to care about whether or not the node is fully linked, + // just return the value. 
+ return nodeFound.loadVal(), true + } + // If the node is marked, represents some other goroutines is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + + // Add this node into skip list. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *stringnode2[valueT] + ) + if level == 0 { + level = s.randomlevel() + if level > hl { + // If the highest level is updated, usually means that many goroutines + // are inserting items. Hopefully we can find a better path in next loop. + // TODO(zyh): consider filling the preds if s.header[level].next == nil, + // but this strategy's performance is almost the same as the existing method. + continue + } + } + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ && (succ == nil || !succ.flags.Get(marked)) + } + if !valid { + unlockstring2(preds, highestLocked) + continue + } + value := f() + nn := newStringNode2(hash, key, value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockstring2(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + return value, false + } +} + +// Delete deletes the value for a key. 
+func (s *StringMap2[valueT]) Delete(key string) bool { + var ( + nodeToDelete *stringnode2[valueT] + isMarked bool // represents if this operation mark the node + topLayer = -1 + preds, succs [maxLevel]*stringnode2[valueT] + ) + for { + lFound := s.findNodeDelete(key, &preds, &succs) + if isMarked || // this process mark this node or we can find this node in the skip list + lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound { + if !isMarked { // we don't mark this node for now + nodeToDelete = succs[lFound] + topLayer = lFound + nodeToDelete.mu.Lock() + if nodeToDelete.flags.Get(marked) { + // The node is marked by another process, + // the physical deletion will be accomplished by another process. + nodeToDelete.mu.Unlock() + return false + } + nodeToDelete.flags.SetTrue(marked) + isMarked = true + } + // Accomplish the physical deletion. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *stringnode2[valueT] + ) + for layer := 0; valid && (layer <= topLayer); layer++ { + pred, succ = preds[layer], succs[layer] + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer + // during this process, or the previous is deleted by another process. + // It is valid if: + // 1. the previous node exists. + // 2. no another node has inserted into the skip list in this layer. + valid = !pred.flags.Get(marked) && pred.atomicLoadNext(layer) == succ + } + if !valid { + unlockstring2(preds, highestLocked) + continue + } + for i := topLayer; i >= 0; i-- { + // Now we own the `nodeToDelete`, no other goroutine will modify it. 
+ // So we don't need `nodeToDelete.loadNext` + preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i)) + } + nodeToDelete.mu.Unlock() + unlockstring2(preds, highestLocked) + atomic.AddInt64(&s.length, -1) + return true + } + return false + } +} + +// Range calls f sequentially for each key and value present in the skipmap. +// If f returns false, range stops the iteration. +// +// Range does not necessarily correspond to any consistent snapshot of the Map's +// contents: no key will be visited more than once, but if the value for any key +// is stored or deleted concurrently, Range may reflect any mapping for that key +// from any point during the Range call. +func (s *StringMap2[valueT]) Range(f func(key string, value valueT) bool) { + x := s.header.atomicLoadNext(0) + for x != nil { + if !x.flags.MGet(fullyLinked|marked, fullyLinked) { + x = x.atomicLoadNext(0) + continue + } + if !f(x.key, x.loadVal()) { + break + } + x = x.atomicLoadNext(0) + } +} + +// Len returns the length of this skipmap. 
+func (s *StringMap2[valueT]) Len() int { + return int(atomic.LoadInt64(&s.length)) +} diff --git a/gen_stringdesc.go b/gen_stringdesc.go index 6609315..e87a1ce 100644 --- a/gen_stringdesc.go +++ b/gen_stringdesc.go @@ -16,16 +16,17 @@ type StringMapDesc[valueT any] struct { } type stringnodeDesc[valueT any] struct { + key string value unsafe.Pointer // *any flags bitflag - key string - next optionalArray // [level]*stringnodeDesc - mu sync.Mutex level uint32 + mu sync.Mutex + next optionalArray // [level]*stringnodeDesc } func newStringNodeDesc[valueT any](key string, value valueT, level int) *stringnodeDesc[valueT] { node := &stringnodeDesc[valueT]{ + key: key, level: uint32(level), } @@ -87,6 +88,7 @@ func (s *StringMapDesc[valueT]) findNode(key string, preds *[maxLevel]*stringnod func (s *StringMapDesc[valueT]) findNodeDelete(key string, preds *[maxLevel]*stringnodeDesc[valueT], succs *[maxLevel]*stringnodeDesc[valueT]) int { // lFound represents the index of the first layer at which it found a node. lFound, x := -1, s.header + for i := int(atomic.LoadUint64(&s.highestLevel)) - 1; i >= 0; i-- { succ := x.atomicLoadNext(i) for succ != nil && (succ.key > key) { @@ -117,6 +119,7 @@ func unlockstringDesc[valueT any](preds [maxLevel]*stringnodeDesc[valueT], highe // Store sets the value for a key. func (s *StringMapDesc[valueT]) Store(key string, value valueT) { level := s.randomlevel() + var preds, succs [maxLevel]*stringnodeDesc[valueT] for { nodeFound := s.findNode(key, &preds, &succs) @@ -191,6 +194,7 @@ func (s *StringMapDesc[valueT]) randomlevel() int { // The ok result indicates whether value was found in the map. 
func (s *StringMapDesc[valueT]) Load(key string) (value valueT, ok bool) { x := s.header + for i := int(atomic.LoadUint64(&s.highestLevel)) - 1; i >= 0; i-- { nex := x.atomicLoadNext(i) for nex != nil && (nex.key > key) { diff --git a/gen_stringfast.go b/gen_stringfast.go new file mode 100644 index 0000000..7f73a56 --- /dev/null +++ b/gen_stringfast.go @@ -0,0 +1,511 @@ +// Code generated by gen.go; DO NOT EDIT. + +package skipmap + +import ( + "sync" + "sync/atomic" + "unsafe" +) + +// StringMapFast represents a map based on skip list. +type StringMapFast[valueT any] struct { + length int64 + highestLevel uint64 // highest level for now + header *stringnodeFast[valueT] +} + +type stringnodeFast[valueT any] struct { + hash uint64 + key string + value unsafe.Pointer // *any + flags bitflag + level uint32 + mu sync.Mutex + next optionalArray // [level]*stringnodeFast +} + +func newStringNodeFast[valueT any](hash uint64, key string, value valueT, level int) *stringnodeFast[valueT] { + node := &stringnodeFast[valueT]{ + hash: hash, + key: key, + level: uint32(level), + } + node.storeVal(value) + if level > op1 { + node.next.extra = new([op2]unsafe.Pointer) + } + return node +} + +func (n *stringnodeFast[valueT]) storeVal(value valueT) { + atomic.StorePointer(&n.value, unsafe.Pointer(&value)) +} + +func (n *stringnodeFast[valueT]) loadVal() valueT { + return *(*valueT)(atomic.LoadPointer(&n.value)) +} + +func (n *stringnodeFast[valueT]) loadNext(i int) *stringnodeFast[valueT] { + return (*stringnodeFast[valueT])(n.next.load(i)) +} + +func (n *stringnodeFast[valueT]) storeNext(i int, node *stringnodeFast[valueT]) { + n.next.store(i, unsafe.Pointer(node)) +} + +func (n *stringnodeFast[valueT]) atomicLoadNext(i int) *stringnodeFast[valueT] { + return (*stringnodeFast[valueT])(n.next.atomicLoad(i)) +} + +func (n *stringnodeFast[valueT]) atomicStoreNext(i int, node *stringnodeFast[valueT]) { + n.next.atomicStore(i, unsafe.Pointer(node)) +} + +// findNode takes a key and two 
maximal-height arrays then searches exactly as in a sequential skipmap. +// The returned preds and succs always satisfy preds[i] > key >= succs[i]. +// (without fullpath, if find the node will return immediately) +func (s *StringMapFast[valueT]) findNode(hash uint64, key string, preds *[maxLevel]*stringnodeFast[valueT], succs *[maxLevel]*stringnodeFast[valueT]) *stringnodeFast[valueT] { + x := s.header + for i := int(atomic.LoadUint64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && (succ.hash < hash) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the key already in the skipmap. + if succ != nil && succ.hash == hash && succ.key == key { + return succ + } + } + return nil +} + +// findNodeDelete takes a key and two maximal-height arrays then searches exactly as in a sequential skip-list. +// The returned preds and succs always satisfy preds[i] > key >= succs[i]. +func (s *StringMapFast[valueT]) findNodeDelete(key string, preds *[maxLevel]*stringnodeFast[valueT], succs *[maxLevel]*stringnodeFast[valueT]) int { + // lFound represents the index of the first layer at which it found a node. + lFound, x := -1, s.header + hash := strhash(key) + for i := int(atomic.LoadUint64(&s.highestLevel)) - 1; i >= 0; i-- { + succ := x.atomicLoadNext(i) + for succ != nil && (succ.hash < hash) { + x = succ + succ = x.atomicLoadNext(i) + } + preds[i] = x + succs[i] = succ + + // Check if the key already in the skip list. + if lFound == -1 && succ != nil && succ.hash == hash && succ.key == key { + lFound = i + } + } + return lFound +} + +func unlockstringfast[valueT any](preds [maxLevel]*stringnodeFast[valueT], highestLevel int) { + var prevPred *stringnodeFast[valueT] + for i := highestLevel; i >= 0; i-- { + if preds[i] != prevPred { // the node could be unlocked by previous loop + preds[i].mu.Unlock() + prevPred = preds[i] + } + } +} + +// Store sets the value for a key. 
+func (s *StringMapFast[valueT]) Store(key string, value valueT) { + level := s.randomlevel() + hash := strhash(key) + var preds, succs [maxLevel]*stringnodeFast[valueT] + for { + nodeFound := s.findNode(hash, key, &preds, &succs) + if nodeFound != nil { // indicating the key is already in the skip-list + if !nodeFound.flags.Get(marked) { + // We don't need to care about whether or not the node is fully linked, + // just replace the value. + nodeFound.storeVal(value) + return + } + // If the node is marked, represents some other goroutines is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + + // Add this node into skip list. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *stringnodeFast[valueT] + ) + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ + } + if !valid { + unlockstringfast(preds, highestLocked) + continue + } + + nn := newStringNodeFast(hash, key, value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockstringfast(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + return + } +} + +// randomlevel returns a random level and update the highest level if needed. 
+func (s *StringMapFast[valueT]) randomlevel() int { + // Generate random level. + level := randomLevel() + // Update highest level if possible. + for { + hl := atomic.LoadUint64(&s.highestLevel) + if uint64(level) <= hl { + break + } + if atomic.CompareAndSwapUint64(&s.highestLevel, hl, uint64(level)) { + break + } + } + return level +} + +// Load returns the value stored in the map for a key, or nil if no +// value is present. +// The ok result indicates whether value was found in the map. +func (s *StringMapFast[valueT]) Load(key string) (value valueT, ok bool) { + x := s.header + hash := strhash(key) + for i := int(atomic.LoadUint64(&s.highestLevel)) - 1; i >= 0; i-- { + nex := x.atomicLoadNext(i) + for nex != nil && (nex.hash < hash) { + x = nex + nex = x.atomicLoadNext(i) + } + + // Check if the key already in the skip list. + if nex != nil && nex.hash == hash && nex.key == key { + if nex.flags.MGet(fullyLinked|marked, fullyLinked) { + return nex.loadVal(), true + } + return + } + } + return +} + +// LoadAndDelete deletes the value for a key, returning the previous value if any. +// The loaded result reports whether the key was present. 
+// (Modified from Delete) +func (s *StringMapFast[valueT]) LoadAndDelete(key string) (value valueT, loaded bool) { + var ( + nodeToDelete *stringnodeFast[valueT] + isMarked bool // represents if this operation mark the node + topLayer = -1 + preds, succs [maxLevel]*stringnodeFast[valueT] + ) + for { + lFound := s.findNodeDelete(key, &preds, &succs) + if isMarked || // this process mark this node or we can find this node in the skip list + lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound { + if !isMarked { // we don't mark this node for now + nodeToDelete = succs[lFound] + topLayer = lFound + nodeToDelete.mu.Lock() + if nodeToDelete.flags.Get(marked) { + // The node is marked by another process, + // the physical deletion will be accomplished by another process. + nodeToDelete.mu.Unlock() + return + } + nodeToDelete.flags.SetTrue(marked) + isMarked = true + } + // Accomplish the physical deletion. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *stringnodeFast[valueT] + ) + for layer := 0; valid && (layer <= topLayer); layer++ { + pred, succ = preds[layer], succs[layer] + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer + // during this process, or the previous is deleted by another process. + // It is valid if: + // 1. the previous node exists. + // 2. no another node has inserted into the skip list in this layer. + valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ + } + if !valid { + unlockstringfast(preds, highestLocked) + continue + } + for i := topLayer; i >= 0; i-- { + // Now we own the `nodeToDelete`, no other goroutine will modify it. 
+ // So we don't need `nodeToDelete.loadNext` + preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i)) + } + nodeToDelete.mu.Unlock() + unlockstringfast(preds, highestLocked) + atomic.AddInt64(&s.length, -1) + return nodeToDelete.loadVal(), true + } + return + } +} + +// LoadOrStore returns the existing value for the key if present. +// Otherwise, it stores and returns the given value. +// The loaded result is true if the value was loaded, false if stored. +// (Modified from Store) +func (s *StringMapFast[valueT]) LoadOrStore(key string, value valueT) (actual valueT, loaded bool) { + var ( + level int + preds, succs [maxLevel]*stringnodeFast[valueT] + hl = int(atomic.LoadUint64(&s.highestLevel)) + hash = strhash(key) + ) + for { + nodeFound := s.findNode(hash, key, &preds, &succs) + if nodeFound != nil { // indicating the key is already in the skip-list + if !nodeFound.flags.Get(marked) { + // We don't need to care about whether or not the node is fully linked, + // just return the value. + return nodeFound.loadVal(), true + } + // If the node is marked, represents some other goroutines is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + + // Add this node into skip list. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *stringnodeFast[valueT] + ) + if level == 0 { + level = s.randomlevel() + if level > hl { + // If the highest level is updated, usually means that many goroutines + // are inserting items. Hopefully we can find a better path in next loop. + // TODO(zyh): consider filling the preds if s.header[level].next == nil, + // but this strategy's performance is almost the same as the existing method. 
+ continue + } + } + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && (succ == nil || !succ.flags.Get(marked)) && pred.loadNext(layer) == succ + } + if !valid { + unlockstringfast(preds, highestLocked) + continue + } + + nn := newStringNodeFast(hash, key, value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockstringfast(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + return value, false + } +} + +// LoadOrStoreLazy returns the existing value for the key if present. +// Otherwise, it stores and returns the given value from f, f will only be called once. +// The loaded result is true if the value was loaded, false if stored. +// (Modified from LoadOrStore) +func (s *StringMapFast[valueT]) LoadOrStoreLazy(key string, f func() valueT) (actual valueT, loaded bool) { + var ( + level int + preds, succs [maxLevel]*stringnodeFast[valueT] + hl = int(atomic.LoadUint64(&s.highestLevel)) + hash = strhash(key) + ) + for { + nodeFound := s.findNode(hash, key, &preds, &succs) + if nodeFound != nil { // indicating the key is already in the skip-list + if !nodeFound.flags.Get(marked) { + // We don't need to care about whether or not the node is fully linked, + // just return the value. 
+ return nodeFound.loadVal(), true + } + // If the node is marked, represents some other goroutines is in the process of deleting this node, + // we need to add this node in next loop. + continue + } + + // Add this node into skip list. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *stringnodeFast[valueT] + ) + if level == 0 { + level = s.randomlevel() + if level > hl { + // If the highest level is updated, usually means that many goroutines + // are inserting items. Hopefully we can find a better path in next loop. + // TODO(zyh): consider filling the preds if s.header[level].next == nil, + // but this strategy's performance is almost the same as the existing method. + continue + } + } + for layer := 0; valid && layer < level; layer++ { + pred = preds[layer] // target node's previous node + succ = succs[layer] // target node's next node + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer during this process. + // It is valid if: + // 1. The previous node and next node both are not marked. + // 2. The previous node's next node is succ in this layer. + valid = !pred.flags.Get(marked) && pred.loadNext(layer) == succ && (succ == nil || !succ.flags.Get(marked)) + } + if !valid { + unlockstringfast(preds, highestLocked) + continue + } + value := f() + nn := newStringNodeFast(hash, key, value, level) + for layer := 0; layer < level; layer++ { + nn.storeNext(layer, succs[layer]) + preds[layer].atomicStoreNext(layer, nn) + } + nn.flags.SetTrue(fullyLinked) + unlockstringfast(preds, highestLocked) + atomic.AddInt64(&s.length, 1) + return value, false + } +} + +// Delete deletes the value for a key. 
+func (s *StringMapFast[valueT]) Delete(key string) bool { + var ( + nodeToDelete *stringnodeFast[valueT] + isMarked bool // represents if this operation mark the node + topLayer = -1 + preds, succs [maxLevel]*stringnodeFast[valueT] + ) + for { + lFound := s.findNodeDelete(key, &preds, &succs) + if isMarked || // this process mark this node or we can find this node in the skip list + lFound != -1 && succs[lFound].flags.MGet(fullyLinked|marked, fullyLinked) && (int(succs[lFound].level)-1) == lFound { + if !isMarked { // we don't mark this node for now + nodeToDelete = succs[lFound] + topLayer = lFound + nodeToDelete.mu.Lock() + if nodeToDelete.flags.Get(marked) { + // The node is marked by another process, + // the physical deletion will be accomplished by another process. + nodeToDelete.mu.Unlock() + return false + } + nodeToDelete.flags.SetTrue(marked) + isMarked = true + } + // Accomplish the physical deletion. + var ( + highestLocked = -1 // the highest level being locked by this process + valid = true + pred, succ, prevPred *stringnodeFast[valueT] + ) + for layer := 0; valid && (layer <= topLayer); layer++ { + pred, succ = preds[layer], succs[layer] + if pred != prevPred { // the node in this layer could be locked by previous loop + pred.mu.Lock() + highestLocked = layer + prevPred = pred + } + // valid check if there is another node has inserted into the skip list in this layer + // during this process, or the previous is deleted by another process. + // It is valid if: + // 1. the previous node exists. + // 2. no another node has inserted into the skip list in this layer. + valid = !pred.flags.Get(marked) && pred.atomicLoadNext(layer) == succ + } + if !valid { + unlockstringfast(preds, highestLocked) + continue + } + for i := topLayer; i >= 0; i-- { + // Now we own the `nodeToDelete`, no other goroutine will modify it. 
+ // So we don't need `nodeToDelete.loadNext` + preds[i].atomicStoreNext(i, nodeToDelete.loadNext(i)) + } + nodeToDelete.mu.Unlock() + unlockstringfast(preds, highestLocked) + atomic.AddInt64(&s.length, -1) + return true + } + return false + } +} + +// Range calls f sequentially for each key and value present in the skipmap. +// If f returns false, range stops the iteration. +// +// Range does not necessarily correspond to any consistent snapshot of the Map's +// contents: no key will be visited more than once, but if the value for any key +// is stored or deleted concurrently, Range may reflect any mapping for that key +// from any point during the Range call. +func (s *StringMapFast[valueT]) Range(f func(key string, value valueT) bool) { + x := s.header.atomicLoadNext(0) + for x != nil { + if !x.flags.MGet(fullyLinked|marked, fullyLinked) { + x = x.atomicLoadNext(0) + continue + } + if !f(x.key, x.loadVal()) { + break + } + x = x.atomicLoadNext(0) + } +} + +// Len returns the length of this skipmap. +func (s *StringMapFast[valueT]) Len() int { + return int(atomic.LoadInt64(&s.length)) +} diff --git a/gen_uint.go b/gen_uint.go index 84b6ac8..b9ee7d2 100644 --- a/gen_uint.go +++ b/gen_uint.go @@ -16,16 +16,17 @@ type UintMap[valueT any] struct { } type uintnode[valueT any] struct { + key uint value unsafe.Pointer // *any flags bitflag - key uint - next optionalArray // [level]*uintnode - mu sync.Mutex level uint32 + mu sync.Mutex + next optionalArray // [level]*uintnode } func newUintNode[valueT any](key uint, value valueT, level int) *uintnode[valueT] { node := &uintnode[valueT]{ + key: key, level: uint32(level), } @@ -87,6 +88,7 @@ func (s *UintMap[valueT]) findNode(key uint, preds *[maxLevel]*uintnode[valueT], func (s *UintMap[valueT]) findNodeDelete(key uint, preds *[maxLevel]*uintnode[valueT], succs *[maxLevel]*uintnode[valueT]) int { // lFound represents the index of the first layer at which it found a node. 
lFound, x := -1, s.header + for i := int(atomic.LoadUint64(&s.highestLevel)) - 1; i >= 0; i-- { succ := x.atomicLoadNext(i) for succ != nil && (succ.key < key) { @@ -117,6 +119,7 @@ func unlockuint[valueT any](preds [maxLevel]*uintnode[valueT], highestLevel int) // Store sets the value for a key. func (s *UintMap[valueT]) Store(key uint, value valueT) { level := s.randomlevel() + var preds, succs [maxLevel]*uintnode[valueT] for { nodeFound := s.findNode(key, &preds, &succs) @@ -191,6 +194,7 @@ func (s *UintMap[valueT]) randomlevel() int { // The ok result indicates whether value was found in the map. func (s *UintMap[valueT]) Load(key uint) (value valueT, ok bool) { x := s.header + for i := int(atomic.LoadUint64(&s.highestLevel)) - 1; i >= 0; i-- { nex := x.atomicLoadNext(i) for nex != nil && (nex.key < key) { diff --git a/gen_uint32.go b/gen_uint32.go index 5cda985..ecc1580 100644 --- a/gen_uint32.go +++ b/gen_uint32.go @@ -16,16 +16,17 @@ type Uint32Map[valueT any] struct { } type uint32node[valueT any] struct { + key uint32 value unsafe.Pointer // *any flags bitflag - key uint32 - next optionalArray // [level]*uint32node - mu sync.Mutex level uint32 + mu sync.Mutex + next optionalArray // [level]*uint32node } func newUint32Node[valueT any](key uint32, value valueT, level int) *uint32node[valueT] { node := &uint32node[valueT]{ + key: key, level: uint32(level), } @@ -87,6 +88,7 @@ func (s *Uint32Map[valueT]) findNode(key uint32, preds *[maxLevel]*uint32node[va func (s *Uint32Map[valueT]) findNodeDelete(key uint32, preds *[maxLevel]*uint32node[valueT], succs *[maxLevel]*uint32node[valueT]) int { // lFound represents the index of the first layer at which it found a node. lFound, x := -1, s.header + for i := int(atomic.LoadUint64(&s.highestLevel)) - 1; i >= 0; i-- { succ := x.atomicLoadNext(i) for succ != nil && (succ.key < key) { @@ -117,6 +119,7 @@ func unlockuint32[valueT any](preds [maxLevel]*uint32node[valueT], highestLevel // Store sets the value for a key. 
func (s *Uint32Map[valueT]) Store(key uint32, value valueT) { level := s.randomlevel() + var preds, succs [maxLevel]*uint32node[valueT] for { nodeFound := s.findNode(key, &preds, &succs) @@ -191,6 +194,7 @@ func (s *Uint32Map[valueT]) randomlevel() int { // The ok result indicates whether value was found in the map. func (s *Uint32Map[valueT]) Load(key uint32) (value valueT, ok bool) { x := s.header + for i := int(atomic.LoadUint64(&s.highestLevel)) - 1; i >= 0; i-- { nex := x.atomicLoadNext(i) for nex != nil && (nex.key < key) { diff --git a/gen_uint32desc.go b/gen_uint32desc.go index 6946b55..e4ca32f 100644 --- a/gen_uint32desc.go +++ b/gen_uint32desc.go @@ -16,16 +16,17 @@ type Uint32MapDesc[valueT any] struct { } type uint32nodeDesc[valueT any] struct { + key uint32 value unsafe.Pointer // *any flags bitflag - key uint32 - next optionalArray // [level]*uint32nodeDesc - mu sync.Mutex level uint32 + mu sync.Mutex + next optionalArray // [level]*uint32nodeDesc } func newUint32NodeDesc[valueT any](key uint32, value valueT, level int) *uint32nodeDesc[valueT] { node := &uint32nodeDesc[valueT]{ + key: key, level: uint32(level), } @@ -87,6 +88,7 @@ func (s *Uint32MapDesc[valueT]) findNode(key uint32, preds *[maxLevel]*uint32nod func (s *Uint32MapDesc[valueT]) findNodeDelete(key uint32, preds *[maxLevel]*uint32nodeDesc[valueT], succs *[maxLevel]*uint32nodeDesc[valueT]) int { // lFound represents the index of the first layer at which it found a node. lFound, x := -1, s.header + for i := int(atomic.LoadUint64(&s.highestLevel)) - 1; i >= 0; i-- { succ := x.atomicLoadNext(i) for succ != nil && (succ.key > key) { @@ -117,6 +119,7 @@ func unlockuint32Desc[valueT any](preds [maxLevel]*uint32nodeDesc[valueT], highe // Store sets the value for a key. 
func (s *Uint32MapDesc[valueT]) Store(key uint32, value valueT) { level := s.randomlevel() + var preds, succs [maxLevel]*uint32nodeDesc[valueT] for { nodeFound := s.findNode(key, &preds, &succs) @@ -191,6 +194,7 @@ func (s *Uint32MapDesc[valueT]) randomlevel() int { // The ok result indicates whether value was found in the map. func (s *Uint32MapDesc[valueT]) Load(key uint32) (value valueT, ok bool) { x := s.header + for i := int(atomic.LoadUint64(&s.highestLevel)) - 1; i >= 0; i-- { nex := x.atomicLoadNext(i) for nex != nil && (nex.key > key) { diff --git a/gen_uint64.go b/gen_uint64.go index 8b6b750..ebf693f 100644 --- a/gen_uint64.go +++ b/gen_uint64.go @@ -16,16 +16,17 @@ type Uint64Map[valueT any] struct { } type uint64node[valueT any] struct { + key uint64 value unsafe.Pointer // *any flags bitflag - key uint64 - next optionalArray // [level]*uint64node - mu sync.Mutex level uint32 + mu sync.Mutex + next optionalArray // [level]*uint64node } func newUint64Node[valueT any](key uint64, value valueT, level int) *uint64node[valueT] { node := &uint64node[valueT]{ + key: key, level: uint32(level), } @@ -87,6 +88,7 @@ func (s *Uint64Map[valueT]) findNode(key uint64, preds *[maxLevel]*uint64node[va func (s *Uint64Map[valueT]) findNodeDelete(key uint64, preds *[maxLevel]*uint64node[valueT], succs *[maxLevel]*uint64node[valueT]) int { // lFound represents the index of the first layer at which it found a node. lFound, x := -1, s.header + for i := int(atomic.LoadUint64(&s.highestLevel)) - 1; i >= 0; i-- { succ := x.atomicLoadNext(i) for succ != nil && (succ.key < key) { @@ -117,6 +119,7 @@ func unlockuint64[valueT any](preds [maxLevel]*uint64node[valueT], highestLevel // Store sets the value for a key. 
func (s *Uint64Map[valueT]) Store(key uint64, value valueT) { level := s.randomlevel() + var preds, succs [maxLevel]*uint64node[valueT] for { nodeFound := s.findNode(key, &preds, &succs) @@ -191,6 +194,7 @@ func (s *Uint64Map[valueT]) randomlevel() int { // The ok result indicates whether value was found in the map. func (s *Uint64Map[valueT]) Load(key uint64) (value valueT, ok bool) { x := s.header + for i := int(atomic.LoadUint64(&s.highestLevel)) - 1; i >= 0; i-- { nex := x.atomicLoadNext(i) for nex != nil && (nex.key < key) { diff --git a/gen_uint64desc.go b/gen_uint64desc.go index edae54b..79c0174 100644 --- a/gen_uint64desc.go +++ b/gen_uint64desc.go @@ -16,16 +16,17 @@ type Uint64MapDesc[valueT any] struct { } type uint64nodeDesc[valueT any] struct { + key uint64 value unsafe.Pointer // *any flags bitflag - key uint64 - next optionalArray // [level]*uint64nodeDesc - mu sync.Mutex level uint32 + mu sync.Mutex + next optionalArray // [level]*uint64nodeDesc } func newUint64NodeDesc[valueT any](key uint64, value valueT, level int) *uint64nodeDesc[valueT] { node := &uint64nodeDesc[valueT]{ + key: key, level: uint32(level), } @@ -87,6 +88,7 @@ func (s *Uint64MapDesc[valueT]) findNode(key uint64, preds *[maxLevel]*uint64nod func (s *Uint64MapDesc[valueT]) findNodeDelete(key uint64, preds *[maxLevel]*uint64nodeDesc[valueT], succs *[maxLevel]*uint64nodeDesc[valueT]) int { // lFound represents the index of the first layer at which it found a node. lFound, x := -1, s.header + for i := int(atomic.LoadUint64(&s.highestLevel)) - 1; i >= 0; i-- { succ := x.atomicLoadNext(i) for succ != nil && (succ.key > key) { @@ -117,6 +119,7 @@ func unlockuint64Desc[valueT any](preds [maxLevel]*uint64nodeDesc[valueT], highe // Store sets the value for a key. 
func (s *Uint64MapDesc[valueT]) Store(key uint64, value valueT) { level := s.randomlevel() + var preds, succs [maxLevel]*uint64nodeDesc[valueT] for { nodeFound := s.findNode(key, &preds, &succs) @@ -191,6 +194,7 @@ func (s *Uint64MapDesc[valueT]) randomlevel() int { // The ok result indicates whether value was found in the map. func (s *Uint64MapDesc[valueT]) Load(key uint64) (value valueT, ok bool) { x := s.header + for i := int(atomic.LoadUint64(&s.highestLevel)) - 1; i >= 0; i-- { nex := x.atomicLoadNext(i) for nex != nil && (nex.key > key) { diff --git a/gen_uintdesc.go b/gen_uintdesc.go index e8d24f6..f8d6b0b 100644 --- a/gen_uintdesc.go +++ b/gen_uintdesc.go @@ -16,16 +16,17 @@ type UintMapDesc[valueT any] struct { } type uintnodeDesc[valueT any] struct { + key uint value unsafe.Pointer // *any flags bitflag - key uint - next optionalArray // [level]*uintnodeDesc - mu sync.Mutex level uint32 + mu sync.Mutex + next optionalArray // [level]*uintnodeDesc } func newUintNodeDesc[valueT any](key uint, value valueT, level int) *uintnodeDesc[valueT] { node := &uintnodeDesc[valueT]{ + key: key, level: uint32(level), } @@ -87,6 +88,7 @@ func (s *UintMapDesc[valueT]) findNode(key uint, preds *[maxLevel]*uintnodeDesc[ func (s *UintMapDesc[valueT]) findNodeDelete(key uint, preds *[maxLevel]*uintnodeDesc[valueT], succs *[maxLevel]*uintnodeDesc[valueT]) int { // lFound represents the index of the first layer at which it found a node. lFound, x := -1, s.header + for i := int(atomic.LoadUint64(&s.highestLevel)) - 1; i >= 0; i-- { succ := x.atomicLoadNext(i) for succ != nil && (succ.key > key) { @@ -117,6 +119,7 @@ func unlockuintDesc[valueT any](preds [maxLevel]*uintnodeDesc[valueT], highestLe // Store sets the value for a key. 
func (s *UintMapDesc[valueT]) Store(key uint, value valueT) { level := s.randomlevel() + var preds, succs [maxLevel]*uintnodeDesc[valueT] for { nodeFound := s.findNode(key, &preds, &succs) @@ -191,6 +194,7 @@ func (s *UintMapDesc[valueT]) randomlevel() int { // The ok result indicates whether value was found in the map. func (s *UintMapDesc[valueT]) Load(key uint) (value valueT, ok bool) { x := s.header + for i := int(atomic.LoadUint64(&s.highestLevel)) - 1; i >= 0; i-- { nex := x.atomicLoadNext(i) for nex != nil && (nex.key > key) { diff --git a/go.sum b/go.sum index 0a2a9b6..02c07e1 100644 --- a/go.sum +++ b/go.sum @@ -1,4 +1,2 @@ -github.com/zhangyunhao116/fastrand v0.2.1 h1:H5FygwAWuYF7IqJKrdWBbphgHffRC07okNQjT2qbGB4= -github.com/zhangyunhao116/fastrand v0.2.1/go.mod h1:0v5KgHho0VE6HU192HnY15de/oDS8UrbBChIFjIhBtc= github.com/zhangyunhao116/fastrand v0.3.0 h1:7bwe124xcckPulX6fxtr2lFdO2KQqaefdtbk+mqO/Ig= github.com/zhangyunhao116/fastrand v0.3.0/go.mod h1:0v5KgHho0VE6HU192HnY15de/oDS8UrbBChIFjIhBtc= diff --git a/internal/typehack/abi_type.go b/internal/typehack/abi_type.go new file mode 100644 index 0000000..7e47b99 --- /dev/null +++ b/internal/typehack/abi_type.go @@ -0,0 +1,60 @@ +package typehack + +import "unsafe" + +// Keep in sync with src/internal/abi/type.go +// Supported versions: 1.18 1.19 1.20 1.21 + +type MapType struct { + Type + Key *Type + Elem *Type + Bucket *Type // internal type representing a hash bucket + // function for hashing keys (ptr to key, seed) -> hash + Hasher func(unsafe.Pointer, uintptr) uintptr + KeySize uint8 // size of key slot + ValueSize uint8 // size of elem slot + BucketSize uint16 // size of bucket + Flags uint32 +} + +// Type is the runtime representation of a Go type. +// +// Type is also referenced implicitly +// (in the form of expressions involving constants and arch.PtrSize) +// in cmd/compile/internal/reflectdata/reflect.go +// and cmd/link/internal/ld/decodesym.go +// (e.g. 
data[2*arch.PtrSize+4] references the TFlag field) +// unsafe.OffsetOf(Type{}.TFlag) cannot be used directly in those +// places because it varies with cross compilation and experiments. +type Type struct { + Size_ uintptr + PtrBytes uintptr // number of (prefix) bytes in the type that can contain pointers + Hash uint32 // hash of type; avoids computation in hash tables + TFlag TFlag // extra type information flags + Align_ uint8 // alignment of variable with this type + FieldAlign_ uint8 // alignment of struct field with this type + Kind_ uint8 // enumeration for C + // function for comparing objects of this type + // (ptr to object A, ptr to object B) -> ==? + Equal func(unsafe.Pointer, unsafe.Pointer) bool + // GCData stores the GC type data for the garbage collector. + // If the KindGCProg bit is set in kind, GCData is a GC program. + // Otherwise it is a ptrmask bitmap. See mbitmap.go for details. + GCData *byte + Str NameOff // string form + PtrToThis TypeOff // type for pointer to this type, may be zero +} + +// TFlag is used by a Type to signal what extra type information is +// available in the memory directly following the Type value. +type TFlag uint8 + +// NameOff is the offset to a name from moduledata.types. See resolveNameOff in runtime. +type NameOff int32 + +// TypeOff is the offset to a type from moduledata.types. See resolveTypeOff in runtime. +type TypeOff int32 + +// TextOff is an offset from the top of a text section. See (rtype).textOff in runtime. +type TextOff int32 diff --git a/internal/typehack/typehack.go b/internal/typehack/typehack.go new file mode 100644 index 0000000..e7e284f --- /dev/null +++ b/internal/typehack/typehack.go @@ -0,0 +1,19 @@ +package typehack + +import "unsafe" + +type Hasher func(unsafe.Pointer, uintptr) uintptr + +// Keep sync with src/runtime/runtime2.go +type eface struct { + _type *MapType + data unsafe.Pointer +} + +// NewHasher returns a new hash function for the comparable type. 
+func NewHasher[T comparable]() Hasher { + var m map[T]struct{} + tmp := interface{}(m) + eface := (*eface)(unsafe.Pointer(&tmp)) + return eface._type.Hasher +} diff --git a/skipmap.tpl b/skipmap.tpl index c9f0bd7..37035aa 100644 --- a/skipmap.tpl +++ b/skipmap.tpl @@ -15,16 +15,18 @@ type {{.StructPrefix}}Map{{.StructSuffix}}{{.TypeParam}} struct { } type {{.StructPrefixLow}}node{{.StructSuffix}}{{.TypeParam}} struct { + {{.FSHashField}} + key {{.KeyType}} value unsafe.Pointer // *any flags bitflag - key {{.KeyType}} - next optionalArray // [level]*{{.StructPrefixLow}}node{{.StructSuffix}} - mu sync.Mutex level uint32 + mu sync.Mutex + next optionalArray // [level]*{{.StructPrefixLow}}node{{.StructSuffix}} } -func new{{.StructPrefix}}Node{{.StructSuffix}}{{.TypeParam}}(key {{.KeyType}}, value {{.ValueType}}, level int) *{{.StructPrefixLow}}node{{.StructSuffix}}{{.TypeArgument}} { +func new{{.StructPrefix}}Node{{.StructSuffix}}{{.TypeParam}}({{.FSHashParameter}} key {{.KeyType}}, value {{.ValueType}}, level int) *{{.StructPrefixLow}}node{{.StructSuffix}}{{.TypeArgument}} { node := &{{.StructPrefixLow}}node{{.StructSuffix}}{{.TypeArgument}}{ + {{.FSHashFieldAssign}} key: key, level: uint32(level), } @@ -62,7 +64,7 @@ func (n *{{.StructPrefixLow}}node{{.StructSuffix}}{{.TypeArgument}}) atomicStore // findNode takes a key and two maximal-height arrays then searches exactly as in a sequential skipmap. // The returned preds and succs always satisfy preds[i] > key >= succs[i]. 
// (without fullpath, if find the node will return immediately) -func (s *{{.StructPrefix}}Map{{.StructSuffix}}{{.TypeArgument}}) findNode(key {{.KeyType}}, preds *[maxLevel]*{{.StructPrefixLow}}node{{.StructSuffix}}{{.TypeArgument}}, succs *[maxLevel]*{{.StructPrefixLow}}node{{.StructSuffix}}{{.TypeArgument}}) *{{.StructPrefixLow}}node{{.StructSuffix}}{{.TypeArgument}} { +func (s *{{.StructPrefix}}Map{{.StructSuffix}}{{.TypeArgument}}) findNode({{.FSHashParameter}} key {{.KeyType}}, preds *[maxLevel]*{{.StructPrefixLow}}node{{.StructSuffix}}{{.TypeArgument}}, succs *[maxLevel]*{{.StructPrefixLow}}node{{.StructSuffix}}{{.TypeArgument}}) *{{.StructPrefixLow}}node{{.StructSuffix}}{{.TypeArgument}} { x := s.header for i := int(atomic.LoadUint64(&s.highestLevel)) - 1; i >= 0; i-- { succ := x.atomicLoadNext(i) @@ -86,6 +88,7 @@ func (s *{{.StructPrefix}}Map{{.StructSuffix}}{{.TypeArgument}}) findNode(key {{ func (s *{{.StructPrefix}}Map{{.StructSuffix}}{{.TypeArgument}}) findNodeDelete(key {{.KeyType}}, preds *[maxLevel]*{{.StructPrefixLow}}node{{.StructSuffix}}{{.TypeArgument}}, succs *[maxLevel]*{{.StructPrefixLow}}node{{.StructSuffix}}{{.TypeArgument}}) int { // lFound represents the index of the first layer at which it found a node. lFound, x := -1, s.header + {{.FSHashResult}} for i := int(atomic.LoadUint64(&s.highestLevel)) - 1; i >= 0; i-- { succ := x.atomicLoadNext(i) for succ != nil && {{Less "succ.key" "key"}} { @@ -116,9 +119,10 @@ func unlock{{.Name}}{{.TypeParam}}(preds [maxLevel]*{{.StructPrefixLow}}node{{.S // Store sets the value for a key. 
func (s *{{.StructPrefix}}Map{{.StructSuffix}}{{.TypeArgument}}) Store(key {{.KeyType}}, value {{.ValueType}}) { level := s.randomlevel() + {{.FSHashResult}} var preds, succs [maxLevel]*{{.StructPrefixLow}}node{{.StructSuffix}}{{.TypeArgument}} for { - nodeFound := s.findNode(key, &preds, &succs) + nodeFound := s.findNode({{.FSHashArgument}} key, &preds, &succs) if nodeFound != nil { // indicating the key is already in the skip-list if !nodeFound.flags.Get(marked) { // We don't need to care about whether or not the node is fully linked, @@ -156,7 +160,7 @@ func (s *{{.StructPrefix}}Map{{.StructSuffix}}{{.TypeArgument}}) Store(key {{.Ke continue } - nn := new{{.StructPrefix}}Node{{.StructSuffix}}(key, value, level) + nn := new{{.StructPrefix}}Node{{.StructSuffix}}({{.FSHashArgument}} key, value, level) for layer := 0; layer < level; layer++ { nn.storeNext(layer, succs[layer]) preds[layer].atomicStoreNext(layer, nn) @@ -190,6 +194,7 @@ func (s *{{.StructPrefix}}Map{{.StructSuffix}}{{.TypeArgument}}) randomlevel() i // The ok result indicates whether value was found in the map. 
func (s *{{.StructPrefix}}Map{{.StructSuffix}}{{.TypeArgument}}) Load(key {{.KeyType}}) (value {{.ValueType}}, ok bool) { x := s.header + {{.FSHashResult}} for i := int(atomic.LoadUint64(&s.highestLevel)) - 1; i >= 0; i-- { nex := x.atomicLoadNext(i) for nex != nil && {{Less "nex.key" "key"}} { @@ -282,9 +287,10 @@ func (s *{{.StructPrefix}}Map{{.StructSuffix}}{{.TypeArgument}}) LoadOrStore(key level int preds, succs [maxLevel]*{{.StructPrefixLow}}node{{.StructSuffix}}{{.TypeArgument}} hl = int(atomic.LoadUint64(&s.highestLevel)) + {{.FSHashResultS}} ) for { - nodeFound := s.findNode(key, &preds, &succs) + nodeFound := s.findNode({{.FSHashArgument}} key, &preds, &succs) if nodeFound != nil { // indicating the key is already in the skip-list if !nodeFound.flags.Get(marked) { // We don't need to care about whether or not the node is fully linked, @@ -331,7 +337,7 @@ func (s *{{.StructPrefix}}Map{{.StructSuffix}}{{.TypeArgument}}) LoadOrStore(key continue } - nn := new{{.StructPrefix}}Node{{.StructSuffix}}(key, value, level) + nn := new{{.StructPrefix}}Node{{.StructSuffix}}({{.FSHashArgument}} key, value, level) for layer := 0; layer < level; layer++ { nn.storeNext(layer, succs[layer]) preds[layer].atomicStoreNext(layer, nn) @@ -352,9 +358,10 @@ func (s *{{.StructPrefix}}Map{{.StructSuffix}}{{.TypeArgument}}) LoadOrStoreLazy level int preds, succs [maxLevel]*{{.StructPrefixLow}}node{{.StructSuffix}}{{.TypeArgument}} hl = int(atomic.LoadUint64(&s.highestLevel)) + {{.FSHashResultS}} ) for { - nodeFound := s.findNode(key, &preds, &succs) + nodeFound := s.findNode({{.FSHashArgument}} key, &preds, &succs) if nodeFound != nil { // indicating the key is already in the skip-list if !nodeFound.flags.Get(marked) { // We don't need to care about whether or not the node is fully linked, @@ -401,7 +408,7 @@ func (s *{{.StructPrefix}}Map{{.StructSuffix}}{{.TypeArgument}}) LoadOrStoreLazy continue } value := f() - nn := new{{.StructPrefix}}Node{{.StructSuffix}}(key, value, level) + 
nn := new{{.StructPrefix}}Node{{.StructSuffix}}({{.FSHashArgument}} key, value, level) for layer := 0; layer < level; layer++ { nn.storeNext(layer, succs[layer]) preds[layer].atomicStoreNext(layer, nn) diff --git a/skipmap_faststr.go b/skipmap_faststr.go new file mode 100644 index 0000000..0485da1 --- /dev/null +++ b/skipmap_faststr.go @@ -0,0 +1,32 @@ +package skipmap + +import ( + "unsafe" + + "github.com/zhangyunhao116/fastrand" + "github.com/zhangyunhao116/skipmap/internal/typehack" +) + +var strhash func(string) uint64 + +func init() { + runtimestrhash := typehack.NewHasher[string]() + randomseed := fastrand.Uint() + strhash = func(s string) uint64 { + return uint64(runtimestrhash(unsafe.Pointer(&s), uintptr(randomseed))) + } +} + +// NewStringFast returns an empty skipmap with string key. +// The item order of the skipmap is different between each run. +// If you need to keep the item order of each run, use [`NewString`]. +// The [`StringMapFast`] is about 25% faster than the [`StringMap`]. +func NewStringFast[valueT any]() *StringMapFast[valueT] { + var t valueT + h := newStringNodeFast(0, "", t, maxLevel) + h.flags.SetTrue(fullyLinked) + return &StringMapFast[valueT]{ + header: h, + highestLevel: defaultHighestLevel, + } +} diff --git a/skipmap_test.go b/skipmap_test.go index 6e8447e..dfa5e15 100644 --- a/skipmap_test.go +++ b/skipmap_test.go @@ -17,6 +17,8 @@ func TestTyped(t *testing.T) { testSkipMapInt(t, func() anyskipmap[int] { return NewInt[any]() }) testSkipMapIntDesc(t, func() anyskipmap[int] { return NewIntDesc[any]() }) testSkipMapString(t, func() anyskipmap[string] { return NewString[any]() }) + testSkipMapString(t, func() anyskipmap[string] { return NewStringDesc[any]() }) + testSkipMapString(t, func() anyskipmap[string] { return NewStringFast[any]() }) testSyncMapSuiteInt64(t, func() anyskipmap[int64] { return NewInt64[any]() }) }