
Commit

Use uint64 instead of uint32 (#236)
* Fix iterator styling issues (#247)

Issues reported in #246

* fix #160 (#246)

* Update to latest golang (#248)

Co-authored-by: Oleg Kovalov <[email protected]>

* Initial prep for v3.

Signed-off-by: Mike Lloyd <[email protected]>

* Use uint64 instead of uint32

There is a possibility that we run into an int32 overflow problem.
To prevent this, let's use uint64 everywhere (a short sketch of the wrap-around follows this message).

https://github.com/allegro/bigcache/blob/21e5ca5c3d539f94e8dc563350acd97c5400154f/shard.go#L138

Fixes: #148

* Fix CI

* Do not run on 1.13

* Do not run long test

* Optimize append (#249)

* Add Benchmark for append

* Optimize Append and halve byte copies

* Optimize Append by reducing allocs

* Optimize Append by reducing allocs

* Reduces allocs from test construct

Co-authored-by: Fabian Gärtner <[email protected]>

Co-authored-by: S@P <[email protected]>
Co-authored-by: Oleg Kovalov <[email protected]>
Co-authored-by: Mike Lloyd <[email protected]>
Co-authored-by: Fabianexe <[email protected]>
Co-authored-by: Fabian Gärtner <[email protected]>
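As a minimal illustration of the wrap-around the commit message describes (this sketch is not part of the commit): shard.go previously stored each entry's position in the shard's byte queue as uint32, so once a shard accumulated more than 2^32-1 bytes the saved offset silently wrapped and later lookups read from the wrong place in the queue.

```go
package main

import "fmt"

func main() {
	// Hypothetical byte offset of an entry in a shard whose queue has grown
	// past 4 GiB (2^32 bytes).
	index := uint64(4_300_000_000)

	// Storing it as uint32 (the behaviour before this commit) truncates the
	// high bits, so the index no longer points at the entry.
	wrapped := uint32(index)

	fmt.Println(index, wrapped) // 4300000000 5032704
}
```

Widening the stored index (and the BytesQueue bookkeeping changed below) to uint64 removes that ceiling.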
6 people committed Nov 4, 2020
1 parent 4f62c6f commit b548fed
Showing 6 changed files with 88 additions and 33 deletions.
4 changes: 2 additions & 2 deletions .github/workflows/build.yml
@@ -11,7 +11,7 @@ jobs:
fail-fast: true
max-parallel: 2
matrix:
go: ["1.13.x", "1.14.x", "1.15.x"]
go: ["1.14.x", "1.15.x"]

steps:
- name: Set up Go
@@ -42,7 +42,7 @@ jobs:
run: |
go test -race -count=1 -coverprofile=queue.coverprofile ./queue
go test -race -count=1 -coverprofile=server.coverprofile ./server
go test -race -count=1 -coverprofile=main.coverprofile
go test -race -count=1 -coverprofile=main.coverprofile -short
- name: Upload coverage to codecov
run: |
49 changes: 49 additions & 0 deletions bigcache_test.go
@@ -6,6 +6,7 @@ import (
"math"
"math/rand"
"runtime"
"strconv"
"strings"
"sync"
"testing"
@@ -1065,6 +1066,54 @@ func TestBigCache_GetWithInfo(t *testing.T) {
assertEqual(t, []byte(value), data)
}

// This test is designed for the int32 overflow that was reported in #148
// By default it's skipped since it needs a huge amount of memory
func Test_issue_148(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode.")
}
const n = 2070400
var message = bytes.Repeat([]byte{0}, 2<<10)
cache, _ := NewBigCache(Config{
Shards: 1,
LifeWindow: time.Hour,
MaxEntriesInWindow: 10,
MaxEntrySize: len(message),
HardMaxCacheSize: 2 << 13,
})
for i := 0; i < n; i++ {
err := cache.Set(strconv.Itoa(i), message)
if err != nil {
t.Fatal(err)
}
}

err := cache.Set(strconv.Itoa(n), message)
if err != nil {
t.Fatal(err)
}

cache.Get(strconv.Itoa(n))

i := 0
defer func() {
if r := recover(); r != nil {
t.Log("Element: ", i)
t.Fatal(r)
}
}()

for ; i < n; i++ {
v, err := cache.Get(strconv.Itoa(i))
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(v, message) {
t.Fatal("Should be equal", i, v, message)
}
}
}

type mockedLogger struct {
lastFormat string
lastArgs []interface{}
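For context on the constants in Test_issue_148 above (CI now passes -short, so the test is skipped there): a rough, back-of-envelope estimate, assuming bigcache's roughly 18-byte wrapped-entry header plus the key and the queue's uvarint length prefix, of why about 2.07 million 2 KiB entries push a single shard's byte queue just past 2^32 bytes.

```go
package main

import "fmt"

func main() {
	const payload = 2 << 10  // 2048-byte value used by the test
	const entries = 2070400  // number of Set calls in the test
	// Rough per-entry overhead (an estimate, not an exact figure): ~18-byte
	// wrapped-entry header (timestamp, hash, key length) plus an up-to-7-byte
	// key and a ~2-byte uvarint length prefix written by the queue.
	const overhead = 27

	total := uint64(entries) * (payload + overhead)
	fmt.Println(total, total > 1<<32) // ~4.3e9, true: past the uint32 range
}
```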
2 changes: 2 additions & 0 deletions go.mod
@@ -1,3 +1,5 @@
module github.com/allegro/bigcache/v3

go 1.12

require github.com/allegro/bigcache/v2 v2.2.5
3 changes: 3 additions & 0 deletions go.sum
@@ -0,0 +1,3 @@
github.com/allegro/bigcache v1.2.1 h1:hg1sY1raCwic3Vnsvje6TT7/pnZba83LeFck5NrFKSc=
github.com/allegro/bigcache/v2 v2.2.5 h1:mRc8r6GQjuJsmSKQNPsR5jQVXc8IJ1xsW5YXUYMLfqI=
github.com/allegro/bigcache/v2 v2.2.5/go.mod h1:FppZsIO+IZk7gCuj5FiIDHGygD9xvWQcqg1uIPMb6tY=
51 changes: 26 additions & 25 deletions queue/bytes_queue.go
@@ -24,12 +24,12 @@ var (
type BytesQueue struct {
full bool
array []byte
capacity int
maxCapacity int
head int
tail int
capacity uint64
maxCapacity uint64
head uint64
tail uint64
count int
rightMargin int
rightMargin uint64
headerBuffer []byte
verbose bool
}
@@ -39,7 +39,7 @@ type queueError struct {
}

// getUvarintSize returns the number of bytes to encode x in uvarint format
func getUvarintSize(x uint32) int {
func getUvarintSize(x uint32) uint64 {
if x < 128 {
return 1
} else if x < 16384 {
@@ -59,8 +59,8 @@ func getUvarintSize(x uint32) int {
func NewBytesQueue(capacity int, maxCapacity int, verbose bool) *BytesQueue {
return &BytesQueue{
array: make([]byte, capacity),
capacity: capacity,
maxCapacity: maxCapacity,
capacity: uint64(capacity),
maxCapacity: uint64(maxCapacity),
headerBuffer: make([]byte, binary.MaxVarintLen32),
tail: leftMarginIndex,
head: leftMarginIndex,
@@ -82,7 +82,7 @@ func (q *BytesQueue) Reset() {
// Push copies entry at the end of queue and moves tail pointer. Allocates more space if needed.
// Returns index for pushed data or error if maximum size queue limit is reached.
func (q *BytesQueue) Push(data []byte) (int, error) {
dataLen := len(data)
dataLen := uint64(len(data))
headerEntrySize := getUvarintSize(uint32(dataLen))

if !q.canInsertAfterTail(dataLen + headerEntrySize) {
@@ -99,10 +99,10 @@ func (q *BytesQueue) Push(data []byte) (int, error) {

q.push(data, dataLen)

return index, nil
return int(index), nil
}

func (q *BytesQueue) allocateAdditionalMemory(minimum int) {
func (q *BytesQueue) allocateAdditionalMemory(minimum uint64) {
start := time.Now()
if q.capacity < minimum {
q.capacity += minimum
@@ -137,8 +137,8 @@ func (q *BytesQueue) allocateAdditionalMemory(minimum int) {
}
}

func (q *BytesQueue) push(data []byte, len int) {
headerEntrySize := binary.PutUvarint(q.headerBuffer, uint64(len))
func (q *BytesQueue) push(data []byte, len uint64) {
headerEntrySize := uint64(binary.PutUvarint(q.headerBuffer, len))
q.copy(q.headerBuffer, headerEntrySize)

q.copy(data, len)
@@ -153,8 +153,8 @@ func (q *BytesQueue) push(data []byte, len int) {
q.count++
}

func (q *BytesQueue) copy(data []byte, len int) {
q.tail += copy(q.array[q.tail:], data[:len])
func (q *BytesQueue) copy(data []byte, len uint64) {
q.tail += uint64(copy(q.array[q.tail:], data[:len]))
}

// Pop reads the oldest entry from queue and moves head pointer to the next one
@@ -163,7 +163,7 @@ func (q *BytesQueue) Pop() ([]byte, error) {
if err != nil {
return nil, err
}
size := len(data)
size := uint64(len(data))

q.head += headerEntrySize + size
q.count--
@@ -189,18 +189,18 @@ func (q *BytesQueue) Peek() ([]byte, error) {

// Get reads entry from index
func (q *BytesQueue) Get(index int) ([]byte, error) {
data, _, err := q.peek(index)
data, _, err := q.peek(uint64(index))
return data, err
}

// CheckGet checks if an entry can be read from index
func (q *BytesQueue) CheckGet(index int) error {
return q.peekCheckErr(index)
return q.peekCheckErr(uint64(index))
}

// Capacity returns number of allocated bytes for queue
func (q *BytesQueue) Capacity() int {
return q.capacity
return int(q.capacity)
}

// Len returns number of entries kept in queue
@@ -214,7 +214,7 @@ func (e *queueError) Error() string {
}

// peekCheckErr is identical to peek, but does not actually return any data
func (q *BytesQueue) peekCheckErr(index int) error {
func (q *BytesQueue) peekCheckErr(index uint64) error {

if q.count == 0 {
return errEmptyQueue
@@ -224,25 +224,26 @@ func (q *BytesQueue) peekCheckErr(index int) error {
return errInvalidIndex
}

if index >= len(q.array) {
if index >= uint64(len(q.array)) {
return errIndexOutOfBounds
}
return nil
}

// peek returns the data from index and the number of bytes to encode the length of the data in uvarint format
func (q *BytesQueue) peek(index int) ([]byte, int, error) {
func (q *BytesQueue) peek(index uint64) ([]byte, uint64, error) {
err := q.peekCheckErr(index)
if err != nil {
return nil, 0, err
}

blockSize, n := binary.Uvarint(q.array[index:])
return q.array[index+n : index+n+int(blockSize)], n, nil
un := uint64(n)
return q.array[index+un : index+un+blockSize], un, nil
}

// canInsertAfterTail returns true if it's possible to insert an entry of size of need after the tail of the queue
func (q *BytesQueue) canInsertAfterTail(need int) bool {
func (q *BytesQueue) canInsertAfterTail(need uint64) bool {
if q.full {
return false
}
@@ -257,7 +257,7 @@ func (q *BytesQueue) canInsertAfterTail(need int) bool {
}

// canInsertBeforeHead returns true if it's possible to insert an entry of size of need before the head of the queue
func (q *BytesQueue) canInsertBeforeHead(need int) bool {
func (q *BytesQueue) canInsertBeforeHead(need uint64) bool {
if q.full {
return false
}
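To make the queue layout behind these changes concrete: each entry is stored as a uvarint-encoded length immediately followed by the payload, which is what push writes and peek decodes in the diff above. A minimal round-trip sketch using only the standard library (simplified, not code from this repository):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	data := []byte("hello")

	// push(): write uvarint(len(data)), then the data itself.
	header := make([]byte, binary.MaxVarintLen32)
	n := binary.PutUvarint(header, uint64(len(data))) // header size in bytes
	entry := append(header[:n], data...)

	// peek(): read the length back, then slice out the payload.
	size, read := binary.Uvarint(entry)
	payload := entry[read : read+int(size)]

	fmt.Println(n, size, string(payload)) // 1 5 hello
}
```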
12 changes: 6 additions & 6 deletions shard.go
@@ -16,7 +16,7 @@ type Metadata struct {
}

type cacheShard struct {
hashmap map[uint64]uint32
hashmap map[uint64]uint64
entries queue.BytesQueue
lock sync.RWMutex
entryBuffer []byte
@@ -136,7 +136,7 @@ func (s *cacheShard) set(key string, hashedKey uint64, entry []byte) error {

for {
if index, err := s.entries.Push(w); err == nil {
s.hashmap[hashedKey] = uint32(index)
s.hashmap[hashedKey] = uint64(index)
s.lock.Unlock()
return nil
}
@@ -158,7 +158,7 @@ func (s *cacheShard) addNewWithoutLock(key string, hashedKey uint64, entry []byt

for {
if index, err := s.entries.Push(w); err == nil {
s.hashmap[hashedKey] = uint32(index)
s.hashmap[hashedKey] = uint64(index)
return nil
}
if s.removeOldestEntry(NoSpace) != nil {
@@ -180,7 +180,7 @@ func (s *cacheShard) setWrappedEntryWithoutLock(currentTimestamp uint64, w []byt

for {
if index, err := s.entries.Push(w); err == nil {
s.hashmap[hashedKey] = uint32(index)
s.hashmap[hashedKey] = uint64(index)
return nil
}
if s.removeOldestEntry(NoSpace) != nil {
@@ -332,7 +332,7 @@ func (s *cacheShard) removeOldestEntry(reason RemoveReason) error {

func (s *cacheShard) reset(config Config) {
s.lock.Lock()
s.hashmap = make(map[uint64]uint32, config.initialShardSize())
s.hashmap = make(map[uint64]uint64, config.initialShardSize())
s.entryBuffer = make([]byte, config.MaxEntrySize+headersSizeInBytes)
s.entries.Reset()
s.lock.Unlock()
@@ -417,7 +417,7 @@ func initNewShard(config Config, callback onRemoveCallback, clock clock) *cacheS
bytesQueueInitialCapacity = maximumShardSizeInBytes
}
return &cacheShard{
hashmap: make(map[uint64]uint32, config.initialShardSize()),
hashmap: make(map[uint64]uint64, config.initialShardSize()),
hashmapStats: make(map[uint64]uint32, config.initialShardSize()),
entries: *queue.NewBytesQueue(bytesQueueInitialCapacity, maximumShardSizeInBytes, config.Verbose),
entryBuffer: make([]byte, config.MaxEntrySize+headersSizeInBytes),

