diff --git a/core/memallocator_slab.go b/core/memallocator_slab.go
new file mode 100644
index 0000000..81d29f8
--- /dev/null
+++ b/core/memallocator_slab.go
@@ -0,0 +1,359 @@
+package core
+
+import (
+	"errors"
+	"sort"
+	"sync"
+	"unsafe"
+)
+
+var (
+	ErrSlabNoCacheFound = errors.New("no slab cache matching request")
+	ErrSlabTooLarge     = errors.New("requested size too large")
+)
+
+type SlabAllocatorConfig struct {
+	MinCanarySize int
+	Sizes         []int
+}
+
+// Configuration options
+type SlabOption func(*SlabAllocatorConfig)
+
+// WithSizes allows overriding the slab object sizes, which default to
+// 64, 128, 256, 512, 1024 and 2048 bytes.
+func WithSizes(sizes []int) SlabOption {
+	return func(cfg *SlabAllocatorConfig) {
+		cfg.Sizes = sizes
+	}
+}
+
+// WithMinCanarySize allows specifying the minimum canary size (default: 16 bytes).
+func WithMinCanarySize(size int) SlabOption {
+	return func(cfg *SlabAllocatorConfig) {
+		cfg.MinCanarySize = size
+	}
+}
+
+// Memory allocator implementation
+type slabAllocator struct {
+	maxSlabSize int
+	stats       *MemStats
+	cfg         *SlabAllocatorConfig
+	allocator   *pageAllocator
+	slabs       []*slab
+}
+
+func NewSlabAllocator(options ...SlabOption) MemAllocator {
+	cfg := &SlabAllocatorConfig{
+		MinCanarySize: 16,
+		Sizes:         []int{64, 128, 256, 512, 1024, 2048},
+	}
+	for _, o := range options {
+		o(cfg)
+	}
+	sort.Ints(cfg.Sizes)
+
+	if len(cfg.Sizes) == 0 {
+		return nil
+	}
+
+	// Setup the allocator and initialize the slabs
+	a := &slabAllocator{
+		maxSlabSize: cfg.Sizes[len(cfg.Sizes)-1],
+		stats:       &MemStats{},
+		cfg:         cfg,
+		slabs:       make([]*slab, 0, len(cfg.Sizes)),
+		allocator: &pageAllocator{
+			objects: make(map[int]*pageObject),
+			stats:   &MemStats{},
+		},
+	}
+	for _, size := range cfg.Sizes {
+		s := &slab{
+			objSize:   size,
+			stats:     a.stats,
+			allocator: a.allocator,
+		}
+		a.slabs = append(a.slabs, s)
+	}
+
+	return a
+}
+
+func (a *slabAllocator) Alloc(size int) ([]byte, error) {
+	if size < 1 {
+		return nil, ErrNullAlloc
+	}
+
+	// If the requested size is bigger than the largest slab, fall back
+	// to the page allocator.
+	requiredSlabSize := size + a.cfg.MinCanarySize
+	if requiredSlabSize > a.maxSlabSize {
+		return a.allocator.Alloc(size)
+	}
+
+	// Determine which slab to use depending on the size
+	var s *slab
+	for _, current := range a.slabs {
+		if requiredSlabSize <= current.objSize {
+			s = current
+			break
+		}
+	}
+	if s == nil {
+		return nil, ErrSlabNoCacheFound
+	}
+	buf, err := s.alloc(size)
+	if err != nil {
+		return nil, err
+	}
+
+	// The slab already returns a buffer trimmed to the requested size
+	return buf, nil
+}
+
+func (a *slabAllocator) Protect(buf []byte, readonly bool) error {
+	// For the slab allocator, the data-slice is not identical to a memory page.
+	// However, protection rules can only be applied to whole memory pages,
+	// therefore protection of the data-slice is not supported by the slab
+	// allocator.
+	return nil
+}
+
+func (a *slabAllocator) Inner(buf []byte) []byte {
+	if len(buf) == 0 {
+		return nil
+	}
+
+	// If the buffer size is bigger than the largest slab, delegate to
+	// the page allocator.
+	size := len(buf) + a.cfg.MinCanarySize
+	if size > a.maxSlabSize {
+		return a.allocator.Inner(buf)
+	}
+
+	// Determine which slab to use depending on the size
+	var s *slab
+	for _, current := range a.slabs {
+		if size <= current.objSize {
+			s = current
+			break
+		}
+	}
+	if s == nil {
+		Panic(ErrSlabNoCacheFound)
+	}
+
+	for _, c := range s.pages {
+		if offset, contained := contains(c.buffer, buf); contained {
+			return c.buffer[offset : offset+s.objSize]
+		}
+	}
+	return nil
+}
+
+func (a *slabAllocator) Free(buf []byte) error {
+	size := len(buf) + a.cfg.MinCanarySize
+
+	// If the buffer size is bigger than the largest slab, just free
+	// the memory.
+	if size > a.maxSlabSize {
+		return a.allocator.Free(buf)
+	}
+
+	// Determine which slab to use depending on the size
+	var s *slab
+	for _, current := range a.slabs {
+		if size <= current.objSize {
+			s = current
+			break
+		}
+	}
+	if s == nil {
+		return ErrSlabNoCacheFound
+	}
+
+	return s.free(buf)
+}
+
+func (a *slabAllocator) Stats() *MemStats {
+	return a.stats
+}
+
+// *** INTERNAL FUNCTIONS *** //
+
+// Page implementation
+type slabObject struct {
+	offset int
+	next   *slabObject
+}
+
+type slabPage struct {
+	used   int
+	head   *slabObject
+	canary []byte
+	buffer []byte
+}
+
+func newPage(page []byte, size int) *slabPage {
+	if size > len(page) || size < 1 {
+		Panic(ErrSlabTooLarge)
+	}
+
+	// Determine the number of objects fitting into the page
+	count := len(page) / size
+
+	// Init the Page meta-data
+	c := &slabPage{
+		head:   &slabObject{},
+		canary: page[len(page)-size:],
+		buffer: page,
+	}
+
+	// Use the last object to create a canary prototype
+	if err := Scramble(c.canary); err != nil {
+		Panic(err)
+	}
+
+	// Initialize the objects
+	last := c.head
+	offset := size
+	for i := 1; i < count-1; i++ {
+		obj := &slabObject{offset: offset}
+		last.next = obj
+		offset += size
+		last = obj
+	}
+
+	return c
+}
+
+// Slab is a container for all Pages serving the same size
+type slab struct {
+	objSize   int
+	stats     *MemStats
+	allocator *pageAllocator
+	pages     []*slabPage
+	sync.Mutex
+}
+
+func (s *slab) alloc(size int) ([]byte, error) {
+	s.Lock()
+	defer s.Unlock()
+
+	// Find the fullest Page that isn't completely filled
+	var c *slabPage
+	for _, current := range s.pages {
+		if current.head != nil && (c == nil || current.used > c.used) {
+			c = current
+		}
+	}
+
+	// No Page available, create a new one
+	if c == nil {
+		// Use the page allocator to get a new guarded memory page
+		page, err := s.allocator.Alloc(pageSize - s.objSize)
+		if err != nil {
+			s.stats.PageAllocErrors.Add(1)
+			return nil, err
+		}
+		s.stats.PageAllocs.Store(s.allocator.stats.PageAllocs.Load())
+		c = newPage(page, s.objSize)
+		s.pages = append(s.pages, c)
+	}
+
+	// Remove the object from the free-list and increase the usage count
+	obj := c.head
+	c.head = c.head.next
+	c.used++
+
+	s.stats.ObjectAllocs.Add(1)
+	data := getBufferPart(c.buffer, obj.offset, size)
+	canary := getBufferPart(c.buffer, obj.offset+size, s.objSize-size)
+
+	// Fill in the remaining bytes with canary
+	Copy(canary, c.canary)
+
+	return data, nil
+}
+
+func contains(buf, obj []byte) (int, bool) {
+	bb := uintptr(unsafe.Pointer(&buf[0]))
+	be := uintptr(unsafe.Pointer(&buf[len(buf)-1]))
+	o := uintptr(unsafe.Pointer(&obj[0]))
+
+	if bb <= be {
+		return int(o - bb), bb <= o && o < be
+	}
+	return int(o - be), be <= o && o < bb
+}
+
+func (s *slab) free(buf []byte) error {
+	s.Lock()
+	defer s.Unlock()
+
+	// Find the Page containing the object
+	var c *slabPage
+	var cidx, offset int
+	for i, current := range s.pages {
+		diff, contained := contains(current.buffer, buf)
+		if contained {
+			c = current
+			cidx = i
+			offset = diff
+			break
+		}
+	}
+	if c == nil {
+		return ErrBufferNotOwnedByAllocator
+	}
+
+	s.stats.ObjectFrees.Add(1)
+
+	// Wipe the buffer including the canary check
+	if err := s.wipe(c, offset, len(buf)); err != nil {
+		s.stats.ObjectFreeErrors.Add(1)
+		return err
+	}
+	obj := &slabObject{
+		offset: offset,
+		next:   c.head,
+	}
+	c.head = obj
+	c.used--
+
+	// In case the Page is completely empty, we should remove it and
+	// free the underlying memory
+	if c.used == 0 {
+		err := s.allocator.Free(c.buffer)
+		s.stats.PageFrees.Store(s.allocator.stats.PageFrees.Load())
+		if err != nil {
+			s.stats.PageFreeErrors.Add(1)
+			return err
+		}
+
+		s.pages = append(s.pages[:cidx], s.pages[cidx+1:]...)
+	}
+
+	return nil
+}
+
+func (s *slab) wipe(page *slabPage, offset, size int) error {
+	canary := getBufferPart(page.buffer, -s.objSize, s.objSize)
+	inner := getBufferPart(page.buffer, offset, s.objSize)
+	data := getBufferPart(page.buffer, offset, size)
+
+	// Wipe data field
+	Wipe(data)
+
+	// Verify the canary: the remainder of the object behind the data must
+	// still match the page's canary prototype.
+	if !Equal(inner[len(data):], canary[:s.objSize-size]) {
+		return ErrBufferOverflow
+	}
+
+	// Wipe the memory
+	Wipe(inner)
+
+	return nil
+}
diff --git a/core/memallocator_slab_test.go b/core/memallocator_slab_test.go
new file mode 100644
index 0000000..cfbcecb
--- /dev/null
+++ b/core/memallocator_slab_test.go
@@ -0,0 +1,106 @@
+package core
+
+import (
+	"bytes"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+func TestSlabAllocAllocInvalidSize(t *testing.T) {
+	alloc := NewSlabAllocator()
+
+	a, err := alloc.Alloc(0)
+	require.Nil(t, a)
+	require.ErrorIs(t, err, ErrNullAlloc)
+
+	b, err := alloc.Alloc(-1)
+	require.Nil(t, b)
+	require.ErrorIs(t, err, ErrNullAlloc)
+}
+
+func TestSlabAllocAlloc(t *testing.T) {
+	alloc := NewSlabAllocator()
+
+	b, err := alloc.Alloc(32)
+	require.NoError(t, err)
+	require.Lenf(t, b, 32, "invalid data len %d", len(b))
+	require.Equalf(t, 32, cap(b), "invalid data capacity %d", cap(b))
+	// require.Len(t, o.memory, 3*pageSize)
+	// require.EqualValues(t, make([]byte, 32), o.data, "container is not zero-filled")
+
+	// Destroy the buffer.
+	require.NoError(t, alloc.Free(b))
+}
+
+func TestSlabAllocLotsOfAllocs(t *testing.T) {
+	// Create a local allocator instance
+	alloc := NewPageAllocator()
+	palloc := alloc.(*pageAllocator)
+
+	for i := 1; i <= 16385; i++ {
+		b, err := alloc.Alloc(i)
+		require.NoErrorf(t, err, "size: %d", i)
+
+		o, found := palloc.lookup(b)
+		require.True(t, found)
+
+		require.Lenf(t, o.data, i, "size: %d", i)
+		require.Lenf(t, o.memory, roundToPageSize(i)+2*pageSize, "memory length invalid size: %d", i)
+		require.Lenf(t, o.preguard, pageSize, "pre-guard length invalid size: %d", i)
+		require.Lenf(t, o.postguard, pageSize, "post-guard length invalid size: %d", i)
+		require.Lenf(t, o.canary, len(o.inner)-i, "canary length invalid size: %d", i)
+		require.Zerof(t, len(o.inner)%pageSize, "inner length is not a multiple of page size, size: %d", i)
+
+		// Fill the data
+		for j := range o.data {
+			o.data[j] = 1
+		}
+		require.EqualValuesf(t, bytes.Repeat([]byte{1}, i), o.data, "region rw test failed, size: %d", i)
+		require.NoErrorf(t, alloc.Free(b), "size: %d", i)
+	}
+}
+
+func TestSlabAllocDestroy(t *testing.T) {
+	alloc := NewPageAllocator()
+
+	// Allocate a new buffer.
+	b, err := alloc.Alloc(32)
+	require.NoError(t, err)
+
+	o, found := alloc.(*pageAllocator).lookup(b)
+	require.True(t, found)
+
+	// Destroy it and check it is gone...
+	require.NoError(t, o.wipe())
+
+	// Pick apart the destruction.
+	require.Nil(t, o.data, "data not nil")
+	require.Nil(t, o.inner, "inner not nil")
+	require.Nil(t, o.preguard, "preguard not nil")
+	require.Nil(t, o.postguard, "postguard not nil")
+	require.Nil(t, o.canary, "canary not nil")
+	require.EqualValues(t, make([]byte, len(o.memory)), o.memory, "memory not zero'ed")
+
+	// Call destroy again to check idempotency.
+	require.NoError(t, alloc.Free(b))
+}
+
+func TestSlabAllocOverflow(t *testing.T) {
+	alloc := NewPageAllocator()
+
+	// Allocate a new buffer.
+	b, err := alloc.Alloc(32)
+	require.NoError(t, err)
+
+	o, found := alloc.(*pageAllocator).lookup(b)
+	require.True(t, found)
+
+	// Modify the canary as if we had overflowed
+	o.canary[0] = ^o.canary[0]
+
+	// Destroy it and check it is gone...
+	require.ErrorIs(t, alloc.Free(b), ErrBufferOverflow)
+}
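
A minimal usage sketch of the slab allocator introduced above, written as it could appear inside the core package; the helper name and the chosen sizes are illustrative assumptions:

	// exampleSlabUsage shows the intended call pattern: configure the size
	// classes and the minimum canary, then allocate, use, and free a buffer.
	func exampleSlabUsage() error {
		alloc := NewSlabAllocator(
			WithSizes([]int{64, 128, 256}),
			WithMinCanarySize(32),
		)

		// 48 data bytes + 32 canary bytes = 80, so the 128-byte slab serves this.
		buf, err := alloc.Alloc(48)
		if err != nil {
			return err
		}
		copy(buf, "sensitive data")

		// Free wipes the object and verifies its canary before returning.
		return alloc.Free(buf)
	}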