diff --git a/util/lru/lru.go b/util/lru/lru.go
index b8fe511eb..aab175156 100644
--- a/util/lru/lru.go
+++ b/util/lru/lru.go
@@ -174,6 +174,9 @@ func (c *Cache[K, V]) deleteElement(ent *entry[K, V]) {
 	} else {
 		ent.next.prev = ent.prev
 		ent.prev.next = ent.next
+		if c.head == ent {
+			c.head = ent.next
+		}
 	}
 	delete(c.lookup, ent.key)
 }
diff --git a/util/lru/lru_test.go b/util/lru/lru_test.go
index 0620688ce..ea78b9229 100644
--- a/util/lru/lru_test.go
+++ b/util/lru/lru_test.go
@@ -10,6 +10,7 @@ import (
 	"testing"
 
 	"github.com/google/go-cmp/cmp"
+	xmaps "golang.org/x/exp/maps"
 )
 
 func TestLRU(t *testing.T) {
@@ -48,6 +49,156 @@ func TestLRU(t *testing.T) {
 	}
 }
 
+func TestLRUDeleteCorruption(t *testing.T) {
+	// Regression test for tailscale/corp#14747
+
+	c := Cache[int, bool]{}
+
+	c.Set(1, true)
+	c.Set(2, true) // now 2 is the head
+	c.Delete(2)    // delete the head
+	c.check(t)
+}
+
+func TestStressEvictions(t *testing.T) {
+	const (
+		cacheSize = 1_000
+		numKeys   = 10_000
+		numProbes = 100_000
+	)
+
+	vm := map[uint64]bool{}
+	for len(vm) < numKeys {
+		vm[rand.Uint64()] = true
+	}
+	vals := xmaps.Keys(vm)
+
+	c := Cache[uint64, bool]{
+		MaxEntries: cacheSize,
+	}
+
+	for i := 0; i < numProbes; i++ {
+		v := vals[rand.Intn(len(vals))]
+		c.Set(v, true)
+		if l := c.Len(); l > cacheSize {
+			t.Fatalf("Cache size now %d, want max %d", l, cacheSize)
+		}
+	}
+}
+
+func TestStressBatchedEvictions(t *testing.T) {
+	// One of Cache's consumers dynamically adjusts the cache size at
+	// runtime, and does batched evictions as needed. This test
+	// simulates that behavior.
+
+	const (
+		cacheSizeMin = 1_000
+		cacheSizeMax = 2_000
+		numKeys      = 10_000
+		numProbes    = 100_000
+	)
+
+	vm := map[uint64]bool{}
+	for len(vm) < numKeys {
+		vm[rand.Uint64()] = true
+	}
+	vals := xmaps.Keys(vm)
+
+	c := Cache[uint64, bool]{}
+
+	for i := 0; i < numProbes; i++ {
+		v := vals[rand.Intn(len(vals))]
+		c.Set(v, true)
+		if c.Len() == cacheSizeMax {
+			// Batch eviction down to cacheSizeMin
+			for c.Len() > cacheSizeMin {
+				c.DeleteOldest()
+			}
+		}
+		if l := c.Len(); l > cacheSizeMax {
+			t.Fatalf("Cache size now %d, want max %d", l, cacheSizeMax)
+		}
+	}
+}
+
+func TestLRUStress(t *testing.T) {
+	var c Cache[int, int]
+	const (
+		maxSize   = 500
+		numProbes = 5_000
+	)
+	for i := 0; i < numProbes; i++ {
+		n := rand.Intn(maxSize * 2)
+		op := rand.Intn(4)
+		switch op {
+		case 0:
+			c.Get(n)
+		case 1:
+			c.Set(n, n)
+		case 2:
+			c.Delete(n)
+		case 3:
+			for c.Len() > maxSize {
+				c.DeleteOldest()
+			}
+		}
+		c.check(t)
+	}
+}
+
+// check verifies that c.lookup and c.head are consistent in size with
+// each other, and that the ring has the same size when traversed in
+// both directions.
+func (c *Cache[K, V]) check(t testing.TB) {
+	size := c.Len()
+	nextLen := c.nextLen(t, size)
+	prevLen := c.prevLen(t, size)
+	if nextLen != size {
+		t.Fatalf("next list len %v != map len %v", nextLen, size)
+	}
+	if prevLen != size {
+		t.Fatalf("prev list len %v != map len %v", prevLen, size)
+	}
+}
+
+// nextLen returns the length of the ring at c.head when traversing
+// the .next pointers.
+func (c *Cache[K, V]) nextLen(t testing.TB, limit int) (n int) {
+	if c.head == nil {
+		return 0
+	}
+	n = 1
+	at := c.head.next
+	for at != c.head {
+		limit--
+		if limit < 0 {
+			t.Fatal("next list is too long")
+		}
+		n++
+		at = at.next
+	}
+	return n
+}
+
+// prevLen returns the length of the ring at c.head when traversing
+// the .prev pointers.
+func (c *Cache[K, V]) prevLen(t testing.TB, limit int) (n int) {
+	if c.head == nil {
+		return 0
+	}
+	n = 1
+	at := c.head.prev
+	for at != c.head {
+		limit--
+		if limit < 0 {
+			t.Fatal("prev list is too long")
+		}
+		n++
+		at = at.prev
+	}
+	return n
+}
+
 func TestDumpHTML(t *testing.T) {
 	c := Cache[int, string]{MaxEntries: 3}