diff --git a/README.md b/README.md index 5b94176..f04917c 100644 --- a/README.md +++ b/README.md @@ -127,6 +127,11 @@ func example() { ... } + // Iterate over all objects in reverse order + for obj := range myObjects.AllReverse() { + ... + } + // Iterate with revision for obj, revision := range myObjects.All() { ... @@ -147,6 +152,9 @@ func example() { // Iterate objects where ID is between 0x1000_0000 and 0x1fff_ffff objs, watch = myObjects.PrefixWatch(txn, IDIndex.Query(0x1000_0000)) for obj := range objs { ... } + + // Iterate objects where ID is between 0x1000_0000 and 0x1fff_ffff in reverse order + for obj := range myObjects.PrefixReverse(txn, IDIndex.Query(0x1000_0000)) { ... } } ``` diff --git a/benchmarks_test.go b/benchmarks_test.go index 317a566..782c72d 100644 --- a/benchmarks_test.go +++ b/benchmarks_test.go @@ -9,6 +9,7 @@ import ( "iter" "log/slog" "math/rand" + "net/netip" "slices" "testing" "time" @@ -461,6 +462,7 @@ func BenchmarkDB_Prefix_SecondaryIndex(b *testing.B) { } const numObjectsIteration = 100000 +const numLPMObjectsIteration = 50000 func BenchmarkDB_FullIteration_All(b *testing.B) { db, table := newTestDBWithMetrics(b, &NopMetrics{}) @@ -487,6 +489,32 @@ func BenchmarkDB_FullIteration_All(b *testing.B) { b.ReportMetric(float64(numObjectsIteration*b.N)/b.Elapsed().Seconds(), "objects/sec") } +func BenchmarkDB_FullIteration_AllReverse(b *testing.B) { + db, table := newTestDBWithMetrics(b, &NopMetrics{}) + wtxn := db.WriteTxn(table) + for i := range numObjectsIteration { + _, _, err := table.Insert(wtxn, &testObject{ID: uint64(i)}) + require.NoError(b, err) + } + wtxn.Commit() + + for b.Loop() { + txn := db.ReadTxn() + i := uint64(0) + for obj := range table.AllReverse(txn) { + expected := uint64(numObjectsIteration - 1 - i) + if obj.ID != expected { + b.Fatalf("expected ID %d, got %d", expected, obj.ID) + } + i++ + } + if numObjectsIteration != i { + b.Fatalf("expected to iterate %d objects, got %d", numObjectsIteration, i) + } + } + 
b.ReportMetric(float64(numObjectsIteration*b.N)/b.Elapsed().Seconds(), "objects/sec") +} + func BenchmarkDB_FullIteration_Prefix(b *testing.B) { db, table := newTestDBWithMetrics(b, &NopMetrics{}) wtxn := db.WriteTxn(table) @@ -514,6 +542,66 @@ func BenchmarkDB_FullIteration_Prefix(b *testing.B) { b.ReportMetric(float64(numObjectsIteration*b.N)/b.Elapsed().Seconds(), "objects/sec") } +func BenchmarkDB_FullIteration_PrefixReverse(b *testing.B) { + db, table := newTestDBWithMetrics(b, &NopMetrics{}) + wtxn := db.WriteTxn(table) + for i := range numObjectsIteration { + _, _, err := table.Insert(wtxn, &testObject{ID: uint64(i)}) + require.NoError(b, err) + } + wtxn.Commit() + + query := Query[*testObject]{index: idIndex.indexName()} + + for b.Loop() { + txn := db.ReadTxn() + i := uint64(0) + for obj := range table.PrefixReverse(txn, query) { + expected := uint64(numObjectsIteration - 1 - i) + if obj.ID != expected { + b.Fatalf("expected ID %d, got %d", expected, obj.ID) + } + i++ + } + if numObjectsIteration != i { + b.Fatalf("expected to iterate %d objects, got %d", numObjectsIteration, i) + } + } + b.ReportMetric(float64(numObjectsIteration*b.N)/b.Elapsed().Seconds(), "objects/sec") +} + +func BenchmarkDB_LPM_PrefixReverse(b *testing.B) { + db := New() + table := newLPMTestTable(db) + + wtxn := db.WriteTxn(table) + for i := 0; i < numLPMObjectsIteration; i++ { + addr := netip.AddrFrom4([4]byte{10, byte(i >> 8), byte(i), 1}) + prefix := netip.PrefixFrom(addr, 32) + _, _, err := table.Insert(wtxn, lpmTestObject{ + ID: uint16(i), + Prefix: prefix, + PortPrefixLen: 16, + }) + require.NoError(b, err) + } + txn := wtxn.Commit() + + query := lpmPrefixIndex.QueryPrefix(netip.MustParsePrefix("10.0.0.0/8")) + b.ResetTimer() + + for b.Loop() { + count := 0 + for range table.PrefixReverse(txn, query) { + count++ + } + if numLPMObjectsIteration != count { + b.Fatalf("expected to iterate %d objects, got %d", numLPMObjectsIteration, count) + } + } + 
b.ReportMetric(float64(numLPMObjectsIteration*b.N)/b.Elapsed().Seconds(), "objects/sec") +} + func BenchmarkDB_FullIteration_Get(b *testing.B) { db, table := newTestDBWithMetrics(b, &NopMetrics{}) wtxn := db.WriteTxn(table) diff --git a/db_test.go b/db_test.go index a6dbc8e..8415c8e 100644 --- a/db_test.go +++ b/db_test.go @@ -367,6 +367,31 @@ func TestDB_Prefix(t *testing.T) { require.Equal(t, Collect(Map(iter, (*testObject).getID)), []uint64{71, 82, 99}) } +func TestDB_PrefixReverse(t *testing.T) { + t.Parallel() + + db, table := newTestDBWithMetrics(t, &NopMetrics{}, tagsIndex) + + { + txn := db.WriteTxn(table) + _, _, err := table.Insert(txn, &testObject{ID: 42, Tags: part.NewSet("a", "b")}) + require.NoError(t, err, "Insert failed") + _, _, err = table.Insert(txn, &testObject{ID: 82, Tags: part.NewSet("abc")}) + require.NoError(t, err, "Insert failed") + _, _, err = table.Insert(txn, &testObject{ID: 71, Tags: part.NewSet("ab")}) + require.NoError(t, err, "Insert failed") + _, _, err = table.Insert(txn, &testObject{ID: 99, Tags: part.NewSet("abcd")}) + require.NoError(t, err, "Insert failed") + txn.Commit() + } + + txn := db.ReadTxn() + forward := Collect(Map(table.Prefix(txn, tagsIndex.Query("ab")), (*testObject).getID)) + reverse := Collect(Map(table.PrefixReverse(txn, tagsIndex.Query("ab")), (*testObject).getID)) + slices.Reverse(forward) + require.Equal(t, forward, reverse) +} + func TestDB_Changes(t *testing.T) { t.Parallel() @@ -717,6 +742,29 @@ func TestDB_All(t *testing.T) { } } +func TestDB_AllReverse(t *testing.T) { + t.Parallel() + + db, table, _ := newTestDB(t) + + { + txn := db.WriteTxn(table) + _, _, err := table.Insert(txn, &testObject{ID: uint64(1)}) + require.NoError(t, err, "Insert failed") + _, _, err = table.Insert(txn, &testObject{ID: uint64(2)}) + require.NoError(t, err, "Insert failed") + _, _, err = table.Insert(txn, &testObject{ID: uint64(3)}) + require.NoError(t, err, "Insert failed") + txn.Commit() + } + + txn := db.ReadTxn() + 
forward := Collect(Map(table.All(txn), (*testObject).getID)) + reverse := Collect(Map(table.AllReverse(txn), (*testObject).getID)) + slices.Reverse(forward) + require.Equal(t, forward, reverse) +} + func TestDB_Modify(t *testing.T) { t.Parallel() @@ -889,6 +937,31 @@ func TestDB_GetList(t *testing.T) { } } +func TestDB_ListReverse(t *testing.T) { + t.Parallel() + + db, table, _ := newTestDB(t, tagsIndex) + + { + txn := db.WriteTxn(table) + for i := 1; i <= 10; i++ { + tag := "odd" + if i%2 == 0 { + tag = "even" + } + _, _, err := table.Insert(txn, &testObject{ID: uint64(i), Tags: part.NewSet(tag)}) + require.NoError(t, err) + } + txn.Commit() + } + + txn := db.ReadTxn() + forward := Collect(Map(table.List(txn, tagsIndex.Query("odd")), (*testObject).getID)) + reverse := Collect(Map(table.ListReverse(txn, tagsIndex.Query("odd")), (*testObject).getID)) + slices.Reverse(forward) + require.Equal(t, forward, reverse) +} + func TestDB_CommitAbort(t *testing.T) { t.Parallel() diff --git a/lpm/iterator.go b/lpm/iterator.go index d8de4c2..8b8d575 100644 --- a/lpm/iterator.go +++ b/lpm/iterator.go @@ -10,6 +10,16 @@ type Iterator[T any] struct { stack []*lpmNode[T] } +type reverseFrame[T any] struct { + node *lpmNode[T] + visited bool +} + +type ReverseIterator[T any] struct { + start *lpmNode[T] + stack []reverseFrame[T] +} + func (it *Iterator[T]) All(yield func([]byte, T) bool) { if it == nil { return @@ -74,3 +84,84 @@ func (it *Iterator[T]) Next() (key []byte, value T, ok bool) { } return } + +func (it *ReverseIterator[T]) All(yield func([]byte, T) bool) { + if it == nil { + return + } + var ( + // Use a stack allocated array for holding the next nodes + // to explore. If this isn't large enough [append] will heap + // allocate. 
+ stackArray [32]reverseFrame[T] + + stack []reverseFrame[T] + ) + + if it.start != nil { + stack = stackArray[0:1:32] + stack[0] = reverseFrame[T]{node: it.start} + } else if len(it.stack) < cap(stackArray) { + stack = stackArray[:len(it.stack)] + copy(stack, it.stack) + } else { + stack = slices.Clone(it.stack) + } + + for len(stack) > 0 { + frame := stack[len(stack)-1] + stack = stack[:len(stack)-1] + if frame.node == nil { + continue + } + if frame.visited { + if !frame.node.imaginary { + if !yield(frame.node.key, frame.node.value) { + return + } + } + continue + } + + stack = append(stack, reverseFrame[T]{node: frame.node, visited: true}) + if frame.node.children[0] != nil { + stack = append(stack, reverseFrame[T]{node: frame.node.children[0]}) + } + if frame.node.children[1] != nil { + stack = append(stack, reverseFrame[T]{node: frame.node.children[1]}) + } + } +} + +func (it *ReverseIterator[T]) Next() (key []byte, value T, ok bool) { + if it == nil { + return + } + if it.start != nil { + it.stack = []reverseFrame[T]{{node: it.start}} + it.start = nil + } + + for len(it.stack) > 0 { + frame := it.stack[len(it.stack)-1] + it.stack = it.stack[:len(it.stack)-1] + if frame.node == nil { + continue + } + if frame.visited { + if !frame.node.imaginary { + return frame.node.key, frame.node.value, true + } + continue + } + + it.stack = append(it.stack, reverseFrame[T]{node: frame.node, visited: true}) + if frame.node.children[0] != nil { + it.stack = append(it.stack, reverseFrame[T]{node: frame.node.children[0]}) + } + if frame.node.children[1] != nil { + it.stack = append(it.stack, reverseFrame[T]{node: frame.node.children[1]}) + } + } + return +} diff --git a/lpm/trie.go b/lpm/trie.go index 2e3e5cb..f943978 100644 --- a/lpm/trie.go +++ b/lpm/trie.go @@ -68,11 +68,23 @@ func (l *Trie[T]) All() *Iterator[T] { return &Iterator[T]{start: l.root} } +func (l *Trie[T]) AllReverse() *ReverseIterator[T] { + if l.root == nil { + return nil + } + return 
&ReverseIterator[T]{start: l.root} +} + func (l *Trie[T]) Prefix(key index.Key) *Iterator[T] { txn := Txn[T]{root: l.root, size: l.size} return txn.Prefix(key) } +func (l *Trie[T]) PrefixReverse(key index.Key) *ReverseIterator[T] { + txn := Txn[T]{root: l.root, size: l.size} + return txn.PrefixReverse(key) +} + func (l *Trie[T]) LowerBound(key index.Key) *Iterator[T] { txn := Txn[T]{root: l.root, size: l.size} return txn.LowerBound(key) @@ -381,6 +393,15 @@ func (txn *Txn[T]) All() *Iterator[T] { return &Iterator[T]{start: txn.root} } +func (txn *Txn[T]) AllReverse() *ReverseIterator[T] { + if txn.root == nil { + return nil + } + // Bump txnID to freeze the trie + txn.txnID++ + return &ReverseIterator[T]{start: txn.root} +} + func (txn *Txn[T]) Prefix(key index.Key) *Iterator[T] { if txn.root == nil { return nil @@ -405,6 +426,30 @@ func (txn *Txn[T]) Prefix(key index.Key) *Iterator[T] { return &Iterator[T]{start: node} } +func (txn *Txn[T]) PrefixReverse(key index.Key) *ReverseIterator[T] { + if txn.root == nil { + return nil + } + // Bump txnID to freeze the trie + txn.txnID++ + + node := txn.root + data, prefixLen := DecodeLPMKey(key) + + var matchLen PrefixLen + for node != nil { + matchLen = longestMatch(matchLen, node, data, prefixLen) + if matchLen == prefixLen || matchLen < node.prefixLen() { + break + } + node = node.children[getBitAt(data, node.prefixLen())] + } + if node == nil { + return nil + } + return &ReverseIterator[T]{start: node} +} + func (txn *Txn[T]) LowerBound(key index.Key) *Iterator[T] { if txn.root == nil { return nil diff --git a/lpm/trie_test.go b/lpm/trie_test.go index a07f51c..c55da57 100644 --- a/lpm/trie_test.go +++ b/lpm/trie_test.go @@ -28,6 +28,22 @@ func iteratorToValues[T any](it *Iterator[T]) iter.Seq[T] { } } +func iteratorToKeys[T any](it *Iterator[T]) []string { + var keys []string + for key := range it.All { + keys = append(keys, string(key)) + } + return keys +} + +func reverseIteratorToKeys[T any](it *ReverseIterator[T]) 
[]string { + var keys []string + for key := range it.All { + keys = append(keys, string(key)) + } + return keys +} + func TestTrie(t *testing.T) { lpm := New[int]() @@ -122,6 +138,60 @@ func TestTrie(t *testing.T) { require.Equal(t, 999, v) } +func TestTrie_AllReverse(t *testing.T) { + trie := New[int]() + keys := []string{ + "10.1.1.1/32", + "10.0.0.0/8", + "192.168.1.0/24", + "192.168.1.5/32", + } + + prefixKey := func(p string) index.Key { + key := netip.MustParsePrefix(p) + return EncodeLPMKey(key.Addr().AsSlice(), uint16(key.Bits())) + } + + txn := trie.Txn() + for i, k := range keys { + txn.Insert(prefixKey(k), i) + } + trie = txn.Commit() + + forward := iteratorToKeys(trie.All()) + reverse := reverseIteratorToKeys(trie.AllReverse()) + slices.Reverse(forward) + require.Equal(t, forward, reverse) +} + +func TestTrie_PrefixReverse(t *testing.T) { + trie := New[int]() + keys := []string{ + "10.1.1.1/32", + "10.0.0.0/8", + "10.0.0.0/24", + "10.0.0.1/32", + "192.168.1.5/32", + } + + prefixKey := func(p string) index.Key { + key := netip.MustParsePrefix(p) + return EncodeLPMKey(key.Addr().AsSlice(), uint16(key.Bits())) + } + + txn := trie.Txn() + for i, k := range keys { + txn.Insert(prefixKey(k), i) + } + trie = txn.Commit() + + query := prefixKey("10.0.0.0/8") + forward := iteratorToKeys(trie.Prefix(query)) + reverse := reverseIteratorToKeys(trie.PrefixReverse(query)) + slices.Reverse(forward) + require.Equal(t, forward, reverse) +} + func TestEncodeDecodeLPMKey(t *testing.T) { maskData := func(data []byte, prefixLen PrefixLen) []byte { dataLen := (prefixLen + 7) / 8 diff --git a/lpm_index.go b/lpm_index.go index 36e3921..9634971 100644 --- a/lpm_index.go +++ b/lpm_index.go @@ -203,6 +203,10 @@ func (l lpmIndex) all() (tableIndexIterator, <-chan struct{}) { return newLPMIterator(l.lpm.All()), l.watch } +// allReverse implements tableIndex. 
+func (l lpmIndex) allReverse() (tableIndexIterator, <-chan struct{}) { + return newLPMReverseIterator(l.lpm.AllReverse()), l.watch +} // get implements tableIndex. func (l lpmIndex) get(ikey index.Key) (object, <-chan struct{}, bool) { entry, found := l.lpm.Lookup(ikey) @@ -229,6 +233,14 @@ func (l lpmIndex) list(key index.Key) (tableIndexIterator, <-chan struct{}) { return &entry, l.watch } +// listReverse implements tableIndex. +func (l lpmIndex) listReverse(key index.Key) (tableIndexIterator, <-chan struct{}) { + entry, found := l.lpm.Lookup(key) + if !found || entry.len() == 0 { + return emptyTableIndexIterator, l.watch + } + return &lpmEntryReverseIterator{entry: &entry}, l.watch +} // lowerBound implements tableIndex. func (l lpmIndex) lowerBound(key index.Key) (tableIndexIterator, <-chan struct{}) { return newLPMIterator(l.lpm.LowerBound(key)), l.watch @@ -249,6 +261,10 @@ func (l lpmIndex) prefix(key index.Key) (tableIndexIterator, <-chan struct{}) { return newLPMIterator(l.lpm.Prefix(key)), l.watch } +// prefixReverse implements tableIndex. +func (l lpmIndex) prefixReverse(key index.Key) (tableIndexIterator, <-chan struct{}) { + return newLPMReverseIterator(l.lpm.PrefixReverse(key)), l.watch +} // rootWatch implements tableIndex. func (l lpmIndex) rootWatch() <-chan struct{} { return l.watch @@ -287,6 +303,10 @@ func (l *lpmIndexTxn) all() (tableIndexIterator, <-chan struct{}) { return newLPMIterator(l.tx.All()), l.index.watch } +// allReverse implements tableIndexTxn. +func (l *lpmIndexTxn) allReverse() (tableIndexIterator, <-chan struct{}) { + return newLPMReverseIterator(l.tx.AllReverse()), l.index.watch +} // commit implements tableIndexTxn. func (l *lpmIndexTxn) commit() (tableIndex, tableIndexTxnNotify) { lpm := l.tx.Commit() @@ -342,6 +362,14 @@ func (l *lpmIndexTxn) list(key index.Key) (tableIndexIterator, <-chan struct{}) return &entry, l.index.watch } +// listReverse implements tableIndexTxn. 
+func (l *lpmIndexTxn) listReverse(key index.Key) (tableIndexIterator, <-chan struct{}) {
+	entry, found := l.tx.Lookup(key)
+	if !found || entry.len() == 0 {
+		return emptyTableIndexIterator, l.index.watch
+	}
+	return &lpmEntryReverseIterator{entry: &entry}, l.index.watch
+}
 // lowerBound implements tableIndexTxn.
 func (l *lpmIndexTxn) lowerBound(key index.Key) (tableIndexIterator, <-chan struct{}) {
 	return newLPMIterator(l.tx.LowerBound(key)), l.index.watch
 }
@@ -370,6 +398,10 @@ func (l *lpmIndexTxn) prefix(key index.Key) (tableIndexIterator, <-chan struct{}
 	return newLPMIterator(l.tx.Prefix(key)), l.index.watch
 }
 
+// prefixReverse implements tableIndexTxn.
+func (l *lpmIndexTxn) prefixReverse(key index.Key) (tableIndexIterator, <-chan struct{}) {
+	return newLPMReverseIterator(l.tx.PrefixReverse(key)), l.index.watch
+}
 // reindex implements tableIndexTxn.
 func (l *lpmIndexTxn) reindex(primaryKey index.Key, old object, new object) {
 	var newKeys index.KeySet
@@ -570,6 +602,20 @@ func (e *lpmEntry) All(yield func([]byte, object) bool) {
 	}
 }
 
+func (e *lpmEntry) AllReverse(yield func([]byte, object) bool) {
+	if e == nil || !e.used {
+		return
+	}
+	for i := len(e.tail) - 1; i >= 0; i-- {
+		if !yield(e.secondary, e.tail[i].obj) {
+			return
+		}
+	}
+	if !yield(e.secondary, e.head.obj) {
+		return
+	}
+}
+
 func (e *lpmEntry) appendObjects(dst []object) []object {
 	dst = dst[:0]
 	if e == nil || !e.used {
@@ -601,6 +647,41 @@ func (l *lpmIteratorAdapter) All(yield func([]byte, object) bool) {
 	})
 }
 
+type lpmReverseIteratorAdapter struct {
+	iter *lpm.ReverseIterator[lpmEntry]
+}
+
+func newLPMReverseIterator(iter *lpm.ReverseIterator[lpmEntry]) tableIndexIterator {
+	return &lpmReverseIteratorAdapter{iter: iter}
+}
+
+func (l *lpmReverseIteratorAdapter) All(yield func([]byte, object) bool) {
+	if l.iter == nil {
+		return
+	}
+	// Stop both iteration levels once yield returns false; the iter.Seq
+	// contract forbids calling yield again after it returns false.
+	cont := true
+	l.iter.All(func(key []byte, entry lpmEntry) bool {
+		entry.AllReverse(func(_ []byte, obj object) bool {
+			cont = yield(key, obj)
+			return cont
+		})
+		return cont
+	})
+} + +type lpmEntryReverseIterator struct { + entry *lpmEntry +} + +func (l *lpmEntryReverseIterator) All(yield func([]byte, object) bool) { + if l.entry == nil { + return + } + l.entry.AllReverse(yield) +} + type lpmNextIterator struct { iter *lpm.Iterator[lpmEntry] pending []object diff --git a/lpm_index_test.go b/lpm_index_test.go index 01c41f4..28e510e 100644 --- a/lpm_index_test.go +++ b/lpm_index_test.go @@ -27,6 +27,10 @@ type lpmTestObject struct { PortPrefixLen lpm.PrefixLen } +func collectLPMIDs(seq iter.Seq2[lpmTestObject, Revision]) []uint16 { + return Collect(Map(seq, func(obj lpmTestObject) uint16 { return obj.ID })) +} + // TableHeader implements TableWritable. func (l lpmTestObject) TableHeader() []string { return []string{ @@ -199,6 +203,40 @@ func TestLPMIndex(t *testing.T) { } +func TestLPMIndex_PrefixReverse(t *testing.T) { + db := New() + tbl := newLPMTestTable(db) + + wtxn := db.WriteTxn(tbl) + tbl.Insert(wtxn, lpmTestObject{ + ID: 0, + Prefix: netip.MustParsePrefix("1.0.0.0/8"), + PortPrefixLen: 16, + }) + tbl.Insert(wtxn, lpmTestObject{ + ID: 1, + Prefix: netip.MustParsePrefix("10.0.0.0/8"), + PortPrefixLen: 16, + }) + tbl.Insert(wtxn, lpmTestObject{ + ID: 2, + Prefix: netip.MustParsePrefix("10.0.0.0/24"), + PortPrefixLen: 16, + }) + tbl.Insert(wtxn, lpmTestObject{ + ID: 3, + Prefix: netip.MustParsePrefix("10.0.0.1/32"), + PortPrefixLen: 16, + }) + txn := wtxn.Commit() + + query := lpmPrefixIndex.QueryPrefix(netip.MustParsePrefix("10.0.0.0/8")) + forward := collectLPMIDs(tbl.Prefix(txn, query)) + reverse := collectLPMIDs(tbl.PrefixReverse(txn, query)) + slices.Reverse(forward) + require.Equal(t, forward, reverse) +} + func TestLPMIndexNonUnique(t *testing.T) { db := New() tbl := newLPMNonUniqueTestTable(db) @@ -253,6 +291,34 @@ func TestLPMIndexNonUnique(t *testing.T) { require.EqualValues(t, 20, objs[0].ID) } +func TestLPMIndex_ListReverse(t *testing.T) { + db := New() + tbl := newLPMNonUniqueTestTable(db) + + wtxn := db.WriteTxn(tbl) + key 
:= binary.BigEndian.AppendUint16(nil, 0x1234) + prefix := netip.MustParsePrefix("10.0.0.0/8") + tbl.Insert(wtxn, lpmTestObject{ + ID: 10, + Prefix: prefix, + Port: 0x1234, + PortPrefixLen: 16, + }) + tbl.Insert(wtxn, lpmTestObject{ + ID: 20, + Prefix: prefix.Masked(), + Port: 0x1234, + PortPrefixLen: 16, + }) + txn := wtxn.Commit() + + query := lpmPortNonUniqueIndex.Query(key, 16) + forward := collectLPMIDs(tbl.List(txn, query)) + reverse := collectLPMIDs(tbl.ListReverse(txn, query)) + slices.Reverse(forward) + require.Equal(t, forward, reverse) +} + func TestLPMIndexIteratorNonUnique(t *testing.T) { idx := lpmIndex{ lpm: lpm.New[lpmEntry](), diff --git a/part/iterator.go b/part/iterator.go index afe3a97..a716560 100644 --- a/part/iterator.go +++ b/part/iterator.go @@ -95,7 +95,7 @@ func (it *Iterator[T]) Next() (key []byte, value T, ok bool) { it.start = nil if node.size() > 0 { it.edges = make([][]*header[T], 1, 32) - it.edges = append(it.edges, node.children()) + it.edges[0] = node.children() } if leaf := node.getLeaf(); leaf != nil { return leaf.fullKey(), leaf.value, true @@ -138,6 +138,104 @@ func newIterator[T any](start *header[T]) Iterator[T] { return Iterator[T]{start: start} } +type reverseFrame[T any] struct { + children []*header[T] + leaf *leaf[T] +} + +func newReverseFrame[T any](node *header[T]) reverseFrame[T] { + return reverseFrame[T]{children: node.children(), leaf: node.getLeaf()} +} + +func (f *reverseFrame[T]) nextChild() *header[T] { + // Find the next non-nil child (node256 may have holes) + for i := len(f.children) - 1; i >= 0; i-- { + if child := f.children[i]; child != nil { + f.children = f.children[:i] + return child + } + } + f.children = nil + return nil +} + +// ReverseIterator iterates over key/value pairs in reverse order. +type ReverseIterator[T any] struct { + start *header[T] + stack []reverseFrame[T] +} + +// All calls yield for every value in reverse order. Can be called multiple times. 
+func (it ReverseIterator[T]) All(yield func(key []byte, value T) bool) { + // Try to use a stack-allocated stack of frames to avoid heap allocations. + var stackArray [32]reverseFrame[T] + stack := stackArray[0:0:32] + + switch { + case it.start != nil: + stack = append(stack, newReverseFrame(it.start)) + case len(it.stack) > 0: + stack = append(stack, it.stack...) + default: + return + } + + for len(stack) > 0 { + frame := &stack[len(stack)-1] + if child := frame.nextChild(); child != nil { + stack = append(stack, newReverseFrame(child)) + continue + } + + if leaf := frame.leaf; leaf != nil { + frame.leaf = nil + if !yield(leaf.fullKey(), leaf.value) { + return + } + continue + } + + stack = stack[:len(stack)-1] + } +} + +// Next returns the next key, value and true if the value exists, +// otherwise it returns false. +func (it *ReverseIterator[T]) Next() (key []byte, value T, ok bool) { + if it == nil { + return + } + + if it.stack == nil { + if it.start == nil { + return + } + it.stack = make([]reverseFrame[T], 1, 32) + it.stack[0] = newReverseFrame(it.start) + it.start = nil + } + + for len(it.stack) > 0 { + frame := &it.stack[len(it.stack)-1] + if child := frame.nextChild(); child != nil { + it.stack = append(it.stack, newReverseFrame(child)) + continue + } + + if leaf := frame.leaf; leaf != nil { + frame.leaf = nil + return leaf.fullKey(), leaf.value, true + } + + it.stack = it.stack[:len(it.stack)-1] + } + return +} + +func newReverseIterator[T any](start *header[T]) ReverseIterator[T] { + return ReverseIterator[T]{start: start} +} + func prefixSearch[T any](root *header[T], rootWatch <-chan struct{}, prefix []byte) (Iterator[T], <-chan struct{}) { if root == nil { return newIterator[T](nil), rootWatch @@ -173,6 +271,11 @@ func prefixSearch[T any](root *header[T], rootWatch <-chan struct{}, prefix []by } } +func prefixSearchReverse[T any](root *header[T], rootWatch <-chan struct{}, prefix []byte) (ReverseIterator[T], <-chan struct{}) { + iter, watch := 
prefixSearch(root, rootWatch, prefix) + return newReverseIterator(iter.start), watch +} + func traverseToMin[T any](n *header[T], edges [][]*header[T]) [][]*header[T] { if leaf := n.getLeaf(); leaf != nil { return append(edges, []*header[T]{n}) diff --git a/part/ops.go b/part/ops.go index 7e8b5b9..c2fec40 100644 --- a/part/ops.go +++ b/part/ops.go @@ -20,6 +20,11 @@ type Ops[T any] interface { // the given prefix are upserted or deleted. Prefix(key []byte) (Iterator[T], <-chan struct{}) + // PrefixReverse returns a reverse iterator for all objects that start with + // the given prefix, and a channel that closes when any objects matching the + // given prefix are upserted or deleted. + PrefixReverse(key []byte) (ReverseIterator[T], <-chan struct{}) + // LowerBound returns an iterator for all objects that have a // key equal or higher than the given 'key'. LowerBound(key []byte) Iterator[T] @@ -32,6 +37,9 @@ type Ops[T any] interface { // Iterator returns an iterator for all objects. Iterator() Iterator[T] + // ReverseIterator returns an iterator for all objects in reverse order. + ReverseIterator() ReverseIterator[T] + // PrintTree to the standard output. For debugging. 
PrintTree() } diff --git a/part/part_test.go b/part/part_test.go index 06f1c32..7a7be17 100644 --- a/part/part_test.go +++ b/part/part_test.go @@ -1329,6 +1329,51 @@ func TestIterator(t *testing.T) { require.Equal(t, 0, v) } +func TestReverseIterator(t *testing.T) { + tree := New[int]() + it := tree.ReverseIterator() + for range it.All { + t.Fatalf("All yielded value from empty tree") + } + + _, _, tree = tree.Insert([]byte("bbb"), 2) + _, _, tree = tree.Insert([]byte("aaa"), 1) + + it = tree.ReverseIterator() + var values []int + for _, v := range it.All { + values = append(values, v) + } + require.Equal(t, []int{2, 1}, values) + + it = tree.ReverseIterator() + k, v, ok := it.Next() + require.True(t, ok) + require.Equal(t, "bbb", string(k)) + require.Equal(t, 2, v) + + values = nil + for _, v := range it.All { + values = append(values, v) + } + require.Equal(t, []int{1}, values) +} + +func TestPrefixReverse(t *testing.T) { + tree := New[int]() + _, _, tree = tree.Insert([]byte("a"), 1) + _, _, tree = tree.Insert([]byte("aa"), 2) + _, _, tree = tree.Insert([]byte("ab"), 3) + _, _, tree = tree.Insert([]byte("b"), 4) + + iter, _ := tree.PrefixReverse([]byte("a")) + var keys []string + for key := range iter.All { + keys = append(keys, string(key)) + } + require.Equal(t, []string{"ab", "aa", "a"}, keys) +} + func Benchmark_Insert_RootOnlyWatch(b *testing.B) { benchmark_Insert(b, RootOnlyWatch) } @@ -1538,6 +1583,23 @@ func Benchmark_Iterator_All(b *testing.B) { b.ReportMetric(float64(numObjectsToInsert*b.N)/b.Elapsed().Seconds(), "objects/sec") } +func Benchmark_ReverseIterator_All(b *testing.B) { + tree := New[uint64](RootOnlyWatch) + for j := uint64(1); j <= numObjectsToInsert; j++ { + _, _, tree = tree.Insert(uint64Key(j), j) + } + b.ResetTimer() + + for b.Loop() { + for _, j := range tree.ReverseIterator().All { + if j < 1 || j > numObjectsToInsert+1 { + b.Fatalf("impossible value: %d", j) + } + } + } + 
b.ReportMetric(float64(numObjectsToInsert*b.N)/b.Elapsed().Seconds(), "objects/sec") +} + func Benchmark_Iterator_Next(b *testing.B) { tree := New[uint64](RootOnlyWatch) for j := uint64(1); j <= numObjectsToInsert; j++ { @@ -1556,6 +1618,128 @@ func Benchmark_Iterator_Next(b *testing.B) { b.ReportMetric(float64(numObjectsToInsert*b.N)/b.Elapsed().Seconds(), "objects/sec") } +func Benchmark_ReverseIterator_Next(b *testing.B) { + tree := New[uint64](RootOnlyWatch) + for j := uint64(1); j <= numObjectsToInsert; j++ { + _, _, tree = tree.Insert(uint64Key(j), j) + } + b.ResetTimer() + + for b.Loop() { + iter := tree.ReverseIterator() + for _, j, ok := iter.Next(); ok; _, j, ok = iter.Next() { + if j < 1 || j > numObjectsToInsert+1 { + b.Fatalf("impossible value: %d", j) + } + } + } + b.ReportMetric(float64(numObjectsToInsert*b.N)/b.Elapsed().Seconds(), "objects/sec") +} + +func Benchmark_Prefix_All(b *testing.B) { + tree := New[uint64](RootOnlyWatch) + for j := uint64(1); j <= numObjectsToInsert; j++ { + key := make([]byte, 1+8) + if j%2 == 0 { + key[0] = 'a' + } else { + key[0] = 'b' + } + binary.BigEndian.PutUint64(key[1:], j) + _, _, tree = tree.Insert(key, j) + } + prefix := []byte{'a'} + b.ResetTimer() + + for b.Loop() { + iter, _ := tree.Prefix(prefix) + for _, j := range iter.All { + if j < 1 || j > numObjectsToInsert+1 { + b.Fatalf("impossible value: %d", j) + } + } + } + b.ReportMetric(float64(numObjectsToInsert/2*b.N)/b.Elapsed().Seconds(), "objects/sec") +} + +func Benchmark_Prefix_Next(b *testing.B) { + tree := New[uint64](RootOnlyWatch) + for j := uint64(1); j <= numObjectsToInsert; j++ { + key := make([]byte, 1+8) + if j%2 == 0 { + key[0] = 'a' + } else { + key[0] = 'b' + } + binary.BigEndian.PutUint64(key[1:], j) + _, _, tree = tree.Insert(key, j) + } + prefix := []byte{'a'} + b.ResetTimer() + + for b.Loop() { + iter, _ := tree.Prefix(prefix) + for _, j, ok := iter.Next(); ok; _, j, ok = iter.Next() { + if j < 1 || j > numObjectsToInsert+1 { + 
b.Fatalf("impossible value: %d", j) + } + } + } + b.ReportMetric(float64(numObjectsToInsert/2*b.N)/b.Elapsed().Seconds(), "objects/sec") +} + +func Benchmark_PrefixReverse_All(b *testing.B) { + tree := New[uint64](RootOnlyWatch) + for j := uint64(1); j <= numObjectsToInsert; j++ { + key := make([]byte, 1+8) + if j%2 == 0 { + key[0] = 'a' + } else { + key[0] = 'b' + } + binary.BigEndian.PutUint64(key[1:], j) + _, _, tree = tree.Insert(key, j) + } + prefix := []byte{'a'} + b.ResetTimer() + + for b.Loop() { + iter, _ := tree.PrefixReverse(prefix) + for _, j := range iter.All { + if j < 1 || j > numObjectsToInsert+1 { + b.Fatalf("impossible value: %d", j) + } + } + } + b.ReportMetric(float64(numObjectsToInsert/2*b.N)/b.Elapsed().Seconds(), "objects/sec") +} + +func Benchmark_PrefixReverse_Next(b *testing.B) { + tree := New[uint64](RootOnlyWatch) + for j := uint64(1); j <= numObjectsToInsert; j++ { + key := make([]byte, 1+8) + if j%2 == 0 { + key[0] = 'a' + } else { + key[0] = 'b' + } + binary.BigEndian.PutUint64(key[1:], j) + _, _, tree = tree.Insert(key, j) + } + prefix := []byte{'a'} + b.ResetTimer() + + for b.Loop() { + iter, _ := tree.PrefixReverse(prefix) + for _, j, ok := iter.Next(); ok; _, j, ok = iter.Next() { + if j < 1 || j > numObjectsToInsert+1 { + b.Fatalf("impossible value: %d", j) + } + } + } + b.ReportMetric(float64(numObjectsToInsert/2*b.N)/b.Elapsed().Seconds(), "objects/sec") +} + func Benchmark_Hashmap_Insert(b *testing.B) { for b.Loop() { m := map[uint64]uint64{} diff --git a/part/tree.go b/part/tree.go index 7134549..d536726 100644 --- a/part/tree.go +++ b/part/tree.go @@ -99,6 +99,13 @@ func (t *Tree[T]) Prefix(prefix []byte) (Iterator[T], <-chan struct{}) { return prefixSearch(t.root, t.rootWatch, prefix) } +// PrefixReverse returns a reverse iterator for all objects that start with the +// given prefix, and a channel that closes when any objects matching the prefix +// are upserted or deleted. 
+func (t *Tree[T]) PrefixReverse(prefix []byte) (ReverseIterator[T], <-chan struct{}) { + return prefixSearchReverse(t.root, t.rootWatch, prefix) +} + // RootWatch returns a watch channel for the root of the tree. // Since this is the channel associated with the root, this closes // when there are any changes to the tree. @@ -146,6 +153,11 @@ func (t *Tree[T]) Iterator() Iterator[T] { return newIterator(t.root) } +// ReverseIterator returns an iterator for all objects in reverse order. +func (t *Tree[T]) ReverseIterator() ReverseIterator[T] { + return newReverseIterator(t.root) +} + // All iterates over all objects func (t *Tree[T]) All(yield func([]byte, T) bool) { Iterator[T]{start: t.root}.All(yield) diff --git a/part/txn.go b/part/txn.go index b6f484e..f8bc192 100644 --- a/part/txn.go +++ b/part/txn.go @@ -152,6 +152,15 @@ func (txn *Txn[T]) Prefix(key []byte) (Iterator[T], <-chan struct{}) { return prefixSearch(txn.root, txn.rootWatch, key) } +// PrefixReverse returns a reverse iterator for all objects that start with the +// given prefix, and a channel that closes when any objects matching the prefix +// are upserted or deleted. +func (txn *Txn[T]) PrefixReverse(key []byte) (ReverseIterator[T], <-chan struct{}) { + // Bump txnID in order to freeze the current tree. + txn.txnID++ + return prefixSearchReverse(txn.root, txn.rootWatch, key) +} + // LowerBound returns an iterator for all objects that have a // key equal or higher than the given 'key'. func (txn *Txn[T]) LowerBound(key []byte) Iterator[T] { @@ -167,6 +176,13 @@ func (txn *Txn[T]) Iterator() Iterator[T] { return newIterator(txn.root) } +// ReverseIterator returns an iterator for all objects in reverse order. +func (txn *Txn[T]) ReverseIterator() ReverseIterator[T] { + // Bump txnID in order to freeze the current tree. + txn.txnID++ + return newReverseIterator(txn.root) +} + // CommitAndNotify commits the transaction and notifies by // closing the watch channels of all modified nodes. 
func (txn *Txn[T]) CommitAndNotify() Tree[T] { diff --git a/part_index.go b/part_index.go index 7bde0da..f8720bd 100644 --- a/part_index.go +++ b/part_index.go @@ -118,6 +118,11 @@ func (r *partIndex) list(key index.Key) (tableIndexIterator, <-chan struct{}) { return partList(r.unique, &r.tree, key) } +// listReverse implements tableIndex. +func (r *partIndex) listReverse(key index.Key) (tableIndexIterator, <-chan struct{}) { + return partListReverse(r.unique, &r.tree, key) +} + var emptyTableIndexIterator = &singletonTableIndexIterator{} func partList(unique bool, tree part.Ops[object], key index.Key) (tableIndexIterator, <-chan struct{}) { @@ -141,6 +146,20 @@ func partList(unique bool, tree part.Ops[object], key index.Key) (tableIndexIter return newNonUniquePartIterator(iter, false, key), watch } +func partListReverse(unique bool, tree part.Ops[object], key index.Key) (tableIndexIterator, <-chan struct{}) { + if unique { + obj, watch, ok := tree.Get(key) + if ok { + return &singletonTableIndexIterator{key, obj}, watch + } + return emptyTableIndexIterator, watch + } + + key = encodeNonUniqueBytes(key) + iter, watch := tree.PrefixReverse(key) + return newNonUniquePartReverseIterator(iter, false, key), watch +} + // rootWatch implements tableIndex. func (r *partIndex) rootWatch() <-chan struct{} { return r.tree.RootWatch() @@ -194,11 +213,21 @@ func (r *partIndex) all() (tableIndexIterator, <-chan struct{}) { return &r.tree, r.rootWatch() } +// allReverse implements tableIndex. +func (r *partIndex) allReverse() (tableIndexIterator, <-chan struct{}) { + return partReverseAll(&r.tree), r.rootWatch() +} + // prefix implements tableIndex. func (r *partIndex) prefix(ikey index.Key) (tableIndexIterator, <-chan struct{}) { return partPrefix(r.unique, &r.tree, ikey) } +// prefixReverse implements tableIndex. 
+func (r *partIndex) prefixReverse(ikey index.Key) (tableIndexIterator, <-chan struct{}) { + return partPrefixReverse(r.unique, &r.tree, ikey) +} + func partPrefix(unique bool, tree part.Ops[object], key index.Key) (tableIndexIterator, <-chan struct{}) { if !unique { key = encodeNonUniqueBytes(key) @@ -210,6 +239,17 @@ func partPrefix(unique bool, tree part.Ops[object], key index.Key) (tableIndexIt return newNonUniquePartIterator(iter, true, key), watch } +func partPrefixReverse(unique bool, tree part.Ops[object], key index.Key) (tableIndexIterator, <-chan struct{}) { + if !unique { + key = encodeNonUniqueBytes(key) + } + iter, watch := tree.PrefixReverse(key) + if unique { + return iter, watch + } + return newNonUniquePartReverseIterator(iter, true, key), watch +} + // lowerBound implements tableIndexTxn. func (r *partIndex) lowerBound(ikey index.Key) (tableIndexIterator, <-chan struct{}) { return partLowerBound(r.unique, &r.tree, ikey), r.rootWatch() @@ -259,12 +299,24 @@ func (r *partIndexTxn) all() (tableIndexIterator, <-chan struct{}) { return &snapshot, r.rootWatch() } +// allReverse implements tableIndexTxn. +func (r *partIndexTxn) allReverse() (tableIndexIterator, <-chan struct{}) { + snapshot := r.tx.Clone() + return partReverseAll(&snapshot), r.rootWatch() +} + // list implements tableIndexTxn. func (r *partIndexTxn) list(ikey index.Key) (tableIndexIterator, <-chan struct{}) { snapshot := r.tx.Clone() return partList(r.unique, &snapshot, ikey) } +// listReverse implements tableIndexTxn. +func (r *partIndexTxn) listReverse(ikey index.Key) (tableIndexIterator, <-chan struct{}) { + snapshot := r.tx.Clone() + return partListReverse(r.unique, &snapshot, ikey) +} + // lowerBound implements tableIndexTxn. 
func (r *partIndexTxn) lowerBound(ikey index.Key) (tableIndexIterator, <-chan struct{}) { snapshot := r.tx.Clone() @@ -339,6 +391,12 @@ func (r *partIndexTxn) prefix(ikey index.Key) (tableIndexIterator, <-chan struct return partPrefix(r.unique, &snapshot, ikey) } +// prefixReverse implements tableIndexTxn. +func (r *partIndexTxn) prefixReverse(ikey index.Key) (tableIndexIterator, <-chan struct{}) { + snapshot := r.tx.Clone() + return partPrefixReverse(r.unique, &snapshot, ikey) +} + func (r *partIndexTxn) objectToKey(obj object) index.Key { return r.objectToKeys(obj).First() } @@ -532,6 +590,44 @@ func (it *nonUniquePartIterator) Next() ([]byte, object, bool) { var _ tableIndexIterator = &nonUniquePartIterator{} +type nonUniquePartReverseIterator struct { + iter part.ReverseIterator[object] + prefixSearch bool + searchKey []byte +} + +func (it *nonUniquePartReverseIterator) All(yield func([]byte, object) bool) { + var visited map[string]struct{} + if it.prefixSearch { + visited = map[string]struct{}{} + } + for key, iobj := range it.iter.All { + nuk := nonUniqueKey(key) + secondaryLen := nuk.secondaryLen() + + switch { + case !it.prefixSearch && secondaryLen != len(it.searchKey): + continue + case it.prefixSearch && secondaryLen < len(it.searchKey): + continue + } + + if it.prefixSearch { + primary := nuk.encodedPrimary() + if _, found := visited[string(primary)]; found { + continue + } + visited[string(primary)] = struct{}{} + } + + if !yield(key, iobj) { + return + } + } +} + +var _ tableIndexIterator = &nonUniquePartReverseIterator{} + // nonUniqueSeq returns a sequence of objects for a non-unique index. 
// Non-unique indexes work by concatenating the secondary key with the // primary key and then prefix searching for the items: @@ -558,6 +654,14 @@ func newNonUniquePartIterator(iter part.Iterator[object], prefixSearch bool, sea } } +func newNonUniquePartReverseIterator(iter part.ReverseIterator[object], prefixSearch bool, searchKey []byte) tableIndexIterator { + return &nonUniquePartReverseIterator{ + iter: iter, + prefixSearch: prefixSearch, + searchKey: searchKey, + } +} + type nonUniqueLowerBoundPartIterator struct { iter part.Iterator[object] searchKey []byte @@ -636,3 +740,19 @@ func (s *singletonTableIndexIterator) All(yield func([]byte, object) bool) { } var _ tableIndexIterator = &singletonTableIndexIterator{} + +type partReverseIteratorAdapter struct { + iter part.ReverseIterator[object] +} + +func (p *partReverseIteratorAdapter) All(yield func([]byte, object) bool) { + for key, obj := range p.iter.All { + if !yield(key, obj) { + return + } + } +} + +func partReverseAll(tree part.Ops[object]) tableIndexIterator { + return &partReverseIteratorAdapter{iter: tree.ReverseIterator()} +} diff --git a/table.go b/table.go index 2e01cf6..1c69b22 100644 --- a/table.go +++ b/table.go @@ -438,12 +438,23 @@ func (t *genTable[Obj]) Prefix(txn ReadTxn, q Query[Obj]) iter.Seq2[Obj, Revisio return iter } +func (t *genTable[Obj]) PrefixReverse(txn ReadTxn, q Query[Obj]) iter.Seq2[Obj, Revision] { + iter, _ := t.PrefixReverseWatch(txn, q) + return iter +} + func (t *genTable[Obj]) PrefixWatch(txn ReadTxn, q Query[Obj]) (iter.Seq2[Obj, Revision], <-chan struct{}) { indexTxn := txn.mustIndexReadTxn(t, t.indexPos(q.index)) iter, watch := indexTxn.prefix(q.key) return objSeq[Obj](iter), watch } +func (t *genTable[Obj]) PrefixReverseWatch(txn ReadTxn, q Query[Obj]) (iter.Seq2[Obj, Revision], <-chan struct{}) { + indexTxn := txn.mustIndexReadTxn(t, t.indexPos(q.index)) + iter, watch := indexTxn.prefixReverse(q.key) + return objSeq[Obj](iter), watch +} + func (t *genTable[Obj]) 
All(txn ReadTxn) iter.Seq2[Obj, Revision] { iter, _ := t.AllWatch(txn) return iter @@ -459,6 +470,17 @@ func (t *genTable[Obj]) AllWatch(txn ReadTxn) (iter.Seq2[Obj, Revision], <-chan }, watch } +func (t *genTable[Obj]) AllReverse(txn ReadTxn) iter.Seq2[Obj, Revision] { + iter, _ := t.AllReverseWatch(txn) + return iter +} + +func (t *genTable[Obj]) AllReverseWatch(txn ReadTxn) (iter.Seq2[Obj, Revision], <-chan struct{}) { + indexTxn := txn.mustIndexReadTxn(t, PrimaryIndexPos) + iter, watch := indexTxn.allReverse() + return objSeq[Obj](iter), watch +} + func (t *genTable[Obj]) List(txn ReadTxn, q Query[Obj]) iter.Seq2[Obj, Revision] { iter, _ := t.ListWatch(txn, q) return iter @@ -470,6 +492,17 @@ func (t *genTable[Obj]) ListWatch(txn ReadTxn, q Query[Obj]) (iter.Seq2[Obj, Rev return objSeq[Obj](iter), watch } +func (t *genTable[Obj]) ListReverse(txn ReadTxn, q Query[Obj]) iter.Seq2[Obj, Revision] { + iter, _ := t.ListReverseWatch(txn, q) + return iter +} + +func (t *genTable[Obj]) ListReverseWatch(txn ReadTxn, q Query[Obj]) (iter.Seq2[Obj, Revision], <-chan struct{}) { + indexTxn := txn.mustIndexReadTxn(t, t.indexPos(q.index)) + iter, watch := indexTxn.listReverse(q.key) + return objSeq[Obj](iter), watch +} + func (t *genTable[Obj]) Insert(txn WriteTxn, obj Obj) (oldObj Obj, hadOld bool, err error) { oldObj, hadOld, _, err = t.InsertWatch(txn, obj) return diff --git a/types.go b/types.go index 15e3ac6..6ab423d 100644 --- a/types.go +++ b/types.go @@ -35,6 +35,13 @@ type Table[Obj any] interface { // channel that is closed when the table changes. AllWatch(ReadTxn) (iter.Seq2[Obj, Revision], <-chan struct{}) + // AllReverse returns a sequence of all objects in the table in reverse order. + AllReverse(ReadTxn) iter.Seq2[Obj, Revision] + + // AllReverseWatch returns a sequence of all objects in the table in reverse + // order and a watch channel that is closed when the table changes. 
+ AllReverseWatch(ReadTxn) (iter.Seq2[Obj, Revision], <-chan struct{}) + // List returns sequence of objects matching the given query. List(ReadTxn, Query[Obj]) iter.Seq2[Obj, Revision] @@ -43,6 +50,14 @@ type Table[Obj any] interface { // invalidated by a write to the table. ListWatch(ReadTxn, Query[Obj]) (iter.Seq2[Obj, Revision], <-chan struct{}) + // ListReverse returns sequence of objects matching the given query in reverse order. + ListReverse(ReadTxn, Query[Obj]) iter.Seq2[Obj, Revision] + + // ListReverseWatch returns an iterator for all objects matching the given query + // in reverse order and a watch channel that is closed if the query results are + // invalidated by a write to the table. + ListReverseWatch(ReadTxn, Query[Obj]) (iter.Seq2[Obj, Revision], <-chan struct{}) + // Get returns the first matching object for the query. Get(ReadTxn, Query[Obj]) (obj Obj, rev Revision, found bool) @@ -63,10 +78,17 @@ type Table[Obj any] interface { // Prefix searches the table by key prefix. Prefix(ReadTxn, Query[Obj]) iter.Seq2[Obj, Revision] + // PrefixReverse searches the table by key prefix in reverse order. + PrefixReverse(ReadTxn, Query[Obj]) iter.Seq2[Obj, Revision] + // PrefixWatch searches the table by key prefix. Returns an iterator and a watch // channel that closes when the query results have become stale. PrefixWatch(ReadTxn, Query[Obj]) (seq iter.Seq2[Obj, Revision], watch <-chan struct{}) + // PrefixReverseWatch searches the table by key prefix in reverse order. Returns an iterator + // and a watch channel that closes when the query results have become stale. + PrefixReverseWatch(ReadTxn, Query[Obj]) (seq iter.Seq2[Obj, Revision], watch <-chan struct{}) + // Changes returns an iterator for changes happening to the table. // This uses the revision index to iterate over the objects in the order // they have changed. 
Deleted objects are placed onto a temporary index @@ -390,10 +412,13 @@ type tableIndexReader interface { len() int get(key index.Key) (object, <-chan struct{}, bool) prefix(key index.Key) (tableIndexIterator, <-chan struct{}) + prefixReverse(key index.Key) (tableIndexIterator, <-chan struct{}) lowerBound(key index.Key) (tableIndexIterator, <-chan struct{}) lowerBoundNext(key index.Key) (func() ([]byte, object, bool), <-chan struct{}) list(key index.Key) (tableIndexIterator, <-chan struct{}) + listReverse(key index.Key) (tableIndexIterator, <-chan struct{}) all() (tableIndexIterator, <-chan struct{}) + allReverse() (tableIndexIterator, <-chan struct{}) rootWatch() <-chan struct{} objectToKey(obj object) index.Key }