@@ -105,13 +105,12 @@ func (r *TraceReader) getTraces(traceIDs []model.TraceID) ([]*model.Trace, error
105
105
106
106
err := r .store .View (func (txn * badger.Txn ) error {
107
107
opts := badger .DefaultIteratorOptions
108
- opts .PrefetchSize = 10 // TraceIDs are not sorted, pointless to prefetch large amount of values
109
108
it := txn .NewIterator (opts )
110
109
defer it .Close ()
111
110
112
111
val := []byte {}
113
112
for _ , prefix := range prefixes {
114
- spans := make ([]* model.Span , 0 , 4 ) // reduce reallocation requirements by defining some initial length
113
+ spans := make ([]* model.Span , 0 , 32 ) // reduce reallocation requirements by defining some initial length
115
114
116
115
for it .Seek (prefix ); it .ValidForPrefix (prefix ); it .Next () {
117
116
// Add value to the span store (decode from JSON / defined encoding first)
@@ -346,53 +345,65 @@ func (r *TraceReader) durationQueries(query *spanstore.TraceQueryParameters, ids
346
345
return ids
347
346
}
348
347
348
// mergeJoinIds intersects two slices of keys, each already sorted in
// ascending byte order, using a single-pass sort-merge join. It returns,
// still in ascending order, only the keys that appear in both inputs.
func mergeJoinIds(left, right [][]byte) [][]byte {
	// The intersection can never hold more entries than the smaller input.
	capacity := len(left)
	if len(right) < capacity {
		capacity = len(right)
	}
	merged := make([][]byte, 0, capacity)

	l, r := 0, 0
	for l < len(left) && r < len(right) {
		if cmp := bytes.Compare(left[l], right[r]); cmp < 0 {
			// Left key is smaller — it cannot be in the intersection.
			l++
		} else if cmp > 0 {
			// Right key is smaller — skip past it.
			r++
		} else {
			// Keys match: keep one copy and advance both cursors.
			merged = append(merged, left[l])
			l++
			r++
		}
	}
	return merged
}
377
+
349
378
// sortMergeIds does a sort-merge join operation to the list of TraceIDs to remove duplicates
350
379
func sortMergeIds (query * spanstore.TraceQueryParameters , ids [][][]byte ) []model.TraceID {
351
380
// Key only scan is a lot faster in the badger - use sort-merge join algorithm instead of hash join since we have the keys in sorted order already
352
- intersected := ids [ 0 ]
353
- mergeIntersected := make ( [][]byte , 0 , len ( intersected )) // intersected is the maximum size
381
+
382
+ var merged [][]byte
354
383
355
384
if len (ids ) > 1 {
356
- for i := 1 ; i < len (ids ); i ++ {
357
- mergeIntersected = make ([][]byte , 0 , len (intersected )) // intersected is the maximum size
358
- k := len (intersected ) - 1
359
- for j := len (ids [i ]) - 1 ; j >= 0 && k >= 0 ; {
360
- // The result will be 0 if a==b, -1 if a < b, and +1 if a > b.
361
- switch bytes .Compare (intersected [k ], ids [i ][j ]) {
362
- case 1 :
363
- k -- // Move on to the next item in the intersected list
364
- // a > b
365
- case - 1 :
366
- j --
367
- // a < b
368
- // Move on to next iteration of j
369
- case 0 :
370
- mergeIntersected = append (mergeIntersected , intersected [k ])
371
- k -- // Move on to next item
372
- // Match
373
- }
374
- }
375
- intersected = mergeIntersected
385
+ merged = mergeJoinIds (ids [0 ], ids [1 ])
386
+ for i := 2 ; i < len (ids ); i ++ {
387
+ merged = mergeJoinIds (merged , ids [i ])
376
388
}
377
-
378
389
} else {
379
- // mergeIntersected should be reversed intersected
380
- for i , j := 0 , len (intersected )- 1 ; j >= 0 ; i , j = i + 1 , j - 1 {
381
- mergeIntersected = append (mergeIntersected , intersected [j ])
382
- }
383
- intersected = mergeIntersected
390
+ merged = ids [0 ]
391
+ }
392
+
393
+ // Get top query.NumTraces results (order in DESC)
394
+ if query .NumTraces < len (merged ) {
395
+ merged = merged [len (merged )- query .NumTraces :]
384
396
}
385
397
386
- // Get top query.NumTraces results (note, the slice is now in descending timestamp order)
387
- if query . NumTraces < len (intersected ) {
388
- intersected = intersected [: query . NumTraces ]
398
+ // Results are in ASC (badger's default order), but Jaeger uses DESC, thus we need to reverse the array
399
+ for left , right := 0 , len (merged ) - 1 ; left < right ; left , right = left + 1 , right - 1 {
400
+ merged [ left ], merged [ right ] = merged [ right ], merged [ left ]
389
401
}
390
402
391
- // Enrich the traceIds to model.Trace
392
- // result := make([]*model.Trace, 0, len(intersected))
393
- keys := make ([]model.TraceID , 0 , len (intersected ))
403
+ // Create the structs from [][]byte to TraceID
404
+ keys := make ([]model.TraceID , 0 , len (merged ))
394
405
395
- for _ , key := range intersected {
406
+ for _ , key := range merged {
396
407
keys = append (keys , model.TraceID {
397
408
High : binary .BigEndian .Uint64 (key [:8 ]),
398
409
Low : binary .BigEndian .Uint64 (key [8 :]),
0 commit comments