From f0ad170003ab8a556540e531863a0c55c561530f Mon Sep 17 00:00:00 2001 From: Joe Elliott Date: Wed, 17 May 2023 16:21:44 -0400 Subject: [PATCH 01/18] supported Signed-off-by: Joe Elliott --- pkg/traceql/ast.go | 8 -------- pkg/traceql/ast_execute.go | 9 +++++++++ pkg/traceql/ast_validate.go | 12 +++++------- pkg/traceql/test_examples.yaml | 26 +++++++++++++------------- 4 files changed, 27 insertions(+), 28 deletions(-) diff --git a/pkg/traceql/ast.go b/pkg/traceql/ast.go index af392fca94a..68b50500d4b 100644 --- a/pkg/traceql/ast.go +++ b/pkg/traceql/ast.go @@ -116,10 +116,6 @@ func (o GroupOperation) extractConditions(request *FetchSpansRequest) { o.Expression.extractConditions(request) } -func (GroupOperation) evaluate(ss []*Spanset) ([]*Spanset, error) { - return ss, nil -} - type CoalesceOperation struct { } @@ -130,10 +126,6 @@ func newCoalesceOperation() CoalesceOperation { func (o CoalesceOperation) extractConditions(request *FetchSpansRequest) { } -func (CoalesceOperation) evaluate(ss []*Spanset) ([]*Spanset, error) { - return ss, nil -} - // ********************** // Scalars // ********************** diff --git a/pkg/traceql/ast_execute.go b/pkg/traceql/ast_execute.go index a487d869d77..9438916fa2a 100644 --- a/pkg/traceql/ast_execute.go +++ b/pkg/traceql/ast_execute.go @@ -8,6 +8,15 @@ import ( "strings" ) +// jpe - do these and test +func (GroupOperation) evaluate(ss []*Spanset) ([]*Spanset, error) { + return ss, nil +} + +func (CoalesceOperation) evaluate(ss []*Spanset) ([]*Spanset, error) { + return ss, nil +} + func (o SpansetOperation) evaluate(input []*Spanset) (output []*Spanset, err error) { for i := range input { diff --git a/pkg/traceql/ast_validate.go b/pkg/traceql/ast_validate.go index 30dff1c4880..35947809f3d 100644 --- a/pkg/traceql/ast_validate.go +++ b/pkg/traceql/ast_validate.go @@ -30,18 +30,16 @@ func (p Pipeline) validate() error { } func (o GroupOperation) validate() error { - return newUnsupportedError("by()") - // todo: once grouping is supported the below validation will apply - // if !o.Expression.referencesSpan() { - // return fmt.Errorf("grouping field expressions must reference the span: %s", o.String()) - // } + if !o.Expression.referencesSpan() { + return fmt.Errorf("grouping field expressions must reference the span: %s", o.String()) + } - // return o.Expression.validate() + return o.Expression.validate() } func (o CoalesceOperation) validate() error { - return newUnsupportedError("coalesce()") + return nil } func (o ScalarOperation) validate() error { diff --git a/pkg/traceql/test_examples.yaml b/pkg/traceql/test_examples.yaml index cf428e70435..58ccf139f35 100644 --- a/pkg/traceql/test_examples.yaml +++ b/pkg/traceql/test_examples.yaml @@ -84,6 +84,16 @@ valid: # pipeline expressions - '({ true } | count() > 1 | { false }) && ({ true } | count() > 1 | { false })' - '({ true } | count() > 1 | { false }) || ({ true } | count() > 1 | { false })' + # coalesce - will be valid when supported + - '{ true } | coalesce()' + - '{ true } | by(1 + .a) | coalesce()' + # by - will be valid when supported + - '{ true } | by(.a)' + - '{ true } | by(1 + .a)' + - 'by(.a) | { true }' + - '{ true } | by(name) | count() > 2' + - '{ true } | by(.field) | avg(.b) = 2' + - '{ true } | by(3 * .field - 2) | max(duration) < 1s' # parse_fails throw an error when parsing parse_fails: @@ -189,21 +199,12 @@ validate_fails: - 'min(1) = max(2) + 3' - 'min(1.1 - 3) > 1' - 'max(1h + 2h) > 1' - -# unsupported parse correctly and return an unsupported error when calling .validate() 
-unsupported: - # coalesce - will be valid when supported - - '{ true } | coalesce()' - - '{ true } | by(1 + .a) | coalesce()' - # by - will be valid when supported - - '{ true } | by(.a)' - - '{ true } | by(1 + .a)' - - 'by(.a) | { true }' - - '{ true } | by(name) | count() > 2' - - '{ true } | by(.field) | avg(.b) = 2' # by - will *not* be valid when supported - group expressions must reference the span - '{ true } | by(1)' - '{ true } | by("foo")' + +# unsupported parse correctly and return an unsupported error when calling .validate() +unsupported: # complex scalar filters - will be valid when supported - 'min(.field) < max(duration)' - 'sum(.field) = min(.field)' @@ -214,7 +215,6 @@ unsupported: # aggregates - will be valid when supported - 'min(childCount) < 2' - '{ true } | max(parent.a) = 1' - - '{ true } | by(3 * .field - 2) | max(duration) < 1s' - '{ .http.status = 200 } | max(.field) - min(.field) > 3' # parent - will be valid when supported - '{ parent.a != 3 }' From b20a3abd63e4f108b2252b7eccb89717988506d8 Mon Sep 17 00:00:00 2001 From: Joe Elliott Date: Thu, 18 May 2023 08:27:12 -0400 Subject: [PATCH 02/18] group Signed-off-by: Joe Elliott --- pkg/traceql/ast_execute.go | 46 ++++++++++++++++++++- pkg/traceql/ast_execute_test.go | 71 +++++++++++++++++++++++++++++++++ 2 files changed, 115 insertions(+), 2 deletions(-) diff --git a/pkg/traceql/ast_execute.go b/pkg/traceql/ast_execute.go index 9438916fa2a..21162040a94 100644 --- a/pkg/traceql/ast_execute.go +++ b/pkg/traceql/ast_execute.go @@ -9,10 +9,52 @@ import ( ) // jpe - do these and test -func (GroupOperation) evaluate(ss []*Spanset) ([]*Spanset, error) { - return ss, nil +func (g GroupOperation) evaluate(ss []*Spanset) ([]*Spanset, error) { + result := make([]*Spanset, 0, len(ss)) + groups := make(map[Static]*Spanset) // todo: don't recreate this map for every eval (jpe) + + // Iterate over each spanset in the input slice + for _, spanset := range ss { + // clear out the groups + for k := range groups { + delete(groups, k) + } + + // Iterate over each span in the spanset + for _, span := range spanset.Spans { + // Execute the FieldExpression for the span + result, err := g.Expression.execute(span) + if err != nil { + return nil, err + } + + // Check if the result already has a group in the map + group, ok := groups[result] + if !ok { + // If not, create a new group and add it to the map + group = &Spanset{} + // copy all existing attributes forward - jpe - does avg() clobber existing attributes? + for k, att := range spanset.Attributes { + group.AddAttribute(k, att) + } + group.AddAttribute(g.String(), result) + groups[result] = group + } + + // Add the current spanset to the group + group.Spans = append(group.Spans, span) + } + + // add all groups created by this spanset to the result + for _, group := range groups { + result = append(result, group) + } + } + + return result, nil } +// jpe me next func (CoalesceOperation) evaluate(ss []*Spanset) ([]*Spanset, error) { return ss, nil } diff --git a/pkg/traceql/ast_execute_test.go b/pkg/traceql/ast_execute_test.go index 96db2ac3eb9..a1531f64f22 100644 --- a/pkg/traceql/ast_execute_test.go +++ b/pkg/traceql/ast_execute_test.go @@ -1,8 +1,10 @@ package traceql import ( + "bytes" "fmt" "reflect" + "sort" "testing" "time" @@ -32,6 +34,16 @@ func testEvaluator(t *testing.T, tc evalTC) { actual, err := ast.Pipeline.evaluate(tc.input) require.NoError(t, err) + // sort expected/actual spansets. grouping requires this b/c map iteration makes the output + // non-deterministic. 
+ makeSort := func(ss []*Spanset) func(i, j int) bool { + return func(i, j int) bool { + return bytes.Compare(ss[i].Spans[0].ID(), ss[j].Spans[0].ID()) < 0 + } + } + sort.Slice(actual, makeSort(actual)) + sort.Slice(tc.output, makeSort(tc.output)) + // reflect.DeepEqual() used b/c it correctly compares maps if eq := reflect.DeepEqual(actual, tc.output); !eq { require.Equal(t, tc.output, actual) // this is will nicely print diffs but some diffs may be red herrings due to map iteration. @@ -172,6 +184,65 @@ func TestSpansetFilter_matches(t *testing.T) { } } +func TestGroup(t *testing.T) { + testCases := []evalTC{ + { + "{ } | by(.foo)", + []*Spanset{ + {Spans: []Span{ + &mockSpan{id: []byte{1}, attributes: map[Attribute]Static{NewAttribute("foo"): NewStaticString("a")}}, + &mockSpan{id: []byte{2}, attributes: map[Attribute]Static{NewAttribute("foo"): NewStaticString("b")}}, + &mockSpan{id: []byte{3}, attributes: map[Attribute]Static{NewAttribute("foo"): NewStaticString("b")}}, + }}, + }, + []*Spanset{ + {Spans: []Span{ + &mockSpan{id: []byte{1}, attributes: map[Attribute]Static{NewAttribute("foo"): NewStaticString("a")}}, + }, + Attributes: map[string]Static{"by(.foo)": NewStaticString("a")}, + }, + {Spans: []Span{ + &mockSpan{id: []byte{2}, attributes: map[Attribute]Static{NewAttribute("foo"): NewStaticString("b")}}, + &mockSpan{id: []byte{3}, attributes: map[Attribute]Static{NewAttribute("foo"): NewStaticString("b")}}, + }, + Attributes: map[string]Static{"by(.foo)": NewStaticString("b")}, + }, + }, + }, + { + "{ } | by(.foo) | by(.bar)", + []*Spanset{ + {Spans: []Span{ + &mockSpan{id: []byte{1}, attributes: map[Attribute]Static{NewAttribute("foo"): NewStaticString("a"), NewAttribute("bar"): NewStaticString("1")}}, + &mockSpan{id: []byte{2}, attributes: map[Attribute]Static{NewAttribute("foo"): NewStaticString("b"), NewAttribute("bar"): NewStaticString("1")}}, + &mockSpan{id: []byte{3}, attributes: map[Attribute]Static{NewAttribute("foo"): NewStaticString("b"), NewAttribute("bar"): NewStaticString("2")}}, + }}, + }, + []*Spanset{ + {Spans: []Span{ + &mockSpan{id: []byte{1}, attributes: map[Attribute]Static{NewAttribute("foo"): NewStaticString("a"), NewAttribute("bar"): NewStaticString("1")}}, + }, + Attributes: map[string]Static{"by(.foo)": NewStaticString("a"), "by(.bar)": NewStaticString("1")}, + }, + {Spans: []Span{ + &mockSpan{id: []byte{2}, attributes: map[Attribute]Static{NewAttribute("foo"): NewStaticString("b"), NewAttribute("bar"): NewStaticString("1")}}, + }, + Attributes: map[string]Static{"by(.foo)": NewStaticString("b"), "by(.bar)": NewStaticString("1")}, + }, + {Spans: []Span{ + &mockSpan{id: []byte{3}, attributes: map[Attribute]Static{NewAttribute("foo"): NewStaticString("b"), NewAttribute("bar"): NewStaticString("2")}}, + }, + Attributes: map[string]Static{"by(.foo)": NewStaticString("b"), "by(.bar)": NewStaticString("2")}, + }, + }, + }, + } + + for _, tc := range testCases { + testEvaluator(t, tc) + } +} + func TestSpansetOperationEvaluate(t *testing.T) { testCases := []evalTC{ { From 2932b38cff771fb1c5da62eb784956266104f324 Mon Sep 17 00:00:00 2001 From: Joe Elliott Date: Thu, 18 May 2023 08:32:26 -0400 Subject: [PATCH 03/18] coalesce Signed-off-by: Joe Elliott --- pkg/traceql/ast_execute.go | 18 +++++++++++++++--- pkg/traceql/ast_execute_test.go | 28 ++++++++++++++++++++++++++++ 2 files changed, 43 insertions(+), 3 deletions(-) diff --git a/pkg/traceql/ast_execute.go b/pkg/traceql/ast_execute.go index 21162040a94..825e8a80a24 100644 --- a/pkg/traceql/ast_execute.go +++ 
b/pkg/traceql/ast_execute.go @@ -8,7 +8,6 @@ import ( "strings" ) -// jpe - do these and test func (g GroupOperation) evaluate(ss []*Spanset) ([]*Spanset, error) { result := make([]*Spanset, 0, len(ss)) groups := make(map[Static]*Spanset) // todo: don't recreate this map for every eval (jpe) @@ -54,9 +53,22 @@ func (g GroupOperation) evaluate(ss []*Spanset) ([]*Spanset, error) { return result, nil } -// jpe me next +// CoalesceOperation undoes grouping. It takes spansets and recombines them into +// one by trace id. Since all spansets are guaranteed to be from the same traceid +// due to the structure of the engine we can cheat and just recombine all spansets +// in ss into one without checking. func (CoalesceOperation) evaluate(ss []*Spanset) ([]*Spanset, error) { - return ss, nil + l := 0 + for _, spanset := range ss { + l += len(spanset.Spans) + } + result := &Spanset{ + Spans: make([]Span, 0, l), + } + for _, spanset := range ss { + result.Spans = append(result.Spans, spanset.Spans...) + } + return []*Spanset{result}, nil } func (o SpansetOperation) evaluate(input []*Spanset) (output []*Spanset, err error) { diff --git a/pkg/traceql/ast_execute_test.go b/pkg/traceql/ast_execute_test.go index a1531f64f22..7e74295fdaa 100644 --- a/pkg/traceql/ast_execute_test.go +++ b/pkg/traceql/ast_execute_test.go @@ -243,6 +243,34 @@ func TestGroup(t *testing.T) { } } +func TestCoalesce(t *testing.T) { + testCases := []evalTC{ + { + "{ } | coalesce()", + []*Spanset{ + {Spans: []Span{ + &mockSpan{id: []byte{1}, attributes: map[Attribute]Static{NewAttribute("foo"): NewStaticString("a")}}, + }}, + {Spans: []Span{ + &mockSpan{id: []byte{2}, attributes: map[Attribute]Static{NewAttribute("foo"): NewStaticString("b")}}, + &mockSpan{id: []byte{3}, attributes: map[Attribute]Static{NewAttribute("foo"): NewStaticString("b")}}, + }}, + }, + []*Spanset{ + {Spans: []Span{ + &mockSpan{id: []byte{1}, attributes: map[Attribute]Static{NewAttribute("foo"): NewStaticString("a")}}, + &mockSpan{id: []byte{2}, attributes: map[Attribute]Static{NewAttribute("foo"): NewStaticString("b")}}, + &mockSpan{id: []byte{3}, attributes: map[Attribute]Static{NewAttribute("foo"): NewStaticString("b")}}, + }}, + }, + }, + } + + for _, tc := range testCases { + testEvaluator(t, tc) + } +} + func TestSpansetOperationEvaluate(t *testing.T) { testCases := []evalTC{ { From 169401c4722d98d2ca714b2a7005029a92fb0d1f Mon Sep 17 00:00:00 2001 From: Joe Elliott Date: Thu, 18 May 2023 08:36:08 -0400 Subject: [PATCH 04/18] reuse buffer Signed-off-by: Joe Elliott --- pkg/traceql/ast.go | 5 ++++- pkg/traceql/ast_execute.go | 4 ++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/pkg/traceql/ast.go b/pkg/traceql/ast.go index 68b50500d4b..88a11e7050f 100644 --- a/pkg/traceql/ast.go +++ b/pkg/traceql/ast.go @@ -104,11 +104,14 @@ func (p Pipeline) evaluate(input []*Spanset) (result []*Spanset, err error) { type GroupOperation struct { Expression FieldExpression + + groupBuffer map[Static]*Spanset } func newGroupOperation(e FieldExpression) GroupOperation { return GroupOperation{ - Expression: e, + Expression: e, + groupBuffer: make(map[Static]*Spanset), } } diff --git a/pkg/traceql/ast_execute.go b/pkg/traceql/ast_execute.go index 825e8a80a24..0afd098c0a2 100644 --- a/pkg/traceql/ast_execute.go +++ b/pkg/traceql/ast_execute.go @@ -10,7 +10,7 @@ import ( func (g GroupOperation) evaluate(ss []*Spanset) ([]*Spanset, error) { result := make([]*Spanset, 0, len(ss)) - groups := make(map[Static]*Spanset) // todo: don't recreate this map for every 
eval (jpe) + groups := g.groupBuffer // Iterate over each spanset in the input slice for _, spanset := range ss { @@ -32,7 +32,7 @@ func (g GroupOperation) evaluate(ss []*Spanset) ([]*Spanset, error) { if !ok { // If not, create a new group and add it to the map group = &Spanset{} - // copy all existing attributes forward - jpe - does avg() clobber existing attributes? + // copy all existing attributes forward for k, att := range spanset.Attributes { group.AddAttribute(k, att) } From c86c53d4785f14501c7795b8f9699e0c0cf37d83 Mon Sep 17 00:00:00 2001 From: Joe Elliott Date: Thu, 18 May 2023 09:36:26 -0400 Subject: [PATCH 05/18] docs Signed-off-by: Joe Elliott --- docs/sources/tempo/traceql/_index.md | 9 +++++++++ docs/sources/tempo/traceql/architecture.md | 3 +-- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/docs/sources/tempo/traceql/_index.md b/docs/sources/tempo/traceql/_index.md index 19e6c5df6de..19153fad98b 100644 --- a/docs/sources/tempo/traceql/_index.md +++ b/docs/sources/tempo/traceql/_index.md @@ -216,6 +216,15 @@ For example, find traces that have more than 3 spans with an attribute `http.sta { span.http.status_code = 200 } | count() > 3 ``` +## Grouping + +TraceQL supports a grouping pipeline operator that can be used to group by arbitrary attributes. This can be useful to +find someting like a single service with more than 1 error: + +``` +{ error = true } | by(resource.service.name) | count() > 1 +``` + ## Arithmetic TraceQL supports arbitrary arithmetic in your queries. This can be useful to make queries more human readable: diff --git a/docs/sources/tempo/traceql/architecture.md b/docs/sources/tempo/traceql/architecture.md index 169761dbdb0..743a71be42d 100644 --- a/docs/sources/tempo/traceql/architecture.md +++ b/docs/sources/tempo/traceql/architecture.md @@ -35,8 +35,7 @@ For more information about TraceQL’s design, refer to the [TraceQL Concepts de ### Future work -- Additional aggregates, such as `max()`, `min()`, and others. -- Grouping +- Increase OTEL support: Events, Lists, ILS Scope, etc. 
- Structural Queries - Metrics - Pipeline comparisons From 444a62c605f69421925ff620611e0b2c1e0bab5e Mon Sep 17 00:00:00 2001 From: Joe Elliott Date: Thu, 18 May 2023 13:29:08 -0400 Subject: [PATCH 06/18] rebatch iterators Signed-off-by: Joe Elliott --- tempodb/encoding/vparquet/block_traceql.go | 192 ++++++++++++++---- .../vparquet/block_traceql_meta_test.go | 2 + tempodb/encoding/vparquet2/block_traceql.go | 36 +++- 3 files changed, 193 insertions(+), 37 deletions(-) diff --git a/tempodb/encoding/vparquet/block_traceql.go b/tempodb/encoding/vparquet/block_traceql.go index 91e56212d16..f1d78645315 100644 --- a/tempodb/encoding/vparquet/block_traceql.go +++ b/tempodb/encoding/vparquet/block_traceql.go @@ -28,7 +28,11 @@ type span struct { id []byte startTimeUnixNanos uint64 endtimeUnixNanos uint64 - rowNum parquetquery.RowNumber + + // metadata used to track the span in the parquet file + rowNum parquetquery.RowNumber + cbSpansetFinal bool + cbSpanset *traceql.Spanset } func (s *span) Attributes() map[traceql.Attribute]traceql.Static { @@ -85,6 +89,8 @@ func putSpan(s *span) { s.endtimeUnixNanos = 0 s.startTimeUnixNanos = 0 s.rowNum = parquetquery.EmptyRowNumber() + s.cbSpansetFinal = false + s.cbSpanset = nil // clear attributes for k := range s.attributes { @@ -268,7 +274,7 @@ type bridgeIterator struct { iter parquetquery.Iterator cb traceql.SecondPassFn - currentSpans []*span + nextSpans []*span } func newBridgeIterator(iter parquetquery.Iterator, cb traceql.SecondPassFn) *bridgeIterator { @@ -284,9 +290,9 @@ func (i *bridgeIterator) String() string { func (i *bridgeIterator) Next() (*pq.IteratorResult, error) { // drain current buffer - if len(i.currentSpans) > 0 { - ret := i.currentSpans[0] - i.currentSpans = i.currentSpans[1:] + if len(i.nextSpans) > 0 { + ret := i.nextSpans[0] + i.nextSpans = i.nextSpans[1:] return spanToIteratorResult(ret), nil } @@ -302,51 +308,51 @@ func (i *bridgeIterator) Next() (*pq.IteratorResult, error) { // The spanset is in the OtherEntries iface := res.OtherValueFromKey(otherEntrySpansetKey) if iface == nil { - return nil, fmt.Errorf("engine assumption broken: spanset not found in other entries") + return nil, fmt.Errorf("engine assumption broken: spanset not found in other entries in bridge") } spanset, ok := iface.(*traceql.Spanset) if !ok { - return nil, fmt.Errorf("engine assumption broken: spanset is not of type *traceql.Spanset") + return nil, fmt.Errorf("engine assumption broken: spanset is not of type *traceql.Spanset in bridge") } - var filteredSpansets []*traceql.Spanset - if i.cb != nil { - filteredSpansets, err = i.cb(spanset) - if err == io.EOF { - return nil, nil - } - if err != nil { - return nil, err - } - // if the filter removed all spansets then let's release all back to the pool - // no reason to try anything more nuanced than this. it will handle nearly all cases - if len(filteredSpansets) == 0 { - for _, s := range spanset.Spans { - putSpan(s.(*span)) - } + filteredSpansets, err := i.cb(spanset) + if err == io.EOF { + return nil, nil + } + if err != nil { + return nil, err + } + // if the filter removed all spansets then let's release all back to the pool + // no reason to try anything more nuanced than this. 
it will handle nearly all cases + if len(filteredSpansets) == 0 { + for _, s := range spanset.Spans { + putSpan(s.(*span)) } - } else { - filteredSpansets = []*traceql.Spanset{spanset} } // flatten spans into i.currentSpans for _, ss := range filteredSpansets { - for _, s := range ss.Spans { + for idx, s := range ss.Spans { span := s.(*span) - i.currentSpans = append(i.currentSpans, span) + + // use otherEntryCallbackSpansetKey to indicate to the rebatchIterator that either + // 1) this is the last span in the spanset, or 2) there are more spans in the spanset + span.cbSpansetFinal = idx == len(ss.Spans)-1 + span.cbSpanset = ss + i.nextSpans = append(i.nextSpans, span) } } // spans returned from the filter are not guaranteed to be in file order // we need them to be so that the meta iterators work correctly. sort here - sort.Slice(i.currentSpans, func(j, k int) bool { - return parquetquery.CompareRowNumbers(DefinitionLevelResourceSpans, i.currentSpans[j].rowNum, i.currentSpans[k].rowNum) == -1 + sort.Slice(i.nextSpans, func(j, k int) bool { + return parquetquery.CompareRowNumbers(DefinitionLevelResourceSpans, i.nextSpans[j].rowNum, i.nextSpans[k].rowNum) == -1 }) // found something! - if len(i.currentSpans) > 0 { - ret := i.currentSpans[0] - i.currentSpans = i.currentSpans[1:] + if len(i.nextSpans) > 0 { + ret := i.nextSpans[0] + i.nextSpans = i.nextSpans[1:] return spanToIteratorResult(ret), nil } } @@ -373,6 +379,122 @@ func (i *bridgeIterator) Close() { i.iter.Close() } +// confirm rebatchIterator implements pq.Iterator +var _ pq.Iterator = (*rebatchIterator)(nil) + +// rebatchIterator either passes spansets through directly OR rebatches them based on metadata +// in OtherEntries jpe - test +type rebatchIterator struct { + iter parquetquery.Iterator + + nextSpans []*span +} + +func newRebatchIterator(iter parquetquery.Iterator) *rebatchIterator { + return &rebatchIterator{ + iter: iter, + } +} + +func (i *rebatchIterator) String() string { + return fmt.Sprintf("rebatchIterator: \n\t%s", util.TabOut(i.iter)) +} + +// Next has to handle two different style results. First is an initial set of spans +// that does not have a callback spanset. These can be passed directly through. +// Second is a set of spans that have spansets imposed by the callback (i.e. for grouping) +// these must be regrouped into the callback spansets +func (i *rebatchIterator) Next() (*pq.IteratorResult, error) { + for { + // see if we have a queue + res := i.resultFromNextSpans() + if res != nil { + return res, nil + } + + // check the iterator for anything + res, err := i.iter.Next() + if err != nil { + return nil, err + } + if res == nil { + return nil, nil + } + + // get the spanset and see if we should pass it through or buffer for rebatching + iface := res.OtherValueFromKey(otherEntrySpansetKey) + if iface == nil { + return nil, fmt.Errorf("engine assumption broken: spanset not found in other entries in rebatch") + } + ss, ok := iface.(*traceql.Spanset) + if !ok { + return nil, fmt.Errorf("engine assumption broken: spanset is not of type *traceql.Spanset in rebatch") + } + + // if this has no call back spanset just pass it on + if len(ss.Spans) > 0 && ss.Spans[0].(*span).cbSpanset == nil { + return res, nil + } + + // dump all spans into our buffer + for _, s := range ss.Spans { + sp := s.(*span) + if !sp.cbSpansetFinal { + continue + } + + // copy trace level data from the current iteration spanset into the rebatch spanset. 
only do this if + // we don't have current data + if sp.cbSpanset.DurationNanos == 0 { + sp.cbSpanset.DurationNanos = ss.DurationNanos + } + if len(sp.cbSpanset.TraceID) == 0 { + sp.cbSpanset.TraceID = ss.TraceID + } + if len(sp.cbSpanset.RootSpanName) == 0 { + sp.cbSpanset.RootSpanName = ss.RootSpanName + } + if len(sp.cbSpanset.RootServiceName) == 0 { + sp.cbSpanset.RootServiceName = ss.RootServiceName + } + if sp.cbSpanset.StartTimeUnixNanos == 0 { + sp.cbSpanset.StartTimeUnixNanos = ss.StartTimeUnixNanos + } + + i.nextSpans = append(i.nextSpans, sp) + } + + res = i.resultFromNextSpans() + if res != nil { + return res, nil + } + // if we don't find anything in that spanset, start over + } +} + +func (i *rebatchIterator) resultFromNextSpans() *pq.IteratorResult { + for len(i.nextSpans) > 0 { + ret := i.nextSpans[0] + i.nextSpans = i.nextSpans[1:] + + if ret.cbSpansetFinal && ret.cbSpanset != nil { + res := &pq.IteratorResult{} + res.AppendOtherValue(otherEntrySpansetKey, ret.cbSpanset) + return res + } + } + + return nil +} + +func (i *rebatchIterator) SeekTo(to pq.RowNumber, definitionLevel int) (*pq.IteratorResult, error) { + return i.iter.SeekTo(to, definitionLevel) +} + +func (i *rebatchIterator) Close() { + i.iter.Close() +} + // spansetIterator turns the parquet iterator into the final // traceql iterator. Every row it receives is one spanset. type spansetIterator struct { @@ -399,11 +521,11 @@ func (i *spansetIterator) Next(ctx context.Context) (*traceql.Spanset, error) { // The spanset is in the OtherEntries iface := res.OtherValueFromKey(otherEntrySpansetKey) if iface == nil { - return nil, fmt.Errorf("engine assumption broken: spanset not found in other entries") + return nil, fmt.Errorf("engine assumption broken: spanset not found in other entries in spansetIterator") } ss, ok := iface.(*traceql.Spanset) if !ok { - return nil, fmt.Errorf("engine assumption broken: spanset is not of type *traceql.Spanset") + return nil, fmt.Errorf("engine assumption broken: spanset is not of type *traceql.Spanset in spansetIterator") } return ss, nil @@ -521,7 +643,7 @@ func fetch(ctx context.Context, req traceql.FetchSpansRequest, pf *parquet.File, } if req.SecondPass != nil { - iter = newBridgeIterator(iter, req.SecondPass) + iter = newBridgeIterator(newRebatchIterator(iter), req.SecondPass) iter, err = createAllIterator(ctx, iter, req.SecondPassConditions, false, 0, 0, pf, opts) if err != nil { @@ -529,7 +651,7 @@ func fetch(ctx context.Context, req traceql.FetchSpansRequest, pf *parquet.File, } } - return newSpansetIterator(iter), nil + return newSpansetIterator(newRebatchIterator(iter)), nil } func createAllIterator(ctx context.Context, primaryIter parquetquery.Iterator, conds []traceql.Condition, allConditions bool, start uint64, end uint64, pf *parquet.File, opts common.SearchOptions) (parquetquery.Iterator, error) { diff --git a/tempodb/encoding/vparquet/block_traceql_meta_test.go b/tempodb/encoding/vparquet/block_traceql_meta_test.go index e27f977978a..8bbc2a2ab6d 100644 --- a/tempodb/encoding/vparquet/block_traceql_meta_test.go +++ b/tempodb/encoding/vparquet/block_traceql_meta_test.go @@ -292,6 +292,8 @@ func TestBackendBlockSearchFetchMetaData(t *testing.T) { // fetch layer. 
just wipe them out here for _, s := range ss { for _, sp := range s.Spans { + sp.(*span).cbSpanset = nil + sp.(*span).cbSpansetFinal = false sp.(*span).rowNum = parquetquery.RowNumber{} } } diff --git a/tempodb/encoding/vparquet2/block_traceql.go b/tempodb/encoding/vparquet2/block_traceql.go index b07ab1d4b5e..bb0883c7d92 100644 --- a/tempodb/encoding/vparquet2/block_traceql.go +++ b/tempodb/encoding/vparquet2/block_traceql.go @@ -369,6 +369,38 @@ func (i *bridgeIterator) Close() { i.iter.Close() } +// confirm rebatchIterator implements pq.Iterator +var _ pq.Iterator = (*rebatchIterator)(nil) + +// rebatchIterator either passes spansets through directly OR rebatches them based on metadata +// in OtherEntries +type rebatchIterator struct { + iter parquetquery.Iterator +} + +func newRebatchIterator(iter parquetquery.Iterator) *rebatchIterator { + return &rebatchIterator{ + iter: iter, + } +} + +func (i *rebatchIterator) String() string { + return fmt.Sprintf("rebatchIterator: \n\t%s", util.TabOut(i.iter)) +} + +func (i *rebatchIterator) Next() (*pq.IteratorResult, error) { + // jpe do something worth doing + return i.iter.Next() +} + +func (i *rebatchIterator) SeekTo(to pq.RowNumber, definitionLevel int) (*pq.IteratorResult, error) { + return i.iter.SeekTo(to, definitionLevel) +} + +func (i *rebatchIterator) Close() { + i.iter.Close() +} + // spansetIterator turns the parquet iterator into the final // traceql iterator. Every row it receives is one spanset. type spansetIterator struct { @@ -517,7 +549,7 @@ func fetch(ctx context.Context, req traceql.FetchSpansRequest, pf *parquet.File, } if req.SecondPass != nil { - iter = newBridgeIterator(iter, req.SecondPass) + iter = newBridgeIterator(newRebatchIterator(iter), req.SecondPass) iter, err = createAllIterator(ctx, iter, req.SecondPassConditions, false, 0, 0, pf, opts) if err != nil { @@ -525,7 +557,7 @@ func fetch(ctx context.Context, req traceql.FetchSpansRequest, pf *parquet.File, } } - return newSpansetIterator(iter), nil + return newSpansetIterator(newRebatchIterator(iter)), nil } func createAllIterator(ctx context.Context, primaryIter parquetquery.Iterator, conds []traceql.Condition, allConditions bool, start uint64, end uint64, pf *parquet.File, opts common.SearchOptions) (parquetquery.Iterator, error) { From e4ea85a229ebb4826aebe44341c63c156a0039a0 Mon Sep 17 00:00:00 2001 From: Joe Elliott Date: Thu, 18 May 2023 15:05:52 -0400 Subject: [PATCH 07/18] tests pass? 
Signed-off-by: Joe Elliott --- tempodb/tempodb_search_test.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tempodb/tempodb_search_test.go b/tempodb/tempodb_search_test.go index f6603de7fe2..89e3df94565 100644 --- a/tempodb/tempodb_search_test.go +++ b/tempodb/tempodb_search_test.go @@ -197,6 +197,9 @@ func testAdvancedTraceQLCompleteBlock(t *testing.T, blockVersion string) { rando(trueConditionsBySpan[0]), rando(trueConditionsBySpan[0]), rando(trueConditionsBySpan[1]), rando(trueConditionsBySpan[1]), durationBySpan[0]+durationBySpan[1])}, + // groupin' (.foo is a known attribute that is the same on both spans) + {Query: "{} | by(span.foo) | count() = 2"}, + {Query: "{} | by(resource.service.name) | count() = 1"}, } searchesThatDontMatch := []*tempopb.SearchRequest{ // conditions @@ -225,6 +228,9 @@ func testAdvancedTraceQLCompleteBlock(t *testing.T, blockVersion string) { {Query: "{ } | min(duration) < 0"}, {Query: "{ } | max(duration) < 0"}, {Query: "{ } | sum(duration) < 0"}, + // groupin' (.foo is a known attribute that is the same on both spans) + {Query: "{} | by(span.foo) | count() = 1"}, + {Query: "{} | by(resource.service.name) | count() = 2"}, } for _, req := range searchesThatMatch { @@ -522,6 +528,9 @@ func searchTestSuite() ( StartTimeUnixNano: uint64(1000 * time.Second), EndTimeUnixNano: uint64(1002 * time.Second), Status: &v1.Status{}, + Attributes: []*v1_common.KeyValue{ + stringKV("foo", "Bar"), + }, }, }, }, From 8f205762332468ee37a1c3a87c3c31c6420d5de9 Mon Sep 17 00:00:00 2001 From: Joe Elliott Date: Thu, 18 May 2023 15:42:24 -0400 Subject: [PATCH 08/18] tests at tempodb lvl Signed-off-by: Joe Elliott --- tempodb/tempodb_search_test.go | 144 ++++++++++++++++++++++++++++++++- 1 file changed, 142 insertions(+), 2 deletions(-) diff --git a/tempodb/tempodb_search_test.go b/tempodb/tempodb_search_test.go index 89e3df94565..b9e4d3dba04 100644 --- a/tempodb/tempodb_search_test.go +++ b/tempodb/tempodb_search_test.go @@ -258,6 +258,146 @@ func testAdvancedTraceQLCompleteBlock(t *testing.T, blockVersion string) { }) } +// TestGroupTraceQLCompleteBlock is broken out into its own method b/c the returned metadata is distinct +func TestGroupTraceQLCompleteBlock(t *testing.T) { + for _, v := range encoding.AllEncodings() { + vers := v.Version() + t.Run(vers, func(t *testing.T) { + testGroupTraceQLCompleteBlock(t, vers) + }) + } +} + +func testGroupTraceQLCompleteBlock(t *testing.T, blockVersion string) { + e := traceql.NewEngine() + + runCompleteBlockSearchTest(t, blockVersion, func(wantTr *tempopb.Trace, wantMeta *tempopb.TraceSearchMetadata, _, _ []*tempopb.SearchRequest, meta *backend.BlockMeta, r Reader) { + ctx := context.Background() + + type test struct { + req *tempopb.SearchRequest + expected []*tempopb.TraceSearchMetadata + } + + searchesThatMatch := []*test{ + { + req: &tempopb.SearchRequest{Query: "{} | by(span.foo) | count() = 2"}, + expected: []*tempopb.TraceSearchMetadata{ + { + SpanSet: &tempopb.SpanSet{ + Spans: []*tempopb.Span{ + { + SpanID: "0000000000010203", + StartTimeUnixNano: 1000000000000, + DurationNanos: 1000000000, + Name: "", // jpe name? 
+ Attributes: []*v1_common.KeyValue{ + {Key: "foo", Value: &v1_common.AnyValue{Value: &v1_common.AnyValue_StringValue{StringValue: "Bar"}}}, + }, + }, + { + SpanID: "0000000000000000", + StartTimeUnixNano: 1000000000000, + DurationNanos: 2000000000, + Name: "", + Attributes: []*v1_common.KeyValue{ + {Key: "foo", Value: &v1_common.AnyValue{Value: &v1_common.AnyValue_StringValue{StringValue: "Bar"}}}, + }, + }, + }, + Matched: 2, + Attributes: []*v1_common.KeyValue{ + {Key: "by(span.foo)", Value: &v1_common.AnyValue{Value: &v1_common.AnyValue_StringValue{StringValue: "Bar"}}}, + {Key: "count()", Value: &v1_common.AnyValue{Value: &v1_common.AnyValue_IntValue{IntValue: 2}}}, + }, + }, + }, + }, + }, + { + req: &tempopb.SearchRequest{Query: "{} | by(resource.service.name) | count() = 1"}, + expected: []*tempopb.TraceSearchMetadata{ + { + SpanSet: &tempopb.SpanSet{ + Spans: []*tempopb.Span{ + { + SpanID: "0000000000010203", + StartTimeUnixNano: 1000000000000, + DurationNanos: 1000000000, + Name: "", // jpe name? + Attributes: []*v1_common.KeyValue{ + {Key: "service.name", Value: &v1_common.AnyValue{Value: &v1_common.AnyValue_StringValue{StringValue: "MyService"}}}, + }, + }, + }, + Matched: 1, + Attributes: []*v1_common.KeyValue{ + {Key: "by(resource.service.name)", Value: &v1_common.AnyValue{Value: &v1_common.AnyValue_StringValue{StringValue: "MyService"}}}, + {Key: "count()", Value: &v1_common.AnyValue{Value: &v1_common.AnyValue_IntValue{IntValue: 1}}}, + }, + }, + }, + { + SpanSet: &tempopb.SpanSet{ + Spans: []*tempopb.Span{ + { + SpanID: "0000000000000000", + StartTimeUnixNano: 1000000000000, + DurationNanos: 2000000000, + Name: "", + Attributes: []*v1_common.KeyValue{ + {Key: "service.name", Value: &v1_common.AnyValue{Value: &v1_common.AnyValue_StringValue{StringValue: "RootService"}}}, + }, + }, + }, + Matched: 1, + Attributes: []*v1_common.KeyValue{ + {Key: "by(resource.service.name)", Value: &v1_common.AnyValue{Value: &v1_common.AnyValue_StringValue{StringValue: "RootService"}}}, + {Key: "count()", Value: &v1_common.AnyValue{Value: &v1_common.AnyValue_IntValue{IntValue: 1}}}, + }, + }, + }, + }, + }, + } + searchesThatDontMatch := []*tempopb.SearchRequest{ + {Query: "{} | by(span.foo) | count() = 1"}, + {Query: "{} | by(resource.service.name) | count() = 2"}, + } + + for _, tc := range searchesThatMatch { + fetcher := traceql.NewSpansetFetcherWrapper(func(ctx context.Context, req traceql.FetchSpansRequest) (traceql.FetchSpansResponse, error) { + return r.Fetch(ctx, meta, req, common.DefaultSearchOptions()) + }) + + res, err := e.ExecuteSearch(ctx, tc.req, fetcher) + require.NoError(t, err, "search request: %+v", tc) + + // copy the root stuff in directly, spansets defined in test cases above + for _, ss := range tc.expected { + ss.DurationMs = wantMeta.DurationMs + ss.RootServiceName = wantMeta.RootServiceName + ss.RootTraceName = wantMeta.RootTraceName + ss.StartTimeUnixNano = wantMeta.StartTimeUnixNano + ss.TraceID = wantMeta.TraceID + } + + require.NotNil(t, res, "search request: %v", tc) + require.Equal(t, tc.expected, res.Traces, "search request: %v", tc) + } + + for _, tc := range searchesThatDontMatch { + fetcher := traceql.NewSpansetFetcherWrapper(func(ctx context.Context, req traceql.FetchSpansRequest) (traceql.FetchSpansResponse, error) { + return r.Fetch(ctx, meta, req, common.DefaultSearchOptions()) + }) + + res, err := e.ExecuteSearch(ctx, tc, fetcher) + require.NoError(t, err, "search request: %+v", tc) + require.Nil(t, actualForExpectedMeta(wantMeta, res), "search request: 
%v", tc) + } + }) +} + func conditionsForAttributes(atts []*v1_common.KeyValue, scope string) ([]string, []string) { trueConditions := []string{} falseConditions := []string{} @@ -355,8 +495,8 @@ func runCompleteBlockSearchTest(t testing.TB, blockVersion string, runner runner require.NoError(t, err) dec := model.MustNewSegmentDecoder(model.CurrentEncoding) - totalTraces := 250 - wantTrIdx := rand.Intn(250) + totalTraces := 50 + wantTrIdx := rand.Intn(50) for i := 0; i < totalTraces; i++ { var tr *tempopb.Trace var id []byte From fd79a7c32a37295f1721f623d321eb6d346469fe Mon Sep 17 00:00:00 2001 From: Joe Elliott Date: Thu, 18 May 2023 15:56:38 -0400 Subject: [PATCH 09/18] OMG Signed-off-by: Joe Elliott --- tempodb/encoding/vparquet2/block_traceql.go | 163 ++++++++++++++---- .../vparquet2/block_traceql_meta_test.go | 2 + 2 files changed, 131 insertions(+), 34 deletions(-) diff --git a/tempodb/encoding/vparquet2/block_traceql.go b/tempodb/encoding/vparquet2/block_traceql.go index bb0883c7d92..a293f6abbe6 100644 --- a/tempodb/encoding/vparquet2/block_traceql.go +++ b/tempodb/encoding/vparquet2/block_traceql.go @@ -6,6 +6,7 @@ import ( "io" "math" "reflect" + "sort" "strings" "sync" "time" @@ -27,7 +28,11 @@ type span struct { id []byte startTimeUnixNanos uint64 durationNanos uint64 - rowNum parquetquery.RowNumber + + // metadata used to track the span in the parquet file + rowNum parquetquery.RowNumber + cbSpansetFinal bool + cbSpanset *traceql.Spanset } func (s *span) Attributes() map[traceql.Attribute]traceql.Static { @@ -85,6 +90,8 @@ func putSpan(s *span) { s.startTimeUnixNanos = 0 s.durationNanos = 0 s.rowNum = parquetquery.EmptyRowNumber() + s.cbSpansetFinal = false + s.cbSpanset = nil // clear attributes for k := range s.attributes { @@ -270,7 +277,7 @@ type bridgeIterator struct { iter parquetquery.Iterator cb traceql.SecondPassFn - currentSpans []*span + nextSpans []*span } func newBridgeIterator(iter parquetquery.Iterator, cb traceql.SecondPassFn) *bridgeIterator { @@ -286,9 +293,9 @@ func (i *bridgeIterator) String() string { func (i *bridgeIterator) Next() (*pq.IteratorResult, error) { // drain current buffer - if len(i.currentSpans) > 0 { - ret := i.currentSpans[0] - i.currentSpans = i.currentSpans[1:] + if len(i.nextSpans) > 0 { + ret := i.nextSpans[0] + i.nextSpans = i.nextSpans[1:] return spanToIteratorResult(ret), nil } @@ -304,45 +311,49 @@ func (i *bridgeIterator) Next() (*pq.IteratorResult, error) { // The spanset is in the OtherEntries iface := res.OtherValueFromKey(otherEntrySpansetKey) if iface == nil { - return nil, fmt.Errorf("engine assumption broken: spanset not found in other entries") + return nil, fmt.Errorf("engine assumption broken: spanset not found in other entries in bridge") } spanset, ok := iface.(*traceql.Spanset) if !ok { - return nil, fmt.Errorf("engine assumption broken: spanset is not of type *traceql.Spanset") + return nil, fmt.Errorf("engine assumption broken: spanset is not of type *traceql.Spanset in bridge") } - var filteredSpansets []*traceql.Spanset - if i.cb != nil { - filteredSpansets, err = i.cb(spanset) - if err == io.EOF { - return nil, nil - } - if err != nil { - return nil, err - } - // if the filter removed all spansets then let's release all back to the pool - // no reason to try anything more nuanced than this. 
it will handle nearly all cases - if len(filteredSpansets) == 0 { - for _, s := range spanset.Spans { - putSpan(s.(*span)) - } + filteredSpansets, err := i.cb(spanset) + if err == io.EOF { + return nil, nil + } + if err != nil { + return nil, err + } + // if the filter removed all spansets then let's release all back to the pool + // no reason to try anything more nuanced than this. it will handle nearly all cases + if len(filteredSpansets) == 0 { + for _, s := range spanset.Spans { + putSpan(s.(*span)) } - } else { - filteredSpansets = []*traceql.Spanset{spanset} } // flatten spans into i.currentSpans for _, ss := range filteredSpansets { - for _, s := range ss.Spans { + for idx, s := range ss.Spans { span := s.(*span) - i.currentSpans = append(i.currentSpans, span) + + // use otherEntryCallbackSpansetKey to indicate to the rebatchIterator that either + // 1) this is the last span in the spanset, or 2) there are more spans in the spanset + span.cbSpansetFinal = idx == len(ss.Spans)-1 + span.cbSpanset = ss + i.nextSpans = append(i.nextSpans, span) } } + sort.Slice(i.nextSpans, func(j, k int) bool { + return parquetquery.CompareRowNumbers(DefinitionLevelResourceSpans, i.nextSpans[j].rowNum, i.nextSpans[k].rowNum) == -1 + }) + // found something! - if len(i.currentSpans) > 0 { - ret := i.currentSpans[0] - i.currentSpans = i.currentSpans[1:] + if len(i.nextSpans) > 0 { + ret := i.nextSpans[0] + i.nextSpans = i.nextSpans[1:] return spanToIteratorResult(ret), nil } } @@ -373,9 +384,11 @@ func (i *bridgeIterator) Close() { var _ pq.Iterator = (*rebatchIterator)(nil) // rebatchIterator either passes spansets through directly OR rebatches them based on metadata -// in OtherEntries +// in OtherEntries jpe - test type rebatchIterator struct { iter parquetquery.Iterator + + nextSpans []*span } func newRebatchIterator(iter parquetquery.Iterator) *rebatchIterator { @@ -388,9 +401,91 @@ func (i *rebatchIterator) String() string { return fmt.Sprintf("rebatchIterator: \n\t%s", util.TabOut(i.iter)) } +// Next has to handle two different style results. First is an initial set of spans +// that does not have a callback spanset. These can be passed directly through. +// Second is a set of spans that have spansets imposed by the callback (i.e. for grouping) +// these must be regrouped into the callback spansets func (i *rebatchIterator) Next() (*pq.IteratorResult, error) { - // jpe do something worth doing - return i.iter.Next() + for { + // see if we have a queue + res := i.resultFromNextSpans() + if res != nil { + return res, nil + } + + // check the iterator for anything + res, err := i.iter.Next() + if err != nil { + return nil, err + } + if res == nil { + return nil, nil + } + + // get the spanset and see if we should pass it through or buffer for rebatching + iface := res.OtherValueFromKey(otherEntrySpansetKey) + if iface == nil { + return nil, fmt.Errorf("engine assumption broken: spanset not found in other entries in rebatch") + } + ss, ok := iface.(*traceql.Spanset) + if !ok { + return nil, fmt.Errorf("engine assumption broken: spanset is not of type *traceql.Spanset in rebatch") + } + + // if this has no call back spanset just pass it on + if len(ss.Spans) > 0 && ss.Spans[0].(*span).cbSpanset == nil { + return res, nil + } + + // dump all spans into our buffer + for _, s := range ss.Spans { + sp := s.(*span) + if !sp.cbSpansetFinal { + continue + } + + // copy trace level data from the current iteration spanset into the rebatch spanset. 
only do this if + // we don't have current data + if sp.cbSpanset.DurationNanos == 0 { + sp.cbSpanset.DurationNanos = ss.DurationNanos + } + if len(sp.cbSpanset.TraceID) == 0 { + sp.cbSpanset.TraceID = ss.TraceID + } + if len(sp.cbSpanset.RootSpanName) == 0 { + sp.cbSpanset.RootSpanName = ss.RootSpanName + } + if len(sp.cbSpanset.RootServiceName) == 0 { + sp.cbSpanset.RootServiceName = ss.RootServiceName + } + if sp.cbSpanset.StartTimeUnixNanos == 0 { + sp.cbSpanset.StartTimeUnixNanos = ss.StartTimeUnixNanos + } + + i.nextSpans = append(i.nextSpans, sp) + } + + res = i.resultFromNextSpans() + if res != nil { + return res, nil + } + // if we don't find anything in that spanset, start over + } +} + +func (i *rebatchIterator) resultFromNextSpans() *pq.IteratorResult { + for len(i.nextSpans) > 0 { + ret := i.nextSpans[0] + i.nextSpans = i.nextSpans[1:] + + if ret.cbSpansetFinal && ret.cbSpanset != nil { + res := &pq.IteratorResult{} + res.AppendOtherValue(otherEntrySpansetKey, ret.cbSpanset) + return res + } + } + + return nil } func (i *rebatchIterator) SeekTo(to pq.RowNumber, definitionLevel int) (*pq.IteratorResult, error) { @@ -427,11 +522,11 @@ func (i *spansetIterator) Next(ctx context.Context) (*traceql.Spanset, error) { // The spanset is in the OtherEntries iface := res.OtherValueFromKey(otherEntrySpansetKey) if iface == nil { - return nil, fmt.Errorf("engine assumption broken: spanset not found in other entries") + return nil, fmt.Errorf("engine assumption broken: spanset not found in other entries in spansetIterator") } ss, ok := iface.(*traceql.Spanset) if !ok { - return nil, fmt.Errorf("engine assumption broken: spanset is not of type *traceql.Spanset") + return nil, fmt.Errorf("engine assumption broken: spanset is not of type *traceql.Spanset in spansetIterator") } return ss, nil diff --git a/tempodb/encoding/vparquet2/block_traceql_meta_test.go b/tempodb/encoding/vparquet2/block_traceql_meta_test.go index 11c556b25af..9554311aad8 100644 --- a/tempodb/encoding/vparquet2/block_traceql_meta_test.go +++ b/tempodb/encoding/vparquet2/block_traceql_meta_test.go @@ -292,6 +292,8 @@ func TestBackendBlockSearchFetchMetaData(t *testing.T) { // fetch layer. just wipe them out here for _, s := range ss { for _, sp := range s.Spans { + sp.(*span).cbSpanset = nil + sp.(*span).cbSpansetFinal = false sp.(*span).rowNum = parquetquery.RowNumber{} } } From 2683ad467014af4ee0a0080c860ec21bd46bdba1 Mon Sep 17 00:00:00 2001 From: Joe Elliott Date: Thu, 18 May 2023 16:01:46 -0400 Subject: [PATCH 10/18] sort Signed-off-by: Joe Elliott --- tempodb/tempodb_search_test.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/tempodb/tempodb_search_test.go b/tempodb/tempodb_search_test.go index b9e4d3dba04..b36bdfba06d 100644 --- a/tempodb/tempodb_search_test.go +++ b/tempodb/tempodb_search_test.go @@ -5,6 +5,7 @@ import ( "fmt" "math/rand" "path" + "sort" "strings" "testing" "time" @@ -373,13 +374,18 @@ func testGroupTraceQLCompleteBlock(t *testing.T, blockVersion string) { res, err := e.ExecuteSearch(ctx, tc.req, fetcher) require.NoError(t, err, "search request: %+v", tc) - // copy the root stuff in directly, spansets defined in test cases above + // copy the root stuff in directly, spansets defined in test cases above. 
sorting cause maps for _, ss := range tc.expected { ss.DurationMs = wantMeta.DurationMs ss.RootServiceName = wantMeta.RootServiceName ss.RootTraceName = wantMeta.RootTraceName ss.StartTimeUnixNano = wantMeta.StartTimeUnixNano ss.TraceID = wantMeta.TraceID + sort.Slice(ss.SpanSet.Attributes, func(i, j int) bool { return ss.SpanSet.Attributes[i].Key < ss.SpanSet.Attributes[j].Key }) + } + + for _, ss := range res.Traces { + sort.Slice(ss.SpanSet.Attributes, func(i, j int) bool { return ss.SpanSet.Attributes[i].Key < ss.SpanSet.Attributes[j].Key }) } require.NotNil(t, res, "search request: %v", tc) From 752cb8966ef4e9cc6548b2116a6e397bd900516a Mon Sep 17 00:00:00 2001 From: Joe Elliott Date: Thu, 18 May 2023 16:06:51 -0400 Subject: [PATCH 11/18] changelog Signed-off-by: Joe Elliott --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index c5aeff00168..dea984ca248 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -22,6 +22,7 @@ ``` * [FEATURE] Add support for `q` query param in `/api/v2/search//values` to filter results based on a TraceQL query [#2253](https://github.com/grafana/tempo/pull/2253) (@mapno) To make use of filtering, configure `autocomplete_filtering_enabled`. +* [FEATURE] Add support for `by()` and `coalesce()` to TraceQL. [#2490](https://github.com/grafana/tempo/pull/2490) * [FEATURE] Add a GRPC streaming endpoint for traceql search [#2366](https://github.com/grafana/tempo/pull/2366) (@joe-elliott) * [ENHANCEMENT] Add `scope` parameter to `/api/search/tags` [#2282](https://github.com/grafana/tempo/pull/2282) (@joe-elliott) Create new endpoint `/api/v2/search/tags` that returns all tags organized by scope. @@ -36,6 +37,7 @@ To make use of filtering, configure `autocomplete_filtering_enabled`. * [ENHANCEMENT] Introduce `overrides.Interface` to decouple implementation from usage [#2482](https://github.com/grafana/tempo/pull/2482) (@kvrhdn) * [BUGFIX] tempodb integer divide by zero error [#2167](https://github.com/grafana/tempo/issues/2167) (@kroksys) * [BUGFIX] metrics-generator: ensure Prometheus will scale up shards when remote write is lagging behind [#2463](https://github.com/grafana/tempo/issues/2463) (@kvrhdn) +* [BUGFIX] Fixes issue where matches and other spanset level attributes were not persisted to the TraceQL results. 
[#2490](https://github.com/grafana/tempo/pull/2490) * [CHANGE] **Breaking Change** Rename s3.insecure_skip_verify [#2407](https://github.com/grafana/tempo/pull/2407) (@zalegrala) ```yaml storage: From ee0f2710aae095fdfac8b55b118a89620e8ef245 Mon Sep 17 00:00:00 2001 From: Joe Elliott Date: Mon, 22 May 2023 08:39:01 -0400 Subject: [PATCH 12/18] attributes as slice for order Signed-off-by: Joe Elliott --- pkg/traceql/ast_execute.go | 4 +--- pkg/traceql/ast_execute_test.go | 35 +++++++++++++++------------------ pkg/traceql/engine.go | 13 ++++++------ pkg/traceql/engine_test.go | 9 ++++----- pkg/traceql/storage.go | 25 ++++++++++------------- pkg/traceql/storage_test.go | 4 +--- tempodb/tempodb_search_test.go | 8 +------- 7 files changed, 39 insertions(+), 59 deletions(-) diff --git a/pkg/traceql/ast_execute.go b/pkg/traceql/ast_execute.go index 0afd098c0a2..b0eed97f071 100644 --- a/pkg/traceql/ast_execute.go +++ b/pkg/traceql/ast_execute.go @@ -33,9 +33,7 @@ func (g GroupOperation) evaluate(ss []*Spanset) ([]*Spanset, error) { // If not, create a new group and add it to the map group = &Spanset{} // copy all existing attributes forward - for k, att := range spanset.Attributes { - group.AddAttribute(k, att) - } + group.Attributes = append(group.Attributes, spanset.Attributes...) group.AddAttribute(g.String(), result) groups[result] = group } diff --git a/pkg/traceql/ast_execute_test.go b/pkg/traceql/ast_execute_test.go index 7e74295fdaa..b0144f08255 100644 --- a/pkg/traceql/ast_execute_test.go +++ b/pkg/traceql/ast_execute_test.go @@ -3,7 +3,6 @@ package traceql import ( "bytes" "fmt" - "reflect" "sort" "testing" "time" @@ -44,13 +43,8 @@ func testEvaluator(t *testing.T, tc evalTC) { sort.Slice(actual, makeSort(actual)) sort.Slice(tc.output, makeSort(tc.output)) - // reflect.DeepEqual() used b/c it correctly compares maps - if eq := reflect.DeepEqual(actual, tc.output); !eq { - require.Equal(t, tc.output, actual) // this is will nicely print diffs but some diffs may be red herrings due to map iteration. 
- } - if eq := reflect.DeepEqual(cloneIn, tc.input); !eq { - require.Equal(t, tc.input, cloneIn) - } + require.Equal(t, tc.output, actual) + require.Equal(t, tc.input, cloneIn) }) } @@ -199,13 +193,13 @@ func TestGroup(t *testing.T) { {Spans: []Span{ &mockSpan{id: []byte{1}, attributes: map[Attribute]Static{NewAttribute("foo"): NewStaticString("a")}}, }, - Attributes: map[string]Static{"by(.foo)": NewStaticString("a")}, + Attributes: []*SpansetAttribute{{Name: "by(.foo)", Val: NewStaticString("a")}}, }, {Spans: []Span{ &mockSpan{id: []byte{2}, attributes: map[Attribute]Static{NewAttribute("foo"): NewStaticString("b")}}, &mockSpan{id: []byte{3}, attributes: map[Attribute]Static{NewAttribute("foo"): NewStaticString("b")}}, }, - Attributes: map[string]Static{"by(.foo)": NewStaticString("b")}, + Attributes: []*SpansetAttribute{{Name: "by(.foo)", Val: NewStaticString("b")}}, }, }, }, @@ -222,17 +216,17 @@ func TestGroup(t *testing.T) { {Spans: []Span{ &mockSpan{id: []byte{1}, attributes: map[Attribute]Static{NewAttribute("foo"): NewStaticString("a"), NewAttribute("bar"): NewStaticString("1")}}, }, - Attributes: map[string]Static{"by(.foo)": NewStaticString("a"), "by(.bar)": NewStaticString("1")}, + Attributes: []*SpansetAttribute{{Name: "by(.foo)", Val: NewStaticString("a")}, {Name: "by(.bar)", Val: NewStaticString("1")}}, }, {Spans: []Span{ &mockSpan{id: []byte{2}, attributes: map[Attribute]Static{NewAttribute("foo"): NewStaticString("b"), NewAttribute("bar"): NewStaticString("1")}}, }, - Attributes: map[string]Static{"by(.foo)": NewStaticString("b"), "by(.bar)": NewStaticString("1")}, + Attributes: []*SpansetAttribute{{Name: "by(.foo)", Val: NewStaticString("b")}, {Name: "by(.bar)", Val: NewStaticString("1")}}, }, {Spans: []Span{ &mockSpan{id: []byte{3}, attributes: map[Attribute]Static{NewAttribute("foo"): NewStaticString("b"), NewAttribute("bar"): NewStaticString("2")}}, }, - Attributes: map[string]Static{"by(.foo)": NewStaticString("b"), "by(.bar)": NewStaticString("2")}, + Attributes: []*SpansetAttribute{{Name: "by(.foo)", Val: NewStaticString("b")}, {Name: "by(.bar)", Val: NewStaticString("2")}}, }, }, }, @@ -254,7 +248,10 @@ func TestCoalesce(t *testing.T) { {Spans: []Span{ &mockSpan{id: []byte{2}, attributes: map[Attribute]Static{NewAttribute("foo"): NewStaticString("b")}}, &mockSpan{id: []byte{3}, attributes: map[Attribute]Static{NewAttribute("foo"): NewStaticString("b")}}, - }}, + }, + // coalesce() should drop attributes + Attributes: []*SpansetAttribute{{Name: "by(.foo)", Val: NewStaticString("a")}}, + }, }, []*Spanset{ {Spans: []Span{ @@ -372,7 +369,7 @@ func TestScalarFilterEvaluate(t *testing.T) { &mockSpan{attributes: map[Attribute]Static{NewAttribute("foo"): NewStaticString("a")}}, &mockSpan{attributes: map[Attribute]Static{NewAttribute("foo"): NewStaticString("a")}}, }, - Attributes: map[string]Static{"count()": NewStaticInt(2)}, + Attributes: []*SpansetAttribute{{Name: "count()", Val: NewStaticInt(2)}}, }, }, }, @@ -415,7 +412,7 @@ func TestScalarFilterEvaluate(t *testing.T) { NewIntrinsic(IntrinsicDuration): NewStaticDuration(15 * time.Millisecond)}, }, }, - Attributes: map[string]Static{"avg(duration)": NewStaticDuration(10 * time.Millisecond)}, + Attributes: []*SpansetAttribute{{Name: "avg(duration)", Val: NewStaticDuration(10 * time.Millisecond)}}, }, }, }, @@ -459,7 +456,7 @@ func TestScalarFilterEvaluate(t *testing.T) { NewIntrinsic(IntrinsicDuration): NewStaticDuration(15 * time.Millisecond)}, }, }, - Attributes: map[string]Static{"max(duration)": NewStaticDuration(15 
* time.Millisecond)}, + Attributes: []*SpansetAttribute{{Name: "max(duration)", Val: NewStaticDuration(15 * time.Millisecond)}}, }, }, }, @@ -503,7 +500,7 @@ func TestScalarFilterEvaluate(t *testing.T) { NewIntrinsic(IntrinsicDuration): NewStaticDuration(8 * time.Millisecond)}, }, }, - Attributes: map[string]Static{"min(duration)": NewStaticDuration(2 * time.Millisecond)}, + Attributes: []*SpansetAttribute{{Name: "min(duration)", Val: NewStaticDuration(2 * time.Millisecond)}}, }, }, }, @@ -547,7 +544,7 @@ func TestScalarFilterEvaluate(t *testing.T) { NewIntrinsic(IntrinsicDuration): NewStaticDuration(8 * time.Millisecond)}, }, }, - Attributes: map[string]Static{"sum(duration)": NewStaticDuration(10 * time.Millisecond)}, + Attributes: []*SpansetAttribute{{Name: "sum(duration)", Val: NewStaticDuration(10 * time.Millisecond)}}, }, }, }, diff --git a/pkg/traceql/engine.go b/pkg/traceql/engine.go index da3dec2a8d8..0730d4875bd 100644 --- a/pkg/traceql/engine.go +++ b/pkg/traceql/engine.go @@ -295,9 +295,7 @@ func (e *Engine) asTraceSearchMetadata(spanset *Spanset) *tempopb.TraceSearchMet RootTraceName: spanset.RootSpanName, StartTimeUnixNano: spanset.StartTimeUnixNanos, DurationMs: uint32(spanset.DurationNanos / 1_000_000), - SpanSet: &tempopb.SpanSet{ - Matched: uint32(spanset.Attributes[attributeMatched].N), - }, + SpanSet: &tempopb.SpanSet{}, } for _, span := range spanset.Spans { @@ -333,14 +331,15 @@ func (e *Engine) asTraceSearchMetadata(spanset *Spanset) *tempopb.TraceSearchMet } // add attributes - for key, static := range spanset.Attributes { - if key == attributeMatched { + for _, att := range spanset.Attributes { + if att.Name == attributeMatched { + metadata.SpanSet.Matched = uint32(att.Val.N) continue } - staticAnyValue := static.asAnyValue() + staticAnyValue := att.Val.asAnyValue() keyValue := &common_v1.KeyValue{ - Key: key, + Key: att.Name, Value: staticAnyValue, } metadata.SpanSet.Attributes = append(metadata.SpanSet.Attributes, keyValue) diff --git a/pkg/traceql/engine_test.go b/pkg/traceql/engine_test.go index cc577d909bd..ab2968a2985 100644 --- a/pkg/traceql/engine_test.go +++ b/pkg/traceql/engine_test.go @@ -78,7 +78,6 @@ func TestEngine_Execute(t *testing.T) { }, }, }, - Attributes: map[string]Static{attributeMatched: NewStaticInt(1)}, }, { TraceID: []byte{2}, @@ -165,7 +164,7 @@ func TestEngine_Execute(t *testing.T) { }, }, }, - Matched: 3, + Matched: 0, }, }, } @@ -226,9 +225,9 @@ func TestEngine_asTraceSearchMetadata(t *testing.T) { attributes: map[Attribute]Static{}, }, }, - Attributes: map[string]Static{ - attributeMatched: NewStaticInt(2), - "avg(duration)": NewStaticFloat(15.0), + Attributes: []*SpansetAttribute{ + {Name: attributeMatched, Val: NewStaticInt(2)}, + {Name: "avg(duration)", Val: NewStaticFloat(15.0)}, }, } diff --git a/pkg/traceql/storage.go b/pkg/traceql/storage.go index d1532ac4bf1..7f5caa94333 100644 --- a/pkg/traceql/storage.go +++ b/pkg/traceql/storage.go @@ -78,8 +78,14 @@ type Span interface { DurationNanos() uint64 } +// should we just make matched a field on the spanset instead of a special attribute? 
const attributeMatched = "__matched" +type SpansetAttribute struct { + Name string + Val Static +} + type Spanset struct { // these fields are actually used by the engine to evaluate queries Scalar Static @@ -90,25 +96,14 @@ type Spanset struct { RootServiceName string StartTimeUnixNanos uint64 DurationNanos uint64 - Attributes map[string]Static + Attributes []*SpansetAttribute } func (s *Spanset) AddAttribute(key string, value Static) { - if s.Attributes == nil { - s.Attributes = make(map[string]Static) - } - s.Attributes[key] = value + s.Attributes = append(s.Attributes, &SpansetAttribute{Name: key, Val: value}) } func (s *Spanset) clone() *Spanset { - var atts map[string]Static - if s.Attributes != nil { - atts = make(map[string]Static, len(s.Attributes)) - for k, v := range s.Attributes { - atts[k] = v - } - } - return &Spanset{ TraceID: s.TraceID, Scalar: s.Scalar, @@ -116,8 +111,8 @@ func (s *Spanset) clone() *Spanset { RootServiceName: s.RootServiceName, StartTimeUnixNanos: s.StartTimeUnixNanos, DurationNanos: s.DurationNanos, - Spans: s.Spans, // we're not deep cloning into the spans themselves - Attributes: atts, + Spans: s.Spans, // we're not deep cloning into the spans or attributes + Attributes: s.Attributes, } } diff --git a/pkg/traceql/storage_test.go b/pkg/traceql/storage_test.go index ef95d1eec75..65781a41a7a 100644 --- a/pkg/traceql/storage_test.go +++ b/pkg/traceql/storage_test.go @@ -23,9 +23,7 @@ func TestSpansetClone(t *testing.T) { RootServiceName: "b", StartTimeUnixNanos: 1, DurationNanos: 5, - Attributes: map[string]Static{ - "foo": NewStaticString("bar"), - }, + Attributes: []*SpansetAttribute{{Name: "foo", Val: NewStaticString("bar")}}, }, { Spans: []Span{ diff --git a/tempodb/tempodb_search_test.go b/tempodb/tempodb_search_test.go index b36bdfba06d..e171058e265 100644 --- a/tempodb/tempodb_search_test.go +++ b/tempodb/tempodb_search_test.go @@ -5,7 +5,6 @@ import ( "fmt" "math/rand" "path" - "sort" "strings" "testing" "time" @@ -374,18 +373,13 @@ func testGroupTraceQLCompleteBlock(t *testing.T, blockVersion string) { res, err := e.ExecuteSearch(ctx, tc.req, fetcher) require.NoError(t, err, "search request: %+v", tc) - // copy the root stuff in directly, spansets defined in test cases above. sorting cause maps + // copy the root stuff in directly, spansets defined in test cases above. 
for _, ss := range tc.expected { ss.DurationMs = wantMeta.DurationMs ss.RootServiceName = wantMeta.RootServiceName ss.RootTraceName = wantMeta.RootTraceName ss.StartTimeUnixNano = wantMeta.StartTimeUnixNano ss.TraceID = wantMeta.TraceID - sort.Slice(ss.SpanSet.Attributes, func(i, j int) bool { return ss.SpanSet.Attributes[i].Key < ss.SpanSet.Attributes[j].Key }) - } - - for _, ss := range res.Traces { - sort.Slice(ss.SpanSet.Attributes, func(i, j int) bool { return ss.SpanSet.Attributes[i].Key < ss.SpanSet.Attributes[j].Key }) } require.NotNil(t, res, "search request: %v", tc) From 8c9fd17626638ed6a1a5009b0054c4bbf2cd8c60 Mon Sep 17 00:00:00 2001 From: Joe Elliott Date: Mon, 22 May 2023 13:18:31 -0400 Subject: [PATCH 13/18] Added spansets and updated Combine Signed-off-by: Joe Elliott --- modules/frontend/search_progress.go | 4 +- modules/ingester/instance_search.go | 2 +- pkg/search/util.go | 27 --- pkg/tempopb/tempo.pb.go | 297 +++++++++++++++++----------- pkg/tempopb/tempo.proto | 3 +- pkg/traceql/engine.go | 19 ++ 6 files changed, 204 insertions(+), 148 deletions(-) diff --git a/modules/frontend/search_progress.go b/modules/frontend/search_progress.go index ccf282dc413..c324773ef9e 100644 --- a/modules/frontend/search_progress.go +++ b/modules/frontend/search_progress.go @@ -6,8 +6,8 @@ import ( "sort" "sync" - "github.com/grafana/tempo/pkg/search" "github.com/grafana/tempo/pkg/tempopb" + "github.com/grafana/tempo/pkg/traceql" ) // searchProgressFactory is used to provide a way to construct a shardedSearchProgress to the searchSharder. It exists @@ -92,7 +92,7 @@ func (r *searchProgress) addResponse(res *tempopb.SearchResponse) { // combine into the incoming trace and then set in the map. this prevents // race conditions on pointers to traces that we've already returned from // .result() - search.CombineSearchResults(t, r.resultsMap[t.TraceID]) + traceql.CombineSearchResults(t, r.resultsMap[t.TraceID]) r.resultsMap[t.TraceID] = t } } diff --git a/modules/ingester/instance_search.go b/modules/ingester/instance_search.go index 847a67b6351..75141a95df1 100644 --- a/modules/ingester/instance_search.go +++ b/modules/ingester/instance_search.go @@ -65,7 +65,7 @@ func (i *instance) Search(ctx context.Context, req *tempopb.SearchRequest) (*tem // Dedupe/combine results if existing := resultsMap[result.TraceID]; existing != nil { - search.CombineSearchResults(existing, result) + traceql.CombineSearchResults(existing, result) } else { resultsMap[result.TraceID] = result } diff --git a/pkg/search/util.go b/pkg/search/util.go index a5c28832960..bfa53cdf11a 100644 --- a/pkg/search/util.go +++ b/pkg/search/util.go @@ -59,30 +59,3 @@ func GetVirtualIntrinsicValues() []string { traceql.IntrinsicStatus.String(), } } - -// CombineSearchResults overlays the incoming search result with the existing result. This is required -// for the following reason: a trace may be present in multiple blocks, or in partial segments -// in live traces. The results should reflect elements of all segments. -func CombineSearchResults(existing *tempopb.TraceSearchMetadata, incoming *tempopb.TraceSearchMetadata) { - if existing.TraceID == "" { - existing.TraceID = incoming.TraceID - } - - if existing.RootServiceName == "" { - existing.RootServiceName = incoming.RootServiceName - } - - if existing.RootTraceName == "" { - existing.RootTraceName = incoming.RootTraceName - } - - // Earliest start time. 
- if existing.StartTimeUnixNano > incoming.StartTimeUnixNano { - existing.StartTimeUnixNano = incoming.StartTimeUnixNano - } - - // Longest duration - if existing.DurationMs < incoming.DurationMs { - existing.DurationMs = incoming.DurationMs - } -} diff --git a/pkg/tempopb/tempo.pb.go b/pkg/tempopb/tempo.pb.go index 8f8be779598..6ec23b3a6de 100644 --- a/pkg/tempopb/tempo.pb.go +++ b/pkg/tempopb/tempo.pb.go @@ -469,12 +469,13 @@ func (m *SearchResponse) GetMetrics() *SearchMetrics { } type TraceSearchMetadata struct { - TraceID string `protobuf:"bytes,1,opt,name=traceID,proto3" json:"traceID,omitempty"` - RootServiceName string `protobuf:"bytes,2,opt,name=rootServiceName,proto3" json:"rootServiceName,omitempty"` - RootTraceName string `protobuf:"bytes,3,opt,name=rootTraceName,proto3" json:"rootTraceName,omitempty"` - StartTimeUnixNano uint64 `protobuf:"varint,4,opt,name=startTimeUnixNano,proto3" json:"startTimeUnixNano,omitempty"` - DurationMs uint32 `protobuf:"varint,5,opt,name=durationMs,proto3" json:"durationMs,omitempty"` - SpanSet *SpanSet `protobuf:"bytes,6,opt,name=spanSet,proto3" json:"spanSet,omitempty"` + TraceID string `protobuf:"bytes,1,opt,name=traceID,proto3" json:"traceID,omitempty"` + RootServiceName string `protobuf:"bytes,2,opt,name=rootServiceName,proto3" json:"rootServiceName,omitempty"` + RootTraceName string `protobuf:"bytes,3,opt,name=rootTraceName,proto3" json:"rootTraceName,omitempty"` + StartTimeUnixNano uint64 `protobuf:"varint,4,opt,name=startTimeUnixNano,proto3" json:"startTimeUnixNano,omitempty"` + DurationMs uint32 `protobuf:"varint,5,opt,name=durationMs,proto3" json:"durationMs,omitempty"` + SpanSet *SpanSet `protobuf:"bytes,6,opt,name=spanSet,proto3" json:"spanSet,omitempty"` + SpanSets []*SpanSet `protobuf:"bytes,7,rep,name=spanSets,proto3" json:"spanSets,omitempty"` } func (m *TraceSearchMetadata) Reset() { *m = TraceSearchMetadata{} } @@ -552,6 +553,13 @@ func (m *TraceSearchMetadata) GetSpanSet() *SpanSet { return nil } +func (m *TraceSearchMetadata) GetSpanSets() []*SpanSet { + if m != nil { + return m.SpanSets + } + return nil +} + type SpanSet struct { Spans []*Span `protobuf:"bytes,1,rep,name=spans,proto3" json:"spans,omitempty"` Matched uint32 `protobuf:"varint,2,opt,name=matched,proto3" json:"matched,omitempty"` @@ -1916,119 +1924,120 @@ func init() { func init() { proto.RegisterFile("pkg/tempopb/tempo.proto", fileDescriptor_f22805646f4f62b6) } var fileDescriptor_f22805646f4f62b6 = []byte{ - // 1784 bytes of a gzipped FileDescriptorProto + // 1796 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x58, 0x4f, 0x73, 0x1b, 0x49, 0x15, 0xf7, 0x58, 0x7f, 0x6c, 0x3d, 0x59, 0x8e, 0xdd, 0x9b, 0x38, 0x8a, 0x12, 0x1c, 0xd7, 0x90, - 0x5a, 0x0c, 0xc5, 0xca, 0x8e, 0x36, 0xae, 0x25, 0x1b, 0x0a, 0x0a, 0xe1, 0x90, 0x64, 0x13, 0x2f, - 0xd9, 0x91, 0xc9, 0x61, 0x2f, 0x54, 0x6b, 0xa6, 0xa3, 0x4c, 0x59, 0x9a, 0xd6, 0xce, 0xb4, 0x8c, - 0xc5, 0x09, 0x0e, 0x70, 0xe2, 0xc0, 0x85, 0x03, 0x17, 0xaa, 0x38, 0x51, 0xc5, 0xd7, 0xe0, 0xb2, - 0x27, 0x6a, 0x8f, 0x14, 0x87, 0x2d, 0x2a, 0xf9, 0x04, 0x1c, 0xb9, 0x51, 0xef, 0x75, 0xf7, 0xfc, - 0x93, 0x1c, 0x6a, 0x97, 0x93, 0xfa, 0xfd, 0xfa, 0xd7, 0xef, 0xbd, 0x7e, 0xfd, 0xfa, 0xf5, 0x1b, - 0xc1, 0xf5, 0xe9, 0xd9, 0xe8, 0x40, 0x89, 0xc9, 0x54, 0x4e, 0x87, 0xfa, 0xb7, 0x3b, 0x8d, 0xa5, - 0x92, 0x6c, 0xcd, 0x80, 0x9d, 0xab, 0x2a, 0xe6, 0xbe, 0x38, 0x38, 0xbf, 0x7b, 0x40, 0x03, 0x3d, - 0xdd, 0xd9, 0xf1, 0xe5, 0x64, 0x22, 0x23, 0x84, 0xf5, 0xc8, 0xe0, 0xef, 0x8d, 0x42, 0xf5, 0x6a, - 
0x36, 0xec, 0xfa, 0x72, 0x72, 0x30, 0x92, 0x23, 0x79, 0x40, 0xf0, 0x70, 0xf6, 0x92, 0x24, 0x12, - 0x68, 0xa4, 0xe9, 0xee, 0x6f, 0x1d, 0xd8, 0x3a, 0x45, 0xb5, 0xfd, 0xf9, 0x93, 0x63, 0x4f, 0x7c, - 0x36, 0x13, 0x89, 0x62, 0x6d, 0x58, 0x23, 0x53, 0x4f, 0x8e, 0xdb, 0xce, 0x9e, 0xb3, 0xbf, 0xe1, - 0x59, 0x91, 0xed, 0x02, 0x0c, 0xc7, 0xd2, 0x3f, 0x1b, 0x28, 0x1e, 0xab, 0xf6, 0xea, 0x9e, 0xb3, - 0xdf, 0xf0, 0x72, 0x08, 0xeb, 0xc0, 0x3a, 0x49, 0x0f, 0xa3, 0xa0, 0x5d, 0xa1, 0xd9, 0x54, 0x66, - 0xb7, 0xa0, 0xf1, 0xd9, 0x4c, 0xc4, 0xf3, 0x13, 0x19, 0x88, 0x76, 0x8d, 0x26, 0x33, 0xc0, 0x8d, - 0x60, 0x3b, 0xe7, 0x47, 0x32, 0x95, 0x51, 0x22, 0xd8, 0x1d, 0xa8, 0x91, 0x65, 0x72, 0xa3, 0xd9, - 0xdb, 0xec, 0x9a, 0x98, 0x74, 0x89, 0xea, 0xe9, 0x49, 0xf6, 0x3e, 0xac, 0x4d, 0x84, 0x8a, 0x43, - 0x3f, 0x21, 0x8f, 0x9a, 0xbd, 0x1b, 0x45, 0x1e, 0xaa, 0x3c, 0xd1, 0x04, 0xcf, 0x32, 0x5d, 0x96, - 0xdb, 0xb7, 0x99, 0x74, 0xff, 0xbe, 0x0a, 0xad, 0x81, 0xe0, 0xb1, 0xff, 0xca, 0x46, 0xe2, 0x43, - 0xa8, 0x9e, 0xf2, 0x51, 0xd2, 0x76, 0xf6, 0x2a, 0xfb, 0xcd, 0xde, 0x5e, 0xaa, 0xb7, 0xc0, 0xea, - 0x22, 0xe5, 0x61, 0xa4, 0xe2, 0x79, 0xbf, 0xfa, 0xf9, 0x97, 0xb7, 0x57, 0x3c, 0x5a, 0xc3, 0xee, - 0x40, 0xeb, 0x24, 0x8c, 0x8e, 0x67, 0x31, 0x57, 0xa1, 0x8c, 0x4e, 0xb4, 0x73, 0x2d, 0xaf, 0x08, - 0x12, 0x8b, 0x5f, 0xe4, 0x58, 0x15, 0xc3, 0xca, 0x83, 0xec, 0x2a, 0xd4, 0x9e, 0x85, 0x93, 0x50, - 0xb5, 0xab, 0x34, 0xab, 0x05, 0x44, 0x13, 0x3a, 0x88, 0x9a, 0x46, 0x49, 0x60, 0x5b, 0x50, 0x11, - 0x51, 0xd0, 0xae, 0x13, 0x86, 0x43, 0xe4, 0x7d, 0x82, 0x81, 0x6e, 0xaf, 0x53, 0xd4, 0xb5, 0xc0, - 0xf6, 0xe1, 0xca, 0x60, 0xca, 0xa3, 0xe4, 0xb9, 0x88, 0xf1, 0x77, 0x20, 0x54, 0xbb, 0x41, 0x6b, - 0xca, 0x70, 0xe7, 0x03, 0x68, 0xa4, 0x5b, 0x44, 0xf5, 0x67, 0x62, 0x4e, 0x27, 0xd2, 0xf0, 0x70, - 0x88, 0xea, 0xcf, 0xf9, 0x78, 0x26, 0x4c, 0x3e, 0x68, 0xe1, 0xc3, 0xd5, 0xef, 0x39, 0xee, 0xaf, - 0x2a, 0xc0, 0x74, 0xa8, 0xfa, 0x98, 0x05, 0x36, 0xaa, 0xf7, 0xa0, 0x91, 0xd8, 0x00, 0x9a, 0xa3, - 0xdd, 0x59, 0x1e, 0x5a, 0x2f, 0x23, 0x62, 0x56, 0x52, 0x2e, 0x3d, 0x39, 0x36, 0x86, 0xac, 0x88, - 0x99, 0x45, 0x5b, 0x7f, 0xce, 0x47, 0xc2, 0xc4, 0x2f, 0x03, 0x30, 0xc2, 0x53, 0x3e, 0x12, 0xc9, - 0xa9, 0xd4, 0xaa, 0x4d, 0x0c, 0x8b, 0x20, 0x66, 0xae, 0x88, 0x7c, 0x19, 0x84, 0xd1, 0xc8, 0x24, - 0x67, 0x2a, 0xa3, 0x86, 0x30, 0x0a, 0xc4, 0x05, 0xaa, 0x1b, 0x84, 0xbf, 0x14, 0x26, 0xb6, 0x45, - 0x90, 0xb9, 0xb0, 0xa1, 0xa4, 0xe2, 0x63, 0x4f, 0xf8, 0x32, 0x0e, 0x92, 0xf6, 0x1a, 0x91, 0x0a, - 0x18, 0x72, 0x02, 0xae, 0xf8, 0x43, 0x6b, 0x49, 0x1f, 0x48, 0x01, 0xc3, 0x7d, 0x9e, 0x8b, 0x38, - 0x09, 0x65, 0x44, 0xe7, 0xd1, 0xf0, 0xac, 0xc8, 0x18, 0x54, 0x13, 0x34, 0x0f, 0x7b, 0xce, 0x7e, - 0xd5, 0xa3, 0x31, 0xde, 0xc8, 0x97, 0x52, 0x2a, 0x11, 0x93, 0x63, 0x4d, 0xb2, 0x99, 0x43, 0xdc, - 0x0b, 0xd8, 0xb4, 0x11, 0x35, 0x97, 0xea, 0x1e, 0xd4, 0xe9, 0xde, 0xd8, 0xac, 0xbe, 0x55, 0xbc, - 0x2d, 0x9a, 0x7d, 0x22, 0x14, 0x47, 0xaf, 0x3c, 0xc3, 0x65, 0x87, 0xe5, 0x4b, 0x56, 0x3e, 0xb1, - 0x85, 0x1b, 0xf6, 0x1f, 0x07, 0xde, 0x59, 0xa2, 0xb1, 0x5c, 0x5d, 0x1a, 0x59, 0x75, 0xd9, 0x87, - 0x2b, 0xb1, 0x94, 0x6a, 0x20, 0xe2, 0xf3, 0xd0, 0x17, 0x1f, 0xf3, 0x89, 0x4d, 0xa9, 0x32, 0x8c, - 0x27, 0x82, 0x10, 0xa9, 0x27, 0x9e, 0x2e, 0x36, 0x45, 0x90, 0x7d, 0x17, 0xb6, 0x29, 0x0d, 0x4e, - 0xc3, 0x89, 0xf8, 0x59, 0x14, 0x5e, 0x7c, 0xcc, 0x23, 0x49, 0xa7, 0x5f, 0xf5, 0x16, 0x27, 0x30, - 0x92, 0x41, 0x76, 0x0d, 0xf5, 0x95, 0xca, 0x21, 0xec, 0x3b, 0xb0, 0x96, 0x98, 0x7b, 0x52, 0xa7, - 0x08, 0x6c, 0x65, 0x11, 0xd0, 0xb8, 0x67, 0x09, 0xee, 0x6f, 0x1c, 0x58, 0x33, 0x20, 0xfb, 0x26, - 0xd4, 0x10, 0xb6, 0xe1, 
0x6e, 0x15, 0x56, 0x79, 0x7a, 0x0e, 0x83, 0x32, 0xe1, 0xca, 0x7f, 0x25, - 0x02, 0x53, 0x26, 0xac, 0xc8, 0x1e, 0x00, 0x70, 0xa5, 0xe2, 0x70, 0x38, 0x53, 0x02, 0xab, 0x03, - 0xea, 0xb8, 0x99, 0xea, 0x30, 0xb5, 0xff, 0xfc, 0x6e, 0xf7, 0xa9, 0x98, 0xbf, 0xc0, 0x8b, 0xe7, - 0xe5, 0xe8, 0xee, 0xdf, 0x1c, 0xa8, 0xa2, 0x19, 0xb6, 0x03, 0x75, 0x34, 0x94, 0xc6, 0xdc, 0x48, - 0x98, 0x52, 0x51, 0x16, 0x67, 0x1a, 0x2f, 0x0f, 0x5b, 0xe5, 0xb2, 0xb0, 0xdd, 0x81, 0x96, 0x0d, - 0x12, 0xca, 0x89, 0x09, 0x70, 0x11, 0x2c, 0xed, 0xa2, 0xf6, 0xd5, 0x76, 0xf1, 0x6f, 0xc7, 0xd6, - 0x65, 0x93, 0x64, 0x98, 0x29, 0x61, 0x94, 0x4c, 0x85, 0xaf, 0x44, 0x70, 0x6a, 0x93, 0x99, 0x6a, - 0x57, 0x09, 0x66, 0xef, 0xc2, 0x66, 0x0a, 0xf5, 0xe7, 0x68, 0x7c, 0x95, 0xfc, 0x2b, 0xa1, 0x6c, - 0x0f, 0x9a, 0x74, 0x53, 0xa9, 0x50, 0xd9, 0x2a, 0x9c, 0x87, 0x70, 0xa3, 0xbe, 0x9c, 0x4c, 0xc7, - 0x42, 0x89, 0xe0, 0x23, 0x39, 0x4c, 0x6c, 0x1d, 0x29, 0x80, 0x58, 0x8b, 0x68, 0x11, 0x31, 0x74, - 0x12, 0x65, 0x00, 0xfa, 0x9d, 0xa9, 0xd4, 0xee, 0xd4, 0xc9, 0x9d, 0x32, 0xec, 0x7e, 0x1b, 0xb6, - 0xf5, 0x96, 0xb1, 0xf2, 0xda, 0xc2, 0x89, 0x05, 0xdf, 0x97, 0x53, 0x61, 0x0e, 0x51, 0x0b, 0xee, - 0xa1, 0x2d, 0xb2, 0x9a, 0x6a, 0xae, 0x79, 0x07, 0xd6, 0x15, 0x1f, 0xe1, 0x3d, 0xd0, 0x99, 0xd7, - 0xf0, 0x52, 0xd9, 0xfd, 0x08, 0xae, 0x66, 0x2b, 0x5e, 0xf4, 0xd2, 0x35, 0x3d, 0xa8, 0x93, 0x4a, - 0x9b, 0xab, 0x9d, 0xd2, 0x1d, 0xd7, 0xf4, 0x01, 0x52, 0x3c, 0xc3, 0x74, 0x1f, 0xe4, 0x1d, 0x35, - 0x93, 0x69, 0x5a, 0x39, 0xb9, 0xb4, 0x62, 0x50, 0x55, 0xf8, 0x96, 0xae, 0x92, 0x33, 0x34, 0x76, - 0x1f, 0xc3, 0x4e, 0xba, 0x98, 0xce, 0x3d, 0xc9, 0xf7, 0x20, 0xda, 0xdd, 0xb4, 0x4a, 0x68, 0x11, - 0x83, 0x40, 0x6d, 0x83, 0x7d, 0x6e, 0x48, 0x70, 0x3f, 0x80, 0xeb, 0x0b, 0x9a, 0xcc, 0xae, 0xf0, - 0x48, 0x2c, 0x68, 0x42, 0x91, 0x01, 0xee, 0x3d, 0x58, 0xb7, 0x4b, 0xc8, 0xc5, 0x79, 0x1a, 0x5e, - 0x1a, 0x2f, 0x7f, 0xdd, 0xdc, 0x67, 0x70, 0xa3, 0x64, 0x2e, 0x17, 0xc6, 0x83, 0xb2, 0xc1, 0x66, - 0x6f, 0x3b, 0x2b, 0xb2, 0x66, 0x26, 0xef, 0x43, 0x1f, 0x6a, 0x94, 0xae, 0xec, 0x3e, 0xac, 0x0d, - 0xe9, 0xde, 0xdb, 0x75, 0xb7, 0xd3, 0x75, 0xba, 0xf9, 0x3b, 0xbf, 0xdb, 0xf5, 0x44, 0x22, 0x67, - 0xb1, 0x2f, 0xe8, 0x95, 0xf6, 0x2c, 0xdf, 0xdd, 0x84, 0x8d, 0xe7, 0xb3, 0x24, 0x2d, 0xf3, 0xee, - 0x9f, 0x1d, 0xd8, 0x42, 0x80, 0xd2, 0xc9, 0x46, 0xf5, 0xbd, 0xb4, 0xf6, 0xe3, 0x29, 0x6c, 0xf4, - 0xaf, 0x61, 0xbf, 0xf2, 0xcf, 0x2f, 0x6f, 0xb7, 0x9e, 0xc7, 0x82, 0x8f, 0xc7, 0xd2, 0xd7, 0x6c, - 0x5b, 0xf4, 0xbf, 0x05, 0x95, 0x30, 0xd0, 0x45, 0xe7, 0x52, 0x2e, 0x32, 0xd8, 0x11, 0x80, 0x7e, - 0xa8, 0x8f, 0xb9, 0xe2, 0xed, 0xea, 0xdb, 0xf8, 0x39, 0xa2, 0x7b, 0xa2, 0x5d, 0xd4, 0x3b, 0x31, - 0x2e, 0xfe, 0x1f, 0x21, 0xb8, 0x03, 0x60, 0x7a, 0x3a, 0xbc, 0xd1, 0x3b, 0x85, 0x77, 0x6e, 0xc3, - 0x6e, 0xca, 0xfd, 0x01, 0x34, 0x9e, 0x85, 0xd1, 0xd9, 0x60, 0x1c, 0xfa, 0x82, 0xdd, 0x85, 0xda, - 0x38, 0x8c, 0xce, 0xac, 0xad, 0x9b, 0x8b, 0xb6, 0xd0, 0x46, 0x17, 0x17, 0x78, 0x9a, 0xe9, 0x7e, - 0x0a, 0x0c, 0x31, 0xfb, 0xde, 0x65, 0x57, 0x53, 0x67, 0xa5, 0x93, 0xcb, 0x4a, 0xcc, 0xe2, 0x51, - 0x2c, 0x67, 0xd3, 0xbe, 0xcd, 0x56, 0x2b, 0x22, 0x7f, 0x4c, 0x1d, 0x9d, 0x2e, 0xac, 0x5a, 0x70, - 0x7f, 0xed, 0xc0, 0x3b, 0x05, 0xe5, 0x59, 0x0a, 0x8b, 0x44, 0x85, 0x13, 0xae, 0x44, 0x40, 0x16, - 0xd6, 0xbd, 0x0c, 0xa0, 0xfe, 0x67, 0xca, 0xa3, 0x1f, 0xcb, 0x59, 0xa4, 0x4c, 0x79, 0xcb, 0x00, - 0xd6, 0xcd, 0x5e, 0x6e, 0xfd, 0x7a, 0x5c, 0x2d, 0xbc, 0x40, 0x0b, 0xef, 0xf6, 0xf7, 0x61, 0xc3, - 0xe3, 0xbf, 0x78, 0x1c, 0x26, 0x4a, 0x8e, 0x62, 0x3e, 0xc1, 0x38, 0x0e, 0x67, 0xfe, 0x99, 0x50, - 0x64, 0xb8, 0xea, 0x19, 0x09, 0x77, 0xe0, 0xe7, 
0x2c, 0x6a, 0xc1, 0xfd, 0xa3, 0x03, 0xcd, 0x9c, - 0x5a, 0xd6, 0x87, 0xed, 0x31, 0x57, 0x22, 0xf2, 0xe7, 0x3f, 0x7f, 0x65, 0x55, 0x9a, 0x60, 0x5f, - 0x4b, 0xfd, 0xc8, 0xdb, 0xf3, 0xb6, 0x0c, 0x3f, 0xf3, 0xa0, 0x0b, 0xf5, 0x44, 0x71, 0x15, 0xfa, - 0x0b, 0xad, 0x07, 0x1d, 0xf7, 0x27, 0xcf, 0x06, 0x34, 0xeb, 0x19, 0x16, 0x7a, 0x2c, 0xe2, 0x58, - 0xc6, 0x89, 0x09, 0xae, 0x91, 0xdc, 0xa7, 0x70, 0x23, 0xe7, 0xda, 0x60, 0x36, 0x99, 0xf0, 0x78, - 0xfe, 0x35, 0x0f, 0xd0, 0xfd, 0xab, 0x03, 0x9d, 0x65, 0xda, 0xb2, 0x13, 0x2b, 0x9e, 0x49, 0x25, - 0x7f, 0x26, 0xef, 0xc2, 0x26, 0xf9, 0x34, 0x48, 0x29, 0x15, 0xa2, 0x94, 0x50, 0x6c, 0xb6, 0xa7, - 0xf7, 0xef, 0x9b, 0x27, 0x15, 0x87, 0x1a, 0x39, 0xa2, 0x97, 0x85, 0x90, 0x23, 0x8d, 0x1c, 0x9a, - 0x77, 0x04, 0x87, 0x84, 0x1c, 0x1d, 0x52, 0x03, 0x8a, 0xc8, 0xd1, 0xa1, 0xfb, 0x07, 0x07, 0x5a, - 0x85, 0x58, 0x15, 0x4a, 0x5d, 0xcd, 0x94, 0xba, 0x0d, 0x70, 0x22, 0xe3, 0xab, 0x13, 0xa1, 0xf4, - 0x92, 0xdc, 0x72, 0x3c, 0xe7, 0x25, 0x4a, 0xfa, 0xc5, 0x6b, 0x78, 0x4e, 0x82, 0xd2, 0x90, 0x7c, - 0x58, 0xf7, 0x9c, 0x21, 0x4a, 0x81, 0xb1, 0xef, 0x04, 0xd4, 0x6a, 0x28, 0xae, 0x66, 0xba, 0x03, - 0xae, 0x79, 0x46, 0x42, 0x8b, 0x67, 0x61, 0x14, 0x50, 0xcf, 0x5b, 0xf3, 0x68, 0xdc, 0xfb, 0x9d, - 0x03, 0x75, 0xac, 0x00, 0x22, 0x66, 0x3f, 0x84, 0x46, 0x5a, 0xae, 0x58, 0xf6, 0x05, 0x57, 0x2e, - 0x61, 0x9d, 0x6b, 0x85, 0xa9, 0xb4, 0xdc, 0xad, 0xb0, 0x1f, 0x41, 0x33, 0x25, 0xbf, 0xe8, 0x7d, - 0x1d, 0x15, 0xbd, 0x3f, 0x39, 0xb0, 0x65, 0xce, 0xf3, 0x91, 0x88, 0x44, 0xcc, 0x95, 0x4c, 0x1d, - 0xa3, 0x5a, 0x53, 0xd2, 0x9a, 0x2f, 0x5c, 0x97, 0x3b, 0xf6, 0x04, 0xe0, 0x91, 0x50, 0xf6, 0x42, - 0xdc, 0x5c, 0x7a, 0xfb, 0x8c, 0x8e, 0x5b, 0xcb, 0x27, 0x53, 0x07, 0xff, 0x52, 0x85, 0x35, 0xfc, - 0x7a, 0x0b, 0x45, 0xcc, 0x1e, 0x43, 0xeb, 0x27, 0x61, 0x14, 0xa4, 0x5f, 0xb1, 0x6c, 0xc9, 0x67, - 0xaf, 0xd5, 0xdb, 0x59, 0x36, 0x95, 0x8b, 0xdc, 0x86, 0xfd, 0x46, 0xf0, 0x45, 0xa4, 0xd8, 0x25, - 0x1f, 0x63, 0x9d, 0xeb, 0x0b, 0x78, 0xaa, 0xe2, 0x21, 0x34, 0x73, 0x1f, 0x7a, 0xf9, 0x4d, 0x2e, - 0x7c, 0xfe, 0xbd, 0x4d, 0xcd, 0x23, 0x80, 0xac, 0x99, 0x60, 0xcb, 0xda, 0x0f, 0xab, 0xe4, 0xe6, - 0xd2, 0xb9, 0x54, 0xd1, 0x53, 0xbb, 0x25, 0xdd, 0x95, 0xbc, 0x55, 0xd5, 0x37, 0x96, 0x76, 0x39, - 0x39, 0x65, 0x2f, 0xe0, 0x4a, 0xe9, 0xb1, 0x67, 0xb7, 0x17, 0xd7, 0x14, 0xfa, 0x97, 0xce, 0xde, - 0xe5, 0x84, 0x54, 0xef, 0xa7, 0xb9, 0xd6, 0xc9, 0x36, 0x11, 0xff, 0x5b, 0xb3, 0x7b, 0x19, 0x21, - 0xef, 0x73, 0xef, 0xa7, 0xb0, 0x35, 0x50, 0xb1, 0xe0, 0x93, 0x30, 0x1a, 0xd9, 0x8c, 0x79, 0x00, - 0x75, 0xf3, 0xb5, 0xfb, 0x55, 0x4f, 0xf8, 0xd0, 0xe9, 0xb7, 0x3f, 0x7f, 0xbd, 0xeb, 0x7c, 0xf1, - 0x7a, 0xd7, 0xf9, 0xd7, 0xeb, 0x5d, 0xe7, 0xf7, 0x6f, 0x76, 0x57, 0xbe, 0x78, 0xb3, 0xbb, 0xf2, - 0x8f, 0x37, 0xbb, 0x2b, 0xc3, 0x3a, 0xfd, 0x95, 0xf4, 0xfe, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, - 0x06, 0xd0, 0x50, 0x6a, 0xcb, 0x12, 0x00, 0x00, + 0x5a, 0x0c, 0xb5, 0x2b, 0x3b, 0xda, 0xb8, 0x96, 0x6c, 0x28, 0x28, 0x84, 0x43, 0x92, 0x4d, 0xbc, + 0x64, 0x47, 0x26, 0x87, 0xbd, 0x50, 0xad, 0x99, 0x8e, 0x32, 0x65, 0x69, 0x5a, 0x3b, 0xd3, 0x32, + 0x11, 0x27, 0x38, 0xc0, 0x89, 0x03, 0x17, 0x0e, 0x5c, 0xa8, 0xe2, 0xb4, 0x55, 0x7c, 0x0d, 0x2e, + 0x7b, 0xa2, 0xf6, 0x48, 0x71, 0xd8, 0xa2, 0x92, 0x4f, 0xc0, 0x37, 0xa0, 0xde, 0xeb, 0xee, 0xf9, + 0x27, 0x39, 0xd4, 0x66, 0x4f, 0xea, 0xf7, 0xeb, 0x5f, 0xbf, 0x7e, 0xfd, 0xde, 0xeb, 0xf7, 0x7a, + 0x04, 0x57, 0xa7, 0x67, 0xa3, 0x03, 0x25, 0x26, 0x53, 0x39, 0x1d, 0xea, 0xdf, 0xee, 0x34, 0x96, + 0x4a, 0xb2, 0x35, 0x03, 0x76, 0x2e, 0xab, 0x98, 0xfb, 0xe2, 0xe0, 0xfc, 0xf6, 0x01, 0x0d, 0xf4, + 0x74, 0x67, 0xc7, 0x97, 
0x93, 0x89, 0x8c, 0x10, 0xd6, 0x23, 0x83, 0xbf, 0x3f, 0x0a, 0xd5, 0x8b, + 0xd9, 0xb0, 0xeb, 0xcb, 0xc9, 0xc1, 0x48, 0x8e, 0xe4, 0x01, 0xc1, 0xc3, 0xd9, 0x73, 0x92, 0x48, + 0xa0, 0x91, 0xa6, 0xbb, 0x7f, 0x70, 0x60, 0xeb, 0x14, 0xd5, 0xf6, 0xe7, 0x8f, 0x8e, 0x3d, 0xf1, + 0xf9, 0x4c, 0x24, 0x8a, 0xb5, 0x61, 0x8d, 0xb6, 0x7a, 0x74, 0xdc, 0x76, 0xf6, 0x9c, 0xfd, 0x0d, + 0xcf, 0x8a, 0x6c, 0x17, 0x60, 0x38, 0x96, 0xfe, 0xd9, 0x40, 0xf1, 0x58, 0xb5, 0x57, 0xf7, 0x9c, + 0xfd, 0x86, 0x97, 0x43, 0x58, 0x07, 0xd6, 0x49, 0xba, 0x1f, 0x05, 0xed, 0x0a, 0xcd, 0xa6, 0x32, + 0xbb, 0x01, 0x8d, 0xcf, 0x67, 0x22, 0x9e, 0x9f, 0xc8, 0x40, 0xb4, 0x6b, 0x34, 0x99, 0x01, 0x6e, + 0x04, 0xdb, 0x39, 0x3b, 0x92, 0xa9, 0x8c, 0x12, 0xc1, 0x6e, 0x41, 0x8d, 0x76, 0x26, 0x33, 0x9a, + 0xbd, 0xcd, 0xae, 0xf1, 0x49, 0x97, 0xa8, 0x9e, 0x9e, 0x64, 0x1f, 0xc0, 0xda, 0x44, 0xa8, 0x38, + 0xf4, 0x13, 0xb2, 0xa8, 0xd9, 0xbb, 0x56, 0xe4, 0xa1, 0xca, 0x13, 0x4d, 0xf0, 0x2c, 0xd3, 0x65, + 0xb9, 0x73, 0x9b, 0x49, 0xf7, 0x9f, 0xab, 0xd0, 0x1a, 0x08, 0x1e, 0xfb, 0x2f, 0xac, 0x27, 0x3e, + 0x82, 0xea, 0x29, 0x1f, 0x25, 0x6d, 0x67, 0xaf, 0xb2, 0xdf, 0xec, 0xed, 0xa5, 0x7a, 0x0b, 0xac, + 0x2e, 0x52, 0xee, 0x47, 0x2a, 0x9e, 0xf7, 0xab, 0x5f, 0x7e, 0x7d, 0x73, 0xc5, 0xa3, 0x35, 0xec, + 0x16, 0xb4, 0x4e, 0xc2, 0xe8, 0x78, 0x16, 0x73, 0x15, 0xca, 0xe8, 0x44, 0x1b, 0xd7, 0xf2, 0x8a, + 0x20, 0xb1, 0xf8, 0xcb, 0x1c, 0xab, 0x62, 0x58, 0x79, 0x90, 0x5d, 0x86, 0xda, 0x93, 0x70, 0x12, + 0xaa, 0x76, 0x95, 0x66, 0xb5, 0x80, 0x68, 0x42, 0x81, 0xa8, 0x69, 0x94, 0x04, 0xb6, 0x05, 0x15, + 0x11, 0x05, 0xed, 0x3a, 0x61, 0x38, 0x44, 0xde, 0xa7, 0xe8, 0xe8, 0xf6, 0x3a, 0x79, 0x5d, 0x0b, + 0x6c, 0x1f, 0x2e, 0x0d, 0xa6, 0x3c, 0x4a, 0x9e, 0x8a, 0x18, 0x7f, 0x07, 0x42, 0xb5, 0x1b, 0xb4, + 0xa6, 0x0c, 0x77, 0x3e, 0x84, 0x46, 0x7a, 0x44, 0x54, 0x7f, 0x26, 0xe6, 0x14, 0x91, 0x86, 0x87, + 0x43, 0x54, 0x7f, 0xce, 0xc7, 0x33, 0x61, 0xf2, 0x41, 0x0b, 0x1f, 0xad, 0xfe, 0xd0, 0x71, 0x7f, + 0x5b, 0x01, 0xa6, 0x5d, 0xd5, 0xc7, 0x2c, 0xb0, 0x5e, 0xbd, 0x03, 0x8d, 0xc4, 0x3a, 0xd0, 0x84, + 0x76, 0x67, 0xb9, 0x6b, 0xbd, 0x8c, 0x88, 0x59, 0x49, 0xb9, 0xf4, 0xe8, 0xd8, 0x6c, 0x64, 0x45, + 0xcc, 0x2c, 0x3a, 0xfa, 0x53, 0x3e, 0x12, 0xc6, 0x7f, 0x19, 0x80, 0x1e, 0x9e, 0xf2, 0x91, 0x48, + 0x4e, 0xa5, 0x56, 0x6d, 0x7c, 0x58, 0x04, 0x31, 0x73, 0x45, 0xe4, 0xcb, 0x20, 0x8c, 0x46, 0x26, + 0x39, 0x53, 0x19, 0x35, 0x84, 0x51, 0x20, 0x5e, 0xa2, 0xba, 0x41, 0xf8, 0x1b, 0x61, 0x7c, 0x5b, + 0x04, 0x99, 0x0b, 0x1b, 0x4a, 0x2a, 0x3e, 0xf6, 0x84, 0x2f, 0xe3, 0x20, 0x69, 0xaf, 0x11, 0xa9, + 0x80, 0x21, 0x27, 0xe0, 0x8a, 0xdf, 0xb7, 0x3b, 0xe9, 0x80, 0x14, 0x30, 0x3c, 0xe7, 0xb9, 0x88, + 0x93, 0x50, 0x46, 0x14, 0x8f, 0x86, 0x67, 0x45, 0xc6, 0xa0, 0x9a, 0xe0, 0xf6, 0xb0, 0xe7, 0xec, + 0x57, 0x3d, 0x1a, 0xe3, 0x8d, 0x7c, 0x2e, 0xa5, 0x12, 0x31, 0x19, 0xd6, 0xa4, 0x3d, 0x73, 0x88, + 0xfb, 0x12, 0x36, 0xad, 0x47, 0xcd, 0xa5, 0xba, 0x03, 0x75, 0xba, 0x37, 0x36, 0xab, 0x6f, 0x14, + 0x6f, 0x8b, 0x66, 0x9f, 0x08, 0xc5, 0xd1, 0x2a, 0xcf, 0x70, 0xd9, 0x61, 0xf9, 0x92, 0x95, 0x23, + 0xb6, 0x70, 0xc3, 0xbe, 0x58, 0x85, 0x77, 0x96, 0x68, 0x2c, 0x57, 0x97, 0x46, 0x56, 0x5d, 0xf6, + 0xe1, 0x52, 0x2c, 0xa5, 0x1a, 0x88, 0xf8, 0x3c, 0xf4, 0xc5, 0x27, 0x7c, 0x62, 0x53, 0xaa, 0x0c, + 0x63, 0x44, 0x10, 0x22, 0xf5, 0xc4, 0xd3, 0xc5, 0xa6, 0x08, 0xb2, 0xf7, 0x60, 0x9b, 0xd2, 0xe0, + 0x34, 0x9c, 0x88, 0x5f, 0x46, 0xe1, 0xcb, 0x4f, 0x78, 0x24, 0x29, 0xfa, 0x55, 0x6f, 0x71, 0x02, + 0x3d, 0x19, 0x64, 0xd7, 0x50, 0x5f, 0xa9, 0x1c, 0xc2, 0x7e, 0x00, 0x6b, 0x89, 0xb9, 0x27, 0x75, + 0xf2, 0xc0, 0x56, 0xe6, 0x01, 0x8d, 0x7b, 0x96, 
0xc0, 0xde, 0x83, 0x75, 0x33, 0xc4, 0x3c, 0xa8, + 0x2c, 0x25, 0xa7, 0x0c, 0xf7, 0xf7, 0x0e, 0xac, 0x19, 0x94, 0x7d, 0x17, 0x6a, 0x88, 0xdb, 0xe0, + 0xb4, 0x0a, 0xcb, 0x3c, 0x3d, 0x87, 0x2e, 0x9c, 0x70, 0xe5, 0xbf, 0x10, 0x81, 0x29, 0x2a, 0x56, + 0x64, 0xf7, 0x00, 0xb8, 0x52, 0x71, 0x38, 0x9c, 0x29, 0x81, 0xb5, 0x04, 0x75, 0x5c, 0x4f, 0x75, + 0x98, 0x4e, 0x71, 0x7e, 0xbb, 0xfb, 0x58, 0xcc, 0x9f, 0xe1, 0x35, 0xf5, 0x72, 0x74, 0xf7, 0x1f, + 0x0e, 0x54, 0x71, 0x1b, 0xb6, 0x03, 0x75, 0xdc, 0x28, 0x8d, 0x90, 0x91, 0x30, 0x01, 0xa3, 0x2c, + 0x2a, 0x34, 0x5e, 0xee, 0xe4, 0xca, 0x45, 0x4e, 0xbe, 0x05, 0x2d, 0xeb, 0x52, 0x94, 0x13, 0x13, + 0x8e, 0x22, 0x58, 0x3a, 0x45, 0xed, 0x9b, 0x9d, 0xe2, 0xbf, 0x8e, 0xad, 0xe2, 0x26, 0x25, 0x31, + 0xaf, 0xc2, 0x28, 0x99, 0x0a, 0x5f, 0x89, 0xe0, 0xd4, 0xa6, 0x3e, 0x55, 0xba, 0x12, 0xcc, 0xde, + 0x85, 0xcd, 0x14, 0xea, 0xcf, 0x71, 0xf3, 0x55, 0xb2, 0xaf, 0x84, 0xb2, 0x3d, 0x68, 0xd2, 0xbd, + 0xa6, 0xb2, 0x66, 0x6b, 0x76, 0x1e, 0xc2, 0x83, 0xfa, 0x72, 0x32, 0x1d, 0x0b, 0x25, 0x82, 0x8f, + 0xe5, 0x30, 0xb1, 0x55, 0xa7, 0x00, 0x62, 0xe5, 0xa2, 0x45, 0xc4, 0xd0, 0x29, 0x97, 0x01, 0x68, + 0x77, 0xa6, 0x52, 0x9b, 0x53, 0x27, 0x73, 0xca, 0xb0, 0xfb, 0x7d, 0xd8, 0xd6, 0x47, 0xc6, 0x3a, + 0x6d, 0xcb, 0x2c, 0xb6, 0x07, 0x5f, 0x4e, 0x85, 0x09, 0xa2, 0x16, 0xdc, 0x43, 0x5b, 0x92, 0x35, + 0xd5, 0x14, 0x85, 0x0e, 0xac, 0x2b, 0x3e, 0xc2, 0x5b, 0xa3, 0x33, 0xaf, 0xe1, 0xa5, 0xb2, 0xfb, + 0x31, 0x5c, 0xce, 0x56, 0x3c, 0xeb, 0xa5, 0x6b, 0x7a, 0x50, 0x27, 0x95, 0x36, 0x57, 0x3b, 0xa5, + 0x8a, 0xa0, 0xe9, 0x03, 0xa4, 0x78, 0x86, 0xe9, 0xde, 0xcb, 0x1b, 0x6a, 0x26, 0xd3, 0xb4, 0x72, + 0x72, 0x69, 0xc5, 0xa0, 0xaa, 0xb0, 0xf3, 0xae, 0x92, 0x31, 0x34, 0x76, 0x1f, 0xc2, 0x4e, 0xba, + 0x98, 0xe2, 0x9e, 0xe4, 0x5f, 0x2c, 0xda, 0xdc, 0xb4, 0xa6, 0x68, 0x11, 0x9d, 0x40, 0x8f, 0x0c, + 0xdb, 0x9c, 0x48, 0x70, 0x3f, 0x84, 0xab, 0x0b, 0x9a, 0xcc, 0xa9, 0x30, 0x24, 0x16, 0x34, 0xae, + 0xc8, 0x00, 0xf7, 0x0e, 0xac, 0xdb, 0x25, 0x64, 0xe2, 0x3c, 0x75, 0x2f, 0x8d, 0x97, 0xf7, 0x42, + 0xf7, 0x09, 0x5c, 0x2b, 0x6d, 0x97, 0x73, 0xe3, 0x41, 0x79, 0xc3, 0x66, 0x6f, 0x3b, 0x2b, 0xc9, + 0x66, 0x26, 0x6f, 0x43, 0x1f, 0x6a, 0x94, 0xae, 0xec, 0x2e, 0xac, 0x0d, 0xe9, 0xde, 0xdb, 0x75, + 0x37, 0xd3, 0x75, 0xfa, 0xa9, 0x78, 0x7e, 0xbb, 0xeb, 0x89, 0x44, 0xce, 0x62, 0x5f, 0x50, 0x4f, + 0xf7, 0x2c, 0xdf, 0xdd, 0x84, 0x8d, 0xa7, 0xb3, 0x24, 0x6d, 0x0a, 0xee, 0xdf, 0x1c, 0xd8, 0x42, + 0x80, 0xd2, 0xc9, 0x7a, 0xf5, 0xfd, 0xb4, 0x53, 0x60, 0x14, 0x36, 0xfa, 0x57, 0xf0, 0x75, 0xf3, + 0xef, 0xaf, 0x6f, 0xb6, 0x9e, 0xc6, 0x82, 0x8f, 0xc7, 0xd2, 0xd7, 0x6c, 0xdb, 0x22, 0xbe, 0x07, + 0x95, 0x30, 0xd0, 0x45, 0xe7, 0x42, 0x2e, 0x32, 0xd8, 0x11, 0x80, 0x6e, 0xeb, 0xc7, 0x5c, 0xf1, + 0x76, 0xf5, 0x4d, 0xfc, 0x1c, 0xd1, 0x3d, 0xd1, 0x26, 0xea, 0x93, 0x18, 0x13, 0xbf, 0x85, 0x0b, + 0x6e, 0x01, 0x98, 0x17, 0x20, 0xde, 0xe8, 0x9d, 0x42, 0x57, 0xdc, 0xb0, 0x87, 0x72, 0x7f, 0x0c, + 0x8d, 0x27, 0x61, 0x74, 0x36, 0x18, 0x87, 0xbe, 0x60, 0xb7, 0xa1, 0x36, 0x0e, 0xa3, 0x33, 0xbb, + 0xd7, 0xf5, 0xc5, 0xbd, 0x70, 0x8f, 0x2e, 0x2e, 0xf0, 0x34, 0xd3, 0xfd, 0x0c, 0x18, 0x62, 0xb6, + 0x3b, 0x66, 0x57, 0x53, 0x67, 0xa5, 0x93, 0xcb, 0x4a, 0xcc, 0xe2, 0x51, 0x2c, 0x67, 0xd3, 0xbe, + 0xcd, 0x56, 0x2b, 0x22, 0x7f, 0x4c, 0xef, 0x3f, 0x5d, 0x58, 0xb5, 0xe0, 0xfe, 0xce, 0x81, 0x77, + 0x0a, 0xca, 0xb3, 0x14, 0x16, 0x89, 0x0a, 0x27, 0x5c, 0x89, 0x80, 0x76, 0x58, 0xf7, 0x32, 0x80, + 0x5e, 0x4b, 0x53, 0x1e, 0xfd, 0x4c, 0xce, 0x22, 0x65, 0xca, 0x5b, 0x06, 0xb0, 0x6e, 0xd6, 0xe7, + 0x75, 0xf7, 0xb8, 0x5c, 0xe8, 0x40, 0x0b, 0x5d, 0xfe, 0x47, 0xb0, 0xe1, 
0xf1, 0x5f, 0x3f, 0x0c, + 0x13, 0x25, 0x47, 0x31, 0x9f, 0xa0, 0x1f, 0x87, 0x33, 0xff, 0x4c, 0x28, 0xda, 0xb8, 0xea, 0x19, + 0x09, 0x4f, 0xe0, 0xe7, 0x76, 0xd4, 0x82, 0xfb, 0x17, 0x07, 0x9a, 0x39, 0xb5, 0xac, 0x0f, 0xdb, + 0x63, 0xae, 0x44, 0xe4, 0xcf, 0x7f, 0xf5, 0xc2, 0xaa, 0x34, 0xce, 0xbe, 0x92, 0xda, 0x91, 0xdf, + 0xcf, 0xdb, 0x32, 0xfc, 0xcc, 0x82, 0x2e, 0xd4, 0x13, 0xc5, 0x55, 0xe8, 0x2f, 0x3c, 0x54, 0x28, + 0xdc, 0x9f, 0x3e, 0x19, 0xd0, 0xac, 0x67, 0x58, 0x68, 0xb1, 0x88, 0x63, 0x19, 0x27, 0xc6, 0xb9, + 0x46, 0x72, 0x1f, 0xc3, 0xb5, 0x9c, 0x69, 0x83, 0xd9, 0x64, 0xc2, 0xe3, 0xf9, 0x5b, 0x06, 0xd0, + 0xfd, 0xbb, 0x03, 0x9d, 0x65, 0xda, 0xb2, 0x88, 0x15, 0x63, 0x52, 0xc9, 0xc7, 0xe4, 0x5d, 0xd8, + 0x24, 0x9b, 0x06, 0x29, 0xa5, 0x42, 0x94, 0x12, 0x8a, 0x4f, 0xf3, 0xe9, 0xdd, 0xbb, 0xa6, 0xa5, + 0xe2, 0x50, 0x23, 0x47, 0xd4, 0x59, 0x08, 0x39, 0xd2, 0xc8, 0xa1, 0xe9, 0x23, 0x38, 0x24, 0xe4, + 0xe8, 0x90, 0x9e, 0xab, 0x88, 0x1c, 0x1d, 0xba, 0x7f, 0x76, 0xa0, 0x55, 0xf0, 0x55, 0xa1, 0xd4, + 0xd5, 0x4c, 0xa9, 0xdb, 0x00, 0x27, 0x32, 0xb6, 0x3a, 0x11, 0x4a, 0xcf, 0xc9, 0x2c, 0xc7, 0x73, + 0x9e, 0xa3, 0xa4, 0x3b, 0x5e, 0xc3, 0x73, 0x12, 0x94, 0x86, 0x64, 0xc3, 0xba, 0xe7, 0x0c, 0x51, + 0x0a, 0xcc, 0xfe, 0x4e, 0x40, 0x4f, 0x0d, 0xc5, 0xd5, 0x4c, 0xbf, 0x97, 0x6b, 0x9e, 0x91, 0x70, + 0xc7, 0xb3, 0x30, 0x0a, 0xe8, 0x85, 0x5c, 0xf3, 0x68, 0xdc, 0xfb, 0xa3, 0x03, 0x75, 0xac, 0x00, + 0x22, 0x66, 0x3f, 0x81, 0x46, 0x5a, 0xae, 0x58, 0xf6, 0xbd, 0x57, 0x2e, 0x61, 0x9d, 0x2b, 0x85, + 0xa9, 0xb4, 0xdc, 0xad, 0xb0, 0x9f, 0x42, 0x33, 0x25, 0x3f, 0xeb, 0xbd, 0x8d, 0x8a, 0xde, 0x5f, + 0x1d, 0xd8, 0x32, 0xf1, 0x7c, 0x20, 0x22, 0x11, 0x73, 0x25, 0x53, 0xc3, 0xa8, 0xd6, 0x94, 0xb4, + 0xe6, 0x0b, 0xd7, 0xc5, 0x86, 0x3d, 0x02, 0x78, 0x20, 0x94, 0xbd, 0x10, 0xd7, 0x97, 0xde, 0x3e, + 0xa3, 0xe3, 0xc6, 0xf2, 0xc9, 0xd4, 0xc0, 0x2f, 0xaa, 0xb0, 0x86, 0xdf, 0x7a, 0xa1, 0x88, 0xd9, + 0x43, 0x68, 0xfd, 0x3c, 0x8c, 0x82, 0xf4, 0x9b, 0x97, 0x2d, 0xf9, 0x48, 0xb6, 0x7a, 0x3b, 0xcb, + 0xa6, 0x72, 0x9e, 0xdb, 0xb0, 0x5f, 0x14, 0xbe, 0x88, 0x14, 0xbb, 0xe0, 0xd3, 0xad, 0x73, 0x75, + 0x01, 0x4f, 0x55, 0xdc, 0x87, 0x66, 0xee, 0xb3, 0x30, 0x7f, 0xc8, 0x85, 0x8f, 0xc5, 0x37, 0xa9, + 0x79, 0x00, 0x90, 0x3d, 0x26, 0xd8, 0xb2, 0xe7, 0x87, 0x55, 0x72, 0x7d, 0xe9, 0x5c, 0xaa, 0xe8, + 0xb1, 0x3d, 0x92, 0x7e, 0x95, 0xbc, 0x51, 0xd5, 0x77, 0x96, 0xbe, 0x72, 0x72, 0xca, 0x9e, 0xc1, + 0xa5, 0x52, 0xb3, 0x67, 0x37, 0x17, 0xd7, 0x14, 0xde, 0x2f, 0x9d, 0xbd, 0x8b, 0x09, 0xa9, 0xde, + 0xcf, 0x72, 0x4f, 0x27, 0xfb, 0x88, 0xf8, 0xff, 0x9a, 0xdd, 0x8b, 0x08, 0x79, 0x9b, 0x7b, 0xbf, + 0x80, 0xad, 0x81, 0x8a, 0x05, 0x9f, 0x84, 0xd1, 0xc8, 0x66, 0xcc, 0x3d, 0xa8, 0x9b, 0x6f, 0xe3, + 0x6f, 0x1a, 0xe1, 0x43, 0xa7, 0xdf, 0xfe, 0xf2, 0xd5, 0xae, 0xf3, 0xd5, 0xab, 0x5d, 0xe7, 0x3f, + 0xaf, 0x76, 0x9d, 0x3f, 0xbd, 0xde, 0x5d, 0xf9, 0xea, 0xf5, 0xee, 0xca, 0xbf, 0x5e, 0xef, 0xae, + 0x0c, 0xeb, 0xf4, 0xc7, 0xd3, 0x07, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, 0xff, 0xf0, 0x4f, 0x79, + 0xf9, 0x12, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
@@ -3006,6 +3015,20 @@ func (m *TraceSearchMetadata) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.SpanSets) > 0 { + for iNdEx := len(m.SpanSets) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.SpanSets[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTempo(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + } if m.SpanSet != nil { { size, err := m.SpanSet.MarshalToSizedBuffer(dAtA[:i]) @@ -4271,6 +4294,12 @@ func (m *TraceSearchMetadata) Size() (n int) { l = m.SpanSet.Size() n += 1 + l + sovTempo(uint64(l)) } + if len(m.SpanSets) > 0 { + for _, e := range m.SpanSets { + l = e.Size() + n += 1 + l + sovTempo(uint64(l)) + } + } return n } @@ -6060,6 +6089,40 @@ func (m *TraceSearchMetadata) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SpanSets", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTempo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTempo + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTempo + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SpanSets = append(m.SpanSets, &SpanSet{}) + if err := m.SpanSets[len(m.SpanSets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTempo(dAtA[iNdEx:]) diff --git a/pkg/tempopb/tempo.proto b/pkg/tempopb/tempo.proto index 20d5511f8af..7edbb948e44 100644 --- a/pkg/tempopb/tempo.proto +++ b/pkg/tempopb/tempo.proto @@ -89,7 +89,8 @@ message TraceSearchMetadata { string rootTraceName = 3; uint64 startTimeUnixNano = 4; uint32 durationMs = 5; - SpanSet spanSet = 6; // only returned from TraceQL queries + SpanSet spanSet = 6; // deprecated. use SpanSets field below + repeated SpanSet spanSets = 7; } message SpanSet { diff --git a/pkg/traceql/engine.go b/pkg/traceql/engine.go index 0730d4875bd..f650a53fef6 100644 --- a/pkg/traceql/engine.go +++ b/pkg/traceql/engine.go @@ -288,6 +288,21 @@ func (e *Engine) createFetchSpansRequest(searchReq *tempopb.SearchRequest, pipel return req } +func addTraceSearchMetadata(existing []*tempopb.TraceSearchMetadata, new *tempopb.TraceSearchMetadata) []*tempopb.TraceSearchMetadata { + // search for an existing traceid first + for _, t := range existing { + if t.TraceID == new.TraceID { + // found a match, combine into th existing one + CombineSearchResults(t, new) + return existing + } + } + + // otherwise append a new one + return append(existing, new) +} + +// jpe - update this to use Spansets func (e *Engine) asTraceSearchMetadata(spanset *Spanset) *tempopb.TraceSearchMetadata { metadata := &tempopb.TraceSearchMetadata{ TraceID: util.TraceIDToHexString(spanset.TraceID), @@ -330,6 +345,10 @@ func (e *Engine) asTraceSearchMetadata(spanset *Spanset) *tempopb.TraceSearchMet metadata.SpanSet.Spans = append(metadata.SpanSet.Spans, tempopbSpan) } + // create a new slice and add the spanset to it. 
eventually we will deprecate + // metadata.SpanSet + metadata.SpanSets = []*tempopb.SpanSet{metadata.SpanSet} + // add attributes for _, att := range spanset.Attributes { if att.Name == attributeMatched { From 6d0db55ab6593945b48cb5061bc20ab60258402e Mon Sep 17 00:00:00 2001 From: Joe Elliott Date: Mon, 22 May 2023 13:57:28 -0400 Subject: [PATCH 14/18] tests Signed-off-by: Joe Elliott --- pkg/traceql/combine.go | 84 ++++++++++++++ pkg/traceql/combine_test.go | 218 +++++++++++++++++++++++++++++++++++ pkg/traceql/engine_test.go | 220 ++++++++++++++++++------------------ 3 files changed, 414 insertions(+), 108 deletions(-) create mode 100644 pkg/traceql/combine.go create mode 100644 pkg/traceql/combine_test.go diff --git a/pkg/traceql/combine.go b/pkg/traceql/combine.go new file mode 100644 index 00000000000..3de7aa2059a --- /dev/null +++ b/pkg/traceql/combine.go @@ -0,0 +1,84 @@ +package traceql + +import ( + "strings" + + "github.com/grafana/tempo/pkg/tempopb" +) + +// CombineSearchResults overlays the incoming search result with the existing result. This is required +// for the following reason: a trace may be present in multiple blocks, or in partial segments +// in live traces. The results should reflect elements of all segments. +func CombineSearchResults(existing *tempopb.TraceSearchMetadata, incoming *tempopb.TraceSearchMetadata) { + if existing.TraceID == "" { + existing.TraceID = incoming.TraceID + } + + if existing.RootServiceName == "" { + existing.RootServiceName = incoming.RootServiceName + } + + if existing.RootTraceName == "" { + existing.RootTraceName = incoming.RootTraceName + } + + // Earliest start time. + if existing.StartTimeUnixNano > incoming.StartTimeUnixNano || existing.StartTimeUnixNano == 0 { + existing.StartTimeUnixNano = incoming.StartTimeUnixNano + } + + // Longest duration + if existing.DurationMs < incoming.DurationMs || existing.DurationMs == 0 { + existing.DurationMs = incoming.DurationMs + } + + // make a map of existing Spansets + existingSS := make(map[string]*tempopb.SpanSet) + for _, ss := range existing.SpanSets { + existingSS[spansetID(ss)] = ss + } + + // add any new spansets + for _, ss := range incoming.SpanSets { + id := spansetID(ss) + // if not found just add directly + if _, ok := existingSS[id]; !ok { + existing.SpanSets = append(existing.SpanSets, ss) + continue + } + + // otherwise combine with existing + combineSpansets(existingSS[id], ss) + } + + // choose an arbitrary spanset to be the "main" one. this field is deprecated + if len(existing.SpanSets) > 0 { + existing.SpanSet = existing.SpanSets[0] + } +} + +// combineSpansets "combines" spansets. 
This isn't actually possible so it just +// choose the spanset that has the highest "Matched" number as it is hopefully +// more representative of the spanset +func combineSpansets(existing *tempopb.SpanSet, new *tempopb.SpanSet) { + if existing.Matched >= new.Matched { + return + } + + existing.Matched = new.Matched + existing.Attributes = new.Attributes + existing.Spans = new.Spans +} + +func spansetID(ss *tempopb.SpanSet) string { + id := "" + + for _, s := range ss.Attributes { + // any attributes that start with "by" are considered to be part of the spanset identity + if strings.HasPrefix(s.Key, "by") { + id += s.Key + s.Value.String() + } + } + + return id +} diff --git a/pkg/traceql/combine_test.go b/pkg/traceql/combine_test.go new file mode 100644 index 00000000000..4566b4cff50 --- /dev/null +++ b/pkg/traceql/combine_test.go @@ -0,0 +1,218 @@ +package traceql + +import ( + "testing" + + "github.com/grafana/tempo/pkg/tempopb" + v1 "github.com/grafana/tempo/pkg/tempopb/common/v1" + "github.com/stretchr/testify/require" +) + +func TestCombineResults(t *testing.T) { + tcs := []struct { + name string + existing *tempopb.TraceSearchMetadata + new *tempopb.TraceSearchMetadata + expected *tempopb.TraceSearchMetadata + }{ + { + name: "overwrite nothing", + existing: &tempopb.TraceSearchMetadata{ + SpanSet: &tempopb.SpanSet{}, + SpanSets: []*tempopb.SpanSet{}, + }, + new: &tempopb.TraceSearchMetadata{ + TraceID: "trace-1", + RootServiceName: "service-1", + RootTraceName: "root-trace-1", + StartTimeUnixNano: 123, + DurationMs: 100, + SpanSets: []*tempopb.SpanSet{}, + }, + expected: &tempopb.TraceSearchMetadata{ + TraceID: "trace-1", + RootServiceName: "service-1", + RootTraceName: "root-trace-1", + StartTimeUnixNano: 123, + DurationMs: 100, + SpanSets: []*tempopb.SpanSet{}, + }, + }, + { + name: "mixed copying in fields", + existing: &tempopb.TraceSearchMetadata{ + TraceID: "existing-trace", + RootServiceName: "existing-service", + RootTraceName: "existing-root-trace", + StartTimeUnixNano: 100, + DurationMs: 200, + SpanSets: []*tempopb.SpanSet{}, + }, + new: &tempopb.TraceSearchMetadata{ + TraceID: "new-trace", + RootServiceName: "new-service", + RootTraceName: "new-root-trace", + StartTimeUnixNano: 150, + DurationMs: 300, + SpanSets: []*tempopb.SpanSet{}, + }, + expected: &tempopb.TraceSearchMetadata{ + TraceID: "existing-trace", + RootServiceName: "existing-service", + RootTraceName: "existing-root-trace", + StartTimeUnixNano: 100, + DurationMs: 300, + SpanSets: []*tempopb.SpanSet{}, + }, + }, + { + name: "copy in spansets", + existing: &tempopb.TraceSearchMetadata{ + SpanSet: &tempopb.SpanSet{}, + SpanSets: []*tempopb.SpanSet{}, + }, + new: &tempopb.TraceSearchMetadata{ + SpanSets: []*tempopb.SpanSet{ + { + Matched: 3, + Spans: []*tempopb.Span{{SpanID: "span-1"}}, + Attributes: []*v1.KeyValue{{Key: "avg(test)", Value: &v1.AnyValue{Value: &v1.AnyValue_DoubleValue{DoubleValue: 1}}}}, + }, + }, + }, + expected: &tempopb.TraceSearchMetadata{ + SpanSets: []*tempopb.SpanSet{ + { + Matched: 3, + Spans: []*tempopb.Span{{SpanID: "span-1"}}, + Attributes: []*v1.KeyValue{{Key: "avg(test)", Value: &v1.AnyValue{Value: &v1.AnyValue_DoubleValue{DoubleValue: 1}}}}, + }, + }, + }, + }, + { + name: "take higher matches", + existing: &tempopb.TraceSearchMetadata{ + SpanSet: &tempopb.SpanSet{}, + SpanSets: []*tempopb.SpanSet{ + { + Matched: 3, + Spans: []*tempopb.Span{{SpanID: "span-1"}}, + Attributes: []*v1.KeyValue{{Key: "avg(test)", Value: &v1.AnyValue{Value: &v1.AnyValue_DoubleValue{DoubleValue: 1}}}}, + 
}, + }, + }, + new: &tempopb.TraceSearchMetadata{ + SpanSets: []*tempopb.SpanSet{ + { + Matched: 5, + Spans: []*tempopb.Span{{SpanID: "span-2"}}, + Attributes: []*v1.KeyValue{{Key: "avg(test)", Value: &v1.AnyValue{Value: &v1.AnyValue_DoubleValue{DoubleValue: 3}}}}, + }, + }, + }, + expected: &tempopb.TraceSearchMetadata{ + SpanSets: []*tempopb.SpanSet{ + { + Matched: 5, + Spans: []*tempopb.Span{{SpanID: "span-2"}}, + Attributes: []*v1.KeyValue{{Key: "avg(test)", Value: &v1.AnyValue{Value: &v1.AnyValue_DoubleValue{DoubleValue: 3}}}}, + }, + }, + }, + }, + { + name: "keep higher matches", + existing: &tempopb.TraceSearchMetadata{ + SpanSet: &tempopb.SpanSet{}, + SpanSets: []*tempopb.SpanSet{ + { + Matched: 7, + Spans: []*tempopb.Span{{SpanID: "span-1"}}, + Attributes: []*v1.KeyValue{{Key: "avg(test)", Value: &v1.AnyValue{Value: &v1.AnyValue_DoubleValue{DoubleValue: 1}}}}, + }, + }, + }, + new: &tempopb.TraceSearchMetadata{ + SpanSets: []*tempopb.SpanSet{ + { + Matched: 5, + Spans: []*tempopb.Span{{SpanID: "span-2"}}, + Attributes: []*v1.KeyValue{{Key: "avg(test)", Value: &v1.AnyValue{Value: &v1.AnyValue_DoubleValue{DoubleValue: 3}}}}, + }, + }, + }, + expected: &tempopb.TraceSearchMetadata{ + SpanSets: []*tempopb.SpanSet{ + { + Matched: 7, + Spans: []*tempopb.Span{{SpanID: "span-1"}}, + Attributes: []*v1.KeyValue{{Key: "avg(test)", Value: &v1.AnyValue{Value: &v1.AnyValue_DoubleValue{DoubleValue: 1}}}}, + }, + }, + }, + }, + { + name: "respect by()", + existing: &tempopb.TraceSearchMetadata{ + SpanSet: &tempopb.SpanSet{}, + SpanSets: []*tempopb.SpanSet{ + { + Matched: 7, + Spans: []*tempopb.Span{{SpanID: "span-1"}}, + Attributes: []*v1.KeyValue{{Key: "by(name)", Value: &v1.AnyValue{Value: &v1.AnyValue_StringValue{StringValue: "a"}}}}, + }, + { + Matched: 3, + Spans: []*tempopb.Span{{SpanID: "span-1"}}, + Attributes: []*v1.KeyValue{{Key: "by(duration)", Value: &v1.AnyValue{Value: &v1.AnyValue_DoubleValue{DoubleValue: 1.1}}}}, + }, + }, + }, + new: &tempopb.TraceSearchMetadata{ + SpanSets: []*tempopb.SpanSet{ + { + Matched: 5, + Spans: []*tempopb.Span{{SpanID: "span-2"}}, + Attributes: []*v1.KeyValue{{Key: "by(name)", Value: &v1.AnyValue{Value: &v1.AnyValue_StringValue{StringValue: "a"}}}}, + }, + }, + }, + expected: &tempopb.TraceSearchMetadata{ + SpanSets: []*tempopb.SpanSet{ + { + Matched: 7, + Spans: []*tempopb.Span{{SpanID: "span-1"}}, + Attributes: []*v1.KeyValue{{Key: "by(name)", Value: &v1.AnyValue{Value: &v1.AnyValue_StringValue{StringValue: "a"}}}}, + }, + { + Matched: 3, + Spans: []*tempopb.Span{{SpanID: "span-1"}}, + Attributes: []*v1.KeyValue{{Key: "by(duration)", Value: &v1.AnyValue{Value: &v1.AnyValue_DoubleValue{DoubleValue: 1.1}}}}, + }, + }, + }, + }, + } + + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + CombineSearchResults(tc.existing, tc.new) + + // confirm that the SpanSet on tc.existing is contained in the slice of SpanSets + // then nil out. 
the actual spanset chosen is based on map iteration order + found := len(tc.existing.SpanSets) == 0 + for _, ss := range tc.existing.SpanSets { + if ss == tc.existing.SpanSet { + found = true + break + } + } + require.True(t, found) + tc.expected.SpanSet = nil + tc.existing.SpanSet = nil + + require.Equal(t, tc.expected, tc.existing) + }) + } +} diff --git a/pkg/traceql/engine_test.go b/pkg/traceql/engine_test.go index ab2968a2985..79dae674f1e 100644 --- a/pkg/traceql/engine_test.go +++ b/pkg/traceql/engine_test.go @@ -110,63 +110,65 @@ func TestEngine_Execute(t *testing.T) { spanSetFetcher.capturedRequest.SecondPass = nil // have to set this to nil b/c assert.Equal does not handle function pointers assert.Equal(t, expectedFetchSpansRequest, spanSetFetcher.capturedRequest) - expectedTraceSearchMetadata := []*tempopb.TraceSearchMetadata{ - { - TraceID: "1", - RootServiceName: "my-service", - RootTraceName: "HTTP GET", - SpanSet: &tempopb.SpanSet{ - Spans: []*tempopb.Span{ + expectedSpanset := &tempopb.SpanSet{ + Spans: []*tempopb.Span{ + { + SpanID: "0000000000000002", + StartTimeUnixNano: uint64(now.UnixNano()), + DurationNanos: 100_000_000, + Attributes: []*v1.KeyValue{ { - SpanID: "0000000000000002", - StartTimeUnixNano: uint64(now.UnixNano()), - DurationNanos: 100_000_000, - Attributes: []*v1.KeyValue{ - { - Key: "foo", - Value: &v1.AnyValue{ - Value: &v1.AnyValue_StringValue{ - StringValue: "value", - }, - }, + Key: "foo", + Value: &v1.AnyValue{ + Value: &v1.AnyValue_StringValue{ + StringValue: "value", }, - { - Key: "bar", - Value: &v1.AnyValue{ - Value: &v1.AnyValue_StringValue{ - StringValue: "value", - }, - }, + }, + }, + { + Key: "bar", + Value: &v1.AnyValue{ + Value: &v1.AnyValue_StringValue{ + StringValue: "value", }, }, }, + }, + }, + { + SpanID: "0000000000000003", + StartTimeUnixNano: uint64(now.UnixNano()), + DurationNanos: 200_000_000, + Attributes: []*v1.KeyValue{ { - SpanID: "0000000000000003", - StartTimeUnixNano: uint64(now.UnixNano()), - DurationNanos: 200_000_000, - Attributes: []*v1.KeyValue{ - { - Key: "foo", - Value: &v1.AnyValue{ - Value: &v1.AnyValue_StringValue{ - StringValue: "value", - }, - }, + Key: "foo", + Value: &v1.AnyValue{ + Value: &v1.AnyValue_StringValue{ + StringValue: "value", }, - { - Key: "bar", - Value: &v1.AnyValue{ - Value: &v1.AnyValue_StringValue{ - StringValue: "value", - }, - }, + }, + }, + { + Key: "bar", + Value: &v1.AnyValue{ + Value: &v1.AnyValue_StringValue{ + StringValue: "value", }, }, }, }, - Matched: 0, }, }, + Matched: 0, + } + expectedTraceSearchMetadata := []*tempopb.TraceSearchMetadata{ + { + TraceID: "1", + RootServiceName: "my-service", + RootTraceName: "HTTP GET", + SpanSet: expectedSpanset, + SpanSets: []*tempopb.SpanSet{expectedSpanset}, + }, } // Sort attributes for consistent equality checks @@ -235,90 +237,92 @@ func TestEngine_asTraceSearchMetadata(t *testing.T) { traceSearchMetadata := e.asTraceSearchMetadata(spanSet) - expectedTraceSearchMetadata := &tempopb.TraceSearchMetadata{ - TraceID: util.TraceIDToHexString(traceID), - RootServiceName: "my-service", - RootTraceName: "HTTP GET", - StartTimeUnixNano: 1000, - DurationMs: uint32(time.Second.Milliseconds()), - SpanSet: &tempopb.SpanSet{ - Matched: 2, - Spans: []*tempopb.Span{ - { - SpanID: util.SpanIDToHexString(spanID1), - Name: "HTTP GET", - StartTimeUnixNano: uint64(now.UnixNano()), - DurationNanos: 10_000_000_000, - Attributes: []*v1.KeyValue{ - { - Key: "cluster", - Value: &v1.AnyValue{ - Value: &v1.AnyValue_StringValue{ - StringValue: "prod", - }, + 
expectedSpanset := &tempopb.SpanSet{ + Matched: 2, + Spans: []*tempopb.Span{ + { + SpanID: util.SpanIDToHexString(spanID1), + Name: "HTTP GET", + StartTimeUnixNano: uint64(now.UnixNano()), + DurationNanos: 10_000_000_000, + Attributes: []*v1.KeyValue{ + { + Key: "cluster", + Value: &v1.AnyValue{ + Value: &v1.AnyValue_StringValue{ + StringValue: "prod", }, }, - { - Key: "count", - Value: &v1.AnyValue{ - Value: &v1.AnyValue_IntValue{ - IntValue: 5, - }, + }, + { + Key: "count", + Value: &v1.AnyValue{ + Value: &v1.AnyValue_IntValue{ + IntValue: 5, }, }, - { - Key: "count_but_float", - Value: &v1.AnyValue{ - Value: &v1.AnyValue_DoubleValue{ - DoubleValue: 5.0, - }, + }, + { + Key: "count_but_float", + Value: &v1.AnyValue{ + Value: &v1.AnyValue_DoubleValue{ + DoubleValue: 5.0, }, }, - { - Key: "is_ok", - Value: &v1.AnyValue{ - Value: &v1.AnyValue_BoolValue{ - BoolValue: true, - }, + }, + { + Key: "is_ok", + Value: &v1.AnyValue{ + Value: &v1.AnyValue_BoolValue{ + BoolValue: true, }, }, - { - Key: "kind", - Value: &v1.AnyValue{ - Value: &v1.AnyValue_StringValue{ - StringValue: KindClient.String(), - }, + }, + { + Key: "kind", + Value: &v1.AnyValue{ + Value: &v1.AnyValue_StringValue{ + StringValue: KindClient.String(), }, }, - { - Key: "status", - Value: &v1.AnyValue{ - Value: &v1.AnyValue_StringValue{ - StringValue: StatusOk.String(), - }, + }, + { + Key: "status", + Value: &v1.AnyValue{ + Value: &v1.AnyValue_StringValue{ + StringValue: StatusOk.String(), }, }, }, }, - { - SpanID: util.SpanIDToHexString(spanID2), - StartTimeUnixNano: uint64(now.Add(2 * time.Second).UnixNano()), - DurationNanos: 20_000_000_000, - Attributes: nil, - }, }, - Attributes: []*v1.KeyValue{ - { - Key: "avg(duration)", - Value: &v1.AnyValue{ - Value: &v1.AnyValue_DoubleValue{ - DoubleValue: 15.0, - }, + { + SpanID: util.SpanIDToHexString(spanID2), + StartTimeUnixNano: uint64(now.Add(2 * time.Second).UnixNano()), + DurationNanos: 20_000_000_000, + Attributes: nil, + }, + }, + Attributes: []*v1.KeyValue{ + { + Key: "avg(duration)", + Value: &v1.AnyValue{ + Value: &v1.AnyValue_DoubleValue{ + DoubleValue: 15.0, }, }, }, }, } + expectedTraceSearchMetadata := &tempopb.TraceSearchMetadata{ + TraceID: util.TraceIDToHexString(traceID), + RootServiceName: "my-service", + RootTraceName: "HTTP GET", + StartTimeUnixNano: 1000, + DurationMs: uint32(time.Second.Milliseconds()), + SpanSet: expectedSpanset, + SpanSets: []*tempopb.SpanSet{expectedSpanset}, + } // Ensure attributes are sorted to avoid a flaky test sort.Slice(traceSearchMetadata.SpanSet.Spans[0].Attributes, func(i, j int) bool { From 0b27a5b848a58d289d459a4594889e280a68ee53 Mon Sep 17 00:00:00 2001 From: Joe Elliott Date: Mon, 22 May 2023 14:02:35 -0400 Subject: [PATCH 15/18] docs Signed-off-by: Joe Elliott --- docs/sources/tempo/api_docs/_index.md | 50 ++++++++++++++------------- 1 file changed, 26 insertions(+), 24 deletions(-) diff --git a/docs/sources/tempo/api_docs/_index.md b/docs/sources/tempo/api_docs/_index.md index 3d0b0943fe2..47081a66591 100644 --- a/docs/sources/tempo/api_docs/_index.md +++ b/docs/sources/tempo/api_docs/_index.md @@ -185,30 +185,31 @@ $ curl -G -s http://localhost:3200/api/search --data-urlencode 'q={ status=error { "traces": [ { - "traceID": "169bdefcae1f19", - "rootServiceName": "gme-ruler", - "rootTraceName": "rule", - "startTimeUnixNano": "1675090379953800000", - "durationMs": 3, - "spanSet": { - "spans": [ - { - "spanID": "45b795d0c4f9f6ae", - "startTimeUnixNano": "1675090379955688000", - "durationNanos": "525000", - "attributes": [ - { - 
"key": "status", - "value": { - "stringValue": "error" + "traceID": "2f3e0cee77ae5dc9c17ade3689eb2e54", + "rootServiceName": "shop-backend", + "rootTraceName": "update-billing", + "startTimeUnixNano": "1684778327699392724", + "durationMs": 557, + "spanSets": [ + { + "spans": [ + { + "spanID": "563d623c76514f8e", + "startTimeUnixNano": "1684778327735077898", + "durationNanos": "446979497", + "attributes": [ + { + "key": "status", + "value": { + "stringValue": "error" + } } - } - ] - } - ], - "matched": 1 - } - }, + ] + } + ], + "matched": 1 + } + ] ], "metrics": { "totalBlocks": 13 @@ -591,7 +592,8 @@ message TraceSearchMetadata { string rootTraceName = 3; uint64 startTimeUnixNano = 4; uint32 durationMs = 5; - SpanSet spanSet = 6; + SpanSet spanSet = 6; // deprecated. use SpanSets field below + repeated SpanSet spanSets = 7; } message SpanSet { From e3e4ec2d423818997fc1bcac14be4dd0ab49029e Mon Sep 17 00:00:00 2001 From: Joe Elliott Date: Mon, 22 May 2023 14:09:23 -0400 Subject: [PATCH 16/18] lint Signed-off-by: Joe Elliott --- pkg/traceql/engine.go | 15 --------------- tempodb/encoding/vparquet/block_traceql.go | 2 +- tempodb/encoding/vparquet2/block_traceql.go | 2 +- tempodb/tempodb_search_test.go | 4 ++-- 4 files changed, 4 insertions(+), 19 deletions(-) diff --git a/pkg/traceql/engine.go b/pkg/traceql/engine.go index f650a53fef6..b0d43c293de 100644 --- a/pkg/traceql/engine.go +++ b/pkg/traceql/engine.go @@ -288,21 +288,6 @@ func (e *Engine) createFetchSpansRequest(searchReq *tempopb.SearchRequest, pipel return req } -func addTraceSearchMetadata(existing []*tempopb.TraceSearchMetadata, new *tempopb.TraceSearchMetadata) []*tempopb.TraceSearchMetadata { - // search for an existing traceid first - for _, t := range existing { - if t.TraceID == new.TraceID { - // found a match, combine into th existing one - CombineSearchResults(t, new) - return existing - } - } - - // otherwise append a new one - return append(existing, new) -} - -// jpe - update this to use Spansets func (e *Engine) asTraceSearchMetadata(spanset *Spanset) *tempopb.TraceSearchMetadata { metadata := &tempopb.TraceSearchMetadata{ TraceID: util.TraceIDToHexString(spanset.TraceID), diff --git a/tempodb/encoding/vparquet/block_traceql.go b/tempodb/encoding/vparquet/block_traceql.go index f1d78645315..f7adcb5ba25 100644 --- a/tempodb/encoding/vparquet/block_traceql.go +++ b/tempodb/encoding/vparquet/block_traceql.go @@ -383,7 +383,7 @@ func (i *bridgeIterator) Close() { var _ pq.Iterator = (*rebatchIterator)(nil) // rebatchIterator either passes spansets through directly OR rebatches them based on metadata -// in OtherEntries jpe - test +// in OtherEntries type rebatchIterator struct { iter parquetquery.Iterator diff --git a/tempodb/encoding/vparquet2/block_traceql.go b/tempodb/encoding/vparquet2/block_traceql.go index a293f6abbe6..1595f9e7bd6 100644 --- a/tempodb/encoding/vparquet2/block_traceql.go +++ b/tempodb/encoding/vparquet2/block_traceql.go @@ -384,7 +384,7 @@ func (i *bridgeIterator) Close() { var _ pq.Iterator = (*rebatchIterator)(nil) // rebatchIterator either passes spansets through directly OR rebatches them based on metadata -// in OtherEntries jpe - test +// in OtherEntries type rebatchIterator struct { iter parquetquery.Iterator diff --git a/tempodb/tempodb_search_test.go b/tempodb/tempodb_search_test.go index e171058e265..3600c3b525a 100644 --- a/tempodb/tempodb_search_test.go +++ b/tempodb/tempodb_search_test.go @@ -290,7 +290,7 @@ func testGroupTraceQLCompleteBlock(t *testing.T, blockVersion string) { SpanID: 
"0000000000010203", StartTimeUnixNano: 1000000000000, DurationNanos: 1000000000, - Name: "", // jpe name? + Name: "", Attributes: []*v1_common.KeyValue{ {Key: "foo", Value: &v1_common.AnyValue{Value: &v1_common.AnyValue_StringValue{StringValue: "Bar"}}}, }, @@ -324,7 +324,7 @@ func testGroupTraceQLCompleteBlock(t *testing.T, blockVersion string) { SpanID: "0000000000010203", StartTimeUnixNano: 1000000000000, DurationNanos: 1000000000, - Name: "", // jpe name? + Name: "", Attributes: []*v1_common.KeyValue{ {Key: "service.name", Value: &v1_common.AnyValue{Value: &v1_common.AnyValue_StringValue{StringValue: "MyService"}}}, }, From fc677581c5f914e2d5ed258012cf50a7e9f1ec38 Mon Sep 17 00:00:00 2001 From: Joe Elliott Date: Tue, 23 May 2023 07:45:08 -0400 Subject: [PATCH 17/18] search combiner + tests Signed-off-by: Joe Elliott --- modules/frontend/search_progress.go | 25 +---- modules/ingester/instance_search.go | 25 +---- pkg/traceql/combine.go | 41 ++++++- pkg/traceql/combine_test.go | 2 +- pkg/traceql/engine.go | 6 +- tempodb/encoding/vparquet2/block_traceql.go | 3 +- tempodb/tempodb_search_test.go | 116 +++++++++++--------- 7 files changed, 117 insertions(+), 101 deletions(-) diff --git a/modules/frontend/search_progress.go b/modules/frontend/search_progress.go index c324773ef9e..155c373aab8 100644 --- a/modules/frontend/search_progress.go +++ b/modules/frontend/search_progress.go @@ -3,7 +3,6 @@ package frontend import ( "context" "net/http" - "sort" "sync" "github.com/grafana/tempo/pkg/tempopb" @@ -43,7 +42,7 @@ type searchProgress struct { statusMsg string ctx context.Context - resultsMap map[string]*tempopb.TraceSearchMetadata + resultsCombiner *traceql.MetadataCombiner resultsMetrics *tempopb.SearchMetrics finishedRequests int @@ -62,7 +61,7 @@ func newSearchProgress(ctx context.Context, limit, totalJobs, totalBlocks, total TotalBlockBytes: uint64(totalBlockBytes), TotalJobs: uint32(totalJobs), }, - resultsMap: map[string]*tempopb.TraceSearchMetadata{}, + resultsCombiner: traceql.NewMetadataCombiner(), } } @@ -86,15 +85,7 @@ func (r *searchProgress) addResponse(res *tempopb.SearchResponse) { defer r.mtx.Unlock() for _, t := range res.Traces { - if _, ok := r.resultsMap[t.TraceID]; !ok { - r.resultsMap[t.TraceID] = t - } else { - // combine into the incoming trace and then set in the map. 
this prevents - // race conditions on pointers to traces that we've already returned from - // .result() - traceql.CombineSearchResults(t, r.resultsMap[t.TraceID]) - r.resultsMap[t.TraceID] = t - } + r.resultsCombiner.AddMetadata(t) } // purposefully ignoring TotalBlocks as that value is set by the sharder @@ -126,7 +117,7 @@ func (r *searchProgress) internalShouldQuit() bool { if r.statusCode/100 != 2 { return true } - if len(r.resultsMap) > r.limit { + if r.resultsCombiner.Count() > r.limit { return true } @@ -154,15 +145,9 @@ func (r *searchProgress) result() *shardedSearchResults { TotalJobs: r.resultsMetrics.TotalJobs, TotalBlockBytes: r.resultsMetrics.TotalBlockBytes, }, + Traces: r.resultsCombiner.Metadata(), } - for _, t := range r.resultsMap { - searchRes.Traces = append(searchRes.Traces, t) - } - sort.Slice(searchRes.Traces, func(i, j int) bool { - return searchRes.Traces[i].StartTimeUnixNano > searchRes.Traces[j].StartTimeUnixNano - }) - res.response = searchRes return res diff --git a/modules/ingester/instance_search.go b/modules/ingester/instance_search.go index 75141a95df1..1bb4724a094 100644 --- a/modules/ingester/instance_search.go +++ b/modules/ingester/instance_search.go @@ -3,7 +3,6 @@ package ingester import ( "context" "fmt" - "sort" "strings" "sync" @@ -53,7 +52,7 @@ func (i *instance) Search(ctx context.Context, req *tempopb.SearchRequest) (*tem sr.AllWorkersStarted() // read and combine search results - resultsMap := map[string]*tempopb.TraceSearchMetadata{} + combiner := traceql.NewMetadataCombiner() // collect results from all the goroutines via sr.Results channel. // range loop will exit when sr.Results channel is closed. @@ -63,14 +62,8 @@ func (i *instance) Search(ctx context.Context, req *tempopb.SearchRequest) (*tem return nil, sr.Error() } - // Dedupe/combine results - if existing := resultsMap[result.TraceID]; existing != nil { - traceql.CombineSearchResults(existing, result) - } else { - resultsMap[result.TraceID] = result - } - - if len(resultsMap) >= maxResults { + combiner.AddMetadata(result) + if combiner.Count() >= maxResults { sr.Close() // signal pending workers to exit break } @@ -81,18 +74,8 @@ func (i *instance) Search(ctx context.Context, req *tempopb.SearchRequest) (*tem return nil, sr.Error() } - results := make([]*tempopb.TraceSearchMetadata, 0, len(resultsMap)) - for _, result := range resultsMap { - results = append(results, result) - } - - // Sort - sort.Slice(results, func(i, j int) bool { - return results[i].StartTimeUnixNano > results[j].StartTimeUnixNano - }) - return &tempopb.SearchResponse{ - Traces: results, + Traces: combiner.Metadata(), Metrics: &tempopb.SearchMetrics{ InspectedTraces: sr.TracesInspected(), InspectedBytes: sr.BytesInspected(), diff --git a/pkg/traceql/combine.go b/pkg/traceql/combine.go index 3de7aa2059a..60ea04fd5f6 100644 --- a/pkg/traceql/combine.go +++ b/pkg/traceql/combine.go @@ -1,15 +1,52 @@ package traceql import ( + "sort" "strings" "github.com/grafana/tempo/pkg/tempopb" ) -// CombineSearchResults overlays the incoming search result with the existing result. This is required +type MetadataCombiner struct { + trs map[string]*tempopb.TraceSearchMetadata +} + +func NewMetadataCombiner() *MetadataCombiner { + return &MetadataCombiner{ + trs: make(map[string]*tempopb.TraceSearchMetadata), + } +} + +// AddMetadata adds the new metadata to the map. 
if it already exists +// use CombineSearchResults to combine the two +func (c *MetadataCombiner) AddMetadata(new *tempopb.TraceSearchMetadata) { + if existing, ok := c.trs[new.TraceID]; ok { + combineSearchResults(existing, new) + return + } + + c.trs[new.TraceID] = new +} + +func (c *MetadataCombiner) Count() int { + return len(c.trs) +} + +func (c *MetadataCombiner) Metadata() []*tempopb.TraceSearchMetadata { + m := make([]*tempopb.TraceSearchMetadata, 0, len(c.trs)) + for _, tr := range c.trs { + m = append(m, tr) + } + sort.Slice(m, func(i, j int) bool { + return m[i].StartTimeUnixNano > m[j].StartTimeUnixNano + }) + return m +} + +// combineSearchResults overlays the incoming search result with the existing result. This is required // for the following reason: a trace may be present in multiple blocks, or in partial segments // in live traces. The results should reflect elements of all segments. -func CombineSearchResults(existing *tempopb.TraceSearchMetadata, incoming *tempopb.TraceSearchMetadata) { +func combineSearchResults(existing *tempopb.TraceSearchMetadata, incoming *tempopb.TraceSearchMetadata) { if existing.TraceID == "" { existing.TraceID = incoming.TraceID } diff --git a/pkg/traceql/combine_test.go b/pkg/traceql/combine_test.go index 4566b4cff50..801b9aae35b 100644 --- a/pkg/traceql/combine_test.go +++ b/pkg/traceql/combine_test.go @@ -197,7 +197,7 @@ func TestCombineResults(t *testing.T) { for _, tc := range tcs { t.Run(tc.name, func(t *testing.T) { - CombineSearchResults(tc.existing, tc.new) + combineSearchResults(tc.existing, tc.new) // confirm that the SpanSet on tc.existing is contained in the slice of SpanSets // then nil out. the actual spanset chosen is based on map iteration order diff --git a/pkg/traceql/engine.go b/pkg/traceql/engine.go index b0d43c293de..8f06ff429ff 100644 --- a/pkg/traceql/engine.go +++ b/pkg/traceql/engine.go @@ -115,6 +115,7 @@ func (e *Engine) ExecuteSearch(ctx context.Context, searchReq *tempopb.SearchReq Traces: nil, Metrics: &tempopb.SearchMetrics{}, } + combiner := NewMetadataCombiner() for { spanset, err := iterator.Next(ctx) if err != nil && err != io.EOF { @@ -124,12 +125,13 @@ func (e *Engine) ExecuteSearch(ctx context.Context, searchReq *tempopb.SearchReq if spanset == nil { break } - res.Traces = append(res.Traces, e.asTraceSearchMetadata(spanset)) + combiner.AddMetadata(e.asTraceSearchMetadata(spanset)) - if len(res.Traces) >= int(searchReq.Limit) && searchReq.Limit > 0 { + if combiner.Count() >= int(searchReq.Limit) && searchReq.Limit > 0 { break } } + res.Traces = combiner.Metadata() span.SetTag("spansets_evaluated", spansetsEvaluated) span.SetTag("spansets_found", len(res.Traces)) diff --git a/tempodb/encoding/vparquet2/block_traceql.go b/tempodb/encoding/vparquet2/block_traceql.go index 1595f9e7bd6..a3736b8147e 100644 --- a/tempodb/encoding/vparquet2/block_traceql.go +++ b/tempodb/encoding/vparquet2/block_traceql.go @@ -338,8 +338,7 @@ func (i *bridgeIterator) Next() (*pq.IteratorResult, error) { for idx, s := range ss.Spans { span := s.(*span) - // use otherEntryCallbackSpansetKey to indicate to the rebatchIterator that either - // 1) this is the last span in the spanset, or 2) there are more spans in the spanset + // mark whether this is the last span in the spanset span.cbSpansetFinal = idx == len(ss.Spans)-1 span.cbSpanset = ss i.nextSpans = append(i.nextSpans, span) diff --git a/tempodb/tempodb_search_test.go b/tempodb/tempodb_search_test.go index 3600c3b525a..0b286102136 100644 --- a/tempodb/tempodb_search_test.go +++ 
b/tempodb/tempodb_search_test.go @@ -87,6 +87,7 @@ func testTraceQLCompleteBlock(t *testing.T, blockVersion string) { actual := actualForExpectedMeta(wantMeta, res) require.NotNil(t, actual, "search request: %v", req) actual.SpanSet = nil // todo: add the matching spansets to wantmeta + actual.SpanSets = nil require.Equal(t, wantMeta, actual, "search request: %v", req) } @@ -243,6 +244,7 @@ func testAdvancedTraceQLCompleteBlock(t *testing.T, blockVersion string) { actual := actualForExpectedMeta(wantMeta, res) require.NotNil(t, actual, "search request: %v", req) actual.SpanSet = nil // todo: add the matching spansets to wantmeta + actual.SpanSets = nil require.Equal(t, wantMeta, actual, "search request: %v", req) } @@ -284,31 +286,33 @@ func testGroupTraceQLCompleteBlock(t *testing.T, blockVersion string) { req: &tempopb.SearchRequest{Query: "{} | by(span.foo) | count() = 2"}, expected: []*tempopb.TraceSearchMetadata{ { - SpanSet: &tempopb.SpanSet{ - Spans: []*tempopb.Span{ - { - SpanID: "0000000000010203", - StartTimeUnixNano: 1000000000000, - DurationNanos: 1000000000, - Name: "", - Attributes: []*v1_common.KeyValue{ - {Key: "foo", Value: &v1_common.AnyValue{Value: &v1_common.AnyValue_StringValue{StringValue: "Bar"}}}, + SpanSets: []*tempopb.SpanSet{ + { + Spans: []*tempopb.Span{ + { + SpanID: "0000000000010203", + StartTimeUnixNano: 1000000000000, + DurationNanos: 1000000000, + Name: "", + Attributes: []*v1_common.KeyValue{ + {Key: "foo", Value: &v1_common.AnyValue{Value: &v1_common.AnyValue_StringValue{StringValue: "Bar"}}}, + }, }, - }, - { - SpanID: "0000000000000000", - StartTimeUnixNano: 1000000000000, - DurationNanos: 2000000000, - Name: "", - Attributes: []*v1_common.KeyValue{ - {Key: "foo", Value: &v1_common.AnyValue{Value: &v1_common.AnyValue_StringValue{StringValue: "Bar"}}}, + { + SpanID: "0000000000000000", + StartTimeUnixNano: 1000000000000, + DurationNanos: 2000000000, + Name: "", + Attributes: []*v1_common.KeyValue{ + {Key: "foo", Value: &v1_common.AnyValue{Value: &v1_common.AnyValue_StringValue{StringValue: "Bar"}}}, + }, }, }, - }, - Matched: 2, - Attributes: []*v1_common.KeyValue{ - {Key: "by(span.foo)", Value: &v1_common.AnyValue{Value: &v1_common.AnyValue_StringValue{StringValue: "Bar"}}}, - {Key: "count()", Value: &v1_common.AnyValue{Value: &v1_common.AnyValue_IntValue{IntValue: 2}}}, + Matched: 2, + Attributes: []*v1_common.KeyValue{ + {Key: "by(span.foo)", Value: &v1_common.AnyValue{Value: &v1_common.AnyValue_StringValue{StringValue: "Bar"}}}, + {Key: "count()", Value: &v1_common.AnyValue{Value: &v1_common.AnyValue_IntValue{IntValue: 2}}}, + }, }, }, }, @@ -318,42 +322,42 @@ func testGroupTraceQLCompleteBlock(t *testing.T, blockVersion string) { req: &tempopb.SearchRequest{Query: "{} | by(resource.service.name) | count() = 1"}, expected: []*tempopb.TraceSearchMetadata{ { - SpanSet: &tempopb.SpanSet{ - Spans: []*tempopb.Span{ - { - SpanID: "0000000000010203", - StartTimeUnixNano: 1000000000000, - DurationNanos: 1000000000, - Name: "", - Attributes: []*v1_common.KeyValue{ - {Key: "service.name", Value: &v1_common.AnyValue{Value: &v1_common.AnyValue_StringValue{StringValue: "MyService"}}}, + SpanSets: []*tempopb.SpanSet{ + { + Spans: []*tempopb.Span{ + { + SpanID: "0000000000010203", + StartTimeUnixNano: 1000000000000, + DurationNanos: 1000000000, + Name: "", + Attributes: []*v1_common.KeyValue{ + {Key: "service.name", Value: &v1_common.AnyValue{Value: &v1_common.AnyValue_StringValue{StringValue: "MyService"}}}, + }, }, }, + Matched: 1, + Attributes: 
[]*v1_common.KeyValue{ + {Key: "by(resource.service.name)", Value: &v1_common.AnyValue{Value: &v1_common.AnyValue_StringValue{StringValue: "MyService"}}}, + {Key: "count()", Value: &v1_common.AnyValue{Value: &v1_common.AnyValue_IntValue{IntValue: 1}}}, + }, }, - Matched: 1, - Attributes: []*v1_common.KeyValue{ - {Key: "by(resource.service.name)", Value: &v1_common.AnyValue{Value: &v1_common.AnyValue_StringValue{StringValue: "MyService"}}}, - {Key: "count()", Value: &v1_common.AnyValue{Value: &v1_common.AnyValue_IntValue{IntValue: 1}}}, - }, - }, - }, - { - SpanSet: &tempopb.SpanSet{ - Spans: []*tempopb.Span{ - { - SpanID: "0000000000000000", - StartTimeUnixNano: 1000000000000, - DurationNanos: 2000000000, - Name: "", - Attributes: []*v1_common.KeyValue{ - {Key: "service.name", Value: &v1_common.AnyValue{Value: &v1_common.AnyValue_StringValue{StringValue: "RootService"}}}, + { + Spans: []*tempopb.Span{ + { + SpanID: "0000000000000000", + StartTimeUnixNano: 1000000000000, + DurationNanos: 2000000000, + Name: "", + Attributes: []*v1_common.KeyValue{ + {Key: "service.name", Value: &v1_common.AnyValue{Value: &v1_common.AnyValue_StringValue{StringValue: "RootService"}}}, + }, }, }, - }, - Matched: 1, - Attributes: []*v1_common.KeyValue{ - {Key: "by(resource.service.name)", Value: &v1_common.AnyValue{Value: &v1_common.AnyValue_StringValue{StringValue: "RootService"}}}, - {Key: "count()", Value: &v1_common.AnyValue{Value: &v1_common.AnyValue_IntValue{IntValue: 1}}}, + Matched: 1, + Attributes: []*v1_common.KeyValue{ + {Key: "by(resource.service.name)", Value: &v1_common.AnyValue{Value: &v1_common.AnyValue_StringValue{StringValue: "RootService"}}}, + {Key: "count()", Value: &v1_common.AnyValue{Value: &v1_common.AnyValue_IntValue{IntValue: 1}}}, + }, }, }, }, @@ -382,6 +386,12 @@ func testGroupTraceQLCompleteBlock(t *testing.T, blockVersion string) { ss.TraceID = wantMeta.TraceID } + // the actual spanset is impossible to predict since it's chosen randomly from the Spansets slice + // so set it to nil here and just test the slice using the testcases above + for _, tr := range res.Traces { + tr.SpanSet = nil + } + require.NotNil(t, res, "search request: %v", tc) require.Equal(t, tc.expected, res.Traces, "search request: %v", tc) } From 710afbdc9d3459b2225b52af14db614d162a570c Mon Sep 17 00:00:00 2001 From: Joe Elliott Date: Tue, 23 May 2023 08:13:59 -0400 Subject: [PATCH 18/18] comment Signed-off-by: Joe Elliott --- pkg/traceql/engine.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pkg/traceql/engine.go b/pkg/traceql/engine.go index 8f06ff429ff..ed5cd999317 100644 --- a/pkg/traceql/engine.go +++ b/pkg/traceql/engine.go @@ -333,7 +333,10 @@ func (e *Engine) asTraceSearchMetadata(spanset *Spanset) *tempopb.TraceSearchMet } // create a new slice and add the spanset to it. eventually we will deprecate - // metadata.SpanSet + // metadata.SpanSet. populating both the SpanSet and the []SpanSets is for + // backwards compatibility with Grafana. since this method only translates one + // spanset into a TraceSearchMetadata Spansets[0] == Spanset. Higher up the chain + // we will combine Spansets with the same trace id. metadata.SpanSets = []*tempopb.SpanSet{metadata.SpanSet} // add attributes
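
A quick usage sketch of the MetadataCombiner introduced in pkg/traceql/combine.go above. This is illustrative only: the main wrapper and the literal metadata values are made up for the example, while NewMetadataCombiner, AddMetadata, Count and Metadata are the API added by this series. The callers touched in this patch (instance_search.go in the ingester, searchProgress in the query frontend, and ExecuteSearch in the engine) all follow the same pattern: feed every partial result in, compare Count against their limit to decide when to stop early, and read the deduplicated, sorted slice back out.

package main

import (
	"fmt"

	"github.com/grafana/tempo/pkg/tempopb"
	"github.com/grafana/tempo/pkg/traceql"
)

func main() {
	c := traceql.NewMetadataCombiner()

	// Two partial results for the same trace ID (e.g. the same trace seen in
	// two blocks, or in a live segment plus a flushed block) collapse into a
	// single entry; AddMetadata merges them internally.
	c.AddMetadata(&tempopb.TraceSearchMetadata{TraceID: "aaa", RootServiceName: "svc-a", StartTimeUnixNano: 200})
	c.AddMetadata(&tempopb.TraceSearchMetadata{TraceID: "aaa", RootServiceName: "svc-a", StartTimeUnixNano: 200})

	// A different trace ID adds a second entry.
	c.AddMetadata(&tempopb.TraceSearchMetadata{TraceID: "bbb", RootServiceName: "svc-b", StartTimeUnixNano: 100})

	// Count is what callers compare against their result limit.
	fmt.Println(c.Count()) // 2

	// Metadata returns the deduplicated results sorted by start time, newest
	// first - the ordering the ingester and frontend previously implemented
	// by hand with sort.Slice.
	for _, tr := range c.Metadata() {
		fmt.Println(tr.TraceID, tr.StartTimeUnixNano)
	}
}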