From a9b46f61acc4f0183d05e4885a7d69031dce3364 Mon Sep 17 00:00:00 2001 From: Lukas Malkmus Date: Wed, 25 Jan 2023 00:01:53 +0100 Subject: [PATCH 1/8] feat(query): tabular result format --- Makefile | 1 - axiom/datasets.go | 112 +++----- axiom/datasets_integration_test.go | 108 +++++-- axiom/datasets_test.go | 444 +++++++++++++++++------------ axiom/query/doc.go | 24 ++ axiom/query/result.go | 335 +++++++--------------- axiom/query/result_string.go | 51 ---- axiom/query/result_test.go | 155 ---------- 8 files changed, 519 insertions(+), 711 deletions(-) delete mode 100644 axiom/query/result_string.go diff --git a/Makefile b/Makefile index a41524be..350163a9 100644 --- a/Makefile +++ b/Makefile @@ -74,7 +74,6 @@ fmt: ## Format and simplify the source code using `gofmt` .PHONY: generate generate: \ - axiom/query/result_string.go \ axiom/querylegacy/aggregation_string.go \ axiom/querylegacy/filter_string.go \ axiom/querylegacy/kind_string.go \ diff --git a/axiom/datasets.go b/axiom/datasets.go index 64100ffd..ea620728 100644 --- a/axiom/datasets.go +++ b/axiom/datasets.go @@ -124,42 +124,10 @@ type aplQueryRequest struct { type aplQueryResponse struct { query.Result - // HINT(lukasmalkmus): Ignore these fields as they are not relevant for the - // user and/or will change with the new query result format. - LegacyRequest struct { - StartTime any `json:"startTime"` - EndTime any `json:"endTime"` - Resolution any `json:"resolution"` - Aggregations any `json:"aggregations"` - Filter any `json:"filter"` - Order any `json:"order"` - Limit any `json:"limit"` - VirtualFields any `json:"virtualFields"` - Projections any `json:"project"` - Cursor any `json:"cursor"` - IncludeCursor any `json:"includeCursor"` - ContinuationToken any `json:"continuationToken"` - - // HINT(lukasmalkmus): Preserve the legacy request's "groupBy" - // field for now. This is needed to properly render some results. - GroupBy []string `json:"groupBy"` - } `json:"request"` - FieldsMeta any `json:"fieldsMetaMap"` -} - -// UnmarshalJSON implements [json.Unmarshaler]. It is in place to unmarshal the -// groupBy field of the legacy request that is part of the response into the -// actual [query.Result.GroupBy] field. 
-func (r *aplQueryResponse) UnmarshalJSON(b []byte) error { - type localResponse *aplQueryResponse - - if err := json.Unmarshal(b, localResponse(r)); err != nil { - return err - } - - r.GroupBy = r.LegacyRequest.GroupBy - - return nil + Format any `json:"format"` + Request any `json:"request"` + DatasetNames any `json:"datasetNames"` + FieldsMetaMap any `json:"fieldsMetaMap"` } // DatasetsService handles communication with the dataset related operations of @@ -362,7 +330,7 @@ func (s *DatasetsService) Ingest(ctx context.Context, id string, r io.Reader, ty } res.TraceID = resp.TraceID() - setIngestResultOnSpan(span, res) + setIngestStatusOnSpan(span, res) return &res, nil } @@ -468,7 +436,7 @@ func (s *DatasetsService) IngestEvents(ctx context.Context, id string, events [] } res.TraceID = resp.TraceID() - setIngestResultOnSpan(span, res) + setIngestStatusOnSpan(span, res) return &res, nil } @@ -525,7 +493,7 @@ func (s *DatasetsService) IngestChannel(ctx context.Context, id string, events < var ingestStatus ingest.Status defer func() { - setIngestResultOnSpan(span, ingestStatus) + setIngestStatusOnSpan(span, ingestStatus) }() flush := func() error { @@ -598,7 +566,7 @@ func (s *DatasetsService) Query(ctx context.Context, apl string, options ...quer queryParams := struct { Format string `url:"format"` }{ - Format: "legacy", // Hardcode legacy APL format for now. + Format: "tabular", // Hardcode tabular result format for now. } // TODO(lukasmalkmus): Use 's.basePath' once ingest v2 is available. @@ -627,7 +595,8 @@ func (s *DatasetsService) Query(ctx context.Context, apl string, options ...quer } res.TraceID = resp.TraceID() - setQueryResultOnSpan(span, res.Result) + setQueryStatusOnSpan(span, res.Result.Status) + span.SetAttributes(attribute.String("axiom.result.trace_id", res.TraceID)) return &res.Result, nil } @@ -679,7 +648,8 @@ func (s *DatasetsService) QueryLegacy(ctx context.Context, id string, q queryleg res.SavedQueryID = resp.Header.Get("X-Axiom-History-Query-Id") res.TraceID = resp.TraceID() - setLegacyQueryResultOnSpan(span, res.Result) + setLegacyQueryStatusOnSpan(span, res.Result.Status) + span.SetAttributes(attribute.String("axiom.result.trace_id", res.TraceID)) return &res.Result, nil } @@ -731,60 +701,52 @@ func DetectContentType(r io.Reader) (io.Reader, ContentType, error) { return r, typ, nil } -func setIngestResultOnSpan(span trace.Span, res ingest.Status) { +func setIngestStatusOnSpan(span trace.Span, status ingest.Status) { if !span.IsRecording() { return } span.SetAttributes( - attribute.String("axiom.result.trace_id", res.TraceID), - attribute.Int64("axiom.events.ingested", int64(res.Ingested)), //nolint:gosec // Fine for this use case. - attribute.Int64("axiom.events.failed", int64(res.Failed)), //nolint:gosec // Fine for this use case. - attribute.Int64("axiom.events.processed_bytes", int64(res.ProcessedBytes)), //nolint:gosec // Fine for this use case. + attribute.String("axiom.result.trace_id", status.TraceID), + attribute.Int64("axiom.events.ingested", int64(status.Ingested)), + attribute.Int64("axiom.events.failed", int64(status.Failed)), + attribute.Int64("axiom.events.processed_bytes", int64(status.ProcessedBytes)), ) } -//nolint:dupl // We need to support both query packages and their types. 
-func setQueryResultOnSpan(span trace.Span, res query.Result) {
+func setQueryStatusOnSpan(span trace.Span, status query.Status) {
 	if !span.IsRecording() {
 		return
 	}
 
 	span.SetAttributes(
-		attribute.String("axiom.result.trace_id", res.TraceID),
-		attribute.String("axiom.result.status.elapsed_time", res.Status.ElapsedTime.String()),
-		attribute.Int64("axiom.result.status.blocks_examined", int64(res.Status.BlocksExamined)), //nolint:gosec // Fine for this use case.
-		attribute.Int64("axiom.result.status.rows_examined", int64(res.Status.RowsExamined)), //nolint:gosec // Fine for this use case.
-		attribute.Int64("axiom.result.status.rows_matched", int64(res.Status.RowsMatched)), //nolint:gosec // Fine for this use case.
-		attribute.Int64("axiom.result.status.num_groups", int64(res.Status.NumGroups)),
-		attribute.Bool("axiom.result.status.is_partial", res.Status.IsPartial),
-		attribute.Bool("axiom.result.status.is_estimate", res.Status.IsEstimate),
-		attribute.String("axiom.result.status.min_block_time", res.Status.MinBlockTime.String()),
-		attribute.String("axiom.result.status.max_block_time", res.Status.MaxBlockTime.String()),
-		attribute.String("axiom.result.status.min_cursor", res.Status.MinCursor),
-		attribute.String("axiom.result.status.max_cursor", res.Status.MaxCursor),
+		attribute.String("axiom.query.min_cursor", status.MinCursor),
+		attribute.String("axiom.query.max_cursor", status.MaxCursor),
+		attribute.String("axiom.query.elapsed_time", status.ElapsedTime.String()),
+		attribute.Int64("axiom.query.rows_examined", int64(status.RowsExamined)),
+		attribute.Int64("axiom.query.rows_matched", int64(status.RowsMatched)),
 	)
 }
 
-//nolint:dupl // We need to support both query packages and their types.
-func setLegacyQueryResultOnSpan(span trace.Span, res querylegacy.Result) {
+func setLegacyQueryStatusOnSpan(span trace.Span, status querylegacy.Status) {
 	if !span.IsRecording() {
 		return
 	}
 
 	span.SetAttributes(
-		attribute.String("axiom.result.trace_id", res.TraceID),
-		attribute.String("axiom.result.status.elapsed_time", res.Status.ElapsedTime.String()),
-		attribute.Int64("axiom.result.status.blocks_examined", int64(res.Status.BlocksExamined)), //nolint:gosec // Fine for this use case.
-		attribute.Int64("axiom.result.status.rows_examined", int64(res.Status.RowsExamined)), //nolint:gosec // Fine for this use case.
-		attribute.Int64("axiom.result.status.rows_matched", int64(res.Status.RowsMatched)), //nolint:gosec // Fine for this use case.
- attribute.Int64("axiom.result.status.num_groups", int64(res.Status.NumGroups)), - attribute.Bool("axiom.result.status.is_partial", res.Status.IsPartial), - attribute.Bool("axiom.result.status.is_estimate", res.Status.IsEstimate), - attribute.String("axiom.result.status.min_block_time", res.Status.MinBlockTime.String()), - attribute.String("axiom.result.status.max_block_time", res.Status.MaxBlockTime.String()), - attribute.String("axiom.result.status.min_cursor", res.Status.MinCursor), - attribute.String("axiom.result.status.max_cursor", res.Status.MaxCursor), + attribute.String("axiom.querylegacy.elapsed_time", status.ElapsedTime.String()), + attribute.Int64("axiom.querylegacy.blocks_examined", int64(status.BlocksExamined)), + attribute.Int64("axiom.querylegacy.rows_examined", int64(status.RowsExamined)), + attribute.Int64("axiom.querylegacy.rows_matched", int64(status.RowsMatched)), + attribute.Int64("axiom.querylegacy.num_groups", int64(status.NumGroups)), + attribute.Bool("axiom.querylegacy.is_partial", status.IsPartial), + attribute.Bool("axiom.querylegacy.is_estimate", status.IsEstimate), + attribute.String("axiom.querylegacy.min_block_time", status.MinBlockTime.String()), + attribute.String("axiom.querylegacy.max_block_time", status.MaxBlockTime.String()), + attribute.String("axiom.querylegacy.min_cursor", status.MinCursor), + attribute.String("axiom.querylegacy.max_cursor", status.MaxCursor), ) } diff --git a/axiom/datasets_integration_test.go b/axiom/datasets_integration_test.go index 99fd00c7..e5cf2e3d 100644 --- a/axiom/datasets_integration_test.go +++ b/axiom/datasets_integration_test.go @@ -239,9 +239,60 @@ func (s *DatasetsTestSuite) Test() { s.Require().NoError(err) s.Require().NotNil(queryResult) + s.NotZero(queryResult.Status.ElapsedTime) s.EqualValues(14, queryResult.Status.RowsExamined) s.EqualValues(14, queryResult.Status.RowsMatched) - s.Len(queryResult.Matches, 14) + if s.Len(queryResult.Tables, 1) { + table := queryResult.Tables[0] + + if s.Len(table.Sources, 1) { + s.Equal(s.dataset.ID, table.Sources[0].Name) + } + + // FIXME(lukasmalkmus): Tabular results format is not yet returning the + // _rowID column. + s.Len(table.Fields, 11) // 8 event fields + 1 label field + 2 system fields + s.Len(table.Columns, 11) // 8 event fields + 1 label field + 2 system fields + // s.Len(table.Fields, 12) // 8 event fields + 1 label field + 3 system fields + // s.Len(table.Columns, 12) // 8 event fields + 1 label field + 3 system fields + } + + // ... and a slightly more complex (analytic) APL query. 
+ apl = fmt.Sprintf("['%s'] | summarize topk(remote_ip, 1)", s.dataset.ID) + queryResult, err = s.client.Datasets.Query(s.ctx, apl, + query.SetStartTime(startTime), + query.SetEndTime(endTime), + ) + s.Require().NoError(err) + s.Require().NotNil(queryResult) + + s.NotZero(queryResult.Status.ElapsedTime) + s.EqualValues(14, queryResult.Status.RowsExamined) + s.EqualValues(14, queryResult.Status.RowsMatched) + if s.Len(queryResult.Tables, 1) { + table := queryResult.Tables[0] + + if s.Len(table.Sources, 1) { + s.Equal(s.dataset.ID, table.Sources[0].Name) + } + + if s.Len(table.Fields, 1) && s.NotNil(table.Fields[0].Aggregation) { + agg := table.Fields[0].Aggregation + + s.Equal(query.OpTopk, agg.Op) + s.Equal([]string{"remote_ip"}, agg.Fields) + s.Equal([]any{1.}, agg.Args) + } + + if s.Len(table.Columns, 1) && s.Len(table.Columns[0], 1) { + v := table.Columns[0][0].([]any) + m := v[0].(map[string]any) + + s.Equal("93.180.71.1", m["key"]) + s.Equal(7., m["count"]) + s.Equal(0., m["error"]) + } + } // Also run a legacy query and make sure we see some results. legacyQueryResult, err := s.client.Datasets.QueryLegacy(s.ctx, s.dataset.ID, querylegacy.Query{ @@ -251,6 +302,7 @@ func (s *DatasetsTestSuite) Test() { s.Require().NoError(err) s.Require().NotNil(legacyQueryResult) + s.NotZero(queryResult.Status.ElapsedTime) s.EqualValues(14, legacyQueryResult.Status.RowsExamined) s.EqualValues(14, legacyQueryResult.Status.RowsMatched) s.Len(legacyQueryResult.Matches, 14) @@ -324,16 +376,16 @@ func (s *DatasetsTestSuite) TestCursor() { now := time.Now().Truncate(time.Second) _, err := s.client.Datasets.IngestEvents(s.ctx, s.dataset.ID, []axiom.Event{ { // Oldest - "_time": now.Add(-time.Second * 3), - "foo": "bar", + ingest.TimestampField: now.Add(-time.Second * 3), + "foo": "bar", }, { - "_time": now.Add(-time.Second * 2), - "foo": "baz", + ingest.TimestampField: now.Add(-time.Second * 2), + "foo": "baz", }, { // Newest - "_time": now.Add(-time.Second * 1), - "foo": "buz", + ingest.TimestampField: now.Add(-time.Second * 1), + "foo": "buz", }, }) s.Require().NoError(err) @@ -349,16 +401,28 @@ func (s *DatasetsTestSuite) TestCursor() { ) s.Require().NoError(err) - if s.Len(queryResult.Matches, 3) { - s.Equal("buz", queryResult.Matches[0].Data["foo"]) - s.Equal("baz", queryResult.Matches[1].Data["foo"]) - s.Equal("bar", queryResult.Matches[2].Data["foo"]) + // FIXME(lukasmalkmus): Tabular results format is not yet returning the + // _rowID column. + s.T().Skip() + + // HINT(lukasmalkmus): Expecting four columns: _time, _sysTime, _rowID, foo. + // This is only checked once for the first query result to verify the + // dataset scheme. The following queries will only check the results in the + // columns. + s.Require().Len(queryResult.Tables, 1) + s.Require().Len(queryResult.Tables[0].Columns, 4) + s.Require().Len(queryResult.Tables[0].Columns[0], 3) + + if s.Len(queryResult.Tables, 1) { + s.Equal("buz", queryResult.Tables[0].Columns[2][0]) + s.Equal("baz", queryResult.Tables[0].Columns[2][1]) + s.Equal("bar", queryResult.Tables[0].Columns[2][2]) } // HINT(lukasmalkmus): In a real-world scenario, the cursor would be // retrieved from the query status MinCursor or MaxCursor fields, depending // on the queries sort order. - midRowID := queryResult.Matches[1].RowID + midRowID := queryResult.Tables[0].Columns[0][2].(string) // Query events with a cursor in descending order... 
apl = fmt.Sprintf("['%s'] | sort by _time desc", s.dataset.ID) @@ -371,8 +435,8 @@ func (s *DatasetsTestSuite) TestCursor() { // "buz" and "baz" skipped by the cursor, only "bar" is returned. The cursor // is exclusive, so "baz" is not included. - if s.Len(queryResult.Matches, 1) { - s.Equal("bar", queryResult.Matches[0].Data["foo"]) + if s.Len(queryResult.Tables[0].Columns[0], 1) { + s.Equal("bar", queryResult.Tables[0].Columns[0][0]) } // ...again, but with the cursor inclusive. @@ -385,9 +449,9 @@ func (s *DatasetsTestSuite) TestCursor() { // "buz" skipped by the cursor, only "baz" and "bar" is returned. The cursor // is inclusive, so "baz" is included. - if s.Len(queryResult.Matches, 2) { - s.Equal("baz", queryResult.Matches[0].Data["foo"]) - s.Equal("bar", queryResult.Matches[1].Data["foo"]) + if s.Len(queryResult.Tables[0].Columns[0], 2) { + s.Equal("baz", queryResult.Tables[0].Columns[0][0]) + s.Equal("bar", queryResult.Tables[0].Columns[0][1]) } // Query events with a cursor in ascending order... @@ -401,8 +465,8 @@ func (s *DatasetsTestSuite) TestCursor() { // "bar" and "baz" skipped by the cursor, only "buz" is returned. The cursor // is exclusive, so "baz" is not included. - if s.Len(queryResult.Matches, 1) { - s.Equal("buz", queryResult.Matches[0].Data["foo"]) + if s.Len(queryResult.Tables[0].Columns[0], 1) { + s.Equal("buz", queryResult.Tables[0].Columns[0][0]) } // ...again, but with the cursor inclusive. @@ -415,9 +479,9 @@ func (s *DatasetsTestSuite) TestCursor() { // "bar" skipped by the cursor, only "baz" and "buz" is returned. The cursor // is inclusive, so "baz" is included. - if s.Len(queryResult.Matches, 2) { - s.Equal("baz", queryResult.Matches[0].Data["foo"]) - s.Equal("buz", queryResult.Matches[1].Data["foo"]) + if s.Len(queryResult.Tables[0].Columns[0], 2) { + s.Equal("baz", queryResult.Tables[0].Columns[0][0]) + s.Equal("buz", queryResult.Tables[0].Columns[0][1]) } } diff --git a/axiom/datasets_test.go b/axiom/datasets_test.go index be544ea0..dd658291 100644 --- a/axiom/datasets_test.go +++ b/axiom/datasets_test.go @@ -21,169 +21,285 @@ import ( ) const actQueryResp = `{ - "request": { - "startTime": "2021-07-20T16:34:57.911170243Z", - "endTime": "2021-08-19T16:34:57.885821616Z", - "resolution": "", - "aggregations": null, - "groupBy": null, - "order": null, - "limit": 1000, - "virtualFields": null, - "project": null, - "cursor": "", - "includeCursor": false - }, - "status": { - "elapsedTime": 542114, - "blocksExamined": 4, - "rowsExamined": 142655, - "rowsMatched": 142655, - "numGroups": 0, - "isPartial": false, - "cacheStatus": 1, - "minBlockTime": "2020-11-19T11:06:31.569475746Z", - "maxBlockTime": "2020-11-27T12:06:38.966791794Z" - }, - "matches": [ - { - "_time": "2020-11-19T11:06:31.569475746Z", - "_sysTime": "2020-11-19T11:06:31.581384524Z", - "_rowId": "c776x1uafkpu-4918f6cb9000095-0", - "data": { - "agent": "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)", - "bytes": 0, - "referrer": "-", - "remote_ip": "93.180.71.3", - "remote_user": "-", - "request": "GET /downloads/product_1 HTTP/1.1", - "response": 304, - "time": "17/May/2015:08:05:32 +0000" + "tables": [ + { + "name": "0", + "sources": [ + { + "name": "test" } - }, - { - "_time": "2020-11-19T11:06:31.569479846Z", - "_sysTime": "2020-11-19T11:06:31.581384524Z", - "_rowId": "c776x1uafnvq-4918f6cb9000095-1", - "data": { - "agent": "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)", - "bytes": 0, - "referrer": "-", - "remote_ip": "93.180.71.3", - "remote_user": "-", - "request": "GET /downloads/product_1 
HTTP/1.1", - "response": 304, - "time": "17/May/2015:08:05:23 +0000" + ], + "fields": [ + { + "name": "_time", + "type": "string" + }, + { + "name": "_sysTime", + "type": "string" + }, + { + "name": "_rowId", + "type": "string" + }, + { + "name": "agent", + "type": "string" + }, + { + "name": "bytes", + "type": "float64" + }, + { + "name": "referrer", + "type": "string" + }, + { + "name": "remote_ip", + "type": "string" + }, + { + "name": "remote_user", + "type": "string" + }, + { + "name": "request", + "type": "string" + }, + { + "name": "response", + "type": "float64" + }, + { + "name": "time", + "type": "string" } - } - ], - "buckets": { - "series": [], - "totals": [] - }, - "datasetNames": [ - "test" - ] - }` + ], + "range": { + "field": "_time", + "start": "2023-03-21T13:38:51.735448191Z", + "end": "2023-03-28T13:38:51.735448191Z" + }, + "columns": [ + [ + "2020-11-19T11:06:31.569475746Z", + "2020-11-19T11:06:31.569479846Z" + ], + [ + "2020-11-19T11:06:31.581384524Z", + "2020-11-19T11:06:31.581384524Z" + ], + [ + "c776x1uafkpu-4918f6cb9000095-0", + "c776x1uafnvq-4918f6cb9000095-1" + ], + [ + "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)", + "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)" + ], + [ + 0, + 0 + ], + [ + "-", + "-" + ], + [ + "93.180.71.3", + "93.180.71.3" + ], + [ + "-", + "-" + ], + [ + "GET /downloads/product_1 HTTP/1.1", + "GET /downloads/product_1 HTTP/1.1" + ], + [ + 304, + 304 + ], + [ + "17/May/2015:08:05:32 +0000", + "17/May/2015:08:05:23 +0000" + ] + ] + } + ], + "status": { + "minCursor": "c776x1uafkpu-4918f6cb9000095-0", + "maxCursor": "c776x1uafnvq-4918f6cb9000095-1", + "elapsedTime": 542114, + "rowsExamined": 142655, + "rowsMatched": 142655 + } +}` const actLegacyQueryResp = `{ - "status": { - "elapsedTime": 542114, - "blocksExamined": 4, - "rowsExamined": 142655, - "rowsMatched": 142655, - "numGroups": 0, - "isPartial": false, - "cacheStatus": 1, - "minBlockTime": "2020-11-19T11:06:31.569475746Z", - "maxBlockTime": "2020-11-27T12:06:38.966791794Z" + "status": { + "minCursor": "c776x1uafkpu-4918f6cb9000095-0", + "maxCursor": "c776x1uafnvq-4918f6cb9000095-1", + "elapsedTime": 542114, + "blocksExamined": 4, + "rowsExamined": 142655, + "rowsMatched": 142655, + "numGroups": 0, + "isPartial": false, + "cacheStatus": 1, + "minBlockTime": "2020-11-19T11:06:31.569475746Z", + "maxBlockTime": "2020-11-27T12:06:38.966791794Z" + }, + "matches": [ + { + "_time": "2020-11-19T11:06:31.569475746Z", + "_sysTime": "2020-11-19T11:06:31.581384524Z", + "_rowId": "c776x1uafkpu-4918f6cb9000095-0", + "data": { + "agent": "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)", + "bytes": 0, + "referrer": "-", + "remote_ip": "93.180.71.3", + "remote_user": "-", + "request": "GET /downloads/product_1 HTTP/1.1", + "response": 304, + "time": "17/May/2015:08:05:32 +0000" + } }, - "matches": [ - { - "_time": "2020-11-19T11:06:31.569475746Z", - "_sysTime": "2020-11-19T11:06:31.581384524Z", - "_rowId": "c776x1uafkpu-4918f6cb9000095-0", - "data": { - "agent": "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)", - "bytes": 0, - "referrer": "-", - "remote_ip": "93.180.71.3", - "remote_user": "-", - "request": "GET /downloads/product_1 HTTP/1.1", - "response": 304, - "time": "17/May/2015:08:05:32 +0000" - } - }, - { - "_time": "2020-11-19T11:06:31.569479846Z", - "_sysTime": "2020-11-19T11:06:31.581384524Z", - "_rowId": "c776x1uafnvq-4918f6cb9000095-1", - "data": { - "agent": "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)", - "bytes": 0, - "referrer": "-", - "remote_ip": "93.180.71.3", - "remote_user": 
"-", - "request": "GET /downloads/product_1 HTTP/1.1", - "response": 304, - "time": "17/May/2015:08:05:23 +0000" - } + { + "_time": "2020-11-19T11:06:31.569479846Z", + "_sysTime": "2020-11-19T11:06:31.581384524Z", + "_rowId": "c776x1uafnvq-4918f6cb9000095-1", + "data": { + "agent": "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)", + "bytes": 0, + "referrer": "-", + "remote_ip": "93.180.71.3", + "remote_user": "-", + "request": "GET /downloads/product_1 HTTP/1.1", + "response": 304, + "time": "17/May/2015:08:05:23 +0000" } - ], - "buckets": { - "series": [], - "totals": [] } - }` + ] +}` var ( expQueryRes = &query.Result{ - Datasets: []string{"test"}, - Status: query.Status{ - ElapsedTime: time.Microsecond * 542_114, - BlocksExamined: 4, - RowsExamined: 142655, - RowsMatched: 142655, - NumGroups: 0, - IsPartial: false, - MinBlockTime: parseTimeOrPanic("2020-11-19T11:06:31.569475746Z"), - MaxBlockTime: parseTimeOrPanic("2020-11-27T12:06:38.966791794Z"), - }, - Matches: []query.Entry{ + Tables: []query.Table{ { - Time: parseTimeOrPanic("2020-11-19T11:06:31.569475746Z"), - SysTime: parseTimeOrPanic("2020-11-19T11:06:31.581384524Z"), - RowID: "c776x1uafkpu-4918f6cb9000095-0", - Data: map[string]any{ - "agent": "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)", - "bytes": float64(0), - "referrer": "-", - "remote_ip": "93.180.71.3", - "remote_user": "-", - "request": "GET /downloads/product_1 HTTP/1.1", - "response": float64(304), - "time": "17/May/2015:08:05:32 +0000", + Name: "0", + Sources: []query.Source{ + { + Name: "test", + }, }, - }, - { - Time: parseTimeOrPanic("2020-11-19T11:06:31.569479846Z"), - SysTime: parseTimeOrPanic("2020-11-19T11:06:31.581384524Z"), - RowID: "c776x1uafnvq-4918f6cb9000095-1", - Data: map[string]any{ - "agent": "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)", - "bytes": float64(0), - "referrer": "-", - "remote_ip": "93.180.71.3", - "remote_user": "-", - "request": "GET /downloads/product_1 HTTP/1.1", - "response": float64(304), - "time": "17/May/2015:08:05:23 +0000", + Fields: []query.Field{ + { + Name: "_time", + Type: "string", + }, + { + Name: "_sysTime", + Type: "string", + }, + { + Name: "_rowId", + Type: "string", + }, + { + Name: "agent", + Type: "string", + }, + { + Name: "bytes", + Type: "float64", + }, + { + Name: "referrer", + Type: "string", + }, + { + Name: "remote_ip", + Type: "string", + }, + { + Name: "remote_user", + Type: "string", + }, + { + Name: "request", + Type: "string", + }, + { + Name: "response", + Type: "float64", + }, + { + Name: "time", + Type: "string", + }, + }, + Range: &query.RangeInfo{ + Field: "_time", + Start: parseTimeOrPanic("2023-03-21T13:38:51.735448191Z"), + End: parseTimeOrPanic("2023-03-28T13:38:51.735448191Z"), + }, + Columns: []query.Column{ + []any{ + "2020-11-19T11:06:31.569475746Z", + "2020-11-19T11:06:31.569479846Z", + }, + []any{ + "2020-11-19T11:06:31.581384524Z", + "2020-11-19T11:06:31.581384524Z", + }, + []any{ + "c776x1uafkpu-4918f6cb9000095-0", + "c776x1uafnvq-4918f6cb9000095-1", + }, + []any{ + "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)", + "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)", + }, + []any{ + float64(0), + float64(0), + }, + []any{ + "-", + "-", + }, + []any{ + "93.180.71.3", + "93.180.71.3", + }, + []any{ + "-", + "-", + }, + []any{ + "GET /downloads/product_1 HTTP/1.1", + "GET /downloads/product_1 HTTP/1.1", + }, + []any{ + float64(304), + float64(304), + }, + []any{ + "17/May/2015:08:05:32 +0000", + "17/May/2015:08:05:23 +0000", + }, }, }, }, - Buckets: query.Timeseries{ - Series: 
[]query.Interval{}, - Totals: []query.EntryGroup{}, + Status: query.Status{ + ElapsedTime: time.Microsecond * 542_114, + MinCursor: "c776x1uafkpu-4918f6cb9000095-0", + MaxCursor: "c776x1uafnvq-4918f6cb9000095-1", + RowsExamined: 142655, + RowsMatched: 142655, }, TraceID: "abc", } @@ -191,6 +307,8 @@ var ( expLegacyQueryRes = &querylegacy.Result{ Status: querylegacy.Status{ ElapsedTime: time.Microsecond * 542_114, + MinCursor: "c776x1uafkpu-4918f6cb9000095-0", + MaxCursor: "c776x1uafnvq-4918f6cb9000095-1", BlocksExamined: 4, RowsExamined: 142655, RowsMatched: 142655, @@ -231,10 +349,6 @@ var ( }, }, }, - Buckets: querylegacy.Timeseries{ - Series: []querylegacy.Interval{}, - Totals: []querylegacy.EntryGroup{}, - }, SavedQueryID: "fyTFUldK4Z5219rWaz", TraceID: "abc", } @@ -949,7 +1063,9 @@ func TestDatasetsService_IngestChannel_BufferedSlow(t *testing.T) { func TestDatasetsService_Query(t *testing.T) { hf := func(w http.ResponseWriter, r *http.Request) { assert.Equal(t, http.MethodPost, r.Method) - assert.Equal(t, mediaTypeJSON, r.Header.Get("content-type")) + assert.Equal(t, mediaTypeJSON, r.Header.Get("Content-Type")) + + assert.Equal(t, "tabular", r.URL.Query().Get("format")) var req aplQueryRequest err := json.NewDecoder(r.Body).Decode(&req) @@ -976,28 +1092,6 @@ func TestDatasetsService_Query(t *testing.T) { assert.Equal(t, expQueryRes, res) } -func TestDatasetsService_Query_WithGroupBy(t *testing.T) { - hf := func(w http.ResponseWriter, _ *http.Request) { - w.Header().Set("Content-Type", mediaTypeJSON) - w.Header().Set("X-Axiom-Trace-Id", "abc") - _, _ = fmt.Fprint(w, `{ - "request": { - "groupBy": [ - "code", - "path" - ] - } - }`) - } - - client := setup(t, "POST /v1/datasets/_apl", hf) - - res, err := client.Datasets.Query(context.Background(), "test") - require.NoError(t, err) - - assert.Equal(t, []string{"code", "path"}, res.GroupBy) -} - func TestDatasetsService_QueryLegacy(t *testing.T) { hf := func(w http.ResponseWriter, r *http.Request) { assert.Equal(t, http.MethodPost, r.Method) diff --git a/axiom/query/doc.go b/axiom/query/doc.go index 37e278d2..b6111814 100644 --- a/axiom/query/doc.go +++ b/axiom/query/doc.go @@ -4,4 +4,28 @@ // Usage: // // import "github.com/axiomhq/axiom-go/axiom/query" +// +// # Tabular Result Format +// +// Query results are returned in a tabular format. Each query [Result] contains +// one or more [Table]s. Each [Table] contains a list of [Field]s and a list of +// [Column]s. All [Column]s are equally sized and there are as much [Column]s as +// there are [Field]s. +// +// In case you want to work with events that are usually composed of multiple +// fields, you will find the values separated by [Column]. To aid with working +// with events in the tabular result format, the [Table] type provides the +// [Table.Rows] method that returns an [iter.Iter] over the [Row]s of the +// [Table]. Under the hood, each call to [iter.Iter.Next] composes a [Row] from +// the [Column]s of the [Table]. Alternatively, you can compose an [iter.Iter] +// over the [Row]s yourself using the [Rows] function. This allows for passing +// in a subset of the [Column]s of the [Table] to work with: +// +// // Only build rows from the first two columns of the table. Returns an +// // iterator for over the rows. +// rows := query.Rows(result.Tables[0].Columns[0:2]) +// +// Keep in mind that it is preferable to alter the APL query to only return the +// fields you are interested in instead of working with a subset of the columns +// after the query has been executed. 
package query diff --git a/axiom/query/result.go b/axiom/query/result.go index a311eae0..fdd2be31 100644 --- a/axiom/query/result.go +++ b/axiom/query/result.go @@ -2,175 +2,116 @@ package query import ( "encoding/json" - "fmt" - "strings" "time" ) -//go:generate go run golang.org/x/tools/cmd/stringer -type=MessageCode,MessagePriority -linecomment -output=result_string.go - -// MessageCode represents the code of a message associated with a query. -type MessageCode uint8 - -// All available message codes. -const ( - emptyMessageCode MessageCode = iota // - - VirtualFieldFinalizeError // virtual_field_finalize_error - MissingColumn // missing_column - LicenseLimitForQueryWarning // license_limit_for_query_warning - DefaultLimitWarning // default_limit_warning - - // CompilerWarning is a generic code. Please inspect the message text for - // more details. - CompilerWarning // apl_ -) - -func messageCodeFromString(s string) (mc MessageCode, err error) { - if strings.HasPrefix(s, CompilerWarning.String()) { - return CompilerWarning, nil - } - - switch s { - case emptyMessageCode.String(): - mc = emptyMessageCode - case VirtualFieldFinalizeError.String(): - mc = VirtualFieldFinalizeError - case MissingColumn.String(): - mc = MissingColumn - case LicenseLimitForQueryWarning.String(): - mc = LicenseLimitForQueryWarning - case DefaultLimitWarning.String(): - mc = DefaultLimitWarning - default: - err = fmt.Errorf("unknown message code %q", s) - } - - return mc, err -} - -// MarshalJSON implements [json.Marshaler]. It is in place to marshal the -// message code to its string representation because that's what the server -// expects. -func (mc MessageCode) MarshalJSON() ([]byte, error) { - return json.Marshal(mc.String()) -} - -// UnmarshalJSON implements [json.Unmarshaler]. It is in place to unmarshal the -// message code from the string representation the server returns. -func (mc *MessageCode) UnmarshalJSON(b []byte) (err error) { - var s string - if err = json.Unmarshal(b, &s); err != nil { - return err - } - - *mc, err = messageCodeFromString(s) - - return err -} - -// MessagePriority represents the priority of a message associated with a query. -type MessagePriority uint8 - -// All available message priorities. -const ( - emptyMessagePriority MessagePriority = iota // - - Trace // trace - Debug // debug - Info // info - Warn // warn - Error // error - Fatal // fatal -) - -func messagePriorityFromString(s string) (mp MessagePriority, err error) { - switch s { - case emptyMessagePriority.String(): - mp = emptyMessagePriority - case Trace.String(): - mp = Trace - case Debug.String(): - mp = Debug - case Info.String(): - mp = Info - case Warn.String(): - mp = Warn - case Error.String(): - mp = Error - case Fatal.String(): - mp = Fatal - default: - err = fmt.Errorf("unknown message priority %q", s) - } - - return mp, err -} - -// MarshalJSON implements [json.Marshaler]. It is in place to marshal the -// message priority to its string representation because that's what the server -// expects. -func (mp MessagePriority) MarshalJSON() ([]byte, error) { - return json.Marshal(mp.String()) -} - -// UnmarshalJSON implements [json.Unmarshaler]. It is in place to unmarshal the -// message priority from the string representation the server returns. -func (mp *MessagePriority) UnmarshalJSON(b []byte) (err error) { - var s string - if err = json.Unmarshal(b, &s); err != nil { - return err - } - - *mp, err = messagePriorityFromString(s) - - return err -} - // Result is the result of an APL query. 
type Result struct { - // The datasets that were queried in order to create the result. - Datasets []string `json:"datasetNames"` + // Tables in the query result. + Tables []Table `json:"tables"` // Status of the query result. Status Status `json:"status"` - // Matches are the events that matched the query. - Matches []Entry `json:"matches"` - // Buckets are the time series buckets. - Buckets Timeseries `json:"buckets"` - // GroupBy is a list of field names to group the query result by. Only valid - // when at least one aggregation is specified. - GroupBy []string `json:"-"` // TraceID is the ID of the trace that was generated by the server for this // results query request. TraceID string `json:"-"` } -// Status is the status of a query result. +// Table in the [Result] of an APL query. +type Table struct { + // Name of the table. Default name for unnamed results is "0", "1", "2", ... + // etc. + Name string `json:"name"` + // Sources are the datasets that were consulted in order to create the + // table. + Sources []Source `json:"sources"` + // Fields in the table matching the order of the [Columns] (e.g. the + // [Column] at index 0 has the values for the [Field] at index 0). + Fields []Field `json:"fields"` + // Order of the fields in the table. + Order []Order `json:"order"` + // Groups are the groups of the table. + Groups []Group `json:"groups"` + // Range specifies the window the query was restricted to. Nil if the query + // was not restricted to a time window. + Range *RangeInfo `json:"range"` + // Buckets defines if the query is bucketed (usually on the "_time" field). + // Nil if the query returns a non-bucketed result. + Buckets *BucketInfo `json:"buckets"` + // Columns in the table matching the order of the [Fields] (e.g. the + // [Column] at index 0 has the values for the [Field] at index 0). In case + // of sub-groups, rows will repeat the group value. + Columns []Column `json:"columns"` +} + +// Field in a [Table]. +type Field struct { + // Name of the field. + Name string `json:"name"` + // Type of the field. Can also be composite types which are types separated + // by a horizontal line "|". + Type string `json:"type"` + // Aggregation is the aggregation applied to the field. + Aggregation Aggregation `json:"agg"` +} + +// Aggregation that is applied to a [Field] in a [Table]. +type Aggregation struct { + // Name of the aggregation. + Name string `json:"name"` + // Args are the arguments of the aggregation. + Args []any `json:"args"` +} + +// Source that was consulted in order to create a [Table]. +type Source struct { + // Name of the source. + Name string `json:"name"` +} + +// Order of a [Field] in a [Table]. +type Order struct { + // Field is the name of the field to order by. + Field string `json:"field"` + // Desc is true if the order is descending. Otherwise the order is + // ascending. + Desc bool `json:"desc"` +} + +// Group in a [Table]. +type Group struct { + // Name of the group. + Name string `json:"name"` +} + +// RangeInfo specifies the window a query was restricted to. +type RangeInfo struct { + // Field specifies the field name on which the query range was restricted. + // Usually "_time": + Field string + // Start is the starting time the query is limited by. Usually the start of + // the time window. Queries are restricted to the interval [start,end). + Start time.Time + // End is the ending time the query is limited by. Usually the end of the + // time window. Queries are restricted to the interval [start,end). 
+ End time.Time +} + +// BucketInfo captures information about how a grouped query is sorted into +// buckets. Usually buckets are created on the "_time" column, +type BucketInfo struct { + // Field specifies the field used to create buckets on. Usually this would + // be "_time". + Field string + // An integer or float representing the fixed bucket size. + // When the bucket field is "_time" this value is in nanoseconds. + Size any +} + +// Column in a [Table] containing the raw values of a [Field]. +type Column []any + +// Status of an APL query [Result]. type Status struct { - // ElapsedTime is the duration it took the query to execute. - ElapsedTime time.Duration `json:"elapsedTime"` - // BlocksExamined is the amount of blocks that have been examined by the - // query. - BlocksExamined uint64 `json:"blocksExamined"` - // RowsExamined is the amount of rows that have been examined by the query. - RowsExamined uint64 `json:"rowsExamined"` - // RowsMatched is the amount of rows that matched the query. - RowsMatched uint64 `json:"rowsMatched"` - // NumGroups is the amount of groups returned by the query. - NumGroups uint32 `json:"numGroups"` - // IsPartial describes if the query result is a partial result. - IsPartial bool `json:"isPartial"` - // ContinuationToken is populated when IsPartial is true and must be passed - // to the next query request to retrieve the next result set. - ContinuationToken string `json:"continuationToken"` - // IsEstimate describes if the query result is estimated. - IsEstimate bool `json:"isEstimate"` - // MinBlockTime is the timestamp of the oldest block examined. - MinBlockTime time.Time `json:"minBlockTime"` - // MaxBlockTime is the timestamp of the newest block examined. - MaxBlockTime time.Time `json:"maxBlockTime"` - // Messages associated with the query. - Messages []Message `json:"messages"` // MinCursor is the id of the oldest row, as seen server side. May be lower // than what the results include if the server scanned more data than // included in the results. Can be used to efficiently resume time-sorted @@ -181,18 +122,12 @@ type Status struct { // included in the results. Can be used to efficiently resume time-sorted // non-aggregating queries (i.e. filtering only). MaxCursor string `json:"maxCursor"` -} - -// MarshalJSON implements [json.Marshaler]. It is in place to marshal the -// elapsed time into its microsecond representation because that's what the -// server expects. -func (s Status) MarshalJSON() ([]byte, error) { - type localStatus Status - - // Set to the value in microseconds. - s.ElapsedTime = time.Duration(s.ElapsedTime.Microseconds()) - - return json.Marshal(localStatus(s)) + // ElapsedTime is the duration it took the query to execute. + ElapsedTime time.Duration `json:"elapsedTime"` + // RowsExamined is the amount of rows that have been examined by the query. + RowsExamined uint64 `json:"rowsExamined"` + // RowsMatched is the amount of rows that matched the query. + RowsMatched uint64 `json:"rowsMatched"` } // UnmarshalJSON implements [json.Unmarshaler]. It is in place to unmarshal the @@ -211,67 +146,3 @@ func (s *Status) UnmarshalJSON(b []byte) error { return nil } - -// Message is a message associated with a query result. -type Message struct { - // Priority of the message. - Priority MessagePriority `json:"priority"` - // Code of the message. - Code MessageCode `json:"code"` - // Count describes how often a message of this type was raised by the query. 
- Count uint `json:"count"` - // Text is a human readable text representation of the message. - Text string `json:"msg"` -} - -// Entry is an event that matched a query and is thus part of the result set. -type Entry struct { - // Time is the time the event occurred. Matches SysTime if not specified - // during ingestion. - Time time.Time `json:"_time"` - // SysTime is the time the event was recorded on the server. - SysTime time.Time `json:"_sysTime"` - // RowID is the unique ID of the event row. It can be used as a cursor to - // resume a query. See [query.SetCursor]. - RowID string `json:"_rowId"` - // Data contains the raw data of the event (with filters and aggregations - // applied). - Data map[string]any `json:"data"` -} - -// Timeseries are queried time series. -type Timeseries struct { - // Series are the intervals that build a time series. - Series []Interval `json:"series"` - // Totals of the time series. - Totals []EntryGroup `json:"totals"` -} - -// Interval is the interval of queried time series. -type Interval struct { - // StartTime of the interval. - StartTime time.Time `json:"startTime"` - // EndTime of the interval. - EndTime time.Time `json:"endTime"` - // Groups of the interval. - Groups []EntryGroup `json:"groups"` -} - -// EntryGroup is a group of queried event. -type EntryGroup struct { - // ID is the unique the group. - ID uint64 `json:"id"` - // Group maps the fieldnames to the unique values for the entry. - Group map[string]any `json:"group"` - // Aggregations of the group. - Aggregations []EntryGroupAgg `json:"aggregations"` -} - -// EntryGroupAgg is an aggregation which is part of a group of queried events. -type EntryGroupAgg struct { - // Alias is the aggregations alias. If it wasn't specified at query time, it - // is the uppercased string representation of the aggregation operation. - Alias string `json:"op"` - // Value is the result value of the aggregation. - Value any `json:"value"` -} diff --git a/axiom/query/result_string.go b/axiom/query/result_string.go deleted file mode 100644 index 082c0f3b..00000000 --- a/axiom/query/result_string.go +++ /dev/null @@ -1,51 +0,0 @@ -// Code generated by "stringer -type=MessageCode,MessagePriority -linecomment -output=result_string.go"; DO NOT EDIT. - -package query - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[emptyMessageCode-0] - _ = x[VirtualFieldFinalizeError-1] - _ = x[MissingColumn-2] - _ = x[LicenseLimitForQueryWarning-3] - _ = x[DefaultLimitWarning-4] - _ = x[CompilerWarning-5] -} - -const _MessageCode_name = "virtual_field_finalize_errormissing_columnlicense_limit_for_query_warningdefault_limit_warningapl_" - -var _MessageCode_index = [...]uint8{0, 0, 28, 42, 73, 94, 98} - -func (i MessageCode) String() string { - if i >= MessageCode(len(_MessageCode_index)-1) { - return "MessageCode(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _MessageCode_name[_MessageCode_index[i]:_MessageCode_index[i+1]] -} -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
- var x [1]struct{} - _ = x[emptyMessagePriority-0] - _ = x[Trace-1] - _ = x[Debug-2] - _ = x[Info-3] - _ = x[Warn-4] - _ = x[Error-5] - _ = x[Fatal-6] -} - -const _MessagePriority_name = "tracedebuginfowarnerrorfatal" - -var _MessagePriority_index = [...]uint8{0, 0, 5, 10, 14, 18, 23, 28} - -func (i MessagePriority) String() string { - if i >= MessagePriority(len(_MessagePriority_index)-1) { - return "MessagePriority(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _MessagePriority_name[_MessagePriority_index[i]:_MessagePriority_index[i+1]] -} diff --git a/axiom/query/result_test.go b/axiom/query/result_test.go index c1a35fe3..990b9169 100644 --- a/axiom/query/result_test.go +++ b/axiom/query/result_test.go @@ -1,96 +1,13 @@ package query import ( - "encoding/json" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - - "github.com/axiomhq/axiom-go/internal/test/testhelper" -) - -var ( - expStatus = Status{ - ElapsedTime: time.Second, - BlocksExamined: 10, - RowsExamined: 100_000, - RowsMatched: 2, - NumGroups: 1, - IsPartial: true, - ContinuationToken: "123", - IsEstimate: true, - MinBlockTime: parseTimeOrPanic("2022-08-15T10:55:53Z"), - MaxBlockTime: parseTimeOrPanic("2022-08-15T11:55:53Z"), - Messages: []Message{ - { - Priority: Error, - Code: MissingColumn, - Count: 2, - Text: "missing column", - }, - { - Priority: Warn, - Code: CompilerWarning, - Count: 1, - Text: "some apl compiler warning", - }, - }, - MinCursor: "c776x1uafkpu-4918f6cb9000095-0", - MaxCursor: "c776x1uafnvq-4918f6cb9000095-1", - } - - expStatusJSON = `{ - "elapsedTime": 1000000, - "blocksExamined": 10, - "rowsExamined": 100000, - "rowsMatched": 2, - "numGroups": 1, - "isPartial": true, - "continuationToken": "123", - "isEstimate": true, - "minBlockTime": "2022-08-15T10:55:53Z", - "maxBlockTime": "2022-08-15T11:55:53Z", - "messages": [ - { - "priority": "error", - "code": "missing_column", - "count": 2, - "msg": "missing column" - }, - { - "priority": "warn", - "code": "apl_convertingfromtypestotypes_1", - "count": 1, - "msg": "some apl compiler warning" - } - ], - "minCursor": "c776x1uafkpu-4918f6cb9000095-0", - "maxCursor": "c776x1uafnvq-4918f6cb9000095-1" - }` ) -func TestStatus(t *testing.T) { - b, err := json.Marshal(expStatus) - require.NoError(t, err) - require.NotEmpty(t, b) - - var act Status - err = json.Unmarshal(b, &act) - require.NoError(t, err) - - assert.Equal(t, expStatus, act) -} - -func TestStatus_MarshalJSON(t *testing.T) { - act, err := expStatus.MarshalJSON() - require.NoError(t, err) - require.NotEmpty(t, act) - - testhelper.JSONEqExp(t, expStatusJSON, string(act), []string{"messages.1.code"}) -} - func TestStatus_UnmarshalJSON(t *testing.T) { exp := Status{ ElapsedTime: time.Second, @@ -102,75 +19,3 @@ func TestStatus_UnmarshalJSON(t *testing.T) { assert.Equal(t, exp, act) } - -func TestMessageCode_Unmarshal(t *testing.T) { - var act struct { - MessageCode MessageCode `json:"code"` - } - err := json.Unmarshal([]byte(`{ "code": "missing_column" }`), &act) - require.NoError(t, err) - - assert.Equal(t, MissingColumn, act.MessageCode) -} - -func TestMessageCode_String(t *testing.T) { - // Check outer bounds. 
- assert.Empty(t, MessageCode(0).String()) - assert.Empty(t, emptyMessageCode.String()) - assert.Equal(t, emptyMessageCode, MessageCode(0)) - assert.Contains(t, (CompilerWarning + 1).String(), "MessageCode(") - - for mc := VirtualFieldFinalizeError; mc <= CompilerWarning; mc++ { - s := mc.String() - assert.NotEmpty(t, s) - assert.NotContains(t, s, "MessageCode(") - } -} - -func TestMessageCodeFromString(t *testing.T) { - for mc := VirtualFieldFinalizeError; mc <= CompilerWarning; mc++ { - parsed, err := messageCodeFromString(mc.String()) - assert.NoError(t, err) - assert.Equal(t, mc, parsed) - } -} - -func TestMessagePriority_Unmarshal(t *testing.T) { - var act struct { - MessagePriority MessagePriority `json:"priority"` - } - err := json.Unmarshal([]byte(`{ "priority": "info" }`), &act) - require.NoError(t, err) - - assert.Equal(t, Info, act.MessagePriority) -} - -func TestMessagePriority_String(t *testing.T) { - // Check outer bounds. - assert.Empty(t, MessagePriority(0).String()) - assert.Empty(t, emptyMessagePriority.String()) - assert.Equal(t, emptyMessagePriority, MessagePriority(0)) - assert.Contains(t, (Fatal + 1).String(), "MessagePriority(") - - for mp := Trace; mp <= Fatal; mp++ { - s := mp.String() - assert.NotEmpty(t, s) - assert.NotContains(t, s, "MessagePriority(") - } -} - -func TestMessagePriorityFromString(t *testing.T) { - for mp := Trace; mp <= Fatal; mp++ { - parsedMP, err := messagePriorityFromString(mp.String()) - assert.NoError(t, err) - assert.Equal(t, mp, parsedMP) - } -} - -func parseTimeOrPanic(value string) time.Time { - t, err := time.Parse(time.RFC3339, value) - if err != nil { - panic(err) - } - return t -} From 6debfe2aa0c2967d3c60ed8f77a0722e864339b1 Mon Sep 17 00:00:00 2001 From: Lukas Malkmus Date: Wed, 25 Jan 2023 00:02:27 +0100 Subject: [PATCH 2/8] feat(query): iterator for columns and rows --- README.md | 17 +++-- axiom/query/iter/doc.go | 13 ++++ axiom/query/iter/iter.go | 95 ++++++++++++++++++++++++++ axiom/query/iter/iter_test.go | 124 ++++++++++++++++++++++++++++++++++ axiom/query/result.go | 15 ++++ axiom/query/row.go | 41 +++++++++++ axiom/query/row_test.go | 55 +++++++++++++++ examples/README.md | 2 +- examples/query/main.go | 19 ++++-- 9 files changed, 370 insertions(+), 11 deletions(-) create mode 100644 axiom/query/iter/doc.go create mode 100644 axiom/query/iter/iter.go create mode 100644 axiom/query/iter/iter_test.go create mode 100644 axiom/query/row.go create mode 100644 axiom/query/row_test.go diff --git a/README.md b/README.md index 44fecafa..d91b76be 100644 --- a/README.md +++ b/README.md @@ -93,22 +93,29 @@ func main() { client, err := axiom.NewClient() if err != nil { - log.Fatalln(err) + log.Fatal(err) } if _, err = client.IngestEvents(ctx, "my-dataset", []axiom.Event{ {ingest.TimestampField: time.Now(), "foo": "bar"}, {ingest.TimestampField: time.Now(), "bar": "foo"}, }); err != nil { - log.Fatalln(err) + log.Fatal(err) } res, err := client.Query(ctx, "['my-dataset'] | where foo == 'bar' | limit 100") if err != nil { - log.Fatalln(err) + log.Fatal(err) + } else if res.Status.RowsMatched == 0 { + log.Fatal("No matches found") } - for _, match := range res.Matches { - fmt.Println(match.Data) + + rows := res.Tables[0].Rows() + if err := rows.Range(ctx, func(_ context.Context, row query.Row) error { + _, err := fmt.Println(row) + return err + }); err != nil { + log.Fatal(err) } } ``` diff --git a/axiom/query/iter/doc.go b/axiom/query/iter/doc.go new file mode 100644 index 00000000..b2b20984 --- /dev/null +++ b/axiom/query/iter/doc.go 
@@ -0,0 +1,13 @@ +// Package iter provides a generic iterator implementation and helper functions +// to construct iterators from slices and ranges. +// +// To construct an [Iter], use the [Range] or [Slice] functions: +// +// // Construct an iterator that returns a, b and c on successive calls. +// slice := []string{"a", "b", "c"} +// itr := iter.Slice(slice, func(_ context.Context, item string) (string, error) { +// return item, nil +// }) +// +// An [Iter] always returns a [Done] error when it is exhausted. +package iter diff --git a/axiom/query/iter/iter.go b/axiom/query/iter/iter.go new file mode 100644 index 00000000..b0903fda --- /dev/null +++ b/axiom/query/iter/iter.go @@ -0,0 +1,95 @@ +package iter + +import ( + "context" + "errors" +) + +// Done is returned if the iterator does not contain any more elements. +// +//nolint:revive,stylecheck // No leading "Err" as "Done" is like [io.EOF]. +var Done = errors.New("no more elements in iterator") + +// Element is a type that can be iterated over. +type Element any + +// Iter is a function that returns the next element in the iterator. It returns +// the Done error if the iterator does not contain any more elements. +type Iter[T Element] func(context.Context) (T, error) + +// Range creates an iterator that executes the given function for each index in +// the specified range. +func Range[T Element](start, end int, f func(context.Context, int) (T, error)) Iter[T] { + var idx = start + return func(ctx context.Context) (t T, err error) { + if ctx.Err() != nil { + return t, ctx.Err() + } + if idx > end { + return t, Done + } + t, err = f(ctx, idx) + idx++ + return + } +} + +// Slice creates an iterator that executes the given function for each element +// in the slice. +func Slice[T Element](slice []T, f func(context.Context, T) (T, error)) Iter[T] { + var ( + idx = 0 + end = len(slice) - 1 + ) + return func(ctx context.Context) (t T, err error) { + if ctx.Err() != nil { + return t, ctx.Err() + } + if idx > end { + return t, Done + } + t, err = f(ctx, slice[idx]) + idx++ + return + } +} + +// Next returns the next [Element] in the iterator. +func (itr Iter[T]) Next(ctx context.Context) (T, error) { + return itr(ctx) +} + +// Take returns up to n elements from the iterator. The iterator is only +// guaranteed to return a slice of length n if the error is [nil]. +func (itr Iter[T]) Take(ctx context.Context, n int) ([]T, error) { + res := make([]T, n) + for i := 0; i < n; i++ { + if ctx.Err() != nil { + return res[:i], ctx.Err() + } + var err error + if res[i], err = itr.Next(ctx); err != nil { + return res[:i], err + } + } + return res, nil +} + +// Range executes the given function for each [Element] in the iterator until it +// is exhausted in which case it returns [nil] instead of [Done]. 
+func (itr Iter[T]) Range(ctx context.Context, f func(context.Context, T) error) error { + for { + if err := ctx.Err(); err != nil { + return err + } + t, err := itr.Next(ctx) + if err != nil { + if err == Done { + return nil + } + return err + } else if err := f(ctx, t); err != nil { + return err + } + } +} diff --git a/axiom/query/iter/iter_test.go b/axiom/query/iter/iter_test.go new file mode 100644 index 00000000..6b6c8ee8 --- /dev/null +++ b/axiom/query/iter/iter_test.go @@ -0,0 +1,124 @@ +package iter_test + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/axiomhq/axiom-go/axiom/query/iter" +) + +func TestRange(t *testing.T) { + itr := iter.Range(1, 2, func(_ context.Context, idx int) (int, error) { + return idx, nil + }) + + ctx := context.Background() + + res, err := itr.Next(ctx) + require.NoError(t, err) + + assert.Equal(t, 1, res) + + res, err = itr.Next(ctx) + require.NoError(t, err) + + assert.Equal(t, 2, res) + + res, err = itr.Next(ctx) + require.Error(t, err) + + assert.Equal(t, iter.Done, err) + assert.Zero(t, res) +} + +func TestSlice(t *testing.T) { + slice := []int{1, 2} + itr := iter.Slice(slice, func(_ context.Context, item int) (int, error) { + return item, nil + }) + + ctx := context.Background() + + res, err := itr.Next(ctx) + require.NoError(t, err) + + assert.Equal(t, 1, res) + + res, err = itr.Next(ctx) + require.NoError(t, err) + + assert.Equal(t, 2, res) + + res, err = itr.Next(ctx) + require.Error(t, err) + + assert.Equal(t, iter.Done, err) + assert.Zero(t, res) +} + +func TestIter_Next(t *testing.T) { + itr := iter.Iter[int](func(context.Context) (int, error) { + return 1, nil + }) + + ctx := context.Background() + + res1, _ := itr(ctx) + res2, _ := itr.Next(ctx) + + assert.Equal(t, res1, res2) +} + +func TestIter_Take(t *testing.T) { + itr := iter.Iter[int](func(context.Context) (int, error) { + return 1, nil + }) + + ctx := context.Background() + + res, _ := itr.Take(ctx, 3) + if assert.Len(t, res, 3) { + assert.Equal(t, []int{1, 1, 1}, res) + } +} + +func TestIter_Take_Error(t *testing.T) { + var count int + itr := iter.Iter[int](func(context.Context) (int, error) { + if count > 1 { + return 0, errors.New("an error") + } + count++ + return 1, nil + }) + + ctx := context.Background() + + res, err := itr.Take(ctx, 3) + + if assert.Error(t, err) { + assert.EqualError(t, err, "an error") + } + if assert.Len(t, res, 2) { + assert.Equal(t, []int{1, 1}, res) + } +} + +func TestIter_Range(t *testing.T) { + itr := iter.Range(1, 5, func(_ context.Context, idx int) (int, error) { + return idx, nil + }) + + var res int + err := itr.Range(context.Background(), func(_ context.Context, i int) error { + res += i + return nil + }) + require.NoError(t, err) + + assert.Equal(t, 15, res) +} diff --git a/axiom/query/result.go b/axiom/query/result.go index fdd2be31..9cdf8525 100644 --- a/axiom/query/result.go +++ b/axiom/query/result.go @@ -1,8 +1,11 @@ package query import ( + "context" "encoding/json" "time" + + "github.com/axiomhq/axiom-go/axiom/query/iter" ) // Result is the result of an APL query. @@ -43,6 +46,11 @@ type Table struct { Columns []Column `json:"columns"` } +// Rows returns an iterator over the rows build from the columns the table. +func (t Table) Rows() iter.Iter[Row] { + return Rows(t.Columns) +} + // Field in a [Table]. type Field struct { // Name of the field. 
@@ -110,6 +118,13 @@ type BucketInfo struct { // Column in a [Table] containing the raw values of a [Field]. type Column []any +// Values returns an iterator over the values of the column. +func (c Column) Values() iter.Iter[any] { + return iter.Slice(c, func(_ context.Context, v any) (any, error) { + return v, nil + }) +} + // Status of an APL query [Result]. type Status struct { // MinCursor is the id of the oldest row, as seen server side. May be lower diff --git a/axiom/query/row.go b/axiom/query/row.go new file mode 100644 index 00000000..207041f6 --- /dev/null +++ b/axiom/query/row.go @@ -0,0 +1,41 @@ +package query + +import ( + "context" + + "github.com/axiomhq/axiom-go/axiom/query/iter" +) + +// Row represents a single row of a tabular query [Result]. +type Row []any + +// Values returns an iterator over the values of the row. +func (r Row) Values() iter.Iter[any] { + return iter.Slice(r, func(_ context.Context, v any) (any, error) { + return v, nil + }) +} + +// Rows returns an iterator over the rows build from the columns of a tabular +// query [Result]. +func Rows(columns []Column) iter.Iter[Row] { + // Return an empty iterator if there are no columns or column values. + if len(columns) == 0 || len(columns[0]) == 0 { + return func(context.Context) (Row, error) { + return nil, iter.Done + } + } + + return iter.Range(0, len(columns[0]), func(_ context.Context, idx int) (Row, error) { + if idx >= len(columns[0]) { + return nil, iter.Done + } + + row := make(Row, len(columns)) + for columnIdx, column := range columns { + row[columnIdx] = column[idx] + } + + return row, nil + }) +} diff --git a/axiom/query/row_test.go b/axiom/query/row_test.go new file mode 100644 index 00000000..219df475 --- /dev/null +++ b/axiom/query/row_test.go @@ -0,0 +1,55 @@ +package query_test + +import ( + "context" + "fmt" + "log" + "strings" + + "github.com/axiomhq/axiom-go/axiom/query" + "github.com/axiomhq/axiom-go/axiom/query/iter" +) + +func ExampleRows() { + columns := []query.Column{ + []any{ + "2020-11-19T11:06:31.569475746Z", + "2020-11-19T11:06:31.569479846Z", + }, + []any{ + "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)", + "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)", + }, + []any{ + "93.180.71.3", + "93.180.71.3", + }, + []any{ + "GET /downloads/product_1 HTTP/1.1", + "GET /downloads/product_1 HTTP/1.1", + }, + []any{ + 304, + 304, + }, + } + + var ( + rows = query.Rows(columns) + buf = new(strings.Builder) + ) + for { + row, err := rows.Next(context.Background()) + if err == iter.Done { + break + } else if err != nil { + log.Fatal(err) + } + _, _ = fmt.Fprintln(buf, row) + } + + // Output: + // [2020-11-19T11:06:31.569475746Z Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21) 93.180.71.3 GET /downloads/product_1 HTTP/1.1 304] + // [2020-11-19T11:06:31.569479846Z Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21) 93.180.71.3 GET /downloads/product_1 HTTP/1.1 304] + fmt.Print(buf.String()) +} diff --git a/examples/README.md b/examples/README.md index da0c8146..5b0f942d 100644 --- a/examples/README.md +++ b/examples/README.md @@ -12,7 +12,7 @@ go run ./{example} Axiom Go and the adapters automatically pick up their configuration from the environment, if not otherwise specified. To learn more about configuration, check the -[documentation](https://pkg.go.dev/github.com/axiomhq/axiom-go/adapters). +[documentation](https://pkg.go.dev/github.com/axiomhq/axiom-go). To quickstart, export the environment variables below. 
diff --git a/examples/query/main.go b/examples/query/main.go index 807f64b5..a9f5fb29 100644 --- a/examples/query/main.go +++ b/examples/query/main.go @@ -9,6 +9,7 @@ import ( "os" "github.com/axiomhq/axiom-go/axiom" + "github.com/axiomhq/axiom-go/axiom/query" ) func main() { @@ -19,6 +20,8 @@ func main() { log.Fatal("AXIOM_DATASET is required") } + ctx := context.Background() + // 1. Initialize the Axiom API client. client, err := axiom.NewClient() if err != nil { @@ -27,15 +30,21 @@ func main() { // 2. Query all events using APL ⚡ apl := fmt.Sprintf("['%s']", dataset) // E.g. ['test'] - res, err := client.Query(context.Background(), apl) + res, err := client.Query(ctx, apl) if err != nil { log.Fatal(err) - } else if len(res.Matches) == 0 { + } else if res.Status.RowsMatched == 0 { log.Fatal("No matches found") } - // 3. Print the queried results. - for _, match := range res.Matches { - fmt.Println(match.Data) + // 3. Print the queried results by creating a iterator for the rows from the + // tabular query result (as it is organized in columns) and iterating over + // the rows. + rows := res.Tables[0].Rows() + if err := rows.Range(ctx, func(_ context.Context, row query.Row) error { + _, err := fmt.Println(row) + return err + }); err != nil { + log.Fatal(err) } } From d3ef537c9fbe76723e777e4e4d39cb3fe8035dbf Mon Sep 17 00:00:00 2001 From: Lukas Malkmus Date: Wed, 1 Feb 2023 13:03:41 +0100 Subject: [PATCH 3/8] feat(query): more typing --- Makefile | 1 + axiom/datasets_integration_test.go | 15 ++-- axiom/datasets_test.go | 26 +++--- axiom/query/aggregation.go | 134 +++++++++++++++++++++++++++++ axiom/query/aggregation_string.go | 50 +++++++++++ axiom/query/aggregation_test.go | 47 ++++++++++ axiom/query/field.go | 126 +++++++++++++++++++++++++++ axiom/query/field_test.go | 45 ++++++++++ axiom/query/result.go | 33 ++----- 9 files changed, 433 insertions(+), 44 deletions(-) create mode 100644 axiom/query/aggregation.go create mode 100644 axiom/query/aggregation_string.go create mode 100644 axiom/query/aggregation_test.go create mode 100644 axiom/query/field.go create mode 100644 axiom/query/field_test.go diff --git a/Makefile b/Makefile index 350163a9..d2022099 100644 --- a/Makefile +++ b/Makefile @@ -74,6 +74,7 @@ fmt: ## Format and simplify the source code using `gofmt` .PHONY: generate generate: \ + axiom/query/aggregation_string.go \ axiom/querylegacy/aggregation_string.go \ axiom/querylegacy/filter_string.go \ axiom/querylegacy/kind_string.go \ diff --git a/axiom/datasets_integration_test.go b/axiom/datasets_integration_test.go index e5cf2e3d..2fe673b9 100644 --- a/axiom/datasets_integration_test.go +++ b/axiom/datasets_integration_test.go @@ -230,7 +230,7 @@ func (s *DatasetsTestSuite) Test() { startTime := now.Add(-time.Minute) endTime := now.Add(time.Minute) - // Run a simple APL query. + // Run a simple APL query... apl := fmt.Sprintf("['%s']", s.dataset.ID) queryResult, err := s.client.Datasets.Query(s.ctx, apl, query.SetStartTime(startTime), @@ -401,16 +401,15 @@ func (s *DatasetsTestSuite) TestCursor() { ) s.Require().NoError(err) - // FIXME(lukasmalkmus): Tabular results format is not yet returning the - // _rowID column. - s.T().Skip() - // HINT(lukasmalkmus): Expecting four columns: _time, _sysTime, _rowID, foo. // This is only checked once for the first query result to verify the // dataset scheme. The following queries will only check the results in the // columns. + // FIXME(lukasmalkmus): Tabular results format is not yet returning the + // _rowID column. 
s.Require().Len(queryResult.Tables, 1) - s.Require().Len(queryResult.Tables[0].Columns, 4) + s.Require().Len(queryResult.Tables[0].Columns, 3) + // s.Require().Len(queryResult.Tables[0].Columns, 4) s.Require().Len(queryResult.Tables[0].Columns[0], 3) if s.Len(queryResult.Tables, 1) { @@ -419,6 +418,10 @@ func (s *DatasetsTestSuite) TestCursor() { s.Equal("bar", queryResult.Tables[0].Columns[2][2]) } + // FIXME(lukasmalkmus): Tabular results format is not yet returning the + // _rowID column. + s.T().Skip() + // HINT(lukasmalkmus): In a real-world scenario, the cursor would be // retrieved from the query status MinCursor or MaxCursor fields, depending // on the queries sort order. diff --git a/axiom/datasets_test.go b/axiom/datasets_test.go index dd658291..287a2dfe 100644 --- a/axiom/datasets_test.go +++ b/axiom/datasets_test.go @@ -198,50 +198,50 @@ var ( Fields: []query.Field{ { Name: "_time", - Type: "string", + Type: query.TypeString, }, { Name: "_sysTime", - Type: "string", + Type: query.TypeString, }, { Name: "_rowId", - Type: "string", + Type: query.TypeString, }, { Name: "agent", - Type: "string", + Type: query.TypeString, }, { Name: "bytes", - Type: "float64", + Type: query.TypeReal, }, { Name: "referrer", - Type: "string", + Type: query.TypeString, }, { Name: "remote_ip", - Type: "string", + Type: query.TypeString, }, { Name: "remote_user", - Type: "string", + Type: query.TypeString, }, { Name: "request", - Type: "string", + Type: query.TypeString, }, { Name: "response", - Type: "float64", + Type: query.TypeReal, }, { Name: "time", - Type: "string", + Type: query.TypeString, }, }, - Range: &query.RangeInfo{ + Range: &query.Range{ Field: "_time", Start: parseTimeOrPanic("2023-03-21T13:38:51.735448191Z"), End: parseTimeOrPanic("2023-03-28T13:38:51.735448191Z"), @@ -1092,6 +1092,8 @@ func TestDatasetsService_Query(t *testing.T) { assert.Equal(t, expQueryRes, res) } +// TODO(lukasmalkmus): Add test for a query with an aggregation. + func TestDatasetsService_QueryLegacy(t *testing.T) { hf := func(w http.ResponseWriter, r *http.Request) { assert.Equal(t, http.MethodPost, r.Method) diff --git a/axiom/query/aggregation.go b/axiom/query/aggregation.go new file mode 100644 index 00000000..7c3f1120 --- /dev/null +++ b/axiom/query/aggregation.go @@ -0,0 +1,134 @@ +package query + +import ( + "encoding/json" + "fmt" + "strings" +) + +//go:generate go run golang.org/x/tools/cmd/stringer -type=AggregationOp -linecomment -output=aggregation_string.go + +// An AggregationOp describes the [Aggregation] operation applied on a [Field]. +type AggregationOp uint8 + +// All available [Aggregation] operations. 
+const ( + OpUnknown AggregationOp = iota // unknown + + OpCount // count + OpCountIf // countif + OpDistinct // distinct + OpDistinctIf // distinctif + OpSum // sum + OpSumIf // sumif + OpAvg // avg + OpAvgIf // avgif + OpMin // min + OpMinIf // minif + OpMax // max + OpMaxIf // maxif + OpTopk // topk + OpPercentiles // percentiles + OpHistogram // histogram + OpStandardDeviation // stdev + OpStandardDeviationIf // stdevif + OpVariance // variance + OpVarianceIf // varianceif + OpArgMin // argmin + OpArgMax // argmax + OpRate // rate + OpPearson // pearson_correlation + OpMakeSet // makeset + OpMakeSetIf // makesetif + OpMakeList // makelist + OpMakeListIf // makelistif +) + +func aggregationOpFromString(s string) (op AggregationOp, err error) { + switch strings.ToLower(s) { + case OpCount.String(): + op = OpCount + case OpCountIf.String(): + op = OpCountIf + case OpDistinct.String(): + op = OpDistinct + case OpDistinctIf.String(): + op = OpDistinctIf + case OpSum.String(): + op = OpSum + case OpSumIf.String(): + op = OpSumIf + case OpAvg.String(): + op = OpAvg + case OpAvgIf.String(): + op = OpAvgIf + case OpMin.String(): + op = OpMin + case OpMinIf.String(): + op = OpMinIf + case OpMax.String(): + op = OpMax + case OpMaxIf.String(): + op = OpMaxIf + case OpTopk.String(): + op = OpTopk + case OpPercentiles.String(): + op = OpPercentiles + case OpHistogram.String(): + op = OpHistogram + case OpStandardDeviation.String(): + op = OpStandardDeviation + case OpStandardDeviationIf.String(): + op = OpStandardDeviationIf + case OpVariance.String(): + op = OpVariance + case OpVarianceIf.String(): + op = OpVarianceIf + case OpArgMin.String(): + op = OpArgMin + case OpArgMax.String(): + op = OpArgMax + case OpRate.String(): + op = OpRate + case OpPearson.String(): + op = OpPearson + case OpMakeSet.String(): + op = OpMakeSet + case OpMakeSetIf.String(): + op = OpMakeSetIf + case OpMakeList.String(): + op = OpMakeList + case OpMakeListIf.String(): + op = OpMakeListIf + default: + return OpUnknown, fmt.Errorf("unknown aggregation operation: %s", s) + } + + return op, nil +} + +// UnmarshalJSON implements [json.Unmarshaler]. It is in place to unmarshal the +// AggregationOp from the string representation the server returns. +func (op *AggregationOp) UnmarshalJSON(b []byte) (err error) { + var s string + if err = json.Unmarshal(b, &s); err != nil { + return err + } + + *op, err = aggregationOpFromString(s) + + return err +} + +// Aggregation that is applied to a [Field] in a [Table]. +type Aggregation struct { + // Op is the aggregation operation. If the aggregation is aliased, the alias + // is stored in the parent [Field.Name]. + Op AggregationOp `json:"name"` + // Fields specifies the names of the fields this aggregation is computed on. + // E.g. ["players"] for "topk(players, 10)". + Fields []string `json:"fields"` + // Args are the non-field arguments of the aggregation, if any. E.g. "10" + // for "topk(players, 10)". + Args []any `json:"args"` +} diff --git a/axiom/query/aggregation_string.go b/axiom/query/aggregation_string.go new file mode 100644 index 00000000..ee4ceb73 --- /dev/null +++ b/axiom/query/aggregation_string.go @@ -0,0 +1,50 @@ +// Code generated by "stringer -type=AggregationOp -linecomment -output=aggregation_string.go"; DO NOT EDIT. + +package query + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[OpUnknown-0] + _ = x[OpCount-1] + _ = x[OpCountIf-2] + _ = x[OpDistinct-3] + _ = x[OpDistinctIf-4] + _ = x[OpSum-5] + _ = x[OpSumIf-6] + _ = x[OpAvg-7] + _ = x[OpAvgIf-8] + _ = x[OpMin-9] + _ = x[OpMinIf-10] + _ = x[OpMax-11] + _ = x[OpMaxIf-12] + _ = x[OpTopk-13] + _ = x[OpPercentiles-14] + _ = x[OpHistogram-15] + _ = x[OpStandardDeviation-16] + _ = x[OpStandardDeviationIf-17] + _ = x[OpVariance-18] + _ = x[OpVarianceIf-19] + _ = x[OpArgMin-20] + _ = x[OpArgMax-21] + _ = x[OpRate-22] + _ = x[OpPearson-23] + _ = x[OpMakeSet-24] + _ = x[OpMakeSetIf-25] + _ = x[OpMakeList-26] + _ = x[OpMakeListIf-27] +} + +const _AggregationOp_name = "unknowncountcountifdistinctdistinctifsumsumifavgavgifminminifmaxmaxiftopkpercentileshistogramstdevstdevifvariancevarianceifargminargmaxratepearson_correlationmakesetmakesetifmakelistmakelistif" + +var _AggregationOp_index = [...]uint8{0, 7, 12, 19, 27, 37, 40, 45, 48, 53, 56, 61, 64, 69, 73, 84, 93, 98, 105, 113, 123, 129, 135, 139, 158, 165, 174, 182, 192} + +func (i AggregationOp) String() string { + if i >= AggregationOp(len(_AggregationOp_index)-1) { + return "AggregationOp(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _AggregationOp_name[_AggregationOp_index[i]:_AggregationOp_index[i+1]] +} diff --git a/axiom/query/aggregation_test.go b/axiom/query/aggregation_test.go new file mode 100644 index 00000000..cd0a65e8 --- /dev/null +++ b/axiom/query/aggregation_test.go @@ -0,0 +1,47 @@ +package query + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestAggregationOp_Unmarshal(t *testing.T) { + var act struct { + Op AggregationOp `json:"name"` + } + err := json.Unmarshal([]byte(`{ "name": "count" }`), &act) + require.NoError(t, err) + + assert.Equal(t, OpCount, act.Op) +} + +func TestAggregationOp_String(t *testing.T) { + // Check outer bounds. + assert.Equal(t, OpUnknown, AggregationOp(0)) + assert.Contains(t, (OpMakeListIf + 1).String(), "AggregationOp(") + + for op := OpUnknown; op <= OpMakeListIf; op++ { + s := op.String() + assert.NotEmpty(t, s) + assert.NotContains(t, s, "AggregationOp(") + } +} + +func TestAggregationOpFromString(t *testing.T) { + for op := OpCount; op <= OpMakeListIf; op++ { + s := op.String() + + parsedOp, err := aggregationOpFromString(s) + if assert.NoError(t, err) { + assert.NotEmpty(t, s) + assert.Equal(t, op, parsedOp) + } + } + + op, err := aggregationOpFromString("abc") + assert.Equal(t, OpUnknown, op) + assert.EqualError(t, err, "unknown aggregation operation: abc") +} diff --git a/axiom/query/field.go b/axiom/query/field.go new file mode 100644 index 00000000..997b6987 --- /dev/null +++ b/axiom/query/field.go @@ -0,0 +1,126 @@ +package query + +import ( + "encoding/json" + "fmt" + "strings" +) + +// A FieldType describes the type of a [Field]. +type FieldType uint16 + +// All available [Field] types. +const ( + TypeInvalid FieldType = 0 // invalid + TypeBool FieldType = 1 << iota // bool + TypeDateTime // datetime + TypeInt // int + TypeLong // long + TypeReal // real + TypeString // string + TypeTimespan // timespan + TypeArray // array + TypeDictionary // dictionary + TypeUnknown // unknown + maxFieldType +) + +func fieldTypeFromString(s string) (ft FieldType, err error) { + types := strings.Split(s, "|") + + // FIXME(lukasmalkmus): It looks like there are more/different type aliases + // then documented: https://axiom.co/docs/apl/data-types/scalar-data-types. 
+ for _, t := range types { + switch strings.ToLower(t) { + case TypeBool.String(), "boolean": + ft |= TypeBool + case TypeDateTime.String(), "date": + ft |= TypeDateTime + case TypeInt.String(), "integer": // "integer" is not documented. + ft |= TypeInt + case TypeLong.String(): + ft |= TypeLong + case TypeReal.String(), "double", "float", "float64": // "float" and "float64" are not documented. + ft |= TypeReal + case TypeString.String(): + ft |= TypeString + case TypeTimespan.String(), "time": + ft |= TypeTimespan + case TypeArray.String(): + ft |= TypeArray + case TypeDictionary.String(): + ft |= TypeDictionary + case TypeUnknown.String(): + ft |= TypeUnknown + default: + return TypeInvalid, fmt.Errorf("invalid field type: %s", t) + } + } + + return ft, nil +} + +// String returns a string representation of the field type. +// +// It implements [fmt.Stringer]. +func (ft FieldType) String() string { + if ft >= maxFieldType { + return fmt.Sprintf("", ft, ft) + } + + //nolint:exhaustive // maxFieldType is not a valid field type and already + // handled above. + switch ft { + case TypeBool: + return "bool" + case TypeDateTime: + return "datetime" + case TypeInt: + return "int" + case TypeLong: + return "long" + case TypeReal: + return "real" + case TypeString: + return "string" + case TypeTimespan: + return "timespan" + case TypeArray: + return "array" + case TypeDictionary: + return "dictionary" + case TypeUnknown: + return "unknown" + } + + var res []string + for fieldType := TypeBool; fieldType < maxFieldType; fieldType <<= 1 { + if ft&fieldType != 0 { + res = append(res, fieldType.String()) + } + } + return strings.Join(res, "|") +} + +// UnmarshalJSON implements [json.Unmarshaler]. It is in place to unmarshal the +// FieldType from the string representation the server returns. +func (ft *FieldType) UnmarshalJSON(b []byte) (err error) { + var s string + if err = json.Unmarshal(b, &s); err != nil { + return err + } + + *ft, err = fieldTypeFromString(s) + + return err +} + +// Field in a [Table]. +type Field struct { + // Name of the field. + Name string `json:"name"` + // Type of the field. Can also be composite types. + Type FieldType `json:"type"` + // Aggregation is the aggregation applied to the field. 
+ Aggregation *Aggregation `json:"agg"` +} diff --git a/axiom/query/field_test.go b/axiom/query/field_test.go new file mode 100644 index 00000000..c05f8ab0 --- /dev/null +++ b/axiom/query/field_test.go @@ -0,0 +1,45 @@ +package query + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestFieldType_Unmarshal(t *testing.T) { + var act struct { + Type FieldType `json:"type"` + } + err := json.Unmarshal([]byte(`{ "type": "int|real" }`), &act) + require.NoError(t, err) + + assert.Equal(t, TypeInt|TypeReal, act.Type) +} + +func TestFieldType_String(t *testing.T) { + assert.Equal(t, TypeInvalid, FieldType(0)) + + typ := TypeInt + assert.Equal(t, "int", typ.String()) + + typ |= TypeReal + assert.Equal(t, "int|real", typ.String()) +} + +func TestFieldTypeFromString(t *testing.T) { + for typ := TypeBool; typ <= TypeUnknown; typ <<= 1 { + s := typ.String() + + parsedOp, err := fieldTypeFromString(s) + if assert.NoError(t, err) { + assert.NotEmpty(t, s) + assert.Equal(t, typ, parsedOp) + } + } + + typ, err := fieldTypeFromString("abc") + assert.Equal(t, TypeInvalid, typ) + assert.EqualError(t, err, "invalid field type: abc") +} diff --git a/axiom/query/result.go b/axiom/query/result.go index 9cdf8525..e46d531a 100644 --- a/axiom/query/result.go +++ b/axiom/query/result.go @@ -36,10 +36,10 @@ type Table struct { Groups []Group `json:"groups"` // Range specifies the window the query was restricted to. Nil if the query // was not restricted to a time window. - Range *RangeInfo `json:"range"` + Range *Range `json:"range"` // Buckets defines if the query is bucketed (usually on the "_time" field). // Nil if the query returns a non-bucketed result. - Buckets *BucketInfo `json:"buckets"` + Buckets *Buckets `json:"buckets"` // Columns in the table matching the order of the [Fields] (e.g. the // [Column] at index 0 has the values for the [Field] at index 0). In case // of sub-groups, rows will repeat the group value. @@ -51,25 +51,6 @@ func (t Table) Rows() iter.Iter[Row] { return Rows(t.Columns) } -// Field in a [Table]. -type Field struct { - // Name of the field. - Name string `json:"name"` - // Type of the field. Can also be composite types which are types separated - // by a horizontal line "|". - Type string `json:"type"` - // Aggregation is the aggregation applied to the field. - Aggregation Aggregation `json:"agg"` -} - -// Aggregation that is applied to a [Field] in a [Table]. -type Aggregation struct { - // Name of the aggregation. - Name string `json:"name"` - // Args are the arguments of the aggregation. - Args []any `json:"args"` -} - // Source that was consulted in order to create a [Table]. type Source struct { // Name of the source. @@ -91,8 +72,8 @@ type Group struct { Name string `json:"name"` } -// RangeInfo specifies the window a query was restricted to. -type RangeInfo struct { +// Range specifies the window a query was restricted to. +type Range struct { // Field specifies the field name on which the query range was restricted. // Usually "_time": Field string @@ -104,9 +85,9 @@ type RangeInfo struct { End time.Time } -// BucketInfo captures information about how a grouped query is sorted into -// buckets. Usually buckets are created on the "_time" column, -type BucketInfo struct { +// Buckets captures information about how a grouped query is sorted into +// buckets. Usually buckets are created on the "_time" column. +type Buckets struct { // Field specifies the field used to create buckets on. 
Usually this would // be "_time". Field string From 7b63da47c75d75b2039979862c3933b96599bee6 Mon Sep 17 00:00:00 2001 From: Lukas Malkmus Date: Sat, 10 Aug 2024 15:40:54 +0200 Subject: [PATCH 4/8] feat(query): go 1.23 iterator support --- .github/workflows/test_examples.yaml | 2 +- README.md | 1 - axiom/query/result.go | 15 ------ axiom/query/result_iter_go122.go | 21 +++++++++ axiom/query/result_iter_go123.go | 21 +++++++++ axiom/query/row.go | 37 --------------- .../query/{row_test.go => row_go122_test.go} | 2 + axiom/query/row_go123_test.go | 45 ++++++++++++++++++ axiom/query/row_iter_go122.go | 40 ++++++++++++++++ axiom/query/row_iter_go123.go | 39 +++++++++++++++ examples/query/{main.go => main_go122.go} | 2 + examples/query/main_go123.go | 47 +++++++++++++++++++ 12 files changed, 218 insertions(+), 54 deletions(-) create mode 100644 axiom/query/result_iter_go122.go create mode 100644 axiom/query/result_iter_go123.go rename axiom/query/{row_test.go => row_go122_test.go} (98%) create mode 100644 axiom/query/row_go123_test.go create mode 100644 axiom/query/row_iter_go122.go create mode 100644 axiom/query/row_iter_go123.go rename examples/query/{main.go => main_go122.go} (98%) create mode 100644 examples/query/main_go123.go diff --git a/.github/workflows/test_examples.yaml b/.github/workflows/test_examples.yaml index 1c7f1299..8c21d073 100644 --- a/.github/workflows/test_examples.yaml +++ b/.github/workflows/test_examples.yaml @@ -101,7 +101,7 @@ jobs: if: matrix.setup run: ${{ matrix.setup }} - name: Run example - run: go run ./examples/${{ matrix.example }}/main.go + run: go run ./examples/${{ matrix.example }} - name: Verify example if: matrix.verify run: ${{ matrix.verify }} diff --git a/README.md b/README.md index d91b76be..7ac3372a 100644 --- a/README.md +++ b/README.md @@ -15,7 +15,6 @@ [![Latest Release][release_badge]][release] [![License][license_badge]][license] - [Axiom](https://axiom.co) unlocks observability at any scale. - **Ingest with ease, store without limits:** Axiom's next-generation datastore diff --git a/axiom/query/result.go b/axiom/query/result.go index e46d531a..9a6c6c7d 100644 --- a/axiom/query/result.go +++ b/axiom/query/result.go @@ -1,11 +1,8 @@ package query import ( - "context" "encoding/json" "time" - - "github.com/axiomhq/axiom-go/axiom/query/iter" ) // Result is the result of an APL query. @@ -46,11 +43,6 @@ type Table struct { Columns []Column `json:"columns"` } -// Rows returns an iterator over the rows build from the columns the table. -func (t Table) Rows() iter.Iter[Row] { - return Rows(t.Columns) -} - // Source that was consulted in order to create a [Table]. type Source struct { // Name of the source. @@ -99,13 +91,6 @@ type Buckets struct { // Column in a [Table] containing the raw values of a [Field]. type Column []any -// Values returns an iterator over the values of the column. -func (c Column) Values() iter.Iter[any] { - return iter.Slice(c, func(_ context.Context, v any) (any, error) { - return v, nil - }) -} - // Status of an APL query [Result]. type Status struct { // MinCursor is the id of the oldest row, as seen server side. May be lower diff --git a/axiom/query/result_iter_go122.go b/axiom/query/result_iter_go122.go new file mode 100644 index 00000000..7e40c40b --- /dev/null +++ b/axiom/query/result_iter_go122.go @@ -0,0 +1,21 @@ +//go:build !go1.23 + +package query + +import ( + "context" + + "github.com/axiomhq/axiom-go/axiom/query/iter" +) + +// Rows returns an iterator over the rows build from the columns the table. 
+func (t Table) Rows() iter.Iter[Row] { + return Rows(t.Columns) +} + +// Values returns an iterator over the values of the column. +func (c Column) Values() iter.Iter[any] { + return iter.Slice(c, func(_ context.Context, v any) (any, error) { + return v, nil + }) +} diff --git a/axiom/query/result_iter_go123.go b/axiom/query/result_iter_go123.go new file mode 100644 index 00000000..bf7238cf --- /dev/null +++ b/axiom/query/result_iter_go123.go @@ -0,0 +1,21 @@ +//go:build go1.23 + +package query + +import "iter" + +// Rows returns an iterator over the rows build from the columns the table. +func (t Table) Rows() iter.Seq[Row] { + return Rows(t.Columns) +} + +// Values returns an iterator over the values of the column. +func (c Column) Values() iter.Seq[any] { + return func(yield func(any) bool) { + for _, v := range c { + if !yield(v) { + return + } + } + } +} diff --git a/axiom/query/row.go b/axiom/query/row.go index 207041f6..a2d01a73 100644 --- a/axiom/query/row.go +++ b/axiom/query/row.go @@ -1,41 +1,4 @@ package query -import ( - "context" - - "github.com/axiomhq/axiom-go/axiom/query/iter" -) - // Row represents a single row of a tabular query [Result]. type Row []any - -// Values returns an iterator over the values of the row. -func (r Row) Values() iter.Iter[any] { - return iter.Slice(r, func(_ context.Context, v any) (any, error) { - return v, nil - }) -} - -// Rows returns an iterator over the rows build from the columns of a tabular -// query [Result]. -func Rows(columns []Column) iter.Iter[Row] { - // Return an empty iterator if there are no columns or column values. - if len(columns) == 0 || len(columns[0]) == 0 { - return func(context.Context) (Row, error) { - return nil, iter.Done - } - } - - return iter.Range(0, len(columns[0]), func(_ context.Context, idx int) (Row, error) { - if idx >= len(columns[0]) { - return nil, iter.Done - } - - row := make(Row, len(columns)) - for columnIdx, column := range columns { - row[columnIdx] = column[idx] - } - - return row, nil - }) -} diff --git a/axiom/query/row_test.go b/axiom/query/row_go122_test.go similarity index 98% rename from axiom/query/row_test.go rename to axiom/query/row_go122_test.go index 219df475..c05de0ad 100644 --- a/axiom/query/row_test.go +++ b/axiom/query/row_go122_test.go @@ -1,3 +1,5 @@ +//go:build !go1.23 + package query_test import ( diff --git a/axiom/query/row_go123_test.go b/axiom/query/row_go123_test.go new file mode 100644 index 00000000..e4be5a34 --- /dev/null +++ b/axiom/query/row_go123_test.go @@ -0,0 +1,45 @@ +//go:build go1.23 + +package query_test + +import ( + "fmt" + "strings" + + "github.com/axiomhq/axiom-go/axiom/query" +) + +func ExampleRows() { + columns := []query.Column{ + []any{ + "2020-11-19T11:06:31.569475746Z", + "2020-11-19T11:06:31.569479846Z", + }, + []any{ + "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)", + "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)", + }, + []any{ + "93.180.71.3", + "93.180.71.3", + }, + []any{ + "GET /downloads/product_1 HTTP/1.1", + "GET /downloads/product_1 HTTP/1.1", + }, + []any{ + 304, + 304, + }, + } + + var buf strings.Builder + for row := range query.Rows(columns) { + _, _ = fmt.Fprintln(&buf, row) + } + + // Output: + // [2020-11-19T11:06:31.569475746Z Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21) 93.180.71.3 GET /downloads/product_1 HTTP/1.1 304] + // [2020-11-19T11:06:31.569479846Z Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21) 93.180.71.3 GET /downloads/product_1 HTTP/1.1 304] + fmt.Print(buf.String()) +} diff --git a/axiom/query/row_iter_go122.go 
b/axiom/query/row_iter_go122.go new file mode 100644 index 00000000..fdf42d90 --- /dev/null +++ b/axiom/query/row_iter_go122.go @@ -0,0 +1,40 @@ +//go:build !go1.23 + +package query + +import ( + "context" + + "github.com/axiomhq/axiom-go/axiom/query/iter" +) + +// Values returns an iterator over the values of the row. +func (r Row) Values() iter.Iter[any] { + return iter.Slice(r, func(_ context.Context, v any) (any, error) { + return v, nil + }) +} + +// Rows returns an iterator over the rows build from the columns of a tabular +// query [Result]. +func Rows(columns []Column) iter.Iter[Row] { + // Return an empty iterator if there are no columns or column values. + if len(columns) == 0 || len(columns[0]) == 0 { + return func(context.Context) (Row, error) { + return nil, iter.Done + } + } + + return iter.Range(0, len(columns[0]), func(_ context.Context, idx int) (Row, error) { + if idx >= len(columns[0]) { + return nil, iter.Done + } + + row := make(Row, len(columns)) + for columnIdx, column := range columns { + row[columnIdx] = column[idx] + } + + return row, nil + }) +} diff --git a/axiom/query/row_iter_go123.go b/axiom/query/row_iter_go123.go new file mode 100644 index 00000000..236fdc5c --- /dev/null +++ b/axiom/query/row_iter_go123.go @@ -0,0 +1,39 @@ +//go:build go1.23 + +package query + +import ( + "iter" +) + +// Values returns an iterator over the values of the row. +func (r Row) Values() iter.Seq[any] { + return func(yield func(any) bool) { + for _, v := range r { + if !yield(v) { + return + } + } + } +} + +// Rows returns an iterator over the rows build from the columns of a tabular +// query [Result]. +func Rows(columns []Column) iter.Seq[Row] { + // Return an empty iterator if there are no columns or column values. + if len(columns) == 0 || len(columns[0]) == 0 { + return func(func(Row) bool) {} + } + + return func(yield func(Row) bool) { + for i := range columns[0] { + row := make(Row, len(columns)) + for j, column := range columns { + row[j] = column[i] + } + if !yield(row) { + return + } + } + } +} diff --git a/examples/query/main.go b/examples/query/main_go122.go similarity index 98% rename from examples/query/main.go rename to examples/query/main_go122.go index a9f5fb29..910c6be9 100644 --- a/examples/query/main.go +++ b/examples/query/main_go122.go @@ -1,3 +1,5 @@ +//go:build !go1.23 + // The purpose of this example is to show how to query a dataset using the Axiom // Processing Language (APL). package main diff --git a/examples/query/main_go123.go b/examples/query/main_go123.go new file mode 100644 index 00000000..687b91f4 --- /dev/null +++ b/examples/query/main_go123.go @@ -0,0 +1,47 @@ +//go:build go1.23 + +// The purpose of this example is to show how to query a dataset using the Axiom +// Processing Language (APL). +package main + +import ( + "context" + "fmt" + "log" + "os" + + "github.com/axiomhq/axiom-go/axiom" +) + +func main() { + // Export "AXIOM_DATASET" in addition to the required environment variables. + + dataset := os.Getenv("AXIOM_DATASET") + if dataset == "" { + log.Fatal("AXIOM_DATASET is required") + } + + ctx := context.Background() + + // 1. Initialize the Axiom API client. + client, err := axiom.NewClient() + if err != nil { + log.Fatal(err) + } + + // 2. Query all events using APL ⚡ + apl := fmt.Sprintf("['%s']", dataset) // E.g. ['test'] + res, err := client.Query(ctx, apl) + if err != nil { + log.Fatal(err) + } else if res.Status.RowsMatched == 0 { + log.Fatal("No matches found") + } + + // 3. 
Print the queried results by creating a iterator for the rows from the + // tabular query result (as it is organized in columns) and iterating over + // the rows. + for row := range res.Tables[0].Rows() { + _, _ = fmt.Println(row) + } +} From efd08b8c714973ef9142e56f1776d2b93284bb2c Mon Sep 17 00:00:00 2001 From: Islam Shehata Date: Thu, 15 Aug 2024 13:20:20 +0300 Subject: [PATCH 5/8] tabular: conform to DB type fields --- axiom/datasets_test.go | 4 +-- axiom/query/field.go | 54 ++++++++++++++++++--------------------- axiom/query/field_test.go | 18 ++++++++----- 3 files changed, 39 insertions(+), 37 deletions(-) diff --git a/axiom/datasets_test.go b/axiom/datasets_test.go index 287a2dfe..cd7a775e 100644 --- a/axiom/datasets_test.go +++ b/axiom/datasets_test.go @@ -214,7 +214,7 @@ var ( }, { Name: "bytes", - Type: query.TypeReal, + Type: query.TypeFloat, }, { Name: "referrer", @@ -234,7 +234,7 @@ var ( }, { Name: "response", - Type: query.TypeReal, + Type: query.TypeFloat, }, { Name: "time", diff --git a/axiom/query/field.go b/axiom/query/field.go index 997b6987..447a9840 100644 --- a/axiom/query/field.go +++ b/axiom/query/field.go @@ -9,19 +9,19 @@ import ( // A FieldType describes the type of a [Field]. type FieldType uint16 -// All available [Field] types. +// All available [Field] types. Conforms to DB Types const ( - TypeInvalid FieldType = 0 // invalid - TypeBool FieldType = 1 << iota // bool - TypeDateTime // datetime - TypeInt // int - TypeLong // long - TypeReal // real - TypeString // string - TypeTimespan // timespan - TypeArray // array - TypeDictionary // dictionary - TypeUnknown // unknown + TypeInvalid FieldType = 0 // invalid + TypeUnknown FieldType = 1 << iota // unknown + TypeInteger // integer + TypeString // string + TypeBool // boolean + TypeDateTime // datetime + TypeFloat // float + TypeTimespan // timespan + TypeMap // map + TypeArray // array + maxFieldType ) @@ -36,20 +36,18 @@ func fieldTypeFromString(s string) (ft FieldType, err error) { ft |= TypeBool case TypeDateTime.String(), "date": ft |= TypeDateTime - case TypeInt.String(), "integer": // "integer" is not documented. - ft |= TypeInt - case TypeLong.String(): - ft |= TypeLong - case TypeReal.String(), "double", "float", "float64": // "float" and "float64" are not documented. - ft |= TypeReal + case TypeInteger.String(), "int": + ft |= TypeInteger + case TypeFloat.String(), "double", "float", "float64": // "float" and "float64" are not documented. + ft |= TypeFloat case TypeString.String(): ft |= TypeString case TypeTimespan.String(), "time": ft |= TypeTimespan case TypeArray.String(): ft |= TypeArray - case TypeDictionary.String(): - ft |= TypeDictionary + case TypeMap.String(): + ft |= TypeMap case TypeUnknown.String(): ft |= TypeUnknown default: @@ -72,23 +70,21 @@ func (ft FieldType) String() string { // handled above. 
switch ft { case TypeBool: - return "bool" + return "boolean" case TypeDateTime: return "datetime" - case TypeInt: - return "int" - case TypeLong: - return "long" - case TypeReal: - return "real" + case TypeInteger: + return "integer" + case TypeFloat: + return "float" case TypeString: return "string" case TypeTimespan: return "timespan" case TypeArray: return "array" - case TypeDictionary: - return "dictionary" + case TypeMap: + return "map" case TypeUnknown: return "unknown" } diff --git a/axiom/query/field_test.go b/axiom/query/field_test.go index c05f8ab0..fd349cce 100644 --- a/axiom/query/field_test.go +++ b/axiom/query/field_test.go @@ -12,20 +12,26 @@ func TestFieldType_Unmarshal(t *testing.T) { var act struct { Type FieldType `json:"type"` } - err := json.Unmarshal([]byte(`{ "type": "int|real" }`), &act) + err := json.Unmarshal([]byte(`{ "type": "int|string" }`), &act) require.NoError(t, err) - assert.Equal(t, TypeInt|TypeReal, act.Type) + assert.Equal(t, TypeInteger|TypeString, act.Type) } func TestFieldType_String(t *testing.T) { assert.Equal(t, TypeInvalid, FieldType(0)) - typ := TypeInt - assert.Equal(t, "int", typ.String()) + typ := TypeDateTime + assert.Equal(t, "datetime", typ.String()) - typ |= TypeReal - assert.Equal(t, "int|real", typ.String()) + typ |= TypeTimespan + assert.Equal(t, "datetime|timespan", typ.String()) +} + +func TestFieldType_Bool(t *testing.T) { + assert.Equal(t, TypeInvalid, FieldType(0)) + + assert.Equal(t, "boolean", TypeBool.String()) } func TestFieldTypeFromString(t *testing.T) { From 792d60e5ca7019865602ebf32f30435aa92609c4 Mon Sep 17 00:00:00 2001 From: Lukas Malkmus Date: Tue, 17 Sep 2024 12:37:00 +0200 Subject: [PATCH 6/8] omnia fixes --- axiom/datasets.go | 24 ++-- axiom/datasets_test.go | 22 ++-- axiom/query/field.go | 122 ----------------- axiom/query/field_test.go | 51 ------- axiom/query/iter/doc.go | 13 -- axiom/query/iter/iter.go | 95 -------------- axiom/query/iter/iter_test.go | 124 ------------------ axiom/query/result.go | 10 ++ .../{result_iter_go123.go => result_go123.go} | 3 + axiom/query/result_iter_go122.go | 21 --- axiom/query/row.go | 38 ++++++ axiom/query/row_go122_test.go | 57 -------- axiom/query/row_iter_go122.go | 40 ------ axiom/query/row_iter_go123.go | 39 ------ .../query/{row_go123_test.go => row_test.go} | 2 + 15 files changed, 75 insertions(+), 586 deletions(-) delete mode 100644 axiom/query/field.go delete mode 100644 axiom/query/field_test.go delete mode 100644 axiom/query/iter/doc.go delete mode 100644 axiom/query/iter/iter.go delete mode 100644 axiom/query/iter/iter_test.go rename axiom/query/{result_iter_go123.go => result_go123.go} (76%) delete mode 100644 axiom/query/result_iter_go122.go delete mode 100644 axiom/query/row_go122_test.go delete mode 100644 axiom/query/row_iter_go122.go delete mode 100644 axiom/query/row_iter_go123.go rename axiom/query/{row_go123_test.go => row_test.go} (92%) diff --git a/axiom/datasets.go b/axiom/datasets.go index ea620728..7e52759e 100644 --- a/axiom/datasets.go +++ b/axiom/datasets.go @@ -596,7 +596,7 @@ func (s *DatasetsService) Query(ctx context.Context, apl string, options ...quer res.TraceID = resp.TraceID() setQueryStatusOnSpan(span, res.Result.Status) - span.SetAttributes(attribute.String("axiom.result.trace_id", res.TraceID)) + span.SetAttributes(attribute.String("axiom.trace_id", res.TraceID)) return &res.Result, nil } @@ -649,7 +649,7 @@ func (s *DatasetsService) QueryLegacy(ctx context.Context, id string, q queryleg res.TraceID = resp.TraceID() 
setLegacyQueryStatusOnSpan(span, res.Result.Status) - span.SetAttributes(attribute.String("axiom.result.trace_id", res.TraceID)) + span.SetAttributes(attribute.String("axiom.trace_id", res.TraceID)) return &res.Result, nil } @@ -707,10 +707,10 @@ func setIngestStatusOnSpan(span trace.Span, status ingest.Status) { } span.SetAttributes( - attribute.String("axiom.result.trace_id", status.TraceID), - attribute.Int64("axiom.events.ingested", int64(status.Ingested)), - attribute.Int64("axiom.events.failed", int64(status.Failed)), - attribute.Int64("axiom.events.processed_bytes", int64(status.ProcessedBytes)), + attribute.String("axiom.trace_id", status.TraceID), + attribute.Int64("axiom.events.ingested", int64(status.Ingested)), //nolint:gosec // Fine for this use case. + attribute.Int64("axiom.events.failed", int64(status.Failed)), //nolint:gosec // Fine for this use case. + attribute.Int64("axiom.events.processed_bytes", int64(status.ProcessedBytes)), //nolint:gosec // Fine for this use case. ) } @@ -720,13 +720,11 @@ func setQueryStatusOnSpan(span trace.Span, status query.Status) { } span.SetAttributes( - attribute.String("axiom.query.min_cursor", status.MinCursor), - attribute.String("axiom.query.max_cursor", status.MaxCursor), attribute.String("axiom.query.min_cursor", status.MinCursor), attribute.String("axiom.query.max_cursor", status.MaxCursor), attribute.String("axiom.query.elapsed_time", status.ElapsedTime.String()), - attribute.Int64("axiom.query.rows_examined", int64(status.RowsExamined)), - attribute.Int64("axiom.query.rows_matched", int64(status.RowsMatched)), + attribute.Int64("axiom.query.rows_examined", int64(status.RowsExamined)), //nolint:gosec // Fine for this use case. + attribute.Int64("axiom.query.rows_matched", int64(status.RowsMatched)), //nolint:gosec // Fine for this use case. ) } @@ -737,9 +735,9 @@ func setLegacyQueryStatusOnSpan(span trace.Span, status querylegacy.Status) { span.SetAttributes( attribute.String("axiom.querylegacy.elapsed_time", status.ElapsedTime.String()), - attribute.Int64("axiom.querylegacy.blocks_examined", int64(status.BlocksExamined)), - attribute.Int64("axiom.querylegacy.rows_examined", int64(status.RowsExamined)), - attribute.Int64("axiom.querylegacy.rows_matched", int64(status.RowsMatched)), + attribute.Int64("axiom.querylegacy.blocks_examined", int64(status.BlocksExamined)), //nolint:gosec // Fine for this use case. + attribute.Int64("axiom.querylegacy.rows_examined", int64(status.RowsExamined)), //nolint:gosec // Fine for this use case. + attribute.Int64("axiom.querylegacy.rows_matched", int64(status.RowsMatched)), //nolint:gosec // Fine for this use case. 
attribute.Int64("axiom.querylegacy.num_groups", int64(status.NumGroups)), attribute.Bool("axiom.querylegacy.is_partial", status.IsPartial), attribute.Bool("axiom.querylegacy.is_estimate", status.IsEstimate), diff --git a/axiom/datasets_test.go b/axiom/datasets_test.go index cd7a775e..6c8a4f5b 100644 --- a/axiom/datasets_test.go +++ b/axiom/datasets_test.go @@ -198,47 +198,47 @@ var ( Fields: []query.Field{ { Name: "_time", - Type: query.TypeString, + Type: "string", }, { Name: "_sysTime", - Type: query.TypeString, + Type: "string", }, { Name: "_rowId", - Type: query.TypeString, + Type: "string", }, { Name: "agent", - Type: query.TypeString, + Type: "string", }, { Name: "bytes", - Type: query.TypeFloat, + Type: "float64", }, { Name: "referrer", - Type: query.TypeString, + Type: "string", }, { Name: "remote_ip", - Type: query.TypeString, + Type: "string", }, { Name: "remote_user", - Type: query.TypeString, + Type: "string", }, { Name: "request", - Type: query.TypeString, + Type: "string", }, { Name: "response", - Type: query.TypeFloat, + Type: "float64", }, { Name: "time", - Type: query.TypeString, + Type: "string", }, }, Range: &query.Range{ diff --git a/axiom/query/field.go b/axiom/query/field.go deleted file mode 100644 index 447a9840..00000000 --- a/axiom/query/field.go +++ /dev/null @@ -1,122 +0,0 @@ -package query - -import ( - "encoding/json" - "fmt" - "strings" -) - -// A FieldType describes the type of a [Field]. -type FieldType uint16 - -// All available [Field] types. Conforms to DB Types -const ( - TypeInvalid FieldType = 0 // invalid - TypeUnknown FieldType = 1 << iota // unknown - TypeInteger // integer - TypeString // string - TypeBool // boolean - TypeDateTime // datetime - TypeFloat // float - TypeTimespan // timespan - TypeMap // map - TypeArray // array - - maxFieldType -) - -func fieldTypeFromString(s string) (ft FieldType, err error) { - types := strings.Split(s, "|") - - // FIXME(lukasmalkmus): It looks like there are more/different type aliases - // then documented: https://axiom.co/docs/apl/data-types/scalar-data-types. - for _, t := range types { - switch strings.ToLower(t) { - case TypeBool.String(), "boolean": - ft |= TypeBool - case TypeDateTime.String(), "date": - ft |= TypeDateTime - case TypeInteger.String(), "int": - ft |= TypeInteger - case TypeFloat.String(), "double", "float", "float64": // "float" and "float64" are not documented. - ft |= TypeFloat - case TypeString.String(): - ft |= TypeString - case TypeTimespan.String(), "time": - ft |= TypeTimespan - case TypeArray.String(): - ft |= TypeArray - case TypeMap.String(): - ft |= TypeMap - case TypeUnknown.String(): - ft |= TypeUnknown - default: - return TypeInvalid, fmt.Errorf("invalid field type: %s", t) - } - } - - return ft, nil -} - -// String returns a string representation of the field type. -// -// It implements [fmt.Stringer]. -func (ft FieldType) String() string { - if ft >= maxFieldType { - return fmt.Sprintf("", ft, ft) - } - - //nolint:exhaustive // maxFieldType is not a valid field type and already - // handled above. 
- switch ft { - case TypeBool: - return "boolean" - case TypeDateTime: - return "datetime" - case TypeInteger: - return "integer" - case TypeFloat: - return "float" - case TypeString: - return "string" - case TypeTimespan: - return "timespan" - case TypeArray: - return "array" - case TypeMap: - return "map" - case TypeUnknown: - return "unknown" - } - - var res []string - for fieldType := TypeBool; fieldType < maxFieldType; fieldType <<= 1 { - if ft&fieldType != 0 { - res = append(res, fieldType.String()) - } - } - return strings.Join(res, "|") -} - -// UnmarshalJSON implements [json.Unmarshaler]. It is in place to unmarshal the -// FieldType from the string representation the server returns. -func (ft *FieldType) UnmarshalJSON(b []byte) (err error) { - var s string - if err = json.Unmarshal(b, &s); err != nil { - return err - } - - *ft, err = fieldTypeFromString(s) - - return err -} - -// Field in a [Table]. -type Field struct { - // Name of the field. - Name string `json:"name"` - // Type of the field. Can also be composite types. - Type FieldType `json:"type"` - // Aggregation is the aggregation applied to the field. - Aggregation *Aggregation `json:"agg"` -} diff --git a/axiom/query/field_test.go b/axiom/query/field_test.go deleted file mode 100644 index fd349cce..00000000 --- a/axiom/query/field_test.go +++ /dev/null @@ -1,51 +0,0 @@ -package query - -import ( - "encoding/json" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestFieldType_Unmarshal(t *testing.T) { - var act struct { - Type FieldType `json:"type"` - } - err := json.Unmarshal([]byte(`{ "type": "int|string" }`), &act) - require.NoError(t, err) - - assert.Equal(t, TypeInteger|TypeString, act.Type) -} - -func TestFieldType_String(t *testing.T) { - assert.Equal(t, TypeInvalid, FieldType(0)) - - typ := TypeDateTime - assert.Equal(t, "datetime", typ.String()) - - typ |= TypeTimespan - assert.Equal(t, "datetime|timespan", typ.String()) -} - -func TestFieldType_Bool(t *testing.T) { - assert.Equal(t, TypeInvalid, FieldType(0)) - - assert.Equal(t, "boolean", TypeBool.String()) -} - -func TestFieldTypeFromString(t *testing.T) { - for typ := TypeBool; typ <= TypeUnknown; typ <<= 1 { - s := typ.String() - - parsedOp, err := fieldTypeFromString(s) - if assert.NoError(t, err) { - assert.NotEmpty(t, s) - assert.Equal(t, typ, parsedOp) - } - } - - typ, err := fieldTypeFromString("abc") - assert.Equal(t, TypeInvalid, typ) - assert.EqualError(t, err, "invalid field type: abc") -} diff --git a/axiom/query/iter/doc.go b/axiom/query/iter/doc.go deleted file mode 100644 index b2b20984..00000000 --- a/axiom/query/iter/doc.go +++ /dev/null @@ -1,13 +0,0 @@ -// Package iter provides a generic iterator implementation and helper functions -// to construct iterators from slices and ranges. -// -// To construct an [Iter], use the [Range] or [Slice] functions: -// -// // Construct an iterator that returns a, b and c on successive calls. -// slice := []string{"a", "b", "c"} -// itr := iter.Slice(slice, func(_ context.Context, item string) (string, error) { -// return item, nil -// }) -// -// An [Iter] always returns a [Done] error when it is exhausted. -package iter diff --git a/axiom/query/iter/iter.go b/axiom/query/iter/iter.go deleted file mode 100644 index b0903fda..00000000 --- a/axiom/query/iter/iter.go +++ /dev/null @@ -1,95 +0,0 @@ -package iter - -import ( - "context" - "errors" -) - -// Done is returned if the iterator does not contain any more elements. 
-// -//nolint:revive,stylecheck // No leading "Err" as "Done" is like [io.EOF]. -var Done = errors.New("no more elements in iterator") - -// Element is a type that can be iterated over. -type Element any - -// Iter is a function that returns the next element in the iterator. It returns -// the Done error if the iterator does not contain any more elements. -type Iter[T Element] func(context.Context) (T, error) - -// Range creates an iterator that executes the given function for each index in -// the specified range. -func Range[T Element](start, end int, f func(context.Context, int) (T, error)) Iter[T] { - var idx = start - return func(ctx context.Context) (t T, err error) { - if ctx.Err() != nil { - return t, ctx.Err() - } - if idx > end { - return t, Done - } - t, err = f(ctx, idx) - idx++ - return - } -} - -// Slice creates an iterator that executes the given function for each element -// in the slice. -func Slice[T Element](slice []T, f func(context.Context, T) (T, error)) Iter[T] { - var ( - idx = 0 - end = len(slice) - 1 - ) - return func(ctx context.Context) (t T, err error) { - if ctx.Err() != nil { - return t, ctx.Err() - } - if idx > end { - return t, Done - } - t, err = f(ctx, slice[idx]) - idx++ - return - } -} - -// Next returns the next [Element] in the iterator. -func (itr Iter[T]) Next(ctx context.Context) (T, error) { - return itr(ctx) -} - -// Take returns up to n elements from the iterator. The iterator is only -// guaranteed to return a slice of length n if the error is [nil]. -func (itr Iter[T]) Take(ctx context.Context, n int) ([]T, error) { - res := make([]T, n) - for i := 0; i < n; i++ { - if ctx.Err() != nil { - return res[:i], ctx.Err() - } - var err error - if res[i], err = itr.Next(ctx); err != nil { - return res[:i], err - } - } - return res, nil -} - -// Range executes the given function for each [Element] in the iterator until it -// is exhausted in which case it returns [nil] instead of [Done]. 
-func (itr Iter[T]) Range(ctx context.Context, f func(context.Context, T) error) error { - for { - if err := ctx.Err(); err != nil { - return err - } - t, err := itr.Next(ctx) - if err != nil { - if err == Done { - return nil - } - return err - } else if err := f(ctx, t); err != nil { - return err - } - } -} diff --git a/axiom/query/iter/iter_test.go b/axiom/query/iter/iter_test.go deleted file mode 100644 index 6b6c8ee8..00000000 --- a/axiom/query/iter/iter_test.go +++ /dev/null @@ -1,124 +0,0 @@ -package iter_test - -import ( - "context" - "errors" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/axiomhq/axiom-go/axiom/query/iter" -) - -func TestRange(t *testing.T) { - itr := iter.Range(1, 2, func(_ context.Context, idx int) (int, error) { - return idx, nil - }) - - ctx := context.Background() - - res, err := itr.Next(ctx) - require.NoError(t, err) - - assert.Equal(t, 1, res) - - res, err = itr.Next(ctx) - require.NoError(t, err) - - assert.Equal(t, 2, res) - - res, err = itr.Next(ctx) - require.Error(t, err) - - assert.Equal(t, iter.Done, err) - assert.Zero(t, res) -} - -func TestSlice(t *testing.T) { - slice := []int{1, 2} - itr := iter.Slice(slice, func(_ context.Context, item int) (int, error) { - return item, nil - }) - - ctx := context.Background() - - res, err := itr.Next(ctx) - require.NoError(t, err) - - assert.Equal(t, 1, res) - - res, err = itr.Next(ctx) - require.NoError(t, err) - - assert.Equal(t, 2, res) - - res, err = itr.Next(ctx) - require.Error(t, err) - - assert.Equal(t, iter.Done, err) - assert.Zero(t, res) -} - -func TestIter_Next(t *testing.T) { - itr := iter.Iter[int](func(context.Context) (int, error) { - return 1, nil - }) - - ctx := context.Background() - - res1, _ := itr(ctx) - res2, _ := itr.Next(ctx) - - assert.Equal(t, res1, res2) -} - -func TestIter_Take(t *testing.T) { - itr := iter.Iter[int](func(context.Context) (int, error) { - return 1, nil - }) - - ctx := context.Background() - - res, _ := itr.Take(ctx, 3) - if assert.Len(t, res, 3) { - assert.Equal(t, []int{1, 1, 1}, res) - } -} - -func TestIter_Take_Error(t *testing.T) { - var count int - itr := iter.Iter[int](func(context.Context) (int, error) { - if count > 1 { - return 0, errors.New("an error") - } - count++ - return 1, nil - }) - - ctx := context.Background() - - res, err := itr.Take(ctx, 3) - - if assert.Error(t, err) { - assert.EqualError(t, err, "an error") - } - if assert.Len(t, res, 2) { - assert.Equal(t, []int{1, 1}, res) - } -} - -func TestIter_Range(t *testing.T) { - itr := iter.Range(1, 5, func(_ context.Context, idx int) (int, error) { - return idx, nil - }) - - var res int - err := itr.Range(context.Background(), func(_ context.Context, i int) error { - res += i - return nil - }) - require.NoError(t, err) - - assert.Equal(t, 15, res) -} diff --git a/axiom/query/result.go b/axiom/query/result.go index 9a6c6c7d..831e10f0 100644 --- a/axiom/query/result.go +++ b/axiom/query/result.go @@ -49,6 +49,16 @@ type Source struct { Name string `json:"name"` } +// Field in a [Table]. +type Field struct { + // Name of the field. + Name string `json:"name"` + // Type of the field. Can also be a composite type. + Type string `json:"type"` + // Aggregation is the aggregation applied to the field. + Aggregation *Aggregation `json:"agg"` +} + // Order of a [Field] in a [Table]. type Order struct { // Field is the name of the field to order by. 
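For illustration only (not part of the patch series): a minimal sketch of how the Field metadata re-introduced in result.go above could be inspected to find aggregated columns, assuming a populated query.Table named table.

    for i, field := range table.Fields {
        if field.Aggregation == nil {
            continue // Plain field, no aggregation applied.
        }
        // Op stringifies via the generated AggregationOp stringer, e.g. "topk".
        fmt.Printf("column %d (%s): %s over %v\n",
            i, field.Name, field.Aggregation.Op, field.Aggregation.Fields)
    }
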
diff --git a/axiom/query/result_iter_go123.go b/axiom/query/result_go123.go similarity index 76% rename from axiom/query/result_iter_go123.go rename to axiom/query/result_go123.go index bf7238cf..cfaee66c 100644 --- a/axiom/query/result_iter_go123.go +++ b/axiom/query/result_go123.go @@ -1,5 +1,8 @@ //go:build go1.23 +// FIXME(lukasmalkmus): Once Go 1.24 is released, remove this file and move the +// Rows and Values methods to result.go. + package query import "iter" diff --git a/axiom/query/result_iter_go122.go b/axiom/query/result_iter_go122.go deleted file mode 100644 index 7e40c40b..00000000 --- a/axiom/query/result_iter_go122.go +++ /dev/null @@ -1,21 +0,0 @@ -//go:build !go1.23 - -package query - -import ( - "context" - - "github.com/axiomhq/axiom-go/axiom/query/iter" -) - -// Rows returns an iterator over the rows build from the columns the table. -func (t Table) Rows() iter.Iter[Row] { - return Rows(t.Columns) -} - -// Values returns an iterator over the values of the column. -func (c Column) Values() iter.Iter[any] { - return iter.Slice(c, func(_ context.Context, v any) (any, error) { - return v, nil - }) -} diff --git a/axiom/query/row.go b/axiom/query/row.go index a2d01a73..5c0cbe15 100644 --- a/axiom/query/row.go +++ b/axiom/query/row.go @@ -1,4 +1,42 @@ +//go:build go1.23 + +// TODO(lukasmalkmus): Once Go 1.24 is released, remove the build constraint. + package query +import "iter" + // Row represents a single row of a tabular query [Result]. type Row []any + +// Values returns an iterator over the values of the row. +func (r Row) Values() iter.Seq[any] { + return func(yield func(any) bool) { + for _, v := range r { + if !yield(v) { + return + } + } + } +} + +// Rows returns an iterator over the rows build from the columns of a tabular +// query [Result]. +func Rows(columns []Column) iter.Seq[Row] { + // Return an empty iterator if there are no columns or column values. 
+ if len(columns) == 0 || len(columns[0]) == 0 { + return func(func(Row) bool) {} + } + + return func(yield func(Row) bool) { + for i := range columns[0] { + row := make(Row, len(columns)) + for j, column := range columns { + row[j] = column[i] + } + if !yield(row) { + return + } + } + } +} diff --git a/axiom/query/row_go122_test.go b/axiom/query/row_go122_test.go deleted file mode 100644 index c05de0ad..00000000 --- a/axiom/query/row_go122_test.go +++ /dev/null @@ -1,57 +0,0 @@ -//go:build !go1.23 - -package query_test - -import ( - "context" - "fmt" - "log" - "strings" - - "github.com/axiomhq/axiom-go/axiom/query" - "github.com/axiomhq/axiom-go/axiom/query/iter" -) - -func ExampleRows() { - columns := []query.Column{ - []any{ - "2020-11-19T11:06:31.569475746Z", - "2020-11-19T11:06:31.569479846Z", - }, - []any{ - "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)", - "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)", - }, - []any{ - "93.180.71.3", - "93.180.71.3", - }, - []any{ - "GET /downloads/product_1 HTTP/1.1", - "GET /downloads/product_1 HTTP/1.1", - }, - []any{ - 304, - 304, - }, - } - - var ( - rows = query.Rows(columns) - buf = new(strings.Builder) - ) - for { - row, err := rows.Next(context.Background()) - if err == iter.Done { - break - } else if err != nil { - log.Fatal(err) - } - _, _ = fmt.Fprintln(buf, row) - } - - // Output: - // [2020-11-19T11:06:31.569475746Z Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21) 93.180.71.3 GET /downloads/product_1 HTTP/1.1 304] - // [2020-11-19T11:06:31.569479846Z Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21) 93.180.71.3 GET /downloads/product_1 HTTP/1.1 304] - fmt.Print(buf.String()) -} diff --git a/axiom/query/row_iter_go122.go b/axiom/query/row_iter_go122.go deleted file mode 100644 index fdf42d90..00000000 --- a/axiom/query/row_iter_go122.go +++ /dev/null @@ -1,40 +0,0 @@ -//go:build !go1.23 - -package query - -import ( - "context" - - "github.com/axiomhq/axiom-go/axiom/query/iter" -) - -// Values returns an iterator over the values of the row. -func (r Row) Values() iter.Iter[any] { - return iter.Slice(r, func(_ context.Context, v any) (any, error) { - return v, nil - }) -} - -// Rows returns an iterator over the rows build from the columns of a tabular -// query [Result]. -func Rows(columns []Column) iter.Iter[Row] { - // Return an empty iterator if there are no columns or column values. - if len(columns) == 0 || len(columns[0]) == 0 { - return func(context.Context) (Row, error) { - return nil, iter.Done - } - } - - return iter.Range(0, len(columns[0]), func(_ context.Context, idx int) (Row, error) { - if idx >= len(columns[0]) { - return nil, iter.Done - } - - row := make(Row, len(columns)) - for columnIdx, column := range columns { - row[columnIdx] = column[idx] - } - - return row, nil - }) -} diff --git a/axiom/query/row_iter_go123.go b/axiom/query/row_iter_go123.go deleted file mode 100644 index 236fdc5c..00000000 --- a/axiom/query/row_iter_go123.go +++ /dev/null @@ -1,39 +0,0 @@ -//go:build go1.23 - -package query - -import ( - "iter" -) - -// Values returns an iterator over the values of the row. -func (r Row) Values() iter.Seq[any] { - return func(yield func(any) bool) { - for _, v := range r { - if !yield(v) { - return - } - } - } -} - -// Rows returns an iterator over the rows build from the columns of a tabular -// query [Result]. -func Rows(columns []Column) iter.Seq[Row] { - // Return an empty iterator if there are no columns or column values. 
- if len(columns) == 0 || len(columns[0]) == 0 { - return func(func(Row) bool) {} - } - - return func(yield func(Row) bool) { - for i := range columns[0] { - row := make(Row, len(columns)) - for j, column := range columns { - row[j] = column[i] - } - if !yield(row) { - return - } - } - } -} diff --git a/axiom/query/row_go123_test.go b/axiom/query/row_test.go similarity index 92% rename from axiom/query/row_go123_test.go rename to axiom/query/row_test.go index e4be5a34..86ff69a9 100644 --- a/axiom/query/row_go123_test.go +++ b/axiom/query/row_test.go @@ -1,5 +1,7 @@ //go:build go1.23 +// TODO(lukasmalkmus): Once Go 1.24 is released, remove the build constraint. + package query_test import ( From 9c7d7cae3254c698ef4257e70c2001d3edccf861 Mon Sep 17 00:00:00 2001 From: Lukas Malkmus Date: Tue, 17 Sep 2024 12:37:55 +0200 Subject: [PATCH 7/8] deps: update --- go.mod | 60 +++++++++++++------------- go.sum | 134 ++++++++++++++++++++++++++++----------------------------- 2 files changed, 96 insertions(+), 98 deletions(-) diff --git a/go.mod b/go.mod index e8d3c83a..d36a4114 100644 --- a/go.mod +++ b/go.mod @@ -5,22 +5,22 @@ go 1.22.1 require ( github.com/apex/log v1.9.0 github.com/cenkalti/backoff/v4 v4.3.0 - github.com/golangci/golangci-lint v1.60.2 + github.com/golangci/golangci-lint v1.61.0 github.com/google/go-querystring v1.1.0 github.com/klauspost/compress v1.17.9 - github.com/schollz/progressbar/v3 v3.14.6 + github.com/schollz/progressbar/v3 v3.15.0 github.com/sirupsen/logrus v1.9.3 github.com/stretchr/testify v1.9.0 github.com/tidwall/sjson v1.2.5 - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 - go.opentelemetry.io/otel v1.29.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.29.0 - go.opentelemetry.io/otel/sdk v1.29.0 - go.opentelemetry.io/otel/trace v1.29.0 + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 + go.opentelemetry.io/otel v1.30.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.30.0 + go.opentelemetry.io/otel/sdk v1.30.0 + go.opentelemetry.io/otel/trace v1.30.0 go.uber.org/zap v1.27.0 golang.org/x/sync v0.8.0 - golang.org/x/tools v0.24.0 + golang.org/x/tools v0.25.0 gotest.tools/gotestsum v1.12.0 ) @@ -28,15 +28,15 @@ require ( 4d63.com/gocheckcompilerdirectives v1.2.1 // indirect 4d63.com/gochecknoglobals v0.2.1 // indirect github.com/4meepo/tagalign v1.3.4 // indirect - github.com/Abirdcfly/dupword v0.0.14 // indirect + github.com/Abirdcfly/dupword v0.1.1 // indirect github.com/Antonboom/errname v0.1.13 // indirect github.com/Antonboom/nilnil v0.1.9 // indirect github.com/Antonboom/testifylint v1.4.3 // indirect github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c // indirect - github.com/Crocmagnon/fatcontext v0.4.0 // indirect + github.com/Crocmagnon/fatcontext v0.5.2 // indirect github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 // indirect github.com/GaijinEntertainment/go-exhaustruct/v3 v3.3.0 // indirect - github.com/Masterminds/semver/v3 v3.2.1 // indirect + github.com/Masterminds/semver/v3 v3.3.0 // indirect github.com/OpenPeeDeeP/depguard/v2 v2.2.0 // indirect github.com/alecthomas/go-check-sumtype v0.1.4 // indirect github.com/alexkohler/nakedret/v2 v2.0.4 // indirect @@ -59,9 +59,9 @@ require ( github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/charithe/durationcheck v0.0.10 // indirect github.com/chavacava/garif v0.1.0 // indirect 
- github.com/ckaznocha/intrange v0.1.2 // indirect + github.com/ckaznocha/intrange v0.2.0 // indirect github.com/curioswitch/go-reassign v0.2.0 // indirect - github.com/daixiang0/gci v0.13.4 // indirect + github.com/daixiang0/gci v0.13.5 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/denis-tingaikin/go-header v0.5.0 // indirect github.com/dnephin/pflag v1.0.7 // indirect @@ -83,7 +83,7 @@ require ( github.com/go-toolsmith/astp v1.1.0 // indirect github.com/go-toolsmith/strparse v1.1.0 // indirect github.com/go-toolsmith/typep v1.1.0 // indirect - github.com/go-viper/mapstructure/v2 v2.0.0 // indirect + github.com/go-viper/mapstructure/v2 v2.1.0 // indirect github.com/go-xmlfmt/xmlfmt v1.1.2 // indirect github.com/gobwas/glob v0.2.3 // indirect github.com/gofrs/flock v0.12.1 // indirect @@ -144,7 +144,7 @@ require ( github.com/nunnatsa/ginkgolinter v0.16.2 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect github.com/pelletier/go-toml v1.9.5 // indirect - github.com/pelletier/go-toml/v2 v2.2.2 // indirect + github.com/pelletier/go-toml/v2 v2.2.3 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/polyfloyd/go-errorlint v1.6.0 // indirect @@ -152,20 +152,20 @@ require ( github.com/prometheus/client_model v0.2.0 // indirect github.com/prometheus/common v0.37.0 // indirect github.com/prometheus/procfs v0.8.0 // indirect - github.com/quasilyte/go-ruleguard v0.4.2 // indirect + github.com/quasilyte/go-ruleguard v0.4.3-0.20240823090925-0fe6f58b47b1 // indirect github.com/quasilyte/go-ruleguard/dsl v0.3.22 // indirect github.com/quasilyte/gogrep v0.5.0 // indirect github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 // indirect github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 // indirect github.com/rivo/uniseg v0.4.7 // indirect github.com/rs/zerolog v1.33.0 - github.com/ryancurrah/gomodguard v1.3.3 // indirect + github.com/ryancurrah/gomodguard v1.3.5 // indirect github.com/ryanrolds/sqlclosecheck v0.5.1 // indirect github.com/sanposhiho/wastedassign/v2 v2.0.7 // indirect github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 // indirect github.com/sashamelentyev/interfacebloat v1.1.0 // indirect github.com/sashamelentyev/usestdlibvars v1.27.0 // indirect - github.com/securego/gosec/v2 v2.20.1-0.20240820084340-81cda2f91fbe // indirect + github.com/securego/gosec/v2 v2.21.2 // indirect github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c // indirect github.com/sivchari/containedctx v1.0.3 // indirect github.com/sivchari/tenv v1.10.0 // indirect @@ -182,7 +182,7 @@ require ( github.com/stretchr/objx v0.5.2 // indirect github.com/subosito/gotenv v1.4.1 // indirect github.com/tdakkota/asciicheck v0.2.0 // indirect - github.com/tetafro/godot v1.4.16 // indirect + github.com/tetafro/godot v1.4.17 // indirect github.com/tidwall/gjson v1.14.4 // indirect github.com/tidwall/match v1.1.1 // indirect github.com/tidwall/pretty v1.2.0 // indirect @@ -200,20 +200,20 @@ require ( gitlab.com/bosi/decorder v0.4.2 // indirect go-simpler.org/musttag v0.12.2 // indirect go-simpler.org/sloglint v0.7.2 // indirect - go.opentelemetry.io/otel/metric v1.29.0 // indirect + go.opentelemetry.io/otel/metric v1.30.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect go.uber.org/automaxprocs v1.5.3 // indirect go.uber.org/multierr v1.10.0 // indirect - golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect + golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e // indirect 
golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f // indirect - golang.org/x/mod v0.20.0 // indirect - golang.org/x/net v0.28.0 // indirect - golang.org/x/sys v0.24.0 // indirect - golang.org/x/term v0.23.0 // indirect - golang.org/x/text v0.17.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240823204242-4ba0660f739c // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240823204242-4ba0660f739c // indirect - google.golang.org/grpc v1.65.0 // indirect + golang.org/x/mod v0.21.0 // indirect + golang.org/x/net v0.29.0 // indirect + golang.org/x/sys v0.25.0 // indirect + golang.org/x/term v0.24.0 // indirect + golang.org/x/text v0.18.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect + google.golang.org/grpc v1.66.1 // indirect google.golang.org/protobuf v1.34.2 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/go.sum b/go.sum index c59f91e2..6211d166 100644 --- a/go.sum +++ b/go.sum @@ -37,8 +37,8 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/4meepo/tagalign v1.3.4 h1:P51VcvBnf04YkHzjfclN6BbsopfJR5rxs1n+5zHt+w8= github.com/4meepo/tagalign v1.3.4/go.mod h1:M+pnkHH2vG8+qhE5bVc/zeP7HS/j910Fwa9TUSyZVI0= -github.com/Abirdcfly/dupword v0.0.14 h1:3U4ulkc8EUo+CaT105/GJ1BQwtgyj6+VaBVbAX11Ba8= -github.com/Abirdcfly/dupword v0.0.14/go.mod h1:VKDAbxdY8YbKUByLGg8EETzYSuC4crm9WwI6Y3S0cLI= +github.com/Abirdcfly/dupword v0.1.1 h1:Bsxe0fIw6OwBtXMIncaTxCLHYO5BB+3mcsR5E8VXloY= +github.com/Abirdcfly/dupword v0.1.1/go.mod h1:B49AcJdTYYkpd4HjgAcutNGG9HZ2JWwKunH9Y2BA6sM= github.com/Antonboom/errname v0.1.13 h1:JHICqsewj/fNckzrfVSe+T33svwQxmjC+1ntDsHOVvM= github.com/Antonboom/errname v0.1.13/go.mod h1:uWyefRYRN54lBg6HseYCFhs6Qjcy41Y3Jl/dVhA87Ns= github.com/Antonboom/nilnil v0.1.9 h1:eKFMejSxPSA9eLSensFmjW2XTgTwJMjZ8hUHtV4s/SQ= @@ -49,14 +49,14 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c h1:pxW6RcqyfI9/kWtOwnv/G+AzdKuy2ZrqINhenH4HyNs= github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/Crocmagnon/fatcontext v0.4.0 h1:4ykozu23YHA0JB6+thiuEv7iT6xq995qS1vcuWZq0tg= -github.com/Crocmagnon/fatcontext v0.4.0/go.mod h1:ZtWrXkgyfsYPzS6K3O88va6t2GEglG93vnII/F94WC0= +github.com/Crocmagnon/fatcontext v0.5.2 h1:vhSEg8Gqng8awhPju2w7MKHqMlg4/NI+gSDHtR3xgwA= +github.com/Crocmagnon/fatcontext v0.5.2/go.mod h1:87XhRMaInHP44Q7Tlc7jkgKKB7kZAOPiDkFMdKCC+74= github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 h1:sHglBQTwgx+rWPdisA5ynNEsoARbiCBOyGcJM4/OzsM= github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= github.com/GaijinEntertainment/go-exhaustruct/v3 v3.3.0 h1:/fTUt5vmbkAcMBt4YQiuC23cV0kEsN1MVMNqeOW43cU= github.com/GaijinEntertainment/go-exhaustruct/v3 v3.3.0/go.mod h1:ONJg5sxcbsdQQ4pOW8TGdTidT2TMAUy/2Xhr8mrYaao= -github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0= -github.com/Masterminds/semver/v3 v3.2.1/go.mod 
h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= +github.com/Masterminds/semver/v3 v3.3.0 h1:B8LGeaivUe71a5qox1ICM/JLl0NqZSW5CHyL+hmvYS0= +github.com/Masterminds/semver/v3 v3.3.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= github.com/OpenPeeDeeP/depguard/v2 v2.2.0 h1:vDfG60vDtIuf0MEOhmLlLLSzqaRM8EMcgJPdp74zmpA= github.com/OpenPeeDeeP/depguard/v2 v2.2.0/go.mod h1:CIzddKRvLBC4Au5aYP/i3nyaWQ+ClszLIuVocRiCYFQ= github.com/alecthomas/assert/v2 v2.2.2 h1:Z/iVC0xZfWTaFNE6bA3z07T86hd45Xe2eLt6WVy2bbk= @@ -127,16 +127,16 @@ github.com/chavacava/garif v0.1.0/go.mod h1:XMyYCkEL58DF0oyW4qDjjnPWONs2HBqYKI+U github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/ckaznocha/intrange v0.1.2 h1:3Y4JAxcMntgb/wABQ6e8Q8leMd26JbX2790lIss9MTI= -github.com/ckaznocha/intrange v0.1.2/go.mod h1:RWffCw/vKBwHeOEwWdCikAtY0q4gGt8VhJZEEA5n+RE= +github.com/ckaznocha/intrange v0.2.0 h1:FykcZuJ8BD7oX93YbO1UY9oZtkRbp+1/kJcDjkefYLs= +github.com/ckaznocha/intrange v0.2.0/go.mod h1:r5I7nUlAAG56xmkOpw4XVr16BXhwYTUdcuRFeevn1oE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/curioswitch/go-reassign v0.2.0 h1:G9UZyOcpk/d7Gd6mqYgd8XYWFMw/znxwGDUstnC9DIo= github.com/curioswitch/go-reassign v0.2.0/go.mod h1:x6OpXuWvgfQaMGks2BZybTngWjT84hqJfKoO8Tt/Roc= -github.com/daixiang0/gci v0.13.4 h1:61UGkmpoAcxHM2hhNkZEf5SzwQtWJXTSws7jaPyqwlw= -github.com/daixiang0/gci v0.13.4/go.mod h1:12etP2OniiIdP4q+kjUGrC/rUagga7ODbqsom5Eo5Yk= +github.com/daixiang0/gci v0.13.5 h1:kThgmH1yBmZSBCh1EJVxQ7JsHpm5Oms0AMed/0LaH4c= +github.com/daixiang0/gci v0.13.5/go.mod h1:12etP2OniiIdP4q+kjUGrC/rUagga7ODbqsom5Eo5Yk= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -212,8 +212,8 @@ github.com/go-toolsmith/strparse v1.1.0 h1:GAioeZUK9TGxnLS+qfdqNbA4z0SSm5zVNtCQi github.com/go-toolsmith/strparse v1.1.0/go.mod h1:7ksGy58fsaQkGQlY8WVoBFNyEPMGuJin1rfoPS4lBSQ= github.com/go-toolsmith/typep v1.1.0 h1:fIRYDyF+JywLfqzyhdiHzRop/GQDxxNhLGQ6gFUNHus= github.com/go-toolsmith/typep v1.1.0/go.mod h1:fVIw+7zjdsMxDA3ITWnH1yOiw1rnTQKCsF/sk2H/qig= -github.com/go-viper/mapstructure/v2 v2.0.0 h1:dhn8MZ1gZ0mzeodTG3jt5Vj/o87xZKuNAprG2mQfMfc= -github.com/go-viper/mapstructure/v2 v2.0.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/go-viper/mapstructure/v2 v2.1.0 h1:gHnMa2Y/pIxElCH2GlZZ1lZSsn6XMtufpGyP1XxdC/w= +github.com/go-viper/mapstructure/v2 v2.1.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/go-xmlfmt/xmlfmt v1.1.2 h1:Nea7b4icn8s57fTx1M5AI4qQT5HEM3rVUO8MuE6g80U= github.com/go-xmlfmt/xmlfmt v1.1.2/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= @@ -255,8 +255,8 @@ github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a 
h1:w8hkcTqaFpzKqonE9 github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk= github.com/golangci/gofmt v0.0.0-20240816233607-d8596aa466a9 h1:/1322Qns6BtQxUZDTAT4SdcoxknUki7IAoK4SAXr8ME= github.com/golangci/gofmt v0.0.0-20240816233607-d8596aa466a9/go.mod h1:Oesb/0uFAyWoaw1U1qS5zyjCg5NP9C9iwjnI4tIsXEE= -github.com/golangci/golangci-lint v1.60.2 h1:Y8aWnZCMOLY5T7Ga5hcoemyKsZZJCUmIIK3xTD3jIhc= -github.com/golangci/golangci-lint v1.60.2/go.mod h1:4UvjLpOJoQSvmyWkmO1urDR3txhL9R9sn4oM/evJ95g= +github.com/golangci/golangci-lint v1.61.0 h1:VvbOLaRVWmyxCnUIMTbf1kDsaJbTzH20FAMXTAlQGu8= +github.com/golangci/golangci-lint v1.61.0/go.mod h1:e4lztIrJJgLPhWvFPDkhiMwEFRrWlmFbrZea3FsJyN8= github.com/golangci/misspell v0.6.0 h1:JCle2HUTNWirNlDIAUO44hUsKhOFqGPoC4LZxlaSXDs= github.com/golangci/misspell v0.6.0/go.mod h1:keMNyY6R9isGaSAu+4Q8NMBwMPkh15Gtc8UCVoDtAWo= github.com/golangci/modinfo v0.3.4 h1:oU5huX3fbxqQXdfspamej74DFX0kyGLkw1ppvXoJ8GA= @@ -296,8 +296,8 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 h1:FKHo8hFI3A+7w0aUQuYXQ+6EN5stWmeY/AZqtM8xk9k= -github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= +github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5 h1:5iH8iuqE5apketRbSFBy+X1V0o+l+8NF1avt4HWl7cA= +github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= @@ -452,11 +452,11 @@ github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo/v2 v2.20.0 h1:PE84V2mHqoT1sglvHc8ZdQtPcwmvvt29WLEEO3xmdZw= -github.com/onsi/ginkgo/v2 v2.20.0/go.mod h1:lG9ey2Z29hR41WMVthyJBGUBcBhGOtoPF2VFMvBXFCI= +github.com/onsi/ginkgo/v2 v2.20.2 h1:7NVCeyIWROIAheY21RLS+3j2bb52W0W82tkberYytp4= +github.com/onsi/ginkgo/v2 v2.20.2/go.mod h1:K9gyxPIlb+aIvnZ8bd9Ak+YP18w3APlR+5coaZoE2ag= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k= -github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY= +github.com/onsi/gomega v1.34.2 h1:pNCwDkzrsv7MS9kpaQvVb1aVLahQXyJ/Tv5oAZMI3i8= +github.com/onsi/gomega v1.34.2/go.mod h1:v1xfxRgk0KIsG+QOdm7p8UosrOzPYRo60fd3B/1Dukc= github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw= github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU= github.com/otiai10/copy v1.14.0/go.mod 
h1:ECfuL02W+/FkTWZWgQqXPWZgW9oeKCSQ5qVfSc4qc4w= @@ -466,8 +466,8 @@ github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT9 github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= -github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= -github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= +github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M= +github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -503,8 +503,8 @@ github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1 github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= -github.com/quasilyte/go-ruleguard v0.4.2 h1:htXcXDK6/rO12kiTHKfHuqR4kr3Y4M0J0rOL6CH/BYs= -github.com/quasilyte/go-ruleguard v0.4.2/go.mod h1:GJLgqsLeo4qgavUoL8JeGFNS7qcisx3awV/w9eWTmNI= +github.com/quasilyte/go-ruleguard v0.4.3-0.20240823090925-0fe6f58b47b1 h1:+Wl/0aFp0hpuHM3H//KMft64WQ1yX9LdJY64Qm/gFCo= +github.com/quasilyte/go-ruleguard v0.4.3-0.20240823090925-0fe6f58b47b1/go.mod h1:GJLgqsLeo4qgavUoL8JeGFNS7qcisx3awV/w9eWTmNI= github.com/quasilyte/go-ruleguard/dsl v0.3.22 h1:wd8zkOhSNr+I+8Qeciml08ivDt1pSXe60+5DqOpCjPE= github.com/quasilyte/go-ruleguard/dsl v0.3.22/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= github.com/quasilyte/gogrep v0.5.0 h1:eTKODPXbI8ffJMN+W2aE0+oL0z/nh8/5eNdiO34SOAo= @@ -525,8 +525,8 @@ github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8= github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryancurrah/gomodguard v1.3.3 h1:eiSQdJVNr9KTNxY2Niij8UReSwR8Xrte3exBrAZfqpg= -github.com/ryancurrah/gomodguard v1.3.3/go.mod h1:rsKQjj4l3LXe8N344Ow7agAy5p9yjsWOtRzUMYmA0QY= +github.com/ryancurrah/gomodguard v1.3.5 h1:cShyguSwUEeC0jS7ylOiG/idnd1TpJ1LfHGpV3oJmPU= +github.com/ryancurrah/gomodguard v1.3.5/go.mod h1:MXlEPQRxgfPQa62O8wzK3Ozbkv9Rkqr+wKjSxTdsNJE= github.com/ryanrolds/sqlclosecheck v0.5.1 h1:dibWW826u0P8jNLsLN+En7+RqWWTYrjCB9fJfSfdyCU= github.com/ryanrolds/sqlclosecheck v0.5.1/go.mod h1:2g3dUjoS6AL4huFdv6wn55WpLIDjY7ZgUR4J8HOO/XQ= github.com/sanposhiho/wastedassign/v2 v2.0.7 h1:J+6nrY4VW+gC9xFzUc+XjPD3g3wF3je/NsJFwFK7Uxc= @@ -537,10 +537,10 @@ github.com/sashamelentyev/interfacebloat v1.1.0 h1:xdRdJp0irL086OyW1H/RTZTr1h/tM github.com/sashamelentyev/interfacebloat v1.1.0/go.mod h1:+Y9yU5YdTkrNvoX0xHc84dxiN1iBi9+G8zZIhPVoNjQ= github.com/sashamelentyev/usestdlibvars v1.27.0 h1:t/3jZpSXtRPRf2xr0m63i32ZrusyurIGT9E5wAvXQnI= github.com/sashamelentyev/usestdlibvars v1.27.0/go.mod h1:9nl0jgOfHKWNFS43Ojw0i7aRoS4j6EBye3YBhmAIRF8= -github.com/schollz/progressbar/v3 
v3.14.6 h1:GyjwcWBAf+GFDMLziwerKvpuS7ZF+mNTAXIB2aspiZs= -github.com/schollz/progressbar/v3 v3.14.6/go.mod h1:Nrzpuw3Nl0srLY0VlTvC4V6RL50pcEymjy6qyJAaLa0= -github.com/securego/gosec/v2 v2.20.1-0.20240820084340-81cda2f91fbe h1:exdneYmXwZ4+VaIWv9mQ47uIHkTQSN50DYdCjXJ1cdQ= -github.com/securego/gosec/v2 v2.20.1-0.20240820084340-81cda2f91fbe/go.mod h1:iyeMMRw8QEmueUSZ2VqmkQMiDyDcobfPnG00CV/NWdE= +github.com/schollz/progressbar/v3 v3.15.0 h1:cNZmcNiVyea6oofBTg80ZhVXxf3wG/JoAhqCCwopkQo= +github.com/schollz/progressbar/v3 v3.15.0/go.mod h1:ncBdc++eweU0dQoeZJ3loXoAc+bjaallHRIm8pVVeQM= +github.com/securego/gosec/v2 v2.21.2 h1:deZp5zmYf3TWwU7A7cR2+SolbTpZ3HQiwFqnzQyEl3M= +github.com/securego/gosec/v2 v2.21.2/go.mod h1:au33kg78rNseF5PwPnTWhuYBFf534bvJRvOrgZ/bFzU= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c h1:W65qqJCIOVP4jpqPQ0YvHYKwcMEMVWIzWC5iNQQfBTU= github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs= @@ -603,8 +603,8 @@ github.com/tenntenn/modver v1.0.1 h1:2klLppGhDgzJrScMpkj9Ujy3rXPUspSjAcev9tSEBgA github.com/tenntenn/modver v1.0.1/go.mod h1:bePIyQPb7UeioSRkw3Q0XeMhYZSMx9B8ePqg6SAMGH0= github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3 h1:f+jULpRQGxTSkNYKJ51yaw6ChIqO+Je8UqsTKN/cDag= github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3/go.mod h1:ON8b8w4BN/kE1EOhwT0o+d62W65a6aPw1nouo9LMgyY= -github.com/tetafro/godot v1.4.16 h1:4ChfhveiNLk4NveAZ9Pu2AN8QZ2nkUGFuadM9lrr5D0= -github.com/tetafro/godot v1.4.16/go.mod h1:2oVxTBSftRTh4+MVfUaUXR6bn2GDXCaMcOG4Dk3rfio= +github.com/tetafro/godot v1.4.17 h1:pGzu+Ye7ZUEFx7LHU0dAKmCOXWsPjl7qA6iMGndsjPs= +github.com/tetafro/godot v1.4.17/go.mod h1:2oVxTBSftRTh4+MVfUaUXR6bn2GDXCaMcOG4Dk3rfio= github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/gjson v1.14.4 h1:uo0p8EbA09J7RQaflQ1aBRffTR7xedD2bcIVSYxLnkM= github.com/tidwall/gjson v1.14.4/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= @@ -663,20 +663,20 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8= -go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw= -go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0 h1:dIIDULZJpgdiHz5tXrTgKIMLkus6jEFa7x5SOKcyR7E= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0/go.mod h1:jlRVBe7+Z1wyxFSUs48L6OBQZ5JwH2Hg/Vbl+t9rAgI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.29.0 h1:JAv0Jwtl01UFiyWZEMiJZBiTlv5A50zNs8lsthXqIio= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.29.0/go.mod h1:QNKLmUEAq2QUbPQUfvw4fmv0bgbK7UlOSFCnXyfvSNc= -go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc= -go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8= -go.opentelemetry.io/otel/sdk 
v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo= -go.opentelemetry.io/otel/sdk v1.29.0/go.mod h1:pM8Dx5WKnvxLCb+8lG1PRNIDxu9g9b9g59Qr7hfAAok= -go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4= -go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 h1:ZIg3ZT/aQ7AfKqdwp7ECpOK6vHqquXXuyTjIO8ZdmPs= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0/go.mod h1:DQAwmETtZV00skUwgD6+0U89g80NKsJE3DCKeLLPQMI= +go.opentelemetry.io/otel v1.30.0 h1:F2t8sK4qf1fAmY9ua4ohFS/K+FUuOPemHUIXHtktrts= +go.opentelemetry.io/otel v1.30.0/go.mod h1:tFw4Br9b7fOS+uEao81PJjVMjW/5fvNCbpsDIXqP0pc= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0 h1:lsInsfvhVIfOI6qHVyysXMNDnjO9Npvl7tlDPJFBVd4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0/go.mod h1:KQsVNh4OjgjTG0G6EiNi1jVpnaeeKsKMRwbLN+f1+8M= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.30.0 h1:umZgi92IyxfXd/l4kaDhnKgY8rnN/cZcF1LKc6I8OQ8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.30.0/go.mod h1:4lVs6obhSVRb1EW5FhOuBTyiQhtRtAnnva9vD3yRfq8= +go.opentelemetry.io/otel/metric v1.30.0 h1:4xNulvn9gjzo4hjg+wzIKG7iNFEaBMX00Qd4QIZs7+w= +go.opentelemetry.io/otel/metric v1.30.0/go.mod h1:aXTfST94tswhWEb+5QjlSqG+cZlmyXy/u8jFpor3WqQ= +go.opentelemetry.io/otel/sdk v1.30.0 h1:cHdik6irO49R5IysVhdn8oaiR9m8XluDaJAs4DfOrYE= +go.opentelemetry.io/otel/sdk v1.30.0/go.mod h1:p14X4Ok8S+sygzblytT1nqG98QG2KYKv++HE0LY/mhg= +go.opentelemetry.io/otel/trace v1.30.0 h1:7UBkkYzeg3C7kQX8VAidWh2biiQbtAKjyIML8dQ9wmc= +go.opentelemetry.io/otel/trace v1.30.0/go.mod h1:5EyKqTzzmyqB9bwtCCq6pDLktPK6fmGf/Dph+8VI02o= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8= @@ -710,8 +710,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= -golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= +golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e h1:I88y4caeGeuDQxgdoFPUq097j7kNfw6uvuiNxUBfcBk= +golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= golang.org/x/exp/typeparams v0.0.0-20220428152302-39d4317da171/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/exp/typeparams v0.0.0-20230203172020-98cc5a0785f9/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f h1:phY1HzDcf18Aq9A8KkmRtY9WvOFIxN8wgfvy6Zm1DV8= @@ -748,8 +748,8 @@ golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= 
-golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= -golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -796,8 +796,8 @@ golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= -golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= -golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= +golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= +golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -883,9 +883,8 @@ golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -898,9 +897,8 @@ golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= -golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= -golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= -golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= +golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM= +golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -915,8 +913,8 @@ golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= -golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -983,8 +981,8 @@ golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.11.0/go.mod h1:anzJrxPjNtfgiYQYirP2CPGzGLxrH2u2QBhn6Bf3qY8= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/tools v0.19.0/go.mod h1:qoJWxmGSIBmAeriMx19ogtrEPrGtDbPK634QFIcLAhc= -golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= -golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= +golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE= +golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1040,10 +1038,10 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto/googleapis/api v0.0.0-20240823204242-4ba0660f739c h1:e0zB268kOca6FbuJkYUGxfwG4DKFZG/8DLyv9Zv66cE= -google.golang.org/genproto/googleapis/api v0.0.0-20240823204242-4ba0660f739c/go.mod h1:fO8wJzT2zbQbAjbIoos1285VfEIYKDDY+Dt+WpTkh6g= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240823204242-4ba0660f739c h1:Kqjm4WpoWvwhMPcrAczoTyMySQmYa9Wy2iL6Con4zn8= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240823204242-4ba0660f739c/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 h1:hjSy6tcFQZ171igDaN5QHOw2n6vx40juYbC/x67CEhc= +google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:qpvKtACPCQhAdu3PyQgV4l3LMXZEtft7y8QcarRsp9I= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= +google.golang.org/genproto/googleapis/rpc 
v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -1056,8 +1054,8 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/grpc v1.66.1 h1:hO5qAXR19+/Z44hmvIM4dQFMSYX9XcWsByfoxutBpAM= +google.golang.org/grpc v1.66.1/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= From 9e33ffb04338aa875fb071ad3c975301614df49b Mon Sep 17 00:00:00 2001 From: Lukas Malkmus Date: Tue, 17 Sep 2024 12:48:24 +0200 Subject: [PATCH 8/8] fix example --- examples/query/main_go122.go | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/examples/query/main_go122.go b/examples/query/main_go122.go index 910c6be9..a7b434a9 100644 --- a/examples/query/main_go122.go +++ b/examples/query/main_go122.go @@ -11,7 +11,6 @@ import ( "os" "github.com/axiomhq/axiom-go/axiom" - "github.com/axiomhq/axiom-go/axiom/query" ) func main() { @@ -39,14 +38,11 @@ func main() { log.Fatal("No matches found") } - // 3. Print the queried results by creating a iterator for the rows from the - // tabular query result (as it is organized in columns) and iterating over - // the rows. - rows := res.Tables[0].Rows() - if err := rows.Range(ctx, func(_ context.Context, row query.Row) error { - _, err := fmt.Println(row) - return err - }); err != nil { - log.Fatal(err) + // 3. Print the queried results by iterating through each column with the + // same row index. + for i := range len(res.Tables[0].Columns[0]) { + for j := range res.Tables[0].Columns { + fmt.Println(res.Tables[0].Columns[j][i]) + } } }
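
As a side note on the example change in PATCH 8/8: the column-wise iteration can be factored into a small helper that rebuilds one logical row per index, mirroring the iterator logic removed from the query package earlier in this series. This is a minimal sketch only; it assumes the table's columns are exposed as equally sized value slices (shown here as [][]any), that fmt is imported as in the example, and the printRows name is illustrative rather than part of the library.

func printRows(columns [][]any) {
	// Nothing to print if there are no columns or no rows.
	if len(columns) == 0 || len(columns[0]) == 0 {
		return
	}
	// Walk the row indices of the first column, then collect the value of
	// every column at that index to reconstruct one logical row from the
	// columnar layout before printing it.
	for i := range columns[0] {
		row := make([]any, len(columns))
		for j, column := range columns {
			row[j] = column[i]
		}
		fmt.Println(row...)
	}
}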