diff --git a/Makefile b/Makefile
index a0f83cff..3a04a466 100644
--- a/Makefile
+++ b/Makefile
@@ -74,7 +74,6 @@ fmt: ## Format and simplify the source code using `gofmt`
 .PHONY: generate
 generate: \
-	axiom/query/result_string.go \
 	axiom/querylegacy/aggregation_string.go \
 	axiom/querylegacy/filter_string.go \
 	axiom/querylegacy/kind_string.go \
diff --git a/axiom/datasets.go b/axiom/datasets.go
index 33dd3de4..a24068d3 100644
--- a/axiom/datasets.go
+++ b/axiom/datasets.go
@@ -135,42 +135,10 @@ type aplQueryRequest struct {
 type aplQueryResponse struct {
 	query.Result

-	// HINT(lukasmalkmus): Ignore these fields as they are not relevant for the
-	// user and/or will change with the new query result format.
-	LegacyRequest struct {
-		StartTime         any `json:"startTime"`
-		EndTime           any `json:"endTime"`
-		Resolution        any `json:"resolution"`
-		Aggregations      any `json:"aggregations"`
-		Filter            any `json:"filter"`
-		Order             any `json:"order"`
-		Limit             any `json:"limit"`
-		VirtualFields     any `json:"virtualFields"`
-		Projections       any `json:"project"`
-		Cursor            any `json:"cursor"`
-		IncludeCursor     any `json:"includeCursor"`
-		ContinuationToken any `json:"continuationToken"`
-
-		// HINT(lukasmalkmus): Preserve the legacy request's "groupBy"
-		// field for now. This is needed to properly render some results.
-		GroupBy []string `json:"groupBy"`
-	} `json:"request"`
-	FieldsMeta any `json:"fieldsMetaMap"`
-}
-
-// UnmarshalJSON implements [json.Unmarshaler]. It is in place to unmarshal the
-// groupBy field of the legacy request that is part of the response into the
-// actual [query.Result.GroupBy] field.
-func (r *aplQueryResponse) UnmarshalJSON(b []byte) error {
-	type localResponse *aplQueryResponse
-
-	if err := json.Unmarshal(b, localResponse(r)); err != nil {
-		return err
-	}
-
-	r.GroupBy = r.LegacyRequest.GroupBy
-
-	return nil
+	Format        any `json:"format"`
+	Request       any `json:"request"`
+	DatasetNames  any `json:"datasetNames"`
+	FieldsMetaMap any `json:"fieldsMetaMap"`
 }

 // DatasetsService handles communication with the dataset related operations of
@@ -373,7 +341,7 @@ func (s *DatasetsService) Ingest(ctx context.Context, id string, r io.Reader, ty
 	}
 	res.TraceID = resp.TraceID()

-	setIngestResultOnSpan(span, res)
+	setIngestStatusOnSpan(span, res)

 	return &res, nil
 }
@@ -478,7 +446,7 @@ func (s *DatasetsService) IngestEvents(ctx context.Context, id string, events []
 	}
 	res.TraceID = resp.TraceID()

-	setIngestResultOnSpan(span, res)
+	setIngestStatusOnSpan(span, res)

 	return &res, nil
 }
@@ -535,7 +503,7 @@ func (s *DatasetsService) IngestChannel(ctx context.Context, id string, events <
 	var ingestStatus ingest.Status
 	defer func() {
-		setIngestResultOnSpan(span, ingestStatus)
+		setIngestStatusOnSpan(span, ingestStatus)
 	}()

 	flush := func() error {
@@ -608,7 +576,7 @@ func (s *DatasetsService) Query(ctx context.Context, apl string, options ...quer
 	queryParams := struct {
 		Format string `url:"format"`
 	}{
-		Format: "legacy", // Hardcode legacy APL format for now.
+		Format: "tabular", // Hardcode tabular result format for now.
 	}

 	path, err := url.JoinPath("/v1/datasets", "_apl")
@@ -636,7 +604,8 @@ func (s *DatasetsService) Query(ctx context.Context, apl string, options ...quer
 	}
 	res.TraceID = resp.TraceID()

-	setQueryResultOnSpan(span, res.Result)
+	setQueryStatusOnSpan(span, res.Result.Status)
+	span.SetAttributes(attribute.String("axiom.result.trace_id", res.TraceID))

 	return &res.Result, nil
 }
@@ -688,7 +657,8 @@ func (s *DatasetsService) QueryLegacy(ctx context.Context, id string, q queryleg
 	}
 	res.SavedQueryID = resp.Header.Get("X-Axiom-History-Query-Id")
 	res.TraceID = resp.TraceID()

-	setLegacyQueryResultOnSpan(span, res.Result)
+	setLegacyQueryStatusOnSpan(span, res.Result.Status)
+	span.SetAttributes(attribute.String("axiom.result.trace_id", res.TraceID))

 	return &res.Result, nil
 }
@@ -740,60 +710,52 @@ func DetectContentType(r io.Reader) (io.Reader, ContentType, error) {
 	return r, typ, nil
 }

-func setIngestResultOnSpan(span trace.Span, res ingest.Status) {
+func setIngestStatusOnSpan(span trace.Span, status ingest.Status) {
 	if !span.IsRecording() {
 		return
 	}

 	span.SetAttributes(
-		attribute.String("axiom.result.trace_id", res.TraceID),
-		attribute.Int64("axiom.events.ingested", int64(res.Ingested)),
-		attribute.Int64("axiom.events.failed", int64(res.Failed)),
-		attribute.Int64("axiom.events.processed_bytes", int64(res.ProcessedBytes)),
+		attribute.String("axiom.result.trace_id", status.TraceID),
+		attribute.Int64("axiom.events.ingested", int64(status.Ingested)),
+		attribute.Int64("axiom.events.failed", int64(status.Failed)),
+		attribute.Int64("axiom.events.processed_bytes", int64(status.ProcessedBytes)),
 	)
 }

-//nolint:dupl // We need to support both query packages and their types.
-func setQueryResultOnSpan(span trace.Span, res query.Result) {
+func setQueryStatusOnSpan(span trace.Span, status query.Status) {
 	if !span.IsRecording() {
 		return
 	}

 	span.SetAttributes(
-		attribute.String("axiom.result.trace_id", res.TraceID),
-		attribute.String("axiom.result.status.elapsed_time", res.Status.ElapsedTime.String()),
-		attribute.Int64("axiom.result.status.blocks_examined", int64(res.Status.BlocksExamined)),
-		attribute.Int64("axiom.result.status.rows_examined", int64(res.Status.RowsExamined)),
-		attribute.Int64("axiom.result.status.rows_matched", int64(res.Status.RowsMatched)),
-		attribute.Int64("axiom.result.status.num_groups", int64(res.Status.NumGroups)),
-		attribute.Bool("axiom.result.status.is_partial", res.Status.IsPartial),
-		attribute.Bool("axiom.result.status.is_estimate", res.Status.IsEstimate),
-		attribute.String("axiom.result.status.min_block_time", res.Status.MinBlockTime.String()),
-		attribute.String("axiom.result.status.max_block_time", res.Status.MaxBlockTime.String()),
-		attribute.String("axiom.result.status.min_cursor", res.Status.MinCursor),
-		attribute.String("axiom.result.status.max_cursor", res.Status.MaxCursor),
+		attribute.String("axiom.query.min_cursor", status.MinCursor),
+		attribute.String("axiom.query.max_cursor", status.MaxCursor),
+		attribute.String("axiom.query.elapsed_time", status.ElapsedTime.String()),
+		attribute.Int64("axiom.query.rows_examined", int64(status.RowsExamined)),
+		attribute.Int64("axiom.query.rows_matched", int64(status.RowsMatched)),
 	)
 }

-//nolint:dupl // We need to support both query packages and their types.
-func setLegacyQueryResultOnSpan(span trace.Span, res querylegacy.Result) {
+func setLegacyQueryStatusOnSpan(span trace.Span, status querylegacy.Status) {
 	if !span.IsRecording() {
 		return
 	}

 	span.SetAttributes(
-		attribute.String("axiom.result.trace_id", res.TraceID),
-		attribute.String("axiom.result.status.elapsed_time", res.Status.ElapsedTime.String()),
-		attribute.Int64("axiom.result.status.blocks_examined", int64(res.Status.BlocksExamined)),
-		attribute.Int64("axiom.result.status.rows_examined", int64(res.Status.RowsExamined)),
-		attribute.Int64("axiom.result.status.rows_matched", int64(res.Status.RowsMatched)),
-		attribute.Int64("axiom.result.status.num_groups", int64(res.Status.NumGroups)),
-		attribute.Bool("axiom.result.status.is_partial", res.Status.IsPartial),
-		attribute.Bool("axiom.result.status.is_estimate", res.Status.IsEstimate),
-		attribute.String("axiom.result.status.min_block_time", res.Status.MinBlockTime.String()),
-		attribute.String("axiom.result.status.max_block_time", res.Status.MaxBlockTime.String()),
-		attribute.String("axiom.result.status.min_cursor", res.Status.MinCursor),
-		attribute.String("axiom.result.status.max_cursor", res.Status.MaxCursor),
+		attribute.String("axiom.querylegacy.elapsed_time", status.ElapsedTime.String()),
+		attribute.Int64("axiom.querylegacy.blocks_examined", int64(status.BlocksExamined)),
+		attribute.Int64("axiom.querylegacy.rows_examined", int64(status.RowsExamined)),
+		attribute.Int64("axiom.querylegacy.rows_matched", int64(status.RowsMatched)),
+		attribute.Int64("axiom.querylegacy.num_groups", int64(status.NumGroups)),
+		attribute.Bool("axiom.querylegacy.is_partial", status.IsPartial),
+		attribute.Bool("axiom.querylegacy.is_estimate", status.IsEstimate),
+		attribute.String("axiom.querylegacy.min_block_time", status.MinBlockTime.String()),
+		attribute.String("axiom.querylegacy.max_block_time", status.MaxBlockTime.String()),
+		attribute.String("axiom.querylegacy.min_cursor", status.MinCursor),
+		attribute.String("axiom.querylegacy.max_cursor", status.MaxCursor),
 	)
 }
diff --git a/axiom/datasets_integration_test.go b/axiom/datasets_integration_test.go
index 683203b9..55d7c209 100644
--- a/axiom/datasets_integration_test.go
+++ b/axiom/datasets_integration_test.go
@@ -246,9 +246,64 @@ func (s *DatasetsTestSuite) Test() {
 	s.Require().NoError(err)
 	s.Require().NotNil(queryResult)

+	s.NotZero(queryResult.Status.ElapsedTime)
 	s.EqualValues(14, queryResult.Status.RowsExamined)
 	s.EqualValues(14, queryResult.Status.RowsMatched)
-	s.Len(queryResult.Matches, 14)
+	if s.Len(queryResult.Tables, 1) {
+		table := queryResult.Tables[0]
+
+		if s.Len(table.Sources, 1) {
+			// FIXME(lukasmalkmus): Uncomment once there is consensus on the
+			// source name format.
+			// s.Equal(s.dataset.ID, table.Sources[0].Name)
+		}
+
+		// FIXME(lukasmalkmus): Tabular results format is not yet returning the
+		// _rowID column.
+		s.Len(table.Fields, 11)  // 8 event fields + 1 label field + 2 system fields
+		s.Len(table.Columns, 11) // 8 event fields + 1 label field + 2 system fields
+		// s.Len(table.Fields, 12)  // 8 event fields + 1 label field + 3 system fields
+		// s.Len(table.Columns, 12) // 8 event fields + 1 label field + 3 system fields
+	}
+
+	// ... and a slightly more complex (analytic) APL query.
+	apl = fmt.Sprintf("['%s'] | summarize topk(remote_ip, 1)", s.dataset.ID)
+	queryResult, err = s.client.Datasets.Query(s.ctx, apl,
+		query.SetStartTime(startTime),
+		query.SetEndTime(endTime),
+	)
+	s.Require().NoError(err)
+	s.Require().NotNil(queryResult)
+
+	s.NotZero(queryResult.Status.ElapsedTime)
+	s.EqualValues(14, queryResult.Status.RowsExamined)
+	s.EqualValues(14, queryResult.Status.RowsMatched)
+	if s.Len(queryResult.Tables, 1) {
+		table := queryResult.Tables[0]
+
+		if s.Len(table.Sources, 1) {
+			// FIXME(lukasmalkmus): Uncomment once there is consensus on the
+			// source name format.
+			// s.Equal(s.dataset.ID, table.Sources[0].Name)
+		}
+
+		if s.Len(table.Fields, 1) && s.NotNil(table.Fields[0].Aggregation) {
+			agg := table.Fields[0].Aggregation
+
+			s.Equal(query.OpTopk, agg.Op)
+			s.Equal([]string{"remote_ip"}, agg.Fields)
+			s.Equal([]any{1.}, agg.Args)
+		}
+
+		if s.Len(table.Columns, 1) && s.Len(table.Columns[0], 1) {
+			v := table.Columns[0][0].([]any)
+			m := v[0].(map[string]any)
+
+			s.Equal("93.180.71.1", m["key"])
+			s.Equal(7., m["count"])
+			s.Equal(0., m["error"])
+		}
+	}

 	// Also run a legacy query and make sure we see some results.
 	legacyQueryResult, err := s.client.Datasets.QueryLegacy(s.ctx, s.dataset.ID, querylegacy.Query{
@@ -258,6 +313,7 @@
 	s.Require().NoError(err)
 	s.Require().NotNil(legacyQueryResult)

+	s.NotZero(legacyQueryResult.Status.ElapsedTime)
 	s.EqualValues(14, legacyQueryResult.Status.RowsExamined)
 	s.EqualValues(14, legacyQueryResult.Status.RowsMatched)
 	s.Len(legacyQueryResult.Matches, 14)
@@ -335,16 +391,16 @@ func (s *DatasetsTestSuite) TestCursor() {
 	now := time.Now().Truncate(time.Second)

 	_, err := s.client.Datasets.IngestEvents(s.ctx, s.dataset.ID, []axiom.Event{
 		{ // Oldest
-			"_time": now.Add(-time.Second * 3),
-			"foo":   "bar",
+			ingest.TimestampField: now.Add(-time.Second * 3),
+			"foo":                 "bar",
 		},
 		{
-			"_time": now.Add(-time.Second * 2),
-			"foo":   "baz",
+			ingest.TimestampField: now.Add(-time.Second * 2),
+			"foo":                 "baz",
 		},
 		{ // Newest
-			"_time": now.Add(-time.Second * 1),
-			"foo":   "buz",
+			ingest.TimestampField: now.Add(-time.Second * 1),
+			"foo":                 "buz",
 		},
 	})
 	s.Require().NoError(err)
@@ -360,16 +416,28 @@ func (s *DatasetsTestSuite) TestCursor() {
 	)
 	s.Require().NoError(err)

-	if s.Len(queryResult.Matches, 3) {
-		s.Equal("buz", queryResult.Matches[0].Data["foo"])
-		s.Equal("baz", queryResult.Matches[1].Data["foo"])
-		s.Equal("bar", queryResult.Matches[2].Data["foo"])
+	// FIXME(lukasmalkmus): Tabular results format is not yet returning the
+	// _rowID column.
+	s.T().Skip()
+
+	// HINT(lukasmalkmus): Expecting four columns: _time, _sysTime, _rowID, foo.
+	// This is only checked once for the first query result to verify the
+	// dataset schema. The following queries will only check the results in the
+	// columns.
+	s.Require().Len(queryResult.Tables, 1)
+	s.Require().Len(queryResult.Tables[0].Columns, 4)
+	s.Require().Len(queryResult.Tables[0].Columns[0], 3)
+
+	if s.Len(queryResult.Tables, 1) {
+		s.Equal("buz", queryResult.Tables[0].Columns[3][0])
+		s.Equal("baz", queryResult.Tables[0].Columns[3][1])
+		s.Equal("bar", queryResult.Tables[0].Columns[3][2])
 	}

 	// HINT(lukasmalkmus): In a real-world scenario, the cursor would be
 	// retrieved from the query status MinCursor or MaxCursor fields, depending
 	// on the queries sort order.
-	midRowID := queryResult.Matches[1].RowID
+	midRowID := queryResult.Tables[0].Columns[2][1].(string)

 	// Query events with a cursor in descending order...
apl = fmt.Sprintf("['%s'] | sort by _time desc", s.dataset.ID) @@ -382,8 +450,8 @@ func (s *DatasetsTestSuite) TestCursor() { // "buz" and "baz" skipped by the cursor, only "bar" is returned. The cursor // is exclusive, so "baz" is not included. - if s.Len(queryResult.Matches, 1) { - s.Equal("bar", queryResult.Matches[0].Data["foo"]) + if s.Len(queryResult.Tables[0].Columns[0], 1) { + s.Equal("bar", queryResult.Tables[0].Columns[0][0]) } // ...again, but with the cursor inclusive. @@ -396,9 +464,9 @@ func (s *DatasetsTestSuite) TestCursor() { // "buz" skipped by the cursor, only "baz" and "bar" is returned. The cursor // is inclusive, so "baz" is included. - if s.Len(queryResult.Matches, 2) { - s.Equal("baz", queryResult.Matches[0].Data["foo"]) - s.Equal("bar", queryResult.Matches[1].Data["foo"]) + if s.Len(queryResult.Tables[0].Columns[0], 2) { + s.Equal("baz", queryResult.Tables[0].Columns[0][0]) + s.Equal("bar", queryResult.Tables[0].Columns[0][1]) } // Query events with a cursor in ascending order... @@ -412,8 +480,8 @@ func (s *DatasetsTestSuite) TestCursor() { // "bar" and "baz" skipped by the cursor, only "buz" is returned. The cursor // is exclusive, so "baz" is not included. - if s.Len(queryResult.Matches, 1) { - s.Equal("buz", queryResult.Matches[0].Data["foo"]) + if s.Len(queryResult.Tables[0].Columns[0], 1) { + s.Equal("buz", queryResult.Tables[0].Columns[0][0]) } // ...again, but with the cursor inclusive. @@ -426,9 +494,9 @@ func (s *DatasetsTestSuite) TestCursor() { // "bar" skipped by the cursor, only "baz" and "buz" is returned. The cursor // is inclusive, so "baz" is included. - if s.Len(queryResult.Matches, 2) { - s.Equal("baz", queryResult.Matches[0].Data["foo"]) - s.Equal("buz", queryResult.Matches[1].Data["foo"]) + if s.Len(queryResult.Tables[0].Columns[0], 2) { + s.Equal("baz", queryResult.Tables[0].Columns[0][0]) + s.Equal("buz", queryResult.Tables[0].Columns[0][1]) } } diff --git a/axiom/datasets_test.go b/axiom/datasets_test.go index 9aa273ab..ad288a15 100644 --- a/axiom/datasets_test.go +++ b/axiom/datasets_test.go @@ -21,169 +21,285 @@ import ( ) const actQueryResp = `{ - "request": { - "startTime": "2021-07-20T16:34:57.911170243Z", - "endTime": "2021-08-19T16:34:57.885821616Z", - "resolution": "", - "aggregations": null, - "groupBy": null, - "order": null, - "limit": 1000, - "virtualFields": null, - "project": null, - "cursor": "", - "includeCursor": false - }, - "status": { - "elapsedTime": 542114, - "blocksExamined": 4, - "rowsExamined": 142655, - "rowsMatched": 142655, - "numGroups": 0, - "isPartial": false, - "cacheStatus": 1, - "minBlockTime": "2020-11-19T11:06:31.569475746Z", - "maxBlockTime": "2020-11-27T12:06:38.966791794Z" - }, - "matches": [ - { - "_time": "2020-11-19T11:06:31.569475746Z", - "_sysTime": "2020-11-19T11:06:31.581384524Z", - "_rowId": "c776x1uafkpu-4918f6cb9000095-0", - "data": { - "agent": "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)", - "bytes": 0, - "referrer": "-", - "remote_ip": "93.180.71.3", - "remote_user": "-", - "request": "GET /downloads/product_1 HTTP/1.1", - "response": 304, - "time": "17/May/2015:08:05:32 +0000" + "tables": [ + { + "name": "0", + "sources": [ + { + "name": "test" } - }, - { - "_time": "2020-11-19T11:06:31.569479846Z", - "_sysTime": "2020-11-19T11:06:31.581384524Z", - "_rowId": "c776x1uafnvq-4918f6cb9000095-1", - "data": { - "agent": "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)", - "bytes": 0, - "referrer": "-", - "remote_ip": "93.180.71.3", - "remote_user": "-", - "request": "GET /downloads/product_1 
HTTP/1.1", - "response": 304, - "time": "17/May/2015:08:05:23 +0000" + ], + "fields": [ + { + "name": "_time", + "type": "string" + }, + { + "name": "_sysTime", + "type": "string" + }, + { + "name": "_rowId", + "type": "string" + }, + { + "name": "agent", + "type": "string" + }, + { + "name": "bytes", + "type": "float64" + }, + { + "name": "referrer", + "type": "string" + }, + { + "name": "remote_ip", + "type": "string" + }, + { + "name": "remote_user", + "type": "string" + }, + { + "name": "request", + "type": "string" + }, + { + "name": "response", + "type": "float64" + }, + { + "name": "time", + "type": "string" } - } - ], - "buckets": { - "series": [], - "totals": [] - }, - "datasetNames": [ - "test" - ] - }` + ], + "range": { + "field": "_time", + "start": "2023-03-21T13:38:51.735448191Z", + "end": "2023-03-28T13:38:51.735448191Z" + }, + "columns": [ + [ + "2020-11-19T11:06:31.569475746Z", + "2020-11-19T11:06:31.569479846Z" + ], + [ + "2020-11-19T11:06:31.581384524Z", + "2020-11-19T11:06:31.581384524Z" + ], + [ + "c776x1uafkpu-4918f6cb9000095-0", + "c776x1uafnvq-4918f6cb9000095-1" + ], + [ + "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)", + "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)" + ], + [ + 0, + 0 + ], + [ + "-", + "-" + ], + [ + "93.180.71.3", + "93.180.71.3" + ], + [ + "-", + "-" + ], + [ + "GET /downloads/product_1 HTTP/1.1", + "GET /downloads/product_1 HTTP/1.1" + ], + [ + 304, + 304 + ], + [ + "17/May/2015:08:05:32 +0000", + "17/May/2015:08:05:23 +0000" + ] + ] + } + ], + "status": { + "minCursor": "c776x1uafkpu-4918f6cb9000095-0", + "maxCursor": "c776x1uafnvq-4918f6cb9000095-1", + "elapsedTime": 542114, + "rowsExamined": 142655, + "rowsMatched": 142655 + } +}` const actLegacyQueryResp = `{ - "status": { - "elapsedTime": 542114, - "blocksExamined": 4, - "rowsExamined": 142655, - "rowsMatched": 142655, - "numGroups": 0, - "isPartial": false, - "cacheStatus": 1, - "minBlockTime": "2020-11-19T11:06:31.569475746Z", - "maxBlockTime": "2020-11-27T12:06:38.966791794Z" + "status": { + "minCursor": "c776x1uafkpu-4918f6cb9000095-0", + "maxCursor": "c776x1uafnvq-4918f6cb9000095-1", + "elapsedTime": 542114, + "blocksExamined": 4, + "rowsExamined": 142655, + "rowsMatched": 142655, + "numGroups": 0, + "isPartial": false, + "cacheStatus": 1, + "minBlockTime": "2020-11-19T11:06:31.569475746Z", + "maxBlockTime": "2020-11-27T12:06:38.966791794Z" + }, + "matches": [ + { + "_time": "2020-11-19T11:06:31.569475746Z", + "_sysTime": "2020-11-19T11:06:31.581384524Z", + "_rowId": "c776x1uafkpu-4918f6cb9000095-0", + "data": { + "agent": "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)", + "bytes": 0, + "referrer": "-", + "remote_ip": "93.180.71.3", + "remote_user": "-", + "request": "GET /downloads/product_1 HTTP/1.1", + "response": 304, + "time": "17/May/2015:08:05:32 +0000" + } }, - "matches": [ - { - "_time": "2020-11-19T11:06:31.569475746Z", - "_sysTime": "2020-11-19T11:06:31.581384524Z", - "_rowId": "c776x1uafkpu-4918f6cb9000095-0", - "data": { - "agent": "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)", - "bytes": 0, - "referrer": "-", - "remote_ip": "93.180.71.3", - "remote_user": "-", - "request": "GET /downloads/product_1 HTTP/1.1", - "response": 304, - "time": "17/May/2015:08:05:32 +0000" - } - }, - { - "_time": "2020-11-19T11:06:31.569479846Z", - "_sysTime": "2020-11-19T11:06:31.581384524Z", - "_rowId": "c776x1uafnvq-4918f6cb9000095-1", - "data": { - "agent": "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)", - "bytes": 0, - "referrer": "-", - "remote_ip": "93.180.71.3", - "remote_user": 
"-", - "request": "GET /downloads/product_1 HTTP/1.1", - "response": 304, - "time": "17/May/2015:08:05:23 +0000" - } + { + "_time": "2020-11-19T11:06:31.569479846Z", + "_sysTime": "2020-11-19T11:06:31.581384524Z", + "_rowId": "c776x1uafnvq-4918f6cb9000095-1", + "data": { + "agent": "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)", + "bytes": 0, + "referrer": "-", + "remote_ip": "93.180.71.3", + "remote_user": "-", + "request": "GET /downloads/product_1 HTTP/1.1", + "response": 304, + "time": "17/May/2015:08:05:23 +0000" } - ], - "buckets": { - "series": [], - "totals": [] } - }` + ] +}` var ( expQueryRes = &query.Result{ - Datasets: []string{"test"}, - Status: query.Status{ - ElapsedTime: time.Microsecond * 542_114, - BlocksExamined: 4, - RowsExamined: 142655, - RowsMatched: 142655, - NumGroups: 0, - IsPartial: false, - MinBlockTime: parseTimeOrPanic("2020-11-19T11:06:31.569475746Z"), - MaxBlockTime: parseTimeOrPanic("2020-11-27T12:06:38.966791794Z"), - }, - Matches: []query.Entry{ + Tables: []query.Table{ { - Time: parseTimeOrPanic("2020-11-19T11:06:31.569475746Z"), - SysTime: parseTimeOrPanic("2020-11-19T11:06:31.581384524Z"), - RowID: "c776x1uafkpu-4918f6cb9000095-0", - Data: map[string]any{ - "agent": "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)", - "bytes": float64(0), - "referrer": "-", - "remote_ip": "93.180.71.3", - "remote_user": "-", - "request": "GET /downloads/product_1 HTTP/1.1", - "response": float64(304), - "time": "17/May/2015:08:05:32 +0000", + Name: "0", + Sources: []query.Source{ + { + Name: "test", + }, }, - }, - { - Time: parseTimeOrPanic("2020-11-19T11:06:31.569479846Z"), - SysTime: parseTimeOrPanic("2020-11-19T11:06:31.581384524Z"), - RowID: "c776x1uafnvq-4918f6cb9000095-1", - Data: map[string]any{ - "agent": "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)", - "bytes": float64(0), - "referrer": "-", - "remote_ip": "93.180.71.3", - "remote_user": "-", - "request": "GET /downloads/product_1 HTTP/1.1", - "response": float64(304), - "time": "17/May/2015:08:05:23 +0000", + Fields: []query.Field{ + { + Name: "_time", + Type: "string", + }, + { + Name: "_sysTime", + Type: "string", + }, + { + Name: "_rowId", + Type: "string", + }, + { + Name: "agent", + Type: "string", + }, + { + Name: "bytes", + Type: "float64", + }, + { + Name: "referrer", + Type: "string", + }, + { + Name: "remote_ip", + Type: "string", + }, + { + Name: "remote_user", + Type: "string", + }, + { + Name: "request", + Type: "string", + }, + { + Name: "response", + Type: "float64", + }, + { + Name: "time", + Type: "string", + }, + }, + Range: &query.RangeInfo{ + Field: "_time", + Start: parseTimeOrPanic("2023-03-21T13:38:51.735448191Z"), + End: parseTimeOrPanic("2023-03-28T13:38:51.735448191Z"), + }, + Columns: []query.Column{ + []any{ + "2020-11-19T11:06:31.569475746Z", + "2020-11-19T11:06:31.569479846Z", + }, + []any{ + "2020-11-19T11:06:31.581384524Z", + "2020-11-19T11:06:31.581384524Z", + }, + []any{ + "c776x1uafkpu-4918f6cb9000095-0", + "c776x1uafnvq-4918f6cb9000095-1", + }, + []any{ + "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)", + "Debian APT-HTTP/1.3 (0.8.16~exp12ubuntu10.21)", + }, + []any{ + float64(0), + float64(0), + }, + []any{ + "-", + "-", + }, + []any{ + "93.180.71.3", + "93.180.71.3", + }, + []any{ + "-", + "-", + }, + []any{ + "GET /downloads/product_1 HTTP/1.1", + "GET /downloads/product_1 HTTP/1.1", + }, + []any{ + float64(304), + float64(304), + }, + []any{ + "17/May/2015:08:05:32 +0000", + "17/May/2015:08:05:23 +0000", + }, }, }, }, - Buckets: query.Timeseries{ - Series: 
[]query.Interval{},
-			Totals: []query.EntryGroup{},
+		Status: query.Status{
+			ElapsedTime:  time.Microsecond * 542_114,
+			MinCursor:    "c776x1uafkpu-4918f6cb9000095-0",
+			MaxCursor:    "c776x1uafnvq-4918f6cb9000095-1",
+			RowsExamined: 142655,
+			RowsMatched:  142655,
 		},
 		TraceID: "abc",
 	}
@@ -191,6 +307,8 @@ var (
 	expLegacyQueryRes = &querylegacy.Result{
 		Status: querylegacy.Status{
 			ElapsedTime:    time.Microsecond * 542_114,
+			MinCursor:      "c776x1uafkpu-4918f6cb9000095-0",
+			MaxCursor:      "c776x1uafnvq-4918f6cb9000095-1",
 			BlocksExamined: 4,
 			RowsExamined:   142655,
 			RowsMatched:    142655,
@@ -231,10 +349,6 @@ var (
 				},
 			},
 		},
-		Buckets: querylegacy.Timeseries{
-			Series: []querylegacy.Interval{},
-			Totals: []querylegacy.EntryGroup{},
-		},
 		SavedQueryID: "fyTFUldK4Z5219rWaz",
 		TraceID:      "abc",
 	}
@@ -961,7 +1075,9 @@ func TestDatasetsService_IngestChannel_BufferedSlow(t *testing.T) {
 func TestDatasetsService_Query(t *testing.T) {
 	hf := func(w http.ResponseWriter, r *http.Request) {
 		assert.Equal(t, http.MethodPost, r.Method)
-		assert.Equal(t, mediaTypeJSON, r.Header.Get("content-type"))
+		assert.Equal(t, mediaTypeJSON, r.Header.Get("Content-Type"))
+
+		assert.Equal(t, "tabular", r.URL.Query().Get("format"))

 		var req aplQueryRequest
 		err := json.NewDecoder(r.Body).Decode(&req)
@@ -988,28 +1104,6 @@
 	assert.Equal(t, expQueryRes, res)
 }

-func TestDatasetsService_Query_WithGroupBy(t *testing.T) {
-	hf := func(w http.ResponseWriter, _ *http.Request) {
-		w.Header().Set("Content-Type", mediaTypeJSON)
-		w.Header().Set("X-Axiom-Trace-Id", "abc")
-		_, _ = fmt.Fprint(w, `{
-			"request": {
-				"groupBy": [
-					"code",
-					"path"
-				]
-			}
-		}`)
-	}
-
-	client := setup(t, "/v1/datasets/_apl", hf)
-
-	res, err := client.Datasets.Query(context.Background(), "test")
-	require.NoError(t, err)
-
-	assert.Equal(t, []string{"code", "path"}, res.GroupBy)
-}
-
 func TestDatasetsService_QueryLegacy(t *testing.T) {
 	hf := func(w http.ResponseWriter, r *http.Request) {
 		assert.Equal(t, http.MethodPost, r.Method)
diff --git a/axiom/query/doc.go b/axiom/query/doc.go
index 37e278d2..b6111814 100644
--- a/axiom/query/doc.go
+++ b/axiom/query/doc.go
@@ -4,4 +4,28 @@
 // Usage:
 //
 //	import "github.com/axiomhq/axiom-go/axiom/query"
+//
+// # Tabular Result Format
+//
+// Query results are returned in a tabular format. Each query [Result] contains
+// one or more [Table]s. Each [Table] contains a list of [Field]s and a list of
+// [Column]s. All [Column]s are equally sized and there are as many [Column]s as
+// there are [Field]s.
+//
+// In case you want to work with events that are usually composed of multiple
+// fields, you will find the values separated by [Column]. To aid with working
+// with events in the tabular result format, the [Table] type provides the
+// [Table.Rows] method that returns an [iter.Iter] over the [Row]s of the
+// [Table]. Under the hood, each call to [iter.Iter.Next] composes a [Row] from
+// the [Column]s of the [Table]. Alternatively, you can compose an [iter.Iter]
+// over the [Row]s yourself using the [Rows] function. This allows for passing
+// in a subset of the [Column]s of the [Table] to work with:
+//
+//	// Only build rows from the first two columns of the table. Returns an
+//	// iterator over the rows.
+//	rows := query.Rows(result.Tables[0].Columns[0:2])
+//
+// Keep in mind that it is preferable to alter the APL query to only return the
+// fields you are interested in instead of working with a subset of the columns
+// after the query has been executed.
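For illustration, here is a minimal, self-contained sketch of consuming the tabular result format end to end. It relies only on the [Result], [Table], [Field] and [Column] types introduced by this change; the dataset name ("test"), the APL query and the environment-based client setup (AXIOM_TOKEN and, for personal tokens, AXIOM_ORG_ID) are placeholder assumptions, not part of the diff:

	package main

	import (
		"context"
		"fmt"
		"log"

		"github.com/axiomhq/axiom-go/axiom"
	)

	func main() {
		// Assumes AXIOM_TOKEN (and AXIOM_ORG_ID, if required) are set in the
		// environment. The dataset name and query are placeholders.
		client, err := axiom.NewClient()
		if err != nil {
			log.Fatal(err)
		}

		res, err := client.Datasets.Query(context.Background(), "['test'] | take 10")
		if err != nil {
			log.Fatal(err)
		}

		// Columns are field-major: the column at index j holds all values of
		// the field at index j. Recompose row-oriented events by reading every
		// column at the same row index.
		for _, table := range res.Tables {
			if len(table.Columns) == 0 {
				continue
			}
			for row := range table.Columns[0] {
				for j, field := range table.Fields {
					fmt.Printf("%s=%v ", field.Name, table.Columns[j][row])
				}
				fmt.Println()
			}
		}
	}

This nested loop is what the [Table.Rows] iterator described above does under the hood, one [Row] per call.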
package query diff --git a/axiom/query/result.go b/axiom/query/result.go index a311eae0..fdd2be31 100644 --- a/axiom/query/result.go +++ b/axiom/query/result.go @@ -2,175 +2,116 @@ package query import ( "encoding/json" - "fmt" - "strings" "time" ) -//go:generate go run golang.org/x/tools/cmd/stringer -type=MessageCode,MessagePriority -linecomment -output=result_string.go - -// MessageCode represents the code of a message associated with a query. -type MessageCode uint8 - -// All available message codes. -const ( - emptyMessageCode MessageCode = iota // - - VirtualFieldFinalizeError // virtual_field_finalize_error - MissingColumn // missing_column - LicenseLimitForQueryWarning // license_limit_for_query_warning - DefaultLimitWarning // default_limit_warning - - // CompilerWarning is a generic code. Please inspect the message text for - // more details. - CompilerWarning // apl_ -) - -func messageCodeFromString(s string) (mc MessageCode, err error) { - if strings.HasPrefix(s, CompilerWarning.String()) { - return CompilerWarning, nil - } - - switch s { - case emptyMessageCode.String(): - mc = emptyMessageCode - case VirtualFieldFinalizeError.String(): - mc = VirtualFieldFinalizeError - case MissingColumn.String(): - mc = MissingColumn - case LicenseLimitForQueryWarning.String(): - mc = LicenseLimitForQueryWarning - case DefaultLimitWarning.String(): - mc = DefaultLimitWarning - default: - err = fmt.Errorf("unknown message code %q", s) - } - - return mc, err -} - -// MarshalJSON implements [json.Marshaler]. It is in place to marshal the -// message code to its string representation because that's what the server -// expects. -func (mc MessageCode) MarshalJSON() ([]byte, error) { - return json.Marshal(mc.String()) -} - -// UnmarshalJSON implements [json.Unmarshaler]. It is in place to unmarshal the -// message code from the string representation the server returns. -func (mc *MessageCode) UnmarshalJSON(b []byte) (err error) { - var s string - if err = json.Unmarshal(b, &s); err != nil { - return err - } - - *mc, err = messageCodeFromString(s) - - return err -} - -// MessagePriority represents the priority of a message associated with a query. -type MessagePriority uint8 - -// All available message priorities. -const ( - emptyMessagePriority MessagePriority = iota // - - Trace // trace - Debug // debug - Info // info - Warn // warn - Error // error - Fatal // fatal -) - -func messagePriorityFromString(s string) (mp MessagePriority, err error) { - switch s { - case emptyMessagePriority.String(): - mp = emptyMessagePriority - case Trace.String(): - mp = Trace - case Debug.String(): - mp = Debug - case Info.String(): - mp = Info - case Warn.String(): - mp = Warn - case Error.String(): - mp = Error - case Fatal.String(): - mp = Fatal - default: - err = fmt.Errorf("unknown message priority %q", s) - } - - return mp, err -} - -// MarshalJSON implements [json.Marshaler]. It is in place to marshal the -// message priority to its string representation because that's what the server -// expects. -func (mp MessagePriority) MarshalJSON() ([]byte, error) { - return json.Marshal(mp.String()) -} - -// UnmarshalJSON implements [json.Unmarshaler]. It is in place to unmarshal the -// message priority from the string representation the server returns. -func (mp *MessagePriority) UnmarshalJSON(b []byte) (err error) { - var s string - if err = json.Unmarshal(b, &s); err != nil { - return err - } - - *mp, err = messagePriorityFromString(s) - - return err -} - // Result is the result of an APL query. 
type Result struct {
-	// The datasets that were queried in order to create the result.
-	Datasets []string `json:"datasetNames"`
+	// Tables in the query result.
+	Tables []Table `json:"tables"`
 	// Status of the query result.
 	Status Status `json:"status"`
-	// Matches are the events that matched the query.
-	Matches []Entry `json:"matches"`
-	// Buckets are the time series buckets.
-	Buckets Timeseries `json:"buckets"`
-	// GroupBy is a list of field names to group the query result by. Only valid
-	// when at least one aggregation is specified.
-	GroupBy []string `json:"-"`
 	// TraceID is the ID of the trace that was generated by the server for this
 	// results query request.
 	TraceID string `json:"-"`
 }

-// Status is the status of a query result.
+// Table in the [Result] of an APL query.
+type Table struct {
+	// Name of the table. Default name for unnamed results is "0", "1", "2",
+	// ... etc.
+	Name string `json:"name"`
+	// Sources are the datasets that were consulted in order to create the
+	// table.
+	Sources []Source `json:"sources"`
+	// Fields in the table matching the order of the [Columns] (e.g. the
+	// [Column] at index 0 has the values for the [Field] at index 0).
+	Fields []Field `json:"fields"`
+	// Order of the rows in the table.
+	Order []Order `json:"order"`
+	// Groups are the groups of the table.
+	Groups []Group `json:"groups"`
+	// Range specifies the window the query was restricted to. Nil if the query
+	// was not restricted to a time window.
+	Range *RangeInfo `json:"range"`
+	// Buckets defines if the query is bucketed (usually on the "_time" field).
+	// Nil if the query returns a non-bucketed result.
+	Buckets *BucketInfo `json:"buckets"`
+	// Columns in the table matching the order of the [Fields] (e.g. the
+	// [Column] at index 0 has the values for the [Field] at index 0). In case
+	// of sub-groups, rows will repeat the group value.
+	Columns []Column `json:"columns"`
+}
+
+// Field in a [Table].
+type Field struct {
+	// Name of the field.
+	Name string `json:"name"`
+	// Type of the field. Can also be a composite type made up of multiple
+	// types separated by a vertical line ("|").
+	Type string `json:"type"`
+	// Aggregation is the aggregation applied to the field. Nil if the field
+	// is not aggregated.
+	Aggregation *Aggregation `json:"agg"`
+}
+
+// Aggregation that is applied to a [Field] in a [Table].
+type Aggregation struct {
+	// Op is the aggregation operation applied.
+	Op string `json:"name"`
+	// Fields are the names of the fields the aggregation is applied on.
+	Fields []string `json:"fields"`
+	// Args are the arguments of the aggregation.
+	Args []any `json:"args"`
+}
+
+// Source that was consulted in order to create a [Table].
+type Source struct {
+	// Name of the source.
+	Name string `json:"name"`
+}
+
+// Order of a [Field] in a [Table].
+type Order struct {
+	// Field is the name of the field to order by.
+	Field string `json:"field"`
+	// Desc is true if the order is descending. Otherwise the order is
+	// ascending.
+	Desc bool `json:"desc"`
+}
+
+// Group in a [Table].
+type Group struct {
+	// Name of the group.
+	Name string `json:"name"`
+}
+
+// RangeInfo specifies the window a query was restricted to.
+type RangeInfo struct {
+	// Field specifies the field name on which the query range was restricted.
+	// Usually "_time".
+	Field string
+	// Start is the starting time the query is limited by. Usually the start of
+	// the time window. Queries are restricted to the interval [start,end).
+	Start time.Time
+	// End is the ending time the query is limited by. Usually the end of the
+	// time window. Queries are restricted to the interval [start,end).
+ End time.Time +} + +// BucketInfo captures information about how a grouped query is sorted into +// buckets. Usually buckets are created on the "_time" column, +type BucketInfo struct { + // Field specifies the field used to create buckets on. Usually this would + // be "_time". + Field string + // An integer or float representing the fixed bucket size. + // When the bucket field is "_time" this value is in nanoseconds. + Size any +} + +// Column in a [Table] containing the raw values of a [Field]. +type Column []any + +// Status of an APL query [Result]. type Status struct { - // ElapsedTime is the duration it took the query to execute. - ElapsedTime time.Duration `json:"elapsedTime"` - // BlocksExamined is the amount of blocks that have been examined by the - // query. - BlocksExamined uint64 `json:"blocksExamined"` - // RowsExamined is the amount of rows that have been examined by the query. - RowsExamined uint64 `json:"rowsExamined"` - // RowsMatched is the amount of rows that matched the query. - RowsMatched uint64 `json:"rowsMatched"` - // NumGroups is the amount of groups returned by the query. - NumGroups uint32 `json:"numGroups"` - // IsPartial describes if the query result is a partial result. - IsPartial bool `json:"isPartial"` - // ContinuationToken is populated when IsPartial is true and must be passed - // to the next query request to retrieve the next result set. - ContinuationToken string `json:"continuationToken"` - // IsEstimate describes if the query result is estimated. - IsEstimate bool `json:"isEstimate"` - // MinBlockTime is the timestamp of the oldest block examined. - MinBlockTime time.Time `json:"minBlockTime"` - // MaxBlockTime is the timestamp of the newest block examined. - MaxBlockTime time.Time `json:"maxBlockTime"` - // Messages associated with the query. - Messages []Message `json:"messages"` // MinCursor is the id of the oldest row, as seen server side. May be lower // than what the results include if the server scanned more data than // included in the results. Can be used to efficiently resume time-sorted @@ -181,18 +122,12 @@ type Status struct { // included in the results. Can be used to efficiently resume time-sorted // non-aggregating queries (i.e. filtering only). MaxCursor string `json:"maxCursor"` -} - -// MarshalJSON implements [json.Marshaler]. It is in place to marshal the -// elapsed time into its microsecond representation because that's what the -// server expects. -func (s Status) MarshalJSON() ([]byte, error) { - type localStatus Status - - // Set to the value in microseconds. - s.ElapsedTime = time.Duration(s.ElapsedTime.Microseconds()) - - return json.Marshal(localStatus(s)) + // ElapsedTime is the duration it took the query to execute. + ElapsedTime time.Duration `json:"elapsedTime"` + // RowsExamined is the amount of rows that have been examined by the query. + RowsExamined uint64 `json:"rowsExamined"` + // RowsMatched is the amount of rows that matched the query. + RowsMatched uint64 `json:"rowsMatched"` } // UnmarshalJSON implements [json.Unmarshaler]. It is in place to unmarshal the @@ -211,67 +146,3 @@ func (s *Status) UnmarshalJSON(b []byte) error { return nil } - -// Message is a message associated with a query result. -type Message struct { - // Priority of the message. - Priority MessagePriority `json:"priority"` - // Code of the message. - Code MessageCode `json:"code"` - // Count describes how often a message of this type was raised by the query. 
- Count uint `json:"count"` - // Text is a human readable text representation of the message. - Text string `json:"msg"` -} - -// Entry is an event that matched a query and is thus part of the result set. -type Entry struct { - // Time is the time the event occurred. Matches SysTime if not specified - // during ingestion. - Time time.Time `json:"_time"` - // SysTime is the time the event was recorded on the server. - SysTime time.Time `json:"_sysTime"` - // RowID is the unique ID of the event row. It can be used as a cursor to - // resume a query. See [query.SetCursor]. - RowID string `json:"_rowId"` - // Data contains the raw data of the event (with filters and aggregations - // applied). - Data map[string]any `json:"data"` -} - -// Timeseries are queried time series. -type Timeseries struct { - // Series are the intervals that build a time series. - Series []Interval `json:"series"` - // Totals of the time series. - Totals []EntryGroup `json:"totals"` -} - -// Interval is the interval of queried time series. -type Interval struct { - // StartTime of the interval. - StartTime time.Time `json:"startTime"` - // EndTime of the interval. - EndTime time.Time `json:"endTime"` - // Groups of the interval. - Groups []EntryGroup `json:"groups"` -} - -// EntryGroup is a group of queried event. -type EntryGroup struct { - // ID is the unique the group. - ID uint64 `json:"id"` - // Group maps the fieldnames to the unique values for the entry. - Group map[string]any `json:"group"` - // Aggregations of the group. - Aggregations []EntryGroupAgg `json:"aggregations"` -} - -// EntryGroupAgg is an aggregation which is part of a group of queried events. -type EntryGroupAgg struct { - // Alias is the aggregations alias. If it wasn't specified at query time, it - // is the uppercased string representation of the aggregation operation. - Alias string `json:"op"` - // Value is the result value of the aggregation. - Value any `json:"value"` -} diff --git a/axiom/query/result_string.go b/axiom/query/result_string.go deleted file mode 100644 index 082c0f3b..00000000 --- a/axiom/query/result_string.go +++ /dev/null @@ -1,51 +0,0 @@ -// Code generated by "stringer -type=MessageCode,MessagePriority -linecomment -output=result_string.go"; DO NOT EDIT. - -package query - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[emptyMessageCode-0] - _ = x[VirtualFieldFinalizeError-1] - _ = x[MissingColumn-2] - _ = x[LicenseLimitForQueryWarning-3] - _ = x[DefaultLimitWarning-4] - _ = x[CompilerWarning-5] -} - -const _MessageCode_name = "virtual_field_finalize_errormissing_columnlicense_limit_for_query_warningdefault_limit_warningapl_" - -var _MessageCode_index = [...]uint8{0, 0, 28, 42, 73, 94, 98} - -func (i MessageCode) String() string { - if i >= MessageCode(len(_MessageCode_index)-1) { - return "MessageCode(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _MessageCode_name[_MessageCode_index[i]:_MessageCode_index[i+1]] -} -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
- var x [1]struct{} - _ = x[emptyMessagePriority-0] - _ = x[Trace-1] - _ = x[Debug-2] - _ = x[Info-3] - _ = x[Warn-4] - _ = x[Error-5] - _ = x[Fatal-6] -} - -const _MessagePriority_name = "tracedebuginfowarnerrorfatal" - -var _MessagePriority_index = [...]uint8{0, 0, 5, 10, 14, 18, 23, 28} - -func (i MessagePriority) String() string { - if i >= MessagePriority(len(_MessagePriority_index)-1) { - return "MessagePriority(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _MessagePriority_name[_MessagePriority_index[i]:_MessagePriority_index[i+1]] -} diff --git a/axiom/query/result_test.go b/axiom/query/result_test.go index 75882cb2..990b9169 100644 --- a/axiom/query/result_test.go +++ b/axiom/query/result_test.go @@ -1,96 +1,13 @@ package query import ( - "encoding/json" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - - "github.com/axiomhq/axiom-go/internal/test/testhelper" -) - -var ( - expStatus = Status{ - ElapsedTime: time.Second, - BlocksExamined: 10, - RowsExamined: 100000, - RowsMatched: 2, - NumGroups: 1, - IsPartial: true, - ContinuationToken: "123", - IsEstimate: true, - MinBlockTime: parseTimeOrPanic("2022-08-15T10:55:53Z"), - MaxBlockTime: parseTimeOrPanic("2022-08-15T11:55:53Z"), - Messages: []Message{ - { - Priority: Error, - Code: MissingColumn, - Count: 2, - Text: "missing column", - }, - { - Priority: Warn, - Code: CompilerWarning, - Count: 1, - Text: "some apl compiler warning", - }, - }, - MinCursor: "c776x1uafkpu-4918f6cb9000095-0", - MaxCursor: "c776x1uafnvq-4918f6cb9000095-1", - } - - expStatusJSON = `{ - "elapsedTime": 1000000, - "blocksExamined": 10, - "rowsExamined": 100000, - "rowsMatched": 2, - "numGroups": 1, - "isPartial": true, - "continuationToken": "123", - "isEstimate": true, - "minBlockTime": "2022-08-15T10:55:53Z", - "maxBlockTime": "2022-08-15T11:55:53Z", - "messages": [ - { - "priority": "error", - "code": "missing_column", - "count": 2, - "msg": "missing column" - }, - { - "priority": "warn", - "code": "apl_convertingfromtypestotypes_1", - "count": 1, - "msg": "some apl compiler warning" - } - ], - "minCursor": "c776x1uafkpu-4918f6cb9000095-0", - "maxCursor": "c776x1uafnvq-4918f6cb9000095-1" - }` ) -func TestStatus(t *testing.T) { - b, err := json.Marshal(expStatus) - require.NoError(t, err) - require.NotEmpty(t, b) - - var act Status - err = json.Unmarshal(b, &act) - require.NoError(t, err) - - assert.Equal(t, expStatus, act) -} - -func TestStatus_MarshalJSON(t *testing.T) { - act, err := expStatus.MarshalJSON() - require.NoError(t, err) - require.NotEmpty(t, act) - - testhelper.JSONEqExp(t, expStatusJSON, string(act), []string{"messages.1.code"}) -} - func TestStatus_UnmarshalJSON(t *testing.T) { exp := Status{ ElapsedTime: time.Second, @@ -102,75 +19,3 @@ func TestStatus_UnmarshalJSON(t *testing.T) { assert.Equal(t, exp, act) } - -func TestMessageCode_Unmarshal(t *testing.T) { - var act struct { - MessageCode MessageCode `json:"code"` - } - err := json.Unmarshal([]byte(`{ "code": "missing_column" }`), &act) - require.NoError(t, err) - - assert.Equal(t, MissingColumn, act.MessageCode) -} - -func TestMessageCode_String(t *testing.T) { - // Check outer bounds. 
- assert.Empty(t, MessageCode(0).String()) - assert.Empty(t, emptyMessageCode.String()) - assert.Equal(t, emptyMessageCode, MessageCode(0)) - assert.Contains(t, (CompilerWarning + 1).String(), "MessageCode(") - - for mc := VirtualFieldFinalizeError; mc <= CompilerWarning; mc++ { - s := mc.String() - assert.NotEmpty(t, s) - assert.NotContains(t, s, "MessageCode(") - } -} - -func TestMessageCodeFromString(t *testing.T) { - for mc := VirtualFieldFinalizeError; mc <= CompilerWarning; mc++ { - parsed, err := messageCodeFromString(mc.String()) - assert.NoError(t, err) - assert.Equal(t, mc, parsed) - } -} - -func TestMessagePriority_Unmarshal(t *testing.T) { - var act struct { - MessagePriority MessagePriority `json:"priority"` - } - err := json.Unmarshal([]byte(`{ "priority": "info" }`), &act) - require.NoError(t, err) - - assert.Equal(t, Info, act.MessagePriority) -} - -func TestMessagePriority_String(t *testing.T) { - // Check outer bounds. - assert.Empty(t, MessagePriority(0).String()) - assert.Empty(t, emptyMessagePriority.String()) - assert.Equal(t, emptyMessagePriority, MessagePriority(0)) - assert.Contains(t, (Fatal + 1).String(), "MessagePriority(") - - for mp := Trace; mp <= Fatal; mp++ { - s := mp.String() - assert.NotEmpty(t, s) - assert.NotContains(t, s, "MessagePriority(") - } -} - -func TestMessagePriorityFromString(t *testing.T) { - for mp := Trace; mp <= Fatal; mp++ { - parsedMP, err := messagePriorityFromString(mp.String()) - assert.NoError(t, err) - assert.Equal(t, mp, parsedMP) - } -} - -func parseTimeOrPanic(value string) time.Time { - t, err := time.Parse(time.RFC3339, value) - if err != nil { - panic(err) - } - return t -}
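The slimmed-down [Status] deliberately keeps MinCursor and MaxCursor, since they drive resumption of time-sorted, non-aggregating queries. A short sketch of that flow follows; the dataset name is a placeholder, the client setup assumes AXIOM_TOKEN in the environment, and the (cursor, include) shape of query.SetCursor is inferred from the exclusive/inclusive cases exercised in TestCursor above:

	package main

	import (
		"context"
		"fmt"
		"log"
		"time"

		"github.com/axiomhq/axiom-go/axiom"
		"github.com/axiomhq/axiom-go/axiom/query"
	)

	func main() {
		client, err := axiom.NewClient()
		if err != nil {
			log.Fatal(err)
		}
		ctx := context.Background()

		end := time.Now()
		start := end.Add(-time.Hour)

		// First page of a time-sorted, non-aggregating query.
		res, err := client.Datasets.Query(ctx, "['test'] | sort by _time desc",
			query.SetStartTime(start),
			query.SetEndTime(end),
		)
		if err != nil {
			log.Fatal(err)
		}

		// Resume below the oldest row seen so far. The cursor is exclusive
		// here; passing true instead of false would include the row the
		// cursor points at.
		next, err := client.Datasets.Query(ctx, "['test'] | sort by _time desc",
			query.SetStartTime(start),
			query.SetEndTime(end),
			query.SetCursor(res.Status.MinCursor, false),
		)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(next.Status.RowsMatched)
	}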