Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

fix: default values for materialized columns added #3183

Closed
wants to merge 3 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
16 changes: 14 additions & 2 deletions pkg/query-service/app/clickhouseReader/reader.go
Original file line number Diff line number Diff line change
Expand Up @@ -3443,9 +3443,21 @@ func (r *ClickHouseReader) UpdateLogField(ctx context.Context, field *model.Upda
if field.Selected {
// if the type is attribute or resource, create the materialized column first
if field.Type == constants.Attributes || field.Type == constants.Resources {
// create materialized
query := fmt.Sprintf("ALTER TABLE %s.%s ON CLUSTER %s ADD COLUMN IF NOT EXISTS %s %s MATERIALIZED %s_%s_value[indexOf(%s_%s_key, '%s')] CODEC(LZ4)", r.logsDB, r.logsLocalTable, cluster, field.Name, field.DataType, field.Type, strings.ToLower(field.DataType), field.Type, strings.ToLower(field.DataType), field.Name)
defaultValue := ""
if value, ok := constants.LogsDataTypeDefaultValue[strings.ToLower(field.DataType)]; ok {
defaultValue = value
} else {
return &model.ApiError{Err: errors.New("dataType not correct"), Typ: model.ErrorBadData}
}

// create materialized
query := fmt.Sprintf("ALTER TABLE %s.%s ON CLUSTER %s ADD COLUMN IF NOT EXISTS %s %s MATERIALIZED "+
"if(indexOf(%s_%s_key, '%s') !=0, %s_%s_value[indexOf(%s_%s_key, '%s')], %s) CODEC(LZ4)",
r.logsDB, r.logsLocalTable, cluster, field.Name, field.DataType,
field.Type, strings.ToLower(field.DataType), field.Name,
field.Type, strings.ToLower(field.DataType),
field.Type, strings.ToLower(field.DataType), field.Name,
defaultValue)
err := r.db.Exec(ctx, query)
if err != nil {
return &model.ApiError{Err: err, Typ: model.ErrorInternal}
Expand Down
50 changes: 40 additions & 10 deletions pkg/query-service/app/logs/v3/query_builder.go
Original file line number Diff line number Diff line change
Expand Up @@ -146,11 +146,14 @@ func buildLogsTimeSeriesFilterQuery(fs *v3.FilterSet, groupBy []v3.AttributeKey)

// add group by conditions to filter out log lines that don't have the key
for _, attr := range groupBy {
if !attr.IsColumn {
columnType := getClickhouseLogsColumnType(attr.Type)
columnDataType := getClickhouseLogsColumnDataType(attr.DataType)
conditions = append(conditions, fmt.Sprintf("indexOf(%s_%s_key, '%s') > 0", columnType, columnDataType, attr.Key))
filter, skip, err := getExistsFilter(attr)
if err != nil {
return "", err
}
if skip {
continue
}
conditions = append(conditions, filter)
}

queryString := strings.Join(conditions, " AND ")
Expand All @@ -161,6 +164,27 @@ func buildLogsTimeSeriesFilterQuery(fs *v3.FilterSet, groupBy []v3.AttributeKey)
return queryString, nil
}

// getExistsFilter returns a ClickHouse predicate that restricts results to
// log rows in which the attribute attr is actually present.
//
// Three cases:
//   - top-level static fields (StaticFieldsLogsV3): skip=true, no filter —
//     their defaults are decided by the protocol;
//   - materialized columns (attr.IsColumn): the column always exists, so
//     absence is detected by comparing against the data type's default value
//     (LogsDataTypeDefaultValue); an unknown data type is an error;
//   - plain attributes: membership is checked in the corresponding key array
//     via has().
func getExistsFilter(attr v3.AttributeKey) (filter string, skip bool, err error) {
	if attr.IsColumn {
		if _, ok := constants.StaticFieldsLogsV3[attr.Key]; ok {
			// for top level fields the defaults are decided by the protocol
			return "", true, nil
		}
		// assign to the named result directly instead of shadowing it with a
		// new local (the original `filter := ...` shadowed the named return)
		defaultValue, ok := constants.LogsDataTypeDefaultValue[string(attr.DataType)]
		if !ok {
			return "", false, fmt.Errorf("data type not valid: %s", attr.DataType)
		}
		// a materialized column holds the type's default value when the
		// attribute was missing from the original log record
		filter = fmt.Sprintf("%s != %s", attr.Key, defaultValue)
		return filter, false, nil
	}

	// non-materialized attribute: presence means the key appears in the
	// <type>_<datatype>_key array
	columnType := getClickhouseLogsColumnType(attr.Type)
	columnDataType := getClickhouseLogsColumnDataType(attr.DataType)
	filter = fmt.Sprintf("has(%s_%s_key, '%s')", columnType, columnDataType, attr.Key)
	return filter, false, nil
}

// getZerosForEpochNano returns the number of zeros to be appended to the epoch time for converting it to nanoseconds
func getZerosForEpochNano(epoch int64) int64 {
count := 0
Expand Down Expand Up @@ -227,6 +251,18 @@ func buildLogsQuery(panelType v3.PanelType, start, end, step int64, mq *v3.Build
orderBy = " order by " + orderBy
}

// if any aggregation operation is performed we need to ignore the default values added
if mq.AggregateAttribute.Key != "" {
// this filter is added so that default values are not considered
filter, skip, err := getExistsFilter(mq.AggregateAttribute)
if err != nil {
return "", err
}
if !skip {
filterSubQuery = fmt.Sprintf("%s AND %s", filterSubQuery, filter)
}
}

if graphLimitQtype == constants.SecondQueryGraphLimit {
filterSubQuery = filterSubQuery + " AND " + fmt.Sprintf("(%s) IN (", getSelectKeys(mq.AggregateOperator, mq.GroupBy)) + "%s)"
}
Expand Down Expand Up @@ -267,12 +303,6 @@ func buildLogsQuery(panelType v3.PanelType, start, end, step int64, mq *v3.Build
query := fmt.Sprintf(queryTmpl, op, filterSubQuery, groupBy, having, orderBy)
return query, nil
case v3.AggregateOperatorCount:
if mq.AggregateAttribute.Key != "" {
columnType := getClickhouseLogsColumnType(mq.AggregateAttribute.Type)
columnDataType := getClickhouseLogsColumnDataType(mq.AggregateAttribute.DataType)
filterSubQuery = fmt.Sprintf("%s AND has(%s_%s_key, '%s')", filterSubQuery, columnType, columnDataType, mq.AggregateAttribute.Key)
}

op := "toFloat64(count(*))"
query := fmt.Sprintf(queryTmpl, op, filterSubQuery, groupBy, having, orderBy)
return query, nil
Expand Down
Loading