diff --git a/.env.example b/.env.example
index 369ebb8e2f43..f8518696580a 100644
--- a/.env.example
+++ b/.env.example
@@ -24,3 +24,7 @@ GT_KAFKA_ENDPOINTS = localhost:9092
 
 # Setting for fuzz tests
 GT_MYSQL_ADDR = localhost:4002
+
+# Setting for unstable fuzz tests
+GT_FUZZ_BINARY_PATH=/path/to/
+GT_FUZZ_INSTANCE_ROOT_DIR=/tmp/unstable_greptime
diff --git a/.github/actions/fuzz-test/action.yaml b/.github/actions/fuzz-test/action.yaml
index d50d5be6ef26..e3b4970d0caa 100644
--- a/.github/actions/fuzz-test/action.yaml
+++ b/.github/actions/fuzz-test/action.yaml
@@ -3,11 +3,17 @@ description: 'Fuzz test given setup and service'
 inputs:
   target:
     description: "The fuzz target to test"
+    required: true
+  max-total-time:
+    description: "Max total time (secs)"
+    required: true
+  unstable:
+    default: 'false'
+    description: "Enable the unstable feature"
 runs:
   using: composite
   steps:
     - name: Run Fuzz Test
       shell: bash
-      run: cargo fuzz run ${{ inputs.target }} --fuzz-dir tests-fuzz -D -s none -- -max_total_time=120
-      env:
-        GT_MYSQL_ADDR: 127.0.0.1:4002
+      run: cargo fuzz run ${{ inputs.target }} --fuzz-dir tests-fuzz -D -s none ${{ inputs.unstable == 'true' && '--features=unstable' || '' }} -- -max_total_time=${{ inputs.max-total-time }}
+
diff --git a/.github/workflows/develop.yml b/.github/workflows/develop.yml
index 8bf2395df936..4c64b85e3cd5 100644
--- a/.github/workflows/develop.yml
+++ b/.github/workflows/develop.yml
@@ -38,13 +38,20 @@ jobs:
     runs-on: ubuntu-20.04
     steps:
       - uses: actions/checkout@v4
-      - uses: crate-ci/typos@v1.13.10
+      - uses: crate-ci/typos@master
       - name: Check the config docs
         run: |
           make config-docs && \
           git diff --name-only --exit-code ./config/config.md \
           || (echo "'config/config.md' is not up-to-date, please run 'make config-docs'." && exit 1)
 
+  license-header-check:
+    runs-on: ubuntu-20.04
+    name: Check License Header
+    steps:
+      - uses: actions/checkout@v4
+      - uses: korandoru/hawkeye@v5
+
   check:
     name: Check
     runs-on: ${{ matrix.os }}
@@ -130,7 +137,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database", "fuzz_create_logical_table", "fuzz_alter_logical_table" ]
+        target: [ "fuzz_create_table", "fuzz_alter_table", "fuzz_create_database", "fuzz_create_logical_table", "fuzz_alter_logical_table", "fuzz_insert", "fuzz_insert_logical_table" ]
     steps:
       - uses: actions/checkout@v4
       - uses: arduino/setup-protoc@v3
@@ -164,8 +171,62 @@
         uses: ./.github/actions/fuzz-test
         env:
           CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a
+          GT_MYSQL_ADDR: 127.0.0.1:4002
         with:
           target: ${{ matrix.target }}
+          max-total-time: 120
+
+  unstable-fuzztest:
+    name: Unstable Fuzz Test
+    needs: build
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        target: [ "unstable_fuzz_create_table_standalone" ]
+    steps:
+      - uses: actions/checkout@v4
+      - uses: arduino/setup-protoc@v3
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
+      - uses: dtolnay/rust-toolchain@master
+        with:
+          toolchain: ${{ env.RUST_TOOLCHAIN }}
+      - name: Rust Cache
+        uses: Swatinem/rust-cache@v2
+        with:
+          # Shares across multiple jobs
+          shared-key: "fuzz-test-targets"
+      - name: Set Rust Fuzz
+        shell: bash
+        run: |
+          sudo apt update && sudo apt install -y libfuzzer-14-dev
+          cargo install cargo-fuzz
+      - name: Download pre-built binaries
+        uses: actions/download-artifact@v4
+        with:
+          name: bins
+          path: .
+      - name: Unzip binaries
+        run: tar -xvf ./bins.tar.gz
+      - name: Fuzz Test
+        uses: ./.github/actions/fuzz-test
+        env:
+          CUSTOM_LIBFUZZER_PATH: /usr/lib/llvm-14/lib/libFuzzer.a
+          GT_MYSQL_ADDR: 127.0.0.1:4002
+          GT_FUZZ_BINARY_PATH: ./bins/greptime
+          GT_FUZZ_INSTANCE_ROOT_DIR: /tmp/unstable-greptime/
+        with:
+          target: ${{ matrix.target }}
+          max-total-time: 120
+          unstable: 'true'
+      - name: Upload unstable fuzz test logs
+        if: failure()
+        uses: actions/upload-artifact@v4
+        with:
+          name: unstable-fuzz-logs
+          path: /tmp/unstable-greptime/
+          retention-days: 3
+
   sqlness:
     name: Sqlness Test
diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml
index c9c516c576f5..37d92ea4ddf7 100644
--- a/.github/workflows/docs.yml
+++ b/.github/workflows/docs.yml
@@ -34,7 +34,14 @@ jobs:
     runs-on: ubuntu-20.04
     steps:
       - uses: actions/checkout@v4
-      - uses: crate-ci/typos@v1.13.10
+      - uses: crate-ci/typos@master
+
+  license-header-check:
+    runs-on: ubuntu-20.04
+    name: Check License Header
+    steps:
+      - uses: actions/checkout@v4
+      - uses: korandoru/hawkeye@v5
 
   check:
     name: Check
diff --git a/.github/workflows/license.yaml b/.github/workflows/license.yaml
deleted file mode 100644
index b7fc58267ceb..000000000000
--- a/.github/workflows/license.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
-name: License checker
-
-on:
-  push:
-    branches:
-      - main
-  pull_request:
-    types: [opened, synchronize, reopened, ready_for_review]
-jobs:
-  license-header-check:
-    runs-on: ubuntu-20.04
-    name: license-header-check
-    steps:
-      - uses: actions/checkout@v4
-      - name: Check License Header
-        uses: korandoru/hawkeye@v5
diff --git a/.github/workflows/nightly-ci.yml b/.github/workflows/nightly-ci.yml
index b635ab16d6b7..e4d6a793d063 100644
--- a/.github/workflows/nightly-ci.yml
+++ b/.github/workflows/nightly-ci.yml
@@ -1,5 +1,3 @@
-# Nightly CI: runs tests every night for our second tier plaforms (Windows)
-
 on:
   schedule:
     - cron: '0 23 * * 1-5'
@@ -15,13 +13,29 @@ env:
   RUST_TOOLCHAIN: nightly-2024-04-18
 
 jobs:
-  sqlness:
-    name: Sqlness Test
+  sqlness-test:
+    name: Run sqlness test
+    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
+    runs-on: ubuntu-22.04
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+
+      - name: Run sqlness test
+        uses: ./.github/actions/sqlness-test
+        with:
+          data-root: sqlness-test
+          aws-ci-test-bucket: ${{ vars.AWS_CI_TEST_BUCKET }}
+          aws-region: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
+          aws-access-key-id: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
+          aws-secret-access-key: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
+
+  sqlness-windows:
+    name: Sqlness tests on Windows
     if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
-    runs-on: ${{ matrix.os }}
-    strategy:
-      matrix:
-        os: [ windows-latest-8-cores ]
+    runs-on: windows-latest-8-cores
     timeout-minutes: 60
     steps:
       - uses: actions/checkout@v4
@@ -52,6 +66,7 @@
           retention-days: 3
 
   test-on-windows:
+    name: Run tests on Windows
     if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
     runs-on: windows-latest-8-cores
     timeout-minutes: 60
diff --git a/.github/workflows/nightly-funtional-tests.yml b/.github/workflows/nightly-funtional-tests.yml
deleted file mode 100644
index b4e7ebf927eb..000000000000
--- a/.github/workflows/nightly-funtional-tests.yml
+++ /dev/null
@@ -1,27 +0,0 @@
-name: Nightly functional tests
-
-on:
-  schedule:
-    # At 00:00 on Tuesday.
-    - cron: '0 0 * * 2'
-  workflow_dispatch:
-
-jobs:
-  sqlness-test:
-    name: Run sqlness test
-    if: ${{ github.repository == 'GreptimeTeam/greptimedb' }}
-    runs-on: ubuntu-22.04
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v4
-        with:
-          fetch-depth: 0
-
-      - name: Run sqlness test
-        uses: ./.github/actions/sqlness-test
-        with:
-          data-root: sqlness-test
-          aws-ci-test-bucket: ${{ vars.AWS_CI_TEST_BUCKET }}
-          aws-region: ${{ vars.AWS_CI_TEST_BUCKET_REGION }}
-          aws-access-key-id: ${{ secrets.AWS_CI_TEST_ACCESS_KEY_ID }}
-          aws-secret-access-key: ${{ secrets.AWS_CI_TEST_SECRET_ACCESS_KEY }}
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 94bfb60ac35c..97e88e843d00 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -50,7 +50,7 @@ GreptimeDB uses the [Apache 2.0 license](https://github.com/GreptimeTeam/greptim
 
 - To ensure that community is free and confident in its ability to use your contributions, please sign the Contributor License Agreement (CLA) which will be incorporated in the pull request process.
 - Make sure all files have proper license header (running `docker run --rm -v $(pwd):/github/workspace ghcr.io/korandoru/hawkeye-native:v3 format` from the project root).
-- Make sure all your codes are formatted and follow the [coding style](https://pingcap.github.io/style-guide/rust/) and [style guide](http://github.com/greptimeTeam/docs/style-guide.md).
+- Make sure all your code is formatted and follows the [coding style](https://pingcap.github.io/style-guide/rust/) and the [style guide](docs/style-guide.md).
 - Make sure all unit tests are passed (using `cargo test --workspace` or [nextest](https://nexte.st/index.html) `cargo nextest run`).
 - Make sure all clippy warnings are fixed (you can check it locally by running `cargo clippy --workspace --all-targets -- -D warnings`).
 
diff --git a/Cargo.lock b/Cargo.lock
index 5799b6a6708c..284dcaa4c10e 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -897,6 +897,7 @@ dependencies = [
  "rskafka",
  "serde",
  "store-api",
+ "tests-integration",
  "tokio",
  "toml 0.8.12",
  "uuid",
@@ -1582,6 +1583,7 @@ version = "0.7.2"
 dependencies = [
  "async-trait",
  "auth",
+ "base64 0.21.7",
  "catalog",
  "chrono",
  "clap 4.5.4",
@@ -1621,6 +1623,7 @@ dependencies = [
  "query",
  "rand",
  "regex",
+ "reqwest",
  "rexpect",
  "rustyline 10.1.1",
  "serde",
@@ -9210,6 +9213,7 @@ dependencies = [
  "strum 0.25.0",
  "table",
  "tempfile",
+ "tests-integration",
  "tikv-jemalloc-ctl",
  "tokio",
  "tokio-postgres",
@@ -9553,6 +9557,7 @@ dependencies = [
  "serde_json",
  "sqlness",
  "tempfile",
+ "tests-integration",
  "tinytemplate",
  "tokio",
 ]
@@ -10238,15 +10243,18 @@ dependencies = [
  "dotenv",
  "lazy_static",
  "libfuzzer-sys",
+ "nix 0.28.0",
  "partition",
  "rand",
  "rand_chacha",
+ "reqwest",
  "serde",
  "serde_json",
  "snafu",
  "sql",
  "sqlparser 0.44.0 (git+https://github.com/GreptimeTeam/sqlparser-rs.git?rev=c919990bf62ad38d2b0c0a3bc90b26ad919d51b0)",
  "sqlx",
+ "tinytemplate",
  "tokio",
 ]
 
@@ -10256,11 +10264,13 @@ version = "0.7.2"
 dependencies = [
  "api",
  "arrow-flight",
+ "async-stream",
  "async-trait",
  "auth",
  "axum",
  "catalog",
  "chrono",
+ "clap 4.5.4",
  "client",
  "cmd",
  "common-base",
@@ -10283,6 +10293,7 @@ dependencies = [
  "dotenv",
  "frontend",
  "futures",
+ "futures-util",
  "itertools 0.10.5",
  "meta-client",
  "meta-srv",
@@ -10301,6 +10312,7 @@ dependencies = [
  "serde_json",
  "servers",
  "session",
+ "snafu",
  "sql",
  "sqlx",
  "store-api",
@@ -10310,6 +10322,7 @@ dependencies = [
  "time",
  "tokio",
  "tokio-postgres",
+ "tokio-stream",
  "tonic 0.11.0",
  "tower",
  "uuid",
diff --git a/Cargo.toml b/Cargo.toml
index ef6ff6eb84e2..28c1c4973f4d 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -225,6 +225,8 @@ sql = { path = "src/sql" }
 store-api = { path = "src/store-api" }
 substrait = { path = "src/common/substrait" }
 table = { path = "src/table" }
+# TODO: some code still depends on this
+tests-integration = { path = "tests-integration" }
 
 [workspace.dependencies.meter-macros]
 git = "https://github.com/GreptimeTeam/greptime-meter.git"
diff --git a/benchmarks/Cargo.toml b/benchmarks/Cargo.toml
index 18b44e944858..ed7f038596e3 100644
--- a/benchmarks/Cargo.toml
+++ b/benchmarks/Cargo.toml
@@ -33,6 +33,8 @@ rand.workspace = true
 rskafka.workspace = true
 serde.workspace = true
 store-api.workspace = true
+# TODO: depends on the `Database` client
+tests-integration.workspace = true
 tokio.workspace = true
 toml.workspace = true
 uuid.workspace = true
diff --git a/benchmarks/src/bin/nyc-taxi.rs b/benchmarks/src/bin/nyc-taxi.rs
index bfc26f3daeae..43d28414fa54 100644
--- a/benchmarks/src/bin/nyc-taxi.rs
+++ b/benchmarks/src/bin/nyc-taxi.rs
@@ -29,10 +29,11 @@ use client::api::v1::column::Values;
 use client::api::v1::{
     Column, ColumnDataType, ColumnDef, CreateTableExpr, InsertRequest, InsertRequests, SemanticType,
 };
-use client::{Client, Database, OutputData, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
+use client::{Client, OutputData, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
 use futures_util::TryStreamExt;
 use indicatif::{MultiProgress, ProgressBar, ProgressStyle};
 use parquet::arrow::arrow_reader::ParquetRecordBatchReaderBuilder;
+use tests_integration::database::Database;
 use tokio::task::JoinSet;
 
 const CATALOG_NAME: &str = "greptime";
diff --git a/grafana/README.md b/grafana/README.md
index 0fc62f388b9b..264ee23c5771 100644
--- a/grafana/README.md
+++ b/grafana/README.md
@@ -7,4 +7,60 @@ Status notify: we are still working on
this config. It's expected to change freq
 
 # How to use
 
+## `greptimedb.json`
+
 Open Grafana Dashboard page, choose `New` -> `Import`. And upload `greptimedb.json` file.
+
+## `greptimedb-cluster.json`
+
+This cluster dashboard provides a comprehensive view of incoming requests, response statuses, and internal activities such as flush and compaction, organized in layers from frontend to datanode. It is designed with a focus on alerting: its primary aim is to highlight any anomalies in metrics so that users can quickly pinpoint the cause of errors.
+
+We use Prometheus to scrape metrics from the nodes in a GreptimeDB cluster and Grafana to visualize them. Any compatible stack should work too.
+
+__Note__: This dashboard is still in an early stage of development. Any issues or suggestions for improvement are welcome.
+
+### Configuration
+
+Please apply the following configuration before importing the dashboard into Grafana.
+
+__1. Prometheus scrape config__
+
+Assign a `greptime_pod` label to each host target. We use this label to identify each node instance.
+
+```yml
+# example config
+# only to indicate how to assign labels to each target
+# modify yours accordingly
+scrape_configs:
+  - job_name: metasrv
+    static_configs:
+      - targets: ['<metasrv-host>:<port>']
+        labels:
+          greptime_pod: metasrv
+
+  - job_name: datanode
+    static_configs:
+      - targets: ['<datanode1-host>:<port>']
+        labels:
+          greptime_pod: datanode1
+      - targets: ['<datanode2-host>:<port>']
+        labels:
+          greptime_pod: datanode2
+      - targets: ['<datanode3-host>:<port>']
+        labels:
+          greptime_pod: datanode3
+
+  - job_name: frontend
+    static_configs:
+      - targets: ['<frontend-host>:<port>']
+        labels:
+          greptime_pod: frontend
+```
+
+__2. Grafana config__
+
+Create a Prometheus data source in Grafana before using this dashboard. We use `datasource` as a dashboard variable so that multiple environments are supported.
+
+### Usage
+
+Use the `datasource` or `greptime_pod` selectors in the upper-left corner to filter the data down to a certain node.
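+
+For example, every panel filters on that label through the dashboard's `greptime_pod` variable; the gRPC middleware panel computes the per-instance QPS with the following query (taken from the panel definition in `greptimedb-cluster.json`):
+
+```promql
+sum by(greptime_pod, path, code) (rate(greptime_servers_grpc_requests_elapsed_count{greptime_pod=~"$greptime_pod"}[$__rate_interval]))
+```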
diff --git a/grafana/greptimedb-cluster.json b/grafana/greptimedb-cluster.json new file mode 100644 index 000000000000..1e2473fda302 --- /dev/null +++ b/grafana/greptimedb-cluster.json @@ -0,0 +1,4862 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "grafana", + "uid": "-- Grafana --" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + } + ] + }, + "description": "", + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 1, + "id": 1, + "links": [], + "liveNow": false, + "panels": [ + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 155, + "panels": [], + "title": "Frontend Entry Middleware", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "points", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": ".*?-qps" + }, + "properties": [ + { + "id": "custom.drawStyle", + "value": "line" + }, + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "unit", + "value": "ops" + }, + { + "id": "custom.stacking", + "value": { + "group": "A", + "mode": "none" + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 1 + }, + "id": 152, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "code", + "exemplar": false, + "expr": "histogram_quantile(0.99, sum by(greptime_pod, le, path, code) (rate(greptime_servers_grpc_requests_elapsed_bucket{greptime_pod=~\"$greptime_pod\"}[$__rate_interval])))", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{greptime_pod}}-{{path}}-{{code}}-p99", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "code", + "exemplar": false, + "expr": "sum by(greptime_pod, path, code) (rate(greptime_servers_grpc_requests_elapsed_count{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))", + 
"fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "interval": "$__rate_interval", + "legendFormat": "{{greptime_pod}}-{{path}}-{{code}}-qps", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "gRPC middleware", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "points", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": ".*?-qps" + }, + "properties": [ + { + "id": "custom.drawStyle", + "value": "line" + }, + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "unit", + "value": "ops" + }, + { + "id": "custom.stacking", + "value": { + "group": "A", + "mode": "none" + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 1 + }, + "id": 154, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true, + "sortBy": "Name", + "sortDesc": false + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "code", + "exemplar": false, + "expr": "histogram_quantile(0.99, sum by(greptime_pod, le, path, method, code) (rate(greptime_servers_http_requests_elapsed_bucket{greptime_pod=~\"$greptime_pod\",path!~\"/health|/metrics\"}[$__rate_interval])))", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{greptime_pod}}-{{path}}-{{method}}-{{code}}-p99", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "code", + "exemplar": false, + "expr": "sum by(greptime_pod, path, method, code) (rate(greptime_servers_http_requests_elapsed_count{greptime_pod=~\"$greptime_pod\",path!~\"/health|/metrics\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "interval": "$__rate_interval", + "legendFormat": "{{greptime_pod}}-{{path}}-{{method}}-{{code}}-qps", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "HTTP middleware", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": 
"palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "points", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": ".*?-qps" + }, + "properties": [ + { + "id": "custom.drawStyle", + "value": "line" + }, + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "unit", + "value": "ops" + }, + { + "id": "custom.stacking", + "value": { + "group": "A", + "mode": "none" + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 9 + }, + "id": 156, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true, + "sortBy": "Name", + "sortDesc": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "code", + "exemplar": false, + "expr": "histogram_quantile(0.99, sum by(greptime_pod, le, subprotocol, db) (rate(greptime_servers_mysql_query_elapsed_bucket{greptime_pod=~\"$greptime_pod\"}[$__rate_interval])))", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{greptime_pod}}-{{subprotocol}}-{{db}}-p99", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "code", + "exemplar": false, + "expr": "sum by(greptime_pod, subprotocol, db) (rate(greptime_servers_mysql_query_elapsed_count{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "interval": "$__rate_interval", + "legendFormat": "{{greptime_pod}}-{{subprotocol}}-{{db}}-qps", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "MySQL per DB", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "points", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": 
{ + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": ".*?-qps" + }, + "properties": [ + { + "id": "custom.drawStyle", + "value": "line" + }, + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "unit", + "value": "ops" + }, + { + "id": "custom.stacking", + "value": { + "group": "A", + "mode": "none" + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 9 + }, + "id": 157, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true, + "sortBy": "Name", + "sortDesc": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "code", + "exemplar": false, + "expr": "histogram_quantile(0.99, sum by(greptime_pod, le, subprotocol, db) (rate(greptime_servers_postgres_query_elapsed_bucket{greptime_pod=~\"$greptime_pod\"}[$__rate_interval])))", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{greptime_pod}}-{{subprotocol}}-{{db}}-p99", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "code", + "exemplar": false, + "expr": "sum by(greptime_pod, subprotocol, db) (rate(greptime_servers_postgres_query_elapsed_count{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "interval": "$__rate_interval", + "legendFormat": "{{greptime_pod}}-{{subprotocol}}-{{db}}-qps", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "PostgreSQL per DB", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 17 + }, + "id": 158, + "panels": [], + "title": "Frontend HTTP per DB", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "points", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": ".*?-qps" 
+ }, + "properties": [ + { + "id": "custom.drawStyle", + "value": "line" + }, + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "unit", + "value": "ops" + }, + { + "id": "custom.stacking", + "value": { + "group": "A", + "mode": "none" + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 0, + "y": 18 + }, + "id": 159, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true, + "sortBy": "Name", + "sortDesc": false + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "code", + "exemplar": false, + "expr": "histogram_quantile(0.99, sum by(greptime_pod, le, db) (rate(greptime_servers_http_sql_elapsed_bucket{greptime_pod=~\"$greptime_pod\"}[$__rate_interval])))", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{greptime_pod}}-{{db}}-p99", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "code", + "exemplar": false, + "expr": "sum by(greptime_pod, db) (rate(greptime_servers_http_sql_elapsed_count{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "interval": "$__rate_interval", + "legendFormat": "{{greptime_pod}}-{{db}}-qps", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "HTTP sql per DB", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "points", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": ".*?-qps" + }, + "properties": [ + { + "id": "custom.drawStyle", + "value": "line" + }, + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "unit", + "value": "ops" + }, + { + "id": "custom.stacking", + "value": { + "group": "A", + "mode": "none" + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 8, + "y": 18 + }, + 
"id": 160, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true, + "sortBy": "Name", + "sortDesc": false + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "histogram_quantile(0.99, sum by(greptime_pod, le, db) (rate(greptime_servers_http_promql_elapsed_bucket{greptime_pod=~\"$greptime_pod\"}[$__rate_interval])))", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{greptime_pod}}-{{db}}-p99", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "sum by(greptime_pod, db) (rate(greptime_servers_http_promql_elapsed_count{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "interval": "$__rate_interval", + "legendFormat": "{{greptime_pod}}-{{db}}-qps", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "HTTP promql per DB", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "points", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": ".*?-qps" + }, + "properties": [ + { + "id": "custom.drawStyle", + "value": "line" + }, + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "unit", + "value": "ops" + }, + { + "id": "custom.stacking", + "value": { + "group": "A", + "mode": "none" + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 16, + "y": 18 + }, + "id": 161, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true, + "sortBy": "Name", + "sortDesc": false + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "histogram_quantile(0.99, sum by(greptime_pod, le, db) 
(rate(greptime_servers_http_influxdb_write_elapsed_bucket{greptime_pod=~\"$greptime_pod\"}[$__rate_interval])))", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{greptime_pod}}-{{db}}-p99", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "sum by(greptime_pod, db) (rate(greptime_servers_http_influxdb_write_elapsed_count{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "interval": "$__rate_interval", + "legendFormat": "{{greptime_pod}}-{{db}}-qps", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "HTTP influxdb per DB", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "points", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": ".*?-qps" + }, + "properties": [ + { + "id": "custom.drawStyle", + "value": "line" + }, + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "unit", + "value": "ops" + }, + { + "id": "custom.stacking", + "value": { + "group": "A", + "mode": "none" + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 0, + "y": 26 + }, + "id": 162, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true, + "sortBy": "Name", + "sortDesc": false + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "histogram_quantile(0.99, sum by(greptime_pod, le, db) (rate(greptime_servers_http_prometheus_write_elapsed_bucket{greptime_pod=~\"$greptime_pod\"}[$__rate_interval])))", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{greptime_pod}}-{{db}}-p99", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "sum by(greptime_pod, db) 
(rate(greptime_servers_http_prometheus_write_elapsed_count{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "interval": "$__rate_interval", + "legendFormat": "{{greptime_pod}}-{{db}}-qps", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "HTTP prom store write per DB", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "points", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": ".*?-qps" + }, + "properties": [ + { + "id": "custom.drawStyle", + "value": "line" + }, + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "unit", + "value": "ops" + }, + { + "id": "custom.stacking", + "value": { + "group": "A", + "mode": "none" + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 6, + "y": 26 + }, + "id": 183, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true, + "sortBy": "Name", + "sortDesc": false + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "histogram_quantile(0.99, sum by(greptime_pod, le, db) (rate(greptime_servers_http_prometheus_read_elapsed_bucket{greptime_pod=~\"$greptime_pod\"}[$__rate_interval])))", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{greptime_pod}}-{{db}}-p99", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "sum by(greptime_pod, db) (rate(greptime_servers_http_prometheus_read_elapsed_count{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "interval": "$__rate_interval", + "legendFormat": "{{greptime_pod}}-{{db}}-qps", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "HTTP prom store read per DB", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + 
"mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "points", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": ".*?-qps" + }, + "properties": [ + { + "id": "custom.drawStyle", + "value": "line" + }, + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "unit", + "value": "ops" + }, + { + "id": "custom.stacking", + "value": { + "group": "A", + "mode": "none" + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 12, + "y": 26 + }, + "id": 184, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true, + "sortBy": "Name", + "sortDesc": false + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "histogram_quantile(0.99, sum by(greptime_pod, le, db) (rate(greptime_servers_http_otlp_metrics_elapsed_bucket{greptime_pod=~\"$greptime_pod\"}[$__rate_interval])))", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{greptime_pod}}-{{db}}-p99", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "sum by(greptime_pod, db) (rate(greptime_servers_http_otlp_metrics_elapsed_count{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "interval": "$__rate_interval", + "legendFormat": "{{greptime_pod}}-{{db}}-qps", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "HTTP otlp metrics per DB", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "points", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", 
+ "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": ".*?-qps" + }, + "properties": [ + { + "id": "custom.drawStyle", + "value": "line" + }, + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "unit", + "value": "ops" + }, + { + "id": "custom.stacking", + "value": { + "group": "A", + "mode": "none" + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 18, + "y": 26 + }, + "id": 185, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true, + "sortBy": "Name", + "sortDesc": false + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "histogram_quantile(0.99, sum by(greptime_pod, le, db) (rate(greptime_servers_http_otlp_traces_elapsed_bucket{greptime_pod=~\"$greptime_pod\"}[$__rate_interval])))", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{greptime_pod}}-{{db}}-p99", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "sum by(greptime_pod, db) (rate(greptime_servers_http_otlp_traces_elapsed_count{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "interval": "$__rate_interval", + "legendFormat": "{{greptime_pod}}-{{db}}-qps", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "HTTP otlp traces per DB", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 34 + }, + "id": 163, + "panels": [], + "title": "Frontend gRPC per DB", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "points", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": ".*?-qps" + }, + "properties": [ + { + "id": "custom.drawStyle", + 
"value": "line" + }, + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "unit", + "value": "ops" + }, + { + "id": "custom.stacking", + "value": { + "group": "A", + "mode": "none" + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 35 + }, + "id": 164, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true, + "sortBy": "Name", + "sortDesc": false + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "label_replace(histogram_quantile(0.99, sum by(greptime_pod, le, db, type, code) (rate(greptime_servers_grpc_db_request_elapsed_bucket{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))), \"db\", \"$1\", \"db\", \"(.*)-public\")", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{greptime_pod}}-{{db}}-{{type}}-{{code}}-p99", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "sum by(greptime_pod, db, type, code) (rate(greptime_servers_grpc_db_request_elapsed_count{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "interval": "$__rate_interval", + "legendFormat": "{{greptime_pod}}-{{db}}-{{type}}-{{code}}-qps", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "gRPC per DB", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "points", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": ".*?-qps" + }, + "properties": [ + { + "id": "custom.drawStyle", + "value": "line" + }, + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "unit", + "value": "ops" + }, + { + "id": "custom.stacking", + "value": { + "group": "A", + "mode": "none" + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": true + } + } + ] + } 
+ ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 35 + }, + "id": 165, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true, + "sortBy": "Name", + "sortDesc": false + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "histogram_quantile(0.99, sum by(greptime_pod, le, db) (rate(greptime_servers_grpc_prom_request_elapsed_bucket{greptime_pod=~\"$greptime_pod\"}[$__rate_interval])))", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{greptime_pod}}-{{db}}-p99", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "sum by(greptime_pod, db) (rate(greptime_servers_grpc_prom_request_elapsed_count{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "interval": "$__rate_interval", + "legendFormat": "{{greptime_pod}}-{{db}}-qps", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "gRPC prom per DB", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 43 + }, + "id": 166, + "panels": [], + "title": "Frontend-Datanode Call", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "points", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": ".*?-rps" + }, + "properties": [ + { + "id": "custom.drawStyle", + "value": "line" + }, + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "unit", + "value": "rowsps" + }, + { + "id": "custom.stacking", + "value": { + "group": "A", + "mode": "none" + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 44 + }, + "id": 186, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true, + "sortBy": "Name", + "sortDesc": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": 
"${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "rate(greptime_table_operator_ingest_rows{greptime_pod=~\"$greptime_pod\"}[$__rate_interval])", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{greptime_pod}}-rps", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "ingested rows", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "points", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": ".*?-qps" + }, + "properties": [ + { + "id": "custom.drawStyle", + "value": "line" + }, + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "unit", + "value": "ops" + }, + { + "id": "custom.stacking", + "value": { + "group": "A", + "mode": "none" + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 44 + }, + "id": 167, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "histogram_quantile(0.99, sum by(greptime_pod, le, request_type) (rate(greptime_grpc_region_request_bucket{greptime_pod=~\"$greptime_pod\"}[$__rate_interval])))", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{greptime_pod}}-{{request_type}}-p99", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "sum by(greptime_pod, request_type) (rate(greptime_grpc_region_request_count{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "interval": "$__rate_interval", + "legendFormat": "{{greptime_pod}}-{{request_type}}-qps", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "gRPC region call", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 52 + }, + "id": 168, + "panels": [], + "title": "Datanode Mito", + "type": 
"row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": ".*?-qps" + }, + "properties": [ + { + "id": "custom.drawStyle", + "value": "points" + }, + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "unit", + "value": "ops" + }, + { + "id": "custom.stacking", + "value": { + "group": "A", + "mode": "none" + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 53 + }, + "id": 188, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "histogram_quantile(0.99, sum by(greptime_pod, le, type) (rate(greptime_mito_handle_request_elapsed_bucket{greptime_pod=~\"$greptime_pod\"}[$__rate_interval])))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{greptime_pod}}-{{type}}-p99", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "sum by(greptime_pod, type) (rate(greptime_mito_handle_request_elapsed_count{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{greptime_pod}}-{{type}}-qps", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "handle request", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + 
"showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "decbytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 53 + }, + "id": 187, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "greptime_mito_write_buffer_bytes{greptime_pod=~\"$greptime_pod\"}", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{greptime_pod}}", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Write buffer bytes", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "ops" + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": ".*?-bytes" + }, + "properties": [ + { + "id": "custom.drawStyle", + "value": "points" + }, + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "unit", + "value": "bytes" + }, + { + "id": "custom.stacking", + "value": { + "group": "A", + "mode": "none" + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 61 + }, + "id": 169, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "sum by(greptime_pod, reason) (rate(greptime_mito_flush_requests_total{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{greptime_pod}}-{{reason}}-success", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + 
"exemplar": false, + "expr": "sum by(greptime_pod, reason) (rate(greptime_mito_flush_errors_total{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{greptime_pod}}-{{reason}}-error", + "range": true, + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "sum by(greptime_pod) (rate(greptime_mito_flush_bytes_total{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{greptime_pod}}-bytes", + "range": true, + "refId": "C", + "useBackend": false + } + ], + "title": "flush total", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "points", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": ".*?-qps" + }, + "properties": [ + { + "id": "custom.drawStyle", + "value": "line" + }, + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "unit", + "value": "ops" + }, + { + "id": "custom.stacking", + "value": { + "group": "A", + "mode": "none" + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 61 + }, + "id": 170, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "histogram_quantile(0.99, sum by(greptime_pod, le, stage) (rate(greptime_mito_write_stage_elapsed_bucket{greptime_pod=~\"$greptime_pod\"}[$__rate_interval])))", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{greptime_pod}}-{{stage}}-p99", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "sum by(greptime_pod, stage) (rate(greptime_mito_write_stage_elapsed_count{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))", + "fullMetaSearch": 
false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "interval": "$__rate_interval", + "legendFormat": "{{greptime_pod}}-{{stage}}-qps", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "write stage", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "points", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": ".*?-qps" + }, + "properties": [ + { + "id": "custom.drawStyle", + "value": "line" + }, + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "unit", + "value": "ops" + }, + { + "id": "custom.stacking", + "value": { + "group": "A", + "mode": "none" + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 69 + }, + "id": 172, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "histogram_quantile(0.99, sum by(greptime_pod, le) (rate(greptime_mito_compaction_total_elapsed_bucket{greptime_pod=~\"$greptime_pod\"}[$__rate_interval])))", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{greptime_pod}}-p99", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "sum by(greptime_pod) (rate(greptime_mito_compaction_total_elapsed_count{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "interval": "$__rate_interval", + "legendFormat": "{{greptime_pod}}-qps", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "compaction total", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "points", + 
"fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": ".*?-qps" + }, + "properties": [ + { + "id": "custom.drawStyle", + "value": "line" + }, + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "unit", + "value": "ops" + }, + { + "id": "custom.stacking", + "value": { + "group": "A", + "mode": "none" + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 69 + }, + "id": 171, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "histogram_quantile(0.99, sum by(greptime_pod, le, stage) (rate(greptime_mito_compaction_stage_elapsed_bucket{greptime_pod=~\"$greptime_pod\"}[$__rate_interval])))", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{greptime_pod}}-{{stage}}-p99", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "sum by(greptime_pod, stage) (rate(greptime_mito_compaction_stage_elapsed_count{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "interval": "$__rate_interval", + "legendFormat": "{{greptime_pod}}-{{stage}}-qps", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "compaction stage", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 77 + }, + "id": 173, + "panels": [], + "title": "OpenDAL", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "points", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { 
+ "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": ".*?-qps" + }, + "properties": [ + { + "id": "custom.drawStyle", + "value": "line" + }, + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "unit", + "value": "ops" + }, + { + "id": "custom.stacking", + "value": { + "group": "A", + "mode": "none" + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 0, + "y": 78 + }, + "id": 178, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "histogram_quantile(0.99, sum by(greptime_pod, le, scheme) (rate(opendal_requests_duration_seconds_bucket{greptime_pod=~\"$greptime_pod\",operation=\"read\"}[$__rate_interval])))", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{greptime_pod}}-{{scheme}}-p99", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "sum by(greptime_pod, scheme) (rate(opendal_requests_duration_seconds_count{greptime_pod=~\"$greptime_pod\", operation=\"read\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "interval": "$__rate_interval", + "legendFormat": "{{greptime_pod}}-{{scheme}}-qps", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "requests_duration_seconds_READ", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "points", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": ".*?-qps" + }, + "properties": [ + { + "id": "custom.drawStyle", + "value": "line" + }, + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "unit", + "value": "ops" + }, + { + "id": "custom.stacking", + "value": { + "group": "A", + "mode": "none" + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + 
"op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 6, + "y": 78 + }, + "id": 179, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "histogram_quantile(0.99, sum by(greptime_pod, le, scheme) (rate(opendal_requests_duration_seconds_bucket{greptime_pod=~\"$greptime_pod\", operation=\"write\"}[$__rate_interval])))", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{greptime_pod}}-{{scheme}}-p99", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "sum by(greptime_pod, scheme) (rate(opendal_requests_duration_seconds_count{greptime_pod=~\"$greptime_pod\", operation=\"write\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "interval": "$__rate_interval", + "legendFormat": "{{greptime_pod}}-{{scheme}}-qps", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "requests_duration_seconds_WRITE", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "points", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": ".*?-qps" + }, + "properties": [ + { + "id": "custom.drawStyle", + "value": "line" + }, + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "unit", + "value": "ops" + }, + { + "id": "custom.stacking", + "value": { + "group": "A", + "mode": "none" + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 12, + "y": 78 + }, + "id": 180, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + 
}, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "histogram_quantile(0.99, sum by(greptime_pod, le, scheme) (rate(opendal_requests_duration_seconds_bucket{greptime_pod=~\"$greptime_pod\", operation=\"list\"}[$__rate_interval])))", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{greptime_pod}}-{{scheme}}-p99", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "sum by(greptime_pod, scheme) (rate(opendal_requests_duration_seconds_count{greptime_pod=~\"$greptime_pod\", operation=\"list\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "interval": "$__rate_interval", + "legendFormat": "{{greptime_pod}}-{{scheme}}-qps", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "requests_duration_seconds_LIST", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "points", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": ".*?-qps" + }, + "properties": [ + { + "id": "custom.drawStyle", + "value": "line" + }, + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "unit", + "value": "ops" + }, + { + "id": "custom.stacking", + "value": { + "group": "A", + "mode": "none" + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 18, + "y": 78 + }, + "id": 182, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "histogram_quantile(0.99, sum by(greptime_pod, le, scheme) (rate(opendal_requests_duration_seconds_bucket{greptime_pod=~\"$greptime_pod\", operation=\"stat\"}[$__rate_interval])))", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{greptime_pod}}-{{scheme}}-p99", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + 
"disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "sum by(greptime_pod, scheme) (rate(opendal_requests_duration_seconds_count{greptime_pod=~\"$greptime_pod\", operation=\"stat\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "interval": "$__rate_interval", + "legendFormat": "{{greptime_pod}}-{{scheme}}-qps", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "requests_duration_seconds_STAT", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "points", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": ".*?-qps" + }, + "properties": [ + { + "id": "custom.drawStyle", + "value": "line" + }, + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "unit", + "value": "ops" + }, + { + "id": "custom.stacking", + "value": { + "group": "A", + "mode": "none" + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 0, + "y": 86 + }, + "id": 181, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "histogram_quantile(0.99, sum by(greptime_pod, le, scheme, operation) (rate(opendal_requests_duration_seconds_bucket{greptime_pod=~\"$greptime_pod\", operation!~\"read|write|list\"}[$__rate_interval])))", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{greptime_pod}}-{{scheme}}-{{operation}}-p99", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "sum by(greptime_pod, scheme, operation) (rate(opendal_requests_duration_seconds_count{greptime_pod=~\"$greptime_pod\", operation!~\"read|write|list\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "interval": "$__rate_interval", + "legendFormat": "{{greptime_pod}}-{{scheme}}-{{operation}}-qps", + "range": true, + "refId": "B", + "useBackend": false + } + ], + 
"title": "requests_duration_seconds", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "points", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": ".*?-bytes" + }, + "properties": [ + { + "id": "custom.drawStyle", + "value": "line" + }, + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "unit", + "value": "bytes" + }, + { + "id": "custom.stacking", + "value": { + "group": "A", + "mode": "none" + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 8, + "y": 86 + }, + "id": 177, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "histogram_quantile(0.99, sum by(greptime_pod, le, scheme, operation) (rate(opendal_bytes_total_bucket{greptime_pod=~\"$greptime_pod\"}[$__rate_interval])))", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{greptime_pod}}-{{scheme}}-{{operation}}-p99", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "sum by(greptime_pod, scheme, operation) (rate(opendal_bytes_total_count{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "interval": "$__rate_interval", + "legendFormat": "{{greptime_pod}}-{{scheme}}-{{operation}}-bytes", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "bytes_total", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "points", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", 
+ "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": ".*?-qps" + }, + "properties": [ + { + "id": "custom.drawStyle", + "value": "line" + }, + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "unit", + "value": "ops" + }, + { + "id": "custom.stacking", + "value": { + "group": "A", + "mode": "none" + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 16, + "y": 86 + }, + "id": 176, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "sum by(greptime_pod, scheme, operation) (rate(opendal_requests_total{greptime_pod=~\"$greptime_pod\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{greptime_pod}}-{{scheme}}-{{operation}}-qps", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "requests_total", + "type": "timeseries" + } + ], + "refresh": "5s", + "schemaVersion": 39, + "tags": [], + "templating": { + "list": [ + { + "current": {}, + "hide": 0, + "includeAll": false, + "multi": false, + "name": "datasource", + "options": [], + "query": "prometheus", + "queryValue": "", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "type": "datasource" + }, + { + "current": {}, + "datasource": { + "type": "prometheus", + "uid": "${datasource}" + }, + "definition": "label_values(greptime_pod)", + "hide": 0, + "includeAll": true, + "multi": false, + "name": "greptime_pod", + "options": [], + "query": { + "qryType": 1, + "query": "label_values(greptime_pod)", + "refId": "PrometheusVariableQueryEditor-VariableQuery" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "query" + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "GreptimeDB-Cluster", + "uid": "ea35efe5-918e-44fa-9743-e9aa1a340a3f", + "version": 9, + "weekStart": "" + } \ No newline at end of file diff --git a/src/client/examples/logical.rs b/src/client/examples/logical.rs deleted file mode 100644 index 13f116555519..000000000000 --- a/src/client/examples/logical.rs +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright 2023 Greptime Team -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use api::v1::{ColumnDataType, ColumnDef, CreateTableExpr, SemanticType, TableId}; -use client::{Client, Database}; -use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, MITO_ENGINE}; -use prost::Message; -use substrait_proto::proto::plan_rel::RelType as PlanRelType; -use substrait_proto::proto::read_rel::{NamedTable, ReadType}; -use substrait_proto::proto::rel::RelType; -use substrait_proto::proto::{PlanRel, ReadRel, Rel}; -use tracing::{event, Level}; - -fn main() { - tracing::subscriber::set_global_default(tracing_subscriber::FmtSubscriber::builder().finish()) - .unwrap(); - - run(); -} - -#[tokio::main] -async fn run() { - let client = Client::with_urls(vec!["127.0.0.1:3001"]); - - let create_table_expr = CreateTableExpr { - catalog_name: "greptime".to_string(), - schema_name: "public".to_string(), - table_name: "test_logical_dist_exec".to_string(), - desc: String::default(), - column_defs: vec![ - ColumnDef { - name: "timestamp".to_string(), - data_type: ColumnDataType::TimestampMillisecond as i32, - is_nullable: false, - default_constraint: vec![], - semantic_type: SemanticType::Timestamp as i32, - comment: String::new(), - ..Default::default() - }, - ColumnDef { - name: "key".to_string(), - data_type: ColumnDataType::Uint64 as i32, - is_nullable: false, - default_constraint: vec![], - semantic_type: SemanticType::Tag as i32, - comment: String::new(), - ..Default::default() - }, - ColumnDef { - name: "value".to_string(), - data_type: ColumnDataType::Uint64 as i32, - is_nullable: false, - default_constraint: vec![], - semantic_type: SemanticType::Field as i32, - comment: String::new(), - ..Default::default() - }, - ], - time_index: "timestamp".to_string(), - primary_keys: vec!["key".to_string()], - create_if_not_exists: false, - table_options: Default::default(), - table_id: Some(TableId { id: 1024 }), - engine: MITO_ENGINE.to_string(), - }; - - let db = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client); - let result = db.create(create_table_expr).await.unwrap(); - event!(Level::INFO, "create table result: {:#?}", result); - - let logical = mock_logical_plan(); - event!(Level::INFO, "plan size: {:#?}", logical.len()); - let result = db.logical_plan(logical).await.unwrap(); - - event!(Level::INFO, "result: {:#?}", result); -} - -fn mock_logical_plan() -> Vec { - let catalog_name = "greptime".to_string(); - let schema_name = "public".to_string(); - let table_name = "test_logical_dist_exec".to_string(); - - let named_table = NamedTable { - names: vec![catalog_name, schema_name, table_name], - advanced_extension: None, - }; - let read_type = ReadType::NamedTable(named_table); - - let read_rel = ReadRel { - read_type: Some(read_type), - ..Default::default() - }; - - let mut buf = vec![]; - let rel = Rel { - rel_type: Some(RelType::Read(Box::new(read_rel))), - }; - let plan_rel = PlanRel { - rel_type: Some(PlanRelType::Rel(rel)), - }; - plan_rel.encode(&mut buf).unwrap(); - - buf -} diff --git a/src/client/examples/stream_ingest.rs b/src/client/examples/stream_ingest.rs deleted file mode 100644 index 94f9773096b9..000000000000 --- 
a/src/client/examples/stream_ingest.rs +++ /dev/null @@ -1,181 +0,0 @@ -// Copyright 2023 Greptime Team -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use api::v1::*; -use client::{Client, Database, DEFAULT_SCHEMA_NAME}; -use derive_new::new; -use tracing::{error, info}; - -fn main() { - tracing::subscriber::set_global_default(tracing_subscriber::FmtSubscriber::builder().finish()) - .unwrap(); - - run(); -} - -#[tokio::main] -async fn run() { - let greptimedb_endpoint = - std::env::var("GREPTIMEDB_ENDPOINT").unwrap_or_else(|_| "localhost:4001".to_owned()); - - let greptimedb_dbname = - std::env::var("GREPTIMEDB_DBNAME").unwrap_or_else(|_| DEFAULT_SCHEMA_NAME.to_owned()); - - let grpc_client = Client::with_urls(vec![&greptimedb_endpoint]); - - let client = Database::new_with_dbname(greptimedb_dbname, grpc_client); - - let stream_inserter = client.streaming_inserter().unwrap(); - - if let Err(e) = stream_inserter - .insert(vec![to_insert_request(weather_records_1())]) - .await - { - error!("Error: {e:?}"); - } - - if let Err(e) = stream_inserter - .insert(vec![to_insert_request(weather_records_2())]) - .await - { - error!("Error: {e:?}"); - } - - let result = stream_inserter.finish().await; - - match result { - Ok(rows) => { - info!("Rows written: {rows}"); - } - Err(e) => { - error!("Error: {e:?}"); - } - }; -} - -#[derive(new)] -struct WeatherRecord { - timestamp_millis: i64, - collector: String, - temperature: f32, - humidity: i32, -} - -fn weather_records_1() -> Vec { - vec![ - WeatherRecord::new(1686109527000, "c1".to_owned(), 26.4, 15), - WeatherRecord::new(1686023127000, "c1".to_owned(), 29.3, 20), - WeatherRecord::new(1685936727000, "c1".to_owned(), 31.8, 13), - WeatherRecord::new(1686109527000, "c2".to_owned(), 20.4, 67), - WeatherRecord::new(1686023127000, "c2".to_owned(), 18.0, 74), - WeatherRecord::new(1685936727000, "c2".to_owned(), 19.2, 81), - ] -} - -fn weather_records_2() -> Vec { - vec![ - WeatherRecord::new(1686109527001, "c3".to_owned(), 26.4, 15), - WeatherRecord::new(1686023127002, "c3".to_owned(), 29.3, 20), - WeatherRecord::new(1685936727003, "c3".to_owned(), 31.8, 13), - WeatherRecord::new(1686109527004, "c4".to_owned(), 20.4, 67), - WeatherRecord::new(1686023127005, "c4".to_owned(), 18.0, 74), - WeatherRecord::new(1685936727006, "c4".to_owned(), 19.2, 81), - ] -} - -/// This function generates some random data and bundle them into a -/// `InsertRequest`. 
-/// -/// Data structure: -/// -/// - `ts`: a timestamp column -/// - `collector`: a tag column -/// - `temperature`: a value field of f32 -/// - `humidity`: a value field of i32 -/// -fn to_insert_request(records: Vec) -> InsertRequest { - // convert records into columns - let rows = records.len(); - - // transpose records into columns - let (timestamp_millis, collectors, temp, humidity) = records.into_iter().fold( - ( - Vec::with_capacity(rows), - Vec::with_capacity(rows), - Vec::with_capacity(rows), - Vec::with_capacity(rows), - ), - |mut acc, rec| { - acc.0.push(rec.timestamp_millis); - acc.1.push(rec.collector); - acc.2.push(rec.temperature); - acc.3.push(rec.humidity); - - acc - }, - ); - - let columns = vec![ - // timestamp column: `ts` - Column { - column_name: "ts".to_owned(), - values: Some(column::Values { - timestamp_millisecond_values: timestamp_millis, - ..Default::default() - }), - semantic_type: SemanticType::Timestamp as i32, - datatype: ColumnDataType::TimestampMillisecond as i32, - ..Default::default() - }, - // tag column: collectors - Column { - column_name: "collector".to_owned(), - values: Some(column::Values { - string_values: collectors.into_iter().collect(), - ..Default::default() - }), - semantic_type: SemanticType::Tag as i32, - datatype: ColumnDataType::String as i32, - ..Default::default() - }, - // field column: temperature - Column { - column_name: "temperature".to_owned(), - values: Some(column::Values { - f32_values: temp, - ..Default::default() - }), - semantic_type: SemanticType::Field as i32, - datatype: ColumnDataType::Float32 as i32, - ..Default::default() - }, - // field column: humidity - Column { - column_name: "humidity".to_owned(), - values: Some(column::Values { - i32_values: humidity, - ..Default::default() - }), - semantic_type: SemanticType::Field as i32, - datatype: ColumnDataType::Int32 as i32, - ..Default::default() - }, - ]; - - InsertRequest { - table_name: "weather_demo".to_owned(), - columns, - row_count: rows as u32, - } -} diff --git a/src/client/src/client.rs b/src/client/src/client.rs index 47a8df49f156..5e82295c16f6 100644 --- a/src/client/src/client.rs +++ b/src/client/src/client.rs @@ -14,7 +14,6 @@ use std::sync::Arc; -use api::v1::greptime_database_client::GreptimeDatabaseClient; use api::v1::health_check_client::HealthCheckClient; use api::v1::prometheus_gateway_client::PrometheusGatewayClient; use api::v1::region::region_client::RegionClient as PbRegionClient; @@ -28,21 +27,17 @@ use tonic::transport::Channel; use crate::load_balance::{LoadBalance, Loadbalancer}; use crate::{error, Result}; -pub(crate) struct DatabaseClient { - pub(crate) inner: GreptimeDatabaseClient, -} - -pub(crate) struct FlightClient { +pub struct FlightClient { addr: String, client: FlightServiceClient, } impl FlightClient { - pub(crate) fn addr(&self) -> &str { + pub fn addr(&self) -> &str { &self.addr } - pub(crate) fn mut_inner(&mut self) -> &mut FlightServiceClient { + pub fn mut_inner(&mut self) -> &mut FlightServiceClient { &mut self.client } } @@ -138,7 +133,7 @@ impl Client { Ok((addr, channel)) } - fn max_grpc_recv_message_size(&self) -> usize { + pub fn max_grpc_recv_message_size(&self) -> usize { self.inner .channel_manager .config() @@ -146,7 +141,7 @@ impl Client { .as_bytes() as usize } - fn max_grpc_send_message_size(&self) -> usize { + pub fn max_grpc_send_message_size(&self) -> usize { self.inner .channel_manager .config() @@ -154,7 +149,7 @@ impl Client { .as_bytes() as usize } - pub(crate) fn make_flight_client(&self) -> Result { + 
pub fn make_flight_client(&self) -> Result { let (addr, channel) = self.find_channel()?; Ok(FlightClient { addr, @@ -164,15 +159,6 @@ impl Client { }) } - pub(crate) fn make_database_client(&self) -> Result { - let (_, channel) = self.find_channel()?; - Ok(DatabaseClient { - inner: GreptimeDatabaseClient::new(channel) - .max_decoding_message_size(self.max_grpc_recv_message_size()) - .max_encoding_message_size(self.max_grpc_send_message_size()), - }) - } - pub(crate) fn raw_region_client(&self) -> Result> { let (_, channel) = self.find_channel()?; Ok(PbRegionClient::new(channel) diff --git a/src/client/src/lib.rs b/src/client/src/lib.rs index 1a854c5daa27..be8346faf7b0 100644 --- a/src/client/src/lib.rs +++ b/src/client/src/lib.rs @@ -14,12 +14,10 @@ mod client; pub mod client_manager; -mod database; pub mod error; pub mod load_balance; mod metrics; pub mod region; -mod stream_insert; pub use api; use api::v1::greptime_response::Response; @@ -31,9 +29,7 @@ pub use common_recordbatch::{RecordBatches, SendableRecordBatchStream}; use snafu::OptionExt; pub use self::client::Client; -pub use self::database::Database; pub use self::error::{Error, Result}; -pub use self::stream_insert::StreamInserter; use crate::error::{IllegalDatabaseResponseSnafu, ServerSnafu}; pub fn from_grpc_response(response: GreptimeResponse) -> Result { diff --git a/src/client/src/stream_insert.rs b/src/client/src/stream_insert.rs deleted file mode 100644 index a75144786012..000000000000 --- a/src/client/src/stream_insert.rs +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright 2023 Greptime Team -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use api::v1::greptime_database_client::GreptimeDatabaseClient; -use api::v1::greptime_request::Request; -use api::v1::{ - AuthHeader, GreptimeRequest, GreptimeResponse, InsertRequest, InsertRequests, RequestHeader, - RowInsertRequest, RowInsertRequests, -}; -use tokio::sync::mpsc; -use tokio::task::JoinHandle; -use tokio_stream::wrappers::ReceiverStream; -use tonic::transport::Channel; -use tonic::{Response, Status}; - -use crate::error::{self, Result}; -use crate::from_grpc_response; - -/// A structure that provides some methods for streaming data insert. -/// -/// [`StreamInserter`] cannot be constructed via the `StreamInserter::new` method. -/// You can use the following way to obtain [`StreamInserter`]. -/// -/// ```ignore -/// let grpc_client = Client::with_urls(vec!["127.0.0.1:4002"]); -/// let client = Database::new_with_dbname("db_name", grpc_client); -/// let stream_inserter = client.streaming_inserter().unwrap(); -/// ``` -/// -/// If you want to see a concrete usage example, please see -/// [stream_inserter.rs](https://github.com/GreptimeTeam/greptimedb/blob/main/src/client/examples/stream_ingest.rs). 
-pub struct StreamInserter { - sender: mpsc::Sender, - - auth_header: Option, - - dbname: String, - - join: JoinHandle, Status>>, -} - -impl StreamInserter { - pub(crate) fn new( - mut client: GreptimeDatabaseClient, - dbname: String, - auth_header: Option, - channel_size: usize, - ) -> StreamInserter { - let (send, recv) = tokio::sync::mpsc::channel(channel_size); - - let join: JoinHandle, Status>> = - tokio::spawn(async move { - let recv_stream = ReceiverStream::new(recv); - client.handle_requests(recv_stream).await - }); - - StreamInserter { - sender: send, - auth_header, - dbname, - join, - } - } - - pub async fn insert(&self, requests: Vec) -> Result<()> { - let inserts = InsertRequests { inserts: requests }; - let request = self.to_rpc_request(Request::Inserts(inserts)); - - self.sender.send(request).await.map_err(|e| { - error::ClientStreamingSnafu { - err_msg: e.to_string(), - } - .build() - }) - } - - pub async fn row_insert(&self, requests: Vec) -> Result<()> { - let inserts = RowInsertRequests { inserts: requests }; - let request = self.to_rpc_request(Request::RowInserts(inserts)); - - self.sender.send(request).await.map_err(|e| { - error::ClientStreamingSnafu { - err_msg: e.to_string(), - } - .build() - }) - } - - pub async fn finish(self) -> Result { - drop(self.sender); - - let response = self.join.await.unwrap()?; - let response = response.into_inner(); - from_grpc_response(response) - } - - fn to_rpc_request(&self, request: Request) -> GreptimeRequest { - GreptimeRequest { - header: Some(RequestHeader { - authorization: self.auth_header.clone(), - dbname: self.dbname.clone(), - ..Default::default() - }), - request: Some(request), - } - } -} diff --git a/src/cmd/Cargo.toml b/src/cmd/Cargo.toml index 24bbe69df18c..ae27c4e374d6 100644 --- a/src/cmd/Cargo.toml +++ b/src/cmd/Cargo.toml @@ -18,6 +18,7 @@ workspace = true [dependencies] async-trait.workspace = true auth.workspace = true +base64.workspace = true catalog.workspace = true chrono.workspace = true clap.workspace = true @@ -58,6 +59,7 @@ prost.workspace = true query.workspace = true rand.workspace = true regex.workspace = true +reqwest.workspace = true rustyline = "10.1" serde.workspace = true serde_json.workspace = true diff --git a/src/cmd/src/cli.rs b/src/cmd/src/cli.rs index 35dc1e4ba7dc..c44b99ad6bf9 100644 --- a/src/cmd/src/cli.rs +++ b/src/cmd/src/cli.rs @@ -22,7 +22,7 @@ mod helper; // Wait for https://github.com/GreptimeTeam/greptimedb/issues/2373 #[allow(unused)] -mod repl; +// mod repl; // TODO(weny): Removes it #[allow(deprecated)] mod upgrade; @@ -31,7 +31,7 @@ use async_trait::async_trait; use bench::BenchTableMetadataCommand; use clap::Parser; use common_telemetry::logging::LoggingOptions; -pub use repl::Repl; +// pub use repl::Repl; use upgrade::UpgradeCommand; use self::export::ExportCommand; diff --git a/src/cmd/src/cli/export.rs b/src/cmd/src/cli/export.rs index 4122fbd6d7a1..70ca80d11db3 100644 --- a/src/cmd/src/cli/export.rs +++ b/src/cmd/src/cli/export.rs @@ -16,14 +16,14 @@ use std::path::Path; use std::sync::Arc; use async_trait::async_trait; +use base64::engine::general_purpose; +use base64::Engine; use clap::{Parser, ValueEnum}; -use client::api::v1::auth_header::AuthScheme; -use client::api::v1::Basic; -use client::{Client, Database, OutputData, DEFAULT_SCHEMA_NAME}; -use common_recordbatch::util::collect; +use client::DEFAULT_SCHEMA_NAME; use common_telemetry::{debug, error, info, warn}; -use datatypes::scalars::ScalarVector; -use datatypes::vectors::{StringVector, Vector}; +use 
serde_json::Value; +use servers::http::greptime_result_v1::GreptimedbV1Response; +use servers::http::GreptimeQueryOutput; use snafu::{OptionExt, ResultExt}; use tokio::fs::File; use tokio::io::{AsyncWriteExt, BufWriter}; @@ -31,9 +31,8 @@ use tokio::sync::Semaphore; use crate::cli::{Instance, Tool}; use crate::error::{ - CollectRecordBatchesSnafu, ConnectServerSnafu, EmptyResultSnafu, Error, FileIoSnafu, - IllegalConfigSnafu, InvalidDatabaseNameSnafu, NotDataFromOutputSnafu, RequestDatabaseSnafu, - Result, + EmptyResultSnafu, Error, FileIoSnafu, HttpQuerySqlSnafu, InvalidDatabaseNameSnafu, Result, + SerdeJsonSnafu, }; type TableReference = (String, String, String); @@ -80,51 +79,75 @@ pub struct ExportCommand { impl ExportCommand { pub async fn build(&self) -> Result { - let client = Client::with_urls([self.addr.clone()]); - client - .health_check() - .await - .with_context(|_| ConnectServerSnafu { - addr: self.addr.clone(), - })?; let (catalog, schema) = split_database(&self.database)?; - let mut database_client = Database::new( - catalog.clone(), - schema.clone().unwrap_or(DEFAULT_SCHEMA_NAME.to_string()), - client, - ); - if let Some(auth_basic) = &self.auth_basic { - let (username, password) = auth_basic.split_once(':').context(IllegalConfigSnafu { - msg: "auth_basic cannot be split by ':'".to_string(), - })?; - database_client.set_auth(AuthScheme::Basic(Basic { - username: username.to_string(), - password: password.to_string(), - })); - } + let auth_header = if let Some(basic) = &self.auth_basic { + let encoded = general_purpose::STANDARD.encode(basic); + Some(format!("basic {}", encoded)) + } else { + None + }; Ok(Instance::new(Box::new(Export { - client: database_client, + addr: self.addr.clone(), catalog, schema, output_dir: self.output_dir.clone(), parallelism: self.export_jobs, target: self.target.clone(), + auth_header, }))) } } pub struct Export { - client: Database, + addr: String, catalog: String, schema: Option, output_dir: String, parallelism: usize, target: ExportTarget, + auth_header: Option, } impl Export { + /// Execute one single sql query. + async fn sql(&self, sql: &str) -> Result>>> { + let url = format!( + "http://{}/v1/sql?db={}-{}&sql={}", + self.addr, + self.catalog, + self.schema.as_deref().unwrap_or(DEFAULT_SCHEMA_NAME), + sql + ); + + let mut request = reqwest::Client::new() + .get(&url) + .header("Content-Type", "application/x-www-form-urlencoded"); + if let Some(ref auth) = self.auth_header { + request = request.header("Authorization", auth); + } + + let response = request.send().await.with_context(|_| HttpQuerySqlSnafu { + reason: format!("bad url: {}", url), + })?; + let response = response + .error_for_status() + .with_context(|_| HttpQuerySqlSnafu { + reason: format!("query failed: {}", sql), + })?; + + let text = response.text().await.with_context(|_| HttpQuerySqlSnafu { + reason: "cannot get response text".to_string(), + })?; + + let body = serde_json::from_str::(&text).context(SerdeJsonSnafu)?; + Ok(body.output().first().and_then(|output| match output { + GreptimeQueryOutput::Records(records) => Some(records.rows().clone()), + GreptimeQueryOutput::AffectedRows(_) => None, + })) + } + /// Iterate over all db names. /// /// Newbie: `db_name` is catalog + schema. 
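The hunk above swaps the export tool's gRPC `Database` client for plain HTTP calls against GreptimeDB's `/v1/sql` endpoint, decoding the JSON body into rows. A minimal, self-contained sketch of that query path follows; it assumes a standalone instance serving HTTP on 127.0.0.1:4000, and the helper name `show_databases`, the port, and the `output[0].records.rows` access path are illustrative assumptions rather than part of this diff (the diff itself decodes through the `servers` crate's `GreptimedbV1Response` type instead).

// Dependencies assumed: reqwest, serde_json, tokio (macros + rt-multi-thread).
use serde_json::Value;

/// Hypothetical helper mirroring `Export::sql`: issue one SQL statement over
/// HTTP and return the result rows, if any.
async fn show_databases(
    addr: &str,
    catalog: &str,
    schema: &str,
) -> Result<Vec<Value>, Box<dyn std::error::Error>> {
    // As in the diff's `Export::sql`, the `db` query parameter is "{catalog}-{schema}".
    let url = format!("http://{addr}/v1/sql?db={catalog}-{schema}&sql=show%20databases");
    let text = reqwest::Client::new()
        .get(&url)
        .send()
        .await?
        .error_for_status()? // surface HTTP-level failures early
        .text()
        .await?;
    let body: Value = serde_json::from_str(&text)?;
    // For successful queries the rows are expected under output[0].records.rows
    // (an assumption about the /v1/sql response shape, not taken from this diff).
    Ok(body["output"][0]["records"]["rows"]
        .as_array()
        .cloned()
        .unwrap_or_default())
}

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    for row in show_databases("127.0.0.1:4000", "greptime", "public").await? {
        println!("{row:?}");
    }
    Ok(())
}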
@@ -132,35 +155,19 @@ impl Export { if let Some(schema) = &self.schema { Ok(vec![(self.catalog.clone(), schema.clone())]) } else { - let mut client = self.client.clone(); - client.set_catalog(self.catalog.clone()); - let result = - client - .sql("show databases") - .await - .with_context(|_| RequestDatabaseSnafu { - sql: "show databases".to_string(), - })?; - let OutputData::Stream(stream) = result.data else { - NotDataFromOutputSnafu.fail()? + let result = self.sql("show databases").await?; + let Some(records) = result else { + EmptyResultSnafu.fail()? }; - let record_batch = collect(stream) - .await - .context(CollectRecordBatchesSnafu)? - .pop() - .context(EmptyResultSnafu)?; - let schemas = record_batch - .column(0) - .as_any() - .downcast_ref::() - .unwrap(); - let mut result = Vec::with_capacity(schemas.len()); - for i in 0..schemas.len() { - let schema = schemas.get_data(i).unwrap().to_owned(); + let mut result = Vec::with_capacity(records.len()); + for value in records { + let serde_json::Value::String(schema) = &value[0] else { + unreachable!() + }; if schema == common_catalog::consts::INFORMATION_SCHEMA_NAME { continue; } - result.push((self.catalog.clone(), schema)); + result.push((self.catalog.clone(), schema.clone())); } Ok(result) } @@ -172,54 +179,30 @@ impl Export { // TODO: SQL injection hurts let sql = format!( "select table_catalog, table_schema, table_name from \ - information_schema.tables where table_type = \'BASE TABLE\'\ + information_schema.tables where table_type = \'BASE TABLE\' \ and table_catalog = \'{catalog}\' and table_schema = \'{schema}\'", ); - let mut client = self.client.clone(); - client.set_catalog(catalog); - client.set_schema(schema); - let result = client - .sql(&sql) - .await - .with_context(|_| RequestDatabaseSnafu { sql })?; - let OutputData::Stream(stream) = result.data else { - NotDataFromOutputSnafu.fail()? - }; - let Some(record_batch) = collect(stream) - .await - .context(CollectRecordBatchesSnafu)? - .pop() - else { - return Ok(vec![]); + let result = self.sql(&sql).await?; + let Some(records) = result else { + EmptyResultSnafu.fail()? 
}; - debug!("Fetched table list: {}", record_batch.pretty_print()); + debug!("Fetched table list: {:?}", records); - if record_batch.num_rows() == 0 { + if records.is_empty() { return Ok(vec![]); } - let mut result = Vec::with_capacity(record_batch.num_rows()); - let catalog_column = record_batch - .column(0) - .as_any() - .downcast_ref::() - .unwrap(); - let schema_column = record_batch - .column(1) - .as_any() - .downcast_ref::() - .unwrap(); - let table_column = record_batch - .column(2) - .as_any() - .downcast_ref::() - .unwrap(); - for i in 0..record_batch.num_rows() { - let catalog = catalog_column.get_data(i).unwrap().to_owned(); - let schema = schema_column.get_data(i).unwrap().to_owned(); - let table = table_column.get_data(i).unwrap().to_owned(); - result.push((catalog, schema, table)); + let mut result = Vec::with_capacity(records.len()); + for value in records { + let mut t = Vec::with_capacity(3); + for v in &value { + let serde_json::Value::String(value) = v else { + unreachable!() + }; + t.push(value); + } + result.push((t[0].clone(), t[1].clone(), t[2].clone())); } Ok(result) @@ -230,30 +213,15 @@ impl Export { r#"show create table "{}"."{}"."{}""#, catalog, schema, table ); - let mut client = self.client.clone(); - client.set_catalog(catalog); - client.set_schema(schema); - let result = client - .sql(&sql) - .await - .with_context(|_| RequestDatabaseSnafu { sql })?; - let OutputData::Stream(stream) = result.data else { - NotDataFromOutputSnafu.fail()? + let result = self.sql(&sql).await?; + let Some(records) = result else { + EmptyResultSnafu.fail()? }; - let record_batch = collect(stream) - .await - .context(CollectRecordBatchesSnafu)? - .pop() - .context(EmptyResultSnafu)?; - let create_table = record_batch - .column(1) - .as_any() - .downcast_ref::() - .unwrap() - .get_data(0) - .unwrap(); - - Ok(format!("{create_table};\n")) + let serde_json::Value::String(create_table) = &records[0][1] else { + unreachable!() + }; + + Ok(format!("{};\n", create_table)) } async fn export_create_table(&self) -> Result<()> { @@ -321,20 +289,13 @@ impl Export { .context(FileIoSnafu)?; let output_dir = Path::new(&self.output_dir).join(format!("{catalog}-{schema}/")); - let mut client = self.client.clone(); - client.set_catalog(catalog.clone()); - client.set_schema(schema.clone()); - // copy database to let sql = format!( "copy database {} to '{}' with (format='parquet');", schema, output_dir.to_str().unwrap() ); - client - .sql(sql.clone()) - .await - .context(RequestDatabaseSnafu { sql })?; + self.sql(&sql).await?; info!("finished exporting {catalog}.{schema} data"); // export copy from sql @@ -420,82 +381,3 @@ fn split_database(database: &str) -> Result<(String, Option)> { Ok((catalog.to_string(), Some(schema.to_string()))) } } - -#[cfg(test)] -mod tests { - use clap::Parser; - use client::{Client, Database}; - use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME}; - - use crate::error::Result; - use crate::options::{CliOptions, Options}; - use crate::{cli, standalone, App}; - - #[tokio::test(flavor = "multi_thread")] - async fn test_export_create_table_with_quoted_names() -> Result<()> { - let output_dir = tempfile::tempdir().unwrap(); - - let standalone = standalone::Command::parse_from([ - "standalone", - "start", - "--data-home", - &*output_dir.path().to_string_lossy(), - ]); - let Options::Standalone(standalone_opts) = - standalone.load_options(&CliOptions::default())? 
- else { - unreachable!() - }; - let mut instance = standalone.build(*standalone_opts).await?; - instance.start().await?; - - let client = Client::with_urls(["127.0.0.1:4001"]); - let database = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client); - database - .sql(r#"CREATE DATABASE "cli.export.create_table";"#) - .await - .unwrap(); - database - .sql( - r#"CREATE TABLE "cli.export.create_table"."a.b.c"( - ts TIMESTAMP, - TIME INDEX (ts) - ) engine=mito; - "#, - ) - .await - .unwrap(); - - let output_dir = tempfile::tempdir().unwrap(); - let cli = cli::Command::parse_from([ - "cli", - "export", - "--addr", - "127.0.0.1:4001", - "--output-dir", - &*output_dir.path().to_string_lossy(), - "--target", - "create-table", - ]); - let mut cli_app = cli.build().await?; - cli_app.start().await?; - - instance.stop().await?; - - let output_file = output_dir - .path() - .join("greptime-cli.export.create_table.sql"); - let res = std::fs::read_to_string(output_file).unwrap(); - let expect = r#"CREATE TABLE IF NOT EXISTS "a.b.c" ( - "ts" TIMESTAMP(3) NOT NULL, - TIME INDEX ("ts") -) - -ENGINE=mito -; -"#; - assert_eq!(res.trim(), expect.trim()); - - Ok(()) - } -} diff --git a/src/cmd/src/cli/repl.rs b/src/cmd/src/cli/repl.rs index 4a2bc4165934..edd174699cc7 100644 --- a/src/cmd/src/cli/repl.rs +++ b/src/cmd/src/cli/repl.rs @@ -19,7 +19,7 @@ use std::time::Instant; use catalog::kvbackend::{ CachedMetaKvBackend, CachedMetaKvBackendBuilder, KvBackendCatalogManager, }; -use client::{Client, Database, OutputData, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME}; +use client::{Client, OutputData, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME}; use common_base::Plugins; use common_config::Mode; use common_error::ext::ErrorExt; diff --git a/src/cmd/src/cli/upgrade.rs b/src/cmd/src/cli/upgrade.rs index 8d1be2c3c554..a36333c9c483 100644 --- a/src/cmd/src/cli/upgrade.rs +++ b/src/cmd/src/cli/upgrade.rs @@ -27,7 +27,7 @@ use common_meta::key::table_info::{TableInfoKey, TableInfoValue}; use common_meta::key::table_name::{TableNameKey, TableNameValue}; use common_meta::key::table_region::{TableRegionKey, TableRegionValue}; use common_meta::key::table_route::{TableRouteKey, TableRouteValue as NextTableRouteValue}; -use common_meta::key::{RegionDistribution, TableMetaKey, TableMetaValue}; +use common_meta::key::{MetaKey, RegionDistribution, TableMetaValue}; use common_meta::kv_backend::etcd::EtcdStore; use common_meta::kv_backend::KvBackendRef; use common_meta::range_stream::PaginationStream; @@ -137,7 +137,7 @@ impl MigrateTableMetadata { while let Some((key, value)) = stream.try_next().await.context(error::IterStreamSnafu)? 
{ let table_id = self.migrate_table_route_key(value).await?; keys.push(key); - keys.push(TableRegionKey::new(table_id).as_raw_key()) + keys.push(TableRegionKey::new(table_id).to_bytes()) } info!("Total migrated TableRouteKeys: {}", keys.len() / 2); @@ -165,7 +165,7 @@ impl MigrateTableMetadata { self.etcd_store .put( PutRequest::new() - .with_key(new_key.as_raw_key()) + .with_key(new_key.to_bytes()) .with_value(new_table_value.try_as_raw_value().unwrap()), ) .await @@ -217,7 +217,7 @@ impl MigrateTableMetadata { self.etcd_store .put( PutRequest::new() - .with_key(new_key.as_raw_key()) + .with_key(new_key.to_bytes()) .with_value(schema_name_value.try_as_raw_value().unwrap()), ) .await @@ -269,7 +269,7 @@ impl MigrateTableMetadata { self.etcd_store .put( PutRequest::new() - .with_key(new_key.as_raw_key()) + .with_key(new_key.to_bytes()) .with_value(catalog_name_value.try_as_raw_value().unwrap()), ) .await @@ -346,11 +346,11 @@ impl MigrateTableMetadata { .batch_put( BatchPutRequest::new() .add_kv( - table_info_key.as_raw_key(), + table_info_key.to_bytes(), table_info_value.try_as_raw_value().unwrap(), ) .add_kv( - table_region_key.as_raw_key(), + table_region_key.to_bytes(), table_region_value.try_as_raw_value().unwrap(), ), ) @@ -378,7 +378,7 @@ impl MigrateTableMetadata { self.etcd_store .put( PutRequest::new() - .with_key(table_name_key.as_raw_key()) + .with_key(table_name_key.to_bytes()) .with_value(table_name_value.try_as_raw_value().unwrap()), ) .await @@ -425,7 +425,7 @@ impl MigrateTableMetadata { } else { let mut req = BatchPutRequest::new(); for (key, value) in datanode_table_kvs { - req = req.add_kv(key.as_raw_key(), value.try_as_raw_value().unwrap()); + req = req.add_kv(key.to_bytes(), value.try_as_raw_value().unwrap()); } self.etcd_store.batch_put(req).await.unwrap(); } diff --git a/src/cmd/src/error.rs b/src/cmd/src/error.rs index 1951ed5b0e29..d24f00f5be44 100644 --- a/src/cmd/src/error.rs +++ b/src/cmd/src/error.rs @@ -139,13 +139,6 @@ pub enum Error { location: Location, }, - #[snafu(display("Failed to request database, sql: {sql}"))] - RequestDatabase { - sql: String, - location: Location, - source: client::Error, - }, - #[snafu(display("Failed to collect RecordBatches"))] CollectRecordBatches { location: Location, @@ -218,6 +211,14 @@ pub enum Error { location: Location, }, + #[snafu(display("Failed to run http request: {reason}"))] + HttpQuerySql { + reason: String, + #[snafu(source)] + error: reqwest::Error, + location: Location, + }, + #[snafu(display("Expect data from output, but got another thing"))] NotDataFromOutput { location: Location }, @@ -290,8 +291,9 @@ impl ErrorExt for Error { Error::StartProcedureManager { source, .. } | Error::StopProcedureManager { source, .. } => source.status_code(), Error::StartWalOptionsAllocator { source, .. } => source.status_code(), - Error::ReplCreation { .. } | Error::Readline { .. } => StatusCode::Internal, - Error::RequestDatabase { source, .. } => source.status_code(), + Error::ReplCreation { .. } | Error::Readline { .. } | Error::HttpQuerySql { .. } => { + StatusCode::Internal + } Error::CollectRecordBatches { source, .. } | Error::PrettyPrintRecordBatches { source, .. } => source.status_code(), Error::StartMetaClient { source, .. 
} => source.status_code(), diff --git a/src/common/base/src/secrets.rs b/src/common/base/src/secrets.rs index 49c9b37de35b..702b6d3ccd8a 100644 --- a/src/common/base/src/secrets.rs +++ b/src/common/base/src/secrets.rs @@ -1,4 +1,4 @@ -// This file is copied from: https://github.com/iqlusioninc/crates/blob/f98d4ccf/secrecy/src/lib.rs. +// This file is copied from https://github.com/iqlusioninc/crates/blob/f98d4ccf/secrecy/src/lib.rs //! [`SecretBox`] wrapper type for more carefully handling secret values //! (e.g. passwords, cryptographic keys, access tokens or other credentials) diff --git a/src/common/datasource/src/object_store.rs b/src/common/datasource/src/object_store.rs index c9e36018c22b..d2ed0a4ad82d 100644 --- a/src/common/datasource/src/object_store.rs +++ b/src/common/datasource/src/object_store.rs @@ -35,7 +35,7 @@ pub fn parse_url(url: &str) -> Result<(String, Option, String)> { #[cfg(windows)] { // On Windows, the url may start with `C:/`. - if let Some(_) = handle_windows_path(url) { + if handle_windows_path(url).is_some() { return Ok((FS_SCHEMA.to_string(), None, url.to_string())); } } diff --git a/src/common/function/src/scalars/numpy/interp.rs b/src/common/function/src/scalars/numpy/interp.rs index 0ae9275032d2..85f25d8b3b3d 100644 --- a/src/common/function/src/scalars/numpy/interp.rs +++ b/src/common/function/src/scalars/numpy/interp.rs @@ -152,7 +152,7 @@ pub fn interp(args: &[VectorRef]) -> Result { let res; if xp.len() == 1 { - let datas = x + let data = x .iter_data() .map(|x| { if Value::from(x) < xp.get(0) { @@ -164,7 +164,7 @@ pub fn interp(args: &[VectorRef]) -> Result { } }) .collect::>(); - res = Float64Vector::from(datas); + res = Float64Vector::from(data); } else { let mut j = 0; /* only pre-calculate slopes if there are relatively few of them. */ @@ -191,7 +191,7 @@ pub fn interp(args: &[VectorRef]) -> Result { } slopes = Some(slopes_tmp); } - let datas = x + let data = x .iter_data() .map(|x| match x { Some(xi) => { @@ -255,7 +255,7 @@ pub fn interp(args: &[VectorRef]) -> Result { _ => None, }) .collect::>(); - res = Float64Vector::from(datas); + res = Float64Vector::from(data); } Ok(Arc::new(res) as _) } diff --git a/src/common/grpc-expr/src/alter.rs b/src/common/grpc-expr/src/alter.rs index fcf4486b4628..ac49069412cd 100644 --- a/src/common/grpc-expr/src/alter.rs +++ b/src/common/grpc-expr/src/alter.rs @@ -12,17 +12,18 @@ // See the License for the specific language governing permissions and // limitations under the License. 
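Note: the grpc-expr change that follows replaces the `unimplemented!()` arm for `Kind::ChangeColumnTypes` with a real conversion into `AlterKind::ChangeColumnTypes`. As a small sketch of the expression it now accepts (table and column names are made up for illustration; the proto types are the ones this diff imports):

```rust
use api::v1::alter_expr::Kind;
use api::v1::{AlterExpr, ChangeColumnType, ChangeColumnTypes, ColumnDataType};

/// Builds an alter expression that asks to change `mem_usage` to a string
/// column; alter_expr_to_request() turns this into AlterKind::ChangeColumnTypes.
fn change_mem_usage_to_string() -> AlterExpr {
    AlterExpr {
        catalog_name: "greptime".to_string(),
        schema_name: "public".to_string(),
        table_name: "monitor".to_string(),
        kind: Some(Kind::ChangeColumnTypes(ChangeColumnTypes {
            change_column_types: vec![ChangeColumnType {
                column_name: "mem_usage".to_string(),
                target_type: ColumnDataType::String as i32,
                target_type_extension: None,
            }],
        })),
    }
}
```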
+use api::helper::ColumnDataTypeWrapper; use api::v1::add_column_location::LocationType; use api::v1::alter_expr::Kind; use api::v1::{ - column_def, AddColumnLocation as Location, AlterExpr, CreateTableExpr, DropColumns, - RenameTable, SemanticType, + column_def, AddColumnLocation as Location, AlterExpr, ChangeColumnTypes, CreateTableExpr, + DropColumns, RenameTable, SemanticType, }; use common_query::AddColumnLocation; use datatypes::schema::{ColumnSchema, RawSchema}; use snafu::{ensure, OptionExt, ResultExt}; use table::metadata::TableId; -use table::requests::{AddColumnRequest, AlterKind, AlterTableRequest}; +use table::requests::{AddColumnRequest, AlterKind, AlterTableRequest, ChangeColumnTypeRequest}; use crate::error::{ InvalidColumnDefSnafu, MissingFieldSnafu, MissingTimestampColumnSnafu, Result, @@ -64,13 +65,33 @@ pub fn alter_expr_to_request(table_id: TableId, expr: AlterExpr) -> Result { + let change_column_type_requests = change_column_types + .into_iter() + .map(|cct| { + let target_type = + ColumnDataTypeWrapper::new(cct.target_type(), cct.target_type_extension) + .into(); + + Ok(ChangeColumnTypeRequest { + column_name: cct.column_name, + target_type, + }) + }) + .collect::>>()?; + + AlterKind::ChangeColumnTypes { + columns: change_column_type_requests, + } + } Kind::DropColumns(DropColumns { drop_columns }) => AlterKind::DropColumns { names: drop_columns.into_iter().map(|c| c.name).collect(), }, Kind::RenameTable(RenameTable { new_table_name }) => { AlterKind::RenameTable { new_table_name } } - Kind::ChangeColumnTypes(_) => unimplemented!(), }; let request = AlterTableRequest { @@ -138,7 +159,10 @@ fn parse_location(location: Option) -> Result columns, + _ => unreachable!(), + }; + + let change_column_type = change_column_types.pop().unwrap(); + assert_eq!("mem_usage", change_column_type.column_name); + assert_eq!( + ConcreteDataType::string_datatype(), + change_column_type.target_type + ); + } + #[test] fn test_drop_column_expr() { let expr = AlterExpr { diff --git a/src/common/meta/src/cache_invalidator.rs b/src/common/meta/src/cache_invalidator.rs index 2e7afd37efc7..fb62e7a61a25 100644 --- a/src/common/meta/src/cache_invalidator.rs +++ b/src/common/meta/src/cache_invalidator.rs @@ -22,7 +22,7 @@ use crate::key::schema_name::SchemaNameKey; use crate::key::table_info::TableInfoKey; use crate::key::table_name::TableNameKey; use crate::key::table_route::TableRouteKey; -use crate::key::TableMetaKey; +use crate::key::MetaKey; /// KvBackend cache invalidator #[async_trait::async_trait] @@ -99,18 +99,18 @@ where match cache { CacheIdent::TableId(table_id) => { let key = TableInfoKey::new(table_id); - self.invalidate_key(&key.as_raw_key()).await; + self.invalidate_key(&key.to_bytes()).await; let key = &TableRouteKey { table_id }; - self.invalidate_key(&key.as_raw_key()).await; + self.invalidate_key(&key.to_bytes()).await; } CacheIdent::TableName(table_name) => { let key: TableNameKey = (&table_name).into(); - self.invalidate_key(&key.as_raw_key()).await + self.invalidate_key(&key.to_bytes()).await } CacheIdent::SchemaName(schema_name) => { let key: SchemaNameKey = (&schema_name).into(); - self.invalidate_key(&key.as_raw_key()).await; + self.invalidate_key(&key.to_bytes()).await; } } } diff --git a/src/common/meta/src/ddl/alter_table/region_request.rs b/src/common/meta/src/ddl/alter_table/region_request.rs index 265466b69442..b4223b8ea05d 100644 --- a/src/common/meta/src/ddl/alter_table/region_request.rs +++ b/src/common/meta/src/ddl/alter_table/region_request.rs @@ -91,6 +91,7 @@ 
fn create_proto_alter_kind( add_columns, }))) } + Kind::ChangeColumnTypes(x) => Ok(Some(alter_request::Kind::ChangeColumnTypes(x.clone()))), Kind::DropColumns(x) => { let drop_columns = x .drop_columns @@ -105,7 +106,6 @@ fn create_proto_alter_kind( }))) } Kind::RenameTable(_) => Ok(None), - Kind::ChangeColumnTypes(_) => unimplemented!(), } } @@ -119,27 +119,27 @@ mod tests { use api::v1::region::region_request::Body; use api::v1::region::RegionColumnDef; use api::v1::{ - region, AddColumn, AddColumnLocation, AddColumns, AlterExpr, ColumnDataType, - ColumnDef as PbColumnDef, SemanticType, + region, AddColumn, AddColumnLocation, AddColumns, AlterExpr, ChangeColumnType, + ChangeColumnTypes, ColumnDataType, ColumnDef as PbColumnDef, SemanticType, }; use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME}; - use store_api::storage::RegionId; + use store_api::storage::{RegionId, TableId}; use crate::ddl::alter_table::AlterTableProcedure; use crate::ddl::test_util::columns::TestColumnDefBuilder; use crate::ddl::test_util::create_table::{ build_raw_table_info_from_expr, TestCreateTableExprBuilder, }; + use crate::ddl::DdlContext; use crate::key::table_route::TableRouteValue; use crate::peer::Peer; use crate::rpc::ddl::AlterTableTask; use crate::rpc::router::{Region, RegionRoute}; use crate::test_util::{new_ddl_context, MockDatanodeManager}; - #[tokio::test] - async fn test_make_alter_region_request() { - let node_manager = Arc::new(MockDatanodeManager::new(())); - let ddl_context = new_ddl_context(node_manager); + async fn prepare_ddl_context() -> (DdlContext, u64, TableId, RegionId, String) { + let datanode_manager = Arc::new(MockDatanodeManager::new(())); + let ddl_context = new_ddl_context(datanode_manager); let cluster_id = 1; let table_id = 1024; let region_id = RegionId::new(table_id, 1); @@ -194,12 +194,25 @@ mod tests { ) .await .unwrap(); + ( + ddl_context, + cluster_id, + table_id, + region_id, + table_name.to_string(), + ) + } + + #[tokio::test] + async fn test_make_alter_region_request() { + let (ddl_context, cluster_id, table_id, region_id, table_name) = + prepare_ddl_context().await; let task = AlterTableTask { alter_table: AlterExpr { catalog_name: DEFAULT_CATALOG_NAME.to_string(), schema_name: DEFAULT_SCHEMA_NAME.to_string(), - table_name: table_name.to_string(), + table_name, kind: Some(Kind::AddColumns(AddColumns { add_columns: vec![AddColumn { column_def: Some(PbColumnDef { @@ -256,4 +269,48 @@ mod tests { )) ); } + + #[tokio::test] + async fn test_make_alter_column_type_region_request() { + let (ddl_context, cluster_id, table_id, region_id, table_name) = + prepare_ddl_context().await; + + let task = AlterTableTask { + alter_table: AlterExpr { + catalog_name: DEFAULT_CATALOG_NAME.to_string(), + schema_name: DEFAULT_SCHEMA_NAME.to_string(), + table_name, + kind: Some(Kind::ChangeColumnTypes(ChangeColumnTypes { + change_column_types: vec![ChangeColumnType { + column_name: "cpu".to_string(), + target_type: ColumnDataType::String as i32, + target_type_extension: None, + }], + })), + }, + }; + + let mut procedure = + AlterTableProcedure::new(cluster_id, table_id, task, ddl_context).unwrap(); + procedure.on_prepare().await.unwrap(); + let Some(Body::Alter(alter_region_request)) = + procedure.make_alter_region_request(region_id).unwrap().body + else { + unreachable!() + }; + assert_eq!(alter_region_request.region_id, region_id.as_u64()); + assert_eq!(alter_region_request.schema_version, 1); + assert_eq!( + alter_region_request.kind, + 
+            Some(region::alter_request::Kind::ChangeColumnTypes(
+                ChangeColumnTypes {
+                    change_column_types: vec![ChangeColumnType {
+                        column_name: "cpu".to_string(),
+                        target_type: ColumnDataType::String as i32,
+                        target_type_extension: None,
+                    }]
+                }
+            ))
+        );
+    }
 }
diff --git a/src/common/meta/src/key.rs b/src/common/meta/src/key.rs
index 30455181dcc3..7fb1fedeff64 100644
--- a/src/common/meta/src/key.rs
+++ b/src/common/meta/src/key.rs
@@ -36,16 +36,16 @@
 //! - The value is a [TableNameValue] struct; it contains the table id.
 //! - Used in the table name to table id lookup.
 //!
-//! 6. Flow info key: `__flow/{catalog}/info/{flow_id}`
+//! 6. Flow info key: `__flow/info/{flow_id}`
 //! - Stores metadata of the flow.
 //!
-//! 7. Flow name key: `__flow/{catalog}/name/{flow_name}`
+//! 7. Flow name key: `__flow/name/{catalog}/{flow_name}`
 //! - Mapping {catalog}/{flow_name} to {flow_id}
 //!
-//! 8. Flownode flow key: `__flow/{catalog}/flownode/{flownode_id}/{flow_id}/{partition_id}`
+//! 8. Flownode flow key: `__flow/flownode/{flownode_id}/{flow_id}/{partition_id}`
 //! - Mapping {flownode_id} to {flow_id}
 //!
-//! 9. Table flow key: `__table_flow/{catalog}/source_table/{table_id}/{flownode_id}/{flow_id}/{partition_id}`
+//! 9. Table flow key: `__flow/source_table/{table_id}/{flownode_id}/{flow_id}/{partition_id}`
 //! - Mapping source table's {table_id} to {flownode_id}
 //! - Used in `Flownode` booting.
 //!
@@ -60,12 +60,12 @@
 //! The whole picture of flow keys will be like this:
 //!
 //! __flow/
-//!     {catalog}/
-//!         info/
-//!             {tsak_id}
+//!     info/
+//!         {flow_id}
 //!
 //!     name/
-//!         {flow_name}
+//!         {catalog_name}
+//!             {flow_name}
 //!
 //!     flownode/
 //!         {flownode_id}/
@@ -84,7 +84,6 @@ pub mod datanode_table;
 #[allow(unused)]
 pub mod flow;
 pub mod schema_name;
-pub mod scope;
 pub mod table_info;
 pub mod table_name;
 // TODO(weny): removes it.
@@ -162,6 +161,16 @@ pub type FlowId = u32;
 /// The partition of flow.
 pub type FlowPartitionId = u32;
 
+lazy_static! {
+    static ref TABLE_INFO_KEY_PATTERN: Regex =
+        Regex::new(&format!("^{TABLE_INFO_KEY_PREFIX}/([0-9]+)$")).unwrap();
+}
+
+lazy_static! {
+    static ref TABLE_ROUTE_KEY_PATTERN: Regex =
+        Regex::new(&format!("^{TABLE_ROUTE_PREFIX}/([0-9]+)$")).unwrap();
+}
+
 lazy_static! {
     static ref DATANODE_TABLE_KEY_PATTERN: Regex =
         Regex::new(&format!("^{DATANODE_TABLE_KEY_PREFIX}/([0-9]+)/([0-9]+)$")).unwrap();
@@ -190,35 +199,39 @@ lazy_static! {
         .unwrap();
 }
 
-pub trait TableMetaKey {
-    fn as_raw_key(&self) -> Vec<u8>;
-}
+/// The key of metadata.
+pub trait MetaKey<'a, T> {
+    fn to_bytes(&self) -> Vec<u8>;
+
+    fn from_bytes(bytes: &'a [u8]) -> Result<T>;
+}
 
-pub(crate) trait TableMetaKeyGetTxnOp {
-    fn build_get_op(
-        &self,
-    ) -> (
-        TxnOp,
-        impl for<'a> FnMut(&'a mut TxnOpGetResponseSet) -> Option<Vec<u8>>,
-    );
+#[derive(Debug, Clone, PartialEq)]
+pub struct BytesAdapter(Vec<u8>);
+
+impl From<Vec<u8>> for BytesAdapter {
+    fn from(value: Vec<u8>) -> Self {
+        Self(value)
+    }
 }
 
-impl TableMetaKey for String {
-    fn as_raw_key(&self) -> Vec<u8> {
-        self.as_bytes().to_vec()
+impl<'a> MetaKey<'a, BytesAdapter> for BytesAdapter {
+    fn to_bytes(&self) -> Vec<u8> {
+        self.0.clone()
+    }
+
+    fn from_bytes(bytes: &'a [u8]) -> Result<BytesAdapter> {
+        Ok(BytesAdapter(bytes.to_vec()))
+    }
 }
 
-impl TableMetaKeyGetTxnOp for String {
+pub(crate) trait TableMetaKeyGetTxnOp {
     fn build_get_op(
         &self,
     ) -> (
         TxnOp,
         impl for<'a> FnMut(&'a mut TxnOpGetResponseSet) -> Option<Vec<u8>>,
-    ) {
-        let key = self.as_raw_key();
-        (TxnOp::Get(key.clone()), TxnOpGetResponseSet::filter(key))
-    }
+    );
 }
 
 pub trait TableMetaValue {
@@ -650,11 +663,11 @@ impl TableMetadataManager {
             .map(|datanode_id| DatanodeTableKey::new(datanode_id, table_id))
             .collect::<Vec<_>>();
 
-        keys.push(table_name.as_raw_key());
-        keys.push(table_info_key.as_raw_key());
-        keys.push(table_route_key.as_raw_key());
+        keys.push(table_name.to_bytes());
+        keys.push(table_info_key.to_bytes());
+        keys.push(table_route_key.to_bytes());
         for key in &datanode_table_keys {
-            keys.push(key.as_raw_key());
+            keys.push(key.to_bytes());
         }
         Ok(keys)
     }
@@ -967,21 +980,6 @@ impl TableMetadataManager {
     }
 }
 
-#[macro_export]
-macro_rules! impl_table_meta_key {
-    ($($val_ty: ty), *) => {
-        $(
-            impl std::fmt::Display for $val_ty {
-                fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-                    write!(f, "{}", String::from_utf8_lossy(&self.as_raw_key()))
-                }
-            }
-        )*
-    }
-}
-
-impl_table_meta_key!(TableNameKey<'_>, TableInfoKey, DatanodeTableKey);
-
 #[macro_export]
 macro_rules! impl_table_meta_value {
     ($($val_ty: ty), *) => {
@@ -999,7 +997,7 @@ macro_rules! impl_table_meta_value {
     }
 }
 
-macro_rules! impl_table_meta_key_get_txn_op {
+macro_rules! impl_meta_key_get_txn_op {
     ($($key: ty), *) => {
         $(
            impl $crate::key::TableMetaKeyGetTxnOp for $key {
@@ -1013,7 +1011,7 @@
                         &'a mut TxnOpGetResponseSet,
                     ) -> Option<Vec<u8>>,
                 ) {
-                    let raw_key = self.as_raw_key();
+                    let raw_key = self.to_bytes();
                     (
                         TxnOp::Get(raw_key.clone()),
                         TxnOpGetResponseSet::filter(raw_key),
@@ -1024,7 +1022,7 @@
     }
 }
 
-impl_table_meta_key_get_txn_op! {
+impl_meta_key_get_txn_op! {
     TableNameKey<'_>,
     TableInfoKey,
     TableRouteKey,
diff --git a/src/common/meta/src/key/catalog_name.rs b/src/common/meta/src/key/catalog_name.rs
index 63873177b1b7..fd67acf34800 100644
--- a/src/common/meta/src/key/catalog_name.rs
+++ b/src/common/meta/src/key/catalog_name.rs
@@ -21,12 +21,15 @@ use serde::{Deserialize, Serialize};
 use snafu::{OptionExt, ResultExt};
 
 use crate::error::{self, Error, InvalidTableMetadataSnafu, Result};
-use crate::key::{TableMetaKey, CATALOG_NAME_KEY_PATTERN, CATALOG_NAME_KEY_PREFIX};
+use crate::key::{MetaKey, CATALOG_NAME_KEY_PATTERN, CATALOG_NAME_KEY_PREFIX};
 use crate::kv_backend::KvBackendRef;
 use crate::range_stream::{PaginationStream, DEFAULT_PAGE_SIZE};
 use crate::rpc::store::RangeRequest;
 use crate::rpc::KeyValue;
 
+/// The catalog name key, indexes all catalog names.
+///
+/// The layout: `__catalog_name/{catalog_name}`
 #[derive(Debug, Clone, Copy, PartialEq)]
 pub struct CatalogNameKey<'a> {
     pub catalog: &'a str,
@@ -53,15 +56,28 @@ impl<'a> CatalogNameKey<'a> {
     }
 }
 
-impl Display for CatalogNameKey<'_> {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        write!(f, "{}/{}", CATALOG_NAME_KEY_PREFIX, self.catalog)
+impl<'a> MetaKey<'a, CatalogNameKey<'a>> for CatalogNameKey<'_> {
+    fn to_bytes(&self) -> Vec<u8> {
+        self.to_string().into_bytes()
+    }
+
+    fn from_bytes(bytes: &'a [u8]) -> Result<CatalogNameKey<'a>> {
+        let key = std::str::from_utf8(bytes).map_err(|e| {
+            InvalidTableMetadataSnafu {
+                err_msg: format!(
+                    "CatalogNameKey '{}' is not a valid UTF8 string: {e}",
+                    String::from_utf8_lossy(bytes)
+                ),
+            }
+            .build()
+        })?;
+        CatalogNameKey::try_from(key)
     }
 }
 
-impl TableMetaKey for CatalogNameKey<'_> {
-    fn as_raw_key(&self) -> Vec<u8> {
-        self.to_string().into_bytes()
+impl Display for CatalogNameKey<'_> {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(f, "{}/{}", CATALOG_NAME_KEY_PREFIX, self.catalog)
     }
 }
 
@@ -82,7 +98,7 @@ impl<'a> TryFrom<&'a str> for CatalogNameKey<'a> {
     }
 }
 
-/// Decodes `KeyValue` to ({catalog},())
+/// Decodes `KeyValue` to {catalog}
 pub fn catalog_decoder(kv: KeyValue) -> Result<String> {
     let str = std::str::from_utf8(&kv.key).context(error::ConvertRawKeySnafu)?;
     let catalog_name = CatalogNameKey::try_from(str)?;
@@ -103,7 +119,7 @@ impl CatalogManager {
     pub async fn create(&self, catalog: CatalogNameKey<'_>, if_not_exists: bool) -> Result<()> {
         let _timer = crate::metrics::METRIC_META_CREATE_CATALOG.start_timer();
 
-        let raw_key = catalog.as_raw_key();
+        let raw_key = catalog.to_bytes();
         let raw_value = CatalogNameValue.try_as_raw_value()?;
         if self
             .kv_backend
@@ -117,7 +133,7 @@ impl CatalogManager {
     }
 
     pub async fn exists(&self, catalog: CatalogNameKey<'_>) -> Result<bool> {
-        let raw_key = catalog.as_raw_key();
+        let raw_key = catalog.to_bytes();
 
         self.kv_backend.exists(&raw_key).await
     }
@@ -148,7 +164,7 @@ mod tests {
 
         assert_eq!(key.to_string(), "__catalog_name/my-catalog");
 
-        let parsed: CatalogNameKey = "__catalog_name/my-catalog".try_into().unwrap();
+        let parsed = CatalogNameKey::from_bytes(b"__catalog_name/my-catalog").unwrap();
 
         assert_eq!(key, parsed);
     }
diff --git a/src/common/meta/src/key/datanode_table.rs b/src/common/meta/src/key/datanode_table.rs
index 96bebb74662e..c20243bfd7d4 100644
--- a/src/common/meta/src/key/datanode_table.rs
+++ b/src/common/meta/src/key/datanode_table.rs
@@ -13,6 +13,7 @@
 // limitations under the License.
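Note: the datanode_table.rs diff below migrates `DatanodeTableKey` from `TableMetaKey`/`strip_table_id` to the new `MetaKey` trait introduced in key.rs above. A round-trip through the two trait methods, distilled from this diff's own tests:

```rust
use common_meta::key::datanode_table::DatanodeTableKey;
use common_meta::key::MetaKey;

#[test]
fn datanode_table_key_roundtrip() {
    let key = DatanodeTableKey::new(11, 21);
    // Serialization goes through the new trait method...
    assert_eq!(key.to_bytes(), b"__dn_table/11/21".to_vec());
    // ...and deserialization recovers the whole key, not just the table id
    // as the removed strip_table_id() did.
    let parsed = DatanodeTableKey::from_bytes(b"__dn_table/11/21").unwrap();
    assert_eq!(parsed, key);
}
```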
use std::collections::HashMap; +use std::fmt::Display; use std::sync::Arc; use futures::stream::BoxStream; @@ -21,10 +22,10 @@ use snafu::OptionExt; use store_api::storage::RegionNumber; use table::metadata::TableId; +use super::MetaKey; use crate::error::{InvalidTableMetadataSnafu, Result}; use crate::key::{ - RegionDistribution, TableMetaKey, TableMetaValue, DATANODE_TABLE_KEY_PATTERN, - DATANODE_TABLE_KEY_PREFIX, + RegionDistribution, TableMetaValue, DATANODE_TABLE_KEY_PATTERN, DATANODE_TABLE_KEY_PREFIX, }; use crate::kv_backend::txn::{Txn, TxnOp}; use crate::kv_backend::KvBackendRef; @@ -54,6 +55,9 @@ pub struct RegionInfo { pub region_wal_options: HashMap, } +/// The key mapping {datanode_id} to {table_id} +/// +/// The layout: `__dn_table/{datanode_id}/{table_id}`. #[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq, Hash)] pub struct DatanodeTableKey { pub datanode_id: DatanodeId, @@ -75,32 +79,42 @@ impl DatanodeTableKey { pub fn range_start_key(datanode_id: DatanodeId) -> String { format!("{}/", Self::prefix(datanode_id)) } +} + +impl<'a> MetaKey<'a, DatanodeTableKey> for DatanodeTableKey { + fn to_bytes(&self) -> Vec { + self.to_string().into_bytes() + } - pub fn strip_table_id(raw_key: &[u8]) -> Result { - let key = String::from_utf8(raw_key.to_vec()).map_err(|e| { + fn from_bytes(bytes: &[u8]) -> Result { + let key = std::str::from_utf8(bytes).map_err(|e| { InvalidTableMetadataSnafu { err_msg: format!( "DatanodeTableKey '{}' is not a valid UTF8 string: {e}", - String::from_utf8_lossy(raw_key) + String::from_utf8_lossy(bytes) ), } .build() })?; let captures = DATANODE_TABLE_KEY_PATTERN - .captures(&key) + .captures(key) .context(InvalidTableMetadataSnafu { err_msg: format!("Invalid DatanodeTableKey '{key}'"), })?; // Safety: pass the regex check above + let datanode_id = captures[1].parse::().unwrap(); let table_id = captures[2].parse::().unwrap(); - Ok(table_id) + Ok(DatanodeTableKey { + datanode_id, + table_id, + }) } } -impl TableMetaKey for DatanodeTableKey { - fn as_raw_key(&self) -> Vec { - format!("{}/{}", Self::prefix(self.datanode_id), self.table_id).into_bytes() +impl Display for DatanodeTableKey { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}/{}", Self::prefix(self.datanode_id), self.table_id) } } @@ -140,7 +154,7 @@ impl DatanodeTableManager { pub async fn get(&self, key: &DatanodeTableKey) -> Result> { self.kv_backend - .get(&key.as_raw_key()) + .get(&key.to_bytes()) .await? .map(|kv| DatanodeTableValue::try_from_raw_value(&kv.value)) .transpose() @@ -190,7 +204,7 @@ impl DatanodeTableManager { }, ); - Ok(TxnOp::Put(key.as_raw_key(), val.try_as_raw_value()?)) + Ok(TxnOp::Put(key.to_bytes(), val.try_as_raw_value()?)) }) .collect::>>()?; @@ -215,7 +229,7 @@ impl DatanodeTableManager { for current_datanode in current_region_distribution.keys() { if !new_region_distribution.contains_key(current_datanode) { let key = DatanodeTableKey::new(*current_datanode, table_id); - let raw_key = key.as_raw_key(); + let raw_key = key.to_bytes(); opts.push(TxnOp::Delete(raw_key)) } } @@ -233,7 +247,7 @@ impl DatanodeTableManager { }; if need_update { let key = DatanodeTableKey::new(datanode, table_id); - let raw_key = key.as_raw_key(); + let raw_key = key.to_bytes(); // FIXME(weny): add unit tests. 
let mut new_region_info = region_info.clone(); if need_update_options { @@ -266,7 +280,7 @@ impl DatanodeTableManager { .into_keys() .map(|datanode_id| { let key = DatanodeTableKey::new(datanode_id, table_id); - let raw_key = key.as_raw_key(); + let raw_key = key.to_bytes(); Ok(TxnOp::Delete(raw_key)) }) @@ -283,12 +297,12 @@ mod tests { use super::*; #[test] - fn test_serde() { + fn test_serialization() { let key = DatanodeTableKey { datanode_id: 1, table_id: 2, }; - let raw_key = key.as_raw_key(); + let raw_key = key.to_bytes(); assert_eq!(raw_key, b"__dn_table/1/2"); let value = DatanodeTableValue { @@ -402,9 +416,9 @@ mod tests { } #[test] - fn test_strip_table_id() { + fn test_deserialization() { fn test_err(raw_key: &[u8]) { - let result = DatanodeTableKey::strip_table_id(raw_key); + let result = DatanodeTableKey::from_bytes(raw_key); assert!(result.is_err()); } @@ -417,7 +431,7 @@ mod tests { test_err(b"__dn_table/invalid_node_id/2"); test_err(b"__dn_table/1/invalid_table_id"); - let table_id = DatanodeTableKey::strip_table_id(b"__dn_table/1/2").unwrap(); - assert_eq!(table_id, 2); + let key = DatanodeTableKey::from_bytes(b"__dn_table/11/21").unwrap(); + assert_eq!(DatanodeTableKey::new(11, 21), key); } } diff --git a/src/common/meta/src/key/flow.rs b/src/common/meta/src/key/flow.rs index cbda6aa88276..44fae286815f 100644 --- a/src/common/meta/src/key/flow.rs +++ b/src/common/meta/src/key/flow.rs @@ -30,9 +30,8 @@ use crate::key::flow::flow_info::FlowInfoManager; use crate::key::flow::flow_name::FlowNameManager; use crate::key::flow::flownode_flow::FlownodeFlowManager; use crate::key::flow::table_flow::TableFlowManager; -use crate::key::scope::MetaKey; use crate::key::txn_helper::TxnOpGetResponseSet; -use crate::key::FlowId; +use crate::key::{FlowId, MetaKey}; use crate::kv_backend::txn::Txn; use crate::kv_backend::KvBackendRef; @@ -59,7 +58,7 @@ impl FlowScoped { } } -impl> MetaKey> for FlowScoped { +impl<'a, T: MetaKey<'a, T>> MetaKey<'a, FlowScoped> for FlowScoped { fn to_bytes(&self) -> Vec { let prefix = FlowScoped::::PREFIX.as_bytes(); let inner = self.inner.to_bytes(); @@ -69,7 +68,7 @@ impl> MetaKey> for FlowScoped { bytes } - fn from_bytes(bytes: &[u8]) -> Result> { + fn from_bytes(bytes: &'a [u8]) -> Result> { let prefix = FlowScoped::::PREFIX.as_bytes(); ensure!( bytes.starts_with(prefix), @@ -139,20 +138,15 @@ impl FlowMetadataManager { .flow_name_manager .build_create_txn(&flow_value.catalog_name, &flow_value.flow_name, flow_id)?; - let (create_flow_txn, on_create_flow_failure) = self.flow_info_manager.build_create_txn( - &flow_value.catalog_name, - flow_id, - &flow_value, - )?; + let (create_flow_txn, on_create_flow_failure) = self + .flow_info_manager + .build_create_txn(flow_id, &flow_value)?; - let create_flownode_flow_txn = self.flownode_flow_manager.build_create_txn( - &flow_value.catalog_name, - flow_id, - flow_value.flownode_ids().clone(), - ); + let create_flownode_flow_txn = self + .flownode_flow_manager + .build_create_txn(flow_id, flow_value.flownode_ids().clone()); let create_table_flow_txn = self.table_flow_manager.build_create_txn( - &flow_value.catalog_name, flow_id, flow_value.flownode_ids().clone(), flow_value.source_table_ids(), @@ -222,7 +216,6 @@ mod tests { use super::*; use crate::key::flow::table_flow::TableFlowKey; - use crate::key::scope::CatalogScoped; use crate::kv_backend::memory::MemoryKvBackend; use crate::table_name::TableName; @@ -231,12 +224,12 @@ mod tests { inner: Vec, } - impl MetaKey for MockKey { + impl<'a> MetaKey<'a, MockKey> for 
MockKey { fn to_bytes(&self) -> Vec { self.inner.clone() } - fn from_bytes(bytes: &[u8]) -> Result { + fn from_bytes(bytes: &'a [u8]) -> Result { Ok(MockKey { inner: bytes.to_vec(), }) @@ -245,27 +238,23 @@ mod tests { #[test] fn test_flow_scoped_to_bytes() { - let key = FlowScoped::new(CatalogScoped::new( - "my_catalog".to_string(), - MockKey { - inner: b"hi".to_vec(), - }, - )); - assert_eq!(b"__flow/my_catalog/hi".to_vec(), key.to_bytes()); + let key = FlowScoped::new(MockKey { + inner: b"hi".to_vec(), + }); + assert_eq!(b"__flow/hi".to_vec(), key.to_bytes()); } #[test] fn test_flow_scoped_from_bytes() { - let bytes = b"__flow/my_catalog/hi"; - let key = FlowScoped::>::from_bytes(bytes).unwrap(); - assert_eq!(key.catalog(), "my_catalog"); + let bytes = b"__flow/hi"; + let key = FlowScoped::::from_bytes(bytes).unwrap(); assert_eq!(key.inner.inner, b"hi".to_vec()); } #[test] fn test_flow_scoped_from_bytes_mismatch() { - let bytes = b"__table/my_catalog/hi"; - let err = FlowScoped::>::from_bytes(bytes).unwrap_err(); + let bytes = b"__table/hi"; + let err = FlowScoped::::from_bytes(bytes).unwrap_err(); assert_matches!(err, error::Error::MismatchPrefix { .. }); } @@ -302,14 +291,14 @@ mod tests { .unwrap(); let got = flow_metadata_manager .flow_info_manager() - .get(catalog_name, flow_id) + .get(flow_id) .await .unwrap() .unwrap(); assert_eq!(got, flow_value); let flows = flow_metadata_manager .flownode_flow_manager() - .flows(catalog_name, 1) + .flows(1) .try_collect::>() .await .unwrap(); @@ -317,20 +306,11 @@ mod tests { for table_id in [1024, 1025, 1026] { let nodes = flow_metadata_manager .table_flow_manager() - .nodes(catalog_name, table_id) + .nodes(table_id) .try_collect::>() .await .unwrap(); - assert_eq!( - nodes, - vec![TableFlowKey::new( - catalog_name.to_string(), - table_id, - 1, - flow_id, - 0 - )] - ); + assert_eq!(nodes, vec![TableFlowKey::new(table_id, 1, flow_id, 0)]); } } diff --git a/src/common/meta/src/key/flow/flow_info.rs b/src/common/meta/src/key/flow/flow_info.rs index f9b9ae4b259d..20d0e4598780 100644 --- a/src/common/meta/src/key/flow/flow_info.rs +++ b/src/common/meta/src/key/flow/flow_info.rs @@ -22,9 +22,10 @@ use table::metadata::TableId; use crate::error::{self, Result}; use crate::key::flow::FlowScoped; -use crate::key::scope::{CatalogScoped, MetaKey}; use crate::key::txn_helper::TxnOpGetResponseSet; -use crate::key::{txn_helper, DeserializedValueWithBytes, FlowId, FlowPartitionId, TableMetaValue}; +use crate::key::{ + txn_helper, DeserializedValueWithBytes, FlowId, FlowPartitionId, MetaKey, TableMetaValue, +}; use crate::kv_backend::txn::Txn; use crate::kv_backend::KvBackendRef; use crate::table_name::TableName; @@ -39,31 +40,26 @@ lazy_static! { /// The key stores the metadata of the flow. /// -/// The layout: `__flow/{catalog}/info/{flow_id}`. -pub struct FlowInfoKey(FlowScoped>); +/// The layout: `__flow/info/{flow_id}`. +pub struct FlowInfoKey(FlowScoped); -impl MetaKey for FlowInfoKey { +impl<'a> MetaKey<'a, FlowInfoKey> for FlowInfoKey { fn to_bytes(&self) -> Vec { self.0.to_bytes() } - fn from_bytes(bytes: &[u8]) -> Result { - Ok(FlowInfoKey( - FlowScoped::>::from_bytes(bytes)?, - )) + fn from_bytes(bytes: &'a [u8]) -> Result { + Ok(FlowInfoKey(FlowScoped::::from_bytes( + bytes, + )?)) } } impl FlowInfoKey { /// Returns the [FlowInfoKey]. 
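Note: dropping the `{catalog}` path segment changes the flow-info layout from `__flow/{catalog}/info/{flow_id}` to `__flow/info/{flow_id}`, since flow ids are allocated globally. Distilled from this diff's tests (module paths as they appear in this repo):

```rust
use common_meta::key::flow::flow_info::FlowInfoKey;
use common_meta::key::MetaKey;

#[test]
fn flow_info_key_is_catalog_free() {
    let key = FlowInfoKey::new(2);
    assert_eq!(key.to_bytes(), b"__flow/info/2".to_vec());

    let parsed = FlowInfoKey::from_bytes(b"__flow/info/2").unwrap();
    assert_eq!(parsed.flow_id(), 2);
}
```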
- pub fn new(catalog: String, flow_id: FlowId) -> FlowInfoKey { + pub fn new(flow_id: FlowId) -> FlowInfoKey { let inner = FlowInfoKeyInner::new(flow_id); - FlowInfoKey(FlowScoped::new(CatalogScoped::new(catalog, inner))) - } - - /// Returns the catalog. - pub fn catalog(&self) -> &str { - self.0.catalog() + FlowInfoKey(FlowScoped::new(inner)) } /// Returns the [FlowId]. @@ -85,12 +81,12 @@ impl FlowInfoKeyInner { } } -impl MetaKey for FlowInfoKeyInner { +impl<'a> MetaKey<'a, FlowInfoKeyInner> for FlowInfoKeyInner { fn to_bytes(&self) -> Vec { format!("{FLOW_INFO_KEY_PREFIX}/{}", self.flow_id).into_bytes() } - fn from_bytes(bytes: &[u8]) -> Result { + fn from_bytes(bytes: &'a [u8]) -> Result { let key = std::str::from_utf8(bytes).map_err(|e| { error::InvalidTableMetadataSnafu { err_msg: format!( @@ -159,8 +155,8 @@ impl FlowInfoManager { } /// Returns the [FlowInfoValue] of specified `flow_id`. - pub async fn get(&self, catalog: &str, flow_id: FlowId) -> Result> { - let key = FlowInfoKey::new(catalog.to_string(), flow_id).to_bytes(); + pub async fn get(&self, flow_id: FlowId) -> Result> { + let key = FlowInfoKey::new(flow_id).to_bytes(); self.kv_backend .get(&key) .await? @@ -169,11 +165,10 @@ impl FlowInfoManager { } /// Builds a create flow transaction. - /// It is expected that the `__flow/{catalog}/info/{flow_id}` wasn't occupied. + /// It is expected that the `__flow/info/{flow_id}` wasn't occupied. /// Otherwise, the transaction will retrieve existing value. pub(crate) fn build_create_txn( &self, - catalog: &str, flow_id: FlowId, flow_value: &FlowInfoValue, ) -> Result<( @@ -182,7 +177,7 @@ impl FlowInfoManager { &mut TxnOpGetResponseSet, ) -> Result>>, )> { - let key = FlowInfoKey::new(catalog.to_string(), flow_id).to_bytes(); + let key = FlowInfoKey::new(flow_id).to_bytes(); let txn = txn_helper::build_put_if_absent_txn(key.clone(), flow_value.try_as_raw_value()?); Ok(( @@ -198,15 +193,14 @@ mod tests { #[test] fn test_key_serialization() { - let flow_info = FlowInfoKey::new("my_catalog".to_string(), 2); - assert_eq!(b"__flow/my_catalog/info/2".to_vec(), flow_info.to_bytes()); + let flow_info = FlowInfoKey::new(2); + assert_eq!(b"__flow/info/2".to_vec(), flow_info.to_bytes()); } #[test] fn test_key_deserialization() { - let bytes = b"__flow/my_catalog/info/2".to_vec(); + let bytes = b"__flow/info/2".to_vec(); let key = FlowInfoKey::from_bytes(&bytes).unwrap(); - assert_eq!(key.catalog(), "my_catalog"); assert_eq!(key.flow_id(), 2); } } diff --git a/src/common/meta/src/key/flow/flow_name.rs b/src/common/meta/src/key/flow/flow_name.rs index dbb6d81c35b1..24608498c4f1 100644 --- a/src/common/meta/src/key/flow/flow_name.rs +++ b/src/common/meta/src/key/flow/flow_name.rs @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +use api::v1::flow::flow_server::Flow; use lazy_static::lazy_static; use regex::Regex; use serde::{Deserialize, Serialize}; @@ -19,66 +20,74 @@ use snafu::OptionExt; use crate::error::{self, Result}; use crate::key::flow::FlowScoped; -use crate::key::scope::{CatalogScoped, MetaKey}; use crate::key::txn_helper::TxnOpGetResponseSet; -use crate::key::{txn_helper, DeserializedValueWithBytes, FlowId, TableMetaValue, NAME_PATTERN}; +use crate::key::{ + txn_helper, DeserializedValueWithBytes, FlowId, MetaKey, TableMetaValue, NAME_PATTERN, +}; use crate::kv_backend::txn::Txn; use crate::kv_backend::KvBackendRef; const FLOW_NAME_KEY_PREFIX: &str = "name"; lazy_static! 
{ - static ref FLOW_NAME_KEY_PATTERN: Regex = - Regex::new(&format!("^{FLOW_NAME_KEY_PREFIX}/({NAME_PATTERN})$")).unwrap(); + static ref FLOW_NAME_KEY_PATTERN: Regex = Regex::new(&format!( + "^{FLOW_NAME_KEY_PREFIX}/({NAME_PATTERN})/({NAME_PATTERN})$" + )) + .unwrap(); } /// The key of mapping {flow_name} to [FlowId]. /// -/// The layout: `__flow/{catalog}/name/{flow_name}`. -pub struct FlowNameKey(FlowScoped>); +/// The layout: `__flow/name/{catalog_name}/{flow_name}`. +pub struct FlowNameKey<'a>(FlowScoped>); -impl FlowNameKey { +impl<'a> FlowNameKey<'a> { /// Returns the [FlowNameKey] - pub fn new(catalog: String, flow_name: String) -> FlowNameKey { - let inner = FlowNameKeyInner::new(flow_name); - FlowNameKey(FlowScoped::new(CatalogScoped::new(catalog, inner))) + pub fn new(catalog: &'a str, flow_name: &'a str) -> FlowNameKey<'a> { + let inner = FlowNameKeyInner::new(catalog, flow_name); + FlowNameKey(FlowScoped::new(inner)) } /// Returns the catalog. pub fn catalog(&self) -> &str { - self.0.catalog() + self.0.catalog_name } /// Return the `flow_name` pub fn flow_name(&self) -> &str { - &self.0.flow_name + self.0.flow_name } } -impl MetaKey for FlowNameKey { +impl<'a> MetaKey<'a, FlowNameKey<'a>> for FlowNameKey<'a> { fn to_bytes(&self) -> Vec { self.0.to_bytes() } - fn from_bytes(bytes: &[u8]) -> Result { - Ok(FlowNameKey( - FlowScoped::>::from_bytes(bytes)?, - )) + fn from_bytes(bytes: &'a [u8]) -> Result> { + Ok(FlowNameKey(FlowScoped::::from_bytes( + bytes, + )?)) } } /// The key of mapping name to [FlowId] #[derive(Debug, Clone, PartialEq, Eq)] -pub struct FlowNameKeyInner { - pub flow_name: String, +pub struct FlowNameKeyInner<'a> { + pub catalog_name: &'a str, + pub flow_name: &'a str, } -impl MetaKey for FlowNameKeyInner { +impl<'a> MetaKey<'a, FlowNameKeyInner<'a>> for FlowNameKeyInner<'_> { fn to_bytes(&self) -> Vec { - format!("{FLOW_NAME_KEY_PREFIX}/{}", self.flow_name).into_bytes() + format!( + "{FLOW_NAME_KEY_PREFIX}/{}/{}", + self.catalog_name, self.flow_name + ) + .into_bytes() } - fn from_bytes(bytes: &[u8]) -> Result { + fn from_bytes(bytes: &'a [u8]) -> Result { let key = std::str::from_utf8(bytes).map_err(|e| { error::InvalidTableMetadataSnafu { err_msg: format!( @@ -95,15 +104,22 @@ impl MetaKey for FlowNameKeyInner { err_msg: format!("Invalid FlowNameKeyInner '{key}'"), })?; // Safety: pass the regex check above - let flow_name = captures[1].to_string(); - Ok(FlowNameKeyInner { flow_name }) + let catalog_name = captures.get(1).unwrap().as_str(); + let flow_name = captures.get(2).unwrap().as_str(); + Ok(FlowNameKeyInner { + catalog_name, + flow_name, + }) } } -impl FlowNameKeyInner { +impl<'a> FlowNameKeyInner<'a> { /// Returns a [FlowNameKeyInner]. - pub fn new(flow_name: String) -> Self { - Self { flow_name } + pub fn new(catalog_name: &'a str, flow_name: &'a str) -> Self { + Self { + catalog_name, + flow_name, + } } } @@ -138,7 +154,7 @@ impl FlowNameManager { /// Returns the [FlowNameValue] of specified `catalog.flow`. pub async fn get(&self, catalog: &str, flow: &str) -> Result> { - let key = FlowNameKey::new(catalog.to_string(), flow.to_string()); + let key = FlowNameKey::new(catalog, flow); let raw_key = key.to_bytes(); self.kv_backend .get(&raw_key) @@ -149,18 +165,18 @@ impl FlowNameManager { /// Returns true if the `flow` exists. 
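Note: for flow names the catalog is not dropped but relocated into the key body, so names remain unique per catalog while the outer scope wrapper disappears, and the key can now borrow both strings instead of owning them. A sketch based on this diff's tests:

```rust
use common_meta::key::flow::flow_name::FlowNameKey;
use common_meta::key::MetaKey;

#[test]
fn flow_name_key_layout() {
    // Catalog moves from the scope prefix into the key body:
    // __flow/name/{catalog}/{flow_name}
    let key = FlowNameKey::new("my_catalog", "my_flow");
    assert_eq!(key.to_bytes(), b"__flow/name/my_catalog/my_flow".to_vec());

    let parsed = FlowNameKey::from_bytes(b"__flow/name/my_catalog/my_flow").unwrap();
    assert_eq!(parsed.catalog(), "my_catalog");
    assert_eq!(parsed.flow_name(), "my_flow");
}
```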
pub async fn exists(&self, catalog: &str, flow: &str) -> Result { - let key = FlowNameKey::new(catalog.to_string(), flow.to_string()); + let key = FlowNameKey::new(catalog, flow); let raw_key = key.to_bytes(); self.kv_backend.exists(&raw_key).await } /// Builds a create flow name transaction. - /// It's expected that the `__flow/{catalog}/name/{flow_name}` wasn't occupied. + /// It's expected that the `__flow/name/{catalog}/{flow_name}` wasn't occupied. /// Otherwise, the transaction will retrieve existing value. pub fn build_create_txn( &self, - catalog: &str, - name: &str, + catalog_name: &str, + flow_name: &str, flow_id: FlowId, ) -> Result<( Txn, @@ -168,7 +184,7 @@ impl FlowNameManager { &mut TxnOpGetResponseSet, ) -> Result>>, )> { - let key = FlowNameKey::new(catalog.to_string(), name.to_string()); + let key = FlowNameKey::new(catalog_name, flow_name); let raw_key = key.to_bytes(); let flow_flow_name_value = FlowNameValue::new(flow_id); let txn = txn_helper::build_put_if_absent_txn( @@ -189,13 +205,13 @@ mod tests { #[test] fn test_key_serialization() { - let key = FlowNameKey::new("my_catalog".to_string(), "my_task".to_string()); - assert_eq!(b"__flow/my_catalog/name/my_task".to_vec(), key.to_bytes(),); + let key = FlowNameKey::new("my_catalog", "my_task"); + assert_eq!(b"__flow/name/my_catalog/my_task".to_vec(), key.to_bytes(),); } #[test] fn test_key_deserialization() { - let bytes = b"__flow/my_catalog/name/my_task".to_vec(); + let bytes = b"__flow/name/my_catalog/my_task".to_vec(); let key = FlowNameKey::from_bytes(&bytes).unwrap(); assert_eq!(key.catalog(), "my_catalog"); assert_eq!(key.flow_name(), "my_task"); diff --git a/src/common/meta/src/key/flow/flownode_flow.rs b/src/common/meta/src/key/flow/flownode_flow.rs index 360b96b0f56f..cffafaa870c7 100644 --- a/src/common/meta/src/key/flow/flownode_flow.rs +++ b/src/common/meta/src/key/flow/flownode_flow.rs @@ -22,8 +22,7 @@ use snafu::OptionExt; use crate::error::{self, Result}; use crate::key::flow::FlowScoped; -use crate::key::scope::{BytesAdapter, CatalogScoped, MetaKey}; -use crate::key::{FlowId, FlowPartitionId}; +use crate::key::{BytesAdapter, FlowId, FlowPartitionId, MetaKey}; use crate::kv_backend::txn::{Txn, TxnOp}; use crate::kv_backend::KvBackendRef; use crate::range_stream::{PaginationStream, DEFAULT_PAGE_SIZE}; @@ -42,46 +41,38 @@ const FLOWNODE_FLOW_KEY_PREFIX: &str = "flownode"; /// The key of mapping [FlownodeId] to [FlowId]. /// -/// The layout `__flow/{catalog}/flownode/{flownode_id}/{flow_id}/{partition_id}` -pub struct FlownodeFlowKey(FlowScoped>); +/// The layout `__flow/flownode/{flownode_id}/{flow_id}/{partition_id}` +pub struct FlownodeFlowKey(FlowScoped); -impl MetaKey for FlownodeFlowKey { +impl<'a> MetaKey<'a, FlownodeFlowKey> for FlownodeFlowKey { fn to_bytes(&self) -> Vec { self.0.to_bytes() } - fn from_bytes(bytes: &[u8]) -> Result { - Ok(FlownodeFlowKey(FlowScoped::< - CatalogScoped, - >::from_bytes(bytes)?)) + fn from_bytes(bytes: &'a [u8]) -> Result { + Ok(FlownodeFlowKey( + FlowScoped::::from_bytes(bytes)?, + )) } } impl FlownodeFlowKey { /// Returns a new [FlownodeFlowKey]. pub fn new( - catalog: String, flownode_id: FlownodeId, flow_id: FlowId, partition_id: FlowPartitionId, ) -> FlownodeFlowKey { let inner = FlownodeFlowKeyInner::new(flownode_id, flow_id, partition_id); - FlownodeFlowKey(FlowScoped::new(CatalogScoped::new(catalog, inner))) + FlownodeFlowKey(FlowScoped::new(inner)) } /// The prefix used to retrieve all [FlownodeFlowKey]s with the specified `flownode_id`. 
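Note: with the catalog segment gone, the per-flownode listing stays a plain prefix scan; every flow a flownode runs shares the `__flow/flownode/{flownode_id}/` prefix, so `flows()` below is a single paginated range request. From the tests in this diff:

```rust
use common_meta::key::flow::flownode_flow::FlownodeFlowKey;
use common_meta::key::MetaKey;

#[test]
fn flownode_flow_prefix_scan() {
    // Full key: __flow/flownode/{flownode_id}/{flow_id}/{partition_id}
    let key = FlownodeFlowKey::new(1, 2, 0);
    assert_eq!(key.to_bytes(), b"__flow/flownode/1/2/0".to_vec());

    // Everything flownode 1 runs shares one prefix, so listing its flows
    // starts the range scan here.
    assert_eq!(
        FlownodeFlowKey::range_start_key(1),
        b"__flow/flownode/1/".to_vec()
    );
}
```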
- pub fn range_start_key(catalog: String, flownode_id: FlownodeId) -> Vec { - let catalog_scoped_key = CatalogScoped::new( - catalog, - BytesAdapter::from(FlownodeFlowKeyInner::range_start_key(flownode_id).into_bytes()), - ); - - FlowScoped::new(catalog_scoped_key).to_bytes() - } + pub fn range_start_key(flownode_id: FlownodeId) -> Vec { + let inner = + BytesAdapter::from(FlownodeFlowKeyInner::range_start_key(flownode_id).into_bytes()); - /// Returns the catalog. - pub fn catalog(&self) -> &str { - self.0.catalog() + FlowScoped::new(inner).to_bytes() } /// Returns the [FlowId]. @@ -127,7 +118,7 @@ impl FlownodeFlowKeyInner { } } -impl MetaKey for FlownodeFlowKeyInner { +impl<'a> MetaKey<'a, FlownodeFlowKeyInner> for FlownodeFlowKeyInner { fn to_bytes(&self) -> Vec { format!( "{FLOWNODE_FLOW_KEY_PREFIX}/{}/{}/{}", @@ -136,7 +127,7 @@ impl MetaKey for FlownodeFlowKeyInner { .into_bytes() } - fn from_bytes(bytes: &[u8]) -> Result { + fn from_bytes(bytes: &'a [u8]) -> Result { let key = std::str::from_utf8(bytes).map_err(|e| { error::InvalidTableMetadataSnafu { err_msg: format!( @@ -184,10 +175,9 @@ impl FlownodeFlowManager { /// Retrieves all [FlowId] and [FlowPartitionId]s of the specified `flownode_id`. pub fn flows( &self, - catalog: &str, flownode_id: FlownodeId, ) -> BoxStream<'static, Result<(FlowId, FlowPartitionId)>> { - let start_key = FlownodeFlowKey::range_start_key(catalog.to_string(), flownode_id); + let start_key = FlownodeFlowKey::range_start_key(flownode_id); let req = RangeRequest::new().with_prefix(start_key); let stream = PaginationStream::new( @@ -205,16 +195,13 @@ impl FlownodeFlowManager { /// Puts `__flownode_flow/{flownode_id}/{flow_id}/{partition_id}` keys. pub(crate) fn build_create_txn>( &self, - catalog: &str, flow_id: FlowId, flownode_ids: I, ) -> Txn { let txns = flownode_ids .into_iter() .map(|(partition_id, flownode_id)| { - let key = - FlownodeFlowKey::new(catalog.to_string(), flownode_id, flow_id, partition_id) - .to_bytes(); + let key = FlownodeFlowKey::new(flownode_id, flow_id, partition_id).to_bytes(); TxnOp::Put(key, vec![]) }) .collect::>(); @@ -226,24 +213,20 @@ impl FlownodeFlowManager { #[cfg(test)] mod tests { use crate::key::flow::flownode_flow::FlownodeFlowKey; - use crate::key::scope::MetaKey; + use crate::key::MetaKey; #[test] fn test_key_serialization() { - let flownode_flow = FlownodeFlowKey::new("my_catalog".to_string(), 1, 2, 0); - assert_eq!( - b"__flow/my_catalog/flownode/1/2/0".to_vec(), - flownode_flow.to_bytes() - ); - let prefix = FlownodeFlowKey::range_start_key("my_catalog".to_string(), 1); - assert_eq!(b"__flow/my_catalog/flownode/1/".to_vec(), prefix); + let flownode_flow = FlownodeFlowKey::new(1, 2, 0); + assert_eq!(b"__flow/flownode/1/2/0".to_vec(), flownode_flow.to_bytes()); + let prefix = FlownodeFlowKey::range_start_key(1); + assert_eq!(b"__flow/flownode/1/".to_vec(), prefix); } #[test] fn test_key_deserialization() { - let bytes = b"__flow/my_catalog/flownode/1/2/0".to_vec(); + let bytes = b"__flow/flownode/1/2/0".to_vec(); let key = FlownodeFlowKey::from_bytes(&bytes).unwrap(); - assert_eq!(key.catalog(), "my_catalog"); assert_eq!(key.flownode_id(), 1); assert_eq!(key.flow_id(), 2); assert_eq!(key.partition_id(), 0); diff --git a/src/common/meta/src/key/flow/table_flow.rs b/src/common/meta/src/key/flow/table_flow.rs index d3cabd86f276..9fdde5c95c8c 100644 --- a/src/common/meta/src/key/flow/table_flow.rs +++ b/src/common/meta/src/key/flow/table_flow.rs @@ -22,8 +22,7 @@ use table::metadata::TableId; use crate::error::{self, 
Result}; use crate::key::flow::FlowScoped; -use crate::key::scope::{BytesAdapter, CatalogScoped, MetaKey}; -use crate::key::{FlowId, FlowPartitionId}; +use crate::key::{BytesAdapter, FlowId, FlowPartitionId, MetaKey}; use crate::kv_backend::txn::{Txn, TxnOp}; use crate::kv_backend::KvBackendRef; use crate::range_stream::{PaginationStream, DEFAULT_PAGE_SIZE}; @@ -51,48 +50,39 @@ struct TableFlowKeyInner { /// The key of mapping [TableId] to [FlownodeId] and [FlowId]. /// -/// The layout: `__flow/{catalog}/table/{table_id}/{flownode_id}/{flow_id}/{partition_id}`. +/// The layout: `__flow/source_table/{table_id}/{flownode_id}/{flow_id}/{partition_id}`. #[derive(Debug, PartialEq)] -pub struct TableFlowKey(FlowScoped>); +pub struct TableFlowKey(FlowScoped); -impl MetaKey for TableFlowKey { +impl<'a> MetaKey<'a, TableFlowKey> for TableFlowKey { fn to_bytes(&self) -> Vec { self.0.to_bytes() } - fn from_bytes(bytes: &[u8]) -> Result { - Ok(TableFlowKey( - FlowScoped::>::from_bytes(bytes)?, - )) + fn from_bytes(bytes: &'a [u8]) -> Result { + Ok(TableFlowKey(FlowScoped::::from_bytes( + bytes, + )?)) } } impl TableFlowKey { /// Returns a new [TableFlowKey]. pub fn new( - catalog: String, table_id: TableId, flownode_id: FlownodeId, flow_id: FlowId, partition_id: FlowPartitionId, ) -> TableFlowKey { let inner = TableFlowKeyInner::new(table_id, flownode_id, flow_id, partition_id); - TableFlowKey(FlowScoped::new(CatalogScoped::new(catalog, inner))) + TableFlowKey(FlowScoped::new(inner)) } /// The prefix used to retrieve all [TableFlowKey]s with the specified `table_id`. - pub fn range_start_key(catalog: String, table_id: TableId) -> Vec { - let catalog_scoped_key = CatalogScoped::new( - catalog, - BytesAdapter::from(TableFlowKeyInner::range_start_key(table_id).into_bytes()), - ); - - FlowScoped::new(catalog_scoped_key).to_bytes() - } + pub fn range_start_key(table_id: TableId) -> Vec { + let inner = BytesAdapter::from(TableFlowKeyInner::range_start_key(table_id).into_bytes()); - /// Returns the catalog. - pub fn catalog(&self) -> &str { - self.0.catalog() + FlowScoped::new(inner).to_bytes() } /// Returns the source [TableId]. @@ -142,7 +132,7 @@ impl TableFlowKeyInner { } } -impl MetaKey for TableFlowKeyInner { +impl<'a> MetaKey<'a, TableFlowKeyInner> for TableFlowKeyInner { fn to_bytes(&self) -> Vec { format!( "{TABLE_FLOW_KEY_PREFIX}/{}/{}/{}/{}", @@ -151,7 +141,7 @@ impl MetaKey for TableFlowKeyInner { .into_bytes() } - fn from_bytes(bytes: &[u8]) -> Result { + fn from_bytes(bytes: &'a [u8]) -> Result { let key = std::str::from_utf8(bytes).map_err(|e| { error::InvalidTableMetadataSnafu { err_msg: format!( @@ -198,12 +188,8 @@ impl TableFlowManager { } /// Retrieves all [TableFlowKey]s of the specified `table_id`. - pub fn nodes( - &self, - catalog: &str, - table_id: TableId, - ) -> BoxStream<'static, Result> { - let start_key = TableFlowKey::range_start_key(catalog.to_string(), table_id); + pub fn nodes(&self, table_id: TableId) -> BoxStream<'static, Result> { + let start_key = TableFlowKey::range_start_key(table_id); let req = RangeRequest::new().with_prefix(start_key); let stream = PaginationStream::new( self.kv_backend.clone(), @@ -217,10 +203,9 @@ impl TableFlowManager { /// Builds a create table flow transaction. /// - /// Puts `__table_flow/{table_id}/{node_id}/{partition_id}` keys. + /// Puts `__flow/source_table/{table_id}/{node_id}/{partition_id}` keys. 
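Note: table-flow keys fan out the same way, one key per (source table, flownode, flow, partition) tuple, under the corrected `__flow/source_table/` prefix. From this diff's tests:

```rust
use common_meta::key::flow::table_flow::TableFlowKey;
use common_meta::key::MetaKey;

#[test]
fn table_flow_fanout_keys() {
    // Full key: __flow/source_table/{table_id}/{flownode_id}/{flow_id}/{partition_id}
    let key = TableFlowKey::new(1024, 1, 2, 0);
    assert_eq!(key.to_bytes(), b"__flow/source_table/1024/1/2/0".to_vec());

    // nodes() uses this prefix to map a source table to the flownodes
    // consuming it, e.g. when a Flownode boots.
    assert_eq!(
        TableFlowKey::range_start_key(1024),
        b"__flow/source_table/1024/".to_vec()
    );
}
```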
pub fn build_create_txn>( &self, - catalog: &str, flow_id: FlowId, flownode_ids: I, source_table_ids: &[TableId], @@ -230,14 +215,7 @@ impl TableFlowManager { .flat_map(|(partition_id, flownode_id)| { source_table_ids.iter().map(move |table_id| { TxnOp::Put( - TableFlowKey::new( - catalog.to_string(), - *table_id, - flownode_id, - flow_id, - partition_id, - ) - .to_bytes(), + TableFlowKey::new(*table_id, flownode_id, flow_id, partition_id).to_bytes(), vec![], ) }) @@ -254,20 +232,19 @@ mod tests { #[test] fn test_key_serialization() { - let table_flow_key = TableFlowKey::new("my_catalog".to_string(), 1024, 1, 2, 0); + let table_flow_key = TableFlowKey::new(1024, 1, 2, 0); assert_eq!( - b"__flow/my_catalog/source_table/1024/1/2/0".to_vec(), + b"__flow/source_table/1024/1/2/0".to_vec(), table_flow_key.to_bytes(), ); - let prefix = TableFlowKey::range_start_key("my_catalog".to_string(), 1024); - assert_eq!(b"__flow/my_catalog/source_table/1024/".to_vec(), prefix); + let prefix = TableFlowKey::range_start_key(1024); + assert_eq!(b"__flow/source_table/1024/".to_vec(), prefix); } #[test] fn test_key_deserialization() { - let bytes = b"__flow/my_catalog/source_table/1024/1/2/0".to_vec(); + let bytes = b"__flow/source_table/1024/1/2/0".to_vec(); let key = TableFlowKey::from_bytes(&bytes).unwrap(); - assert_eq!(key.catalog(), "my_catalog"); assert_eq!(key.source_table_id(), 1024); assert_eq!(key.flownode_id(), 1); assert_eq!(key.flow_id(), 2); diff --git a/src/common/meta/src/key/schema_name.rs b/src/common/meta/src/key/schema_name.rs index f56adbaec440..91c4c74bc104 100644 --- a/src/common/meta/src/key/schema_name.rs +++ b/src/common/meta/src/key/schema_name.rs @@ -24,7 +24,7 @@ use serde::{Deserialize, Serialize}; use snafu::{OptionExt, ResultExt}; use crate::error::{self, Error, InvalidTableMetadataSnafu, ParseOptionSnafu, Result}; -use crate::key::{TableMetaKey, SCHEMA_NAME_KEY_PATTERN, SCHEMA_NAME_KEY_PREFIX}; +use crate::key::{MetaKey, SCHEMA_NAME_KEY_PATTERN, SCHEMA_NAME_KEY_PREFIX}; use crate::kv_backend::KvBackendRef; use crate::range_stream::{PaginationStream, DEFAULT_PAGE_SIZE}; use crate::rpc::store::RangeRequest; @@ -32,6 +32,9 @@ use crate::rpc::KeyValue; const OPT_KEY_TTL: &str = "ttl"; +/// The schema name key, indices all schema names belong to the {catalog_name} +/// +/// The layout: `__schema_name/{catalog_name}/{schema_name}`. 
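All of these key layouts are consumed the same way: `range_start_key` produces a plain byte prefix, and the kv backend returns every key sorted under it, as the prefix assertions in the tests above exercise. A std-only illustration of that contract follows, with a `BTreeMap` standing in for the sorted keyspace (the real code wraps the scan in a `PaginationStream`):

```rust
use std::collections::BTreeMap;

fn range_start_key(table_id: u32) -> Vec<u8> {
    format!("__flow/source_table/{table_id}/").into_bytes()
}

fn main() {
    // The BTreeMap stands in for the kv backend's sorted keyspace.
    let mut kv = BTreeMap::new();
    kv.insert(b"__flow/source_table/1024/1/2/0".to_vec(), Vec::<u8>::new());
    kv.insert(b"__flow/source_table/1024/1/2/1".to_vec(), Vec::new());
    kv.insert(b"__flow/source_table/2048/1/3/0".to_vec(), Vec::new());

    let prefix = range_start_key(1024);
    let hits: Vec<String> = kv
        .range(prefix.clone()..)
        .take_while(|(k, _)| k.starts_with(&prefix))
        .map(|(k, _)| String::from_utf8_lossy(k).into_owned())
        .collect();

    // Only table 1024's keys fall under the prefix.
    assert_eq!(hits.len(), 2);
    assert!(hits.iter().all(|k| k.starts_with("__flow/source_table/1024/")));
}
```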
#[derive(Debug, Clone, Copy, PartialEq)] pub struct SchemaNameKey<'a> { pub catalog: &'a str, @@ -95,13 +98,26 @@ impl Display for SchemaNameKey<'_> { } } -impl TableMetaKey for SchemaNameKey<'_> { - fn as_raw_key(&self) -> Vec { +impl<'a> MetaKey<'a, SchemaNameKey<'a>> for SchemaNameKey<'_> { + fn to_bytes(&self) -> Vec { self.to_string().into_bytes() } + + fn from_bytes(bytes: &'a [u8]) -> Result> { + let key = std::str::from_utf8(bytes).map_err(|e| { + InvalidTableMetadataSnafu { + err_msg: format!( + "SchemaNameKey '{}' is not a valid UTF8 string: {e}", + String::from_utf8_lossy(bytes) + ), + } + .build() + })?; + SchemaNameKey::try_from(key) + } } -/// Decodes `KeyValue` to ({schema},()) +/// Decodes `KeyValue` to {schema} pub fn schema_decoder(kv: KeyValue) -> Result { let str = std::str::from_utf8(&kv.key).context(error::ConvertRawKeySnafu)?; let schema_name = SchemaNameKey::try_from(str)?; @@ -145,7 +161,7 @@ impl SchemaManager { ) -> Result<()> { let _timer = crate::metrics::METRIC_META_CREATE_SCHEMA.start_timer(); - let raw_key = schema.as_raw_key(); + let raw_key = schema.to_bytes(); let raw_value = value.unwrap_or_default().try_as_raw_value()?; if self .kv_backend @@ -159,13 +175,13 @@ impl SchemaManager { } pub async fn exists(&self, schema: SchemaNameKey<'_>) -> Result { - let raw_key = schema.as_raw_key(); + let raw_key = schema.to_bytes(); self.kv_backend.exists(&raw_key).await } pub async fn get(&self, schema: SchemaNameKey<'_>) -> Result> { - let raw_key = schema.as_raw_key(); + let raw_key = schema.to_bytes(); let value = self.kv_backend.get(&raw_key).await?; value .and_then(|v| SchemaNameValue::try_from_raw_value(v.value.as_ref()).transpose()) @@ -174,7 +190,7 @@ impl SchemaManager { /// Deletes a [SchemaNameKey]. pub async fn delete(&self, schema: SchemaNameKey<'_>) -> Result<()> { - let raw_key = schema.as_raw_key(); + let raw_key = schema.to_bytes(); self.kv_backend.delete(&raw_key, false).await?; Ok(()) @@ -222,7 +238,8 @@ mod tests { let key = SchemaNameKey::new("my-catalog", "my-schema"); assert_eq!(key.to_string(), "__schema_name/my-catalog/my-schema"); - let parsed: SchemaNameKey<'_> = "__schema_name/my-catalog/my-schema".try_into().unwrap(); + let parsed = SchemaNameKey::from_bytes(b"__schema_name/my-catalog/my-schema").unwrap(); + assert_eq!(key, parsed); let value = SchemaNameValue { diff --git a/src/common/meta/src/key/scope.rs b/src/common/meta/src/key/scope.rs deleted file mode 100644 index 7f185a81d326..000000000000 --- a/src/common/meta/src/key/scope.rs +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright 2023 Greptime Team -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use std::ops::Deref; - -use snafu::OptionExt; - -use crate::error::{self, Result}; - -/// The delimiter of key. -pub(crate) const DELIMITER: u8 = b'/'; - -/// The key of metadata. -pub trait MetaKey { - fn to_bytes(&self) -> Vec; - - fn from_bytes(bytes: &[u8]) -> Result; -} - -/// The key of `{catalog}/` scope. 
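For reference, the catalog scoping that this deletion removes amounted to splitting the key at the first `/` delimiter and handing the remainder to the inner key. A compact, std-only sketch of the removed behavior:

```rust
// Sketch of what `CatalogScoped::from_bytes` did: peel the catalog off at
// the first '/' delimiter, then parse the remainder as the inner key.
fn split_catalog(bytes: &[u8]) -> Option<(&str, &[u8])> {
    let pos = bytes.iter().position(|c| *c == b'/')?;
    let catalog = std::str::from_utf8(&bytes[..pos]).ok()?;
    Some((catalog, &bytes[pos + 1..]))
}

fn main() {
    let (catalog, inner) = split_catalog(b"my_catalog/flownode/1/2/0").unwrap();
    assert_eq!(catalog, "my_catalog");
    assert_eq!(inner, b"flownode/1/2/0".as_slice());
    // No delimiter means no catalog scope -- the deleted code surfaced this
    // as a DelimiterNotFound error.
    assert!(split_catalog(b"missing_delimiter").is_none());
}
```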
-#[derive(Debug, PartialEq)] -pub struct CatalogScoped { - inner: T, - catalog: String, -} - -impl Deref for CatalogScoped { - type Target = T; - - fn deref(&self) -> &Self::Target { - &self.inner - } -} - -impl CatalogScoped { - /// Returns a new [CatalogScoped] key. - pub fn new(catalog: String, inner: T) -> CatalogScoped { - CatalogScoped { inner, catalog } - } - - /// Returns the `catalog`. - pub fn catalog(&self) -> &str { - &self.catalog - } -} - -impl> MetaKey> for CatalogScoped { - fn to_bytes(&self) -> Vec { - let prefix = self.catalog.as_bytes(); - let inner = self.inner.to_bytes(); - let mut bytes = Vec::with_capacity(prefix.len() + inner.len() + 1); - bytes.extend(prefix); - bytes.push(DELIMITER); - bytes.extend(inner); - bytes - } - - fn from_bytes(bytes: &[u8]) -> Result> { - let pos = bytes - .iter() - .position(|c| *c == DELIMITER) - .with_context(|| error::DelimiterNotFoundSnafu { - key: String::from_utf8_lossy(bytes), - })?; - let catalog = String::from_utf8_lossy(&bytes[0..pos]).to_string(); - // Safety: We don't need the `DELIMITER` char. - let inner = T::from_bytes(&bytes[pos + 1..])?; - Ok(CatalogScoped { inner, catalog }) - } -} - -#[derive(Debug, Clone, PartialEq)] -pub struct BytesAdapter(Vec); - -impl From> for BytesAdapter { - fn from(value: Vec) -> Self { - Self(value) - } -} - -impl MetaKey for BytesAdapter { - fn to_bytes(&self) -> Vec { - self.0.clone() - } - - fn from_bytes(bytes: &[u8]) -> Result { - Ok(BytesAdapter(bytes.to_vec())) - } -} - -#[cfg(test)] -mod tests { - use std::assert_matches::assert_matches; - - use super::*; - use crate::error::Result; - - #[derive(Debug)] - struct MockKey { - inner: Vec, - } - - impl MetaKey for MockKey { - fn to_bytes(&self) -> Vec { - self.inner.clone() - } - - fn from_bytes(bytes: &[u8]) -> Result { - Ok(MockKey { - inner: bytes.to_vec(), - }) - } - } - - #[test] - fn test_catalog_scoped_from_bytes() { - let key = "test_catalog_name/key"; - let scoped_key = CatalogScoped::::from_bytes(key.as_bytes()).unwrap(); - assert_eq!(scoped_key.catalog, "test_catalog_name"); - assert_eq!(scoped_key.inner.inner, b"key".to_vec()); - assert_eq!(key.as_bytes(), &scoped_key.to_bytes()); - } - - #[test] - fn test_catalog_scoped_from_bytes_delimiter_not_found() { - let key = "test_catalog_name"; - let err = CatalogScoped::::from_bytes(key.as_bytes()).unwrap_err(); - assert_matches!(err, error::Error::DelimiterNotFound { .. }); - } - - #[test] - fn test_catalog_scoped_to_bytes() { - let scoped_key = CatalogScoped { - inner: MockKey { - inner: b"hi".to_vec(), - }, - catalog: "test_catalog".to_string(), - }; - assert_eq!(b"test_catalog/hi".to_vec(), scoped_key.to_bytes()); - } -} diff --git a/src/common/meta/src/key/table_info.rs b/src/common/meta/src/key/table_info.rs index 798dbf1beb68..7283281c14ab 100644 --- a/src/common/meta/src/key/table_info.rs +++ b/src/common/meta/src/key/table_info.rs @@ -13,34 +13,68 @@ // limitations under the License. 
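The replacement for the deleted trait is the single lifetime-parameterized `MetaKey<'a, T>` in `crate::key`, whose `from_bytes(&'a [u8])` may borrow from its input. Below is a condensed, std-only model of the trait and one of the new flat, catalog-free layouts; error handling is simplified to `String` for brevity and the parsing uses `split` where the real code uses regex patterns.

```rust
// Condensed model of the new `MetaKey<'a, T>` trait. `from_bytes` takes
// `&'a [u8]` so implementors like `TableNameKey<'a>` can return keys that
// borrow from the input bytes instead of allocating.
type Result<T> = std::result::Result<T, String>;

trait MetaKey<'a, T> {
    fn to_bytes(&self) -> Vec<u8>;
    fn from_bytes(bytes: &'a [u8]) -> Result<T>;
}

/// The new flat layout: `__flow/flownode/{flownode_id}/{flow_id}/{partition_id}`.
#[derive(Debug, PartialEq)]
struct FlownodeFlowKey {
    flownode_id: u64,
    flow_id: u32,
    partition_id: u32,
}

impl<'a> MetaKey<'a, FlownodeFlowKey> for FlownodeFlowKey {
    fn to_bytes(&self) -> Vec<u8> {
        format!(
            "__flow/flownode/{}/{}/{}",
            self.flownode_id, self.flow_id, self.partition_id
        )
        .into_bytes()
    }

    fn from_bytes(bytes: &'a [u8]) -> Result<FlownodeFlowKey> {
        let key = std::str::from_utf8(bytes).map_err(|e| e.to_string())?;
        let mut parts = key
            .strip_prefix("__flow/flownode/")
            .ok_or_else(|| format!("invalid FlownodeFlowKey '{key}'"))?
            .split('/');
        let mut next_num = |name: &str| -> Result<u64> {
            parts
                .next()
                .ok_or_else(|| format!("missing {name}"))?
                .parse::<u64>()
                .map_err(|e| e.to_string())
        };
        Ok(FlownodeFlowKey {
            flownode_id: next_num("flownode_id")?,
            flow_id: next_num("flow_id")? as u32,
            partition_id: next_num("partition_id")? as u32,
        })
    }
}

fn main() {
    let key = FlownodeFlowKey { flownode_id: 1, flow_id: 2, partition_id: 0 };
    let bytes = key.to_bytes();
    assert_eq!(bytes, b"__flow/flownode/1/2/0");
    assert_eq!(FlownodeFlowKey::from_bytes(&bytes).unwrap(), key);
}
```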
use std::collections::HashMap; +use std::fmt::Display; use serde::{Deserialize, Serialize}; +use snafu::OptionExt; use table::metadata::{RawTableInfo, TableId}; use table::table_reference::TableReference; -use super::txn_helper::TxnOpGetResponseSet; -use crate::error::Result; +use super::TABLE_INFO_KEY_PATTERN; +use crate::error::{InvalidTableMetadataSnafu, Result}; +use crate::key::txn_helper::TxnOpGetResponseSet; use crate::key::{ - txn_helper, DeserializedValueWithBytes, TableMetaKey, TableMetaValue, TABLE_INFO_KEY_PREFIX, + txn_helper, DeserializedValueWithBytes, MetaKey, TableMetaValue, TABLE_INFO_KEY_PREFIX, }; use crate::kv_backend::txn::Txn; use crate::kv_backend::KvBackendRef; use crate::rpc::store::BatchGetRequest; use crate::table_name::TableName; +/// The key stores the metadata of the table. +/// +/// The layout: `__table_info/{table_id}`. +#[derive(Debug, PartialEq)] pub struct TableInfoKey { table_id: TableId, } impl TableInfoKey { + /// Returns a new [TableInfoKey]. pub fn new(table_id: TableId) -> Self { Self { table_id } } } -impl TableMetaKey for TableInfoKey { - fn as_raw_key(&self) -> Vec { - format!("{}/{}", TABLE_INFO_KEY_PREFIX, self.table_id).into_bytes() +impl Display for TableInfoKey { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}/{}", TABLE_INFO_KEY_PREFIX, self.table_id) + } +} + +impl<'a> MetaKey<'a, TableInfoKey> for TableInfoKey { + fn to_bytes(&self) -> Vec { + self.to_string().into_bytes() + } + + fn from_bytes(bytes: &[u8]) -> Result { + let key = std::str::from_utf8(bytes).map_err(|e| { + InvalidTableMetadataSnafu { + err_msg: format!( + "TableInfoKey '{}' is not a valid UTF8 string: {e}", + String::from_utf8_lossy(bytes) + ), + } + .build() + })?; + let captures = TABLE_INFO_KEY_PATTERN + .captures(key) + .context(InvalidTableMetadataSnafu { + err_msg: format!("Invalid TableInfoKey '{key}'"), + })?; + // Safety: pass the regex check above + let table_id = captures[1].parse::().unwrap(); + Ok(TableInfoKey { table_id }) } } @@ -115,7 +149,7 @@ impl TableInfoManager { ) -> Result>>, )> { let key = TableInfoKey::new(table_id); - let raw_key = key.as_raw_key(); + let raw_key = key.to_bytes(); let txn = txn_helper::build_put_if_absent_txn( raw_key.clone(), @@ -142,7 +176,7 @@ impl TableInfoManager { ) -> Result>>, )> { let key = TableInfoKey::new(table_id); - let raw_key = key.as_raw_key(); + let raw_key = key.to_bytes(); let raw_value = current_table_info_value.get_raw_bytes(); let new_raw_value: Vec = new_table_info_value.try_as_raw_value()?; @@ -159,7 +193,7 @@ impl TableInfoManager { table_id: TableId, ) -> Result>> { let key = TableInfoKey::new(table_id); - let raw_key = key.as_raw_key(); + let raw_key = key.to_bytes(); self.kv_backend .get(&raw_key) .await? 
@@ -173,7 +207,7 @@ impl TableInfoManager { ) -> Result> { let lookup_table = table_ids .iter() - .map(|id| (TableInfoKey::new(*id).as_raw_key(), id)) + .map(|id| (TableInfoKey::new(*id).to_bytes(), id)) .collect::>(); let resp = self @@ -205,7 +239,7 @@ impl TableInfoManager { ) -> Result>> { let lookup_table = table_ids .iter() - .map(|id| (TableInfoKey::new(*id).as_raw_key(), id)) + .map(|id| (TableInfoKey::new(*id).to_bytes(), id)) .collect::>(); let resp = self @@ -248,14 +282,21 @@ mod tests { } #[test] - fn test_key_serde() { + fn test_key_serialization() { let key = TableInfoKey::new(42); - let raw_key = key.as_raw_key(); + let raw_key = key.to_bytes(); assert_eq!(raw_key, b"__table_info/42"); } #[test] - fn test_value_serde() { + fn test_key_deserialization() { + let expected = TableInfoKey::new(42); + let key = TableInfoKey::from_bytes(b"__table_info/42").unwrap(); + assert_eq!(key, expected); + } + + #[test] + fn test_value_serialization() { let value = TableInfoValue { table_info: new_table_info(42), version: 1, diff --git a/src/common/meta/src/key/table_name.rs b/src/common/meta/src/key/table_name.rs index 83e1cb7fb254..2eb30381fcdf 100644 --- a/src/common/meta/src/key/table_name.rs +++ b/src/common/meta/src/key/table_name.rs @@ -13,6 +13,7 @@ // limitations under the License. use std::collections::HashMap; +use std::fmt::Display; use std::sync::Arc; use futures_util::stream::BoxStream; @@ -20,9 +21,8 @@ use serde::{Deserialize, Serialize}; use snafu::OptionExt; use table::metadata::TableId; -use super::{TableMetaValue, TABLE_NAME_KEY_PATTERN, TABLE_NAME_KEY_PREFIX}; +use super::{MetaKey, TableMetaValue, TABLE_NAME_KEY_PATTERN, TABLE_NAME_KEY_PREFIX}; use crate::error::{Error, InvalidTableMetadataSnafu, Result}; -use crate::key::TableMetaKey; use crate::kv_backend::memory::MemoryKvBackend; use crate::kv_backend::txn::{Txn, TxnOp}; use crate::kv_backend::KvBackendRef; @@ -50,45 +50,56 @@ impl<'a> TableNameKey<'a> { pub fn prefix_to_table(catalog: &str, schema: &str) -> String { format!("{}/{}/{}", TABLE_NAME_KEY_PREFIX, catalog, schema) } +} - fn strip_table_name(raw_key: &[u8]) -> Result { - let key = String::from_utf8(raw_key.to_vec()).map_err(|e| { +impl Display for TableNameKey<'_> { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "{}/{}", + Self::prefix_to_table(self.catalog, self.schema), + self.table + ) + } +} + +impl<'a> MetaKey<'a, TableNameKey<'a>> for TableNameKey<'_> { + fn to_bytes(&self) -> Vec { + self.to_string().into_bytes() + } + + fn from_bytes(bytes: &'a [u8]) -> Result> { + let key = std::str::from_utf8(bytes).map_err(|e| { InvalidTableMetadataSnafu { err_msg: format!( "TableNameKey '{}' is not a valid UTF8 string: {e}", - String::from_utf8_lossy(raw_key) + String::from_utf8_lossy(bytes) ), } .build() })?; - let captures = - TABLE_NAME_KEY_PATTERN - .captures(&key) - .context(InvalidTableMetadataSnafu { - err_msg: format!("Invalid TableNameKey '{key}'"), - })?; - // Safety: pass the regex check above - Ok(captures[3].to_string()) - } -} - -impl TableMetaKey for TableNameKey<'_> { - fn as_raw_key(&self) -> Vec { - format!( - "{}/{}", - Self::prefix_to_table(self.catalog, self.schema), - self.table - ) - .into_bytes() + let captures = TABLE_NAME_KEY_PATTERN + .captures(key) + .context(InvalidTableMetadataSnafu { + err_msg: format!("Invalid TableNameKey '{key}'"), + })?; + let catalog = captures.get(1).unwrap().as_str(); + let schema = captures.get(2).unwrap().as_str(); + let table = captures.get(3).unwrap().as_str(); + 
Ok(TableNameKey { + catalog, + schema, + table, + }) } } /// Decodes `KeyValue` to ({table_name}, TableNameValue) pub fn table_decoder(kv: KeyValue) -> Result<(String, TableNameValue)> { - let table_name = TableNameKey::strip_table_name(kv.key())?; + let table_name_key = TableNameKey::from_bytes(&kv.key)?; let table_name_value = TableNameValue::try_from_raw_value(&kv.value)?; - Ok((table_name, table_name_value)) + Ok((table_name_key.table.to_string(), table_name_value)) } impl<'a> From<&'a TableName> for TableNameKey<'a> { @@ -166,7 +177,7 @@ impl TableNameManager { key: &TableNameKey<'_>, table_id: TableId, ) -> Result { - let raw_key = key.as_raw_key(); + let raw_key = key.to_bytes(); let value = TableNameValue::new(table_id); let raw_value = value.try_as_raw_value()?; @@ -182,8 +193,8 @@ impl TableNameManager { new_key: &TableNameKey<'_>, table_id: TableId, ) -> Result { - let raw_key = key.as_raw_key(); - let new_raw_key = new_key.as_raw_key(); + let raw_key = key.to_bytes(); + let new_raw_key = new_key.to_bytes(); let value = TableNameValue::new(table_id); let raw_value = value.try_as_raw_value()?; @@ -195,7 +206,7 @@ impl TableNameManager { } pub async fn get(&self, key: TableNameKey<'_>) -> Result> { - let raw_key = key.as_raw_key(); + let raw_key = key.to_bytes(); self.kv_backend .get(&raw_key) .await? @@ -209,7 +220,7 @@ impl TableNameManager { ) -> Result>> { let raw_keys = keys .into_iter() - .map(|key| key.as_raw_key()) + .map(|key| key.to_bytes()) .collect::>(); let req = BatchGetRequest::new().with_keys(raw_keys.clone()); let res = self.kv_backend.batch_get(req).await?; @@ -229,7 +240,7 @@ impl TableNameManager { } pub async fn exists(&self, key: TableNameKey<'_>) -> Result { - let raw_key = key.as_raw_key(); + let raw_key = key.to_bytes(); self.kv_backend.exists(&raw_key).await } @@ -259,8 +270,8 @@ mod tests { #[test] fn test_strip_table_name() { - fn test_err(raw_key: &[u8]) { - assert!(TableNameKey::strip_table_name(raw_key).is_err()); + fn test_err(bytes: &[u8]) { + assert!(TableNameKey::from_bytes(bytes).is_err()); } test_err(b""); @@ -277,10 +288,11 @@ mod tests { fn test_ok(table_name: &str) { assert_eq!( table_name, - TableNameKey::strip_table_name( + TableNameKey::from_bytes( format!("__table_name/my_catalog/my_schema/{}", table_name).as_bytes() ) .unwrap() + .table ); } test_ok("my_table"); @@ -291,13 +303,18 @@ mod tests { } #[test] - fn test_serde() { + fn test_serialization() { let key = TableNameKey::new("my_catalog", "my_schema", "my_table"); - let raw_key = key.as_raw_key(); + let raw_key = key.to_bytes(); assert_eq!( b"__table_name/my_catalog/my_schema/my_table", raw_key.as_slice() ); + let table_name_key = + TableNameKey::from_bytes(b"__table_name/my_catalog/my_schema/my_table").unwrap(); + assert_eq!(table_name_key.catalog, "my_catalog"); + assert_eq!(table_name_key.schema, "my_schema"); + assert_eq!(table_name_key.table, "my_table"); let value = TableNameValue::new(1); let literal = br#"{"table_id":1}"#; diff --git a/src/common/meta/src/key/table_region.rs b/src/common/meta/src/key/table_region.rs index e51e1a547194..4ccc99ba513d 100644 --- a/src/common/meta/src/key/table_region.rs +++ b/src/common/meta/src/key/table_region.rs @@ -13,16 +13,18 @@ // limitations under the License. 
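That borrowed-input signature is what lets `TableNameKey<'a>` above parse without allocating: the returned key's `catalog`, `schema`, and `table` fields point straight into the scanned bytes. A simplified std-only sketch of that zero-copy parse (the real implementation validates against `TABLE_NAME_KEY_PATTERN` rather than splitting):

```rust
// Simplified sketch; field values borrow from `bytes`, nothing is copied.
#[derive(Debug, PartialEq)]
struct TableNameKey<'a> {
    catalog: &'a str,
    schema: &'a str,
    table: &'a str,
}

fn from_bytes(bytes: &[u8]) -> Result<TableNameKey<'_>, String> {
    let key = std::str::from_utf8(bytes).map_err(|e| format!("not valid UTF8: {e}"))?;
    let rest = key
        .strip_prefix("__table_name/")
        .ok_or_else(|| format!("invalid TableNameKey '{key}'"))?;
    match rest.splitn(3, '/').collect::<Vec<_>>()[..] {
        [catalog, schema, table] if !table.is_empty() => {
            Ok(TableNameKey { catalog, schema, table })
        }
        _ => Err(format!("invalid TableNameKey '{key}'")),
    }
}

fn main() {
    let bytes = b"__table_name/my_catalog/my_schema/my_table".to_vec();
    let key = from_bytes(&bytes).unwrap();
    assert_eq!(key.catalog, "my_catalog");
    assert_eq!(key.schema, "my_schema");
    assert_eq!(key.table, "my_table");
    assert!(from_bytes(b"__table_name/missing_parts").is_err());
}
```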
use std::collections::BTreeMap; +use std::fmt::Display; +use lazy_static::lazy_static; +use regex::Regex; use serde::{Deserialize, Serialize}; -use snafu::ResultExt; +use snafu::{OptionExt, ResultExt}; use store_api::storage::RegionNumber; use table::metadata::TableId; -use super::TABLE_REGION_KEY_PREFIX; -use crate::error::{Result, SerdeJsonSnafu}; -use crate::key::TableMetaKey; -use crate::{impl_table_meta_key, impl_table_meta_value, DatanodeId}; +use super::{MetaKey, TABLE_REGION_KEY_PREFIX}; +use crate::error::{InvalidTableMetadataSnafu, Result, SerdeJsonSnafu}; +use crate::{impl_table_meta_value, DatanodeId}; pub type RegionDistribution = BTreeMap>; @@ -30,23 +32,54 @@ pub type RegionDistribution = BTreeMap>; since = "0.4.0", note = "Please use the TableRouteManager's get_region_distribution method instead" )] +#[derive(Debug, PartialEq)] pub struct TableRegionKey { table_id: TableId, } +lazy_static! { + static ref TABLE_REGION_KEY_PATTERN: Regex = + Regex::new(&format!("^{TABLE_REGION_KEY_PREFIX}/([0-9]+)$")).unwrap(); +} + impl TableRegionKey { pub fn new(table_id: TableId) -> Self { Self { table_id } } } -impl TableMetaKey for TableRegionKey { - fn as_raw_key(&self) -> Vec { - format!("{}/{}", TABLE_REGION_KEY_PREFIX, self.table_id).into_bytes() +impl Display for TableRegionKey { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}/{}", TABLE_REGION_KEY_PREFIX, self.table_id) } } -impl_table_meta_key! {TableRegionKey} +impl<'a> MetaKey<'a, TableRegionKey> for TableRegionKey { + fn to_bytes(&self) -> Vec { + self.to_string().into_bytes() + } + + fn from_bytes(bytes: &'a [u8]) -> Result { + let key = std::str::from_utf8(bytes).map_err(|e| { + InvalidTableMetadataSnafu { + err_msg: format!( + "TableRegionKey '{}' is not a valid UTF8 string: {e}", + String::from_utf8_lossy(bytes) + ), + } + .build() + })?; + let captures = + TABLE_REGION_KEY_PATTERN + .captures(key) + .context(InvalidTableMetadataSnafu { + err_msg: format!("Invalid TableRegionKey '{key}'"), + })?; + // Safety: pass the regex check above + let table_id = captures[1].parse::().unwrap(); + Ok(TableRegionKey { table_id }) + } +} #[deprecated( since = "0.4.0", @@ -75,10 +108,12 @@ mod tests { use crate::key::TableMetaValue; #[test] - fn test_serde() { - let key = TableRegionKey::new(1); - let raw_key = key.as_raw_key(); - assert_eq!(raw_key, b"__table_region/1"); + fn test_serialization() { + let key = TableRegionKey::new(24); + let raw_key = key.to_bytes(); + assert_eq!(raw_key, b"__table_region/24"); + let deserialized = TableRegionKey::from_bytes(b"__table_region/24").unwrap(); + assert_eq!(key, deserialized); let value = TableRegionValue { region_distribution: RegionDistribution::from([(1, vec![1, 2, 3]), (2, vec![4, 5, 6])]), diff --git a/src/common/meta/src/key/table_route.rs b/src/common/meta/src/key/table_route.rs index a6745f8db29b..eca8d702ef09 100644 --- a/src/common/meta/src/key/table_route.rs +++ b/src/common/meta/src/key/table_route.rs @@ -21,19 +21,23 @@ use store_api::storage::{RegionId, RegionNumber}; use table::metadata::TableId; use crate::error::{ - self, MetadataCorruptionSnafu, Result, SerdeJsonSnafu, TableRouteNotFoundSnafu, - UnexpectedLogicalRouteTableSnafu, + self, InvalidTableMetadataSnafu, MetadataCorruptionSnafu, Result, SerdeJsonSnafu, + TableRouteNotFoundSnafu, UnexpectedLogicalRouteTableSnafu, }; use crate::key::txn_helper::TxnOpGetResponseSet; use crate::key::{ - txn_helper, DeserializedValueWithBytes, RegionDistribution, TableMetaKey, TableMetaValue, - 
TABLE_ROUTE_PREFIX, + txn_helper, DeserializedValueWithBytes, MetaKey, RegionDistribution, TableMetaValue, + TABLE_ROUTE_KEY_PATTERN, TABLE_ROUTE_PREFIX, }; use crate::kv_backend::txn::Txn; use crate::kv_backend::KvBackendRef; use crate::rpc::router::{region_distribution, RegionRoute}; use crate::rpc::store::BatchGetRequest; +/// The key stores table routes +/// +/// The layout: `__table_route/{table_id}`. +#[derive(Debug, PartialEq)] pub struct TableRouteKey { pub table_id: TableId, } @@ -239,10 +243,31 @@ impl LogicalTableRouteValue { } } -impl TableMetaKey for TableRouteKey { - fn as_raw_key(&self) -> Vec { +impl<'a> MetaKey<'a, TableRouteKey> for TableRouteKey { + fn to_bytes(&self) -> Vec { self.to_string().into_bytes() } + + fn from_bytes(bytes: &[u8]) -> Result { + let key = std::str::from_utf8(bytes).map_err(|e| { + InvalidTableMetadataSnafu { + err_msg: format!( + "TableRouteKey '{}' is not a valid UTF8 string: {e}", + String::from_utf8_lossy(bytes) + ), + } + .build() + })?; + let captures = + TABLE_ROUTE_KEY_PATTERN + .captures(key) + .context(InvalidTableMetadataSnafu { + err_msg: format!("Invalid TableRouteKey '{key}'"), + })?; + // Safety: pass the regex check above + let table_id = captures[1].parse::().unwrap(); + Ok(TableRouteKey { table_id }) + } } impl Display for TableRouteKey { @@ -462,7 +487,7 @@ impl TableRouteStorage { ) -> Result>>, )> { let key = TableRouteKey::new(table_id); - let raw_key = key.as_raw_key(); + let raw_key = key.to_bytes(); let txn = txn_helper::build_put_if_absent_txn( raw_key.clone(), @@ -490,7 +515,7 @@ impl TableRouteStorage { ) -> Result>>, )> { let key = TableRouteKey::new(table_id); - let raw_key = key.as_raw_key(); + let raw_key = key.to_bytes(); let raw_value = current_table_route_value.get_raw_bytes(); let new_raw_value: Vec = new_table_route_value.try_as_raw_value()?; @@ -506,7 +531,7 @@ impl TableRouteStorage { pub async fn get(&self, table_id: TableId) -> Result> { let key = TableRouteKey::new(table_id); self.kv_backend - .get(&key.as_raw_key()) + .get(&key.to_bytes()) .await? .map(|kv| TableRouteValue::try_from_raw_value(&kv.value)) .transpose() @@ -519,7 +544,7 @@ impl TableRouteStorage { ) -> Result>> { let key = TableRouteKey::new(table_id); self.kv_backend - .get(&key.as_raw_key()) + .get(&key.to_bytes()) .await? 
.map(|kv| DeserializedValueWithBytes::from_inner_slice(&kv.value)) .transpose() @@ -560,7 +585,7 @@ impl TableRouteStorage { pub async fn batch_get(&self, table_ids: &[TableId]) -> Result>> { let keys = table_ids .iter() - .map(|id| TableRouteKey::new(*id).as_raw_key()) + .map(|id| TableRouteKey::new(*id).to_bytes()) .collect::>(); let resp = self .kv_backend @@ -604,6 +629,20 @@ mod tests { ); } + #[test] + fn test_key_serialization() { + let key = TableRouteKey::new(42); + let raw_key = key.to_bytes(); + assert_eq!(raw_key, b"__table_route/42"); + } + + #[test] + fn test_key_deserialization() { + let expected = TableRouteKey::new(42); + let key = TableRouteKey::from_bytes(b"__table_route/42").unwrap(); + assert_eq!(key, expected); + } + #[tokio::test] async fn test_table_route_storage_get_raw_empty() { let kv = Arc::new(MemoryKvBackend::default()); diff --git a/src/common/query/src/lib.rs b/src/common/query/src/lib.rs index 3686cff83699..09d4d411bbfc 100644 --- a/src/common/query/src/lib.rs +++ b/src/common/query/src/lib.rs @@ -119,6 +119,7 @@ impl OutputMeta { } pub use datafusion::physical_plan::ExecutionPlan as DfPhysicalPlan; +pub type DfPhysicalPlanRef = Arc; #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Visit, VisitMut)] pub enum AddColumnLocation { diff --git a/src/common/test-util/src/recordbatch.rs b/src/common/test-util/src/recordbatch.rs index 64a6262a08f3..47c949d40715 100644 --- a/src/common/test-util/src/recordbatch.rs +++ b/src/common/test-util/src/recordbatch.rs @@ -12,7 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -use client::Database; use common_query::OutputData; use common_recordbatch::util; @@ -21,22 +20,6 @@ pub enum ExpectedOutput<'a> { QueryResult(&'a str), } -pub async fn execute_and_check_output(db: &Database, sql: &str, expected: ExpectedOutput<'_>) { - let output = db.sql(sql).await.unwrap(); - let output = output.data; - - match (&output, expected) { - (OutputData::AffectedRows(x), ExpectedOutput::AffectedRows(y)) => { - assert_eq!(*x, y, "actual: \n{}", x) - } - (OutputData::RecordBatches(_), ExpectedOutput::QueryResult(x)) - | (OutputData::Stream(_), ExpectedOutput::QueryResult(x)) => { - check_output_stream(output, x).await - } - _ => panic!(), - } -} - pub async fn check_output_stream(output: OutputData, expected: &str) { let recordbatches = match output { OutputData::Stream(stream) => util::collect_batches(stream).await.unwrap(), diff --git a/src/frontend/src/heartbeat/handler/tests.rs b/src/frontend/src/heartbeat/handler/tests.rs index c5b5b4ecde81..9bbe8d903ae2 100644 --- a/src/frontend/src/heartbeat/handler/tests.rs +++ b/src/frontend/src/heartbeat/handler/tests.rs @@ -24,7 +24,7 @@ use common_meta::heartbeat::mailbox::{HeartbeatMailbox, MessageMeta}; use common_meta::instruction::{CacheIdent, Instruction}; use common_meta::key::schema_name::{SchemaName, SchemaNameKey}; use common_meta::key::table_info::TableInfoKey; -use common_meta::key::TableMetaKey; +use common_meta::key::MetaKey; use partition::manager::TableRouteCacheInvalidator; use table::metadata::TableId; use tokio::sync::mpsc; @@ -79,7 +79,7 @@ async fn handle_instruction( async fn test_invalidate_table_cache_handler() { let table_id = 1; let table_info_key = TableInfoKey::new(table_id); - let inner = HashMap::from([(table_info_key.as_raw_key(), 1)]); + let inner = HashMap::from([(table_info_key.to_bytes(), 1)]); let backend = Arc::new(MockKvCacheInvalidator { inner: Mutex::new(inner), }); @@ -103,7 +103,7 
@@ async fn test_invalidate_table_cache_handler() { .inner .lock() .unwrap() - .contains_key(&table_info_key.as_raw_key())); + .contains_key(&table_info_key.to_bytes())); // removes a invalid key handle_instruction( @@ -118,7 +118,7 @@ async fn test_invalidate_table_cache_handler() { async fn test_invalidate_schema_key_handler() { let (catalog, schema) = ("foo", "bar"); let schema_key = SchemaNameKey { catalog, schema }; - let inner = HashMap::from([(schema_key.as_raw_key(), 1)]); + let inner = HashMap::from([(schema_key.to_bytes(), 1)]); let backend = Arc::new(MockKvCacheInvalidator { inner: Mutex::new(inner), }); @@ -146,7 +146,7 @@ async fn test_invalidate_schema_key_handler() { .inner .lock() .unwrap() - .contains_key(&schema_key.as_raw_key())); + .contains_key(&schema_key.to_bytes())); // removes a invalid key handle_instruction( diff --git a/src/metric-engine/src/data_region.rs b/src/metric-engine/src/data_region.rs index 1dd1b53faad1..0ed19db600be 100644 --- a/src/metric-engine/src/data_region.rs +++ b/src/metric-engine/src/data_region.rs @@ -91,7 +91,7 @@ impl DataRegion { Ok(()) } - /// Generate warpped [RegionAlterRequest] with given [ColumnMetadata]. + /// Generate wrapped [RegionAlterRequest] with given [ColumnMetadata]. /// This method will modify `columns` in-place. async fn assemble_alter_request( &self, diff --git a/src/metric-engine/src/test_util.rs b/src/metric-engine/src/test_util.rs index 2da459419d65..79a523bb5758 100644 --- a/src/metric-engine/src/test_util.rs +++ b/src/metric-engine/src/test_util.rs @@ -291,7 +291,8 @@ pub fn build_rows(num_tags: usize, num_rows: usize) -> Vec { #[cfg(test)] mod test { - + use object_store::services::Fs; + use object_store::ObjectStore; use store_api::metric_engine_consts::{DATA_REGION_SUBDIR, METADATA_REGION_SUBDIR}; use super::*; @@ -302,21 +303,21 @@ mod test { let env = TestEnv::new().await; env.init_metric_region().await; let region_id = to_metadata_region_id(env.default_physical_region_id()); - let region_dir = join_dir(&env.data_home(), "test_metric_region"); - // `join_dir` doesn't suit windows path - #[cfg(not(target_os = "windows"))] - { - // assert metadata region's dir - let metadata_region_dir = join_dir(®ion_dir, METADATA_REGION_SUBDIR); - let exist = tokio::fs::try_exists(metadata_region_dir).await.unwrap(); - assert!(exist); + let mut builder = Fs::default(); + builder.root(&env.data_home()); + let object_store = ObjectStore::new(builder).unwrap().finish(); - // assert data region's dir - let data_region_dir = join_dir(®ion_dir, DATA_REGION_SUBDIR); - let exist = tokio::fs::try_exists(data_region_dir).await.unwrap(); - assert!(exist); - } + let region_dir = "test_metric_region"; + // assert metadata region's dir + let metadata_region_dir = join_dir(region_dir, METADATA_REGION_SUBDIR); + let exist = object_store.is_exist(&metadata_region_dir).await.unwrap(); + assert!(exist); + + // assert data region's dir + let data_region_dir = join_dir(region_dir, DATA_REGION_SUBDIR); + let exist = object_store.is_exist(&data_region_dir).await.unwrap(); + assert!(exist); // check mito engine let metadata_region_id = utils::to_metadata_region_id(region_id); diff --git a/src/mito2/src/cache/file_cache.rs b/src/mito2/src/cache/file_cache.rs index 457f881c43e6..890d0b773975 100644 --- a/src/mito2/src/cache/file_cache.rs +++ b/src/mito2/src/cache/file_cache.rs @@ -231,7 +231,7 @@ impl FileCache { /// Get the parquet metadata in file cache. /// If the file is not in the cache or fail to load metadata, return None. 
pub(crate) async fn get_parquet_meta_data(&self, key: IndexKey) -> Option { - // Check if file cache contrains the key + // Check if file cache contains the key if let Some(index_value) = self.memory_index.get(&key).await { // Load metadata from file cache let local_store = self.local_store(); diff --git a/src/operator/src/expr_factory.rs b/src/operator/src/expr_factory.rs index f0305ed981a5..17782c3addac 100644 --- a/src/operator/src/expr_factory.rs +++ b/src/operator/src/expr_factory.rs @@ -17,8 +17,8 @@ use std::collections::{HashMap, HashSet}; use api::helper::ColumnDataTypeWrapper; use api::v1::alter_expr::Kind; use api::v1::{ - AddColumn, AddColumns, AlterExpr, Column, ColumnDataType, ColumnDataTypeExtension, - CreateTableExpr, DropColumn, DropColumns, RenameTable, SemanticType, + AddColumn, AddColumns, AlterExpr, ChangeColumnType, ChangeColumnTypes, Column, ColumnDataType, + ColumnDataTypeExtension, CreateTableExpr, DropColumn, DropColumns, RenameTable, SemanticType, }; use common_error::ext::BoxedError; use common_grpc_expr::util::ColumnExpr; @@ -38,7 +38,9 @@ use snafu::{ensure, OptionExt, ResultExt}; use sql::ast::{ColumnDef, ColumnOption, TableConstraint}; use sql::statements::alter::{AlterTable, AlterTableOperation}; use sql::statements::create::{CreateExternalTable, CreateFlow, CreateTable, TIME_INDEX}; -use sql::statements::{column_def_to_schema, sql_column_def_to_grpc_column_def}; +use sql::statements::{ + column_def_to_schema, sql_column_def_to_grpc_column_def, sql_data_type_to_concrete_data_type, +}; use sql::util::extract_tables_from_query; use table::requests::{TableOptions, FILE_TABLE_META_KEY}; use table::table_reference::TableReference; @@ -474,6 +476,23 @@ pub(crate) fn to_alter_expr( location: location.as_ref().map(From::from), }], }), + AlterTableOperation::ChangeColumnType { + column_name, + target_type, + } => { + let target_type = + sql_data_type_to_concrete_data_type(target_type).context(ParseSqlSnafu)?; + let (target_type, target_type_extension) = ColumnDataTypeWrapper::try_from(target_type) + .map(|w| w.to_parts()) + .context(ColumnDataTypeSnafu)?; + Kind::ChangeColumnTypes(ChangeColumnTypes { + change_column_types: vec![ChangeColumnType { + column_name: column_name.value.to_string(), + target_type: target_type as i32, + target_type_extension, + }], + }) + } AlterTableOperation::DropColumn { name } => Kind::DropColumns(DropColumns { drop_columns: vec![DropColumn { name: name.value.to_string(), @@ -709,4 +728,39 @@ mod tests { if ts.to_iso8601_string() == "2024-01-29 16:01:01+0000") ); } + + #[test] + fn test_to_alter_change_column_type_expr() { + let sql = "ALTER TABLE monitor MODIFY mem_usage STRING;"; + let stmt = + ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default()) + .unwrap() + .pop() + .unwrap(); + + let Statement::Alter(alter_table) = stmt else { + unreachable!() + }; + + // query context with system timezone UTC. 
+ let expr = to_alter_expr(alter_table.clone(), QueryContext::arc()).unwrap(); + let kind = expr.kind.unwrap(); + + let Kind::ChangeColumnTypes(ChangeColumnTypes { + change_column_types, + }) = kind + else { + unreachable!() + }; + + assert_eq!(1, change_column_types.len()); + let change_column_type = &change_column_types[0]; + + assert_eq!("mem_usage", change_column_type.column_name); + assert_eq!( + ColumnDataType::String as i32, + change_column_type.target_type + ); + assert!(change_column_type.target_type_extension.is_none()); + } } diff --git a/src/promql/src/extension_plan/union_distinct_on.rs b/src/promql/src/extension_plan/union_distinct_on.rs index 4624544645da..78a2cc913d85 100644 --- a/src/promql/src/extension_plan/union_distinct_on.rs +++ b/src/promql/src/extension_plan/union_distinct_on.rs @@ -398,7 +398,7 @@ impl HashedData { } } - // Finilize the hash map + // Finalize the hash map let batch = interleave_batches(schema, batches, interleave_indices)?; Ok(Self { diff --git a/src/query/src/optimizer.rs b/src/query/src/optimizer.rs index da95851b87a6..e6a971417c23 100644 --- a/src/query/src/optimizer.rs +++ b/src/query/src/optimizer.rs @@ -12,7 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. +pub mod count_wildcard; pub mod order_hint; +pub mod remove_duplicate; pub mod string_normalization; #[cfg(test)] mod test_util; @@ -26,7 +28,7 @@ use crate::QueryEngineContext; /// [`ExtensionAnalyzerRule`]s transform [`LogicalPlan`]s in some way to make /// the plan valid prior to the rest of the DataFusion optimization process. -/// It's an extension of datafusion [`AnalyzerRule`]s but accepts [`QueryEngineContext` as the second parameter. +/// It's an extension of datafusion [`AnalyzerRule`]s but accepts [`QueryEngineContext`] as the second parameter. pub trait ExtensionAnalyzerRule { /// Rewrite `plan` fn analyze( diff --git a/src/query/src/optimizer/count_wildcard.rs b/src/query/src/optimizer/count_wildcard.rs new file mode 100644 index 000000000000..b8a491003b98 --- /dev/null +++ b/src/query/src/optimizer/count_wildcard.rs @@ -0,0 +1,217 @@ +// Copyright 2023 Greptime Team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use datafusion::datasource::DefaultTableSource; +use datafusion_common::tree_node::{ + Transformed, TransformedResult, TreeNode, TreeNodeRecursion, TreeNodeVisitor, +}; +use datafusion_common::{Column, Result as DataFusionResult}; +use datafusion_expr::expr::{AggregateFunction, AggregateFunctionDefinition, WindowFunction}; +use datafusion_expr::utils::COUNT_STAR_EXPANSION; +use datafusion_expr::{col, lit, Expr, LogicalPlan, WindowFunctionDefinition}; +use datafusion_optimizer::utils::NamePreserver; +use datafusion_optimizer::AnalyzerRule; +use datafusion_sql::TableReference; +use table::table::adapter::DfTableProviderAdapter; + +/// A replacement to DataFusion's [`CountWildcardRule`]. 
This rule +/// would prefer to use TIME INDEX for counting wildcard as it's +/// faster to read comparing to PRIMARY KEYs. +/// +/// [`CountWildcardRule`]: datafusion::optimizer::analyzer::CountWildcardRule +pub struct CountWildcardToTimeIndexRule; + +impl AnalyzerRule for CountWildcardToTimeIndexRule { + fn name(&self) -> &str { + "count_wildcard_to_time_index_rule" + } + + fn analyze( + &self, + plan: LogicalPlan, + _config: &datafusion::config::ConfigOptions, + ) -> DataFusionResult { + plan.transform_down_with_subqueries(&Self::analyze_internal) + .data() + } +} + +impl CountWildcardToTimeIndexRule { + fn analyze_internal(plan: LogicalPlan) -> DataFusionResult> { + let name_preserver = NamePreserver::new(&plan); + let new_arg = if let Some(time_index) = Self::try_find_time_index_col(&plan) { + vec![col(time_index)] + } else { + vec![lit(COUNT_STAR_EXPANSION)] + }; + plan.map_expressions(|expr| { + let original_name = name_preserver.save(&expr)?; + let transformed_expr = expr.transform_up_mut(&mut |expr| match expr { + Expr::WindowFunction(mut window_function) + if Self::is_count_star_window_aggregate(&window_function) => + { + window_function.args.clone_from(&new_arg); + Ok(Transformed::yes(Expr::WindowFunction(window_function))) + } + Expr::AggregateFunction(mut aggregate_function) + if Self::is_count_star_aggregate(&aggregate_function) => + { + aggregate_function.args.clone_from(&new_arg); + Ok(Transformed::yes(Expr::AggregateFunction( + aggregate_function, + ))) + } + _ => Ok(Transformed::no(expr)), + })?; + transformed_expr.map_data(|data| original_name.restore(data)) + }) + } + + fn try_find_time_index_col(plan: &LogicalPlan) -> Option { + let mut finder = TimeIndexFinder::default(); + // Safety: `TimeIndexFinder` won't throw error. + plan.visit(&mut finder).unwrap(); + let col = finder.into_column(); + + // check if the time index is a valid column as for current plan + if let Some(col) = &col { + let mut is_valid = false; + for input in plan.inputs() { + if input.schema().has_column(col) { + is_valid = true; + break; + } + } + if !is_valid { + return None; + } + } + + col + } +} + +/// Utility functions from the original rule. 
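Stripped of the DataFusion plumbing, the rewrite itself is small: a `count(*)` argument is replaced with the time index column when one can be found, and otherwise with the stock `COUNT_STAR_EXPANSION` literal, exactly as the stock rule would do. A self-contained sketch of that decision over a toy expression type (not the DataFusion AST):

```rust
// Toy expression type standing in for DataFusion's Expr.
#[derive(Debug, Clone, PartialEq)]
enum Expr {
    Wildcard,
    Column(String),
    Literal(i64),
    Count(Box<Expr>),
}

/// Rewrite `count(*)`: prefer the time index column when known, otherwise
/// fall back to a COUNT_STAR_EXPANSION-style literal.
fn rewrite_count(expr: Expr, time_index: Option<&str>) -> Expr {
    match expr {
        Expr::Count(arg) if *arg == Expr::Wildcard => {
            let new_arg = match time_index {
                Some(ts) => Expr::Column(ts.to_string()),
                None => Expr::Literal(1),
            };
            Expr::Count(Box::new(new_arg))
        }
        other => other,
    }
}

fn main() {
    let count_star = Expr::Count(Box::new(Expr::Wildcard));
    // With a time index available, count(*) becomes count(ts), which is
    // cheaper to read than counting over PRIMARY KEY columns.
    assert_eq!(
        rewrite_count(count_star.clone(), Some("ts")),
        Expr::Count(Box::new(Expr::Column("ts".to_string())))
    );
    // Without one, behave like the stock rule.
    assert_eq!(
        rewrite_count(count_star, None),
        Expr::Count(Box::new(Expr::Literal(1)))
    );
}
```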
+impl CountWildcardToTimeIndexRule { + fn is_wildcard(expr: &Expr) -> bool { + matches!(expr, Expr::Wildcard { qualifier: None }) + } + + fn is_count_star_aggregate(aggregate_function: &AggregateFunction) -> bool { + matches!( + &aggregate_function.func_def, + AggregateFunctionDefinition::BuiltIn( + datafusion_expr::aggregate_function::AggregateFunction::Count, + ) + ) && aggregate_function.args.len() == 1 + && Self::is_wildcard(&aggregate_function.args[0]) + } + + fn is_count_star_window_aggregate(window_function: &WindowFunction) -> bool { + matches!( + &window_function.fun, + WindowFunctionDefinition::AggregateFunction( + datafusion_expr::aggregate_function::AggregateFunction::Count, + ) + ) && window_function.args.len() == 1 + && Self::is_wildcard(&window_function.args[0]) + } +} + +#[derive(Default)] +struct TimeIndexFinder { + time_index_col: Option, + table_alias: Option, +} + +impl TreeNodeVisitor for TimeIndexFinder { + type Node = LogicalPlan; + + fn f_down(&mut self, node: &Self::Node) -> DataFusionResult { + if let LogicalPlan::SubqueryAlias(subquery_alias) = node { + self.table_alias = Some(subquery_alias.alias.clone()); + } + + if let LogicalPlan::TableScan(table_scan) = &node { + if let Some(source) = table_scan + .source + .as_any() + .downcast_ref::() + { + if let Some(adapter) = source + .table_provider + .as_any() + .downcast_ref::() + { + let table_info = adapter.table().table_info(); + self.table_alias + .get_or_insert(TableReference::bare(table_info.name.clone())); + self.time_index_col = table_info + .meta + .schema + .timestamp_column() + .map(|c| c.name.clone()); + + return Ok(TreeNodeRecursion::Stop); + } + } + } + + Ok(TreeNodeRecursion::Continue) + } + + fn f_up(&mut self, _node: &Self::Node) -> DataFusionResult { + Ok(TreeNodeRecursion::Stop) + } +} + +impl TimeIndexFinder { + fn into_column(self) -> Option { + self.time_index_col + .map(|c| Column::new(self.table_alias, c)) + } +} + +#[cfg(test)] +mod test { + use std::sync::Arc; + + use datafusion_expr::{count, wildcard, LogicalPlanBuilder}; + use table::table::numbers::NumbersTable; + + use super::*; + + #[test] + fn uppercase_table_name() { + let numbers_table = NumbersTable::table_with_name(0, "AbCdE".to_string()); + let table_source = Arc::new(DefaultTableSource::new(Arc::new( + DfTableProviderAdapter::new(numbers_table), + ))); + + let plan = LogicalPlanBuilder::scan_with_filters("t", table_source, None, vec![]) + .unwrap() + .aggregate(Vec::::new(), vec![count(wildcard())]) + .unwrap() + .alias(r#""FgHiJ""#) + .unwrap() + .build() + .unwrap(); + + let mut finder = TimeIndexFinder::default(); + plan.visit(&mut finder).unwrap(); + + assert_eq!(finder.table_alias, Some(TableReference::bare("FgHiJ"))); + assert!(finder.time_index_col.is_none()); + } +} diff --git a/src/query/src/optimizer/remove_duplicate.rs b/src/query/src/optimizer/remove_duplicate.rs new file mode 100644 index 000000000000..10973e5ed5ee --- /dev/null +++ b/src/query/src/optimizer/remove_duplicate.rs @@ -0,0 +1,117 @@ +// Copyright 2023 Greptime Team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +use common_query::DfPhysicalPlanRef; +use datafusion::config::ConfigOptions; +use datafusion::physical_optimizer::PhysicalOptimizerRule; +use datafusion::physical_plan::coalesce_batches::CoalesceBatchesExec; +use datafusion::physical_plan::repartition::RepartitionExec; +use datafusion_common::tree_node::{Transformed, TreeNode}; +use datafusion_common::Result as DfResult; + +/// This is [PhysicalOptimizerRule] to remove duplicate physical plans such as two +/// adjoining [CoalesceBatchesExec] or [RepartitionExec]. They won't have any effect +/// if one runs right after another. +/// +/// This rule is expected to be run in the final stage of the optimization process. +pub struct RemoveDuplicate; + +impl PhysicalOptimizerRule for RemoveDuplicate { + fn optimize( + &self, + plan: DfPhysicalPlanRef, + _config: &ConfigOptions, + ) -> DfResult { + Self::do_optimize(plan) + } + + fn name(&self) -> &str { + "RemoveDuplicateRule" + } + + fn schema_check(&self) -> bool { + false + } +} + +impl RemoveDuplicate { + fn do_optimize(plan: DfPhysicalPlanRef) -> DfResult { + let result = plan + .transform_down_mut(&mut |plan| { + if plan.as_any().is::() + || plan.as_any().is::() + { + // check child + let child = plan.children()[0].clone(); + if child.as_any().type_id() == plan.as_any().type_id() { + // remove child + let grand_child = child.children()[0].clone(); + let new_plan = plan.with_new_children(vec![grand_child])?; + return Ok(Transformed::yes(new_plan)); + } + } + + Ok(Transformed::no(plan)) + })? + .data; + + Ok(result) + } +} + +#[cfg(test)] +mod test { + use std::sync::Arc; + + use arrow_schema::Schema; + use datafusion::physical_plan::displayable; + use datafusion::physical_plan::empty::EmptyExec; + use datafusion_physical_expr::Partitioning; + + use super::*; + + #[test] + fn remove_coalesce_batches() { + let empty = Arc::new(EmptyExec::new(Arc::new(Schema::empty()))); + let coalesce_batches = Arc::new(CoalesceBatchesExec::new(empty, 1024)); + let another_coalesce_batches = Arc::new(CoalesceBatchesExec::new(coalesce_batches, 8192)); + + let optimized = RemoveDuplicate::do_optimize(another_coalesce_batches).unwrap(); + let formatted = displayable(optimized.as_ref()).indent(true).to_string(); + let expected = "CoalesceBatchesExec: target_batch_size=8192\ + \n EmptyExec\n"; + + assert_eq!(expected, formatted); + } + + #[test] + fn non_continuous_coalesce_batches() { + let empty = Arc::new(EmptyExec::new(Arc::new(Schema::empty()))); + let coalesce_batches = Arc::new(CoalesceBatchesExec::new(empty, 1024)); + let repartition = Arc::new( + RepartitionExec::try_new(coalesce_batches, Partitioning::UnknownPartitioning(1)) + .unwrap(), + ); + let another_coalesce_batches = Arc::new(CoalesceBatchesExec::new(repartition, 8192)); + + let optimized = RemoveDuplicate::do_optimize(another_coalesce_batches).unwrap(); + let formatted = displayable(optimized.as_ref()).indent(true).to_string(); + let expected = "CoalesceBatchesExec: target_batch_size=8192\ + \n RepartitionExec: partitioning=UnknownPartitioning(1), input_partitions=1\ + \n CoalesceBatchesExec: target_batch_size=1024\ + \n EmptyExec\n"; + + assert_eq!(expected, formatted); + } +} diff --git a/src/query/src/query_engine/state.rs b/src/query/src/query_engine/state.rs index 96e88255188f..3fa44a280617 100644 --- a/src/query/src/query_engine/state.rs +++ b/src/query/src/query_engine/state.rs @@ -30,6 +30,7 @@ use 
datafusion::dataframe::DataFrame; use datafusion::error::Result as DfResult; use datafusion::execution::context::{QueryPlanner, SessionConfig, SessionState}; use datafusion::execution::runtime_env::RuntimeEnv; +use datafusion::physical_optimizer::optimizer::PhysicalOptimizer; use datafusion::physical_plan::ExecutionPlan; use datafusion::physical_planner::{DefaultPhysicalPlanner, ExtensionPlanner, PhysicalPlanner}; use datafusion_expr::LogicalPlan as DfLogicalPlan; @@ -42,7 +43,9 @@ use table::table::adapter::DfTableProviderAdapter; use table::TableRef; use crate::dist_plan::{DistExtensionPlanner, DistPlannerAnalyzer}; +use crate::optimizer::count_wildcard::CountWildcardToTimeIndexRule; use crate::optimizer::order_hint::OrderHintRule; +use crate::optimizer::remove_duplicate::RemoveDuplicate; use crate::optimizer::string_normalization::StringNormalizationRule; use crate::optimizer::type_conversion::TypeConversionRule; use crate::optimizer::ExtensionAnalyzerRule; @@ -87,19 +90,31 @@ impl QueryEngineState { let session_config = SessionConfig::new().with_create_default_catalog_and_schema(false); // Apply extension rules let mut extension_rules = Vec::new(); + // The [`TypeConversionRule`] must be at first extension_rules.insert(0, Arc::new(TypeConversionRule) as _); + // Apply the datafusion rules let mut analyzer = Analyzer::new(); analyzer.rules.insert(0, Arc::new(StringNormalizationRule)); + + // Use our custom rule instead to optimize the count(*) query Self::remove_analyzer_rule(&mut analyzer.rules, CountWildcardRule {}.name()); - analyzer.rules.insert(0, Arc::new(CountWildcardRule {})); + analyzer + .rules + .insert(0, Arc::new(CountWildcardToTimeIndexRule)); + if with_dist_planner { analyzer.rules.push(Arc::new(DistPlannerAnalyzer)); } + let mut optimizer = Optimizer::new(); optimizer.rules.push(Arc::new(OrderHintRule)); + // add physical optimizer + let mut physical_optimizer = PhysicalOptimizer::new(); + physical_optimizer.rules.push(Arc::new(RemoveDuplicate)); + let session_state = SessionState::new_with_config_rt(session_config, runtime_env) .with_serializer_registry(Arc::new(ExtensionSerializer)) .with_analyzer_rules(analyzer.rules) @@ -107,7 +122,8 @@ impl QueryEngineState { catalog_list.clone(), region_query_handler, ))) - .with_optimizer_rules(optimizer.rules); + .with_optimizer_rules(optimizer.rules) + .with_physical_optimizer_rules(physical_optimizer.rules); let df_context = SessionContext::new_with_state(session_state); diff --git a/src/query/src/range_select/plan.rs b/src/query/src/range_select/plan.rs index d31097efc0e9..73be735d4de3 100644 --- a/src/query/src/range_select/plan.rs +++ b/src/query/src/range_select/plan.rs @@ -667,7 +667,13 @@ impl RangeSelect { .range_expr .iter() .map(|range_fn| { - let expr = match &range_fn.expr { + let name = range_fn.expr.display_name()?; + let range_expr = match &range_fn.expr { + Expr::Alias(expr) => expr.expr.as_ref(), + others => others, + }; + + let expr = match &range_expr { Expr::AggregateFunction( aggr @ datafusion_expr::expr::AggregateFunction { func_def: @@ -778,7 +784,7 @@ impl RangeSelect { &input_phy_exprs, &order_by, &input_schema, - range_fn.expr.display_name()?, + name, false, ), AggregateFunctionDefinition::UDF(fun) => create_aggr_udf_expr( @@ -787,7 +793,7 @@ impl RangeSelect { &[], &[], &input_schema, - range_fn.expr.display_name()?, + name, false, ), f => Err(DataFusionError::NotImplemented(format!( @@ -796,8 +802,8 @@ impl RangeSelect { } } _ => Err(DataFusionError::Plan(format!( - "Unexpected Expr:{} in 
RangeSelect", - range_fn.expr.display_name()? + "Unexpected Expr: {} in RangeSelect", + range_fn.expr.canonical_name() ))), }?; let args = expr.expressions(); diff --git a/src/servers/Cargo.toml b/src/servers/Cargo.toml index 72f45e957405..27920758e719 100644 --- a/src/servers/Cargo.toml +++ b/src/servers/Cargo.toml @@ -125,6 +125,8 @@ serde_json.workspace = true session = { workspace = true, features = ["testing"] } table.workspace = true tempfile = "3.0.0" +# TODO depend `Database` client +tests-integration.workspace = true tokio-postgres = "0.7" tokio-postgres-rustls = "0.11" tokio-test = "0.4" diff --git a/src/servers/src/lib.rs b/src/servers/src/lib.rs index c2246823faf3..ad3f566b2f04 100644 --- a/src/servers/src/lib.rs +++ b/src/servers/src/lib.rs @@ -39,8 +39,7 @@ pub mod prom_store; pub mod prometheus_handler; pub mod proto; pub mod query_handler; -#[allow(clippy::all)] -mod repeated_field; +pub mod repeated_field; mod row_writer; pub mod server; mod shutdown; diff --git a/src/servers/src/repeated_field.rs b/src/servers/src/repeated_field.rs index 8346427a3368..1b8f80bea274 100644 --- a/src/servers/src/repeated_field.rs +++ b/src/servers/src/repeated_field.rs @@ -18,8 +18,9 @@ // OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE // OR OTHER DEALINGS IN THE SOFTWARE. -// The [Clear] trait and [RepeatedField] are taken from [rust-protobuf](https://github.com/stepancheg/rust-protobuf/tree/master/protobuf-examples/vs-prost) -// to leverage the pooling mechanism to avoid frequent heap allocation/deallocation when decoding deeply nested structs. +// The Clear trait is copied from https://github.com/stepancheg/rust-protobuf/blob/v2.28.0/protobuf/src/clear.rs +// The RepeatedField struct is copied from https://github.com/stepancheg/rust-protobuf/blob/v2.28.0/protobuf/src/repeated.rs +// This code is to leverage the pooling mechanism to avoid frequent heap allocation/de-allocation when decoding deeply nested structs. use std::borrow::Borrow; use std::cmp::Ordering; @@ -74,6 +75,12 @@ impl RepeatedField { self.len } + /// Returns true if this container is empty. + #[inline] + pub fn is_empty(&self) -> bool { + self.len == 0 + } + /// Clear. #[inline] pub fn clear(&mut self) { @@ -121,13 +128,13 @@ impl RepeatedField { /// View data as slice. #[inline] - pub fn as_slice<'a>(&'a self) -> &'a [T] { + pub fn as_slice(&self) -> &[T] { &self.vec[..self.len] } /// View data as mutable slice. #[inline] - pub fn as_mut_slice<'a>(&'a mut self) -> &'a mut [T] { + pub fn as_mut_slice(&mut self) -> &mut [T] { &mut self.vec[..self.len] } @@ -169,13 +176,13 @@ impl RepeatedField { /// View this container as two slices split at given index. #[inline] - pub fn split_at<'a>(&'a self, mid: usize) -> (&'a [T], &'a [T]) { + pub fn split_at(&self, mid: usize) -> (&[T], &[T]) { self.as_ref().split_at(mid) } /// View this container as two mutable slices split at given index. #[inline] - pub fn split_at_mut<'a>(&'a mut self, mid: usize) -> (&'a mut [T], &'a mut [T]) { + pub fn split_at_mut(&mut self, mid: usize) -> (&mut [T], &mut [T]) { self.as_mut_slice().split_at_mut(mid) } @@ -193,13 +200,13 @@ impl RepeatedField { /// Mutable last element of this container. #[inline] - pub fn last_mut<'a>(&'a mut self) -> Option<&'a mut T> { + pub fn last_mut(&mut self) -> Option<&mut T> { self.as_mut_slice().last_mut() } /// View all but last elements of this container. 
#[inline] - pub fn init<'a>(&'a self) -> &'a [T] { + pub fn init(&self) -> &[T] { let s = self.as_ref(); &s[0..s.len() - 1] } @@ -252,7 +259,7 @@ impl RepeatedField { /// # Examples /// /// ``` - /// # use protobuf::RepeatedField; + /// use servers::repeated_field::RepeatedField; /// /// let mut vec = RepeatedField::from(vec![1, 2, 3, 4]); /// vec.retain(|&x| x % 2 == 0); @@ -282,22 +289,15 @@ impl RepeatedField { self.as_mut_slice().reverse() } - /// Into owned iterator. - #[inline] - pub fn into_iter(mut self) -> vec::IntoIter { - self.vec.truncate(self.len); - self.vec.into_iter() - } - /// Immutable data iterator. #[inline] - pub fn iter<'a>(&'a self) -> slice::Iter<'a, T> { + pub fn iter(&self) -> slice::Iter { self.as_ref().iter() } /// Mutable data iterator. #[inline] - pub fn iter_mut<'a>(&'a mut self) -> slice::IterMut<'a, T> { + pub fn iter_mut(&mut self) -> slice::IterMut { self.as_mut_slice().iter_mut() } @@ -327,7 +327,7 @@ impl RepeatedField { /// Push default value. /// This operation could be faster than `rf.push(Default::default())`, /// because it may reuse previously allocated and cleared element. - pub fn push_default<'a>(&'a mut self) -> &'a mut T { + pub fn push_default(&mut self) -> &mut T { if self.len == self.vec.len() { self.vec.push(Default::default()); } else { @@ -352,10 +352,10 @@ impl<'a, T: Clone> From<&'a [T]> for RepeatedField { } } -impl Into> for RepeatedField { +impl From> for Vec { #[inline] - fn into(self) -> Vec { - self.into_vec() + fn from(val: RepeatedField) -> Self { + val.into_vec() } } @@ -414,12 +414,13 @@ impl<'a, T> IntoIterator for &'a mut RepeatedField { } } -impl<'a, T> IntoIterator for RepeatedField { +impl IntoIterator for RepeatedField { type Item = T; type IntoIter = vec::IntoIter; - fn into_iter(self) -> vec::IntoIter { - self.into_iter() + fn into_iter(mut self) -> vec::IntoIter { + self.vec.truncate(self.len); + self.vec.into_iter() } } @@ -460,7 +461,7 @@ impl Hash for RepeatedField { impl AsRef<[T]> for RepeatedField { #[inline] - fn as_ref<'a>(&'a self) -> &'a [T] { + fn as_ref(&self) -> &[T] { &self.vec[..self.len] } } @@ -491,14 +492,14 @@ impl Index for RepeatedField { type Output = T; #[inline] - fn index<'a>(&'a self, index: usize) -> &'a T { + fn index(&self, index: usize) -> &T { &self.as_ref()[index] } } impl IndexMut for RepeatedField { #[inline] - fn index_mut<'a>(&'a mut self, index: usize) -> &'a mut T { + fn index_mut(&mut self, index: usize) -> &mut T { &mut self.as_mut_slice()[index] } } diff --git a/src/servers/tests/grpc/mod.rs b/src/servers/tests/grpc/mod.rs index 701dce7419f5..183fabb5d44e 100644 --- a/src/servers/tests/grpc/mod.rs +++ b/src/servers/tests/grpc/mod.rs @@ -21,7 +21,7 @@ use arrow_flight::flight_service_server::{FlightService, FlightServiceServer}; use async_trait::async_trait; use auth::tests::MockUserProvider; use auth::UserProviderRef; -use client::{Client, Database, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME}; +use client::{Client, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME}; use common_runtime::{Builder as RuntimeBuilder, Runtime}; use servers::error::{Result, StartGrpcSnafu, TcpBindSnafu}; use servers::grpc::flight::FlightCraftWrapper; @@ -31,6 +31,7 @@ use servers::server::Server; use snafu::ResultExt; use table::test_util::MemTable; use table::TableRef; +use tests_integration::database::Database; use tokio::net::TcpListener; use tokio_stream::wrappers::TcpListenerStream; diff --git a/src/sql/src/parsers/alter_parser.rs b/src/sql/src/parsers/alter_parser.rs index 687604e37022..cf3e1d0fb8ed 
100644 --- a/src/sql/src/parsers/alter_parser.rs +++ b/src/sql/src/parsers/alter_parser.rs @@ -30,24 +30,24 @@ impl<'a> ParserContext<'a> { } fn parse_alter_table(&mut self) -> std::result::Result { - let parser = &mut self.parser; - parser.expect_keywords(&[Keyword::ALTER, Keyword::TABLE])?; + self.parser + .expect_keywords(&[Keyword::ALTER, Keyword::TABLE])?; - let raw_table_name = parser.parse_object_name(false)?; + let raw_table_name = self.parser.parse_object_name(false)?; let table_name = Self::canonicalize_object_name(raw_table_name); - let alter_operation = if parser.parse_keyword(Keyword::ADD) { - if let Some(constraint) = parser.parse_optional_table_constraint()? { + let alter_operation = if self.parser.parse_keyword(Keyword::ADD) { + if let Some(constraint) = self.parser.parse_optional_table_constraint()? { AlterTableOperation::AddConstraint(constraint) } else { - let _ = parser.parse_keyword(Keyword::COLUMN); - let mut column_def = parser.parse_column_def()?; + let _ = self.parser.parse_keyword(Keyword::COLUMN); + let mut column_def = self.parser.parse_column_def()?; column_def.name = Self::canonicalize_identifier(column_def.name); - let location = if parser.parse_keyword(Keyword::FIRST) { + let location = if self.parser.parse_keyword(Keyword::FIRST) { Some(AddColumnLocation::First) - } else if let Token::Word(word) = parser.peek_token().token { + } else if let Token::Word(word) = self.parser.peek_token().token { if word.value.to_ascii_uppercase() == "AFTER" { - let _ = parser.next_token(); + let _ = self.parser.next_token(); let name = Self::canonicalize_identifier(self.parse_identifier()?); Some(AddColumnLocation::After { column_name: name.value, @@ -63,17 +63,26 @@ impl<'a> ParserContext<'a> { location, } } - } else if parser.parse_keyword(Keyword::DROP) { - if parser.parse_keyword(Keyword::COLUMN) { + } else if self.parser.parse_keyword(Keyword::DROP) { + if self.parser.parse_keyword(Keyword::COLUMN) { let name = Self::canonicalize_identifier(self.parse_identifier()?); AlterTableOperation::DropColumn { name } } else { return Err(ParserError::ParserError(format!( "expect keyword COLUMN after ALTER TABLE DROP, found {}", - parser.peek_token() + self.parser.peek_token() ))); } - } else if parser.parse_keyword(Keyword::RENAME) { + } else if self.consume_token("MODIFY") { + let _ = self.parser.parse_keyword(Keyword::COLUMN); + let column_name = Self::canonicalize_identifier(self.parser.parse_identifier(false)?); + let target_type = self.parser.parse_data_type()?; + + AlterTableOperation::ChangeColumnType { + column_name, + target_type, + } + } else if self.parser.parse_keyword(Keyword::RENAME) { let new_table_name_obj_raw = self.parse_object_name()?; let new_table_name_obj = Self::canonicalize_object_name(new_table_name_obj_raw); let new_table_name = match &new_table_name_obj.0[..] 
{ @@ -87,8 +96,8 @@ impl<'a> ParserContext<'a> { AlterTableOperation::RenameTable { new_table_name } } else { return Err(ParserError::ParserError(format!( - "expect keyword ADD or DROP or RENAME after ALTER TABLE, found {}", - parser.peek_token() + "expect keyword ADD or DROP or MODIFY or RENAME after ALTER TABLE, found {}", + self.parser.peek_token() ))); }; Ok(AlterTable::new(table_name, alter_operation)) @@ -253,6 +262,52 @@ mod tests { } } + #[test] + fn test_parse_alter_change_column_type() { + let sql_1 = "ALTER TABLE my_metric_1 MODIFY COLUMN a STRING"; + let result_1 = ParserContext::create_with_dialect( + sql_1, + &GreptimeDbDialect {}, + ParseOptions::default(), + ) + .unwrap(); + + let sql_2 = "ALTER TABLE my_metric_1 MODIFY a STRING"; + let mut result_2 = ParserContext::create_with_dialect( + sql_2, + &GreptimeDbDialect {}, + ParseOptions::default(), + ) + .unwrap(); + assert_eq!(result_1, result_2); + assert_eq!(1, result_2.len()); + + let statement = result_2.remove(0); + assert_matches!(statement, Statement::Alter { .. }); + match statement { + Statement::Alter(alter_table) => { + assert_eq!("my_metric_1", alter_table.table_name().0[0].value); + + let alter_operation = alter_table.alter_operation(); + assert_matches!( + alter_operation, + AlterTableOperation::ChangeColumnType { .. } + ); + match alter_operation { + AlterTableOperation::ChangeColumnType { + column_name, + target_type, + } => { + assert_eq!("a", column_name.value); + assert_eq!(DataType::String(None), *target_type); + } + _ => unreachable!(), + } + } + _ => unreachable!(), + } + } + #[test] fn test_parse_alter_rename_table() { let sql = "ALTER TABLE test_table table_t"; @@ -260,7 +315,7 @@ mod tests { ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default()) .unwrap_err(); let err = result.output_msg(); - assert!(err.contains("expect keyword ADD or DROP or RENAME after ALTER TABLE")); + assert!(err.contains("expect keyword ADD or DROP or MODIFY or RENAME after ALTER TABLE")); let sql = "ALTER TABLE test_table RENAME table_t"; let mut result = diff --git a/src/sql/src/statements/alter.rs b/src/sql/src/statements/alter.rs index a54ba2d41b74..41a0997ecdb3 100644 --- a/src/sql/src/statements/alter.rs +++ b/src/sql/src/statements/alter.rs @@ -15,7 +15,7 @@ use std::fmt::{Debug, Display}; use common_query::AddColumnLocation; -use sqlparser::ast::{ColumnDef, Ident, ObjectName, TableConstraint}; +use sqlparser::ast::{ColumnDef, DataType, Ident, ObjectName, TableConstraint}; use sqlparser_derive::{Visit, VisitMut}; #[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)] @@ -58,6 +58,11 @@ pub enum AlterTableOperation { column_def: ColumnDef, location: Option<AddColumnLocation>, }, + /// `MODIFY <column_name> [target_type]` + ChangeColumnType { + column_name: Ident, + target_type: DataType, + }, /// `DROP COLUMN <name>` DropColumn { name: Ident }, /// `RENAME <new_table_name>` @@ -82,6 +87,12 @@ impl Display for AlterTableOperation { AlterTableOperation::RenameTable { new_table_name } => { write!(f, r#"RENAME {new_table_name}"#) } + AlterTableOperation::ChangeColumnType { + column_name, + target_type, + } => { + write!(f, r#"MODIFY COLUMN {column_name} {target_type}"#) + } } } } @@ -117,6 +128,27 @@ ALTER TABLE monitor ADD COLUMN app STRING DEFAULT 'shop' PRIMARY KEY"#, } } + let sql = r"alter table monitor modify column load_15 string;"; + let stmts = + ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default()) + .unwrap(); + assert_eq!(1, stmts.len()); + assert_matches!(&stmts[0], Statement::Alter { ..
}); + + match &stmts[0] { + Statement::Alter(set) => { + let new_sql = format!("\n{}", set); + assert_eq!( + r#" +ALTER TABLE monitor MODIFY COLUMN load_15 STRING"#, + &new_sql + ); + } + _ => { + unreachable!(); + } + } + let sql = r"alter table monitor drop column load_15;"; let stmts = ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}, ParseOptions::default()) diff --git a/src/store-api/src/region_request.rs b/src/store-api/src/region_request.rs index 58210e45c2b7..6c4aaa28f47c 100644 --- a/src/store-api/src/region_request.rs +++ b/src/store-api/src/region_request.rs @@ -13,8 +13,9 @@ // limitations under the License. use std::collections::HashMap; -use std::fmt::{self}; +use std::fmt; +use api::helper::ColumnDataTypeWrapper; use api::v1::add_column_location::LocationType; use api::v1::region::{ alter_request, region_request, AlterRequest, AlterRequests, CloseRequest, CompactRequest, @@ -457,13 +458,18 @@ impl TryFrom for AlterKind { .collect::>>()?; AlterKind::AddColumns { columns } } + alter_request::Kind::ChangeColumnTypes(x) => { + let columns = x + .change_column_types + .into_iter() + .map(|x| x.into()) + .collect::>(); + AlterKind::ChangeColumnTypes { columns } + } alter_request::Kind::DropColumns(x) => { let names = x.drop_columns.into_iter().map(|x| x.name).collect(); AlterKind::DropColumns { names } } - alter_request::Kind::ChangeColumnTypes(_) => { - unimplemented!() - } }; Ok(alter_kind) @@ -615,6 +621,21 @@ impl ChangeColumnType { } } +impl From for ChangeColumnType { + fn from(change_column_type: v1::ChangeColumnType) -> Self { + let target_type = ColumnDataTypeWrapper::new( + change_column_type.target_type(), + change_column_type.target_type_extension, + ) + .into(); + + ChangeColumnType { + column_name: change_column_type.column_name, + target_type, + } + } +} + #[derive(Debug, Default)] pub struct RegionFlushRequest { pub row_group_size: Option, diff --git a/tests-fuzz/Cargo.toml b/tests-fuzz/Cargo.toml index 03b708b9feb7..c7e733448988 100644 --- a/tests-fuzz/Cargo.toml +++ b/tests-fuzz/Cargo.toml @@ -10,6 +10,10 @@ workspace = true [package.metadata] cargo-fuzz = true +[features] +default = [] +unstable = ["nix"] + [dependencies] arbitrary = { version = "1.3.0", features = ["derive"] } async-trait = { workspace = true } @@ -24,9 +28,11 @@ derive_builder = { workspace = true } dotenv = "0.15" lazy_static = { workspace = true } libfuzzer-sys = "0.4" +nix = { version = "0.28", features = ["process", "signal"], optional = true } partition = { workspace = true } rand = { workspace = true } rand_chacha = "0.3.1" +reqwest = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } snafu = { workspace = true } @@ -38,10 +44,11 @@ sqlx = { version = "0.6", features = [ "postgres", "chrono", ] } +tinytemplate = "1.2" +tokio = { workspace = true } [dev-dependencies] dotenv.workspace = true -tokio = { workspace = true } [[bin]] name = "fuzz_create_table" @@ -64,6 +71,13 @@ test = false bench = false doc = false +[[bin]] +name = "fuzz_insert_logical_table" +path = "targets/fuzz_insert_logical_table.rs" +test = false +bench = false +doc = false + [[bin]] name = "fuzz_alter_table" path = "targets/fuzz_alter_table.rs" @@ -84,3 +98,11 @@ path = "targets/fuzz_create_database.rs" test = false bench = false doc = false + +[[bin]] +name = "unstable_fuzz_create_table_standalone" +path = "targets/unstable/fuzz_create_table_standalone.rs" +test = false +bench = false +doc = false +required-features = ["unstable"] diff --git a/tests-fuzz/README.md 
b/tests-fuzz/README.md index c1e2147fb4bd..780107a65002 100644 --- a/tests-fuzz/README.md +++ b/tests-fuzz/README.md @@ -9,6 +9,22 @@ cargo install cargo-fuzz 2. Start GreptimeDB 3. Copy the `.env.example`, which is at the project root, to `.env` and change the values as needed. +### For stable fuzz tests +Set the GreptimeDB MySQL address: +``` +GT_MYSQL_ADDR = localhost:4002 +``` + +### For unstable fuzz tests +Set the path of the GreptimeDB binary: +``` +GT_FUZZ_BINARY_PATH = /path/to/ +``` + +Change the instance root directory (default: `/tmp/unstable_greptime/`): +``` +GT_FUZZ_INSTANCE_ROOT_DIR = /path/to/ +``` ## Run 1. List all fuzz targets ```bash diff --git a/tests-fuzz/conf/standalone.template.toml b/tests-fuzz/conf/standalone.template.toml new file mode 100644 index 000000000000..f0ddc38d048e --- /dev/null +++ b/tests-fuzz/conf/standalone.template.toml @@ -0,0 +1,23 @@ +mode = 'standalone' +enable_memory_catalog = false +require_lease_before_startup = true + +[wal] +provider = "raft_engine" +file_size = '1GB' +purge_interval = '10m' +purge_threshold = '10GB' +read_batch_size = 128 +sync_write = false + +[storage] +type = 'File' +data_home = '{data_home}' + +[grpc_options] +addr = '127.0.0.1:4001' +runtime_size = 8 + +[procedure] +max_retry_times = 3 +retry_delay = "500ms" diff --git a/tests-fuzz/src/error.rs b/tests-fuzz/src/error.rs index add82c854187..72033b257521 100644 --- a/tests-fuzz/src/error.rs +++ b/tests-fuzz/src/error.rs @@ -16,6 +16,8 @@ use common_macro::stack_trace_debug; use snafu::{Location, Snafu}; use crate::ir::create_expr::{CreateDatabaseExprBuilderError, CreateTableExprBuilderError}; +#[cfg(feature = "unstable")] +use crate::utils::process::Pid; pub type Result<T> = std::result::Result<T, Error>; @@ -23,6 +25,22 @@ pub type Result<T> = std::result::Result<T, Error>; #[snafu(visibility(pub))] #[stack_trace_debug] pub enum Error { + #[snafu(display("Failed to create a file: {}", path))] + CreateFile { + path: String, + location: Location, + #[snafu(source)] + error: std::io::Error, + }, + + #[snafu(display("Failed to write a file: {}", path))] + WriteFile { + path: String, + location: Location, + #[snafu(source)] + error: std::io::Error, + }, + #[snafu(display("Unexpected, violated: {violated}"))] Unexpected { violated: String, @@ -56,4 +74,23 @@ pub enum Error { #[snafu(display("Failed to assert: {}", reason))] Assert { reason: String, location: Location }, + + #[snafu(display("Child process exited unexpectedly"))] + UnexpectedExited { location: Location }, + + #[snafu(display("Failed to spawn a child process"))] + SpawnChild { + location: Location, + #[snafu(source)] + error: std::io::Error, + }, + + #[cfg(feature = "unstable")] + #[snafu(display("Failed to kill a process, pid: {}", pid))] + KillProcess { + location: Location, + #[snafu(source)] + error: nix::Error, + pid: Pid, + }, } diff --git a/tests-fuzz/src/generator/insert_expr.rs b/tests-fuzz/src/generator/insert_expr.rs index f3f0dba11646..5af3289c0436 100644 --- a/tests-fuzz/src/generator/insert_expr.rs +++ b/tests-fuzz/src/generator/insert_expr.rs @@ -31,6 +31,8 @@ use crate::ir::{generate_random_value, Ident}; #[builder(pattern = "owned")] pub struct InsertExprGenerator { table_ctx: TableContextRef, + // Whether to omit all columns, i.e. INSERT INTO table_name VALUES (...) + omit_column_list: bool, #[builder(default = "1")] rows: usize, #[builder(default = "Box::new(WordGenerator)")] @@ -44,11 +46,8 @@ impl<R: Rng + 'static> Generator<InsertIntoExpr, R> for InsertExprGenerator { /// Generates the [InsertIntoExpr].
fn generate(&self, rng: &mut R) -> Result { - // Whether to omit all columns, i.e. INSERT INTO table_name VALUES (...) - let omit_column_list = rng.gen_bool(0.2); - let mut values_columns = vec![]; - if omit_column_list { + if self.omit_column_list { // If omit column list, then all columns are required in the values list values_columns.clone_from(&self.table_ctx.columns); } else { @@ -94,7 +93,7 @@ impl Generator for InsertExprGenerator { Ok(InsertIntoExpr { table_name: self.table_ctx.name.to_string(), - columns: if omit_column_list { + columns: if self.omit_column_list { vec![] } else { values_columns diff --git a/tests-fuzz/src/translator/mysql/insert_expr.rs b/tests-fuzz/src/translator/mysql/insert_expr.rs index 49ff192afb14..0e2252cbc54a 100644 --- a/tests-fuzz/src/translator/mysql/insert_expr.rs +++ b/tests-fuzz/src/translator/mysql/insert_expr.rs @@ -71,7 +71,7 @@ impl InsertIntoExprTranslator { mod tests { use std::sync::Arc; - use rand::SeedableRng; + use rand::{Rng, SeedableRng}; use super::*; use crate::generator::insert_expr::InsertExprGeneratorBuilder; @@ -82,10 +82,12 @@ mod tests { #[test] fn test_insert_into_translator() { let mut rng = rand_chacha::ChaCha8Rng::seed_from_u64(0); + let omit_column_list = rng.gen_bool(0.2); let test_ctx = test_utils::new_test_ctx(); let insert_expr_generator = InsertExprGeneratorBuilder::default() .table_ctx(Arc::new(test_ctx)) + .omit_column_list(omit_column_list) .rows(2) .build() .unwrap(); @@ -100,16 +102,16 @@ mod tests { let insert_expr = insert_expr_generator.generate(&mut rng).unwrap(); let output = InsertIntoExprTranslator.translate(&insert_expr).unwrap(); - let expected = r#"INSERT INTO test (cpu_util, disk_util, ts) VALUES -(0.7074194466620976, 0.661288102315126, '-47252-05-08 07:33:49.567+0000'), -(0.8266101224213618, 0.7947724277743285, '-224292-12-07 02:51:53.371+0000');"#; + let expected = r#"INSERT INTO test (ts, memory_util) VALUES +('+22606-05-02 04:44:02.976+0000', 0.7074194466620976), +('+33689-06-12 08:42:11.037+0000', 0.40987428386535585);"#; assert_eq!(output, expected); let insert_expr = insert_expr_generator.generate(&mut rng).unwrap(); let output = InsertIntoExprTranslator.translate(&insert_expr).unwrap(); - let expected = r#"INSERT INTO test VALUES -('odio', NULL, 0.48809950435391647, 0.5228925709595407, 0.9091528874275897, '+241156-12-16 20:52:15.185+0000'), -('dignissimos', 'labore', NULL, 0.12983559048685023, 0.6362040919831425, '-30691-06-17 23:41:09.938+0000');"#; + let expected = r#"INSERT INTO test (ts, disk_util, cpu_util, host) VALUES +('+200107-10-22 01:36:36.924+0000', 0.9082597320638828, 0.020853190804573818, 'voluptates'), +('+241156-12-16 20:52:15.185+0000', 0.6492772846116915, 0.18078027701087784, 'repellat');"#; assert_eq!(output, expected); } } diff --git a/tests-fuzz/src/utils.rs b/tests-fuzz/src/utils.rs index 7c50b0ac66cb..9156067b253e 100644 --- a/tests-fuzz/src/utils.rs +++ b/tests-fuzz/src/utils.rs @@ -12,21 +12,40 @@ // See the License for the specific language governing permissions and // limitations under the License. +pub mod config; +pub mod health; +#[cfg(feature = "unstable")] +pub mod process; + use std::env; use common_telemetry::info; use sqlx::mysql::MySqlPoolOptions; use sqlx::{MySql, Pool}; +/// Database connections pub struct Connections { pub mysql: Option>, } const GT_MYSQL_ADDR: &str = "GT_MYSQL_ADDR"; -pub async fn init_greptime_connections() -> Connections { +/// Connects to GreptimeDB via env variables. 
+pub async fn init_greptime_connections_via_env() -> Connections { let _ = dotenv::dotenv(); let mysql = if let Ok(addr) = env::var(GT_MYSQL_ADDR) { + Some(addr) + } else { + info!("GT_MYSQL_ADDR is empty, ignoring test"); + None + }; + + init_greptime_connections(mysql).await +} + +/// Connects to GreptimeDB. +pub async fn init_greptime_connections(mysql: Option<String>) -> Connections { + let mysql = if let Some(addr) = mysql { Some( MySqlPoolOptions::new() .connect(&format!("mysql://{addr}/public")) @@ -34,9 +53,33 @@ pub async fn init_greptime_connections() -> Connections { .unwrap(), ) } else { - info!("GT_MYSQL_ADDR is empty, ignores test"); None }; Connections { mysql } } + +const GT_FUZZ_BINARY_PATH: &str = "GT_FUZZ_BINARY_PATH"; +const GT_FUZZ_INSTANCE_ROOT_DIR: &str = "GT_FUZZ_INSTANCE_ROOT_DIR"; + +/// The environment variables for unstable tests. +pub struct UnstableTestVariables { + pub binary_path: String, + pub root_dir: Option<String>, +} + +/// Loads the environment variables for unstable tests. +pub fn load_unstable_test_env_variables() -> UnstableTestVariables { + let _ = dotenv::dotenv(); + let binary_path = env::var(GT_FUZZ_BINARY_PATH).expect("GT_FUZZ_BINARY_PATH not found"); + let root_dir = if let Ok(root) = env::var(GT_FUZZ_INSTANCE_ROOT_DIR) { + Some(root) + } else { + None + }; + + UnstableTestVariables { + binary_path, + root_dir, + } +} diff --git a/tests-fuzz/src/utils/config.rs b/tests-fuzz/src/utils/config.rs new file mode 100644 index 000000000000..5692ff478fab --- /dev/null +++ b/tests-fuzz/src/utils/config.rs @@ -0,0 +1,58 @@ +// Copyright 2023 Greptime Team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::path::PathBuf; + +use common_telemetry::tracing::info; +use serde::Serialize; +use snafu::ResultExt; +use tinytemplate::TinyTemplate; +use tokio::fs::File; +use tokio::io::AsyncWriteExt; + +use crate::error; +use crate::error::Result; + +/// Gets the path of the config dir `tests-fuzz/conf`. +pub fn get_conf_path() -> PathBuf { + let mut root_path = PathBuf::from(env!("CARGO_MANIFEST_DIR")); + root_path.push("conf"); + root_path +} + +/// Returns the rendered config file. +pub fn render_config_file<C: Serialize>(template_path: &str, context: &C) -> String { + let mut tt = TinyTemplate::new(); + let template = std::fs::read_to_string(template_path).unwrap(); + tt.add_template(template_path, &template).unwrap(); + tt.render(template_path, context).unwrap() +} +
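Because `render_config_file` above is the sole TinyTemplate call site, a self-contained sketch of the same rendering step may help; the inlined template string stands in for `standalone.template.toml`, and the `data_home` value mirrors the target's default:

```rust
// Sketch of the TinyTemplate substitution performed by render_config_file;
// the template is inlined here instead of being read from disk.
use serde::Serialize;
use tinytemplate::TinyTemplate;

#[derive(Serialize)]
struct Context {
    data_home: String,
}

fn main() {
    let template = "[storage]\ntype = 'File'\ndata_home = '{data_home}'\n";
    let mut tt = TinyTemplate::new();
    tt.add_template("standalone", template).unwrap();
    let rendered = tt
        .render(
            "standalone",
            &Context {
                data_home: "/tmp/unstable_greptime/datahome/".to_string(),
            },
        )
        .unwrap();
    assert!(rendered.contains("data_home = '/tmp/unstable_greptime/datahome/'"));
}
```

+/// Writes the rendered config file to `output_path`.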
+pub async fn write_config_file<C: Serialize>( + template_path: &str, + context: &C, + output_path: &str, +) -> Result<()> { + info!("template_path: {template_path}, output_path: {output_path}"); + let content = render_config_file(template_path, context); + let mut config_file = File::create(output_path) + .await + .context(error::CreateFileSnafu { path: output_path })?; + config_file + .write_all(content.as_bytes()) + .await + .context(error::WriteFileSnafu { path: output_path })?; + Ok(()) +} diff --git a/tests-fuzz/src/utils/health.rs b/tests-fuzz/src/utils/health.rs new file mode 100644 index 000000000000..88f0c97321f1 --- /dev/null +++ b/tests-fuzz/src/utils/health.rs @@ -0,0 +1,57 @@ +// Copyright 2023 Greptime Team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::time::Duration; + +use crate::utils::info; + +/// Checks the health of the process. +#[async_trait::async_trait] +pub trait HealthChecker: Send + Sync { + async fn check(&self); + + fn wait_timeout(&self) -> Duration; +} + +/// HTTP health checker. +pub struct HttpHealthChecker { + pub url: String, +} + +#[async_trait::async_trait] +impl HealthChecker for HttpHealthChecker { + async fn check(&self) { + loop { + match reqwest::get(&self.url).await { + Ok(resp) => { + if resp.status() == 200 { + info!("Health checked!"); + return; + } + info!("Failed to check health, status: {}", resp.status()); + } + Err(err) => { + info!("Failed to check health, error: {err:?}"); + } + } + + info!("Checking health later..."); + tokio::time::sleep(Duration::from_secs(1)).await; + } + } + + fn wait_timeout(&self) -> Duration { + Duration::from_secs(5) + } +} diff --git a/tests-fuzz/src/utils/process.rs b/tests-fuzz/src/utils/process.rs new file mode 100644 index 000000000000..b3b03c042b2b --- /dev/null +++ b/tests-fuzz/src/utils/process.rs @@ -0,0 +1,264 @@ +// Copyright 2023 Greptime Team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::collections::HashMap; +use std::process::{ExitStatus, Stdio}; +use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::{Arc, Mutex}; +use std::time::Duration; + +use common_telemetry::{info, warn}; +use nix::sys::signal::Signal; +use rand::{Rng, SeedableRng}; +use rand_chacha::ChaChaRng; +use snafu::{ensure, ResultExt}; +use tokio::fs::OpenOptions; +use tokio::process::Child; + +use crate::error::{self, Result}; +use crate::utils::health::HealthChecker; + +pub type Pid = u32; + +/// The state of a process.
+#[derive(Debug, Clone)] +pub struct Process { + pub(crate) exit_status: Option<ExitStatus>, + pub(crate) exited: bool, +} + +/// ProcessManager provides the ability to spawn/wait/kill a child process. +#[derive(Debug, Clone)] +pub struct ProcessManager { + processes: Arc<Mutex<HashMap<Pid, Process>>>, +} + +/// The result passed to the callback when a child process exits. +pub type OnChildExitResult = std::result::Result<ExitStatus, std::io::Error>; + +impl Default for ProcessManager { + fn default() -> Self { + Self::new() + } +} + +impl ProcessManager { + pub fn new() -> Self { + Self { + processes: Arc::new(Default::default()), + } + } + + pub fn get(&self, pid: Pid) -> Option<Process> { + self.processes.lock().unwrap().get(&pid).cloned() + } + + fn wait<F>(&self, mut child: Child, f: F) + where + F: FnOnce(Pid, OnChildExitResult) + Send + 'static, + { + let processes = self.processes.clone(); + tokio::spawn(async move { + // Safety: caller checked + let pid = child.id().unwrap(); + let result = child.wait().await; + + match result { + Ok(code) => { + warn!("pid: {pid} exited with status: {}", code); + f(pid, Ok(code)); + processes.lock().unwrap().entry(pid).and_modify(|process| { + process.exit_status = Some(code); + process.exited = true; + }); + } + Err(err) => { + warn!("pid: {pid} exited with error: {}", err); + f(pid, Err(err)); + processes.lock().unwrap().entry(pid).and_modify(|process| { + process.exited = true; + }); + } + } + }); + } + + /// Spawns a new process. + pub fn spawn<T: Into<Stdio>, F>( + &self, + binary: &str, + args: &[String], + stdout: T, + stderr: T, + on_exit: F, + ) -> Result<Pid> + where + F: FnOnce(Pid, OnChildExitResult) + Send + 'static, + { + info!("starting {} with {:?}", binary, args); + let child = tokio::process::Command::new(binary) + .args(args) + .stdout(stdout) + .stderr(stderr) + .spawn() + .context(error::SpawnChildSnafu)?; + let pid = child.id(); + + if let Some(pid) = pid { + self.processes.lock().unwrap().insert( + pid, + Process { + exit_status: None, + exited: false, + }, + ); + + self.wait(child, on_exit); + Ok(pid) + } else { + error::UnexpectedExitedSnafu {}.fail() + } + } + + /// Kills a process via [Pid]. + pub fn kill<T: Into<Option<Signal>>>(pid: Pid, signal: T) -> Result<()> { + let signal: Option<Signal> = signal.into(); + info!("kill pid: {} signal: {:?}", pid, signal); + // Safety: checked. + nix::sys::signal::kill(nix::unistd::Pid::from_raw(pid as i32), signal) + .context(error::KillProcessSnafu { pid })?; + + Ok(()) + } +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum ProcessState { + NotSpawn, + Spawning, + HealthChecking(Pid), + Health(Pid), + Killing(Pid), + Exited(Pid), +} + +impl ProcessState { + /// Returns true if it's [ProcessState::Health]. + pub fn health(&self) -> bool { + matches!(self, ProcessState::Health(_)) + } +} + +/// The controller of an unstable process. +pub struct UnstableProcessController { + pub binary_path: String, + pub args: Vec<String>, + pub root_dir: String, + pub seed: u64, + pub process_manager: ProcessManager, + pub health_check: Box<dyn HealthChecker>, + pub sender: tokio::sync::watch::Sender<ProcessState>, + pub running: Arc<AtomicBool>, +} + +async fn path_to_stdio(path: &str) -> Result<std::fs::File> { + Ok(OpenOptions::new() + .create(true) + .truncate(true) + .read(true) + .write(true) + .open(path) + .await + .context(error::CreateFileSnafu { path })? + .into_std() + .await) +} + +impl UnstableProcessController { + /// Starts the unstable processes.
+ pub async fn start(&self) { + self.running.store(true, Ordering::Relaxed); + let mut rng = ChaChaRng::seed_from_u64(self.seed); + while self.running.load(Ordering::Relaxed) { + let min = rng.gen_range(50..100); + let max = rng.gen_range(300..600); + let ms = rng.gen_range(min..max); + let pid = self + .start_process_with_retry(3) + .await + .expect("Failed to start process"); + tokio::time::sleep(Duration::from_millis(ms)).await; + warn!("After {ms}ms, killing pid: {pid}"); + self.sender.send(ProcessState::Killing(pid)).unwrap(); + ProcessManager::kill(pid, Signal::SIGKILL).expect("Failed to kill"); + } + } + + pub fn stop(&self) { + self.running.store(false, Ordering::Relaxed); + } + + async fn start_process_with_retry(&self, max_retry: usize) -> Result { + for _ in 0..max_retry { + let pid = self.start_process().await.unwrap(); + let wait_timeout = self.health_check.wait_timeout(); + let result = tokio::time::timeout(wait_timeout, async { + self.sender.send(ProcessState::HealthChecking(pid)).unwrap(); + self.health_check.check().await; + }) + .await; + match result { + Ok(_) => { + self.sender.send(ProcessState::Health(pid)).unwrap(); + return Ok(pid); + } + Err(_) => { + ensure!( + self.process_manager.get(pid).unwrap().exited, + error::UnexpectedSnafu { + violated: format!("Failed to start process: pid: {pid}") + } + ); + self.sender.send(ProcessState::Exited(pid)).unwrap(); + // Retry alter + warn!("Wait for health checking timeout, retry later..."); + } + } + } + + error::UnexpectedSnafu { + violated: "Failed to start process", + } + .fail() + } + + async fn start_process(&self) -> Result { + let on_exit = move |pid, result| { + info!("The pid: {pid} exited, result: {result:?}"); + }; + let now = common_time::util::current_time_millis(); + let stdout = format!("{}stdout-{}", self.root_dir, now); + let stderr = format!("{}stderr-{}", self.root_dir, now); + let stdout = path_to_stdio(&stdout).await?; + let stderr = path_to_stdio(&stderr).await?; + self.sender.send(ProcessState::Spawning).unwrap(); + self.process_manager.spawn( + &self.binary_path, + &self.args.clone(), + stdout, + stderr, + on_exit, + ) + } +} diff --git a/tests-fuzz/targets/fuzz_alter_logical_table.rs b/tests-fuzz/targets/fuzz_alter_logical_table.rs index 3b0e25443097..57a773c56050 100644 --- a/tests-fuzz/targets/fuzz_alter_logical_table.rs +++ b/tests-fuzz/targets/fuzz_alter_logical_table.rs @@ -42,7 +42,7 @@ use tests_fuzz::ir::{ use tests_fuzz::translator::mysql::alter_expr::AlterTableExprTranslator; use tests_fuzz::translator::mysql::create_expr::CreateTableExprTranslator; use tests_fuzz::translator::DslTranslator; -use tests_fuzz::utils::{init_greptime_connections, Connections}; +use tests_fuzz::utils::{init_greptime_connections_via_env, Connections}; use tests_fuzz::validator; struct FuzzContext { @@ -229,7 +229,7 @@ async fn execute_alter_table(ctx: FuzzContext, input: FuzzInput) -> Result<()> { fuzz_target!(|input: FuzzInput| { common_telemetry::init_default_ut_logging(); common_runtime::block_on_write(async { - let Connections { mysql } = init_greptime_connections().await; + let Connections { mysql } = init_greptime_connections_via_env().await; let ctx = FuzzContext { greptime: mysql.expect("mysql connection init must be succeed"), }; diff --git a/tests-fuzz/targets/fuzz_alter_table.rs b/tests-fuzz/targets/fuzz_alter_table.rs index 3d345c2f16e7..a38e9d355a4b 100644 --- a/tests-fuzz/targets/fuzz_alter_table.rs +++ b/tests-fuzz/targets/fuzz_alter_table.rs @@ -39,7 +39,7 @@ use 
tests_fuzz::ir::{droppable_columns, AlterTableExpr, CreateTableExpr}; use tests_fuzz::translator::mysql::alter_expr::AlterTableExprTranslator; use tests_fuzz::translator::mysql::create_expr::CreateTableExprTranslator; use tests_fuzz::translator::DslTranslator; -use tests_fuzz::utils::{init_greptime_connections, Connections}; +use tests_fuzz::utils::{init_greptime_connections_via_env, Connections}; use tests_fuzz::validator; struct FuzzContext { @@ -174,7 +174,7 @@ async fn execute_alter_table(ctx: FuzzContext, input: FuzzInput) -> Result<()> { fuzz_target!(|input: FuzzInput| { common_telemetry::init_default_ut_logging(); common_runtime::block_on_write(async { - let Connections { mysql } = init_greptime_connections().await; + let Connections { mysql } = init_greptime_connections_via_env().await; let ctx = FuzzContext { greptime: mysql.expect("mysql connection init must be succeed"), }; diff --git a/tests-fuzz/targets/fuzz_create_database.rs b/tests-fuzz/targets/fuzz_create_database.rs index b59ed4fe8e69..7fd3f1c3d9c8 100644 --- a/tests-fuzz/targets/fuzz_create_database.rs +++ b/tests-fuzz/targets/fuzz_create_database.rs @@ -31,7 +31,7 @@ use tests_fuzz::generator::Generator; use tests_fuzz::ir::CreateDatabaseExpr; use tests_fuzz::translator::mysql::create_expr::CreateDatabaseExprTranslator; use tests_fuzz::translator::DslTranslator; -use tests_fuzz::utils::{init_greptime_connections, Connections}; +use tests_fuzz::utils::{init_greptime_connections_via_env, Connections}; struct FuzzContext { greptime: Pool, @@ -95,7 +95,7 @@ async fn execute_create_database(ctx: FuzzContext, input: FuzzInput) -> Result<( fuzz_target!(|input: FuzzInput| { common_telemetry::init_default_ut_logging(); common_runtime::block_on_write(async { - let Connections { mysql } = init_greptime_connections().await; + let Connections { mysql } = init_greptime_connections_via_env().await; let ctx = FuzzContext { greptime: mysql.expect("mysql connection init must be succeed"), }; diff --git a/tests-fuzz/targets/fuzz_create_logical_table.rs b/tests-fuzz/targets/fuzz_create_logical_table.rs index e66ea4518966..c54b8f9ab7ca 100644 --- a/tests-fuzz/targets/fuzz_create_logical_table.rs +++ b/tests-fuzz/targets/fuzz_create_logical_table.rs @@ -37,7 +37,7 @@ use tests_fuzz::generator::Generator; use tests_fuzz::ir::{primary_key_and_not_null_column_options_generator, Column}; use tests_fuzz::translator::mysql::create_expr::CreateTableExprTranslator; use tests_fuzz::translator::DslTranslator; -use tests_fuzz::utils::{init_greptime_connections, Connections}; +use tests_fuzz::utils::{init_greptime_connections_via_env, Connections}; use tests_fuzz::validator; struct FuzzContext { @@ -184,7 +184,7 @@ async fn execute_create_logic_table(ctx: FuzzContext, input: FuzzInput) -> Resul fuzz_target!(|input: FuzzInput| { common_telemetry::init_default_ut_logging(); common_runtime::block_on_write(async { - let Connections { mysql } = init_greptime_connections().await; + let Connections { mysql } = init_greptime_connections_via_env().await; let ctx = FuzzContext { greptime: mysql.expect("mysql connection init must be succeed"), }; diff --git a/tests-fuzz/targets/fuzz_create_table.rs b/tests-fuzz/targets/fuzz_create_table.rs index ae43e6d6966f..0eb29ec7c6dd 100644 --- a/tests-fuzz/targets/fuzz_create_table.rs +++ b/tests-fuzz/targets/fuzz_create_table.rs @@ -31,7 +31,7 @@ use tests_fuzz::generator::Generator; use tests_fuzz::ir::CreateTableExpr; use tests_fuzz::translator::mysql::create_expr::CreateTableExprTranslator; use 
tests_fuzz::translator::DslTranslator; -use tests_fuzz::utils::{init_greptime_connections, Connections}; +use tests_fuzz::utils::{init_greptime_connections_via_env, Connections}; use tests_fuzz::validator; struct FuzzContext { @@ -111,7 +111,7 @@ async fn execute_create_table(ctx: FuzzContext, input: FuzzInput) -> Result<()> fuzz_target!(|input: FuzzInput| { common_telemetry::init_default_ut_logging(); common_runtime::block_on_write(async { - let Connections { mysql } = init_greptime_connections().await; + let Connections { mysql } = init_greptime_connections_via_env().await; let ctx = FuzzContext { greptime: mysql.expect("mysql connection init must be succeed"), }; diff --git a/tests-fuzz/targets/fuzz_insert.rs b/tests-fuzz/targets/fuzz_insert.rs index 7fc6d30a237a..3f133b289424 100644 --- a/tests-fuzz/targets/fuzz_insert.rs +++ b/tests-fuzz/targets/fuzz_insert.rs @@ -36,7 +36,7 @@ use tests_fuzz::ir::{CreateTableExpr, InsertIntoExpr}; use tests_fuzz::translator::mysql::create_expr::CreateTableExprTranslator; use tests_fuzz::translator::mysql::insert_expr::InsertIntoExprTranslator; use tests_fuzz::translator::DslTranslator; -use tests_fuzz::utils::{init_greptime_connections, Connections}; +use tests_fuzz::utils::{init_greptime_connections_via_env, Connections}; struct FuzzContext { greptime: Pool, @@ -90,8 +90,11 @@ fn generate_insert_expr( rng: &mut R, table_ctx: TableContextRef, ) -> Result { + let omit_column_list = rng.gen_bool(0.2); + let insert_generator = InsertExprGeneratorBuilder::default() .table_ctx(table_ctx) + .omit_column_list(omit_column_list) .rows(input.rows) .build() .unwrap(); @@ -152,7 +155,7 @@ async fn execute_insert(ctx: FuzzContext, input: FuzzInput) -> Result<()> { fuzz_target!(|input: FuzzInput| { common_telemetry::init_default_ut_logging(); common_runtime::block_on_write(async { - let Connections { mysql } = init_greptime_connections().await; + let Connections { mysql } = init_greptime_connections_via_env().await; let ctx = FuzzContext { greptime: mysql.expect("mysql connection init must be succeed"), }; diff --git a/tests-fuzz/targets/fuzz_insert_logical_table.rs b/tests-fuzz/targets/fuzz_insert_logical_table.rs new file mode 100644 index 000000000000..47f53386a859 --- /dev/null +++ b/tests-fuzz/targets/fuzz_insert_logical_table.rs @@ -0,0 +1,202 @@ +// Copyright 2023 Greptime Team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
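Like the existing targets, the new `fuzz_insert_logical_table` target below derives every generation decision from a single `u64` seed pulled out of the raw fuzz bytes, which keeps libfuzzer crash artifacts replayable. A condensed sketch of that property, with a hypothetical helper name standing in for the derivation step inside `FuzzInput::arbitrary`:

```rust
// Sketch: fuzz inputs carry a seed so the same seed always yields the same
// derived parameters, making crashing inputs deterministically replayable.
use rand::{Rng, SeedableRng};
use rand_chacha::ChaChaRng;

// Hypothetical helper mirroring the derivation done in FuzzInput::arbitrary.
fn derive_rows(seed: u64) -> usize {
    let mut rng = ChaChaRng::seed_from_u64(seed);
    rng.gen_range(1..4096)
}

fn main() {
    // Deterministic: replaying a crashing input reproduces the exact case.
    assert_eq!(derive_rows(42), derive_rows(42));
}
```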
+ +#![no_main] + +use std::sync::Arc; + +use common_telemetry::info; +use libfuzzer_sys::arbitrary::{Arbitrary, Unstructured}; +use libfuzzer_sys::fuzz_target; +use rand::{Rng, SeedableRng}; +use rand_chacha::ChaChaRng; +use snafu::{ensure, ResultExt}; +use sqlx::{Executor, MySql, Pool}; +use tests_fuzz::context::{TableContext, TableContextRef}; +use tests_fuzz::error::{self, Result}; +use tests_fuzz::fake::{ + merge_two_word_map_fn, random_capitalize_map, uppercase_and_keyword_backtick_map, + MappedGenerator, WordGenerator, +}; +use tests_fuzz::generator::create_expr::{ + CreateLogicalTableExprGeneratorBuilder, CreatePhysicalTableExprGeneratorBuilder, +}; +use tests_fuzz::generator::insert_expr::InsertExprGeneratorBuilder; +use tests_fuzz::generator::Generator; +use tests_fuzz::ir::{CreateTableExpr, InsertIntoExpr}; +use tests_fuzz::translator::mysql::create_expr::CreateTableExprTranslator; +use tests_fuzz::translator::mysql::insert_expr::InsertIntoExprTranslator; +use tests_fuzz::translator::DslTranslator; +use tests_fuzz::utils::{init_greptime_connections_via_env, Connections}; + +struct FuzzContext { + greptime: Pool, +} + +impl FuzzContext { + async fn close(self) { + self.greptime.close().await; + } +} + +#[derive(Copy, Clone, Debug)] +struct FuzzInput { + seed: u64, + rows: usize, +} + +impl Arbitrary<'_> for FuzzInput { + fn arbitrary(u: &mut Unstructured<'_>) -> arbitrary::Result { + let seed = u.int_in_range(u64::MIN..=u64::MAX)?; + let mut rng = ChaChaRng::seed_from_u64(seed); + let rows = rng.gen_range(1..4096); + Ok(FuzzInput { rows, seed }) + } +} + +fn generate_create_physical_table_expr(rng: &mut R) -> Result { + let physical_table_if_not_exists = rng.gen_bool(0.5); + let create_physical_table_expr = CreatePhysicalTableExprGeneratorBuilder::default() + .name_generator(Box::new(MappedGenerator::new( + WordGenerator, + merge_two_word_map_fn(random_capitalize_map, uppercase_and_keyword_backtick_map), + ))) + .if_not_exists(physical_table_if_not_exists) + .build() + .unwrap(); + create_physical_table_expr.generate(rng) +} + +fn generate_create_logical_table_expr( + physical_table_ctx: TableContextRef, + rng: &mut R, +) -> Result { + let labels = rng.gen_range(1..=5); + let logical_table_if_not_exists = rng.gen_bool(0.5); + + let create_logical_table_expr = CreateLogicalTableExprGeneratorBuilder::default() + .name_generator(Box::new(MappedGenerator::new( + WordGenerator, + merge_two_word_map_fn(random_capitalize_map, uppercase_and_keyword_backtick_map), + ))) + .physical_table_ctx(physical_table_ctx) + .labels(labels) + .if_not_exists(logical_table_if_not_exists) + .build() + .unwrap(); + create_logical_table_expr.generate(rng) +} + +fn generate_insert_expr( + input: FuzzInput, + rng: &mut R, + table_ctx: TableContextRef, +) -> Result { + let insert_generator = InsertExprGeneratorBuilder::default() + .omit_column_list(false) + .table_ctx(table_ctx) + .rows(input.rows) + .build() + .unwrap(); + insert_generator.generate(rng) +} + +async fn execute_insert(ctx: FuzzContext, input: FuzzInput) -> Result<()> { + info!("input: {input:?}"); + let mut rng = ChaChaRng::seed_from_u64(input.seed); + + // Create a physical table and a logical table on top of it + let create_physical_table_expr = generate_create_physical_table_expr(&mut rng).unwrap(); + let translator = CreateTableExprTranslator; + let sql = translator.translate(&create_physical_table_expr)?; + let result = sqlx::query(&sql) + .execute(&ctx.greptime) + .await + .context(error::ExecuteQuerySnafu { sql: &sql })?; + 
info!("Create physical table: {sql}, result: {result:?}"); + + let physical_table_ctx = Arc::new(TableContext::from(&create_physical_table_expr)); + + let create_logical_table_expr = + generate_create_logical_table_expr(physical_table_ctx, &mut rng).unwrap(); + let sql = translator.translate(&create_logical_table_expr)?; + let result = sqlx::query(&sql) + .execute(&ctx.greptime) + .await + .context(error::ExecuteQuerySnafu { sql: &sql })?; + info!("Create logical table: {sql}, result: {result:?}"); + + let logical_table_ctx = Arc::new(TableContext::from(&create_logical_table_expr)); + + let insert_expr = generate_insert_expr(input, &mut rng, logical_table_ctx)?; + let translator = InsertIntoExprTranslator; + let sql = translator.translate(&insert_expr)?; + let result = ctx + .greptime + // unprepared query, see + .execute(sql.as_str()) + .await + .context(error::ExecuteQuerySnafu { sql: &sql })?; + + ensure!( + result.rows_affected() == input.rows as u64, + error::AssertSnafu { + reason: format!( + "expected rows affected: {}, actual: {}", + input.rows, + result.rows_affected(), + ) + } + ); + + // TODO: Validate inserted rows + + // Clean up logical table + let sql = format!("DROP TABLE {}", create_logical_table_expr.table_name); + let result = sqlx::query(&sql) + .execute(&ctx.greptime) + .await + .context(error::ExecuteQuerySnafu { sql: &sql })?; + info!( + "Drop table: {}, result: {result:?}", + create_logical_table_expr.table_name + ); + + // Clean up physical table + let sql = format!("DROP TABLE {}", create_physical_table_expr.table_name); + let result = sqlx::query(&sql) + .execute(&ctx.greptime) + .await + .context(error::ExecuteQuerySnafu { sql })?; + info!( + "Drop table: {}, result: {result:?}", + create_physical_table_expr.table_name + ); + ctx.close().await; + + Ok(()) +} + +fuzz_target!(|input: FuzzInput| { + common_telemetry::init_default_ut_logging(); + common_runtime::block_on_write(async { + let Connections { mysql } = init_greptime_connections_via_env().await; + let ctx = FuzzContext { + greptime: mysql.expect("mysql connection init must be succeed"), + }; + execute_insert(ctx, input) + .await + .unwrap_or_else(|err| panic!("fuzz test must be succeed: {err:?}")); + }) +}); diff --git a/tests-fuzz/targets/unstable/fuzz_create_table_standalone.rs b/tests-fuzz/targets/unstable/fuzz_create_table_standalone.rs new file mode 100644 index 000000000000..c4b60b50d16b --- /dev/null +++ b/tests-fuzz/targets/unstable/fuzz_create_table_standalone.rs @@ -0,0 +1,246 @@ +// Copyright 2023 Greptime Team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#![no_main] + +use std::collections::HashMap; +use std::fs::create_dir_all; +use std::sync::atomic::AtomicBool; +use std::sync::Arc; + +use common_telemetry::info; +use common_telemetry::tracing::warn; +use libfuzzer_sys::arbitrary::{Arbitrary, Unstructured}; +use libfuzzer_sys::fuzz_target; +use rand::{Rng, SeedableRng}; +use rand_chacha::ChaChaRng; +use serde::Serialize; +use snafu::ensure; +use sqlx::mysql::MySqlPoolOptions; +use sqlx::{MySql, Pool}; +use tests_fuzz::context::TableContext; +use tests_fuzz::error::Result; +use tests_fuzz::fake::{ + merge_two_word_map_fn, random_capitalize_map, uppercase_and_keyword_backtick_map, + MappedGenerator, WordGenerator, +}; +use tests_fuzz::generator::create_expr::CreateTableExprGeneratorBuilder; +use tests_fuzz::generator::Generator; +use tests_fuzz::ir::CreateTableExpr; +use tests_fuzz::translator::mysql::create_expr::CreateTableExprTranslator; +use tests_fuzz::translator::DslTranslator; +use tests_fuzz::utils::config::{get_conf_path, write_config_file}; +use tests_fuzz::utils::health::HttpHealthChecker; +use tests_fuzz::utils::load_unstable_test_env_variables; +use tests_fuzz::utils::process::{ProcessManager, ProcessState, UnstableProcessController}; +use tests_fuzz::{error, validator}; +use tokio::sync::watch; + +struct FuzzContext { + greptime: Pool, +} + +impl FuzzContext { + async fn close(self) { + self.greptime.close().await; + } +} + +#[derive(Clone, Debug)] +struct FuzzInput { + seed: u64, + num: usize, +} + +impl Arbitrary<'_> for FuzzInput { + fn arbitrary(u: &mut Unstructured<'_>) -> arbitrary::Result { + let seed = u.int_in_range(u64::MIN..=u64::MAX)?; + let mut rng = ChaChaRng::seed_from_u64(seed); + let num = rng.gen_range(1..500); + Ok(FuzzInput { seed, num }) + } +} + +const DEFAULT_TEMPLATE: &str = "standalone.template.toml"; +const DEFAULT_CONFIG_NAME: &str = "standalone.template.toml"; +const DEFAULT_ROOT_DIR: &str = "/tmp/unstable_greptime/"; +const DEFAULT_DATA_HOME: &str = "/tmp/unstable_greptime/datahome/"; +const DEFAULT_MYSQL_URL: &str = "127.0.0.1:4002"; +const DEFAULT_HTTP_HEALTH_URL: &str = "http://127.0.0.1:4000/health"; + +fn generate_create_table_expr(rng: &mut R) -> CreateTableExpr { + let columns = rng.gen_range(2..30); + let create_table_generator = CreateTableExprGeneratorBuilder::default() + .name_generator(Box::new(MappedGenerator::new( + WordGenerator, + merge_two_word_map_fn(random_capitalize_map, uppercase_and_keyword_backtick_map), + ))) + .columns(columns) + .engine("mito") + .build() + .unwrap(); + create_table_generator.generate(rng).unwrap() +} + +async fn connect_mysql(addr: &str) -> Pool { + loop { + match MySqlPoolOptions::new() + .connect(&format!("mysql://{addr}/public")) + .await + { + Ok(mysql) => return mysql, + Err(err) => { + warn!("Reconnecting to {addr}, error: {err}") + } + } + } +} + +async fn execute_unstable_create_table( + unstable_process_controller: Arc, + rx: watch::Receiver, + input: FuzzInput, +) -> Result<()> { + // Starts the unstable process. 
+ let moved_unstable_process_controller = unstable_process_controller.clone(); + let handler = tokio::spawn(async move { moved_unstable_process_controller.start().await }); + let mut rng = ChaChaRng::seed_from_u64(input.seed); + let mysql = connect_mysql(DEFAULT_MYSQL_URL).await; + let ctx = FuzzContext { greptime: mysql }; + + let mut table_states = HashMap::new(); + + for _ in 0..input.num { + let expr = generate_create_table_expr(&mut rng); + let table_ctx = Arc::new(TableContext::from(&expr)); + let table_name = expr.table_name.to_string(); + if table_states.contains_key(&table_name) { + warn!("ignores same name table: {table_name}"); + // ignores. + continue; + } + + let translator = CreateTableExprTranslator; + let sql = translator.translate(&expr).unwrap(); + let result = sqlx::query(&sql).execute(&ctx.greptime).await; + match result { + Ok(result) => { + let state = *rx.borrow(); + table_states.insert(table_name, state); + validate_columns(&ctx.greptime, &table_ctx).await; + info!("Create table: {sql}, result: {result:?}"); + } + Err(err) => { + let state = *rx.borrow(); + ensure!( + !state.health(), + error::UnexpectedSnafu { + violated: format!("Failed to create table: {sql}, error: {err}") + } + ); + table_states.insert(table_name, state); + continue; + } + } + } + + loop { + let sql = "DROP DATABASE IF EXISTS public"; + match sqlx::query(sql).execute(&ctx.greptime).await { + Ok(result) => { + info!("Drop table: {}, result: {result:?}", sql); + break; + } + Err(err) => warn!("Failed to drop table: {}, error: {err}", sql), + } + } + // Cleans up + ctx.close().await; + unstable_process_controller.stop(); + let _ = handler.await; + info!("Finishing test for input: {:?}", input); + Ok(()) +} + +async fn validate_columns(client: &Pool, table_ctx: &TableContext) { + loop { + match validator::column::fetch_columns(client, "public".into(), table_ctx.name.clone()) + .await + { + Ok(mut column_entries) => { + column_entries.sort_by(|a, b| a.column_name.cmp(&b.column_name)); + let mut columns = table_ctx.columns.clone(); + columns.sort_by(|a, b| a.name.value.cmp(&b.name.value)); + validator::column::assert_eq(&column_entries, &columns).unwrap(); + return; + } + Err(err) => warn!( + "Failed to fetch table '{}' columns, error: {}", + table_ctx.name, err + ), + } + } +} + +fuzz_target!(|input: FuzzInput| { + common_telemetry::init_default_ut_logging(); + common_runtime::block_on_write(async { + let variables = load_unstable_test_env_variables(); + let root_dir = variables.root_dir.unwrap_or(DEFAULT_ROOT_DIR.to_string()); + create_dir_all(&root_dir).unwrap(); + let output_config_path = format!("{root_dir}{DEFAULT_CONFIG_NAME}"); + let mut conf_path = get_conf_path(); + conf_path.push(DEFAULT_TEMPLATE); + let template_path = conf_path.to_str().unwrap().to_string(); + + // Writes config file. 
+ #[derive(Serialize)] + struct Context { + data_home: String, + } + write_config_file( + &template_path, + &Context { + data_home: DEFAULT_DATA_HOME.to_string(), + }, + &output_config_path, + ) + .await + .unwrap(); + + let args = vec![ + "standalone".to_string(), + "start".to_string(), + format!("--config-file={output_config_path}"), + ]; + let process_manager = ProcessManager::new(); + let (tx, rx) = watch::channel(ProcessState::NotSpawn); + let unstable_process_controller = Arc::new(UnstableProcessController { + binary_path: variables.binary_path, + args, + root_dir, + seed: input.seed, + process_manager, + health_check: Box::new(HttpHealthChecker { + url: DEFAULT_HTTP_HEALTH_URL.to_string(), + }), + sender: tx, + running: Arc::new(AtomicBool::new(false)), + }); + + execute_unstable_create_table(unstable_process_controller, rx, input) + .await + .unwrap_or_else(|err| panic!("fuzz test must be succeed: {err:?}")); + }) +}); diff --git a/tests-integration/Cargo.toml b/tests-integration/Cargo.toml index 1a87d5209c48..c8be68ce8965 100644 --- a/tests-integration/Cargo.toml +++ b/tests-integration/Cargo.toml @@ -13,11 +13,13 @@ workspace = true [dependencies] api.workspace = true arrow-flight.workspace = true +async-stream.workspace = true async-trait = "0.1" auth.workspace = true axum.workspace = true catalog.workspace = true chrono.workspace = true +clap.workspace = true client = { workspace = true, features = ["testing"] } cmd.workspace = true common-base.workspace = true @@ -38,6 +40,7 @@ datatypes.workspace = true dotenv.workspace = true frontend = { workspace = true, features = ["testing"] } futures.workspace = true +futures-util.workspace = true meta-client.workspace = true meta-srv = { workspace = true, features = ["mock"] } mysql_async = { version = "0.33", default-features = false, features = [ @@ -52,6 +55,7 @@ rstest_reuse = "0.5" serde_json.workspace = true servers = { workspace = true, features = ["testing"] } session.workspace = true +snafu.workspace = true sql.workspace = true sqlx = { version = "0.6", features = [ "runtime-tokio-rustls", @@ -64,6 +68,7 @@ table.workspace = true tempfile.workspace = true time = "0.3" tokio.workspace = true +tokio-stream = { workspace = true, features = ["net"] } tonic.workspace = true tower = "0.4" uuid.workspace = true diff --git a/src/client/src/database.rs b/tests-integration/src/database.rs similarity index 72% rename from src/client/src/database.rs rename to tests-integration/src/database.rs index 70dc7397f5ed..31254cefdcc2 100644 --- a/src/client/src/database.rs +++ b/tests-integration/src/database.rs @@ -14,15 +14,17 @@ use api::v1::auth_header::AuthScheme; use api::v1::ddl_request::Expr as DdlExpr; +use api::v1::greptime_database_client::GreptimeDatabaseClient; use api::v1::greptime_request::Request; use api::v1::query_request::Query; use api::v1::{ - AlterExpr, AuthHeader, CreateTableExpr, DdlRequest, DeleteRequests, DropTableExpr, - GreptimeRequest, InsertRequests, PromRangeQuery, QueryRequest, RequestHeader, - RowInsertRequests, TruncateTableExpr, + AlterExpr, AuthHeader, CreateTableExpr, DdlRequest, GreptimeRequest, InsertRequests, + QueryRequest, RequestHeader, }; use arrow_flight::Ticket; use async_stream::stream; +use client::error::{ConvertFlightDataSnafu, Error, IllegalFlightMessagesSnafu, ServerSnafu}; +use client::{from_grpc_response, Client, Result}; use common_error::ext::{BoxedError, ErrorExt}; use common_grpc::flight::{FlightDecoder, FlightMessage}; use common_query::Output; @@ -33,9 +35,7 @@ use 
common_telemetry::tracing_context::W3cTrace; use futures_util::StreamExt; use prost::Message; use snafu::{ensure, ResultExt}; - -use crate::error::{ConvertFlightDataSnafu, Error, IllegalFlightMessagesSnafu, ServerSnafu}; -use crate::{error, from_grpc_response, metrics, Client, Result, StreamInserter}; +use tonic::transport::Channel; pub const DEFAULT_LOOKBACK_STRING: &str = "5m"; @@ -57,6 +57,19 @@ pub struct Database { ctx: FlightContext, } +pub struct DatabaseClient { + pub inner: GreptimeDatabaseClient, +} + +fn make_database_client(client: &Client) -> Result { + let (_, channel) = client.find_channel()?; + Ok(DatabaseClient { + inner: GreptimeDatabaseClient::new(channel) + .max_decoding_message_size(client.max_grpc_recv_message_size()) + .max_encoding_message_size(client.max_grpc_send_message_size()), + }) +} + impl Database { /// Create database service client using catalog and schema pub fn new(catalog: impl Into, schema: impl Into, client: Client) -> Self { @@ -88,34 +101,14 @@ impl Database { } } - pub fn catalog(&self) -> &String { - &self.catalog - } - pub fn set_catalog(&mut self, catalog: impl Into) { self.catalog = catalog.into(); } - pub fn schema(&self) -> &String { - &self.schema - } - pub fn set_schema(&mut self, schema: impl Into) { self.schema = schema.into(); } - pub fn dbname(&self) -> &String { - &self.dbname - } - - pub fn set_dbname(&mut self, dbname: impl Into) { - self.dbname = dbname.into(); - } - - pub fn timezone(&self) -> &String { - &self.timezone - } - pub fn set_timezone(&mut self, timezone: impl Into) { self.timezone = timezone.into(); } @@ -127,42 +120,11 @@ impl Database { } pub async fn insert(&self, requests: InsertRequests) -> Result { - let _timer = metrics::METRIC_GRPC_INSERT.start_timer(); self.handle(Request::Inserts(requests)).await } - pub async fn row_insert(&self, requests: RowInsertRequests) -> Result { - let _timer = metrics::METRIC_GRPC_INSERT.start_timer(); - self.handle(Request::RowInserts(requests)).await - } - - pub fn streaming_inserter(&self) -> Result { - self.streaming_inserter_with_channel_size(65536) - } - - pub fn streaming_inserter_with_channel_size( - &self, - channel_size: usize, - ) -> Result { - let client = self.client.make_database_client()?.inner; - - let stream_inserter = StreamInserter::new( - client, - self.dbname().to_string(), - self.ctx.auth_header.clone(), - channel_size, - ); - - Ok(stream_inserter) - } - - pub async fn delete(&self, request: DeleteRequests) -> Result { - let _timer = metrics::METRIC_GRPC_DELETE.start_timer(); - self.handle(Request::Deletes(request)).await - } - async fn handle(&self, request: Request) -> Result { - let mut client = self.client.make_database_client()?.inner; + let mut client = make_database_client(&self.client)?.inner; let request = self.to_rpc_request(request); let response = client.handle(request).await?.into_inner(); from_grpc_response(response) @@ -188,43 +150,13 @@ impl Database { where S: AsRef, { - let _timer = metrics::METRIC_GRPC_SQL.start_timer(); self.do_get(Request::Query(QueryRequest { query: Some(Query::Sql(sql.as_ref().to_string())), })) .await } - pub async fn logical_plan(&self, logical_plan: Vec) -> Result { - let _timer = metrics::METRIC_GRPC_LOGICAL_PLAN.start_timer(); - self.do_get(Request::Query(QueryRequest { - query: Some(Query::LogicalPlan(logical_plan)), - })) - .await - } - - pub async fn prom_range_query( - &self, - promql: &str, - start: &str, - end: &str, - step: &str, - ) -> Result { - let _timer = 
metrics::METRIC_GRPC_PROMQL_RANGE_QUERY.start_timer(); - self.do_get(Request::Query(QueryRequest { - query: Some(Query::PromRangeQuery(PromRangeQuery { - query: promql.to_string(), - start: start.to_string(), - end: end.to_string(), - step: step.to_string(), - lookback: DEFAULT_LOOKBACK_STRING.to_string(), - })), - })) - .await - } - pub async fn create(&self, expr: CreateTableExpr) -> Result { - let _timer = metrics::METRIC_GRPC_CREATE_TABLE.start_timer(); self.do_get(Request::Ddl(DdlRequest { expr: Some(DdlExpr::CreateTable(expr)), })) @@ -232,32 +164,13 @@ impl Database { } pub async fn alter(&self, expr: AlterExpr) -> Result { - let _timer = metrics::METRIC_GRPC_ALTER.start_timer(); self.do_get(Request::Ddl(DdlRequest { expr: Some(DdlExpr::Alter(expr)), })) .await } - pub async fn drop_table(&self, expr: DropTableExpr) -> Result { - let _timer = metrics::METRIC_GRPC_DROP_TABLE.start_timer(); - self.do_get(Request::Ddl(DdlRequest { - expr: Some(DdlExpr::DropTable(expr)), - })) - .await - } - - pub async fn truncate_table(&self, expr: TruncateTableExpr) -> Result { - let _timer = metrics::METRIC_GRPC_TRUNCATE_TABLE.start_timer(); - self.do_get(Request::Ddl(DdlRequest { - expr: Some(DdlExpr::TruncateTable(expr)), - })) - .await - } - async fn do_get(&self, request: Request) -> Result { - // FIXME(paomian): should be added some labels for metrics - let _timer = metrics::METRIC_GRPC_DO_GET.start_timer(); let request = self.to_rpc_request(request); let request = Ticket { ticket: request.encode_to_vec().into(), @@ -267,7 +180,7 @@ impl Database { let response = client.mut_inner().do_get(request).await.map_err(|e| { let tonic_code = e.code(); - let e: error::Error = e.into(); + let e: Error = e.into(); let code = e.status_code(); let msg = e.to_string(); let error = Error::FlightGet { @@ -350,7 +263,7 @@ impl Database { } #[derive(Default, Debug, Clone)] -pub struct FlightContext { +struct FlightContext { auth_header: Option, } @@ -358,8 +271,14 @@ pub struct FlightContext { mod tests { use api::v1::auth_header::AuthScheme; use api::v1::{AuthHeader, Basic}; + use clap::Parser; + use client::Client; + use cmd::error::Result as CmdResult; + use cmd::options::{CliOptions, Options}; + use cmd::{cli, standalone, App}; + use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME}; - use crate::database::FlightContext; + use super::{Database, FlightContext}; #[test] fn test_flight_ctx() { @@ -382,4 +301,72 @@ mod tests { }) )) } + + #[tokio::test(flavor = "multi_thread")] + async fn test_export_create_table_with_quoted_names() -> CmdResult<()> { + let output_dir = tempfile::tempdir().unwrap(); + + let standalone = standalone::Command::parse_from([ + "standalone", + "start", + "--data-home", + &*output_dir.path().to_string_lossy(), + ]); + let Options::Standalone(standalone_opts) = + standalone.load_options(&CliOptions::default())? 
+
+    #[tokio::test(flavor = "multi_thread")]
+    async fn test_export_create_table_with_quoted_names() -> CmdResult<()> {
+        let output_dir = tempfile::tempdir().unwrap();
+
+        let standalone = standalone::Command::parse_from([
+            "standalone",
+            "start",
+            "--data-home",
+            &*output_dir.path().to_string_lossy(),
+        ]);
+        let Options::Standalone(standalone_opts) =
+            standalone.load_options(&CliOptions::default())?
+        else {
+            unreachable!()
+        };
+        let mut instance = standalone.build(*standalone_opts).await?;
+        instance.start().await?;
+
+        let client = Client::with_urls(["127.0.0.1:4001"]);
+        let database = Database::new(DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME, client);
+        database
+            .sql(r#"CREATE DATABASE "cli.export.create_table";"#)
+            .await
+            .unwrap();
+        database
+            .sql(
+                r#"CREATE TABLE "cli.export.create_table"."a.b.c"(
+                       ts TIMESTAMP,
+                       TIME INDEX (ts)
+                   ) engine=mito;
+                "#,
+            )
+            .await
+            .unwrap();
+
+        let output_dir = tempfile::tempdir().unwrap();
+        let cli = cli::Command::parse_from([
+            "cli",
+            "export",
+            "--addr",
+            "127.0.0.1:4000",
+            "--output-dir",
+            &*output_dir.path().to_string_lossy(),
+            "--target",
+            "create-table",
+        ]);
+        let mut cli_app = cli.build().await?;
+        cli_app.start().await?;
+
+        instance.stop().await?;
+
+        let output_file = output_dir
+            .path()
+            .join("greptime-cli.export.create_table.sql");
+        let res = std::fs::read_to_string(output_file).unwrap();
+        let expect = r#"CREATE TABLE IF NOT EXISTS "a.b.c" (
+  "ts" TIMESTAMP(3) NOT NULL,
+  TIME INDEX ("ts")
+)
+
+ENGINE=mito
+;
+"#;
+        assert_eq!(res.trim(), expect.trim());
+
+        Ok(())
+    }
 }
diff --git a/tests-integration/src/lib.rs b/tests-integration/src/lib.rs
index d3e700151345..e4db599fd5e7 100644
--- a/tests-integration/src/lib.rs
+++ b/tests-integration/src/lib.rs
@@ -15,6 +15,7 @@
 #![feature(assert_matches)]
 
 pub mod cluster;
+pub mod database;
 mod grpc;
 mod influxdb;
 mod instance;
diff --git a/tests-integration/tests/grpc.rs b/tests-integration/tests/grpc.rs
index 0b38ac252c30..5a14751bbc45 100644
--- a/tests-integration/tests/grpc.rs
+++ b/tests-integration/tests/grpc.rs
@@ -20,7 +20,7 @@ use api::v1::{
     PromqlRequest, RequestHeader, SemanticType,
 };
 use auth::user_provider_from_option;
-use client::{Client, Database, OutputData, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
+use client::{Client, OutputData, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
 use common_catalog::consts::MITO_ENGINE;
 use common_query::Output;
 use common_recordbatch::RecordBatches;
@@ -30,6 +30,7 @@ use servers::http::prometheus::{
     PrometheusResponse,
 };
 use servers::server::Server;
+use tests_integration::database::Database;
 use tests_integration::test_util::{
     setup_grpc_server, setup_grpc_server_with, setup_grpc_server_with_user_provider, StorageType,
 };
diff --git a/tests-integration/tests/region_failover.rs b/tests-integration/tests/region_failover.rs
index 8ca53a6f463a..f2430a0d48f0 100644
--- a/tests-integration/tests/region_failover.rs
+++ b/tests-integration/tests/region_failover.rs
@@ -19,7 +19,7 @@ use catalog::kvbackend::{CachedMetaKvBackend, KvBackendCatalogManager};
 use client::OutputData;
 use common_catalog::consts::{DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
 use common_meta::key::table_route::TableRouteKey;
-use common_meta::key::{RegionDistribution, TableMetaKey};
+use common_meta::key::{MetaKey, RegionDistribution};
 use common_meta::peer::Peer;
 use common_meta::{distributed_time_constants, RegionIdent};
 use common_procedure::{watcher, ProcedureWithId};
@@ -176,7 +176,7 @@ async fn has_route_cache(instance: &Arc<Instance>, table_id: TableId) -> bool {
         .cache();
 
     cache
-        .get(TableRouteKey::new(table_id).as_raw_key().as_slice())
+        .get(TableRouteKey::new(table_id).to_bytes().as_slice())
         .await
         .is_some()
 }
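The `region_failover` change tracks a rename in `common_meta`: the old `TableMetaKey::as_raw_key` is replaced by `to_bytes`, which is why the test now imports `MetaKey`. A sketch of the call shape, assuming `to_bytes` is the serializer provided by the newly imported `MetaKey` trait and that `TableId` is the usual `u32` alias:

    use common_meta::key::table_route::TableRouteKey;
    use common_meta::key::MetaKey;

    // The route cache is keyed by the serialized bytes of the table-route
    // key, so lookups must use the same encoding the metadata layer writes.
    fn route_cache_key(table_id: u32) -> Vec<u8> {
        TableRouteKey::new(table_id).to_bytes()
    }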
diff --git a/tests/cases/distributed/explain/join_10_tables.result b/tests/cases/distributed/explain/join_10_tables.result
index 8b40f21fc66f..f44fc41fa79a 100644
--- a/tests/cases/distributed/explain/join_10_tables.result
+++ b/tests/cases/distributed/explain/join_10_tables.result
@@ -132,35 +132,27 @@ limit 1;
 |_|_MergeScanExec: REDACTED
 |_|_CoalesceBatchesExec: target_batch_size=8192_|
 |_|_RepartitionExec: partitioning=REDACTED
-|_|_RepartitionExec: partitioning=REDACTED
 |_|_MergeScanExec: REDACTED
 |_|_CoalesceBatchesExec: target_batch_size=8192_|
 |_|_RepartitionExec: partitioning=REDACTED
-|_|_RepartitionExec: partitioning=REDACTED
 |_|_MergeScanExec: REDACTED
 |_|_CoalesceBatchesExec: target_batch_size=8192_|
 |_|_RepartitionExec: partitioning=REDACTED
-|_|_RepartitionExec: partitioning=REDACTED
 |_|_MergeScanExec: REDACTED
 |_|_CoalesceBatchesExec: target_batch_size=8192_|
 |_|_RepartitionExec: partitioning=REDACTED
-|_|_RepartitionExec: partitioning=REDACTED
 |_|_MergeScanExec: REDACTED
 |_|_CoalesceBatchesExec: target_batch_size=8192_|
 |_|_RepartitionExec: partitioning=REDACTED
-|_|_RepartitionExec: partitioning=REDACTED
 |_|_MergeScanExec: REDACTED
 |_|_CoalesceBatchesExec: target_batch_size=8192_|
 |_|_RepartitionExec: partitioning=REDACTED
-|_|_RepartitionExec: partitioning=REDACTED
 |_|_MergeScanExec: REDACTED
 |_|_CoalesceBatchesExec: target_batch_size=8192_|
 |_|_RepartitionExec: partitioning=REDACTED
-|_|_RepartitionExec: partitioning=REDACTED
 |_|_MergeScanExec: REDACTED
 |_|_CoalesceBatchesExec: target_batch_size=8192_|
 |_|_RepartitionExec: partitioning=REDACTED
-|_|_RepartitionExec: partitioning=REDACTED
 |_|_MergeScanExec: REDACTED
 |_|_|
 +-+-+
diff --git a/tests/cases/distributed/explain/subqueries.result b/tests/cases/distributed/explain/subqueries.result
index 593e745e1d99..ad31923c97fe 100644
--- a/tests/cases/distributed/explain/subqueries.result
+++ b/tests/cases/distributed/explain/subqueries.result
@@ -25,11 +25,9 @@ EXPLAIN SELECT * FROM integers WHERE i IN ((SELECT i FROM integers)) ORDER BY i;
 |_|_REDACTED
 |_|_CoalesceBatchesExec: target_batch_size=8192_|
 |_|_RepartitionExec: partitioning=REDACTED
-|_|_RepartitionExec: partitioning=REDACTED
 |_|_MergeScanExec: REDACTED
 |_|_CoalesceBatchesExec: target_batch_size=8192_|
 |_|_RepartitionExec: partitioning=REDACTED
-|_|_RepartitionExec: partitioning=REDACTED
 |_|_MergeScanExec: REDACTED
 |_|_|
 +-+-+
@@ -58,11 +56,9 @@ EXPLAIN SELECT * FROM integers i1 WHERE EXISTS(SELECT i FROM integers WHERE i=i1
 |_|_REDACTED
 |_|_CoalesceBatchesExec: target_batch_size=8192_|
 |_|_RepartitionExec: partitioning=REDACTED
-|_|_RepartitionExec: partitioning=REDACTED
 |_|_MergeScanExec: REDACTED
 |_|_CoalesceBatchesExec: target_batch_size=8192_|
 |_|_RepartitionExec: partitioning=REDACTED
-|_|_RepartitionExec: partitioning=REDACTED
 |_|_ProjectionExec: expr=[i@0 as i]_|
 |_|_MergeScanExec: REDACTED
 |_|_|
diff --git a/tests/cases/standalone/common/aggregate/count.result b/tests/cases/standalone/common/aggregate/count.result
new file mode 100644
index 000000000000..4523118d18ac
--- /dev/null
+++ b/tests/cases/standalone/common/aggregate/count.result
@@ -0,0 +1,56 @@
+create table "HelloWorld" (a string, b timestamp time index);
+
+Affected Rows: 0
+
+insert into "HelloWorld" values ("a", 1) ,("b", 2);
+
+Affected Rows: 2
+
+select count(*) from "HelloWorld";
+
++----------+
+| COUNT(*) |
++----------+
+| 2        |
++----------+
+
+create table test (a string, "BbB" timestamp time index);
+
+Affected Rows: 0
+
+insert into test values ("c", 1) ;
+
+Affected Rows: 1
+
+select count(*) from test;
+
++----------+
+| COUNT(*) |
++----------+
+| 1        |
++----------+
+
+select count(*) from (select count(*) from test where a = 'a');
+
++----------+
+| COUNT(*) |
++----------+
+| 1        |
++----------+
+
+select count(*) from (select * from test cross join "HelloWorld");
+
++----------+
+| COUNT(*) |
++----------+
+| 2        |
++----------+
+
+drop table "HelloWorld";
+
+Affected Rows: 0
+
+drop table test;
+
+Affected Rows: 0
+
diff --git a/tests/cases/standalone/common/aggregate/count.sql b/tests/cases/standalone/common/aggregate/count.sql
new file mode 100644
index 000000000000..80100c96aecf
--- /dev/null
+++ b/tests/cases/standalone/common/aggregate/count.sql
@@ -0,0 +1,19 @@
+create table "HelloWorld" (a string, b timestamp time index);
+
+insert into "HelloWorld" values ("a", 1) ,("b", 2);
+
+select count(*) from "HelloWorld";
+
+create table test (a string, "BbB" timestamp time index);
+
+insert into test values ("c", 1) ;
+
+select count(*) from test;
+
+select count(*) from (select count(*) from test where a = 'a');
+
+select count(*) from (select * from test cross join "HelloWorld");
+
+drop table "HelloWorld";
+
+drop table test;
diff --git a/tests/cases/standalone/common/alter/change_col_type.result b/tests/cases/standalone/common/alter/change_col_type.result
new file mode 100644
index 000000000000..3d9500105a6d
--- /dev/null
+++ b/tests/cases/standalone/common/alter/change_col_type.result
@@ -0,0 +1,91 @@
+CREATE TABLE test(id INTEGER PRIMARY KEY, i INTEGER NULL, j TIMESTAMP TIME INDEX, k BOOLEAN);
+
+Affected Rows: 0
+
+INSERT INTO test VALUES (1, 1, 1, false), (2, 2, 2, true);
+
+Affected Rows: 2
+
+ALTER TABLE test MODIFY "I" STRING;
+
+Error: 4002(TableColumnNotFound), Column I not exists in table test
+
+ALTER TABLE test MODIFY k DATE;
+
+Error: 1004(InvalidArguments), Invalid alter table(test) request: column 'k' cannot be cast automatically to type 'Date'
+
+ALTER TABLE test MODIFY id STRING;
+
+Error: 1004(InvalidArguments), Invalid alter table(test) request: Not allowed to change primary key index column 'id'
+
+ALTER TABLE test MODIFY j STRING;
+
+Error: 1004(InvalidArguments), Invalid alter table(test) request: Not allowed to change timestamp index column 'j' datatype
+
+ALTER TABLE test MODIFY I STRING;
+
+Affected Rows: 0
+
+SELECT * FROM test;
+
++----+---+-------------------------+-------+
+| id | i | j                       | k     |
++----+---+-------------------------+-------+
+| 1  | 1 | 1970-01-01T00:00:00.001 | false |
+| 2  | 2 | 1970-01-01T00:00:00.002 | true  |
++----+---+-------------------------+-------+
+
+INSERT INTO test VALUES (3, "greptime", 3, true);
+
+Affected Rows: 1
+
+SELECT * FROM test;
+
++----+----------+-------------------------+-------+
+| id | i        | j                       | k     |
++----+----------+-------------------------+-------+
+| 1  | 1        | 1970-01-01T00:00:00.001 | false |
+| 2  | 2        | 1970-01-01T00:00:00.002 | true  |
+| 3  | greptime | 1970-01-01T00:00:00.003 | true  |
++----+----------+-------------------------+-------+
+
+DESCRIBE test;
+
++--------+----------------------+-----+------+---------+---------------+
+| Column | Type                 | Key | Null | Default | Semantic Type |
++--------+----------------------+-----+------+---------+---------------+
+| id     | Int32                | PRI | YES  |         | TAG           |
+| i      | String               |     | YES  |         | FIELD         |
+| j      | TimestampMillisecond | PRI | NO   |         | TIMESTAMP     |
+| k      | Boolean              |     | YES  |         | FIELD         |
++--------+----------------------+-----+------+---------+---------------+
+
+ALTER TABLE test MODIFY I INTEGER;
+
+Affected Rows: 0
+
+SELECT * FROM test;
+
++----+---+-------------------------+-------+
+| id | i | j                       | k     |
++----+---+-------------------------+-------+
+| 1  | 1 | 1970-01-01T00:00:00.001 | false |
+| 2  | 2 | 1970-01-01T00:00:00.002 | true  |
+| 3  |   | 1970-01-01T00:00:00.003 | true  |
++----+---+-------------------------+-------+
+
+DESCRIBE test;
+
++--------+----------------------+-----+------+---------+---------------+
+| Column | Type                 | Key | Null | Default | Semantic Type |
++--------+----------------------+-----+------+---------+---------------+
+| id     | Int32                | PRI | YES  |         | TAG           |
+| i      | Int32                |     | YES  |         | FIELD         |
+| j      | TimestampMillisecond | PRI | NO   |         | TIMESTAMP     |
+| k      | Boolean              |     | YES  |         | FIELD         |
++--------+----------------------+-----+------+---------+---------------+
+
+DROP TABLE test;
+
+Affected Rows: 0
+
diff --git a/tests/cases/standalone/common/alter/change_col_type.sql b/tests/cases/standalone/common/alter/change_col_type.sql
new file mode 100644
index 000000000000..1eb95c719cdc
--- /dev/null
+++ b/tests/cases/standalone/common/alter/change_col_type.sql
@@ -0,0 +1,29 @@
+CREATE TABLE test(id INTEGER PRIMARY KEY, i INTEGER NULL, j TIMESTAMP TIME INDEX, k BOOLEAN);
+
+INSERT INTO test VALUES (1, 1, 1, false), (2, 2, 2, true);
+
+ALTER TABLE test MODIFY "I" STRING;
+
+ALTER TABLE test MODIFY k DATE;
+
+ALTER TABLE test MODIFY id STRING;
+
+ALTER TABLE test MODIFY j STRING;
+
+ALTER TABLE test MODIFY I STRING;
+
+SELECT * FROM test;
+
+INSERT INTO test VALUES (3, "greptime", 3, true);
+
+SELECT * FROM test;
+
+DESCRIBE test;
+
+ALTER TABLE test MODIFY I INTEGER;
+
+SELECT * FROM test;
+
+DESCRIBE test;
+
+DROP TABLE test;
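Two behaviors are pinned down by the cases above: unquoted identifiers are lowercased during resolution, so `MODIFY I STRING` targets column `i` while the quoted `"I"` is looked up case-sensitively and fails with `TableColumnNotFound`; and narrowing a column back to INTEGER succeeds but replaces unparseable values with NULL rather than rejecting the ALTER. A hedged sketch driving the same statements through the relocated test client (the `database` handle and `test` table are assumed from the fixture above):

    use tests_integration::database::Database;

    async fn widen_then_narrow(database: &Database) {
        // Unquoted `I` normalizes to `i`; quoting it would look for a
        // case-sensitive column literally named "I" and fail.
        database.sql("ALTER TABLE test MODIFY I STRING").await.unwrap();
        // Narrowing back is accepted, but rows that cannot be parsed as
        // INTEGER (the 'greptime' row) come back as NULL afterwards.
        database.sql("ALTER TABLE test MODIFY I INTEGER").await.unwrap();
    }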
diff --git a/tests/cases/standalone/common/alter/change_col_type_not_null.result b/tests/cases/standalone/common/alter/change_col_type_not_null.result
new file mode 100644
index 000000000000..79f03c9cb023
--- /dev/null
+++ b/tests/cases/standalone/common/alter/change_col_type_not_null.result
@@ -0,0 +1,43 @@
+CREATE TABLE test(i TIMESTAMP TIME INDEX, j INTEGER NOT NULL);
+
+Affected Rows: 0
+
+INSERT INTO test VALUES (1, 1), (2, 2);
+
+Affected Rows: 2
+
+SELECT * FROM test;
+
++-------------------------+---+
+| i                       | j |
++-------------------------+---+
+| 1970-01-01T00:00:00.001 | 1 |
+| 1970-01-01T00:00:00.002 | 2 |
++-------------------------+---+
+
+ALTER TABLE test MODIFY j STRING;
+
+Error: 1004(InvalidArguments), Invalid alter table(test) request: column 'j' must be nullable to ensure safe conversion.
+
+SELECT * FROM test;
+
++-------------------------+---+
+| i                       | j |
++-------------------------+---+
+| 1970-01-01T00:00:00.001 | 1 |
+| 1970-01-01T00:00:00.002 | 2 |
++-------------------------+---+
+
+DESCRIBE test;
+
++--------+----------------------+-----+------+---------+---------------+
+| Column | Type                 | Key | Null | Default | Semantic Type |
++--------+----------------------+-----+------+---------+---------------+
+| i      | TimestampMillisecond | PRI | NO   |         | TIMESTAMP     |
+| j      | Int32                |     | NO   |         | FIELD         |
++--------+----------------------+-----+------+---------+---------------+
+
+DROP TABLE test;
+
+Affected Rows: 0
+
diff --git a/tests/cases/standalone/common/alter/change_col_type_not_null.sql b/tests/cases/standalone/common/alter/change_col_type_not_null.sql
new file mode 100644
index 000000000000..c91ae44a2c14
--- /dev/null
+++ b/tests/cases/standalone/common/alter/change_col_type_not_null.sql
@@ -0,0 +1,13 @@
+CREATE TABLE test(i TIMESTAMP TIME INDEX, j INTEGER NOT NULL);
+
+INSERT INTO test VALUES (1, 1), (2, 2);
+
+SELECT * FROM test;
+
+ALTER TABLE test MODIFY j STRING;
+
+SELECT * FROM test;
+
+DESCRIBE test;
+
+DROP TABLE test;
diff --git a/tests/cases/standalone/common/range/special_aggr.result b/tests/cases/standalone/common/range/special_aggr.result
index 6fab9998d4e1..2240d744389d 100644
--- a/tests/cases/standalone/common/range/special_aggr.result
+++ b/tests/cases/standalone/common/range/special_aggr.result
@@ -143,7 +143,7 @@ SELECT ts, host, first_value(addon ORDER BY val ASC, ts ASC) RANGE '5s', last_va
 | 1970-01-01T00:00:20 | host2 | 28                                                                                          | 30                                                                                         |
 +---------------------+-------+---------------------------------------------------------------------------------------------+--------------------------------------------------------------------------------------------+
 
-SELECT ts, host, count(val) RANGE '5s'FROM host ALIGN '5s' ORDER BY host, ts;
+SELECT ts, host, count(val) RANGE '5s' FROM host ALIGN '5s' ORDER BY host, ts;
 
 +---------------------+-------+--------------------------+
 | ts                  | host  | COUNT(host.val) RANGE 5s |
@@ -160,7 +160,7 @@ SELECT ts, host, count(val) RANGE '5s'FROM host ALIGN '5s' ORDER BY host, ts;
 | 1970-01-01T00:00:20 | host2 | 2                        |
 +---------------------+-------+--------------------------+
 
-SELECT ts, host, count(distinct val) RANGE '5s'FROM host ALIGN '5s' ORDER BY host, ts;
+SELECT ts, host, count(distinct val) RANGE '5s' FROM host ALIGN '5s' ORDER BY host, ts;
 
 +---------------------+-------+-----------------------------------+
 | ts                  | host  | COUNT(DISTINCT host.val) RANGE 5s |
@@ -177,7 +177,7 @@ SELECT ts, host, count(distinct val) RANGE '5s'FROM host ALIGN '5s' ORDER BY hos
 | 1970-01-01T00:00:20 | host2 | 2                                 |
 +---------------------+-------+-----------------------------------+
 
-SELECT ts, host, count(*) RANGE '5s'FROM host ALIGN '5s' ORDER BY host, ts;
+SELECT ts, host, count(*) RANGE '5s' FROM host ALIGN '5s' ORDER BY host, ts;
 
 +---------------------+-------+-------------------+
 | ts                  | host  | COUNT(*) RANGE 5s |
@@ -194,7 +194,24 @@ SELECT ts, host, count(*) RANGE '5s'FROM host ALIGN '5s' ORDER BY host, ts;
 | 1970-01-01T00:00:20 | host2 | 3                 |
 +---------------------+-------+-------------------+
 
-SELECT ts, host, count(distinct *) RANGE '5s'FROM host ALIGN '5s' ORDER BY host, ts;
+SELECT ts, host, count(1) RANGE '5s' as abc FROM host ALIGN '5s' ORDER BY host, ts;
+
++---------------------+-------+-----+
+| ts                  | host  | abc |
++---------------------+-------+-----+
+| 1970-01-01T00:00:00 | host1 | 3   |
+| 1970-01-01T00:00:05 | host1 | 3   |
+| 1970-01-01T00:00:10 | host1 | 3   |
+| 1970-01-01T00:00:15 | host1 | 3   |
+| 1970-01-01T00:00:20 | host1 | 3   |
+| 1970-01-01T00:00:00 | host2 | 3   |
+| 1970-01-01T00:00:05 | host2 | 3   |
+| 1970-01-01T00:00:10 | host2 | 3   |
+| 1970-01-01T00:00:15 | host2 | 3   |
+| 1970-01-01T00:00:20 | host2 | 3   |
++---------------------+-------+-----+
+
+SELECT ts, host, count(distinct *) RANGE '5s' FROM host ALIGN '5s' ORDER BY host, ts;
 
 +---------------------+-------+----------------------------+
 | ts                  | host  | COUNT(DISTINCT *) RANGE 5s |
diff --git a/tests/cases/standalone/common/range/special_aggr.sql b/tests/cases/standalone/common/range/special_aggr.sql
index bf3cd9e29c6d..34a6b691443c 100644
--- a/tests/cases/standalone/common/range/special_aggr.sql
+++ b/tests/cases/standalone/common/range/special_aggr.sql
@@ -58,13 +58,15 @@ SELECT ts, host, first_value(addon ORDER BY val ASC NULLS FIRST) RANGE '5s', las
 
 SELECT ts, host, first_value(addon ORDER BY val ASC, ts ASC) RANGE '5s', last_value(addon ORDER BY val ASC, ts ASC) RANGE '5s' FROM host ALIGN '5s' ORDER BY host, ts;
 
-SELECT ts, host, count(val) RANGE '5s'FROM host ALIGN '5s' ORDER BY host, ts;
+SELECT ts, host, count(val) RANGE '5s' FROM host ALIGN '5s' ORDER BY host, ts;
 
-SELECT ts, host, count(distinct val) RANGE '5s'FROM host ALIGN '5s' ORDER BY host, ts;
+SELECT ts, host, count(distinct val) RANGE '5s' FROM host ALIGN '5s' ORDER BY host, ts;
 
-SELECT ts, host, count(*) RANGE '5s'FROM host ALIGN '5s' ORDER BY host, ts;
+SELECT ts, host, count(*) RANGE '5s' FROM host ALIGN '5s' ORDER BY host, ts;
 
-SELECT ts, host, count(distinct *) RANGE '5s'FROM host ALIGN '5s' ORDER BY host, ts;
+SELECT ts, host, count(1) RANGE '5s' as abc FROM host ALIGN '5s' ORDER BY host, ts;
+
+SELECT ts, host, count(distinct *) RANGE '5s' FROM host ALIGN '5s' ORDER BY host, ts;
 
 -- Test error first_value/last_value
diff --git a/tests/cases/standalone/common/tql-explain-analyze/explain.result b/tests/cases/standalone/common/tql-explain-analyze/explain.result
index eb6be427d30c..a49624011d6c 100644
--- a/tests/cases/standalone/common/tql-explain-analyze/explain.result
+++ b/tests/cases/standalone/common/tql-explain-analyze/explain.result
@@ -86,7 +86,7 @@ TQL EXPLAIN VERBOSE (0, 10, '5s') test;
 |_|_Filter: test.j >= TimestampMillisecond(-300000, None) AND test.j <= TimestampMillisecond(300000, None)_|
 |_|_TableScan: test_|
 | logical_plan after apply_function_rewrites_| SAME TEXT AS ABOVE_|
-| logical_plan after count_wildcard_rule_| SAME TEXT AS ABOVE_|
+| logical_plan after count_wildcard_to_time_index_rule_| SAME TEXT AS ABOVE_|
 | logical_plan after StringNormalizationRule_| SAME TEXT AS ABOVE_|
 | logical_plan after inline_table_scan_| SAME TEXT AS ABOVE_|
 | logical_plan after type_coercion_| SAME TEXT AS ABOVE_|
@@ -165,6 +165,7 @@ TQL EXPLAIN VERBOSE (0, 10, '5s') test;
 | physical_plan after PipelineChecker_| SAME TEXT AS ABOVE_|
 | physical_plan after LimitAggregation_| SAME TEXT AS ABOVE_|
 | physical_plan after ProjectionPushdown_| SAME TEXT AS ABOVE_|
+| physical_plan after RemoveDuplicateRule_| SAME TEXT AS ABOVE_|
 | physical_plan_| PromInstantManipulateExec: range=[0..0], lookback=[300000], interval=[300000], time index=[j]_|
 |_|_PromSeriesNormalizeExec: offset=[0], time index=[j], filter NaN: [false]_|
 |_|_PromSeriesDivideExec: tags=["k"]_|
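The plan diffs above come from a pair of optimizer changes: the count-wildcard rewrite is renamed to `count_wildcard_to_time_index_rule` (rewriting `count(*)`/`count(1)` onto the time index, which is likely what makes the aliased `count(1) RANGE` case work), and a new `RemoveDuplicateRule` pass collapses directly nested identical nodes, which is why the earlier explain results each lost one of their doubled `RepartitionExec` layers. A hypothetical sketch of that dedup idea, not GreptimeDB's actual implementation (the real rule operates on DataFusion `ExecutionPlan`s):

    // Hypothetical: collapse a repartition that feeds an identical repartition.
    #[derive(Clone, PartialEq, Debug)]
    enum Plan {
        Repartition { partitions: usize, input: Box<Plan> },
        MergeScan,
    }

    fn remove_duplicates(plan: Plan) -> Plan {
        match plan {
            Plan::Repartition { partitions, input } => {
                let input = remove_duplicates(*input);
                // Two stacked repartitions with the same scheme are a no-op
                // layer, so keep only the inner one.
                if let Plan::Repartition { partitions: inner, .. } = &input {
                    if *inner == partitions {
                        return input;
                    }
                }
                Plan::Repartition { partitions, input: Box::new(input) }
            }
            other => other,
        }
    }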
diff --git a/tests/runner/Cargo.toml b/tests/runner/Cargo.toml
index 6e8848de5c83..7cb36c1645cb 100644
--- a/tests/runner/Cargo.toml
+++ b/tests/runner/Cargo.toml
@@ -19,5 +19,7 @@ serde.workspace = true
 serde_json.workspace = true
 sqlness = { version = "0.5" }
 tempfile.workspace = true
+# TODO depend `Database` client
+tests-integration.workspace = true
 tinytemplate = "1.2"
 tokio.workspace = true
diff --git a/tests/runner/src/env.rs b/tests/runner/src/env.rs
index ea3e3e1bc10a..399f65840b9c 100644
--- a/tests/runner/src/env.rs
+++ b/tests/runner/src/env.rs
@@ -24,14 +24,13 @@ use std::time::Duration;
 
 use async_trait::async_trait;
 use client::error::ServerSnafu;
-use client::{
-    Client, Database as DB, Error as ClientError, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME,
-};
+use client::{Client, Error as ClientError, DEFAULT_CATALOG_NAME, DEFAULT_SCHEMA_NAME};
 use common_error::ext::ErrorExt;
 use common_query::{Output, OutputData};
 use common_recordbatch::RecordBatches;
 use serde::Serialize;
 use sqlness::{Database, EnvController, QueryContext};
+use tests_integration::database::Database as DB;
 use tinytemplate::TinyTemplate;
 use tokio::sync::Mutex as TokioMutex;
diff --git a/typos.toml b/typos.toml
index 61ba5dd44429..02f2ed6e695a 100644
--- a/typos.toml
+++ b/typos.toml
@@ -1,6 +1,7 @@
 [default.extend-words]
+Pn = "Pn"
 ue = "ue"
-datas = "datas"
+worl = "worl"
 
 [files]
 extend-exclude = [