From d68f39388cf4889cd0ed9b2f85488d5f5c1ab0ac Mon Sep 17 00:00:00 2001
From: Christian Thiel
Date: Mon, 7 Oct 2024 12:23:55 +0200
Subject: [PATCH] WIP OpenFGA

---
 .github/workflows/authz.yml | 48 +
 .run/Run iceberg-catalog.run.xml | 20 +
 .run/Test iceberg-catalog.run.xml | 20 +
 ...47d4135e95e648756ed48d4c7307e836fc445.json | 22 +
 ...e9b51086398b361f68cecf288b938419299d.json} | 6 +-
 ...9c0fd7bbd66f5d6cb3e2647ede55908dfcd0.json} | 4 +-
 ...5111399d8f4ebbeef8305ec7191e5ed4fe62f.json | 22 -
 ...52bc41f9c7370f807c06d519d65afd95de29d.json | 97 -
 ...9bb678d30c9f7ab7619a8050e5c7c4456517a.json | 35 -
 ...a165160719ce6d16ce3b1c86fbc128eb4c6cb.json | 41 +
 ...7070e2d8ae7595b5ef43eb4ddffdbf169482a.json | 16 -
 ...4d9bdae0e58c4af5c31c2079d17a58c336e5b.json | 16 +
 ...2533513fdbb2bac703800af6b2dcda06eec0.json} | 16 +-
 ...cb5d6a2985c60f83623bad3aa02a8065f1b3.json} | 16 +-
 Cargo.lock | 1150 +++++++---
 Cargo.toml | 9 +-
 .../openfga/collaboration_model/v1/schema.fga | 220 ++
 .../collaboration_model/v1/schema.json | 1976 +++++++++++++++++
 .../collaboration_model/v1/store.fga.yaml | 1148 ++++++++++
 crates/iceberg-catalog-bin/src/serve.rs | 18 +-
 crates/iceberg-catalog/Cargo.toml | 5 +
 crates/iceberg-catalog/src/api/iceberg/mod.rs | 12 +-
 .../src/api/iceberg/v1/tables.rs | 7 +
 .../iceberg-catalog/src/api/management/mod.rs | 52 +-
 .../src/api/management/v1/project.rs | 27 +-
 .../src/api/management/v1/warehouse.rs | 225 +-
 crates/iceberg-catalog/src/api/mod.rs | 8 +
 crates/iceberg-catalog/src/api/router.rs | 26 +-
 crates/iceberg-catalog/src/catalog/config.rs | 171 +-
 crates/iceberg-catalog/src/catalog/metrics.rs | 4 +-
 crates/iceberg-catalog/src/catalog/mod.rs | 7 +-
 .../iceberg-catalog/src/catalog/namespace.rs | 204 +-
 .../src/catalog/s3_signer/sign.rs | 116 +-
 crates/iceberg-catalog/src/catalog/tables.rs | 450 ++--
 crates/iceberg-catalog/src/catalog/views.rs | 14 +-
 .../src/catalog/views/commit.rs | 70 +-
 .../src/catalog/views/create.rs | 44 +-
 .../iceberg-catalog/src/catalog/views/drop.rs | 56 +-
 .../src/catalog/views/exists.rs | 52 +-
 .../iceberg-catalog/src/catalog/views/list.rs | 52 +-
 .../iceberg-catalog/src/catalog/views/load.rs | 59 +-
 .../src/catalog/views/rename.rs | 75 +-
 crates/iceberg-catalog/src/config.rs | 288 ++-
 .../src/implementations/authz.rs | 348 ---
 .../src/implementations/mod.rs | 79 +-
 .../src/implementations/postgres/catalog.rs | 103 +-
 .../src/implementations/postgres/namespace.rs | 164 +-
 .../src/implementations/postgres/tabular.rs | 6 +-
 .../implementations/postgres/tabular/table.rs | 132 +-
 .../implementations/postgres/tabular/view.rs | 20 +-
 .../src/implementations/postgres/warehouse.rs | 145 +-
 crates/iceberg-catalog/src/lib.rs | 6 +-
 .../iceberg-catalog/src/request_metadata.rs | 10 +-
 crates/iceberg-catalog/src/service/auth.rs | 303 ---
 .../authz/implementations/allow_all.rs | 86 +
 .../src/service/authz/implementations/mod.rs | 268 +++
 .../authz/implementations/openfga/client.rs | 236 ++
 .../authz/implementations/openfga/health.rs | 28 +
 .../implementations/openfga/migration.rs | 69 +
 .../authz/implementations/openfga/mod.rs | 417 ++++
 .../authz/implementations/openfga/models.rs | 752 +++++++
 .../implementations/openfga/service_ext.rs | 238 ++
 .../iceberg-catalog/src/service/authz/mod.rs | 383 ++++
 crates/iceberg-catalog/src/service/catalog.rs | 110 +-
 crates/iceberg-catalog/src/service/config.rs | 60 -
 .../src/service/contract_verification.rs | 4 +-
 crates/iceberg-catalog/src/service/mod.rs | 56 +-
 crates/iceberg-catalog/src/service/secrets.rs | 67 +-
 .../src/service/token_verification.rs | 32 +
 crates/iceberg-ext/src/catalog/rest/error.rs | 4 +-
 docker/full.Dockerfile | 5 +-
 openapi/management-open-api.yaml | 13 +-
 tests/python/tests/conftest.py | 2 +-
 73 files changed, 8551 insertions(+), 2519 deletions(-)
 create mode 100644 .github/workflows/authz.yml
 create mode 100644 .run/Run iceberg-catalog.run.xml
 create mode 100644 .run/Test iceberg-catalog.run.xml
 create mode 100644 .sqlx/query-26bd0bc7465c39b18aead34361447d4135e95e648756ed48d4c7307e836fc445.json
 rename .sqlx/{query-0dbfc72afe4edaa22da92de1a7eefc3a26029093aeab467746b9364678d60e39.json => query-610520e15443a567c7711ff3ae0ee9b51086398b361f68cecf288b938419299d.json} (54%)
 rename .sqlx/{query-6fc20a2081eb2e8f45b8471b6383f73b595e13f359b5a5262886d0fdd7787d89.json => query-73faafc4dd0e2d9c87bcc1d5fa939c0fd7bbd66f5d6cb3e2647ede55908dfcd0.json} (51%)
 delete mode 100644 .sqlx/query-8433eb675bfcb5889af1630acc05111399d8f4ebbeef8305ec7191e5ed4fe62f.json
 delete mode 100644 .sqlx/query-912d8790ca799aee4352541d7d952bc41f9c7370f807c06d519d65afd95de29d.json
 delete mode 100644 .sqlx/query-a3f11deca8e24c4cf4ad2a5e0c39bb678d30c9f7ab7619a8050e5c7c4456517a.json
 create mode 100644 .sqlx/query-c55a718665b391b12d46e0e3e3fa165160719ce6d16ce3b1c86fbc128eb4c6cb.json
 delete mode 100644 .sqlx/query-d5bd155e925d579cac2091d20b67070e2d8ae7595b5ef43eb4ddffdbf169482a.json
 create mode 100644 .sqlx/query-d91c1870ad0bed668cff251ac0f4d9bdae0e58c4af5c31c2079d17a58c336e5b.json
 rename .sqlx/{query-c2ff81aa6a79c363a29b55bd6d34e54048c44df5acf8adf7ec9aee8e2d45c365.json => query-d9ceb7b7537001258aee242505202533513fdbb2bac703800af6b2dcda06eec0.json} (59%)
 rename .sqlx/{query-a72bbdf696c60afc7b544d6ed8d30aeff99f473ab6a35fd5328c11c9f1b4de1d.json => query-df59096046402097aaa58f58bc17cb5d6a2985c60f83623bad3aa02a8065f1b3.json} (60%)
 create mode 100644 authz/openfga/collaboration_model/v1/schema.fga
 create mode 100644 authz/openfga/collaboration_model/v1/schema.json
 create mode 100644 authz/openfga/collaboration_model/v1/store.fga.yaml
 delete mode 100644 crates/iceberg-catalog/src/implementations/authz.rs
 delete mode 100644 crates/iceberg-catalog/src/service/auth.rs
 create mode 100644 crates/iceberg-catalog/src/service/authz/implementations/allow_all.rs
 create mode 100644 crates/iceberg-catalog/src/service/authz/implementations/mod.rs
 create mode 100644 crates/iceberg-catalog/src/service/authz/implementations/openfga/client.rs
 create mode 100644 crates/iceberg-catalog/src/service/authz/implementations/openfga/health.rs
 create mode 100644 crates/iceberg-catalog/src/service/authz/implementations/openfga/migration.rs
 create mode 100644 crates/iceberg-catalog/src/service/authz/implementations/openfga/mod.rs
 create mode 100644 crates/iceberg-catalog/src/service/authz/implementations/openfga/models.rs
 create mode 100644 crates/iceberg-catalog/src/service/authz/implementations/openfga/service_ext.rs
 create mode 100644 crates/iceberg-catalog/src/service/authz/mod.rs
 delete mode 100644 crates/iceberg-catalog/src/service/config.rs

diff --git a/.github/workflows/authz.yml b/.github/workflows/authz.yml
new file mode 100644
index 00000000..cb45f2a3
--- /dev/null
+++ b/.github/workflows/authz.yml
@@ -0,0 +1,48 @@
+name: AuthZ Unittests
+
+on:
+  push:
+    branches:
+      - main
+  pull_request:
+    branches:
+      - main
+
+env:
+  CARGO_TERM_COLOR: always
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}-${{ github.event_name }}
+  cancel-in-progress: true
+
+jobs:
+  openfga:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Install FGA CLI
+        run: |
+          wget https://github.com/openfga/cli/releases/download/v0.6.1/fga_0.6.1_linux_amd64.deb
+          sudo apt-get install -yqq ./fga_0.6.1_linux_amd64.deb
+
+      - name: Validate Collaboration Model
+        run: |
+          BASE_PATH=authz/openfga/collaboration_model
+          LAST_VERSION=$(ls $BASE_PATH | sort -r | head -n 1)
+          VALIDATION_OUTPUT=$(fga model validate --file $BASE_PATH/$LAST_VERSION/schema.fga)
+          echo $VALIDATION_OUTPUT | jq -e '.is_valid == true' > /dev/null || { echo "Validation failed"; exit 1; }
+
+      - name: Test Collaboration Model
+        run: |
+          BASE_PATH=authz/openfga/collaboration_model
+          LAST_VERSION=$(ls $BASE_PATH | sort -r | head -n 1)
+          fga model test --tests $BASE_PATH/$LAST_VERSION/store.fga.yaml
+
+      - name: Check json up-to-date
+        run: |
+          BASE_PATH=authz/openfga/collaboration_model
+          LAST_VERSION=$(ls $BASE_PATH | sort -r | head -n 1)
+          DESIRED_SCHEMA_JSON=$(fga model transform --file $BASE_PATH/$LAST_VERSION/schema.fga)
+          CURRENT_SCHEMA_JSON=$(cat $BASE_PATH/$LAST_VERSION/schema.json)
+          echo $DESIRED_SCHEMA_JSON | jq -e '. == '"$CURRENT_SCHEMA_JSON" > /dev/null || { echo "Schema json is not up-to-date"; exit 1; }
diff --git a/.run/Run iceberg-catalog.run.xml b/.run/Run iceberg-catalog.run.xml
new file mode 100644
index 00000000..b30e0934
--- /dev/null
+++ b/.run/Run iceberg-catalog.run.xml
@@ -0,0 +1,20 @@
+
+
+
+
\ No newline at end of file
diff --git a/.run/Test iceberg-catalog.run.xml b/.run/Test iceberg-catalog.run.xml
new file mode 100644
index 00000000..2a846bf2
--- /dev/null
+++ b/.run/Test iceberg-catalog.run.xml
@@ -0,0 +1,20 @@
+
+
+
+
\ No newline at end of file
diff --git a/.sqlx/query-26bd0bc7465c39b18aead34361447d4135e95e648756ed48d4c7307e836fc445.json b/.sqlx/query-26bd0bc7465c39b18aead34361447d4135e95e648756ed48d4c7307e836fc445.json
new file mode 100644
index 00000000..77127a66
--- /dev/null
+++ b/.sqlx/query-26bd0bc7465c39b18aead34361447d4135e95e648756ed48d4c7307e836fc445.json
@@ -0,0 +1,22 @@
+{
+  "db_name": "PostgreSQL",
+  "query": "\n SELECT \n storage_profile as \"storage_profile: Json\"\n FROM warehouse\n WHERE warehouse_id = $1\n AND status = 'active'\n ",
+  "describe": {
+    "columns": [
+      {
+        "ordinal": 0,
+        "name": "storage_profile: Json",
+        "type_info": "Jsonb"
+      }
+    ],
+    "parameters": {
+      "Left": [
+        "Uuid"
+      ]
+    },
+    "nullable": [
+      false
+    ]
+  },
+  "hash": "26bd0bc7465c39b18aead34361447d4135e95e648756ed48d4c7307e836fc445"
+}
diff --git a/.sqlx/query-0dbfc72afe4edaa22da92de1a7eefc3a26029093aeab467746b9364678d60e39.json b/.sqlx/query-610520e15443a567c7711ff3ae0ee9b51086398b361f68cecf288b938419299d.json
similarity index 54%
rename from .sqlx/query-0dbfc72afe4edaa22da92de1a7eefc3a26029093aeab467746b9364678d60e39.json
rename to .sqlx/query-610520e15443a567c7711ff3ae0ee9b51086398b361f68cecf288b938419299d.json
index 800f8221..17176fdf 100644
--- a/.sqlx/query-0dbfc72afe4edaa22da92de1a7eefc3a26029093aeab467746b9364678d60e39.json
+++ b/.sqlx/query-610520e15443a567c7711ff3ae0ee9b51086398b361f68cecf288b938419299d.json
@@ -1,6 +1,6 @@
 {
   "db_name": "PostgreSQL",
-  "query": "\n WITH deleted AS (\n DELETE FROM namespace\n WHERE warehouse_id = $1 \n AND namespace_name = $2\n AND warehouse_id IN (\n SELECT warehouse_id FROM warehouse WHERE status = 'active'\n )\n RETURNING *\n )\n SELECT count(*) FROM deleted\n ",
+  "query": "\n WITH deleted AS (\n DELETE FROM namespace\n WHERE warehouse_id = $1 \n AND namespace_id = $2\n AND warehouse_id IN (\n SELECT warehouse_id FROM warehouse WHERE status = 'active'\n )\n RETURNING *\n )\n SELECT count(*) FROM
deleted\n ", "describe": { "columns": [ { @@ -12,12 +12,12 @@ "parameters": { "Left": [ "Uuid", - "TextArray" + "Uuid" ] }, "nullable": [ null ] }, - "hash": "0dbfc72afe4edaa22da92de1a7eefc3a26029093aeab467746b9364678d60e39" + "hash": "610520e15443a567c7711ff3ae0ee9b51086398b361f68cecf288b938419299d" } diff --git a/.sqlx/query-6fc20a2081eb2e8f45b8471b6383f73b595e13f359b5a5262886d0fdd7787d89.json b/.sqlx/query-73faafc4dd0e2d9c87bcc1d5fa939c0fd7bbd66f5d6cb3e2647ede55908dfcd0.json similarity index 51% rename from .sqlx/query-6fc20a2081eb2e8f45b8471b6383f73b595e13f359b5a5262886d0fdd7787d89.json rename to .sqlx/query-73faafc4dd0e2d9c87bcc1d5fa939c0fd7bbd66f5d6cb3e2647ede55908dfcd0.json index f446b8ff..e8c60369 100644 --- a/.sqlx/query-6fc20a2081eb2e8f45b8471b6383f73b595e13f359b5a5262886d0fdd7787d89.json +++ b/.sqlx/query-73faafc4dd0e2d9c87bcc1d5fa939c0fd7bbd66f5d6cb3e2647ede55908dfcd0.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT \n warehouse_id\n FROM warehouse\n WHERE warehouse_name = $1 AND project_id = $2\n AND status = 'active'\n ", + "query": "\n SELECT \n warehouse_id\n FROM warehouse\n WHERE warehouse_name = $1 AND project_id = $2\n AND status = 'active'\n ", "describe": { "columns": [ { @@ -19,5 +19,5 @@ false ] }, - "hash": "6fc20a2081eb2e8f45b8471b6383f73b595e13f359b5a5262886d0fdd7787d89" + "hash": "73faafc4dd0e2d9c87bcc1d5fa939c0fd7bbd66f5d6cb3e2647ede55908dfcd0" } diff --git a/.sqlx/query-8433eb675bfcb5889af1630acc05111399d8f4ebbeef8305ec7191e5ed4fe62f.json b/.sqlx/query-8433eb675bfcb5889af1630acc05111399d8f4ebbeef8305ec7191e5ed4fe62f.json deleted file mode 100644 index aa000c00..00000000 --- a/.sqlx/query-8433eb675bfcb5889af1630acc05111399d8f4ebbeef8305ec7191e5ed4fe62f.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT \n storage_profile as \"storage_profile: Json\"\n FROM warehouse\n WHERE warehouse_id = $1\n AND status = 'active'\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "storage_profile: Json", - "type_info": "Jsonb" - } - ], - "parameters": { - "Left": [ - "Uuid" - ] - }, - "nullable": [ - false - ] - }, - "hash": "8433eb675bfcb5889af1630acc05111399d8f4ebbeef8305ec7191e5ed4fe62f" -} diff --git a/.sqlx/query-912d8790ca799aee4352541d7d952bc41f9c7370f807c06d519d65afd95de29d.json b/.sqlx/query-912d8790ca799aee4352541d7d952bc41f9c7370f807c06d519d65afd95de29d.json deleted file mode 100644 index 74139a28..00000000 --- a/.sqlx/query-912d8790ca799aee4352541d7d952bc41f9c7370f807c06d519d65afd95de29d.json +++ /dev/null @@ -1,97 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT \n warehouse_id,\n warehouse_name,\n storage_profile as \"storage_profile: Json\",\n storage_secret_id,\n status AS \"status: WarehouseStatus\",\n tabular_delete_mode as \"tabular_delete_mode: DbTabularDeleteProfile\",\n tabular_expiration_seconds\n FROM warehouse\n WHERE project_id = $1 AND warehouse_id = ANY($2)\n AND status = ANY($3)\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "warehouse_id", - "type_info": "Uuid" - }, - { - "ordinal": 1, - "name": "warehouse_name", - "type_info": "Text" - }, - { - "ordinal": 2, - "name": "storage_profile: Json", - "type_info": "Jsonb" - }, - { - "ordinal": 3, - "name": "storage_secret_id", - "type_info": "Uuid" - }, - { - "ordinal": 4, - "name": "status: WarehouseStatus", - "type_info": { - "Custom": { - "name": "warehouse_status", - "kind": { - "Enum": [ - "active", - "inactive" - ] - } - } - } - }, - { - "ordinal": 5, - "name": "tabular_delete_mode: 
DbTabularDeleteProfile", - "type_info": { - "Custom": { - "name": "tabular_delete_mode", - "kind": { - "Enum": [ - "soft", - "hard" - ] - } - } - } - }, - { - "ordinal": 6, - "name": "tabular_expiration_seconds", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - "Uuid", - "UuidArray", - { - "Custom": { - "name": "warehouse_status[]", - "kind": { - "Array": { - "Custom": { - "name": "warehouse_status", - "kind": { - "Enum": [ - "active", - "inactive" - ] - } - } - } - } - } - } - ] - }, - "nullable": [ - false, - false, - false, - true, - false, - false, - true - ] - }, - "hash": "912d8790ca799aee4352541d7d952bc41f9c7370f807c06d519d65afd95de29d" -} diff --git a/.sqlx/query-a3f11deca8e24c4cf4ad2a5e0c39bb678d30c9f7ab7619a8050e5c7c4456517a.json b/.sqlx/query-a3f11deca8e24c4cf4ad2a5e0c39bb678d30c9f7ab7619a8050e5c7c4456517a.json deleted file mode 100644 index d4868e4d..00000000 --- a/.sqlx/query-a3f11deca8e24c4cf4ad2a5e0c39bb678d30c9f7ab7619a8050e5c7c4456517a.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT \n namespace_id,\n n.warehouse_id,\n namespace_properties as \"properties: Json>>\"\n FROM namespace n\n INNER JOIN warehouse w ON n.warehouse_id = w.warehouse_id\n WHERE n.warehouse_id = $1 AND n.namespace_name = $2\n AND w.status = 'active'\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "namespace_id", - "type_info": "Uuid" - }, - { - "ordinal": 1, - "name": "warehouse_id", - "type_info": "Uuid" - }, - { - "ordinal": 2, - "name": "properties: Json>>", - "type_info": "Jsonb" - } - ], - "parameters": { - "Left": [ - "Uuid", - "TextArray" - ] - }, - "nullable": [ - false, - false, - false - ] - }, - "hash": "a3f11deca8e24c4cf4ad2a5e0c39bb678d30c9f7ab7619a8050e5c7c4456517a" -} diff --git a/.sqlx/query-c55a718665b391b12d46e0e3e3fa165160719ce6d16ce3b1c86fbc128eb4c6cb.json b/.sqlx/query-c55a718665b391b12d46e0e3e3fa165160719ce6d16ce3b1c86fbc128eb4c6cb.json new file mode 100644 index 00000000..20300170 --- /dev/null +++ b/.sqlx/query-c55a718665b391b12d46e0e3e3fa165160719ce6d16ce3b1c86fbc128eb4c6cb.json @@ -0,0 +1,41 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT \n namespace_name as \"namespace_name: Vec\",\n n.namespace_id,\n n.warehouse_id,\n namespace_properties as \"properties: Json>>\"\n FROM namespace n\n INNER JOIN warehouse w ON n.warehouse_id = w.warehouse_id\n WHERE n.warehouse_id = $1 AND n.namespace_id = $2\n AND w.status = 'active'\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "namespace_name: Vec", + "type_info": "TextArray" + }, + { + "ordinal": 1, + "name": "namespace_id", + "type_info": "Uuid" + }, + { + "ordinal": 2, + "name": "warehouse_id", + "type_info": "Uuid" + }, + { + "ordinal": 3, + "name": "properties: Json>>", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Uuid", + "Uuid" + ] + }, + "nullable": [ + false, + false, + false, + false + ] + }, + "hash": "c55a718665b391b12d46e0e3e3fa165160719ce6d16ce3b1c86fbc128eb4c6cb" +} diff --git a/.sqlx/query-d5bd155e925d579cac2091d20b67070e2d8ae7595b5ef43eb4ddffdbf169482a.json b/.sqlx/query-d5bd155e925d579cac2091d20b67070e2d8ae7595b5ef43eb4ddffdbf169482a.json deleted file mode 100644 index 5d6670d6..00000000 --- a/.sqlx/query-d5bd155e925d579cac2091d20b67070e2d8ae7595b5ef43eb4ddffdbf169482a.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE namespace\n SET namespace_properties = $1\n WHERE warehouse_id = $2 AND namespace_name = $3\n AND warehouse_id IN (\n SELECT warehouse_id FROM 
warehouse WHERE status = 'active'\n )\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Jsonb", - "Uuid", - "TextArray" - ] - }, - "nullable": [] - }, - "hash": "d5bd155e925d579cac2091d20b67070e2d8ae7595b5ef43eb4ddffdbf169482a" -} diff --git a/.sqlx/query-d91c1870ad0bed668cff251ac0f4d9bdae0e58c4af5c31c2079d17a58c336e5b.json b/.sqlx/query-d91c1870ad0bed668cff251ac0f4d9bdae0e58c4af5c31c2079d17a58c336e5b.json new file mode 100644 index 00000000..236bbebc --- /dev/null +++ b/.sqlx/query-d91c1870ad0bed668cff251ac0f4d9bdae0e58c4af5c31c2079d17a58c336e5b.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE namespace\n SET namespace_properties = $1\n WHERE warehouse_id = $2 AND namespace_id = $3\n AND warehouse_id IN (\n SELECT warehouse_id FROM warehouse WHERE status = 'active'\n )\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Jsonb", + "Uuid", + "Uuid" + ] + }, + "nullable": [] + }, + "hash": "d91c1870ad0bed668cff251ac0f4d9bdae0e58c4af5c31c2079d17a58c336e5b" +} diff --git a/.sqlx/query-c2ff81aa6a79c363a29b55bd6d34e54048c44df5acf8adf7ec9aee8e2d45c365.json b/.sqlx/query-d9ceb7b7537001258aee242505202533513fdbb2bac703800af6b2dcda06eec0.json similarity index 59% rename from .sqlx/query-c2ff81aa6a79c363a29b55bd6d34e54048c44df5acf8adf7ec9aee8e2d45c365.json rename to .sqlx/query-d9ceb7b7537001258aee242505202533513fdbb2bac703800af6b2dcda06eec0.json index a2bb469c..4cecf878 100644 --- a/.sqlx/query-c2ff81aa6a79c363a29b55bd6d34e54048c44df5acf8adf7ec9aee8e2d45c365.json +++ b/.sqlx/query-d9ceb7b7537001258aee242505202533513fdbb2bac703800af6b2dcda06eec0.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n t.\"table_id\",\n ti.name as \"table_name\",\n ti.location as \"table_location\",\n namespace_name,\n t.\"metadata\" as \"metadata: Json\",\n ti.\"metadata_location\",\n w.storage_profile as \"storage_profile: Json\",\n w.\"storage_secret_id\"\n FROM \"table\" t\n INNER JOIN tabular ti ON t.table_id = ti.tabular_id\n INNER JOIN namespace n ON ti.namespace_id = n.namespace_id\n INNER JOIN warehouse w ON n.warehouse_id = w.warehouse_id\n WHERE w.warehouse_id = $1 AND t.\"table_id\" = $2\n AND w.status = 'active'\n AND (ti.deleted_at IS NULL OR $3)\n ", + "query": "\n SELECT\n t.\"table_id\",\n ti.name as \"table_name\",\n ti.location as \"table_location\",\n namespace_name,\n ti.namespace_id,\n t.\"metadata\" as \"metadata: Json\",\n ti.\"metadata_location\",\n w.storage_profile as \"storage_profile: Json\",\n w.\"storage_secret_id\"\n FROM \"table\" t\n INNER JOIN tabular ti ON t.table_id = ti.tabular_id\n INNER JOIN namespace n ON ti.namespace_id = n.namespace_id\n INNER JOIN warehouse w ON n.warehouse_id = w.warehouse_id\n WHERE w.warehouse_id = $1 AND t.\"table_id\" = $2\n AND w.status = 'active'\n AND (ti.deleted_at IS NULL OR $3)\n ", "describe": { "columns": [ { @@ -25,21 +25,26 @@ }, { "ordinal": 4, + "name": "namespace_id", + "type_info": "Uuid" + }, + { + "ordinal": 5, "name": "metadata: Json", "type_info": "Jsonb" }, { - "ordinal": 5, + "ordinal": 6, "name": "metadata_location", "type_info": "Text" }, { - "ordinal": 6, + "ordinal": 7, "name": "storage_profile: Json", "type_info": "Jsonb" }, { - "ordinal": 7, + "ordinal": 8, "name": "storage_secret_id", "type_info": "Uuid" } @@ -57,10 +62,11 @@ false, false, false, + false, true, false, true ] }, - "hash": "c2ff81aa6a79c363a29b55bd6d34e54048c44df5acf8adf7ec9aee8e2d45c365" + "hash": "d9ceb7b7537001258aee242505202533513fdbb2bac703800af6b2dcda06eec0" } diff --git 
a/.sqlx/query-a72bbdf696c60afc7b544d6ed8d30aeff99f473ab6a35fd5328c11c9f1b4de1d.json b/.sqlx/query-df59096046402097aaa58f58bc17cb5d6a2985c60f83623bad3aa02a8065f1b3.json similarity index 60% rename from .sqlx/query-a72bbdf696c60afc7b544d6ed8d30aeff99f473ab6a35fd5328c11c9f1b4de1d.json rename to .sqlx/query-df59096046402097aaa58f58bc17cb5d6a2985c60f83623bad3aa02a8065f1b3.json index 32e07ccc..8c798771 100644 --- a/.sqlx/query-a72bbdf696c60afc7b544d6ed8d30aeff99f473ab6a35fd5328c11c9f1b4de1d.json +++ b/.sqlx/query-df59096046402097aaa58f58bc17cb5d6a2985c60f83623bad3aa02a8065f1b3.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n t.\"table_id\",\n ti.name as \"table_name\",\n ti.location as \"table_location\",\n namespace_name,\n t.\"metadata\" as \"metadata: Json\",\n ti.\"metadata_location\",\n w.storage_profile as \"storage_profile: Json\",\n w.\"storage_secret_id\"\n FROM \"table\" t\n INNER JOIN tabular ti ON t.table_id = ti.tabular_id\n INNER JOIN namespace n ON ti.namespace_id = n.namespace_id\n INNER JOIN warehouse w ON n.warehouse_id = w.warehouse_id\n WHERE w.warehouse_id = $1\n AND ti.location = ANY($2)\n AND LENGTH(ti.location) <= $3\n AND w.status = 'active'\n AND (ti.deleted_at IS NULL OR $4)\n ", + "query": "\n SELECT\n t.\"table_id\",\n ti.name as \"table_name\",\n ti.location as \"table_location\",\n namespace_name,\n ti.namespace_id,\n t.\"metadata\" as \"metadata: Json\",\n ti.\"metadata_location\",\n w.storage_profile as \"storage_profile: Json\",\n w.\"storage_secret_id\"\n FROM \"table\" t\n INNER JOIN tabular ti ON t.table_id = ti.tabular_id\n INNER JOIN namespace n ON ti.namespace_id = n.namespace_id\n INNER JOIN warehouse w ON n.warehouse_id = w.warehouse_id\n WHERE w.warehouse_id = $1\n AND ti.location = ANY($2)\n AND LENGTH(ti.location) <= $3\n AND w.status = 'active'\n AND (ti.deleted_at IS NULL OR $4)\n ", "describe": { "columns": [ { @@ -25,21 +25,26 @@ }, { "ordinal": 4, + "name": "namespace_id", + "type_info": "Uuid" + }, + { + "ordinal": 5, "name": "metadata: Json", "type_info": "Jsonb" }, { - "ordinal": 5, + "ordinal": 6, "name": "metadata_location", "type_info": "Text" }, { - "ordinal": 6, + "ordinal": 7, "name": "storage_profile: Json", "type_info": "Jsonb" }, { - "ordinal": 7, + "ordinal": 8, "name": "storage_secret_id", "type_info": "Uuid" } @@ -58,10 +63,11 @@ false, false, false, + false, true, false, true ] }, - "hash": "a72bbdf696c60afc7b544d6ed8d30aeff99f473ab6a35fd5328c11c9f1b4de1d" + "hash": "df59096046402097aaa58f58bc17cb5d6a2985c60f83623bad3aa02a8065f1b3" } diff --git a/Cargo.lock b/Cargo.lock index fceaf05e..f0c947e5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10,19 +10,13 @@ checksum = "8b5ace29ee3216de37c0546865ad08edef58b0f9e76838ed8959a84a990e58c5" [[package]] name = "addr2line" -version = "0.22.0" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e4503c46a5c0c7844e948c9a4d6acd9f50cccb4de1c48eb9e291ea17470c678" +checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" dependencies = [ "gimli", ] -[[package]] -name = "adler" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" - [[package]] name = "adler2" version = "2.0.0" @@ -212,15 +206,15 @@ checksum = "3d62b7694a562cdf5a74227903507c56ab2cc8bdd1f781ed5cb4cf9c9f810bfc" [[package]] name = "arrayvec" -version = "0.7.4" +version = "0.7.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" +checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" [[package]] name = "arrow-arith" -version = "53.0.0" +version = "53.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03675e42d1560790f3524800e41403b40d0da1c793fe9528929fde06d8c7649a" +checksum = "d60afcdc004841a5c8d8da4f4fa22d64eb19c0c01ef4bcedd77f175a7cf6e38f" dependencies = [ "arrow-array", "arrow-buffer", @@ -233,9 +227,9 @@ dependencies = [ [[package]] name = "arrow-array" -version = "53.0.0" +version = "53.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd2bf348cf9f02a5975c5962c7fa6dee107a2009a7b41ac5fb1a027e12dc033f" +checksum = "7f16835e8599dbbb1659fd869d865254c4cf32c6c2bb60b6942ac9fc36bfa5da" dependencies = [ "ahash 0.8.11", "arrow-buffer", @@ -249,9 +243,9 @@ dependencies = [ [[package]] name = "arrow-buffer" -version = "53.0.0" +version = "53.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3092e37715f168976012ce52273c3989b5793b0db5f06cbaa246be25e5f0924d" +checksum = "1a1f34f0faae77da6b142db61deba2cb6d60167592b178be317b341440acba80" dependencies = [ "bytes", "half", @@ -260,9 +254,9 @@ dependencies = [ [[package]] name = "arrow-cast" -version = "53.0.0" +version = "53.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ce1018bb710d502f9db06af026ed3561552e493e989a79d0d0f5d9cf267a785" +checksum = "450e4abb5775bca0740bec0bcf1b1a5ae07eff43bd625661c4436d8e8e4540c4" dependencies = [ "arrow-array", "arrow-buffer", @@ -280,9 +274,9 @@ dependencies = [ [[package]] name = "arrow-data" -version = "53.0.0" +version = "53.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e4ac0c4ee79150afe067dc4857154b3ee9c1cd52b5f40d59a77306d0ed18d65" +checksum = "2b1e618bbf714c7a9e8d97203c806734f012ff71ae3adc8ad1b075689f540634" dependencies = [ "arrow-buffer", "arrow-schema", @@ -292,9 +286,9 @@ dependencies = [ [[package]] name = "arrow-ipc" -version = "53.0.0" +version = "53.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb307482348a1267f91b0912e962cd53440e5de0f7fb24c5f7b10da70b38c94a" +checksum = "f98e983549259a2b97049af7edfb8f28b8911682040e99a94e4ceb1196bd65c2" dependencies = [ "arrow-array", "arrow-buffer", @@ -306,9 +300,9 @@ dependencies = [ [[package]] name = "arrow-ord" -version = "53.0.0" +version = "53.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "644046c479d80ae8ed02a7f1e1399072ea344ca6a7b0e293ab2d5d9ed924aa3b" +checksum = "2427f37b4459a4b9e533045abe87a5183a5e0995a3fc2c2fd45027ae2cc4ef3f" dependencies = [ "arrow-array", "arrow-buffer", @@ -321,15 +315,15 @@ dependencies = [ [[package]] name = "arrow-schema" -version = "53.0.0" +version = "53.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c85320a3a2facf2b2822b57aa9d6d9d55edb8aee0b6b5d3b8df158e503d10858" +checksum = "fbf0388a18fd7f7f3fe3de01852d30f54ed5182f9004db700fbe3ba843ed2794" [[package]] name = "arrow-select" -version = "53.0.0" +version = "53.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cc7e6b582e23855fd1625ce46e51647aa440c20ea2e71b1d748e0839dd73cba" +checksum = "b83e5723d307a38bf00ecd2972cd078d1339c7fd3eb044f609958a9a24463f3a" dependencies = [ "ahash 0.8.11", "arrow-array", @@ -341,9 +335,9 @@ dependencies = [ 
[[package]] name = "arrow-string" -version = "53.0.0" +version = "53.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0775b6567c66e56ded19b87a954b6b1beffbdd784ef95a3a2b03f59570c1d230" +checksum = "7ab3db7c09dd826e74079661d84ed01ed06547cf75d52c2818ef776d0d852305" dependencies = [ "arrow-array", "arrow-buffer", @@ -353,7 +347,7 @@ dependencies = [ "memchr", "num", "regex", - "regex-syntax 0.8.4", + "regex-syntax 0.8.5", ] [[package]] @@ -442,9 +436,9 @@ dependencies = [ "rand 0.8.5", "regex", "ring", - "rustls-native-certs 0.7.1", - "rustls-pemfile 2.1.3", - "rustls-webpki 0.102.6", + "rustls-native-certs 0.7.3", + "rustls-pemfile 2.2.0", + "rustls-webpki 0.102.8", "serde", "serde_json", "serde_nanos", @@ -461,9 +455,9 @@ dependencies = [ [[package]] name = "async-process" -version = "2.2.4" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8a07789659a4d385b79b18b9127fc27e1a59e1e89117c78c5ea3b806f016374" +checksum = "63255f1dc2381611000436537bbedfe83183faa303a5a0edaf191edef06526bb" dependencies = [ "async-channel 2.3.1", "async-io", @@ -476,7 +470,6 @@ dependencies = [ "futures-lite 2.3.0", "rustix", "tracing", - "windows-sys 0.59.0", ] [[package]] @@ -497,6 +490,28 @@ dependencies = [ "windows-sys 0.59.0", ] +[[package]] +name = "async-stream" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476" +dependencies = [ + "async-stream-impl", + "futures-core", + "pin-project-lite", +] + +[[package]] +name = "async-stream-impl" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.79", +] + [[package]] name = "async-task" version = "4.7.1" @@ -511,7 +526,7 @@ checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" dependencies = [ "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.79", ] [[package]] @@ -540,15 +555,15 @@ checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" [[package]] name = "autocfg" -version = "1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" +checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" [[package]] name = "aws-config" -version = "1.5.7" +version = "1.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8191fb3091fa0561d1379ef80333c3c7191c6f0435d986e85821bcf7acbd1126" +checksum = "7198e6f03240fdceba36656d8be440297b6b82270325908c7381f37d826a74f6" dependencies = [ "aws-credential-types", "aws-runtime", @@ -563,7 +578,7 @@ dependencies = [ "aws-smithy-types", "aws-types", "bytes", - "fastrand 2.1.0", + "fastrand 2.1.1", "hex", "http 0.2.12", "ring", @@ -602,7 +617,7 @@ dependencies = [ "aws-smithy-types", "aws-types", "bytes", - "fastrand 2.1.0", + "fastrand 2.1.1", "http 0.2.12", "http-body 0.4.6", "once_cell", @@ -633,7 +648,7 @@ dependencies = [ "aws-smithy-xml", "aws-types", "bytes", - "fastrand 2.1.0", + "fastrand 2.1.1", "hex", "hmac", "http 0.2.12", @@ -649,9 +664,9 @@ dependencies = [ [[package]] name = "aws-sdk-sso" -version = "1.44.0" +version = "1.45.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"0b90cfe6504115e13c41d3ea90286ede5aa14da294f3fe077027a6e83850843c" +checksum = "e33ae899566f3d395cbf42858e433930682cc9c1889fa89318896082fef45efb" dependencies = [ "aws-credential-types", "aws-runtime", @@ -671,9 +686,9 @@ dependencies = [ [[package]] name = "aws-sdk-ssooidc" -version = "1.45.0" +version = "1.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "167c0fad1f212952084137308359e8e4c4724d1c643038ce163f06de9662c1d0" +checksum = "f39c09e199ebd96b9f860b0fce4b6625f211e064ad7c8693b72ecf7ef03881e0" dependencies = [ "aws-credential-types", "aws-runtime", @@ -693,9 +708,9 @@ dependencies = [ [[package]] name = "aws-sdk-sts" -version = "1.44.0" +version = "1.45.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cb5f98188ec1435b68097daa2a37d74b9d17c9caa799466338a8d1544e71b9d" +checksum = "3d95f93a98130389eb6233b9d615249e543f6c24a68ca1f109af9ca5164a8765" dependencies = [ "aws-credential-types", "aws-runtime", @@ -837,7 +852,7 @@ dependencies = [ "aws-smithy-runtime-api", "aws-smithy-types", "bytes", - "fastrand 2.1.0", + "fastrand 2.1.1", "h2 0.3.26", "http 0.2.12", "http-body 0.4.6", @@ -1015,7 +1030,7 @@ dependencies = [ "pin-project", "tokio", "tower 0.4.13", - "tower-http", + "tower-http 0.5.2", ] [[package]] @@ -1130,7 +1145,7 @@ version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d67782c3f868daa71d3533538e98a8e13713231969def7536e8039606fc46bf0" dependencies = [ - "fastrand 2.1.0", + "fastrand 2.1.1", "futures-core", "pin-project", "tokio", @@ -1138,17 +1153,17 @@ dependencies = [ [[package]] name = "backtrace" -version = "0.3.73" +version = "0.3.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cc23269a4f8976d0a4d2e7109211a419fe30e8d88d677cd60b6bc79c5732e0a" +checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" dependencies = [ "addr2line", - "cc", "cfg-if", "libc", - "miniz_oxide 0.7.4", + "miniz_oxide", "object", "rustc-demangle", + "windows-targets 0.52.6", ] [[package]] @@ -1295,7 +1310,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.79", "syn_derive", ] @@ -1361,9 +1376,9 @@ dependencies = [ [[package]] name = "bytemuck" -version = "1.16.3" +version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "102087e286b4677862ea56cf8fc58bb2cdfa8725c40ffb80fe3a008eb7f2fc83" +checksum = "94bbb0ad554ad961ddc5da507a12a29b14e4ae5bda06b19f575a3e6079d2e2ae" [[package]] name = "byteorder" @@ -1373,9 +1388,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.7.1" +version = "1.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50" +checksum = "428d9aa8fbc0670b7b8d6030a7fadd0f86151cae55e4dbbece15f3780a3dfaf3" dependencies = [ "serde", ] @@ -1401,12 +1416,13 @@ dependencies = [ [[package]] name = "cc" -version = "1.1.10" +version = "1.1.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9e8aabfac534be767c909e0690571677d49f41bd8465ae876fe043d52ba5292" +checksum = "2e80e3b6a3ab07840e1cae9b0666a63970dc28e8ed5ffbcdacbfc760c281bfc1" dependencies = [ "jobserver", "libc", + "shlex", ] [[package]] @@ -1477,7 +1493,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.79", ] [[package]] @@ -1573,9 +1589,9 @@ dependencies = [ [[package]] name = 
"cpufeatures" -version = "0.2.12" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53fe5e26ff1b7aef8bca9c6080520cfb8d9333c7568e1829cef191a9723e5504" +checksum = "608697df725056feaccfa42cffdaeeec3fccc4ffc38358ecd19b243e716a78e0" dependencies = [ "libc", ] @@ -1707,7 +1723,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.79", ] [[package]] @@ -1755,7 +1771,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.11.1", - "syn 2.0.74", + "syn 2.0.79", ] [[package]] @@ -1777,7 +1793,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core 0.20.10", "quote", - "syn 2.0.74", + "syn 2.0.79", ] [[package]] @@ -1842,7 +1858,7 @@ checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" dependencies = [ "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.79", ] [[package]] @@ -1856,11 +1872,11 @@ dependencies = [ [[package]] name = "derive_builder" -version = "0.20.0" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0350b5cb0331628a5916d6c5c0b72e97393b8b6b03b47a9284f4e7f5a405ffd7" +checksum = "cd33f37ee6a119146a1781d3356a7c26028f83d779b2e04ecd45fdc75c76877b" dependencies = [ - "derive_builder_macro 0.20.0", + "derive_builder_macro 0.20.1", ] [[package]] @@ -1877,14 +1893,14 @@ dependencies = [ [[package]] name = "derive_builder_core" -version = "0.20.0" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d48cda787f839151732d396ac69e3473923d54312c070ee21e9effcaa8ca0b1d" +checksum = "7431fa049613920234f22c47fdc33e6cf3ee83067091ea4277a3f8c4587aae38" dependencies = [ "darling 0.20.10", "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.79", ] [[package]] @@ -1899,12 +1915,12 @@ dependencies = [ [[package]] name = "derive_builder_macro" -version = "0.20.0" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "206868b8242f27cecce124c19fd88157fbd0dd334df2587f36417bafbc85097b" +checksum = "4abae7035bf79b9877b779505d8cf3749285b80c43941eda66604841889451dc" dependencies = [ - "derive_builder_core 0.20.0", - "syn 2.0.74", + "derive_builder_core 0.20.1", + "syn 2.0.79", ] [[package]] @@ -1924,9 +1940,15 @@ checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" dependencies = [ "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.79", ] +[[package]] +name = "diff" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56254986775e3233ffa9c4d7d3faaf6d36a2c09d30b20687e9f88bc8bafc16c8" + [[package]] name = "digest" version = "0.10.7" @@ -1947,7 +1969,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.79", ] [[package]] @@ -2055,6 +2077,16 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" +[[package]] +name = "erased-serde" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24e2389d65ab4fab27dc2a5de7b191e1f6617d1f1c8855c0dc569c94a4cbb18d" +dependencies = [ + "serde", + "typeid", +] + [[package]] name = "errno" version = "0.3.9" @@ -2114,9 +2146,9 @@ dependencies = [ [[package]] name = "fastrand" -version = "2.1.0" +version = "2.1.1" source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fc0510504f03c51ada170672ac806f1f105a88aa97a5281117e1ddc3368e51a" +checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6" [[package]] name = "ff" @@ -2141,12 +2173,20 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8cb01cd46b0cf372153850f4c6c272d9cbea2da513e07538405148f95bd789f3" dependencies = [ "atomic", + "parking_lot", "pear", "serde", + "tempfile", "uncased", "version_check", ] +[[package]] +name = "fixedbitset" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" + [[package]] name = "flagset" version = "0.4.6" @@ -2170,7 +2210,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1b589b4dc103969ad3cf85c950899926ec64300a1a46d76c03a6072957036f0" dependencies = [ "crc32fast", - "miniz_oxide 0.8.0", + "miniz_oxide", ] [[package]] @@ -2190,6 +2230,21 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +[[package]] +name = "foreign-types" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" +dependencies = [ + "foreign-types-shared", +] + +[[package]] +name = "foreign-types-shared" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" + [[package]] name = "form_urlencoded" version = "1.2.1" @@ -2207,9 +2262,9 @@ checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" [[package]] name = "futures" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" +checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" dependencies = [ "futures-channel", "futures-core", @@ -2222,9 +2277,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" +checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" dependencies = [ "futures-core", "futures-sink", @@ -2232,15 +2287,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" +checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" [[package]] name = "futures-executor" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" +checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" dependencies = [ "futures-core", "futures-task", @@ -2260,9 +2315,9 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" +checksum = 
"9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" [[package]] name = "futures-lite" @@ -2285,7 +2340,7 @@ version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "52527eb5074e35e9339c6b4e8d12600c7128b68fb25dcb9fa9dec18f7c25f3a5" dependencies = [ - "fastrand 2.1.0", + "fastrand 2.1.1", "futures-core", "futures-io", "parking", @@ -2294,32 +2349,32 @@ dependencies = [ [[package]] name = "futures-macro" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" +checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.79", ] [[package]] name = "futures-sink" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" +checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" [[package]] name = "futures-task" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" +checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" [[package]] name = "futures-util" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" +checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" dependencies = [ "futures-channel", "futures-core", @@ -2369,9 +2424,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.29.0" +version = "0.31.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40ecd4077b5ae9fd2e9e169b102c6c330d0605168eb0e8bf79952b256dbefffd" +checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" [[package]] name = "google-cloud-auth" @@ -2438,7 +2493,7 @@ dependencies = [ "futures-sink", "futures-util", "http 0.2.12", - "indexmap 2.3.0", + "indexmap 2.6.0", "slab", "tokio", "tokio-util", @@ -2447,9 +2502,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa82e28a107a8cc405f0839610bdc9b15f1e25ec7d696aa5cf173edbcb1486ab" +checksum = "524e8ac6999421f49a846c2d4411f337e53497d8ec55d67753beffa43c5d9205" dependencies = [ "atomic-waker", "bytes", @@ -2457,7 +2512,7 @@ dependencies = [ "futures-core", "futures-sink", "http 1.1.0", - "indexmap 2.3.0", + "indexmap 2.6.0", "slab", "tokio", "tokio-util", @@ -2494,6 +2549,12 @@ dependencies = [ "allocator-api2", ] +[[package]] +name = "hashbrown" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e087f84d4f86bf4b218b927129862374b72199ae7d8657835f1e89000eea4fb" + [[package]] name = "hashlink" version = "0.9.1" @@ -2678,9 +2739,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.9.4" +version = "1.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fcc0b4a115bf80b728eb8ea024ad5bd707b615bfed49e0665b6e0f86fd082d9" +checksum = "7d71d3574edd2771538b901e6549113b4006ece66150fb69c0fb6d9a2adae946" [[package]] name = "httpdate" @@ -2721,7 +2782,7 @@ dependencies = [ "bytes", "futures-channel", "futures-util", - "h2 0.4.5", + 
"h2 0.4.6", "http 1.1.0", "http-body 1.0.1", "httparse", @@ -2751,27 +2812,56 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.27.2" +version = "0.27.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ee4be2c948921a1a5320b629c4193916ed787a7f7f293fd3f7f5a6c9de74155" +checksum = "08afdbb5c31130e3034af566421053ab03787c640246a446327f550d11bcb333" dependencies = [ "futures-util", "http 1.1.0", "hyper 1.4.1", "hyper-util", - "rustls 0.23.12", + "rustls 0.23.14", "rustls-pki-types", "tokio", "tokio-rustls 0.26.0", "tower-service", - "webpki-roots 0.26.3", + "webpki-roots 0.26.6", +] + +[[package]] +name = "hyper-timeout" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3203a961e5c83b6f5498933e78b6b263e208c197b63e9c6c53cc82ffd3f63793" +dependencies = [ + "hyper 1.4.1", + "hyper-util", + "pin-project-lite", + "tokio", + "tower-service", +] + +[[package]] +name = "hyper-tls" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" +dependencies = [ + "bytes", + "http-body-util", + "hyper 1.4.1", + "hyper-util", + "native-tls", + "tokio", + "tokio-native-tls", + "tower-service", ] [[package]] name = "hyper-util" -version = "0.1.7" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cde7055719c54e36e95e8719f95883f22072a48ede39db7fc17a4e1d5281e9b9" +checksum = "41296eb09f183ac68eec06e03cdbea2e759633d4067b2f6552fc2e009bcad08b" dependencies = [ "bytes", "futures-channel", @@ -2782,16 +2872,15 @@ dependencies = [ "pin-project-lite", "socket2", "tokio", - "tower 0.4.13", "tower-service", "tracing", ] [[package]] name = "iana-time-zone" -version = "0.1.60" +version = "0.1.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7ffbb5a1b541ea2561f8c41c087286cc091e21e556a4f09a8f6cbf17b69b141" +checksum = "235e081f3925a06703c2d0117ea8b91f042756fd6e7a6e5d901e8ca1a996b220" dependencies = [ "android_system_properties", "core-foundation-sys", @@ -2829,7 +2918,7 @@ dependencies = [ "bitvec", "bytes", "chrono", - "derive_builder 0.20.0", + "derive_builder 0.20.1", "fnv", "futures", "itertools", @@ -2837,7 +2926,7 @@ dependencies = [ "murmur3", "once_cell", "opendal", - "ordered-float 4.2.2", + "ordered-float 4.3.0", "parquet", "paste", "rand 0.8.5", @@ -2897,10 +2986,13 @@ dependencies = [ "lru", "maplit", "needs_env_var", + "openfga-rs", "percent-encoding", + "pretty_assertions", "rand 0.8.5", "reqwest 0.12.8", "serde", + "serde-aux", "serde_json", "serde_urlencoded", "sqlx", @@ -2909,8 +3001,8 @@ dependencies = [ "thiserror", "time", "tokio", - "tower 0.4.13", - "tower-http", + "tower 0.5.1", + "tower-http 0.6.1", "tracing", "tracing-subscriber", "url", @@ -2996,12 +3088,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.3.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de3fc2e30ba82dd1b3911c8de1ffc143c74a914a14e99514d7637e3099df5ea0" +checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da" dependencies = [ "equivalent", - "hashbrown 0.14.5", + "hashbrown 0.15.0", "serde", ] @@ -3042,11 +3134,17 @@ version = "3.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8bb03732005da905c88227371639bf1ad885cc712789c011c31c5fb3ab3ccf02" +[[package]] +name = "inventory" +version = "0.3.15" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "f958d3d68f4167080a18141e10381e7634563984a537f2a49a30fd8e53ac5767" + [[package]] name = "ipnet" -version = "2.9.0" +version = "2.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" +checksum = "ddc24109865250148c2e0f3d25d4f0f479571723792d3802153c60922a4fb708" [[package]] name = "is_terminal_polyfill" @@ -3080,9 +3178,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.69" +version = "0.3.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29c15563dc2726973df627357ce0c9ddddbea194836909d655df6a75d2cf296d" +checksum = "1868808506b929d7b0cfa8f75951347aa71bb21144b7791bae35d9bccfcfe37a" dependencies = [ "wasm-bindgen", ] @@ -3141,7 +3239,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.74", + "syn 2.0.79", ] [[package]] @@ -3155,9 +3253,9 @@ dependencies = [ [[package]] name = "lexical-core" -version = "0.8.5" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cde5de06e8d4c2faabc400238f9ae1c74d5412d03a7bd067645ccbc47070e46" +checksum = "0431c65b318a590c1de6b8fd6e72798c92291d27762d94c9e6c37ed7a73d8458" dependencies = [ "lexical-parse-float", "lexical-parse-integer", @@ -3168,9 +3266,9 @@ dependencies = [ [[package]] name = "lexical-parse-float" -version = "0.8.5" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "683b3a5ebd0130b8fb52ba0bdc718cc56815b6a097e28ae5a6997d0ad17dc05f" +checksum = "eb17a4bdb9b418051aa59d41d65b1c9be5affab314a872e5ad7f06231fb3b4e0" dependencies = [ "lexical-parse-integer", "lexical-util", @@ -3179,9 +3277,9 @@ dependencies = [ [[package]] name = "lexical-parse-integer" -version = "0.8.6" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d0994485ed0c312f6d965766754ea177d07f9c00c9b82a5ee62ed5b47945ee9" +checksum = "5df98f4a4ab53bf8b175b363a34c7af608fe31f93cc1fb1bf07130622ca4ef61" dependencies = [ "lexical-util", "static_assertions", @@ -3189,18 +3287,18 @@ dependencies = [ [[package]] name = "lexical-util" -version = "0.8.5" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5255b9ff16ff898710eb9eb63cb39248ea8a5bb036bea8085b1a767ff6c4e3fc" +checksum = "85314db53332e5c192b6bca611fb10c114a80d1b831ddac0af1e9be1b9232ca0" dependencies = [ "static_assertions", ] [[package]] name = "lexical-write-float" -version = "0.8.5" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "accabaa1c4581f05a3923d1b4cfd124c329352288b7b9da09e766b0668116862" +checksum = "6e7c3ad4e37db81c1cbe7cf34610340adc09c322871972f74877a712abc6c809" dependencies = [ "lexical-util", "lexical-write-integer", @@ -3209,9 +3307,9 @@ dependencies = [ [[package]] name = "lexical-write-integer" -version = "0.8.5" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1b6f3d1f4422866b68192d62f77bc5c700bee84f3069f2469d7bc8c77852446" +checksum = "eb89e9f6958b83258afa3deed90b5de9ef68eef090ad5086c791cd2345610162" dependencies = [ "lexical-util", "static_assertions", @@ -3219,9 +3317,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.155" +version = "0.2.159" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c" +checksum = 
"561d97a539a36e26a9a5fad1ea11a3039a67714694aaa379433e580854bc3dc5" [[package]] name = "libflate" @@ -3367,7 +3465,7 @@ dependencies = [ "http-body-util", "hyper 1.4.1", "hyper-util", - "indexmap 2.3.0", + "indexmap 2.6.0", "ipnet", "metrics", "metrics-util", @@ -3414,15 +3512,6 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" -[[package]] -name = "miniz_oxide" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8a240ddb74feaf34a79a7add65a741f3167852fba007066dcac1ca548d89c08" -dependencies = [ - "adler", -] - [[package]] name = "miniz_oxide" version = "0.8.0" @@ -3468,12 +3557,35 @@ dependencies = [ "uuid", ] +[[package]] +name = "multimap" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "defc4c55412d89136f966bbb339008b474350e5e6e78d2714439c386b3137a03" + [[package]] name = "murmur3" version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9252111cf132ba0929b6f8e030cac2a24b507f3a4d6db6fb2896f27b354c714b" +[[package]] +name = "native-tls" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8614eb2c83d59d1c8cc974dd3f920198647674a0a035e1af1fa58707e317466" +dependencies = [ + "libc", + "log", + "openssl", + "openssl-probe", + "openssl-sys", + "schannel", + "security-framework", + "security-framework-sys", + "tempfile", +] + [[package]] name = "needs_env_var" version = "1.1.0" @@ -3482,9 +3594,9 @@ checksum = "b3b406fd667619150b3ac88bfa5b2791311d7100c0b91eb6ed6488b82349856d" [[package]] name = "nkeys" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2de02c883c178998da8d0c9816a88ef7ef5c58314dd1585c97a4a5679f3ab337" +checksum = "9f49e787f4c61cbd0f9320b31cc26e58719f6aa5068e34697dd3aea361412fe3" dependencies = [ "data-encoding", "ed25519", @@ -3650,7 +3762,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.79", ] [[package]] @@ -3674,18 +3786,18 @@ dependencies = [ [[package]] name = "object" -version = "0.36.3" +version = "0.36.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27b64972346851a39438c60b341ebc01bba47464ae329e55cf343eb93964efd9" +checksum = "aedf0a2d09c573ed1d8d85b30c119153926a2b36dce0ab28322c09a117a4683e" dependencies = [ "memchr", ] [[package]] name = "once_cell" -version = "1.19.0" +version = "1.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" +checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" [[package]] name = "opendal" @@ -3707,7 +3819,7 @@ dependencies = [ "md-5", "once_cell", "percent-encoding", - "quick-xml 0.36.1", + "quick-xml 0.36.2", "reqsign", "reqwest 0.12.8", "serde", @@ -3716,12 +3828,72 @@ dependencies = [ "uuid", ] +[[package]] +name = "openfga-rs" +version = "0.1.0" +source = "git+https://github.com/c-thiel/openfga-rs.git?rev=e576fe89778b0092292f115afec236b8134f56d1#e576fe89778b0092292f115afec236b8134f56d1" +dependencies = [ + "chrono", + "http 1.1.0", + "prost", + "prost-types", + "prost-wkt", + "prost-wkt-build", + "prost-wkt-types", + "reqwest 0.12.8", + "serde", + "serde_json", + "thiserror", + "tokio", + "tonic", + "tonic-build", + "veil", +] + +[[package]] +name = "openssl" +version = "0.10.66" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "9529f4786b70a3e8c61e11179af17ab6188ad8d0ded78c5529441ed39d4bd9c1" +dependencies = [ + "bitflags 2.6.0", + "cfg-if", + "foreign-types", + "libc", + "once_cell", + "openssl-macros", + "openssl-sys", +] + +[[package]] +name = "openssl-macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.79", +] + [[package]] name = "openssl-probe" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" +[[package]] +name = "openssl-sys" +version = "0.9.103" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f9e8deee91df40a943c71b917e5874b951d32a802526c85721ce3b776c929d6" +dependencies = [ + "cc", + "libc", + "pkg-config", + "vcpkg", +] + [[package]] name = "ordered-float" version = "2.10.1" @@ -3733,9 +3905,9 @@ dependencies = [ [[package]] name = "ordered-float" -version = "4.2.2" +version = "4.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a91171844676f8c7990ce64959210cd2eaef32c2612c50f9fae9f8aaa6065a6" +checksum = "44d501f1a72f71d3c063a6bbc8f7271fa73aa09fe5d6283b6571e2ed176a2537" dependencies = [ "num-traits", ] @@ -3775,9 +3947,9 @@ dependencies = [ [[package]] name = "parking" -version = "2.2.0" +version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb813b8af86854136c6922af0598d719255ecb2179515e6e7730d468f05c9cae" +checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" [[package]] name = "parking_lot" @@ -3797,16 +3969,16 @@ checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.5.3", + "redox_syscall", "smallvec", "windows-targets 0.52.6", ] [[package]] name = "parquet" -version = "53.0.0" +version = "53.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0fbf928021131daaa57d334ca8e3904fe9ae22f73c56244fc7db9b04eedc3d8" +checksum = "310c46a70a3ba90d98fec39fa2da6d9d731e544191da6fb56c9d199484d0dd3e" dependencies = [ "ahash 0.8.11", "arrow-array", @@ -3873,7 +4045,7 @@ dependencies = [ "proc-macro2", "proc-macro2-diagnostics", "quote", - "syn 2.0.74", + "syn 2.0.79", ] [[package]] @@ -3901,24 +4073,34 @@ version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" +[[package]] +name = "petgraph" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" +dependencies = [ + "fixedbitset", + "indexmap 2.6.0", +] + [[package]] name = "pin-project" -version = "1.1.5" +version = "1.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3" +checksum = "baf123a161dde1e524adf36f90bc5d8d3462824a9c43553ad07a8183161189ec" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.5" +version = "1.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" +checksum = 
"a4502d8515ca9f32f1fb543d987f63d95a14934883db45bdb48060b6b69257f8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.79", ] [[package]] @@ -3940,7 +4122,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96c8c490f422ef9a4efd2cb5b42b76c8613d7e7dfc1caf667b8a3350a5acc066" dependencies = [ "atomic-waker", - "fastrand 2.1.0", + "fastrand 2.1.1", "futures-io", ] @@ -3994,9 +4176,9 @@ dependencies = [ [[package]] name = "pkg-config" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec" +checksum = "953ec861398dccce10c670dfeaf3ec4911ca479e9c02154b3a215178c5f566f2" [[package]] name = "polling" @@ -4015,9 +4197,9 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "1.7.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da544ee218f0d287a911e9c99a39a8c9bc8fcad3cb8db5959940044ecfc67265" +checksum = "cc9c68a3f6da06753e9335d63e27f6b9754dd1920d941135b7ea8224f141adb2" [[package]] name = "powerfmt" @@ -4034,13 +4216,33 @@ dependencies = [ "zerocopy", ] +[[package]] +name = "pretty_assertions" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ae130e2f271fbc2ac3a40fb1d07180839cdbbe443c7a27e1e3c13c5cac0116d" +dependencies = [ + "diff", + "yansi", +] + +[[package]] +name = "prettyplease" +version = "0.2.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "479cf940fbbb3426c32c5d5176f62ad57549a0bb84773423ba8be9d089f5faba" +dependencies = [ + "proc-macro2", + "syn 2.0.79", +] + [[package]] name = "proc-macro-crate" -version = "3.1.0" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d37c51ca738a55da99dc0c4a34860fd675453b8b36209178c2249bb13651284" +checksum = "8ecf48c7ca261d60b74ab1a7b20da18bede46776b2e55535cb958eb595c5fa7b" dependencies = [ - "toml_edit 0.21.1", + "toml_edit", ] [[package]] @@ -4084,11 +4286,110 @@ checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.79", "version_check", "yansi", ] +[[package]] +name = "prost" +version = "0.13.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b0487d90e047de87f984913713b85c601c05609aad5b0df4b4573fbf69aa13f" +dependencies = [ + "bytes", + "prost-derive", +] + +[[package]] +name = "prost-build" +version = "0.13.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c1318b19085f08681016926435853bbf7858f9c082d0999b80550ff5d9abe15" +dependencies = [ + "bytes", + "heck", + "itertools", + "log", + "multimap", + "once_cell", + "petgraph", + "prettyplease", + "prost", + "prost-types", + "regex", + "syn 2.0.79", + "tempfile", +] + +[[package]] +name = "prost-derive" +version = "0.13.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e9552f850d5f0964a4e4d0bf306459ac29323ddfbae05e35a7c0d35cb0803cc5" +dependencies = [ + "anyhow", + "itertools", + "proc-macro2", + "quote", + "syn 2.0.79", +] + +[[package]] +name = "prost-types" +version = "0.13.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4759aa0d3a6232fb8dbdb97b61de2c20047c68aca932c7ed76da9d788508d670" +dependencies = [ + "prost", +] + +[[package]] +name = "prost-wkt" +version = "0.6.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8d84e2bee181b04c2bac339f2bfe818c46a99750488cc6728ce4181d5aa8299" +dependencies = [ + "chrono", + "inventory", + "prost", + "serde", + "serde_derive", + "serde_json", + "typetag", +] + +[[package]] +name = "prost-wkt-build" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a669d5acbe719010c6f62a64e6d7d88fdedc1fe46e419747949ecb6312e9b14" +dependencies = [ + "heck", + "prost", + "prost-build", + "prost-types", + "quote", +] + +[[package]] +name = "prost-wkt-types" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01ef068e9b82e654614b22e6b13699bd545b6c0e2e721736008b00b38aeb4f64" +dependencies = [ + "chrono", + "prost", + "prost-build", + "prost-types", + "prost-wkt", + "prost-wkt-build", + "regex", + "serde", + "serde_derive", + "serde_json", +] + [[package]] name = "ptr_meta" version = "0.1.4" @@ -4111,9 +4412,9 @@ dependencies = [ [[package]] name = "quad-rand" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "658fa1faf7a4cc5f057c9ee5ef560f717ad9d8dc66d975267f709624d6e1ab88" +checksum = "b76f1009795ca44bb5aaae8fd3f18953e209259c33d9b059b1f53d58ab7511db" [[package]] name = "quanta" @@ -4142,9 +4443,9 @@ dependencies = [ [[package]] name = "quick-xml" -version = "0.36.1" +version = "0.36.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96a05e2e8efddfa51a84ca47cec303fac86c8541b686d37cac5efc0e094417bc" +checksum = "f7649a7b4df05aed9ea7ec6f628c67c9953a43869b8bc50929569b2999d443fe" dependencies = [ "memchr", "serde", @@ -4152,16 +4453,16 @@ dependencies = [ [[package]] name = "quinn" -version = "0.11.3" +version = "0.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b22d8e7369034b9a7132bc2008cac12f2013c8132b45e0554e6e20e2617f2156" +checksum = "8c7c5fdde3cdae7203427dc4f0a68fe0ed09833edc525a03456b153b79828684" dependencies = [ "bytes", "pin-project-lite", "quinn-proto", "quinn-udp", "rustc-hash", - "rustls 0.23.12", + "rustls 0.23.14", "socket2", "thiserror", "tokio", @@ -4170,15 +4471,15 @@ dependencies = [ [[package]] name = "quinn-proto" -version = "0.11.6" +version = "0.11.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba92fb39ec7ad06ca2582c0ca834dfeadcaf06ddfc8e635c80aa7e1c05315fdd" +checksum = "fadfaed2cd7f389d0161bb73eeb07b7b78f8691047a6f3e73caaeae55310a4a6" dependencies = [ "bytes", "rand 0.8.5", "ring", "rustc-hash", - "rustls 0.23.12", + "rustls 0.23.14", "slab", "thiserror", "tinyvec", @@ -4187,22 +4488,22 @@ dependencies = [ [[package]] name = "quinn-udp" -version = "0.5.4" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bffec3605b73c6f1754535084a85229fa8a30f86014e6c81aeec4abb68b0285" +checksum = "4fe68c2e9e1a1234e218683dbdf9f9dfcb094113c5ac2b938dfcb9bab4c4140b" dependencies = [ "libc", "once_cell", "socket2", "tracing", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] name = "quote" -version = "1.0.36" +version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" +checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" dependencies = [ "proc-macro2", ] @@ -4286,41 +4587,32 @@ dependencies = [ [[package]] name = "raw-cpuid" -version = "11.1.0" +version = "11.2.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb9ee317cfe3fbd54b36a511efc1edd42e216903c9cd575e686dd68a2ba90d8d" +checksum = "1ab240315c661615f2ee9f0f2cd32d5a7343a84d5ebcccb99d46e6637565e7b0" dependencies = [ "bitflags 2.6.0", ] [[package]] name = "redox_syscall" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" -dependencies = [ - "bitflags 1.3.2", -] - -[[package]] -name = "redox_syscall" -version = "0.5.3" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a908a6e00f1fdd0dfd9c0eb08ce85126f6d8bbda50017e74bc4a4b7d4a926a4" +checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f" dependencies = [ "bitflags 2.6.0", ] [[package]] name = "regex" -version = "1.10.6" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619" +checksum = "38200e5ee88914975b69f657f0801b6f6dccafd44fd9326302a4aaeecfacb1d8" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.7", - "regex-syntax 0.8.4", + "regex-automata 0.4.8", + "regex-syntax 0.8.5", ] [[package]] @@ -4334,13 +4626,13 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df" +checksum = "368758f23274712b504848e9d5a6f010445cc8b87a7cdb4d7cbee666c1288da3" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.8.4", + "regex-syntax 0.8.5", ] [[package]] @@ -4357,9 +4649,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.8.4" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "rend" @@ -4389,7 +4681,7 @@ dependencies = [ "log", "once_cell", "percent-encoding", - "quick-xml 0.36.1", + "quick-xml 0.36.2", "rand 0.8.5", "reqwest 0.12.8", "rsa", @@ -4430,7 +4722,7 @@ dependencies = [ "serde_json", "serde_urlencoded", "sync_wrapper 0.1.2", - "system-configuration", + "system-configuration 0.5.1", "tokio", "tokio-rustls 0.24.1", "tower-service", @@ -4454,28 +4746,33 @@ dependencies = [ "futures-channel", "futures-core", "futures-util", + "h2 0.4.6", "http 1.1.0", "http-body 1.0.1", "http-body-util", "hyper 1.4.1", - "hyper-rustls 0.27.2", + "hyper-rustls 0.27.3", + "hyper-tls", "hyper-util", "ipnet", "js-sys", "log", "mime", + "native-tls", "once_cell", "percent-encoding", "pin-project-lite", "quinn", - "rustls 0.23.12", - "rustls-pemfile 2.1.3", + "rustls 0.23.14", + "rustls-pemfile 2.2.0", "rustls-pki-types", "serde", "serde_json", "serde_urlencoded", "sync_wrapper 1.0.1", + "system-configuration 0.6.1", "tokio", + "tokio-native-tls", "tokio-rustls 0.26.0", "tokio-util", "tower-service", @@ -4484,7 +4781,7 @@ dependencies = [ "wasm-bindgen-futures", "wasm-streams", "web-sys", - "webpki-roots 0.26.3", + "webpki-roots 0.26.6", "windows-registry", ] @@ -4516,9 +4813,9 @@ dependencies = [ [[package]] name = "rkyv" -version = "0.7.44" +version = "0.7.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5cba464629b3394fc4dbc6f940ff8f5b4ff5c7aef40f29166fd4ad12acbc99c0" +checksum = "9008cd6385b9e161d8229e1f6549dd23c3d022f132a2ea37ac3a10ac4935779b" dependencies = [ "bitvec", "bytecheck", @@ -4534,9 +4831,9 @@ dependencies = [ [[package]] name = "rkyv_derive" -version = "0.7.44" +version = "0.7.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7dddfff8de25e6f62b9d64e6e432bf1c6736c57d20323e15ee10435fbda7c65" +checksum = "503d1d27590a2b0a3a4ca4c94755aa2875657196ecbf401a42eff41d7de532c0" dependencies = [ "proc-macro2", "quote", @@ -4590,7 +4887,7 @@ dependencies = [ "proc-macro2", "quote", "rust-embed-utils", - "syn 2.0.74", + "syn 2.0.79", "walkdir", ] @@ -4617,9 +4914,9 @@ dependencies = [ [[package]] name = "rust_decimal" -version = "1.35.0" +version = "1.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1790d1c4c0ca81211399e0e0af16333276f375209e71a37b67698a373db5b47a" +checksum = "b082d80e3e3cc52b2ed634388d436fe1f4de6af5786cc2de9ba9737527bdf555" dependencies = [ "arrayvec", "borsh", @@ -4645,9 +4942,9 @@ checksum = "583034fd73374156e66797ed8e5b0d5690409c9226b22d87cb7f19821c05d152" [[package]] name = "rustc_version" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" dependencies = [ "semver", ] @@ -4688,9 +4985,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.34" +version = "0.38.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f" +checksum = "8acb788b847c24f28525660c4d7758620a7210875711f79e7f663cc152726811" dependencies = [ "bitflags 2.6.0", "errno", @@ -4713,14 +5010,15 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.12" +version = "0.23.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c58f8c84392efc0a126acce10fa59ff7b3d2ac06ab451a33f2741989b806b044" +checksum = "415d9944693cb90382053259f89fbb077ea730ad7273047ec63b19bc9b160ba8" dependencies = [ + "log", "once_cell", "ring", "rustls-pki-types", - "rustls-webpki 0.102.6", + "rustls-webpki 0.102.8", "subtle", "zeroize", ] @@ -4739,12 +5037,12 @@ dependencies = [ [[package]] name = "rustls-native-certs" -version = "0.7.1" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a88d6d420651b496bdd98684116959239430022a115c1240e6c3993be0b15fba" +checksum = "e5bfb394eeed242e909609f56089eecfe5fda225042e8b171791b9c95f5931e5" dependencies = [ "openssl-probe", - "rustls-pemfile 2.1.3", + "rustls-pemfile 2.2.0", "rustls-pki-types", "schannel", "security-framework", @@ -4761,19 +5059,18 @@ dependencies = [ [[package]] name = "rustls-pemfile" -version = "2.1.3" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "196fe16b00e106300d3e45ecfcb764fa292a535d7326a29a5875c579c7417425" +checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50" dependencies = [ - "base64 0.22.1", "rustls-pki-types", ] [[package]] name = "rustls-pki-types" -version = "1.8.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc0a2ce646f8655401bb81e7927b812614bd5d91dbc968696be50603510fcaf0" +checksum = "0e696e35370c65c9c541198af4543ccd580cf17fc25d8e05c5a242b202488c55" [[package]] name = 
"rustls-webpki" @@ -4787,9 +5084,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.102.6" +version = "0.102.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e6b52d4fda176fd835fdc55a835d4a89b8499cad995885a21149d5ad62f852e" +checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9" dependencies = [ "ring", "rustls-pki-types", @@ -4828,11 +5125,11 @@ dependencies = [ [[package]] name = "schannel" -version = "0.1.23" +version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbc91545643bcf3a0bbb6569265615222618bdf33ce4ffbbd13c4bbd4c093534" +checksum = "01227be5826fa0690321a2ba6c5cd57a19cf3f6a09e76973b58e61de6ab9d1c1" dependencies = [ - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -4897,9 +5194,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.11.1" +version = "2.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75da29fe9b9b08fe9d6b22b5b4bcbc75d8db3aa31e639aa56bb62e9d46bfceaf" +checksum = "ea4a292869320c0272d7bc55a5a6aafaff59b4f63404a003887b679a2e05b4b6" dependencies = [ "core-foundation-sys", "libc", @@ -4926,6 +5223,17 @@ dependencies = [ "serde_derive", ] +[[package]] +name = "serde-aux" +version = "4.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d2e8bfba469d06512e11e3311d4d051a4a387a5b42d010404fecf3200321c95" +dependencies = [ + "chrono", + "serde", + "serde_json", +] + [[package]] name = "serde_bytes" version = "0.11.15" @@ -4943,7 +5251,7 @@ checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.79", ] [[package]] @@ -4952,7 +5260,7 @@ version = "1.0.128" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8" dependencies = [ - "indexmap 2.3.0", + "indexmap 2.6.0", "itoa", "memchr", "ryu", @@ -4997,14 +5305,14 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.79", ] [[package]] name = "serde_spanned" -version = "0.6.7" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb5b1b31579f3811bf615c144393417496f152e12ac8b7663bf664f4a815306d" +checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1" dependencies = [ "serde", ] @@ -5031,7 +5339,7 @@ dependencies = [ "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.3.0", + "indexmap 2.6.0", "serde", "serde_derive", "serde_json", @@ -5048,7 +5356,7 @@ dependencies = [ "darling 0.20.10", "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.79", ] [[package]] @@ -5057,7 +5365,7 @@ version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.3.0", + "indexmap 2.6.0", "itoa", "ryu", "serde", @@ -5101,6 +5409,12 @@ dependencies = [ "lazy_static", ] +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + [[package]] name = "signal-hook-registry" version = "1.4.2" @@ -5144,9 +5458,9 @@ dependencies = [ [[package]] name = "simdutf8" -version = "0.1.4" +version = "0.1.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f27f6278552951f1f2b8cf9da965d10969b2efdea95a6ec47987ab46edfe263a" +checksum = "e3a9fe34e3e7a50316060351f37187a3f546bce95496156754b601a5fa71b76e" [[package]] name = "simple_asn1" @@ -5252,9 +5566,9 @@ dependencies = [ [[package]] name = "sqlformat" -version = "0.2.4" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f895e3734318cc55f1fe66258926c9b910c124d47520339efecbb6c59cec7c1f" +checksum = "7bba3a93db0cc4f7bdece8bb09e77e2e785c20bfebf79eb8340ed80708048790" dependencies = [ "nom", "unicode_categories", @@ -5295,14 +5609,14 @@ dependencies = [ "hashbrown 0.14.5", "hashlink", "hex", - "indexmap 2.3.0", + "indexmap 2.6.0", "log", "memchr", "once_cell", "paste", "percent-encoding", - "rustls 0.23.12", - "rustls-pemfile 2.1.3", + "rustls 0.23.14", + "rustls-pemfile 2.2.0", "serde", "serde_json", "sha2", @@ -5314,7 +5628,7 @@ dependencies = [ "tracing", "url", "uuid", - "webpki-roots 0.26.3", + "webpki-roots 0.26.6", ] [[package]] @@ -5327,7 +5641,7 @@ dependencies = [ "quote", "sqlx-core", "sqlx-macros-core", - "syn 2.0.74", + "syn 2.0.79", ] [[package]] @@ -5350,7 +5664,7 @@ dependencies = [ "sqlx-mysql", "sqlx-postgres", "sqlx-sqlite", - "syn 2.0.74", + "syn 2.0.79", "tempfile", "tokio", "url", @@ -5513,7 +5827,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.74", + "syn 2.0.79", ] [[package]] @@ -5535,9 +5849,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.74" +version = "2.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fceb41e3d546d0bd83421d3409b1460cc7444cd389341a4c880fe7a042cb3d7" +checksum = "89132cd0bf050864e1d38dc3bbc07a0eb8e7530af26344d3d2bbbef83499f590" dependencies = [ "proc-macro2", "quote", @@ -5553,7 +5867,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.79", ] [[package]] @@ -5591,7 +5905,18 @@ checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" dependencies = [ "bitflags 1.3.2", "core-foundation", - "system-configuration-sys", + "system-configuration-sys 0.5.0", +] + +[[package]] +name = "system-configuration" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" +dependencies = [ + "bitflags 2.6.0", + "core-foundation", + "system-configuration-sys 0.6.0", ] [[package]] @@ -5604,6 +5929,16 @@ dependencies = [ "libc", ] +[[package]] +name = "system-configuration-sys" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e1d1b10ced5ca923a1fcb8d03e96b8d3268065d724548c0211415ff6ac6bac4" +dependencies = [ + "core-foundation-sys", + "libc", +] + [[package]] name = "tagptr" version = "0.2.0" @@ -5618,12 +5953,12 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "tempfile" -version = "3.12.0" +version = "3.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04cbcdd0c794ebb0d4cf35e88edd2f7d2c4c3e9a5a6dab322839b321c6a87a64" +checksum = "f0f2c9fc62d0beef6951ccffd757e241266a2c833136efbe35af6cd2567dca5b" dependencies = [ "cfg-if", - "fastrand 2.1.0", + "fastrand 2.1.1", "once_cell", "rustix", "windows-sys 0.59.0", @@ -5646,7 +5981,7 @@ checksum = "08904e7672f5eb876eaaf87e0ce17857500934f4981c4a0ab2b4aa98baac7fc3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.79", ] [[package]] @@ 
-5752,7 +6087,17 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.79", +] + +[[package]] +name = "tokio-native-tls" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" +dependencies = [ + "native-tls", + "tokio", ] [[package]] @@ -5771,16 +6116,16 @@ version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ - "rustls 0.23.12", + "rustls 0.23.14", "rustls-pki-types", "tokio", ] [[package]] name = "tokio-stream" -version = "0.1.15" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "267ac89e0bec6e691e5813911606935d77c476ff49024f98abcea3e7b15e37af" +checksum = "4f4e6ce100d0eb49a2734f8c0812bcd324cf357d21810932c5df6b96ef2b86f1" dependencies = [ "futures-core", "pin-project-lite", @@ -5789,9 +6134,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.11" +version = "0.7.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cf6b47b3771c49ac75ad09a6162f53ad4b8088b76ac60e8ec1455b31a189fe1" +checksum = "61e7c3654c13bcd040d4a03abee2c75b1d14a37b423cf5a813ceae1cc903ec6a" dependencies = [ "bytes", "futures-core", @@ -5809,7 +6154,7 @@ dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.22.20", + "toml_edit", ] [[package]] @@ -5823,26 +6168,61 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.21.1" +version = "0.22.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" +checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" dependencies = [ - "indexmap 2.3.0", + "indexmap 2.6.0", + "serde", + "serde_spanned", "toml_datetime", - "winnow 0.5.40", + "winnow", ] [[package]] -name = "toml_edit" -version = "0.22.20" +name = "tonic" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "583c44c02ad26b0c3f3066fe629275e50627026c51ac2e595cca4c230ce1ce1d" +checksum = "877c5b330756d856ffcc4553ab34a5684481ade925ecc54bcd1bf02b1d0d4d52" dependencies = [ - "indexmap 2.3.0", - "serde", - "serde_spanned", - "toml_datetime", - "winnow 0.6.18", + "async-stream", + "async-trait", + "axum", + "base64 0.22.1", + "bytes", + "h2 0.4.6", + "http 1.1.0", + "http-body 1.0.1", + "http-body-util", + "hyper 1.4.1", + "hyper-timeout", + "hyper-util", + "percent-encoding", + "pin-project", + "prost", + "rustls-pemfile 2.2.0", + "socket2", + "tokio", + "tokio-rustls 0.26.0", + "tokio-stream", + "tower 0.4.13", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tonic-build" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9557ce109ea773b399c9b9e5dca39294110b74f1f342cb347a80d1fce8c26a11" +dependencies = [ + "prettyplease", + "proc-macro2", + "prost-build", + "prost-types", + "quote", + "syn 2.0.79", ] [[package]] @@ -5853,9 +6233,13 @@ checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" dependencies = [ "futures-core", "futures-util", + "indexmap 1.9.3", "pin-project", "pin-project-lite", + "rand 0.8.5", + "slab", "tokio", + "tokio-util", "tower-layer", "tower-service", "tracing", @@ -5882,6 +6266,22 @@ name = "tower-http" version 
= "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e9cd434a998747dd2c4276bc96ee2e0c7a2eadf3cae88e52be55a05fa9053f5" +dependencies = [ + "bitflags 2.6.0", + "bytes", + "http 1.1.0", + "http-body 1.0.1", + "http-body-util", + "pin-project-lite", + "tower-layer", + "tower-service", +] + +[[package]] +name = "tower-http" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8437150ab6bbc8c5f0f519e3d5ed4aa883a83dd4cdd3d1b21f9482936046cb97" dependencies = [ "async-compression", "bitflags 2.6.0", @@ -5894,7 +6294,7 @@ dependencies = [ "pin-project-lite", "tokio", "tokio-util", - "tower 0.4.13", + "tower 0.5.1", "tower-layer", "tower-service", "tracing", @@ -5933,7 +6333,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.79", ] [[package]] @@ -6053,7 +6453,7 @@ checksum = "f9534daa9fd3ed0bd911d462a37f172228077e7abf18c18a5f67199d959205f8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.79", ] [[package]] @@ -6064,15 +6464,45 @@ checksum = "560b82d656506509d43abe30e0ba64c56b1953ab3d4fe7ba5902747a7a3cedd5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.79", ] +[[package]] +name = "typeid" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e13db2e0ccd5e14a544e8a246ba2312cd25223f616442d7f2cb0e3db614236e" + [[package]] name = "typenum" version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" +[[package]] +name = "typetag" +version = "0.2.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52ba3b6e86ffe0054b2c44f2d86407388b933b16cb0a70eea3929420db1d9bbe" +dependencies = [ + "erased-serde", + "inventory", + "once_cell", + "serde", + "typetag-impl", +] + +[[package]] +name = "typetag-impl" +version = "0.2.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70b20a22c42c8f1cd23ce5e34f165d4d37038f5b663ad20fb6adbdf029172483" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.79", +] + [[package]] name = "uncased" version = "0.9.10" @@ -6093,36 +6523,36 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.15" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" +checksum = "5ab17db44d7388991a428b2ee655ce0c212e862eff1768a455c58f9aad6e7893" [[package]] name = "unicode-ident" -version = "1.0.12" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" +checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" [[package]] name = "unicode-normalization" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5" +checksum = "5033c97c4262335cded6d6fc3e5c18ab755e1a3dc96376350f3d8e9f009ad956" dependencies = [ "tinyvec", ] [[package]] name = "unicode-properties" -version = "0.1.1" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4259d9d4425d9f0661581b804cb85fe66a4c631cadd8f490d1c13a35d5d9291" +checksum = 
"e70f2a8b45122e719eb623c01822704c4e0907e7e426a05927e1a1cfff5b75d0" [[package]] name = "unicode-xid" -version = "0.2.4" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" +checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" [[package]] name = "unicode_categories" @@ -6172,7 +6602,7 @@ version = "4.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c5afb1a60e207dca502682537fefcfd9921e71d0b83e9576060f09abc6efab23" dependencies = [ - "indexmap 2.3.0", + "indexmap 2.6.0", "serde", "serde_json", "serde_yaml", @@ -6189,7 +6619,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.74", + "syn 2.0.79", "url", "uuid", ] @@ -6286,7 +6716,7 @@ checksum = "cff2381c6b31ab2555441e382d699a56c3551d0cfdf0c4df5617bf271c1dd102" dependencies = [ "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.79", ] [[package]] @@ -6346,34 +6776,35 @@ checksum = "b8dad83b4f25e74f184f64c43b150b91efe7647395b42289f38e50566d82855b" [[package]] name = "wasm-bindgen" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" +checksum = "a82edfc16a6c469f5f44dc7b571814045d60404b55a0ee849f9bcfa2e63dd9b5" dependencies = [ "cfg-if", + "once_cell", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da" +checksum = "9de396da306523044d3302746f1208fa71d7532227f15e347e2d93e4145dd77b" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.79", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.42" +version = "0.4.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76bc14366121efc8dbb487ab05bcc9d346b3b5ec0eaa76e46594cabbe51762c0" +checksum = "61e9300f63a621e96ed275155c108eb6f843b6a26d053f122ab69724559dc8ed" dependencies = [ "cfg-if", "js-sys", @@ -6383,9 +6814,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726" +checksum = "585c4c91a46b072c92e908d99cb1dcdf95c5218eeb6f3bf1efa991ee7a68cccf" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -6393,28 +6824,28 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" +checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" dependencies = [ "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.79", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" +checksum = "c62a0a307cb4a311d3a07867860911ca130c3494e8c2719593806c08bc5d0484" [[package]] name = "wasm-streams" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b65dc4c90b63b118468cf747d8bf3566c1913ef60be765b5730ead9e0a3ba129" +checksum = "4e072d4e72f700fb3443d8fe94a39315df013eef1104903cdb0a2abd322bbecd" dependencies = [ "futures-util", "js-sys", @@ -6425,9 +6856,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.69" +version = "0.3.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77afa9a11836342370f4817622a2f0f418b134426d91a82dfb48f532d2ec13ef" +checksum = "26fdeaafd9bd129f65e7c031593c24d62186301e0c72c8978fa1678be7d532c0" dependencies = [ "js-sys", "wasm-bindgen", @@ -6441,20 +6872,20 @@ checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1" [[package]] name = "webpki-roots" -version = "0.26.3" +version = "0.26.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd7c23921eeb1713a4e851530e9b9756e4fb0e89978582942612524cf09f01cd" +checksum = "841c67bff177718f1d4dfefde8d8f0e78f9b6589319ba88312f567fc5841a958" dependencies = [ "rustls-pki-types", ] [[package]] name = "whoami" -version = "1.5.1" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a44ab49fad634e88f55bf8f9bb3abd2f27d7204172a112c7c9987e01c1c94ea9" +checksum = "372d5b87f58ec45c384ba03563b03544dc5fadc3983e434b286913f5b4a9bb6d" dependencies = [ - "redox_syscall 0.4.1", + "redox_syscall", "wasite", ] @@ -6688,18 +7119,9 @@ checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" -version = "0.5.40" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f593a95398737aeed53e489c785df13f3618e41dbcd6718c6addbf1395aa6876" -dependencies = [ - "memchr", -] - -[[package]] -name = "winnow" -version = "0.6.18" +version = "0.6.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68a9bda4691f099d435ad181000724da8e5899daa10713c2d432552b9ccd3a6f" +checksum = "36c1fec1a2bb5866f07c25f68c26e565c4c200aebb96d7e55710c19d3e8ac49b" dependencies = [ "memchr", ] @@ -6753,7 +7175,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.74", + "syn 2.0.79", ] [[package]] @@ -6773,7 +7195,7 @@ dependencies = [ "crossbeam-utils", "displaydoc", "flate2", - "indexmap 2.3.0", + "indexmap 2.6.0", "num_enum", "thiserror", ] @@ -6798,9 +7220,9 @@ dependencies = [ [[package]] name = "zstd-sys" -version = "2.0.12+zstd.1.5.6" +version = "2.0.13+zstd.1.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a4e40c320c3cb459d9a9ff6de98cff88f4751ee9275d140e2be94a2b74e4c13" +checksum = "38ff0f21cfee8f97d94cef41359e0c89aa6113028ab0291aa8ca0038995a95aa" dependencies = [ "cc", "pkg-config", diff --git a/Cargo.toml b/Cargo.toml index ccad03ba..77bfad76 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -41,6 +41,7 @@ serde = { version = "^1.0", features = ["rc"] } serde_derive = "^1.0" serde_with = "^3.4" serde_json = { version = "^1.0", features = ["raw_value"] } +serde-aux = "4.5.0" url = { version = "^2.5", features = ["serde"] } uuid = { version = "^1.6", features = ["serde", "v4", "v5", "v7"] } reqwest = { version = "^0.12", default-features = false, features = [ @@ -50,6 +51,7 @@ reqwest = { version = "^0.12", default-features = false, features = [ iceberg = { git = "https://github.com/hansetag/iceberg-rust.git", rev = "2b6263a6c998df75444f7feb54bd37144354a071", features = [ "storage-all", ] } +openfga-rs = { git = "https://github.com/c-thiel/openfga-rs.git", rev = 
"e576fe89778b0092292f115afec236b8134f56d1" } typed-builder = "^0.20.0" strum_macros = "^0.26" axum = { version = "^0.7" } @@ -79,7 +81,7 @@ lazy-regex = { version = "3.2.0", features = ["lite"] } lru = "0.12.4" percent-encoding = "2.3.1" strum = { version = "^0.26", features = ["derive"] } -sqlx = { version = "^0.8.0", default-features = false, features = [ +sqlx = { version = "^0.8.2", default-features = false, features = [ "runtime-tokio", "tls-rustls", "postgres", @@ -94,8 +96,8 @@ tokio = { version = "1", default-features = false, features = [ "signal", "rt-multi-thread", ] } -tower = { version = "^0.4" } -tower-http = { version = "^0.5", features = [ +tower = { version = "^0.5" } +tower-http = { version = "^0.6", features = [ "catch-panic", "compression-full", "sensitive-headers", @@ -116,3 +118,4 @@ veil = "0.1.7" paste = "1.0.15" heck = "0.5.0" time = "0.3.36" +pretty_assertions = "~1.4" diff --git a/authz/openfga/collaboration_model/v1/schema.fga b/authz/openfga/collaboration_model/v1/schema.fga new file mode 100644 index 00000000..bd73137e --- /dev/null +++ b/authz/openfga/collaboration_model/v1/schema.fga @@ -0,0 +1,220 @@ +model + schema 1.1 + +type user + +type role + relations + define assignee: [user, role#assignee] + + # ------------------ Actions ------------------ + # Can assume a role + define can_assume: assignee + +type server + relations + # Relation Hierarchies + define child: [project] + + # ------------------ Built-in Roles ------------------ + define global_admin: [user, role#assignee] + + # ------------------ Actions ------------------ + define can_create_project: global_admin + + # ------------------ Assignable Privileges ------------------ + # Anyone can list projects, they are however filtered by can_include_in_list of the project. + # Only global_admin can list all projects. + define can_list_all_projects: global_admin + + # GRANT Permissions + define can_grant_global_admin: global_admin + +type project + relations + # Relation Hierarchies + define child: [warehouse] + define parent: [server] + + # ------------------ Built-in Roles ------------------ + # Project Admin has two purposes: + # 1. Lock-out protection: Checked to never be empty + # 2. 
Granting new privileges, such as billing, which might require separate permissions / admin roles + define project_admin: [user, role#assignee] or global_admin from parent + # Security Admins can manage all security aspects (grants, ownership) but not modify, create or access objects + define security_admin: [user, role#assignee] or project_admin + # Warehouse Admins can manage all warehouse aspects but not grant privileges + define warehouse_admin: [user, role#assignee] or project_admin + + # ------------------ Assignable Privileges ------------------ + # Privileges relevant for the warehouse and sub-objects + define describe: [role#assignee] or select or create or warehouse_admin or security_admin + define select: [role#assignee] or modify + define create: [role#assignee] or warehouse_admin + define modify: [role#assignee] or project_admin + + # ------------------ Actions ------------------ + # Create stuff inside the project / create new warehouses + define can_create_warehouse: create + # Drop this project + define can_delete: modify + define can_get_metadata: describe or can_get_metadata from child + # Can list elements in this project - will be filtered subsequently + define can_list_warehouses: can_get_metadata + define can_include_in_list: can_get_metadata + define can_rename: modify + + # GRANT Permissions + define can_grant_create: security_admin + define can_grant_describe: security_admin + define can_grant_modify: security_admin + define can_grant_select: security_admin + define can_grant_project_admin: project_admin + define can_grant_security_admin: security_admin + define can_grant_warehouse_admin: warehouse_admin + +type warehouse + relations + # ------------------ Relation Hierarchies ------------------ + define parent: [project] + define child: [namespace] + + # ------------------ Special roles ------------------ + define ownership: [user, role#assignee] + define managed_access: [user:*, role:*] + define managed_access_inheritance: managed_access + + # ------------------ Assignable Privileges ------------------ + define pass_grants: [role#assignee] + define manage_grants: [role#assignee] or ownership or security_admin from parent + define describe: [role#assignee] or ownership or select or create or describe from parent + define select: [role#assignee] or ownership or modify or select from parent + define create: [role#assignee] or ownership or create from parent + define modify: [role#assignee] or ownership or modify from parent or warehouse_admin from parent + + # ------------------ Actions ------------------ + define can_create_namespace: create + # Delete this warehouse permanently + define can_delete: modify + define can_modify_storage: modify + define can_modify_storage_credential: modify + define can_get_metadata: describe or can_get_metadata from child + define can_get_config: can_get_metadata + # Can list elements in this warehouse - will be filtered subsequently + define can_list_namespaces: can_get_metadata + define can_use: can_get_metadata + define can_include_in_list: can_get_metadata + define can_deactivate: modify + define can_activate: modify + define can_rename: modify + define can_list_deleted_tabulars: can_get_metadata + + # GRANT Permissions + define can_grant_create: manage_grants or (create and pass_grants) + define can_grant_describe: manage_grants or (describe and pass_grants) + define can_grant_modify: manage_grants or (modify and pass_grants) + define can_grant_select: manage_grants or (select and pass_grants) + define can_grant_pass_grants:
manage_grants + define can_grant_manage_grants: manage_grants + define can_change_ownership: manage_grants + +type namespace + relations + # ------------------ Relation Hierarchies ------------------ + define parent: [namespace, warehouse] + define child: [namespace, table, view] + + # ------------------ Special roles ------------------ + define ownership: [user, role#assignee] + define managed_access: [user:*, role:*] + define managed_access_inheritance: managed_access or managed_access_inheritance from parent + + # ------------------ Assignable Privileges ------------------ + define pass_grants: [role#assignee] + define manage_grants: [role#assignee] or (ownership but not managed_access_inheritance from parent) or manage_grants from parent + define describe: [role#assignee] or ownership or select or create or describe from parent + define select: [role#assignee] or ownership or modify or select from parent + define create: [role#assignee] or ownership or create from parent + define modify: [role#assignee] or ownership or modify from parent + + # ------------------ Actions ------------------ + define can_create_table: create + define can_create_view: create + define can_create_namespace: create + define can_delete: modify + define can_update_properties: modify + define can_get_metadata: describe or can_get_metadata from child + define can_list_tables: can_get_metadata + define can_list_views: can_get_metadata + define can_list_namespaces: can_get_metadata + define can_include_in_list: can_get_metadata + + # GRANT Permissions + define can_grant_create: manage_grants or (create and pass_grants) + define can_grant_describe: manage_grants or (describe and pass_grants) + define can_grant_modify: manage_grants or (modify and pass_grants) + define can_grant_select: manage_grants or (select and pass_grants) + define can_grant_pass_grants: manage_grants + define can_grant_manage_grants: manage_grants + define can_change_ownership: manage_grants + +type table + relations + # ------------------ Relation Hierarchies ------------------ + define parent: [namespace] + + # ------------------ Special roles ------------------ + define ownership: [user, role#assignee] + + # ------------------ Assignable Privileges ------------------ + define pass_grants: [role#assignee] + define manage_grants: [role#assignee] or (ownership but not managed_access_inheritance from parent) or manage_grants from parent + define describe: [role#assignee] or ownership or select or describe from parent + define select: [role#assignee] or ownership or modify or select from parent + define modify: [role#assignee] or ownership or modify from parent + + # ------------------ Actions ------------------ + define can_drop: modify + define can_write_data: modify + define can_read_data: select + define can_get_metadata: describe + define can_commit: modify + define can_rename: modify + define can_include_in_list: can_get_metadata + + # GRANT Permissions + define can_grant_pass_grants: manage_grants + define can_grant_manage_grants: manage_grants + define can_grant_describe: manage_grants or (describe and pass_grants) + define can_grant_select: manage_grants or (select and pass_grants) + define can_grant_modify: manage_grants or (modify and pass_grants) + define can_change_ownership: manage_grants + +type view + relations + # ------------------ Relation Hierarchies ------------------ + define parent: [namespace] + + # ------------------ Special roles ------------------ + define ownership: [user, role#assignee] + + # ------------------ 
Assignable Privileges ------------------ + define pass_grants: [role#assignee] + define manage_grants: [role#assignee] or (ownership but not managed_access_inheritance from parent) or manage_grants from parent + define describe: [role#assignee] or ownership or modify or describe from parent + define modify: [role#assignee] or ownership or modify from parent + + # ------------------ Actions ------------------ + define can_drop: modify + define can_commit: modify + define can_get_metadata: describe + define can_rename: modify + define can_include_in_list: can_get_metadata + + + # GRANT Permissions + define can_grant_pass_grants: manage_grants + define can_grant_manage_grants: manage_grants + define can_grant_describe: manage_grants or (describe and pass_grants) + define can_grant_modify: manage_grants or (modify and pass_grants) + define can_change_ownership: manage_grants diff --git a/authz/openfga/collaboration_model/v1/schema.json b/authz/openfga/collaboration_model/v1/schema.json new file mode 100644 index 00000000..8876c4a6 --- /dev/null +++ b/authz/openfga/collaboration_model/v1/schema.json @@ -0,0 +1,1976 @@ +{ + "schema_version": "1.1", + "type_definitions": [ + { + "type": "user" + }, + { + "metadata": { + "relations": { + "assignee": { + "directly_related_user_types": [ + { + "type": "user" + }, + { + "relation": "assignee", + "type": "role" + } + ] + }, + "can_assume": {} + } + }, + "relations": { + "assignee": { + "this": {} + }, + "can_assume": { + "computedUserset": { + "relation": "assignee" + } + } + }, + "type": "role" + }, + { + "metadata": { + "relations": { + "can_create_project": {}, + "can_grant_global_admin": {}, + "can_list_all_projects": {}, + "child": { + "directly_related_user_types": [ + { + "type": "project" + } + ] + }, + "global_admin": { + "directly_related_user_types": [ + { + "type": "user" + }, + { + "relation": "assignee", + "type": "role" + } + ] + } + } + }, + "relations": { + "can_create_project": { + "computedUserset": { + "relation": "global_admin" + } + }, + "can_grant_global_admin": { + "computedUserset": { + "relation": "global_admin" + } + }, + "can_list_all_projects": { + "computedUserset": { + "relation": "global_admin" + } + }, + "child": { + "this": {} + }, + "global_admin": { + "this": {} + } + }, + "type": "server" + }, + { + "metadata": { + "relations": { + "can_create_warehouse": {}, + "can_delete": {}, + "can_get_metadata": {}, + "can_grant_create": {}, + "can_grant_describe": {}, + "can_grant_modify": {}, + "can_grant_project_admin": {}, + "can_grant_security_admin": {}, + "can_grant_select": {}, + "can_grant_warehouse_admin": {}, + "can_list_warehouses": {}, + "can_rename": {}, + "can_include_in_list": {}, + "child": { + "directly_related_user_types": [ + { + "type": "warehouse" + } + ] + }, + "create": { + "directly_related_user_types": [ + { + "relation": "assignee", + "type": "role" + } + ] + }, + "describe": { + "directly_related_user_types": [ + { + "relation": "assignee", + "type": "role" + } + ] + }, + "modify": { + "directly_related_user_types": [ + { + "relation": "assignee", + "type": "role" + } + ] + }, + "parent": { + "directly_related_user_types": [ + { + "type": "server" + } + ] + }, + "project_admin": { + "directly_related_user_types": [ + { + "type": "user" + }, + { + "relation": "assignee", + "type": "role" + } + ] + }, + "security_admin": { + "directly_related_user_types": [ + { + "type": "user" + }, + { + "relation": "assignee", + "type": "role" + } + ] + }, + "select": { + "directly_related_user_types": [ + { + 
"relation": "assignee", + "type": "role" + } + ] + }, + "warehouse_admin": { + "directly_related_user_types": [ + { + "type": "user" + }, + { + "relation": "assignee", + "type": "role" + } + ] + } + } + }, + "relations": { + "can_create_warehouse": { + "computedUserset": { + "relation": "create" + } + }, + "can_delete": { + "computedUserset": { + "relation": "modify" + } + }, + "can_get_metadata": { + "union": { + "child": [ + { + "computedUserset": { + "relation": "describe" + } + }, + { + "tupleToUserset": { + "computedUserset": { + "relation": "can_get_metadata" + }, + "tupleset": { + "relation": "child" + } + } + } + ] + } + }, + "can_grant_create": { + "computedUserset": { + "relation": "security_admin" + } + }, + "can_grant_describe": { + "computedUserset": { + "relation": "security_admin" + } + }, + "can_grant_modify": { + "computedUserset": { + "relation": "security_admin" + } + }, + "can_grant_project_admin": { + "computedUserset": { + "relation": "project_admin" + } + }, + "can_grant_security_admin": { + "computedUserset": { + "relation": "security_admin" + } + }, + "can_grant_select": { + "computedUserset": { + "relation": "security_admin" + } + }, + "can_grant_warehouse_admin": { + "computedUserset": { + "relation": "warehouse_admin" + } + }, + "can_list_warehouses": { + "computedUserset": { + "relation": "can_get_metadata" + } + }, + "can_rename": { + "computedUserset": { + "relation": "modify" + } + }, + "can_include_in_list": { + "computedUserset": { + "relation": "can_get_metadata" + } + }, + "child": { + "this": {} + }, + "create": { + "union": { + "child": [ + { + "this": {} + }, + { + "computedUserset": { + "relation": "warehouse_admin" + } + } + ] + } + }, + "describe": { + "union": { + "child": [ + { + "this": {} + }, + { + "computedUserset": { + "relation": "select" + } + }, + { + "computedUserset": { + "relation": "create" + } + }, + { + "computedUserset": { + "relation": "warehouse_admin" + } + }, + { + "computedUserset": { + "relation": "security_admin" + } + } + ] + } + }, + "modify": { + "union": { + "child": [ + { + "this": {} + }, + { + "computedUserset": { + "relation": "project_admin" + } + } + ] + } + }, + "parent": { + "this": {} + }, + "project_admin": { + "union": { + "child": [ + { + "this": {} + }, + { + "tupleToUserset": { + "computedUserset": { + "relation": "global_admin" + }, + "tupleset": { + "relation": "parent" + } + } + } + ] + } + }, + "security_admin": { + "union": { + "child": [ + { + "this": {} + }, + { + "computedUserset": { + "relation": "project_admin" + } + } + ] + } + }, + "select": { + "union": { + "child": [ + { + "this": {} + }, + { + "computedUserset": { + "relation": "modify" + } + } + ] + } + }, + "warehouse_admin": { + "union": { + "child": [ + { + "this": {} + }, + { + "computedUserset": { + "relation": "project_admin" + } + } + ] + } + } + }, + "type": "project" + }, + { + "metadata": { + "relations": { + "can_activate": {}, + "can_change_ownership": {}, + "can_create_namespace": {}, + "can_deactivate": {}, + "can_delete": {}, + "can_get_config": {}, + "can_get_metadata": {}, + "can_grant_create": {}, + "can_grant_describe": {}, + "can_grant_manage_grants": {}, + "can_grant_modify": {}, + "can_grant_pass_grants": {}, + "can_grant_select": {}, + "can_list_deleted_tabulars": {}, + "can_list_namespaces": {}, + "can_modify_storage": {}, + "can_modify_storage_credential": {}, + "can_rename": {}, + "can_include_in_list": {}, + "can_use": {}, + "child": { + "directly_related_user_types": [ + { + "type": "namespace" + } + ] + }, + 
"create": { + "directly_related_user_types": [ + { + "relation": "assignee", + "type": "role" + } + ] + }, + "describe": { + "directly_related_user_types": [ + { + "relation": "assignee", + "type": "role" + } + ] + }, + "manage_grants": { + "directly_related_user_types": [ + { + "relation": "assignee", + "type": "role" + } + ] + }, + "managed_access": { + "directly_related_user_types": [ + { + "type": "user", + "wildcard": {} + }, + { + "type": "role", + "wildcard": {} + } + ] + }, + "managed_access_inheritance": {}, + "modify": { + "directly_related_user_types": [ + { + "relation": "assignee", + "type": "role" + } + ] + }, + "ownership": { + "directly_related_user_types": [ + { + "type": "user" + }, + { + "relation": "assignee", + "type": "role" + } + ] + }, + "parent": { + "directly_related_user_types": [ + { + "type": "project" + } + ] + }, + "pass_grants": { + "directly_related_user_types": [ + { + "relation": "assignee", + "type": "role" + } + ] + }, + "select": { + "directly_related_user_types": [ + { + "relation": "assignee", + "type": "role" + } + ] + } + } + }, + "relations": { + "can_activate": { + "computedUserset": { + "relation": "modify" + } + }, + "can_change_ownership": { + "computedUserset": { + "relation": "manage_grants" + } + }, + "can_create_namespace": { + "computedUserset": { + "relation": "create" + } + }, + "can_deactivate": { + "computedUserset": { + "relation": "modify" + } + }, + "can_delete": { + "computedUserset": { + "relation": "modify" + } + }, + "can_get_config": { + "computedUserset": { + "relation": "can_get_metadata" + } + }, + "can_get_metadata": { + "union": { + "child": [ + { + "computedUserset": { + "relation": "describe" + } + }, + { + "tupleToUserset": { + "computedUserset": { + "relation": "can_get_metadata" + }, + "tupleset": { + "relation": "child" + } + } + } + ] + } + }, + "can_grant_create": { + "union": { + "child": [ + { + "computedUserset": { + "relation": "manage_grants" + } + }, + { + "intersection": { + "child": [ + { + "computedUserset": { + "relation": "create" + } + }, + { + "computedUserset": { + "relation": "pass_grants" + } + } + ] + } + } + ] + } + }, + "can_grant_describe": { + "union": { + "child": [ + { + "computedUserset": { + "relation": "manage_grants" + } + }, + { + "intersection": { + "child": [ + { + "computedUserset": { + "relation": "describe" + } + }, + { + "computedUserset": { + "relation": "pass_grants" + } + } + ] + } + } + ] + } + }, + "can_grant_manage_grants": { + "computedUserset": { + "relation": "manage_grants" + } + }, + "can_grant_modify": { + "union": { + "child": [ + { + "computedUserset": { + "relation": "manage_grants" + } + }, + { + "intersection": { + "child": [ + { + "computedUserset": { + "relation": "modify" + } + }, + { + "computedUserset": { + "relation": "pass_grants" + } + } + ] + } + } + ] + } + }, + "can_grant_pass_grants": { + "computedUserset": { + "relation": "manage_grants" + } + }, + "can_grant_select": { + "union": { + "child": [ + { + "computedUserset": { + "relation": "manage_grants" + } + }, + { + "intersection": { + "child": [ + { + "computedUserset": { + "relation": "select" + } + }, + { + "computedUserset": { + "relation": "pass_grants" + } + } + ] + } + } + ] + } + }, + "can_list_deleted_tabulars": { + "computedUserset": { + "relation": "can_get_metadata" + } + }, + "can_list_namespaces": { + "computedUserset": { + "relation": "can_get_metadata" + } + }, + "can_modify_storage": { + "computedUserset": { + "relation": "modify" + } + }, + "can_modify_storage_credential": { + 
"computedUserset": { + "relation": "modify" + } + }, + "can_rename": { + "computedUserset": { + "relation": "modify" + } + }, + "can_include_in_list": { + "computedUserset": { + "relation": "can_get_metadata" + } + }, + "can_use": { + "computedUserset": { + "relation": "can_get_metadata" + } + }, + "child": { + "this": {} + }, + "create": { + "union": { + "child": [ + { + "this": {} + }, + { + "computedUserset": { + "relation": "ownership" + } + }, + { + "tupleToUserset": { + "computedUserset": { + "relation": "create" + }, + "tupleset": { + "relation": "parent" + } + } + } + ] + } + }, + "describe": { + "union": { + "child": [ + { + "this": {} + }, + { + "computedUserset": { + "relation": "ownership" + } + }, + { + "computedUserset": { + "relation": "select" + } + }, + { + "computedUserset": { + "relation": "create" + } + }, + { + "tupleToUserset": { + "computedUserset": { + "relation": "describe" + }, + "tupleset": { + "relation": "parent" + } + } + } + ] + } + }, + "manage_grants": { + "union": { + "child": [ + { + "this": {} + }, + { + "computedUserset": { + "relation": "ownership" + } + }, + { + "tupleToUserset": { + "computedUserset": { + "relation": "security_admin" + }, + "tupleset": { + "relation": "parent" + } + } + } + ] + } + }, + "managed_access": { + "this": {} + }, + "managed_access_inheritance": { + "computedUserset": { + "relation": "managed_access" + } + }, + "modify": { + "union": { + "child": [ + { + "this": {} + }, + { + "computedUserset": { + "relation": "ownership" + } + }, + { + "tupleToUserset": { + "computedUserset": { + "relation": "modify" + }, + "tupleset": { + "relation": "parent" + } + } + }, + { + "tupleToUserset": { + "computedUserset": { + "relation": "warehouse_admin" + }, + "tupleset": { + "relation": "parent" + } + } + } + ] + } + }, + "ownership": { + "this": {} + }, + "parent": { + "this": {} + }, + "pass_grants": { + "this": {} + }, + "select": { + "union": { + "child": [ + { + "this": {} + }, + { + "computedUserset": { + "relation": "ownership" + } + }, + { + "computedUserset": { + "relation": "modify" + } + }, + { + "tupleToUserset": { + "computedUserset": { + "relation": "select" + }, + "tupleset": { + "relation": "parent" + } + } + } + ] + } + } + }, + "type": "warehouse" + }, + { + "metadata": { + "relations": { + "can_change_ownership": {}, + "can_create_namespace": {}, + "can_create_table": {}, + "can_create_view": {}, + "can_delete": {}, + "can_get_metadata": {}, + "can_grant_create": {}, + "can_grant_describe": {}, + "can_grant_manage_grants": {}, + "can_grant_modify": {}, + "can_grant_pass_grants": {}, + "can_grant_select": {}, + "can_list_namespaces": {}, + "can_list_tables": {}, + "can_list_views": {}, + "can_include_in_list": {}, + "can_update_properties": {}, + "child": { + "directly_related_user_types": [ + { + "type": "namespace" + }, + { + "type": "table" + }, + { + "type": "view" + } + ] + }, + "create": { + "directly_related_user_types": [ + { + "relation": "assignee", + "type": "role" + } + ] + }, + "describe": { + "directly_related_user_types": [ + { + "relation": "assignee", + "type": "role" + } + ] + }, + "manage_grants": { + "directly_related_user_types": [ + { + "relation": "assignee", + "type": "role" + } + ] + }, + "managed_access": { + "directly_related_user_types": [ + { + "type": "user", + "wildcard": {} + }, + { + "type": "role", + "wildcard": {} + } + ] + }, + "managed_access_inheritance": {}, + "modify": { + "directly_related_user_types": [ + { + "relation": "assignee", + "type": "role" + } + ] + }, + "ownership": { + 
"directly_related_user_types": [ + { + "type": "user" + }, + { + "relation": "assignee", + "type": "role" + } + ] + }, + "parent": { + "directly_related_user_types": [ + { + "type": "namespace" + }, + { + "type": "warehouse" + } + ] + }, + "pass_grants": { + "directly_related_user_types": [ + { + "relation": "assignee", + "type": "role" + } + ] + }, + "select": { + "directly_related_user_types": [ + { + "relation": "assignee", + "type": "role" + } + ] + } + } + }, + "relations": { + "can_change_ownership": { + "computedUserset": { + "relation": "manage_grants" + } + }, + "can_create_namespace": { + "computedUserset": { + "relation": "create" + } + }, + "can_create_table": { + "computedUserset": { + "relation": "create" + } + }, + "can_create_view": { + "computedUserset": { + "relation": "create" + } + }, + "can_delete": { + "computedUserset": { + "relation": "modify" + } + }, + "can_get_metadata": { + "union": { + "child": [ + { + "computedUserset": { + "relation": "describe" + } + }, + { + "tupleToUserset": { + "computedUserset": { + "relation": "can_get_metadata" + }, + "tupleset": { + "relation": "child" + } + } + } + ] + } + }, + "can_grant_create": { + "union": { + "child": [ + { + "computedUserset": { + "relation": "manage_grants" + } + }, + { + "intersection": { + "child": [ + { + "computedUserset": { + "relation": "create" + } + }, + { + "computedUserset": { + "relation": "pass_grants" + } + } + ] + } + } + ] + } + }, + "can_grant_describe": { + "union": { + "child": [ + { + "computedUserset": { + "relation": "manage_grants" + } + }, + { + "intersection": { + "child": [ + { + "computedUserset": { + "relation": "describe" + } + }, + { + "computedUserset": { + "relation": "pass_grants" + } + } + ] + } + } + ] + } + }, + "can_grant_manage_grants": { + "computedUserset": { + "relation": "manage_grants" + } + }, + "can_grant_modify": { + "union": { + "child": [ + { + "computedUserset": { + "relation": "manage_grants" + } + }, + { + "intersection": { + "child": [ + { + "computedUserset": { + "relation": "modify" + } + }, + { + "computedUserset": { + "relation": "pass_grants" + } + } + ] + } + } + ] + } + }, + "can_grant_pass_grants": { + "computedUserset": { + "relation": "manage_grants" + } + }, + "can_grant_select": { + "union": { + "child": [ + { + "computedUserset": { + "relation": "manage_grants" + } + }, + { + "intersection": { + "child": [ + { + "computedUserset": { + "relation": "select" + } + }, + { + "computedUserset": { + "relation": "pass_grants" + } + } + ] + } + } + ] + } + }, + "can_list_namespaces": { + "computedUserset": { + "relation": "can_get_metadata" + } + }, + "can_list_tables": { + "computedUserset": { + "relation": "can_get_metadata" + } + }, + "can_list_views": { + "computedUserset": { + "relation": "can_get_metadata" + } + }, + "can_include_in_list": { + "computedUserset": { + "relation": "can_get_metadata" + } + }, + "can_update_properties": { + "computedUserset": { + "relation": "modify" + } + }, + "child": { + "this": {} + }, + "create": { + "union": { + "child": [ + { + "this": {} + }, + { + "computedUserset": { + "relation": "ownership" + } + }, + { + "tupleToUserset": { + "computedUserset": { + "relation": "create" + }, + "tupleset": { + "relation": "parent" + } + } + } + ] + } + }, + "describe": { + "union": { + "child": [ + { + "this": {} + }, + { + "computedUserset": { + "relation": "ownership" + } + }, + { + "computedUserset": { + "relation": "select" + } + }, + { + "computedUserset": { + "relation": "create" + } + }, + { + "tupleToUserset": { + 
"computedUserset": { + "relation": "describe" + }, + "tupleset": { + "relation": "parent" + } + } + } + ] + } + }, + "manage_grants": { + "union": { + "child": [ + { + "this": {} + }, + { + "difference": { + "base": { + "computedUserset": { + "relation": "ownership" + } + }, + "subtract": { + "tupleToUserset": { + "computedUserset": { + "relation": "managed_access_inheritance" + }, + "tupleset": { + "relation": "parent" + } + } + } + } + }, + { + "tupleToUserset": { + "computedUserset": { + "relation": "manage_grants" + }, + "tupleset": { + "relation": "parent" + } + } + } + ] + } + }, + "managed_access": { + "this": {} + }, + "managed_access_inheritance": { + "union": { + "child": [ + { + "computedUserset": { + "relation": "managed_access" + } + }, + { + "tupleToUserset": { + "computedUserset": { + "relation": "managed_access_inheritance" + }, + "tupleset": { + "relation": "parent" + } + } + } + ] + } + }, + "modify": { + "union": { + "child": [ + { + "this": {} + }, + { + "computedUserset": { + "relation": "ownership" + } + }, + { + "tupleToUserset": { + "computedUserset": { + "relation": "modify" + }, + "tupleset": { + "relation": "parent" + } + } + } + ] + } + }, + "ownership": { + "this": {} + }, + "parent": { + "this": {} + }, + "pass_grants": { + "this": {} + }, + "select": { + "union": { + "child": [ + { + "this": {} + }, + { + "computedUserset": { + "relation": "ownership" + } + }, + { + "computedUserset": { + "relation": "modify" + } + }, + { + "tupleToUserset": { + "computedUserset": { + "relation": "select" + }, + "tupleset": { + "relation": "parent" + } + } + } + ] + } + } + }, + "type": "namespace" + }, + { + "metadata": { + "relations": { + "can_change_ownership": {}, + "can_commit": {}, + "can_drop": {}, + "can_get_metadata": {}, + "can_grant_describe": {}, + "can_grant_manage_grants": {}, + "can_grant_modify": {}, + "can_grant_pass_grants": {}, + "can_grant_select": {}, + "can_read_data": {}, + "can_rename": {}, + "can_include_in_list": {}, + "can_write_data": {}, + "describe": { + "directly_related_user_types": [ + { + "relation": "assignee", + "type": "role" + } + ] + }, + "manage_grants": { + "directly_related_user_types": [ + { + "relation": "assignee", + "type": "role" + } + ] + }, + "modify": { + "directly_related_user_types": [ + { + "relation": "assignee", + "type": "role" + } + ] + }, + "ownership": { + "directly_related_user_types": [ + { + "type": "user" + }, + { + "relation": "assignee", + "type": "role" + } + ] + }, + "parent": { + "directly_related_user_types": [ + { + "type": "namespace" + } + ] + }, + "pass_grants": { + "directly_related_user_types": [ + { + "relation": "assignee", + "type": "role" + } + ] + }, + "select": { + "directly_related_user_types": [ + { + "relation": "assignee", + "type": "role" + } + ] + } + } + }, + "relations": { + "can_change_ownership": { + "computedUserset": { + "relation": "manage_grants" + } + }, + "can_commit": { + "computedUserset": { + "relation": "modify" + } + }, + "can_drop": { + "computedUserset": { + "relation": "modify" + } + }, + "can_get_metadata": { + "computedUserset": { + "relation": "describe" + } + }, + "can_grant_describe": { + "union": { + "child": [ + { + "computedUserset": { + "relation": "manage_grants" + } + }, + { + "intersection": { + "child": [ + { + "computedUserset": { + "relation": "describe" + } + }, + { + "computedUserset": { + "relation": "pass_grants" + } + } + ] + } + } + ] + } + }, + "can_grant_manage_grants": { + "computedUserset": { + "relation": "manage_grants" + } + }, + 
"can_grant_modify": { + "union": { + "child": [ + { + "computedUserset": { + "relation": "manage_grants" + } + }, + { + "intersection": { + "child": [ + { + "computedUserset": { + "relation": "modify" + } + }, + { + "computedUserset": { + "relation": "pass_grants" + } + } + ] + } + } + ] + } + }, + "can_grant_pass_grants": { + "computedUserset": { + "relation": "manage_grants" + } + }, + "can_grant_select": { + "union": { + "child": [ + { + "computedUserset": { + "relation": "manage_grants" + } + }, + { + "intersection": { + "child": [ + { + "computedUserset": { + "relation": "select" + } + }, + { + "computedUserset": { + "relation": "pass_grants" + } + } + ] + } + } + ] + } + }, + "can_read_data": { + "computedUserset": { + "relation": "select" + } + }, + "can_rename": { + "computedUserset": { + "relation": "modify" + } + }, + "can_include_in_list": { + "computedUserset": { + "relation": "can_get_metadata" + } + }, + "can_write_data": { + "computedUserset": { + "relation": "modify" + } + }, + "describe": { + "union": { + "child": [ + { + "this": {} + }, + { + "computedUserset": { + "relation": "ownership" + } + }, + { + "computedUserset": { + "relation": "select" + } + }, + { + "tupleToUserset": { + "computedUserset": { + "relation": "describe" + }, + "tupleset": { + "relation": "parent" + } + } + } + ] + } + }, + "manage_grants": { + "union": { + "child": [ + { + "this": {} + }, + { + "difference": { + "base": { + "computedUserset": { + "relation": "ownership" + } + }, + "subtract": { + "tupleToUserset": { + "computedUserset": { + "relation": "managed_access_inheritance" + }, + "tupleset": { + "relation": "parent" + } + } + } + } + }, + { + "tupleToUserset": { + "computedUserset": { + "relation": "manage_grants" + }, + "tupleset": { + "relation": "parent" + } + } + } + ] + } + }, + "modify": { + "union": { + "child": [ + { + "this": {} + }, + { + "computedUserset": { + "relation": "ownership" + } + }, + { + "tupleToUserset": { + "computedUserset": { + "relation": "modify" + }, + "tupleset": { + "relation": "parent" + } + } + } + ] + } + }, + "ownership": { + "this": {} + }, + "parent": { + "this": {} + }, + "pass_grants": { + "this": {} + }, + "select": { + "union": { + "child": [ + { + "this": {} + }, + { + "computedUserset": { + "relation": "ownership" + } + }, + { + "computedUserset": { + "relation": "modify" + } + }, + { + "tupleToUserset": { + "computedUserset": { + "relation": "select" + }, + "tupleset": { + "relation": "parent" + } + } + } + ] + } + } + }, + "type": "table" + }, + { + "metadata": { + "relations": { + "can_change_ownership": {}, + "can_commit": {}, + "can_drop": {}, + "can_get_metadata": {}, + "can_grant_describe": {}, + "can_grant_manage_grants": {}, + "can_grant_modify": {}, + "can_grant_pass_grants": {}, + "can_rename": {}, + "can_include_in_list": {}, + "describe": { + "directly_related_user_types": [ + { + "relation": "assignee", + "type": "role" + } + ] + }, + "manage_grants": { + "directly_related_user_types": [ + { + "relation": "assignee", + "type": "role" + } + ] + }, + "modify": { + "directly_related_user_types": [ + { + "relation": "assignee", + "type": "role" + } + ] + }, + "ownership": { + "directly_related_user_types": [ + { + "type": "user" + }, + { + "relation": "assignee", + "type": "role" + } + ] + }, + "parent": { + "directly_related_user_types": [ + { + "type": "namespace" + } + ] + }, + "pass_grants": { + "directly_related_user_types": [ + { + "relation": "assignee", + "type": "role" + } + ] + } + } + }, + "relations": { + 
"can_change_ownership": { + "computedUserset": { + "relation": "manage_grants" + } + }, + "can_commit": { + "computedUserset": { + "relation": "modify" + } + }, + "can_drop": { + "computedUserset": { + "relation": "modify" + } + }, + "can_get_metadata": { + "computedUserset": { + "relation": "describe" + } + }, + "can_grant_describe": { + "union": { + "child": [ + { + "computedUserset": { + "relation": "manage_grants" + } + }, + { + "intersection": { + "child": [ + { + "computedUserset": { + "relation": "describe" + } + }, + { + "computedUserset": { + "relation": "pass_grants" + } + } + ] + } + } + ] + } + }, + "can_grant_manage_grants": { + "computedUserset": { + "relation": "manage_grants" + } + }, + "can_grant_modify": { + "union": { + "child": [ + { + "computedUserset": { + "relation": "manage_grants" + } + }, + { + "intersection": { + "child": [ + { + "computedUserset": { + "relation": "modify" + } + }, + { + "computedUserset": { + "relation": "pass_grants" + } + } + ] + } + } + ] + } + }, + "can_grant_pass_grants": { + "computedUserset": { + "relation": "manage_grants" + } + }, + "can_rename": { + "computedUserset": { + "relation": "modify" + } + }, + "can_include_in_list": { + "computedUserset": { + "relation": "can_get_metadata" + } + }, + "describe": { + "union": { + "child": [ + { + "this": {} + }, + { + "computedUserset": { + "relation": "ownership" + } + }, + { + "computedUserset": { + "relation": "modify" + } + }, + { + "tupleToUserset": { + "computedUserset": { + "relation": "describe" + }, + "tupleset": { + "relation": "parent" + } + } + } + ] + } + }, + "manage_grants": { + "union": { + "child": [ + { + "this": {} + }, + { + "difference": { + "base": { + "computedUserset": { + "relation": "ownership" + } + }, + "subtract": { + "tupleToUserset": { + "computedUserset": { + "relation": "managed_access_inheritance" + }, + "tupleset": { + "relation": "parent" + } + } + } + } + }, + { + "tupleToUserset": { + "computedUserset": { + "relation": "manage_grants" + }, + "tupleset": { + "relation": "parent" + } + } + } + ] + } + }, + "modify": { + "union": { + "child": [ + { + "this": {} + }, + { + "computedUserset": { + "relation": "ownership" + } + }, + { + "tupleToUserset": { + "computedUserset": { + "relation": "modify" + }, + "tupleset": { + "relation": "parent" + } + } + } + ] + } + }, + "ownership": { + "this": {} + }, + "parent": { + "this": {} + }, + "pass_grants": { + "this": {} + } + }, + "type": "view" + } + ] +} \ No newline at end of file diff --git a/authz/openfga/collaboration_model/v1/store.fga.yaml b/authz/openfga/collaboration_model/v1/store.fga.yaml new file mode 100644 index 00000000..abe7622a --- /dev/null +++ b/authz/openfga/collaboration_model/v1/store.fga.yaml @@ -0,0 +1,1148 @@ +name: Custom Roles +model_file: ./schema.fga +tuples: + # Hierarchies: server_1 -> project_1 -> (warehouse_1 (unmanaged), warehouse_2 (managed access)) + - user: server:server_1 + relation: parent + object: project:project_1 + - user: project:project_1 + relation: child + object: server:server_1 + - user: warehouse:warehouse_1 + relation: child + object: project:project_1 + - user: project:project_1 + relation: parent + object: warehouse:warehouse_1 + - user: warehouse:warehouse_2 + relation: child + object: project:project_1 + - user: project:project_1 + relation: parent + object: warehouse:warehouse_2 + # Hierarchies: warehouse_1 -> namespace_1 -> (table_1, table_2) + - user: warehouse:warehouse_1 + relation: parent + object: namespace:namespace_1 + - user: namespace:namespace_1 + 
relation: child + object: warehouse:warehouse_1 + - user: table:table_1 + relation: child + object: namespace:namespace_1 + - user: namespace:namespace_1 + relation: parent + object: table:table_1 + # Hierarchies: warehouse_1 -> namespace_1_1 -> namespace_1_2 -> (table_3, view_1) + - user: warehouse:warehouse_1 + relation: parent + object: namespace:namespace_1_1 + - user: namespace:namespace_1_1 + relation: child + object: warehouse:warehouse_1 + - user: namespace:namespace_1_1 + relation: parent + object: namespace:namespace_1_2 + - user: namespace:namespace_1_2 + relation: child + object: namespace:namespace_1_1 + - user: table:table_3 + relation: child + object: namespace:namespace_1_2 + - user: namespace:namespace_1_2 + relation: parent + object: table:table_3 + - user: view:view_1 + relation: child + object: namespace:namespace_1_2 + - user: namespace:namespace_1_2 + relation: parent + object: view:view_1 + # Hierarchies: warehouse_2 -> namespace_2_1 -> namespace_2_2 -> table_2_2 + - user: warehouse:warehouse_2 + relation: parent + object: namespace:namespace_2_1 + - user: namespace:namespace_2_1 + relation: child + object: warehouse:warehouse_2 + - user: namespace:namespace_2_1 + relation: parent + object: namespace:namespace_2_2 + - user: namespace:namespace_2_2 + relation: child + object: namespace:namespace_2_1 + - user: table:table_2_2 + relation: child + object: namespace:namespace_2_2 + - user: namespace:namespace_2_2 + relation: parent + object: table:table_2_2 + # warehouse_2 has managed access + - user: role:* + relation: managed_access + object: warehouse:warehouse_2 + - user: user:* + relation: managed_access + object: warehouse:warehouse_2 + # Roles (Global Admin) + - user: user:global_admin + relation: global_admin + object: server:server_1 + # Roles (Project 1 project admin) + - user: user:project_1_project_admin + relation: project_admin + object: project:project_1 + # Roles (Project 1 security admin) + - user: user:project_1_security_admin + relation: security_admin + object: project:project_1 + # Roles (Project 1 warehouse admin) + - user: user:project_1_warehouse_admin + relation: warehouse_admin + object: project:project_1 + # Roles (Warehouse 1 owner) + - user: user:warehouse_1_owner + relation: ownership + object: warehouse:warehouse_1 + # Roles (Owner on namespace_1_1) + - user: user:namespace_1_1_owner + relation: ownership + object: namespace:namespace_1_1 + # Roles (Select on Table 3) + - user: user:select_table_3 + relation: assignee + object: role:select_table_3 + - user: role:select_table_3#assignee + relation: select + object: table:table_3 + # Roles (Owner on warehouse_2) + - user: user:warehouse_2_owner + relation: ownership + object: warehouse:warehouse_2 + # Roles (Owner on namespace_2_1) + - user: user:namespace_2_1_owner + relation: ownership + object: namespace:namespace_2_1 + # Roles (Owner on namespace_2_2) + - user: user:namespace_2_2_owner + relation: ownership + object: namespace:namespace_2_2 + - user: user:table_2_2_owner + relation: ownership + object: table:table_2_2 + # Roles (Owner role on namespace_2_1) + - user: role:namespace_2_1_owner#assignee + relation: ownership + object: namespace:namespace_2_1 + - user: user:namespace_2_1_role_owner + relation: assignee + object: role:namespace_2_1_owner +tests: + - name: Test Global Admin + check: + - user: user:global_admin + object: server:server_1 + assertions: + can_create_project: true + - user: user:global_admin + object: project:project_1 + assertions: + can_create_warehouse: true +
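The fixture tuples above write every hierarchy edge twice: once as a `parent` tuple stored on the child object and once as a `child` tuple stored on the parent. Both directions are needed because grants such as `modify`, `select` and `describe` flow downwards through `tupleToUserset` over `parent`, while `can_get_metadata` also bubbles upwards over `child` so that ancestors of a readable object stay listable. A rough Rust sketch of the tuple pair produced for one edge (the `Tuple` struct and `hierarchy_edge` helper are hypothetical and not part of this patch):

    // Hypothetical helper, not part of this patch: the two OpenFGA tuples the
    // fixtures write for a single parent/child edge of the catalog hierarchy.
    #[derive(Debug, PartialEq)]
    struct Tuple {
        user: String,
        relation: String,
        object: String,
    }

    fn hierarchy_edge(parent: &str, child: &str) -> [Tuple; 2] {
        [
            // stored on the child: permissions flow down via `parent`
            Tuple { user: parent.into(), relation: "parent".into(), object: child.into() },
            // stored on the parent: `can_get_metadata` bubbles up via `child`
            Tuple { user: child.into(), relation: "child".into(), object: parent.into() },
        ]
    }

    // e.g. hierarchy_edge("warehouse:warehouse_1", "namespace:namespace_1")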
can_delete: true + can_get_metadata: true + can_list_warehouses: true + can_include_in_list: true + can_rename: true + can_grant_create: true + can_grant_describe: true + can_grant_modify: true + can_grant_select: true + can_grant_project_admin: true + can_grant_security_admin: true + can_grant_warehouse_admin: true + - user: user:global_admin + object: warehouse:warehouse_1 + assertions: + can_create_namespace: true + can_delete: true + can_modify_storage: true + can_modify_storage_credential: true + can_get_metadata: true + can_get_config: true + can_list_namespaces: true + can_use: true + can_include_in_list: true + can_deactivate: true + can_activate: true + can_rename: true + can_list_deleted_tabulars: true + can_grant_create: true + can_grant_describe: true + can_grant_modify: true + can_grant_select: true + can_grant_pass_grants: true + can_grant_manage_grants: true + can_change_ownership: true + - user: user:global_admin + object: namespace:namespace_1 + assertions: + can_create_table: true + can_create_view: true + can_create_namespace: true + can_delete: true + can_update_properties: true + can_get_metadata: true + can_list_tables: true + can_list_views: true + can_list_namespaces: true + can_grant_create: true + can_grant_describe: true + can_grant_modify: true + can_grant_select: true + can_grant_pass_grants: true + can_grant_manage_grants: true + can_change_ownership: true + - user: user:global_admin + object: table:table_3 + assertions: + can_drop: true + can_write_data: true + can_read_data: true + can_get_metadata: true + can_commit: true + can_rename: true + can_include_in_list: true + can_grant_pass_grants: true + can_grant_manage_grants: true + can_grant_describe: true + can_grant_select: true + can_grant_modify: true + can_change_ownership: true + - user: user:global_admin + object: view:view_1 + assertions: + can_drop: true + can_commit: true + can_get_metadata: true + can_rename: true + can_include_in_list: true + can_grant_pass_grants: true + can_grant_manage_grants: true + can_grant_describe: true + can_grant_modify: true + can_change_ownership: true + - name: Project Admin has full access in project and below + check: + - user: user:project_1_project_admin + object: project:project_1 + assertions: + can_create_warehouse: true + can_delete: true + can_get_metadata: true + can_list_warehouses: true + can_include_in_list: true + can_rename: true + can_grant_create: true + can_grant_describe: true + can_grant_modify: true + can_grant_select: true + can_grant_project_admin: true + can_grant_security_admin: true + can_grant_warehouse_admin: true + - user: user:project_1_project_admin + object: warehouse:warehouse_1 + assertions: + can_create_namespace: true + can_delete: true + can_modify_storage: true + can_modify_storage_credential: true + can_get_metadata: true + can_get_config: true + can_list_namespaces: true + can_use: true + can_include_in_list: true + can_deactivate: true + can_activate: true + can_rename: true + can_list_deleted_tabulars: true + can_grant_create: true + can_grant_describe: true + can_grant_modify: true + can_grant_select: true + can_grant_pass_grants: true + can_grant_manage_grants: true + can_change_ownership: true + - user: user:project_1_project_admin + object: namespace:namespace_1 + assertions: + can_create_table: true + can_create_view: true + can_create_namespace: true + can_delete: true + can_update_properties: true + can_get_metadata: true + can_list_tables: true + can_list_views: true + can_list_namespaces: true + can_grant_create: 
true + can_grant_describe: true + can_grant_modify: true + can_grant_select: true + can_grant_pass_grants: true + can_grant_manage_grants: true + can_change_ownership: true + - user: user:project_1_project_admin + object: table:table_3 + assertions: + can_drop: true + can_write_data: true + can_read_data: true + can_get_metadata: true + can_commit: true + can_rename: true + can_include_in_list: true + can_grant_pass_grants: true + can_grant_manage_grants: true + can_grant_describe: true + can_grant_select: true + can_grant_modify: true + can_change_ownership: true + - user: user:project_1_project_admin + object: view:view_1 + assertions: + can_drop: true + can_commit: true + can_get_metadata: true + can_rename: true + can_include_in_list: true + can_grant_pass_grants: true + can_grant_manage_grants: true + can_grant_describe: true + can_grant_modify: true + can_change_ownership: true + - name: Security Admin can manage grants and navigate but not see data + check: + - user: user:project_1_security_admin + object: project:project_1 + assertions: + can_create_warehouse: false + can_delete: false + can_get_metadata: true + can_list_warehouses: true + can_include_in_list: true + can_rename: false + can_grant_create: true + can_grant_describe: true + can_grant_modify: true + can_grant_select: true + can_grant_project_admin: false + can_grant_security_admin: true + can_grant_warehouse_admin: false + - user: user:project_1_security_admin + object: warehouse:warehouse_1 + assertions: + can_create_namespace: false + can_delete: false + can_modify_storage: false + can_modify_storage_credential: false + can_get_metadata: true + can_get_config: true + can_list_namespaces: true + can_use: true + can_include_in_list: true + can_deactivate: false + can_activate: false + can_rename: false + can_list_deleted_tabulars: true + can_grant_create: true + can_grant_describe: true + can_grant_modify: true + can_grant_select: true + can_grant_pass_grants: true + can_grant_manage_grants: true + can_change_ownership: true + - user: user:project_1_security_admin + object: namespace:namespace_1 + assertions: + can_create_table: false + can_create_view: false + can_create_namespace: false + can_delete: false + can_update_properties: false + can_get_metadata: true + can_list_tables: true + can_list_views: true + can_include_in_list: true + can_list_namespaces: true + can_grant_create: true + can_grant_describe: true + can_grant_modify: true + can_grant_select: true + can_grant_pass_grants: true + can_grant_manage_grants: true + can_change_ownership: true + - user: user:project_1_security_admin + object: table:table_3 + assertions: + can_drop: false + can_write_data: false + can_read_data: false + can_get_metadata: true + can_commit: false + can_rename: false + can_include_in_list: true + can_grant_pass_grants: true + can_grant_manage_grants: true + can_grant_describe: true + can_grant_select: true + can_grant_modify: true + can_change_ownership: true + - user: user:project_1_security_admin + object: view:view_1 + assertions: + can_drop: false + can_commit: false + can_get_metadata: true + can_rename: false + can_include_in_list: true + can_grant_pass_grants: true + can_grant_manage_grants: true + can_grant_describe: true + can_grant_modify: true + can_change_ownership: true + - name: Warehouse Admin can manage data and objects but not grants + check: + - user: user:project_1_warehouse_admin + object: project:project_1 + assertions: + can_create_warehouse: true + can_delete: false + can_get_metadata: true +
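The Security Admin assertions above follow from the model defined earlier: the warehouse-level `manage_grants` relation is a union of a direct grant, `ownership`, and `security_admin` on the parent project, and `manage_grants` then propagates down to namespaces, tables and views via `parent`. The data-bearing relations (`modify`, `select`) never include `security_admin`, which is why every `can_grant_*` check is true while create, write and read checks stay false. A boolean sketch of that union, illustration only with each operand already resolved (hypothetical helper, not how OpenFGA actually evaluates the graph):

    // Hypothetical illustration of the warehouse `manage_grants` rewrite:
    // direct grant OR ownership OR security_admin inherited from the parent project.
    fn warehouse_manage_grants(direct_grant: bool, is_owner: bool, parent_security_admin: bool) -> bool {
        direct_grant || is_owner || parent_security_admin
    }

    // project_1_security_admin: warehouse_manage_grants(false, false, true) == true,
    // while modify/select stay false because they never consult security_admin.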
can_list_warehouses: true + can_include_in_list: true + can_rename: false + can_grant_create: false + can_grant_describe: false + can_grant_modify: false + can_grant_select: false + can_grant_project_admin: false + can_grant_security_admin: false + can_grant_warehouse_admin: true + - user: user:project_1_warehouse_admin + object: warehouse:warehouse_1 + assertions: + can_create_namespace: true + can_delete: true + can_modify_storage: true + can_modify_storage_credential: true + can_get_metadata: true + can_get_config: true + can_list_namespaces: true + can_use: true + can_include_in_list: true + can_deactivate: true + can_activate: true + can_rename: true + can_list_deleted_tabulars: true + can_grant_create: false + can_grant_describe: false + can_grant_modify: false + can_grant_select: false + can_grant_pass_grants: false + can_grant_manage_grants: false + can_change_ownership: false + - user: user:project_1_warehouse_admin + object: namespace:namespace_1 + assertions: + can_create_table: true + can_create_view: true + can_create_namespace: true + can_delete: true + can_update_properties: true + can_get_metadata: true + can_list_tables: true + can_list_views: true + can_list_namespaces: true + can_include_in_list: true + can_grant_create: false + can_grant_describe: false + can_grant_modify: false + can_grant_select: false + can_grant_pass_grants: false + can_grant_manage_grants: false + can_change_ownership: false + - user: user:project_1_warehouse_admin + object: table:table_3 + assertions: + can_drop: true + can_write_data: true + can_read_data: true + can_get_metadata: true + can_commit: true + can_rename: true + can_include_in_list: true + can_grant_pass_grants: false + can_grant_manage_grants: false + can_grant_describe: false + can_grant_select: false + can_grant_modify: false + can_change_ownership: false + - user: user:project_1_warehouse_admin + object: view:view_1 + assertions: + can_drop: true + can_commit: true + can_get_metadata: true + can_rename: true + can_include_in_list: true + can_grant_pass_grants: false + can_grant_manage_grants: false + can_grant_describe: false + can_grant_modify: false + can_change_ownership: false + - name: Warehouse Owner can do everything in its Warehouse + check: + - user: user:warehouse_1_owner + object: warehouse:warehouse_1 + assertions: + can_create_namespace: true + can_delete: true + can_modify_storage: true + can_modify_storage_credential: true + can_get_metadata: true + can_get_config: true + can_list_namespaces: true + can_use: true + can_include_in_list: true + can_deactivate: true + can_activate: true + can_rename: true + can_list_deleted_tabulars: true + can_grant_create: true + can_grant_describe: true + can_grant_modify: true + can_grant_select: true + can_grant_pass_grants: true + can_grant_manage_grants: true + can_change_ownership: true + - user: user:warehouse_1_owner + object: namespace:namespace_1 + assertions: + can_create_table: true + can_create_view: true + can_create_namespace: true + can_delete: true + can_update_properties: true + can_get_metadata: true + can_list_tables: true + can_list_views: true + can_include_in_list: true + can_list_namespaces: true + can_grant_create: true + can_grant_describe: true + can_grant_modify: true + can_grant_select: true + can_grant_pass_grants: true + can_grant_manage_grants: true + can_change_ownership: true + - user: user:warehouse_1_owner + object: table:table_3 + assertions: + can_drop: true + can_write_data: true + can_read_data: true + can_get_metadata: true + can_commit: true 
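The Warehouse Admin checks above show the complementary split: full data and object rights but no grant authority. In the model, `can_grant_select` and its siblings are a union of `manage_grants` with the intersection of the granted relation (here `select`) and `pass_grants`; the `warehouse_admin` role feeds `modify` (and through it `select`) but neither `manage_grants` nor `pass_grants`, so no `can_grant_*` check succeeds. A sketch of that composition, again with operands pre-resolved and purely illustrative:

    // Hypothetical illustration of the `can_grant_select` rewrite:
    // manage_grants OR (select AND pass_grants).
    fn can_grant_select(manage_grants: bool, select: bool, pass_grants: bool) -> bool {
        manage_grants || (select && pass_grants)
    }

    // project_1_warehouse_admin on warehouse_1: can_grant_select(false, true, false) == false,
    // matching the assertions above.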
+ can_rename: true + can_include_in_list: true + can_grant_pass_grants: true + can_grant_manage_grants: true + can_grant_describe: true + can_grant_select: true + can_grant_modify: true + can_change_ownership: true + - user: user:warehouse_1_owner + object: view:view_1 + assertions: + can_drop: true + can_commit: true + can_get_metadata: true + can_rename: true + can_include_in_list: true + can_grant_pass_grants: true + can_grant_manage_grants: true + can_grant_describe: true + can_grant_modify: true + can_change_ownership: true + - name: Namespace Owner can do everything in their Namespace but not above + check: + - user: user:namespace_1_1_owner + object: project:project_1 + assertions: + can_create_warehouse: false + can_delete: false + can_get_metadata: true + can_list_warehouses: true + can_include_in_list: true + can_rename: false + can_grant_create: false + can_grant_describe: false + can_grant_modify: false + can_grant_select: false + can_grant_project_admin: false + can_grant_security_admin: false + can_grant_warehouse_admin: false + - user: user:namespace_1_1_owner + object: namespace:namespace_1_1 + assertions: + can_create_table: true + can_create_view: true + can_create_namespace: true + can_delete: true + can_update_properties: true + can_get_metadata: true + can_list_tables: true + can_list_views: true + can_include_in_list: true + can_list_namespaces: true + can_grant_create: true + can_grant_describe: true + can_grant_modify: true + can_grant_select: true + can_grant_pass_grants: true + can_grant_manage_grants: true + can_change_ownership: true + - user: user:namespace_1_1_owner + object: namespace:namespace_1 + assertions: + can_create_table: false + can_create_view: false + can_create_namespace: false + can_delete: false + can_update_properties: false + can_get_metadata: false + can_list_tables: false + can_list_views: false + can_list_namespaces: false + can_grant_create: false + can_grant_describe: false + can_include_in_list: false + can_grant_modify: false + can_grant_select: false + can_grant_pass_grants: false + can_grant_manage_grants: false + can_change_ownership: false + - user: user:namespace_1_1_owner + object: table:table_3 + assertions: + can_drop: true + can_write_data: true + can_read_data: true + can_get_metadata: true + can_commit: true + can_rename: true + can_include_in_list: true + can_grant_pass_grants: true + can_grant_manage_grants: true + can_grant_describe: true + can_grant_select: true + can_grant_modify: true + can_change_ownership: true + - user: user:namespace_1_1_owner + object: view:view_1 + assertions: + can_drop: true + can_commit: true + can_get_metadata: true + can_rename: true + can_include_in_list: true + can_grant_pass_grants: true + can_grant_manage_grants: true + can_grant_describe: true + can_grant_modify: true + can_change_ownership: true + - name: Select Table 3 bubbles list up + check: + - user: user:select_table_3 + object: project:project_1 + assertions: + can_create_warehouse: false + can_delete: false + can_get_metadata: true + can_list_warehouses: true + can_include_in_list: true + can_rename: false + can_grant_create: false + can_grant_describe: false + can_grant_modify: false + can_grant_select: false + can_grant_project_admin: false + can_grant_security_admin: false + can_grant_warehouse_admin: false + - user: user:select_table_3 + object: warehouse:warehouse_1 + assertions: + can_create_namespace: false + can_delete: false + can_modify_storage: false + can_modify_storage_credential: false + can_get_metadata: true +
can_get_config: true + can_list_namespaces: true + can_use: true + can_include_in_list: true + can_deactivate: false + can_activate: false + can_rename: false + can_list_deleted_tabulars: true + can_grant_create: false + can_grant_describe: false + can_grant_modify: false + can_grant_select: false + can_grant_pass_grants: false + can_grant_manage_grants: false + can_change_ownership: false + - user: user:select_table_3 + object: namespace:namespace_1 + assertions: + can_create_table: false + can_create_view: false + can_create_namespace: false + can_delete: false + can_update_properties: false + can_get_metadata: false + can_list_tables: false + can_list_views: false + can_list_namespaces: false + can_include_in_list: false + can_grant_create: false + can_grant_describe: false + can_grant_modify: false + can_grant_select: false + can_grant_pass_grants: false + can_grant_manage_grants: false + can_change_ownership: false + - user: user:select_table_3 + object: namespace:namespace_1_1 + assertions: + can_create_table: false + can_create_view: false + can_create_namespace: false + can_delete: false + can_update_properties: false + can_get_metadata: true + can_list_tables: true + can_list_views: true + can_list_namespaces: true + can_include_in_list: true + can_grant_create: false + can_grant_describe: false + can_grant_modify: false + can_grant_select: false + can_grant_pass_grants: false + can_grant_manage_grants: false + can_change_ownership: false + - user: user:select_table_3 + object: table:table_3 + assertions: + can_drop: false + can_write_data: false + can_read_data: true + can_get_metadata: true + can_commit: false + can_rename: false + can_include_in_list: true + can_grant_pass_grants: false + can_grant_manage_grants: false + can_grant_describe: false + can_grant_select: false + can_grant_modify: false + can_change_ownership: false + - name: Managed access on warehouse owner can modify warehouse and below + check: + - user: user:warehouse_2_owner + object: warehouse:warehouse_2 + assertions: + can_create_namespace: true + can_delete: true + can_modify_storage: true + can_modify_storage_credential: true + can_get_metadata: true + can_get_config: true + can_list_namespaces: true + can_use: true + can_include_in_list: true + can_deactivate: true + can_activate: true + can_rename: true + can_list_deleted_tabulars: true + can_grant_create: true + can_grant_describe: true + can_grant_modify: true + can_grant_select: true + can_grant_pass_grants: true + can_grant_manage_grants: true + can_change_ownership: true + - user: user:warehouse_2_owner + object: namespace:namespace_2_1 + assertions: + can_create_table: true + can_create_view: true + can_create_namespace: true + can_delete: true + can_update_properties: true + can_get_metadata: true + can_list_tables: true + can_list_views: true + can_list_namespaces: true + can_include_in_list: true + can_grant_create: true + can_grant_describe: true + can_grant_modify: true + can_grant_select: true + can_grant_pass_grants: true + can_grant_manage_grants: true + can_change_ownership: true + - user: user:warehouse_2_owner + object: namespace:namespace_2_2 + assertions: + can_create_table: true + can_create_view: true + can_create_namespace: true + can_delete: true + can_update_properties: true + can_get_metadata: true + can_list_tables: true + can_list_views: true + can_list_namespaces: true + can_include_in_list: true + can_grant_create: true + can_grant_describe: true + can_grant_modify: true + can_grant_select: true + can_grant_pass_grants: true 
+ can_grant_manage_grants: true + can_change_ownership: true + - user: user:warehouse_2_owner + object: table:table_2_2 + assertions: + can_drop: true + can_write_data: true + can_read_data: true + can_get_metadata: true + can_commit: true + can_rename: true + can_include_in_list: true + can_grant_pass_grants: true + can_grant_manage_grants: true + can_grant_describe: true + can_grant_select: true + can_grant_modify: true + can_change_ownership: true + - name: Managed access on warehouse blocks namespace owner grants + check: + - user: user:namespace_2_1_owner + object: project:project_1 + assertions: + can_create_warehouse: false + can_delete: false + can_get_metadata: true + can_list_warehouses: true + can_include_in_list: true + can_rename: false + can_grant_create: false + can_grant_describe: false + can_grant_modify: false + can_grant_select: false + can_grant_project_admin: false + can_grant_security_admin: false + can_grant_warehouse_admin: false + - user: user:namespace_2_1_owner + object: warehouse:warehouse_2 + assertions: + can_create_namespace: false + can_delete: false + can_modify_storage: false + can_modify_storage_credential: false + can_get_metadata: true + can_get_config: true + can_list_namespaces: true + can_use: true + can_include_in_list: true + can_deactivate: false + can_activate: false + can_rename: false + can_list_deleted_tabulars: true + can_grant_create: false + can_grant_describe: false + can_grant_modify: false + can_grant_select: false + can_grant_pass_grants: false + can_grant_manage_grants: false + can_change_ownership: false + - user: user:namespace_2_1_owner + object: namespace:namespace_2_1 + assertions: + can_create_table: true + can_create_view: true + can_create_namespace: true + can_delete: true + can_update_properties: true + can_get_metadata: true + can_list_tables: true + can_list_views: true + can_list_namespaces: true + can_include_in_list: true + can_grant_create: false + can_grant_describe: false + can_grant_modify: false + can_grant_select: false + can_grant_pass_grants: false + can_grant_manage_grants: false + can_change_ownership: false + - user: user:table_2_2_owner + object: table:table_2_2 + assertions: + can_drop: true + can_write_data: true + can_read_data: true + can_get_metadata: true + can_commit: true + can_rename: true + can_include_in_list: true + can_grant_pass_grants: false + can_grant_manage_grants: false + can_grant_describe: false + can_grant_select: false + can_grant_modify: false + can_change_ownership: false + - name: Managed access enforced in sub-namespaces + check: + - user: user:namespace_2_2_owner + object: project:project_1 + assertions: + can_create_warehouse: false + can_delete: false + can_get_metadata: true + can_list_warehouses: true + can_include_in_list: true + can_rename: false + can_grant_create: false + can_grant_describe: false + can_grant_modify: false + can_grant_select: false + can_grant_project_admin: false + can_grant_security_admin: false + can_grant_warehouse_admin: false + - user: user:namespace_2_2_owner + object: warehouse:warehouse_2 + assertions: + can_create_namespace: false + can_delete: false + can_modify_storage: false + can_modify_storage_credential: false + can_get_metadata: true + can_get_config: true + can_list_namespaces: true + can_use: true + can_include_in_list: true + can_deactivate: false + can_activate: false + can_rename: false + can_list_deleted_tabulars: true + can_grant_create: false + can_grant_describe: false + can_grant_modify: false + can_grant_select: false + 
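The managed-access tests around this point hinge on the `difference` operator in the model: on namespaces, tables and views, `manage_grants` is a union of a direct grant, `ownership` minus the `managed_access_inheritance` coming from the parent, and `manage_grants` on the parent. With `managed_access` set on warehouse_2, owners further down the tree keep `modify` but lose the grant authority that ownership would otherwise imply. A boolean sketch of that rewrite, illustrative only (hypothetical helper, operands pre-resolved):

    // Hypothetical illustration of the namespace/table `manage_grants` rewrite:
    // direct grant OR (ownership AND NOT inherited managed_access) OR parent manage_grants.
    fn manage_grants(direct_grant: bool, is_owner: bool, parent_managed_access: bool, parent_manage_grants: bool) -> bool {
        direct_grant || (is_owner && !parent_managed_access) || parent_manage_grants
    }

    // namespace_2_1_owner under the managed warehouse_2:
    // manage_grants(false, true, true, false) == false, so all can_grant_* checks fail
    // even though modify (and hence can_delete, can_commit, ...) still succeeds.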
can_grant_pass_grants: false + can_grant_manage_grants: false + can_change_ownership: false + - user: user:namespace_2_2_owner + object: namespace:namespace_2_1 + assertions: + can_create_table: false + can_create_view: false + can_create_namespace: false + can_delete: false + can_update_properties: false + can_get_metadata: true + can_list_tables: true + can_list_views: true + can_list_namespaces: true + can_include_in_list: true + can_grant_create: false + can_grant_describe: false + can_grant_modify: false + can_grant_select: false + can_grant_pass_grants: false + can_grant_manage_grants: false + can_change_ownership: false + - user: user:namespace_2_2_owner + object: namespace:namespace_2_2 + assertions: + can_create_table: true + can_create_view: true + can_create_namespace: true + can_delete: true + can_update_properties: true + can_get_metadata: true + can_list_tables: true + can_list_views: true + can_list_namespaces: true + can_include_in_list: true + can_grant_create: false + can_grant_describe: false + can_grant_modify: false + can_grant_select: false + can_grant_pass_grants: false + can_grant_manage_grants: false + can_change_ownership: false + - user: user:namespace_2_2_owner + object: table:table_2_2 + assertions: + can_drop: true + can_write_data: true + can_read_data: true + can_get_metadata: true + can_commit: true + can_rename: true + can_include_in_list: true + can_grant_pass_grants: false + can_grant_manage_grants: false + can_grant_describe: false + can_grant_select: false + can_grant_modify: false + can_change_ownership: false + - user: user:table_2_2_owner + object: table:table_2_2 + assertions: + can_drop: true + can_write_data: true + can_read_data: true + can_get_metadata: true + can_commit: true + can_rename: true + can_include_in_list: true + can_grant_pass_grants: false + can_grant_manage_grants: false + can_grant_describe: false + can_grant_select: false + can_grant_modify: false + can_change_ownership: false + - name: Managed access for roles enforced in sub-namespaces + check: + - user: user:namespace_2_1_role_owner + object: project:project_1 + assertions: + can_create_warehouse: false + can_delete: false + can_get_metadata: true + can_list_warehouses: true + can_include_in_list: true + can_rename: false + can_grant_create: false + can_grant_describe: false + can_grant_modify: false + can_grant_select: false + can_grant_project_admin: false + can_grant_security_admin: false + can_grant_warehouse_admin: false + - user: role:namespace_2_1_owner#assignee + object: project:project_1 + assertions: + can_create_warehouse: false + can_delete: false + can_get_metadata: true + can_list_warehouses: true + can_include_in_list: true + can_rename: false + can_grant_create: false + can_grant_describe: false + can_grant_modify: false + can_grant_select: false + can_grant_project_admin: false + can_grant_security_admin: false + can_grant_warehouse_admin: false + - user: user:namespace_2_1_role_owner + object: warehouse:warehouse_2 + assertions: + can_create_namespace: false + can_delete: false + can_modify_storage: false + can_modify_storage_credential: false + can_get_metadata: true + can_get_config: true + can_list_namespaces: true + can_use: true + can_include_in_list: true + can_deactivate: false + can_activate: false + can_rename: false + can_list_deleted_tabulars: true + can_grant_create: false + can_grant_describe: false + can_grant_modify: false + can_grant_select: false + can_grant_pass_grants: false + can_grant_manage_grants: false + can_change_ownership: false + 
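The surrounding test exercises the same rules for grants held through a role rather than a direct user tuple: `user:namespace_2_1_role_owner` is an `assignee` of `role:namespace_2_1_owner`, and the role's `assignee` userset holds `ownership` on the namespace, so managed access restricts role members exactly like direct owners. The two fixture tuples behind this, written out as plain (user, relation, object) triples:

    // The role indirection from the fixtures above. `role:namespace_2_1_owner#assignee`
    // is an OpenFGA userset: every subject holding `assignee` on that role.
    const ROLE_OWNERSHIP_TUPLES: [(&str, &str, &str); 2] = [
        ("user:namespace_2_1_role_owner", "assignee", "role:namespace_2_1_owner"),
        ("role:namespace_2_1_owner#assignee", "ownership", "namespace:namespace_2_1"),
    ];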
- user: role:namespace_2_1_owner#assignee + object: warehouse:warehouse_2 + assertions: + can_create_namespace: false + can_delete: false + can_modify_storage: false + can_modify_storage_credential: false + can_get_metadata: true + can_get_config: true + can_list_namespaces: true + can_use: true + can_include_in_list: true + can_deactivate: false + can_activate: false + can_rename: false + can_list_deleted_tabulars: true + can_grant_create: false + can_grant_describe: false + can_grant_modify: false + can_grant_select: false + can_grant_pass_grants: false + can_grant_manage_grants: false + can_change_ownership: false + - user: user:namespace_2_1_role_owner + object: namespace:namespace_2_1 + assertions: + can_create_table: true + can_create_view: true + can_create_namespace: true + can_delete: true + can_update_properties: true + can_get_metadata: true + can_list_tables: true + can_list_views: true + can_list_namespaces: true + can_include_in_list: true + can_grant_create: false + can_grant_describe: false + can_grant_modify: false + can_grant_select: false + can_grant_pass_grants: false + can_grant_manage_grants: false + can_change_ownership: false + - user: user:table_2_2_owner + object: table:table_2_2 + assertions: + can_drop: true + can_write_data: true + can_read_data: true + can_get_metadata: true + can_commit: true + can_rename: true + can_include_in_list: true + can_grant_pass_grants: false + can_grant_manage_grants: false + can_grant_describe: false + can_grant_select: false + can_grant_modify: false + can_change_ownership: false + diff --git a/crates/iceberg-catalog-bin/src/serve.rs b/crates/iceberg-catalog-bin/src/serve.rs index f2955bc3..f31cc1a5 100644 --- a/crates/iceberg-catalog-bin/src/serve.rs +++ b/crates/iceberg-catalog-bin/src/serve.rs @@ -1,14 +1,14 @@ use anyhow::{anyhow, Error}; use iceberg_catalog::api::router::{new_full_router, serve as service_serve}; use iceberg_catalog::implementations::postgres::{CatalogState, PostgresCatalog, ReadWrite}; -use iceberg_catalog::implementations::{AllowAllAuthState, AllowAllAuthZHandler}; +use iceberg_catalog::implementations::Secrets; +use iceberg_catalog::service::authz::implementations::get_default_authorizer_from_config; use iceberg_catalog::service::contract_verification::ContractVerifiers; use iceberg_catalog::service::event_publisher::{ CloudEventBackend, CloudEventsPublisher, CloudEventsPublisherBackgroundTask, Message, NatsBackend, }; use iceberg_catalog::service::health::ServiceHealthProvider; -use iceberg_catalog::service::secrets::Secrets; use iceberg_catalog::service::token_verification::Verifier; use iceberg_catalog::{SecretBackend, CONFIG}; use reqwest::Url; @@ -43,13 +43,13 @@ pub(crate) async fn serve(bind_addr: std::net::SocketAddr) -> Result<(), anyhow: .into() } }; - let auth_state = AllowAllAuthState; + let authorizer = get_default_authorizer_from_config().await?; let health_provider = ServiceHealthProvider::new( vec![ ("catalog", Arc::new(catalog_state.clone())), ("secrets", Arc::new(secrets_state.clone())), - ("auth", Arc::new(auth_state.clone())), + ("auth", Arc::new(authorizer.clone())), ], CONFIG.health_check_frequency_seconds, CONFIG.health_check_jitter_millis, @@ -88,14 +88,8 @@ pub(crate) async fn serve(bind_addr: std::net::SocketAddr) -> Result<(), anyhow: let metrics_layer = iceberg_catalog::metrics::get_axum_layer_and_install_recorder(CONFIG.metrics_port)?; - let router = new_full_router::< - PostgresCatalog, - PostgresCatalog, - AllowAllAuthZHandler, - AllowAllAuthZHandler, - Secrets, - >( - 
auth_state, + let router = new_full_router::( + authorizer, catalog_state.clone(), secrets_state.clone(), queues.clone(), diff --git a/crates/iceberg-catalog/Cargo.toml b/crates/iceberg-catalog/Cargo.toml index c1739ea7..1311a722 100644 --- a/crates/iceberg-catalog/Cargo.toml +++ b/crates/iceberg-catalog/Cargo.toml @@ -47,6 +47,7 @@ google-cloud-auth = { workspace = true } google-cloud-token = { workspace = true } hostname = { workspace = true } http = { workspace = true } +http-body-util = { version = "~0.1" } iceberg = { workspace = true } iceberg-ext = { path = "../iceberg-ext", features = ["axum"] } itertools = { workspace = true } @@ -56,10 +57,12 @@ lazy-regex = { workspace = true } lazy_static = { workspace = true } lru = { workspace = true } maplit = { workspace = true } +openfga-rs = { workspace = true, optional = false } percent-encoding = { workspace = true } rand = "0.8.5" reqwest = { workspace = true } serde = { workspace = true } +serde-aux = { workspace = true } serde_json = { workspace = true, features = ["preserve_order"] } sqlx = { workspace = true, optional = true, features = ["tls-rustls"] } strum = { workspace = true } @@ -84,8 +87,10 @@ vaultrs-login = "0.2.1" veil = { workspace = true } [dev-dependencies] +figment = { workspace = true, features = ["test"] } http-body-util = { workspace = true } needs_env_var = { workspace = true } +pretty_assertions = { workspace = true } serde_urlencoded = "0.7.1" tower = { workspace = true } tracing-subscriber = { workspace = true } diff --git a/crates/iceberg-catalog/src/api/iceberg/mod.rs b/crates/iceberg-catalog/src/api/iceberg/mod.rs index d9763717..88421125 100644 --- a/crates/iceberg-catalog/src/api/iceberg/mod.rs +++ b/crates/iceberg-catalog/src/api/iceberg/mod.rs @@ -36,17 +36,21 @@ pub mod v1 { pub const MAX_PAGE_SIZE: i64 = i64::MAX; pub fn new_v1_full_router< - C: config::Service, - #[cfg(feature = "s3-signer")] T: namespace::Service + #[cfg(feature = "s3-signer")] T: config::Service + + namespace::Service + tables::Service + metrics::Service + s3_signer::Service + views::Service, - #[cfg(not(feature = "s3-signer"))] T: namespace::Service + tables::Service + metrics::Service + views::Service, + #[cfg(not(feature = "s3-signer"))] T: config::Service + + namespace::Service + + tables::Service + + metrics::Service + + views::Service, S: ThreadSafe, >() -> Router> { let router = Router::new() - .merge(config::router::()) + .merge(config::router::()) .merge(namespace::router::()) .merge(tables::router::()) .merge(views::router::()) diff --git a/crates/iceberg-catalog/src/api/iceberg/v1/tables.rs b/crates/iceberg-catalog/src/api/iceberg/v1/tables.rs index 3b073ccf..8b92bf38 100644 --- a/crates/iceberg-catalog/src/api/iceberg/v1/tables.rs +++ b/crates/iceberg-catalog/src/api/iceberg/v1/tables.rs @@ -291,6 +291,13 @@ pub struct DataAccess { pub remote_signing: bool, } +impl DataAccess { + #[must_use] + pub fn requested(&self) -> bool { + self.vended_credentials || self.remote_signing + } +} + pub(crate) fn parse_data_access(headers: &HeaderMap) -> DataAccess { let header = headers .get_all(DATA_ACCESS_HEADER) diff --git a/crates/iceberg-catalog/src/api/management/mod.rs b/crates/iceberg-catalog/src/api/management/mod.rs index dcc31b46..0ea8899a 100644 --- a/crates/iceberg-catalog/src/api/management/mod.rs +++ b/crates/iceberg-catalog/src/api/management/mod.rs @@ -6,12 +6,12 @@ pub mod v1 { use crate::api::{ApiContext, Result}; use crate::request_metadata::RequestMetadata; - use crate::service::auth::AuthZHandler; + use 
crate::service::authz::Authorizer; use std::marker::PhantomData; use crate::api::iceberg::v1::PaginationQuery; - use crate::service::tabular_idents::TabularIdentUuid; + use crate::service::TabularIdentUuid; use crate::service::{storage::S3Flavor, Catalog, SecretStore, State}; use axum::extract::{Path, Query, State as AxumState}; use axum::routing::{get, post}; @@ -85,7 +85,7 @@ pub mod v1 { #[derive(Clone, Debug)] - pub struct ApiServer { + pub struct ApiServer { auth_handler: PhantomData, config_server: PhantomData, secret_store: PhantomData, @@ -105,7 +105,7 @@ pub mod v1 { (status = 201, description = "Warehouse created successfully", body = [CreateWarehouseResponse]), ) )] - async fn create_warehouse( + async fn create_warehouse( AxumState(api_context): AxumState>>, Extension(metadata): Extension, Json(request): Json, @@ -122,7 +122,7 @@ pub mod v1 { (status = 200, description = "List of projects", body = [ListProjectsResponse]) ) )] - async fn list_projects( + async fn list_projects( AxumState(api_context): AxumState>>, Extension(metadata): Extension, ) -> Result { @@ -138,7 +138,7 @@ pub mod v1 { (status = 201, description = "Project created successfully", body = [ProjectResponse]) ) )] - async fn create_project( + async fn create_project( AxumState(api_context): AxumState>>, Extension(metadata): Extension, Json(request): Json, @@ -155,7 +155,7 @@ pub mod v1 { (status = 200, description = "Project details", body = [GetProjectResponse]) ) )] - async fn get_project( + async fn get_project( Path(project_id): Path, AxumState(api_context): AxumState>>, Extension(metadata): Extension, @@ -174,7 +174,7 @@ pub mod v1 { (status = 200, description = "Project deleted successfully") ) )] - async fn delete_project( + async fn delete_project( Path(project_id): Path, AxumState(api_context): AxumState>>, Extension(metadata): Extension, @@ -191,7 +191,7 @@ pub mod v1 { (status = 200, description = "Project renamed successfully") ) )] - async fn rename_project( + async fn rename_project( Path(project_id): Path, AxumState(api_context): AxumState>>, Extension(metadata): Extension, @@ -214,7 +214,7 @@ pub mod v1 { (status = 200, description = "List of warehouses", body = [ListWarehousesResponse]) ) )] - async fn list_warehouses( + async fn list_warehouses( Query(request): Query, AxumState(api_context): AxumState>>, Extension(metadata): Extension, @@ -231,7 +231,7 @@ pub mod v1 { (status = 200, description = "Warehouse details", body = [GetWarehouseResponse]) ) )] - async fn get_warehouse( + async fn get_warehouse( Path(warehouse_id): Path, AxumState(api_context): AxumState>>, Extension(metadata): Extension, @@ -248,7 +248,7 @@ pub mod v1 { (status = 200, description = "Warehouse deleted successfully") ) )] - async fn delete_warehouse( + async fn delete_warehouse( Path(warehouse_id): Path, AxumState(api_context): AxumState>>, Extension(metadata): Extension, @@ -266,7 +266,7 @@ pub mod v1 { (status = 200, description = "Warehouse renamed successfully") ) )] - async fn rename_warehouse( + async fn rename_warehouse( Path(warehouse_id): Path, AxumState(api_context): AxumState>>, Extension(metadata): Extension, @@ -285,7 +285,7 @@ pub mod v1 { (status = 200, description = "Warehouse deactivated successfully") ) )] - async fn deactivate_warehouse( + async fn deactivate_warehouse( Path(warehouse_id): Path, AxumState(api_context): AxumState>>, Extension(metadata): Extension, @@ -302,7 +302,7 @@ pub mod v1 { (status = 200, description = "Warehouse activated successfully") ) )] - async fn activate_warehouse( + 
async fn activate_warehouse( Path(warehouse_id): Path, AxumState(api_context): AxumState>>, Extension(metadata): Extension, @@ -310,7 +310,7 @@ pub mod v1 { ApiServer::::activate_warehouse(warehouse_id.into(), api_context, metadata).await } - /// Update the storage profile of a warehouse + /// Update the storage profile of a warehouse including its storage credential. #[utoipa::path( post, tag = "management", @@ -320,7 +320,7 @@ pub mod v1 { (status = 200, description = "Storage profile updated successfully") ) )] - async fn update_storage_profile( + async fn update_storage_profile( Path(warehouse_id): Path, AxumState(api_context): AxumState>>, Extension(metadata): Extension, @@ -330,7 +330,8 @@ pub mod v1 { .await } - /// Update the storage credential of a warehouse + /// Update the storage credential of a warehouse. The storage profile is not modified. + /// This can be used to update credentials before expiration. #[utoipa::path( post, tag = "management", @@ -340,14 +341,19 @@ pub mod v1 { (status = 200, description = "Storage credential updated successfully") ) )] - async fn update_storage_credential( + async fn update_storage_credential( Path(warehouse_id): Path, AxumState(api_context): AxumState>>, Extension(metadata): Extension, Json(request): Json, ) -> Result<()> { - ApiServer::::update_credential(warehouse_id.into(), request, api_context, metadata) - .await + ApiServer::::update_storage_credential( + warehouse_id.into(), + request, + api_context, + metadata, + ) + .await } /// List soft-deleted tabulars @@ -361,7 +367,7 @@ pub mod v1 { (status = 200, description = "List of soft-deleted tabulars", body = [ListDeletedTabularsResponse]) ) )] - async fn list_deleted_tabulars( + async fn list_deleted_tabulars( Path(warehouse_id): Path, Query(pagination): Query, AxumState(api_context): AxumState>>, @@ -429,7 +435,7 @@ pub mod v1 { Purge, } - impl ApiServer { + impl ApiServer { pub fn new_v1_router() -> Router>> { Router::new() // Create a new project diff --git a/crates/iceberg-catalog/src/api/management/v1/project.rs b/crates/iceberg-catalog/src/api/management/v1/project.rs index 10512b2e..2fd8e71d 100644 --- a/crates/iceberg-catalog/src/api/management/v1/project.rs +++ b/crates/iceberg-catalog/src/api/management/v1/project.rs @@ -7,7 +7,11 @@ pub use crate::service::storage::{ }; pub use crate::service::WarehouseStatus; -use crate::service::{auth::AuthZHandler, secrets::SecretStore, Catalog, State, Transaction}; +use crate::service::{ + authz::{Authorizer, ListProjectsResponse as AuthZListProjectsResponse}, + secrets::SecretStore, + Catalog, State, Transaction, +}; use crate::ProjectIdent; use iceberg_ext::catalog::rest::ErrorModel; use utoipa::ToSchema; @@ -63,10 +67,10 @@ impl axum::response::IntoResponse for GetProjectResponse { } } -impl Service for ApiServer {} +impl Service for ApiServer {} #[async_trait::async_trait] -pub(super) trait Service { +pub(super) trait Service { async fn create_project( request: CreateProjectRequest, context: ApiContext>, @@ -156,18 +160,21 @@ pub(super) trait Service { request_metadata: RequestMetadata, ) -> Result { // ------------------- AuthZ ------------------- - let projects = A::check_list_projects(&request_metadata, context.v1_state.auth).await?; + let authorizer = context.v1_state.authz; + let projects = authorizer.list_projects(&request_metadata).await?; // ------------------- Business Logic ------------------- - - let projects = C::list_projects(projects, context.v1_state.catalog).await?; - + let project_id_filter = match projects { + 
AuthZListProjectsResponse::All => None, + AuthZListProjectsResponse::Projects(projects) => Some(projects), + }; + let projects = C::list_projects(project_id_filter, context.v1_state.catalog).await?; Ok(ListProjectsResponse { projects: projects .into_iter() - .map(|r| GetProjectResponse { - project_id: *r.project_id, - project_name: r.name, + .map(|project| GetProjectResponse { + project_id: *project.project_id, + project_name: project.name, }) .collect(), }) diff --git a/crates/iceberg-catalog/src/api/management/v1/warehouse.rs b/crates/iceberg-catalog/src/api/management/v1/warehouse.rs index fd5e0e94..344529a2 100644 --- a/crates/iceberg-catalog/src/api/management/v1/warehouse.rs +++ b/crates/iceberg-catalog/src/api/management/v1/warehouse.rs @@ -1,6 +1,7 @@ use crate::api::management::v1::{ApiServer, DeletedTabularResponse, ListDeletedTabularsResponse}; use crate::api::{ApiContext, Result}; use crate::request_metadata::RequestMetadata; +use crate::service::authz::{ProjectAction, WarehouseAction}; pub use crate::service::storage::{ AzCredential, AzdlsProfile, GcsCredential, GcsProfile, GcsServiceKey, S3Credential, S3Profile, StorageCredential, StorageProfile, @@ -10,21 +11,24 @@ use crate::api::iceberg::v1::{PaginatedTabulars, PaginationQuery}; pub use crate::service::WarehouseStatus; use crate::service::{ - auth::AuthZHandler, secrets::SecretStore, Catalog, ListFlags, State, Transaction, + authz::Authorizer, secrets::SecretStore, Catalog, ListFlags, State, Transaction, }; use crate::{ProjectIdent, WarehouseIdent, CONFIG}; use iceberg_ext::catalog::rest::ErrorModel; use serde::Deserialize; use utoipa::ToSchema; +use super::TabularType; + #[derive(Debug, Clone, serde::Serialize, serde::Deserialize, ToSchema)] #[serde(rename_all = "kebab-case")] pub struct CreateWarehouseRequest { /// Name of the warehouse to create. Must be unique - /// within a project. + /// within a project and may not contain "/" pub warehouse_name: String, /// Project ID in which to create the warehouse. - pub project_id: uuid::Uuid, + /// If no default project is set for this server, this field is required. + pub project_id: Option, /// Storage profile to use for the warehouse. pub storage_profile: StorageProfile, /// Optional storage credential to use for the warehouse. 
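With this change `project_id` becomes optional on `CreateWarehouseRequest` (and, as shown below, on the list-warehouses call): an explicit id wins, otherwise the server falls back to `CONFIG.default_project_id`, and if neither is set the request is rejected as a bad request. The handlers below inline this with `ProjectIdent::from` and `ErrorModel::bad_request`; a generic standalone sketch of the same fallback rule (hypothetical helper, not part of the patch):

    // Hypothetical helper mirroring the fallback used below:
    // explicit project id > server-wide default > error.
    fn resolve_project_id<T>(explicit: Option<T>, server_default: Option<T>) -> Result<T, &'static str> {
        explicit
            .or(server_default)
            .ok_or("project_id must be specified when no default project is configured")
    }

    // resolve_project_id(Some(1u32), None) == Ok(1)
    // resolve_project_id(None::<u32>, Some(7)) == Ok(7)
    // resolve_project_id(None::<u32>, None) is an Err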
@@ -151,10 +155,10 @@ impl axum::response::IntoResponse for CreateWarehouseResponse { } } -impl Service for ApiServer {} +impl Service for ApiServer {} #[async_trait::async_trait] -pub(super) trait Service { +pub(super) trait Service { async fn create_warehouse( request: CreateWarehouseRequest, context: ApiContext>, @@ -167,10 +171,24 @@ pub(super) trait Service { storage_credential, delete_profile, } = request; - let project_ident = ProjectIdent::from(project_id); + let project_ident = project_id + .map(ProjectIdent::from) + .or(CONFIG.default_project_id) + .ok_or(ErrorModel::bad_request( + "project_id must be specified", + "CreateWarehouseProjectIdMissing", + None, + ))?; // ------------------- AuthZ ------------------- - A::check_create_warehouse(&request_metadata, &project_ident, context.v1_state.auth).await?; + let authorizer = context.v1_state.authz; + authorizer + .require_project_action( + &request_metadata, + project_ident, + &ProjectAction::CanCreateWarehouse, + ) + .await?; // ------------------- Business Logic ------------------- validate_warehouse_name(&warehouse_name)?; @@ -194,7 +212,7 @@ pub(super) trait Service { let warehouse_id = C::create_warehouse( warehouse_name, - project_id.into(), + project_ident, storage_profile, delete_profile, secret_id, @@ -215,37 +233,53 @@ pub(super) trait Service { request_metadata: RequestMetadata, ) -> Result { // ------------------- AuthZ ------------------- - let project_id = ProjectIdent::from( - request.project_id.ok_or( - ErrorModel::builder() - .code(http::StatusCode::BAD_REQUEST.into()) - .message("project-id is required".to_string()) - .r#type("MissingProjectId".to_string()) - .build(), - )?, - ); - let warehouses = A::check_list_warehouse_in_project( - &request_metadata, - project_id, - context.v1_state.auth, - ) - .await?; + let project_id = request + .project_id + .map(ProjectIdent::from) + .or(CONFIG.default_project_id) + .ok_or(ErrorModel::bad_request( + "project_id must be specified", + "ListWarehousesProjectIdMissing", + None, + ))?; + + let authorizer = context.v1_state.authz; + authorizer + .require_project_action( + &request_metadata, + project_id, + &ProjectAction::CanListWarehouses, + ) + .await?; // ------------------- Business Logic ------------------- let warehouses = C::list_warehouses( project_id, request.warehouse_status, - warehouses.as_ref(), context.v1_state.catalog, ) .await?; - Ok(ListWarehousesResponse { - warehouses: warehouses - .into_iter() - .map(std::convert::Into::into) - .collect(), + let warehouses = futures::future::try_join_all(warehouses.iter().map(|w| { + authorizer.is_allowed_warehouse_action( + &request_metadata, + w.id, + &WarehouseAction::CanShowInList, + ) + })) + .await? 
+ .into_iter() + .zip(warehouses.into_iter()) + .filter_map(|(allowed, warehouse)| { + if allowed { + Some(warehouse.into()) + } else { + None + } }) + .collect(); + + Ok(ListWarehousesResponse { warehouses }) } async fn get_warehouse( @@ -254,7 +288,14 @@ pub(super) trait Service { request_metadata: RequestMetadata, ) -> Result { // ------------------- AuthZ ------------------- - A::check_get_warehouse(&request_metadata, warehouse_id, context.v1_state.auth).await?; + let authorizer = context.v1_state.authz; + authorizer + .require_warehouse_action( + &request_metadata, + warehouse_id, + &WarehouseAction::CanGetMetadata, + ) + .await?; // ------------------- Business Logic ------------------- let mut transaction = C::Transaction::begin_read(context.v1_state.catalog).await?; @@ -269,7 +310,10 @@ pub(super) trait Service { request_metadata: RequestMetadata, ) -> Result<()> { // ------------------- AuthZ ------------------- - A::check_delete_warehouse(&request_metadata, warehouse_id, context.v1_state.auth).await?; + let authorizer = context.v1_state.authz; + authorizer + .require_warehouse_action(&request_metadata, warehouse_id, &WarehouseAction::CanDelete) + .await?; // ------------------- Business Logic ------------------- let mut transaction = C::Transaction::begin_write(context.v1_state.catalog).await?; @@ -287,7 +331,10 @@ pub(super) trait Service { request_metadata: RequestMetadata, ) -> Result<()> { // ------------------- AuthZ ------------------- - A::check_rename_warehouse(&request_metadata, warehouse_id, context.v1_state.auth).await?; + let authorizer = context.v1_state.authz; + authorizer + .require_warehouse_action(&request_metadata, warehouse_id, &WarehouseAction::CanRename) + .await?; // ------------------- Business Logic ------------------- validate_warehouse_name(&request.new_name)?; @@ -306,7 +353,13 @@ pub(super) trait Service { request_metadata: RequestMetadata, ) -> Result<()> { // ------------------- AuthZ ------------------- - A::check_deactivate_warehouse(&request_metadata, warehouse_id, context.v1_state.auth) + let authorizer = context.v1_state.authz; + authorizer + .require_warehouse_action( + &request_metadata, + warehouse_id, + &WarehouseAction::CanDeactivate, + ) .await?; // ------------------- Business Logic ------------------- @@ -330,7 +383,14 @@ pub(super) trait Service { request_metadata: RequestMetadata, ) -> Result<()> { // ------------------- AuthZ ------------------- - A::check_activate_warehouse(&request_metadata, warehouse_id, context.v1_state.auth).await?; + let authorizer = context.v1_state.authz; + authorizer + .require_warehouse_action( + &request_metadata, + warehouse_id, + &WarehouseAction::CanActivate, + ) + .await?; // ------------------- Business Logic ------------------- let mut transaction = C::Transaction::begin_write(context.v1_state.catalog).await?; @@ -354,7 +414,14 @@ pub(super) trait Service { request_metadata: RequestMetadata, ) -> Result<()> { // ------------------- AuthZ ------------------- - A::check_update_storage(&request_metadata, warehouse_id, context.v1_state.auth).await?; + let authorizer = context.v1_state.authz; + authorizer + .require_warehouse_action( + &request_metadata, + warehouse_id, + &WarehouseAction::CanUpdateStorage, + ) + .await?; // ------------------- Business Logic ------------------- let UpdateWarehouseStorageRequest { @@ -412,14 +479,21 @@ pub(super) trait Service { Ok(()) } - async fn update_credential( + async fn update_storage_credential( warehouse_id: WarehouseIdent, request: 
UpdateWarehouseCredentialRequest, context: ApiContext>, request_metadata: RequestMetadata, ) -> Result<()> { // ------------------- AuthZ ------------------- - A::check_update_storage(&request_metadata, warehouse_id, context.v1_state.auth).await?; + let authorizer = context.v1_state.authz; + authorizer + .require_warehouse_action( + &request_metadata, + warehouse_id, + &WarehouseAction::CanUpdateStorageCredential, + ) + .await?; // ------------------- Business Logic ------------------- let UpdateWarehouseCredentialRequest { @@ -480,7 +554,13 @@ pub(super) trait Service { pagination_query: PaginationQuery, ) -> Result { // ------------------- AuthZ ------------------- - A::check_list_soft_deletions(&request_metadata, warehouse_id, context.v1_state.auth) + let authorizer = context.v1_state.authz; + authorizer + .require_warehouse_action( + &request_metadata, + warehouse_id, + &WarehouseAction::CanListDeletedTabulars, + ) .await?; // ------------------- Business Logic ------------------- @@ -495,29 +575,52 @@ pub(super) trait Service { ) .await?; - Ok(ListDeletedTabularsResponse { - tabulars: tabulars - .into_iter() - .map(|(k, (ident, delete_opts))| { - let i = ident.into_inner(); - let deleted = delete_opts.ok_or(ErrorModel::internal( - "Expected delete options to be Some, but found None", - "InternalDatabaseError", - None, - ))?; - - Ok(DeletedTabularResponse { - id: *k, - name: i.name, - namespace: i.namespace.inner(), - typ: k.into(), - warehouse_id: *warehouse_id, - created_at: deleted.created_at, - deleted_at: deleted.deleted_at, - expiration_date: deleted.expiration_date, - }) + let tabulars = tabulars + .into_iter() + .map(|(k, (ident, delete_opts))| { + let i = ident.into_inner(); + let deleted = delete_opts.ok_or(ErrorModel::internal( + "Expected delete options to be Some, but found None", + "InternalDatabaseError", + None, + ))?; + + Ok(DeletedTabularResponse { + id: *k, + name: i.name, + namespace: i.namespace.inner(), + typ: k.into(), + warehouse_id: *warehouse_id, + created_at: deleted.created_at, + deleted_at: deleted.deleted_at, + expiration_date: deleted.expiration_date, }) - .collect::>>()?, + }) + .collect::>>()?; + + let tabulars = futures::future::try_join_all(tabulars.iter().map(|t| match t.typ { + TabularType::View => authorizer.is_allowed_view_action( + &request_metadata, + warehouse_id, + t.id.into(), + &crate::service::authz::ViewAction::CanShowInList, + ), + TabularType::Table => authorizer.is_allowed_table_action( + &request_metadata, + warehouse_id, + t.id.into(), + &crate::service::authz::TableAction::CanShowInList, + ), + })) + .await? 
+ .into_iter() + .zip(tabulars.into_iter()) + .filter_map(|(allowed, tabular)| if allowed { Some(tabular) } else { None }) + .collect(); + + // ToDo: Better pagination with non-empty pages + Ok(ListDeletedTabularsResponse { + tabulars, next_page_token, }) } @@ -595,7 +698,7 @@ mod test { assert_eq!(request.warehouse_name, "test_warehouse"); assert_eq!( request.project_id, - uuid::Uuid::parse_str("f47ac10b-58cc-4372-a567-0e02b2c3d479").unwrap() + Some(uuid::Uuid::parse_str("f47ac10b-58cc-4372-a567-0e02b2c3d479").unwrap()) ); let s3_profile = request.storage_profile.try_into_s3().unwrap(); assert_eq!(s3_profile.bucket, "test"); diff --git a/crates/iceberg-catalog/src/api/mod.rs b/crates/iceberg-catalog/src/api/mod.rs index e94563ba..255ab2fb 100644 --- a/crates/iceberg-catalog/src/api/mod.rs +++ b/crates/iceberg-catalog/src/api/mod.rs @@ -46,3 +46,11 @@ pub async fn shutdown_signal() { () = terminate => {}, } } + +pub(crate) fn set_not_found_status_code( + e: impl Into, +) -> IcebergErrorResponse { + let mut e = e.into(); + e.error.code = http::StatusCode::NOT_FOUND.into(); + e +} diff --git a/crates/iceberg-catalog/src/api/router.rs b/crates/iceberg-catalog/src/api/router.rs index a3a280dd..6379c42c 100644 --- a/crates/iceberg-catalog/src/api/router.rs +++ b/crates/iceberg-catalog/src/api/router.rs @@ -8,11 +8,7 @@ use crate::service::contract_verification::ContractVerifiers; use crate::service::health::ServiceHealthProvider; use crate::service::task_queue::TaskQueues; use crate::service::token_verification::Verifier; -use crate::service::{ - auth::{AuthConfigHandler, AuthZHandler}, - config::ConfigProvider, - Catalog, SecretStore, State, -}; +use crate::service::{authz::Authorizer, Catalog, SecretStore, State}; use axum::response::IntoResponse; use axum::{routing::get, Json, Router}; use axum_prometheus::PrometheusMetricLayer; @@ -25,14 +21,8 @@ use tower_http::{ use utoipa::OpenApi; #[allow(clippy::module_name_repetitions, clippy::too_many_arguments)] -pub fn new_full_router< - CP: ConfigProvider, - C: Catalog, - AH: AuthConfigHandler, - A: AuthZHandler, - S: SecretStore, ->( - auth_state: A::State, +pub fn new_full_router( + authorizer: A, catalog_state: C::State, secrets_state: S, queues: TaskQueues, @@ -42,11 +32,7 @@ pub fn new_full_router< svhp: ServiceHealthProvider, metrics_layer: Option>, ) -> Router { - let v1_routes = new_v1_full_router::< - crate::catalog::ConfigServer, - crate::catalog::CatalogServer, - State, - >(); + let v1_routes = new_v1_full_router::, State>(); let management_routes = Router::new().merge(ApiServer::new_v1_router()); @@ -89,7 +75,7 @@ pub fn new_full_router< ) .with_state(ApiContext { v1_state: State { - auth: auth_state, + authz: authorizer, catalog: catalog_state, secrets: secrets_state, publisher, @@ -105,7 +91,7 @@ pub fn new_full_router< } } -fn maybe_add_auth( +fn maybe_add_auth( token_verifier: Option, router: Router>>, ) -> Router>> { diff --git a/crates/iceberg-catalog/src/catalog/config.rs b/crates/iceberg-catalog/src/catalog/config.rs index 27197fd3..6a96da8b 100644 --- a/crates/iceberg-catalog/src/catalog/config.rs +++ b/crates/iceberg-catalog/src/catalog/config.rs @@ -1,91 +1,54 @@ use crate::api::iceberg::v1::config::GetConfigQueryParams; -use crate::api::iceberg::v1::{ - ApiContext, CatalogConfig, ErrorModel, IcebergErrorResponse, Result, -}; +use crate::api::iceberg::v1::{ApiContext, CatalogConfig, ErrorModel, Result}; use crate::request_metadata::RequestMetadata; -use http::StatusCode; -use std::marker::PhantomData; +use 
crate::service::authz::{ProjectAction, WarehouseAction}; +use crate::service::token_verification::AuthDetails; use std::str::FromStr; use crate::service::SecretStore; -use crate::service::{ - auth::{AuthConfigHandler, AuthZHandler, UserWarehouse}, - config::ConfigProvider, - Catalog, ProjectIdent, State, -}; +use crate::service::{authz::Authorizer, Catalog, ProjectIdent, State}; use crate::CONFIG; -#[derive(Clone, Debug)] -pub struct Server, D: Catalog, T: AuthConfigHandler, A: AuthZHandler> { - auth_handler: PhantomData, - auth_state: PhantomData, - config_server: PhantomData, - catalog_state: PhantomData, -} +use super::CatalogServer; #[async_trait::async_trait] -impl< - C: ConfigProvider, - A: AuthZHandler, - D: Catalog, - S: SecretStore, - T: AuthConfigHandler, - > crate::api::iceberg::v1::config::Service> for Server +impl + crate::api::iceberg::v1::config::Service> for CatalogServer { async fn get_config( query: GetConfigQueryParams, - api_context: ApiContext>, + api_context: ApiContext>, request_metadata: RequestMetadata, ) -> Result { - let auth_info = T::get_and_validate_user_warehouse( - api_context.v1_state.auth.clone(), - &request_metadata, - ) - .await?; - - let UserWarehouse { - project_id: project_from_auth, - warehouse_id: warehouse_from_auth, - } = auth_info; - - if query.warehouse.is_none() && warehouse_from_auth.is_none() { - let e: IcebergErrorResponse = ErrorModel::builder() - .code(StatusCode::BAD_REQUEST.into()) - .message("No warehouse specified. Please specify the 'warehouse' parameter in the GET /config request.".to_string()) - .r#type("GetConfigNoWarehouseProvided".to_string()) - .build() - .into(); - return Err(e); - } - - let (project_from_arg, warehouse_from_arg) = query - .warehouse - .map_or((None, None), |arg| parse_warehouse_arg(&arg)); - - if let Some(project_from_arg) = &project_from_arg { - // This is a user-provided project-id, so we need to check if the user is allowed to access it - T::check_list_warehouse_in_project( - api_context.v1_state.auth.clone(), - project_from_arg, - &request_metadata, - ) - .await?; - } - - let project_id = project_from_arg - .or(project_from_auth) - .or(CONFIG.default_project_id.map(std::convert::Into::into)) - .ok_or_else(|| { - let e: IcebergErrorResponse = ErrorModel::builder() - .code(StatusCode::BAD_REQUEST.into()) - .message("No project provided".to_string()) - .r#type("GetConfigNoProjectProvided".to_string()) - .build() - .into(); - e - })?; - - let warehouse_id = if let Some(warehouse_from_arg) = warehouse_from_arg { + let authorizer = api_context.v1_state.authz; + let project_id_from_auth = &request_metadata + .auth_details + .as_ref() + .and_then(AuthDetails::project_id); + let warehouse_id_from_auth = &request_metadata + .auth_details + .as_ref() + .and_then(AuthDetails::warehouse_id); + + // Arg takes precedence over auth + let warehouse_id = if let Some(query_warehouse) = query.warehouse { + let (project_from_arg, warehouse_from_arg) = parse_warehouse_arg(&query_warehouse); + let project_id = project_from_arg + .or(*project_id_from_auth) + .or(CONFIG.default_project_id) + .ok_or_else(|| { + // ToDo Christian: Split Project into separate endpoint, Use name + ErrorModel::bad_request( + "No project provided. Please provide warehouse as: /", + "GetConfigNoProjectProvided", None) + })?; + authorizer + .require_project_action( + &request_metadata, + project_id, + &ProjectAction::CanListWarehouses, + ) + .await?; C::require_warehouse_by_name( &warehouse_from_arg, project_id, @@ -93,42 +56,22 @@ impl< ) .await? 
} else { - warehouse_from_auth.ok_or_else(|| { - let e: IcebergErrorResponse = ErrorModel::builder() - .code(StatusCode::BAD_REQUEST.into()) - .message("No warehouse provided".to_string()) - .r#type("GetConfigNoWarehouseProvided".to_string()) - .build() - .into(); - e + warehouse_id_from_auth.ok_or_else(|| { + ErrorModel::bad_request("No warehouse specified. Please specify the 'warehouse' parameter in the GET /config request.".to_string(), "GetConfigNoWarehouseProvided", None) })? }; - T::check_user_get_config_for_warehouse( - api_context.v1_state.auth.clone(), - warehouse_id, - &request_metadata, - ) - .await?; + authorizer + .require_warehouse_action( + &request_metadata, + warehouse_id, + &WarehouseAction::CanGetConfig, + ) + .await?; // Get config from DB and new token from AuthHandler simultaneously - let config = C::require_config_for_warehouse(warehouse_id, api_context.v1_state.catalog); - - // Give the auth-handler a chance to exchange / enrich the token - let new_token = T::exchange_token_for_warehouse( - api_context.v1_state.auth.clone(), - &request_metadata, - &project_id, - warehouse_id, - ); - - let (config, new_token) = futures::join!(config, new_token); - let new_token = new_token?; - let mut config = config?; - - if let Some(new_token) = new_token { - config.overrides.insert("token".to_string(), new_token); - } + let mut config = + C::require_config_for_warehouse(warehouse_id, api_context.v1_state.catalog).await?; config .overrides @@ -142,22 +85,16 @@ impl< } } -fn parse_warehouse_arg(arg: &str) -> (Option, Option) { - // structure of the argument is <(optional uuid project_id)>/ - fn filter_empty_strings(s: String) -> Option { - if s.is_empty() { - None - } else { - Some(s) - } - } +fn parse_warehouse_arg(arg: &str) -> (Option, String) { + // structure of the argument is <(optional uuid project_id)>/ + // Warehouse names cannot include / // Split arg at first / let parts: Vec<&str> = arg.splitn(2, '/').collect(); match parts.len() { 1 => { // No project_id provided - let warehouse_name = filter_empty_strings(parts[0].to_string()); + let warehouse_name = parts[0].to_string(); (None, warehouse_name) } 2 => { @@ -165,10 +102,10 @@ fn parse_warehouse_arg(arg: &str) -> (Option, Option) { // If parts[0] is a valid UUID, it is a project_id, otherwise the whole thing is a warehouse_id match ProjectIdent::from_str(parts[0]) { Ok(project_id) => { - let warehouse_name = filter_empty_strings(parts[1].to_string()); + let warehouse_name = parts[1].to_string(); (Some(project_id), warehouse_name) } - Err(_) => (None, filter_empty_strings(arg.to_string())), + Err(_) => (None, arg.to_string()), } } // Because of the splitn(2, ..) 
there can't be more than 2 parts diff --git a/crates/iceberg-catalog/src/catalog/metrics.rs b/crates/iceberg-catalog/src/catalog/metrics.rs index 9f6e4358..9beae412 100644 --- a/crates/iceberg-catalog/src/catalog/metrics.rs +++ b/crates/iceberg-catalog/src/catalog/metrics.rs @@ -1,12 +1,12 @@ use crate::api::iceberg::v1::{ApiContext, Result, TableParameters}; use crate::request_metadata::RequestMetadata; -use crate::service::{auth::AuthZHandler, secrets::SecretStore, Catalog, State}; +use crate::service::{authz::Authorizer, secrets::SecretStore, Catalog, State}; use super::CatalogServer; #[async_trait::async_trait] -impl +impl crate::api::iceberg::v1::metrics::Service> for CatalogServer { async fn report_metrics( diff --git a/crates/iceberg-catalog/src/catalog/mod.rs b/crates/iceberg-catalog/src/catalog/mod.rs index 9cca53c3..a1869b67 100644 --- a/crates/iceberg-catalog/src/catalog/mod.rs +++ b/crates/iceberg-catalog/src/catalog/mod.rs @@ -9,7 +9,6 @@ mod s3_signer; mod tables; mod views; -pub use config::Server as ConfigServer; use iceberg::spec::{TableMetadata, ViewMetadata}; use iceberg_ext::catalog::rest::IcebergErrorResponse; pub use namespace::{MAX_NAMESPACE_DEPTH, UNSUPPORTED_NAMESPACE_PROPERTIES}; @@ -17,7 +16,7 @@ pub use namespace::{MAX_NAMESPACE_DEPTH, UNSUPPORTED_NAMESPACE_PROPERTIES}; use crate::api::{iceberg::v1::Prefix, ErrorModel, Result}; use crate::service::storage::StorageCredential; use crate::{ - service::{auth::AuthZHandler, secrets::SecretStore, Catalog}, + service::{authz::Authorizer, secrets::SecretStore, Catalog}, WarehouseIdent, }; use std::collections::HashMap; @@ -41,9 +40,9 @@ impl CommonMetadata for ViewMetadata { #[derive(Clone, Debug)] -pub struct CatalogServer { +pub struct CatalogServer { auth_handler: PhantomData, - config_server: PhantomData, + catalog_backend: PhantomData, secret_store: PhantomData, } diff --git a/crates/iceberg-catalog/src/catalog/namespace.rs b/crates/iceberg-catalog/src/catalog/namespace.rs index 65db468f..7513dc0b 100644 --- a/crates/iceberg-catalog/src/catalog/namespace.rs +++ b/crates/iceberg-catalog/src/catalog/namespace.rs @@ -4,8 +4,10 @@ use crate::api::iceberg::v1::{ ListNamespacesQuery, ListNamespacesResponse, NamespaceParameters, Prefix, Result, UpdateNamespacePropertiesRequest, UpdateNamespacePropertiesResponse, }; +use crate::api::set_not_found_status_code; use crate::request_metadata::RequestMetadata; -use crate::service::{auth::AuthZHandler, secrets::SecretStore, Catalog, State, Transaction as _}; +use crate::service::authz::{NamespaceAction, WarehouseAction}; +use crate::service::{authz::Authorizer, secrets::SecretStore, Catalog, State, Transaction as _}; use crate::service::{GetWarehouseResponse, NamespaceIdentUuid}; use crate::CONFIG; use http::StatusCode; @@ -21,7 +23,7 @@ pub const UNSUPPORTED_NAMESPACE_PROPERTIES: &[&str] = &[]; pub const MAX_NAMESPACE_DEPTH: i32 = 1; #[async_trait::async_trait] -impl +impl crate::api::iceberg::v1::namespace::Service> for CatalogServer { async fn list_namespaces( @@ -40,16 +42,52 @@ impl parent.as_ref().map(validate_namespace_ident).transpose()?; // ------------------- AUTHZ ------------------- - A::check_list_namespace( - &request_metadata, - warehouse_id, - query.parent.as_ref(), - state.v1_state.auth, - ) - .await?; + let authorizer = state.v1_state.authz; + authorizer + .require_warehouse_action( + &request_metadata, + warehouse_id, + &WarehouseAction::CanListNamespaces, + ) + .await?; + + let mut t = if let Some(parent) = parent { + let mut t = 
C::Transaction::begin_read(state.v1_state.catalog).await?; + let namespace_id = C::namespace_to_id(warehouse_id, parent, t.transaction()).await; // Cannot fail before authz + authorizer + .require_namespace_action( + &request_metadata, + warehouse_id, + namespace_id, + &NamespaceAction::CanListNamespaces, + ) + .await?; + t + } else { + C::Transaction::begin_read(state.v1_state.catalog).await? + }; // ------------------- BUSINESS LOGIC ------------------- - C::list_namespaces(warehouse_id, &query, state.v1_state.catalog).await + let list_namespaces = C::list_namespaces(warehouse_id, &query, t.transaction()).await?; + // ToDo: Better pagination with non-empty pages + let namespaces: Vec<_> = + futures::future::try_join_all(list_namespaces.namespaces.iter().map(|n| { + authorizer.is_allowed_namespace_action( + &request_metadata, + warehouse_id, + *n.0, + &NamespaceAction::CanGetMetadata, + ) + })) + .await? + .into_iter() + .zip(list_namespaces.namespaces.into_iter()) + .filter_map(|(allowed, namespace)| if allowed { Some(namespace.1) } else { None }) + .collect(); + Ok(ListNamespacesResponse { + namespaces, + next_page_token: list_namespaces.next_page_token, + }) } async fn create_namespace( @@ -85,23 +123,24 @@ impl } // ------------------- AUTHZ ------------------- - A::check_create_namespace( - &request_metadata, - warehouse_id, - request.namespace.parent().as_ref(), - state.v1_state.auth, - ) - .await?; + let authorizer = state.v1_state.authz; + authorizer + .require_warehouse_action( + &request_metadata, + warehouse_id, + &WarehouseAction::CanCreateNamespace, + ) + .await?; // ------------------- BUSINESS LOGIC ------------------- let namespace_id = NamespaceIdentUuid::default(); - // Set location if not specified - validate location if specified let mut t = C::Transaction::begin_write(state.v1_state.catalog).await?; let warehouse = C::require_warehouse(warehouse_id, t.transaction()).await?; let mut namespace_props = NamespaceProperties::try_from_maybe_props(properties.clone()) .map_err(|e| ErrorModel::bad_request(e.to_string(), e.err_type(), None))?; + // Set location if not specified - validate location if specified set_namespace_location_property(&mut namespace_props, &warehouse, namespace_id)?; let mut request = request; @@ -123,17 +162,27 @@ impl validate_namespace_ident(¶meters.namespace)?; // ------------------- AUTHZ ------------------- - A::check_load_namespace_metadata( - &request_metadata, - warehouse_id, - ¶meters.namespace, - state.v1_state.auth, - ) - .await?; + let authorizer = state.v1_state.authz; + authorizer + .require_warehouse_action(&request_metadata, warehouse_id, &WarehouseAction::CanUse) + .await?; + + let mut t = C::Transaction::begin_read(state.v1_state.catalog).await?; + let namespace_id = + C::namespace_to_id(warehouse_id, ¶meters.namespace, t.transaction()).await; // Cannot fail before authz + + let namespace_id = authorizer + .require_namespace_action( + &request_metadata, + warehouse_id, + namespace_id, + &NamespaceAction::CanGetMetadata, + ) + .await + .map_err(set_not_found_status_code)?; // ------------------- BUSINESS LOGIC ------------------- - let mut t = C::Transaction::begin_write(state.v1_state.catalog).await?; - let r = C::get_namespace(warehouse_id, ¶meters.namespace, t.transaction()).await?; + let r = C::get_namespace(warehouse_id, namespace_id, t.transaction()).await?; t.commit().await?; Ok(GetNamespaceResponse { properties: r.properties, @@ -152,31 +201,25 @@ impl validate_namespace_ident(¶meters.namespace)?; // ------------------- AUTHZ 
------------------- - A::check_namespace_exists( - &request_metadata, - warehouse_id, - ¶meters.namespace, - state.v1_state.auth, - ) - .await?; + let authorizer = state.v1_state.authz; + authorizer + .require_warehouse_action(&request_metadata, warehouse_id, &WarehouseAction::CanUse) + .await?; // ------------------- BUSINESS LOGIC ------------------- - if C::namespace_ident_to_id(warehouse_id, ¶meters.namespace, state.v1_state.catalog) - .await? - .is_some() - { - Ok(()) - } else { - Err(ErrorModel::builder() - .code(StatusCode::NOT_FOUND.into()) - .message(format!( - "Namespace '{}' not found.", - parameters.namespace.to_url_string() - )) - .r#type("NoSuchNamespaceException".to_string()) - .build() - .into()) - } + let mut t = C::Transaction::begin_read(state.v1_state.catalog).await?; + let namespace_id = + C::namespace_to_id(warehouse_id, ¶meters.namespace, t.transaction()).await; // Cannot fail before authz + authorizer + .require_namespace_action( + &request_metadata, + warehouse_id, + namespace_id, + &NamespaceAction::CanGetMetadata, + ) + .await + .map_err(set_not_found_status_code)?; + Ok(()) } /// Drop a namespace from the catalog. Namespace must be empty. @@ -202,17 +245,25 @@ impl } // ------------------- AUTHZ ------------------- - A::check_drop_namespace( - &request_metadata, - warehouse_id, - ¶meters.namespace, - state.v1_state.auth, - ) - .await?; + let authorizer = state.v1_state.authz; + authorizer + .require_warehouse_action(&request_metadata, warehouse_id, &WarehouseAction::CanUse) + .await?; + let mut t = C::Transaction::begin_write(state.v1_state.catalog).await?; + let namespace_id = + C::namespace_to_id(warehouse_id, ¶meters.namespace, t.transaction()).await; // Cannot fail before authz + + let namespace_id = authorizer + .require_namespace_action( + &request_metadata, + warehouse_id, + namespace_id, + &NamespaceAction::CanDelete, + ) + .await?; // ------------------- BUSINESS LOGIC ------------------- - let mut t = C::Transaction::begin_write(state.v1_state.catalog).await?; - let r = C::drop_namespace(warehouse_id, ¶meters.namespace, t.transaction()).await?; + let r = C::drop_namespace(warehouse_id, namespace_id, t.transaction()).await?; t.commit().await?; Ok(r) } @@ -241,27 +292,30 @@ impl let updates = NamespaceProperties::try_from_maybe_props(updates.clone()) .map_err(|e| ErrorModel::bad_request(e.to_string(), e.err_type(), None))?; // ------------------- AUTHZ ------------------- - A::check_update_namespace_properties( - &request_metadata, - warehouse_id, - ¶meters.namespace, - state.v1_state.auth, - ) - .await?; + let authorizer = state.v1_state.authz; + authorizer + .require_warehouse_action(&request_metadata, warehouse_id, &WarehouseAction::CanUse) + .await?; + let mut t = C::Transaction::begin_write(state.v1_state.catalog).await?; + let namespace_id = + C::namespace_to_id(warehouse_id, ¶meters.namespace, t.transaction()).await; // Cannot fail before authz + + let namespace_id = authorizer + .require_namespace_action( + &request_metadata, + warehouse_id, + namespace_id, + &NamespaceAction::CanUpdateProperties, + ) + .await?; // ------------------- BUSINESS LOGIC ------------------- - let mut t = C::Transaction::begin_write(state.v1_state.catalog).await?; let previous_properties = - C::get_namespace(warehouse_id, ¶meters.namespace, t.transaction()).await?; + C::get_namespace(warehouse_id, namespace_id, t.transaction()).await?; let (new_properties, r) = update_namespace_properties(previous_properties.properties, updates, removals); - 
C::update_namespace_properties( - warehouse_id, - ¶meters.namespace, - new_properties, - t.transaction(), - ) - .await?; + C::update_namespace_properties(warehouse_id, namespace_id, new_properties, t.transaction()) + .await?; t.commit().await?; Ok(r) } diff --git a/crates/iceberg-catalog/src/catalog/s3_signer/sign.rs b/crates/iceberg-catalog/src/catalog/s3_signer/sign.rs index ab4e08d1..686fb175 100644 --- a/crates/iceberg-catalog/src/catalog/s3_signer/sign.rs +++ b/crates/iceberg-catalog/src/catalog/s3_signer/sign.rs @@ -6,6 +6,7 @@ use std::vec; use crate::api::iceberg::types::Prefix; use crate::api::{ApiContext, Result}; use crate::api::{ErrorModel, IcebergErrorResponse, S3SignRequest, S3SignResponse}; +use crate::service::authz::{TableAction, WarehouseAction}; use aws_sigv4::http_request::{sign as aws_sign, SignableBody, SignableRequest, SigningSettings}; use aws_sigv4::sign::v4; use aws_sigv4::{self}; @@ -14,9 +15,8 @@ use super::super::CatalogServer; use super::error::SignError; use crate::catalog::require_warehouse_id; use crate::request_metadata::RequestMetadata; -use crate::service::secrets::SecretStore; use crate::service::storage::{S3Location, S3Profile, StorageCredential}; -use crate::service::{auth::AuthZHandler, Catalog, ListFlags, State}; +use crate::service::{authz::Authorizer, secrets::SecretStore, Catalog, ListFlags, State}; use crate::service::{GetTableMetadataResponse, TableIdentUuid}; use crate::WarehouseIdent; @@ -34,7 +34,7 @@ const HEADERS_TO_SIGN: [&str; 7] = [ ]; #[async_trait::async_trait] -impl +impl crate::api::iceberg::v1::s3_signer::Service> for CatalogServer { #[allow(clippy::too_many_lines)] @@ -47,6 +47,10 @@ impl request_metadata: RequestMetadata, ) -> Result { let warehouse_id = require_warehouse_id(prefix.clone())?; + let authorizer = state.v1_state.authz; + authorizer + .require_warehouse_action(&request_metadata, warehouse_id, &WarehouseAction::CanUse) + .await?; let S3SignRequest { region: request_region, @@ -68,10 +72,38 @@ impl // We are looking for the path in the database, which allows us to also work with AuthN solutions // that do not support custom data in tokens. Perspectively, we should // try to get per-table signer.uri support in Spark. - let (table_id, table_metadata) = if let Ok(table_id) = require_table_id(table.clone()) { - (table_id, None) + let GetTableMetadataResponse { + table: _, + table_id, + namespace_id: _, + warehouse_id: _, + location, + metadata_location: _, + storage_secret_ident, + storage_profile, + } = if let Ok(table_id) = require_table_id(table.clone()) { + let metadata = C::get_table_metadata_by_id( + warehouse_id, + table_id, + ListFlags { + include_staged, + // we were able to resolve the table to id so we know the table is not deleted + include_deleted: false, + include_active: true, + }, + state.v1_state.catalog, + ) + .await; + authorizer + .require_table_action( + &request_metadata, + warehouse_id, + metadata, + &TableAction::CanGetMetadata, + ) + .await? 
} else { - let table_metadata = C::get_table_metadata_by_s3_location( + let metadata = C::get_table_metadata_by_s3_location( warehouse_id, parsed_url.location.location(), ListFlags { @@ -84,17 +116,15 @@ impl }, state.v1_state.catalog.clone(), ) - .await - .map_err(|e| { - ErrorModel::builder() - .code(http::StatusCode::UNAUTHORIZED.into()) - .message("Unauthorized".to_string()) - .r#type("InvalidLocation".to_string()) - .source(Some(Box::new(e.error))) - .build() - })?; - - (table_metadata.table_id, Some(table_metadata)) + .await; + authorizer + .require_table_action( + &request_metadata, + warehouse_id, + metadata, + &TableAction::CanGetMetadata, + ) + .await? }; // First check - fail fast if requested table is not allowed. @@ -104,36 +134,10 @@ impl &request_metadata, warehouse_id, table_id, - state.v1_state.auth, + authorizer, ) .await?; - // Load table metadata if not already loaded - let GetTableMetadataResponse { - table: _, - table_id, - warehouse_id: _, - location, - metadata_location: _, - storage_secret_ident, - storage_profile, - } = if let Some(table_metadata) = table_metadata { - table_metadata - } else { - C::get_table_metadata_by_id( - warehouse_id, - table_id, - ListFlags { - include_staged, - // we were able to resolve the table to id so we know the table is not deleted - include_deleted: false, - include_active: true, - }, - state.v1_state.catalog, - ) - .await? - }; - let extend_err = |mut e: IcebergErrorResponse| { e.error = e .error @@ -334,21 +338,35 @@ fn validate_region(region: &str, storage_profile: &S3Profile) -> Result<()> { Ok(()) } -async fn validate_table_method( +async fn validate_table_method( method: &http::Method, metadata: &RequestMetadata, warehouse_id: WarehouseIdent, table_id: TableIdentUuid, - auth_state: A::State, + authorizer: A, ) -> Result<()> { // First check - fail fast if requested table is not allowed. // We also need to check later if the path matches the table location. if WRITE_METHODS.contains(&method.as_str()) { - // We specify namespace as none for AuthZ check because we don't want to grant access to potentially + // We specify namespace as none for AuthZ check because we don't want to grant access to // locations not known to the catalog. 
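        // Write-style HTTP methods (WRITE_METHODS) must pass the CanWriteData check on
        // the table, read-style methods (READ_METHODS) only need CanReadData; any other
        // method is rejected further below with 405 Method Not Allowed.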
- A::check_commit_table(metadata, warehouse_id, Some(table_id), None, auth_state).await?; + authorizer + .require_table_action( + metadata, + warehouse_id, + Ok(Some(table_id)), + &TableAction::CanWriteData, + ) + .await?; } else if READ_METHODS.contains(&method.as_str()) { - A::check_load_table(metadata, warehouse_id, None, Some(table_id), auth_state).await?; + authorizer + .require_table_action( + metadata, + warehouse_id, + Ok(Some(table_id)), + &TableAction::CanReadData, + ) + .await?; } else { return Err(ErrorModel::builder() .code(http::StatusCode::METHOD_NOT_ALLOWED.into()) diff --git a/crates/iceberg-catalog/src/catalog/tables.rs b/crates/iceberg-catalog/src/catalog/tables.rs index 68479976..42f075d3 100644 --- a/crates/iceberg-catalog/src/catalog/tables.rs +++ b/crates/iceberg-catalog/src/catalog/tables.rs @@ -16,19 +16,23 @@ use crate::api::iceberg::v1::{ }; use crate::api::management::v1::warehouse::TabularDeleteProfile; use crate::api::management::v1::TabularType; +use crate::api::set_not_found_status_code; use crate::catalog::compression_codec::CompressionCodec; use crate::request_metadata::RequestMetadata; +use crate::service::authz::{NamespaceAction, TableAction, WarehouseAction}; use crate::service::contract_verification::{ContractVerification, ContractVerificationOutcome}; use crate::service::event_publisher::{CloudEventsPublisher, EventMetadata}; use crate::service::storage::{StorageLocations as _, StoragePermissions, StorageProfile}; -use crate::service::tabular_idents::TabularIdentUuid; use crate::service::task_queue::tabular_expiration_queue::TabularExpirationInput; use crate::service::task_queue::tabular_purge_queue::TabularPurgeInput; +use crate::service::TabularIdentUuid; use crate::service::{ - auth::AuthZHandler, secrets::SecretStore, Catalog, CreateTableResponse, ListFlags, - LoadTableResponse as CatalogLoadTableResult, State, TableCreation, Transaction, + authz::Authorizer, secrets::SecretStore, Catalog, CreateTableResponse, ListFlags, + LoadTableResponse as CatalogLoadTableResult, State, Transaction, +}; +use crate::service::{ + GetNamespaceResponse, TableCommit, TableCreation, TableIdentUuid, WarehouseStatus, }; -use crate::service::{GetNamespaceResponse, TableCommit, TableIdentUuid, WarehouseStatus}; use http::StatusCode; use iceberg::spec::{ @@ -45,7 +49,7 @@ const PROPERTY_METADATA_DELETE_AFTER_COMMIT_ENABLED: &str = const PROPERTY_METADATA_DELETE_AFTER_COMMIT_ENABLED_DEFAULT: bool = false; #[async_trait::async_trait] -impl +impl crate::api::iceberg::v1::tables::Service> for CatalogServer { /// List all table identifiers underneath a given namespace @@ -61,13 +65,22 @@ impl validate_namespace_ident(&namespace)?; // ------------------- AUTHZ ------------------- - A::check_list_tables( - &request_metadata, - warehouse_id, - &namespace, - state.v1_state.auth, - ) - .await?; + let authorizer = state.v1_state.authz; + authorizer + .require_warehouse_action(&request_metadata, warehouse_id, &WarehouseAction::CanUse) + .await?; + let mut t: ::Transaction = + C::Transaction::begin_read(state.v1_state.catalog).await?; + let namespace_id = C::namespace_to_id(warehouse_id, &namespace, t.transaction()).await; // We can't fail before AuthZ. 
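        // The lookup result (Ok or Err) is handed to the authorizer unchanged, so the
        // CanListTables check is evaluated before any lookup failure is surfaced and
        // namespace existence is not leaked to unauthorized callers.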
+ + authorizer + .require_namespace_action( + &request_metadata, + warehouse_id, + namespace_id, + &NamespaceAction::CanListTables, + ) + .await?; // ------------------- BUSINESS LOGIC ------------------- let include_staged = false; @@ -82,14 +95,30 @@ impl include_staged, include_deleted, }, - state.v1_state.catalog, + t.transaction(), pagination_query, ) .await?; + // ToDo: Better pagination with non-empty pages + let next_page_token = tables.next_page_token; + let identifiers = futures::future::try_join_all(tables.tabulars.iter().map(|t| { + authorizer.is_allowed_table_action( + &request_metadata, + warehouse_id, + *t.0, + &TableAction::CanShowInList, + ) + })) + .await? + .into_iter() + .zip(tables.tabulars.into_iter()) + .filter_map(|(allowed, table)| if allowed { Some(table.1) } else { None }) + .collect(); + Ok(ListTablesResponse { - next_page_token: None, - identifiers: tables.into_iter().map(|t| t.1).collect(), + next_page_token, + identifiers, }) } @@ -114,19 +143,25 @@ impl } // ------------------- AUTHZ ------------------- - A::check_create_table( - &request_metadata, - warehouse_id, - &namespace, - state.v1_state.auth, - ) - .await?; + let authorizer = state.v1_state.authz; + authorizer + .require_warehouse_action(&request_metadata, warehouse_id, &WarehouseAction::CanUse) + .await?; + let mut t = C::Transaction::begin_write(state.v1_state.catalog).await?; + let namespace_id = C::namespace_to_id(warehouse_id, &namespace, t.transaction()).await; // We can't fail before AuthZ. + let namespace_id = authorizer + .require_namespace_action( + &request_metadata, + warehouse_id, + namespace_id, + &NamespaceAction::CanCreateTable, + ) + .await?; // ------------------- BUSINESS LOGIC ------------------- let table_id: TabularIdentUuid = TabularIdentUuid::Table(uuid::Uuid::now_v7()); - let mut t = C::Transaction::begin_write(state.v1_state.catalog).await?; - let namespace = C::get_namespace(warehouse_id, &namespace, t.transaction()).await?; + let namespace = C::get_namespace(warehouse_id, namespace_id, t.transaction()).await?; let warehouse = C::require_warehouse(warehouse_id, t.transaction()).await?; let storage_profile = &warehouse.storage_profile; require_active_warehouse(warehouse.status)?; @@ -217,7 +252,6 @@ impl &data_access, storage_secret.as_ref(), &table_location, - // TODO: This should be a permission based on authz StoragePermissions::ReadWriteDelete, ) .await?; @@ -250,6 +284,7 @@ impl } /// Register a table in the given namespace using given metadata file location + #[allow(clippy::too_many_lines)] async fn register_table( _parameters: NamespaceParameters, _request: RegisterTableRequest, @@ -265,6 +300,7 @@ impl } /// Load a table from the catalog + #[allow(clippy::too_many_lines)] async fn load_table( parameters: TableParameters, data_access: DataAccess, @@ -290,11 +326,16 @@ impl } // ------------------- AUTHZ ------------------- - let include_staged = false; + let authorizer = state.v1_state.authz; + authorizer + .require_warehouse_action(&request_metadata, warehouse_id, &WarehouseAction::CanUse) + .await?; + let include_staged: bool = false; let include_deleted = false; let include_active = true; - let table_id = C::table_ident_to_id( + let mut t = C::Transaction::begin_read(state.v1_state.catalog).await?; + let table_id = C::table_to_id( warehouse_id, &table, ListFlags { @@ -302,25 +343,43 @@ impl include_staged, include_deleted, }, - state.v1_state.catalog.clone(), + t.transaction(), ) - .await - // We can't fail before AuthZ. 
- .ok() - .flatten(); + .await; // We can't fail before AuthZ. + let table_id = authorizer + .require_table_action( + &request_metadata, + warehouse_id, + table_id, + &TableAction::CanGetMetadata, + ) + .await + .map_err(set_not_found_status_code)?; + + let (read_access, write_access) = futures::try_join!( + authorizer.is_allowed_table_action( + &request_metadata, + warehouse_id, + table_id, + &TableAction::CanReadData, + ), + authorizer.is_allowed_table_action( + &request_metadata, + warehouse_id, + table_id, + &TableAction::CanWriteData, + ), + )?; - A::check_load_table( - &request_metadata, - warehouse_id, - Some(&table.namespace), - table_id, - state.v1_state.auth, - ) - .await?; + let storage_permissions = if write_access { + Some(StoragePermissions::ReadWriteDelete) + } else if read_access { + Some(StoragePermissions::Read) + } else { + None + }; // ------------------- BUSINESS LOGIC ------------------- - let table_id = require_table_id(&table, table_id)?; - let mut t = C::Transaction::begin_read(state.v1_state.catalog).await?; let mut metadatas = C::load_tables( warehouse_id, vec![table_id], @@ -338,20 +397,6 @@ impl } = remove_table(&table_id, &table, &mut metadatas)?; require_not_staged(&metadata_location)?; - // ToDo: This is a small inefficiency: We fetch the secret even if it might - // not be required based on the `data_access` parameter. - let storage_secret = if let Some(secret_id) = storage_secret_ident { - Some( - state - .v1_state - .secrets - .get_secret_by_id(&secret_id) - .await? - .secret, - ) - } else { - None - }; let table_location = Location::from_str(table_metadata.location()).map_err(|e| { ErrorModel::internal( format!("Invalid table location in DB: {e}"), @@ -359,21 +404,30 @@ impl Some(Box::new(e)), ) })?; - let load_table_result = LoadTableResult { - metadata_location: metadata_location.as_ref().map(ToString::to_string), - metadata: table_metadata, - config: Some( + + // ToDo: This is a small inefficiency: We fetch the secret even if it might + // not be required based on the `data_access` parameter. + let storage_config = if let Some(storage_permissions) = storage_permissions { + let storage_secret = + maybe_get_secret(storage_secret_ident, &state.v1_state.secrets).await?; + Some( storage_profile .generate_table_config( &data_access, storage_secret.as_ref(), &table_location, - // TODO: This should be a permission based on authz - StoragePermissions::ReadWriteDelete, + storage_permissions, ) - .await? - .into(), - ), + .await?, + ) + } else { + None + }; + + let load_table_result = LoadTableResult { + metadata_location: metadata_location.as_ref().map(ToString::to_string), + metadata: table_metadata, + config: storage_config.map(Into::into), }; Ok(load_table_result) @@ -398,11 +452,17 @@ impl validate_table_updates(&request.updates)?; // ------------------- AUTHZ ------------------- + let mut t = C::Transaction::begin_write(state.v1_state.catalog).await?; + let authorizer = state.v1_state.authz; + authorizer + .require_warehouse_action(&request_metadata, warehouse_id, &WarehouseAction::CanUse) + .await?; + let include_staged = true; let include_deleted = false; let include_active = true; - let table_id = C::table_ident_to_id( + let table_id = C::table_to_id( warehouse_id, &table_ident, ListFlags { @@ -410,24 +470,20 @@ impl include_staged, include_deleted, }, - state.v1_state.catalog.clone(), - ) - .await - // We can't fail before AuthZ. 
- .ok() - .flatten(); - - A::check_commit_table( - &request_metadata, - warehouse_id, - table_id, - Some(&table_ident.namespace), - state.v1_state.auth, + t.transaction(), ) - .await?; + .await; // We can't fail before AuthZ. + + let table_id = authorizer + .require_table_action( + &request_metadata, + warehouse_id, + table_id, + &TableAction::CanCommit, + ) + .await?; // ------------------- BUSINESS LOGIC ------------------- - let table_id = require_table_id(&table_ident, table_id)?; // serialize body before moving it let body = maybe_body_to_json(&request); let CommitTableRequest { @@ -436,7 +492,6 @@ impl updates, } = request; - let mut t = C::Transaction::begin_write(state.v1_state.catalog).await?; let mut previous_table = C::load_tables( warehouse_id, vec![table_id], @@ -559,11 +614,17 @@ impl validate_table_or_view_ident(&table)?; // ------------------- AUTHZ ------------------- + let authorizer = state.v1_state.authz; + authorizer + .require_warehouse_action(&request_metadata, warehouse_id, &WarehouseAction::CanUse) + .await?; + let include_staged = true; let include_deleted = false; let include_active = true; - let table_id = C::table_ident_to_id( + let mut t = C::Transaction::begin_write(state.v1_state.catalog).await?; + let table_id = C::table_to_id( warehouse_id, &table, ListFlags { @@ -571,34 +632,23 @@ impl include_staged, include_deleted, }, - state.v1_state.catalog.clone(), - ) - .await - // We can't fail before AuthZ. - .ok() - .flatten(); - - A::check_drop_table( - &request_metadata, - warehouse_id, - table_id, - state.v1_state.auth, + t.transaction(), ) - .await?; + .await; // We can't fail before AuthZ + + let table_id = authorizer + .require_table_action( + &request_metadata, + warehouse_id, + table_id, + &TableAction::CanDrop, + ) + .await?; // ------------------- BUSINESS LOGIC ------------------- let purge = purge_requested.unwrap_or(false); - let mut transaction = C::Transaction::begin_write(state.v1_state.catalog).await?; - let warehouse = C::require_warehouse(warehouse_id, transaction.transaction()).await?; - - let table_id = table_id.ok_or_else(|| { - ErrorModel::not_found( - format!("Table does not exist in warehouse {warehouse_id}"), - "TableNotFound", - None, - ) - })?; + let warehouse = C::require_warehouse(warehouse_id, t.transaction()).await?; state .v1_state @@ -609,11 +659,11 @@ impl match warehouse.tabular_delete_profile { TabularDeleteProfile::Hard {} => { - let location = C::drop_table(table_id, transaction.transaction()).await?; + let location = C::drop_table(table_id, t.transaction()).await?; // committing here means maybe dangling data if queue_tabular_purge fails // commiting after queuing means we may end up with a table pointing nowhere // I feel that some undeleted files are less bad than a table that's there but can't be loaded - transaction.commit().await?; + t.commit().await?; if purge { state @@ -632,12 +682,9 @@ impl tracing::debug!("Queued purge task for dropped table '{table_id}'."); } TabularDeleteProfile::Soft { expiration_seconds } => { - C::mark_tabular_as_deleted( - TabularIdentUuid::Table(*table_id), - transaction.transaction(), - ) - .await?; - transaction.commit().await?; + C::mark_tabular_as_deleted(TabularIdentUuid::Table(*table_id), t.transaction()) + .await?; + t.commit().await?; state .v1_state @@ -688,11 +735,17 @@ impl validate_table_or_view_ident(&table)?; // ------------------- AUTHZ ------------------- + let authorizer = state.v1_state.authz; + authorizer + .require_warehouse_action(&request_metadata, warehouse_id, 
&WarehouseAction::CanUse) + .await?; + let include_staged = false; let include_deleted = false; let include_active = true; - let table_id = C::table_ident_to_id( + let mut t = C::Transaction::begin_read(state.v1_state.catalog).await?; + let table_id = C::table_to_id( warehouse_id, &table, ListFlags { @@ -700,31 +753,21 @@ impl include_staged, include_deleted, }, - state.v1_state.catalog.clone(), - ) - .await - .transpose(); - // We can't fail before AuthZ. - A::check_table_exists( - &request_metadata, - warehouse_id, - Some(&table.namespace), - table_id.as_ref().and_then(|x| x.as_ref().ok()).copied(), - state.v1_state.auth, + t.transaction(), ) - .await?; + .await; // We can't fail before AuthZ + authorizer + .require_table_action( + &request_metadata, + warehouse_id, + table_id, + &TableAction::CanGetMetadata, + ) + .await + .map_err(set_not_found_status_code)?; // ------------------- BUSINESS LOGIC ------------------- - if table_id.transpose()?.is_some() { - Ok(()) - } else { - Err(ErrorModel::builder() - .code(StatusCode::NOT_FOUND.into()) - .message(format!("Table does not exist in warehouse {warehouse_id}")) - .r#type("TableNotFound".to_string()) - .build() - .into()) - } + Ok(()) } /// Rename a table @@ -745,11 +788,17 @@ impl validate_table_or_view_ident(&destination)?; // ------------------- AUTHZ ------------------- + let authorizer = state.v1_state.authz; + authorizer + .require_warehouse_action(&request_metadata, warehouse_id, &WarehouseAction::CanUse) + .await?; + let include_staged = false; let include_deleted = false; let include_active = true; - let source_id = C::table_ident_to_id( + let mut t = C::Transaction::begin_write(state.v1_state.catalog).await?; + let source_table_id = C::table_to_id( warehouse_id, &source, ListFlags { @@ -757,67 +806,56 @@ impl include_staged, include_deleted, }, - state.v1_state.catalog.clone(), + t.transaction(), ) - .await - // We can't fail before AuthZ. - .ok() - .flatten(); + .await; // We can't fail before AuthZ; + let source_table_id = authorizer + .require_table_action( + &request_metadata, + warehouse_id, + source_table_id, + &TableAction::CanRename, + ) + .await?; + let namespace_id = + C::namespace_to_id(warehouse_id, &source.namespace, t.transaction()).await; // We can't fail before AuthZ // We need to be allowed to delete the old table and create the new one - let rename_check = A::check_rename_table( - &request_metadata, - warehouse_id, - source_id, - state.v1_state.auth.clone(), - ); - let create_check = A::check_create_table( - &request_metadata, - warehouse_id, - &destination.namespace, - state.v1_state.auth, - ); - futures::try_join!(rename_check, create_check)?; + authorizer + .require_namespace_action( + &request_metadata, + warehouse_id, + namespace_id, + &NamespaceAction::CanCreateTable, + ) + .await?; // ------------------- BUSINESS LOGIC ------------------- if source == destination { return Ok(()); } - // This case should not happen after AuthZ. - // Its rust though, so we have do to something. 
- let source_id = source_id.ok_or_else(|| { - ErrorModel::builder() - .code(StatusCode::NOT_FOUND.into()) - .message(format!( - "Source table does not exist in warehouse {warehouse_id}" - )) - .r#type("TableNotFound".to_string()) - .build() - })?; - - let mut transaction = C::Transaction::begin_write(state.v1_state.catalog).await?; C::rename_table( warehouse_id, - source_id, + source_table_id, &source, &destination, - transaction.transaction(), + t.transaction(), ) .await?; state .v1_state .contract_verifiers - .check_rename(TabularIdentUuid::Table(*source_id), &destination) + .check_rename(TabularIdentUuid::Table(*source_table_id), &destination) .await? .into_result()?; - transaction.commit().await?; + t.commit().await?; emit_change_event( EventMetadata { - tabular_id: TabularIdentUuid::Table(*source_id), + tabular_id: TabularIdentUuid::Table(*source_table_id), warehouse_id: *warehouse_id, name: source.name, namespace: source.namespace.to_url_string(), @@ -868,6 +906,11 @@ impl } // ------------------- AUTHZ ------------------- + let authorizer = state.v1_state.authz; + authorizer + .require_warehouse_action(&request_metadata, warehouse_id, &WarehouseAction::CanUse) + .await?; + let include_staged = true; let include_deleted = false; let include_active = true; @@ -890,36 +933,31 @@ impl ) .await .map_err(|e| { - ErrorModel::builder() - .code(StatusCode::INTERNAL_SERVER_ERROR.into()) - .message("Error fetching table ids".to_string()) - .r#type("TableIdsFetchError".to_string()) - .stack( - vec![e.error.message, e.error.r#type] - .into_iter() - .chain(e.error.stack.into_iter()) - .collect(), - ) - .build() + ErrorModel::internal("Error fetching table ids", "TableIdsFetchError", None) + .append_details(vec![e.error.message, e.error.r#type]) + .append_details(e.error.stack) })?; - let auth_checks = table_ids - .iter() - .map(|(table_ident, table_id)| { - A::check_commit_table( + let authz_checks = table_ids + .values() + .map(|table_id| { + authorizer.require_table_action( &request_metadata, warehouse_id, - *table_id, - Some(&table_ident.namespace), - state.v1_state.auth.clone(), + Ok(*table_id), + &TableAction::CanCommit, ) }) .collect::>(); - futures::future::try_join_all(auth_checks).await?; + let table_uuids = futures::future::try_join_all(authz_checks).await?; + let table_ids = table_ids + .into_iter() + .zip(table_uuids) + .map(|((table_ident, _), table_uuid)| (table_ident, table_uuid)) + .collect::>(); // ------------------- BUSINESS LOGIC ------------------- - let table_ids = require_table_ids(table_ids)?; let mut transaction = C::Transaction::begin_write(state.v1_state.catalog).await?; let warehouse = C::require_warehouse(warehouse_id, transaction.transaction()).await?; @@ -1197,30 +1235,6 @@ pub(super) fn determine_tabular_location( Ok(location) } -fn require_table_ids( - table_ids: HashMap>, -) -> Result> { - table_ids - .into_iter() - .map(|(table_ident, table_id)| { - if let Some(table_id) = table_id { - Ok((table_ident, table_id)) - } else { - Err(ErrorModel::not_found( - format!( - "Table '{}.{}' does not exist.", - table_ident.namespace.to_url_string(), - table_ident.name - ), - "TableNotFound", - None, - ) - .into()) - } - }) - .collect::>>() -} - fn require_table_id( table_ident: &TableIdent, table_id: Option, diff --git a/crates/iceberg-catalog/src/catalog/views.rs b/crates/iceberg-catalog/src/catalog/views.rs index 564d6070..3e9a1699 100644 --- a/crates/iceberg-catalog/src/catalog/views.rs +++ b/crates/iceberg-catalog/src/catalog/views.rs @@ -15,13 +15,14 @@ use 
crate::api::iceberg::v1::{ ViewParameters, }; use crate::request_metadata::RequestMetadata; -use crate::service::{auth::AuthZHandler, secrets::SecretStore, Catalog, State}; +use crate::service::authz::Authorizer; +use crate::service::{Catalog, SecretStore, State}; use iceberg_ext::catalog::rest::{ErrorModel, ViewUpdate}; use iceberg_ext::configs::Location; use std::str::FromStr; #[async_trait::async_trait] -impl +impl crate::api::iceberg::v1::views::Service> for CatalogServer { /// List all view identifiers underneath a given namespace @@ -137,7 +138,7 @@ mod test { use crate::implementations::postgres::{ CatalogState, PostgresCatalog, ReadWrite, SecretsState, }; - use crate::implementations::{AllowAllAuthState, AllowAllAuthZHandler}; + use crate::service::authz::AllowAllAuthorizer; use crate::service::contract_verification::ContractVerifiers; use crate::service::event_publisher::CloudEventsPublisher; use crate::service::storage::{StorageProfile, TestProfile}; @@ -155,7 +156,7 @@ mod test { pool: PgPool, namespace_name: Option>, ) -> ( - ApiContext>, + ApiContext>, NamespaceIdent, WarehouseIdent, ) { @@ -178,18 +179,19 @@ mod test { None, ) .await + .1 .namespace; (api_context, namespace, warehouse_id) } pub(crate) fn get_api_context( pool: PgPool, - ) -> ApiContext> { + ) -> ApiContext> { let (tx, _) = tokio::sync::mpsc::channel(1000); ApiContext { v1_state: State { - auth: AllowAllAuthState, + authz: AllowAllAuthorizer, catalog: CatalogState::from_pools(pool.clone(), pool.clone()), secrets: SecretsState::from_pools(pool.clone(), pool.clone()), publisher: CloudEventsPublisher::new(tx.clone()), diff --git a/crates/iceberg-catalog/src/catalog/views/commit.rs b/crates/iceberg-catalog/src/catalog/views/commit.rs index 867cdb0d..86fb6d95 100644 --- a/crates/iceberg-catalog/src/catalog/views/commit.rs +++ b/crates/iceberg-catalog/src/catalog/views/commit.rs @@ -11,12 +11,13 @@ use crate::catalog::tables::{ }; use crate::catalog::views::{parse_view_location, validate_view_updates}; use crate::request_metadata::RequestMetadata; +use crate::service::authz::{ViewAction, WarehouseAction}; use crate::service::contract_verification::ContractVerification; use crate::service::event_publisher::EventMetadata; use crate::service::storage::{StorageLocations as _, StoragePermissions}; -use crate::service::tabular_idents::TabularIdentUuid; +use crate::service::TabularIdentUuid; use crate::service::{ - auth::AuthZHandler, secrets::SecretStore, Catalog, GetWarehouseResponse, State, TableIdentUuid, + authz::Authorizer, secrets::SecretStore, Catalog, GetWarehouseResponse, State, TableIdentUuid, Transaction, ViewMetadataWithLocation, }; use http::StatusCode; @@ -28,7 +29,7 @@ use uuid::Uuid; /// Commit updates to a view // TODO: break up into smaller fns #[allow(clippy::too_many_lines)] -pub(crate) async fn commit_view( +pub(crate) async fn commit_view( parameters: ViewParameters, request: CommitViewRequest, state: ApiContext>, @@ -48,45 +49,32 @@ pub(crate) async fn commit_view( validate_table_or_view_ident(&identifier)?; // ------------------- AUTHZ ------------------- - let view_id = C::view_ident_to_id(warehouse_id, &identifier, state.v1_state.catalog.clone()) - .await - // We can't fail before AuthZ. 
- .transpose(); - - A::check_commit_view( - &request_metadata, - warehouse_id, - view_id.as_ref().and_then(|id| id.as_ref().ok()), - Some(&identifier.namespace), - state.v1_state.auth, - ) - .await?; + let authorizer = state.v1_state.authz; + authorizer + .require_warehouse_action(&request_metadata, warehouse_id, &WarehouseAction::CanUse) + .await?; + let mut t = C::Transaction::begin_write(state.v1_state.catalog).await?; + let view_id = C::view_to_id(warehouse_id, &identifier, t.transaction()).await; // We can't fail before AuthZ; + + let view_id = authorizer + .require_view_action( + &request_metadata, + warehouse_id, + view_id, + &ViewAction::CanCommit, + ) + .await?; // ------------------- BUSINESS LOGIC ------------------- validate_view_updates(updates)?; - let namespace_id = C::namespace_ident_to_id( - warehouse_id, - &identifier.namespace, - state.v1_state.catalog.clone(), - ) - .await? - .ok_or(ErrorModel::not_found( - "Namespace does not exist", - "NamespaceNotFound", - None, - ))?; - - let view_id = view_id.transpose()?.ok_or_else(|| { - tracing::debug!("View does not exist."); - ErrorModel::not_found( - format!("View does not exist in warehouse {warehouse_id}"), - "ViewNotFound", + let namespace_id = C::namespace_to_id(warehouse_id, identifier.namespace(), t.transaction()) + .await? + .ok_or(ErrorModel::not_found( + "Namespace does not exist", + "NamespaceNotFound", None, - ) - })?; - - let mut transaction = C::Transaction::begin_write(state.v1_state.catalog).await?; + ))?; let GetWarehouseResponse { id: _, @@ -96,7 +84,7 @@ pub(crate) async fn commit_view( storage_secret_id, status, tabular_delete_profile: _, - } = C::require_warehouse(warehouse_id, transaction.transaction()).await?; + } = C::require_warehouse(warehouse_id, t.transaction()).await?; require_active_warehouse(status)?; check_asserts(requirements, view_id)?; @@ -104,7 +92,7 @@ pub(crate) async fn commit_view( let ViewMetadataWithLocation { metadata_location: _, metadata: before_update_metadata, - } = C::load_view(view_id, false, transaction.transaction()).await?; + } = C::load_view(view_id, false, t.transaction()).await?; let view_location = parse_view_location(&before_update_metadata.location)?; state @@ -132,7 +120,7 @@ pub(crate) async fn commit_view( &metadata_location, requested_update_metadata.clone(), &view_location, - transaction.transaction(), + t.transaction(), ) .await?; @@ -175,7 +163,7 @@ pub(crate) async fn commit_view( StoragePermissions::ReadWriteDelete, ) .await?; - transaction.commit().await?; + t.commit().await?; let _ = state .v1_state diff --git a/crates/iceberg-catalog/src/catalog/views/create.rs b/crates/iceberg-catalog/src/catalog/views/create.rs index 425fff9d..b68b987b 100644 --- a/crates/iceberg-catalog/src/catalog/views/create.rs +++ b/crates/iceberg-catalog/src/catalog/views/create.rs @@ -10,11 +10,11 @@ use crate::catalog::tables::{ use crate::catalog::views::validate_view_properties; use crate::catalog::{maybe_get_secret, require_warehouse_id}; use crate::request_metadata::RequestMetadata; -use crate::service::auth::AuthZHandler; +use crate::service::authz::{Authorizer, NamespaceAction, WarehouseAction}; use crate::service::event_publisher::EventMetadata; use crate::service::storage::{StorageLocations as _, StoragePermissions}; -use crate::service::tabular_idents::TabularIdentUuid; use crate::service::Result; +use crate::service::TabularIdentUuid; use crate::service::{Catalog, SecretStore, State, Transaction}; use iceberg::spec::ViewMetadataBuilder; use iceberg::{TableIdent, 
ViewCreation}; @@ -24,7 +24,7 @@ use uuid::Uuid; // TODO: split up into smaller functions #[allow(clippy::too_many_lines)] /// Create a view in the given namespace -pub(crate) async fn create_view( +pub(crate) async fn create_view( parameters: NamespaceParameters, request: CreateViewRequest, state: ApiContext>, @@ -49,26 +49,23 @@ pub(crate) async fn create_view( } // ------------------- AUTHZ ------------------- - A::check_create_view( - &request_metadata, - warehouse_id, - &namespace, - state.v1_state.auth.clone(), - ) - .await?; + let authorizer = &state.v1_state.authz; + authorizer + .require_warehouse_action(&request_metadata, warehouse_id, &WarehouseAction::CanUse) + .await?; + let mut t = C::Transaction::begin_write(state.v1_state.catalog.clone()).await?; + let namespace_id = C::namespace_to_id(warehouse_id, &namespace, t.transaction()).await; // Cannot fail before authz; + let namespace_id = authorizer + .require_namespace_action( + &request_metadata, + warehouse_id, + namespace_id, + &NamespaceAction::CanCreateView, + ) + .await?; // ------------------- BUSINESS LOGIC ------------------- - let namespace_id = - C::namespace_ident_to_id(warehouse_id, &namespace, state.v1_state.catalog.clone()) - .await? - .ok_or(ErrorModel::not_found( - "Namespace does not exist", - "NamespaceNotFound", - None, - ))?; - - let mut t = C::Transaction::begin_write(state.v1_state.catalog.clone()).await?; - let namespace = C::get_namespace(warehouse_id, &namespace, t.transaction()).await?; + let namespace = C::get_namespace(warehouse_id, namespace_id, t.transaction()).await?; let warehouse = C::require_warehouse(warehouse_id, t.transaction()).await?; let storage_profile = warehouse.storage_profile; require_active_warehouse(warehouse.status)?; @@ -188,7 +185,7 @@ pub(crate) mod test { use crate::implementations::postgres::namespace::tests::initialize_namespace; use crate::implementations::postgres::secrets::SecretsState; - use crate::implementations::AllowAllAuthZHandler; + use crate::service::authz::AllowAllAuthorizer; use iceberg::NamespaceIdent; use serde_json::json; use sqlx::PgPool; @@ -196,7 +193,7 @@ pub(crate) mod test { pub(crate) async fn create_view( api_context: ApiContext< State< - AllowAllAuthZHandler, + AllowAllAuthorizer, crate::implementations::postgres::PostgresCatalog, SecretsState, >, @@ -263,6 +260,7 @@ pub(crate) mod test { let new_ns = initialize_namespace(api_context.v1_state.catalog.clone(), whi, &namespace, None) .await + .1 .namespace; let _view = create_view(api_context, new_ns, rq, Some(whi.to_string())) diff --git a/crates/iceberg-catalog/src/catalog/views/drop.rs b/crates/iceberg-catalog/src/catalog/views/drop.rs index 555f6971..b0b11086 100644 --- a/crates/iceberg-catalog/src/catalog/views/drop.rs +++ b/crates/iceberg-catalog/src/catalog/views/drop.rs @@ -6,18 +6,17 @@ use crate::api::ApiContext; use crate::catalog::require_warehouse_id; use crate::catalog::tables::validate_table_or_view_ident; use crate::request_metadata::RequestMetadata; -use crate::service::auth::AuthZHandler; +use crate::service::authz::{Authorizer, ViewAction, WarehouseAction}; use crate::service::contract_verification::ContractVerification; use crate::service::event_publisher::EventMetadata; -use crate::service::tabular_idents::TabularIdentUuid; use crate::service::task_queue::tabular_expiration_queue::TabularExpirationInput; use crate::service::task_queue::tabular_purge_queue::TabularPurgeInput; use crate::service::Result; +use crate::service::TabularIdentUuid; use crate::service::{Catalog, 
SecretStore, State, Transaction}; -use iceberg_ext::catalog::rest::ErrorModel; use uuid::Uuid; -pub(crate) async fn drop_view( +pub(crate) async fn drop_view( parameters: ViewParameters, DropParams { purge_requested }: DropParams, state: ApiContext>, @@ -29,34 +28,26 @@ pub(crate) async fn drop_view( validate_table_or_view_ident(&view)?; // ------------------- AUTHZ ------------------- - let view_id = C::view_ident_to_id(warehouse_id, &view, state.v1_state.catalog.clone()) - .await - // We can't fail before AuthZ. - .transpose(); - - A::check_drop_view( - &request_metadata, - warehouse_id, - view_id.as_ref().and_then(|id| id.as_ref().ok()), - state.v1_state.auth, - ) - .await?; + let authorizer = state.v1_state.authz; + authorizer + .require_warehouse_action(&request_metadata, warehouse_id, &WarehouseAction::CanUse) + .await?; + let mut t = C::Transaction::begin_write(state.v1_state.catalog).await?; + let view_id = C::view_to_id(warehouse_id, &view, t.transaction()).await; // Can't fail before authz + + let view_id = authorizer + .require_view_action( + &request_metadata, + warehouse_id, + view_id, + &ViewAction::CanDrop, + ) + .await?; // ------------------- BUSINESS LOGIC ------------------- let purge_requested = purge_requested.unwrap_or(false); - let view_id = view_id.transpose()?.ok_or_else(|| { - tracing::debug!(?view, "View does not exist."); - ErrorModel::not_found( - format!("View does not exist in warehouse {warehouse_id}"), - "ViewNotFound", - None, - ) - })?; - - let mut transaction = C::Transaction::begin_write(state.v1_state.catalog).await?; - - let warehouse = C::require_warehouse(warehouse_id, transaction.transaction()).await?; + let warehouse = C::require_warehouse(warehouse_id, t.transaction()).await?; state .v1_state @@ -69,11 +60,11 @@ pub(crate) async fn drop_view( match warehouse.tabular_delete_profile { TabularDeleteProfile::Hard {} => { - let location = C::drop_view(view_id, transaction.transaction()).await?; + let location = C::drop_view(view_id, t.transaction()).await?; // committing here means maybe dangling data if the queue fails // OTOH committing after queuing means we may end up with a view pointing to deleted files // I feel that some undeleted files are less bad than a view that cannot be loaded - transaction.commit().await?; + t.commit().await?; if purge_requested { state @@ -90,9 +81,8 @@ pub(crate) async fn drop_view( } } TabularDeleteProfile::Soft { expiration_seconds } => { - C::mark_tabular_as_deleted(TabularIdentUuid::View(*view_id), transaction.transaction()) - .await?; - transaction.commit().await?; + C::mark_tabular_as_deleted(TabularIdentUuid::View(*view_id), t.transaction()).await?; + t.commit().await?; state .v1_state diff --git a/crates/iceberg-catalog/src/catalog/views/exists.rs b/crates/iceberg-catalog/src/catalog/views/exists.rs index a2702b5c..5c492e89 100644 --- a/crates/iceberg-catalog/src/catalog/views/exists.rs +++ b/crates/iceberg-catalog/src/catalog/views/exists.rs @@ -1,15 +1,13 @@ use crate::api::iceberg::v1::ViewParameters; -use crate::api::ApiContext; +use crate::api::{set_not_found_status_code, ApiContext}; use crate::catalog::require_warehouse_id; use crate::catalog::tables::validate_table_or_view_ident; use crate::request_metadata::RequestMetadata; -use crate::service::auth::AuthZHandler; +use crate::service::authz::{Authorizer, ViewAction, WarehouseAction}; use crate::service::Result; -use crate::service::{Catalog, SecretStore, State}; -use http::StatusCode; -use iceberg_ext::catalog::rest::ErrorModel; +use 
crate::service::{Catalog, SecretStore, State, Transaction}; -pub(crate) async fn view_exists( +pub(crate) async fn view_exists( parameters: ViewParameters, state: ApiContext>, request_metadata: RequestMetadata, @@ -19,30 +17,24 @@ pub(crate) async fn view_exists( let warehouse_id = require_warehouse_id(prefix.clone())?; validate_table_or_view_ident(&view)?; - let view_id = C::view_ident_to_id(warehouse_id, &view, state.v1_state.catalog.clone()) - .await - .transpose(); - - A::check_view_exists( - &request_metadata, - warehouse_id, - Some(&view.namespace), - view_id.as_ref().and_then(|x| x.as_ref().ok()), - state.v1_state.auth, - ) - .await?; - // ------------------- BUSINESS LOGIC ------------------- - if view_id.transpose()?.is_some() { - Ok(()) - } else { - Err(ErrorModel::builder() - .code(StatusCode::NOT_FOUND.into()) - .message(format!("Table does not exist in warehouse {warehouse_id}")) - .r#type("TableNotFound".to_string()) - .build() - .into()) - } + let authorizer = state.v1_state.authz; + authorizer + .require_warehouse_action(&request_metadata, warehouse_id, &WarehouseAction::CanUse) + .await?; + let mut t = C::Transaction::begin_read(state.v1_state.catalog).await?; + let view_id = C::view_to_id(warehouse_id, &view, t.transaction()).await; // Can't fail before authz + + authorizer + .require_view_action( + &request_metadata, + warehouse_id, + view_id, + &ViewAction::CanGetMetadata, + ) + .await + .map_err(set_not_found_status_code)?; + Ok(()) } #[cfg(test)] @@ -101,6 +93,6 @@ mod test { .await .unwrap_err(); - assert_eq!(non_exist.error.code, StatusCode::NOT_FOUND); + assert_eq!(non_exist.error.code, http::StatusCode::NOT_FOUND); } } diff --git a/crates/iceberg-catalog/src/catalog/views/list.rs b/crates/iceberg-catalog/src/catalog/views/list.rs index 593e116b..5a51498c 100644 --- a/crates/iceberg-catalog/src/catalog/views/list.rs +++ b/crates/iceberg-catalog/src/catalog/views/list.rs @@ -4,11 +4,11 @@ use crate::api::Result; use crate::catalog::namespace::validate_namespace_ident; use crate::catalog::require_warehouse_id; use crate::request_metadata::RequestMetadata; -use crate::service::auth::AuthZHandler; -use crate::service::{Catalog, SecretStore, State}; +use crate::service::authz::{Authorizer, NamespaceAction, ViewAction, WarehouseAction}; +use crate::service::{Catalog, SecretStore, State, Transaction}; use iceberg_ext::catalog::rest::ListTablesResponse; -pub(crate) async fn list_views( +pub(crate) async fn list_views( parameters: NamespaceParameters, pagination_query: PaginationQuery, state: ApiContext>, @@ -20,27 +20,51 @@ pub(crate) async fn list_views( validate_namespace_ident(&namespace)?; // ------------------- AUTHZ ------------------- - A::check_list_views( - &request_metadata, - warehouse_id, - &namespace, - state.v1_state.auth, - ) - .await?; + let authorizer = state.v1_state.authz; + authorizer + .require_warehouse_action(&request_metadata, warehouse_id, &WarehouseAction::CanUse) + .await?; + let mut t: ::Transaction = + C::Transaction::begin_read(state.v1_state.catalog).await?; + let namespace_id = C::namespace_to_id(warehouse_id, &namespace, t.transaction()).await; // We can't fail before AuthZ. 
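// --- Illustrative sketch, not part of this patch ---
// A plausible shape of the `set_not_found_status_code` helper imported from
// `crate::api` in `exists.rs` above and reused by `load_view` below. It mirrors the
// inline closure used in `rename_view` further down: an authorization failure is
// reported as 404 so the endpoint does not reveal whether the view exists. The error
// type name and the exact signature are assumptions.
fn set_not_found_status_code_sketch(
    mut e: iceberg_ext::catalog::rest::IcebergErrorResponse,
) -> iceberg_ext::catalog::rest::IcebergErrorResponse {
    e.error.code = http::StatusCode::NOT_FOUND.into();
    e
}
// --- end sketch ---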
- // ------------------- BUSINESS LOGIC ------------------- + authorizer + .require_namespace_action( + &request_metadata, + warehouse_id, + namespace_id, + &NamespaceAction::CanListViews, + ) + .await?; + // ------------------- BUSINESS LOGIC ------------------- let views = C::list_views( warehouse_id, &namespace, false, - state.v1_state.catalog.clone(), + t.transaction(), pagination_query, ) .await?; + // ToDo: Better pagination with non-empty pages + let next_page_token = views.next_page_token; + let identifiers = futures::future::try_join_all(views.tabulars.iter().map(|t| { + authorizer.is_allowed_view_action( + &request_metadata, + warehouse_id, + *t.0, + &ViewAction::CanShowInList, + ) + })) + .await? + .into_iter() + .zip(views.tabulars.into_iter()) + .filter_map(|(allowed, table)| if allowed { Some(table.1) } else { None }) + .collect(); + Ok(ListTablesResponse { - next_page_token: None, - identifiers: views.into_iter().map(|t| t.1).collect(), + next_page_token, + identifiers, }) } diff --git a/crates/iceberg-catalog/src/catalog/views/load.rs b/crates/iceberg-catalog/src/catalog/views/load.rs index 8e9c7427..36c4bc5f 100644 --- a/crates/iceberg-catalog/src/catalog/views/load.rs +++ b/crates/iceberg-catalog/src/catalog/views/load.rs @@ -1,17 +1,16 @@ use crate::api::iceberg::v1::{DataAccess, ViewParameters}; -use crate::api::ApiContext; +use crate::api::{set_not_found_status_code, ApiContext}; use crate::catalog::require_warehouse_id; use crate::catalog::tables::{require_active_warehouse, validate_table_or_view_ident}; use crate::catalog::views::parse_view_location; use crate::request_metadata::RequestMetadata; -use crate::service::auth::AuthZHandler; +use crate::service::authz::{Authorizer, ViewAction, WarehouseAction}; use crate::service::storage::{StorageCredential, StoragePermissions}; use crate::service::{Catalog, SecretStore, State, Transaction, ViewMetadataWithLocation}; use crate::service::{GetWarehouseResponse, Result}; -use http::StatusCode; -use iceberg_ext::catalog::rest::{ErrorModel, LoadViewResult}; +use iceberg_ext::catalog::rest::LoadViewResult; -pub(crate) async fn load_view( +pub(crate) async fn load_view( parameters: ViewParameters, state: ApiContext>, data_access: DataAccess, @@ -36,31 +35,23 @@ pub(crate) async fn load_view( } // ------------------- AUTHZ ------------------- - let view_id = C::view_ident_to_id(warehouse_id, &view, state.v1_state.catalog.clone()) + let authorizer = state.v1_state.authz; + authorizer + .require_warehouse_action(&request_metadata, warehouse_id, &WarehouseAction::CanUse) + .await?; + let mut t = C::Transaction::begin_read(state.v1_state.catalog).await?; + let view_id = C::view_to_id(warehouse_id, &view, t.transaction()).await; // We can't fail before AuthZ + let view_id = authorizer + .require_view_action( + &request_metadata, + warehouse_id, + view_id, + &ViewAction::CanGetMetadata, + ) .await - // We can't fail before AuthZ. 
- .transpose(); - - A::check_load_view( - &request_metadata, - warehouse_id, - Some(&view.namespace), - view_id.as_ref().and_then(|id| id.as_ref().ok()), - state.v1_state.auth, - ) - .await?; + .map_err(set_not_found_status_code)?; // ------------------- BUSINESS LOGIC ------------------- - let view_id = view_id.transpose()?.ok_or_else(|| { - tracing::debug!("View does not exist."); - ErrorModel::builder() - .code(StatusCode::NOT_FOUND.into()) - .message(format!("View does not exist in warehouse {warehouse_id}")) - .r#type("ViewNotFound".to_string()) - .build() - })?; - let mut transaction = C::Transaction::begin_read(state.v1_state.catalog).await?; - let GetWarehouseResponse { id: _, name: _, @@ -69,13 +60,13 @@ pub(crate) async fn load_view( storage_secret_id, status, tabular_delete_profile: _, - } = C::require_warehouse(warehouse_id, transaction.transaction()).await?; + } = C::require_warehouse(warehouse_id, t.transaction()).await?; require_active_warehouse(status)?; let ViewMetadataWithLocation { metadata_location, metadata: view_metadata, - } = C::load_view(view_id, false, transaction.transaction()).await?; + } = C::load_view(view_id, false, t.transaction()).await?; let view_location = parse_view_location(&view_metadata.location)?; @@ -108,7 +99,7 @@ pub(crate) async fn load_view( config: Some(access.into()), }; - transaction.commit().await?; + t.commit().await?; Ok(load_table_result) } @@ -121,7 +112,7 @@ pub(crate) mod test { use crate::implementations::postgres::secrets::SecretsState; use crate::implementations::postgres::PostgresCatalog; - use crate::implementations::AllowAllAuthZHandler; + use crate::service::authz::AllowAllAuthorizer; use crate::service::State; @@ -134,11 +125,11 @@ pub(crate) mod test { use crate::catalog::views::test::setup; pub(crate) async fn load_view( - api_context: ApiContext>, + api_context: ApiContext>, params: ViewParameters, ) -> crate::api::Result { - as views::Service< - State, + as views::Service< + State, >>::load_view( params, api_context, diff --git a/crates/iceberg-catalog/src/catalog/views/rename.rs b/crates/iceberg-catalog/src/catalog/views/rename.rs index c2ba047e..bebd7b11 100644 --- a/crates/iceberg-catalog/src/catalog/views/rename.rs +++ b/crates/iceberg-catalog/src/catalog/views/rename.rs @@ -3,17 +3,17 @@ use crate::api::ApiContext; use crate::catalog::require_warehouse_id; use crate::catalog::tables::{maybe_body_to_json, validate_table_or_view_ident}; use crate::request_metadata::RequestMetadata; -use crate::service::auth::AuthZHandler; +use crate::service::authz::{Authorizer, NamespaceAction, ViewAction, WarehouseAction}; use crate::service::contract_verification::ContractVerification; use crate::service::event_publisher::EventMetadata; -use crate::service::tabular_idents::TabularIdentUuid; use crate::service::Result; +use crate::service::TabularIdentUuid; use crate::service::{Catalog, SecretStore, State, Transaction}; use http::StatusCode; -use iceberg_ext::catalog::rest::{ErrorModel, RenameTableRequest}; +use iceberg_ext::catalog::rest::RenameTableRequest; use uuid::Uuid; -pub(crate) async fn rename_view( +pub(crate) async fn rename_view( prefix: Option, request: RenameTableRequest, state: ApiContext>, @@ -29,29 +29,36 @@ pub(crate) async fn rename_view( validate_table_or_view_ident(destination)?; // ------------------- AUTHZ ------------------- - let source_id = C::view_ident_to_id( - warehouse_id, - &request.source, - state.v1_state.catalog.clone(), - ) - .await - // We can't fail before AuthZ. 
- .transpose(); - + let authorizer = state.v1_state.authz; + authorizer + .require_warehouse_action(&request_metadata, warehouse_id, &WarehouseAction::CanUse) + .await?; + let mut t = C::Transaction::begin_write(state.v1_state.catalog).await?; + + let source_id = C::view_to_id(warehouse_id, &request.source, t.transaction()).await; // We can't fail before AuthZ; + let source_id = authorizer + .require_view_action( + &request_metadata, + warehouse_id, + source_id, + &ViewAction::CanRename, + ) + .await + .map_err(|mut e| { + e.error.code = StatusCode::NOT_FOUND.into(); + e + })?; // We need to be allowed to delete the old table and create the new one - let rename_check = A::check_rename_view( - &request_metadata, - warehouse_id, - source_id.as_ref().and_then(|id| id.as_ref().ok()), - state.v1_state.auth.clone(), - ); - let create_check = A::check_create_view( - &request_metadata, - warehouse_id, - &destination.namespace, - state.v1_state.auth, - ); - futures::try_join!(rename_check, create_check)?; + let namespace_id = C::namespace_to_id(warehouse_id, &destination.namespace, t.transaction()).await; // We can't fail before AuthZ + // We also need to be allowed to create a view in the destination namespace + authorizer + .require_namespace_action( + &request_metadata, + warehouse_id, + namespace_id, + &NamespaceAction::CanCreateView, + ) + .await?; // ------------------- BUSINESS LOGIC ------------------- if source == destination { @@ -59,23 +66,12 @@ pub(crate) async fn rename_view( } let body = maybe_body_to_json(&request); - let source_id = source_id.transpose()?.ok_or_else(|| { - tracing::debug!("View does not exist."); - ErrorModel::builder() - .code(StatusCode::NOT_FOUND.into()) - .message(format!("View does not exist in warehouse {warehouse_id}")) - .r#type("ViewNotFound".to_string()) - .build() - })?; - - let mut transaction = C::Transaction::begin_write(state.v1_state.catalog).await?; - C::rename_view( warehouse_id, source_id, source, destination, - transaction.transaction(), + t.transaction(), ) .await?; @@ -86,7 +82,7 @@ pub(crate) async fn rename_view( .await? .into_result()?; - transaction.commit().await?; + t.commit().await?; let _ = state .v1_state @@ -191,6 +187,7 @@ mod test { let new_ns = initialize_namespace(api_context.v1_state.catalog.clone(), whi, &namespace, None) .await + .1 .namespace; let view_name = "my-view"; diff --git a/crates/iceberg-catalog/src/config.rs b/crates/iceberg-catalog/src/config.rs index 821a9437..85cf6a26 100644 --- a/crates/iceberg-catalog/src/config.rs +++ b/crates/iceberg-catalog/src/config.rs @@ -8,7 +8,7 @@ use std::str::FromStr; use url::Url; use crate::service::task_queue::TaskQueueConfig; -use crate::WarehouseIdent; +use crate::{ProjectIdent, WarehouseIdent}; use itertools::Itertools; use serde::{Deserialize, Deserializer, Serialize}; use veil::Redact; @@ -19,26 +19,41 @@ const DEFAULT_ENCRYPTION_KEY: &str = "" lazy_static::lazy_static! { /// Configuration of the service module.
pub static ref CONFIG: DynAppConfig = { - let defaults = figment::providers::Serialized::defaults(DynAppConfig::default()); - let mut config = figment::Figment::from(defaults) - .merge(figment::providers::Env::prefixed("ICEBERG_REST__").split("__")) - .extract::() - .expect("Valid Configuration"); - - config.reserved_namespaces.extend(DEFAULT_RESERVED_NAMESPACES.into_iter().map(str::to_string)); - - // Fail early if the base_uri is not a valid URL - config.s3_signer_uri_for_warehouse(WarehouseIdent::from(uuid::Uuid::new_v4())); - config.base_uri_catalog(); - config.base_uri_management(); - if config.secret_backend == SecretBackend::Postgres && config.pg_encryption_key == DEFAULT_ENCRYPTION_KEY { - tracing::warn!("THIS IS UNSAFE! Using default encryption key for secrets in postgres, please set a proper key using ICEBERG_REST__PG_ENCRYPTION_KEY environment variable."); - } - - config + get_config() }; } +fn get_config() -> DynAppConfig { + let defaults = figment::providers::Serialized::defaults(DynAppConfig::default()); + #[cfg(not(test))] + let mut config = figment::Figment::from(defaults) + .merge(figment::providers::Env::prefixed("ICEBERG_REST__").split("__")) + .extract::() + .expect("Valid Configuration"); + + #[cfg(test)] + let mut config = figment::Figment::from(defaults) + .merge(figment::providers::Env::prefixed("LAKEKEEPER_TEST__").split("__")) + .extract::() + .expect("Valid Configuration"); + + config + .reserved_namespaces + .extend(DEFAULT_RESERVED_NAMESPACES.into_iter().map(str::to_string)); + + // Fail early if the base_uri is not a valid URL + config.s3_signer_uri_for_warehouse(WarehouseIdent::from(uuid::Uuid::new_v4())); + config.base_uri_catalog(); + config.base_uri_management(); + if config.secret_backend == SecretBackend::Postgres + && config.pg_encryption_key == DEFAULT_ENCRYPTION_KEY + { + tracing::warn!("THIS IS UNSAFE! Using default encryption key for secrets in postgres, please set a proper key using ICEBERG_REST__PG_ENCRYPTION_KEY environment variable."); + } + + config +} + #[derive(Clone, Deserialize, Serialize, PartialEq, Redact)] /// Configuration of this Module pub struct DynAppConfig { @@ -53,7 +68,7 @@ pub struct DynAppConfig { /// The default Project ID to use. We recommend setting this /// only for singe-project deployments. A single project /// can still contain multiple warehouses. - pub default_project_id: Option, + pub default_project_id: Option, /// Template to obtain the "prefix" for a warehouse, /// may contain `{warehouse_id}` placeholder. 
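// --- Illustrative sketch, not part of this patch ---
// The new authorization settings expressed as environment variables, using the
// `ICEBERG_REST__` prefix and the `__` nesting separator wired up in `get_config`
// above (tests use the `LAKEKEEPER_TEST__` prefix instead). Key names follow the
// `authz_backend` and `openfga.*` fields introduced further down; all values are
// placeholders.
#[allow(dead_code)]
fn example_openfga_env_sketch() {
    // Select the OpenFGA backend; the default backend is allow-all.
    std::env::set_var("ICEBERG_REST__AUTHZ_BACKEND", "openfga");
    // gRPC endpoint of the OpenFGA server (placeholder URL).
    std::env::set_var("ICEBERG_REST__OPENFGA__ENDPOINT", "http://localhost:8081");
    // Optional: the store name defaults to "lakekeeper" when unset.
    std::env::set_var("ICEBERG_REST__OPENFGA__STORE_NAME", "lakekeeper");
    // Authentication: either an API key, or client credentials
    // (client_id additionally requires client_secret and token_endpoint).
    std::env::set_var("ICEBERG_REST__OPENFGA__API_KEY", "placeholder-api-key");
}
// --- end sketch ---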
/// @@ -101,9 +116,18 @@ pub struct DynAppConfig { #[redact] pub nats_token: Option, - // ------------- AUTHORIZATION ------------- + // ------------- AUTHENTICATION ------------- pub openid_provider_uri: Option, + // ------------- AUTHORIZATION - OPENFGA ------------- + #[serde(default)] + pub authz_backend: AuthZBackend, + #[serde( + deserialize_with = "deserialize_openfga_config", + serialize_with = "serialize_openfga_config" + )] + pub openfga: Option, + // ------------- Health ------------- pub health_check_frequency_seconds: u64, pub health_check_jitter_millis: u64, @@ -147,6 +171,49 @@ where duration.num_seconds().to_string().serialize(serializer) } +#[derive(Clone, Serialize, Deserialize, PartialEq, veil::Redact)] +#[serde(rename_all = "snake_case")] +pub enum OpenFGAAuth { + Anonymous, + ClientCredentials { + client_id: String, + #[redact] + client_secret: String, + token_endpoint: String, + }, + #[redact(all)] + ApiKey(String), +} + +#[derive(Clone, Serialize, Deserialize, PartialEq, Debug)] +pub struct OpenFGAConfig { + /// GRPC Endpoint Url + pub endpoint: Url, + /// Store Name - if not specified, `lakekeeper` is used. + #[serde(default = "default_openfga_store_name")] + pub store_name: String, + /// Server Name - Top level requests are made for `server:{server_id`} + /// If not specified, 00000000-0000-0000-0000-000000000000 is used. + #[serde(default = "uuid::Uuid::nil")] + pub server_id: uuid::Uuid, + /// API-Key. If client-id is specified, this is ignored. + pub auth: OpenFGAAuth, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub enum AuthZBackend { + #[serde(alias = "allowall", alias = "AllowAll", alias = "ALLOWALL")] + AllowAll, + #[serde(alias = "openfga", alias = "OpenFGA", alias = "OPENFGA")] + OpenFGA, +} + +impl Default for AuthZBackend { + fn default() -> Self { + Self::AllowAll + } +} + #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] pub enum SecretBackend { #[serde(alias = "kv2", alias = "Kv2")] @@ -202,6 +269,8 @@ impl Default for DynAppConfig { health_check_frequency_seconds: 10, health_check_jitter_millis: 500, kv2: None, + authz_backend: AuthZBackend::AllowAll, + openfga: None, secret_backend: SecretBackend::Postgres, queue_config: TaskQueueConfig::default(), default_tabular_expiration_delay_seconds: chrono::Duration::days(7), @@ -318,6 +387,115 @@ where value.0.iter().join(",").serialize(serializer) } +#[derive(Serialize, Deserialize, PartialEq, veil::Redact)] +struct OpenFGAConfigSerde { + /// GRPC Endpoint Url + endpoint: Url, + /// Store Name - if not specified, `lakekeeper` is used. + #[serde(default = "default_openfga_store_name")] + store_name: String, + /// Server Name - Top level requests are made for `server:{server_id`} + /// If not specified, 00000000-0000-0000-0000-000000000000 is used. + #[serde(default = "uuid::Uuid::nil")] + server_id: uuid::Uuid, + /// API-Key. If client-id is specified, this is ignored. + api_key: Option, + /// Client id + client_id: Option, + #[redact] + /// Client secret + client_secret: Option, + /// Token Endpoint to use when exchanging client credentials for an access token. + token_endpoint: Option, +} + +fn default_openfga_store_name() -> String { + "lakekeeper".to_string() +} + +fn deserialize_openfga_config<'de, D>(deserializer: D) -> Result, D::Error> +where + D: Deserializer<'de>, +{ + let Some(OpenFGAConfigSerde { + client_id, + client_secret, + token_endpoint, + api_key, + endpoint, + store_name, + server_id, + }) = Option::::deserialize(deserializer)? 
+ else { + return Ok(None); + }; + + let auth = if let Some(client_id) = client_id { + let client_secret = client_secret.ok_or_else(|| { + serde::de::Error::custom( + "openfga client_secret is required when client_id is specified", + ) + })?; + let token_endpoint = token_endpoint.ok_or_else(|| { + serde::de::Error::custom( + "openfga token_endpoint is required when client_id is specified", + ) + })?; + OpenFGAAuth::ClientCredentials { + client_id, + client_secret, + token_endpoint, + } + } else { + api_key.map_or(OpenFGAAuth::Anonymous, OpenFGAAuth::ApiKey) + }; + + Ok(Some(OpenFGAConfig { + endpoint, + store_name, + server_id, + auth, + })) +} + +fn serialize_openfga_config( + value: &Option, + serializer: S, +) -> Result +where + S: serde::Serializer, +{ + let Some(value) = value else { + return None::.serialize(serializer); + }; + + let (client_id, client_secret, token_endpoint, api_key) = match &value.auth { + OpenFGAAuth::ClientCredentials { + client_id, + client_secret, + token_endpoint, + } => ( + Some(client_id), + Some(client_secret), + Some(token_endpoint), + None, + ), + OpenFGAAuth::ApiKey(api_key) => (None, None, None, Some(api_key.clone())), + OpenFGAAuth::Anonymous => (None, None, None, None), + }; + + OpenFGAConfigSerde { + client_id: client_id.cloned(), + client_secret: client_secret.cloned(), + token_endpoint: token_endpoint.cloned(), + api_key, + endpoint: value.endpoint.clone(), + store_name: value.store_name.clone(), + server_id: value.server_id, + } + .serialize(serializer) +} + #[cfg(test)] mod test { #[allow(unused_imports)] @@ -333,4 +511,74 @@ mod test { assert!(CONFIG.reserved_namespaces.contains("system")); assert!(CONFIG.reserved_namespaces.contains("examples")); } + + #[test] + fn test_openfga_config_no_auth() { + figment::Jail::expect_with(|jail| { + jail.set_env("LAKEKEEPER_TEST__AUTHZ_BACKEND", "openfga"); + jail.set_env("LAKEKEEPER_TEST__OPENFGA__STORE_NAME", "store_name"); + let config = get_config(); + let authz_config = config.openfga.unwrap(); + assert_eq!(config.authz_backend, AuthZBackend::OpenFGA); + assert_eq!(authz_config.store_name, "store_name"); + + assert_eq!(authz_config.auth, OpenFGAAuth::Anonymous); + + Ok(()) + }); + } + + #[test] + fn test_openfga_config_api_key() { + figment::Jail::expect_with(|jail| { + jail.set_env("LAKEKEEPER_TEST__AUTHZ_BACKEND", "openfga"); + jail.set_env("LAKEKEEPER_TEST__OPENFGA__API_KEY", "api_key"); + let config = get_config(); + let authz_config = config.openfga.unwrap(); + assert_eq!(config.authz_backend, AuthZBackend::OpenFGA); + assert_eq!(authz_config.store_name, "lakekeeper"); + + assert_eq!( + authz_config.auth, + OpenFGAAuth::ApiKey("api_key".to_string()) + ); + Ok(()) + }); + } + + #[test] + #[should_panic(expected = "openfga client_secret is required when client_id is specified")] + fn test_openfga_client_config_fails_without_token() { + figment::Jail::expect_with(|jail| { + jail.set_env("LAKEKEEPER_TEST__AUTHZ_BACKEND", "openfga"); + jail.set_env("LAKEKEEPER_TEST__OPENFGA__CLIENT_ID", "client_id"); + jail.set_env("LAKEKEEPER_TEST__OPENFGA__STORE_NAME", "store_name"); + get_config(); + Ok(()) + }); + } + + #[test] + fn test_openfga_client_credentials() { + figment::Jail::expect_with(|jail| { + jail.set_env("LAKEKEEPER_TEST__AUTHZ_BACKEND", "openfga"); + jail.set_env("LAKEKEEPER_TEST__OPENFGA__CLIENT_ID", "client_id"); + jail.set_env("LAKEKEEPER_TEST__OPENFGA__CLIENT_SECRET", "client_secret"); + jail.set_env("LAKEKEEPER_TEST__OPENFGA__TOKEN_ENDPOINT", "token_endpoint"); + let config = get_config(); + 
let authz_config = config.openfga.unwrap(); + assert_eq!(config.authz_backend, AuthZBackend::OpenFGA); + assert_eq!(authz_config.store_name, "lakekeeper"); + + assert_eq!( + authz_config.auth, + OpenFGAAuth::ClientCredentials { + client_id: "client_id".to_string(), + client_secret: "client_secret".to_string(), + token_endpoint: "token_endpoint".to_string() + } + ); + Ok(()) + }); + } } diff --git a/crates/iceberg-catalog/src/implementations/authz.rs b/crates/iceberg-catalog/src/implementations/authz.rs deleted file mode 100644 index aa294778..00000000 --- a/crates/iceberg-catalog/src/implementations/authz.rs +++ /dev/null @@ -1,348 +0,0 @@ -use crate::api::{iceberg::v1::NamespaceIdent, Result}; -use crate::request_metadata::RequestMetadata; -use crate::service::health::HealthExt; -use crate::{ - implementations::DEFAULT_PROJECT_ID, - service::{ - auth::{AuthConfigHandler, AuthZHandler, UserWarehouse}, - TableIdentUuid, - }, - ProjectIdent, WarehouseIdent, -}; -use async_trait::async_trait; -use std::collections::HashSet; - -#[derive(Clone, Debug, Default)] -pub struct AllowAllAuthState; - -#[async_trait] -impl HealthExt for AllowAllAuthState { - async fn health(&self) -> Vec { - vec![] - } - - async fn update_health(&self) {} -} - -#[derive(Clone, Debug, Default)] -/// Allow absolutely, gloriously, everything. -pub struct AllowAllAuthZHandler; - -#[async_trait::async_trait] -impl AuthConfigHandler for AllowAllAuthZHandler { - async fn get_and_validate_user_warehouse( - _: AllowAllAuthState, - _: &RequestMetadata, - ) -> Result { - // The AuthHandler should return the user's project or warehouse if this - // information is available. Otherwise return "None". - // This requires the user to specify the project as part of the "warehouse" provided to the GET /config - // endpoint. - Ok(UserWarehouse { - project_id: Some(ProjectIdent::from(DEFAULT_PROJECT_ID)), - warehouse_id: None, - }) - } - - async fn exchange_token_for_warehouse( - _: AllowAllAuthState, - _: &RequestMetadata, - _: &ProjectIdent, - _: WarehouseIdent, - ) -> Result> { - Ok(None) - } - - async fn check_list_warehouse_in_project( - _: AllowAllAuthState, - _: &ProjectIdent, - _: &RequestMetadata, - ) -> Result<()> { - Ok(()) - } - - async fn check_user_get_config_for_warehouse( - _: AllowAllAuthState, - _: WarehouseIdent, - _: &RequestMetadata, - ) -> Result<()> { - Ok(()) - } -} - -#[async_trait::async_trait] -impl AuthZHandler for AllowAllAuthZHandler { - type State = AllowAllAuthState; - - async fn check_list_namespace( - _: &RequestMetadata, - _: WarehouseIdent, - _: Option<&NamespaceIdent>, - _: AllowAllAuthState, - ) -> Result<()> { - Ok(()) - } - - async fn check_create_namespace( - _: &RequestMetadata, - _: WarehouseIdent, - _: Option<&NamespaceIdent>, - _: AllowAllAuthState, - ) -> Result<()> { - Ok(()) - } - - async fn check_load_namespace_metadata( - _: &RequestMetadata, - _: WarehouseIdent, - _: &NamespaceIdent, - _: AllowAllAuthState, - ) -> Result<()> { - Ok(()) - } - - // Should check if the user is allowed to check if a namespace exists, - // not check if the namespace exists. 
- async fn check_namespace_exists( - _: &RequestMetadata, - _: WarehouseIdent, - _: &NamespaceIdent, - _: AllowAllAuthState, - ) -> Result<()> { - Ok(()) - } - - async fn check_drop_namespace( - _: &RequestMetadata, - _: WarehouseIdent, - _: &NamespaceIdent, - _: AllowAllAuthState, - ) -> Result<()> { - Ok(()) - } - - async fn check_update_namespace_properties( - _: &RequestMetadata, - _: WarehouseIdent, - _: &NamespaceIdent, - _: AllowAllAuthState, - ) -> Result<()> { - Ok(()) - } - - async fn check_create_table( - _: &RequestMetadata, - _: WarehouseIdent, - _: &NamespaceIdent, - _: AllowAllAuthState, - ) -> Result<()> { - Ok(()) - } - - async fn check_list_tables( - _: &RequestMetadata, - _: WarehouseIdent, - _: &NamespaceIdent, - _: AllowAllAuthState, - ) -> Result<()> { - Ok(()) - } - - async fn check_rename_table( - _: &RequestMetadata, - _: WarehouseIdent, - _: Option, - _: AllowAllAuthState, - ) -> Result<()> { - Ok(()) - } - - async fn check_load_table( - _: &RequestMetadata, - _: WarehouseIdent, - _: Option<&NamespaceIdent>, - _: Option, - _: AllowAllAuthState, - ) -> Result<()> { - Ok(()) - } - - async fn check_table_exists( - _: &RequestMetadata, - _: WarehouseIdent, - _: Option<&NamespaceIdent>, - _: Option, - _: AllowAllAuthState, - ) -> Result<()> { - Ok(()) - } - - async fn check_drop_table( - _: &RequestMetadata, - _: WarehouseIdent, - _: Option, - _: AllowAllAuthState, - ) -> Result<()> { - Ok(()) - } - - async fn check_commit_table( - _: &RequestMetadata, - _: WarehouseIdent, - _: Option, - _: Option<&NamespaceIdent>, - _: Self::State, - ) -> Result<()> { - Ok(()) - } - - // ---------------- Management API ---------------- - async fn check_create_warehouse( - _: &RequestMetadata, - _: &ProjectIdent, - _: Self::State, - ) -> Result<()> { - Ok(()) - } - - // Return an error if the user is not authorized. - // Return Ok(None) if the user is authorized to list all existing projects. - // Return Ok(Some(projects)) if the user is authorized to list only the - // specified projects. 
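// --- Illustrative note, not part of this patch ---
// The allow-all handler being deleted here is superseded by
// `crate::service::authz::AllowAllAuthorizer`, which the view tests above construct as
// a plain unit struct. A minimal sketch of its likely shape; the exact definition and
// its `Authorizer` implementation (every `require_*` / `is_allowed_*` check simply
// succeeds) live in the new `service::authz` module.
#[derive(Clone, Debug, Default)]
pub struct AllowAllAuthorizerSketch;
// --- end sketch ---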
- async fn check_list_projects( - _: &RequestMetadata, - _: Self::State, - ) -> Result>> { - Ok(None) - } - - async fn check_list_warehouse_in_project( - _: &RequestMetadata, - _: ProjectIdent, - _: Self::State, - ) -> Result>> { - Ok(None) - } - - async fn check_delete_warehouse( - _: &RequestMetadata, - _: WarehouseIdent, - _: Self::State, - ) -> Result<()> { - Ok(()) - } - - async fn check_get_warehouse( - _: &RequestMetadata, - _: WarehouseIdent, - _: Self::State, - ) -> Result<()> { - Ok(()) - } - - async fn check_rename_warehouse( - _: &RequestMetadata, - _: WarehouseIdent, - _: Self::State, - ) -> Result<()> { - Ok(()) - } - - async fn check_deactivate_warehouse( - _: &RequestMetadata, - _: WarehouseIdent, - _: Self::State, - ) -> Result<()> { - Ok(()) - } - - async fn check_activate_warehouse( - _: &RequestMetadata, - _: WarehouseIdent, - _: Self::State, - ) -> Result<()> { - Ok(()) - } - - async fn check_update_storage( - _: &RequestMetadata, - _: WarehouseIdent, - _: Self::State, - ) -> Result<()> { - Ok(()) - } - - async fn check_create_view( - _metadata: &RequestMetadata, - _warehouse_id: WarehouseIdent, - _namespace: &NamespaceIdent, - _state: Self::State, - ) -> Result<()> { - Ok(()) - } - - async fn check_view_exists( - _metadata: &RequestMetadata, - _warehouse_id: WarehouseIdent, - _namespace: Option<&NamespaceIdent>, - _view: Option<&TableIdentUuid>, - _state: Self::State, - ) -> Result<()> { - Ok(()) - } - - async fn check_list_views( - _metadata: &RequestMetadata, - _warehouse_id: WarehouseIdent, - _namespace: &NamespaceIdent, - _state: Self::State, - ) -> Result<()> { - Ok(()) - } - - async fn check_drop_view( - _metadata: &RequestMetadata, - _warehouse_id: WarehouseIdent, - _table: Option<&TableIdentUuid>, - _state: Self::State, - ) -> Result<()> { - Ok(()) - } - - async fn check_load_view( - _metadata: &RequestMetadata, - _warehouse_id: WarehouseIdent, - _namespace: Option<&NamespaceIdent>, - _view: Option<&TableIdentUuid>, - _state: Self::State, - ) -> Result<()> { - Ok(()) - } - - async fn check_commit_view( - _: &RequestMetadata, - _: WarehouseIdent, - _: Option<&TableIdentUuid>, - _: Option<&NamespaceIdent>, - _: Self::State, - ) -> Result<()> { - Ok(()) - } - - async fn check_rename_view( - _: &RequestMetadata, - _: WarehouseIdent, - _: Option<&TableIdentUuid>, - _: Self::State, - ) -> Result<()> { - Ok(()) - } - - async fn check_list_soft_deletions( - _: &RequestMetadata, - _: WarehouseIdent, - _: Self::State, - ) -> Result<()> { - Ok(()) - } -} diff --git a/crates/iceberg-catalog/src/implementations/mod.rs b/crates/iceberg-catalog/src/implementations/mod.rs index 6ab754e6..81c521a3 100644 --- a/crates/iceberg-catalog/src/implementations/mod.rs +++ b/crates/iceberg-catalog/src/implementations/mod.rs @@ -1,10 +1,81 @@ -/// Default project ID used for single-project deployments. 
-pub const DEFAULT_PROJECT_ID: uuid::Uuid = uuid::uuid!("00000000-0000-0000-0000-000000000000"); +use crate::{ + service::{ + health::{Health, HealthExt}, + secrets::{Secret, SecretInStorage}, + SecretStore, + }, + SecretIdent, +}; +use async_trait::async_trait; #[cfg(feature = "sqlx-postgres")] pub mod postgres; -mod authz; pub mod kv2; -pub use authz::{AllowAllAuthState, AllowAllAuthZHandler}; +#[derive(Debug, Clone)] +pub enum Secrets { + Postgres(crate::implementations::postgres::SecretsState), + KV2(crate::implementations::kv2::SecretsState), +} + +#[async_trait] +impl SecretStore for Secrets { + async fn get_secret_by_id( + &self, + secret_id: &SecretIdent, + ) -> crate::api::Result> { + match self { + Self::Postgres(state) => state.get_secret_by_id(secret_id).await, + Self::KV2(state) => state.get_secret_by_id(secret_id).await, + } + } + + async fn create_secret< + S: SecretInStorage + Send + Sync + serde::Serialize + std::fmt::Debug, + >( + &self, + secret: S, + ) -> crate::api::Result { + match self { + Self::Postgres(state) => state.create_secret(secret).await, + Self::KV2(state) => state.create_secret(secret).await, + } + } + + async fn delete_secret(&self, secret_id: &SecretIdent) -> crate::api::Result<()> { + match self { + Self::Postgres(state) => state.delete_secret(secret_id).await, + Self::KV2(state) => state.delete_secret(secret_id).await, + } + } +} + +#[async_trait] +impl HealthExt for Secrets { + async fn health(&self) -> Vec { + match self { + Self::Postgres(state) => state.health().await, + Self::KV2(state) => state.health().await, + } + } + + async fn update_health(&self) { + match self { + Self::Postgres(state) => state.update_health().await, + Self::KV2(state) => state.update_health().await, + } + } +} + +impl From for Secrets { + fn from(state: crate::implementations::postgres::SecretsState) -> Self { + Self::Postgres(state) + } +} + +impl From for Secrets { + fn from(state: crate::implementations::kv2::SecretsState) -> Self { + Self::KV2(state) + } +} diff --git a/crates/iceberg-catalog/src/implementations/postgres/catalog.rs b/crates/iceberg-catalog/src/implementations/postgres/catalog.rs index 0f6c01c3..c9bc17ad 100644 --- a/crates/iceberg-catalog/src/implementations/postgres/catalog.rs +++ b/crates/iceberg-catalog/src/implementations/postgres/catalog.rs @@ -1,6 +1,6 @@ use super::{ namespace::{ - create_namespace, drop_namespace, get_namespace, list_namespaces, namespace_ident_to_id, + create_namespace, drop_namespace, get_namespace, list_namespaces, namespace_to_id, update_namespace_properties, }, tabular::table::{ @@ -9,18 +9,14 @@ use super::{ table_ident_to_id, table_idents_to_ids, }, warehouse::{ - create_project, create_warehouse, delete_project, delete_warehouse, get_project, - get_warehouse, list_projects, list_warehouses, rename_project, rename_warehouse, - set_warehouse_status, update_storage_profile, + create_project, create_warehouse, delete_project, delete_warehouse, + get_config_for_warehouse, get_project, get_warehouse, get_warehouse_by_name, list_projects, + list_warehouses, rename_project, rename_warehouse, set_warehouse_status, + update_storage_profile, }, CatalogState, PostgresTransaction, }; -use crate::api::management::v1::warehouse::TabularDeleteProfile; -use crate::implementations::postgres::tabular::view::{ - create_view, drop_view, list_views, load_view, rename_view, view_ident_to_id, -}; use crate::implementations::postgres::tabular::{list_tabulars, mark_tabular_as_deleted}; -use crate::service::tabular_idents::{TabularIdentOwned, 
TabularIdentUuid}; use crate::service::{ CreateNamespaceRequest, CreateNamespaceResponse, DeletionDetails, GetProjectResponse, GetWarehouseResponse, ListFlags, ListNamespacesQuery, ListNamespacesResponse, NamespaceIdent, @@ -30,6 +26,13 @@ use crate::{ api::iceberg::v1::{PaginatedTabulars, PaginationQuery}, service::TableCommit, }; +use crate::{api::management::v1::warehouse::TabularDeleteProfile, service::TabularIdentUuid}; +use crate::{ + implementations::postgres::tabular::view::{ + create_view, drop_view, list_views, load_view, rename_view, view_ident_to_id, + }, + service::TabularIdentOwned, +}; use crate::{ service::{ storage::StorageProfile, Catalog, CreateTableResponse, GetNamespaceResponse, @@ -39,7 +42,7 @@ use crate::{ SecretIdent, }; use iceberg::spec::ViewMetadata; -use iceberg_ext::configs::Location; +use iceberg_ext::{catalog::rest::CatalogConfig, configs::Location}; use std::collections::{HashMap, HashSet}; #[async_trait::async_trait] @@ -47,12 +50,27 @@ impl Catalog for super::PostgresCatalog { type Transaction = PostgresTransaction; type State = CatalogState; - async fn list_namespaces( + async fn get_warehouse_by_name( + warehouse_name: &str, + project_id: ProjectIdent, + catalog_state: CatalogState, + ) -> Result> { + get_warehouse_by_name(warehouse_name, project_id, catalog_state).await + } + + async fn get_config_for_warehouse( warehouse_id: WarehouseIdent, - query: &ListNamespacesQuery, catalog_state: CatalogState, + ) -> Result> { + get_config_for_warehouse(warehouse_id, catalog_state).await + } + + async fn list_namespaces<'a>( + warehouse_id: WarehouseIdent, + query: &ListNamespacesQuery, + transaction: >::Transaction<'a>, ) -> Result { - list_namespaces(warehouse_id, query, catalog_state).await + list_namespaces(warehouse_id, query, transaction).await } async fn create_namespace<'a>( @@ -66,35 +84,35 @@ impl Catalog for super::PostgresCatalog { async fn get_namespace<'a>( warehouse_id: WarehouseIdent, - namespace: &NamespaceIdent, + namespace_id: NamespaceIdentUuid, transaction: >::Transaction<'a>, ) -> Result { - get_namespace(warehouse_id, namespace, transaction).await + get_namespace(warehouse_id, namespace_id, transaction).await } - async fn namespace_ident_to_id( + async fn namespace_to_id<'a>( warehouse_id: WarehouseIdent, namespace: &NamespaceIdent, - catalog_state: CatalogState, + transaction: >::Transaction<'a>, ) -> Result> { - namespace_ident_to_id(warehouse_id, namespace, catalog_state).await + namespace_to_id(warehouse_id, namespace, transaction).await } async fn drop_namespace<'a>( warehouse_id: WarehouseIdent, - namespace: &NamespaceIdent, + namespace_id: NamespaceIdentUuid, transaction: >::Transaction<'a>, ) -> Result<()> { - drop_namespace(warehouse_id, namespace, transaction).await + drop_namespace(warehouse_id, namespace_id, transaction).await } async fn update_namespace_properties<'a>( warehouse_id: WarehouseIdent, - namespace: &NamespaceIdent, + namespace_id: NamespaceIdentUuid, properties: HashMap, transaction: >::Transaction<'a>, ) -> Result<()> { - update_namespace_properties(warehouse_id, namespace, properties, transaction).await + update_namespace_properties(warehouse_id, namespace_id, properties, transaction).await } async fn create_table<'a>( @@ -104,30 +122,30 @@ impl Catalog for super::PostgresCatalog { create_table(table_creation, transaction).await } - async fn list_tables( + async fn list_tables<'a>( warehouse_id: WarehouseIdent, namespace: &NamespaceIdent, - list_flags: crate::service::ListFlags, - catalog_state: CatalogState, + 
list_flags: ListFlags, + transaction: >::Transaction<'a>, pagination_query: PaginationQuery, ) -> Result> { list_tables( warehouse_id, namespace, list_flags, - catalog_state, + &mut **transaction, pagination_query, ) .await } - async fn table_ident_to_id( + async fn table_to_id<'a>( warehouse_id: WarehouseIdent, table: &TableIdent, - list_flags: crate::service::ListFlags, - catalog_state: Self::State, + list_flags: ListFlags, + transaction: >::Transaction<'a>, ) -> Result> { - table_ident_to_id(warehouse_id, table, list_flags, &catalog_state.read_pool()).await + table_ident_to_id(warehouse_id, table, list_flags, &mut **transaction).await } async fn table_idents_to_ids( @@ -154,7 +172,7 @@ impl Catalog for super::PostgresCatalog { table: TableIdentUuid, list_flags: crate::service::ListFlags, catalog_state: Self::State, - ) -> Result { + ) -> Result> { get_table_metadata_by_id(warehouse_id, table, list_flags, catalog_state).await } @@ -163,7 +181,7 @@ impl Catalog for super::PostgresCatalog { location: &Location, list_flags: crate::service::ListFlags, catalog_state: Self::State, - ) -> Result { + ) -> Result> { get_table_metadata_by_s3_location(warehouse_id, location, list_flags, catalog_state).await } @@ -261,16 +279,9 @@ impl Catalog for super::PostgresCatalog { async fn list_warehouses( project_id: ProjectIdent, include_inactive: Option>, - warehouse_id_filter: Option<&HashSet>, catalog_state: Self::State, ) -> Result> { - list_warehouses( - project_id, - include_inactive, - warehouse_id_filter, - catalog_state, - ) - .await + list_warehouses(project_id, include_inactive, catalog_state).await } async fn get_warehouse<'a>( @@ -318,12 +329,12 @@ impl Catalog for super::PostgresCatalog { .await } - async fn view_ident_to_id( + async fn view_to_id<'a>( warehouse_id: WarehouseIdent, view: &TableIdent, - catalog_state: Self::State, + transaction: >::Transaction<'a>, ) -> Result> { - view_ident_to_id(warehouse_id, view, false, &catalog_state.read_pool()).await + view_ident_to_id(warehouse_id, view, false, &mut **transaction).await } async fn create_view<'a>( @@ -353,18 +364,18 @@ impl Catalog for super::PostgresCatalog { load_view(view_id, include_deleted, &mut *transaction).await } - async fn list_views( + async fn list_views<'a>( warehouse_id: WarehouseIdent, namespace: &NamespaceIdent, include_deleted: bool, - catalog_state: Self::State, + transaction: >::Transaction<'a>, pagination_query: PaginationQuery, ) -> Result> { list_views( warehouse_id, namespace, include_deleted, - catalog_state, + &mut **transaction, pagination_query, ) .await diff --git a/crates/iceberg-catalog/src/implementations/postgres/namespace.rs b/crates/iceberg-catalog/src/implementations/postgres/namespace.rs index 6fc44235..b18039e6 100644 --- a/crates/iceberg-catalog/src/implementations/postgres/namespace.rs +++ b/crates/iceberg-catalog/src/implementations/postgres/namespace.rs @@ -1,4 +1,4 @@ -use super::{dbutils::DBErrorHandler, CatalogState}; +use super::dbutils::DBErrorHandler; use crate::api::iceberg::v1::MAX_PAGE_SIZE; use crate::implementations::postgres::pagination::{PaginateToken, V1PaginateToken}; use crate::service::{ @@ -14,36 +14,45 @@ use uuid::Uuid; pub(crate) async fn get_namespace( warehouse_id: WarehouseIdent, - namespace: &NamespaceIdent, + namespace_id: NamespaceIdentUuid, transaction: &mut sqlx::Transaction<'_, sqlx::Postgres>, ) -> Result { let row = sqlx::query!( r#" SELECT - namespace_id, + namespace_name as "namespace_name: Vec", + n.namespace_id, n.warehouse_id, namespace_properties as 
"properties: Json>>" FROM namespace n INNER JOIN warehouse w ON n.warehouse_id = w.warehouse_id - WHERE n.warehouse_id = $1 AND n.namespace_name = $2 + WHERE n.warehouse_id = $1 AND n.namespace_id = $2 AND w.status = 'active' "#, *warehouse_id, - namespace.as_ref() + *namespace_id ) .fetch_one(&mut **transaction) .await .map_err(|e| match e { sqlx::Error::RowNotFound => ErrorModel::builder() .code(StatusCode::NOT_FOUND.into()) - .message(format!("Namespace not found: {:?}", namespace.as_ref())) + .message(format!( + "Namespace with id {warehouse_id} not found in warehouse {namespace_id}" + )) .r#type("NamespaceNotFound".to_string()) .build(), _ => e.into_error_model("Error fetching namespace".to_string()), })?; Ok(GetNamespaceResponse { - namespace: namespace.to_owned(), + namespace: NamespaceIdent::from_vec(row.namespace_name.clone()).map_err(|e| { + ErrorModel::internal( + "Error converting namespace", + "NamespaceConversionError", + Some(Box::new(e)), + ) + })?, properties: row.properties.deref().clone(), namespace_id: row.namespace_id.into(), warehouse_id: row.warehouse_id.into(), @@ -58,7 +67,7 @@ pub(crate) async fn list_namespaces( page_size, parent, }: &ListNamespacesQuery, - catalog_state: CatalogState, + transaction: &mut sqlx::Transaction<'_, sqlx::Postgres>, ) -> Result { let page_size = page_size .map(i64::from) @@ -111,7 +120,7 @@ pub(crate) async fn list_namespaces( token_id, page_size ) - .fetch_all(&catalog_state.read_pool()) + .fetch_all(&mut **transaction) .await .map_err(|e| e.into_error_model("Error fetching Namespace".into()))? .into_iter() @@ -140,13 +149,14 @@ pub(crate) async fn list_namespaces( token_id, page_size ) - .fetch_all(&catalog_state.read_pool()) + .fetch_all(&mut **transaction) .await .map_err(|e| e.into_error_model("Error fetching Namespace".into()))? 
.into_iter() .map(|r| (r.namespace_id, r.namespace_name, r.created_at)) .collect() }; + let next_page_token = namespaces.last().map(|(id, _, ts)| { PaginateToken::V1(V1PaginateToken { id: *id, @@ -156,21 +166,21 @@ pub(crate) async fn list_namespaces( }); // Convert Vec> to Vec - let namespaces: Result> = namespaces + let namespaces = namespaces .iter() - .map(|(_, n, _)| { - NamespaceIdent::from_vec(n.to_owned()).map_err(|e| { - ErrorModel::builder() - .code(StatusCode::INTERNAL_SERVER_ERROR.into()) - .message("Error converting namespace".to_string()) - .r#type("NamespaceConversionError".to_string()) - .source(Some(Box::new(e))) - .build() + .map(|(id, n, _)| { + NamespaceIdent::from_vec(n.to_owned()) + .map_err(|e| { + ErrorModel::internal( + "Error converting namespace", + "NamespaceConversionError", + Some(Box::new(e)), + ) .into() - }) + }) + .map(|n| (id.into(), n)) }) - .collect(); - let namespaces = namespaces?; + .collect::>>()?; Ok(ListNamespacesResponse { next_page_token, @@ -256,10 +266,10 @@ pub(crate) async fn create_namespace( }) } -pub(crate) async fn namespace_ident_to_id( +pub(crate) async fn namespace_to_id( warehouse_id: WarehouseIdent, namespace: &NamespaceIdent, - catalog_state: CatalogState, + transaction: &mut sqlx::Transaction<'_, sqlx::Postgres>, ) -> Result> { let namespace_id = sqlx::query_scalar!( r#" @@ -272,7 +282,7 @@ pub(crate) async fn namespace_ident_to_id( *warehouse_id, &**namespace ) - .fetch_one(&catalog_state.read_pool()) + .fetch_one(&mut **transaction) .await .map_err(|e| match e { sqlx::Error::RowNotFound => None, @@ -288,7 +298,7 @@ pub(crate) async fn namespace_ident_to_id( pub(crate) async fn drop_namespace( warehouse_id: WarehouseIdent, - namespace: &NamespaceIdent, + namespace_id: NamespaceIdentUuid, transaction: &mut sqlx::Transaction<'_, sqlx::Postgres>, ) -> Result<()> { // Return 404 not found if namespace does not exist @@ -297,7 +307,7 @@ pub(crate) async fn drop_namespace( WITH deleted AS ( DELETE FROM namespace WHERE warehouse_id = $1 - AND namespace_name = $2 + AND namespace_id = $2 AND warehouse_id IN ( SELECT warehouse_id FROM warehouse WHERE status = 'active' ) @@ -306,16 +316,16 @@ pub(crate) async fn drop_namespace( SELECT count(*) FROM deleted "#, *warehouse_id, - &**namespace + *namespace_id ) .fetch_one(&mut **transaction) .await .map_err(|e| match &e { - sqlx::Error::RowNotFound => ErrorModel::builder() - .code(StatusCode::NOT_FOUND.into()) - .message(format!("Namespace not found: {:?}", namespace.as_ref())) - .r#type("NamespaceNotFound".to_string()) - .build(), + sqlx::Error::RowNotFound => ErrorModel::internal( + format!("Namespace {namespace_id} not found in warehouse {warehouse_id}"), + "NamespaceNotFound", + None, + ), sqlx::Error::Database(db_error) => { if db_error.is_foreign_key_violation() { ErrorModel::builder() @@ -331,12 +341,12 @@ pub(crate) async fn drop_namespace( })?; if row_count == Some(0) { - return Err(ErrorModel::builder() - .code(StatusCode::NOT_FOUND.into()) - .message(format!("Namespace not found: {:?}", namespace.as_ref())) - .r#type("NamespaceNotFound".to_string()) - .build() - .into()); + return Err(ErrorModel::internal( + format!("Namespace {namespace_id} not found in warehouse {warehouse_id}"), + "NamespaceNotFound", + None, + ) + .into()); } Ok(()) @@ -344,7 +354,7 @@ pub(crate) async fn drop_namespace( pub(crate) async fn update_namespace_properties( warehouse_id: WarehouseIdent, - namespace: &NamespaceIdent, + namespace_id: NamespaceIdentUuid, properties: HashMap, transaction: &mut 
sqlx::Transaction<'_, sqlx::Postgres>, ) -> Result<()> { @@ -361,14 +371,14 @@ pub(crate) async fn update_namespace_properties( r#" UPDATE namespace SET namespace_properties = $1 - WHERE warehouse_id = $2 AND namespace_name = $3 + WHERE warehouse_id = $2 AND namespace_id = $3 AND warehouse_id IN ( SELECT warehouse_id FROM warehouse WHERE status = 'active' ) "#, properties, *warehouse_id, - &**namespace + *namespace_id ) .execute(&mut **transaction) .await @@ -379,8 +389,7 @@ pub(crate) async fn update_namespace_properties( #[cfg(test)] pub(crate) mod tests { - - use crate::implementations::postgres::PostgresTransaction; + use crate::implementations::postgres::{CatalogState, PostgresTransaction}; use crate::service::{Catalog as _, Transaction as _}; use super::super::warehouse::test::initialize_warehouse; @@ -393,7 +402,7 @@ pub(crate) mod tests { warehouse_id: WarehouseIdent, namespace: &NamespaceIdent, properties: Option>, - ) -> CreateNamespaceResponse { + ) -> (NamespaceIdentUuid, CreateNamespaceResponse) { let mut transaction = PostgresTransaction::begin_write(state.clone()) .await .unwrap(); @@ -414,7 +423,7 @@ pub(crate) mod tests { transaction.commit().await.unwrap(); - response + (namespace_id, response) } #[sqlx::test] @@ -435,12 +444,17 @@ pub(crate) mod tests { let mut transaction = PostgresTransaction::begin_write(state.clone()) .await .unwrap(); + let namespace_id = + PostgresCatalog::namespace_to_id(warehouse_id, &namespace, transaction.transaction()) + .await + .unwrap() + .expect("Namespace not found"); - assert_eq!(response.namespace, namespace); - assert_eq!(response.properties, properties); + assert_eq!(response.1.namespace, namespace); + assert_eq!(response.1.properties, properties); let response = - PostgresCatalog::get_namespace(warehouse_id, &namespace, transaction.transaction()) + PostgresCatalog::get_namespace(warehouse_id, namespace_id, transaction.transaction()) .await .unwrap(); @@ -449,8 +463,12 @@ pub(crate) mod tests { assert_eq!(response.namespace, namespace); assert_eq!(response.properties, properties); + let mut transaction = PostgresTransaction::begin_read(state.clone()) + .await + .unwrap(); + let response = - PostgresCatalog::namespace_ident_to_id(warehouse_id, &namespace, state.clone()) + PostgresCatalog::namespace_to_id(warehouse_id, &namespace, transaction.transaction()) .await .unwrap() .is_some(); @@ -464,12 +482,15 @@ pub(crate) mod tests { page_size: None, parent: None, }, - state.clone(), + transaction.transaction(), ) .await .unwrap(); - assert_eq!(response.namespaces, vec![namespace.clone()]); + assert_eq!( + response.namespaces, + HashMap::from_iter(vec![(namespace_id, namespace.clone())]) + ); let mut transaction = PostgresTransaction::begin_write(state.clone()) .await @@ -481,7 +502,7 @@ pub(crate) mod tests { ]); PostgresCatalog::update_namespace_properties( warehouse_id, - &namespace, + namespace_id, new_props.clone(), transaction.transaction(), ) @@ -493,7 +514,7 @@ pub(crate) mod tests { let mut t = PostgresTransaction::begin_read(state.clone()) .await .unwrap(); - let response = PostgresCatalog::get_namespace(warehouse_id, &namespace, t.transaction()) + let response = PostgresCatalog::get_namespace(warehouse_id, namespace_id, t.transaction()) .await .unwrap(); drop(t); @@ -503,7 +524,7 @@ pub(crate) mod tests { .await .unwrap(); - PostgresCatalog::drop_namespace(warehouse_id, &namespace, transaction.transaction()) + PostgresCatalog::drop_namespace(warehouse_id, namespace_id, transaction.transaction()) .await .expect("Error dropping 
namespace"); } @@ -537,6 +558,10 @@ pub(crate) mod tests { let response3 = initialize_namespace(state.clone(), warehouse_id, &namespace, properties.clone()).await; + let mut t = PostgresTransaction::begin_read(state.clone()) + .await + .unwrap(); + let ListNamespacesResponse { namespaces, next_page_token, @@ -547,13 +572,20 @@ pub(crate) mod tests { page_size: Some(1), parent: None, }, - state.clone(), + t.transaction(), ) .await .unwrap(); assert_eq!(namespaces.len(), 1); - assert_eq!(namespaces, vec![response1.namespace.clone()]); + assert_eq!( + namespaces, + HashMap::from_iter(vec![(response1.0, response1.1.namespace)]) + ); + + let mut t = PostgresTransaction::begin_read(state.clone()) + .await + .unwrap(); let ListNamespacesResponse { namespaces, @@ -568,7 +600,7 @@ pub(crate) mod tests { page_size: Some(2), parent: None, }, - state.clone(), + t.transaction(), ) .await .unwrap(); @@ -577,7 +609,10 @@ pub(crate) mod tests { assert!(next_page_token.is_some()); assert_eq!( namespaces, - vec![response2.namespace.clone(), response3.namespace.clone()] + HashMap::from_iter(vec![ + (response2.0, response2.1.namespace), + (response3.0, response3.1.namespace) + ]) ); // last page is empty @@ -594,13 +629,13 @@ pub(crate) mod tests { page_size: Some(3), parent: None, }, - state.clone(), + t.transaction(), ) .await .unwrap(); assert_eq!(next_page_token, None); - assert_eq!(namespaces, vec![]); + assert_eq!(namespaces, HashMap::new()); } #[sqlx::test] @@ -614,7 +649,12 @@ pub(crate) mod tests { let mut transaction = PostgresTransaction::begin_write(state.clone()) .await .unwrap(); - let result = drop_namespace(warehouse_id, &table.namespace, transaction.transaction()) + let namespace_id = + namespace_to_id(warehouse_id, &table.namespace, transaction.transaction()) + .await + .unwrap() + .expect("Namespace not found"); + let result = drop_namespace(warehouse_id, namespace_id, transaction.transaction()) .await .unwrap_err(); diff --git a/crates/iceberg-catalog/src/implementations/postgres/tabular.rs b/crates/iceberg-catalog/src/implementations/postgres/tabular.rs index ea96045e..3543f4fc 100644 --- a/crates/iceberg-catalog/src/implementations/postgres/tabular.rs +++ b/crates/iceberg-catalog/src/implementations/postgres/tabular.rs @@ -12,8 +12,8 @@ use iceberg_ext::NamespaceIdent; use crate::api::iceberg::v1::{PaginatedTabulars, PaginationQuery, MAX_PAGE_SIZE}; use crate::implementations::postgres::pagination::{PaginateToken, V1PaginateToken}; -use crate::service::tabular_idents::{TabularIdentBorrowed, TabularIdentOwned, TabularIdentUuid}; use crate::service::DeletionDetails; +use crate::service::{TabularIdentBorrowed, TabularIdentOwned, TabularIdentUuid}; use iceberg_ext::configs::Location; use sqlx::postgres::PgArguments; use sqlx::{Arguments, Execute, FromRow, Postgres, QueryBuilder}; @@ -35,7 +35,7 @@ pub(crate) async fn tabular_ident_to_id<'a, 'e, 'c: 'e, E>( warehouse_id: WarehouseIdent, table: &TabularIdentBorrowed<'a>, list_flags: crate::service::ListFlags, - catalog_state: E, + transaction: E, ) -> Result> where E: 'e + sqlx::Executor<'c, Database = sqlx::Postgres>, @@ -63,7 +63,7 @@ where list_flags.include_deleted, list_flags.include_staged ) - .fetch_one(catalog_state) + .fetch_one(transaction) .await .map(|r| { Some(match r.typ { diff --git a/crates/iceberg-catalog/src/implementations/postgres/tabular/table.rs b/crates/iceberg-catalog/src/implementations/postgres/tabular/table.rs index e80c7d08..d6a0751f 100644 --- a/crates/iceberg-catalog/src/implementations/postgres/tabular/table.rs 
+++ b/crates/iceberg-catalog/src/implementations/postgres/tabular/table.rs @@ -255,18 +255,21 @@ pub(crate) async fn load_tables( .collect::>>() } -pub(crate) async fn list_tables( +pub(crate) async fn list_tables<'e, 'c: 'e, E>( warehouse_id: WarehouseIdent, namespace: &NamespaceIdent, list_flags: crate::service::ListFlags, - catalog_state: CatalogState, + transaction: E, pagination_query: PaginationQuery, -) -> Result> { +) -> Result> +where + E: 'e + sqlx::Executor<'c, Database = sqlx::Postgres>, +{ let tabulars = list_tabulars( warehouse_id, Some(namespace), list_flags, - &catalog_state.read_pool(), + transaction, Some(TabularType::Table), pagination_query, ) @@ -294,7 +297,7 @@ pub(crate) async fn get_table_metadata_by_id( table: TableIdentUuid, list_flags: crate::service::ListFlags, catalog_state: CatalogState, -) -> Result { +) -> Result> { let table = sqlx::query!( r#" SELECT @@ -302,6 +305,7 @@ pub(crate) async fn get_table_metadata_by_id( ti.name as "table_name", ti.location as "table_location", namespace_name, + ti.namespace_id, t."metadata" as "metadata: Json", ti."metadata_location", w.storage_profile as "storage_profile: Json", @@ -319,39 +323,37 @@ pub(crate) async fn get_table_metadata_by_id( list_flags.include_deleted ) .fetch_one(&catalog_state.read_pool()) - .await - .map_err(|e| match e { - sqlx::Error::RowNotFound => ErrorModel::builder() - .code(StatusCode::NOT_FOUND.into()) - .message("Table not found".to_string()) - .r#type("NoSuchTableError".to_string()) - .build(), - _ => e.into_error_model("Error fetching table".to_string()), - })?; + .await; + + let table = match table { + Ok(table) => table, + Err(sqlx::Error::RowNotFound) => return Ok(None), + Err(e) => { + return Err(e + .into_error_model("Error fetching table".to_string()) + .into()); + } + }; if !list_flags.include_staged && table.metadata_location.is_none() { - return Err(ErrorModel::builder() - .code(StatusCode::NOT_FOUND.into()) - .message("Table is staged and not yet created".to_string()) - .r#type("TableStaged".to_string()) - .build() - .into()); + return Ok(None); } let namespace = try_parse_namespace_ident(table.namespace_name)?; - Ok(GetTableMetadataResponse { + Ok(Some(GetTableMetadataResponse { table: TableIdent { namespace, name: table.table_name, }, + namespace_id: table.namespace_id.into(), table_id: table.table_id.into(), warehouse_id, location: table.table_location, metadata_location: table.metadata_location, storage_secret_ident: table.storage_secret_id.map(SecretIdent::from), storage_profile: table.storage_profile.deref().clone(), - }) + })) } pub(crate) async fn get_table_metadata_by_s3_location( @@ -359,7 +361,7 @@ pub(crate) async fn get_table_metadata_by_s3_location( location: &Location, list_flags: crate::service::ListFlags, catalog_state: CatalogState, -) -> Result { +) -> Result> { let query_strings = location .partial_locations() .into_iter() @@ -375,6 +377,7 @@ pub(crate) async fn get_table_metadata_by_s3_location( ti.name as "table_name", ti.location as "table_location", namespace_name, + ti.namespace_id, t."metadata" as "metadata: Json", ti."metadata_location", w.storage_profile as "storage_profile: Json", @@ -395,43 +398,37 @@ pub(crate) async fn get_table_metadata_by_s3_location( list_flags.include_deleted ) .fetch_one(&catalog_state.read_pool()) - .await - .map_err(|e| match e { - sqlx::Error::RowNotFound => ErrorModel::builder() - .code(StatusCode::NOT_FOUND.into()) - .message("Table not found".to_string()) - .r#type("NoSuchTableError".to_string()) - .stack(vec![ - 
location.to_string(), - format!("Warehouse: {}", warehouse_id), - ]) - .build(), - _ => e.into_error_model("Error fetching table".to_string()), - })?; + .await; + + let table = match table { + Ok(table) => table, + Err(sqlx::Error::RowNotFound) => return Ok(None), + Err(e) => { + return Err(e + .into_error_model("Error fetching table".to_string()) + .into()); + } + }; if !list_flags.include_staged && table.metadata_location.is_none() { - return Err(ErrorModel::builder() - .code(StatusCode::NOT_FOUND.into()) - .message("Table is staged and not yet created".to_string()) - .r#type("TableStaged".to_string()) - .build() - .into()); + return Ok(None); } let namespace = try_parse_namespace_ident(table.namespace_name)?; - Ok(GetTableMetadataResponse { + Ok(Some(GetTableMetadataResponse { table: TableIdent { namespace, name: table.table_name, }, table_id: table.table_id.into(), + namespace_id: table.namespace_id.into(), warehouse_id, location: table.table_location, metadata_location: table.metadata_location, storage_secret_ident: table.storage_secret_id.map(SecretIdent::from), storage_profile: table.storage_profile.deref().clone(), - }) + })) } /// Rename a table. Tables may be moved across namespaces. @@ -1180,7 +1177,7 @@ pub(crate) mod tests { warehouse_id, &namespace, ListFlags::default(), - state.clone(), + &state.read_pool(), PaginationQuery::empty(), ) .await @@ -1193,7 +1190,7 @@ pub(crate) mod tests { warehouse_id, &table1.namespace, ListFlags::default(), - state.clone(), + &state.read_pool(), PaginationQuery::empty(), ) .await @@ -1206,7 +1203,7 @@ pub(crate) mod tests { warehouse_id, &table2.namespace, ListFlags::default(), - state.clone(), + &state.read_pool(), PaginationQuery::empty(), ) .await @@ -1219,7 +1216,7 @@ pub(crate) mod tests { include_staged: true, ..ListFlags::default() }, - state.clone(), + &state.read_pool(), PaginationQuery::empty(), ) .await @@ -1239,7 +1236,7 @@ pub(crate) mod tests { warehouse_id, &namespace, ListFlags::default(), - state.clone(), + &state.read_pool(), PaginationQuery::empty(), ) .await @@ -1278,7 +1275,7 @@ pub(crate) mod tests { include_staged: true, ..ListFlags::default() }, - state.clone(), + &state.read_pool(), PaginationQuery { page_token: PageToken::NotSpecified, page_size: Some(2), @@ -1297,7 +1294,7 @@ pub(crate) mod tests { include_staged: true, ..ListFlags::default() }, - state.clone(), + &state.read_pool(), PaginationQuery { page_token: PageToken::Present(tables.next_page_token.unwrap()), page_size: Some(2), @@ -1316,7 +1313,7 @@ pub(crate) mod tests { include_staged: true, ..ListFlags::default() }, - state.clone(), + &state.read_pool(), PaginationQuery { page_token: PageToken::Present(tables.next_page_token.unwrap()), page_size: Some(2), @@ -1432,6 +1429,7 @@ pub(crate) mod tests { state.clone(), ) .await + .unwrap() .unwrap(); let mut metadata_location = metadata.location.parse::().unwrap(); // Exact path works @@ -1443,6 +1441,7 @@ pub(crate) mod tests { ) .await .unwrap() + .unwrap() .table_id; assert_eq!(id, table.table_id); @@ -1458,6 +1457,7 @@ pub(crate) mod tests { ) .await .unwrap() + .unwrap() .table_id; assert_eq!(id, table.table_id); @@ -1490,14 +1490,15 @@ pub(crate) mod tests { .unwrap(); // Shorter path does not work - get_table_metadata_by_s3_location( + assert!(get_table_metadata_by_s3_location( warehouse_id, &shorter, ListFlags::default(), state.clone(), ) .await - .unwrap_err(); + .unwrap() + .is_none()); } #[sqlx::test] @@ -1506,21 +1507,21 @@ pub(crate) mod tests { let warehouse_id = 
initialize_warehouse(state.clone(), None, None, None, true).await; let table = initialize_table(warehouse_id, state.clone(), false, None, None).await; - let mut transaction = pool.begin().await.unwrap(); + let mut transaction = pool.begin().await.expect("Failed to start transaction"); set_warehouse_status(warehouse_id, WarehouseStatus::Inactive, &mut transaction) .await - .unwrap(); + .expect("Failed to set warehouse status"); transaction.commit().await.unwrap(); - let err = get_table_metadata_by_id( + let r = get_table_metadata_by_id( warehouse_id, table.table_id, ListFlags::default(), state.clone(), ) .await - .unwrap_err(); - assert_eq!(err.error.code, StatusCode::NOT_FOUND); + .unwrap(); + assert!(r.is_none()); } #[sqlx::test] @@ -1536,15 +1537,15 @@ pub(crate) mod tests { .unwrap(); transaction.commit().await.unwrap(); - let err = get_table_metadata_by_id( + assert!(get_table_metadata_by_id( warehouse_id, table.table_id, ListFlags::default(), state.clone(), ) .await - .unwrap_err(); - assert_eq!(err.error.code, StatusCode::NOT_FOUND); + .unwrap() + .is_none()); let ok = get_table_metadata_by_id( warehouse_id, @@ -1556,6 +1557,7 @@ pub(crate) mod tests { state.clone(), ) .await + .unwrap() .unwrap(); assert_eq!(ok.table_id, table.table_id); @@ -1564,7 +1566,7 @@ pub(crate) mod tests { drop_table(table.table_id, &mut transaction).await.unwrap(); transaction.commit().await.unwrap(); - let err = get_table_metadata_by_id( + assert!(get_table_metadata_by_id( warehouse_id, table.table_id, ListFlags { @@ -1574,7 +1576,7 @@ pub(crate) mod tests { state.clone(), ) .await - .unwrap_err(); - assert_eq!(err.error.code, StatusCode::NOT_FOUND); + .unwrap() + .is_none()); } } diff --git a/crates/iceberg-catalog/src/implementations/postgres/tabular/view.rs b/crates/iceberg-catalog/src/implementations/postgres/tabular/view.rs index a2c2f305..5c30b1b2 100644 --- a/crates/iceberg-catalog/src/implementations/postgres/tabular/view.rs +++ b/crates/iceberg-catalog/src/implementations/postgres/tabular/view.rs @@ -10,10 +10,9 @@ use http::StatusCode; use crate::api::iceberg::v1::{PaginatedTabulars, PaginationQuery}; use crate::implementations::postgres::tabular::{ - create_tabular, drop_tabular, list_tabulars, CreateTabular, TabularIdentBorrowed, + self, create_tabular, drop_tabular, list_tabulars, CreateTabular, TabularIdentBorrowed, TabularIdentUuid, TabularType, }; -use crate::implementations::postgres::{tabular, CatalogState}; use crate::service::ListFlags; pub(crate) use crate::service::ViewMetadataWithLocation; use chrono::{DateTime, Utc}; @@ -78,7 +77,7 @@ pub(crate) async fn create_view( "InternalServerError", None, ) - .append_details(&[location.to_string(), metadata.location.to_string()]) + .append_details(vec![location.to_string(), metadata.location.to_string()]) .into()); } let tabular_id = create_tabular( @@ -439,13 +438,16 @@ pub(crate) async fn set_current_view_metadata_version( Ok(()) } -pub(crate) async fn list_views( +pub(crate) async fn list_views<'e, 'c: 'e, E>( warehouse_id: WarehouseIdent, namespace: &NamespaceIdent, include_deleted: bool, - catalog_state: CatalogState, + transaction: E, paginate_query: PaginationQuery, -) -> Result> { +) -> Result> +where + E: 'e + sqlx::Executor<'c, Database = sqlx::Postgres>, +{ let page = list_tabulars( warehouse_id, Some(namespace), @@ -454,7 +456,7 @@ pub(crate) async fn list_views( include_staged: false, include_active: true, }, - &catalog_state.read_pool(), + transaction, Some(TabularType::View), paginate_query, ) @@ -550,7 +552,7 @@ pub(crate) 
mod tests { use crate::api::iceberg::v1::PaginationQuery; use crate::implementations::postgres::tabular::mark_tabular_as_deleted; - use crate::service::tabular_idents::TabularIdentUuid; + use crate::service::TabularIdentUuid; use crate::WarehouseIdent; use iceberg_ext::configs::Location; use serde_json::json; @@ -737,7 +739,7 @@ pub(crate) mod tests { warehouse_id, &namespace, false, - state.clone(), + &state.read_pool(), PaginationQuery::empty(), ) .await diff --git a/crates/iceberg-catalog/src/implementations/postgres/warehouse.rs b/crates/iceberg-catalog/src/implementations/postgres/warehouse.rs index 2a113fc2..3a79e454 100644 --- a/crates/iceberg-catalog/src/implementations/postgres/warehouse.rs +++ b/crates/iceberg-catalog/src/implementations/postgres/warehouse.rs @@ -1,6 +1,5 @@ use super::dbutils::DBErrorHandler as _; use crate::api::{CatalogConfig, ErrorModel, Result}; -use crate::service::config::ConfigProvider; use crate::service::{GetProjectResponse, GetWarehouseResponse, WarehouseStatus}; use crate::{service::storage::StorageProfile, ProjectIdent, SecretIdent, WarehouseIdent}; use http::StatusCode; @@ -8,57 +7,54 @@ use sqlx::Error; use std::collections::HashSet; use std::ops::Deref; -use super::{CatalogState, PostgresCatalog}; +use super::CatalogState; use crate::api::management::v1::warehouse::TabularDeleteProfile; use sqlx::types::Json; -#[async_trait::async_trait] -impl ConfigProvider for super::PostgresCatalog { - async fn get_warehouse_by_name( - warehouse_name: &str, - project_id: ProjectIdent, - catalog_state: CatalogState, - ) -> Result> { - let warehouse_id = row_not_found_to_option( - sqlx::query_scalar!( - r#" - SELECT - warehouse_id - FROM warehouse - WHERE warehouse_name = $1 AND project_id = $2 - AND status = 'active' - "#, - warehouse_name.to_string(), - *project_id - ) - .fetch_one(&catalog_state.read_pool()) - .await, - )?; +pub(super) async fn get_warehouse_by_name( + warehouse_name: &str, + project_id: ProjectIdent, + catalog_state: CatalogState, +) -> Result> { + let warehouse_id = row_not_found_to_option( + sqlx::query_scalar!( + r#" + SELECT + warehouse_id + FROM warehouse + WHERE warehouse_name = $1 AND project_id = $2 + AND status = 'active' + "#, + warehouse_name.to_string(), + *project_id + ) + .fetch_one(&catalog_state.read_pool()) + .await, + )?; - Ok(warehouse_id.map(Into::into)) - } + Ok(warehouse_id.map(Into::into)) +} - async fn get_config_for_warehouse( - warehouse_id: WarehouseIdent, - catalog_state: CatalogState, - ) -> Result> { - let storage_profile = row_not_found_to_option( - sqlx::query_scalar!( - r#" - SELECT - storage_profile as "storage_profile: Json" - FROM warehouse - WHERE warehouse_id = $1 - AND status = 'active' - "#, - *warehouse_id - ) - .fetch_one(&catalog_state.read_pool()) - .await, - )?; +pub(super) async fn get_config_for_warehouse( + warehouse_id: WarehouseIdent, + catalog_state: CatalogState, +) -> Result> { + let storage_profile = row_not_found_to_option( + sqlx::query_scalar!( + r#" + SELECT + storage_profile as "storage_profile: Json" + FROM warehouse + WHERE warehouse_id = $1 + AND status = 'active' + "#, + *warehouse_id + ) + .fetch_one(&catalog_state.read_pool()) + .await, + )?; - Ok(storage_profile.map(|p| p.generate_catalog_config(warehouse_id))) - } + Ok(storage_profile.map(|p| p.generate_catalog_config(warehouse_id))) } pub(crate) async fn create_warehouse<'a>( @@ -220,7 +216,6 @@ pub(crate) async fn delete_project<'a>( pub(crate) async fn list_warehouses( project_id: ProjectIdent, include_status: Option>, - 
warehouse_id_filter: Option<&HashSet>, catalog_state: CatalogState, ) -> Result> { #[derive(sqlx::FromRow, Debug, PartialEq)] @@ -235,37 +230,9 @@ pub(crate) async fn list_warehouses( } let include_status = include_status.unwrap_or_else(|| vec![WarehouseStatus::Active]); - let warehouses = if let Some(warehouse_id_filter) = warehouse_id_filter { - let warehouse_ids: Vec = warehouse_id_filter - .iter() - .map(WarehouseIdent::to_uuid) - .collect(); - sqlx::query_as!( - WarehouseRecord, - r#" - SELECT - warehouse_id, - warehouse_name, - storage_profile as "storage_profile: Json", - storage_secret_id, - status AS "status: WarehouseStatus", - tabular_delete_mode as "tabular_delete_mode: DbTabularDeleteProfile", - tabular_expiration_seconds - FROM warehouse - WHERE project_id = $1 AND warehouse_id = ANY($2) - AND status = ANY($3) - "#, - *project_id, - &warehouse_ids, - include_status as Vec - ) - .fetch_all(&catalog_state.read_pool()) - .await - .map_err(|e| e.into_error_model("Error fetching warehouses".into()))? - } else { - sqlx::query_as!( - WarehouseRecord, - r#" + let warehouses = sqlx::query_as!( + WarehouseRecord, + r#" SELECT warehouse_id, warehouse_name, @@ -278,13 +245,12 @@ pub(crate) async fn list_warehouses( WHERE project_id = $1 AND status = ANY($2) "#, - *project_id, - include_status as Vec - ) - .fetch_all(&catalog_state.read_pool()) - .await - .map_err(|e| e.into_error_model("Error fetching warehouses".into()))? - }; + *project_id, + include_status as Vec + ) + .fetch_all(&catalog_state.read_pool()) + .await + .map_err(|e| e.into_error_model("Error fetching warehouses".into()))?; warehouses .into_iter() @@ -549,10 +515,12 @@ impl From for DbTabularDeleteProfile { #[cfg(test)] pub(crate) mod test { use super::*; + use crate::implementations::postgres::PostgresCatalog; use crate::service::storage::S3Flavor; + use crate::service::Catalog as _; use crate::{ implementations::postgres::PostgresTransaction, - service::{storage::S3Profile, Catalog as _, Transaction as _}, + service::{storage::S3Profile, Transaction as _}, }; pub(crate) async fn initialize_warehouse( @@ -674,7 +642,7 @@ pub(crate) mod test { let warehouse_id_1 = initialize_warehouse(state.clone(), None, Some(&project_id), None, true).await; - let warehouses = PostgresCatalog::list_warehouses(project_id, None, None, state.clone()) + let warehouses = PostgresCatalog::list_warehouses(project_id, None, state.clone()) .await .unwrap(); assert_eq!(warehouses.len(), 1); @@ -713,7 +681,6 @@ pub(crate) mod test { let warehouses = PostgresCatalog::list_warehouses( project_id, Some(vec![WarehouseStatus::Active, WarehouseStatus::Inactive]), - None, state.clone(), ) .await @@ -723,7 +690,7 @@ pub(crate) mod test { assert!(warehouses.iter().any(|w| w.id == warehouse_id_2)); // Assert only active whs - let warehouses = PostgresCatalog::list_warehouses(project_id, None, None, state.clone()) + let warehouses = PostgresCatalog::list_warehouses(project_id, None, state.clone()) .await .unwrap(); assert_eq!(warehouses.len(), 1); diff --git a/crates/iceberg-catalog/src/lib.rs b/crates/iceberg-catalog/src/lib.rs index 3435eeea..834ff7a6 100644 --- a/crates/iceberg-catalog/src/lib.rs +++ b/crates/iceberg-catalog/src/lib.rs @@ -7,19 +7,19 @@ #![allow(clippy::module_name_repetitions)] #![forbid(unsafe_code)] -pub mod api; - pub mod catalog; mod config; pub mod service; pub use service::{ProjectIdent, SecretIdent, WarehouseIdent}; -pub use config::{SecretBackend, CONFIG}; +pub use config::{AuthZBackend, OpenFGAAuth, SecretBackend, CONFIG}; pub 
mod implementations; mod request_metadata; +pub mod api; + #[cfg(feature = "router")] pub mod metrics; #[cfg(feature = "router")] diff --git a/crates/iceberg-catalog/src/request_metadata.rs b/crates/iceberg-catalog/src/request_metadata.rs index d5ec408e..83bb9e1f 100644 --- a/crates/iceberg-catalog/src/request_metadata.rs +++ b/crates/iceberg-catalog/src/request_metadata.rs @@ -1,4 +1,4 @@ -use crate::service::token_verification::AuthDetails; +use crate::service::token_verification::{Actor, AuthDetails}; use axum::middleware::Next; use axum::response::Response; use http::HeaderMap; @@ -24,6 +24,14 @@ impl RequestMetadata { auth_details: None, } } + + #[must_use] + pub fn actor(&self) -> Actor { + self.auth_details.as_ref().map_or( + Actor::Anonymous, + super::service::token_verification::AuthDetails::actor, + ) + } } #[cfg(feature = "router")] pub(crate) async fn create_request_metadata_with_trace_id_fn( diff --git a/crates/iceberg-catalog/src/service/auth.rs b/crates/iceberg-catalog/src/service/auth.rs deleted file mode 100644 index 57bde53a..00000000 --- a/crates/iceberg-catalog/src/service/auth.rs +++ /dev/null @@ -1,303 +0,0 @@ -use std::collections::HashSet; - -use super::{ProjectIdent, TableIdentUuid, WarehouseIdent}; -use crate::api::iceberg::v1::{NamespaceIdent, Result}; -use crate::request_metadata::RequestMetadata; - -#[derive(Debug, Clone)] -pub struct UserWarehouse { - pub project_id: Option, - pub warehouse_id: Option, -} - -#[async_trait::async_trait] - -pub trait AuthZHandler -where - Self: Sized + Send + Sync + Clone + 'static, -{ - type State: Clone + Send + Sync + 'static; - - async fn check_list_namespace( - metadata: &RequestMetadata, - warehouse_id: WarehouseIdent, - parent: Option<&NamespaceIdent>, - state: Self::State, - ) -> Result<()>; - - async fn check_create_namespace( - metadata: &RequestMetadata, - warehouse_id: WarehouseIdent, - parent: Option<&NamespaceIdent>, - state: Self::State, - ) -> Result<()>; - - async fn check_load_namespace_metadata( - metadata: &RequestMetadata, - warehouse_id: WarehouseIdent, - namespace: &NamespaceIdent, - state: Self::State, - ) -> Result<()>; - - /// Check if the user is allowed to check if a namespace exists, - /// not check if the namespace exists. - async fn check_namespace_exists( - metadata: &RequestMetadata, - warehouse_id: WarehouseIdent, - namespace: &NamespaceIdent, - state: Self::State, - ) -> Result<()>; - - async fn check_drop_namespace( - metadata: &RequestMetadata, - warehouse_id: WarehouseIdent, - namespace: &NamespaceIdent, - state: Self::State, - ) -> Result<()>; - - async fn check_update_namespace_properties( - metadata: &RequestMetadata, - warehouse_id: WarehouseIdent, - namespace: &NamespaceIdent, - state: Self::State, - ) -> Result<()>; - - async fn check_create_table( - metadata: &RequestMetadata, - warehouse_id: WarehouseIdent, - namespace: &NamespaceIdent, - state: Self::State, - ) -> Result<()>; - - async fn check_list_tables( - metadata: &RequestMetadata, - warehouse_id: WarehouseIdent, - namespace: &NamespaceIdent, - state: Self::State, - ) -> Result<()>; - - async fn check_list_soft_deletions( - metadata: &RequestMetadata, - warehouse_id: WarehouseIdent, - state: Self::State, - ) -> Result<()>; - - /// Check if the user is allowed to load a table. - /// - /// `table` is an optional argument because we might not be able - /// to obtain a table-id from the table_name a user specifies. - /// In most cases, unless the user has high permissions on a - /// namespace, you would probably want to return 401. 
- /// - /// Arguments: - /// - `warehouse_id`: The warehouse the table is in. - /// - `namespace`: The namespace the table is in. (Direct parent) - /// - async fn check_load_table( - metadata: &RequestMetadata, - warehouse_id: WarehouseIdent, - namespace: Option<&NamespaceIdent>, - table: Option, - state: Self::State, - ) -> Result<()>; - - /// This should check if the user is allowed to rename the table. - /// For rename to work, also "check_create_table" must pass - /// for the destination namespace. - async fn check_rename_table( - metadata: &RequestMetadata, - warehouse_id: WarehouseIdent, - source: Option, - state: Self::State, - ) -> Result<()>; - - async fn check_table_exists( - metadata: &RequestMetadata, - warehouse_id: WarehouseIdent, - namespace: Option<&NamespaceIdent>, - table: Option, - state: Self::State, - ) -> Result<()>; - - async fn check_drop_table( - metadata: &RequestMetadata, - warehouse_id: WarehouseIdent, - table: Option, - state: Self::State, - ) -> Result<()>; - - async fn check_commit_table( - metadata: &RequestMetadata, - warehouse_id: WarehouseIdent, - table: Option, - namespace: Option<&NamespaceIdent>, - state: Self::State, - ) -> Result<()>; - - // ---------------- Management API ---------------- - async fn check_create_warehouse( - metadata: &RequestMetadata, - project_id: &ProjectIdent, - state: Self::State, - ) -> Result<()>; - - // Return an error if the user is not authorized. - // Return Ok(None) if the user is authorized to list all existing projects. - // Return Ok(Some(projects)) if the user is authorized to list only the - // specified projects. - async fn check_list_projects( - metadata: &RequestMetadata, - state: Self::State, - ) -> Result>>; - - async fn check_list_warehouse_in_project( - metadata: &RequestMetadata, - project_id: ProjectIdent, - state: Self::State, - ) -> Result>>; - - async fn check_delete_warehouse( - metadata: &RequestMetadata, - warehouse_id: WarehouseIdent, - state: Self::State, - ) -> Result<()>; - - async fn check_get_warehouse( - metadata: &RequestMetadata, - warehouse_id: WarehouseIdent, - state: Self::State, - ) -> Result<()>; - - async fn check_rename_warehouse( - metadata: &RequestMetadata, - warehouse_id: WarehouseIdent, - state: Self::State, - ) -> Result<()>; - - async fn check_deactivate_warehouse( - metadata: &RequestMetadata, - warehouse_id: WarehouseIdent, - state: Self::State, - ) -> Result<()>; - - async fn check_activate_warehouse( - metadata: &RequestMetadata, - warehouse_id: WarehouseIdent, - state: Self::State, - ) -> Result<()>; - - async fn check_update_storage( - metadata: &RequestMetadata, - warehouse_id: WarehouseIdent, - state: Self::State, - ) -> Result<()>; - - async fn check_create_view( - metadata: &RequestMetadata, - warehouse_id: WarehouseIdent, - namespace: &NamespaceIdent, - state: Self::State, - ) -> Result<()>; - - async fn check_drop_view( - metadata: &RequestMetadata, - warehouse_id: WarehouseIdent, - table: Option<&TableIdentUuid>, - state: Self::State, - ) -> Result<()>; - - async fn check_load_view( - metadata: &RequestMetadata, - warehouse_id: WarehouseIdent, - namespace: Option<&NamespaceIdent>, - view: Option<&TableIdentUuid>, - state: Self::State, - ) -> Result<()>; - async fn check_commit_view( - _: &RequestMetadata, - _: WarehouseIdent, - _: Option<&TableIdentUuid>, - _: Option<&NamespaceIdent>, - _: Self::State, - ) -> Result<()>; - async fn check_rename_view( - _: &RequestMetadata, - _: WarehouseIdent, - _: Option<&TableIdentUuid>, - _: Self::State, - ) -> Result<()>; - 
async fn check_list_views( - _metadata: &RequestMetadata, - _warehouse_id: WarehouseIdent, - _namespace: &NamespaceIdent, - _state: Self::State, - ) -> Result<()>; - async fn check_view_exists( - _metadata: &RequestMetadata, - _warehouse_id: WarehouseIdent, - _namespace: Option<&NamespaceIdent>, - _view: Option<&TableIdentUuid>, - _state: Self::State, - ) -> Result<()>; -} - -/// Interface to provide Auth-related functions to the config gateway. -/// This is separated from the AuthHandler as different functions -/// are required while fetching the config. The config server might be -/// external to the rest of the catalog. -// We use the same associated type as AuthHandler to avoid requiring -// an additional state to pass as part of the APIContext. -// A dummy AuthHandler implementation is enough to implement this trait. -// This still feels less clunky than using a generic state type. -#[async_trait::async_trait] - -pub trait AuthConfigHandler -where - Self: Sized + Send + Sync + Clone + 'static, -{ - /// Extract information from the user credentials. Return an error if - /// the user is not authenticated or if an expected extraction - /// of information (e.g. project or warehouse) failed. - /// If information is correctly not available, return None for the - /// respective field. In this case project / warehouse must be passed - /// as arguments to the config endpoint. - /// If a warehouse_id is returned, a project_id must also be returned. - /// - /// If a project_id or warehouse_id is returned, this function must also check the - /// `list_warehouse_in_project` permission for a project_id and the - /// `get_config_for_warehouse` permission for a warehouse_id. - async fn get_and_validate_user_warehouse( - state: A::State, - metadata: &RequestMetadata, - ) -> Result; - - /// Enrich / Exchange the token that is used for all further requests - /// to the specified warehouse. Typically, this is used to enrich the - /// token with the warehouse-id, so that the get_token function can - /// extract it. - /// If this AuthNHadler does not support enriching the token, or - /// if no change to the original token is required, return Ok(None). - async fn exchange_token_for_warehouse( - state: A::State, - previous_request_metadata: &RequestMetadata, - project_id: &ProjectIdent, - warehouse_id: WarehouseIdent, - ) -> Result>; - - // // Used for all endpoints - // fn get_warehouse(state: T, headers: &HeaderMap) -> Result; - - /// Check if the user is allowed to list all warehouses in a project. - async fn check_list_warehouse_in_project( - state: A::State, - project_id: &ProjectIdent, - metadata: &RequestMetadata, - ) -> Result<()>; - - /// Check if the user is allowed to get the config for a warehouse. 
- async fn check_user_get_config_for_warehouse( - state: A::State, - warehouse_id: WarehouseIdent, - metadata: &RequestMetadata, - ) -> Result<()>; -} diff --git a/crates/iceberg-catalog/src/service/authz/implementations/allow_all.rs b/crates/iceberg-catalog/src/service/authz/implementations/allow_all.rs new file mode 100644 index 00000000..445fda59 --- /dev/null +++ b/crates/iceberg-catalog/src/service/authz/implementations/allow_all.rs @@ -0,0 +1,86 @@ +use async_trait::async_trait; + +use crate::api::iceberg::v1::Result; +use crate::request_metadata::RequestMetadata; +use crate::service::authz::{ + Authorizer, ListProjectsResponse, NamespaceAction, ProjectAction, ServerAction, TableAction, + ViewAction, WarehouseAction, +}; +use crate::service::health::{Health, HealthExt}; +use crate::service::{NamespaceIdentUuid, ProjectIdent, TableIdentUuid, WarehouseIdent}; + +#[derive(Clone, Debug, Default)] +pub struct AllowAllAuthorizer; + +#[async_trait] +impl HealthExt for AllowAllAuthorizer { + async fn health(&self) -> Vec { + vec![] + } + async fn update_health(&self) { + // Do nothing + } +} + +#[async_trait] +impl Authorizer for AllowAllAuthorizer { + async fn list_projects(&self, _metadata: &RequestMetadata) -> Result { + Ok(ListProjectsResponse::All) + } + + async fn is_allowed_server_action( + &self, + _metadata: &RequestMetadata, + _action: &ServerAction, + ) -> Result { + Ok(true) + } + + async fn is_allowed_project_action( + &self, + _metadata: &RequestMetadata, + _project_id: ProjectIdent, + _action: &ProjectAction, + ) -> Result { + Ok(true) + } + + async fn is_allowed_warehouse_action( + &self, + _metadata: &RequestMetadata, + _warehouse_id: WarehouseIdent, + _action: &WarehouseAction, + ) -> Result { + Ok(true) + } + + async fn is_allowed_namespace_action( + &self, + _metadata: &RequestMetadata, + _warehouse_id: WarehouseIdent, + _namespace_id: NamespaceIdentUuid, + _action: &NamespaceAction, + ) -> Result { + Ok(true) + } + + async fn is_allowed_table_action( + &self, + _metadata: &RequestMetadata, + _warehouse_id: WarehouseIdent, + _table_id: TableIdentUuid, + _action: &TableAction, + ) -> Result { + Ok(true) + } + + async fn is_allowed_view_action( + &self, + _metadata: &RequestMetadata, + _warehouse_id: WarehouseIdent, + _view_id: TableIdentUuid, + _action: &ViewAction, + ) -> Result { + Ok(true) + } +} diff --git a/crates/iceberg-catalog/src/service/authz/implementations/mod.rs b/crates/iceberg-catalog/src/service/authz/implementations/mod.rs new file mode 100644 index 00000000..8146aa49 --- /dev/null +++ b/crates/iceberg-catalog/src/service/authz/implementations/mod.rs @@ -0,0 +1,268 @@ +use crate::{ + request_metadata::RequestMetadata, + service::{ + authz::{ + ErrorModel, ListProjectsResponse, NamespaceAction, ProjectAction, Result, ServerAction, + TableAction, ViewAction, WarehouseAction, + }, + health::{Health, HealthExt}, + NamespaceIdentUuid, TableIdentUuid, + }, + AuthZBackend, ProjectIdent, WarehouseIdent, CONFIG, +}; + +pub(super) mod allow_all; + +pub mod openfga; + +/// Get the default authorizer from the configuration +/// +/// # Errors +/// Default model is not obtainable, i.e. if the model is not found in openfga +// Return error model here to convert it into anyhow in bin. 
IcebergErrorResponse does +// not implement StdError +pub async fn get_default_authorizer_from_config() -> std::result::Result { + match &CONFIG.authz_backend { + AuthZBackend::AllowAll => Ok(allow_all::AllowAllAuthorizer.into()), + AuthZBackend::OpenFGA => openfga::new_authorizer_from_config().await, + } +} + +#[derive(Debug, Clone)] +pub enum Authorizers { + AllowAll(allow_all::AllowAllAuthorizer), + OpenFGAUnauthorized(openfga::UnauthenticatedOpenFGAAuthorizer), + OpenFGABearer(openfga::BearerOpenFGAAuthorizer), + OpenFGAClientCreds(openfga::ClientCredentialsOpenFGAAuthorizer), +} + +impl From for Authorizers { + fn from(authorizer: allow_all::AllowAllAuthorizer) -> Self { + Self::AllowAll(authorizer) + } +} + +impl From for Authorizers { + fn from(authorizer: openfga::UnauthenticatedOpenFGAAuthorizer) -> Self { + Self::OpenFGAUnauthorized(authorizer) + } +} + +impl From for Authorizers { + fn from(authorizer: openfga::BearerOpenFGAAuthorizer) -> Self { + Self::OpenFGABearer(authorizer) + } +} + +impl From for Authorizers { + fn from(authorizer: openfga::ClientCredentialsOpenFGAAuthorizer) -> Self { + Self::OpenFGAClientCreds(authorizer) + } +} + +#[async_trait::async_trait] +impl super::Authorizer for Authorizers { + async fn list_projects(&self, metadata: &RequestMetadata) -> Result { + match self { + Self::AllowAll(authorizer) => authorizer.list_projects(metadata).await, + Self::OpenFGAUnauthorized(authorizer) => authorizer.list_projects(metadata).await, + Self::OpenFGABearer(authorizer) => authorizer.list_projects(metadata).await, + Self::OpenFGAClientCreds(authorizer) => authorizer.list_projects(metadata).await, + } + } + + async fn is_allowed_server_action( + &self, + metadata: &RequestMetadata, + action: &ServerAction, + ) -> Result { + match self { + Self::AllowAll(authorizer) => { + authorizer.is_allowed_server_action(metadata, action).await + } + Self::OpenFGAUnauthorized(authorizer) => { + authorizer.is_allowed_server_action(metadata, action).await + } + Self::OpenFGABearer(authorizer) => { + authorizer.is_allowed_server_action(metadata, action).await + } + Self::OpenFGAClientCreds(authorizer) => { + authorizer.is_allowed_server_action(metadata, action).await + } + } + } + + async fn is_allowed_project_action( + &self, + metadata: &RequestMetadata, + project_id: ProjectIdent, + action: &ProjectAction, + ) -> Result { + match self { + Self::AllowAll(authorizer) => { + authorizer + .is_allowed_project_action(metadata, project_id, action) + .await + } + Self::OpenFGAUnauthorized(authorizer) => { + authorizer + .is_allowed_project_action(metadata, project_id, action) + .await + } + Self::OpenFGABearer(authorizer) => { + authorizer + .is_allowed_project_action(metadata, project_id, action) + .await + } + Self::OpenFGAClientCreds(authorizer) => { + authorizer + .is_allowed_project_action(metadata, project_id, action) + .await + } + } + } + + async fn is_allowed_warehouse_action( + &self, + metadata: &RequestMetadata, + warehouse_id: WarehouseIdent, + action: &WarehouseAction, + ) -> Result { + match self { + Self::AllowAll(authorizer) => { + authorizer + .is_allowed_warehouse_action(metadata, warehouse_id, action) + .await + } + Self::OpenFGAUnauthorized(authorizer) => { + authorizer + .is_allowed_warehouse_action(metadata, warehouse_id, action) + .await + } + Self::OpenFGABearer(authorizer) => { + authorizer + .is_allowed_warehouse_action(metadata, warehouse_id, action) + .await + } + Self::OpenFGAClientCreds(authorizer) => { + authorizer + .is_allowed_warehouse_action(metadata, 
warehouse_id, action) + .await + } + } + } + + async fn is_allowed_namespace_action( + &self, + metadata: &RequestMetadata, + warehouse_id: WarehouseIdent, + namespace_id: NamespaceIdentUuid, + action: &NamespaceAction, + ) -> Result { + match self { + Self::AllowAll(authorizer) => { + authorizer + .is_allowed_namespace_action(metadata, warehouse_id, namespace_id, action) + .await + } + Self::OpenFGAUnauthorized(authorizer) => { + authorizer + .is_allowed_namespace_action(metadata, warehouse_id, namespace_id, action) + .await + } + Self::OpenFGABearer(authorizer) => { + authorizer + .is_allowed_namespace_action(metadata, warehouse_id, namespace_id, action) + .await + } + Self::OpenFGAClientCreds(authorizer) => { + authorizer + .is_allowed_namespace_action(metadata, warehouse_id, namespace_id, action) + .await + } + } + } + + async fn is_allowed_table_action( + &self, + metadata: &RequestMetadata, + warehouse_id: WarehouseIdent, + table_id: TableIdentUuid, + action: &TableAction, + ) -> Result { + match self { + Self::AllowAll(authorizer) => { + authorizer + .is_allowed_table_action(metadata, warehouse_id, table_id, action) + .await + } + Self::OpenFGAUnauthorized(authorizer) => { + authorizer + .is_allowed_table_action(metadata, warehouse_id, table_id, action) + .await + } + Self::OpenFGABearer(authorizer) => { + authorizer + .is_allowed_table_action(metadata, warehouse_id, table_id, action) + .await + } + Self::OpenFGAClientCreds(authorizer) => { + authorizer + .is_allowed_table_action(metadata, warehouse_id, table_id, action) + .await + } + } + } + + async fn is_allowed_view_action( + &self, + metadata: &RequestMetadata, + warehouse_id: WarehouseIdent, + view_id: TableIdentUuid, + action: &ViewAction, + ) -> Result { + match self { + Self::AllowAll(authorizer) => { + authorizer + .is_allowed_view_action(metadata, warehouse_id, view_id, action) + .await + } + Self::OpenFGAUnauthorized(authorizer) => { + authorizer + .is_allowed_view_action(metadata, warehouse_id, view_id, action) + .await + } + Self::OpenFGABearer(authorizer) => { + authorizer + .is_allowed_view_action(metadata, warehouse_id, view_id, action) + .await + } + Self::OpenFGAClientCreds(authorizer) => { + authorizer + .is_allowed_view_action(metadata, warehouse_id, view_id, action) + .await + } + } + } +} + +#[async_trait::async_trait] +impl HealthExt for Authorizers { + async fn health(&self) -> Vec { + match self { + Self::AllowAll(authorizer) => authorizer.health().await, + Self::OpenFGAUnauthorized(authorizer) => authorizer.health().await, + Self::OpenFGABearer(authorizer) => authorizer.health().await, + Self::OpenFGAClientCreds(authorizer) => authorizer.health().await, + } + } + + async fn update_health(&self) { + match self { + Self::AllowAll(authorizer) => authorizer.update_health().await, + Self::OpenFGAUnauthorized(authorizer) => authorizer.update_health().await, + Self::OpenFGABearer(authorizer) => authorizer.update_health().await, + Self::OpenFGAClientCreds(authorizer) => authorizer.update_health().await, + } + } +} diff --git a/crates/iceberg-catalog/src/service/authz/implementations/openfga/client.rs b/crates/iceberg-catalog/src/service/authz/implementations/openfga/client.rs new file mode 100644 index 00000000..06501da8 --- /dev/null +++ b/crates/iceberg-catalog/src/service/authz/implementations/openfga/client.rs @@ -0,0 +1,236 @@ +//! 
Get `OpenFGA` clients + +use std::{collections::HashMap, str::FromStr}; + +use super::{ClientHelper as _, CollaborationModelVersion, OpenFGAAuthorizer, AUTH_CONFIG}; +use crate::{ + service::authz::{implementations::Authorizers, ErrorModel}, + OpenFGAAuth, +}; +use http::HeaderMap; +use openfga_rs::{ + authentication::{BearerTokenInterceptor, ClientCredentialInterceptor}, + tonic::{ + self, + codegen::{Body, Bytes, StdError}, + service::interceptor::InterceptedService, + transport::{Channel, Endpoint}, + }, +}; +use openfga_rs::{ + authentication::{ClientCredentials, RefreshConfiguration}, + open_fga_service_client::OpenFgaServiceClient, +}; + +pub type UnauthenticatedOpenFGAAuthorizer = OpenFGAAuthorizer; +pub type BearerOpenFGAAuthorizer = + OpenFGAAuthorizer>; +pub type ClientCredentialsOpenFGAAuthorizer = + OpenFGAAuthorizer>; + +/// Create a new `OpenFGA` authorizer from the configuration. +/// +/// # Errors +/// - Server connection fails +/// - Store (name) not found (from crate Config) +/// - Active Authorization model not found +pub async fn new_authorizer_from_config() -> Result { + let endpoint = AUTH_CONFIG.endpoint.clone(); + match &AUTH_CONFIG.auth { + OpenFGAAuth::Anonymous => Ok(new_unauthenticated_authorizer(endpoint).await?.into()), + OpenFGAAuth::ApiKey(api_key) => { + Ok(new_bearer_auth_authorizer(endpoint, api_key).await?.into()) + } + OpenFGAAuth::ClientCredentials { + client_id, + client_secret, + token_endpoint, + } => Ok(new_client_credentials_authorizer( + endpoint, + ClientCredentials { + client_id: client_id.clone(), + client_secret: client_secret.clone(), + token_endpoint: token_endpoint.clone(), + extra_headers: HeaderMap::default(), + extra_oauth_params: HashMap::default(), + }, + ) + .await? + .into()), + } +} + +/// Create a new `OpenFGA` authorizer with the given client. +async fn new_authorizer( + mut client: OpenFgaServiceClient, +) -> std::result::Result, ErrorModel> +where + T: Clone + Sync + Send + 'static, + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + Send + 'static, + ::Error: Into + Send, + , + >>::Future: Send, +{ + let active_model_version = CollaborationModelVersion::active(); + let active_model = active_model_version.get_model_ref(); + + let store_id = client.get_store_by_name(&AUTH_CONFIG.store_name).await?.id; + let authorization_model_id = client + .get_auth_model_id(store_id.clone(), active_model) + .await?; + + Ok(OpenFGAAuthorizer { + client, + store_id, + authorization_model_id: authorization_model_id.unwrap_or_default(), + }) +} + +/// Create a new `OpenFGA` authorizer without authentication. +/// +/// # Errors +/// - Server connection fails +/// - Store (name) not found (from crate Config) +/// - Active Authorization model not found +pub(crate) async fn new_unauthenticated_authorizer( + endpoint: url::Url, +) -> std::result::Result, ErrorModel> { + let client = new_unauthenticated_client(endpoint).await?; + new_authorizer(client).await +} + +/// Create a new `OpenFGA` authorizer with bearer token. +/// +/// # Errors +/// - Server connection fails +/// - Store (name) not found (from crate Config) +/// - Active Authorization model not found +async fn new_bearer_auth_authorizer( + endpoint: url::Url, + token: &str, +) -> std::result::Result< + OpenFGAAuthorizer>, + ErrorModel, +> { + let client = new_bearer_auth_client(endpoint, token).await?; + new_authorizer(client).await +} + +/// Create a new `OpenFGA` authorizer with client credentials. 
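+///
+/// The credentials come from the crate configuration (`OpenFGAAuth::ClientCredentials`) and
+/// consist of `client_id`, `client_secret` and `token_endpoint`; the
+/// `ClientCredentialInterceptor` exchanges them for a token and attaches it to outgoing
+/// `OpenFGA` requests.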
+/// +/// # Errors +/// - Server connection fails +/// - Store (name) not found (from crate Config) +/// - Active Authorization model not found +async fn new_client_credentials_authorizer( + endpoint: url::Url, + credentials: openfga_rs::authentication::ClientCredentials, +) -> std::result::Result< + OpenFGAAuthorizer>, + ErrorModel, +> { + let client = new_client_credentials( + endpoint, + credentials, + RefreshConfiguration { + max_retry: 10, + retry_interval: std::time::Duration::from_millis(5), + }, + ) + .await?; + new_authorizer(client).await +} + +/// Create a new `OpenFGA` client without authentication. +/// +/// Public use intended for testing only. +/// +/// # Errors +/// - Connection to `OpenFGA` fails +pub async fn new_unauthenticated_client( + endpoint: url::Url, +) -> Result, ErrorModel> { + let client = OpenFgaServiceClient::connect(endpoint.to_string()) + .await + .map_err(|e| { + ErrorModel::internal( + format!("Failed to connect to OpenFGA at {endpoint}"), + "OpenFGAConnection", + Some(Box::new(e)), + ) + })?; + + Ok(client) +} + +/// Create a new `OpenFGA` client with bearer token. +/// +/// # Errors +/// - Connection to `OpenFGA` fails +async fn new_bearer_auth_client( + endpoint: url::Url, + token: &str, +) -> Result>, ErrorModel> { + let channel = new_channel(endpoint).await?; + let interceptor = BearerTokenInterceptor::new(token).map_err(|e| { + ErrorModel::internal( + format!("Failed to create BearerTokenInterceptor: {e}"), + "OpenFGAConnection", + Some(Box::new(e)), + ) + })?; + let client = OpenFgaServiceClient::with_interceptor(channel, interceptor); + Ok(client) +} + +/// Create a new `OpenFGA` client with client credentials. +/// +/// # Errors +/// - Client credentials cannot be exchanged for a token +/// - Connection to `OpenFGA` fails +async fn new_client_credentials( + endpoint: url::Url, + credentials: ClientCredentials, + refresh_config: RefreshConfiguration, +) -> Result< + OpenFgaServiceClient>, + ErrorModel, +> { + let channel = new_channel(endpoint).await?; + let interceptor = ClientCredentialInterceptor::new_initialized(credentials, refresh_config) + .map_err(|e| { + ErrorModel::internal( + format!("Failed to create ClientCredentialInterceptor: {e}"), + "OpenFGAConnection", + Some(Box::new(e)), + ) + })?; + let client: OpenFgaServiceClient> = + OpenFgaServiceClient::with_interceptor(channel, interceptor); + Ok(client) +} + +async fn new_channel(endpoint: url::Url) -> Result { + let channel = Endpoint::from_str(endpoint.as_ref()) + .map_err(|e| { + ErrorModel::internal( + format!("Invalid OpenFGA endpoint: {endpoint}"), + "OpenFGAConnection", + Some(Box::new(e)), + ) + })? 
+ .connect() + .await + .map_err(|e| { + ErrorModel::internal( + format!("Failed to connect to OpenFGA at {endpoint}"), + "OpenFGAConnection", + Some(Box::new(e)), + ) + })?; + + Ok(channel) +} diff --git a/crates/iceberg-catalog/src/service/authz/implementations/openfga/health.rs b/crates/iceberg-catalog/src/service/authz/implementations/openfga/health.rs new file mode 100644 index 00000000..a90624cb --- /dev/null +++ b/crates/iceberg-catalog/src/service/authz/implementations/openfga/health.rs @@ -0,0 +1,28 @@ +use async_trait::async_trait; +use openfga_rs::tonic::{ + self, + codegen::{Body, Bytes, StdError}, +}; + +use super::OpenFGAAuthorizer; +use crate::service::health::{Health, HealthExt}; + +#[async_trait] +impl HealthExt for OpenFGAAuthorizer +where + T: Clone + Sync + Send + 'static, + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + Send + 'static, + ::Error: Into + Send, + , + >>::Future: Send, +{ + async fn health(&self) -> Vec { + vec![] + } + async fn update_health(&self) { + // Do nothing + } +} diff --git a/crates/iceberg-catalog/src/service/authz/implementations/openfga/migration.rs b/crates/iceberg-catalog/src/service/authz/implementations/openfga/migration.rs new file mode 100644 index 00000000..1f57972d --- /dev/null +++ b/crates/iceberg-catalog/src/service/authz/implementations/openfga/migration.rs @@ -0,0 +1,69 @@ +// use crate::{ +// request_metadata::RequestMetadata, +// service::{ +// authz::{ +// implementations::openfga::CollaborationModelVersion, Authorizer, ErrorModel, +// ListProjectsResponse, NamespaceAction, ProjectAction, Result, ServerAction, +// TableAction, ViewAction, WarehouseAction, +// }, +// token_verification::Actor, +// NamespaceIdentUuid, TableIdentUuid, +// }, +// ProjectIdent, WarehouseIdent, CONFIG, +// }; +// use openfga_rs::open_fga_service_client::OpenFgaServiceClient; +// use openfga_rs::{ +// tonic::{ +// self, +// codegen::{Body, Bytes, StdError}, +// }, +// CheckRequest, CheckRequestTupleKey, ConsistencyPreference, ListObjectsRequest, +// }; +// use strum::IntoEnumIterator as _; + +// use super::OpenFGAAuthorizer; + +// /// Migrate the +// async fn migrate(authorizer: &mut OpenFGAAuthorizer) -> Result<(), ErrorModel> +// where +// T: Clone + Sync + Send + 'static, +// T: tonic::client::GrpcService, +// T::Error: Into, +// T::ResponseBody: Body + Send + 'static, +// ::Error: Into + Send, +// , +// >>::Future: Send, +// { +// // If no authorization model exists, apply the first model +// let models = authorizer +// .client +// .read_authorization_models(openfga_rs::ReadAuthorizationModelsRequest { +// store_id: authorizer.store_id.clone(), +// page_size: Some(2), +// continuation_token: String::new(), +// }) +// .await +// .map_err(|e| { +// ErrorModel::internal( +// "Failed to list authorization models", +// "OpenFGAConnection", +// Some(Box::new(e)), +// ) +// })? +// .into_inner(); + +// if models.authorization_models.is_empty() { +// let active_model = CollaborationModelVersion::active(); +// tracing::info!( +// "No authorization models found. 
Applying active model version {active_model}" +// ); +// } else { +// // Go backwards in time through all models - stop if we find one +// let all_versions = CollaborationModelVersion::iter().rev().collect::>(); +// // let mut found = None; +// todo!(); // Continue +// } + +// Ok(()) +// } diff --git a/crates/iceberg-catalog/src/service/authz/implementations/openfga/mod.rs b/crates/iceberg-catalog/src/service/authz/implementations/openfga/mod.rs new file mode 100644 index 00000000..d9ede6f5 --- /dev/null +++ b/crates/iceberg-catalog/src/service/authz/implementations/openfga/mod.rs @@ -0,0 +1,417 @@ +use std::{collections::HashSet, str::FromStr}; + +use crate::{ + request_metadata::RequestMetadata, + service::{ + authz::{ + Authorizer, ErrorModel, ListProjectsResponse, NamespaceAction, ProjectAction, Result, + ServerAction, TableAction, ViewAction, WarehouseAction, + }, + token_verification::Actor, + NamespaceIdentUuid, TableIdentUuid, + }, + ProjectIdent, WarehouseIdent, CONFIG, +}; +use openfga_rs::open_fga_service_client::OpenFgaServiceClient; +use openfga_rs::{ + tonic::{ + self, + codegen::{Body, Bytes, StdError}, + }, + CheckRequest, CheckRequestTupleKey, ConsistencyPreference, ListObjectsRequest, +}; + +mod client; +mod health; +mod migration; +mod models; +mod service_ext; + +pub use client::{ + new_authorizer_from_config, new_unauthenticated_client, BearerOpenFGAAuthorizer, + ClientCredentialsOpenFGAAuthorizer, UnauthenticatedOpenFGAAuthorizer, +}; +pub use models::{CollaborationModelVersion, CollaborationModels}; +pub use openfga_rs::authentication::ClientCredentials; +pub use service_ext::ClientHelper; + +lazy_static::lazy_static! { + static ref AUTH_CONFIG: crate::config::OpenFGAConfig = { + + CONFIG.openfga.clone().expect("OpenFGAConfig not found") + }; +} + +#[derive(Clone, Debug)] +pub struct OpenFGAAuthorizer +where + T: Clone + Sync + Send + 'static, + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + Send + 'static, + ::Error: Into + Send, + , + >>::Future: Send, +{ + pub(crate) client: OpenFgaServiceClient, + pub(crate) store_id: String, + pub(crate) authorization_model_id: String, +} + +#[async_trait::async_trait] +impl Authorizer for OpenFGAAuthorizer +where + T: Clone + Sync + Send + 'static, + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + Send + 'static, + ::Error: Into + Send, + , + >>::Future: Send, +{ + async fn list_projects(&self, metadata: &RequestMetadata) -> Result { + let actor = metadata.actor(); + + let check_actor_fut = self.check_actor(&actor); + let list_all_fut = self.check(CheckRequestTupleKey { + user: metadata.actor().to_openfga()?, + relation: ServerAction::CanListAllProjects.to_string(), + object: format!("server:{}", AUTH_CONFIG.server_id), + }); + + let (check_actor, list_all) = futures::join!(check_actor_fut, list_all_fut); + check_actor?; + let list_all = list_all?; + + if list_all { + return Ok(ListProjectsResponse::All); + } + + let projects = self + .list_objects( + "project", + ProjectAction::CanShowInList.to_string(), + actor.to_openfga()?, + ) + .await? 
+ .iter() + .map(|p| { + ProjectIdent::from_str(p).map_err(|_e| { + ErrorModel::internal( + "Failed to parse project id", + "ListProjectsIdParseError", + None, + ) + .append_detail(format!("Project id: {p}")) + .into() + }) + }) + .collect::>>()?; + + Ok(ListProjectsResponse::Projects(projects)) + } + + async fn is_allowed_server_action( + &self, + metadata: &RequestMetadata, + action: &ServerAction, + ) -> Result { + let actor = metadata.actor(); + let check_actor_fut = self.check_actor(&actor); + let check_fut = self.check(CheckRequestTupleKey { + user: actor.to_openfga()?, + relation: action.to_string(), + object: format!("server:{}", AUTH_CONFIG.server_id), + }); + + let (check_actor, check) = futures::join!(check_actor_fut, check_fut); + check_actor?; + check + } + + async fn is_allowed_project_action( + &self, + metadata: &RequestMetadata, + project_id: ProjectIdent, + action: &ProjectAction, + ) -> Result { + let actor = metadata.actor(); + let check_actor_fut = self.check_actor(&actor); + let check_fut = self.check(CheckRequestTupleKey { + user: actor.to_openfga()?, + relation: action.to_string(), + object: format!("project:{project_id}"), + }); + + let (check_actor, check) = futures::join!(check_actor_fut, check_fut); + check_actor?; + check + } + + async fn is_allowed_warehouse_action( + &self, + metadata: &RequestMetadata, + warehouse_id: WarehouseIdent, + action: &WarehouseAction, + ) -> Result { + let actor = metadata.actor(); + let check_actor_fut = self.check_actor(&actor); + let check_fut = self.check(CheckRequestTupleKey { + user: actor.to_openfga()?, + relation: action.to_string(), + object: format!("warehouse:{warehouse_id}"), + }); + + let (check_actor, check) = futures::join!(check_actor_fut, check_fut); + check_actor?; + check + } + + /// Return the namespace_id if the action is allowed, otherwise return None. + async fn is_allowed_namespace_action( + &self, + metadata: &RequestMetadata, + _warehouse_id: WarehouseIdent, + namespace_id: NamespaceIdentUuid, + action: &NamespaceAction, + ) -> Result { + let actor = metadata.actor(); + let check_actor_fut = self.check_actor(&actor); + let check_fut = self.check(CheckRequestTupleKey { + user: actor.to_openfga()?, + relation: action.to_string(), + object: format!("namespace:{namespace_id}"), + }); + + let (check_actor, check) = futures::join!(check_actor_fut, check_fut); + check_actor?; + check + } + + /// Return the table_id if the action is allowed, otherwise return None. + async fn is_allowed_table_action( + &self, + metadata: &RequestMetadata, + _warehouse_id: WarehouseIdent, + table_id: TableIdentUuid, + action: &TableAction, + ) -> Result { + let actor = metadata.actor(); + let check_actor_fut = self.check_actor(&actor); + let check_fut = self.check(CheckRequestTupleKey { + user: actor.to_openfga()?, + relation: action.to_string(), + object: format!("table:{table_id}"), + }); + + let (check_actor, check) = futures::join!(check_actor_fut, check_fut); + check_actor?; + check + } + + /// Return the view_id if the action is allowed, otherwise return None. 
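+    /// The permission is evaluated against the `OpenFGA` object `view:{view_id}`.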
+ async fn is_allowed_view_action( + &self, + metadata: &RequestMetadata, + _warehouse_id: WarehouseIdent, + view_id: TableIdentUuid, + action: &ViewAction, + ) -> Result { + let actor = metadata.actor(); + let check_actor_fut = self.check_actor(&actor); + let check_fut = self.check(CheckRequestTupleKey { + user: actor.to_openfga()?, + relation: action.to_string(), + object: format!("view:{view_id}"), + }); + + let (check_actor, check) = futures::join!(check_actor_fut, check_fut); + check_actor?; + check + } +} + +impl OpenFGAAuthorizer +where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + Send + 'static, + ::Error: Into + Send, + , + >>::Future: Send, +{ + async fn check(&self, tuple_key: CheckRequestTupleKey) -> Result { + self.client + .clone() + .check(CheckRequest { + tuple_key: Some(tuple_key), + store_id: self.store_id.clone(), + authorization_model_id: self.authorization_model_id.clone(), + contextual_tuples: None, + trace: false, + context: None, + consistency: ConsistencyPreference::MinimizeLatency.into(), + }) + .await + .map_err(|e| { + let msg = e.message().to_string(); + let code = e.code().to_string(); + ErrorModel::internal( + "Failed to check authorization", + "AuthorizationCheckFailed", + Some(Box::new(e)), + ) + .append_detail(msg) + .append_detail(format!("Tonic code: {code}")) + .into() + }) + .map(|response| response.get_ref().allowed) + } + + async fn list_objects( + &self, + r#type: impl Into, + relation: impl Into, + user: impl Into, + ) -> Result> { + let user = user.into(); + self.client + .clone() + .list_objects(ListObjectsRequest { + r#type: r#type.into(), + relation: relation.into(), + user: user.clone(), + store_id: self.store_id.clone(), + authorization_model_id: self.authorization_model_id.clone(), + contextual_tuples: None, + context: None, + consistency: ConsistencyPreference::MinimizeLatency.into(), + }) + .await + .map_err(|e| { + let msg = e.message().to_string(); + let code = e.code().to_string(); + ErrorModel::internal( + "Failed to expand authorization", + "AuthorizationExpandFailed", + Some(Box::new(e)), + ) + .append_detail(msg) + .append_detail(format!("Tonic code: {code}")) + .into() + }) + .map(|response| { + let s: Vec = response.into_inner().objects; + // cut off the user: prefix + s.iter().map(|s| s[user.len()..].to_string()).collect() + }) + } + + /// Check if the requested actor combination is allowed - especially if the user + /// is allowed to assume the specified role. 
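+    ///
+    /// For example, for `Actor::Role { principal, assumed_role }` the check below is roughly
+    /// equivalent to asking `OpenFGA` whether the tuple
+    /// (`user:<principal>`, `can_assume`, `role:<assumed_role>#assignee`) holds
+    /// (the identifiers shown here are placeholders).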
+    async fn check_actor(&self, actor: &Actor) -> Result<()> {
+        match actor {
+            Actor::Principal(_) | Actor::Anonymous => Ok(()),
+            Actor::Role {
+                principal,
+                assumed_role,
+            } => {
+                let assume_role_allowed = self
+                    .check(CheckRequestTupleKey {
+                        user: Actor::Principal(principal.to_string()).to_openfga()?,
+                        relation: "can_assume".to_string(),
+                        object: actor.to_openfga()?,
+                    })
+                    .await?;
+
+                if assume_role_allowed {
+                    Ok(())
+                } else {
+                    Err(ErrorModel::forbidden(
+                        format!(
+                            "Principal is not allowed to assume the specified role with id {assumed_role}"
+                        ),
+                        "RoleAssumptionNotAllowed",
+                        None,
+                    )
+                    .into())
+                }
+            }
+        }
+    }
+}
+
+trait ToOpenFGA {
+    fn to_openfga(&self) -> Result<String>;
+}
+
+impl ToOpenFGA for Actor {
+    fn to_openfga(&self) -> Result<String> {
+        match self {
+            Actor::Anonymous => Ok("user:*".to_string()),
+            Actor::Principal(principal) => {
+                validate_user_chars(principal, "Principal")?;
+                Ok(format!("user:{principal}"))
+            }
+            Actor::Role {
+                principal: _,
+                assumed_role,
+            } => {
+                validate_user_chars(assumed_role, "Role")?;
+                Ok(format!("role:{assumed_role}#assignee"))
+            }
+        }
+    }
+}
+
+fn validate_user_chars(s: &str, entity_name: &str) -> Result<()> {
+    let error_typ = capitalize(entity_name);
+
+    if !s
+        .chars()
+        .all(|c| c.is_alphanumeric() || c == '-' || c == '_')
+    {
+        return Err(ErrorModel::bad_request(
+            format!("Invalid characters in {entity_name} id"),
+            error_typ,
+            None,
+        )
+        .append_detail(format!("{entity_name}: {s}"))
+        .into());
+    }
+
+    // All lowercase
+    if s.to_lowercase() != s {
+        return Err(ErrorModel::bad_request(
+            format!("{entity_name} id must be lowercase"),
+            error_typ,
+            None,
+        )
+        .append_detail(format!("{entity_name}: {s}"))
+        .into());
+    }
+
+    // Max length 128
+    if s.len() > 128 {
+        return Err(ErrorModel::bad_request(
+            format!("{entity_name} id must be at most 128 characters"),
+            error_typ,
+            None,
+        )
+        .append_detail(format!("{entity_name}: {s}"))
+        .into());
+    }
+
+    Ok(())
+}
+
+fn capitalize(s: &str) -> String {
+    let mut c = s.chars();
+    match c.next() {
+        None => String::new(),
+        Some(f) => f.to_uppercase().collect::<String>() + c.as_str(),
+    }
+}
diff --git a/crates/iceberg-catalog/src/service/authz/implementations/openfga/models.rs b/crates/iceberg-catalog/src/service/authz/implementations/openfga/models.rs
new file mode 100644
index 00000000..1c5cce78
--- /dev/null
+++ b/crates/iceberg-catalog/src/service/authz/implementations/openfga/models.rs
@@ -0,0 +1,752 @@
+use std::collections::HashMap;
+
+use openfga_rs::{Condition, TypeDefinition};
+
+lazy_static::lazy_static!
{ + static ref MODEL_V1_JSON: serde_json::Value = serde_json::from_str(include_str!("../../../../../../../authz/openfga/collaboration_model/v1/schema.json")).expect("Failed to parse OpenFGA model V1 as JSON"); + static ref MODEL: CollaborationModels = CollaborationModels { + v1: serde_json::from_value(MODEL_V1_JSON.clone()).expect("Failed to parse OpenFGA model V1 from JSON"), + }; +} + +#[derive(Debug)] +pub struct CollaborationModels { + v1: AuthorizationModel, +} + +impl CollaborationModels { + #[must_use] + pub fn get_model(&self, version: &CollaborationModelVersion) -> &AuthorizationModel { + match version { + CollaborationModelVersion::V1 => &self.v1, + } + } +} + +#[derive(Debug, Clone, strum_macros::EnumString, strum_macros::Display, strum_macros::EnumIter)] +#[strum(serialize_all = "lowercase")] +pub enum CollaborationModelVersion { + V1, +} + +impl CollaborationModelVersion { + #[must_use] + pub fn active() -> Self { + CollaborationModelVersion::V1 + } + + #[must_use] + pub fn get_model(&self) -> AuthorizationModel { + MODEL.get_model(self).clone() + } + + #[must_use] + pub fn get_model_ref(&self) -> &AuthorizationModel { + MODEL.get_model(self) + } +} + +#[derive(Debug, serde::Deserialize, Clone, PartialEq)] +#[serde(from = "ser_de::AuthorizationModel")] +pub struct AuthorizationModel { + pub type_definitions: Vec, + pub schema_version: String, + pub conditions: Option>, +} + +impl AuthorizationModel { + pub fn into_write_request( + self, + store_id: String, + ) -> openfga_rs::WriteAuthorizationModelRequest { + openfga_rs::WriteAuthorizationModelRequest { + store_id, + type_definitions: self.type_definitions.into_iter().collect(), + schema_version: self.schema_version, + conditions: self.conditions.unwrap_or_default(), + } + } +} + +impl From for AuthorizationModel { + fn from(value: openfga_rs::AuthorizationModel) -> Self { + let openfga_rs::AuthorizationModel { + id: _, + schema_version, + type_definitions, + conditions, + } = value; + AuthorizationModel { + type_definitions: type_definitions + .into_iter() + .map(std::convert::Into::into) + .collect(), + schema_version, + conditions: Some(conditions).filter(|v: &HashMap| !v.is_empty()), + } + } +} + +mod ser_de { + use super::HashMap; + use openfga_rs::relation_reference::RelationOrWildcard; + use serde::{Deserialize, Serialize}; + + #[derive(Debug, serde::Serialize, serde::Deserialize, Clone, PartialEq)] + #[serde(deny_unknown_fields)] + pub(super) struct AuthorizationModel { + type_definitions: Vec, + schema_version: String, + #[serde(skip_serializing_if = "Option::is_none")] + conditions: Option>, + } + + impl From for super::AuthorizationModel { + fn from(value: AuthorizationModel) -> Self { + super::AuthorizationModel { + type_definitions: value + .type_definitions + .into_iter() + .map(std::convert::Into::into) + .collect(), + schema_version: value.schema_version, + conditions: value + .conditions + .map(|conditions| conditions.into_iter().map(|(k, v)| (k, v.into())).collect()), + } + } + } + + impl TryFrom for AuthorizationModel { + type Error = String; + + fn try_from(value: super::AuthorizationModel) -> Result { + Ok(AuthorizationModel { + type_definitions: value + .type_definitions + .into_iter() + .map(std::convert::Into::into) + .collect(), + schema_version: value.schema_version, + conditions: match value.conditions { + Some(conditions) => Some( + conditions + .into_iter() + .map(|(k, v)| (k, v.try_into())) + .map(|(k, v)| v.map(|v| (k, v))) + .collect::>()?, + ), + None => None, + }, + }) + } + } + + 
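// Illustrative sketch (editorial note, not part of this patch): the ser_de types below mirror the
// OpenFGA JSON schema bundled at authz/openfga/collaboration_model/v1/schema.json, and the
// `From`/`TryFrom` impls in this module translate between them and the protobuf types from
// `openfga_rs`. The intended end-to-end flow is roughly the following; the function name
// `write_active_model` and the use of a plain `tonic::transport::Channel` client are assumptions
// for illustration only.
async fn write_active_model(
    mut client: openfga_rs::open_fga_service_client::OpenFgaServiceClient<
        tonic::transport::Channel,
    >,
    store_id: String,
) -> Result<String, tonic::Status> {
    // schema.json is parsed once (cached in MODEL); pick the currently active version.
    let model = super::CollaborationModelVersion::active().get_model();
    // Convert the deserialized model into the gRPC write request and send it;
    // OpenFGA responds with the id of the newly written authorization model.
    let response = client
        .write_authorization_model(model.into_write_request(store_id))
        .await?;
    Ok(response.into_inner().authorization_model_id)
}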
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] + #[serde(deny_unknown_fields)] + pub struct TypeDefinition { + #[serde(rename = "type")] + r#type: String, + #[serde(rename = "relations", skip_serializing_if = "Option::is_none")] + relations: Option>, + #[serde(rename = "metadata", skip_serializing_if = "Option::is_none")] + metadata: Option>, + } + + #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] + #[serde(deny_unknown_fields)] + struct Metadata { + #[serde(rename = "relations", skip_serializing_if = "Option::is_none")] + relations: Option>, + } + + #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] + #[serde(deny_unknown_fields)] + struct RelationMetadata { + #[serde( + rename = "directly_related_user_types", + skip_serializing_if = "Option::is_none" + )] + directly_related_user_types: Option>, + } + + #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] + #[serde(deny_unknown_fields)] + struct RelationReference { + #[serde(rename = "type")] + r#type: String, + #[serde(rename = "relation", skip_serializing_if = "Option::is_none")] + relation: Option, + #[serde(rename = "wildcard", skip_serializing_if = "Option::is_none")] + wildcard: Option, + /// The name of a condition that is enforced over the allowed relation. + #[serde(rename = "condition", skip_serializing_if = "Option::is_none")] + condition: Option, + } + + #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] + #[serde(deny_unknown_fields)] + struct Userset { + /// A `DirectUserset` is a sentinel message for referencing the direct members specified by an object/relation mapping. + #[serde(rename = "this", skip_serializing_if = "Option::is_none")] + this: Option, + #[serde(rename = "computedUserset", skip_serializing_if = "Option::is_none")] + #[allow(clippy::struct_field_names)] + computed_userset: Option>, + #[serde(rename = "tupleToUserset", skip_serializing_if = "Option::is_none")] + #[allow(clippy::struct_field_names)] + tuple_to_userset: Option>, + #[serde(rename = "union", skip_serializing_if = "Option::is_none")] + union: Option>, + #[serde(rename = "intersection", skip_serializing_if = "Option::is_none")] + intersection: Option>, + #[serde(rename = "difference", skip_serializing_if = "Option::is_none")] + difference: Option>, + } + + #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] + pub struct Condition { + #[serde(rename = "name")] + pub name: String, + /// A Google CEL expression, expressed as a string. + #[serde(rename = "expression")] + pub expression: String, + /// A map of parameter names to the parameter's defined type reference. 
+ #[serde(rename = "parameters", skip_serializing_if = "Option::is_none")] + pub parameters: Option>, + } + + #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] + pub struct ConditionParamTypeRef { + #[serde(rename = "type_name")] + pub type_name: TypeName, + #[serde(rename = "generic_types", skip_serializing_if = "Option::is_none")] + pub generic_types: Option>, + } + + #[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] + pub enum TypeName { + #[serde(rename = "TYPE_NAME_UNSPECIFIED")] + Unspecified, + #[serde(rename = "TYPE_NAME_ANY")] + Any, + #[serde(rename = "TYPE_NAME_BOOL")] + Bool, + #[serde(rename = "TYPE_NAME_STRING")] + String, + #[serde(rename = "TYPE_NAME_INT")] + Int, + #[serde(rename = "TYPE_NAME_UINT")] + Uint, + #[serde(rename = "TYPE_NAME_DOUBLE")] + Double, + #[serde(rename = "TYPE_NAME_DURATION")] + Duration, + #[serde(rename = "TYPE_NAME_TIMESTAMP")] + Timestamp, + #[serde(rename = "TYPE_NAME_MAP")] + Map, + #[serde(rename = "TYPE_NAME_LIST")] + List, + #[serde(rename = "TYPE_NAME_IPADDRESS")] + Ipaddress, + } + + #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] + #[serde(deny_unknown_fields)] + struct ObjectRelation { + #[serde(rename = "object", skip_serializing_if = "Option::is_none")] + object: Option, + #[serde(rename = "relation", skip_serializing_if = "Option::is_none")] + relation: Option, + } + + #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] + #[serde(deny_unknown_fields)] + struct V1PeriodTupleToUserset { + #[serde(rename = "tupleset")] + tupleset: Box, + #[serde(rename = "computedUserset")] + computed_userset: Box, + } + + #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] + #[serde(deny_unknown_fields)] + struct V1PeriodDifference { + #[serde(rename = "base")] + base: Box, + #[serde(rename = "subtract")] + subtract: Box, + } + + #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] + #[serde(deny_unknown_fields)] + struct Usersets { + #[serde(rename = "child")] + child: Vec, + } + + impl From for openfga_rs::Condition { + fn from(value: Condition) -> Self { + openfga_rs::Condition { + name: value.name, + expression: value.expression, + parameters: value + .parameters + .map(|parameters| parameters.into_iter().map(|(k, v)| (k, v.into())).collect()) + .unwrap_or_default(), + metadata: None, + } + } + } + + impl From for openfga_rs::ConditionParamTypeRef { + fn from(value: ConditionParamTypeRef) -> Self { + let type_name: openfga_rs::condition_param_type_ref::TypeName = value.type_name.into(); + openfga_rs::ConditionParamTypeRef { + type_name: type_name.into(), + generic_types: value + .generic_types + .map(|generic_types| { + generic_types + .into_iter() + .map(std::convert::Into::into) + .collect() + }) + .unwrap_or_default(), + } + } + } + + impl From for openfga_rs::condition_param_type_ref::TypeName { + fn from(value: TypeName) -> Self { + match value { + TypeName::Unspecified => { + openfga_rs::condition_param_type_ref::TypeName::Unspecified + } + TypeName::Any => openfga_rs::condition_param_type_ref::TypeName::Any, + TypeName::Bool => openfga_rs::condition_param_type_ref::TypeName::Bool, + TypeName::String => openfga_rs::condition_param_type_ref::TypeName::String, + TypeName::Int => openfga_rs::condition_param_type_ref::TypeName::Int, + TypeName::Uint => openfga_rs::condition_param_type_ref::TypeName::Uint, + TypeName::Double => openfga_rs::condition_param_type_ref::TypeName::Double, + TypeName::Duration => openfga_rs::condition_param_type_ref::TypeName::Duration, + 
TypeName::Timestamp => openfga_rs::condition_param_type_ref::TypeName::Timestamp, + TypeName::Map => openfga_rs::condition_param_type_ref::TypeName::Map, + TypeName::List => openfga_rs::condition_param_type_ref::TypeName::List, + TypeName::Ipaddress => openfga_rs::condition_param_type_ref::TypeName::Ipaddress, + } + } + } + + impl From for openfga_rs::TypeDefinition { + fn from(value: TypeDefinition) -> Self { + openfga_rs::TypeDefinition { + r#type: value.r#type, + relations: value + .relations + .map(|relations| relations.into_iter().map(|(k, v)| (k, v.into())).collect()) + .unwrap_or_default(), + metadata: value.metadata.map(|metadata| (*metadata).into()), + } + } + } + + impl From for openfga_rs::Metadata { + fn from(value: Metadata) -> Self { + openfga_rs::Metadata { + relations: value + .relations + .map(|relations| relations.into_iter().map(|(k, v)| (k, v.into())).collect()) + .unwrap_or_default(), + source_info: None, + // https://github.com/openfga/api/blob/339f6b8ff0f0d25f77d374ede323daebe98cbe7e/openfga/v1/authzmodel.proto#L86 + module: String::new(), + } + } + } + + impl From for openfga_rs::RelationMetadata { + fn from(value: RelationMetadata) -> Self { + openfga_rs::RelationMetadata { + directly_related_user_types: value + .directly_related_user_types + .map(|v| v.into_iter().map(std::convert::Into::into).collect()) + .unwrap_or_default(), + module: String::new(), + source_info: None, + } + } + } + + impl From for openfga_rs::RelationReference { + fn from(value: RelationReference) -> Self { + openfga_rs::RelationReference { + r#type: value.r#type, + relation_or_wildcard: value.relation.map(RelationOrWildcard::Relation).or(value + .wildcard + .map(|_| RelationOrWildcard::Wildcard(openfga_rs::Wildcard {}))), + condition: value.condition.unwrap_or_default(), + } + } + } + + impl From for openfga_rs::Userset { + fn from(value: Userset) -> Self { + let userset = if let Some(_this) = value.this { + Some(openfga_rs::userset::Userset::This( + openfga_rs::DirectUserset {}, + )) + } else if let Some(computed_userset) = value.computed_userset { + Some(openfga_rs::userset::Userset::ComputedUserset( + (*computed_userset).into(), + )) + } else if let Some(tuple_to_userset) = value.tuple_to_userset { + Some(openfga_rs::userset::Userset::TupleToUserset( + (*tuple_to_userset).into(), + )) + } else if let Some(union) = value.union { + Some(openfga_rs::userset::Userset::Union((*union).into())) + } else if let Some(intersection) = value.intersection { + Some(openfga_rs::userset::Userset::Intersection( + (*intersection).into(), + )) + } else { + value.difference.map(|difference| { + openfga_rs::userset::Userset::Difference(Box::new((*difference).into())) + }) + }; + + openfga_rs::Userset { userset } + } + } + + impl From for openfga_rs::ObjectRelation { + fn from(value: ObjectRelation) -> Self { + openfga_rs::ObjectRelation { + object: value.object.unwrap_or_default(), + relation: value.relation.unwrap_or_default(), + } + } + } + + impl From for openfga_rs::TupleToUserset { + fn from(value: V1PeriodTupleToUserset) -> Self { + openfga_rs::TupleToUserset { + tupleset: Some((*value.tupleset).into()), + computed_userset: Some((*value.computed_userset).into()), + } + } + } + + impl From for openfga_rs::Difference { + fn from(value: V1PeriodDifference) -> Self { + openfga_rs::Difference { + base: Some(Box::new((*value.base).into())), + subtract: Some(Box::new((*value.subtract).into())), + } + } + } + + impl From for openfga_rs::Usersets { + fn from(value: Usersets) -> Self { + openfga_rs::Usersets { + 
child: value + .child + .into_iter() + .map(std::convert::Into::into) + .collect(), + } + } + } + + impl From for TypeDefinition { + fn from(value: openfga_rs::TypeDefinition) -> Self { + TypeDefinition { + r#type: value.r#type, + relations: Some( + value + .relations + .into_iter() + .map(|(k, v)| (k, v.into())) + .collect(), + ) + .filter(|v: &HashMap| !v.is_empty()), + metadata: value.metadata.map(|metadata| Box::new(metadata.into())), + } + } + } + + impl From for Metadata { + fn from(value: openfga_rs::Metadata) -> Self { + Metadata { + relations: Some( + value + .relations + .into_iter() + .map(|(k, v)| (k, v.into())) + .collect(), + ) + .filter(|v: &HashMap| !v.is_empty()), + } + } + } + + impl From for RelationMetadata { + fn from(value: openfga_rs::RelationMetadata) -> Self { + RelationMetadata { + directly_related_user_types: Some( + value + .directly_related_user_types + .into_iter() + .map(std::convert::Into::into) + .collect(), + ) + .filter(|v: &Vec| !v.is_empty()), + } + } + } + + impl From for RelationReference { + fn from(value: openfga_rs::RelationReference) -> Self { + RelationReference { + r#type: value.r#type, + relation: value.relation_or_wildcard.as_ref().and_then(|v| match v { + RelationOrWildcard::Relation(v) => Some(v.clone()), + RelationOrWildcard::Wildcard(_) => None, + }), + wildcard: value.relation_or_wildcard.as_ref().and_then(|v| match v { + RelationOrWildcard::Wildcard(_) => Some(serde_json::json!({})), + RelationOrWildcard::Relation(_) => None, + }), + condition: Some(value.condition).filter(|s| !s.is_empty()), + } + } + } + + impl TryFrom for Condition { + type Error = String; + + fn try_from(value: openfga_rs::Condition) -> Result { + Ok(Condition { + name: value.name, + expression: value.expression, + parameters: Some( + value + .parameters + .into_iter() + .map(|(k, v)| (k, v.try_into())) + .map(|(k, v)| v.map(|v| (k, v))) + .collect::>()?, + ) + .filter(|v: &HashMap| !v.is_empty()), + }) + } + } + + impl TryFrom for ConditionParamTypeRef { + type Error = String; + + fn try_from(value: openfga_rs::ConditionParamTypeRef) -> Result { + let type_name = + openfga_rs::condition_param_type_ref::TypeName::try_from(value.type_name) + .map_err(|e| format!("Failed to convert TypeName: {e}"))?; + + Ok(ConditionParamTypeRef { + type_name: type_name.into(), + generic_types: Some( + value + .generic_types + .into_iter() + .map(std::convert::TryInto::try_into) + .collect::>()?, + ) + .filter(|v: &Vec| !v.is_empty()), + }) + } + } + + impl From for TypeName { + fn from(value: openfga_rs::condition_param_type_ref::TypeName) -> Self { + match value { + openfga_rs::condition_param_type_ref::TypeName::Unspecified => { + TypeName::Unspecified + } + openfga_rs::condition_param_type_ref::TypeName::Any => TypeName::Any, + openfga_rs::condition_param_type_ref::TypeName::Bool => TypeName::Bool, + openfga_rs::condition_param_type_ref::TypeName::String => TypeName::String, + openfga_rs::condition_param_type_ref::TypeName::Int => TypeName::Int, + openfga_rs::condition_param_type_ref::TypeName::Uint => TypeName::Uint, + openfga_rs::condition_param_type_ref::TypeName::Double => TypeName::Double, + openfga_rs::condition_param_type_ref::TypeName::Duration => TypeName::Duration, + openfga_rs::condition_param_type_ref::TypeName::Timestamp => TypeName::Timestamp, + openfga_rs::condition_param_type_ref::TypeName::Map => TypeName::Map, + openfga_rs::condition_param_type_ref::TypeName::List => TypeName::List, + openfga_rs::condition_param_type_ref::TypeName::Ipaddress => 
TypeName::Ipaddress, + } + } + } + + impl From for Userset { + fn from(value: openfga_rs::Userset) -> Self { + match value.userset { + Some(openfga_rs::userset::Userset::This(_)) => Userset { + this: Some(serde_json::json!({})), + computed_userset: None, + tuple_to_userset: None, + union: None, + intersection: None, + difference: None, + }, + Some(openfga_rs::userset::Userset::ComputedUserset(v)) => Userset { + this: None, + computed_userset: Some(Box::new(v.into())), + tuple_to_userset: None, + union: None, + intersection: None, + difference: None, + }, + Some(openfga_rs::userset::Userset::TupleToUserset(v)) => Userset { + this: None, + computed_userset: None, + tuple_to_userset: Some(Box::new(v.into())), + union: None, + intersection: None, + difference: None, + }, + Some(openfga_rs::userset::Userset::Union(v)) => Userset { + this: None, + computed_userset: None, + tuple_to_userset: None, + union: Some(Box::new(v.into())), + intersection: None, + difference: None, + }, + Some(openfga_rs::userset::Userset::Intersection(v)) => Userset { + this: None, + computed_userset: None, + tuple_to_userset: None, + union: None, + intersection: Some(Box::new(v.into())), + difference: None, + }, + Some(openfga_rs::userset::Userset::Difference(v)) => Userset { + this: None, + computed_userset: None, + tuple_to_userset: None, + union: None, + intersection: None, + difference: Some(Box::new((*v).into())), + }, + None => Userset { + this: None, + computed_userset: None, + tuple_to_userset: None, + union: None, + intersection: None, + difference: None, + }, + } + } + } + + impl From for ObjectRelation { + fn from(value: openfga_rs::ObjectRelation) -> Self { + ObjectRelation { + object: Some(value.object).filter(|s| !s.is_empty()), + relation: Some(value.relation).filter(|s| !s.is_empty()), + } + } + } + + impl From for V1PeriodTupleToUserset { + fn from(value: openfga_rs::TupleToUserset) -> Self { + V1PeriodTupleToUserset { + tupleset: Box::new(value.tupleset.map_or( + ObjectRelation { + object: None, + relation: None, + }, + Into::into, + )), + computed_userset: Box::new(value.computed_userset.map_or( + ObjectRelation { + object: None, + relation: None, + }, + Into::into, + )), + } + } + } + + impl From for V1PeriodDifference { + fn from(value: openfga_rs::Difference) -> Self { + V1PeriodDifference { + base: Box::new(value.base.map_or( + Userset { + this: None, + computed_userset: None, + tuple_to_userset: None, + union: None, + intersection: None, + difference: None, + }, + |v| (*v).into(), + )), + subtract: Box::new(value.subtract.map_or( + Userset { + this: None, + computed_userset: None, + tuple_to_userset: None, + union: None, + intersection: None, + difference: None, + }, + |v| (*v).into(), + )), + } + } + } + + impl From for Usersets { + fn from(value: openfga_rs::Usersets) -> Self { + Usersets { + child: value.child.into_iter().map(Into::into).collect(), + } + } + } +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn test_load_collaboration_model() { + let _model = MODEL.get_model(&CollaborationModelVersion::V1); + } + + #[test] + fn test_load_ser_de_roundtrip() { + let ser_model: ser_de::AuthorizationModel = + serde_json::from_value((*MODEL_V1_JSON).clone()).unwrap(); + println!("{}", serde_json::to_value(&ser_model).unwrap()); + assert_eq!(serde_json::to_value(ser_model).unwrap(), *MODEL_V1_JSON); + } + + #[test] + fn test_proto_roundtrip() { + let proto = MODEL.get_model(&CollaborationModelVersion::V1).clone(); + let write: openfga_rs::WriteAuthorizationModelRequest = + 
proto.into_write_request("store_id".to_string()); + let proto2 = AuthorizationModel { + schema_version: write.schema_version, + type_definitions: write.type_definitions, + conditions: Some(write.conditions).filter(|v| !v.is_empty()), + }; + let model: ser_de::AuthorizationModel = proto2.clone().try_into().unwrap(); + let value = serde_json::to_value(model).unwrap(); + assert!(value == *MODEL_V1_JSON); + } +} diff --git a/crates/iceberg-catalog/src/service/authz/implementations/openfga/service_ext.rs b/crates/iceberg-catalog/src/service/authz/implementations/openfga/service_ext.rs new file mode 100644 index 00000000..dd35c7e5 --- /dev/null +++ b/crates/iceberg-catalog/src/service/authz/implementations/openfga/service_ext.rs @@ -0,0 +1,238 @@ +use crate::service::authz::ErrorModel; +use openfga_rs::open_fga_service_client::OpenFgaServiceClient; +use openfga_rs::{ + tonic::{ + self, + codegen::{Body, Bytes, StdError}, + }, + ListStoresRequest, Store, +}; + +use super::models::AuthorizationModel; + +const MAX_PAGES: u32 = 100; + +#[async_trait::async_trait] +pub trait ClientHelper { + async fn get_store_by_name( + &mut self, + store_name: &str, + ) -> std::result::Result; + + async fn get_auth_model_id( + &mut self, + store_id: String, + model: &AuthorizationModel, + ) -> std::result::Result, ErrorModel>; +} + +#[async_trait::async_trait] +impl ClientHelper for OpenFgaServiceClient +where + T: Clone + Sync + Send + 'static, + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + Send + 'static, + ::Error: Into + Send, + , + >>::Future: Send, +{ + async fn get_store_by_name( + &mut self, + store_name: &str, + ) -> std::result::Result { + let mut continuation_token = String::new(); + let count = 0; + let store = loop { + let stores = self + .list_stores(ListStoresRequest { + page_size: Some(100), + continuation_token: continuation_token.clone(), + }) + .await + .map_err(|e| { + ErrorModel::internal( + "Failed to list stores", + "OpenFGAConnection", + Some(Box::new(e)), + ) + })? + .into_inner(); + let num_stores = stores.stores.len(); + continuation_token.clone_from(&stores.continuation_token); + let store = stores.stores.into_iter().find(|s| s.name == store_name); + if let Some(store) = store { + break store; + } + + if continuation_token.is_empty() || num_stores < 100 || count > MAX_PAGES { + return Err(ErrorModel::internal( + format!("Store {store_name} not found"), + "OpenFGAConnection", + None, + )); + } + }; + Ok(store) + } + + async fn get_auth_model_id( + &mut self, + store_id: String, + model: &AuthorizationModel, + ) -> std::result::Result, ErrorModel> { + let mut continuation_token = String::new(); + let count = 0; + let model_id = loop { + let models = self + .read_authorization_models(openfga_rs::ReadAuthorizationModelsRequest { + store_id: store_id.clone(), + page_size: Some(100), + continuation_token: continuation_token.clone(), + }) + .await + .map_err(|e| { + ErrorModel::internal( + "Failed to list authorization models", + "OpenFGAConnection", + Some(Box::new(e)), + ) + })? 
+ .into_inner(); + let num_models = models.authorization_models.len(); + continuation_token.clone_from(&models.continuation_token); + let found_model = models + .authorization_models + .into_iter() + .map(|m| (m.id.clone(), AuthorizationModel::from(m))) + .find(|m| &m.1 == model) + .map(|m| m.0); + if let Some(found_model) = found_model { + break Some(found_model); + } + + if continuation_token.is_empty() || num_models < 100 || count > MAX_PAGES { + break None; + } + }; + + Ok(model_id) + } +} + +#[cfg(test)] +mod test { + use super::*; + + // #[needs_env_var::needs_env_var("TEST_OPENFGA" = 1)] + mod openfga { + use openfga_rs::{CreateStoreRequest, WriteAuthorizationModelRequest}; + use tonic::transport::Channel; + + use super::*; + use crate::service::authz::implementations::openfga::{ + client::new_unauthenticated_client, CollaborationModelVersion, AUTH_CONFIG, + }; + + async fn new_store(client: &mut OpenFgaServiceClient) -> String { + let store_name = uuid::Uuid::now_v7().to_string(); + client + .create_store(CreateStoreRequest { + name: store_name.clone(), + }) + .await + .unwrap(); + store_name + } + + #[tokio::test] + async fn test_get_store_by_name() { + let mut client = new_unauthenticated_client(AUTH_CONFIG.endpoint.clone()) + .await + .expect("Failed to create OpenFGA client"); + + let store_name = new_store(&mut client).await; + let store = client.get_store_by_name(&store_name).await.unwrap(); + assert_eq!(store.name, store_name); + + let retrieved_store = client.get_store_by_name("non-existent-store").await; + assert!(retrieved_store.is_err()); + + let retrieved_store = client.get_store_by_name(&store_name).await.unwrap(); + assert_eq!(retrieved_store.name, store_name); + assert_eq!(retrieved_store, store); + } + + #[tokio::test] + async fn test_get_store_by_name_pagination() { + // Create 201 stores + let mut client = new_unauthenticated_client(AUTH_CONFIG.endpoint.clone()) + .await + .expect("Failed to create OpenFGA client"); + + let mut store_names = std::collections::HashSet::new(); + for _ in 0..201 { + store_names.insert(new_store(&mut client).await); + } + + assert_eq!(store_names.len(), 201); + + // Get all stores + for store_name in &store_names { + let store = client.get_store_by_name(store_name).await.unwrap(); + assert_eq!(store.name, *store_name); + } + } + + #[tokio::test] + async fn test_get_auth_model_id() { + let mut client = new_unauthenticated_client(AUTH_CONFIG.endpoint.clone()) + .await + .expect("Failed to create OpenFGA client"); + + let store_name = new_store(&mut client).await; + let store = client.get_store_by_name(&store_name).await.unwrap(); + + let test_model = CollaborationModelVersion::active().get_model(); + + let model = client + .write_authorization_model(WriteAuthorizationModelRequest { + store_id: store.id.clone(), + type_definitions: test_model.type_definitions.clone(), + schema_version: test_model.schema_version.clone(), + conditions: test_model.conditions.clone().unwrap_or_default(), + }) + .await + .unwrap() + .into_inner(); + + let model_id = client + .get_auth_model_id(store.id.clone(), &test_model) + .await + .unwrap() + .unwrap(); + + assert_eq!(model_id, model.authorization_model_id); + } + + #[tokio::test] + async fn test_get_auth_model_id_not_found() { + let mut client = new_unauthenticated_client(AUTH_CONFIG.endpoint.clone()) + .await + .expect("Failed to create OpenFGA client"); + + let store_name = new_store(&mut client).await; + let store = client.get_store_by_name(&store_name).await.unwrap(); + + let test_model = 
CollaborationModelVersion::active().get_model(); + + let model_id = client + .get_auth_model_id(store.id.clone(), &test_model) + .await + .unwrap(); + + assert!(model_id.is_none()); + } + } +} diff --git a/crates/iceberg-catalog/src/service/authz/mod.rs b/crates/iceberg-catalog/src/service/authz/mod.rs new file mode 100644 index 00000000..b5cb15b8 --- /dev/null +++ b/crates/iceberg-catalog/src/service/authz/mod.rs @@ -0,0 +1,383 @@ +use super::health::HealthExt; +use super::{NamespaceIdentUuid, ProjectIdent, TableIdentUuid, WarehouseIdent}; +use crate::api::iceberg::v1::Result; +use crate::request_metadata::RequestMetadata; +use std::collections::HashSet; + +pub mod implementations; + +use iceberg_ext::catalog::rest::ErrorModel; +pub use implementations::allow_all::AllowAllAuthorizer; + +#[derive(Debug, Clone, strum_macros::Display)] +#[strum(serialize_all = "snake_case")] +pub enum ServerAction { + /// Can create items inside the server (can create Warehouses). + CanCreate, + /// List projects on this server. Returned projects + /// are filtered by the user's permissions (`CanShowInList`) + CanListAllProjects, +} + +#[derive(Debug, Clone, strum_macros::Display)] +#[strum(serialize_all = "snake_case")] +pub enum ProjectAction { + CanCreateWarehouse, + CanDelete, + CanRename, + CanGetMetadata, + CanListWarehouses, + CanShowInList, +} + +#[derive(Debug, Clone, strum_macros::Display)] +#[strum(serialize_all = "snake_case")] +pub enum WarehouseAction { + CanCreateNamespace, + /// Can delete this warehouse permanently. + CanDelete, + CanUpdateStorage, + CanUpdateStorageCredential, + CanGetMetadata, + CanGetConfig, + CanListNamespaces, + /// Base permission to use any endpoint prefixed with `/api/v1/warehouse/{warehouse_id}`. + /// This is used to pre-check endpoints for which the actual object id must be looked up. + CanUse, + CanShowInList, + CanDeactivate, + CanActivate, + CanRename, + CanListDeletedTabulars, +} + +#[derive(Debug, Clone, strum_macros::Display)] +#[strum(serialize_all = "snake_case")] +pub enum NamespaceAction { + CanCreateTable, + CanCreateView, + CanCreateNamespace, + CanDelete, + CanUpdateProperties, + CanGetMetadata, + CanListTables, + CanListViews, + CanListNamespaces, +} + +#[derive(Debug, Clone, strum_macros::Display)] +#[strum(serialize_all = "snake_case")] +pub enum TableAction { + CanDrop, + CanWriteData, + CanReadData, + CanGetMetadata, + CanCommit, + CanRename, + CanShowInList, +} + +#[derive(Debug, Clone, strum_macros::Display)] +#[strum(serialize_all = "snake_case")] +pub enum ViewAction { + CanDrop, + CanGetMetadata, + CanCommit, + CanShowInList, + CanRename, +} + +pub trait TableUuid { + fn table_uuid(&self) -> TableIdentUuid; +} + +impl TableUuid for TableIdentUuid { + fn table_uuid(&self) -> TableIdentUuid { + *self + } +} + +#[derive(Debug, Clone)] +pub enum ListProjectsResponse { + /// List of projects that the user is allowed to see. + Projects(HashSet), + /// The user is allowed to see all projects. + All, +} + +#[async_trait::async_trait] +/// Interface to provide AuthZ functions to the catalog. +pub trait Authorizer +where + Self: Send + Sync + 'static + HealthExt + Clone, +{ + /// Return Ok(true) if the action is allowed, otherwise return Ok(false). + /// Return Err for internal errors. + async fn list_projects(&self, metadata: &RequestMetadata) -> Result; + + /// Return Ok(true) if the action is allowed, otherwise return Ok(false). + /// Return Err for internal errors. 
+    async fn is_allowed_server_action(
+        &self,
+        metadata: &RequestMetadata,
+        action: &ServerAction,
+    ) -> Result<bool>;
+
+    /// Return Ok(true) if the action is allowed, otherwise return Ok(false).
+    /// Return Err for internal errors.
+    async fn is_allowed_project_action(
+        &self,
+        metadata: &RequestMetadata,
+        project_id: ProjectIdent,
+        action: &ProjectAction,
+    ) -> Result<bool>;
+
+    /// Return Ok(true) if the action is allowed, otherwise return Ok(false).
+    /// Return Err for internal errors.
+    async fn is_allowed_warehouse_action(
+        &self,
+        metadata: &RequestMetadata,
+        warehouse_id: WarehouseIdent,
+        action: &WarehouseAction,
+    ) -> Result<bool>;
+
+    /// Return Ok(true) if the action is allowed, otherwise return Ok(false).
+    /// Return Err for internal errors.
+    async fn is_allowed_namespace_action(
+        &self,
+        metadata: &RequestMetadata,
+        warehouse_id: WarehouseIdent,
+        namespace_id: NamespaceIdentUuid,
+        action: &NamespaceAction,
+    ) -> Result<bool>;
+
+    /// Return Ok(true) if the action is allowed, otherwise return Ok(false).
+    /// Return Err for internal errors.
+    async fn is_allowed_table_action(
+        &self,
+        metadata: &RequestMetadata,
+        warehouse_id: WarehouseIdent,
+        table_id: TableIdentUuid,
+        action: &TableAction,
+    ) -> Result<bool>;
+
+    /// Return Ok(true) if the action is allowed, otherwise return Ok(false).
+    /// Return Err for internal errors.
+    async fn is_allowed_view_action(
+        &self,
+        metadata: &RequestMetadata,
+        warehouse_id: WarehouseIdent,
+        view_id: TableIdentUuid,
+        action: &ViewAction,
+    ) -> Result<bool>;
+
+    async fn require_server_action(
+        &self,
+        metadata: &RequestMetadata,
+        action: &ServerAction,
+    ) -> Result<()> {
+        if self.is_allowed_server_action(metadata, action).await? {
+            Ok(())
+        } else {
+            Err(ErrorModel::forbidden(
+                format!("Forbidden action {action} on server"),
+                "ServerActionForbidden",
+                None,
+            )
+            .into())
+        }
+    }
+
+    async fn require_project_action(
+        &self,
+        metadata: &RequestMetadata,
+        project_id: ProjectIdent,
+        action: &ProjectAction,
+    ) -> Result<()> {
+        if self
+            .is_allowed_project_action(metadata, project_id, action)
+            .await?
+        {
+            Ok(())
+        } else {
+            Err(ErrorModel::forbidden(
+                format!("Forbidden action {action} on project {project_id}"),
+                "ProjectActionForbidden",
+                None,
+            )
+            .into())
+        }
+    }
+
+    async fn require_warehouse_action(
+        &self,
+        metadata: &RequestMetadata,
+        warehouse_id: WarehouseIdent,
+        action: &WarehouseAction,
+    ) -> Result<()> {
+        if self
+            .is_allowed_warehouse_action(metadata, warehouse_id, action)
+            .await?
+        {
+            Ok(())
+        } else {
+            Err(ErrorModel::forbidden(
+                format!("Forbidden action {action} on warehouse {warehouse_id}"),
+                "WarehouseActionForbidden",
+                None,
+            )
+            .into())
+        }
+    }
+
+    async fn require_namespace_action(
+        &self,
+        metadata: &RequestMetadata,
+        warehouse_id: WarehouseIdent,
+        // Outer error: Internal error that failed to fetch the namespace.
+        // Ok(None): Namespace does not exist.
+        // Ok(Some(namespace_id)): Namespace exists.
+        namespace_id: Result<Option<NamespaceIdentUuid>>,
+        action: &NamespaceAction,
+    ) -> Result<NamespaceIdentUuid> {
+        // It is important to throw the same error if the namespace does not exist (None) or if the action is not allowed,
+        // to avoid leaking information about the existence of the namespace.
+        let msg = format!("Namespace action {action} forbidden");
+        let typ = "NamespaceActionForbidden";
+
+        match namespace_id {
+            Ok(None) => Err(ErrorModel::forbidden(msg, typ, None).into()),
+            Ok(Some(namespace_id)) => {
+                if self
+                    .is_allowed_namespace_action(metadata, warehouse_id, namespace_id, action)
+                    .await?
+ { + Ok(namespace_id) + } else { + Err(ErrorModel::forbidden(msg, typ, None).into()) + } + } + Err(e) => Err(ErrorModel::internal(msg, typ, e.error.source) + .append_detail(format!("Original Type: {}", e.error.r#type)) + .append_detail(e.error.message) + .append_details(e.error.stack) + .into()), + } + } + + async fn require_table_action( + &self, + metadata: &RequestMetadata, + warehouse_id: WarehouseIdent, + table_id: Result>, + action: &TableAction, + ) -> Result { + let msg = format!("Table action {action} forbidden"); + let typ = "TableActionForbidden"; + + match table_id { + Ok(None) => Err(ErrorModel::forbidden(msg, typ, None).into()), + Ok(Some(table_id)) => { + if self + .is_allowed_table_action(metadata, warehouse_id, table_id.table_uuid(), action) + .await? + { + Ok(table_id) + } else { + Err(ErrorModel::forbidden(msg, typ, None).into()) + } + } + Err(e) => Err(ErrorModel::internal(msg, typ, e.error.source) + .append_detail(format!("Original Type: {}", e.error.r#type)) + .append_detail(e.error.message) + .append_details(e.error.stack) + .into()), + } + } + + async fn require_view_action( + &self, + metadata: &RequestMetadata, + warehouse_id: WarehouseIdent, + view_id: Result>, + action: &ViewAction, + ) -> Result { + let msg = format!("View action {action} forbidden"); + let typ = "ViewActionForbidden"; + + match view_id { + Ok(None) => Err(ErrorModel::forbidden(msg, typ, None).into()), + Ok(Some(view_id)) => { + if self + .is_allowed_view_action(metadata, warehouse_id, view_id, action) + .await? + { + Ok(view_id) + } else { + Err(ErrorModel::forbidden(msg, typ, None).into()) + } + } + Err(e) => Err(ErrorModel::internal(msg, typ, e.error.source) + .append_detail(format!("Original Type: {}", e.error.r#type)) + .append_detail(e.error.message) + .append_details(e.error.stack) + .into()), + } + } +} + +// // Contains non-object safe methods +// #[async_trait::async_trait] +// pub(crate) trait AuthorizerExt +// where +// Self: Authorizer, +// { +// async fn require_table_action_generic( +// &self, +// metadata: &RequestMetadata, +// warehouse_id: WarehouseIdent, +// table: Result>, +// action: &TableAction, +// ) -> Result { +// let (return_value, table_id) = match table { +// Ok(Some(table)) => { +// let table_id = table.table_uuid().clone(); +// (Ok(table), Ok(Some(table_id))) +// } +// Ok(None) => ( +// Err(ErrorModel::internal( +// "Unexpected response from require_table_action", +// "RequireTableActionGeneric", +// None, +// )), +// Ok(None), +// ), +// Err(e) => ( +// Err(ErrorModel::internal( +// "Unexpected response from require_table_action", +// "RequireTableActionGeneric", +// None, +// )), +// Err(e), +// ), +// }; +// self.require_table_action(metadata, warehouse_id, table_id, action) +// .await?; +// return_value.map_err(Into::into) +// } +// } + +// impl AuthorizerExt for T where T: Authorizer {} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_namespace_action() { + assert_eq!( + NamespaceAction::CanCreateTable.to_string(), + "can_create_table" + ); + } +} diff --git a/crates/iceberg-catalog/src/service/catalog.rs b/crates/iceberg-catalog/src/service/catalog.rs index 1f1da90e..c5ecd19b 100644 --- a/crates/iceberg-catalog/src/service/catalog.rs +++ b/crates/iceberg-catalog/src/service/catalog.rs @@ -1,11 +1,11 @@ +use super::authz::TableUuid; use super::{ storage::StorageProfile, NamespaceIdentUuid, ProjectIdent, TableIdentUuid, WarehouseIdent, WarehouseStatus, }; pub use crate::api::iceberg::v1::{ - CreateNamespaceRequest, CreateNamespaceResponse, 
ListNamespacesQuery, ListNamespacesResponse, - NamespaceIdent, Result, TableIdent, UpdateNamespacePropertiesRequest, - UpdateNamespacePropertiesResponse, + CreateNamespaceRequest, CreateNamespaceResponse, ListNamespacesQuery, NamespaceIdent, Result, + TableIdent, UpdateNamespacePropertiesRequest, UpdateNamespacePropertiesResponse, }; use crate::api::iceberg::v1::{PaginatedTabulars, PaginationQuery}; use crate::service::health::HealthExt; @@ -14,7 +14,7 @@ use crate::SecretIdent; use crate::api::management::v1::warehouse::TabularDeleteProfile; use crate::service::tabular_idents::{TabularIdentOwned, TabularIdentUuid}; use iceberg::spec::{Schema, SortOrder, TableMetadata, UnboundPartitionSpec, ViewMetadata}; -use iceberg_ext::catalog::rest::ErrorModel; +use iceberg_ext::catalog::rest::{CatalogConfig, ErrorModel}; pub use iceberg_ext::catalog::rest::{CommitTableResponse, CreateTableRequest}; use iceberg_ext::configs::Location; use std::collections::{HashMap, HashSet}; @@ -48,6 +48,12 @@ pub struct GetNamespaceResponse { pub properties: Option>, } +#[derive(Clone, Debug, PartialEq)] +pub struct ListNamespacesResponse { + pub next_page_token: Option, + pub namespaces: HashMap, +} + #[derive(Debug)] pub struct CreateTableResponse { pub table_metadata: TableMetadata, @@ -67,6 +73,7 @@ pub struct LoadTableResponse { pub struct GetTableMetadataResponse { pub table: TableIdent, pub table_id: TableIdentUuid, + pub namespace_id: NamespaceIdentUuid, pub warehouse_id: WarehouseIdent, pub location: String, pub metadata_location: Option, @@ -74,6 +81,12 @@ pub struct GetTableMetadataResponse { pub storage_profile: StorageProfile, } +impl TableUuid for GetTableMetadataResponse { + fn table_uuid(&self) -> TableIdentUuid { + self.table_id + } +} + #[derive(Debug)] pub struct GetStorageConfigResponse { pub storage_profile: StorageProfile, @@ -133,11 +146,61 @@ where type Transaction: Transaction; type State: Clone + Send + Sync + 'static + HealthExt; + // Should only return a warehouse if the warehouse is active. + async fn get_warehouse_by_name( + warehouse_name: &str, + project_id: ProjectIdent, + catalog_state: Self::State, + ) -> Result>; + + /// Wrapper around get_warehouse_by_name that returns + /// not found error if the warehouse does not exist. + async fn require_warehouse_by_name( + warehouse_name: &str, + project_id: ProjectIdent, + catalog_state: Self::State, + ) -> Result { + Self::get_warehouse_by_name(warehouse_name, project_id, catalog_state) + .await? + .ok_or( + ErrorModel::not_found( + format!("Warehouse {warehouse_name} not found"), + "WarehouseNotFound", + None, + ) + .into(), + ) + } + + // Should only return a warehouse if the warehouse is active. + async fn get_config_for_warehouse( + warehouse_id: WarehouseIdent, + catalog_state: Self::State, + ) -> Result>; + + /// Wrapper around get_config_for_warehouse that returns + /// not found error if the warehouse does not exist. + async fn require_config_for_warehouse( + warehouse_id: WarehouseIdent, + catalog_state: Self::State, + ) -> Result { + Self::get_config_for_warehouse(warehouse_id, catalog_state) + .await? + .ok_or( + ErrorModel::not_found( + format!("Warehouse {warehouse_id} not found"), + "WarehouseNotFound", + None, + ) + .into(), + ) + } + // Should only return namespaces if the warehouse is active. 
- async fn list_namespaces( + async fn list_namespaces<'a>( warehouse_id: WarehouseIdent, query: &ListNamespacesQuery, - catalog_state: Self::State, + transaction: >::Transaction<'a>, ) -> Result; async fn create_namespace<'a>( @@ -150,7 +213,7 @@ where // Should only return a namespace if the warehouse is active. async fn get_namespace<'a>( warehouse_id: WarehouseIdent, - namespace: &NamespaceIdent, + namespace_id: NamespaceIdentUuid, transaction: >::Transaction<'a>, ) -> Result; @@ -159,15 +222,15 @@ where /// /// We use this function also to handle the `namespace_exists` endpoint. /// Also return Ok(false) if the warehouse is not active. - async fn namespace_ident_to_id( + async fn namespace_to_id<'a>( warehouse_id: WarehouseIdent, namespace: &NamespaceIdent, - catalog_state: Self::State, + transaction: >::Transaction<'a>, ) -> Result>; async fn drop_namespace<'a>( warehouse_id: WarehouseIdent, - namespace: &NamespaceIdent, + namespace_id: NamespaceIdentUuid, transaction: >::Transaction<'a>, ) -> Result<()>; @@ -177,7 +240,7 @@ where /// be persisted as-is in the catalog. async fn update_namespace_properties<'a>( warehouse_id: WarehouseIdent, - namespace: &NamespaceIdent, + namespace_id: NamespaceIdentUuid, properties: HashMap, transaction: >::Transaction<'a>, ) -> Result<()>; @@ -187,11 +250,11 @@ where transaction: >::Transaction<'a>, ) -> Result; - async fn list_tables( + async fn list_tables<'a>( warehouse_id: WarehouseIdent, namespace: &NamespaceIdent, list_flags: ListFlags, - catalog_state: Self::State, + transaction: >::Transaction<'a>, pagination_query: PaginationQuery, ) -> Result>; @@ -201,11 +264,11 @@ where /// /// We use this function also to handle the `table_exists` endpoint. /// Also return Ok(None) if the warehouse is not active. - async fn table_ident_to_id( + async fn table_to_id<'a>( warehouse_id: WarehouseIdent, table: &TableIdent, list_flags: ListFlags, - catalog_state: Self::State, + transaction: >::Transaction<'a>, ) -> Result>; /// Same as `table_ident_to_id`, but for multiple tables. @@ -229,20 +292,22 @@ where /// Get table metadata by table id. /// If include_staged is true, also return staged tables, /// i.e. tables with no metadata file yet. + /// Return Ok(None) if the table does not exist. async fn get_table_metadata_by_id( warehouse_id: WarehouseIdent, table: TableIdentUuid, list_flags: ListFlags, catalog_state: Self::State, - ) -> Result; + ) -> Result>; /// Get table metadata by location. + /// Return Ok(None) if the table does not exist. async fn get_table_metadata_by_s3_location( warehouse_id: WarehouseIdent, location: &Location, list_flags: ListFlags, catalog_state: Self::State, - ) -> Result; + ) -> Result>; /// Rename a table. Tables may be moved across namespaces. async fn rename_table<'a>( @@ -322,9 +387,6 @@ where // If None, return only active warehouses // If Some, return only warehouses with any of the statuses in the set include_inactive: Option>, - // If None, return all warehouses in the project - // If Some, return only the warehouses in the set - warehouse_id_filter: Option<&HashSet>, catalog_state: Self::State, ) -> Result>; @@ -391,10 +453,10 @@ where /// /// We use this function also to handle the `view_exists` endpoint. /// Also return Ok(None) if the warehouse is not active. 
- async fn view_ident_to_id( + async fn view_to_id<'a>( warehouse_id: WarehouseIdent, view: &TableIdent, - catalog_state: Self::State, + transaction: >::Transaction<'a>, ) -> Result>; async fn create_view<'a>( @@ -412,11 +474,11 @@ where transaction: >::Transaction<'a>, ) -> Result; - async fn list_views( + async fn list_views<'a>( warehouse_id: WarehouseIdent, namespace: &NamespaceIdent, include_deleted: bool, - catalog_state: Self::State, + transaction: >::Transaction<'a>, pagination_query: PaginationQuery, ) -> Result>; diff --git a/crates/iceberg-catalog/src/service/config.rs b/crates/iceberg-catalog/src/service/config.rs deleted file mode 100644 index e8de6266..00000000 --- a/crates/iceberg-catalog/src/service/config.rs +++ /dev/null @@ -1,60 +0,0 @@ -use super::{Catalog, ProjectIdent, WarehouseIdent}; -use crate::api::{CatalogConfig, Result}; -use iceberg_ext::catalog::rest::ErrorModel; - -#[async_trait::async_trait] - -pub trait ConfigProvider -where - Self: Clone + Send + Sync + 'static, -{ - /// Return Ok(Some(x)) only for active warehouses - async fn get_warehouse_by_name( - warehouse_name: &str, - project_id: ProjectIdent, - catalog_state: C::State, - ) -> Result>; - - /// Wrapper around get_warehouse_by_name that returns - /// not found error if the warehouse does not exist. - async fn require_warehouse_by_name( - warehouse_name: &str, - project_id: ProjectIdent, - catalog_state: C::State, - ) -> Result { - Self::get_warehouse_by_name(warehouse_name, project_id, catalog_state) - .await? - .ok_or( - ErrorModel::not_found( - format!("Warehouse {warehouse_name} not found"), - "WarehouseNotFound", - None, - ) - .into(), - ) - } - - // Should only return a warehouse if the warehouse is active. - async fn get_config_for_warehouse( - warehouse_id: WarehouseIdent, - catalog_state: C::State, - ) -> Result>; - - /// Wrapper around get_config_for_warehouse that returns - /// not found error if the warehouse does not exist. - async fn require_config_for_warehouse( - warehouse_id: WarehouseIdent, - catalog_state: C::State, - ) -> Result { - Self::get_config_for_warehouse(warehouse_id, catalog_state) - .await? 
- .ok_or( - ErrorModel::not_found( - format!("Warehouse {warehouse_id} not found"), - "WarehouseNotFound", - None, - ) - .into(), - ) - } -} diff --git a/crates/iceberg-catalog/src/service/contract_verification.rs b/crates/iceberg-catalog/src/service/contract_verification.rs index 996a94e4..049745f0 100644 --- a/crates/iceberg-catalog/src/service/contract_verification.rs +++ b/crates/iceberg-catalog/src/service/contract_verification.rs @@ -1,5 +1,5 @@ #![allow(clippy::module_name_repetitions)] -use crate::service::tabular_idents::TabularIdentUuid; +use crate::service::TabularIdentUuid; use async_trait::async_trait; use iceberg::spec::{TableMetadata, ViewMetadata}; use iceberg::{TableIdent, TableUpdate}; @@ -19,7 +19,7 @@ use std::sync::Arc; /// use async_trait::async_trait; /// use iceberg::spec::{TableMetadata, ViewMetadata}; /// use iceberg::{TableIdent, TableUpdate}; -/// use iceberg_catalog::service::{tabular_idents::TabularIdentUuid, contract_verification::{ContractVerification, ContractVerificationOutcome}}; +/// use iceberg_catalog::service::{TabularIdentUuid, contract_verification::{ContractVerification, ContractVerificationOutcome}}; /// use iceberg_ext::catalog::rest::{ErrorModel, ViewUpdate}; /// /// #[derive(Debug)] diff --git a/crates/iceberg-catalog/src/service/mod.rs b/crates/iceberg-catalog/src/service/mod.rs index 6df808f9..1d0f495e 100644 --- a/crates/iceberg-catalog/src/service/mod.rs +++ b/crates/iceberg-catalog/src/service/mod.rs @@ -1,12 +1,11 @@ -pub mod auth; +pub mod authz; mod catalog; -pub mod config; pub mod contract_verification; pub mod event_publisher; pub mod health; pub mod secrets; pub mod storage; -pub mod tabular_idents; +mod tabular_idents; pub mod task_queue; pub mod token_verification; @@ -19,8 +18,10 @@ pub use catalog::{ UpdateNamespacePropertiesResponse, ViewMetadataWithLocation, }; use std::ops::Deref; +pub(crate) use tabular_idents::TabularIdentBorrowed; +pub use tabular_idents::{TabularIdentOwned, TabularIdentUuid}; -use self::auth::AuthZHandler; +use self::authz::Authorizer; use crate::api::iceberg::v1::Prefix; use crate::api::ThreadSafe as ServiceState; pub use crate::api::{ErrorModel, IcebergErrorResponse}; @@ -31,39 +32,10 @@ use http::StatusCode; pub use secrets::{SecretIdent, SecretStore}; use std::str::FromStr; -#[async_trait::async_trait] -pub trait NamespaceIdentExt -where - Self: Sized, -{ - fn parent(&self) -> Option; -} - -#[async_trait::async_trait] -impl NamespaceIdentExt for NamespaceIdent { - fn parent(&self) -> Option { - let mut name = self.clone().inner(); - // The last element is the namespace itself, everything before it the parent. 
- name.pop(); - - if name.is_empty() { - None - } else { - match NamespaceIdent::from_vec(name) { - Ok(ident) => Some(ident), - // This only fails if the vector is empty, - // in which case there is no parent, so return None - Err(_e) => None, - } - } - } -} - // ---------------- State ---------------- - #[derive(Clone, Debug)] -pub struct State { - pub auth: A::State, +pub struct State { + pub authz: A, pub catalog: C::State, pub secrets: S, pub publisher: CloudEventsPublisher, @@ -71,7 +43,7 @@ pub struct State { pub queues: TaskQueues, } -impl ServiceState for State {} +impl ServiceState for State {} #[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Copy)] pub struct NamespaceIdentUuid(uuid::Uuid); @@ -119,6 +91,12 @@ impl From for NamespaceIdentUuid { } } +impl From<&uuid::Uuid> for NamespaceIdentUuid { + fn from(uuid: &uuid::Uuid) -> Self { + Self(*uuid) + } +} + #[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Copy)] pub struct TableIdentUuid(uuid::Uuid); @@ -170,10 +148,12 @@ impl From for uuid::Uuid { } // ---------------- Identifier ---------------- -#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Copy)] +#[derive( + Debug, serde::Serialize, serde::Deserialize, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Copy, +)] #[cfg_attr(feature = "sqlx", derive(sqlx::Type))] #[cfg_attr(feature = "sqlx", sqlx(transparent))] -// Is UUID here too strict? +#[serde(transparent)] pub struct ProjectIdent(uuid::Uuid); impl Deref for ProjectIdent { diff --git a/crates/iceberg-catalog/src/service/secrets.rs b/crates/iceberg-catalog/src/service/secrets.rs index b3222eea..5050a705 100644 --- a/crates/iceberg-catalog/src/service/secrets.rs +++ b/crates/iceberg-catalog/src/service/secrets.rs @@ -1,5 +1,5 @@ use crate::api::Result; -use crate::service::health::{Health, HealthExt}; +use crate::service::health::HealthExt; use async_trait::async_trait; use serde::de::DeserializeOwned; use serde::Serialize; @@ -27,71 +27,6 @@ where async fn delete_secret(&self, secret_id: &SecretIdent) -> Result<()>; } -#[derive(Debug, Clone)] -pub enum Secrets { - Postgres(crate::implementations::postgres::SecretsState), - KV2(crate::implementations::kv2::SecretsState), -} - -#[async_trait] -impl SecretStore for Secrets { - async fn get_secret_by_id( - &self, - secret_id: &SecretIdent, - ) -> crate::api::Result> { - match self { - Self::Postgres(state) => state.get_secret_by_id(secret_id).await, - Self::KV2(state) => state.get_secret_by_id(secret_id).await, - } - } - - async fn create_secret( - &self, - secret: S, - ) -> crate::api::Result { - match self { - Self::Postgres(state) => state.create_secret(secret).await, - Self::KV2(state) => state.create_secret(secret).await, - } - } - - async fn delete_secret(&self, secret_id: &SecretIdent) -> crate::api::Result<()> { - match self { - Self::Postgres(state) => state.delete_secret(secret_id).await, - Self::KV2(state) => state.delete_secret(secret_id).await, - } - } -} - -#[async_trait] -impl HealthExt for Secrets { - async fn health(&self) -> Vec { - match self { - Self::Postgres(state) => state.health().await, - Self::KV2(state) => state.health().await, - } - } - - async fn update_health(&self) { - match self { - Self::Postgres(state) => state.update_health().await, - Self::KV2(state) => state.update_health().await, - } - } -} - -impl From for Secrets { - fn from(state: crate::implementations::postgres::SecretsState) -> Self { - Self::Postgres(state) - } -} - -impl From for Secrets { - fn from(state: crate::implementations::kv2::SecretsState) 
-> Self { - Self::KV2(state) - } -} - #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] #[cfg_attr(feature = "sqlx", derive(sqlx::Type))] #[cfg_attr(feature = "sqlx", sqlx(transparent))] diff --git a/crates/iceberg-catalog/src/service/token_verification.rs b/crates/iceberg-catalog/src/service/token_verification.rs index ea74211e..80cdcb7d 100644 --- a/crates/iceberg-catalog/src/service/token_verification.rs +++ b/crates/iceberg-catalog/src/service/token_verification.rs @@ -20,11 +20,43 @@ use std::fmt::Debug; use std::str::FromStr; use url::Url; +use super::{ProjectIdent, WarehouseIdent}; + #[derive(Debug, Clone)] pub enum AuthDetails { JWT(Claims), } +impl AuthDetails { + #[must_use] + pub fn actor(&self) -> Actor { + // match self { + // Self::JWT(claims) => Actor::Principal(claims.sub.clone()), + // } + todo!() + } + + #[must_use] + pub fn project_id(&self) -> Option { + None + } + + #[must_use] + pub fn warehouse_id(&self) -> Option { + None + } +} + +#[derive(Debug, Clone)] +pub enum Actor { + Anonymous, + Principal(String), + Role { + principal: String, + assumed_role: String, + }, +} + #[derive(Debug, Clone, Deserialize)] pub struct Claims { pub sub: String, diff --git a/crates/iceberg-ext/src/catalog/rest/error.rs b/crates/iceberg-ext/src/catalog/rest/error.rs index 5bba8476..41943849 100644 --- a/crates/iceberg-ext/src/catalog/rest/error.rs +++ b/crates/iceberg-ext/src/catalog/rest/error.rs @@ -214,8 +214,8 @@ impl ErrorModel { } #[must_use] - pub fn append_details(mut self, details: &[String]) -> Self { - self.stack.extend_from_slice(details); + pub fn append_details(mut self, details: impl IntoIterator) -> Self { + self.stack.extend(details); self } diff --git a/docker/full.Dockerfile b/docker/full.Dockerfile index 54607b80..f8ffdd41 100644 --- a/docker/full.Dockerfile +++ b/docker/full.Dockerfile @@ -4,6 +4,10 @@ FROM rust:1.81-slim-bookworm AS chef RUN apt update -q && \ DEBIAN_FRONTEND=noninteractive apt install -yqq curl libpq-dev pkg-config libssl-dev make perl --no-install-recommends && \ cargo install --version=0.7.4 sqlx-cli --no-default-features --features postgres + +RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v28.2/protoc-28.2-linux-x86_64.zip && \ + unzip protoc-28.2-linux-x86_64.zip -d /usr/local/ && \ + rm protoc-28.2-linux-x86_64.zip RUN cargo install cargo-chef WORKDIR /app @@ -22,7 +26,6 @@ COPY . . ENV SQLX_OFFLINE=true RUN cargo build --release --bin iceberg-catalog -RUN ldd target/release/iceberg-catalog > tmp_file # our final base FROM gcr.io/distroless/cc-debian12:nonroot as base diff --git a/openapi/management-open-api.yaml b/openapi/management-open-api.yaml index 51abbadb..363712b5 100644 --- a/openapi/management-open-api.yaml +++ b/openapi/management-open-api.yaml @@ -287,7 +287,7 @@ paths: post: tags: - management - summary: Update the storage profile of a warehouse + summary: Update the storage profile of a warehouse including its storage credential. operationId: update_storage_profile parameters: - name: warehouse_id @@ -309,7 +309,8 @@ paths: post: tags: - management - summary: Update the storage credential of a warehouse + summary: Update the storage credential of a warehouse. The storage profile is not modified. + description: This can be used to update credentials before expiration. 
operationId: update_storage_credential parameters: - name: warehouse_id @@ -411,7 +412,6 @@ components: type: object required: - warehouse-name - - project-id - storage-profile properties: delete-profile: @@ -419,7 +419,10 @@ components: project-id: type: string format: uuid - description: Project ID in which to create the warehouse. + description: |- + Project ID in which to create the warehouse. + If no default project is set for this server, this field is required. + nullable: true storage-credential: allOf: - $ref: '#/components/schemas/StorageCredential' @@ -430,7 +433,7 @@ components: type: string description: |- Name of the warehouse to create. Must be unique - within a project. + within a project and may not contain "/" CreateWarehouseResponse: type: object required: diff --git a/tests/python/tests/conftest.py b/tests/python/tests/conftest.py index 676fc681..7e25ab78 100644 --- a/tests/python/tests/conftest.py +++ b/tests/python/tests/conftest.py @@ -186,7 +186,7 @@ def create_project(self, name: str) -> uuid.UUID: return uuid.UUID(project_id) def create_warehouse( - self, name: str, project_id: uuid.UUID, storage_config: dict + self, name: str, project_id: uuid.UUID, storage_config: dict ) -> uuid.UUID: """Create a warehouse in this server"""
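
Reviewer note (illustrative, not part of the patch): handlers are expected to guard catalog access
through the new `Authorizer` trait's `require_*` helpers rather than checking permissions inline.
A minimal sketch of the intended call pattern, written as if it lived inside the crate; the
function name `list_namespaces_guarded` and its body are hypothetical:

use crate::api::iceberg::v1::Result;
use crate::request_metadata::RequestMetadata;
use crate::service::authz::{Authorizer, WarehouseAction};
use crate::service::WarehouseIdent;

async fn list_namespaces_guarded<A: Authorizer>(
    authz: &A,
    metadata: &RequestMetadata,
    warehouse_id: WarehouseIdent,
) -> Result<()> {
    // Rejects the request with a "forbidden" error before any catalog work happens
    // if the actor may not list namespaces in this warehouse.
    authz
        .require_warehouse_action(metadata, warehouse_id, &WarehouseAction::CanListNamespaces)
        .await?;
    // ... the actual namespace listing via the Catalog trait would follow here ...
    Ok(())
}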