diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 81bf944b..79c19e38 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -44,7 +44,7 @@ jobs: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: submodules: recursive - - uses: dtolnay/rust-toolchain@1.74.0 + - uses: dtolnay/rust-toolchain@1.75.0 - uses: Swatinem/rust-cache@3cf7f8cc28d1b4e7d01e3783be10a97d55d483c8 # v2.7.1 with: key: udeps @@ -122,7 +122,7 @@ jobs: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: submodules: recursive - - uses: dtolnay/rust-toolchain@1.74.0 + - uses: dtolnay/rust-toolchain@1.75.0 with: components: rustfmt - run: cargo fmt --all -- --check @@ -139,7 +139,7 @@ jobs: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: submodules: recursive - - uses: dtolnay/rust-toolchain@1.74.0 + - uses: dtolnay/rust-toolchain@1.75.0 with: components: clippy - uses: Swatinem/rust-cache@3cf7f8cc28d1b4e7d01e3783be10a97d55d483c8 # v2.7.1 @@ -174,7 +174,7 @@ jobs: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: submodules: recursive - - uses: dtolnay/rust-toolchain@1.74.0 + - uses: dtolnay/rust-toolchain@1.75.0 with: components: rustfmt - uses: Swatinem/rust-cache@3cf7f8cc28d1b4e7d01e3783be10a97d55d483c8 # v2.7.1 @@ -195,7 +195,7 @@ jobs: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: submodules: recursive - - uses: dtolnay/rust-toolchain@1.74.0 + - uses: dtolnay/rust-toolchain@1.75.0 - uses: Swatinem/rust-cache@3cf7f8cc28d1b4e7d01e3783be10a97d55d483c8 # v2.7.1 with: key: test @@ -258,7 +258,7 @@ jobs: with: version: v3.13.3 - name: Set up cargo - uses: dtolnay/rust-toolchain@1.74.0 + uses: dtolnay/rust-toolchain@1.75.0 - uses: Swatinem/rust-cache@3cf7f8cc28d1b4e7d01e3783be10a97d55d483c8 # v2.7.1 with: key: charts @@ -318,7 +318,7 @@ jobs: uses: 
actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: submodules: recursive - - uses: dtolnay/rust-toolchain@1.74.0 + - uses: dtolnay/rust-toolchain@1.75.0 with: components: rustfmt # This step checks if the current run was triggered by a push to a pr (or a pr being created). diff --git a/CHANGELOG.md b/CHANGELOG.md index 0dee77f2..c6fb6c71 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,8 +12,12 @@ All notable changes to this project will be documented in this file. ### Changed -- `operator-rs` `0.56.1` -> `0.57.0` ([#433]). +- Use new label builders ([#454]). + +### Removed + - [BREAKING] `.spec.clusterConfig.listenerClass` has been split to `.spec.nameNodes.config.listenerClass` and `.spec.dataNodes.config.listenerClass`, migration will be required when using `external-unstable` ([#450], [#462]). +- [BREAKING] Removed legacy node selector on roleGroups ([#454]). - Change default value of `dfs.ha.nn.not-become-active-in-safemode` from `true` to `false` ([#458]). ### Fixed @@ -21,9 +25,9 @@ All notable changes to this project will be documented in this file. - Include hdfs principals `dfs.journalnode.kerberos.principal`, `dfs.namenode.kerberos.principal` and `dfs.datanode.kerberos.principal` in the discovery ConfigMap in case Kerberos is enabled ([#451]). 
-[#433]: https://github.com/stackabletech/hdfs-operator/pull/433 [#450]: https://github.com/stackabletech/hdfs-operator/pull/450 [#451]: https://github.com/stackabletech/hdfs-operator/pull/451 +[#454]: https://github.com/stackabletech/hdfs-operator/pull/454 [#458]: https://github.com/stackabletech/hdfs-operator/pull/458 [#460]: https://github.com/stackabletech/hdfs-operator/pull/460 [#462]: https://github.com/stackabletech/hdfs-operator/pull/462 diff --git a/Cargo.lock b/Cargo.lock index 0ac9350f..e1e38184 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -423,6 +423,17 @@ dependencies = [ "syn 2.0.39", ] +[[package]] +name = "delegate" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4e018fccbeeb50ff26562ece792ed06659b9c2dae79ece77c4456bb10d9bf79b" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.39", +] + [[package]] name = "derivative" version = "2.2.0" @@ -673,12 +684,6 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" -[[package]] -name = "hashbrown" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" - [[package]] name = "hashbrown" version = "0.14.0" @@ -840,16 +845,6 @@ dependencies = [ "unicode-normalization", ] -[[package]] -name = "indexmap" -version = "1.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" -dependencies = [ - "autocfg", - "hashbrown 0.12.3", -] - [[package]] name = "indexmap" version = "2.0.0" @@ -857,7 +852,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d5477fe2230a79769d8dc68e0eabf5437907c0457a5614a9e8dddb67f65eb65d" dependencies = [ "equivalent", - "hashbrown 0.14.0", + "hashbrown", ] [[package]] @@ -1045,7 +1040,7 @@ dependencies 
= [ "backoff", "derivative", "futures 0.3.28", - "hashbrown 0.14.0", + "hashbrown", "json-patch", "k8s-openapi", "kube-client", @@ -1205,71 +1200,62 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "opentelemetry" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9591d937bc0e6d2feb6f71a559540ab300ea49955229c347a517a28d27784c54" +checksum = "1e32339a5dc40459130b3bd269e9892439f55b33e772d2a9d402a789baaf4e8a" dependencies = [ - "opentelemetry_api", - "opentelemetry_sdk", + "futures-core", + "futures-sink", + "indexmap", + "js-sys", + "once_cell", + "pin-project-lite", + "thiserror", + "urlencoding", ] [[package]] name = "opentelemetry-jaeger" -version = "0.19.0" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "876958ba9084f390f913fcf04ddf7bbbb822898867bb0a51cc28f2b9e5c1b515" +checksum = "e617c66fd588e40e0dbbd66932fdc87393095b125d4459b1a3a10feb1712f8a1" dependencies = [ "async-trait", "futures-core", "futures-util", "opentelemetry", "opentelemetry-semantic-conventions", + "opentelemetry_sdk", "thrift", "tokio", ] [[package]] name = "opentelemetry-semantic-conventions" -version = "0.12.0" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73c9f9340ad135068800e7f1b24e9e09ed9e7143f5bf8518ded3d3ec69789269" +checksum = "f5774f1ef1f982ef2a447f6ee04ec383981a3ab99c8e77a1a7b30182e65bbc84" dependencies = [ "opentelemetry", ] -[[package]] -name = "opentelemetry_api" -version = "0.20.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a81f725323db1b1206ca3da8bb19874bbd3f57c3bcd59471bfb04525b265b9b" -dependencies = [ - "futures-channel", - "futures-util", - "indexmap 1.9.3", - "js-sys", - "once_cell", - "pin-project-lite", - "thiserror", - "urlencoding", -] - [[package]] name = "opentelemetry_sdk" -version = "0.20.0" +version = "0.21.2" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa8e705a0612d48139799fcbaba0d4a90f06277153e43dd2bdc16c6f0edd8026" +checksum = "2f16aec8a98a457a52664d69e0091bac3a0abd18ead9b641cb00202ba4e0efe4" dependencies = [ "async-trait", "crossbeam-channel", "futures-channel", "futures-executor", "futures-util", + "glob", "once_cell", - "opentelemetry_api", - "ordered-float 3.9.1", + "opentelemetry", + "ordered-float 4.2.0", "percent-encoding", "rand", - "regex", "thiserror", "tokio", "tokio-stream", @@ -1286,9 +1272,9 @@ dependencies = [ [[package]] name = "ordered-float" -version = "3.9.1" +version = "4.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a54938017eacd63036332b4ae5c8a49fc8c0c1d6d629893057e4f13609edd06" +checksum = "a76df7075c7d4d01fdcb46c912dd17fba5b60c78ea480b475f2b6ab6f666584e" dependencies = [ "num-traits", ] @@ -1802,7 +1788,7 @@ version = "1.0.107" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6b420ce6e3d8bd882e9b243c6eed35dbc9a6110c9769e74b584e0d68d1f20c65" dependencies = [ - "indexmap 2.0.0", + "indexmap", "itoa", "ryu", "serde", @@ -1823,7 +1809,7 @@ version = "0.9.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a49e178e4452f45cb61d0cd8cebc1b0fafd3e41929e996cef79aa3aca91f574" dependencies = [ - "indexmap 2.0.0", + "indexmap", "itoa", "ryu", "serde", @@ -1984,12 +1970,13 @@ dependencies = [ [[package]] name = "stackable-operator" -version = "0.57.0" -source = "git+https://github.com/stackabletech/operator-rs.git?tag=0.57.0#ab5c5c3f220ae9449e82f6861f44a4a9a6fb7b6b" +version = "0.62.0" +source = "git+https://github.com/stackabletech/operator-rs.git?tag=0.62.0#407cfec12469dba32ac1795a9ce7e62077a0650b" dependencies = [ "chrono", "clap", "const_format", + "delegate", "derivative", "dockerfile-parser", "either", @@ -2000,6 +1987,7 @@ dependencies = [ "lazy_static", "opentelemetry", "opentelemetry-jaeger", + "opentelemetry_sdk", 
"product-config", "rand", "regex", @@ -2016,12 +2004,13 @@ dependencies = [ "tracing", "tracing-opentelemetry", "tracing-subscriber", + "url", ] [[package]] name = "stackable-operator-derive" -version = "0.57.0" -source = "git+https://github.com/stackabletech/operator-rs.git?tag=0.57.0#ab5c5c3f220ae9449e82f6861f44a4a9a6fb7b6b" +version = "0.62.0" +source = "git+https://github.com/stackabletech/operator-rs.git?tag=0.62.0#407cfec12469dba32ac1795a9ce7e62077a0650b" dependencies = [ "darling", "proc-macro2", @@ -2249,7 +2238,7 @@ version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap 2.0.0", + "indexmap", "serde", "serde_spanned", "toml_datetime", @@ -2363,20 +2352,33 @@ dependencies = [ "tracing-core", ] +[[package]] +name = "tracing-log" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" +dependencies = [ + "log", + "once_cell", + "tracing-core", +] + [[package]] name = "tracing-opentelemetry" -version = "0.21.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75327c6b667828ddc28f5e3f169036cb793c3f588d83bf0f262a7f062ffed3c8" +checksum = "c67ac25c5407e7b961fafc6f7e9aa5958fd297aada2d20fa2ae1737357e55596" dependencies = [ + "js-sys", "once_cell", "opentelemetry", "opentelemetry_sdk", "smallvec", "tracing", "tracing-core", - "tracing-log", + "tracing-log 0.2.0", "tracing-subscriber", + "web-time", ] [[package]] @@ -2394,7 +2396,7 @@ dependencies = [ "thread_local", "tracing", "tracing-core", - "tracing-log", + "tracing-log 0.1.3", ] [[package]] @@ -2583,6 +2585,16 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "web-time" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"aa30049b1c872b72c89866d458eae9f20380ab280ffd1b1e18df2d3e2d98cfe0" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + [[package]] name = "winapi" version = "0.3.9" diff --git a/Cargo.toml b/Cargo.toml index 24b0b1f7..9db85aa7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -11,7 +11,7 @@ repository = "https://github.com/stackabletech/hdfs-operator" [workspace.dependencies] anyhow = "1.0" -built = { version = "0.6", features = ["chrono", "git2"] } +built = { version = "0.6", features = ["chrono", "git2"] } clap = "4.3" futures = { version = "0.3", features = ["compat"] } indoc = "2.0" @@ -21,7 +21,7 @@ serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" serde_yaml = "0.9" snafu = "0.7" -stackable-operator = { git = "https://github.com/stackabletech/operator-rs.git", tag = "0.57.0" } +stackable-operator = { git = "https://github.com/stackabletech/operator-rs.git", tag = "0.62.0" } product-config = { git = "https://github.com/stackabletech/product-config.git", tag = "0.6.0" } strum = { version = "0.25", features = ["derive"] } tokio = { version = "1.29", features = ["full"] } diff --git a/deploy/helm/hdfs-operator/crds/crds.yaml b/deploy/helm/hdfs-operator/crds/crds.yaml index 6d42cfab..fc02f8af 100644 --- a/deploy/helm/hdfs-operator/crds/crds.yaml +++ b/deploy/helm/hdfs-operator/crds/crds.yaml @@ -89,7 +89,7 @@ spec: type: boolean type: object dataNodes: - description: This struct represents a role - e.g. HDFS datanodes or Trino workers. It has a [`HashMap`] containing all the roleGroups that are part of this role. Additionally, there is a `config`, which is configurable at the role *and* roleGroup level. Everything at roleGroup level is merged on top of what is configured on role level using the [`Merge`] trait. There is also a second form of config, which can only be configured at role level, the `roleConfig`. + description: This struct represents a role - e.g. HDFS datanodes or Trino workers. 
It has a key-value-map containing all the roleGroups that are part of this role. Additionally, there is a `config`, which is configurable at the role *and* roleGroup level. Everything at roleGroup level is merged on top of what is configured on role level. There is also a second form of config, which can only be configured at role level, the `roleConfig`. You can learn more about this in the [Roles and role group concept documentation](https://docs.stackable.tech/home/nightly/concepts/roles-and-role-groups). nullable: true properties: cliOverrides: @@ -7088,37 +7088,6 @@ spec: minimum: 0.0 nullable: true type: integer - selector: - description: A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects. - nullable: true - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object type: object type: object required: @@ -7171,7 +7140,7 @@ spec: type: string type: object journalNodes: - description: This struct represents a role - e.g. HDFS datanodes or Trino workers. It has a [`HashMap`] containing all the roleGroups that are part of this role. Additionally, there is a `config`, which is configurable at the role *and* roleGroup level. Everything at roleGroup level is merged on top of what is configured on role level using the [`Merge`] trait. There is also a second form of config, which can only be configured at role level, the `roleConfig`. + description: This struct represents a role - e.g. HDFS datanodes or Trino workers. It has a key-value-map containing all the roleGroups that are part of this role. Additionally, there is a `config`, which is configurable at the role *and* roleGroup level. Everything at roleGroup level is merged on top of what is configured on role level. There is also a second form of config, which can only be configured at role level, the `roleConfig`. You can learn more about this in the [Roles and role group concept documentation](https://docs.stackable.tech/home/nightly/concepts/roles-and-role-groups). nullable: true properties: cliOverrides: @@ -14144,44 +14113,13 @@ spec: minimum: 0.0 nullable: true type: integer - selector: - description: A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects. - nullable: true - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
- items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object type: object type: object required: - roleGroups type: object nameNodes: - description: This struct represents a role - e.g. HDFS datanodes or Trino workers. It has a [`HashMap`] containing all the roleGroups that are part of this role. Additionally, there is a `config`, which is configurable at the role *and* roleGroup level. Everything at roleGroup level is merged on top of what is configured on role level using the [`Merge`] trait. There is also a second form of config, which can only be configured at role level, the `roleConfig`. + description: This struct represents a role - e.g. HDFS datanodes or Trino workers. It has a key-value-map containing all the roleGroups that are part of this role. Additionally, there is a `config`, which is configurable at the role *and* roleGroup level. 
Everything at roleGroup level is merged on top of what is configured on role level. There is also a second form of config, which can only be configured at role level, the `roleConfig`. You can learn more about this in the [Roles and role group concept documentation](https://docs.stackable.tech/home/nightly/concepts/roles-and-role-groups). nullable: true properties: cliOverrides: @@ -21162,37 +21100,6 @@ spec: minimum: 0.0 nullable: true type: integer - selector: - description: A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects. - nullable: true - properties: - matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. - properties: - key: - description: key is the label key that the selector applies to. - type: string - operator: - description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
- type: object - type: object type: object type: object required: diff --git a/nix/sources.json index 92a53221..77a4d125 100644 --- a/nix/sources.json +++ b/nix/sources.json @@ -17,10 +17,10 @@ "homepage": "", "owner": "NixOS", "repo": "nixpkgs", - "rev": "3f21a22b5aafefa1845dec6f4a378a8f53d8681c", - "sha256": "15y8k3hazg91kscbmn7dy6m0q6zvmhlvvhg97gcl5kw87y0svzxk", + "rev": "5f5210aa20e343b7e35f40c033000db0ef80d7b9", + "sha256": "0yc83iqbzj1gd651x6p2wx7a76wfpifaq7jxz4srbinaglfwfb07", "type": "tarball", - "url": "https://github.com/NixOS/nixpkgs/archive/3f21a22b5aafefa1845dec6f4a378a8f53d8681c.tar.gz", + "url": "https://github.com/NixOS/nixpkgs/archive/5f5210aa20e343b7e35f40c033000db0ef80d7b9.tar.gz", "url_template": "https://github.com/<owner>/<repo>/archive/<rev>.tar.gz" } } diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 639f4f17..7897a24d 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,2 +1,2 @@ [toolchain] -channel = "1.74.0" +channel = "1.75.0" diff --git a/rust/crd/src/affinity.rs b/rust/crd/src/affinity.rs index d1f8f32e..90ae70b5 100644 --- a/rust/crd/src/affinity.rs +++ b/rust/crd/src/affinity.rs @@ -33,11 +33,10 @@ mod test { use crate::{HdfsCluster, HdfsRole}; use stackable_operator::{ - commons::affinity::{StackableAffinity, StackableNodeSelector}, + commons::affinity::StackableAffinity, k8s_openapi::{ api::core::v1::{ - NodeAffinity, NodeSelector, NodeSelectorRequirement, NodeSelectorTerm, PodAffinity, - PodAffinityTerm, PodAntiAffinity, WeightedPodAffinityTerm, + PodAffinity, PodAffinityTerm, PodAntiAffinity, WeightedPodAffinityTerm, }, apimachinery::pkg::apis::meta::v1::LabelSelector, }, @@ -132,117 +131,4 @@ spec: } ); } - - #[test] - fn test_affinity_legacy_node_selector() { - let input = r#" -apiVersion: hdfs.stackable.tech/v1alpha1 -kind: HdfsCluster -metadata: - name: simple-hdfs -spec: - image: - productVersion: 3.3.6 - clusterConfig: - zookeeperConfigMapName: hdfs-zk - journalNodes: - roleGroups: - default: -
replicas: 1 - nameNodes: - roleGroups: - default: - replicas: 1 - dataNodes: - roleGroups: - default: - replicas: 1 - selector: - matchLabels: - disktype: ssd - matchExpressions: - - key: topology.kubernetes.io/zone - operator: In - values: - - antarctica-east1 - - antarctica-west1 - "#; - let hdfs: HdfsCluster = serde_yaml::from_str(input).unwrap(); - let merged_config = HdfsRole::DataNode.merged_config(&hdfs, "default").unwrap(); - - assert_eq!( - merged_config.affinity, - StackableAffinity { - pod_affinity: Some(PodAffinity { - preferred_during_scheduling_ignored_during_execution: Some(vec![ - WeightedPodAffinityTerm { - pod_affinity_term: PodAffinityTerm { - label_selector: Some(LabelSelector { - match_expressions: None, - match_labels: Some(BTreeMap::from([ - ("app.kubernetes.io/name".to_string(), "hdfs".to_string(),), - ( - "app.kubernetes.io/instance".to_string(), - "simple-hdfs".to_string(), - ), - ])) - }), - namespace_selector: None, - namespaces: None, - topology_key: "kubernetes.io/hostname".to_string(), - }, - weight: 20 - } - ]), - required_during_scheduling_ignored_during_execution: None, - }), - pod_anti_affinity: Some(PodAntiAffinity { - preferred_during_scheduling_ignored_during_execution: Some(vec![ - WeightedPodAffinityTerm { - pod_affinity_term: PodAffinityTerm { - label_selector: Some(LabelSelector { - match_expressions: None, - match_labels: Some(BTreeMap::from([ - ("app.kubernetes.io/name".to_string(), "hdfs".to_string(),), - ( - "app.kubernetes.io/instance".to_string(), - "simple-hdfs".to_string(), - ), - ( - "app.kubernetes.io/component".to_string(), - "datanode".to_string(), - ) - ])) - }), - namespace_selector: None, - namespaces: None, - topology_key: "kubernetes.io/hostname".to_string(), - }, - weight: 70 - } - ]), - required_during_scheduling_ignored_during_execution: None, - }), - node_affinity: Some(NodeAffinity { - preferred_during_scheduling_ignored_during_execution: None, - required_during_scheduling_ignored_during_execution: 
Some(NodeSelector { - node_selector_terms: vec![NodeSelectorTerm { - match_expressions: Some(vec![NodeSelectorRequirement { - key: "topology.kubernetes.io/zone".to_string(), - operator: "In".to_string(), - values: Some(vec![ - "antarctica-east1".to_string(), - "antarctica-west1".to_string() - ]), - }]), - match_fields: None, - }] - }), - }), - node_selector: Some(StackableNodeSelector { - node_selector: BTreeMap::from([("disktype".to_string(), "ssd".to_string())]) - }), - } - ); - } } diff --git a/rust/crd/src/lib.rs b/rust/crd/src/lib.rs index c1823efe..ea7ea6a3 100644 --- a/rust/crd/src/lib.rs +++ b/rust/crd/src/lib.rs @@ -27,12 +27,11 @@ use stackable_operator::{ merge::Merge, }, k8s_openapi::{ - api::core::v1::Pod, - api::core::v1::PodTemplateSpec, - apimachinery::pkg::{api::resource::Quantity, apis::meta::v1::LabelSelector}, + api::core::v1::{Pod, PodTemplateSpec}, + apimachinery::pkg::api::resource::Quantity, }, kube::{runtime::reflector::ObjectRef, CustomResource, ResourceExt}, - labels::role_group_selector_labels, + kvp::{LabelError, Labels}, product_config_utils::{ConfigError, Configuration}, product_logging, product_logging::spec::{ContainerLogConfig, Logging}, @@ -61,33 +60,44 @@ pub mod constants; pub mod security; pub mod storage; +type Result = std::result::Result; + #[derive(Snafu, Debug)] pub enum Error { #[snafu(display("object has no associated namespace"))] NoNamespace, + #[snafu(display("missing node role {role:?}"))] MissingRole { role: String }, + #[snafu(display("missing role group {role_group:?} for role {role:?}"))] MissingRoleGroup { role: String, role_group: String }, + #[snafu(display("fragment validation failure"))] FragmentValidationFailure { source: ValidationError }, + #[snafu(display("unable to get {listener} (for {pod})"))] GetPodListener { source: stackable_operator::error::Error, listener: ObjectRef, pod: ObjectRef, }, + #[snafu(display("{listener} (for {pod}) has no address"))] PodListenerHasNoAddress { listener: ObjectRef, pod: 
ObjectRef, }, + #[snafu(display("port {port} ({port_name:?}) is out of bounds, must be within {range:?}", range = 0..=u16::MAX))] PortOutOfBounds { source: TryFromIntError, port_name: String, port: i32, }, + + #[snafu(display("failed to build role-group selector label"))] + BuildRoleGroupSelectorLabel { source: LabelError }, } /// An HDFS cluster stacklet. This resource is managed by the Stackable operator for Apache Hadoop HDFS. @@ -350,20 +360,6 @@ impl HdfsRole { .config .clone(); - if let Some(RoleGroup { - selector: Some(selector), - .. - }) = role.role_groups.get(role_group) - { - // Migrate old `selector` attribute, see ADR 26 affinities. - // TODO Can be removed after support for the old `selector` field is dropped. - #[allow(deprecated)] - role_group_config - .common - .affinity - .add_legacy_selector(selector); - } - role_config.merge(&default_config); role_group_config.merge(&role_config); Ok(AnyNodeConfig::NameNode( @@ -392,20 +388,6 @@ impl HdfsRole { .config .clone(); - if let Some(RoleGroup { - selector: Some(selector), - .. - }) = role.role_groups.get(role_group) - { - // Migrate old `selector` attribute, see ADR 26 affinities. - // TODO Can be removed after support for the old `selector` field is dropped. - #[allow(deprecated)] - role_group_config - .common - .affinity - .add_legacy_selector(selector); - } - role_config.merge(&default_config); role_group_config.merge(&role_config); Ok(AnyNodeConfig::DataNode( @@ -435,20 +417,6 @@ impl HdfsRole { .config .clone(); - if let Some(RoleGroup { - selector: Some(selector), - .. - }) = role.role_groups.get(role_group) - { - // Migrate old `selector` attribute, see ADR 26 affinities. - // TODO Can be removed after support for the old `selector` field is dropped. 
- #[allow(deprecated)] - role_group_config - .common - .affinity - .add_legacy_selector(selector); - } - role_config.merge(&default_config); role_group_config.merge(&role_config); Ok(AnyNodeConfig::JournalNode( @@ -490,25 +458,6 @@ impl HdfsRole { .and_then(|rg| rg.replicas), } } - - /// Return the node/label selector for a certain rolegroup. - pub fn role_group_node_selector( - &self, - hdfs: &HdfsCluster, - role_group: &str, - ) -> Option { - match self { - HdfsRole::NameNode => hdfs - .namenode_rolegroup(role_group) - .and_then(|rg| rg.selector.clone()), - HdfsRole::DataNode => hdfs - .datanode_rolegroup(role_group) - .and_then(|rg| rg.selector.clone()), - HdfsRole::JournalNode => hdfs - .journalnode_rolegroup(role_group) - .and_then(|rg| rg.selector.clone()), - } - } } impl HdfsCluster { @@ -523,17 +472,22 @@ impl HdfsCluster { pub fn rolegroup_selector_labels( &self, rolegroup_ref: &RoleGroupRef, - ) -> BTreeMap { - let mut group_labels = role_group_selector_labels( + ) -> Result { + let mut group_labels = Labels::role_group_selector( self, APP_NAME, &rolegroup_ref.role, &rolegroup_ref.role_group, - ); - group_labels.insert(String::from("role"), rolegroup_ref.role.clone()); - group_labels.insert(String::from("group"), rolegroup_ref.role_group.clone()); - + ) + .context(BuildRoleGroupSelectorLabelSnafu)?; + group_labels + .parse_insert(("role", rolegroup_ref.role.deref())) + .context(BuildRoleGroupSelectorLabelSnafu)?; group_labels + .parse_insert(("group", rolegroup_ref.role_group.deref())) + .context(BuildRoleGroupSelectorLabelSnafu)?; + + Ok(group_labels) } /// Get a reference to the namenode [`RoleGroup`] struct if it exists. diff --git a/rust/operator-binary/src/container.rs b/rust/operator-binary/src/container.rs index 26a906fa..5fe61492 100644 --- a/rust/operator-binary/src/container.rs +++ b/rust/operator-binary/src/container.rs @@ -9,12 +9,7 @@ //! - Set resources //! - Add tcp probes and container ports (to the main containers) //! 
-use crate::product_logging::{ - FORMAT_NAMENODES_LOG4J_CONFIG_FILE, FORMAT_ZOOKEEPER_LOG4J_CONFIG_FILE, HDFS_LOG4J_CONFIG_FILE, - MAX_FORMAT_NAMENODE_LOG_FILE_SIZE, MAX_FORMAT_ZOOKEEPER_LOG_FILE_SIZE, MAX_HDFS_LOG_FILE_SIZE, - MAX_WAIT_NAMENODES_LOG_FILE_SIZE, MAX_ZKFC_LOG_FILE_SIZE, STACKABLE_LOG_DIR, - WAIT_FOR_NAMENODES_LOG4J_CONFIG_FILE, ZKFC_LOG4J_CONFIG_FILE, -}; +use std::{collections::BTreeMap, str::FromStr}; use indoc::formatdoc; use snafu::{OptionExt, ResultExt, Snafu}; @@ -27,13 +22,14 @@ use stackable_hdfs_crd::{ STACKABLE_ROOT_DATA_DIR, }, storage::DataNodeStorageConfig, - AnyNodeConfig, DataNodeContainer, HdfsCluster, HdfsPodRef, HdfsRole, LoggingExt as _, - NameNodeContainer, + AnyNodeConfig, DataNodeContainer, HdfsCluster, HdfsPodRef, HdfsRole, NameNodeContainer, }; use stackable_operator::{ builder::{ - resources::ResourceRequirementsBuilder, ContainerBuilder, PodBuilder, - SecretOperatorVolumeSourceBuilder, VolumeBuilder, VolumeMountBuilder, + resources::ResourceRequirementsBuilder, ContainerBuilder, + ListenerOperatorVolumeSourceBuilder, ListenerOperatorVolumeSourceBuilderError, + ListenerReference, PodBuilder, SecretFormat, SecretOperatorVolumeSourceBuilder, + SecretOperatorVolumeSourceBuilderError, VolumeBuilder, VolumeMountBuilder, }, commons::product_image_selection::ResolvedProductImage, k8s_openapi::{ @@ -46,6 +42,9 @@ use stackable_operator::{ }, kube::{core::ObjectMeta, ResourceExt}, memory::{BinaryMultiple, MemoryQuantity}, + product_logging::framework::{ + create_vector_shutdown_file_command, remove_vector_shutdown_file_command, + }, product_logging::{ self, spec::{ @@ -53,38 +52,55 @@ use stackable_operator::{ CustomContainerLogConfig, }, }, -}; -use stackable_operator::{ - builder::{ListenerOperatorVolumeSourceBuilder, ListenerReference, SecretFormat}, - product_logging::framework::{ - create_vector_shutdown_file_command, remove_vector_shutdown_file_command, - }, utils::COMMON_BASH_TRAP_FUNCTIONS, }; -use std::{collections::BTreeMap, 
str::FromStr}; use strum::{Display, EnumDiscriminants, IntoStaticStr}; +use crate::product_logging::{ + FORMAT_NAMENODES_LOG4J_CONFIG_FILE, FORMAT_ZOOKEEPER_LOG4J_CONFIG_FILE, HDFS_LOG4J_CONFIG_FILE, + MAX_FORMAT_NAMENODE_LOG_FILE_SIZE, MAX_FORMAT_ZOOKEEPER_LOG_FILE_SIZE, MAX_HDFS_LOG_FILE_SIZE, + MAX_WAIT_NAMENODES_LOG_FILE_SIZE, MAX_ZKFC_LOG_FILE_SIZE, STACKABLE_LOG_DIR, + WAIT_FOR_NAMENODES_LOG4J_CONFIG_FILE, ZKFC_LOG4J_CONFIG_FILE, +}; + pub(crate) const TLS_STORE_DIR: &str = "/stackable/tls"; pub(crate) const TLS_STORE_VOLUME_NAME: &str = "tls"; pub(crate) const TLS_STORE_PASSWORD: &str = "changeit"; +pub(crate) const KERBEROS_VOLUME_NAME: &str = "kerberos"; + +type Result = std::result::Result; #[derive(Snafu, Debug, EnumDiscriminants)] #[strum_discriminants(derive(IntoStaticStr))] pub enum Error { #[snafu(display("object has no namespace"))] ObjectHasNoNamespace, - #[snafu(display("Invalid java heap config for [{role}]"))] + + #[snafu(display("invalid java heap config for {role:?}"))] InvalidJavaHeapConfig { source: stackable_operator::error::Error, role: String, }, - #[snafu(display("Could not determine any ContainerConfig actions for [{container_name}]. Container not recognized."))] + + #[snafu(display("could not determine any ContainerConfig actions for {container_name:?}. 
Container not recognized."))] UnrecognizedContainerName { container_name: String }, - #[snafu(display("Invalid container name [{name}]"))] + + #[snafu(display("invalid container name {name:?}"))] InvalidContainerName { source: stackable_operator::error::Error, name: String, }, + + #[snafu(display("failed to build secret volume for {volume_name:?}"))] + BuildSecretVolume { + source: SecretOperatorVolumeSourceBuilderError, + volume_name: String, + }, + + #[snafu(display("failed to build listener volume"))] + BuildListenerVolume { + source: ListenerOperatorVolumeSourceBuilderError, + }, } /// ContainerConfig contains information to create all main, side and init containers for @@ -163,7 +179,7 @@ impl ContainerConfig { ) -> Result<(), Error> { // HDFS main container let main_container_config = Self::from(role.clone()); - pb.add_volumes(main_container_config.volumes(merged_config, object_name)); + pb.add_volumes(main_container_config.volumes(merged_config, object_name)?); pb.add_container(main_container_config.main_container( hdfs, role, @@ -200,13 +216,16 @@ impl ContainerConfig { .with_node_scope() .with_format(SecretFormat::TlsPkcs12) .with_tls_pkcs12_password(TLS_STORE_PASSWORD) - .build(), + .build() + .context(BuildSecretVolumeSnafu { + volume_name: TLS_STORE_VOLUME_NAME, + })?, ) .build(), ); pb.add_volume( - VolumeBuilder::new("kerberos") + VolumeBuilder::new(KERBEROS_VOLUME_NAME) .ephemeral( SecretOperatorVolumeSourceBuilder::new( &authentication_config.kerberos.secret_class, @@ -214,7 +233,10 @@ impl ContainerConfig { .with_service_scope(hdfs.name_any()) .with_kerberos_service_name(role.kerberos_service_name()) .with_kerberos_service_name("HTTP") - .build(), + .build() + .context(BuildSecretVolumeSnafu { + volume_name: KERBEROS_VOLUME_NAME, + })?, ) .build(), ); @@ -225,7 +247,7 @@ impl ContainerConfig { HdfsRole::NameNode => { // Zookeeper fail over container let zkfc_container_config = Self::try_from(NameNodeContainer::Zkfc.to_string())?; - 
pb.add_volumes(zkfc_container_config.volumes(merged_config, object_name)); + pb.add_volumes(zkfc_container_config.volumes(merged_config, object_name)?); pb.add_container(zkfc_container_config.main_container( hdfs, role, @@ -239,7 +261,7 @@ impl ContainerConfig { let format_namenodes_container_config = Self::try_from(NameNodeContainer::FormatNameNodes.to_string())?; pb.add_volumes( - format_namenodes_container_config.volumes(merged_config, object_name), + format_namenodes_container_config.volumes(merged_config, object_name)?, ); pb.add_init_container(format_namenodes_container_config.init_container( hdfs, @@ -255,7 +277,7 @@ impl ContainerConfig { let format_zookeeper_container_config = Self::try_from(NameNodeContainer::FormatZooKeeper.to_string())?; pb.add_volumes( - format_zookeeper_container_config.volumes(merged_config, object_name), + format_zookeeper_container_config.volumes(merged_config, object_name)?, ); pb.add_init_container(format_zookeeper_container_config.init_container( hdfs, @@ -272,7 +294,7 @@ impl ContainerConfig { let wait_for_namenodes_container_config = Self::try_from(DataNodeContainer::WaitForNameNodes.to_string())?; pb.add_volumes( - wait_for_namenodes_container_config.volumes(merged_config, object_name), + wait_for_namenodes_container_config.volumes(merged_config, object_name)?, ); pb.add_init_container(wait_for_namenodes_container_config.init_container( hdfs, @@ -290,16 +312,20 @@ impl ContainerConfig { Ok(()) } - pub fn volume_claim_templates(merged_config: &AnyNodeConfig) -> Vec { + pub fn volume_claim_templates( + merged_config: &AnyNodeConfig, + ) -> Result> { match merged_config { AnyNodeConfig::NameNode(node) => { let listener = ListenerOperatorVolumeSourceBuilder::new( &ListenerReference::ListenerClass(node.listener_class.to_string()), ) - .build() + .build_ephemeral() + .context(BuildListenerVolumeSnafu)? 
.volume_claim_template .unwrap(); - vec![ + + let pvcs = vec![ node.resources.storage.data.build_pvc( ContainerConfig::DATA_VOLUME_MOUNT_NAME, Some(vec!["ReadWriteOnce"]), @@ -312,16 +338,18 @@ impl ContainerConfig { spec: Some(listener.spec), ..Default::default() }, - ] + ]; + + Ok(pvcs) } - AnyNodeConfig::JournalNode(node) => vec![node.resources.storage.data.build_pvc( + AnyNodeConfig::JournalNode(node) => Ok(vec![node.resources.storage.data.build_pvc( ContainerConfig::DATA_VOLUME_MOUNT_NAME, Some(vec!["ReadWriteOnce"]), - )], - AnyNodeConfig::DataNode(node) => DataNodeStorageConfig { + )]), + AnyNodeConfig::DataNode(node) => Ok(DataNodeStorageConfig { pvcs: node.resources.storage.clone(), } - .build_pvcs(), + .build_pvcs()), } } @@ -355,7 +383,7 @@ impl ContainerConfig { env_overrides, resources.as_ref(), )) - .add_volume_mounts(self.volume_mounts(hdfs, merged_config)) + .add_volume_mounts(self.volume_mounts(hdfs, merged_config)?) .add_container_ports(self.container_ports(hdfs)); if let Some(resources) = resources { @@ -391,7 +419,7 @@ impl ContainerConfig { .command(Self::command()) .args(self.args(hdfs, role, merged_config, namenode_podrefs)?) .add_env_vars(self.env(hdfs, zookeeper_config_map_name, env_overrides, None)) - .add_volume_mounts(self.volume_mounts(hdfs, merged_config)); + .add_volume_mounts(self.volume_mounts(hdfs, merged_config)?); // We use the main app container resources here in contrast to several operators (which use // hardcoded resources) due to the different code structure. @@ -785,7 +813,7 @@ wait_for_termination $! } /// Return the container volumes. - fn volumes(&self, merged_config: &AnyNodeConfig, object_name: &str) -> Vec { + fn volumes(&self, merged_config: &AnyNodeConfig, object_name: &str) -> Result> { let mut volumes = vec![]; if let ContainerConfig::Hdfs { .. } = self { @@ -796,7 +824,8 @@ wait_for_termination $! 
ListenerOperatorVolumeSourceBuilder::new( &ListenerReference::ListenerClass(node.listener_class.to_string()), ) - .build(), + .build_ephemeral() + .context(BuildListenerVolumeSnafu)?, ) .build(), ); @@ -845,11 +874,15 @@ wait_for_termination $! self.volume_mount_dirs().log_mount_name(), )); - volumes + Ok(volumes) } /// Returns the container volume mounts. - fn volume_mounts(&self, hdfs: &HdfsCluster, merged_config: &AnyNodeConfig) -> Vec { + fn volume_mounts( + &self, + hdfs: &HdfsCluster, + merged_config: &AnyNodeConfig, + ) -> Result> { let mut volume_mounts = vec![ VolumeMountBuilder::new(Self::STACKABLE_LOG_VOLUME_MOUNT_NAME, STACKABLE_LOG_DIR) .build(), @@ -903,7 +936,7 @@ wait_for_termination $! ); } HdfsRole::DataNode => { - for pvc in Self::volume_claim_templates(merged_config) { + for pvc in Self::volume_claim_templates(merged_config)? { let pvc_name = pvc.name_any(); volume_mounts.push(VolumeMount { mount_path: format!("{DATANODE_ROOT_DATA_DIR_PREFIX}{pvc_name}"), @@ -919,7 +952,8 @@ wait_for_termination $! | ContainerConfig::WaitForNameNodes { .. } | ContainerConfig::FormatZooKeeper { .. } => {} } - volume_mounts + + Ok(volume_mounts) } /// Create a config directory for the respective container. 
diff --git a/rust/operator-binary/src/discovery.rs b/rust/operator-binary/src/discovery.rs index 3b09f7ba..35bb5249 100644 --- a/rust/operator-binary/src/discovery.rs +++ b/rust/operator-binary/src/discovery.rs @@ -1,19 +1,44 @@ -use crate::{ - build_recommended_labels, - config::{CoreSiteConfigBuilder, HdfsSiteConfigBuilder}, - hdfs_controller::Error, -}; +use snafu::{ResultExt, Snafu}; use stackable_hdfs_crd::{ constants::{CORE_SITE_XML, HDFS_SITE_XML}, HdfsCluster, HdfsPodRef, HdfsRole, }; use stackable_operator::{ - builder::{ConfigMapBuilder, ObjectMetaBuilder}, + builder::{ConfigMapBuilder, ObjectMetaBuilder, ObjectMetaBuilderError}, commons::product_image_selection::ResolvedProductImage, k8s_openapi::api::core::v1::ConfigMap, kube::{runtime::reflector::ObjectRef, ResourceExt}, }; +use crate::{ + build_recommended_labels, + config::{CoreSiteConfigBuilder, HdfsSiteConfigBuilder}, + kerberos, +}; + +type Result = std::result::Result; + +#[derive(Snafu, Debug)] +#[allow(clippy::enum_variant_names)] +pub enum Error { + #[snafu(display("object {hdfs} is missing metadata to build owner reference"))] + ObjectMissingMetadataForOwnerRef { + source: stackable_operator::error::Error, + hdfs: ObjectRef, + }, + + #[snafu(display("failed to build ConfigMap"))] + BuildConfigMap { + source: stackable_operator::error::Error, + }, + + #[snafu(display("failed to build object meta data"))] + ObjectMeta { source: ObjectMetaBuilderError }, + + #[snafu(display("failed to build security discovery config map"))] + BuildSecurityDiscoveryConfigMap { source: kerberos::Error }, +} + /// Creates a discovery config map containing the `hdfs-site.xml` and `core-site.xml` /// for clients. 
pub fn build_discovery_configmap( @@ -21,25 +46,25 @@ pub fn build_discovery_configmap( controller: &str, namenode_podrefs: &[HdfsPodRef], resolved_product_image: &ResolvedProductImage, -) -> Result { +) -> Result { + let metadata = ObjectMetaBuilder::new() + .name_and_namespace(hdfs) + .ownerreference_from_resource(hdfs, None, Some(true)) + .context(ObjectMissingMetadataForOwnerRefSnafu { + hdfs: ObjectRef::from_obj(hdfs), + })? + .with_recommended_labels(build_recommended_labels( + hdfs, + controller, + &resolved_product_image.app_version_label, + &HdfsRole::NameNode.to_string(), + "discovery", + )) + .context(ObjectMetaSnafu)? + .build(); + ConfigMapBuilder::new() - .metadata( - ObjectMetaBuilder::new() - .name_and_namespace(hdfs) - .ownerreference_from_resource(hdfs, None, Some(true)) - .map_err(|err| Error::ObjectMissingMetadataForOwnerRef { - source: err, - obj_ref: ObjectRef::from_obj(hdfs), - })? - .with_recommended_labels(build_recommended_labels( - hdfs, - controller, - &resolved_product_image.app_version_label, - &HdfsRole::NameNode.to_string(), - "discovery", - )) - .build(), - ) + .metadata(metadata) .add_data( HDFS_SITE_XML, build_discovery_hdfs_site_xml(hdfs, hdfs.name_any(), namenode_podrefs), @@ -49,7 +74,7 @@ pub fn build_discovery_configmap( build_discovery_core_site_xml(hdfs, hdfs.name_any())?, ) .build() - .map_err(|err| Error::BuildDiscoveryConfigMap { source: err }) + .context(BuildConfigMapSnafu) } fn build_discovery_hdfs_site_xml( @@ -67,12 +92,10 @@ fn build_discovery_hdfs_site_xml( .build_as_xml() } -fn build_discovery_core_site_xml( - hdfs: &HdfsCluster, - logical_name: String, -) -> Result { +fn build_discovery_core_site_xml(hdfs: &HdfsCluster, logical_name: String) -> Result { Ok(CoreSiteConfigBuilder::new(logical_name) .fs_default_fs() - .security_discovery_config(hdfs)? + .security_discovery_config(hdfs) + .context(BuildSecurityDiscoveryConfigMapSnafu)? 
.build_as_xml()) } diff --git a/rust/operator-binary/src/event.rs b/rust/operator-binary/src/event.rs index cdfa7c1b..46ff0b12 100644 --- a/rust/operator-binary/src/event.rs +++ b/rust/operator-binary/src/event.rs @@ -12,7 +12,7 @@ use strum::{EnumDiscriminants, IntoStaticStr}; #[derive(Snafu, Debug, EnumDiscriminants)] #[strum_discriminants(derive(IntoStaticStr))] pub enum Error { - #[snafu(display("Failed to publish event"))] + #[snafu(display("failed to publish event"))] PublishEvent { source: stackable_operator::kube::Error, }, diff --git a/rust/operator-binary/src/hdfs_controller.rs b/rust/operator-binary/src/hdfs_controller.rs index e40cbfc0..b65e52c3 100644 --- a/rust/operator-binary/src/hdfs_controller.rs +++ b/rust/operator-binary/src/hdfs_controller.rs @@ -14,7 +14,10 @@ use stackable_hdfs_crd::{ constants::*, AnyNodeConfig, HdfsCluster, HdfsClusterStatus, HdfsPodRef, HdfsRole, }; use stackable_operator::{ - builder::{ConfigMapBuilder, ObjectMetaBuilder, PodBuilder, PodSecurityContextBuilder}, + builder::{ + ConfigMapBuilder, ObjectMetaBuilder, ObjectMetaBuilderError, PodBuilder, + PodSecurityContextBuilder, + }, client::Client, cluster_resources::{ClusterResourceApplyStrategy, ClusterResources}, commons::{ @@ -34,7 +37,7 @@ use stackable_operator::{ runtime::{controller::Action, reflector::ObjectRef}, Resource, ResourceExt, }, - labels::role_group_selector_labels, + kvp::{Label, LabelError, Labels}, logging::controller::ReconcilerError, product_config_utils::{transform_all_roles_to_config, validate_all_roles_and_groups_config}, role_utils::{GenericRoleConfig, RoleGroupRef}, @@ -49,9 +52,9 @@ use strum::{EnumDiscriminants, IntoStaticStr}; use crate::{ build_recommended_labels, config::{CoreSiteConfigBuilder, HdfsSiteConfigBuilder}, - container::ContainerConfig, + container::{self, ContainerConfig}, container::{TLS_STORE_DIR, TLS_STORE_PASSWORD}, - discovery::build_discovery_configmap, + discovery::{self, build_discovery_configmap}, 
event::{build_invalid_replica_message, publish_event}, kerberos, operations::{ @@ -69,97 +72,92 @@ const DOCKER_IMAGE_BASE_NAME: &str = "hadoop"; #[derive(Snafu, Debug, EnumDiscriminants)] #[strum_discriminants(derive(IntoStaticStr))] pub enum Error { - #[snafu(display("Invalid role configuration"))] + #[snafu(display("invalid role configuration"))] InvalidRoleConfig { source: stackable_operator::product_config_utils::ConfigError, }, - #[snafu(display("Invalid product configuration"))] + #[snafu(display("invalid product configuration"))] InvalidProductConfig { source: stackable_operator::error::Error, }, - #[snafu(display("Cannot create rolegroup service [{name}]"))] + #[snafu(display("cannot create rolegroup service {name:?}"))] ApplyRoleGroupService { source: stackable_operator::error::Error, name: String, }, - #[snafu(display("Cannot create role group config map [{name}]"))] + #[snafu(display("cannot create role group config map {name:?}"))] ApplyRoleGroupConfigMap { source: stackable_operator::error::Error, name: String, }, - #[snafu(display("Cannot create role group stateful set [{name}]"))] + #[snafu(display("cannot create role group stateful set {name:?}"))] ApplyRoleGroupStatefulSet { source: stackable_operator::error::Error, name: String, }, - #[snafu(display("Cannot create discovery config map [{name}]"))] + #[snafu(display("cannot create discovery config map {name:?}"))] ApplyDiscoveryConfigMap { source: stackable_operator::error::Error, name: String, }, - #[snafu(display("No metadata for [{obj_ref}]"))] + #[snafu(display("no metadata for {obj_ref:?}"))] ObjectMissingMetadataForOwnerRef { source: stackable_operator::error::Error, obj_ref: ObjectRef, }, - #[snafu(display("Invalid role [{role}]"))] + #[snafu(display("invalid role {role:?}"))] InvalidRole { source: strum::ParseError, role: String, }, - #[snafu(display("Object has no name"))] + #[snafu(display("object has no name"))] ObjectHasNoName { obj_ref: ObjectRef }, - #[snafu(display("Object has no 
namespace"))] - ObjectHasNoNamespace { obj_ref: ObjectRef }, - - #[snafu(display("Cannot build config map for role [{role}] and role group [{role_group}]"))] + #[snafu(display("cannot build config map for role {role:?} and role group {role_group:?}"))] BuildRoleGroupConfigMap { source: stackable_operator::error::Error, role: String, role_group: String, }, - #[snafu(display("Cannot collect discovery configuration"))] + #[snafu(display("cannot collect discovery configuration"))] CollectDiscoveryConfig { source: stackable_hdfs_crd::Error }, - #[snafu(display("Cannot build discovery config map"))] - BuildDiscoveryConfigMap { - source: stackable_operator::error::Error, - }, + #[snafu(display("cannot build config discovery config map"))] + BuildDiscoveryConfigMap { source: discovery::Error }, - #[snafu(display("Failed to patch service account"))] + #[snafu(display("failed to patch service account"))] ApplyServiceAccount { source: stackable_operator::error::Error, }, - #[snafu(display("Failed to patch role binding"))] + #[snafu(display("failed to patch role binding"))] ApplyRoleBinding { source: stackable_operator::error::Error, }, - #[snafu(display("Failed to create cluster resources"))] + #[snafu(display("failed to create cluster resources"))] CreateClusterResources { source: stackable_operator::error::Error, }, - #[snafu(display("Failed to delete orphaned resources"))] + #[snafu(display("failed to delete orphaned resources"))] DeleteOrphanedResources { source: stackable_operator::error::Error, }, - #[snafu(display("Failed to create pod references"))] + #[snafu(display("failed to create pod references"))] CreatePodReferences { source: stackable_hdfs_crd::Error }, - #[snafu(display("Failed to build role properties"))] + #[snafu(display("failed to build role properties"))] BuildRoleProperties { source: stackable_hdfs_crd::Error }, #[snafu(display("failed to resolve the Vector aggregator address"))] @@ -167,7 +165,7 @@ pub enum Error { source: 
crate::product_logging::Error, }, - #[snafu(display("failed to add the logging configuration to the ConfigMap [{cm_name}]"))] + #[snafu(display("failed to add the logging configuration to the ConfigMap {cm_name:?}"))] InvalidLoggingConfig { source: crate::product_logging::Error, cm_name: String, @@ -203,7 +201,7 @@ pub enum Error { KerberosNotSupported, #[snafu(display( - "failed to serialize [{JVM_SECURITY_PROPERTIES_FILE}] for {}", + "failed to serialize {JVM_SECURITY_PROPERTIES_FILE:?} for {}", rolegroup ))] JvmSecurityProperties { @@ -213,6 +211,27 @@ pub enum Error { #[snafu(display("failed to configure graceful shutdown"))] GracefulShutdown { source: graceful_shutdown::Error }, + + #[snafu(display("failed to build roleGroup selector labels"))] + RoleGroupSelectorLabels { source: stackable_hdfs_crd::Error }, + + #[snafu(display("failed to build prometheus label"))] + BuildPrometheusLabel { source: LabelError }, + + #[snafu(display("failed to build cluster resources label"))] + BuildClusterResourcesLabel { source: LabelError }, + + #[snafu(display("failed to build role-group selector label"))] + BuildRoleGroupSelectorLabel { source: LabelError }, + + #[snafu(display("failed to build role-group volume claim templates from config"))] + BuildRoleGroupVolumeClaimTemplates { source: container::Error }, + + #[snafu(display("failed to build object meta data"))] + ObjectMeta { source: ObjectMetaBuilderError }, + + #[snafu(display("failed to build security config"))] + BuildSecurityConfig { source: kerberos::Error }, } impl ReconcilerError for Error { @@ -236,8 +255,8 @@ pub async fn reconcile_hdfs(hdfs: Arc, ctx: Arc) -> HdfsOperat .spec .image .resolve(DOCKER_IMAGE_BASE_NAME, crate::built_info::CARGO_PKG_VERSION); - if hdfs.has_kerberos_enabled() { - kerberos::check_if_supported(&resolved_product_image)?; + if hdfs.has_kerberos_enabled() && kerberos::is_not_supported(&resolved_product_image) { + return KerberosNotSupportedSnafu.fail(); } let vector_aggregator_address 
= resolve_vector_aggregator_address(&hdfs, client) @@ -279,7 +298,9 @@ pub async fn reconcile_hdfs(hdfs: Arc, ctx: Arc) -> HdfsOperat let (rbac_sa, rbac_rolebinding) = build_rbac_resources( hdfs.as_ref(), APP_NAME, - cluster_resources.get_required_labels(), + cluster_resources + .get_required_labels() + .context(BuildClusterResourcesLabelSnafu)?, ) .context(BuildRbacResourcesSnafu)?; @@ -390,7 +411,8 @@ pub async fn reconcile_hdfs(hdfs: Arc, ctx: Arc) -> HdfsOperat .await .context(CollectDiscoveryConfigSnafu)?, &resolved_product_image, - )?; + ) + .context(BuildDiscoveryConfigMapSnafu)?; // The discovery CM is linked to the cluster lifecycle via ownerreference. // Therefore, must not be added to the "orphaned" cluster resources @@ -430,42 +452,55 @@ fn rolegroup_service( resolved_product_image: &ResolvedProductImage, ) -> HdfsOperatorResult { tracing::info!("Setting up Service for {:?}", rolegroup_ref); + + let prometheus_label = + Label::try_from(("prometheus.io/scrape", "true")).context(BuildPrometheusLabelSnafu)?; + + let metadata = ObjectMetaBuilder::new() + .name_and_namespace(hdfs) + .name(&rolegroup_ref.object_name()) + .ownerreference_from_resource(hdfs, None, Some(true)) + .context(ObjectMissingMetadataForOwnerRefSnafu { + obj_ref: ObjectRef::from_obj(hdfs), + })? + .with_recommended_labels(build_recommended_labels( + hdfs, + RESOURCE_MANAGER_HDFS_CONTROLLER, + &resolved_product_image.app_version_label, + &rolegroup_ref.role, + &rolegroup_ref.role_group, + )) + .context(ObjectMetaSnafu)? 
+ .with_label(prometheus_label) + .build(); + + let service_spec = ServiceSpec { + // Internal communication does not need to be exposed + type_: Some("ClusterIP".to_string()), + cluster_ip: Some("None".to_string()), + ports: Some( + hdfs.ports(role) + .into_iter() + .map(|(name, value)| ServicePort { + name: Some(name), + port: i32::from(value), + protocol: Some("TCP".to_string()), + ..ServicePort::default() + }) + .collect(), + ), + selector: Some( + hdfs.rolegroup_selector_labels(rolegroup_ref) + .context(RoleGroupSelectorLabelsSnafu)? + .into(), + ), + publish_not_ready_addresses: Some(true), + ..ServiceSpec::default() + }; + Ok(Service { - metadata: ObjectMetaBuilder::new() - .name_and_namespace(hdfs) - .name(&rolegroup_ref.object_name()) - .ownerreference_from_resource(hdfs, None, Some(true)) - .with_context(|_| ObjectMissingMetadataForOwnerRefSnafu { - obj_ref: ObjectRef::from_obj(hdfs), - })? - .with_recommended_labels(build_recommended_labels( - hdfs, - RESOURCE_MANAGER_HDFS_CONTROLLER, - &resolved_product_image.app_version_label, - &rolegroup_ref.role, - &rolegroup_ref.role_group, - )) - .with_label("prometheus.io/scrape", "true") - .build(), - spec: Some(ServiceSpec { - // Internal communication does not need to be exposed - type_: Some("ClusterIP".to_string()), - cluster_ip: Some("None".to_string()), - ports: Some( - hdfs.ports(role) - .into_iter() - .map(|(name, value)| ServicePort { - name: Some(name), - port: i32::from(value), - protocol: Some("TCP".to_string()), - ..ServicePort::default() - }) - .collect(), - ), - selector: Some(hdfs.rolegroup_selector_labels(rolegroup_ref)), - publish_not_ready_addresses: Some(true), - ..ServiceSpec::default() - }), + metadata, + spec: Some(service_spec), status: None, }) } @@ -551,7 +586,8 @@ fn rolegroup_config_map( core_site_xml = CoreSiteConfigBuilder::new(hdfs_name.to_string()) .fs_default_fs() .ha_zookeeper_quorum() - .security_config(hdfs)? + .security_config(hdfs) + .context(BuildSecurityConfigSnafu)? 
// the extend with config must come last in order to have overrides working!!! .extend(config) .build_as_xml(); @@ -632,24 +668,25 @@ fn rolegroup_config_map( .map(|(k, v)| (k, Some(v))) .collect(); + let cm_metadata = ObjectMetaBuilder::new() + .name_and_namespace(hdfs) + .name(&rolegroup_ref.object_name()) + .ownerreference_from_resource(hdfs, None, Some(true)) + .with_context(|_| ObjectMissingMetadataForOwnerRefSnafu { + obj_ref: ObjectRef::from_obj(hdfs), + })? + .with_recommended_labels(build_recommended_labels( + hdfs, + RESOURCE_MANAGER_HDFS_CONTROLLER, + &resolved_product_image.app_version_label, + &rolegroup_ref.role, + &rolegroup_ref.role_group, + )) + .context(ObjectMetaSnafu)? + .build(); + builder - .metadata( - ObjectMetaBuilder::new() - .name_and_namespace(hdfs) - .name(&rolegroup_ref.object_name()) - .ownerreference_from_resource(hdfs, None, Some(true)) - .with_context(|_| ObjectMissingMetadataForOwnerRefSnafu { - obj_ref: ObjectRef::from_obj(hdfs), - })? - .with_recommended_labels(build_recommended_labels( - hdfs, - RESOURCE_MANAGER_HDFS_CONTROLLER, - &resolved_product_image.app_version_label, - &rolegroup_ref.role, - &rolegroup_ref.role_group, - )) - .build(), - ) + .metadata(cm_metadata) .add_data(CORE_SITE_XML.to_string(), core_site_xml) .add_data(HDFS_SITE_XML.to_string(), hdfs_site_xml) .add_data(HADOOP_POLICY_XML.to_string(), hadoop_policy_xml) @@ -697,20 +734,27 @@ fn rolegroup_statefulset( let object_name = rolegroup_ref.object_name(); // PodBuilder for StatefulSet Pod template. let mut pb = PodBuilder::new(); - pb.metadata(ObjectMeta { - labels: Some(hdfs.rolegroup_selector_labels(rolegroup_ref)), + + let pb_metadata = ObjectMeta { + labels: Some( + hdfs.rolegroup_selector_labels(rolegroup_ref) + .context(RoleGroupSelectorLabelsSnafu)? 
+ .into(), + ), ..ObjectMeta::default() - }) - .image_pull_secrets_from_product_image(resolved_product_image) - .affinity(&merged_config.affinity) - .service_account_name(service_account_name(APP_NAME)) - .security_context( - PodSecurityContextBuilder::new() - .run_as_user(HDFS_UID) - .run_as_group(0) - .fs_group(1000) - .build(), - ); + }; + + pb.metadata(pb_metadata) + .image_pull_secrets_from_product_image(resolved_product_image) + .affinity(&merged_config.affinity) + .service_account_name(service_account_name(APP_NAME)) + .security_context( + PodSecurityContextBuilder::new() + .run_as_user(HDFS_UID) + .run_as_group(0) + .fs_group(1000) + .build(), + ); // Adds all containers and volumes to the pod builder ContainerConfig::add_containers_and_volumes( @@ -737,42 +781,53 @@ fn rolegroup_statefulset( pod_template.merge_from(pod_overrides.clone()); } + let metadata = ObjectMetaBuilder::new() + .name_and_namespace(hdfs) + .name(&rolegroup_ref.object_name()) + .ownerreference_from_resource(hdfs, None, Some(true)) + .with_context(|_| ObjectMissingMetadataForOwnerRefSnafu { + obj_ref: ObjectRef::from_obj(hdfs), + })? + .with_recommended_labels(build_recommended_labels( + hdfs, + RESOURCE_MANAGER_HDFS_CONTROLLER, + &resolved_product_image.app_version_label, + &rolegroup_ref.role, + &rolegroup_ref.role_group, + )) + .context(ObjectMetaSnafu)? 
+ .build(); + + let match_labels = Labels::role_group_selector( + hdfs, + APP_NAME, + &rolegroup_ref.role, + &rolegroup_ref.role_group, + ) + .context(BuildRoleGroupSelectorLabelSnafu)?; + + let pvcs = ContainerConfig::volume_claim_templates(merged_config) + .context(BuildRoleGroupVolumeClaimTemplatesSnafu)?; + + let statefulset_spec = StatefulSetSpec { + pod_management_policy: Some("OrderedReady".to_string()), + replicas: role + .role_group_replicas(hdfs, &rolegroup_ref.role_group) + .map(i32::from), + selector: LabelSelector { + match_labels: Some(match_labels.into()), + ..LabelSelector::default() + }, + service_name: object_name, + template: pod_template, + + volume_claim_templates: Some(pvcs), + ..StatefulSetSpec::default() + }; + Ok(StatefulSet { - metadata: ObjectMetaBuilder::new() - .name_and_namespace(hdfs) - .name(&rolegroup_ref.object_name()) - .ownerreference_from_resource(hdfs, None, Some(true)) - .with_context(|_| ObjectMissingMetadataForOwnerRefSnafu { - obj_ref: ObjectRef::from_obj(hdfs), - })? 
- .with_recommended_labels(build_recommended_labels( - hdfs, - RESOURCE_MANAGER_HDFS_CONTROLLER, - &resolved_product_image.app_version_label, - &rolegroup_ref.role, - &rolegroup_ref.role_group, - )) - .build(), - spec: Some(StatefulSetSpec { - pod_management_policy: Some("OrderedReady".to_string()), - replicas: role - .role_group_replicas(hdfs, &rolegroup_ref.role_group) - .map(i32::from), - selector: LabelSelector { - match_labels: Some(role_group_selector_labels( - hdfs, - APP_NAME, - &rolegroup_ref.role, - &rolegroup_ref.role_group, - )), - ..LabelSelector::default() - }, - service_name: object_name, - template: pod_template, - - volume_claim_templates: Some(ContainerConfig::volume_claim_templates(merged_config)), - ..StatefulSetSpec::default() - }), + metadata, + spec: Some(statefulset_spec), status: None, }) } diff --git a/rust/operator-binary/src/kerberos.rs b/rust/operator-binary/src/kerberos.rs index d36463cc..095b0cd7 100644 --- a/rust/operator-binary/src/kerberos.rs +++ b/rust/operator-binary/src/kerberos.rs @@ -1,3 +1,4 @@ +use snafu::{ResultExt, Snafu}; use stackable_hdfs_crd::{ constants::{SSL_CLIENT_XML, SSL_SERVER_XML}, HdfsCluster, @@ -7,21 +8,27 @@ use stackable_operator::{ kube::{runtime::reflector::ObjectRef, ResourceExt}, }; -use crate::{ - config::{CoreSiteConfigBuilder, HdfsSiteConfigBuilder}, - hdfs_controller::Error, -}; +use crate::config::{CoreSiteConfigBuilder, HdfsSiteConfigBuilder}; -pub fn check_if_supported(resolved_product_image: &ResolvedProductImage) -> Result<(), Error> { - // We only support Kerberos for HDFS >= 3.3.x - // With HDFS 3.2.2 we got weird errors, which *might* be caused by DNS lookup issues - // The Stacktrace is documented in rust/operator/src/kerberos_hdfs_3.2_stacktrace.txt +type Result = std::result::Result; - if resolved_product_image.product_version.starts_with("3.2.") { - Err(Error::KerberosNotSupported {}) - } else { - Ok(()) - } +#[derive(Snafu, Debug)] +#[allow(clippy::enum_variant_names)] +pub enum Error { 
+ #[snafu(display("object has no namespace"))] + ObjectHasNoNamespace { + source: stackable_hdfs_crd::Error, + obj_ref: ObjectRef, + }, +} + +/// Checks for unsupported Kerberos versions +/// +/// We only support Kerberos for HDFS >= 3.3.x +/// With HDFS 3.2.2 we got weird errors, which *might* be caused by DNS lookup issues +/// The Stacktrace is documented in rust/operator/src/kerberos_hdfs_3.2_stacktrace.txt +pub fn is_not_supported(resolved_product_image: &ResolvedProductImage) -> bool { + resolved_product_image.product_version.starts_with("3.2.") } impl HdfsSiteConfigBuilder { @@ -55,7 +62,7 @@ impl HdfsSiteConfigBuilder { } impl CoreSiteConfigBuilder { - pub fn security_config(&mut self, hdfs: &HdfsCluster) -> Result<&mut Self, Error> { + pub fn security_config(&mut self, hdfs: &HdfsCluster) -> Result<&mut Self> { if hdfs.authentication_config().is_some() { let principal_host_part = principal_host_part(hdfs)?; @@ -106,7 +113,7 @@ impl CoreSiteConfigBuilder { Ok(self) } - pub fn security_discovery_config(&mut self, hdfs: &HdfsCluster) -> Result<&mut Self, Error> { + pub fn security_discovery_config(&mut self, hdfs: &HdfsCluster) -> Result<&mut Self> { if hdfs.has_kerberos_enabled() { let principal_host_part = principal_host_part(hdfs)?; @@ -147,11 +154,11 @@ impl CoreSiteConfigBuilder { /// ``` /// /// After we have switched to using the following principals everything worked without problems -fn principal_host_part(hdfs: &HdfsCluster) -> Result { +fn principal_host_part(hdfs: &HdfsCluster) -> Result { let hdfs_name = hdfs.name_any(); let hdfs_namespace = hdfs .namespace_or_error() - .map_err(|_| Error::ObjectHasNoNamespace { + .with_context(|_| ObjectHasNoNamespaceSnafu { obj_ref: ObjectRef::from_obj(hdfs), })?; Ok(format!( diff --git a/rust/operator-binary/src/main.rs b/rust/operator-binary/src/main.rs index 27e49671..89c77d4a 100644 --- a/rust/operator-binary/src/main.rs +++ b/rust/operator-binary/src/main.rs @@ -12,7 +12,7 @@ use stackable_operator::{ 
core::v1::{ConfigMap, Service}, }, kube::runtime::{watcher, Controller}, - labels::ObjectLabels, + kvp::ObjectLabels, logging::controller::report_controller_reconciled, namespace::WatchNamespace, CustomResourceExt, diff --git a/rust/operator-binary/src/operations/graceful_shutdown.rs b/rust/operator-binary/src/operations/graceful_shutdown.rs index dfb9440e..d35b9e29 100644 --- a/rust/operator-binary/src/operations/graceful_shutdown.rs +++ b/rust/operator-binary/src/operations/graceful_shutdown.rs @@ -4,7 +4,7 @@ use stackable_operator::builder::PodBuilder; #[derive(Debug, Snafu)] pub enum Error { - #[snafu(display("Failed to set terminationGracePeriod"))] + #[snafu(display("failed to set terminationGracePeriod"))] SetTerminationGracePeriod { source: stackable_operator::builder::pod::Error, }, diff --git a/rust/operator-binary/src/operations/pdb.rs b/rust/operator-binary/src/operations/pdb.rs index b9a82cbc..0418d382 100644 --- a/rust/operator-binary/src/operations/pdb.rs +++ b/rust/operator-binary/src/operations/pdb.rs @@ -11,12 +11,13 @@ use crate::{hdfs_controller::RESOURCE_MANAGER_HDFS_CONTROLLER, OPERATOR_NAME}; #[derive(Snafu, Debug)] pub enum Error { - #[snafu(display("Cannot create PodDisruptionBudget for role [{role}]"))] + #[snafu(display("cannot create PodDisruptionBudget for role {role:?}"))] CreatePdb { source: stackable_operator::error::Error, role: String, }, - #[snafu(display("Cannot apply role group PodDisruptionBudget [{name}]"))] + + #[snafu(display("cannot apply role group PodDisruptionBudget {name:?}"))] ApplyPdb { source: stackable_operator::error::Error, name: String, diff --git a/rust/operator-binary/src/product_logging.rs b/rust/operator-binary/src/product_logging.rs index 73579261..cc1d29b7 100644 --- a/rust/operator-binary/src/product_logging.rs +++ b/rust/operator-binary/src/product_logging.rs @@ -1,9 +1,7 @@ use std::{borrow::Cow, fmt::Display}; use snafu::{OptionExt, ResultExt, Snafu}; -use stackable_hdfs_crd::{ - AnyNodeConfig, 
DataNodeContainer, HdfsCluster, LoggingExt, NameNodeContainer, -}; +use stackable_hdfs_crd::{AnyNodeConfig, DataNodeContainer, HdfsCluster, NameNodeContainer}; use stackable_operator::{ builder::ConfigMapBuilder, client::Client, @@ -21,16 +19,19 @@ use stackable_operator::{ pub enum Error { #[snafu(display("object has no namespace"))] ObjectHasNoNamespace, - #[snafu(display("failed to retrieve the ConfigMap [{cm_name}]"))] + + #[snafu(display("failed to retrieve the ConfigMap {cm_name:?}"))] ConfigMapNotFound { source: stackable_operator::error::Error, cm_name: String, }, - #[snafu(display("failed to retrieve the entry [{entry}] for ConfigMap [{cm_name}]"))] + + #[snafu(display("failed to retrieve the entry {entry:?} for ConfigMap {cm_name:?}"))] MissingConfigMapEntry { entry: &'static str, cm_name: String, }, + #[snafu(display("vectorAggregatorConfigMapName must be set"))] MissingVectorAggregatorAddress, }