diff --git a/.github/workflows/rust-test.yml b/.github/workflows/rust-test.yml index 9a8a3a1..0421d66 100644 --- a/.github/workflows/rust-test.yml +++ b/.github/workflows/rust-test.yml @@ -11,32 +11,50 @@ env: jobs: build: - runs-on: ubuntu-latest + container: rust:slim-bookworm + + # see https://docs.github.com/en/actions/using-containerized-services/creating-postgresql-service-containers + # Service containers to run with `build` + services: + # Label used to access the service container + postgres: + # Docker Hub image + image: postgres + # Provide the password for postgres + env: + POSTGRES_USER: postgres + POSTGRES_PASSWORD: postgres + POSTGRES_DB: test + # Set health checks to wait until postgres has started + options: >- + --health-cmd pg_isready + --health-interval 10s + --health-timeout 5s + --health-retries 5 + ports: + # Maps tcp port 5432 on service container to the host + - 5432:5432 steps: - uses: actions/checkout@v3 - name: Install dependencies run: | - sudo apt-get update && \ - sudo apt-get install -y --no-install-recommends \ + apt-get update && \ + apt-get install -y --no-install-recommends \ libkrb5-dev \ - libssl-dev - - name: Install a Postgres database (CockroachDB) - run: | - git clone https://github.com/cockroachlabs-field/docker-examples.git - cd docker-examples/example-nginx/ - sh up.sh - - name: Build - run: cargo build --verbose + libssl-dev \ + make \ + clang \ + pkgconf ; - name: Clippy - run: cargo clippy -- -Dwarnings + run: rustup component add clippy && cargo clippy -- -Dwarnings - name: Run tests run: | - # Env var to connect to the Postgres docker - export POSTGRES_HOST="localhost" - export POSTGRES_PORT="26257" - export POSTGRES_USER="root" - export POSTGRES_PASSWORD="" + # Env var to connect to the Postgres service + export POSTGRES_HOST="postgres" + export POSTGRES_PORT="5432" + export POSTGRES_USER="postgres" + export POSTGRES_PASSWORD="postgres" export POSTGRES_DBNAME="test" cargo test --verbose diff --git a/CHANGELOG.md 
b/CHANGELOG.md index 03d6885..f28b705 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +## [0.2.0] + ### Added - Add OpenWEC node name (if configured) in JSON format output (#2) @@ -16,6 +18,15 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Add a setting to configure `heartbeats_queue_size` (#37) - Add Tls support for encryption and authentication (#36) - Add support for output events to redis list (#45) +- Add TCP keepalive settings (with sensible defaults) in server settings (#56) +- Add support for output events to unix domain socket (#60) +- Add configuration files for subscriptions coming with two openwec cli subcommands (`subscriptions load` and `subscriptions skell`) +- Add `cli.read_only_subscriptions` setting to disable the cli features which edit subscriptions (except `subscriptions load`) +- Add `RawJson` format which enables to retrieve events in raw format while also getting the metadata added by OpenWEC +- Add the subscription revision in OpenWEC events metadata +- Add `locale` and `data_locale` subscriptions parameters +- Add support for Proxy Protocol to allow openwec to be used behind a layer 4 load +balancer whilst preserving the client IP address and port. ### Changed @@ -26,8 +37,12 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - **Breaking change**: Keytab file path must be specified only once for all collectors (using Kerberos authentication) - A malformed event will no longer stop the event stream (for a computer/subscription) because formatters are not allowed to fail. In problematic cases, some work is done to try to recover the raw data of the event, and an `OpenWEC.Error` field is added (in the JSON formatter) to help catch the problem (#47) - **Breaking change**: Split access and server logs. Configuration file format has been updated. 
(#52) -- Ensure that openwecd shutdowns gracefully even if hyper server is not responding -- Improve the logging of failed Kerberos authentications: missing authorization header warning is now in DEBUG level +- Ensure that openwecd shutdowns gracefully even if hyper server is not responding (#65) +- Improve the logging of failed Kerberos authentications: missing authorization header warning is now in DEBUG level (#65) +- Rework output drivers and output formats architecture +- Change the outputs storage format in database +- Rework the import/export format to enable compatibility between OpenWEC versions +- Each subscription has now two "versions": a public one sent to clients (derived from subscription parameters) and a private one used for synchronization between openwec nodes ### Fixed diff --git a/Cargo.lock b/Cargo.lock index c366d10..dab85b4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -19,9 +19,9 @@ checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" [[package]] name = "ahash" -version = "0.8.6" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91429305e9f0a25f6205c5b8e0d2db09e0708a7a6df0f42212bb56c32c8ac97a" +checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ "cfg-if", "once_cell", @@ -31,18 +31,18 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2969dcb958b36655471fc61f7e416fa76033bdd4bfed0678d8fee1e2d07a1f0" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" dependencies = [ "memchr", ] [[package]] name = "allocator-api2" -version = "0.2.16" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5" +checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" [[package]] 
name = "android-tzdata" @@ -61,9 +61,9 @@ dependencies = [ [[package]] name = "anstream" -version = "0.6.4" +version = "0.6.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ab91ebe16eb252986481c5b62f6098f3b698a45e34b5b98200cf20dd2484a44" +checksum = "d96bd03f33fe50a863e394ee9718a706f988b9079b20c3784fb726e7678b62fb" dependencies = [ "anstyle", "anstyle-parse", @@ -75,49 +75,49 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.4" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7079075b41f533b8c61d2a4d073c4676e1f8b249ff94a393b0595db304e0dd87" +checksum = "8901269c6307e8d93993578286ac0edf7f195079ffff5ebdeea6a59ffb7e36bc" [[package]] name = "anstyle-parse" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "317b9a89c1868f5ea6ff1d9539a69f45dffc21ce321ac1fd1160dfa48c8e2140" +checksum = "c75ac65da39e5fe5ab759307499ddad880d724eed2f6ce5b5e8a26f4f387928c" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" -version = "1.0.0" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ca11d4be1bab0c8bc8734a9aa7bf4ee8316d462a08c6ac5052f888fef5b494b" +checksum = "e28923312444cdd728e4738b3f9c9cac739500909bb3d3c94b43551b16517648" dependencies = [ - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] name = "anstyle-wincon" -version = "3.0.1" +version = "3.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0699d10d2f4d628a98ee7b57b289abbc98ff3bad977cb3152709d4bf2330628" +checksum = "1cd54b81ec8d6180e24654d0b371ad22fc3dd083b6ff8ba325b72e00c87660a7" dependencies = [ "anstyle", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] name = "anyhow" -version = "1.0.75" +version = "1.0.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6" +checksum = "f538837af36e6f6a9be0faa67f9a314f8119e4e4b5867c6ab40ed60360142519" [[package]] name = "arc-swap" -version = "1.6.0" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bddcadddf5e9015d310179a59bb28c4d4b9920ad0f11e8e14dbadf654890c9a6" +checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457" [[package]] name = "asn1-rs" @@ -160,26 +160,53 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.74" +version = "0.1.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a66537f1bb974b254c98ed142ff995236e81b9d0fe4db0575f46612cb15eb0f9" +checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.60", ] [[package]] name = "autocfg" -version = "1.1.0" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1fdabc7756949593fe60f30ec81974b613357de856987752631dea1e3394c80" + +[[package]] +name = "aws-lc-rs" +version = "1.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5509d663b2c00ee421bda8d6a24d6c42e15970957de1701b8df9f6fbe5707df1" +dependencies = [ + "aws-lc-sys", + "mirai-annotations", + "paste", + "zeroize", +] + +[[package]] +name = "aws-lc-sys" +version = "0.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" +checksum = "8d5d317212c2a78d86ba6622e969413c38847b62f48111f8b763af3dac2f9840" +dependencies = [ + "bindgen", + "cc", + "cmake", + "dunce", + "fs_extra", + "libc", + "paste", +] [[package]] name = "backtrace" -version = "0.3.69" +version = "0.3.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" +checksum = 
"26b05800d2e817c8b3b4b54abd461726265fa9789ae34330622f2db9ee696f9d" dependencies = [ "addr2line", "cc", @@ -192,30 +219,36 @@ dependencies = [ [[package]] name = "base64" -version = "0.21.5" +version = "0.21.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35636a1494ede3b646cc98f74f8e62c773a38a659ebc777a2cf26b9b74171df9" +checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" + +[[package]] +name = "base64" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9475866fec1451be56a3c2400fd081ff546538961565ccb5b7142cbd22bc7a51" [[package]] name = "bindgen" -version = "0.69.2" +version = "0.69.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4c69fae65a523209d34240b60abe0c42d33d1045d445c0839d8a4894a736e2d" +checksum = "a00dc851838a2120612785d195287475a3ac45514741da670b735818822129a0" dependencies = [ - "bitflags 2.4.1", + "bitflags 2.5.0", "cexpr", "clang-sys", + "itertools", "lazy_static", "lazycell", "log", - "peeking_take_while", "prettyplease", "proc-macro2", "quote", "regex", "rustc-hash", "shlex", - "syn 2.0.38", + "syn 2.0.60", "which", ] @@ -227,9 +260,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.4.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07" +checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1" [[package]] name = "bitreader" @@ -257,9 +290,9 @@ checksum = "2e2c71c44e5bbc64de4ecfac946e05f9bba5cc296ea7bab4d3eda242a3ffa73c" [[package]] name = "bumpalo" -version = "3.14.0" +version = "3.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f30e7476521f6f8af1a1c4c0b8cc94f0bee37d91763d0ca2665f299b6cd8aec" +checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" 
[[package]] name = "byteorder" @@ -269,18 +302,19 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.5.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223" +checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9" [[package]] name = "cc" -version = "1.0.83" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" +checksum = "d32a725bc159af97c3e629873bb9f88fb8cf8a4867175f76dc987815ea07c83b" dependencies = [ "jobserver", "libc", + "once_cell", ] [[package]] @@ -300,23 +334,21 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" -version = "0.4.31" +version = "0.4.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f2c685bad3eb3d45a01354cedb7d5faa66194d1d58ba6e267a8de788f79db38" +checksum = "a21f936df1771bf62b77f047b726c4625ff2e8aa607c01ec06e5a05bd8463401" dependencies = [ "android-tzdata", "iana-time-zone", - "js-sys", "num-traits", - "wasm-bindgen", - "windows-targets 0.48.5", + "windows-targets 0.52.5", ] [[package]] name = "clang-sys" -version = "1.6.1" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c688fc74432808e3eb684cae8830a86be1d66a2bd58e1f248ed0960a590baf6f" +checksum = "67523a3b4be3ce1989d607a828d036249522dd9c1c8de7f4dd2dae43a37369d1" dependencies = [ "glob", "libc", @@ -325,18 +357,18 @@ dependencies = [ [[package]] name = "clap" -version = "4.4.7" +version = "4.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac495e00dcec98c83465d5ad66c5c4fabd652fd6686e7c6269b117e729a6f17b" +checksum = "90bc066a67923782aa8515dbaea16946c5bcc5addbd668bb80af688e53e548a0" dependencies = [ 
"clap_builder", ] [[package]] name = "clap_builder" -version = "4.4.7" +version = "4.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c77ed9a32a62e6ca27175d00d29d05ca32e396ea1eb5fb01d8256b669cec7663" +checksum = "ae129e2e766ae0ec03484e609954119f123cc1fe650337e155d03b022f24f7b4" dependencies = [ "anstream", "anstyle", @@ -346,22 +378,35 @@ dependencies = [ [[package]] name = "clap_lex" -version = "0.6.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "702fc72eb24e5a1e48ce58027a675bc24edd52096d5397d4aea7c6dd9eca0bd1" +checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce" [[package]] name = "cli" -version = "0.1.0" +version = "0.2.0" dependencies = [ "anyhow", + "chrono", "clap", "common", "env_logger", "log", "roxmltree", + "serde", "serde_json", + "strum", "tokio", + "uuid", +] + +[[package]] +name = "cmake" +version = "0.1.50" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a31c789563b815f77f4250caee12365734369f942439b7defd71e18a48197130" +dependencies = [ + "cc", ] [[package]] @@ -372,9 +417,9 @@ checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" [[package]] name = "combine" -version = "4.6.6" +version = "4.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35ed6e9d84f0b51a7f52daf1c7d71dd136fd7a3f41a8462b8cdb8c78d920fad4" +checksum = "ba5a308b75df32fe02788e748662718f03fde005016435c444eea572398219fd" dependencies = [ "bytes", "futures-core", @@ -386,7 +431,7 @@ dependencies = [ [[package]] name = "common" -version = "0.1.0" +version = "0.2.0" dependencies = [ "anyhow", "async-trait", @@ -401,6 +446,7 @@ dependencies = [ "serde", "serde_json", "serial_test", + "strum", "tempfile", "tokio", "tokio-postgres", @@ -410,15 +456,15 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.4" +version = "0.8.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" +checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" [[package]] name = "cpufeatures" -version = "0.2.10" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fbc60abd742b35f2492f808e1abbb83d45f72db402e14c55057edc9c7b1e9e4" +checksum = "53fe5e26ff1b7aef8bca9c6080520cfb8d9333c7568e1829cef191a9723e5504" dependencies = [ "libc", ] @@ -433,24 +479,11 @@ dependencies = [ "typenum", ] -[[package]] -name = "dashmap" -version = "5.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856" -dependencies = [ - "cfg-if", - "hashbrown 0.14.2", - "lock_api", - "once_cell", - "parking_lot_core", -] - [[package]] name = "data-encoding" -version = "2.4.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2e66c9d817f1720209181c316d28635c050fa304f9c79e47a520882661b7308" +checksum = "e8566979429cf69b49a5c740c60791108e86440e8be149bbea4fe54d2c32d6e2" [[package]] name = "deadpool" @@ -534,9 +567,9 @@ dependencies = [ [[package]] name = "deranged" -version = "0.3.9" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f32d04922c60427da6f9fef14d042d9edddef64cb9d4ce0d64d0685fbeb1fd3" +checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" dependencies = [ "powerfmt", ] @@ -577,29 +610,35 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.60", ] +[[package]] +name = "dunce" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56ce8c6da7551ec6c462cbaf3bfbc75131ebbfa1c944aeaa9dab51ca1c5f0c3b" + [[package]] name = "either" -version = "1.9.0" 
+version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" +checksum = "a47c1c47d2f5964e29c61246e81db715514cd532db6b5116a25ea3c03d6780a2" [[package]] name = "encoding_rs" -version = "0.8.33" +version = "0.8.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7268b386296a025e474d5140678f75d6de9493ae55a5d709eeb9dd08149945e1" +checksum = "b45de904aa0b010bce2ab45264d0631681847fa7b6f2eaa7dab7619943bc4f59" dependencies = [ "cfg-if", ] [[package]] name = "env_logger" -version = "0.10.0" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85cdab6a89accf66733ad5a1693a4dcced6aeff64602b634530dd73c1f3ee9f0" +checksum = "4cd405aab171cb85d6735e5c8d9db038c17d3ca007a4d2c25f337935c3d90580" dependencies = [ "humantime", "is-terminal", @@ -638,9 +677,9 @@ checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" [[package]] name = "fastrand" -version = "2.0.1" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" +checksum = "9fc0510504f03c51ada170672ac806f1f105a88aa97a5281117e1ddc3368e51a" [[package]] name = "finl_unicode" @@ -671,18 +710,24 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" [[package]] name = "form_urlencoded" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652" +checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" dependencies = [ "percent-encoding", ] +[[package]] +name = "fs_extra" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c" + [[package]] name = "futures" 
-version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23342abe12aba583913b2e62f22225ff9c950774065e4bfb61a19cd9770fec40" +checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" dependencies = [ "futures-channel", "futures-core", @@ -695,9 +740,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "955518d47e09b25bbebc7a18df10b81f0c766eaf4c4f1cccef2fca5f2a4fb5f2" +checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" dependencies = [ "futures-core", "futures-sink", @@ -705,15 +750,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c" +checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" [[package]] name = "futures-executor" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccecee823288125bd88b4d7f565c9e58e41858e47ab72e8ea2d64e93624386e0" +checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" dependencies = [ "futures-core", "futures-task", @@ -722,38 +767,38 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964" +checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" [[package]] name = "futures-macro" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" +checksum = 
"87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.60", ] [[package]] name = "futures-sink" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f43be4fe21a13b9781a69afa4985b0f6ee0e1afab2c6f454a8cf30e2b2237b6e" +checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" [[package]] name = "futures-task" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65" +checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" [[package]] name = "futures-util" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26b01e40b772d54cf6c6d721c1d1abd0647a0106a12ecaa1c186273392a69533" +checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" dependencies = [ "futures-channel", "futures-core", @@ -779,9 +824,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.10" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" +checksum = "94b22e06ecb0110981051723910cbf0b5f5e09a2062dd7663334ee79a9d1286c" dependencies = [ "cfg-if", "libc", @@ -790,9 +835,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.28.0" +version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fb8d784f27acf97159b40fc4db5ecd8aa23b9ad5ef69cdd136d3bc80665f0c0" +checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" [[package]] name = "glob" @@ -802,9 +847,9 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] name = "h2" -version = "0.3.24" +version = "0.4.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb2c4422095b67ee78da96fbb51a4cc413b3b25883c7717ff7ca1ab31022c9c9" +checksum = "816ec7294445779408f36fe57bc5b7fc1cf59664059096c65f905c1c61f58069" dependencies = [ "bytes", "fnv", @@ -812,7 +857,7 @@ dependencies = [ "futures-sink", "futures-util", "http", - "indexmap 2.0.2", + "indexmap", "slab", "tokio", "tokio-util", @@ -821,15 +866,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.12.3" +version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" - -[[package]] -name = "hashbrown" -version = "0.14.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f93e7192158dbcda357bdec5fb5788eebf8bbac027f3f33e719d29135ae84156" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" dependencies = [ "ahash", "allocator-api2", @@ -841,14 +880,20 @@ version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7" dependencies = [ - "hashbrown 0.14.2", + "hashbrown", ] +[[package]] +name = "heck" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" + [[package]] name = "hermit-abi" -version = "0.3.3" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d77f7ec81a6d05a3abb01ab6eb7590f6083d08449fe5a1c8b1e620283546ccb7" +checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" [[package]] name = "hex" @@ -867,18 +912,18 @@ dependencies = [ [[package]] name = "home" -version = "0.5.5" +version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5444c27eef6923071f7ebcc33e3444508466a76f7a2b93da00ed6e19f30c1ddb" +checksum = 
"e3d1354bf6b7235cb4a0576c2619fd4ed18183f689b12b006a0ee7329eeff9a5" dependencies = [ - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] name = "http" -version = "0.2.9" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482" +checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" dependencies = [ "bytes", "fnv", @@ -887,12 +932,24 @@ dependencies = [ [[package]] name = "http-body" -version = "0.4.5" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1cac85db508abc24a2e48553ba12a996e87244a0395ce011e62b37158745d643" +dependencies = [ + "bytes", + "http", +] + +[[package]] +name = "http-body-util" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" +checksum = "0475f8b2ac86659c21b64320d5d653f9efe42acd2a4e560073ec61a155a34f1d" dependencies = [ "bytes", + "futures-core", "http", + "http-body", "pin-project-lite", ] @@ -916,13 +973,12 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.27" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffb1cfd654a8219eaef89881fdb3bb3b1cdc5fa75ded05d6933b2b382e395468" +checksum = "fe575dd17d0862a9a33781c8c4696a55c320909004a67a00fb286ba8b1bc496d" dependencies = [ "bytes", "futures-channel", - "futures-core", "futures-util", "h2", "http", @@ -931,18 +987,36 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.4.10", + "smallvec", + "tokio", + "want", +] + +[[package]] +name = "hyper-util" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca38ef113da30126bbff9cd1705f9273e15d45498615d138b0c20279ac7a76aa" +dependencies = [ + "bytes", + "futures-channel", + 
"futures-util", + "http", + "http-body", + "hyper", + "pin-project-lite", + "socket2 0.5.7", "tokio", + "tower", "tower-service", "tracing", - "want", ] [[package]] name = "iana-time-zone" -version = "0.1.58" +version = "0.1.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8326b86b6cff230b97d0d312a6c40a60726df3332e721f72a1b035f451663b20" +checksum = "e7ffbb5a1b541ea2561f8c41c087286cc091e21e556a4f09a8f6cbf17b69b141" dependencies = [ "android_system_properties", "core-foundation-sys", @@ -963,9 +1037,9 @@ dependencies = [ [[package]] name = "idna" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" +checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" dependencies = [ "unicode-bidi", "unicode-normalization", @@ -973,64 +1047,54 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.9.3" +version = "2.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" -dependencies = [ - "autocfg", - "hashbrown 0.12.3", -] - -[[package]] -name = "indexmap" -version = "2.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8adf3ddd720272c6ea8bf59463c04e0f93d0bbf7c5439b691bca2987e0270897" +checksum = "168fb715dda47215e360912c096649d23d58bf392ac62f73919e831745e40f26" dependencies = [ "equivalent", - "hashbrown 0.14.2", + "hashbrown", ] [[package]] name = "is-terminal" -version = "0.4.9" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" +checksum = "f23ff5ef2b80d608d61efee834934d862cd92461afc0560dedf493e4c033738b" dependencies = [ "hermit-abi", - "rustix", - "windows-sys 0.48.0", + "libc", + "windows-sys 0.52.0", ] [[package]] name = "itertools" -version = 
"0.12.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25db6b064527c5d482d0423354fcd07a89a2dfe07b67892e62411946db7f07b0" +checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569" dependencies = [ "either", ] [[package]] name = "itoa" -version = "1.0.9" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" +checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" [[package]] name = "jobserver" -version = "0.1.27" +version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c37f63953c4c63420ed5fd3d6d398c719489b9f872b9fa683262f8edd363c7d" +checksum = "d2b099aaa34a9751c5bf0878add70444e1ed2dd73f347be99003d4577277de6e" dependencies = [ "libc", ] [[package]] name = "js-sys" -version = "0.3.64" +version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a" +checksum = "29c15563dc2726973df627357ce0c9ddddbea194836909d655df6a75d2cf296d" dependencies = [ "wasm-bindgen", ] @@ -1049,9 +1113,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.151" +version = "0.2.154" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "302d7ab3130588088d277783b1e2d2e10c9e9e4a16dd9050e6ec93fb3e7048f4" +checksum = "ae743338b92ff9146ce83992f766a31066a91a8c84a45e0e9f21e7cf6de6d346" [[package]] name = "libgssapi" @@ -1059,7 +1123,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c22f0430969e524b2177498ca3eeed48faca6f6c80f1b098d27ecbec84222f3a" dependencies = [ - "bitflags 2.4.1", + "bitflags 2.5.0", "bytes", "lazy_static", "libgssapi-sys", @@ -1077,12 +1141,12 @@ dependencies = [ [[package]] name = "libloading" 
-version = "0.7.4" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b67380fd3b2fbe7527a606e18729d21c6f3951633d0500574c4dc22d2d638b9f" +checksum = "0c2a198fb6b0eada2a8df47933734e6d35d350665a33a3593d7164fa52c75c19" dependencies = [ "cfg-if", - "winapi", + "windows-targets 0.52.5", ] [[package]] @@ -1098,9 +1162,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.12" +version = "1.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d97137b25e321a73eef1418d1d5d2eda4d77e12813f8e6dead84bc52c5870a7b" +checksum = "5e143b5e666b2695d28f6bca6497720813f699c9602dd7f5cac91008b8ada7f9" dependencies = [ "cc", "libc", @@ -1108,23 +1172,17 @@ dependencies = [ "vcpkg", ] -[[package]] -name = "linked-hash-map" -version = "0.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" - [[package]] name = "linux-raw-sys" -version = "0.4.12" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4cd1a83af159aa67994778be9070f0ae1bd732942279cabb14f86f986a21456" +checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c" [[package]] name = "lock_api" -version = "0.4.11" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c168f8615b12bc01f9c17e2eb0cc07dcae1940121185446edc3744920e8ef45" +checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" dependencies = [ "autocfg", "scopeguard", @@ -1132,9 +1190,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.20" +version = "0.4.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" +checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" dependencies = [ "serde", ] @@ -1147,9 +1205,9 @@ checksum = 
"a94d21414c1f4a51209ad204c1776a3d0765002c76c6abcb602a6f09f1e881c7" [[package]] name = "log4rs" -version = "1.2.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d36ca1786d9e79b8193a68d480a0907b612f109537115c6ff655a3a1967533fd" +checksum = "0816135ae15bd0391cf284eab37e6e3ee0a6ee63d2ceeb659862bd8d0a984ca6" dependencies = [ "anyhow", "arc-swap", @@ -1160,7 +1218,9 @@ dependencies = [ "libc", "log", "log-mdc", + "once_cell", "parking_lot", + "rand", "serde", "serde-value", "serde_json", @@ -1193,9 +1253,9 @@ dependencies = [ [[package]] name = "memchr" -version = "2.6.4" +version = "2.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167" +checksum = "6c8640c5d730cb13ebd907d8d04b52f55ac9a2eec55b440c8892f40d56c76c1d" [[package]] name = "mime" @@ -1211,24 +1271,30 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7" +checksum = "9d811f3e15f28568be3407c8e7fdb6514c1cda3cb30683f15b6a1a1dc4ea14a7" dependencies = [ "adler", ] [[package]] name = "mio" -version = "0.8.9" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3dce281c5e46beae905d4de1870d8b1509a9142b62eedf18b443b011ca8343d0" +checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" dependencies = [ "libc", "wasi", "windows-sys 0.48.0", ] +[[package]] +name = "mirai-annotations" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9be0862c1b3f26a88803c4a49de6889c10e608b3ee9344e6ef5b45fb37ad3d1" + [[package]] name = "nom" version = "7.1.3" @@ -1250,21 +1316,26 @@ dependencies = [ "num-traits", ] +[[package]] +name = "num-conv" 
+version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" + [[package]] name = "num-integer" -version = "0.1.45" +version = "0.1.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" dependencies = [ - "autocfg", "num-traits", ] [[package]] name = "num-traits" -version = "0.2.17" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c" +checksum = "da0df0e5185db44f69b44f26786fe401b6c293d1907744beaa7fa62b2e5a517a" dependencies = [ "autocfg", ] @@ -1302,9 +1373,9 @@ dependencies = [ [[package]] name = "object" -version = "0.32.1" +version = "0.32.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cf5f9dd3933bd50a9e1f149ec995f39ae2c496d31fd772c1fd45ebc27e902b0" +checksum = "a6a622008b6e321afc04970976f62ee297fdbaa6f95318ca343e3eebb9648441" dependencies = [ "memchr", ] @@ -1320,17 +1391,17 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.18.0" +version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" +checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" [[package]] name = "openssl" -version = "0.10.60" +version = "0.10.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79a4c6c3a2b158f7f8f2a2fc5a969fa3a068df6fc9dbb4a43845436e3af7c800" +checksum = "95a0481286a310808298130d22dd1fef0fa571e05a8f44ec801801e84b216b1f" dependencies = [ - "bitflags 2.4.1", + "bitflags 2.5.0", "cfg-if", "foreign-types", "libc", @@ -1347,14 +1418,14 @@ checksum = 
"a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.60", ] [[package]] name = "openssl-sys" -version = "0.9.96" +version = "0.9.102" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3812c071ba60da8b5677cc12bcb1d42989a65553772897a7e0355545a819838f" +checksum = "c597637d56fbc83893a35eb0dd04b2b8e7a50c91e64e9493e398b5df4fb45fa2" dependencies = [ "cc", "libc", @@ -1373,9 +1444,9 @@ dependencies = [ [[package]] name = "parking_lot" -version = "0.12.1" +version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" +checksum = "7e4af0ca4f6caed20e900d564c242b8e5d4903fdacf31d3daf527b66fe6f42fb" dependencies = [ "lock_api", "parking_lot_core", @@ -1383,28 +1454,28 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.9" +version = "0.9.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c42a9226546d68acdd9c0a280d17ce19bfe27a46bf68784e4066115788d008e" +checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if", "libc", - "redox_syscall", + "redox_syscall 0.5.1", "smallvec", - "windows-targets 0.48.5", + "windows-targets 0.52.5", ] [[package]] -name = "peeking_take_while" -version = "0.1.2" +name = "paste" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" +checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" [[package]] name = "percent-encoding" -version = "2.3.0" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" +checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "phf" @@ -1424,11 
+1495,31 @@ dependencies = [ "siphasher", ] +[[package]] +name = "pin-project" +version = "1.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3" +dependencies = [ + "pin-project-internal", +] + +[[package]] +name = "pin-project-internal" +version = "1.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.60", +] + [[package]] name = "pin-project-lite" -version = "0.2.13" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" +checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" [[package]] name = "pin-utils" @@ -1438,9 +1529,9 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkg-config" -version = "0.3.27" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" +checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec" [[package]] name = "postgres-openssl" @@ -1461,7 +1552,7 @@ version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49b6c5ef183cd3ab4ba005f1ca64c21e8bd97ce4699cfea9e8d9a2c4958ca520" dependencies = [ - "base64", + "base64 0.21.7", "byteorder", "bytes", "fallible-iterator", @@ -1490,6 +1581,15 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" +[[package]] +name = "ppp" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "82d901d7dd743c478e14af9518bdbc33e53e50be56429233f812537f29dbf0d1" +dependencies = [ 
+ "thiserror", +] + [[package]] name = "ppv-lite86" version = "0.2.17" @@ -1498,12 +1598,12 @@ checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = "prettyplease" -version = "0.2.15" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae005bd773ab59b4725093fd7df83fd7892f7d8eafb48dbd7de6e024e4215f9d" +checksum = "5ac2cf0f2e4f42b49f5ffd07dae8d746508ef7526c13940e5f524012ae6c6550" dependencies = [ "proc-macro2", - "syn 2.0.38", + "syn 2.0.60", ] [[package]] @@ -1518,9 +1618,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.69" +version = "1.0.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "134c189feb4956b20f6f547d2cf727d4c0fe06722b20a0eec87ed445a97f92da" +checksum = "3d1597b0c024618f09a9c3b8655b7e430397a36d23fdafec26d6965e9eec3eba" dependencies = [ "unicode-ident", ] @@ -1536,9 +1636,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.33" +version = "1.0.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" +checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" dependencies = [ "proc-macro2", ] @@ -1575,9 +1675,9 @@ dependencies = [ [[package]] name = "rdkafka" -version = "0.36.0" +version = "0.36.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d54f02a5a40220f8a2dfa47ddb38ba9064475a5807a69504b6f91711df2eea63" +checksum = "1beea247b9a7600a81d4cc33f659ce1a77e1988323d7d2809c7ed1c21f4c316d" dependencies = [ "futures-channel", "futures-util", @@ -1635,11 +1735,20 @@ dependencies = [ "bitflags 1.3.2", ] +[[package]] +name = "redox_syscall" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "469052894dcb553421e483e4209ee581a45100d31b4018de03e5a7ad86374a7e" +dependencies = [ + "bitflags 2.5.0", +] + [[package]] name = "regex" 
-version = "1.10.2" +version = "1.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "380b951a9c5e80ddfd6136919eef32310721aa4aacd4889a8d39124b026ab343" +checksum = "c117dbdfde9c8308975b6a18d71f3f385c89461f7b3fb054288ecf2a2058ba4c" dependencies = [ "aho-corasick", "memchr", @@ -1649,9 +1758,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.3" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f804c7828047e88b2d32e2d7fe5a105da8ee3264f01902f796c8e067dc2483f" +checksum = "86b83b8b9847f9bf95ef68afb0b8e6cdb80f498442f5179a29fad448fcc1eaea" dependencies = [ "aho-corasick", "memchr", @@ -1660,9 +1769,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" +checksum = "adad44e29e4c806119491a7f06f03de4d1af22c3a680dd47f1e6e179439d1f56" [[package]] name = "retain_mut" @@ -1672,16 +1781,17 @@ checksum = "4389f1d5789befaf6029ebd9f7dac4af7f7e3d61b69d4f30e2ac02b57e7712b0" [[package]] name = "ring" -version = "0.17.5" +version = "0.17.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb0205304757e5d899b9c2e448b867ffd03ae7f988002e47cd24954391394d0b" +checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" dependencies = [ "cc", + "cfg-if", "getrandom", "libc", "spin", "untrusted", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -1727,11 +1837,11 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.28" +version = "0.38.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72e572a5e8ca657d7366229cdde4bd14c4eb5499a9573d4d366fe1b599daa316" +checksum = "70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f" dependencies = [ - "bitflags 2.4.1", + "bitflags 2.5.0", "errno", "libc", 
"linux-raw-sys", @@ -1740,40 +1850,67 @@ dependencies = [ [[package]] name = "rustls" -version = "0.21.8" +version = "0.23.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "446e14c5cda4f3f30fe71863c34ec70f5ac79d6087097ad0bb433e1be5edf04c" +checksum = "afabcee0551bd1aa3e18e5adbf2c0544722014b899adb31bd186ec638d3da97e" dependencies = [ + "aws-lc-rs", "log", - "ring", + "once_cell", + "rustls-pki-types", "rustls-webpki", - "sct", + "subtle", + "zeroize", ] [[package]] name = "rustls-pemfile" -version = "1.0.3" +version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d3987094b1d07b653b7dfdc3f70ce9a1da9c51ac18c1b06b662e4f9a0e9f4b2" +checksum = "29993a25686778eb88d4189742cd713c9bce943bc54251a33509dc63cbacf73d" dependencies = [ - "base64", + "base64 0.22.0", + "rustls-pki-types", ] +[[package]] +name = "rustls-pki-types" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "beb461507cee2c2ff151784c52762cf4d9ff6a61f3e80968600ed24fa837fa54" + [[package]] name = "rustls-webpki" -version = "0.101.7" +version = "0.102.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" +checksum = "f3bce581c0dd41bce533ce695a1437fa16a7ab5ac3ccfa99fe1a620a7885eabf" dependencies = [ + "aws-lc-rs", "ring", + "rustls-pki-types", "untrusted", ] [[package]] -name = "ryu" +name = "rustversion" version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741" +checksum = "80af6f9131f277a45a3fba6ce8e2258037bb0477a67e610d3c1fe046ab31de47" + +[[package]] +name = "ryu" +version = "1.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e86697c916019a8588c99b5fac3cead74ec0b4b819707a682fd4d23fa0ce1ba1" + +[[package]] +name = "scc" +version = "2.1.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec96560eea317a9cc4e0bb1f6a2c93c09a19b8c4fc5cb3fcc0ec1c094cd783e2" +dependencies = [ + "sdd", +] [[package]] name = "scopeguard" @@ -1782,20 +1919,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] -name = "sct" -version = "0.7.1" +name = "sdd" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" -dependencies = [ - "ring", - "untrusted", -] +checksum = "b84345e4c9bd703274a082fb80caaa99b7612be48dfaa1dd9266577ec412309d" [[package]] name = "serde" -version = "1.0.189" +version = "1.0.199" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e422a44e74ad4001bdc8eede9a4570ab52f71190e9c076d14369f38b9200537" +checksum = "0c9f6e76df036c77cd94996771fb40db98187f096dd0b9af39c6c6e452ba966a" dependencies = [ "serde_derive", ] @@ -1812,20 +1945,20 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.189" +version = "1.0.199" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e48d1f918009ce3145511378cf68d613e3b3d9137d67272562080d68a2b32d5" +checksum = "11bd257a6541e141e42ca6d24ae26f7714887b47e89aa739099104c7e4d3b7fc" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.60", ] [[package]] name = "serde_json" -version = "1.0.107" +version = "1.0.116" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b420ce6e3d8bd882e9b243c6eed35dbc9a6110c9769e74b584e0d68d1f20c65" +checksum = "3e17db7126d17feb94eb3fad46bf1a96b034e8aacbc2e775fe81505f8b0b2813" dependencies = [ "itoa", "ryu", @@ -1834,57 +1967,58 @@ dependencies = [ [[package]] name = "serde_spanned" -version = "0.6.4" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"12022b835073e5b11e90a14f86838ceb1c8fb0325b72416845c487ac0fa95e80" +checksum = "eb3622f419d1296904700073ea6cc23ad690adbd66f13ea683df73298736f0c1" dependencies = [ "serde", ] [[package]] name = "serde_yaml" -version = "0.8.26" +version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "578a7433b776b56a35785ed5ce9a7e777ac0598aac5a6dd1b4b18a307c7fc71b" +checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 1.9.3", + "indexmap", + "itoa", "ryu", "serde", - "yaml-rust", + "unsafe-libyaml", ] [[package]] name = "serial_test" -version = "3.0.0" +version = "3.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "953ad9342b3aaca7cb43c45c097dd008d4907070394bd0751a0aa8817e5a018d" +checksum = "4b4b487fe2acf240a021cf57c6b2b4903b1e78ca0ecd862a71b71d2a51fed77d" dependencies = [ - "dashmap", "futures", - "lazy_static", "log", + "once_cell", "parking_lot", + "scc", "serial_test_derive", ] [[package]] name = "serial_test_derive" -version = "3.0.0" +version = "3.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b93fb4adc70021ac1b47f7d45e8cc4169baaa7ea58483bc5b721d19a26202212" +checksum = "82fe9db325bcef1fbcde82e078a5cc4efdf787e96b3b9cf45b50b529f2083d67" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.60", ] [[package]] name = "server" -version = "0.1.0" +version = "0.2.0" dependencies = [ "anyhow", "async-trait", - "base64", + "base64 0.21.7", "bitreader", "buf-read-ext", "chrono", @@ -1893,29 +2027,29 @@ dependencies = [ "futures", "futures-util", "hex", - "http", + "http-body-util", "httparse", "hyper", + "hyper-util", "itertools", "itoa", - "lazy_static", "libgssapi", "log", "log-mdc", "log4rs", "mime", + "ppp", "quick-xml", "rdkafka", "redis", "regex", "roxmltree", - "rustls", "rustls-pemfile", "serde", "serde_json", "sha1", + "socket2 0.5.7", "thiserror", - "tls-listener", "tokio", "tokio-rustls", 
"tokio-util", @@ -1960,9 +2094,9 @@ checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" [[package]] name = "signal-hook-registry" -version = "1.4.1" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" +checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1" dependencies = [ "libc", ] @@ -1984,9 +2118,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.11.1" +version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "942b4a808e05215192e39f4ab80813e599068285906cc91aa64f923db842bd5a" +checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" [[package]] name = "socket2" @@ -2000,12 +2134,12 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.5" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9" +checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c" dependencies = [ "libc", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -2027,9 +2161,31 @@ dependencies = [ [[package]] name = "strsim" -version = "0.10.0" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" + +[[package]] +name = "strum" +version = "0.26.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d8cec3501a5194c432b2b7976db6b7d10ec95c253208b45f83f7136aa985e29" +dependencies = [ + "strum_macros", +] + +[[package]] +name = "strum_macros" +version = "0.26.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c6cf59daf282c0a494ba14fd21610a0325f9f90ec9d1231dea26bcb1d696c946" 
+dependencies = [ + "heck", + "proc-macro2", + "quote", + "rustversion", + "syn 2.0.60", +] [[package]] name = "subtle" @@ -2050,9 +2206,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.38" +version = "2.0.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e96b79aaa137db8f61e26363a0c9b47d8b4ec75da28b7d1d614c2303e232408b" +checksum = "909518bc7b1c9b779f1bbf07f2929d35af9f0f37e47c6e9ef7f9dddc1e1821f3" dependencies = [ "proc-macro2", "quote", @@ -2073,44 +2229,43 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.9.0" +version = "3.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01ce4141aa927a6d1bd34a041795abd0db1cccba5d5f24b009f694bdf3a1f3fa" +checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" dependencies = [ "cfg-if", "fastrand", - "redox_syscall", "rustix", "windows-sys 0.52.0", ] [[package]] name = "termcolor" -version = "1.3.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6093bad37da69aab9d123a8091e4be0aa4a03e4d601ec641c327398315f62b64" +checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" dependencies = [ "winapi-util", ] [[package]] name = "thiserror" -version = "1.0.50" +version = "1.0.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9a7210f5c9a7156bb50aa36aed4c95afb51df0df00713949448cf9e97d382d2" +checksum = "f0126ad08bff79f29fc3ae6a55cc72352056dfff61e3ff8bb7129476d44b23aa" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.50" +version = "1.0.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "266b2e40bc00e5a6c09c3584011e08b06f123c00362c92b975ba9843aaaa14b8" +checksum = "d1cd413b5d558b4c5bf3680e324a6fa5014e7b7c067a51e69dbdf47eb7148b66" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.60", ] [[package]] @@ -2125,12 +2280,13 @@ dependencies = [ 
[[package]] name = "time" -version = "0.3.30" +version = "0.3.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4a34ab300f2dee6e562c10a046fc05e358b29f9bf92277f30c3c8d82275f6f5" +checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" dependencies = [ "deranged", "itoa", + "num-conv", "powerfmt", "serde", "time-core", @@ -2145,10 +2301,11 @@ checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" -version = "0.2.15" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ad70d68dba9e1f8aceda7aa6711965dfec1cac869f311a51bd08b3a2ccbce20" +checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" dependencies = [ + "num-conv", "time-core", ] @@ -2167,25 +2324,11 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" -[[package]] -name = "tls-listener" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7139f551f21722b83059739c7444582904d3a833ca316bece9a32870254746c" -dependencies = [ - "futures-util", - "hyper", - "pin-project-lite", - "thiserror", - "tokio", - "tokio-rustls", -] - [[package]] name = "tokio" -version = "1.35.1" +version = "1.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c89b4efa943be685f629b149f53829423f8f5531ea21249408e8e2f8671ec104" +checksum = "1adbebffeca75fcfd058afa480fb6c0b81e165a0323f9c9d39c9697e37c46787" dependencies = [ "backtrace", "bytes", @@ -2195,7 +2338,7 @@ dependencies = [ "parking_lot", "pin-project-lite", "signal-hook-registry", - "socket2 0.5.5", + "socket2 0.5.7", "tokio-macros", "windows-sys 0.48.0", ] @@ -2208,14 +2351,14 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 
2.0.60", ] [[package]] name = "tokio-openssl" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c08f9ffb7809f1b20c1b398d92acf4cc719874b3b2b2d9ea2f09b4a80350878a" +checksum = "6ffab79df67727f6acf57f1ff743091873c24c579b1e2ce4d8f53e47ded4d63d" dependencies = [ "futures-util", "openssl", @@ -2243,7 +2386,7 @@ dependencies = [ "postgres-protocol", "postgres-types", "rand", - "socket2 0.5.5", + "socket2 0.5.7", "tokio", "tokio-util", "whoami", @@ -2251,11 +2394,12 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.24.1" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" +checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ "rustls", + "rustls-pki-types", "tokio", ] @@ -2275,14 +2419,14 @@ dependencies = [ [[package]] name = "toml" -version = "0.8.4" +version = "0.8.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ef75d881185fd2df4a040793927c153d863651108a93c7e17a9e591baa95cc6" +checksum = "e9dd1545e8208b4a5af1aa9bbd0b4cf7e9ea08fabc5d0a5c67fcaafa17433aa3" dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.20.4", + "toml_edit 0.22.12", ] [[package]] @@ -2300,24 +2444,46 @@ version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap 2.0.2", + "indexmap", "toml_datetime", - "winnow", + "winnow 0.5.40", ] [[package]] name = "toml_edit" -version = "0.20.4" +version = "0.22.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "380f9e8120405471f7c9ad1860a713ef5ece6a670c7eae39225e477340f32fc4" +checksum = "d3328d4f68a705b2a4498da1d580585d39a6510f98318a2cec3018a7ec61ddef" dependencies = [ - "indexmap 2.0.2", + "indexmap", "serde", 
"serde_spanned", "toml_datetime", - "winnow", + "winnow 0.6.7", +] + +[[package]] +name = "tower" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" +dependencies = [ + "futures-core", + "futures-util", + "pin-project", + "pin-project-lite", + "tokio", + "tower-layer", + "tower-service", + "tracing", ] +[[package]] +name = "tower-layer" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" + [[package]] name = "tower-service" version = "0.3.2" @@ -2330,6 +2496,7 @@ version = "0.1.40" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" dependencies = [ + "log", "pin-project-lite", "tracing-attributes", "tracing-core", @@ -2343,7 +2510,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.60", ] [[package]] @@ -2357,9 +2524,9 @@ dependencies = [ [[package]] name = "try-lock" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" +checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" [[package]] name = "typemap-ors" @@ -2378,9 +2545,9 @@ checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" [[package]] name = "unicode-bidi" -version = "0.3.13" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" +checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" [[package]] name = "unicode-ident" @@ -2390,9 +2557,9 @@ checksum = 
"3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" [[package]] name = "unicode-normalization" -version = "0.1.22" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" +checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5" dependencies = [ "tinyvec", ] @@ -2412,6 +2579,12 @@ dependencies = [ "destructure_traitobject", ] +[[package]] +name = "unsafe-libyaml" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "673aac59facbab8a9007c7f6108d11f63b603f7cabff99fabf650fea5c32b861" + [[package]] name = "untrusted" version = "0.9.0" @@ -2420,9 +2593,9 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "url" -version = "2.4.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "143b538f18257fac9cad154828a57c6bf5157e1aa604d4816b5995bf6de87ae5" +checksum = "31e6302e3bb753d46e83516cae55ae196fc0c309407cf11ab35cc51a4c2a4633" dependencies = [ "form_urlencoded", "idna", @@ -2437,12 +2610,13 @@ checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" [[package]] name = "uuid" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f00cc9702ca12d3c81455259621e676d0f7251cec66a21e98fe2e9a37db93b2a" +checksum = "a183cf7feeba97b4dd1c0d46788634f6221d87fa961b305bed08c851829efcc0" dependencies = [ "getrandom", "rand", + "serde", ] [[package]] @@ -2472,11 +2646,17 @@ version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" +[[package]] +name = "wasite" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"b8dad83b4f25e74f184f64c43b150b91efe7647395b42289f38e50566d82855b" + [[package]] name = "wasm-bindgen" -version = "0.2.87" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" +checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -2484,24 +2664,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.87" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd" +checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.60", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-macro" -version = "0.2.87" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" +checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -2509,28 +2689,28 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.87" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" +checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.60", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.87" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" +checksum = 
"af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" [[package]] name = "web-sys" -version = "0.3.64" +version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b85cbef8c220a6abc02aefd892dfc0fc23afb1c6a426316ec33253a3877249b" +checksum = "77afa9a11836342370f4817622a2f0f418b134426d91a82dfb48f532d2ec13ef" dependencies = [ "js-sys", "wasm-bindgen", @@ -2550,11 +2730,12 @@ dependencies = [ [[package]] name = "whoami" -version = "1.4.1" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22fc3756b8a9133049b26c7f61ab35416c130e8c09b660f5b3958b446f52cc50" +checksum = "a44ab49fad634e88f55bf8f9bb3abd2f27d7204172a112c7c9987e01c1c94ea9" dependencies = [ - "wasm-bindgen", + "redox_syscall 0.4.1", + "wasite", "web-sys", ] @@ -2576,11 +2757,11 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" -version = "0.1.6" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f29e6f9198ba0d26b4c9f07dbe6f9ed633e1f3d5b8b414090084349e46a52596" +checksum = "4d4cc384e1e73b93bafa6fb4f1df8c41695c8a91cf9c4c64358067d15a7b6c6b" dependencies = [ - "winapi", + "windows-sys 0.52.0", ] [[package]] @@ -2591,11 +2772,11 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows-core" -version = "0.51.1" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1f8cf84f35d2db49a46868f947758c7a1138116f7fac3bc844f43ade1292e64" +checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" dependencies = [ - "windows-targets 0.48.5", + "windows-targets 0.52.5", ] [[package]] @@ -2613,7 +2794,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets 0.52.0", + 
"windows-targets 0.52.5", ] [[package]] @@ -2633,17 +2814,18 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.52.0" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd" +checksum = "6f0713a46559409d202e70e28227288446bf7841d3211583a4b53e3f6d96e7eb" dependencies = [ - "windows_aarch64_gnullvm 0.52.0", - "windows_aarch64_msvc 0.52.0", - "windows_i686_gnu 0.52.0", - "windows_i686_msvc 0.52.0", - "windows_x86_64_gnu 0.52.0", - "windows_x86_64_gnullvm 0.52.0", - "windows_x86_64_msvc 0.52.0", + "windows_aarch64_gnullvm 0.52.5", + "windows_aarch64_msvc 0.52.5", + "windows_i686_gnu 0.52.5", + "windows_i686_gnullvm", + "windows_i686_msvc 0.52.5", + "windows_x86_64_gnu 0.52.5", + "windows_x86_64_gnullvm 0.52.5", + "windows_x86_64_msvc 0.52.5", ] [[package]] @@ -2654,9 +2836,9 @@ checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.52.0" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea" +checksum = "7088eed71e8b8dda258ecc8bac5fb1153c5cffaf2578fc8ff5d61e23578d3263" [[package]] name = "windows_aarch64_msvc" @@ -2666,9 +2848,9 @@ checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_aarch64_msvc" -version = "0.52.0" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef" +checksum = "9985fd1504e250c615ca5f281c3f7a6da76213ebd5ccc9561496568a2752afb6" [[package]] name = "windows_i686_gnu" @@ -2678,9 +2860,15 @@ checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_gnu" -version = "0.52.0" +version = "0.52.5" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "88ba073cf16d5372720ec942a8ccbf61626074c6d4dd2e745299726ce8b89670" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313" +checksum = "87f4261229030a858f36b459e748ae97545d6f1ec60e5e0d6a3d32e0dc232ee9" [[package]] name = "windows_i686_msvc" @@ -2690,9 +2878,9 @@ checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" -version = "0.52.0" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a" +checksum = "db3c2bf3d13d5b658be73463284eaf12830ac9a26a90c717b7f771dfe97487bf" [[package]] name = "windows_x86_64_gnu" @@ -2702,9 +2890,9 @@ checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnu" -version = "0.52.0" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd" +checksum = "4e4246f76bdeff09eb48875a0fd3e2af6aada79d409d33011886d3e1581517d9" [[package]] name = "windows_x86_64_gnullvm" @@ -2714,9 +2902,9 @@ checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_gnullvm" -version = "0.52.0" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e" +checksum = "852298e482cd67c356ddd9570386e2862b5673c85bd5f88df9ab6802b334c596" [[package]] name = "windows_x86_64_msvc" @@ -2726,15 +2914,24 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "windows_x86_64_msvc" -version = "0.52.0" +version = "0.52.5" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0" + +[[package]] +name = "winnow" +version = "0.5.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" +checksum = "f593a95398737aeed53e489c785df13f3618e41dbcd6718c6addbf1395aa6876" +dependencies = [ + "memchr", +] [[package]] name = "winnow" -version = "0.5.17" +version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3b801d0e0a6726477cc207f60162da452f3a95adb368399bef20a946e06f65c" +checksum = "14b9415ee827af173ebb3f15f9083df5a122eb93572ec28741fb153356ea2578" dependencies = [ "memchr", ] @@ -2762,15 +2959,6 @@ version = "0.13.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "66fee0b777b0f5ac1c69bb06d361268faafa61cd4682ae064a171c16c433e9e4" -[[package]] -name = "yaml-rust" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85" -dependencies = [ - "linked-hash-map", -] - [[package]] name = "zerocopy" version = "0.7.32" @@ -2788,14 +2976,20 @@ checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.38", + "syn 2.0.60", ] +[[package]] +name = "zeroize" +version = "1.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d" + [[package]] name = "zstd-sys" -version = "2.0.9+zstd.1.5.5" +version = "2.0.10+zstd.1.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e16efa8a874a0481a574084d34cc26fdb3b99627480f785888deb6386506656" +checksum = "c253a4914af5bafc8fa8c86ee400827e83cf6ec01195ec1f1ed8441bf00d65aa" dependencies = [ "cc", "pkg-config", diff --git a/cli/Cargo.toml 
b/cli/Cargo.toml index 3f206df..ee82287 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "cli" -version = "0.1.0" +version = "0.2.0" edition = "2021" [[bin]] @@ -18,3 +18,7 @@ log = "0.4.19" serde_json = "1.0.97" tokio = { version = "1.35.1", features = ["full"] } roxmltree = "0.19.0" +serde = { version = "1.0", features = ["derive"] } +uuid = { version = "1.7.0", features = ["v4", "fast-rng"] } +chrono = { version = "0.4.26", default-features = false, features = ["clock"] } +strum = { version = "0.26.1", features = ["derive"] } \ No newline at end of file diff --git a/cli/src/bookmarks.rs b/cli/src/bookmarks.rs index d0311b1..8d98608 100644 --- a/cli/src/bookmarks.rs +++ b/cli/src/bookmarks.rs @@ -39,7 +39,7 @@ async fn show(db: &Db, matches: &ArgMatches) -> Result<()> { })?; if let Some(machine) = machine { - let bookmark = db.get_bookmark(&machine, subscription.uuid()).await?; + let bookmark = db.get_bookmark(&machine, &subscription.uuid_string()).await?; match bookmark { Some(str) => { println!("{}", str) @@ -51,7 +51,7 @@ async fn show(db: &Db, matches: &ArgMatches) -> Result<()> { ), }; } else { - for data in db.get_bookmarks(subscription.uuid()).await? { + for data in db.get_bookmarks(&subscription.uuid_string()).await? 
{ println!("{}:{}", data.machine, data.bookmark); } } @@ -82,7 +82,8 @@ async fn delete(db: &Db, matches: &ArgMatches) -> Result<()> { }; if utils::confirm(&message) { - db.delete_bookmarks(machine.as_deref(), subscription.as_ref().map(|x| x.uuid())) + let uuid_opt = subscription.map(|x| x.uuid_string()); + db.delete_bookmarks(machine.as_deref(), uuid_opt.as_deref()) .await?; println!("Done"); } else { @@ -125,7 +126,7 @@ async fn copy(db: &Db, matches: &ArgMatches) -> Result<()> { })?; if let Some(machine) = machine { - let existing_bookmark = db.get_bookmark(&machine, destination.uuid()).await?; + let existing_bookmark = db.get_bookmark(&machine, &destination.uuid_string()).await?; if existing_bookmark.is_some() { println!( "WARNING: A bookmark for {} already exists within subscription \"{}\"", @@ -135,7 +136,7 @@ async fn copy(db: &Db, matches: &ArgMatches) -> Result<()> { } let bookmark = db - .get_bookmark(&machine, source.uuid()) + .get_bookmark(&machine, &source.uuid_string()) .await? .ok_or_else(|| { anyhow!( @@ -150,7 +151,7 @@ async fn copy(db: &Db, matches: &ArgMatches) -> Result<()> { return Ok(()); } - db.store_bookmark(&machine, destination.uuid(), &bookmark) + db.store_bookmark(&machine, &destination.uuid_string(), &bookmark) .await?; println!("1 bookmark copied"); } else { @@ -160,8 +161,8 @@ async fn copy(db: &Db, matches: &ArgMatches) -> Result<()> { }; let mut counter: usize = 0; - for data in db.get_bookmarks(source.uuid()).await? { - db.store_bookmark(&data.machine, destination.uuid(), &data.bookmark) + for data in db.get_bookmarks(&source.uuid_string()).await? 
{ + db.store_bookmark(&data.machine, &destination.uuid_string(), &data.bookmark) .await?; counter += 1; } diff --git a/cli/src/config.rs b/cli/src/config.rs new file mode 100644 index 0000000..a1e77ae --- /dev/null +++ b/cli/src/config.rs @@ -0,0 +1,59 @@ +use anyhow::{bail, Context, Result}; +use common::{models::config::parse, subscription::SubscriptionData}; +use log::info; +use std::{ + fs::{self}, + path::{Path, PathBuf}, +}; + +fn visit_dirs(path: &Path) -> Result> { + if !path.exists() { + bail!("{} does not exist", path.display()); + } + let mut config_files = Vec::new(); + if path.is_dir() { + for entry in fs::read_dir(path)? { + let entry = entry?; + let path = entry.path(); + if path.is_dir() { + config_files.append(&mut visit_dirs(&path)?); + } else { + config_files.push(entry.path()) + } + } + } else if path.is_file() { + config_files.push(path.to_path_buf()) + } + Ok(config_files) +} +pub fn load_from_path(path: &str, revision: Option<&String>) -> Result> { + let mut subscriptions = Vec::new(); + + let root = Path::new(path); + let config_files = visit_dirs(root).context("Failed to config load files")?; + + info!("Found config files: {:?}", config_files); + for path in config_files { + let content = + fs::read(&path).with_context(|| format!("Failed to read {}", path.display()))?; + let content_str = String::from_utf8(content).with_context(|| { + format!( + "Failed to decode the content of {} using UTF-8", + path.display() + ) + })?; + + let subscription: SubscriptionData = parse(&content_str, revision) + .with_context(|| format!("Failed to parse file {}", path.display()))?; + info!( + "{}: {} (uuid: {}, version: {})", + path.display(), + subscription.name(), + subscription.uuid(), + subscription.public_version()? 
+ ); + subscriptions.push(subscription); + } + + Ok(subscriptions) +} diff --git a/cli/src/heartbeats.rs b/cli/src/heartbeats.rs index fb43f0b..ef9a6da 100644 --- a/cli/src/heartbeats.rs +++ b/cli/src/heartbeats.rs @@ -26,7 +26,7 @@ async fn find_subscription(db: &Db, matches: &ArgMatches) -> Result Result<()> { let subscription = find_subscription(db, matches).await?; - let subscription_uuid = subscription.map(|sub| sub.uuid().to_owned()); + let subscription_uuid = subscription.map(|sub| sub.uuid_string()); let heartbeats = if let Some(machine) = matches.get_one::("machine") { db.get_heartbeats_by_machine(machine, subscription_uuid.as_deref()) diff --git a/cli/src/lib.rs b/cli/src/lib.rs index 2aaad1a..cbd337d 100644 --- a/cli/src/lib.rs +++ b/cli/src/lib.rs @@ -12,6 +12,8 @@ mod heartbeats; mod stats; mod subscriptions; mod utils; +mod config; +mod skell; pub async fn run(matches: ArgMatches, help_str: StyledStr) -> Result<()> { let settings = Settings::new(matches.get_one::("config")) @@ -33,7 +35,7 @@ pub async fn run(matches: ArgMatches, help_str: StyledStr) -> Result<()> { }; if let Some(matches) = matches.subcommand_matches("subscriptions") { - subscriptions::run(&db, matches).await?; + subscriptions::run(&db, matches, &settings).await?; } else if let Some(matches) = matches.subcommand_matches("heartbeats") { heartbeats::run(&db, matches).await?; } else if let Some(matches) = matches.subcommand_matches("stats") { diff --git a/cli/src/main.rs b/cli/src/main.rs index 342f15a..eb6bd63 100644 --- a/cli/src/main.rs +++ b/cli/src/main.rs @@ -1,10 +1,14 @@ -#![allow(clippy::too_many_arguments)] +#![deny(unsafe_code)] use std::env; -use common::{database::schema::Version, settings::DEFAULT_CONFIG_FILE}; - use clap::{arg, command, value_parser, Arg, ArgAction, ArgGroup, Command}; +use common::{ + database::schema::Version, + settings::DEFAULT_CONFIG_FILE, + subscription::{SubscriptionOutputFormat, DEFAULT_FILE_NAME}, +}; +use strum::VariantNames; #[tokio::main] async 
fn main() { @@ -81,7 +85,7 @@ async fn main() { .subcommand( Command::new("add") .about("Add a new output for this subscription") - .arg(arg!(-f --format "Output format").value_parser(["json", "raw"]).required(true)) + .arg(arg!(-f --format "Output format").value_parser(SubscriptionOutputFormat::VARIANTS.to_vec()).required(true)) .subcommand( Command::new("tcp") .about("TCP output") @@ -118,7 +122,7 @@ async fn main() { .arg( arg!(--"append-node-name" "Append the configured node name at the end of the generated path (parent dir of )") ) - .arg(arg!(--filename "Name of the file where logs will be written.").default_value("messages")) + .arg(arg!(--filename "Name of the file where logs will be written.").default_value(DEFAULT_FILE_NAME)) ) .subcommand( Command::new("unixdatagram") @@ -201,6 +205,12 @@ async fn main() { arg!(--"max-envelope-size" "Max envelope size") // TODO: improve help .value_parser(value_parser!(u32)) ) + .arg( + arg!(--"locale" [LOCALE] "Language in which openwec wants the rendering info data to be translated (RFC 3066 code)") + ) + .arg( + arg!(--"data-locale" [DATA_LOCALE] "Language in which openwec wants the numerical data to be formatted (RFC 3066 code)") + ) .arg( arg!(--"enable" "Enable the subscription") ) @@ -246,6 +256,14 @@ async fn main() { "file to import" )) ) + .subcommand( + Command::new("load") + .about("Load subscriptions from configuration files") + .arg(arg!( "Directory of configuration files or a single configuration file")) + .arg(arg!(-k --keep "Do not delete subscriptions that are not present in the configuration")) + .arg(arg!(-y --yes "Do not prompt for confirmation when is a configuration file and --keep is not used")) + .arg(arg!(-r --revision "Revision name of the configuration. 
If present, it will be added by openwec as metadata of all events received using this subscription.")) + ) .subcommand( Command::new("delete") .about("Delete an existing subscription") @@ -284,6 +302,13 @@ async fn main() { .arg(arg!(-a --all "Reload all subscriptions")) .arg(arg!( ... "Name or UUID of subscriptions").action(ArgAction::Append).required(false)) ) + .subcommand( + Command::new("skell") + .about("Generate a subscription configuration file (that may be used with `load`)") + .arg(arg!(-n --name "Name of the subscription")) + .arg(arg!(-m --minimal "Generate a minimal subscription configuration")) + .arg(arg!( "Path of the newly generated configuration file. '-' means stdout.").required(false).default_value("-")) + ) ) .subcommand( Command::new("heartbeats") diff --git a/cli/src/skell.rs b/cli/src/skell.rs new file mode 100644 index 0000000..555e269 --- /dev/null +++ b/cli/src/skell.rs @@ -0,0 +1,226 @@ +use chrono::{DateTime, Local}; +use common::subscription::{ + DEFAULT_CONNECTION_RETRY_COUNT, DEFAULT_CONNECTION_RETRY_INTERVAL, DEFAULT_CONTENT_FORMAT, + DEFAULT_ENABLED, DEFAULT_FILE_APPEND_NODE_NAME, DEFAULT_FILE_NAME, DEFAULT_HEARTBEAT_INTERVAL, + DEFAULT_IGNORE_CHANNEL_ERROR, DEFAULT_MAX_ENVELOPE_SIZE, DEFAULT_MAX_TIME, + DEFAULT_READ_EXISTING_EVENTS, +}; +use uuid::Uuid; + +fn format_bool(value: bool) -> String { + if value { + "true".to_string() + } else { + "false".to_string() + } +} + +fn get_header(uuid: Uuid, name: &str, now: DateTime) -> String { + format!( + r#"# autogenerated by openwec {} +# {} + +# Unique identifier of the subscription +uuid = "{}" +# Unique name of the subscription +name = "{}" + +# Subscription query +query = """ + + + +""" +"#, + env!("CARGO_PKG_VERSION"), + now.to_rfc2822(), + uuid, + name + ) +} + +fn get_options() -> String { + format!( + r#" +# Subscription options (optional) +# [options] +# +# Enable/disable the subscription +# enabled = {} + +# If the uri parameter is undefined (default), the subscription will +# 
always be sent to clients. Otherwise, only clients sending enumerate +# requests to the URI will be able to get it. +# uri = + +# The maximum allowable time, in seconds, before the client will send +# an heartbeat message if it has no new events to send. +# heartbeat_interval = {} + +# Number of times the client will attempt to connect if the subscriber +# is unreachable. +# connection_retry_count = {} + +# Interval observed between each connection attempt if the subscriber +# is unreachable. +# connection_retry_interval = {} + +# The maximum time, in seconds, that the client should aggregate new +# events before sending them. +# max_time = {} + +# The maximum number of bytes in the SOAP envelope used to deliver +# the events. +# max_envelope_size = {} + +# If `true`, the event source should replay all possible events that +# match the filter and any events that subsequently occur for that +# event source. +# read_existing_events = {} + +# This option determines whether rendering information are to be passed +# with events or not. `Raw` means that only event data will be passed +# without any rendering information, whereas `RenderedText` adds +# rendering information. +# content_format = "{}" + +# This option determines if various filtering options resulting in errors +# are to result in termination of the processing by clients. +# ignore_channel_error = {} + +# This option determines the language in which openwec wants the +# rendering info data to be translated. +# Defaults to unset, meaning OpenWEC lets the clent choose. +# locale = + +# This option determines the language in which openwec wants the +# numerical data to be formatted. +# Defaults to unset, meaning OpenWEC lets the clent choose. 
+# data_locale = +"#, + format_bool(DEFAULT_ENABLED), + DEFAULT_HEARTBEAT_INTERVAL, + DEFAULT_CONNECTION_RETRY_COUNT, + DEFAULT_CONNECTION_RETRY_INTERVAL, + DEFAULT_MAX_TIME, + DEFAULT_MAX_ENVELOPE_SIZE, + format_bool(DEFAULT_READ_EXISTING_EVENTS), + DEFAULT_CONTENT_FORMAT, + format_bool(DEFAULT_IGNORE_CHANNEL_ERROR), + ) +} + +fn get_filter() -> String { + r#" +# Subscription filter (optional) +# +# Filters enables you to choose which clients can read the subscription +# There are two operations available : +# - "Only": only the listed principals will be able to read the subscription +# - "Except": everyone but the listed principals will be able to read the subscription +# +# By default, everyone can read the subscription. +# +# [filter] +# operation = "Only" +# princs = ["courgette@REALM", "radis@REALM"] + +"# + .to_string() +} + +fn get_outputs() -> String { + format!( + r#" +# +# Outputs +# + +# Configure a Files output +# [[outputs]] +# driver = "Files" +# format = "Raw" + +# Files driver has the following parameters: +# - base (required): the base path in which files will be written +# - split_on_addr_index (optional, defaults to undefined): split the IP address +# of the client on the given index to build a directory tree. +# - append_node_name (optional, defaults to {}): Add the openwec node's +# name to the path. +# - filename (optional, defaults to "{}"): the name of the file containing events +# for one client. 
+# config = {{ base = "/var/log/openwec/", split_on_addr_index = 2, append_node_name = {}, filename = "{}" }} + + +# Configure a Kafka output +# [[outputs]] +# driver = "Kafka" +# format = "Raw" + +# Kafka driver has the following parameters: +# - topic (required): the Kafka topic to send events to +# - options (optional, defaults to undefined): additional kafka settings, directly +# sent to librdkafka (https://docs.confluent.io/platform/current/clients/librdkafka/html/md_CONFIGURATION.html) +# config = {{ topic = "openwec", options = {{ "bootstrap.servers" = "localhost:9092" }} }} + + +# Configure a Tcp output +# [[outputs]] +# driver = "Tcp" +# format = "Raw" + +# Tcp driver has the following paramters: +# - addr (required): Hostname or IP Address to send events to +# - port (required): Tcp port to send events to +# config = {{ addr = "localhost", port = 5000 }} + + +# Configure a Redis output +# [[outputs]] +# driver = "Redis" +# format = "Raw" + +# Redis driver has the following parameters: +# - addr (required): Hostname or IP Address of the Redis server +# - list (required): Name of the Redis list to push events to +# config = {{ addr = "localhost", list = "openwec" }} + + +# Configure a UnixDatagram output +# [[outputs]] +# driver = "UnixDatagram" +# format = "Raw" + +# UnixDatagram driver has the following parameters: +# - path (required): Path of the Unix socket to send events to +# config = {{ path = "/tmp/openwec.socket" }} +"#, + format_bool(DEFAULT_FILE_APPEND_NODE_NAME), + DEFAULT_FILE_NAME, + format_bool(DEFAULT_FILE_APPEND_NODE_NAME), + DEFAULT_FILE_NAME + ) +} + +pub fn get_minimal_skell_content(uuid: Uuid, name: &str, now: DateTime) -> String { + let mut content = get_header(uuid, name, now); + content.push_str( + r#" +# Configures a simple output which stores events in files (one per client) in Raw format +# (without parsing of XML Events) +[[outputs]] +driver = "Files" +format = "Raw" +config = { base = "/var/log/openwec/" } +"#, + ); + content 
+} + +pub fn get_full_skell_content(uuid: Uuid, name: &str, now: DateTime) -> String { + let mut content = get_header(uuid, name, now); + content.push_str(&get_options()); + content.push_str(&get_filter()); + content.push_str(&get_outputs()); + content +} diff --git a/cli/src/stats.rs b/cli/src/stats.rs index dcc04f5..7fe526d 100644 --- a/cli/src/stats.rs +++ b/cli/src/stats.rs @@ -41,7 +41,7 @@ pub async fn stats_text( let interval = interval.unwrap_or_else(|| subscription.heartbeat_interval()) as i64; let start_heartbeat_interval = now - interval; let stats = db - .get_stats(subscription.uuid(), start_heartbeat_interval) + .get_stats(&subscription.uuid_string(), start_heartbeat_interval) .await?; let start_heartbeat_interval_date = timestamp_to_local_date(start_heartbeat_interval)?; @@ -79,7 +79,7 @@ pub async fn stats_json( let interval = interval.unwrap_or_else(|| subscription.heartbeat_interval()) as i64; let start_heartbeat_interval = now - interval; let stats = db - .get_stats(subscription.uuid(), start_heartbeat_interval) + .get_stats(&subscription.uuid_string(), start_heartbeat_interval) .await?; let start_heartbeat_interval_date = timestamp_to_local_date(start_heartbeat_interval)?; diff --git a/cli/src/subscriptions.rs b/cli/src/subscriptions.rs index 2e9c84d..ebf0dc0 100644 --- a/cli/src/subscriptions.rs +++ b/cli/src/subscriptions.rs @@ -1,10 +1,12 @@ use common::{ database::Db, encoding::decode_utf16le, + settings::Settings, subscription::{ - ContentFormat, FileConfiguration, KafkaConfiguration, PrincsFilter, PrincsFilterOperation, - SubscriptionData, SubscriptionMachineState, SubscriptionOutput, SubscriptionOutputFormat, - TcpConfiguration, RedisConfiguration, UnixDatagramConfiguration, + ContentFormat, FilesConfiguration, KafkaConfiguration, PrincsFilterOperation, + RedisConfiguration, SubscriptionData, SubscriptionMachineState, SubscriptionOutput, + SubscriptionOutputDriver, SubscriptionOutputFormat, TcpConfiguration, + UnixDatagramConfiguration, }, 
}; use roxmltree::{Document, Node}; @@ -12,56 +14,76 @@ use std::{ collections::{HashMap, HashSet}, fs::File, io::{BufReader, Read}, + path::Path, str::FromStr, time::SystemTime, }; +use uuid::Uuid; use anyhow::{anyhow, bail, ensure, Context, Result}; use clap::ArgMatches; use log::{debug, info, warn}; +use std::io::Write; -use crate::utils::{self, confirm}; +use crate::{ + config, + skell::{get_full_skell_content, get_minimal_skell_content}, + utils::{self, confirm}, +}; enum ImportFormat { OpenWEC, Windows, } -pub async fn run(db: &Db, matches: &ArgMatches) -> Result<()> { +pub async fn run(db: &Db, matches: &ArgMatches, settings: &Settings) -> Result<()> { match matches.subcommand() { Some(("new", matches)) => { + check_subscriptions_ro(settings)?; new(db, matches).await?; } Some(("show", matches)) => { show(db, matches).await?; } Some(("edit", matches)) => { + check_subscriptions_ro(settings)?; edit(db, matches).await?; } Some(("export", matches)) => { export(db, matches).await?; } Some(("import", matches)) => { + check_subscriptions_ro(settings)?; import(db, matches).await?; } Some(("delete", matches)) => { + check_subscriptions_ro(settings)?; delete(db, matches).await?; } Some(("machines", matches)) => { machines(db, matches).await?; } Some(("duplicate", matches)) => { + check_subscriptions_ro(settings)?; duplicate(db, matches).await?; } Some(("enable", matches)) => { + check_subscriptions_ro(settings)?; set_enable(db, matches, true).await?; } Some(("disable", matches)) => { + check_subscriptions_ro(settings)?; set_enable(db, matches, false).await?; } Some(("reload", matches)) => { reload(db, matches).await?; } + Some(("load", matches)) => { + load(db, matches).await?; + } + Some(("skell", matches)) => { + skell(db, matches).await?; + } _ => { list(db, matches).await?; } @@ -97,8 +119,6 @@ async fn show(db: &Db, matches: &ArgMatches) -> Result<()> { .context("Failed to retrieve subscription from database")?; println!("{}", subscription); - println!("Event 
filter query:\n"); - println!("{}", subscription.query()); Ok(()) } @@ -116,7 +136,7 @@ async fn duplicate(db: &Db, matches: &ArgMatches) -> Result<()> { .expect("Required by clap") .to_string(), ); - db.store_subscription(new).await?; + db.store_subscription(&new).await?; Ok(()) } @@ -132,7 +152,7 @@ async fn export(db: &Db, matches: &ArgMatches) -> Result<()> { .context("Failed to retrieve subscriptions from database")? }; - let res = serde_json::to_string(&subscriptions)?; + let res = common::models::export::serialize(&subscriptions)?; println!("{}", res); Ok(()) } @@ -333,12 +353,39 @@ async fn edit(db: &Db, matches: &ArgMatches) -> Result<()> { ); subscription.set_ignore_channel_error(*ignore_channel_error); } + + if matches.contains_id("locale") { + if let Some(locale) = matches.get_one::("locale") { + debug!( + "Update locale from {:?} to {:?}", + subscription.locale(), + Some(locale) + ); + subscription.set_locale(Some(locale.to_string())); + } else { + subscription.set_locale(None); + } + } + + if matches.contains_id("data-locale") { + if let Some(data_locale) = matches.get_one::("data-locale") { + debug!( + "Update data-locale from {:?} to {:?}", + subscription.data_locale(), + Some(data_locale) + ); + subscription.set_data_locale(Some(data_locale.to_string())); + } else { + subscription.set_locale(None); + } + } + info!( "Saving subscription {} ({})", subscription.name(), subscription.uuid() ); - db.store_subscription(subscription).await?; + db.store_subscription(&subscription).await?; Ok(()) } @@ -363,37 +410,55 @@ async fn new(db: &Db, matches: &ArgMatches) -> Result<()> { .expect("Defaulted by clap"), )?; - let subscription = SubscriptionData::new( + let mut subscription = SubscriptionData::new( matches.get_one::("name").expect("Required by clap"), - matches.get_one::("uri").map(|e| e.as_str()), &query, - matches.get_one::("heartbeat-interval"), - matches.get_one::("connection-retry-count"), - matches.get_one::("connection-retry-interval"), - 
matches.get_one::("max-time"), - matches.get_one::("max-envelope-size"), - false, - *matches - .get_one::("read-existing-events") - .expect("defaulted by clap"), - content_format, - *matches - .get_one::("ignore-channel-error") - .expect("Defaulted by clap"), - PrincsFilter::empty(), - None, ); + subscription + .set_uri(matches.get_one::("uri").cloned()) + .set_enabled(false) + .set_read_existing_events( + *matches + .get_one::("read-existing-events") + .expect("defaulted by clap"), + ) + .set_content_format(content_format) + .set_ignore_channel_error( + *matches + .get_one::("ignore-channel-error") + .expect("Defaulted by clap"), + ); + + if let Some(heartbeat_interval) = matches.get_one::("heartbeat-interval") { + subscription.set_heartbeat_interval(*heartbeat_interval); + } + + if let Some(connection_retry_count) = matches.get_one::("connection-retry-count") { + subscription.set_connection_retry_count(*connection_retry_count); + } + + if let Some(connection_retry_interval) = matches.get_one::("connection-retry-interval") { + subscription.set_connection_retry_interval(*connection_retry_interval); + } + + if let Some(max_time) = matches.get_one::("max-time") { + subscription.set_max_time(*max_time); + } + + if let Some(max_envelope_size) = matches.get_one::("max-envelope-size") { + subscription.set_max_envelope_size(*max_envelope_size); + } + debug!( "Subscription that is going to be inserted: {:?}", subscription ); - let name = subscription.name().to_owned(); - db.store_subscription(subscription).await?; + db.store_subscription(&subscription).await?; println!( "Subscription {} has been created successfully. \ You need to configure its outputs using `openwec subscriptions edit {} outputs add --help`. 
\ When you are ready, you can enable it using `openwec subscriptions edit {} --enable", - name, name, name + subscription.name(), subscription.name(), subscription.name() ); Ok(()) } @@ -424,7 +489,7 @@ async fn import(db: &Db, matches: &ArgMatches) -> Result<()> { subscription.set_enabled(false); debug!("Store {:?}", subscription); - db.store_subscription(subscription) + db.store_subscription(&subscription) .await .context("Failed to store subscription")?; } @@ -437,8 +502,11 @@ async fn import(db: &Db, matches: &ArgMatches) -> Result<()> { Ok(()) } -fn import_openwec(reader: BufReader) -> Result> { - Ok(serde_json::from_reader(reader)?) +fn import_openwec(mut reader: BufReader) -> Result> { + let mut buffer = String::new(); + reader.read_to_string(&mut buffer)?; + + common::models::export::parse(&buffer) } fn import_windows(mut reader: BufReader) -> Result> { @@ -455,10 +523,13 @@ fn import_windows(mut reader: BufReader) -> Result> "Invalid subscription format" ); - let mut data = SubscriptionData::empty(); + // We initialize subscription data with empty name and query + // They will be overwritten later + let mut data = SubscriptionData::new("", ""); + for node in root.children() { if node.has_tag_name("SubscriptionId") && node.text().is_some() { - data.set_name(node.text().map(String::from).unwrap()) + data.set_name(node.text().map(String::from).unwrap()); } else if node.has_tag_name("SubscriptionType") && node.text().is_some() { ensure!( node.text().map(String::from).unwrap() == "SourceInitiated", @@ -481,7 +552,7 @@ fn import_windows(mut reader: BufReader) -> Result> if batching_node.has_tag_name("MaxLatencyTime") && batching_node.text().is_some() { - data.set_max_time(batching_node.text().unwrap().parse::()?) 
+ data.set_max_time(batching_node.text().unwrap().parse::()?); } } } else if delivery_node.has_tag_name("PushSettings") { @@ -519,7 +590,7 @@ async fn delete(db: &Db, matches: &ArgMatches) -> Result<()> { { return Ok(()); } - db.delete_subscription(subscription.uuid()).await + db.delete_subscription(&subscription.uuid_string()).await } async fn edit_filter(subscription: &mut SubscriptionData, matches: &ArgMatches) -> Result<()> { @@ -620,26 +691,37 @@ async fn outputs(subscription: &mut SubscriptionData, matches: &ArgMatches) -> R } async fn outputs_add(subscription: &mut SubscriptionData, matches: &ArgMatches) -> Result<()> { - let format: SubscriptionOutputFormat = match matches - .get_one::("format") - .ok_or_else(|| anyhow!("Missing format argument"))? - { - x if x == "raw" => SubscriptionOutputFormat::Raw, - x if x == "json" => SubscriptionOutputFormat::Json, - _ => bail!("Invalid output format"), - }; + let format: SubscriptionOutputFormat = SubscriptionOutputFormat::from_str( + matches + .get_one::("format") + .ok_or_else(|| anyhow!("Missing format argument"))?, + )?; let output = match matches.subcommand() { - Some(("tcp", matches)) => SubscriptionOutput::Tcp(format, outputs_add_tcp(matches)?, true), - Some(("redis", matches)) => SubscriptionOutput::Redis(format, outputs_add_redis(matches)?, true), - Some(("kafka", matches)) => { - SubscriptionOutput::Kafka(format, outputs_add_kafka(matches)?, true) - } - Some(("files", matches)) => { - SubscriptionOutput::Files(format, outputs_add_files(matches)?, true) - } - Some(("unixdatagram", matches)) => { - SubscriptionOutput::UnixDatagram(format, outputs_add_unix_datagram(matches)?, true) - } + Some(("tcp", matches)) => SubscriptionOutput::new( + format, + SubscriptionOutputDriver::Tcp(outputs_add_tcp(matches)?), + true, + ), + Some(("redis", matches)) => SubscriptionOutput::new( + format, + SubscriptionOutputDriver::Redis(outputs_add_redis(matches)?), + true, + ), + Some(("kafka", matches)) => 
SubscriptionOutput::new( + format, + SubscriptionOutputDriver::Kafka(outputs_add_kafka(matches)?), + true, + ), + Some(("files", matches)) => SubscriptionOutput::new( + format, + SubscriptionOutputDriver::Files(outputs_add_files(matches)?), + true, + ), + Some(("unixdatagram", matches)) => SubscriptionOutput::new( + format, + SubscriptionOutputDriver::UnixDatagram(outputs_add_unix_datagram(matches)?), + true, + ), _ => { bail!("Missing output type") } @@ -656,7 +738,7 @@ fn outputs_add_tcp(matches: &ArgMatches) -> Result { .get_one::("port") .ok_or_else(|| anyhow!("Missing TCP port"))?; - info!("Adding TCP output : {}:{}", addr, port); + info!("Adding TCP output: {}:{}", addr, port); Ok(TcpConfiguration::new(addr.clone(), *port)) } @@ -669,7 +751,7 @@ fn outputs_add_redis(matches: &ArgMatches) -> Result { .get_one::("list") .ok_or_else(|| anyhow!("Missing Redis list"))?; - info!("Adding Redis output : address: {}, list {}", addr, list); + info!("Adding Redis output: address: {}, list {}", addr, list); Ok(RedisConfiguration::new(addr.clone(), list.clone())) } @@ -698,7 +780,7 @@ fn outputs_add_kafka(matches: &ArgMatches) -> Result { Ok(KafkaConfiguration::new(topic.clone(), options_hashmap)) } -fn outputs_add_files(matches: &ArgMatches) -> Result { +fn outputs_add_files(matches: &ArgMatches) -> Result { let base = matches .get_one::("base") .ok_or_else(|| anyhow!("Missing files base path"))? 
@@ -713,7 +795,7 @@ fn outputs_add_files(matches: &ArgMatches) -> Result { .expect("defaulted by clap") .to_owned(); - let config = FileConfiguration::new(base, split_on_addr_index, append_node_name, filename); + let config = FilesConfiguration::new(base, split_on_addr_index, append_node_name, filename); info!("Adding Files output with config {:?}", config); Ok(config) } @@ -724,7 +806,7 @@ fn outputs_add_unix_datagram(matches: &ArgMatches) -> Result Result<()> { }; let machines = db - .get_machines(subscription.uuid(), start_heartbeat_interval, state) + .get_machines(&subscription.uuid_string(), start_heartbeat_interval, state) .await .context("Failed to retrieve machines for subscription")?; @@ -856,7 +938,7 @@ async fn set_enable(db: &Db, matches: &ArgMatches, value: bool) -> Result<()> { } for subscription in to_store { - db.store_subscription(subscription.clone()) + db.store_subscription(&subscription) .await .context("Failed to store subscription in db")?; if value { @@ -873,8 +955,8 @@ async fn reload(db: &Db, matches: &ArgMatches) -> Result<()> { let mut subscriptions = find_subscriptions(db, matches).await?; for subscription in subscriptions.iter_mut() { - subscription.update_version(); - db.store_subscription(subscription.clone()) + subscription.update_internal_version(); + db.store_subscription(subscription) .await .context("Failed to store subscription in db")?; println!("+ Subscription {} has been reloaded", subscription.name()); @@ -883,6 +965,104 @@ async fn reload(db: &Db, matches: &ArgMatches) -> Result<()> { Ok(()) } +async fn load(db: &Db, matches: &ArgMatches) -> Result<()> { + let path = matches + .get_one::("path") + .ok_or_else(|| anyhow!("Missing argument path"))?; + let keep = matches.get_one::("keep").expect("Defaulted by clap"); + let yes = matches.get_one::("yes").expect("Defaulted by clap"); + let revision = matches.get_one::("revision"); + + let path_obj = Path::new(path); + if !path_obj.exists() { + bail!("Path {} does not exist", 
path_obj.display()); + } + + if path_obj.is_file() && !keep && !yes && !confirm(&format!("Are you sure that you want to remove all existing subscriptions to keep the only one described in {}? Use -k/--keep otherwise.", path_obj.display())) { + println!("Aborted"); + return Ok(()) + } + + let subscriptions = + config::load_from_path(path, revision).context("Failed to load config files")?; + + if subscriptions.is_empty() { + bail!("Could not find any subscriptions"); + } + + for subscription in subscriptions.iter() { + if !check_query_size(subscription.query()).with_context(|| { + format!( + "Failed to check query size for subscription '{}' ({})", + subscription.name(), + subscription.uuid() + ) + })? { + println!("Aborted"); + return Ok(()); + } + } + // Build a set of subscriptions uuids + let mut uuids = HashSet::new(); + + // Insert or update subscriptions + for subscription in subscriptions.iter() { + println!("+ Load subscription {}", subscription.name()); + db.store_subscription(subscription) + .await + .context("Failed to store subscription in db")?; + uuids.insert(subscription.uuid()); + } + + if !keep { + // Remove other subscriptions + let all_subscriptions = db.get_subscriptions().await?; + for subscription in all_subscriptions.iter() { + if !uuids.contains(subscription.uuid()) { + println!("+ Remove subscription {}", subscription.name()); + db.delete_subscription(&subscription.uuid_string()).await?; + } + } + } + + Ok(()) +} + +async fn skell(_db: &Db, matches: &ArgMatches) -> Result<()> { + let path = matches + .get_one::("path") + .ok_or_else(|| anyhow!("Missing argument path"))?; + + let uuid = Uuid::new_v4(); + let name: String = match matches.get_one::("name") { + Some(name) => name.clone(), + None => format!("subscription-{}", uuid), + }; + let now = chrono::Local::now(); + + let content = if *matches + .get_one::("minimal") + .expect("defaulted by clap") + { + get_minimal_skell_content(uuid, &name, now) + } else { + get_full_skell_content(uuid, 
&name, now) + }; + + if path.as_str() == "-" { + println!("{}", content); + } else { + let mut output = File::create(path)?; + output.write_all(content.as_bytes())?; + } + + Ok(()) +} + +/*** + * Helpers +***/ + async fn find_subscription(db: &Db, matches: &ArgMatches) -> Result { let identifier = matches .get_one::("subscription") @@ -918,3 +1098,10 @@ async fn find_subscriptions(db: &Db, matches: &ArgMatches) -> Result Result<()> { + if settings.cli().read_only_subscriptions() { + bail!("Subscriptions can only be edited using `openwec subscriptions load` because `cli.read_only_subscriptions` is set in settings.") + } + Ok(()) +} diff --git a/cli/src/utils.rs b/cli/src/utils.rs index 885820e..d2ad447 100644 --- a/cli/src/utils.rs +++ b/cli/src/utils.rs @@ -7,11 +7,14 @@ use std::io::Write; pub fn confirm(message: &str) -> bool { for _ in 0..3 { - print!("{} [y/n] ", message); + print!("{} [Y/n] ", message); io::stdout().flush().unwrap(); let mut input = String::new(); - if let Ok(2) = io::stdin().read_line(&mut input) { - return input.to_ascii_lowercase().trim() == "y"; + match io::stdin().read_line(&mut input) { + Ok(2) => return input.to_ascii_lowercase().trim() == "y", + // defaults to yes + Ok(1) => return true, + _ => (), } } false diff --git a/common/Cargo.toml b/common/Cargo.toml index 809de8e..0ad2c6d 100644 --- a/common/Cargo.toml +++ b/common/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "common" -version = "0.1.0" +version = "0.2.0" edition = "2021" # Used by cargo-deb homepage = "https://github.com/cea-sec/openwec" @@ -10,7 +10,7 @@ homepage = "https://github.com/cea-sec/openwec" [dependencies] anyhow = "1.0.71" rusqlite = { version = "0.28.0", features = ["bundled"] } -uuid = { version = "1.7.0", features = ["v4", "fast-rng"] } +uuid = { version = "1.7.0", features = ["v4", "fast-rng", "serde"] } serde = { version = "1.0", features = ["derive"] } toml = "0.8.0" log = "0.4.19" @@ -24,6 +24,7 @@ deadpool-postgres = "0.12.1" deadpool-sqlite = "0.5.0" 
openssl = "0.10.60" postgres-openssl = "0.5.0" +strum = { version = "0.26.1", features = ["derive"] } [dev-dependencies] tempfile = "3.9.0" diff --git a/common/src/database/mod.rs b/common/src/database/mod.rs index b359c6d..8fa2abb 100644 --- a/common/src/database/mod.rs +++ b/common/src/database/mod.rs @@ -80,12 +80,11 @@ pub trait Database { async fn store_heartbeats(&self, heartbeats: &HeartbeatsCache) -> Result<()>; async fn get_subscriptions(&self) -> Result>; - async fn get_subscription(&self, version: &str) -> Result>; async fn get_subscription_by_identifier( &self, identifier: &str, ) -> Result>; - async fn store_subscription(&self, subscription: SubscriptionData) -> Result<()>; + async fn store_subscription(&self, subscription: &SubscriptionData) -> Result<()>; async fn delete_subscription(&self, uuid: &str) -> Result<()>; async fn setup_schema(&self) -> Result<()>; @@ -125,8 +124,9 @@ pub mod tests { use crate::{ heartbeat::{HeartbeatKey, HeartbeatValue}, subscription::{ - ContentFormat, FileConfiguration, PrincsFilter, PrincsFilterOperation, - SubscriptionOutput, SubscriptionOutputFormat, + ContentFormat, FilesConfiguration, PrincsFilter, PrincsFilterOperation, + SubscriptionOutput, SubscriptionOutputDriver, SubscriptionOutputFormat, + DEFAULT_CONTENT_FORMAT, DEFAULT_IGNORE_CHANNEL_ERROR, DEFAULT_READ_EXISTING_EVENTS, }, }; @@ -150,85 +150,69 @@ pub mod tests { pub async fn test_subscriptions(db: Arc) -> Result<()> { setup_db(db.clone()).await?; assert!(db.get_subscriptions().await?.is_empty(),); - assert!(db.get_subscription("toto").await?.is_none(),); assert!(db.get_subscription_by_identifier("toto").await?.is_none()); - assert!(db.get_subscription_by_identifier("toto").await?.is_none()); - let subscription = SubscriptionData::new( - "toto", - Some("/test/1"), - "query", - None, - None, - None, - None, - None, - false, - false, - ContentFormat::Raw, - true, - PrincsFilter::empty(), - None, - ); - db.store_subscription(subscription.clone()).await?; + + 
let mut subscription = SubscriptionData::new("toto", "query"); + subscription + .set_uri(Some("/test/1".to_owned())) + .set_enabled(false); + db.store_subscription(&subscription).await?; + assert!(db.get_subscriptions().await?.len() == 1); + let toto = &db.get_subscriptions().await?[0]; assert_eq!(toto.name(), "toto"); assert_eq!(toto.uri(), Some(&"/test/1".to_string())); assert_eq!(toto.query(), "query",); assert_eq!(toto.enabled(), false); - assert_eq!(toto.read_existing_events(), false); - assert_eq!(toto.content_format(), &ContentFormat::Raw); - assert_eq!(toto.ignore_channel_error(), true); + assert_eq!(toto.read_existing_events(), DEFAULT_READ_EXISTING_EVENTS); + assert_eq!(toto.content_format(), &DEFAULT_CONTENT_FORMAT); + assert_eq!(toto.ignore_channel_error(), DEFAULT_IGNORE_CHANNEL_ERROR); assert_eq!(toto.princs_filter().operation(), None); assert_eq!(toto.is_active(), false); assert_eq!(toto.is_active_for("couscous"), false); + assert_eq!(toto.revision(), None); + assert_eq!(toto.data_locale(), None); + assert_eq!(toto.locale(), None); let toto2 = db.get_subscription_by_identifier("toto").await?.unwrap(); assert_eq!(toto, &toto2); let toto3 = db - .get_subscription_by_identifier(subscription.uuid()) + .get_subscription_by_identifier(&subscription.uuid_string()) .await? 
.unwrap(); assert_eq!(toto, &toto3); - let toto4 = db.get_subscription(subscription.version()).await?.unwrap(); - assert_eq!(toto, &toto4); let file_config_1 = - FileConfiguration::new("/path1".to_string(), None, false, "messages".to_string()); + FilesConfiguration::new("/path1".to_string(), None, false, "messages".to_string()); let file_config_2 = - FileConfiguration::new("/path2".to_string(), None, false, "messages".to_string()); - let subscription2 = SubscriptionData::new( - "tata", - None, - "query2", - None, - None, - None, - None, - None, - true, - true, - ContentFormat::RenderedText, - false, - PrincsFilter::from( + FilesConfiguration::new("/path2".to_string(), None, false, "messages".to_string()); + let mut subscription2 = SubscriptionData::new("tata", "query2"); + subscription2 + .set_read_existing_events(true) + .set_content_format(ContentFormat::RenderedText) + .set_ignore_channel_error(false) + .set_princs_filter(PrincsFilter::from( Some("Only".to_string()), Some("couscous,boulette".to_string()), - )?, - Some(vec![ - SubscriptionOutput::Files( + )?) 
+ .set_outputs(vec![ + SubscriptionOutput::new( SubscriptionOutputFormat::Json, - file_config_1.clone(), + SubscriptionOutputDriver::Files(file_config_1.clone()), true, ), - SubscriptionOutput::Files( + SubscriptionOutput::new( SubscriptionOutputFormat::Raw, - file_config_2.clone(), + SubscriptionOutputDriver::Files(file_config_2.clone()), false, ), - ]), - ); - db.store_subscription(subscription2).await?; + ]) + .set_revision(Some("1472".to_string())) + .set_locale(Some("fr-FR".to_string())) + .set_data_locale(Some("en-US".to_string())); + db.store_subscription(&subscription2).await?; assert!(db.get_subscriptions().await?.len() == 2); @@ -252,43 +236,56 @@ pub mod tests { assert_eq!( tata.outputs(), vec![ - SubscriptionOutput::Files( + SubscriptionOutput::new( SubscriptionOutputFormat::Json, - file_config_1.clone(), - true + SubscriptionOutputDriver::Files(file_config_1.clone()), + true, ), - SubscriptionOutput::Files( + SubscriptionOutput::new( SubscriptionOutputFormat::Raw, - file_config_2.clone(), + SubscriptionOutputDriver::Files(file_config_2.clone()), false, - ), - ] + ) + ], ); assert_eq!(tata.is_active(), true); assert_eq!(tata.is_active_for("couscous"), true); // Filter is case-sensitive assert_eq!(tata.is_active_for("Couscous"), false); assert_eq!(tata.is_active_for("semoule"), false); + assert_eq!(tata.revision(), Some("1472".to_string()).as_ref()); + assert_eq!(tata.locale(), Some("fr-FR".to_string()).as_ref()); + assert_eq!(tata.data_locale(), Some("en-US".to_string()).as_ref()); let tata_save = tata.clone(); - tata.set_name("titi".to_string()); - tata.set_max_time(25000); - tata.set_read_existing_events(false); - tata.set_content_format(ContentFormat::Raw); - tata.set_ignore_channel_error(true); + tata.set_name("titi".to_string()) + .set_max_time(25000) + .set_heartbeat_interval(1234) + .set_connection_retry_count(3) + .set_connection_retry_interval(54321) + .set_max_envelope_size(7777) + .set_read_existing_events(false) + 
.set_content_format(ContentFormat::Raw) + .set_ignore_channel_error(true) + .set_revision(Some("1890".to_string())) + .set_data_locale(Some("fr-FR".to_string())); let mut new_princs_filter = tata.princs_filter().clone(); new_princs_filter.add_princ("semoule")?; tata.set_princs_filter(new_princs_filter); - db.store_subscription(tata.clone()).await?; + db.store_subscription(&tata).await?; ensure!(db.get_subscriptions().await?.len() == 2); let mut tata2 = db - .get_subscription_by_identifier(tata.uuid()) + .get_subscription_by_identifier(&tata.uuid_string()) .await? .unwrap(); assert_eq!(tata2.name(), "titi"); assert_eq!(tata2.max_time(), 25000); + assert_eq!(tata2.heartbeat_interval(), 1234); + assert_eq!(tata2.connection_retry_count(), 3); + assert_eq!(tata2.connection_retry_interval(), 54321); + assert_eq!(tata2.max_envelope_size(), 7777); assert_eq!(tata2.read_existing_events(), false); assert_eq!(tata2.content_format(), &ContentFormat::Raw); assert_eq!(tata2.ignore_channel_error(), true); @@ -306,18 +303,21 @@ pub mod tests { ); assert_eq!(tata2.is_active_for("couscous"), true); assert_eq!(tata2.is_active_for("semoule"), true); + assert_eq!(tata2.revision(), Some("1890".to_string()).as_ref()); + assert_eq!(tata2.locale(), Some("fr-FR".to_string()).as_ref()); // Unchanged + assert_eq!(tata2.data_locale(), Some("fr-FR".to_string()).as_ref()); - assert!(tata2.version() != tata_save.version()); + assert!(tata2.public_version()? != tata_save.public_version()?); let mut new_princs_filter = tata2.princs_filter().clone(); new_princs_filter.delete_princ("couscous")?; new_princs_filter.set_operation(Some(PrincsFilterOperation::Except)); tata2.set_princs_filter(new_princs_filter); - db.store_subscription(tata2).await?; + db.store_subscription(&tata2).await?; let mut tata2_clone = db - .get_subscription_by_identifier(tata.uuid()) + .get_subscription_by_identifier(&tata.uuid_string()) .await? 
.unwrap(); assert_eq!( @@ -337,10 +337,10 @@ pub mod tests { new_princs_filter.set_operation(None); tata2_clone.set_princs_filter(new_princs_filter); - db.store_subscription(tata2_clone).await?; + db.store_subscription(&tata2_clone).await?; let tata2_clone_clone = db - .get_subscription_by_identifier(tata.uuid()) + .get_subscription_by_identifier(&tata.uuid_string()) .await? .unwrap(); assert_eq!(tata2_clone_clone.princs_filter().operation(), None); @@ -349,9 +349,11 @@ pub mod tests { assert_eq!(tata2_clone_clone.is_active_for("semoule"), true); assert_eq!(tata2_clone_clone.is_active_for("boulette"), true); - db.delete_subscription(toto4.uuid()).await?; + db.delete_subscription(&toto3.uuid_string()).await?; ensure!( - db.get_subscription("toto").await?.is_none(), + db.get_subscription_by_identifier(&toto3.uuid_string()) + .await? + .is_none(), "The subscription with version 'toto' should not exist yet" ); assert!(db.get_subscriptions().await?.len() == 1); @@ -359,7 +361,7 @@ pub mod tests { let tata3 = &db.get_subscriptions().await?[0]; assert_eq!(tata.uuid(), tata3.uuid()); - db.delete_subscription(tata.uuid()).await?; + db.delete_subscription(&tata.uuid_string()).await?; assert!(db.get_subscriptions().await?.is_empty()); clean_db(db.clone()).await?; @@ -368,152 +370,136 @@ pub mod tests { pub async fn test_bookmarks(db: Arc) -> Result<()> { setup_db(db.clone()).await?; - let subscription_tutu = SubscriptionData::new( - "tutu", - None, - "query", - None, - None, - None, - None, - None, - false, - false, - ContentFormat::Raw, - false, - PrincsFilter::empty(), - None, - ); - db.store_subscription(subscription_tutu.clone()).await?; - let subscription_titi = SubscriptionData::new( - "titi", - None, - "query", - None, - None, - None, - None, - None, - false, - false, - ContentFormat::RenderedText, - true, - PrincsFilter::empty(), - None, - ); - db.store_subscription(subscription_titi.clone()).await?; + let subscription_tutu = SubscriptionData::new("tutu", "query"); + 
db.store_subscription(&subscription_tutu).await?; + let subscription_titi = SubscriptionData::new("titi", "query"); + db.store_subscription(&subscription_titi).await?; // Test non existent bookmark assert!(db - .get_bookmark("toto", subscription_tutu.uuid()) + .get_bookmark("toto", &subscription_tutu.uuid_string()) .await? .is_none(),); - assert!(db.get_bookmarks(subscription_tutu.uuid()).await?.is_empty()); + assert!(db + .get_bookmarks(&subscription_tutu.uuid_string()) + .await? + .is_empty()); // Store a bookmark - db.store_bookmark("toto", subscription_tutu.uuid(), "titi") + db.store_bookmark("toto", &subscription_tutu.uuid_string(), "titi") .await?; // Test if the bookmark is correctly remembered assert_eq!( - db.get_bookmark("toto", subscription_tutu.uuid()) + db.get_bookmark("toto", &subscription_tutu.uuid_string()) .await? .unwrap(), "titi", ); assert_eq!( - db.get_bookmarks(subscription_tutu.uuid()).await?[0], + db.get_bookmarks(&subscription_tutu.uuid_string()).await?[0], BookmarkData { machine: "toto".to_owned(), - subscription: subscription_tutu.uuid().to_owned(), + subscription: subscription_tutu.uuid_string().to_owned(), bookmark: "titi".to_owned() } ); // Update the bookmark - db.store_bookmark("toto", subscription_tutu.uuid(), "toto") + db.store_bookmark("toto", &subscription_tutu.uuid_string(), "toto") .await?; // Test if the bookmark is correctly remembered assert_eq!( - db.get_bookmark("toto", subscription_tutu.uuid()) + db.get_bookmark("toto", &subscription_tutu.uuid_string()) .await? 
.unwrap(), "toto", ); assert_eq!( - db.get_bookmarks(subscription_tutu.uuid()).await?[0], + db.get_bookmarks(&subscription_tutu.uuid_string()).await?[0], BookmarkData { machine: "toto".to_owned(), - subscription: subscription_tutu.uuid().to_owned(), + subscription: subscription_tutu.uuid_string().to_owned(), bookmark: "toto".to_owned() } ); // Update another bookmark - db.store_bookmark("toto", subscription_titi.uuid(), "babar") + db.store_bookmark("toto", &subscription_titi.uuid_string(), "babar") .await?; // Test if the original bookmark is correctly remembered assert_eq!( - db.get_bookmark("toto", subscription_tutu.uuid()) + db.get_bookmark("toto", &subscription_tutu.uuid_string()) .await? .unwrap(), "toto", ); assert_eq!( - db.get_bookmarks(subscription_tutu.uuid()).await?[0], + db.get_bookmarks(&subscription_tutu.uuid_string()).await?[0], BookmarkData { machine: "toto".to_owned(), - subscription: subscription_tutu.uuid().to_owned(), + subscription: subscription_tutu.uuid_string().to_owned(), bookmark: "toto".to_owned() } ); assert_eq!( - db.get_bookmark("toto", subscription_titi.uuid()) + db.get_bookmark("toto", &subscription_titi.uuid_string()) .await? .unwrap(), "babar", ); assert_eq!( - db.get_bookmarks(subscription_titi.uuid()).await?[0], + db.get_bookmarks(&subscription_titi.uuid_string()).await?[0], BookmarkData { machine: "toto".to_owned(), - subscription: subscription_titi.uuid().to_owned(), + subscription: subscription_titi.uuid_string().to_owned(), bookmark: "babar".to_owned() } ); // Test that bookmarks are deleted if subscription is deleted - db.delete_subscription(subscription_tutu.uuid()).await?; + db.delete_subscription(&subscription_tutu.uuid_string()) + .await?; assert!(db - .get_bookmark("toto", subscription_tutu.uuid()) + .get_bookmark("toto", &subscription_tutu.uuid_string()) .await? 
.is_none(),); - assert!(db.get_bookmarks(subscription_tutu.uuid()).await?.is_empty()); + assert!(db + .get_bookmarks(&subscription_tutu.uuid_string()) + .await? + .is_empty()); assert_eq!( - db.get_bookmark("toto", subscription_titi.uuid()) + db.get_bookmark("toto", &subscription_titi.uuid_string()) .await? .unwrap(), "babar", ); - assert!(!db.get_bookmarks(subscription_titi.uuid()).await?.is_empty()); - db.delete_subscription(subscription_titi.uuid()).await?; + assert!(!db + .get_bookmarks(&subscription_titi.uuid_string()) + .await? + .is_empty()); + db.delete_subscription(&subscription_titi.uuid_string()) + .await?; assert!(db - .get_bookmark("toto", subscription_titi.uuid()) + .get_bookmark("toto", &subscription_titi.uuid_string()) .await? .is_none(),); - assert!(db.get_bookmarks(subscription_titi.uuid()).await?.is_empty()); + assert!(db + .get_bookmarks(&subscription_titi.uuid_string()) + .await? + .is_empty()); - db.store_subscription(subscription_tutu.clone()).await?; - db.store_subscription(subscription_titi.clone()).await?; + db.store_subscription(&subscription_tutu).await?; + db.store_subscription(&subscription_titi).await?; - db.store_bookmark("m1", subscription_tutu.uuid(), "m1b1") + db.store_bookmark("m1", &subscription_tutu.uuid_string(), "m1b1") .await?; - db.store_bookmark("m2", subscription_tutu.uuid(), "m2b1") + db.store_bookmark("m2", &subscription_tutu.uuid_string(), "m2b1") .await?; - db.store_bookmark("m1", subscription_titi.uuid(), "m1b2") + db.store_bookmark("m1", &subscription_titi.uuid_string(), "m1b2") .await?; // Test Retrieve bookmarks for subscription tutu - let bookmarks = db.get_bookmarks(subscription_tutu.uuid()).await?; + let bookmarks = db.get_bookmarks(&subscription_tutu.uuid_string()).await?; assert_eq!( bookmarks .iter() @@ -531,63 +517,63 @@ pub mod tests { "m2b1" ); - db.delete_bookmarks(Some("m1"), Some(subscription_titi.uuid())) + db.delete_bookmarks(Some("m1"), Some(&subscription_titi.uuid_string())) .await?; assert!(db - 
.get_bookmark("m1", subscription_titi.uuid()) + .get_bookmark("m1", &subscription_titi.uuid_string()) .await? .is_none()); - db.store_bookmark("m1", subscription_titi.uuid(), "m1b3") + db.store_bookmark("m1", &subscription_titi.uuid_string(), "m1b3") .await?; - db.delete_bookmarks(None, Some(subscription_tutu.uuid())) + db.delete_bookmarks(None, Some(&subscription_tutu.uuid_string())) .await?; assert!(db - .get_bookmark("m1", subscription_tutu.uuid()) + .get_bookmark("m1", &subscription_tutu.uuid_string()) .await? .is_none()); assert!(db - .get_bookmark("m2", subscription_tutu.uuid()) + .get_bookmark("m2", &subscription_tutu.uuid_string()) .await? .is_none()); assert_eq!( - db.get_bookmark("m1", subscription_titi.uuid()) + db.get_bookmark("m1", &subscription_titi.uuid_string()) .await? .unwrap(), "m1b3" ); - db.store_bookmark("m1", subscription_tutu.uuid(), "m1b4") + db.store_bookmark("m1", &subscription_tutu.uuid_string(), "m1b4") .await?; - db.store_bookmark("m2", subscription_tutu.uuid(), "m2b2") + db.store_bookmark("m2", &subscription_tutu.uuid_string(), "m2b2") .await?; db.delete_bookmarks(Some("m1"), None).await?; assert_eq!( - db.get_bookmark("m2", subscription_tutu.uuid()) + db.get_bookmark("m2", &subscription_tutu.uuid_string()) .await? .unwrap(), "m2b2" ); assert!(db - .get_bookmark("m1", subscription_tutu.uuid()) + .get_bookmark("m1", &subscription_tutu.uuid_string()) .await? .is_none()); assert!(db - .get_bookmark("m1", subscription_titi.uuid()) + .get_bookmark("m1", &subscription_titi.uuid_string()) .await? .is_none()); - db.store_bookmark("m1", subscription_tutu.uuid(), "m1b5") + db.store_bookmark("m1", &subscription_tutu.uuid_string(), "m1b5") .await?; - db.store_bookmark("m2", subscription_titi.uuid(), "m2b3") + db.store_bookmark("m2", &subscription_titi.uuid_string(), "m2b3") .await?; db.delete_bookmarks(None, None).await?; assert!(db - .get_bookmark("m1", subscription_tutu.uuid()) + .get_bookmark("m1", &subscription_tutu.uuid_string()) .await? 
.is_none()); assert!(db - .get_bookmark("m2", subscription_titi.uuid()) + .get_bookmark("m2", &subscription_titi.uuid_string()) .await? .is_none()); @@ -604,24 +590,8 @@ pub mod tests { assert!(db.get_heartbeats().await?.is_empty()); - let subscription_tutu = SubscriptionData::new( - "tutu", - None, - "query", - None, - None, - None, - None, - None, - false, - false, - ContentFormat::Raw, - true, - PrincsFilter::empty(), - None, - ); - - db.store_subscription(subscription_tutu.clone()).await?; + let subscription_tutu = SubscriptionData::new("tutu", "query"); + db.store_subscription(&subscription_tutu).await?; let before = SystemTime::now(); sleep(Duration::from_secs(1)); @@ -630,12 +600,12 @@ pub mod tests { db.store_heartbeat( "toto", "127.0.0.1".to_string(), - subscription_tutu.uuid(), + &subscription_tutu.uuid_string(), false, ) .await?; let heartbeat = db - .get_heartbeats_by_machine("toto", Some(subscription_tutu.uuid())) + .get_heartbeats_by_machine("toto", Some(&subscription_tutu.uuid_string())) .await?[0] .clone(); assert_eq!( @@ -665,7 +635,7 @@ pub mod tests { ); assert!(db.get_heartbeats_by_ip("127.0.0.2", None).await?.is_empty(),); assert_eq!( - db.get_heartbeats_by_ip("127.0.0.1", Some(subscription_tutu.uuid())) + db.get_heartbeats_by_ip("127.0.0.1", Some(&subscription_tutu.uuid_string())) .await?[0], heartbeat ); @@ -675,13 +645,13 @@ pub mod tests { db.store_heartbeat( "toto", "127.0.0.1".to_string(), - subscription_tutu.uuid(), + &subscription_tutu.uuid_string(), true, ) .await?; let heartbeat = db - .get_heartbeats_by_machine("toto", Some(subscription_tutu.uuid())) + .get_heartbeats_by_machine("toto", Some(&subscription_tutu.uuid_string())) .await?[0] .clone(); assert!( @@ -693,7 +663,7 @@ pub mod tests { db.store_heartbeat( "tata", "127.0.0.2".to_string(), - subscription_tutu.uuid(), + &subscription_tutu.uuid_string(), false, ) .await?; @@ -702,7 +672,7 @@ pub mod tests { assert_eq!(heartbeats.len(), 2); assert_eq!( - 
db.get_heartbeats_by_subscription(subscription_tutu.uuid()) + db.get_heartbeats_by_subscription(&subscription_tutu.uuid_string()) .await?, heartbeats ); @@ -710,14 +680,15 @@ pub mod tests { db.store_heartbeat( "tata", "127.0.0.2".to_string(), - subscription_tutu.uuid(), + &subscription_tutu.uuid_string(), true, ) .await?; assert!(!db.get_heartbeats_by_ip("127.0.0.2", None).await?.is_empty()); // Remove subscription and assert that heartbeats have been deleted - db.delete_subscription(subscription_tutu.uuid()).await?; + db.delete_subscription(&subscription_tutu.uuid_string()) + .await?; assert!(db.get_heartbeats().await?.is_empty()); clean_db(db.clone()).await?; @@ -727,30 +698,14 @@ pub mod tests { pub async fn test_heartbeats_cache(db: Arc) -> Result<()> { setup_db(db.clone()).await?; - let subscription_tutu = SubscriptionData::new( - "tutu", - None, - "query", - None, - None, - None, - None, - None, - false, - false, - ContentFormat::RenderedText, - false, - PrincsFilter::empty(), - None, - ); - - db.store_subscription(subscription_tutu.clone()).await?; + let subscription_tutu = SubscriptionData::new("tutu", "query"); + db.store_subscription(&subscription_tutu).await?; let mut heartbeats = HeartbeatsCache::new(); heartbeats.insert( HeartbeatKey { machine: "m1".to_string(), - subscription: subscription_tutu.uuid().to_owned(), + subscription: subscription_tutu.uuid_string().to_owned(), }, HeartbeatValue { ip: "127.0.0.1".to_string(), @@ -761,7 +716,7 @@ pub mod tests { heartbeats.insert( HeartbeatKey { machine: "m2".to_string(), - subscription: subscription_tutu.uuid().to_owned(), + subscription: subscription_tutu.uuid_string().to_owned(), }, HeartbeatValue { ip: "127.0.0.2".to_string(), @@ -799,7 +754,7 @@ pub mod tests { heartbeats.insert( HeartbeatKey { machine: "m1".to_string(), - subscription: subscription_tutu.uuid().to_owned(), + subscription: subscription_tutu.uuid_string().to_owned(), }, HeartbeatValue { ip: "127.0.0.100".to_string(), @@ -838,7 +793,7 @@ 
pub mod tests { heartbeats.insert( HeartbeatKey { machine: format!("machine${}", i * 2), - subscription: subscription_tutu.uuid().to_owned(), + subscription: subscription_tutu.uuid_string().to_owned(), }, HeartbeatValue { ip: "127.0.0.1".to_string(), @@ -849,7 +804,7 @@ pub mod tests { heartbeats.insert( HeartbeatKey { machine: format!("machine${}", i * 2 + 1), - subscription: subscription_tutu.uuid().to_owned(), + subscription: subscription_tutu.uuid_string().to_owned(), }, HeartbeatValue { ip: "127.0.0.2".to_string(), @@ -873,37 +828,21 @@ pub mod tests { SubscriptionStatsCounters::new(0, 0, 0, 0) ); - let subscription_tutu = SubscriptionData::new( - "tutu", - None, - "query", - None, - None, - None, - None, - None, - false, - false, - ContentFormat::Raw, - true, - PrincsFilter::empty(), - None, - ); - - db.store_subscription(subscription_tutu.clone()).await?; + let subscription_tutu = SubscriptionData::new("tutu", "query"); + db.store_subscription(&subscription_tutu).await?; assert_eq!( - db.get_stats(subscription_tutu.uuid(), 0).await?, + db.get_stats(&subscription_tutu.uuid_string(), 0).await?, SubscriptionStatsCounters::new(0, 0, 0, 0) ); assert!(db - .get_machines(subscription_tutu.uuid(), 0, None) + .get_machines(&subscription_tutu.uuid_string(), 0, None) .await? 
.is_empty()); assert!(db .get_machines( - subscription_tutu.uuid(), + &subscription_tutu.uuid_string(), 0, Some(SubscriptionMachineState::Alive) ) @@ -912,7 +851,7 @@ pub mod tests { assert!(db .get_machines( - subscription_tutu.uuid(), + &subscription_tutu.uuid_string(), 0, Some(SubscriptionMachineState::Active) ) @@ -921,7 +860,7 @@ pub mod tests { assert!(db .get_machines( - subscription_tutu.uuid(), + &subscription_tutu.uuid_string(), 0, Some(SubscriptionMachineState::Dead) ) @@ -937,7 +876,7 @@ pub mod tests { db.store_heartbeat( "toto", "127.0.0.1".to_string(), - subscription_tutu.uuid(), + &subscription_tutu.uuid_string(), false, ) .await?; @@ -945,14 +884,14 @@ pub mod tests { println!("{:?}", db.get_heartbeats().await?); assert_eq!( - db.get_stats(subscription_tutu.uuid(), 0).await?, + db.get_stats(&subscription_tutu.uuid_string(), 0).await?, // total:1, alive:1, active:0, dead:0 SubscriptionStatsCounters::new(1, 1, 0, 0) ); let alive_machines = db .get_machines( - subscription_tutu.uuid(), + &subscription_tutu.uuid_string(), 0, Some(SubscriptionMachineState::Alive), ) @@ -962,14 +901,16 @@ pub mod tests { assert_eq!(alive_machines[0].name(), "toto"); assert_eq!(alive_machines[0].ip(), "127.0.0.1"); - let total_machines = db.get_machines(subscription_tutu.uuid(), 0, None).await?; + let total_machines = db + .get_machines(&subscription_tutu.uuid_string(), 0, None) + .await?; assert_eq!(total_machines.len(), 1); assert_eq!(total_machines[0].name(), "toto"); assert_eq!(total_machines[0].ip(), "127.0.0.1"); assert!(db .get_machines( - subscription_tutu.uuid(), + &&subscription_tutu.uuid_string(), 0, Some(SubscriptionMachineState::Active) ) @@ -977,7 +918,7 @@ pub mod tests { .is_empty()); assert!(db .get_machines( - subscription_tutu.uuid(), + &subscription_tutu.uuid_string(), 0, Some(SubscriptionMachineState::Dead) ) @@ -988,20 +929,20 @@ pub mod tests { db.store_heartbeat( "toto", "127.0.0.1".to_string(), - subscription_tutu.uuid(), + 
&subscription_tutu.uuid_string(), true, ) .await?; assert_eq!( - db.get_stats(subscription_tutu.uuid(), 0).await?, + db.get_stats(&subscription_tutu.uuid_string(), 0).await?, // total:1, alive:0, active:1, dead:0 SubscriptionStatsCounters::new(1, 0, 1, 0) ); assert_eq!( db.get_machines( - subscription_tutu.uuid(), + &subscription_tutu.uuid_string(), 0, Some(SubscriptionMachineState::Active) ) @@ -1011,7 +952,7 @@ pub mod tests { ); assert!(db .get_machines( - subscription_tutu.uuid(), + &subscription_tutu.uuid_string(), 0, Some(SubscriptionMachineState::Alive) ) @@ -1019,14 +960,14 @@ pub mod tests { .is_empty()); assert!(db .get_machines( - subscription_tutu.uuid(), + &subscription_tutu.uuid_string(), 0, Some(SubscriptionMachineState::Dead) ) .await? .is_empty()); assert_eq!( - db.get_machines(subscription_tutu.uuid(), 0, None) + db.get_machines(&subscription_tutu.uuid_string(), 0, None) .await? .len(), 1 @@ -1038,7 +979,7 @@ pub mod tests { db.store_heartbeat( "tata", "127.0.0.2".to_string(), - subscription_tutu.uuid(), + &subscription_tutu.uuid_string(), false, ) .await?; @@ -1046,19 +987,20 @@ pub mod tests { // We have waited 2 seconds and set heartbeat_interval_start at "now + 1", so // only the last stored heartbeat is considered alive. 
assert_eq!( - db.get_stats(subscription_tutu.uuid(), now + 1).await?, + db.get_stats(&subscription_tutu.uuid_string(), now + 1) + .await?, // total:2, alive:1, active:0, dead:1 SubscriptionStatsCounters::new(2, 1, 0, 1) ); let total_machines = db - .get_machines(subscription_tutu.uuid(), now + 1, None) + .get_machines(&subscription_tutu.uuid_string(), now + 1, None) .await?; assert_eq!(total_machines.len(), 2); let alive_machines = db .get_machines( - subscription_tutu.uuid(), + &subscription_tutu.uuid_string(), now + 1, Some(SubscriptionMachineState::Alive), ) @@ -1069,7 +1011,7 @@ pub mod tests { let dead_machines = db .get_machines( - subscription_tutu.uuid(), + &subscription_tutu.uuid_string(), now + 1, Some(SubscriptionMachineState::Dead), ) @@ -1080,7 +1022,7 @@ pub mod tests { assert!(db .get_machines( - subscription_tutu.uuid(), + &subscription_tutu.uuid_string(), now + 1, Some(SubscriptionMachineState::Active) ) @@ -1091,50 +1033,36 @@ pub mod tests { db.store_heartbeat( "toto", "127.0.0.1".to_string(), - subscription_tutu.uuid(), + &subscription_tutu.uuid_string(), true, ) .await?; // First machine is active again assert_eq!( - db.get_stats(subscription_tutu.uuid(), now + 1).await?, + db.get_stats(&subscription_tutu.uuid_string(), now + 1) + .await?, // total:2, alive:1, active:1, dead:0 SubscriptionStatsCounters::new(2, 1, 1, 0) ); // Create another subscription - let subscription_tata = SubscriptionData::new( - "tata", - None, - "query", - None, - None, - None, - None, - None, - false, - false, - ContentFormat::Raw, - false, - PrincsFilter::empty(), - None, - ); - - db.store_subscription(subscription_tata.clone()).await?; + let subscription_tata = SubscriptionData::new("tata", "query"); + db.store_subscription(&subscription_tata).await?; // Store an heartbeat for this other subscription db.store_heartbeat( "toto", "127.0.0.1".to_string(), - subscription_tata.uuid(), + &subscription_tata.uuid_string(), true, ) .await?; // Nothing has changed for first 
subscription assert_eq!( - db.get_stats(subscription_tutu.uuid(), now + 1).await?, + db.get_stats(&subscription_tutu.uuid_string(), now + 1) + .await?, // total:2, alive:1, active:1, dead:0 SubscriptionStatsCounters::new(2, 1, 1, 0) ); diff --git a/common/src/database/postgres.rs b/common/src/database/postgres.rs index 4907ca9..2238091 100644 --- a/common/src/database/postgres.rs +++ b/common/src/database/postgres.rs @@ -31,8 +31,8 @@ use crate::bookmark::BookmarkData; use crate::heartbeat::{HeartbeatKey, HeartbeatsCache}; use crate::settings::PostgresSslMode; use crate::subscription::{ - ContentFormat, PrincsFilter, SubscriptionMachine, SubscriptionMachineState, - SubscriptionStatsCounters, + ContentFormat, InternalVersion, PrincsFilter, SubscriptionMachine, SubscriptionMachineState, + SubscriptionStatsCounters, SubscriptionUuid, }; use crate::{ database::Database, heartbeat::HeartbeatData, settings::Postgres, @@ -53,6 +53,7 @@ use std::{ }; use tokio_postgres::types::ToSql; use tokio_postgres::{NoTls, Row}; +use uuid::Uuid; use super::schema::{Migration, MigrationBase, Version}; @@ -142,32 +143,6 @@ impl PostgresDatabase { } } - async fn get_subscription_by_field( - &self, - field: &str, - value: &str, - ) -> Result> { - let res = self - .pool - .get() - .await? 
- .query_opt( - format!( - r#"SELECT * - FROM subscriptions - WHERE {} = $1"#, - field - ) - .as_str(), - &[&value], - ) - .await?; - Ok(match res { - Some(row) => Some(row_to_subscription(&row)?), - None => None, - }) - } - async fn get_heartbeats_by_field( &self, field: &str, @@ -242,24 +217,30 @@ fn row_to_subscription(row: &Row) -> Result { row.try_get("princs_filter_value")?, )?; - Ok(SubscriptionData::from( - row.try_get("uuid")?, - row.try_get("version")?, - row.try_get("name")?, - row.try_get("uri")?, - row.try_get("query")?, - heartbeat_interval.try_into()?, - connection_retry_count.try_into()?, - connection_retry_interval.try_into()?, - max_time.try_into()?, - max_envelope_size.try_into()?, - row.try_get("enabled")?, - row.try_get("read_existing_events")?, - ContentFormat::from_str(row.try_get("content_format")?)?, - row.try_get("ignore_channel_error")?, - princs_filter, - outputs, - )) + let mut subscription = SubscriptionData::new(row.try_get("name")?, row.try_get("query")?); + subscription + .set_uuid(SubscriptionUuid(Uuid::parse_str(row.try_get("uuid")?)?)) + .set_uri(row.try_get("uri")?) + .set_revision(row.try_get("revision")?) + .set_heartbeat_interval(heartbeat_interval.try_into()?) + .set_connection_retry_count(connection_retry_count.try_into()?) + .set_connection_retry_interval(connection_retry_interval.try_into()?) + .set_max_time(max_time.try_into()?) + .set_max_envelope_size(max_envelope_size.try_into()?) + .set_enabled(row.try_get("enabled")?) + .set_read_existing_events(row.try_get("read_existing_events")?) + .set_content_format(ContentFormat::from_str(row.try_get("content_format")?)?) + .set_ignore_channel_error(row.try_get("ignore_channel_error")?) + .set_locale(row.try_get("locale")?) + .set_data_locale(row.try_get("data_locale")?) 
+ .set_princs_filter(princs_filter) + .set_outputs(outputs); + + // This needs to be done at the end because version is updated each time + // a "set_" function is called + subscription.set_internal_version(InternalVersion(Uuid::parse_str(row.try_get("version")?)?)); + + Ok(subscription) } fn row_to_heartbeat(row: &Row) -> Result { @@ -606,10 +587,6 @@ impl Database for PostgresDatabase { Ok(subscriptions) } - async fn get_subscription(&self, version: &str) -> Result> { - self.get_subscription_by_field("version", version).await - } - async fn get_subscription_by_identifier( &self, identifier: &str, @@ -632,7 +609,7 @@ impl Database for PostgresDatabase { }) } - async fn store_subscription(&self, subscription: SubscriptionData) -> Result<()> { + async fn store_subscription(&self, subscription: &SubscriptionData) -> Result<()> { let heartbeat_interval: i32 = subscription.heartbeat_interval().try_into()?; let connection_retry_count: i32 = subscription.connection_retry_count().into(); let connection_retry_interval: i32 = subscription.connection_retry_interval().try_into()?; @@ -643,13 +620,15 @@ impl Database for PostgresDatabase { .get() .await? 
.execute( - r#"INSERT INTO subscriptions (uuid, version, name, uri, query, + r#"INSERT INTO subscriptions (uuid, version, revision, name, uri, query, heartbeat_interval, connection_retry_count, connection_retry_interval, max_time, max_envelope_size, enabled, read_existing_events, content_format, - ignore_channel_error, princs_filter_op, princs_filter_value, outputs) - VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17) + ignore_channel_error, princs_filter_op, princs_filter_value, outputs, locale, + data_locale) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20) ON CONFLICT (uuid) DO UPDATE SET version = excluded.version, + revision = excluded.revision, name = excluded.name, uri = excluded.uri, query = excluded.query, @@ -664,10 +643,13 @@ impl Database for PostgresDatabase { ignore_channel_error = excluded.ignore_channel_error, princs_filter_op = excluded.princs_filter_op, princs_filter_value = excluded.princs_filter_value, - outputs = excluded.outputs"#, + outputs = excluded.outputs, + locale = excluded.locale, + data_locale = excluded.data_locale"#, &[ - &subscription.uuid(), - &subscription.version(), + &subscription.uuid_string(), + &subscription.internal_version().to_string(), + &subscription.revision(), &subscription.name(), &subscription.uri(), &subscription.query(), @@ -686,6 +668,8 @@ impl Database for PostgresDatabase { .map(|x| x.to_string()), &subscription.princs_filter().princs_to_opt_string(), &serde_json::to_string(subscription.outputs())?.as_str(), + &subscription.locale(), + &subscription.data_locale() ], ) .await?; diff --git a/common/src/database/schema/postgres/_009_alter_outputs_format.rs b/common/src/database/schema/postgres/_009_alter_outputs_format.rs new file mode 100644 index 0000000..4b7f61d --- /dev/null +++ b/common/src/database/schema/postgres/_009_alter_outputs_format.rs @@ -0,0 +1,183 @@ +use std::collections::HashMap; + +use anyhow::Result; +use 
async_trait::async_trait; +use deadpool_postgres::Transaction; +use serde::{Deserialize, Serialize}; + +use crate::{database::postgres::PostgresMigration, migration}; + +pub(super) struct AlterOutputsFormat; +migration!(AlterOutputsFormat, 9, "alter outputs format"); + +#[derive(Debug, Serialize, Clone, Eq, PartialEq, Deserialize)] +pub enum SubscriptionOutputFormat { + Json, + Raw, +} + +#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)] +pub struct KafkaConfiguration { + topic: String, + options: HashMap, +} + +#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)] +pub struct RedisConfiguration { + addr: String, + list: String, +} + +#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)] +pub struct TcpConfiguration { + addr: String, + port: u16, +} + +#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)] +pub struct FileConfiguration { + base: String, + split_on_addr_index: Option, + append_node_name: bool, + filename: String, +} + +#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)] +pub struct UnixDatagramConfiguration { + path: String, +} + +#[derive(Debug, Serialize, Clone, Eq, PartialEq, Deserialize)] +enum OldSubscriptionOutput { + // The last bool indicates whether the output is enabled or not. 
+ Files(SubscriptionOutputFormat, FileConfiguration, bool), + Kafka(SubscriptionOutputFormat, KafkaConfiguration, bool), + Tcp(SubscriptionOutputFormat, TcpConfiguration, bool), + Redis(SubscriptionOutputFormat, RedisConfiguration, bool), + UnixDatagram(SubscriptionOutputFormat, UnixDatagramConfiguration, bool), +} + +#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)] +pub enum SubscriptionOutputDriver { + Files(FileConfiguration), + Kafka(KafkaConfiguration), + Tcp(TcpConfiguration), + Redis(RedisConfiguration), + UnixDatagram(UnixDatagramConfiguration), +} + +#[derive(Serialize, Debug, Deserialize, Clone, Eq, PartialEq)] +pub struct NewSubscriptionOutput { + format: SubscriptionOutputFormat, + driver: SubscriptionOutputDriver, + enabled: bool, +} + +fn old_to_new_output(old: &OldSubscriptionOutput) -> NewSubscriptionOutput { + match old.clone() { + OldSubscriptionOutput::Files(format, config, enabled) => NewSubscriptionOutput { + format, + driver: SubscriptionOutputDriver::Files(config), + enabled, + }, + OldSubscriptionOutput::Kafka(format, config, enabled) => NewSubscriptionOutput { + format, + driver: SubscriptionOutputDriver::Kafka(config), + enabled, + }, + OldSubscriptionOutput::Tcp(format, config, enabled) => NewSubscriptionOutput { + format, + driver: SubscriptionOutputDriver::Tcp(config), + enabled, + }, + OldSubscriptionOutput::Redis(format, config, enabled) => NewSubscriptionOutput { + format, + driver: SubscriptionOutputDriver::Redis(config), + enabled, + }, + OldSubscriptionOutput::UnixDatagram(format, config, enabled) => NewSubscriptionOutput { + format, + driver: SubscriptionOutputDriver::UnixDatagram(config), + enabled, + }, + } +} + +fn new_to_old_output(new: &NewSubscriptionOutput) -> OldSubscriptionOutput { + let enabled = new.enabled; + let format = new.format.clone(); + match &new.driver { + SubscriptionOutputDriver::Files(config) => { + OldSubscriptionOutput::Files(format, config.clone(), enabled) + } + 
SubscriptionOutputDriver::Kafka(config) => { + OldSubscriptionOutput::Kafka(format, config.clone(), enabled) + } + SubscriptionOutputDriver::Tcp(config) => { + OldSubscriptionOutput::Tcp(format, config.clone(), enabled) + } + SubscriptionOutputDriver::Redis(config) => { + OldSubscriptionOutput::Redis(format, config.clone(), enabled) + } + SubscriptionOutputDriver::UnixDatagram(config) => { + OldSubscriptionOutput::UnixDatagram(format, config.clone(), enabled) + } + } +} + +#[async_trait] +impl PostgresMigration for AlterOutputsFormat { + async fn up(&self, tx: &mut Transaction) -> Result<()> { + let rows = tx + .query( + r#"SELECT uuid, outputs + FROM subscriptions + "#, + &[], + ) + .await?; + for row in rows { + let uuid: String = row.try_get("uuid")?; + let outputs_str: String = row.try_get("outputs")?; + let outputs: Vec = serde_json::from_str(&outputs_str)?; + let new_outputs: Vec = + outputs.iter().map(old_to_new_output).collect(); + let new_outputs_str = serde_json::to_string(&new_outputs)?; + tx.execute( + r#"UPDATE subscriptions + SET outputs = $1 + WHERE uuid = $2"#, + &[&new_outputs_str, &uuid], + ) + .await?; + } + Ok(()) + } + + async fn down(&self, tx: &mut Transaction) -> Result<()> { + let rows = tx + .query( + r#"SELECT uuid, outputs + FROM subscriptions + "#, + &[], + ) + .await?; + for row in rows { + let uuid: String = row.try_get("uuid")?; + let outputs_str: String = row.try_get("outputs")?; + let outputs: Vec = serde_json::from_str(&outputs_str)?; + let new_outputs: Vec = + outputs.iter().map(new_to_old_output).collect(); + let new_outputs_str = serde_json::to_string(&new_outputs)?; + tx.execute( + r#"UPDATE subscriptions + SET outputs = $1 + WHERE uuid = $2"#, + &[&new_outputs_str, &uuid], + ) + .await?; + } + Ok(()) + } +} diff --git a/common/src/database/schema/postgres/_010_add_revision_field_in_subscriptions_table.rs b/common/src/database/schema/postgres/_010_add_revision_field_in_subscriptions_table.rs new file mode 100644 index 
0000000..b467669 --- /dev/null +++ b/common/src/database/schema/postgres/_010_add_revision_field_in_subscriptions_table.rs @@ -0,0 +1,33 @@ +use anyhow::Result; +use async_trait::async_trait; +use deadpool_postgres::Transaction; + +use crate::{database::postgres::PostgresMigration, migration}; + +pub(super) struct AddRevisionFieldInSubscriptionsTable; +migration!( + AddRevisionFieldInSubscriptionsTable, + 10, + "add revision field in subscriptions table" +); + +#[async_trait] +impl PostgresMigration for AddRevisionFieldInSubscriptionsTable { + async fn up(&self, tx: &mut Transaction) -> Result<()> { + tx.execute( + "ALTER TABLE subscriptions ADD COLUMN IF NOT EXISTS revision TEXT;", + &[], + ) + .await?; + Ok(()) + } + + async fn down(&self, tx: &mut Transaction) -> Result<()> { + tx.execute( + "ALTER TABLE subscriptions DROP COLUMN IF EXISTS revision", + &[], + ) + .await?; + Ok(()) + } +} diff --git a/common/src/database/schema/postgres/_011_add_locale_fields_in_subscriptions_table.rs b/common/src/database/schema/postgres/_011_add_locale_fields_in_subscriptions_table.rs new file mode 100644 index 0000000..6c2762b --- /dev/null +++ b/common/src/database/schema/postgres/_011_add_locale_fields_in_subscriptions_table.rs @@ -0,0 +1,43 @@ +use anyhow::Result; +use async_trait::async_trait; +use deadpool_postgres::Transaction; + +use crate::{database::postgres::PostgresMigration, migration}; + +pub(super) struct AddLocaleFieldsInSubscriptionsTable; +migration!( + AddLocaleFieldsInSubscriptionsTable, + 11, + "add locale fields in subscriptions table" +); + +#[async_trait] +impl PostgresMigration for AddLocaleFieldsInSubscriptionsTable { + async fn up(&self, tx: &mut Transaction) -> Result<()> { + tx.execute( + "ALTER TABLE subscriptions ADD COLUMN IF NOT EXISTS locale TEXT;", + &[], + ) + .await?; + tx.execute( + "ALTER TABLE subscriptions ADD COLUMN IF NOT EXISTS data_locale TEXT;", + &[], + ) + .await?; + Ok(()) + } + + async fn down(&self, tx: &mut Transaction) -> 
Result<()> { + tx.execute( + "ALTER TABLE subscriptions DROP COLUMN IF EXISTS locale", + &[], + ) + .await?; + tx.execute( + "ALTER TABLE subscriptions DROP COLUMN IF EXISTS data_locale", + &[], + ) + .await?; + Ok(()) + } +} diff --git a/common/src/database/schema/postgres/mod.rs b/common/src/database/schema/postgres/mod.rs index f728380..d64629d 100644 --- a/common/src/database/schema/postgres/mod.rs +++ b/common/src/database/schema/postgres/mod.rs @@ -11,6 +11,9 @@ use self::{ _006_add_content_format_field_in_subscriptions_table::AddContentFormatFieldInSubscriptionsTable, _007_add_ignore_channel_error_field_in_subscriptions_table::AddIgnoreChannelErrorFieldInSubscriptionsTable, _008_add_princs_filter_fields_in_subscriptions_table::AddPrincsFilterFieldsInSubscriptionsTable, + _009_alter_outputs_format::AlterOutputsFormat, + _010_add_revision_field_in_subscriptions_table::AddRevisionFieldInSubscriptionsTable, + _011_add_locale_fields_in_subscriptions_table::AddLocaleFieldsInSubscriptionsTable, }; mod _001_create_subscriptions_table; @@ -21,6 +24,9 @@ mod _005_add_uri_field_in_subscriptions_table; mod _006_add_content_format_field_in_subscriptions_table; mod _007_add_ignore_channel_error_field_in_subscriptions_table; mod _008_add_princs_filter_fields_in_subscriptions_table; +mod _009_alter_outputs_format; +mod _010_add_revision_field_in_subscriptions_table; +mod _011_add_locale_fields_in_subscriptions_table; pub fn register_migrations(postgres_db: &mut PostgresDatabase) { postgres_db.register_migration(Arc::new(CreateSubscriptionsTable)); @@ -31,4 +37,7 @@ pub fn register_migrations(postgres_db: &mut PostgresDatabase) { postgres_db.register_migration(Arc::new(AddContentFormatFieldInSubscriptionsTable)); postgres_db.register_migration(Arc::new(AddIgnoreChannelErrorFieldInSubscriptionsTable)); postgres_db.register_migration(Arc::new(AddPrincsFilterFieldsInSubscriptionsTable)); + postgres_db.register_migration(Arc::new(AlterOutputsFormat)); + 
postgres_db.register_migration(Arc::new(AddRevisionFieldInSubscriptionsTable)); + postgres_db.register_migration(Arc::new(AddLocaleFieldsInSubscriptionsTable)); } diff --git a/common/src/database/schema/sqlite/_009_alter_outputs_format.rs b/common/src/database/schema/sqlite/_009_alter_outputs_format.rs new file mode 100644 index 0000000..09411aa --- /dev/null +++ b/common/src/database/schema/sqlite/_009_alter_outputs_format.rs @@ -0,0 +1,182 @@ +use std::collections::HashMap; + +use anyhow::Result; +use rusqlite::{named_params, Connection}; +use serde::{Deserialize, Serialize}; + +use crate::database::sqlite::SQLiteMigration; +use crate::migration; + +pub(super) struct AlterOutputsFormat; +migration!(AlterOutputsFormat, 9, "alter outputs format"); + +#[derive(Debug, Serialize, Clone, Eq, PartialEq, Deserialize)] +pub enum SubscriptionOutputFormat { + Json, + Raw, +} + +#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)] +pub struct KafkaConfiguration { + topic: String, + options: HashMap, +} + +#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)] +pub struct RedisConfiguration { + addr: String, + list: String, +} + +#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)] +pub struct TcpConfiguration { + addr: String, + port: u16, +} + +#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)] +pub struct FileConfiguration { + base: String, + split_on_addr_index: Option, + append_node_name: bool, + filename: String, +} + +#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)] +pub struct UnixDatagramConfiguration { + path: String, +} + +#[derive(Debug, Serialize, Clone, Eq, PartialEq, Deserialize)] +enum OldSubscriptionOutput { + // The last bool indicates whether the output is enabled or not. 
+ Files(SubscriptionOutputFormat, FileConfiguration, bool), + Kafka(SubscriptionOutputFormat, KafkaConfiguration, bool), + Tcp(SubscriptionOutputFormat, TcpConfiguration, bool), + Redis(SubscriptionOutputFormat, RedisConfiguration, bool), + UnixDatagram(SubscriptionOutputFormat, UnixDatagramConfiguration, bool), +} + +#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)] +pub enum SubscriptionOutputDriver { + Files(FileConfiguration), + Kafka(KafkaConfiguration), + Tcp(TcpConfiguration), + Redis(RedisConfiguration), + UnixDatagram(UnixDatagramConfiguration), +} + +#[derive(Serialize, Debug, Deserialize, Clone, Eq, PartialEq)] +pub struct NewSubscriptionOutput { + format: SubscriptionOutputFormat, + driver: SubscriptionOutputDriver, + enabled: bool, +} + +fn old_to_new_output(old: &OldSubscriptionOutput) -> NewSubscriptionOutput { + match old.clone() { + OldSubscriptionOutput::Files(format, config, enabled) => NewSubscriptionOutput { + format, + driver: SubscriptionOutputDriver::Files(config), + enabled, + }, + OldSubscriptionOutput::Kafka(format, config, enabled) => NewSubscriptionOutput { + format, + driver: SubscriptionOutputDriver::Kafka(config), + enabled, + }, + OldSubscriptionOutput::Tcp(format, config, enabled) => NewSubscriptionOutput { + format, + driver: SubscriptionOutputDriver::Tcp(config), + enabled, + }, + OldSubscriptionOutput::Redis(format, config, enabled) => NewSubscriptionOutput { + format, + driver: SubscriptionOutputDriver::Redis(config), + enabled, + }, + OldSubscriptionOutput::UnixDatagram(format, config, enabled) => NewSubscriptionOutput { + format, + driver: SubscriptionOutputDriver::UnixDatagram(config), + enabled, + }, + } +} + +fn new_to_old_output(new: &NewSubscriptionOutput) -> OldSubscriptionOutput { + let enabled = new.enabled; + let format = new.format.clone(); + match &new.driver { + SubscriptionOutputDriver::Files(config) => { + OldSubscriptionOutput::Files(format, config.clone(), enabled) + } + 
SubscriptionOutputDriver::Kafka(config) => { + OldSubscriptionOutput::Kafka(format, config.clone(), enabled) + } + SubscriptionOutputDriver::Tcp(config) => { + OldSubscriptionOutput::Tcp(format, config.clone(), enabled) + } + SubscriptionOutputDriver::Redis(config) => { + OldSubscriptionOutput::Redis(format, config.clone(), enabled) + } + SubscriptionOutputDriver::UnixDatagram(config) => { + OldSubscriptionOutput::UnixDatagram(format, config.clone(), enabled) + } + } +} + +impl SQLiteMigration for AlterOutputsFormat { + fn up(&self, conn: &Connection) -> Result<()> { + let mut statement = conn.prepare( + r#"SELECT uuid, outputs + FROM subscriptions + "#, + )?; + let mut rows = statement.query([])?; + while let Some(row) = rows.next()? { + let uuid: String = row.get(0)?; + let outputs_str: String = row.get(1)?; + let outputs: Vec = serde_json::from_str(&outputs_str)?; + let new_outputs: Vec = + outputs.iter().map(old_to_new_output).collect(); + let new_outputs_str = serde_json::to_string(&new_outputs)?; + conn.execute( + r#"UPDATE subscriptions + SET outputs = :outputs + WHERE uuid = :uuid"#, + named_params! { + ":outputs": new_outputs_str, + ":uuid": uuid + }, + )?; + } + Ok(()) + } + + fn down(&self, conn: &Connection) -> Result<()> { + let mut statement = conn.prepare( + r#"SELECT uuid, outputs + FROM subscriptions + "#, + )?; + let mut rows = statement.query([])?; + while let Some(row) = rows.next()? { + let uuid: String = row.get(0)?; + let outputs_str: String = row.get(1)?; + let outputs: Vec = serde_json::from_str(&outputs_str)?; + let new_outputs: Vec = + outputs.iter().map(new_to_old_output).collect(); + let new_outputs_str = serde_json::to_string(&new_outputs)?; + conn.execute( + r#"UPDATE subscriptions + SET outputs = :outputs + WHERE uuid = :uuid"#, + named_params! 
{ + ":outputs": new_outputs_str, + ":uuid": uuid + }, + )?; + } + Ok(()) + } +} diff --git a/common/src/database/schema/sqlite/_010_add_revision_field_in_subscriptions_table.rs b/common/src/database/schema/sqlite/_010_add_revision_field_in_subscriptions_table.rs new file mode 100644 index 0000000..4f50ac9 --- /dev/null +++ b/common/src/database/schema/sqlite/_010_add_revision_field_in_subscriptions_table.rs @@ -0,0 +1,29 @@ +use anyhow::{anyhow, Result}; +use rusqlite::Connection; + +use crate::database::sqlite::SQLiteMigration; +use crate::migration; + +pub(super) struct AddRevisionFieldInSubscriptionsTable; +migration!( + AddRevisionFieldInSubscriptionsTable, + 10, + "add revision field in subscriptions table" +); + +impl SQLiteMigration for AddRevisionFieldInSubscriptionsTable { + fn up(&self, conn: &Connection) -> Result<()> { + conn.execute( + "ALTER TABLE subscriptions ADD COLUMN revision TEXT", + [], + ) + .map_err(|err| anyhow!("SQLiteError: {}", err))?; + Ok(()) + } + + fn down(&self, conn: &Connection) -> Result<()> { + conn.execute("ALTER TABLE subscriptions DROP COLUMN revision", []) + .map_err(|err| anyhow!("SQLiteError: {}", err))?; + Ok(()) + } +} diff --git a/common/src/database/schema/sqlite/_011_add_locale_fields_in_subscriptions_table.rs b/common/src/database/schema/sqlite/_011_add_locale_fields_in_subscriptions_table.rs new file mode 100644 index 0000000..42881a7 --- /dev/null +++ b/common/src/database/schema/sqlite/_011_add_locale_fields_in_subscriptions_table.rs @@ -0,0 +1,30 @@ +use anyhow::{anyhow, Result}; +use rusqlite::Connection; + +use crate::database::sqlite::SQLiteMigration; +use crate::migration; + +pub(super) struct AddLocaleFieldsInSubscriptionsTable; +migration!( + AddLocaleFieldsInSubscriptionsTable, + 11, + "add locale fields in subscriptions table" +); + +impl SQLiteMigration for AddLocaleFieldsInSubscriptionsTable { + fn up(&self, conn: &Connection) -> Result<()> { + conn.execute("ALTER TABLE subscriptions ADD COLUMN locale 
TEXT", []) + .map_err(|err| anyhow!("SQLiteError: {}", err))?; + conn.execute("ALTER TABLE subscriptions ADD COLUMN data_locale TEXT", []) + .map_err(|err| anyhow!("SQLiteError: {}", err))?; + Ok(()) + } + + fn down(&self, conn: &Connection) -> Result<()> { + conn.execute("ALTER TABLE subscriptions DROP COLUMN locale", []) + .map_err(|err| anyhow!("SQLiteError: {}", err))?; + conn.execute("ALTER TABLE subscriptions DROP COLUMN data_locale", []) + .map_err(|err| anyhow!("SQLiteError: {}", err))?; + Ok(()) + } +} diff --git a/common/src/database/schema/sqlite/mod.rs b/common/src/database/schema/sqlite/mod.rs index 050cdef..6ad17d9 100644 --- a/common/src/database/schema/sqlite/mod.rs +++ b/common/src/database/schema/sqlite/mod.rs @@ -11,6 +11,9 @@ use self::{ _006_add_content_format_field_in_subscriptions_table::AddContentFormatFieldInSubscriptionsTable, _007_add_ignore_channel_error_field_in_subscriptions_table::AddIgnoreChannelErrorFieldInSubscriptionsTable, _008_add_princs_filter_fields_in_subscriptions_table::AddPrincsFilterFieldsInSubscriptionsTable, + _009_alter_outputs_format::AlterOutputsFormat, + _010_add_revision_field_in_subscriptions_table::AddRevisionFieldInSubscriptionsTable, + _011_add_locale_fields_in_subscriptions_table::AddLocaleFieldsInSubscriptionsTable, }; mod _001_create_subscriptions_table; @@ -21,6 +24,9 @@ mod _005_add_uri_field_in_subscriptions_table; mod _006_add_content_format_field_in_subscriptions_table; mod _007_add_ignore_channel_error_field_in_subscriptions_table; mod _008_add_princs_filter_fields_in_subscriptions_table; +mod _009_alter_outputs_format; +mod _010_add_revision_field_in_subscriptions_table; +mod _011_add_locale_fields_in_subscriptions_table; pub fn register_migrations(sqlite_db: &mut SQLiteDatabase) { sqlite_db.register_migration(Arc::new(CreateSubscriptionsTable)); @@ -31,4 +37,7 @@ pub fn register_migrations(sqlite_db: &mut SQLiteDatabase) { 
sqlite_db.register_migration(Arc::new(AddContentFormatFieldInSubscriptionsTable)); sqlite_db.register_migration(Arc::new(AddIgnoreChannelErrorFieldInSubscriptionsTable)); sqlite_db.register_migration(Arc::new(AddPrincsFilterFieldsInSubscriptionsTable)); + sqlite_db.register_migration(Arc::new(AlterOutputsFormat)); + sqlite_db.register_migration(Arc::new(AddRevisionFieldInSubscriptionsTable)); + sqlite_db.register_migration(Arc::new(AddLocaleFieldsInSubscriptionsTable)); } diff --git a/common/src/database/sqlite.rs b/common/src/database/sqlite.rs index 1bfcf54..74bd6f3 100644 --- a/common/src/database/sqlite.rs +++ b/common/src/database/sqlite.rs @@ -28,9 +28,9 @@ use anyhow::{anyhow, ensure, Context, Error, Result}; use async_trait::async_trait; use deadpool_sqlite::{Config, Pool, Runtime}; -use log::{error, warn}; -use rusqlite::types::Type; +use log::warn; use rusqlite::{named_params, params, Connection, OptionalExtension, Row}; +use uuid::Uuid; use std::collections::btree_map::Entry::Vacant; use std::collections::{BTreeMap, BTreeSet}; use std::str::FromStr; @@ -41,7 +41,7 @@ use crate::bookmark::BookmarkData; use crate::database::Database; use crate::heartbeat::{HeartbeatData, HeartbeatsCache}; use crate::subscription::{ - SubscriptionData, SubscriptionMachine, SubscriptionMachineState, SubscriptionStatsCounters, ContentFormat, PrincsFilter, + ContentFormat, InternalVersion, PrincsFilter, SubscriptionData, SubscriptionMachine, SubscriptionMachineState, SubscriptionStatsCounters, SubscriptionUuid }; use super::schema::{Migration, MigrationBase, Version}; @@ -65,6 +65,18 @@ pub struct SQLiteDatabase { migrations: BTreeMap>, } +fn optional(res: Result) -> Result> { + match res { + Ok(value) => Ok(Some(value)), + Err(e) => { + match e.downcast_ref::() { + Some(rusqlite::Error::QueryReturnedNoRows) => Ok(None), + _ => Err(e), + } + } + } +} + impl SQLiteDatabase { pub async fn new(path: &str) -> Result { let config = Config::new(path); @@ -89,33 +101,6 @@ impl 
SQLiteDatabase { } } - async fn get_subscription_by_field( - &self, - field: &'static str, - value: String, - ) -> Result> { - self.pool - .get() - .await? - .interact(move |conn| { - conn.query_row( - format!( - r#"SELECT * - FROM subscriptions - WHERE {} = :value"#, - field - ) - .as_str(), - &[(":value", &value)], - row_to_subscription, - ) - .optional() - .map_err(|err| anyhow!(err)) - }) - .await - .map_err(|err| anyhow!(format!("{}", err)))? - } - async fn get_heartbeats_by_field( &self, field: &'static str, @@ -138,7 +123,7 @@ impl SQLiteDatabase { ) .as_str() )?; - let rows = statement.query_map(&[(":field_value", &field_value), (":subscription", &value)], row_to_heartbeat)?; + let rows = statement.query_and_then(&[(":field_value", &field_value), (":subscription", &value)], row_to_heartbeat)?; let mut heartbeats = Vec::new(); for heartbeat in rows { @@ -156,7 +141,7 @@ impl SQLiteDatabase { ) .as_str() )?; - let rows = statement.query_map(&[(":field_value", &field_value)], row_to_heartbeat)?; + let rows = statement.query_and_then(&[(":field_value", &field_value)], row_to_heartbeat)?; let mut heartbeats = Vec::new(); for heartbeat in rows { heartbeats.push(heartbeat?); @@ -169,47 +154,45 @@ impl SQLiteDatabase { } } -fn row_to_subscription(row: &Row) -> Result { +fn row_to_subscription(row: &Row) -> Result { let outputs_str: String = row.get("outputs")?; - let outputs = match serde_json::from_str(&outputs_str) { - Ok(outputs) => outputs, - Err(e) => { - error!( - "Failed to parse subscription output : {}. 
Subscription output is {}", - e, outputs_str - ); - // We are forced to create a rusqlite::Error - return Err(rusqlite::Error::InvalidColumnType( - 9, - "outputs".to_owned(), - Type::Text, - )); - } - }; - let content_format = ContentFormat::from_str(row.get::<&str, String>("content_format")?.as_ref()).map_err(|_| rusqlite::Error::InvalidColumnType(12, "content_format".to_owned(), Type::Text))?; - let princs_filter = PrincsFilter::from(row.get("princs_filter_op")?, row.get("princs_filter_value")?).map_err(|_| rusqlite::Error::InvalidColumnType(12, "princs_filter".to_owned(), Type::Text))?; - - Ok(SubscriptionData::from( - row.get("uuid")?, - row.get("version")?, - row.get("name")?, - row.get("uri")?, - row.get("query")?, - row.get("heartbeat_interval")?, - row.get("connection_retry_count")?, - row.get("connection_retry_interval")?, - row.get("max_time")?, - row.get("max_envelope_size")?, - row.get("enabled")?, - row.get("read_existing_events")?, - content_format, - row.get("ignore_channel_error")?, - princs_filter, - outputs, - )) + let outputs = serde_json::from_str(&outputs_str).context("Failed to parse subscription output")?; + + // row.get can not convert into &str, so we retrieve String(s) first + let name: String = row.get("name")?; + let uuid: String = row.get("uuid")?; + let version: String = row.get("version")?; + let query: String = row.get("query")?; + + let content_format = ContentFormat::from_str(row.get::<&str, String>("content_format")?.as_ref())?; + let princs_filter = PrincsFilter::from(row.get("princs_filter_op")?, row.get("princs_filter_value")?)?; + + let mut subscription= SubscriptionData::new(&name, &query); + subscription.set_uuid(SubscriptionUuid(Uuid::parse_str(&uuid)?)) + .set_uri(row.get("uri")?) + .set_revision(row.get("revision")?) + .set_heartbeat_interval(row.get("heartbeat_interval")?) + .set_connection_retry_count(row.get("connection_retry_count")?) + .set_connection_retry_interval(row.get("connection_retry_interval")?) 
+ .set_max_time(row.get("max_time")?) + .set_max_envelope_size(row.get("max_envelope_size")?) + .set_enabled(row.get("enabled")?) + .set_read_existing_events(row.get("read_existing_events")?) + .set_content_format(content_format) + .set_ignore_channel_error(row.get("ignore_channel_error")?) + .set_locale(row.get("locale")?) + .set_data_locale(row.get("data_locale")?) + .set_princs_filter(princs_filter) + .set_outputs(outputs); + + // This needs to be done at the end because version is updated each time + // a "set_" function is called + subscription.set_internal_version(InternalVersion(Uuid::parse_str(&version)?)); + + Ok(subscription) } -fn row_to_heartbeat(row: &Row) -> Result { +fn row_to_heartbeat(row: &Row) -> Result { let subscription = row_to_subscription(row)?; let heartbeat = HeartbeatData::new( row.get("machine")?, @@ -379,7 +362,7 @@ impl Database for SQLiteDatabase { JOIN subscriptions ON subscriptions.uuid = heartbeats.subscription "#, )?; - let rows = statement.query_map((), row_to_heartbeat)?; + let rows = statement.query_and_then((), row_to_heartbeat)?; let mut heartbeats = Vec::new(); for heartbeat in rows { @@ -407,7 +390,7 @@ impl Database for SQLiteDatabase { WHERE subscription = :subscription"#, )?; let rows = statement - .query_map(&[(":subscription", &subscription_owned)], row_to_heartbeat)?; + .query_and_then(&[(":subscription", &subscription_owned)], row_to_heartbeat)?; let mut heartbeats = Vec::new(); for heartbeat in rows { @@ -525,7 +508,7 @@ impl Database for SQLiteDatabase { FROM subscriptions "#, )?; - let rows = statement.query_map((), row_to_subscription)?; + let rows = statement.query_and_then((), row_to_subscription)?; let mut subscriptions = Vec::new(); for subscription in rows { @@ -537,11 +520,6 @@ impl Database for SQLiteDatabase { .map_err(|err| anyhow!(format!("{}", err)))? 
} - async fn get_subscription(&self, version: &str) -> Result> { - self.get_subscription_by_field("version", version.to_string()) - .await - } - async fn get_subscription_by_identifier( &self, identifier: &str, @@ -551,37 +529,41 @@ impl Database for SQLiteDatabase { .get() .await? .interact(move |conn| { - conn.query_row( - r#"SELECT * - FROM subscriptions - WHERE name = :identifier OR uuid = :identifier"#, - &[(":identifier", &identifier)], - row_to_subscription, + optional( + conn.query_row_and_then( + r#"SELECT * + FROM subscriptions + WHERE name = :identifier OR uuid = :identifier"#, + &[(":identifier", &identifier)], + row_to_subscription, + ) ) - .optional() - .map_err(|err| anyhow!(err)) }) .await .map_err(|err| anyhow!(format!("{}", err)))? } - async fn store_subscription(&self, subscription: SubscriptionData) -> Result<()> { + async fn store_subscription(&self, subscription: &SubscriptionData) -> Result<()> { + let subscription = subscription.clone(); let count = self .pool .get() .await? 
.interact(move |conn| { conn.execute( - r#"INSERT INTO subscriptions (uuid, version, name, uri, query, + r#"INSERT INTO subscriptions (uuid, version, revision, name, uri, query, heartbeat_interval, connection_retry_count, connection_retry_interval, max_time, max_envelope_size, enabled, read_existing_events, content_format, - ignore_channel_error, princs_filter_op, princs_filter_value, outputs) - VALUES (:uuid, :version, :name, :uri, :query, + ignore_channel_error, princs_filter_op, princs_filter_value, outputs, locale, + data_locale) + VALUES (:uuid, :version, :revision, :name, :uri, :query, :heartbeat_interval, :connection_retry_count, :connection_retry_interval, :max_time, :max_envelope_size, :enabled, :read_existing_events, :content_format, - :ignore_channel_error, :princs_filter_op, :princs_filter_value, :outputs) + :ignore_channel_error, :princs_filter_op, :princs_filter_value, :outputs, + :locale, :data_locale) ON CONFLICT (uuid) DO UPDATE SET version = excluded.version, + revision = excluded.revision, name = excluded.name, uri = excluded.uri, query = excluded.query, @@ -596,10 +578,13 @@ impl Database for SQLiteDatabase { ignore_channel_error = excluded.ignore_channel_error, princs_filter_op = excluded.princs_filter_op, princs_filter_value = excluded.princs_filter_value, - outputs = excluded.outputs"#, + outputs = excluded.outputs, + locale = excluded.locale, + data_locale = excluded.data_locale"#, named_params! 
{ - ":uuid": subscription.uuid(), - ":version": subscription.version(), + ":uuid": subscription.uuid_string(), + ":version": subscription.internal_version().to_string(), + ":revision": subscription.revision(), ":name": subscription.name(), ":uri": subscription.uri(), ":query": subscription.query(), @@ -615,6 +600,8 @@ impl Database for SQLiteDatabase { ":princs_filter_op": subscription.princs_filter().operation().map(|x| x.to_string()), ":princs_filter_value": subscription.princs_filter().princs_to_opt_string(), ":outputs": serde_json::to_string(subscription.outputs())?, + ":locale": subscription.locale(), + ":data_locale": subscription.data_locale(), }, ) .map_err(|err| anyhow!(err)) diff --git a/common/src/lib.rs b/common/src/lib.rs index a38bc2e..67cbb99 100644 --- a/common/src/lib.rs +++ b/common/src/lib.rs @@ -1,4 +1,4 @@ -#![allow(clippy::too_many_arguments)] +#![deny(unsafe_code)] pub mod bookmark; pub mod database; @@ -7,3 +7,4 @@ pub mod heartbeat; pub mod settings; pub mod subscription; pub mod utils; +pub mod models; diff --git a/common/src/models/config.rs b/common/src/models/config.rs new file mode 100644 index 0000000..c0ae88d --- /dev/null +++ b/common/src/models/config.rs @@ -0,0 +1,645 @@ +use std::collections::{HashMap, HashSet}; + +use anyhow::{bail, Context, Result}; +use serde::Deserialize; +use uuid::Uuid; + +use crate::subscription::{ + SubscriptionData, DEFAULT_FILE_APPEND_NODE_NAME, DEFAULT_FILE_NAME, DEFAULT_OUTPUT_ENABLED, +}; + +#[derive(Debug, Clone, Deserialize, Eq, PartialEq)] +#[serde(deny_unknown_fields)] +struct KafkaConfiguration { + pub topic: String, + pub options: HashMap, +} + +impl From for crate::subscription::KafkaConfiguration { + fn from(value: KafkaConfiguration) -> Self { + crate::subscription::KafkaConfiguration::new(value.topic, value.options) + } +} + +#[derive(Debug, Clone, Deserialize, Eq, PartialEq)] +#[serde(deny_unknown_fields)] +struct RedisConfiguration { + pub addr: String, + pub list: String, +} + +impl From 
for crate::subscription::RedisConfiguration { + fn from(value: RedisConfiguration) -> Self { + crate::subscription::RedisConfiguration::new(value.addr, value.list) + } +} + +#[derive(Debug, Clone, Deserialize, Eq, PartialEq)] +#[serde(deny_unknown_fields)] +struct TcpConfiguration { + pub addr: String, + pub port: u16, +} + +impl From for crate::subscription::TcpConfiguration { + fn from(value: TcpConfiguration) -> Self { + crate::subscription::TcpConfiguration::new(value.addr, value.port) + } +} + +#[derive(Debug, Clone, Deserialize, Eq, PartialEq)] +#[serde(deny_unknown_fields)] +struct FilesConfiguration { + pub base: String, + pub split_on_addr_index: Option, + pub append_node_name: Option, + pub filename: Option, +} + +impl From for crate::subscription::FilesConfiguration { + fn from(value: FilesConfiguration) -> Self { + crate::subscription::FilesConfiguration::new( + value.base, + value.split_on_addr_index, + value + .append_node_name + .unwrap_or(DEFAULT_FILE_APPEND_NODE_NAME), + value.filename.unwrap_or(DEFAULT_FILE_NAME.to_owned()), + ) + } +} + +#[derive(Debug, Clone, Deserialize, Eq, PartialEq)] +#[serde(deny_unknown_fields)] +struct UnixDatagramConfiguration { + pub path: String, +} + +impl From for crate::subscription::UnixDatagramConfiguration { + fn from(value: UnixDatagramConfiguration) -> Self { + crate::subscription::UnixDatagramConfiguration::new(value.path) + } +} + +#[derive(Debug, Clone, Deserialize, Eq, PartialEq)] +#[serde(tag = "driver", content = "config")] +enum SubscriptionOutputDriver { + Files(FilesConfiguration), + Kafka(KafkaConfiguration), + Tcp(TcpConfiguration), + Redis(RedisConfiguration), + UnixDatagram(UnixDatagramConfiguration), +} + +impl From for crate::subscription::SubscriptionOutputDriver { + fn from(value: SubscriptionOutputDriver) -> Self { + match value { + SubscriptionOutputDriver::Files(config) => { + crate::subscription::SubscriptionOutputDriver::Files(config.into()) + } + SubscriptionOutputDriver::Kafka(config) => 
{ + crate::subscription::SubscriptionOutputDriver::Kafka(config.into()) + } + SubscriptionOutputDriver::Tcp(config) => { + crate::subscription::SubscriptionOutputDriver::Tcp(config.into()) + } + SubscriptionOutputDriver::Redis(config) => { + crate::subscription::SubscriptionOutputDriver::Redis(config.into()) + } + SubscriptionOutputDriver::UnixDatagram(config) => { + crate::subscription::SubscriptionOutputDriver::UnixDatagram(config.into()) + } + } + } +} + +#[derive(Deserialize, Debug, Clone, Eq, PartialEq)] +#[serde(deny_unknown_fields)] +struct SubscriptionOutput { + pub format: SubscriptionOutputFormat, + #[serde(flatten)] + pub driver: SubscriptionOutputDriver, + pub enabled: Option, +} + +impl From for crate::subscription::SubscriptionOutput { + fn from(value: SubscriptionOutput) -> Self { + crate::subscription::SubscriptionOutput::new( + value.format.into(), + value.driver.into(), + value.enabled.unwrap_or(DEFAULT_OUTPUT_ENABLED), + ) + } +} + +#[derive(Debug, Clone, Eq, PartialEq, Deserialize)] +enum SubscriptionOutputFormat { + Json, + Raw, + RawJson, +} + +impl From for crate::subscription::SubscriptionOutputFormat { + fn from(value: SubscriptionOutputFormat) -> Self { + match value { + SubscriptionOutputFormat::Json => crate::subscription::SubscriptionOutputFormat::Json, + SubscriptionOutputFormat::Raw => crate::subscription::SubscriptionOutputFormat::Raw, + SubscriptionOutputFormat::RawJson => { + crate::subscription::SubscriptionOutputFormat::RawJson + } + } + } +} + +#[derive(Debug, Clone, Eq, PartialEq, Deserialize)] +enum PrincsFilterOperation { + Only, + Except, +} + +impl From for crate::subscription::PrincsFilterOperation { + fn from(value: PrincsFilterOperation) -> Self { + match value { + PrincsFilterOperation::Except => crate::subscription::PrincsFilterOperation::Except, + PrincsFilterOperation::Only => crate::subscription::PrincsFilterOperation::Only, + } + } +} + +#[derive(Debug, Clone, Eq, PartialEq, Deserialize)] 
+#[serde(deny_unknown_fields)] +struct PrincsFilter { + pub operation: Option, + pub princs: HashSet, +} + +impl TryFrom for crate::subscription::PrincsFilter { + type Error = anyhow::Error; + + fn try_from(value: PrincsFilter) -> std::prelude::v1::Result { + let mut filter = crate::subscription::PrincsFilter::empty(); + let operation = value.operation.map(|op| op.into()); + filter.set_operation(operation); + filter.set_princs(value.princs)?; + Ok(filter) + } +} + +#[derive(Debug, Clone, Eq, PartialEq, Deserialize)] +enum ContentFormat { + Raw, + RenderedText, +} + +impl From for crate::subscription::ContentFormat { + fn from(value: ContentFormat) -> Self { + match value { + ContentFormat::Raw => crate::subscription::ContentFormat::Raw, + ContentFormat::RenderedText => crate::subscription::ContentFormat::RenderedText, + } + } +} + +#[derive(Debug, Clone, Eq, PartialEq, Deserialize)] +#[serde(deny_unknown_fields)] +struct SubscriptionOptions { + pub uri: Option, + pub heartbeat_interval: Option, + pub connection_retry_count: Option, + pub connection_retry_interval: Option, + pub max_time: Option, + pub max_envelope_size: Option, + pub enabled: Option, + pub read_existing_events: Option, + pub content_format: Option, + pub ignore_channel_error: Option, + pub locale: Option, + pub data_locale: Option, +} + +impl SubscriptionOptions { + pub fn feed_subscription_data(&self, data: &mut SubscriptionData) { + data.set_uri(self.uri.clone()); + + if let Some(heartbeat_interval) = self.heartbeat_interval { + data.set_heartbeat_interval(heartbeat_interval); + } + + if let Some(connection_retry_count) = self.connection_retry_count { + data.set_connection_retry_count(connection_retry_count); + } + + if let Some(connection_retry_interval) = self.connection_retry_interval { + data.set_connection_retry_interval(connection_retry_interval); + } + + if let Some(max_time) = self.max_time { + data.set_max_time(max_time); + } + + if let Some(max_envelope_size) = self.max_envelope_size { 
+ data.set_max_envelope_size(max_envelope_size); + } + + if let Some(enabled) = self.enabled { + data.set_enabled(enabled); + } + + if let Some(read_existing_events) = self.read_existing_events { + data.set_read_existing_events(read_existing_events); + } + + if let Some(content_format) = self.content_format.clone() { + data.set_content_format(content_format.into()); + } + + if let Some(ignore_channel_error) = self.ignore_channel_error { + data.set_ignore_channel_error(ignore_channel_error); + } + + data.set_locale(self.locale.clone()); + data.set_data_locale(self.data_locale.clone()); + } +} +#[derive(Debug, PartialEq, Clone, Eq, Deserialize)] +#[serde(deny_unknown_fields)] +struct Subscription { + pub uuid: Uuid, + #[serde(default)] + pub version: Uuid, + pub name: String, + pub query: String, + pub filter: Option, + pub outputs: Vec, + pub options: Option, +} + +impl TryFrom for crate::subscription::SubscriptionData { + type Error = anyhow::Error; + + fn try_from(subscription: Subscription) -> std::prelude::v1::Result { + let mut data = + crate::subscription::SubscriptionData::new(&subscription.name, &subscription.query); + data.set_uuid(crate::subscription::SubscriptionUuid(subscription.uuid)); + data.set_name(subscription.name.clone()); + data.set_query(subscription.query.clone()); + if let Some(filter) = subscription.filter { + data.set_princs_filter(filter.try_into()?); + } + + if subscription.outputs.is_empty() { + bail!("Missing subscription outputs"); + } + + for output in subscription.outputs.iter() { + data.add_output(output.clone().into()); + } + + if let Some(options) = subscription.options { + options.feed_subscription_data(&mut data); + } + + Ok(data) + } +} + +pub fn parse( + content: &str, + revision: Option<&String>, +) -> Result { + let subscription: Subscription = toml::from_str(content).context("Error while parsing TOML")?; + let mut data: SubscriptionData = subscription.try_into()?; + data.set_revision(revision.cloned()); + Ok(data) +} + 
+#[cfg(test)] +pub mod tests { + use std::str::FromStr; + + use crate::subscription::InternalVersion; + + use super::*; + + const FULL_CONTENT: &str = r#" +uuid = "b00bf259-3ba9-4faf-b58e-d0e9a3275778" +name = "my-subscription" + +query = """ +a wonderful query +""" + +[options] +enabled = true +uri = "toto" +heartbeat_interval = 32 +connection_retry_count = 11 +connection_retry_interval = 12 +max_time = 13 +max_envelope_size = 14 +read_existing_events = false +content_format = "Raw" # or RenderedText +ignore_channel_error = true + +[filter] +operation = "Only" # or Except +princs = ["toto@windomain.local", "tutu@windomain.local"] + +## Files output +[[outputs]] +driver = "Files" +format = "Json" # or "Raw" +enabled = true + +[outputs.config] +base = "/tmp/" +split_on_addr_index = 5 +append_node_name = true +filename = "courgette" + +## Kafka output +[[outputs]] +driver = "Kafka" +format = "Raw" +enabled = false + +[outputs.config] +topic = "my-topic" + +[outputs.config.options] +"bootstrap.server" = "localhost:9092" + +## Tcp output +[[outputs]] +driver = "Tcp" +format = "RawJson" +enabled = true + +[outputs.config] +addr = "127.0.0.1" +port = 8080 + +## Redis output +[[outputs]] +driver = "Redis" +format = "Json" +enabled = false + +[outputs.config] +addr = "localhost" +list = "my-list" + +## UnixDatagram output +[[outputs]] +format = "Raw" +enabled = true +driver = "UnixDatagram" + +[outputs.config] +path = "/tmp/openwec.socket" + "#; + + #[test] + fn test_deserialize_full() -> Result<()> { + let revision = "My-revision".to_string(); + let mut data = parse(FULL_CONTENT, Some(&revision))?; + + let mut expected = + crate::subscription::SubscriptionData::new("my-subscription", "a wonderful query\n"); + expected + .set_uuid(crate::subscription::SubscriptionUuid(Uuid::from_str( + "b00bf259-3ba9-4faf-b58e-d0e9a3275778", + )?)) + .set_uri(Some("toto".to_string())) + .set_enabled(true) + .set_heartbeat_interval(32) + .set_connection_retry_count(11) + 
.set_connection_retry_interval(12) + .set_max_time(13) + .set_max_envelope_size(14) + .set_read_existing_events(false) + .set_content_format(crate::subscription::ContentFormat::Raw) + .set_ignore_channel_error(true) + .set_revision(Some(revision)); + + let mut kafka_options = HashMap::new(); + kafka_options.insert("bootstrap.server".to_string(), "localhost:9092".to_string()); + + let outputs = vec![ + crate::subscription::SubscriptionOutput::new( + crate::subscription::SubscriptionOutputFormat::Json, + crate::subscription::SubscriptionOutputDriver::Files( + crate::subscription::FilesConfiguration::new( + "/tmp/".to_string(), + Some(5), + true, + "courgette".to_string(), + ), + ), + true, + ), + crate::subscription::SubscriptionOutput::new( + crate::subscription::SubscriptionOutputFormat::Raw, + crate::subscription::SubscriptionOutputDriver::Kafka( + crate::subscription::KafkaConfiguration::new( + "my-topic".to_string(), + kafka_options, + ), + ), + false, + ), + crate::subscription::SubscriptionOutput::new( + crate::subscription::SubscriptionOutputFormat::RawJson, + crate::subscription::SubscriptionOutputDriver::Tcp( + crate::subscription::TcpConfiguration::new("127.0.0.1".to_string(), 8080), + ), + true, + ), + crate::subscription::SubscriptionOutput::new( + crate::subscription::SubscriptionOutputFormat::Json, + crate::subscription::SubscriptionOutputDriver::Redis( + crate::subscription::RedisConfiguration::new( + "localhost".to_string(), + "my-list".to_string(), + ), + ), + false, + ), + crate::subscription::SubscriptionOutput::new( + crate::subscription::SubscriptionOutputFormat::Raw, + crate::subscription::SubscriptionOutputDriver::UnixDatagram( + crate::subscription::UnixDatagramConfiguration::new( + "/tmp/openwec.socket".to_string(), + ), + ), + true, + ), + ]; + + expected.set_outputs(outputs); + + let mut filter = crate::subscription::PrincsFilter::empty(); + filter.set_operation(Some(crate::subscription::PrincsFilterOperation::Only)); + let mut princs = 
HashSet::new(); + princs.insert("toto@windomain.local".to_string()); + princs.insert("tutu@windomain.local".to_string()); + filter.set_princs(princs)?; + + expected.set_princs_filter(filter); + + // The only difference between both subscriptions should be the + // internal version, so we set both the same value + let version = Uuid::new_v4(); + // Must be done last + expected.set_internal_version(crate::subscription::InternalVersion(version.clone())); + data.set_internal_version(crate::subscription::InternalVersion(version.clone())); + + assert_eq!(data, expected); + Ok(()) + } + + const MINIMAL_CONTENT: &str = r#" +uuid = "b00bf259-3ba9-4faf-b58e-d0e9a3757798" +name = "minimal" + +query = """ +a very small query +""" + +[[outputs]] +driver = "UnixDatagram" +format = "Json" + +[outputs.config] +path = "/tmp/my.socket" + "#; + + #[test] + fn test_serialize_minimal() -> Result<()> { + let mut data = parse(MINIMAL_CONTENT, None)?; + + let mut expected = + crate::subscription::SubscriptionData::new("minimal", "a very small query\n"); + expected + .set_uuid(crate::subscription::SubscriptionUuid(Uuid::from_str( + "b00bf259-3ba9-4faf-b58e-d0e9a3757798", + )?)) + .set_outputs(vec![crate::subscription::SubscriptionOutput::new( + crate::subscription::SubscriptionOutputFormat::Json, + crate::subscription::SubscriptionOutputDriver::UnixDatagram( + crate::subscription::UnixDatagramConfiguration::new( + "/tmp/my.socket".to_string(), + ), + ), + true, + )]); + + // Must be done last + let version = Uuid::new_v4(); + data.set_internal_version(InternalVersion(version)); + expected.set_internal_version(InternalVersion(version)); + + assert_eq!(data, expected); + Ok(()) + } + + const MISSING_UUID: &str = r#" +name = "minimal" + +query = """ +a very small query +""" + +[[outputs]] +driver = "UnixDatagram" +format = "Json" + +[outputs.config] +path = "/tmp/my.socket" + "#; + + #[test] + #[should_panic(expected = "missing field `uuid`")] + fn test_serialize_missing_uuid() { + 
parse(MISSING_UUID, None).unwrap(); + } + + const MISSING_NAME: &str = r#" +uuid = "b00bf259-3ba9-4faf-b58e-d0e9a3757798" + +query = """ +a very small query +""" + +[[outputs]] +driver = "UnixDatagram" +format = "Json" + +[outputs.config] +path = "/tmp/my.socket" + "#; + + #[test] + #[should_panic(expected = "missing field `name`")] + fn test_serialize_missing_name() { + parse(MISSING_NAME, None).unwrap(); + } + + const MISSING_QUERY: &str = r#" +uuid = "b00bf259-3ba9-4faf-b58e-d0e9a3757798" +name = "minimal" + +[[outputs]] +driver = "UnixDatagram" +format = "Json" + +[outputs.config] +path = "/tmp/my.socket" + "#; + + #[test] + #[should_panic(expected = "missing field `query`")] + fn test_serialize_missing_query() { + parse(MISSING_QUERY, None).unwrap(); + } + + const MISSING_OUTPUTS: &str = r#" +uuid = "b00bf259-3ba9-4faf-b58e-d0e9a3757798" +name = "minimal" + +query = """ +a very small query +""" + +outputs = [] + "#; + + #[test] + #[should_panic(expected = "Missing subscription outputs")] + fn test_serialize_missing_outputs() { + parse(MISSING_OUTPUTS, None).unwrap(); + } + + const RANDOM_FIELD: &str = r#" +babar = "courgette" +uuid = "b00bf259-3ba9-4faf-b58e-d0e9a3757798" +name = "minimal" + +query = """ +a very small query +""" + +[[outputs]] +driver = "UnixDatagram" +format = "Json" + +[outputs.config] +path = "/tmp/my.socket" + "#; + + #[test] + #[should_panic(expected = "unknown field `babar`")] + fn test_random_field() { + parse(RANDOM_FIELD, None).unwrap(); + } +} diff --git a/common/src/models/export.rs b/common/src/models/export.rs new file mode 100644 index 0000000..f1cf29f --- /dev/null +++ b/common/src/models/export.rs @@ -0,0 +1,486 @@ +use serde::{Deserialize, Serialize}; + +use anyhow::{Context, Result}; + +// Export/Import structures of an already existing version must NOT change +// because we want to be able to import subscriptions that have been exported +// using an "old" version of OpenWEC. 
+// +// If you want to change something in the SubscriptionData struct, you must +// create a new version of the schema and adapt the import code of the already +// existing versions so that importing from old versions still works. +// Then, you need to update the version used while exporting (see serialize()). + +#[derive(Debug, PartialEq, Clone, Eq, Deserialize, Serialize)] +#[serde(tag = "schema", content = "data")] +enum ImportExport { + V1(v1::Subscriptions), +} + +pub fn serialize(subscriptions: &[crate::subscription::SubscriptionData]) -> Result { + let export = ImportExport::V1(subscriptions.into()); + Ok(serde_json::to_string(&export)?) +} + +pub fn parse(content: &str) -> Result> { + let import: ImportExport = serde_json::from_str(content).context("Failed to parse file")?; + let subscriptions = match import { + ImportExport::V1(subscriptions) => subscriptions.into(), + }; + Ok(subscriptions) +} +mod v1 { + use serde::{Deserialize, Serialize}; + use std::collections::{HashMap, HashSet}; + use uuid::Uuid; + + #[derive(Debug, Clone, Deserialize, Eq, PartialEq, Serialize)] + pub(super) struct KafkaConfiguration { + pub topic: String, + pub options: HashMap, + } + + // Used for import + impl From for crate::subscription::KafkaConfiguration { + fn from(value: KafkaConfiguration) -> Self { + crate::subscription::KafkaConfiguration::new(value.topic, value.options) + } + } + + // Used for export + impl From for KafkaConfiguration { + fn from(value: crate::subscription::KafkaConfiguration) -> Self { + Self { + topic: value.topic().to_string(), + options: value.options().clone(), + } + } + } + + #[derive(Debug, Clone, Deserialize, Eq, PartialEq, Serialize)] + pub(super) struct RedisConfiguration { + pub addr: String, + pub list: String, + } + + impl From for crate::subscription::RedisConfiguration { + fn from(value: RedisConfiguration) -> Self { + crate::subscription::RedisConfiguration::new(value.addr, value.list) + } + } + + impl From for RedisConfiguration { + fn 
from(value: crate::subscription::RedisConfiguration) -> Self { + Self { + addr: value.addr().to_string(), + list: value.list().to_string(), + } + } + } + + #[derive(Debug, Clone, Deserialize, Eq, PartialEq, Serialize)] + pub(super) struct TcpConfiguration { + pub addr: String, + pub port: u16, + } + + impl From for crate::subscription::TcpConfiguration { + fn from(value: TcpConfiguration) -> Self { + crate::subscription::TcpConfiguration::new(value.addr, value.port) + } + } + + impl From for TcpConfiguration { + fn from(value: crate::subscription::TcpConfiguration) -> Self { + Self { + addr: value.addr().to_string(), + port: value.port(), + } + } + } + + #[derive(Debug, Clone, Deserialize, Eq, PartialEq, Serialize)] + pub(super) struct FilesConfiguration { + pub base: String, + pub split_on_addr_index: Option, + pub append_node_name: bool, + pub filename: String, + } + + impl From for crate::subscription::FilesConfiguration { + fn from(value: FilesConfiguration) -> Self { + crate::subscription::FilesConfiguration::new( + value.base, + value.split_on_addr_index, + value.append_node_name, + value.filename, + ) + } + } + + impl From for FilesConfiguration { + fn from(value: crate::subscription::FilesConfiguration) -> Self { + Self { + base: value.base().to_string(), + split_on_addr_index: value.split_on_addr_index(), + append_node_name: value.append_node_name(), + filename: value.filename().to_string(), + } + } + } + + #[derive(Debug, Clone, Deserialize, Eq, PartialEq, Serialize)] + pub(super) struct UnixDatagramConfiguration { + pub path: String, + } + + impl From for crate::subscription::UnixDatagramConfiguration { + fn from(value: UnixDatagramConfiguration) -> Self { + crate::subscription::UnixDatagramConfiguration::new(value.path) + } + } + + impl From for UnixDatagramConfiguration { + fn from(value: crate::subscription::UnixDatagramConfiguration) -> Self { + Self { + path: value.path().to_string(), + } + } + } + + #[derive(Debug, Clone, Deserialize, Eq, 
PartialEq, Serialize)] + pub(super) enum SubscriptionOutputDriver { + Files(FilesConfiguration), + Kafka(KafkaConfiguration), + Tcp(TcpConfiguration), + Redis(RedisConfiguration), + UnixDatagram(UnixDatagramConfiguration), + } + + impl From for crate::subscription::SubscriptionOutputDriver { + fn from(value: SubscriptionOutputDriver) -> Self { + match value { + SubscriptionOutputDriver::Files(config) => { + crate::subscription::SubscriptionOutputDriver::Files(config.into()) + } + SubscriptionOutputDriver::Kafka(config) => { + crate::subscription::SubscriptionOutputDriver::Kafka(config.into()) + } + SubscriptionOutputDriver::Tcp(config) => { + crate::subscription::SubscriptionOutputDriver::Tcp(config.into()) + } + SubscriptionOutputDriver::Redis(config) => { + crate::subscription::SubscriptionOutputDriver::Redis(config.into()) + } + SubscriptionOutputDriver::UnixDatagram(config) => { + crate::subscription::SubscriptionOutputDriver::UnixDatagram(config.into()) + } + } + } + } + + impl From for SubscriptionOutputDriver { + fn from(value: crate::subscription::SubscriptionOutputDriver) -> Self { + match value { + crate::subscription::SubscriptionOutputDriver::Files(config) => { + SubscriptionOutputDriver::Files(config.into()) + } + crate::subscription::SubscriptionOutputDriver::Kafka(config) => { + SubscriptionOutputDriver::Kafka(config.into()) + } + crate::subscription::SubscriptionOutputDriver::Tcp(config) => { + SubscriptionOutputDriver::Tcp(config.into()) + } + crate::subscription::SubscriptionOutputDriver::Redis(config) => { + SubscriptionOutputDriver::Redis(config.into()) + } + crate::subscription::SubscriptionOutputDriver::UnixDatagram(config) => { + SubscriptionOutputDriver::UnixDatagram(config.into()) + } + } + } + } + + #[derive(Debug, Clone, Eq, PartialEq, Deserialize, Serialize)] + pub(super) enum SubscriptionOutputFormat { + Json, + Raw, + RawJson, + } + + impl From for crate::subscription::SubscriptionOutputFormat { + fn from(value: 
SubscriptionOutputFormat) -> Self { + match value { + SubscriptionOutputFormat::Json => { + crate::subscription::SubscriptionOutputFormat::Json + } + SubscriptionOutputFormat::Raw => crate::subscription::SubscriptionOutputFormat::Raw, + SubscriptionOutputFormat::RawJson => { + crate::subscription::SubscriptionOutputFormat::RawJson + } + } + } + } + + impl From for SubscriptionOutputFormat { + fn from(value: crate::subscription::SubscriptionOutputFormat) -> Self { + match value { + crate::subscription::SubscriptionOutputFormat::Json => { + SubscriptionOutputFormat::Json + } + crate::subscription::SubscriptionOutputFormat::Raw => SubscriptionOutputFormat::Raw, + crate::subscription::SubscriptionOutputFormat::RawJson => { + SubscriptionOutputFormat::RawJson + } + } + } + } + + #[derive(Deserialize, Debug, Clone, Eq, PartialEq, Serialize)] + pub(super) struct SubscriptionOutput { + pub format: SubscriptionOutputFormat, + pub driver: SubscriptionOutputDriver, + pub enabled: bool, + } + + impl From for crate::subscription::SubscriptionOutput { + fn from(value: SubscriptionOutput) -> Self { + crate::subscription::SubscriptionOutput::new( + value.format.into(), + value.driver.into(), + value.enabled, + ) + } + } + + impl From for SubscriptionOutput { + fn from(value: crate::subscription::SubscriptionOutput) -> Self { + Self { + format: value.format().clone().into(), + driver: value.driver().clone().into(), + enabled: value.enabled(), + } + } + } + + #[derive(Debug, Clone, Eq, PartialEq, Deserialize, Serialize)] + pub(super) enum PrincsFilterOperation { + Only, + Except, + } + + impl From for crate::subscription::PrincsFilterOperation { + fn from(value: PrincsFilterOperation) -> Self { + match value { + PrincsFilterOperation::Except => crate::subscription::PrincsFilterOperation::Except, + PrincsFilterOperation::Only => crate::subscription::PrincsFilterOperation::Only, + } + } + } + + impl From for PrincsFilterOperation { + fn from(value: 
crate::subscription::PrincsFilterOperation) -> Self { + match value { + crate::subscription::PrincsFilterOperation::Except => PrincsFilterOperation::Except, + crate::subscription::PrincsFilterOperation::Only => PrincsFilterOperation::Only, + } + } + } + + #[derive(Debug, Clone, Eq, PartialEq, Deserialize, Serialize)] + pub(super) struct PrincsFilter { + pub operation: Option, + pub princs: HashSet, + } + + impl From for crate::subscription::PrincsFilter { + fn from(value: PrincsFilter) -> Self { + crate::subscription::PrincsFilter::new(value.operation.map(|x| x.into()), value.princs) + } + } + + impl From for PrincsFilter { + fn from(value: crate::subscription::PrincsFilter) -> Self { + Self { + operation: value.operation().map(|x| x.clone().into()), + princs: value.princs().clone(), + } + } + } + + #[derive(Debug, Clone, Eq, PartialEq, Deserialize, Serialize)] + pub(super) enum ContentFormat { + Raw, + RenderedText, + } + + impl From for crate::subscription::ContentFormat { + fn from(value: ContentFormat) -> Self { + match value { + ContentFormat::Raw => crate::subscription::ContentFormat::Raw, + ContentFormat::RenderedText => crate::subscription::ContentFormat::RenderedText, + } + } + } + + impl From for ContentFormat { + fn from(value: crate::subscription::ContentFormat) -> Self { + match value { + crate::subscription::ContentFormat::Raw => ContentFormat::Raw, + crate::subscription::ContentFormat::RenderedText => ContentFormat::RenderedText, + } + } + } + + #[derive(Debug, PartialEq, Clone, Eq, Deserialize, Serialize)] + pub(super) struct SubscriptionData { + pub uuid: Uuid, + pub revision: Option, + pub name: String, + pub uri: Option, + pub query: String, + pub heartbeat_interval: u32, + pub connection_retry_count: u16, + pub connection_retry_interval: u32, + pub max_time: u32, + pub max_envelope_size: u32, + pub enabled: bool, + pub read_existing_events: bool, + pub content_format: ContentFormat, + pub ignore_channel_error: bool, + pub locale: Option, + pub 
data_locale: Option, + pub filter: PrincsFilter, + pub outputs: Vec, + } + + impl From for crate::subscription::SubscriptionData { + fn from(value: SubscriptionData) -> Self { + let mut data = crate::subscription::SubscriptionData::new(&value.name, &value.query); + data.set_uuid(crate::subscription::SubscriptionUuid(value.uuid)) + .set_uri(value.uri) + .set_heartbeat_interval(value.heartbeat_interval) + .set_connection_retry_count(value.connection_retry_count) + .set_connection_retry_interval(value.connection_retry_interval) + .set_max_time(value.max_time) + .set_max_envelope_size(value.max_envelope_size) + .set_enabled(value.enabled) + .set_read_existing_events(value.read_existing_events) + .set_content_format(value.content_format.into()) + .set_ignore_channel_error(value.ignore_channel_error) + .set_princs_filter(value.filter.into()) + .set_locale(value.locale) + .set_data_locale(value.data_locale) + .set_outputs(value.outputs.iter().map(|s| s.clone().into()).collect()) + .set_revision(value.revision); + // Note: internal version is not exported nor set + data + } + } + + impl From for SubscriptionData { + fn from(value: crate::subscription::SubscriptionData) -> Self { + // Note: internal version is not exported nor set + Self { + uuid: value.uuid().0, + name: value.name().to_string(), + uri: value.uri().cloned(), + revision: value.revision().cloned(), + query: value.query().to_string(), + heartbeat_interval: value.heartbeat_interval(), + connection_retry_count: value.connection_retry_count(), + connection_retry_interval: value.connection_retry_interval(), + max_time: value.max_time(), + max_envelope_size: value.max_envelope_size(), + enabled: value.enabled(), + read_existing_events: value.read_existing_events(), + content_format: value.content_format().to_owned().into(), + ignore_channel_error: value.ignore_channel_error(), + locale: value.locale().cloned(), + data_locale: value.data_locale().cloned(), + filter: value.princs_filter().clone().into(), + outputs: 
value.outputs().iter().map(|o| o.clone().into()).collect(), + } + } + } + + #[derive(Debug, PartialEq, Clone, Eq, Deserialize, Serialize)] + pub(super) struct Subscriptions { + pub subscriptions: Vec, + } + + impl From for Vec { + fn from(value: Subscriptions) -> Self { + value + .subscriptions + .iter() + .map(|s| s.clone().into()) + .collect() + } + } + + impl From<&[crate::subscription::SubscriptionData]> for Subscriptions { + fn from(value: &[crate::subscription::SubscriptionData]) -> Self { + Self { + subscriptions: value.iter().map(|s| s.clone().into()).collect(), + } + } + } +} + +#[cfg(test)] +mod tests { + use anyhow::Result; + use std::collections::HashSet; + + use super::{parse, serialize}; + + #[test] + fn test_export_import() -> Result<()> { + let mut subscription = + crate::subscription::SubscriptionData::new("my-subscription", "my-query"); + let mut princs = HashSet::new(); + princs.insert("courgette@WINDOMAIN.LOCAL".to_string()); + princs.insert("boulette@WINDOMAIN.LOCAL".to_string()); + + subscription + .set_content_format(crate::subscription::ContentFormat::RenderedText) + .set_connection_retry_count(10) + .set_max_time(5) + .set_connection_retry_interval(1) + .set_heartbeat_interval(1000) + .set_ignore_channel_error(false) + .set_max_envelope_size(10000) + .set_max_time(1) + .set_read_existing_events(false) + .set_uri(Some("toto".to_string())) + .set_princs_filter(crate::subscription::PrincsFilter::new( + Some(crate::subscription::PrincsFilterOperation::Except), + princs, + )) + .set_outputs(vec![crate::subscription::SubscriptionOutput::new( + crate::subscription::SubscriptionOutputFormat::Json, + crate::subscription::SubscriptionOutputDriver::Tcp( + crate::subscription::TcpConfiguration::new("127.0.0.1".to_string(), 5000), + ), + true, + )]) + .set_revision(Some("1234".to_string())); + + let subscriptions = vec![subscription.clone()]; + let content = serialize(&subscriptions)?; + + let mut imported_subscriptions = parse(&content)?; + 
assert_eq!(imported_subscriptions.len(), 1); + + let mut imported_subscription = imported_subscriptions.pop().unwrap(); + + // Internal version is generated randomly during import, so we need to set it + // to be able to compare + imported_subscription.set_internal_version(subscription.internal_version()); + + assert_eq!(subscription, imported_subscription); + + Ok(()) + } +} diff --git a/common/src/models/mod.rs b/common/src/models/mod.rs new file mode 100644 index 0000000..15ad040 --- /dev/null +++ b/common/src/models/mod.rs @@ -0,0 +1,2 @@ +pub mod config; +pub mod export; \ No newline at end of file diff --git a/common/src/settings.rs b/common/src/settings.rs index 209e81a..b7e3774 100644 --- a/common/src/settings.rs +++ b/common/src/settings.rs @@ -20,6 +20,7 @@ pub enum Database { } #[derive(Debug, Deserialize, Clone)] +#[serde(deny_unknown_fields)] pub struct Tls { server_certificate: String, server_private_key: String, @@ -41,12 +42,15 @@ impl Tls { } #[derive(Debug, Deserialize, Clone)] +#[serde(deny_unknown_fields)] pub struct Collector { hostname: String, listen_address: String, listen_port: Option, max_content_length: Option, authentication: Authentication, + enable_proxy_protocol: Option, + advertized_port: Option, } impl Collector { @@ -65,12 +69,22 @@ impl Collector { pub fn max_content_length(&self) -> u64 { self.max_content_length.unwrap_or(512_000) } + pub fn authentication(&self) -> &Authentication { &self.authentication } + + pub fn enable_proxy_protocol(&self) -> bool { + self.enable_proxy_protocol.unwrap_or(false) + } + + pub fn advertized_port(&self) -> u16 { + self.advertized_port.unwrap_or_else(|| self.listen_port()) + } } #[derive(Debug, Deserialize, Clone)] +#[serde(deny_unknown_fields)] pub struct Kerberos { service_principal_name: String, } @@ -88,6 +102,7 @@ impl Kerberos { } #[derive(Debug, Deserialize, Clone)] +#[serde(deny_unknown_fields)] pub struct SQLite { path: String, } @@ -107,6 +122,7 @@ pub enum PostgresSslMode { } 
#[derive(Debug, Deserialize, Clone)] +#[serde(deny_unknown_fields)] pub struct Postgres { host: String, port: u16, @@ -120,6 +136,7 @@ pub struct Postgres { } impl Postgres { + #[cfg(test)] pub fn new( host: &str, port: u16, @@ -197,7 +214,8 @@ impl FromStr for LoggingType { } } -#[derive(Debug, Deserialize, Clone)] +#[derive(Debug, Deserialize, Clone, Default)] +#[serde(deny_unknown_fields)] pub struct Logging { verbosity: Option, access_logs: Option, @@ -237,7 +255,8 @@ impl Logging { } } -#[derive(Debug, Deserialize, Clone)] +#[derive(Debug, Deserialize, Clone, Default)] +#[serde(deny_unknown_fields)] pub struct Server { db_sync_interval: Option, flush_heartbeats_interval: Option, @@ -283,12 +302,32 @@ impl Server { } } +#[derive(Debug, Deserialize, Clone, Default)] +#[serde(deny_unknown_fields)] +pub struct Cli { + // When set, subscriptions can only be written using + // openwec subscriptions load`, defaults to false. + #[serde(default)] + read_only_subscriptions: bool, +} + +impl Cli { + pub fn read_only_subscriptions(&self) -> bool { + self.read_only_subscriptions + } +} + #[derive(Debug, Deserialize, Clone)] +#[serde(deny_unknown_fields)] pub struct Settings { collectors: Vec, database: Database, + #[serde(default)] server: Server, + #[serde(default)] logging: Logging, + #[serde(default)] + cli: Cli, } impl std::str::FromStr for Settings { @@ -322,6 +361,10 @@ impl Settings { pub fn logging(&self) -> &Logging { &self.logging } + + pub fn cli(&self) -> &Cli { + &self.cli + } } #[cfg(test)] @@ -335,10 +378,6 @@ mod tests { tcp_keepalive_intvl = 1 tcp_keepalive_probes = 10 - [logging] - verbosity = "debug" - server_logs = "stdout" - [database] type = "SQLite" path = "/tmp/toto.sqlite" @@ -348,6 +387,8 @@ mod tests { listen_address = "0.0.0.0" listen_port = 5986 max_content_length = 1000 + enable_proxy_protocol = true + advertized_port = 15986 [collectors.authentication] type = "Kerberos" @@ -363,6 +404,8 @@ mod tests { assert_eq!(collector.listen_address(), 
"0.0.0.0"); assert_eq!(collector.listen_port(), 5986); assert_eq!(collector.max_content_length(), 1000); + assert_eq!(collector.enable_proxy_protocol(), true); + assert_eq!(collector.advertized_port(), 15986); let kerberos = match collector.authentication() { Authentication::Kerberos(kerb) => kerb, @@ -381,19 +424,18 @@ mod tests { assert_eq!(sqlite.path(), "/tmp/toto.sqlite"); - assert_eq!(s.logging().verbosity().unwrap(), "debug"); + assert!(s.logging().verbosity().is_none()); assert!(s.logging().access_logs().is_none()); - assert_eq!(s.logging().server_logs(), LoggingType::Stdout); + assert_eq!(s.logging().server_logs(), LoggingType::Stderr); assert_eq!(s.server().tcp_keepalive_time(), 3600); assert_eq!(s.server().tcp_keepalive_intvl().unwrap(), 1); assert_eq!(s.server().tcp_keepalive_probes().unwrap(), 10); } const CONFIG_TLS_POSTGRES: &str = r#" - [server] - [logging] access_logs = "/tmp/toto" + server_logs = "stdout" server_logs_pattern = "toto" access_logs_pattern = "tutu" @@ -426,6 +468,8 @@ mod tests { // Checks default values assert_eq!(collector.listen_port(), 5985); assert_eq!(collector.max_content_length(), 512_000); + assert_eq!(collector.enable_proxy_protocol(), false); + assert_eq!(collector.advertized_port(), 5985); let tls = match collector.authentication() { Authentication::Tls(tls) => tls, @@ -451,11 +495,93 @@ mod tests { s.logging().access_logs(), Some(LoggingType::File("/tmp/toto".to_string())) ); - assert_eq!(s.logging().server_logs(), LoggingType::Stderr,); + assert_eq!(s.logging().server_logs(), LoggingType::Stdout); assert_eq!(s.logging().server_logs_pattern().unwrap(), "toto"); assert_eq!(s.logging().access_logs_pattern(), "tutu"); assert_eq!(s.server().tcp_keepalive_time(), 7200); assert!(s.server().tcp_keepalive_intvl().is_none()); assert!(s.server().tcp_keepalive_probes().is_none()); + assert_eq!(s.cli().read_only_subscriptions(), false); + } + + const CONFIG_TLS_POSTGRES_WITH_CLI: &str = r#" + [logging] + access_logs = "/tmp/toto" + 
server_logs_pattern = "toto" + access_logs_pattern = "tutu" + + [database] + type = "Postgres" + host = "localhost" + port = 26257 + dbname = "test" + user = "root" + password = "" + + [[collectors]] + hostname = "wec.windomain.local" + listen_address = "0.0.0.0" + + [collectors.authentication] + type = "Tls" + server_certificate = "/etc/server_certificate.pem" + server_private_key = "/etc/server_private_key.pem" + ca_certificate = "/etc/ca_certificate.pem" + + [cli] + read_only_subscriptions = true + "#; + + #[test] + fn test_settings_tls_postgres_with_cli() { + let s = Settings::from_str(CONFIG_TLS_POSTGRES_WITH_CLI).unwrap(); + assert_eq!(s.cli().read_only_subscriptions(), true); + } + + const GETTING_STARTED: &str = r#" + [server] + keytab = "/etc/wec.windomain.local.keytab" + + [database] + type = "SQLite" + # You need to create /var/db/openwec yourself + path = "/var/db/openwec/db.sqlite" + + [[collectors]] + hostname = "wec.windomain.local" + listen_address = "0.0.0.0" + + [collectors.authentication] + type = "Kerberos" + service_principal_name = "http/wec.windomain.local@WINDOMAIN.LOCAL" + "#; + + #[test] + fn test_getting_started() { + let s = Settings::from_str(GETTING_STARTED).unwrap(); + assert_eq!(s.collectors().len(), 1); + let collector = &s.collectors()[0]; + assert_eq!(collector.hostname(), "wec.windomain.local"); + assert_eq!(collector.listen_address(), "0.0.0.0"); + + let kerberos = match collector.authentication() { + Authentication::Kerberos(kerb) => kerb, + _ => panic!("Wrong authentication type"), + }; + assert_eq!( + s.server().keytab().unwrap(), + "/etc/wec.windomain.local.keytab" + ); + assert_eq!( + kerberos.service_principal_name(), + "http/wec.windomain.local@WINDOMAIN.LOCAL" + ); + + let sqlite = match s.database() { + Database::SQLite(sqlite) => sqlite, + _ => panic!("Wrong database type"), + }; + + assert_eq!(sqlite.path(), "/var/db/openwec/db.sqlite"); } } diff --git a/common/src/subscription.rs b/common/src/subscription.rs index 
4682af7..b3fd1cd 100644 --- a/common/src/subscription.rs +++ b/common/src/subscription.rs @@ -1,16 +1,33 @@ use std::{ collections::{HashMap, HashSet}, fmt::{Display, Formatter}, + hash::{Hash, Hasher}, str::FromStr, }; -use crate::utils::new_uuid; -use anyhow::{anyhow, bail, Error, Result}; +use anyhow::{anyhow, bail, Result}; use log::{info, warn}; use serde::{Deserialize, Serialize}; +use strum::{AsRefStr, EnumString, VariantNames}; use uuid::Uuid; -#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)] +use crate::utils::VersionHasher; + +pub const DEFAULT_HEARTBEAT_INTERVAL: u32 = 3_600; +pub const DEFAULT_CONNECTION_RETRY_COUNT: u16 = 5; +pub const DEFAULT_CONNECTION_RETRY_INTERVAL: u32 = 60; +pub const DEFAULT_MAX_TIME: u32 = 30; +pub const DEFAULT_MAX_ENVELOPE_SIZE: u32 = 512_000; +pub const DEFAULT_READ_EXISTING_EVENTS: bool = false; +pub const DEFAULT_CONTENT_FORMAT: ContentFormat = ContentFormat::Raw; +pub const DEFAULT_IGNORE_CHANNEL_ERROR: bool = true; +pub const DEFAULT_ENABLED: bool = true; + +pub const DEFAULT_FILE_APPEND_NODE_NAME: bool = false; +pub const DEFAULT_FILE_NAME: &str = "messages"; +pub const DEFAULT_OUTPUT_ENABLED: bool = true; + +#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] pub struct KafkaConfiguration { topic: String, options: HashMap, @@ -32,14 +49,14 @@ impl KafkaConfiguration { } } -#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)] +#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] pub struct RedisConfiguration { addr: String, list: String, } impl RedisConfiguration { - pub fn new(addr:String, list: String) -> Self { + pub fn new(addr: String, list: String) -> Self { RedisConfiguration { addr, list } } @@ -52,10 +69,9 @@ impl RedisConfiguration { pub fn addr(&self) -> &str { self.addr.as_ref() } - } -#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)] +#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] pub struct TcpConfiguration { addr: String, port: u16, 
@@ -78,8 +94,8 @@ impl TcpConfiguration { // File storage path format is: // ///[/]/ // can be splitted (depends of split_on_addr_index) -#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)] -pub struct FileConfiguration { +#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] +pub struct FilesConfiguration { base: String, // None => don't split // Some(n) => Split starting on the n-th segment (IPv4 and IPv6) @@ -89,7 +105,7 @@ pub struct FileConfiguration { filename: String, } -impl FileConfiguration { +impl FilesConfiguration { pub fn new( base: String, split_on_addr_index: Option, @@ -121,7 +137,7 @@ impl FileConfiguration { } } -#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)] +#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] pub struct UnixDatagramConfiguration { path: String, } @@ -136,127 +152,86 @@ impl UnixDatagramConfiguration { } } -#[derive(Debug, Serialize, Clone, Eq, PartialEq, Deserialize)] -pub enum SubscriptionOutput { - // The last bool indicates whether the output is enabled or not. 
- Files(SubscriptionOutputFormat, FileConfiguration, bool), - Kafka(SubscriptionOutputFormat, KafkaConfiguration, bool), - Tcp(SubscriptionOutputFormat, TcpConfiguration, bool), - Redis(SubscriptionOutputFormat, RedisConfiguration, bool), - UnixDatagram(SubscriptionOutputFormat, UnixDatagramConfiguration, bool), +#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize, AsRefStr)] +#[strum(serialize_all = "lowercase")] +pub enum SubscriptionOutputDriver { + Files(FilesConfiguration), + Kafka(KafkaConfiguration), + Tcp(TcpConfiguration), + Redis(RedisConfiguration), + UnixDatagram(UnixDatagramConfiguration), +} + +#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] +pub struct SubscriptionOutput { + format: SubscriptionOutputFormat, + driver: SubscriptionOutputDriver, + enabled: bool, } impl SubscriptionOutput { - pub fn format(&self) -> &SubscriptionOutputFormat { - match self { - SubscriptionOutput::Files(format, _, _) => format, - SubscriptionOutput::Kafka(format, _, _) => format, - SubscriptionOutput::Tcp(format, _, _) => format, - SubscriptionOutput::Redis(format, _, _) => format, - SubscriptionOutput::UnixDatagram(format, _, _) => format, + pub fn new( + format: SubscriptionOutputFormat, + driver: SubscriptionOutputDriver, + enabled: bool, + ) -> Self { + Self { + format, + driver, + enabled, } } + pub fn format(&self) -> &SubscriptionOutputFormat { + &self.format + } - pub fn is_enabled(&self) -> bool { - match self { - SubscriptionOutput::Files(_, _, enabled) => *enabled, - SubscriptionOutput::Kafka(_, _, enabled) => *enabled, - SubscriptionOutput::Tcp(_, _, enabled) => *enabled, - SubscriptionOutput::Redis(_, _, enabled) => *enabled, - SubscriptionOutput::UnixDatagram(_, _, enabled) => *enabled, - } + pub fn enabled(&self) -> bool { + self.enabled } - pub fn set_enabled(&self, value: bool) -> SubscriptionOutput { - match self { - SubscriptionOutput::Files(format, config, _) => { - SubscriptionOutput::Files(format.clone(), config.clone(), 
value) - } - SubscriptionOutput::Kafka(format, config, _) => { - SubscriptionOutput::Kafka(format.clone(), config.clone(), value) - } - SubscriptionOutput::Tcp(format, config, _) => { - SubscriptionOutput::Tcp(format.clone(), config.clone(), value) - } - SubscriptionOutput::Redis(format, config, _) => { - SubscriptionOutput::Redis(format.clone(), config.clone(), value) - } - SubscriptionOutput::UnixDatagram(format, config, _) => { - SubscriptionOutput::UnixDatagram(format.clone(), config.clone(), value) - } - } + pub fn set_enabled(&mut self, value: bool) { + self.enabled = value; + } + + pub fn driver(&self) -> &SubscriptionOutputDriver { + &self.driver } } impl Display for SubscriptionOutput { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - match self { - SubscriptionOutput::Files(format, config, enabled) => { - write!( - f, - "Enabled: {:?}, Format: {}, Output: Files({:?})", - enabled, format, config - ) - } - SubscriptionOutput::Kafka(format, config, enabled) => { - write!( - f, - "Enabled: {:?}, Format: {}, Output: Kafka({:?})", - enabled, format, config - ) - } - SubscriptionOutput::Tcp(format, config, enabled) => { - write!( - f, - "Enabled: {:?}, Format: {}, Output: Tcp({}:{})", - enabled, format, config.addr, config.port - ) - } - SubscriptionOutput::Redis(format, config, enabled) => { - write!( - f, - "Enabled: {:?}, Format: {}, Output: Redis({:?})", - enabled, format, config - ) - } - SubscriptionOutput::UnixDatagram(format, config, enabled) => { - write!( - f, - "Enabled: {:?}, Format: {}, Output: UnixDatagram({:?})", - enabled, format, config - ) - } - } + write!( + f, + "Enabled: {:?}, Format: {}, Driver: {:?}", + self.enabled, + self.format.as_ref(), + self.driver + ) } } - -#[derive(Debug, Serialize, Clone, Eq, PartialEq, Deserialize)] +#[derive( + Debug, Clone, Eq, PartialEq, Serialize, Deserialize, Hash, VariantNames, AsRefStr, EnumString, +)] +#[strum(serialize_all = "snake_case", ascii_case_insensitive)] pub enum 
SubscriptionOutputFormat { Json, Raw, + RawJson, } -impl Display for SubscriptionOutputFormat { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - match *self { - SubscriptionOutputFormat::Json => write!(f, "Json"), - SubscriptionOutputFormat::Raw => write!(f, "Raw"), +impl SubscriptionOutputFormat { + /// Whether the output format needs to be given a parsed version + /// of the event. + pub fn needs_parsed_event(&self) -> bool { + match self { + SubscriptionOutputFormat::Raw => false, + SubscriptionOutputFormat::RawJson => false, + SubscriptionOutputFormat::Json => true, } } } -impl TryFrom for SubscriptionOutputFormat { - type Error = Error; - fn try_from(value: u8) -> Result { - Ok(match value { - 0 => SubscriptionOutputFormat::Json, - 1 => SubscriptionOutputFormat::Raw, - _ => bail!("Unknown subscription output format {}", value), - }) - } -} - -#[derive(Debug, Serialize, Clone, Eq, PartialEq, Deserialize)] +#[derive(Debug, Clone, Eq, PartialEq)] pub enum PrincsFilterOperation { Only, Except, @@ -285,7 +260,7 @@ impl PrincsFilterOperation { } } -#[derive(Debug, Serialize, Clone, Eq, PartialEq, Deserialize)] +#[derive(Debug, Clone, Eq, PartialEq)] pub struct PrincsFilter { operation: Option, princs: HashSet, @@ -299,6 +274,10 @@ impl PrincsFilter { } } + pub fn new(operation: Option, princs: HashSet) -> Self { + Self { operation, princs } + } + pub fn from(operation: Option, princs: Option) -> Result { Ok(PrincsFilter { operation: match operation { @@ -370,7 +349,7 @@ impl PrincsFilter { } } -#[derive(Debug, Serialize, Clone, Eq, PartialEq, Deserialize)] +#[derive(Debug, Clone, Eq, PartialEq, Hash)] pub enum ContentFormat { Raw, RenderedText, @@ -399,34 +378,101 @@ impl FromStr for ContentFormat { } } -#[derive(Debug, PartialEq, Clone, Eq, Serialize, Deserialize)] +#[derive(Debug, PartialEq, Clone, Eq, Hash, Copy, Serialize)] +pub struct SubscriptionUuid(pub Uuid); + +impl Display for SubscriptionUuid { + fn fmt(&self, f: &mut Formatter<'_>) -> 
std::fmt::Result { + self.0.fmt(f) + } +} +// Internal version and public version are both uuids +// We use the newtype pattern so that the compiler can check that +// we don't use one instead of the other + +#[derive(Debug, PartialEq, Clone, Eq, Hash, Copy)] +pub struct InternalVersion(pub Uuid); + +impl Display for InternalVersion { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + self.0.fmt(f) + } +} + +#[derive(Debug, PartialEq, Clone, Eq, Default, Hash, Copy)] +pub struct PublicVersion(pub Uuid); + +impl Display for PublicVersion { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + self.0.fmt(f) + } +} + +/// Contains subscription parameters visible for clients +/// When one element of this structure changes, the "public" version +/// of the subscription is updated and clients are expected to update +/// their configuration. +/// Every elements must implement the Hash trait +#[derive(Debug, PartialEq, Clone, Eq, Hash)] +pub struct SubscriptionParameters { + pub name: String, + pub query: String, + pub heartbeat_interval: u32, + pub connection_retry_count: u16, + pub connection_retry_interval: u32, + pub max_time: u32, + pub max_envelope_size: u32, + pub read_existing_events: bool, + pub content_format: ContentFormat, + pub ignore_channel_error: bool, + pub locale: Option, + pub data_locale: Option, +} + +#[derive(Debug, PartialEq, Clone, Eq)] pub struct SubscriptionData { - #[serde(default = "new_uuid")] - uuid: String, - #[serde(default = "new_uuid")] - version: String, - name: String, + // Unique identifier of the subscription + uuid: SubscriptionUuid, + // Internal version, NOT the version of the subscription sent to clients + // It is generated when the subscription is created and updated every time + // there is a change in the subscription. + // Its goal is to synchronize the configuration of the subscription between + // all openwec nodes. + internal_version: InternalVersion, + // Optional revision name of the subscription. 
Can be set using + // openwec subscriptions load <...> + revision: Option, + // Optional URI on which subscription will be shown uri: Option, - query: String, - heartbeat_interval: u32, - connection_retry_count: u16, - connection_retry_interval: u32, - max_time: u32, - max_envelope_size: u32, + // Enable or disable the subscription enabled: bool, - read_existing_events: bool, - content_format: ContentFormat, - ignore_channel_error: bool, + // Configure which principal can see the subscription princs_filter: PrincsFilter, - #[serde(default)] + // Public parameters of the subscriptions. This structure is used + // to compute the public subscription version sent to clients. + parameters: SubscriptionParameters, + // Outputs of the subscription outputs: Vec, } impl Display for SubscriptionData { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - writeln!(f, "Subscription {}", self.name)?; + writeln!(f, "Subscription {}", self.name())?; writeln!(f, "\tUUID: {}", self.uuid())?; - writeln!(f, "\tVersion: {}", self.version())?; + writeln!(f, "\tInternal version: {}", self.internal_version())?; + writeln!( + f, + "\tPublic version: {}", + self.public_version().unwrap_or_default() + )?; + writeln!( + f, + "\tRevision: {}", + match self.revision() { + Some(revision) => revision, + None => "Not configured", + } + )?; writeln!( f, "\tURI: {}", @@ -452,9 +498,25 @@ impl Display for SubscriptionData { self.max_time() )?; writeln!(f, "\tMax envelope size: {} bytes", self.max_envelope_size())?; - writeln!(f, "\tReadExistingEvents: {}", self.read_existing_events)?; + writeln!(f, "\tRead existing events: {}", self.read_existing_events())?; writeln!(f, "\tContent format: {}", self.content_format())?; writeln!(f, "\tIgnore channel error: {}", self.ignore_channel_error())?; + writeln!( + f, + "\tLocale: {}", + match self.locale() { + Some(locale) => locale, + None => "Not configured", + } + )?; + writeln!( + f, + "\tData Locale: {}", + match self.data_locale() { + 
Some(data_locale) => data_locale, + None => "Not configured", + } + )?; match self.princs_filter().operation() { None => { writeln!(f, "\tPrincipal filter: Not configured")?; @@ -476,103 +538,36 @@ impl Display for SubscriptionData { writeln!(f, "\t- {}: {}", index, output)?; } } - writeln!(f, "\tEnabled: {}", self.enabled) + writeln!(f, "\tEnabled: {}", self.enabled)?; + writeln!(f, "\tEvent filter query:\n\n{}", self.query()) } } impl SubscriptionData { - pub fn empty() -> Self { - SubscriptionData { - uuid: Uuid::new_v4().to_string().to_ascii_uppercase(), - version: Uuid::new_v4().to_string().to_ascii_uppercase(), - name: String::new(), + pub fn new(name: &str, query: &str) -> Self { + Self { + uuid: SubscriptionUuid(Uuid::new_v4()), + internal_version: InternalVersion(Uuid::new_v4()), + revision: None, uri: None, - query: String::new(), - heartbeat_interval: 3_600, - connection_retry_count: 5, - connection_retry_interval: 60, - max_time: 30, - max_envelope_size: 512_000, - enabled: true, - read_existing_events: false, - content_format: ContentFormat::Raw, - ignore_channel_error: true, + enabled: DEFAULT_ENABLED, princs_filter: PrincsFilter::empty(), outputs: Vec::new(), - } - } - - pub fn new( - name: &str, - uri: Option<&str>, - query: &str, - heartbeat_interval: Option<&u32>, - connection_retry_count: Option<&u16>, - connection_retry_interval: Option<&u32>, - max_time: Option<&u32>, - max_envelope_size: Option<&u32>, - enabled: bool, - read_existing_events: bool, - content_format: ContentFormat, - ignore_channel_error: bool, - princs_filter: PrincsFilter, - outputs: Option>, - ) -> Self { - SubscriptionData { - uuid: Uuid::new_v4().to_string().to_ascii_uppercase(), - version: Uuid::new_v4().to_string().to_ascii_uppercase(), - name: name.to_owned(), - uri: uri.map(|e| e.to_string()), - query: query.to_owned(), - heartbeat_interval: *heartbeat_interval.unwrap_or(&3_600), - connection_retry_count: *connection_retry_count.unwrap_or(&5), - 
connection_retry_interval: *connection_retry_interval.unwrap_or(&60), - max_time: *max_time.unwrap_or(&30), - max_envelope_size: *max_envelope_size.unwrap_or(&512_000), - enabled, - read_existing_events, - content_format, - ignore_channel_error, - princs_filter, - outputs: outputs.unwrap_or_default(), - } - } - - pub fn from( - uuid: String, - version: String, - name: String, - uri: Option, - query: String, - heartbeat_interval: u32, - connection_retry_count: u16, - connection_retry_interval: u32, - max_time: u32, - max_envelope_size: u32, - enabled: bool, - read_existing_events: bool, - content_format: ContentFormat, - ignore_channel_error: bool, - princs_filter: PrincsFilter, - outputs: Vec, - ) -> Self { - SubscriptionData { - uuid, - version, - name, - uri, - query, - heartbeat_interval, - connection_retry_count, - connection_retry_interval, - max_time, - max_envelope_size, - enabled, - read_existing_events, - content_format, - ignore_channel_error, - princs_filter, - outputs, + parameters: SubscriptionParameters { + name: name.to_string(), + query: query.to_string(), + // Defaults + heartbeat_interval: DEFAULT_HEARTBEAT_INTERVAL, + connection_retry_count: DEFAULT_CONNECTION_RETRY_COUNT, + connection_retry_interval: DEFAULT_CONNECTION_RETRY_INTERVAL, + max_time: DEFAULT_MAX_TIME, + max_envelope_size: DEFAULT_MAX_ENVELOPE_SIZE, + read_existing_events: DEFAULT_READ_EXISTING_EVENTS, + content_format: DEFAULT_CONTENT_FORMAT, + ignore_channel_error: DEFAULT_IGNORE_CHANNEL_ERROR, + locale: None, + data_locale: None, + }, } } @@ -584,7 +579,7 @@ impl SubscriptionData { res.push_str("[-] "); } - res.push_str(format!("{} ", self.name).as_str()); + res.push_str(format!("{} ", self.name()).as_str()); if let Some(uri) = &self.uri { res.push_str(format!("({})", uri).as_str()); } else { @@ -594,100 +589,116 @@ impl SubscriptionData { res } - pub fn update_version(&mut self) { - self.version = Uuid::new_v4().to_string().to_ascii_uppercase(); - } - pub fn update_uuid(&mut 
self) { // This should only be used when duplicating an existing subscription - self.uuid = Uuid::new_v4().to_string().to_ascii_uppercase(); + self.uuid = SubscriptionUuid(Uuid::new_v4()); + } + + pub fn set_uuid(&mut self, uuid: SubscriptionUuid) -> &mut Self { + self.uuid = uuid; + self } /// Get a reference to the subscription's uuid. - pub fn uuid(&self) -> &str { - self.uuid.as_ref() + pub fn uuid(&self) -> &SubscriptionUuid { + &self.uuid + } + + pub fn uuid_string(&self) -> String { + self.uuid.to_string().to_uppercase() } - /// Get a reference to the subscription's version. - pub fn version(&self) -> &str { - self.version.as_ref() + /// Compute the subscription's public version + pub fn public_version(&self) -> Result { + let mut hasher = VersionHasher::new()?; + self.parameters.hash(&mut hasher); + // hasher only gives a u64, but it is enough for this usage + let result = hasher.finish(); + Ok(PublicVersion(Uuid::from_u64_pair(result, result))) } /// Get a reference to the subscription's name. pub fn name(&self) -> &str { - self.name.as_ref() + self.parameters.name.as_ref() } /// Get a reference to the subscription's heartbeat interval. pub fn heartbeat_interval(&self) -> u32 { - self.heartbeat_interval + self.parameters.heartbeat_interval } /// Get a reference to the subscription's connection retry count. pub fn connection_retry_count(&self) -> u16 { - self.connection_retry_count + self.parameters.connection_retry_count } /// Get a reference to the subscription's connection retry interval. pub fn connection_retry_interval(&self) -> u32 { - self.connection_retry_interval + self.parameters.connection_retry_interval } /// Get a reference to the subscription's max time. pub fn max_time(&self) -> u32 { - self.max_time + self.parameters.max_time } /// Get a reference to the subscription's max envelope size. pub fn max_envelope_size(&self) -> u32 { - self.max_envelope_size + self.parameters.max_envelope_size } /// Get a reference to the subscription's query. 
pub fn query(&self) -> &str { - self.query.as_ref() + self.parameters.query.as_ref() } /// Set the subscription's name. - pub fn set_name(&mut self, name: String) { - self.name = name; - self.update_version(); + pub fn set_name(&mut self, name: String) -> &mut Self { + self.parameters.name = name; + self.update_internal_version(); + self } /// Set the subscription's query. - pub fn set_query(&mut self, query: String) { - self.query = query; - self.update_version(); + pub fn set_query(&mut self, query: String) -> &mut Self { + self.parameters.query = query; + self.update_internal_version(); + self } /// Set the subscription's heartbeat interval. - pub fn set_heartbeat_interval(&mut self, heartbeat_interval: u32) { - self.heartbeat_interval = heartbeat_interval; - self.update_version(); + pub fn set_heartbeat_interval(&mut self, heartbeat_interval: u32) -> &mut Self { + self.parameters.heartbeat_interval = heartbeat_interval; + self.update_internal_version(); + self } /// Set the subscription's connection retry count. - pub fn set_connection_retry_count(&mut self, connection_retry_count: u16) { - self.connection_retry_count = connection_retry_count; - self.update_version(); + pub fn set_connection_retry_count(&mut self, connection_retry_count: u16) -> &mut Self { + self.parameters.connection_retry_count = connection_retry_count; + self.update_internal_version(); + self } /// Set the subscription's connection retry interval. - pub fn set_connection_retry_interval(&mut self, connection_retry_interval: u32) { - self.connection_retry_interval = connection_retry_interval; - self.update_version(); + pub fn set_connection_retry_interval(&mut self, connection_retry_interval: u32) -> &mut Self { + self.parameters.connection_retry_interval = connection_retry_interval; + self.update_internal_version(); + self } /// Set the subscription's max time. 
- pub fn set_max_time(&mut self, max_time: u32) { - self.max_time = max_time; - self.update_version(); + pub fn set_max_time(&mut self, max_time: u32) -> &mut Self { + self.parameters.max_time = max_time; + self.update_internal_version(); + self } /// Set the subscription's max envelope size. - pub fn set_max_envelope_size(&mut self, max_envelope_size: u32) { - self.max_envelope_size = max_envelope_size; - self.update_version(); + pub fn set_max_envelope_size(&mut self, max_envelope_size: u32) -> &mut Self { + self.parameters.max_envelope_size = max_envelope_size; + self.update_internal_version(); + self } /// Get a reference to the subscription's outputs. @@ -699,54 +710,65 @@ impl SubscriptionData { self.enabled } - pub fn set_enabled(&mut self, enabled: bool) { + pub fn set_enabled(&mut self, enabled: bool) -> &mut Self { self.enabled = enabled; - self.update_version(); + self.update_internal_version(); + self } pub fn read_existing_events(&self) -> bool { - self.read_existing_events + self.parameters.read_existing_events } - pub fn set_read_existing_events(&mut self, read_existing_events: bool) { - self.read_existing_events = read_existing_events; - self.update_version(); + pub fn set_read_existing_events(&mut self, read_existing_events: bool) -> &mut Self { + self.parameters.read_existing_events = read_existing_events; + self.update_internal_version(); + self } pub fn content_format(&self) -> &ContentFormat { - &self.content_format + &self.parameters.content_format } - pub fn set_content_format(&mut self, content_format: ContentFormat) { - self.content_format = content_format; - self.update_version(); + pub fn set_content_format(&mut self, content_format: ContentFormat) -> &mut Self { + self.parameters.content_format = content_format; + self.update_internal_version(); + self } pub fn ignore_channel_error(&self) -> bool { - self.ignore_channel_error + self.parameters.ignore_channel_error + } + + pub fn set_ignore_channel_error(&mut self, ignore_channel_error: 
bool) -> &mut Self { + self.parameters.ignore_channel_error = ignore_channel_error; + self.update_internal_version(); + self } - pub fn set_ignore_channel_error(&mut self, ignore_channel_error: bool) { - self.ignore_channel_error = ignore_channel_error; - self.update_version(); + pub fn set_outputs(&mut self, outputs: Vec) -> &mut Self { + self.outputs = outputs; + self.update_internal_version(); + self } - pub fn add_output(&mut self, output: SubscriptionOutput) { + pub fn add_output(&mut self, output: SubscriptionOutput) -> &mut Self { self.outputs.push(output); - self.update_version(); + self.update_internal_version(); + self } - pub fn delete_output(&mut self, index: usize) -> Result<()> { + pub fn delete_output(&mut self, index: usize) -> Result<&mut Self> { if index >= self.outputs.len() { bail!("Index out of range"); } let output = self.outputs.remove(index); info!("Deleting output {:?}", output); - self.update_version(); - Ok(()) + self.update_internal_version(); + Ok(self) } - pub fn set_output_enabled(&mut self, index: usize, value: bool) -> Result<()> { + pub fn set_output_enabled(&mut self, index: usize, value: bool) -> Result<&mut Self> { if index >= self.outputs.len() { bail!("Index out of range"); } @@ -759,31 +781,33 @@ impl SubscriptionData { } else { info!("Disabling output {:?}", output); } - self.outputs[index] = output.set_enabled(value); - self.update_version(); - Ok(()) + self.outputs[index].set_enabled(value); + self.update_internal_version(); + Ok(self) } pub fn uri(&self) -> Option<&String> { self.uri.as_ref() } - pub fn set_uri(&mut self, uri: Option) { + pub fn set_uri(&mut self, uri: Option) -> &mut Self { self.uri = uri; - self.update_version(); + self.update_internal_version(); + self } pub fn is_active(&self) -> bool { - self.enabled() && self.outputs().iter().any(|output| output.is_enabled()) + self.enabled() && self.outputs().iter().any(|output| output.enabled()) } pub fn princs_filter(&self) -> &PrincsFilter { &self.princs_filter 
} - pub fn set_princs_filter(&mut self, princs_filter: PrincsFilter) { + pub fn set_princs_filter(&mut self, princs_filter: PrincsFilter) -> &mut Self { self.princs_filter = princs_filter; - self.update_version(); + self.update_internal_version(); + self } pub fn is_active_for(&self, principal: &str) -> bool { @@ -799,6 +823,48 @@ impl SubscriptionData { } } } + + pub fn revision(&self) -> Option<&String> { + self.revision.as_ref() + } + + pub fn set_revision(&mut self, revision: Option) -> &mut Self { + self.revision = revision; + self.update_internal_version(); + self + } + + pub fn internal_version(&self) -> InternalVersion { + self.internal_version + } + + pub fn set_internal_version(&mut self, internal_version: InternalVersion) { + self.internal_version = internal_version; + } + + pub fn update_internal_version(&mut self) { + self.internal_version = InternalVersion(Uuid::new_v4()); + } + + pub fn locale(&self) -> Option<&String> { + self.parameters.locale.as_ref() + } + + pub fn set_locale(&mut self, locale: Option) -> &mut Self { + self.parameters.locale = locale; + self.update_internal_version(); + self + } + + pub fn data_locale(&self) -> Option<&String> { + self.parameters.data_locale.as_ref() + } + + pub fn set_data_locale(&mut self, locale: Option) -> &mut Self { + self.parameters.data_locale = locale; + self.update_internal_version(); + self + } } #[derive(Debug, PartialEq, Eq)] diff --git a/common/src/utils.rs b/common/src/utils.rs index b1e4e8c..b6f8cda 100644 --- a/common/src/utils.rs +++ b/common/src/utils.rs @@ -1,14 +1,11 @@ -use anyhow::{anyhow, Result}; +use anyhow::{anyhow, Context, Result}; use chrono::{DateTime, Local, TimeZone}; +use log::warn; +use openssl::hash::MessageDigest; use serde::{ser, Serializer}; -use uuid::Uuid; pub type Timestamp = i64; -pub fn new_uuid() -> String { - format!("uuid:{}", Uuid::new_v4().to_string().to_uppercase()) -} - pub fn timestamp_to_local_date(ts: i64) -> Result> { Local .timestamp_opt(ts, 0) @@ -40,3 
+37,41 @@ where None => serializer.serialize_none(), } } + +pub struct VersionHasher { + inner: openssl::hash::Hasher, +} + +impl VersionHasher { + pub fn new() -> Result { + let hasher = Self { + inner: openssl::hash::Hasher::new(MessageDigest::sha256()) + .context("Failed to initialize openssl sha256 hasher")?, + }; + Ok(hasher) + } +} + +impl std::hash::Hasher for VersionHasher { + fn finish(&self) -> u64 { + // finish resets the internal buffer, which is not allowed + // by std::hash::Hasher trait + let mut inner_cloned = self.inner.clone(); + let hash_opt = inner_cloned.finish().ok(); + match hash_opt { + Some(hash) => { + let mut short_buf = [0u8; 8]; + short_buf.copy_from_slice(&hash[..8]); + u64::from_le_bytes(short_buf) + }, + None => + 0u64 + } + } + + fn write(&mut self, bytes: &[u8]) { + self.inner.update(bytes).unwrap_or_else(|e| { + warn!("Failed to update version hash AAA: {}", e); + }); + } +} diff --git a/doc/formats.md b/doc/formats.md index e600c83..af59c0c 100644 --- a/doc/formats.md +++ b/doc/formats.md @@ -10,6 +10,53 @@ Using this format, you get the exact event received by OpenWEC (no parsing happe The XML schema is defined in the Windows SDK (see [event.xsd](event.xsd)). +## Json Raw format + +OpenWEC can add useful metadata to events, such as: +- The Windows client principal that sent an event +- The Windows client IP address that sent an event +- The time when the event was received +- the OpenWEC subscription that received the event +- ... + +This format enables you to retrieve these data while getting the events in Raw XML format (with no parsing). 
+ +The JSON document generated uses the following structure: +```json +event := { + "meta": openwec_data, + /* Raw XML event */ + "data": string, +} + +openwec_data := { + /* IP Address of the Windows client */ + "IpAddress": string, + /* Time when the event was received by OpenWEC */ + "TimeReceived": date, + /* Principal of the Windows client */ + "Principal": string, + /* OpenWEC node that received the event. + Only present if server.node_name configuration setting is set */ + "Node": string, + "Subscription": { + "Name": string, + "Version": string, + "Uuid": string, + "Uri": string, + /* Only if revision is set for this subscription */ + "ServerRevision": string, + "ClientRevision": string + }, + /* Only in case of error during event parsing or serializing */ + "Error": { + "OriginalContent": string, + "Type": string, + "Message": string + } +} +``` + ### Json format Using this format, raw XML events are parsed and then serialized using Json. @@ -43,7 +90,10 @@ openwec_data := { "Name": string, "Version": string, "Uuid": string, - "Uri": string + "Uri": string, + /* Only if revision is set for this subscription */ + "ServerRevision": string, + "ClientRevision": string }, /* Only in case of error during event parsing or serializing */ "Error": { @@ -195,6 +245,11 @@ processing_error_data := { } ``` -## How to add a new formatter ? +## How to add a new format ? 
-TODO +- Create a new dedicated module in `server::formats` with a structure that implements `OutputFormat` +- Add a new variant to `common::subscription::SubscriptionOutputFormat` +- Fix all the compiler errors about missing variant in matches :-) +- Adapt import/export format in `common::models::export` (version don't need to be changed if only new variants are added) +- Adapt config format in `common::models::config` +- Add documentation in `doc/formats.md` \ No newline at end of file diff --git a/doc/getting_started.md b/doc/getting_started.md index ef626a8..1c67e44 100644 --- a/doc/getting_started.md +++ b/doc/getting_started.md @@ -126,13 +126,13 @@ Let's say we want to: - and send them in a Kafka topic (`my-kafka-topic`) on `localhost:9092` for further processing. We need to create 2 outputs: -* `Files` with base path `/data/logs` using the `json` formatter: +* The first one uses the `Files` driver with base path `/data/logs` and the `Json` formatter: ```bash $ openwec subscriptions edit my-test-subscription outputs add --format json files /data/logs ``` -* `Kafka` also using the `Json` formatter: +* The second one uses the `Kafka` driver and the `Json` formatter: ```bash $ openwec subscriptions edit my-test-subscription outputs add --format json kafka my-kafka-topic -o bootstrap.servers localhost:9092 @@ -209,9 +209,9 @@ To be sure that everything works well, you can: ## Going further Now that you have a basic working collector, you have multiple ways to improve your setup: -* Add additional sources in your Event query -* Customize your subscriptions parameters -* Add multiple OpenWEC nodes for redundancy and scaling. You must use PostgreSQL backend to do that (we advise using CockroachDB). You need to setup a load balancer such as Nginx in front of OpenWEC nodes. 
+* Add additional sources in your Event query and customize your subscriptions parameters +* Configure your subscriptions using configuration files (see [Subscription](subscription.md)) and version them. +* Add multiple OpenWEC nodes for redundancy and horizontal scaling. You must use PostgreSQL backend to do that (we advise using CockroachDB). You also need to setup a load balancer such as Nginx in front of OpenWEC nodes. * Use a gMSA (group Managed Service Account) instead of a standard Active Directory account (you may use [gmsad](https://github.com/cea-sec/gmsad) and [msktutil](https://github.com/msktutil/msktutil)). * Create multiple subscriptions with different URIs, for example one by tier. Thus, you can monitor efficiently that you always receive logs from Tier 0 servers. You need to link one GPO per tier with the subscription URI. diff --git a/doc/how_it_works.md b/doc/how_it_works.md index a4efd31..e40567c 100644 --- a/doc/how_it_works.md +++ b/doc/how_it_works.md @@ -26,27 +26,26 @@ See the [documentation page about subscriptions](subscription.md) for more infor ## Outputs -Each output is actually made up of two elements: -1. an output type -2. a format +Each output is composed of two elements: a **driver** and a **format**. -### Output Types +### Drivers -Output types answer the question "*what should openwec do with collected events*". +Drivers answer the question "*what should openwec do with collected events*". -Currently there are several supported output types: +Currently there are several supported drivers: * `Files`: Events are stored in files in a tree architecture. You need to provide some information, such as the base path. * `TCP`: Events are sent to a TCP server. You must specify a host and port. * `Kafka`: Events are sent in a Kafka topic. You need to specify the name of the Kafka topic and the usual Kafka settings such as *bootstrap servers*. +* `UnixDatagram`: Events are sent in a Unix domain socket. 
+* `Redis`: Events are sent in a Redis Queue. ## Formats -The OpenWEC server can parse each event and format it differently. There are currently two formatters available: +The OpenWEC server can parse each event and format it. There are several formatters available: * `Raw`: as its name suggests, it does nothing to the events. It just writes raw XML data. *Warning: each event may contain EOL characters which are neither filtered nor transformed*. * `Json`: format events in Json. Json schema is documented [there](formats.md). When using the `Json` formatter, OpenWEC parses XML events and is able to add useful data such as the Kerberos principal or the IP address that sent the event. - - +* `RawJson`: encapsulates the raw XML data in a json document. OpenWEC does not parse the XML event, but can still add useful metadata such as the Kerberos principal or the IP address that sent the event. ## Bookmarks diff --git a/doc/outputs.md b/doc/outputs.md index 12a6c74..00246c5 100644 --- a/doc/outputs.md +++ b/doc/outputs.md @@ -2,11 +2,9 @@ Outputs answer the question "*what should openwec do with collected events?*". For one subscription, you may configure multiple outputs. -Each output is in fact composed of two elements: -1. an output type -2. a format +Each output is composed of two elements: a **driver** and a **format**. -The output type determines where the event will be sent or stored, whereas the format describes how it will be formatted. Formarts are described in [Formats](formats.md). +The driver determines where the event will be sent or stored, whereas the format describes how it will be formatted. Formarts are described in [Formats](formats.md). When an event is received for one subscription, it must be processed successfully by all its outputs. If one output fails, for example if there is no space left on device for a `Files` type output, an error is returned to the client which will try to resend the event later. 
@@ -16,6 +14,10 @@ When a subscription is updated or reloaded, all its outputs instances are droppe Note: OpenWEC does not guarantee that an event will not be written multiple times. Indeed, if one output fails to write a batch of events, these events will not be acknowledged to the client that sent them and it will try to send them again later. +Subscription outputs can be configured using: +- subscription configuration files (see [Subscription](subscription.md)) +- openwec command line interface + ## Commands For each subscription, you can manipulate its outputs using `openwec subscriptions edit outputs`. @@ -28,13 +30,13 @@ This command prints the current outputs of the subscription. ``` $ openwec subscriptions edit my-subscription outputs -0: Format: Json, Output: Files(FileConfiguration { base: "/var/events/", split_on_addr_index: None, append_node_name: false, filename: "messages" }) -1: Format: Json, Output: Tcp(dc.windomain.local:12000) +0: Enabled: true, Format: Json, Driver: Files(FilesConfiguration { base: "/var/events/", split_on_addr_index: None, append_node_name: false, filename: "messages" }) +1: Enabled: true, Format: Json, Driver: Tcp(dc.windomain.local:12000) ``` The subscription `my-subscription` has two outputs configured: -* the first one is a `Files` output using `Json` format. -* the second one is a `Tcp` output using `Json` format. +* the first uses the `Files` driver and the `Json` format. +* the second one uses the `Tcp` driver and the `Json` format. The index number at the beginning of each line can be used to delete the corresponding output. @@ -42,7 +44,7 @@ The index number at the beginning of each line can be used to delete the corresp This command adds an output to a subscription. -You must specify a format (see [Formats](formats.md)) and an output type (see below). +You must specify a format (see [Formats](formats.md)) and a driver (see below). 
#### Example @@ -50,7 +52,7 @@ You must specify a format (see [Formats](formats.md)) and an output type (see be $ openwec subscriptions edit my-subscription outputs add --format json files [...] ``` -This command adds a `Files` output using `Json` format. +This command adds an output using `Files` driver and `Json` format. ### `openwec subscriptions edit outputs delete` @@ -67,11 +69,11 @@ $ openwec subscriptions edit my-subscription outputs delete 0 This command deletes the first output of the subscription `my-subscription`. -## Output types +## Drivers ### Files -This output type stores events in files on the collector filesystem. +The Files driver stores events in files on the collector filesystem. For a given subscription, all events sent by a given Windows client will be stored in the following path: ``` @@ -86,7 +88,7 @@ where: * `node_name` (optional): when you use a multi-node setup, you may want to add the node's name in the path. The node's name is configured in server settings, but you can choose to add it or not in each output settings. * `filename`: the name of the file, configured in each output settings. It defaults to `messages`. -When a `Files` output is initialized, it creates a blank hash table which will contains openned file descriptors. Therefore, each file is openned once. +When the `Files` driver is initialized, it creates a blank hash table which will contain openned file descriptors. Therefore, each file is openned once. You may want to tell OpenWEC to close all its file descriptors and to open them again. This can be done using `openwec subscriptions reload `: the subscription outputs will be reloaded at the next "subscriptions reload" tick. You may want to reload subscriptions immediatly by sending a `SIGHUP` signal to `openwecd` process after executing the `openwec subscriptions reload` command. 
@@ -118,7 +120,7 @@ $ openwec subscriptions edit my-subscription outputs add --format files ### Kafka -This output type sends events in a Kafka topic. +The Kafka driver sends events in a Kafka topic. For a given subscription, all events will be sent in the configured Kafka topic. You may want to add additionnal options to the inner Kafka client, such as `bootstrap.servers`. @@ -132,16 +134,23 @@ $ openwec subscriptions edit my-subscription outputs add --format kafka ### TCP -This output type send events in a "raw" TCP connection. +The TCP driver send events in a "raw" TCP connection. The TCP connection is established when the first event has to be sent. It is kept openned as long as possible, and re-established if required. You must provide an IP address or a hostname and a port to connect to. +#### Examples + +* Send events to a TCP server `my.server.windomain.local` using port `12000`: + +``` +$ openwec subscriptions edit my-subscription outputs add --format tcp my.server.windomain.local 12000 +``` + ### UNIX domain socket -This output type sends events to a UNIX domain socket. The output can be used to forward events to a local output for -further processing. +The Unix datagram driver sends events to a Unix domain socket of type `SOCK_DGRAM`. The connection is established when the first event has to be sent. @@ -155,24 +164,15 @@ The path of the receiver socket is the only mandatory parameter. 
$ openwec subscriptions edit my-subscription outputs add --format raw unixdatagram /run/openwec.sock ``` -#### Examples - -* Send events to a TCP server `my.server.windomain.local` using port `12000`: - -``` -$ openwec subscriptions edit my-subscription outputs add --format tcp my.server.windomain.local 12000 -``` - ### Redis -This output type sends events to a Redis list using the [LPUSH command](https://redis.io/commands/lpush/) +The Redis driver sends events to a Redis list using the [LPUSH command](https://redis.io/commands/lpush/) You must provide: - a redis server address containing the IP and port to connect to. - a list name TODO: - - [ ] implement TLS connections to redis - [ ] support redis auth - [ ] ... @@ -185,6 +185,16 @@ TODO: $ openwec subscriptions edit my-test-subscription outputs add --format redis 127.0.0.1:6377 wec ``` -## How to add a new output type ? +## How to add a new driver ? -TODO +To add an output driver, you need to: +- in `common`: + - add a new variant to `common::subscription::SubscriptionOutputDriver` with a decicated configuration structure. + - adapt `common::models::config` and `common::models::export`. +- in `server`: + - create a dedicated module in `server::drivers` that contains a struct which implements the `OutputDriver` trait. + - initialize the output in `server::output::Output::new`. +- in `cli`: + - add a subcommand to create an output using the driver in `cli::main` and handle it in `cli::subscriptions`. + - add a config template of an output using the driver in `cli::skell`. +- add documentation in `doc/outputs.md`. \ No newline at end of file diff --git a/doc/protocol.md b/doc/protocol.md index 0722541..c208bd5 100644 --- a/doc/protocol.md +++ b/doc/protocol.md @@ -900,7 +900,7 @@ As specified in MS-WSMV, when the collector receives an Enumerate message, it mu ``` -`Version` is a GUID which changes each time the Subscription is modified. +`Version` is a GUID which changes each time the Subscription is modified. 
It must be unique for each subscription. `Envelope` must contain a `SubscribeMsg`. The client then needs to extracts those `SubscribeMsg`. @@ -925,7 +925,7 @@ In its Header we have: In its Body we have: - `e:EndTo`: if there is an issue with the subscription, the client sends a `SubscriptionEnd` at the specified address. - `a:Address`: an URL on the collector specific to the current client (ended with a specific UUID). - - `a:ReferenceProperties`/`e:Identifier`: the subscription version GUID. + - `a:ReferenceProperties` contains a set of parameters that the client must send without modification to the server (cf DSP0226 1.1.0 5.1.2.1). The Windows collector sets `e:Identifier` as the subscription version GUID. - `e:Delivery`: According to Microsoft documentation, the following mode are supported: - `http://schemas.xmlsoap.org/ws/2004/08/eventing/DeliveryModes/Push`: every SOAP message contains one event, without ACK or SOAP response. Event transmission is asynchronous. - `http://schemas.dmtf.org/wbem/wsman/1/wsman/PushWithAck`: every SOAP message contains one event, each one needs to be acknowledged before the next one is sent. The sender has a waiting list of events to send. @@ -944,7 +944,7 @@ In its Body we have: ? ``` - - `e:NotifyTo`: endpoint to send events to. + - `e:NotifyTo`: endpoint to send events to. (see EndTo) - `w:MaxTime`: max time between the moment the sender starts encoding the first event and the moment it sends the batch of events. PT30.000S is equivalent to "Minimize Latency" configuration. - `w:MaxEnvelopeSize`: max size in bytes of SOAP envelopes. 
- `@Policy`: defines what to do when events are too big: diff --git a/doc/query.md b/doc/query.md index 5969c04..59eb449 100644 --- a/doc/query.md +++ b/doc/query.md @@ -36,15 +36,15 @@ We recommend that you generate a list of the channels available in your environm ### Editing the query of an existing subscription -When editing the query of an existing subscription, **you should not add a new channel**. If you do, for each client for which OpenWEC has a bookmark, you will retrieve all existing events for the newly added channel **regardless** of the subscription's `read_existing_events' parameter. You have two (bad) options: +When editing the query of an existing subscription, **you should not add a new channel**. If you do, for each client for which OpenWEC has a bookmark, you will retrieve all existing events for the newly added channel **regardless** of the subscription's `read_existing_events` parameter. You have two (bad) options: * Either accept that you will retrieve a lot of events (the number depends on the added channel). * Either delete all existing bookmarks for this subscription. If you do this, you will probably lose logs. -This is because a bookmark already exists for the client, so it will be sent when the client enumerates the subscription. However, this bookmark does not contain the newly added channel. Therefore, the Windows client (strangely) assumes that you want to retrieve all its events, including the existing ones, regardless of the `read_existing_events' setting. +This is because a bookmark already exists for the client, so it will be sent when the client enumerates the subscription. However, this bookmark does not contain the newly added channel. Therefore, the Windows client (strangely) assumes that you want to retrieve all its events, including the existing ones, regardless of the `read_existing_events` setting. ### Query size -We understand that the event filter query must retrieve events from a maximum of 256 different channels. 
+It seems that event filter queries must retrieve events from a maximum of 256 different channels. If your query contains more channels, it will be considered invalid by Windows clients. diff --git a/doc/subscription.md b/doc/subscription.md index 8f2b499..179b41b 100644 --- a/doc/subscription.md +++ b/doc/subscription.md @@ -14,9 +14,13 @@ The set of events is defined by a list of XPath filter queries. For example, her ``` -In Windows Event Forwarding protocol, a subscription is identified by its `version`, a GUID which must be updated each time changes are made to the subscription. +In Windows Event Forwarding protocol, a subscription is identified by its (public) `version`, a GUID which must be updated each time changes are made to the subscription. -In OpenWEC, each subscription has a `version`, but because `version` is updated at each modification, each subscription is actually identified by another attribute called `uuid`, which is another GUID unique to a subscription and never updated. A subscription can also be identified using its `name` (user defined). +In OpenWEC, each subscription has two versions which are both GUIDs: +- the public version is updated whenever a change that is visible to Windows clients occurs. This enables clients to know that they need to make a change. +- the internal version is updated every time a change is made to the subscription. This is used to synchronize the subscription between openwec nodes. + +In addition, each subscription is identified by a GUID called `uuid`, which is never updated. A subscription can also be identified by its user-defined `name`. Each Windows machine configured to contact a Windows Event Collector server will send an `Enumerate` request to get a list of subscriptions. It will then create locally these subscriptions and fullfill them. 
@@ -39,6 +43,8 @@ Subscriptions and their parameters are not defined in OpenWEC configuration file | `read_existing_events` | No | `False` | If `True`, the event source should replay all possible events that match the filter and any events that subsequently occur for that event source. | | `content_format` | No | `Raw` | This option determines whether rendering information are to be passed with events or not. `Raw` means that only event data will be passed without any rendering information, whereas `RenderedText` adds rendering information. | | `ignore_channel_error` | No | `true` | This option determines if various filtering options resulting in errors are to result in termination of the processing by clients. | +| `locale` | No | *Undefined* | This option determines the language in which openwec wants the rendering info data to be translated. Defaults to unset, meaning OpenWEC lets the clent choose. | +| `data_locale` | No | *Undefined* | This option determines the language in which openwec wants the numerical data to be formatted. Defaults to unset, meaning OpenWEC lets the clent choose. | ## Subscription management @@ -76,7 +82,64 @@ The principals filter can be configured using openwec cli: * `openwec subscriptions edit filter princs {add,delete,set} [princ, ...]` manages the principals in the filter. -## Available commands +## Configuration + +There are two methods available to configure subscriptions: +- using configuration files (recommended) +- using the `openwec` command line interface (`openwec subscriptions`) + +## Configuration Files + +A dedicated file in TOML format describes each subscription. To generate such a file, use `openwec subscriptions skell`. 
+ +This example sets up a subscription called "my-sub" with a placeholder query and a Files output in Raw format: +```toml +# Unique identifier of the subscription +uuid = "bf9e18e6-1fd5-4e3c-967d-2b866e0f8999" +# Unique name of the subscription +name = "my-sub" + +# Subscription query +query = """ + + + +""" + +# Subscription outputs +[[outputs]] +driver = "Files" +format = "Raw" +config = { base = "/var/log/openwec/" } +``` + +Note: `uuid` and `name` must be unique for each subscription. + +The OpenWEC server does not load subscription configuration files automatically during startup due to the complexity of doing so in a multi-node environment. Instead, these files must be explicitly loaded using the command 'openwec subscriptions load'. + +`openwec subscriptions load` can load either a whole directory of configuration files, or a single configuration file. When loading a directory, it assumes that the user does not want to keep existing subscriptions that are not present in the directory. When loading a file, it assumes that the user wants to keep already existing subscriptions. This behavior can be changed using the `--keep` flag. + +To use configuration files, edit them and then run `openwec subscriptions load`. In a multi-node environment, the `load` command only needs to be run once. + +### Revisions + +When using the `openwec subscriptions load` command, you can use the `--revision` flag to specify a revision string that represents the configuration version. For example, you can use the output of `git rev-parse --short HEAD` if your configuration files are versioned using `git`. + +When a client retrieves its subscriptions, it also receives the associated revision strings. Later, when pushing events or sending heartbeats, the revision string is included as metadata. The revision string received by OpenWEC within events is called `ClientRevision` because it represents the revision "used" by the client at that time. 
The revision string is not used to compute the subscription version that clients use to determine whether the subscription has been updated since their last `Refresh`. This is because some configuration updates may only affect "server" parameters (i.e. outputs), and we do not want all clients to refresh the subscription unnecessarily. However, if the configuration update affects "client" parameters (such as query), the subscription version is updated and clients will retrieve the new version of the subscription configuration with the new revision string on the next `Refresh`. + +When OpenWEC receives an event within a subscription, it processes the event by sending it to the designated outputs using the latest available configuration for that subscription. The revision of the configuration used by OpenWEC is called `ServerRevision`, which may differ from the `ClientRevision`. + +Both `ClientRevision` and `ServerRevision` are included with the metadata that OpenWEC adds for each event received (except in `Raw` format). + + +### Configuration files vs cli +There are a number of advantages to using configuration files in place of the cli: +- configuration files can be versioned, and their revision can be included in the metadata of each event received. This is very useful for tracing the query responsible for retrieving events. +- the cli can be difficult to use for editing complex subscriptions. + +You can disable all cli commands that edit subscriptions using the OpenWEC setting `cli.read_only_subscriptions`. + +## Command line interface ### `openwec subscriptions` diff --git a/openwec.conf.sample.toml b/openwec.conf.sample.toml index be61aa1..7ef91fe 100644 --- a/openwec.conf.sample.toml +++ b/openwec.conf.sample.toml @@ -37,7 +37,7 @@ # [Optional] # Set node name # This may be used by outputs. Unset by default. -# node_name = unsef +# node_name = unset # [Optional] # Keytab file path that contains secrets for Kerberos SPNs used by collectors. 
@@ -67,7 +67,7 @@ ## Logging settings ## ########################## -[logging] +# [logging] # [Optional] # Set server logging verbosity @@ -210,6 +210,21 @@ # If missing in config, default value is 512000 (which should be safe) # max_content_length = 512000 +# [Optional] +# Enable PROXY protocol +# This feature allows openwec to be used behind a layer 4 load balancer +# whilst the PROXY protocol is enabled to preserve the client IP address and port. +# If enabled, openwec EXPECTS a PROXY protocol header (v1 or v2) to be present +# and will close the connection otherwise. +# enable_proxy_protocol = false + +# [Optional] +# openwec will pretend to listen on this port instead of the "real" one +# configured in `listen_port`. +# This can be useful if openwec is behind a load balancer. +# If unset, the default value is `listen_port`. +# advertized_port = + # Authentication settings for this collector [collectors.authentication] # [Required] @@ -242,3 +257,15 @@ # server_private_key = "/etc/server-key.pem" ## End of TLS configuration + +########################## +## Cli settings ## +########################## + +# [cli] + +# [Optional] +# If set, subscriptions can only be updated using `openwec subscriptions load` command. +# This is useful if you want to rely on files to configure your subscriptions +# and want to enforce that subscriptions can not be updated directly. 
+# read_only_subscriptions = false \ No newline at end of file diff --git a/server/Cargo.toml b/server/Cargo.toml index 7152938..489a617 100644 --- a/server/Cargo.toml +++ b/server/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "server" -version = "0.1.0" +version = "0.2.0" edition = "2021" [[bin]] @@ -14,9 +14,8 @@ common = { path = "../common" } anyhow = "1.0.71" base64 = "0.21.2" buf-read-ext = "0.4.0" -http = "0.2.9" httparse = "1.8.0" -hyper = { version = "0.14.26", features = ["full"] } +hyper = { version = "1.2.0", features = ["full"] } itoa = "1.0.6" libgssapi = { version = "0.7.0", features = ["iov"] } log = "0.4.19" @@ -26,9 +25,8 @@ roxmltree = "0.19.0" tokio = { version = "1.35.1", features = ["full"] } rdkafka = { version = "0.36.0", features = ["zstd", "libz", "external-lz4"] } regex = "1.9.0" -lazy_static = "1.4.0" uuid = { version = "1.7.0", features = ["v4", "fast-rng"] } -serde = { version = "1.0.164", features = ["derive"] } +serde = { version = "1.0.164", features = ["derive", "rc"] } serde_json = "1.0.97" async-trait = "0.1.68" chrono = { version = "0.4.26", default-features = false, features = ["clock"] } @@ -38,10 +36,7 @@ xmlparser = "0.13.5" itertools = "0.12.0" futures = "0.3.28" bitreader = "0.3.7" -rustls = "0.21.6" -rustls-pemfile = "1.0.3" -tls-listener = { version = "0.8.0", features = ["hyper-h2", "rustls"] } -tokio-rustls = "0.24.1" +rustls-pemfile = "2.1.1" x509-parser = "0.15.1" sha1 = "0.10.5" hex = "0.4.3" @@ -50,3 +45,8 @@ log4rs = "1.2.0" log-mdc = "0.1.0" tokio-util = "0.7.10" thiserror = "1.0.50" +hyper-util = { version = "0.1", features = ["full"] } +socket2 = "0.5.6" +http-body-util = "0.1" +ppp = "2.2.0" +tokio-rustls = "0.26.0" diff --git a/server/src/outputs/file.rs b/server/src/drivers/files.rs similarity index 73% rename from server/src/outputs/file.rs rename to server/src/drivers/files.rs index 73d6a18..e4fa724 100644 --- a/server/src/outputs/file.rs +++ b/server/src/drivers/files.rs @@ -1,32 +1,32 @@ use 
async_trait::async_trait; use log::{debug, info, warn}; -use tokio::fs::OpenOptions; -use tokio::sync::{mpsc, oneshot}; -use tokio_util::sync::CancellationToken; +use tokio::sync::oneshot; use crate::event::EventMetadata; -use crate::formatter::Format; -use crate::output::Output; +use crate::output::OutputDriver; use anyhow::{anyhow, bail, Context, Result}; -use common::subscription::FileConfiguration; +use common::subscription::FilesConfiguration; use std::collections::HashMap; +use std::fs::{create_dir_all, File, OpenOptions}; +use std::io::Write; use std::net::IpAddr; +use std::sync::mpsc::{self, Receiver}; use std::sync::Arc; use std::{path::PathBuf, str::FromStr}; -use tokio::fs::{create_dir_all, File}; -use tokio::io::AsyncWriteExt; + +enum WriteFilesMessage { + Write(WriteMessage), + Stop, +} #[derive(Debug)] -pub struct WriteFileMessage { +pub struct WriteMessage { path: PathBuf, content: String, resp: oneshot::Sender>, } -async fn handle_message( - file_handles: &mut HashMap, - message: &WriteFileMessage, -) -> Result<()> { +fn handle_message(file_handles: &mut HashMap, message: &WriteMessage) -> Result<()> { let parent = message .path .parent() @@ -41,14 +41,13 @@ async fn handle_message( None => { // Create directory (if it does not already exist) debug!("Create directory {}", parent.display()); - create_dir_all(parent).await?; + create_dir_all(parent)?; // Open file debug!("Open file {}", path.display()); let file = OpenOptions::new() .create(true) .append(true) .open(path) - .await .with_context(|| format!("Failed to open file {}", path.display()))?; // Insert it into file_buffers map file_handles.insert(path.clone(), file); @@ -58,62 +57,44 @@ async fn handle_message( .ok_or_else(|| anyhow!("Could not find newly inserted File in file handles"))? 
} }; - file.write_all(message.content.as_bytes()).await?; + file.write_all(message.content.as_bytes())?; Ok(()) } -pub async fn run(mut task_rx: mpsc::Receiver, task_ct: CancellationToken) { - info!("File output task started"); +fn run(rx: Receiver) { + info!("File output thread started"); let mut file_handles: HashMap = HashMap::new(); - loop { - tokio::select! { - Some(message) = task_rx.recv() => { - let result = handle_message(&mut file_handles, &message).await; - if let Err(e) = message - .resp - .send(result) { - warn!("Failed to send File write result because the receiver dropped. Result was: {:?}", e); - } - }, - _ = task_ct.cancelled() => { - break; - } - }; + while let Ok(WriteFilesMessage::Write(message)) = rx.recv() { + let result = handle_message(&mut file_handles, &message); + if let Err(e) = message.resp.send(result) { + warn!( + "Failed to send File write result because the receiver dropped. Result was: {:?}", + e + ); + } } - info!("Exiting File output task"); + info!("Exiting File output thread"); } -pub struct OutputFile { - format: Format, - config: FileConfiguration, - task_tx: mpsc::Sender, - task_ct: CancellationToken, +pub struct OutputFiles { + config: FilesConfiguration, + tx: mpsc::Sender, } -impl OutputFile { - pub fn new(format: Format, config: &FileConfiguration) -> Self { - debug!( - "Initialize file output with format {:?} and config {:?}", - format, config - ); +impl OutputFiles { + pub fn new(config: &FilesConfiguration) -> Self { + debug!("Initialize Files driver with config {:?}", config); // Create a communication channel with the task responsible for file management - // TODO: Why 32? 
- let (task_tx, task_rx) = mpsc::channel(32); - - // Use a CancellationToken to tell the task to end itself - let task_ct = CancellationToken::new(); - let cloned_task_ct = task_ct.clone(); + let (tx, rx) = mpsc::channel(); - // Launch the task responsible for handling file system operations - tokio::spawn(async move { - run(task_rx, cloned_task_ct).await; + // Launch a dedicated thread responsible for handling file system operations + std::thread::spawn(move || { + run(rx); }); - OutputFile { - format, + OutputFiles { config: config.clone(), - task_tx, - task_ct, + tx, } } @@ -236,7 +217,7 @@ impl OutputFile { } #[async_trait] -impl Output for OutputFile { +impl OutputDriver for OutputFiles { async fn write( &self, metadata: Arc, @@ -259,32 +240,24 @@ impl Output for OutputFile { // Create a oneshot channel to retrieve the result of the operation let (tx, rx) = oneshot::channel(); - self.task_tx - .send(WriteFileMessage { - path, - content, - resp: tx, - }) - .await?; + self.tx.send(WriteFilesMessage::Write(WriteMessage { + path, + content, + resp: tx, + }))?; // Wait for the result rx.await??; Ok(()) } - - fn describe(&self) -> String { - format!("Files ({:?})", self.config) - } - - fn format(&self) -> &Format { - &self.format - } } -impl Drop for OutputFile { +impl Drop for OutputFiles { fn drop(&mut self) { - self.task_ct.cancel(); + if let Err(e) = self.tx.send(WriteFilesMessage::Stop) { + warn!("Failed to send Stop message to Files handler thread: {}", e); + } } } @@ -346,10 +319,10 @@ mod tests { #[tokio::test] async fn test_build_path() -> Result<()> { let config = - FileConfiguration::new("/base".to_string(), None, false, "messages".to_string()); + FilesConfiguration::new("/base".to_string(), None, false, "messages".to_string()); let ip: IpAddr = "127.0.0.1".parse()?; - let output_file = OutputFile::new(Format::Json, &config); + let output_file = OutputFiles::new(&config); assert_eq!( output_file.build_path(&ip, "princ", None)?, @@ -357,8 +330,8 @@ mod 
tests { ); let config = - FileConfiguration::new("/base".to_string(), None, true, "messages".to_string()); - let output_file = OutputFile::new(Format::Json, &config); + FilesConfiguration::new("/base".to_string(), None, true, "messages".to_string()); + let output_file = OutputFiles::new(&config); assert_eq!( output_file.build_path(&ip, "princ", Some(&"node".to_string()))?, @@ -366,8 +339,8 @@ mod tests { ); let config = - FileConfiguration::new("/base".to_string(), Some(1), true, "messages".to_string()); - let output_file = OutputFile::new(Format::Json, &config); + FilesConfiguration::new("/base".to_string(), Some(1), true, "messages".to_string()); + let output_file = OutputFiles::new(&config); assert_eq!( output_file.build_path(&ip, "princ", Some(&"node".to_string()))?, @@ -375,8 +348,8 @@ mod tests { ); let config = - FileConfiguration::new("/base".to_string(), Some(2), false, "other".to_string()); - let output_file = OutputFile::new(Format::Json, &config); + FilesConfiguration::new("/base".to_string(), Some(2), false, "other".to_string()); + let output_file = OutputFiles::new(&config); assert_eq!( output_file.build_path(&ip, "princ", Some(&"node".to_string()))?, @@ -384,8 +357,8 @@ mod tests { ); let config = - FileConfiguration::new("/base".to_string(), Some(3), false, "messages".to_string()); - let output_file = OutputFile::new(Format::Json, &config); + FilesConfiguration::new("/base".to_string(), Some(3), false, "messages".to_string()); + let output_file = OutputFiles::new(&config); assert_eq!( output_file.build_path(&ip, "princ", Some(&"node".to_string()))?, @@ -393,8 +366,8 @@ mod tests { ); let config = - FileConfiguration::new("/base".to_string(), Some(4), false, "messages".to_string()); - let output_file = OutputFile::new(Format::Json, &config); + FilesConfiguration::new("/base".to_string(), Some(4), false, "messages".to_string()); + let output_file = OutputFiles::new(&config); assert_eq!( output_file.build_path(&ip, "princ", Some(&"node".to_string()))?, 
@@ -402,8 +375,8 @@ mod tests { ); let config = - FileConfiguration::new("/base".to_string(), Some(5), false, "messages".to_string()); - let output_file = OutputFile::new(Format::Json, &config); + FilesConfiguration::new("/base".to_string(), Some(5), false, "messages".to_string()); + let output_file = OutputFiles::new(&config); assert_eq!( output_file.build_path(&ip, "princ", Some(&"node".to_string()))?, @@ -411,23 +384,23 @@ mod tests { ); let config = - FileConfiguration::new("/base".to_string(), None, true, "messages".to_string()); - let output_file = OutputFile::new(Format::Json, &config); + FilesConfiguration::new("/base".to_string(), None, true, "messages".to_string()); + let output_file = OutputFiles::new(&config); assert!(output_file.build_path(&ip, "princ", None).is_err()); let ip: IpAddr = "1:2:3:4:5:6:7:8".parse()?; let config = - FileConfiguration::new("/base".to_string(), None, false, "messages".to_string()); - let output_file = OutputFile::new(Format::Json, &config); + FilesConfiguration::new("/base".to_string(), None, false, "messages".to_string()); + let output_file = OutputFiles::new(&config); assert_eq!( output_file.build_path(&ip, "princ", Some(&"node".to_string()))?, PathBuf::from_str("/base/1:2:3:4:5:6:7:8/princ/messages")? 
); let config = - FileConfiguration::new("/base".to_string(), Some(3), false, "messages".to_string()); - let output_file = OutputFile::new(Format::Json, &config); + FilesConfiguration::new("/base".to_string(), Some(3), false, "messages".to_string()); + let output_file = OutputFiles::new(&config); assert_eq!(output_file.build_path(&ip, "princ", Some(&"node".to_string()))?,PathBuf::from_str("/base/1:2:3/1:2:3:4/1:2:3:4:5/1:2:3:4:5:6/1:2:3:4:5:6:7/1:2:3:4:5:6:7:8/princ/messages")?); Ok(()) } diff --git a/server/src/outputs/kafka.rs b/server/src/drivers/kafka.rs similarity index 78% rename from server/src/outputs/kafka.rs rename to server/src/drivers/kafka.rs index 2cc4683..8a931f4 100644 --- a/server/src/outputs/kafka.rs +++ b/server/src/drivers/kafka.rs @@ -10,16 +10,15 @@ use rdkafka::{ }; use std::{sync::Arc, time::Duration}; -use crate::{event::EventMetadata, formatter::Format, output::Output}; +use crate::{event::EventMetadata, output::OutputDriver}; pub struct OutputKafka { - format: Format, config: KafkaConfiguration, producer: FutureProducer, } impl OutputKafka { - pub fn new(format: Format, config: &KafkaConfiguration) -> Result { + pub fn new(config: &KafkaConfiguration) -> Result { let mut client_config = ClientConfig::new(); // Set a default value for Kafka delivery timeout // This can be overwritten in Kafka configuration @@ -28,11 +27,10 @@ impl OutputKafka { client_config.set(key, value); } debug!( - "Initialize kafka output with format {:?} and config {:?}", - format, config + "Initialize kafka output with config {:?}", + config ); Ok(OutputKafka { - format, config: config.clone(), producer: client_config.create()?, }) @@ -40,7 +38,7 @@ impl OutputKafka { } #[async_trait] -impl Output for OutputKafka { +impl OutputDriver for OutputKafka { async fn write( &self, _metadata: Arc, @@ -67,12 +65,4 @@ impl Output for OutputKafka { Ok(()) } - - fn describe(&self) -> String { - format!("Kafka (topic {})", self.config.topic()) - } - - fn format(&self) -> 
&Format { - &self.format - } } diff --git a/server/src/outputs/mod.rs b/server/src/drivers/mod.rs similarity index 79% rename from server/src/outputs/mod.rs rename to server/src/drivers/mod.rs index afb7240..4b8fe9b 100644 --- a/server/src/outputs/mod.rs +++ b/server/src/drivers/mod.rs @@ -1,4 +1,4 @@ -pub mod file; +pub mod files; pub mod kafka; pub mod tcp; pub mod redis; diff --git a/server/src/outputs/redis.rs b/server/src/drivers/redis.rs similarity index 72% rename from server/src/outputs/redis.rs rename to server/src/drivers/redis.rs index 6c7d4b3..3daf792 100644 --- a/server/src/outputs/redis.rs +++ b/server/src/drivers/redis.rs @@ -1,30 +1,28 @@ use anyhow::{bail, Context, Result}; use async_trait::async_trait; -use common::subscription::{ RedisConfiguration}; +use common::subscription::RedisConfiguration; use log::debug; -use std::{sync::Arc}; +use std::sync::Arc; use futures_util::stream::FuturesUnordered; use futures_util::StreamExt; -use crate::{event::EventMetadata, formatter::Format, output::Output}; +use crate::{event::EventMetadata, output::OutputDriver}; pub struct OutputRedis { - format: Format, config: RedisConfiguration, producer: redis::Client, } impl OutputRedis { - pub fn new(format: Format, config: &RedisConfiguration) -> Result { + pub fn new(config: &RedisConfiguration) -> Result { let client = redis::Client::open(format!("redis://{}/", config.addr())).context("Could not open redis connection")?; debug!( - "Initialize redis output with format {:?} and config {:?}", - format, config + "Initialize redis output with config {:?}", + config ); Ok(OutputRedis { - format, config: config.clone(), producer: client, }) @@ -32,14 +30,12 @@ impl OutputRedis { } #[async_trait] -impl Output for OutputRedis { +impl OutputDriver for OutputRedis { async fn write( &self, _metadata: Arc, events: Arc>>, ) -> Result<()> { - - let mut results = FuturesUnordered::new(); let cmd = redis::cmd("LPUSH"); @@ -66,12 +62,4 @@ impl Output for OutputRedis { Ok(()) } - - 
fn describe(&self) -> String { - format!("Redis (list {})", self.config.list()) - } - - fn format(&self) -> &Format { - &self.format - } } diff --git a/server/src/outputs/tcp.rs b/server/src/drivers/tcp.rs similarity index 75% rename from server/src/outputs/tcp.rs rename to server/src/drivers/tcp.rs index 2d23dc9..ae31670 100644 --- a/server/src/outputs/tcp.rs +++ b/server/src/drivers/tcp.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use crate::{event::EventMetadata, formatter::Format, output::Output}; +use crate::{event::EventMetadata, output::OutputDriver}; use anyhow::{anyhow, Result}; use async_trait::async_trait; use common::subscription::TcpConfiguration; @@ -29,8 +29,7 @@ fn send_response(sender: oneshot::Sender>, msg: Result<()>) { } pub async fn run( - addr: String, - port: u16, + config: TcpConfiguration, mut task_rx: mpsc::Receiver, cancellation_token: CancellationToken, ) { @@ -40,13 +39,13 @@ pub async fn run( Some(message) = task_rx.recv() => { // Establish TCP connection if not already done if stream_opt.is_none() { - match TcpStream::connect((addr.as_str(), port)).await { + match TcpStream::connect((config.addr(), config.port())).await { Ok(stream) => { stream_opt = Some(stream); }, Err(e) => { - warn!("Failed to connect to {}:{}: {}", addr, port, e); - send_response(message.resp, Err(anyhow!(format!("Failed to connect to {}:{}: {}", addr, port, e)))); + warn!("Failed to connect to {}:{}: {}", config.addr(), config.port(), e); + send_response(message.resp, Err(anyhow!(format!("Failed to connect to {}:{}: {}", config.addr(), config.port(), e)))); continue; } }; @@ -56,7 +55,7 @@ pub async fn run( Some(stream) => stream, None => { warn!("TCP stream is unset !"); - send_response(message.resp, Err(anyhow!(format!("TCP stream of {}:{} is unset!", addr, port)))); + send_response(message.resp, Err(anyhow!(format!("TCP stream of {}:{} is unset!", config.addr(), config.port())))); continue; } }; @@ -64,7 +63,7 @@ pub async fn run( // Write data to stream if let 
Err(e) = stream.write_all(message.content.as_bytes()).await { stream_opt = None; - send_response(message.resp, Err(anyhow!(format!("Failed to write in TCP connection ({}:{}): {}", addr, port, e)))); + send_response(message.resp, Err(anyhow!(format!("Failed to write in TCP connection ({}:{}): {}", config.addr(), config.port(), e)))); continue; } @@ -75,24 +74,19 @@ pub async fn run( } }; } - info!("Exiting TCP output task ({}:{})", addr, port); + info!("Exiting TCP output task ({}:{})", config.addr(), config.port()); } pub struct OutputTcp { - format: Format, - addr: String, - port: u16, task_tx: mpsc::Sender, task_ct: CancellationToken, } impl OutputTcp { - pub fn new(format: Format, config: &TcpConfiguration) -> Result { + pub fn new(config: &TcpConfiguration) -> Result { debug!( - "Initialize TCP output with format {:?} and peer {}:{}", - format, - config.addr(), - config.port() + "Initialize TCP output with config {:?}", + config, ); // Create a communication channel with the task responsible for file management @@ -103,16 +97,12 @@ impl OutputTcp { let task_ct = CancellationToken::new(); let cloned_task_ct = task_ct.clone(); - let addr = config.addr().to_string(); - let port = config.port(); + let config_cloned = config.clone(); // Launch the task responsible for handling the TCP connection - tokio::spawn(async move { run(addr, port, task_rx, cloned_task_ct).await }); + tokio::spawn(async move { run(config_cloned, task_rx, cloned_task_ct).await }); Ok(OutputTcp { - format, - addr: config.addr().to_string(), - port: config.port(), task_tx, task_ct, }) @@ -120,7 +110,7 @@ impl OutputTcp { } #[async_trait] -impl Output for OutputTcp { +impl OutputDriver for OutputTcp { async fn write( &self, _metadata: Arc, @@ -144,14 +134,6 @@ impl Output for OutputTcp { Ok(()) } - - fn describe(&self) -> String { - format!("TCP ({}:{})", self.addr, self.port) - } - - fn format(&self) -> &Format { - &self.format - } } impl Drop for OutputTcp { diff --git 
a/server/src/outputs/unix.rs b/server/src/drivers/unix.rs similarity index 87% rename from server/src/outputs/unix.rs rename to server/src/drivers/unix.rs index bcbd637..3c53f5b 100644 --- a/server/src/outputs/unix.rs +++ b/server/src/drivers/unix.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use crate::{event::EventMetadata, formatter::Format, output::Output}; +use crate::{event::EventMetadata, output::OutputDriver}; use anyhow::{anyhow, Result}; use async_trait::async_trait; use common::subscription::UnixDatagramConfiguration; @@ -88,15 +88,13 @@ pub async fn run( } pub struct OutputUnixDatagram { - format: Format, - path: String, task_tx: mpsc::Sender, task_ct: CancellationToken, } impl OutputUnixDatagram { - pub fn new(format: Format, config: &UnixDatagramConfiguration) -> Result { - debug!("Initialize UnixDatagram output with format {:?} and path {}", format, config.path()); + pub fn new(config: &UnixDatagramConfiguration) -> Result { + debug!("Initialize UnixDatagram output with path {}", config.path()); let (task_tx, task_rx) = mpsc::channel(32); @@ -108,8 +106,6 @@ impl OutputUnixDatagram { tokio::spawn(async move { run(path, task_rx, cloned_task_ct).await }); Ok(OutputUnixDatagram { - format, - path: config.path().to_string(), task_tx, task_ct, }) @@ -117,7 +113,7 @@ impl OutputUnixDatagram { } #[async_trait] -impl Output for OutputUnixDatagram { +impl OutputDriver for OutputUnixDatagram { async fn write( &self, _metadata: Arc, @@ -132,14 +128,6 @@ impl Output for OutputUnixDatagram { Ok(()) } - - fn describe(&self) -> String { - format!("UnixDatagram ({})", self.path) - } - - fn format(&self) -> &Format { - &self.format - } } impl Drop for OutputUnixDatagram { diff --git a/server/src/event.rs b/server/src/event.rs index d5907be..bb32200 100644 --- a/server/src/event.rs +++ b/server/src/event.rs @@ -3,54 +3,40 @@ use chrono::{DateTime, Utc}; use log::{debug, info, trace, warn}; use roxmltree::{Document, Error, Node}; use serde::Serialize; -use 
std::{collections::HashMap, net::SocketAddr}; +use std::{collections::HashMap, net::SocketAddr, sync::Arc}; use crate::subscription::Subscription; #[derive(Debug, Default, Serialize, Clone)] pub struct EventDataType { - #[serde(flatten, skip_serializing_if = "HashMap::is_empty")] - named_data: HashMap, - #[serde(rename = "Data", skip_serializing_if = "Vec::is_empty")] - unamed_data: Vec, - #[serde(rename = "Binary", skip_serializing_if = "Option::is_none")] - binary: Option, + pub named_data: HashMap, + pub unamed_data: Vec, + pub binary: Option, } #[derive(Debug, Default, Serialize, Clone)] pub struct DebugDataType { - #[serde(rename = "SequenceNumber", skip_serializing_if = "Option::is_none")] - sequence_number: Option, - #[serde(rename = "FlagsName", skip_serializing_if = "Option::is_none")] - flags_name: Option, - #[serde(rename = "LevelName", skip_serializing_if = "Option::is_none")] - level_name: Option, - #[serde(rename = "Component")] - component: String, - #[serde(rename = "SubComponent", skip_serializing_if = "Option::is_none")] - sub_component: Option, - #[serde(rename = "FileLine", skip_serializing_if = "Option::is_none")] - file_line: Option, - #[serde(rename = "Function", skip_serializing_if = "Option::is_none")] - function: Option, - #[serde(rename = "Message")] - message: String, + pub sequence_number: Option, + pub flags_name: Option, + pub level_name: Option, + pub component: String, + pub sub_component: Option, + pub file_line: Option, + pub function: Option, + pub message: String, } #[derive(Debug, Default, Serialize, Clone)] pub struct ProcessingErrorDataType { - #[serde(rename = "ErrorCode")] - error_code: u32, - #[serde(rename = "DataItemName")] - data_item_name: String, - #[serde(rename = "EventPayload")] - event_payload: String, + pub error_code: u32, + pub data_item_name: String, + pub event_payload: String, } pub type UserDataType = String; pub type BinaryEventDataType = String; -#[derive(Debug, Default, Serialize, Clone)] 
+#[derive(Debug, Default, Clone)] pub enum DataType { EventData(EventDataType), UserData(UserDataType), @@ -61,35 +47,16 @@ pub enum DataType { Unknown, } -impl DataType { - fn is_unknown(&self) -> bool { - matches!(self, DataType::Unknown) - } -} - -#[derive(Serialize, Debug, Clone, Default)] -#[serde(tag = "Type")] +#[derive(Debug, Clone, Default, Eq, PartialEq)] pub enum ErrorType { /// Initial XML parsing failed but Raw content could be recovered - RawContentRecovered { - #[serde(rename = "Message")] - message: String, - }, + RawContentRecovered(String), /// Initial XML parsing failed and recovering failed again - FailedToRecoverRawContent { - #[serde(rename = "Message")] - message: String, - }, + FailedToRecoverRawContent(String), /// Initial XML parsing failed and no recovering strategy was usable - Unrecoverable { - #[serde(rename = "Message")] - message: String, - }, + Unrecoverable(String), /// Failed to feed event from parsed XML document - FailedToFeedEvent { - #[serde(rename = "Message")] - message: String, - }, + FailedToFeedEvent(String), #[default] Unknown, } @@ -97,33 +64,28 @@ pub enum ErrorType { impl ToString for ErrorType { fn to_string(&self) -> String { match self { - ErrorType::RawContentRecovered { message } => message.clone(), - ErrorType::FailedToRecoverRawContent { message } => message.clone(), - ErrorType::Unrecoverable { message } => message.clone(), - ErrorType::FailedToFeedEvent { message } => message.clone(), + ErrorType::RawContentRecovered(message) => message.clone(), + ErrorType::FailedToRecoverRawContent(message ) => message.clone(), + ErrorType::Unrecoverable(message ) => message.clone(), + ErrorType::FailedToFeedEvent (message ) => message.clone(), ErrorType::Unknown => "Unknown error".to_string(), } } } -#[derive(Debug, Default, Serialize, Clone)] -struct ErrorInfo { - #[serde(rename = "OriginalContent")] - original_content: String, - #[serde(flatten)] - _type: ErrorType, + +#[derive(Debug, Default, Clone, Eq, PartialEq)] +pub 
struct ErrorInfo { + pub original_content: String, + pub error_type: ErrorType, } -#[derive(Debug, Default, Serialize, Clone)] +#[derive(Debug, Default, Clone)] pub struct Event { - #[serde(rename = "System", skip_serializing_if = "Option::is_none")] - system: Option, - #[serde(flatten, skip_serializing_if = "DataType::is_unknown")] - data: DataType, - #[serde(rename = "RenderingInfo", skip_serializing_if = "Option::is_none")] - rendering_info: Option, - #[serde(rename = "OpenWEC")] - additional: Additional, + pub system: Option, + pub data: DataType, + pub rendering_info: Option, + pub additional: Additional, } impl Event { @@ -169,7 +131,7 @@ impl Event { ) { event.additional.error = Some(ErrorInfo { original_content: content.to_string(), - _type: error_type.clone(), + error_type: error_type.clone(), }); let error_message = error_type.to_string(); if warn { @@ -202,53 +164,36 @@ impl Event { Ok(doc) => { match Event::feed_event_from_document(event, &doc, &clean_content) { Ok(_) => - (ErrorType::RawContentRecovered { message: format!( + (ErrorType::RawContentRecovered(format!( "Failed to parse event XML ({}) but Raw content could be recovered.", initial_error - ) }, false), + )), false), Err(feed_error) => - (ErrorType::FailedToFeedEvent { message: format!( + (ErrorType::FailedToFeedEvent(format!( "Could not feed event from document: {}", feed_error - ) }, true), + )), true), } } Err(recovering_error) => { - (ErrorType::FailedToRecoverRawContent { message: format!( + (ErrorType::FailedToRecoverRawContent(format!( "Failed to parse event XML ({}) and Raw content recovering failed ({})", initial_error, recovering_error - ) }, true) + )), true) } } } None => ( - ErrorType::Unrecoverable { - message: format!("Failed to parse event XML: {}", initial_error), - }, + ErrorType::Unrecoverable(format!("Failed to parse event XML: {}", initial_error)), true, ), }; Event::add_event_parsing_error(event, content, error_type, do_warn); } - pub fn from_str(metadata: &EventMetadata, 
content: &str) -> Self { - let mut event = Event { - additional: Additional { - addr: metadata.addr().ip().to_string(), - principal: metadata.principal().to_owned(), // TODO : change to something that works for TLS as well (modify db and output) - node: metadata.node_name().cloned(), - time_received: metadata.time_received().to_rfc3339(), - subscription: SubscriptionType { - uuid: metadata.subscription_uuid().to_owned(), - version: metadata.subscription_version().to_owned(), - name: metadata.subscription_name().to_owned(), - uri: metadata.subscription_uri().cloned(), - }, - error: None, - }, - ..Default::default() - }; - + pub fn from_str(content: &str) -> Self { + let mut event = Event::default(); + let doc_parse_attempt = Document::parse(content); match doc_parse_attempt { Ok(doc) => { @@ -258,7 +203,7 @@ impl Event { Event::add_event_parsing_error( &mut event, content, - ErrorType::FailedToFeedEvent { message }, + ErrorType::FailedToFeedEvent(message), true, ); } @@ -351,120 +296,53 @@ fn parse_user_data(user_data_node: &Node) -> Result { Ok(DataType::UserData(data)) } -#[derive(Debug, Default, Serialize, Clone)] -struct Additional { - #[serde(rename = "IpAddress")] - addr: String, - #[serde(rename = "TimeReceived")] - time_received: String, - #[serde(rename = "Principal")] - principal: String, - #[serde(rename = "Subscription")] - subscription: SubscriptionType, - #[serde(rename = "Node", skip_serializing_if = "Option::is_none")] - node: Option, - #[serde(rename = "Error", skip_serializing_if = "Option::is_none")] - error: Option, -} - -#[derive(Debug, Default, Serialize, Clone)] -struct SubscriptionType { - #[serde(rename = "Uuid")] - uuid: String, - #[serde(rename = "Version")] - version: String, - #[serde(rename = "Name")] - name: String, - #[serde(rename = "Uri", skip_serializing_if = "Option::is_none")] - uri: Option, +#[derive(Debug, Default, Clone)] +pub struct Additional { + pub error: Option, } -#[derive(Debug, Default, Serialize, Clone)] 
+#[derive(Debug, Default, Clone)] pub struct Provider { - #[serde(skip_serializing_if = "Option::is_none")] - #[serde(rename = "Name")] pub name: Option, - #[serde(skip_serializing_if = "Option::is_none")] - #[serde(rename = "Guid")] pub guid: Option, - #[serde(skip_serializing_if = "Option::is_none")] - #[serde(rename = "EventSourceName")] pub event_source_name: Option, } -#[derive(Debug, Default, Serialize, Clone)] +#[derive(Debug, Default, Clone)] pub struct Correlation { - #[serde(skip_serializing_if = "Option::is_none")] - #[serde(rename = "ActivityID")] pub activity_id: Option, - #[serde(skip_serializing_if = "Option::is_none")] - #[serde(rename = "RelatedActivityID")] pub related_activity_id: Option, } -#[derive(Debug, Default, Serialize, Clone)] +#[derive(Debug, Default, Clone)] pub struct Execution { - #[serde(rename = "ProcessID")] pub process_id: u32, - - #[serde(rename = "ThreadID")] pub thread_id: u32, - - #[serde(rename = "ProcessorID")] - #[serde(skip_serializing_if = "Option::is_none")] pub processor_id: Option, - - #[serde(rename = "SessionID")] - #[serde(skip_serializing_if = "Option::is_none")] pub session_id: Option, - - #[serde(rename = "KernelTime")] - #[serde(skip_serializing_if = "Option::is_none")] pub kernel_time: Option, - - #[serde(rename = "UserTime")] - #[serde(skip_serializing_if = "Option::is_none")] pub user_time: Option, - - #[serde(rename = "ProcessorTime")] - #[serde(skip_serializing_if = "Option::is_none")] pub processor_time: Option, } -#[derive(Debug, Default, Serialize, Clone)] -struct System { - #[serde(rename = "Provider")] - provider: Provider, - #[serde(rename = "EventID")] - event_id: u32, - #[serde(rename = "EventIDQualifiers", skip_serializing_if = "Option::is_none")] - event_id_qualifiers: Option, - #[serde(rename = "Version", skip_serializing_if = "Option::is_none")] - version: Option, - #[serde(rename = "Level", skip_serializing_if = "Option::is_none")] - level: Option, - #[serde(rename = "Task", skip_serializing_if 
= "Option::is_none")] - task: Option, - #[serde(rename = "Opcode", skip_serializing_if = "Option::is_none")] - opcode: Option, - #[serde(rename = "Keywords", skip_serializing_if = "Option::is_none")] - keywords: Option, - #[serde(rename = "TimeCreated", skip_serializing_if = "Option::is_none")] - time_created: Option, - #[serde(rename = "EventRecordID", skip_serializing_if = "Option::is_none")] - event_record_id: Option, - #[serde(rename = "Correlation", skip_serializing_if = "Option::is_none")] - correlation: Option, - #[serde(rename = "Execution", skip_serializing_if = "Option::is_none")] - execution: Option, - #[serde(rename = "Channel", skip_serializing_if = "Option::is_none")] - channel: Option, - #[serde(rename = "Computer")] - computer: String, - #[serde(rename = "Container", skip_serializing_if = "Option::is_none")] - container: Option, - #[serde(rename = "UserID", skip_serializing_if = "Option::is_none")] - user_id: Option, +#[derive(Debug, Default, Clone)] +pub struct System { + pub provider: Provider, + pub event_id: u32, + pub event_id_qualifiers: Option, + pub version: Option, + pub level: Option, + pub task: Option, + pub opcode: Option, + pub keywords: Option, + pub time_created: Option, + pub event_record_id: Option, + pub correlation: Option, + pub execution: Option, + pub channel: Option, + pub computer: String, + pub container: Option, + pub user_id: Option, } impl System { @@ -556,25 +434,17 @@ impl System { } #[derive(Debug, Default, Serialize, Clone)] -struct RenderingInfo { - #[serde(rename = "Message", skip_serializing_if = "Option::is_none")] - message: Option, - #[serde(rename = "Level", skip_serializing_if = "Option::is_none")] - level: Option, - #[serde(rename = "Task", skip_serializing_if = "Option::is_none")] - task: Option, - #[serde(rename = "Opcode", skip_serializing_if = "Option::is_none")] - opcode: Option, - #[serde(rename = "Channel", skip_serializing_if = "Option::is_none")] - channel: Option, - #[serde(rename = "Provider", 
skip_serializing_if = "Option::is_none")] +pub struct RenderingInfo { + pub message: Option, + pub level: Option, + pub task: Option, + pub opcode: Option, + pub channel: Option, // Microsoft schema states that this field should be called "Publisher" // but this is not what has been observed in practice - provider: Option, - #[serde(rename = "Keywords", skip_serializing_if = "Option::is_none")] - keywords: Option>, - #[serde(rename = "Culture")] - culture: String, + pub provider: Option, + pub keywords: Option>, + pub culture: String, } impl RenderingInfo { @@ -628,6 +498,8 @@ pub struct EventMetadata { subscription_version: String, subscription_name: String, subscription_uri: Option, + subscription_client_revision: Option, + subscription_server_revision: Option, } impl EventMetadata { @@ -636,19 +508,28 @@ impl EventMetadata { principal: &str, node_name: Option, subscription: &Subscription, + public_version: String, + client_revision: Option, ) -> Self { EventMetadata { addr: *addr, principal: principal.to_owned(), node_name, time_received: Utc::now(), - subscription_uuid: subscription.data().uuid().to_owned(), - subscription_version: subscription.data().version().to_owned(), + subscription_uuid: subscription.data().uuid_string(), + subscription_version: public_version, subscription_name: subscription.data().name().to_owned(), subscription_uri: subscription.data().uri().cloned(), + subscription_client_revision: client_revision, + subscription_server_revision: subscription.data().revision().cloned(), } } + #[cfg(test)] + pub fn set_time_received(&mut self, time_received: DateTime) { + self.time_received = time_received; + } + /// Get a reference to the event metadata's addr. 
pub fn addr(&self) -> SocketAddr { self.addr @@ -681,14 +562,44 @@ impl EventMetadata { pub fn subscription_uri(&self) -> Option<&String> { self.subscription_uri.as_ref() } + + pub fn subscription_client_revision(&self) -> Option<&String> { + self.subscription_client_revision.as_ref() + } + + pub fn subscription_server_revision(&self) -> Option<&String> { + self.subscription_server_revision.as_ref() + } } -#[cfg(test)] -mod tests { - use std::str::FromStr; +pub struct EventData { + raw: Arc, + event: Option, +} - use serde_json::Value; +impl EventData { + pub fn new(raw: Arc, parse_event: bool) -> Self { + let event = if parse_event { + Some(Event::from_str(raw.as_ref())) + } else { + None + }; + Self { + raw, + event + } + } + pub fn raw(&self) -> Arc { + self.raw.clone() + } + + pub fn event(&self) -> Option<&Event> { + self.event.as_ref() + } +} +#[cfg(test)] +mod tests { use super::*; const EVENT_142: &str = r#" @@ -1010,429 +921,129 @@ mod tests { #[test] fn test_4689_parsing() { let event = Event::from_str( - &EventMetadata { - addr: SocketAddr::from_str("192.168.0.1:5985").unwrap(), - principal: "win10.windomain.local".to_owned(), - node_name: Some("openwec".to_owned()), - time_received: chrono::DateTime::parse_from_rfc3339( - "2022-11-07T17:08:27.169805+01:00", - ) - .unwrap() - .with_timezone(&Utc), - subscription_name: "Test".to_string(), - subscription_uuid: "8B18D83D-2964-4F35-AC3B-6F4E6FFA727B".to_string(), - subscription_version: "AD0D118F-31EF-4111-A0CA-D87249747278".to_string(), - subscription_uri: Some("/this/is/a/test".to_string()), - }, EVENT_4689, ); assert!(event.additional.error.is_none()) } - const EVENT_4688: &str = r#"4688201331200x8020000000000000114689Securitywin10.windomain.localS-1-5-18WIN10$WINDOMAIN0x3e70x3a8C:\Program Files (x86)\Microsoft\EdgeUpdate\MicrosoftEdgeUpdate.exe%%19360x240S-1-0-0--0x0C:\Windows\System32\services.exeS-1-16-16384A new process has been created. 
- -Creator Subject: - Security ID: S-1-5-18 - Account Name: WIN10$ - Account Domain: WINDOMAIN - Logon ID: 0x3E7 - -Target Subject: - Security ID: S-1-0-0 - Account Name: - - Account Domain: - - Logon ID: 0x0 - -Process Information: - New Process ID: 0x3a8 - New Process Name: C:\Program Files (x86)\Microsoft\EdgeUpdate\MicrosoftEdgeUpdate.exe - Token Elevation Type: %%1936 - Mandatory Label: S-1-16-16384 - Creator Process ID: 0x240 - Creator Process Name: C:\Windows\System32\services.exe - Process Command Line: - -Token Elevation Type indicates the type of token that was assigned to the new process in accordance with User Account Control policy. - -Type 1 is a full token with no privileges removed or groups disabled. A full token is only used if User Account Control is disabled or if the user is the built-in Administrator account or a service account. - -Type 2 is an elevated token with no privileges removed or groups disabled. An elevated token is used when User Account Control is enabled and the user chooses to start the program using Run as administrator. An elevated token is also used when an application is configured to always require administrative privilege or to always require maximum privilege, and the user is a member of the Administrators group. - -Type 3 is a limited token with administrative privileges removed and administrative groups disabled. 
The limited token is used when User Account Control is enabled, the application does not require administrative privilege, and the user does not choose to start the program using Run as administrator.InformationProcess CreationInfoSecurityMicrosoft Windows security auditing.Audit Success"#; - const EVENT_4688_JSON: &str = r#"{"System":{"Provider":{"Name":"Microsoft-Windows-Security-Auditing","Guid":"{54849625-5478-4994-a5ba-3e3b0328c30d}"},"EventID":4688,"Version":2,"Level":0,"Task":13312,"Opcode":0,"Keywords":"0x8020000000000000","TimeCreated":"2022-12-14T16:06:51.0643605Z","EventRecordID":114689,"Correlation":{},"Execution":{"ProcessID":4,"ThreadID":196},"Channel":"Security","Computer":"win10.windomain.local"},"EventData":{"SubjectLogonId":"0x3e7","SubjectUserName":"WIN10$","SubjectDomainName":"WINDOMAIN","ParentProcessName":"C:\\Windows\\System32\\services.exe","MandatoryLabel":"S-1-16-16384","SubjectUserSid":"S-1-5-18","NewProcessName":"C:\\Program Files (x86)\\Microsoft\\EdgeUpdate\\MicrosoftEdgeUpdate.exe","TokenElevationType":"%%1936","TargetUserSid":"S-1-0-0","TargetDomainName":"-","CommandLine":"","TargetUserName":"-","NewProcessId":"0x3a8","TargetLogonId":"0x0","ProcessId":"0x240"},"RenderingInfo":{"Message":"A new process has been created.\n\nCreator Subject:\n\tSecurity ID:\t\tS-1-5-18\n\tAccount Name:\t\tWIN10$\n\tAccount Domain:\t\tWINDOMAIN\n\tLogon ID:\t\t0x3E7\n\nTarget Subject:\n\tSecurity ID:\t\tS-1-0-0\n\tAccount Name:\t\t-\n\tAccount Domain:\t\t-\n\tLogon ID:\t\t0x0\n\nProcess Information:\n\tNew Process ID:\t\t0x3a8\n\tNew Process Name:\tC:\\Program Files (x86)\\Microsoft\\EdgeUpdate\\MicrosoftEdgeUpdate.exe\n\tToken Elevation Type:\t%%1936\n\tMandatory Label:\t\tS-1-16-16384\n\tCreator Process ID:\t0x240\n\tCreator Process Name:\tC:\\Windows\\System32\\services.exe\n\tProcess Command Line:\t\n\nToken Elevation Type indicates the type of token that was assigned to the new process in accordance with User Account Control policy.\n\nType 1 is a 
full token with no privileges removed or groups disabled. A full token is only used if User Account Control is disabled or if the user is the built-in Administrator account or a service account.\n\nType 2 is an elevated token with no privileges removed or groups disabled. An elevated token is used when User Account Control is enabled and the user chooses to start the program using Run as administrator. An elevated token is also used when an application is configured to always require administrative privilege or to always require maximum privilege, and the user is a member of the Administrators group.\n\nType 3 is a limited token with administrative privileges removed and administrative groups disabled. The limited token is used when User Account Control is enabled, the application does not require administrative privilege, and the user does not choose to start the program using Run as administrator.","Level":"Information","Task":"Process Creation","Opcode":"Info","Channel":"Security","Provider":"Microsoft Windows security auditing.","Keywords":["Audit Success"],"Culture":"en-US"},"OpenWEC":{"IpAddress":"192.168.58.100","TimeReceived":"2022-12-14T16:07:03.331+00:00","Principal":"WIN10$@WINDOMAIN.LOCAL","Node":"openwec","Subscription":{"Uuid":"8B18D83D-2964-4F35-AC3B-6F4E6FFA727B","Version":"AD0D118F-31EF-4111-A0CA-D87249747278","Name":"Test","Uri":"/this/is/a/test"}}}"#; - - #[test] - fn test_serialize_4688_event_data() { - let event = Event::from_str( - &EventMetadata { - addr: SocketAddr::from_str("192.168.58.100:5985").unwrap(), - principal: "WIN10$@WINDOMAIN.LOCAL".to_owned(), - node_name: Some("openwec".to_owned()), - time_received: chrono::DateTime::parse_from_rfc3339( - "2022-12-14T17:07:03.331+01:00", - ) - .unwrap() - .with_timezone(&Utc), - subscription_name: "Test".to_string(), - subscription_uuid: "8B18D83D-2964-4F35-AC3B-6F4E6FFA727B".to_string(), - subscription_version: "AD0D118F-31EF-4111-A0CA-D87249747278".to_string(), - subscription_uri: 
Some("/this/is/a/test".to_string()), - }, - EVENT_4688, - ); - assert!(event.additional.error.is_none()); - - let event_json = serde_json::to_string(&event).unwrap(); - - let event_json_value: Value = serde_json::from_str(&event_json).unwrap(); - let expected_value: Value = serde_json::from_str(EVENT_4688_JSON).unwrap(); - - assert_eq!(event_json_value, expected_value); - } - - const EVENT_1003: &str = r#"100304000x800000000000007603Applicationwin10.windomain.local55c92734-d682-4d71-983e-d6ec3f16059f -1: 3f4c0546-36c6-46a8-a37f-be13cdd0cf25, 1, 1 [(0 [0xC004E003, 0, 0], [( 9 0xC004FC07 90 0)( 1 0x00000000)(?)( 2 0x00000000 0 0 msft:rm/algorithm/hwid/4.0 0x00000000 0)(?)( 9 0xC004FC07 90 0)( 10 0x00000000 msft:rm/algorithm/flags/1.0)(?)])(1 )(2 )(3 [0x00000000, 0, 0], [( 6 0xC004F009 0 0)( 1 0x00000000)( 6 0xC004F009 0 0)(?)(?)(?)( 10 0x00000000 msft:rm/algorithm/flags/1.0)( 11 0x00000000 0xC004FC07)])] - -The Software Protection service has completed licensing status check. -Application Id=55c92734-d682-4d71-983e-d6ec3f16059f -Licensing Status= -1: 3f4c0546-36c6-46a8-a37f-be13cdd0cf25, 1, 1 [(0 [0xC004E003, 0, 0], [( 9 0xC004FC07 90 0)( 1 0x00000000)(?)( 2 0x00000000 0 0 msft:rm/algorithm/hwid/4.0 0x00000000 0)(?)( 9 0xC004FC07 90 0)( 10 0x00000000 msft:rm/algorithm/flags/1.0)(?)])(1 )(2 )(3 [0x00000000, 0, 0], [( 6 0xC004F009 0 0)( 1 0x00000000)( 6 0xC004F009 0 0)(?)(?)(?)( 10 0x00000000 msft:rm/algorithm/flags/1.0)( 11 0x00000000 0xC004FC07)])] - -InformationMicrosoft-Windows-Security-SPPClassic - "#; - const EVENT_1003_JSON: &str = r#"{"System":{"Provider":{"Name":"Microsoft-Windows-Security-SPP","Guid":"{E23B33B0-C8C9-472C-A5F9-F2BDFEA0F156}","EventSourceName":"Software Protection Platform 
Service"},"EventID":1003,"EventIDQualifiers":16384,"Version":0,"Level":4,"Task":0,"Opcode":0,"Keywords":"0x80000000000000","TimeCreated":"2022-12-14T16:05:59.7074374Z","EventRecordID":7603,"Correlation":{},"Execution":{"ProcessID":0,"ThreadID":0},"Channel":"Application","Computer":"win10.windomain.local"},"EventData":{"Data":["55c92734-d682-4d71-983e-d6ec3f16059f","\n1: 3f4c0546-36c6-46a8-a37f-be13cdd0cf25, 1, 1 [(0 [0xC004E003, 0, 0], [( 9 0xC004FC07 90 0)( 1 0x00000000)(?)( 2 0x00000000 0 0 msft:rm/algorithm/hwid/4.0 0x00000000 0)(?)( 9 0xC004FC07 90 0)( 10 0x00000000 msft:rm/algorithm/flags/1.0)(?)])(1 )(2 )(3 [0x00000000, 0, 0], [( 6 0xC004F009 0 0)( 1 0x00000000)( 6 0xC004F009 0 0)(?)(?)(?)( 10 0x00000000 msft:rm/algorithm/flags/1.0)( 11 0x00000000 0xC004FC07)])]\n\n"]},"RenderingInfo":{"Message":"The Software Protection service has completed licensing status check.\nApplication Id=55c92734-d682-4d71-983e-d6ec3f16059f\nLicensing Status=\n1: 3f4c0546-36c6-46a8-a37f-be13cdd0cf25, 1, 1 [(0 [0xC004E003, 0, 0], [( 9 0xC004FC07 90 0)( 1 0x00000000)(?)( 2 0x00000000 0 0 msft:rm/algorithm/hwid/4.0 0x00000000 0)(?)( 9 0xC004FC07 90 0)( 10 0x00000000 msft:rm/algorithm/flags/1.0)(?)])(1 )(2 )(3 [0x00000000, 0, 0], [( 6 0xC004F009 0 0)( 1 0x00000000)( 6 0xC004F009 0 0)(?)(?)(?)( 10 0x00000000 msft:rm/algorithm/flags/1.0)( 11 0x00000000 0xC004FC07)])]\n\n","Level":"Information","Provider":"Microsoft-Windows-Security-SPP","Keywords":["Classic"],"Culture":"en-US"},"OpenWEC":{"IpAddress":"192.168.58.100","TimeReceived":"2022-12-14T16:07:03.324+00:00","Principal":"WIN10$@WINDOMAIN.LOCAL","Node":"openwec","Subscription":{"Uuid":"8B18D83D-2964-4F35-AC3B-6F4E6FFA727B","Version":"AD0D118F-31EF-4111-A0CA-D87249747278","Name":"Test"}}}"#; - - #[test] - fn test_serialize_1003_event_data_unamed() { - let event = Event::from_str( - &EventMetadata { - addr: SocketAddr::from_str("192.168.58.100:5985").unwrap(), - principal: "WIN10$@WINDOMAIN.LOCAL".to_owned(), - node_name: 
Some("openwec".to_owned()), - time_received: chrono::DateTime::parse_from_rfc3339( - "2022-12-14T17:07:03.324+01:00", - ) - .unwrap() - .with_timezone(&Utc), - subscription_name: "Test".to_string(), - subscription_uuid: "8B18D83D-2964-4F35-AC3B-6F4E6FFA727B".to_string(), - subscription_version: "AD0D118F-31EF-4111-A0CA-D87249747278".to_string(), - subscription_uri: None, - }, - EVENT_1003, - ); - assert!(event.additional.error.is_none()); - - let event_json = serde_json::to_string(&event).unwrap(); - - let event_json_value: Value = serde_json::from_str(&event_json).unwrap(); - let expected_value: Value = serde_json::from_str(EVENT_1003_JSON).unwrap(); - - assert_eq!(event_json_value, expected_value); - } - - const EVENT_5719: &str = r#"571902000x800000000000009466Systemwin10.windomain.localWINDOMAIN%%13115E0000C0This computer was not able to set up a secure session with a domain controller in domain WINDOMAIN due to the following: -We can't sign you in with this credential because your domain isn't available. Make sure your device is connected to your organization's network and try again. If you previously signed in on this device with another credential, you can sign in with that credential. -This may lead to authentication problems. Make sure that this computer is connected to the network. If the problem persists, please contact your domain administrator. - -ADDITIONAL INFO -If this computer is a domain controller for the specified domain, it sets up the secure session to the primary domain controller emulator in the specified domain. 
Otherwise, this computer sets up the secure session to any domain controller in the specified domain.ErrorInfoClassic"#; - const EVENT_5719_JSON: &str = r#"{"System":{"Provider":{"Name":"NETLOGON"},"EventID":5719,"EventIDQualifiers":0,"Version":0,"Level":2,"Task":0,"Opcode":0,"Keywords":"0x80000000000000","TimeCreated":"2022-12-14T16:04:59.0817047Z","EventRecordID":9466,"Correlation":{},"Execution":{"ProcessID":0,"ThreadID":0},"Channel":"System","Computer":"win10.windomain.local"},"EventData":{"Data":["WINDOMAIN","%%1311"],"Binary":"5E0000C0"},"RenderingInfo":{"Message":"This computer was not able to set up a secure session with a domain controller in domain WINDOMAIN due to the following: \nWe can't sign you in with this credential because your domain isn't available. Make sure your device is connected to your organization's network and try again. If you previously signed in on this device with another credential, you can sign in with that credential. \nThis may lead to authentication problems. Make sure that this computer is connected to the network. If the problem persists, please contact your domain administrator. \n\nADDITIONAL INFO \nIf this computer is a domain controller for the specified domain, it sets up the secure session to the primary domain controller emulator in the specified domain. 
Otherwise, this computer sets up the secure session to any domain controller in the specified domain.","Level":"Error","Opcode":"Info","Keywords":["Classic"],"Culture":"en-US"},"OpenWEC":{"IpAddress":"192.168.58.100","TimeReceived":"2022-12-14T16:07:02.919+00:00","Principal":"WIN10$@WINDOMAIN.LOCAL","Node":"openwec","Subscription":{"Uuid":"8B18D83D-2964-4F35-AC3B-6F4E6FFA727B","Version":"AD0D118F-31EF-4111-A0CA-D87249747278","Name":"Test","Uri":"/this/is/a/test"}}}"#; - - #[test] - fn test_serialize_5719_event_data_binary() { - let event = Event::from_str( - &EventMetadata { - addr: SocketAddr::from_str("192.168.58.100:5985").unwrap(), - principal: "WIN10$@WINDOMAIN.LOCAL".to_owned(), - node_name: Some("openwec".to_owned()), - time_received: chrono::DateTime::parse_from_rfc3339( - "2022-12-14T17:07:02.919+01:00", - ) - .unwrap() - .with_timezone(&Utc), - subscription_name: "Test".to_string(), - subscription_uuid: "8B18D83D-2964-4F35-AC3B-6F4E6FFA727B".to_string(), - subscription_version: "AD0D118F-31EF-4111-A0CA-D87249747278".to_string(), - subscription_uri: Some("/this/is/a/test".to_string()), - }, - EVENT_5719, - ); - assert!(event.additional.error.is_none()); - - let event_json = serde_json::to_string(&event).unwrap(); - - let event_json_value: Value = serde_json::from_str(&event_json).unwrap(); - let expected_value: Value = serde_json::from_str(EVENT_5719_JSON).unwrap(); - - assert_eq!(event_json_value, expected_value); - } - - const EVENT_6013: &str = r#"601304000x800000000000009427Systemwin10.windomain.local6600 Coordinated Universal 
Time31002E003100000030000000570069006E0064006F0077007300200031003000200045006E007400650072007000720069007300650020004500760061006C0075006100740069006F006E000000310030002E0030002E003100390030003400330020004200750069006C0064002000310039003000340033002000200000004D0075006C0074006900700072006F0063006500730073006F007200200046007200650065000000310039003000340031002E00760062005F00720065006C0065006100730065002E003100390031003200300036002D00310034003000360000003600320031003400640066003100630000004E006F007400200041007600610069006C00610062006C00650000004E006F007400200041007600610069006C00610062006C00650000003900000031000000320030003400380000003400300039000000770069006E00310030002E00770069006E0064006F006D00610069006E002E006C006F00630061006C0000000000The system uptime is 6 seconds.InformationClassic"#; - const EVENT_6013_JSON: &str = r#"{"System":{"Provider":{"Name":"EventLog"},"EventID":6013,"EventIDQualifiers":32768,"Version":0,"Level":4,"Task":0,"Opcode":0,"Keywords":"0x80000000000000","TimeCreated":"2022-12-14T16:04:43.7965565Z","EventRecordID":9427,"Correlation":{},"Execution":{"ProcessID":0,"ThreadID":0},"Channel":"System","Computer":"win10.windomain.local"},"EventData":{"Data":["6","60","0 Coordinated Universal 
Time"],"Binary":"31002E003100000030000000570069006E0064006F0077007300200031003000200045006E007400650072007000720069007300650020004500760061006C0075006100740069006F006E000000310030002E0030002E003100390030003400330020004200750069006C0064002000310039003000340033002000200000004D0075006C0074006900700072006F0063006500730073006F007200200046007200650065000000310039003000340031002E00760062005F00720065006C0065006100730065002E003100390031003200300036002D00310034003000360000003600320031003400640066003100630000004E006F007400200041007600610069006C00610062006C00650000004E006F007400200041007600610069006C00610062006C00650000003900000031000000320030003400380000003400300039000000770069006E00310030002E00770069006E0064006F006D00610069006E002E006C006F00630061006C0000000000"},"RenderingInfo":{"Message":"The system uptime is 6 seconds.","Level":"Information","Keywords":["Classic"],"Culture":"en-US"},"OpenWEC":{"IpAddress":"192.168.58.100","TimeReceived":"2022-12-14T16:07:02.524+00:00","Principal":"WIN10$@WINDOMAIN.LOCAL","Subscription":{"Uuid":"8B18D83D-2964-4F35-AC3B-6F4E6FFA727B","Version":"AD0D118F-31EF-4111-A0CA-D87249747278","Name":"Test","Uri":"/this/is/a/test"}}}"#; - - #[test] - fn test_serialize_6013_event_data_unamed_empty() { - let event = Event::from_str( - &EventMetadata { - addr: SocketAddr::from_str("192.168.58.100:5985").unwrap(), - principal: "WIN10$@WINDOMAIN.LOCAL".to_owned(), - node_name: None, - time_received: chrono::DateTime::parse_from_rfc3339( - "2022-12-14T17:07:02.524+01:00", - ) - .unwrap() - .with_timezone(&Utc), - subscription_name: "Test".to_string(), - subscription_uuid: "8B18D83D-2964-4F35-AC3B-6F4E6FFA727B".to_string(), - subscription_version: "AD0D118F-31EF-4111-A0CA-D87249747278".to_string(), - subscription_uri: Some("/this/is/a/test".to_string()), - }, - EVENT_6013, - ); - assert!(event.additional.error.is_none()); - - let event_json = serde_json::to_string(&event).unwrap(); - - let event_json_value: Value = serde_json::from_str(&event_json).unwrap(); 
- let expected_value: Value = serde_json::from_str(EVENT_6013_JSON).unwrap(); - - println!("{}", event_json_value); - println!("{}", expected_value); - assert_eq!(event_json_value, expected_value); - } - - const EVENT_1100: &str = r#"11000410300x4020000000000000114371Securitywin10.windomain.localThe event logging service has shut down.InformationService shutdownInfoSecurityMicrosoft-Windows-EventlogAudit Success"#; - const EVENT_1100_JSON: &str = r#"{"System":{"Provider":{"Name":"Microsoft-Windows-Eventlog","Guid":"{fc65ddd8-d6ef-4962-83d5-6e5cfe9ce148}"},"EventID":1100,"Version":0,"Level":4,"Task":103,"Opcode":0,"Keywords":"0x4020000000000000","TimeCreated":"2022-12-14T14:39:07.1686183Z","EventRecordID":114371,"Correlation":{},"Execution":{"ProcessID":496,"ThreadID":204},"Channel":"Security","Computer":"win10.windomain.local"},"UserData":"","RenderingInfo":{"Message":"The event logging service has shut down.","Level":"Information","Task":"Service shutdown","Opcode":"Info","Channel":"Security","Provider":"Microsoft-Windows-Eventlog","Keywords":["Audit Success"],"Culture":"en-US"},"OpenWEC":{"IpAddress":"192.168.58.100","TimeReceived":"2022-12-14T16:07:02.156+00:00","Principal":"WIN10$@WINDOMAIN.LOCAL","Node":"openwec","Subscription":{"Uuid":"8B18D83D-2964-4F35-AC3B-6F4E6FFA727B","Version":"AD0D118F-31EF-4111-A0CA-D87249747278","Name":"Test","Uri":"/this/is/a/test"}}}"#; - - #[test] - fn test_serialize_1100_user_data() { - let event = Event::from_str( - &EventMetadata { - addr: SocketAddr::from_str("192.168.58.100:5985").unwrap(), - principal: "WIN10$@WINDOMAIN.LOCAL".to_owned(), - node_name: Some("openwec".to_owned()), - time_received: chrono::DateTime::parse_from_rfc3339( - "2022-12-14T17:07:02.156+01:00", - ) - .unwrap() - .with_timezone(&Utc), - subscription_name: "Test".to_string(), - subscription_uuid: "8B18D83D-2964-4F35-AC3B-6F4E6FFA727B".to_string(), - subscription_version: "AD0D118F-31EF-4111-A0CA-D87249747278".to_string(), - subscription_uri: 
Some("/this/is/a/test".to_string()), - }, - EVENT_1100, - ); - assert!(event.additional.error.is_none()); - - let event_json = serde_json::to_string(&event).unwrap(); - - let event_json_value: Value = serde_json::from_str(&event_json).unwrap(); - let expected_value: Value = serde_json::from_str(EVENT_1100_JSON).unwrap(); - - assert_eq!(event_json_value, expected_value); - } - - const EVENT_111: &str = r#"111win10.windomain.local"#; - const EVENT_111_JSON: &str = r#"{"System":{"Provider":{"Name":"Microsoft-Windows-EventForwarder"},"EventID":111,"TimeCreated":"2023-02-14T09:14:23.175Z","Computer":"win10.windomain.local"},"OpenWEC":{"IpAddress":"192.168.58.100","TimeReceived":"2022-12-14T16:07:02.156+00:00","Principal":"WIN10$@WINDOMAIN.LOCAL","Node":"other_node","Subscription":{"Uuid":"8B18D83D-2964-4F35-AC3B-6F4E6FFA727B","Version":"AD0D118F-31EF-4111-A0CA-D87249747278","Name":"Test","Uri":"/this/is/a/test"}}}"#; - - #[test] - fn test_serialize_111() { - let event = Event::from_str( - &EventMetadata { - addr: SocketAddr::from_str("192.168.58.100:5985").unwrap(), - principal: "WIN10$@WINDOMAIN.LOCAL".to_owned(), - node_name: Some("other_node".to_owned()), - time_received: chrono::DateTime::parse_from_rfc3339( - "2022-12-14T17:07:02.156+01:00", - ) - .unwrap() - .with_timezone(&Utc), - subscription_name: "Test".to_string(), - subscription_uuid: "8B18D83D-2964-4F35-AC3B-6F4E6FFA727B".to_string(), - subscription_version: "AD0D118F-31EF-4111-A0CA-D87249747278".to_string(), - subscription_uri: Some("/this/is/a/test".to_string()), - }, - EVENT_111, - ); - assert!(event.additional.error.is_none()); - - let event_json = serde_json::to_string(&event).expect("Failed to serialize event"); - - let event_json_value: Value = serde_json::from_str(&event_json).unwrap(); - let expected_value: Value = serde_json::from_str(EVENT_111_JSON).unwrap(); - - assert_eq!(event_json_value, expected_value); - } - const RAW_CONTENT_RECOVERED: &str = 
r#"4798001382400x8020000000000000980236Securitydvas0004_xpsdavevxxxxx_xpsS-1-5-21-1604529354-1295832394-4197355770-1001S-1-5-18xxxxx_XPS$WORKGROUP0x3e70x28d4C:\\Windows\\System32\\svchost.exeA user's local group membership was enumerated. Subject: "#; - const RAW_CONTENT_RECOVERED_JSON: &str = r#"{"System":{ "Provider":{ "Name":"Microsoft-Windows-Security-Auditing", "Guid":"{54849625-5478-4994-a5ba-3e3b0328c30d}" }, "EventID":4798, "Version":0, "Level":0, "Task":13824, "Opcode":0, "Keywords":"0x8020000000000000", "TimeCreated":"2023-09-29T13:39:08.7234692Z", "EventRecordID":980236, "Correlation":{ "ActivityID":"{f59bb999-ec5b-0008-f6b9-9bf55becd901}" }, "Execution":{ "ProcessID":1440, "ThreadID":16952 }, "Channel":"Security", "Computer":"dvas0004_xps" }, "EventData":{ "SubjectLogonId":"0x3e7", "TargetDomainName":"xxxxx_xps", "CallerProcessId":"0x28d4", "CallerProcessName":"C:\\\\Windows\\\\System32\\\\svchost.exe", "TargetUserName":"davev", "SubjectDomainName":"WORKGROUP", "SubjectUserName":"xxxxx_XPS$", "TargetSid":"S-1-5-21-1604529354-1295832394-4197355770-1001", "SubjectUserSid":"S-1-5-18" }, "OpenWEC":{ "IpAddress":"127.0.0.1", "TimeReceived":"2023-09-29T14:33:12.574363325+00:00", "Principal":"demo-client", "Subscription":{ "Uuid":"91E05B32-F8F6-48CF-8AB4-4038233B83AC", "Version":"523D1886-E73E-4A96-A95D-F0326CB282F0", "Name":"my-test-subscription" }, "Error":{ "OriginalContent":"4798001382400x8020000000000000980236Securitydvas0004_xpsdavevxxxxx_xpsS-1-5-21-1604529354-1295832394-4197355770-1001S-1-5-18xxxxx_XPS$WORKGROUP0x3e70x28d4C:\\\\Windows\\\\System32\\\\svchost.exeA user's local group membership was enumerated. Subject: ", "Type": "RawContentRecovered", "Message":"Failed to parse event XML (the root node was opened but never closed) but Raw content could be recovered." 
} } }"#; #[test] fn test_serialize_malformed_raw_content_recovered() { // Try to serialize a malformed event, and use the recovering strategy to // recover its Raw content let event = Event::from_str( - &EventMetadata { - addr: SocketAddr::from_str("127.0.0.1:5985").unwrap(), - principal: "demo-client".to_string(), - node_name: None, - time_received: chrono::DateTime::parse_from_rfc3339( - "2023-09-29T14:33:12.574363325+00:00", - ) - .unwrap() - .with_timezone(&Utc), - subscription_name: "my-test-subscription".to_string(), - subscription_uuid: "91E05B32-F8F6-48CF-8AB4-4038233B83AC".to_string(), - subscription_version: "523D1886-E73E-4A96-A95D-F0326CB282F0".to_string(), - subscription_uri: None, - }, RAW_CONTENT_RECOVERED, ); - assert!(event.additional.error.is_some()); - let event_json = serde_json::to_string(&event).expect("Failed to serialize event"); + let error = event.additional.error.unwrap(); + assert_eq!(error.error_type, ErrorType::RawContentRecovered("Failed to parse event XML (the root node was opened but never closed) but Raw content could be recovered.".to_string())); + assert_eq!(error.original_content, RAW_CONTENT_RECOVERED); - let event_json_value: Value = serde_json::from_str(&event_json).unwrap(); - let expected_value: Value = serde_json::from_str(RAW_CONTENT_RECOVERED_JSON).unwrap(); + let system = event.system.unwrap(); + assert_eq!(system.provider.name.unwrap(), "Microsoft-Windows-Security-Auditing".to_string()); + assert_eq!(system.event_id, 4798); + assert_eq!(system.execution.unwrap().thread_id, 16952); - assert_eq!(event_json_value, expected_value); + assert!(event.rendering_info.is_none()); + + match event.data { + DataType::EventData(data) => { + assert_eq!(data.named_data.get("TargetDomainName").unwrap(), "xxxxx_xps"); + assert_eq!(data.named_data.get("TargetSid").unwrap(), "S-1-5-21-1604529354-1295832394-4197355770-1001"); + }, + _ => panic!("Wrong event data type") + }; } const UNRECOVERABLE_1: &str = 
r#"4798001382400x8020000000000000980236Securitydvas0004_xpsdavevxxxxx_xpsS-1-5-21-1604529354-1295832394-4197355770-1001S-1-5-18xxxxx_XPS$WORKGROUP0x3e70x28d4C:\\Windows\\System32\\svchost.exe"#; - const UNRECOVERABLE_1_JSON: &str = r#"{"OpenWEC":{ "IpAddress":"127.0.0.1", "TimeReceived":"2023-09-29T14:33:12.574363325+00:00", "Principal":"demo-client", "Subscription":{ "Uuid":"91E05B32-F8F6-48CF-8AB4-4038233B83AC", "Version":"523D1886-E73E-4A96-A95D-F0326CB282F0", "Name":"my-test-subscription" }, "Error":{ "Type": "Unrecoverable", "OriginalContent":"4798001382400x8020000000000000980236Securitydvas0004_xpsdavevxxxxx_xpsS-1-5-21-1604529354-1295832394-4197355770-1001S-1-5-18xxxxx_XPS$WORKGROUP0x3e70x28d4C:\\\\Windows\\\\System32\\\\svchost.exe", "Message":"Failed to parse event XML: the root node was opened but never closed" } } }"#; #[test] fn test_serialize_malformed_unrecoverable_1() { // Try to serialize an event for which there is no recovering strategy let event = Event::from_str( - &EventMetadata { - addr: SocketAddr::from_str("127.0.0.1:5985").unwrap(), - principal: "demo-client".to_string(), - node_name: None, - time_received: chrono::DateTime::parse_from_rfc3339( - "2023-09-29T14:33:12.574363325+00:00", - ) - .unwrap() - .with_timezone(&Utc), - subscription_name: "my-test-subscription".to_string(), - subscription_uuid: "91E05B32-F8F6-48CF-8AB4-4038233B83AC".to_string(), - subscription_version: "523D1886-E73E-4A96-A95D-F0326CB282F0".to_string(), - subscription_uri: None, - }, UNRECOVERABLE_1, ); assert!(event.additional.error.is_some()); + assert!(event.system.is_none()); + assert!(event.rendering_info.is_none()); - let event_json = serde_json::to_string(&event).expect("Failed to serialize event"); - - let event_json_value: Value = serde_json::from_str(&event_json).unwrap(); - let expected_value: Value = serde_json::from_str(UNRECOVERABLE_1_JSON).unwrap(); + match event.data { + DataType::Unknown => (), + _ => panic!("Wrong event data type") + }; - 
assert_eq!(event_json_value, expected_value); + let error = event.additional.error.unwrap(); + assert_eq!(error.error_type, ErrorType::Unrecoverable("Failed to parse event XML: the root node was opened but never closed".to_string())); + assert_eq!(error.original_content, UNRECOVERABLE_1); } const UNRECOVERABLE_2: &str = r#" (), + _ => panic!("Wrong event data type") + }; - assert_eq!(event_json_value, expected_value); + let error = event.additional.error.unwrap(); + assert_eq!(error.error_type, ErrorType::Unrecoverable("Failed to parse event XML: unexpected end of stream".to_string())); + assert_eq!(error.original_content, UNRECOVERABLE_2); } const FAILED_TO_RECOVER_RAW_CONTENT: &str = r#"4798001382400x8020000000000000980236Securitydvas0004_xpsdavevxxxxx_xpsS-1-5-21-1604529354-1295832394-4197355770-1001S-1-5-18xxxxx_XPS$WORKGROUP0x3e70x28d4C:\\Windows\\System32\\svchost.exe4798001382400x8020000000000000980236Securitydvas0004_xpsdavevxxxxx_xpsS-1-5-21-1604529354-1295832394-4197355770-1001S-1-5-18xxxxx_XPS$WORKGROUP0x3e70x28d4C:\\\\Windows\\\\System32\\\\svchost.exe (), + _ => panic!("Wrong event data type") + }; - assert_eq!(event_json_value, expected_value); + let error = event.additional.error.unwrap(); + assert_eq!(error.error_type, ErrorType::FailedToRecoverRawContent("Failed to parse event XML (invalid name token at 1:1088) and Raw content recovering failed (invalid name token at 1:1088)".to_string())); + assert_eq!(error.original_content, FAILED_TO_RECOVER_RAW_CONTENT); } const FAILED_TO_FEED_EVENT: &str = r#"4798001382400x8020000000000000980236Securitydvas0004_xpsdavevxxxxx_xpsS-1-5-21-1604529354-1295832394-4197355770-1001S-1-5-18xxxxx_XPS$WORKGROUP0x3e70x28d4C:\\Windows\\System32\\svchost.exeA use"#; - const FAILED_TO_FEED_EVENT_JSON: &str = r#"{"OpenWEC":{ "IpAddress":"127.0.0.1", "TimeReceived":"2023-09-29T14:33:12.574363325+00:00", "Principal":"demo-client", "Subscription":{ "Uuid":"91E05B32-F8F6-48CF-8AB4-4038233B83AC", 
"Version":"523D1886-E73E-4A96-A95D-F0326CB282F0", "Name":"my-test-subscription" }, "Error":{ "Type": "FailedToFeedEvent", "OriginalContent":"4798001382400x8020000000000000980236Securitydvas0004_xpsdavevxxxxx_xpsS-1-5-21-1604529354-1295832394-4197355770-1001S-1-5-18xxxxx_XPS$WORKGROUP0x3e70x28d4C:\\\\Windows\\\\System32\\\\svchost.exeA use", "Message":"Could not feed event from document: Parsing failure in System" } } }"#; #[test] fn test_serialize_malformed_failed_to_feed_event() { // Try to serialize a malformed event for which the recovering strategy can // not succeed because is invalid. let event = Event::from_str( - &EventMetadata { - addr: SocketAddr::from_str("127.0.0.1:5985").unwrap(), - principal: "demo-client".to_string(), - node_name: None, - time_received: chrono::DateTime::parse_from_rfc3339( - "2023-09-29T14:33:12.574363325+00:00", - ) - .unwrap() - .with_timezone(&Utc), - subscription_name: "my-test-subscription".to_string(), - subscription_uuid: "91E05B32-F8F6-48CF-8AB4-4038233B83AC".to_string(), - subscription_version: "523D1886-E73E-4A96-A95D-F0326CB282F0".to_string(), - subscription_uri: None, - }, FAILED_TO_FEED_EVENT, ); assert!(event.additional.error.is_some()); + assert!(event.system.is_none()); + assert!(event.rendering_info.is_none()); - let event_json = serde_json::to_string(&event).expect("Failed to serialize event"); - - let event_json_value: Value = serde_json::from_str(&event_json).unwrap(); - let expected_value: Value = serde_json::from_str(FAILED_TO_FEED_EVENT_JSON).unwrap(); + match event.data { + DataType::Unknown => (), + _ => panic!("Wrong event data type") + }; - assert_eq!(event_json_value, expected_value); + let error = event.additional.error.unwrap(); + assert_eq!(error.error_type, ErrorType::FailedToFeedEvent("Could not feed event from document: Parsing failure in System".to_string())); + assert_eq!(error.original_content, FAILED_TO_FEED_EVENT); } -} +} \ No newline at end of file diff --git a/server/src/formats/json.rs 
b/server/src/formats/json.rs new file mode 100644 index 0000000..600162a --- /dev/null +++ b/server/src/formats/json.rs @@ -0,0 +1,745 @@ +use std::{collections::HashMap, fmt::Debug, sync::Arc}; + +use log::warn; +use serde::Serialize; + +use crate::{ + event::{EventData, EventMetadata}, + output::OutputFormat, +}; + +pub struct JsonFormat; + +impl OutputFormat for JsonFormat { + fn format(&self, metadata: &EventMetadata, data: &EventData) -> Option> { + if let Some(event) = data.event() { + let json_event = JsonEvent::new(event.clone(), metadata); + match serde_json::to_string(&json_event) { + Ok(str) => Some(Arc::new(str)), + Err(e) => { + warn!( + "Failed to serialize event in JSON: {:?}. Event was: {:?}", + e, event + ); + None + } + } + } else { + warn!("Failed to retrieve parsed event"); + None + } + } +} + +#[derive(Debug, Serialize)] +struct EventDataType { + #[serde(flatten, skip_serializing_if = "HashMap::is_empty")] + named_data: HashMap, + #[serde(rename = "Data", skip_serializing_if = "Vec::is_empty")] + unamed_data: Vec, + #[serde(rename = "Binary", skip_serializing_if = "Option::is_none")] + binary: Option, +} + +impl From for EventDataType { + fn from(value: crate::event::EventDataType) -> Self { + Self { + named_data: value.named_data, + unamed_data: value.unamed_data, + binary: value.binary, + } + } +} + +#[derive(Debug, Serialize)] +struct DebugDataType { + #[serde(rename = "SequenceNumber", skip_serializing_if = "Option::is_none")] + sequence_number: Option, + #[serde(rename = "FlagsName", skip_serializing_if = "Option::is_none")] + flags_name: Option, + #[serde(rename = "LevelName", skip_serializing_if = "Option::is_none")] + level_name: Option, + #[serde(rename = "Component")] + component: String, + #[serde(rename = "SubComponent", skip_serializing_if = "Option::is_none")] + sub_component: Option, + #[serde(rename = "FileLine", skip_serializing_if = "Option::is_none")] + file_line: Option, + #[serde(rename = "Function", skip_serializing_if = 
"Option::is_none")] + function: Option, + #[serde(rename = "Message")] + message: String, +} + +impl From for DebugDataType { + fn from(value: crate::event::DebugDataType) -> Self { + Self { + sequence_number: value.sequence_number, + flags_name: value.flags_name, + level_name: value.level_name, + component: value.component, + sub_component: value.sub_component, + file_line: value.file_line, + function: value.function, + message: value.message, + } + } +} + +#[derive(Debug, Serialize)] +struct ProcessingErrorDataType { + #[serde(rename = "ErrorCode")] + error_code: u32, + #[serde(rename = "DataItemName")] + data_item_name: String, + #[serde(rename = "EventPayload")] + event_payload: String, +} + +impl From for ProcessingErrorDataType { + fn from(value: crate::event::ProcessingErrorDataType) -> Self { + Self { + error_code: value.error_code, + data_item_name: value.data_item_name, + event_payload: value.event_payload, + } + } +} + +#[derive(Debug, Serialize)] +enum DataType { + EventData(EventDataType), + UserData(String), + DebugData(DebugDataType), + ProcessingErrorData(ProcessingErrorDataType), + BinaryEventData(String), + Unknown, +} + +impl DataType { + fn is_unknown(&self) -> bool { + matches!(self, DataType::Unknown) + } +} + +impl From for DataType { + fn from(value: crate::event::DataType) -> Self { + match value { + crate::event::DataType::EventData(t) => DataType::EventData(t.into()), + crate::event::DataType::UserData(t) => DataType::UserData(t), + crate::event::DataType::DebugData(t) => DataType::DebugData(t.into()), + crate::event::DataType::ProcessingErrorData(t) => { + DataType::ProcessingErrorData(t.into()) + } + crate::event::DataType::BinaryEventData(t) => DataType::BinaryEventData(t), + crate::event::DataType::Unknown => DataType::Unknown, + } + } +} + +#[derive(Serialize, Debug)] +#[serde(tag = "Type")] +enum ErrorType { + /// Initial XML parsing failed but Raw content could be recovered + RawContentRecovered { + #[serde(rename = "Message")] + 
message: String, + }, + /// Initial XML parsing failed and recovering failed again + FailedToRecoverRawContent { + #[serde(rename = "Message")] + message: String, + }, + /// Initial XML parsing failed and no recovering strategy was usable + Unrecoverable { + #[serde(rename = "Message")] + message: String, + }, + /// Failed to feed event from parsed XML document + FailedToFeedEvent { + #[serde(rename = "Message")] + message: String, + }, + Unknown, +} + +impl From for ErrorType { + fn from(value: crate::event::ErrorType) -> Self { + match value { + crate::event::ErrorType::RawContentRecovered(message) => { + ErrorType::RawContentRecovered { message } + } + crate::event::ErrorType::FailedToRecoverRawContent(message) => { + ErrorType::FailedToRecoverRawContent { message } + } + crate::event::ErrorType::Unrecoverable(message) => ErrorType::Unrecoverable { message }, + crate::event::ErrorType::FailedToFeedEvent(message) => { + ErrorType::FailedToFeedEvent { message } + } + crate::event::ErrorType::Unknown => ErrorType::Unknown, + } + } +} + +#[derive(Debug, Serialize)] +struct ErrorInfo { + #[serde(rename = "OriginalContent")] + original_content: String, + #[serde(flatten)] + error_type: ErrorType, +} + +impl From for ErrorInfo { + fn from(value: crate::event::ErrorInfo) -> Self { + Self { + original_content: value.original_content, + error_type: value.error_type.into(), + } + } +} + +#[derive(Debug, Serialize)] +struct JsonEvent { + #[serde(rename = "System", skip_serializing_if = "Option::is_none")] + system: Option, + #[serde(flatten, skip_serializing_if = "DataType::is_unknown")] + data: DataType, + #[serde(rename = "RenderingInfo", skip_serializing_if = "Option::is_none")] + rendering_info: Option, + #[serde(rename = "OpenWEC")] + additional: Additional, +} + +impl JsonEvent { + pub fn new(event: crate::event::Event, metadata: &EventMetadata) -> Self { + Self { + system: event.system.map(Into::into), + data: event.data.into(), + rendering_info: 
event.rendering_info.map(Into::into), + additional: Additional::new(event.additional, metadata), + } + } +} + +#[derive(Debug, Serialize)] +struct Additional { + #[serde(rename = "IpAddress")] + addr: String, + #[serde(rename = "TimeReceived")] + time_received: String, + #[serde(rename = "Principal")] + principal: String, + #[serde(rename = "Subscription")] + subscription: SubscriptionType, + #[serde(rename = "Node", skip_serializing_if = "Option::is_none")] + node: Option, + #[serde(rename = "Error", skip_serializing_if = "Option::is_none")] + error: Option, +} + +impl Additional { + pub fn new(additional: crate::event::Additional, metadata: &EventMetadata) -> Self { + Self { + addr: metadata.addr().ip().to_string(), + principal: metadata.principal().to_owned(), // TODO : change to something that works for TLS as well (modify db and output) + node: metadata.node_name().cloned(), + time_received: metadata.time_received().to_rfc3339(), + subscription: SubscriptionType { + uuid: metadata.subscription_uuid().to_owned(), + version: metadata.subscription_version().to_owned(), + name: metadata.subscription_name().to_owned(), + uri: metadata.subscription_uri().cloned(), + client_revision: metadata.subscription_client_revision().cloned(), + server_revision: metadata.subscription_server_revision().cloned(), + }, + error: additional.error.map(Into::into), + } + } +} + +#[derive(Debug, Serialize, Clone)] +struct SubscriptionType { + #[serde(rename = "Uuid")] + uuid: String, + #[serde(rename = "Version")] + version: String, + #[serde(rename = "Name")] + name: String, + #[serde(rename = "Uri", skip_serializing_if = "Option::is_none")] + uri: Option, + #[serde(rename = "ClientRevision", skip_serializing_if = "Option::is_none")] + client_revision: Option, + #[serde(rename = "ServerRevision", skip_serializing_if = "Option::is_none")] + server_revision: Option, +} + +#[derive(Debug, Serialize, Clone)] +struct Provider { + #[serde(skip_serializing_if = "Option::is_none")] + 
#[serde(rename = "Name")] + name: Option, + #[serde(skip_serializing_if = "Option::is_none")] + #[serde(rename = "Guid")] + guid: Option, + #[serde(skip_serializing_if = "Option::is_none")] + #[serde(rename = "EventSourceName")] + event_source_name: Option, +} + +impl From for Provider { + fn from(value: crate::event::Provider) -> Self { + Self { + name: value.name, + guid: value.guid, + event_source_name: value.event_source_name, + } + } +} + +#[derive(Debug, Serialize, Clone)] +struct Correlation { + #[serde(skip_serializing_if = "Option::is_none")] + #[serde(rename = "ActivityID")] + activity_id: Option, + #[serde(skip_serializing_if = "Option::is_none")] + #[serde(rename = "RelatedActivityID")] + related_activity_id: Option, +} + +impl From for Correlation { + fn from(value: crate::event::Correlation) -> Self { + Self { + activity_id: value.activity_id, + related_activity_id: value.related_activity_id, + } + } +} + +#[derive(Debug, Serialize, Clone)] +struct Execution { + #[serde(rename = "ProcessID")] + process_id: u32, + #[serde(rename = "ThreadID")] + thread_id: u32, + #[serde(rename = "ProcessorID")] + #[serde(skip_serializing_if = "Option::is_none")] + processor_id: Option, + #[serde(rename = "SessionID")] + #[serde(skip_serializing_if = "Option::is_none")] + session_id: Option, + #[serde(rename = "KernelTime")] + #[serde(skip_serializing_if = "Option::is_none")] + kernel_time: Option, + #[serde(rename = "UserTime")] + #[serde(skip_serializing_if = "Option::is_none")] + user_time: Option, + #[serde(rename = "ProcessorTime")] + #[serde(skip_serializing_if = "Option::is_none")] + processor_time: Option, +} + +impl From for Execution { + fn from(value: crate::event::Execution) -> Self { + Self { + process_id: value.process_id, + thread_id: value.thread_id, + processor_id: value.processor_id, + session_id: value.session_id, + kernel_time: value.kernel_time, + user_time: value.user_time, + processor_time: value.processor_time, + } + } +} + +#[derive(Debug, 
Serialize, Clone)] +struct System { + #[serde(rename = "Provider")] + provider: Provider, + #[serde(rename = "EventID")] + event_id: u32, + #[serde(rename = "EventIDQualifiers", skip_serializing_if = "Option::is_none")] + event_id_qualifiers: Option, + #[serde(rename = "Version", skip_serializing_if = "Option::is_none")] + version: Option, + #[serde(rename = "Level", skip_serializing_if = "Option::is_none")] + level: Option, + #[serde(rename = "Task", skip_serializing_if = "Option::is_none")] + task: Option, + #[serde(rename = "Opcode", skip_serializing_if = "Option::is_none")] + opcode: Option, + #[serde(rename = "Keywords", skip_serializing_if = "Option::is_none")] + keywords: Option, + #[serde(rename = "TimeCreated", skip_serializing_if = "Option::is_none")] + time_created: Option, + #[serde(rename = "EventRecordID", skip_serializing_if = "Option::is_none")] + event_record_id: Option, + #[serde(rename = "Correlation", skip_serializing_if = "Option::is_none")] + correlation: Option, + #[serde(rename = "Execution", skip_serializing_if = "Option::is_none")] + execution: Option, + #[serde(rename = "Channel", skip_serializing_if = "Option::is_none")] + channel: Option, + #[serde(rename = "Computer")] + computer: String, + #[serde(rename = "Container", skip_serializing_if = "Option::is_none")] + container: Option, + #[serde(rename = "UserID", skip_serializing_if = "Option::is_none")] + user_id: Option, +} + +impl From for System { + fn from(value: crate::event::System) -> Self { + Self { + provider: value.provider.into(), + event_id: value.event_id, + event_id_qualifiers: value.event_id_qualifiers, + version: value.version, + level: value.level, + task: value.task, + opcode: value.opcode, + keywords: value.keywords, + time_created: value.time_created, + event_record_id: value.event_record_id, + correlation: value.correlation.map(Into::into), + execution: value.execution.map(Into::into), + channel: value.channel, + computer: value.computer, + container: 
value.container, + user_id: value.user_id, + } + } +} + +#[derive(Debug, Default, Serialize, Clone)] +struct RenderingInfo { + #[serde(rename = "Message", skip_serializing_if = "Option::is_none")] + message: Option, + #[serde(rename = "Level", skip_serializing_if = "Option::is_none")] + level: Option, + #[serde(rename = "Task", skip_serializing_if = "Option::is_none")] + task: Option, + #[serde(rename = "Opcode", skip_serializing_if = "Option::is_none")] + opcode: Option, + #[serde(rename = "Channel", skip_serializing_if = "Option::is_none")] + channel: Option, + #[serde(rename = "Provider", skip_serializing_if = "Option::is_none")] + provider: Option, + #[serde(rename = "Keywords", skip_serializing_if = "Option::is_none")] + keywords: Option>, + #[serde(rename = "Culture")] + culture: String, +} + +impl From for RenderingInfo { + fn from(value: crate::event::RenderingInfo) -> Self { + Self { + message: value.message, + level: value.level, + task: value.task, + opcode: value.opcode, + channel: value.channel, + provider: value.provider, + keywords: value.keywords, + culture: value.culture, + } + } +} + +#[cfg(test)] +mod tests { + use std::{net::SocketAddr, str::FromStr, sync::Arc}; + + use chrono::Utc; + use common::subscription::{SubscriptionData, SubscriptionUuid}; + use serde_json::Value; + use uuid::Uuid; + + use crate::{ + event::{EventData, EventMetadata}, + formats::json::JsonFormat, + output::OutputFormat, + subscription::Subscription, + }; + + const EVENT_4688: &str = r#"4688201331200x8020000000000000114689Securitywin10.windomain.localS-1-5-18WIN10$WINDOMAIN0x3e70x3a8C:\Program Files (x86)\Microsoft\EdgeUpdate\MicrosoftEdgeUpdate.exe%%19360x240S-1-0-0--0x0C:\Windows\System32\services.exeS-1-16-16384A new process has been created. 
+ +Creator Subject: + Security ID: S-1-5-18 + Account Name: WIN10$ + Account Domain: WINDOMAIN + Logon ID: 0x3E7 + +Target Subject: + Security ID: S-1-0-0 + Account Name: - + Account Domain: - + Logon ID: 0x0 + +Process Information: + New Process ID: 0x3a8 + New Process Name: C:\Program Files (x86)\Microsoft\EdgeUpdate\MicrosoftEdgeUpdate.exe + Token Elevation Type: %%1936 + Mandatory Label: S-1-16-16384 + Creator Process ID: 0x240 + Creator Process Name: C:\Windows\System32\services.exe + Process Command Line: + +Token Elevation Type indicates the type of token that was assigned to the new process in accordance with User Account Control policy. + +Type 1 is a full token with no privileges removed or groups disabled. A full token is only used if User Account Control is disabled or if the user is the built-in Administrator account or a service account. + +Type 2 is an elevated token with no privileges removed or groups disabled. An elevated token is used when User Account Control is enabled and the user chooses to start the program using Run as administrator. An elevated token is also used when an application is configured to always require administrative privilege or to always require maximum privilege, and the user is a member of the Administrators group. + +Type 3 is a limited token with administrative privileges removed and administrative groups disabled. 
The limited token is used when User Account Control is enabled, the application does not require administrative privilege, and the user does not choose to start the program using Run as administrator.InformationProcess CreationInfoSecurityMicrosoft Windows security auditing.Audit Success"#; + const EVENT_4688_JSON: &str = r#"{"System":{"Provider":{"Name":"Microsoft-Windows-Security-Auditing","Guid":"{54849625-5478-4994-a5ba-3e3b0328c30d}"},"EventID":4688,"Version":2,"Level":0,"Task":13312,"Opcode":0,"Keywords":"0x8020000000000000","TimeCreated":"2022-12-14T16:06:51.0643605Z","EventRecordID":114689,"Correlation":{},"Execution":{"ProcessID":4,"ThreadID":196},"Channel":"Security","Computer":"win10.windomain.local"},"EventData":{"SubjectLogonId":"0x3e7","SubjectUserName":"WIN10$","SubjectDomainName":"WINDOMAIN","ParentProcessName":"C:\\Windows\\System32\\services.exe","MandatoryLabel":"S-1-16-16384","SubjectUserSid":"S-1-5-18","NewProcessName":"C:\\Program Files (x86)\\Microsoft\\EdgeUpdate\\MicrosoftEdgeUpdate.exe","TokenElevationType":"%%1936","TargetUserSid":"S-1-0-0","TargetDomainName":"-","CommandLine":"","TargetUserName":"-","NewProcessId":"0x3a8","TargetLogonId":"0x0","ProcessId":"0x240"},"RenderingInfo":{"Message":"A new process has been created.\n\nCreator Subject:\n\tSecurity ID:\t\tS-1-5-18\n\tAccount Name:\t\tWIN10$\n\tAccount Domain:\t\tWINDOMAIN\n\tLogon ID:\t\t0x3E7\n\nTarget Subject:\n\tSecurity ID:\t\tS-1-0-0\n\tAccount Name:\t\t-\n\tAccount Domain:\t\t-\n\tLogon ID:\t\t0x0\n\nProcess Information:\n\tNew Process ID:\t\t0x3a8\n\tNew Process Name:\tC:\\Program Files (x86)\\Microsoft\\EdgeUpdate\\MicrosoftEdgeUpdate.exe\n\tToken Elevation Type:\t%%1936\n\tMandatory Label:\t\tS-1-16-16384\n\tCreator Process ID:\t0x240\n\tCreator Process Name:\tC:\\Windows\\System32\\services.exe\n\tProcess Command Line:\t\n\nToken Elevation Type indicates the type of token that was assigned to the new process in accordance with User Account Control policy.\n\nType 1 is a 
full token with no privileges removed or groups disabled. A full token is only used if User Account Control is disabled or if the user is the built-in Administrator account or a service account.\n\nType 2 is an elevated token with no privileges removed or groups disabled. An elevated token is used when User Account Control is enabled and the user chooses to start the program using Run as administrator. An elevated token is also used when an application is configured to always require administrative privilege or to always require maximum privilege, and the user is a member of the Administrators group.\n\nType 3 is a limited token with administrative privileges removed and administrative groups disabled. The limited token is used when User Account Control is enabled, the application does not require administrative privilege, and the user does not choose to start the program using Run as administrator.","Level":"Information","Task":"Process Creation","Opcode":"Info","Channel":"Security","Provider":"Microsoft Windows security auditing.","Keywords":["Audit Success"],"Culture":"en-US"},"OpenWEC":{"IpAddress":"192.168.58.100","TimeReceived":"2022-12-14T16:07:03.331+00:00","Principal":"WIN10$@WINDOMAIN.LOCAL","Node":"openwec","Subscription":{"Uuid":"8B18D83D-2964-4F35-AC3B-6F4E6FFA727B","Version":"188BB736-9441-5C66-188B-B73694415C66","Name":"Test","Uri":"/this/is/a/test","ClientRevision":"1234","ServerRevision":"babar"}}}"#; + + #[test] + fn test_serialize_4688_event_data() { + // Generate metadata + + let mut subscription_data = SubscriptionData::new("Test", ""); + subscription_data + .set_uuid(SubscriptionUuid( + Uuid::from_str("8B18D83D-2964-4F35-AC3B-6F4E6FFA727B").unwrap(), + )) + .set_uri(Some("/this/is/a/test".to_string())) + .set_revision(Some("babar".to_string())); + let subscription = Subscription::try_from(subscription_data).unwrap(); + + let mut metadata = EventMetadata::new( + &SocketAddr::from_str("192.168.58.100:5985").unwrap(), + "WIN10$@WINDOMAIN.LOCAL", 
+ Some("openwec".to_owned()), + &subscription, + "188BB736-9441-5C66-188B-B73694415C66".to_string(), + Some("1234".to_string()) + ); + metadata.set_time_received( + chrono::DateTime::parse_from_rfc3339("2022-12-14T17:07:03.331+01:00") + .unwrap() + .with_timezone(&Utc), + ); + + // Parse and check event + + let event_data = EventData::new(Arc::new(EVENT_4688.to_string()), true); + assert!(event_data.event().unwrap().additional.error.is_none()); + + let formatter: JsonFormat = JsonFormat; + let result = formatter.format(&metadata, &event_data).unwrap(); + + let event_json_value: Value = serde_json::from_str(&result).unwrap(); + let expected_value: Value = serde_json::from_str(EVENT_4688_JSON).unwrap(); + + assert_eq!(event_json_value, expected_value); + } + + const EVENT_1003: &str = r#"100304000x800000000000007603Applicationwin10.windomain.local55c92734-d682-4d71-983e-d6ec3f16059f +1: 3f4c0546-36c6-46a8-a37f-be13cdd0cf25, 1, 1 [(0 [0xC004E003, 0, 0], [( 9 0xC004FC07 90 0)( 1 0x00000000)(?)( 2 0x00000000 0 0 msft:rm/algorithm/hwid/4.0 0x00000000 0)(?)( 9 0xC004FC07 90 0)( 10 0x00000000 msft:rm/algorithm/flags/1.0)(?)])(1 )(2 )(3 [0x00000000, 0, 0], [( 6 0xC004F009 0 0)( 1 0x00000000)( 6 0xC004F009 0 0)(?)(?)(?)( 10 0x00000000 msft:rm/algorithm/flags/1.0)( 11 0x00000000 0xC004FC07)])] + +The Software Protection service has completed licensing status check. 
+Application Id=55c92734-d682-4d71-983e-d6ec3f16059f +Licensing Status= +1: 3f4c0546-36c6-46a8-a37f-be13cdd0cf25, 1, 1 [(0 [0xC004E003, 0, 0], [( 9 0xC004FC07 90 0)( 1 0x00000000)(?)( 2 0x00000000 0 0 msft:rm/algorithm/hwid/4.0 0x00000000 0)(?)( 9 0xC004FC07 90 0)( 10 0x00000000 msft:rm/algorithm/flags/1.0)(?)])(1 )(2 )(3 [0x00000000, 0, 0], [( 6 0xC004F009 0 0)( 1 0x00000000)( 6 0xC004F009 0 0)(?)(?)(?)( 10 0x00000000 msft:rm/algorithm/flags/1.0)( 11 0x00000000 0xC004FC07)])] + +InformationMicrosoft-Windows-Security-SPPClassic + "#; + const EVENT_1003_JSON: &str = r#"{"System":{"Provider":{"Name":"Microsoft-Windows-Security-SPP","Guid":"{E23B33B0-C8C9-472C-A5F9-F2BDFEA0F156}","EventSourceName":"Software Protection Platform Service"},"EventID":1003,"EventIDQualifiers":16384,"Version":0,"Level":4,"Task":0,"Opcode":0,"Keywords":"0x80000000000000","TimeCreated":"2022-12-14T16:05:59.7074374Z","EventRecordID":7603,"Correlation":{},"Execution":{"ProcessID":0,"ThreadID":0},"Channel":"Application","Computer":"win10.windomain.local"},"EventData":{"Data":["55c92734-d682-4d71-983e-d6ec3f16059f","\n1: 3f4c0546-36c6-46a8-a37f-be13cdd0cf25, 1, 1 [(0 [0xC004E003, 0, 0], [( 9 0xC004FC07 90 0)( 1 0x00000000)(?)( 2 0x00000000 0 0 msft:rm/algorithm/hwid/4.0 0x00000000 0)(?)( 9 0xC004FC07 90 0)( 10 0x00000000 msft:rm/algorithm/flags/1.0)(?)])(1 )(2 )(3 [0x00000000, 0, 0], [( 6 0xC004F009 0 0)( 1 0x00000000)( 6 0xC004F009 0 0)(?)(?)(?)( 10 0x00000000 msft:rm/algorithm/flags/1.0)( 11 0x00000000 0xC004FC07)])]\n\n"]},"RenderingInfo":{"Message":"The Software Protection service has completed licensing status check.\nApplication Id=55c92734-d682-4d71-983e-d6ec3f16059f\nLicensing Status=\n1: 3f4c0546-36c6-46a8-a37f-be13cdd0cf25, 1, 1 [(0 [0xC004E003, 0, 0], [( 9 0xC004FC07 90 0)( 1 0x00000000)(?)( 2 0x00000000 0 0 msft:rm/algorithm/hwid/4.0 0x00000000 0)(?)( 9 0xC004FC07 90 0)( 10 0x00000000 msft:rm/algorithm/flags/1.0)(?)])(1 )(2 )(3 [0x00000000, 0, 0], [( 6 0xC004F009 0 0)( 1 0x00000000)( 
6 0xC004F009 0 0)(?)(?)(?)( 10 0x00000000 msft:rm/algorithm/flags/1.0)( 11 0x00000000 0xC004FC07)])]\n\n","Level":"Information","Provider":"Microsoft-Windows-Security-SPP","Keywords":["Classic"],"Culture":"en-US"},"OpenWEC":{"IpAddress":"192.168.58.100","TimeReceived":"2022-12-14T16:07:03.324+00:00","Principal":"WIN10$@WINDOMAIN.LOCAL","Node":"openwec","Subscription":{"Uuid":"8B18D83D-2964-4F35-AC3B-6F4E6FFA727B","Version":"188BB736-9441-5C66-188B-B73694415C66","Name":"Test"}}}"#; + + #[test] + fn test_serialize_1003_event_data_unamed() { + let mut subscription_data = SubscriptionData::new("Test", ""); + subscription_data.set_uuid(SubscriptionUuid( + Uuid::from_str("8B18D83D-2964-4F35-AC3B-6F4E6FFA727B").unwrap(), + )); + let subscription = Subscription::try_from(subscription_data).unwrap(); + + let mut metadata = EventMetadata::new( + &SocketAddr::from_str("192.168.58.100:5985").unwrap(), + "WIN10$@WINDOMAIN.LOCAL", + Some("openwec".to_owned()), + &subscription, + "188BB736-9441-5C66-188B-B73694415C66".to_string(), + None + ); + metadata.set_time_received( + chrono::DateTime::parse_from_rfc3339("2022-12-14T17:07:03.324+01:00") + .unwrap() + .with_timezone(&Utc), + ); + + let event_data = EventData::new(Arc::new(EVENT_1003.to_string()), true); + + assert!(event_data.event().unwrap().additional.error.is_none()); + let formatter: JsonFormat = JsonFormat; + let result = formatter.format(&metadata, &event_data).unwrap(); + + let event_json_value: Value = serde_json::from_str(&result).unwrap(); + let expected_value: Value = serde_json::from_str(EVENT_1003_JSON).unwrap(); + + assert_eq!(event_json_value, expected_value); + } + + const EVENT_5719: &str = r#"571902000x800000000000009466Systemwin10.windomain.localWINDOMAIN%%13115E0000C0This computer was not able to set up a secure session with a domain controller in domain WINDOMAIN due to the following: +We can't sign you in with this credential because your domain isn't available. 
Make sure your device is connected to your organization's network and try again. If you previously signed in on this device with another credential, you can sign in with that credential. +This may lead to authentication problems. Make sure that this computer is connected to the network. If the problem persists, please contact your domain administrator. + +ADDITIONAL INFO +If this computer is a domain controller for the specified domain, it sets up the secure session to the primary domain controller emulator in the specified domain. Otherwise, this computer sets up the secure session to any domain controller in the specified domain.ErrorInfoClassic"#; + const EVENT_5719_JSON: &str = r#"{"System":{"Provider":{"Name":"NETLOGON"},"EventID":5719,"EventIDQualifiers":0,"Version":0,"Level":2,"Task":0,"Opcode":0,"Keywords":"0x80000000000000","TimeCreated":"2022-12-14T16:04:59.0817047Z","EventRecordID":9466,"Correlation":{},"Execution":{"ProcessID":0,"ThreadID":0},"Channel":"System","Computer":"win10.windomain.local"},"EventData":{"Data":["WINDOMAIN","%%1311"],"Binary":"5E0000C0"},"RenderingInfo":{"Message":"This computer was not able to set up a secure session with a domain controller in domain WINDOMAIN due to the following:\nWe can't sign you in with this credential because your domain isn't available. Make sure your device is connected to your organization's network and try again. If you previously signed in on this device with another credential, you can sign in with that credential.\nThis may lead to authentication problems. Make sure that this computer is connected to the network. If the problem persists, please contact your domain administrator.\n\nADDITIONAL INFO\nIf this computer is a domain controller for the specified domain, it sets up the secure session to the primary domain controller emulator in the specified domain. 
Otherwise, this computer sets up the secure session to any domain controller in the specified domain.","Level":"Error","Opcode":"Info","Keywords":["Classic"],"Culture":"en-US"},"OpenWEC":{"IpAddress":"192.168.58.100","TimeReceived":"2022-12-14T16:07:02.919+00:00","Principal":"WIN10$@WINDOMAIN.LOCAL","Node":"openwec","Subscription":{"Uuid":"8B18D83D-2964-4F35-AC3B-6F4E6FFA727B","Version":"188BB736-9441-5C66-188B-B73694415C66","Name":"Test","Uri":"/this/is/a/test","ServerRevision":"babar","ClientRevision": "babar"}}}"#; + + #[test] + fn test_serialize_5719_event_data_binary() { + let mut subscription_data = SubscriptionData::new("Test", ""); + subscription_data + .set_uuid(SubscriptionUuid( + Uuid::from_str("8B18D83D-2964-4F35-AC3B-6F4E6FFA727B").unwrap(), + )) + .set_uri(Some("/this/is/a/test".to_string())) + .set_revision(Some("babar".to_string())); + let subscription = Subscription::try_from(subscription_data).unwrap(); + + let mut metadata = EventMetadata::new( + &SocketAddr::from_str("192.168.58.100:5985").unwrap(), + "WIN10$@WINDOMAIN.LOCAL", + Some("openwec".to_owned()), + &subscription, + "188BB736-9441-5C66-188B-B73694415C66".to_string(), + Some("babar".to_string()) + ); + metadata.set_time_received( + chrono::DateTime::parse_from_rfc3339("2022-12-14T17:07:02.919+01:00") + .unwrap() + .with_timezone(&Utc), + ); + + let event_data = EventData::new(Arc::new(EVENT_5719.to_string()), true); + + assert!(event_data.event().unwrap().additional.error.is_none()); + let formatter: JsonFormat = JsonFormat; + let result = formatter.format(&metadata, &event_data).unwrap(); + + let event_json_value: Value = serde_json::from_str(&result).unwrap(); + let expected_value: Value = serde_json::from_str(EVENT_5719_JSON).unwrap(); + + assert_eq!(event_json_value, expected_value); + } + + const EVENT_6013: &str = r#"601304000x800000000000009427Systemwin10.windomain.local6600 Coordinated Universal 
Time31002E003100000030000000570069006E0064006F0077007300200031003000200045006E007400650072007000720069007300650020004500760061006C0075006100740069006F006E000000310030002E0030002E003100390030003400330020004200750069006C0064002000310039003000340033002000200000004D0075006C0074006900700072006F0063006500730073006F007200200046007200650065000000310039003000340031002E00760062005F00720065006C0065006100730065002E003100390031003200300036002D00310034003000360000003600320031003400640066003100630000004E006F007400200041007600610069006C00610062006C00650000004E006F007400200041007600610069006C00610062006C00650000003900000031000000320030003400380000003400300039000000770069006E00310030002E00770069006E0064006F006D00610069006E002E006C006F00630061006C0000000000The system uptime is 6 seconds.InformationClassic"#; + const EVENT_6013_JSON: &str = r#"{"System":{"Provider":{"Name":"EventLog"},"EventID":6013,"EventIDQualifiers":32768,"Version":0,"Level":4,"Task":0,"Opcode":0,"Keywords":"0x80000000000000","TimeCreated":"2022-12-14T16:04:43.7965565Z","EventRecordID":9427,"Correlation":{},"Execution":{"ProcessID":0,"ThreadID":0},"Channel":"System","Computer":"win10.windomain.local"},"EventData":{"Data":["6","60","0 Coordinated Universal 
Time"],"Binary":"31002E003100000030000000570069006E0064006F0077007300200031003000200045006E007400650072007000720069007300650020004500760061006C0075006100740069006F006E000000310030002E0030002E003100390030003400330020004200750069006C0064002000310039003000340033002000200000004D0075006C0074006900700072006F0063006500730073006F007200200046007200650065000000310039003000340031002E00760062005F00720065006C0065006100730065002E003100390031003200300036002D00310034003000360000003600320031003400640066003100630000004E006F007400200041007600610069006C00610062006C00650000004E006F007400200041007600610069006C00610062006C00650000003900000031000000320030003400380000003400300039000000770069006E00310030002E00770069006E0064006F006D00610069006E002E006C006F00630061006C0000000000"},"RenderingInfo":{"Message":"The system uptime is 6 seconds.","Level":"Information","Keywords":["Classic"],"Culture":"en-US"},"OpenWEC":{"IpAddress":"192.168.58.100","TimeReceived":"2022-12-14T16:07:02.524+00:00","Principal":"WIN10$@WINDOMAIN.LOCAL","Subscription":{"Uuid":"8B18D83D-2964-4F35-AC3B-6F4E6FFA727B","Version":"188BB736-9441-5C66-188B-B73694415C66","Name":"Test","Uri":"/this/is/a/test"}}}"#; + + #[test] + fn test_serialize_6013_event_data_unamed_empty() { + let mut subscription_data = SubscriptionData::new("Test", ""); + subscription_data + .set_uuid(SubscriptionUuid( + Uuid::from_str("8B18D83D-2964-4F35-AC3B-6F4E6FFA727B").unwrap(), + )) + .set_uri(Some("/this/is/a/test".to_string())); + let subscription = Subscription::try_from(subscription_data).unwrap(); + + let mut metadata = EventMetadata::new( + &SocketAddr::from_str("192.168.58.100:5985").unwrap(), + "WIN10$@WINDOMAIN.LOCAL", + None, + &subscription, + "188BB736-9441-5C66-188B-B73694415C66".to_string(), + None, + ); + metadata.set_time_received( + chrono::DateTime::parse_from_rfc3339("2022-12-14T17:07:02.524+01:00") + .unwrap() + .with_timezone(&Utc), + ); + + let event_data = EventData::new(Arc::new(EVENT_6013.to_string()), true); + + 
assert!(event_data.event().unwrap().additional.error.is_none()); + let formatter: JsonFormat = JsonFormat; + let result = formatter.format(&metadata, &event_data).unwrap(); + + let event_json_value: Value = serde_json::from_str(&result).unwrap(); + let expected_value: Value = serde_json::from_str(EVENT_6013_JSON).unwrap(); + + assert_eq!(event_json_value, expected_value); + } + + const EVENT_1100: &str = r#"11000410300x4020000000000000114371Securitywin10.windomain.localThe event logging service has shut down.InformationService shutdownInfoSecurityMicrosoft-Windows-EventlogAudit Success"#; + const EVENT_1100_JSON: &str = r#"{"System":{"Provider":{"Name":"Microsoft-Windows-Eventlog","Guid":"{fc65ddd8-d6ef-4962-83d5-6e5cfe9ce148}"},"EventID":1100,"Version":0,"Level":4,"Task":103,"Opcode":0,"Keywords":"0x4020000000000000","TimeCreated":"2022-12-14T14:39:07.1686183Z","EventRecordID":114371,"Correlation":{},"Execution":{"ProcessID":496,"ThreadID":204},"Channel":"Security","Computer":"win10.windomain.local"},"UserData":"","RenderingInfo":{"Message":"The event logging service has shut down.","Level":"Information","Task":"Service shutdown","Opcode":"Info","Channel":"Security","Provider":"Microsoft-Windows-Eventlog","Keywords":["Audit Success"],"Culture":"en-US"},"OpenWEC":{"IpAddress":"192.168.58.100","TimeReceived":"2022-12-14T16:07:02.156+00:00","Principal":"WIN10$@WINDOMAIN.LOCAL","Node":"openwec","Subscription":{"Uuid":"8B18D83D-2964-4F35-AC3B-6F4E6FFA727B","Version":"188BB736-9441-5C66-188B-B73694415C66","Name":"Test","Uri":"/this/is/a/test"}}}"#; + + #[test] + fn test_serialize_1100_user_data() { + let mut subscription_data = SubscriptionData::new("Test", ""); + subscription_data + .set_uuid(SubscriptionUuid( + Uuid::from_str("8B18D83D-2964-4F35-AC3B-6F4E6FFA727B").unwrap(), + )) + .set_uri(Some("/this/is/a/test".to_string())); + let subscription = Subscription::try_from(subscription_data).unwrap(); + + let mut metadata = EventMetadata::new( + 
&SocketAddr::from_str("192.168.58.100:5985").unwrap(), + "WIN10$@WINDOMAIN.LOCAL", + Some("openwec".to_owned()), + &subscription, + "188BB736-9441-5C66-188B-B73694415C66".to_string(), + None + ); + metadata.set_time_received( + chrono::DateTime::parse_from_rfc3339("2022-12-14T17:07:02.156+01:00") + .unwrap() + .with_timezone(&Utc), + ); + + let event_data = EventData::new(Arc::new(EVENT_1100.to_string()), true); + + assert!(event_data.event().unwrap().additional.error.is_none()); + let formatter: JsonFormat = JsonFormat; + let result = formatter.format(&metadata, &event_data).unwrap(); + + let event_json_value: Value = serde_json::from_str(&result).unwrap(); + let expected_value: Value = serde_json::from_str(EVENT_1100_JSON).unwrap(); + + assert_eq!(event_json_value, expected_value); + } + + const EVENT_111: &str = r#"111win10.windomain.local"#; + const EVENT_111_JSON: &str = r#"{"System":{"Provider":{"Name":"Microsoft-Windows-EventForwarder"},"EventID":111,"TimeCreated":"2023-02-14T09:14:23.175Z","Computer":"win10.windomain.local"},"OpenWEC":{"IpAddress":"192.168.58.100","TimeReceived":"2022-12-14T16:07:02.156+00:00","Principal":"WIN10$@WINDOMAIN.LOCAL","Node":"other_node","Subscription":{"Uuid":"8B18D83D-2964-4F35-AC3B-6F4E6FFA727B","Version":"188BB736-9441-5C66-188B-B73694415C66","Name":"Test","Uri":"/this/is/a/test"}}}"#; + + #[test] + fn test_serialize_111() { + let mut subscription_data = SubscriptionData::new("Test", ""); + subscription_data + .set_uuid(SubscriptionUuid( + Uuid::from_str("8B18D83D-2964-4F35-AC3B-6F4E6FFA727B").unwrap(), + )) + .set_uri(Some("/this/is/a/test".to_string())); + let subscription = Subscription::try_from(subscription_data).unwrap(); + + let mut metadata = EventMetadata::new( + &SocketAddr::from_str("192.168.58.100:5985").unwrap(), + "WIN10$@WINDOMAIN.LOCAL", + Some("other_node".to_owned()), + &subscription, + "188BB736-9441-5C66-188B-B73694415C66".to_string(), + None + ); + metadata.set_time_received( + 
chrono::DateTime::parse_from_rfc3339("2022-12-14T17:07:02.156+01:00") + .unwrap() + .with_timezone(&Utc), + ); + + let event_data = EventData::new(Arc::new(EVENT_111.to_string()), true); + + assert!(event_data.event().unwrap().additional.error.is_none()); + let formatter: JsonFormat = JsonFormat; + let result = formatter.format(&metadata, &event_data).unwrap(); + + let event_json_value: Value = serde_json::from_str(&result).unwrap(); + let expected_value: Value = serde_json::from_str(EVENT_111_JSON).unwrap(); + + assert_eq!(event_json_value, expected_value); + } +} diff --git a/server/src/formats/mod.rs b/server/src/formats/mod.rs new file mode 100644 index 0000000..cc875cf --- /dev/null +++ b/server/src/formats/mod.rs @@ -0,0 +1,3 @@ +pub mod json; +pub mod raw; +pub mod raw_json; \ No newline at end of file diff --git a/server/src/formats/raw.rs b/server/src/formats/raw.rs new file mode 100644 index 0000000..e312f48 --- /dev/null +++ b/server/src/formats/raw.rs @@ -0,0 +1,98 @@ +use std::sync::Arc; + +use crate::{ + event::{EventData, EventMetadata}, + output::OutputFormat, +}; + +pub struct RawFormat; + +impl OutputFormat for RawFormat { + fn format(&self, _metadata: &EventMetadata, data: &EventData) -> Option> { + Some(data.raw()) + } +} + +#[cfg(test)] +mod tests { + use std::{net::SocketAddr, str::FromStr, sync::Arc}; + + use chrono::Utc; + use common::subscription::{SubscriptionData, SubscriptionUuid}; + use uuid::Uuid; + + use crate::{ + event::{EventData, EventMetadata}, + formats::raw::RawFormat, + output::OutputFormat, + subscription::Subscription, + }; + + const EVENT_4688: &str = r#"4688201331200x8020000000000000114689Securitywin10.windomain.localS-1-5-18WIN10$WINDOMAIN0x3e70x3a8C:\Program Files (x86)\Microsoft\EdgeUpdate\MicrosoftEdgeUpdate.exe%%19360x240S-1-0-0--0x0C:\Windows\System32\services.exeS-1-16-16384A new process has been created. 
+ +Creator Subject: + Security ID: S-1-5-18 + Account Name: WIN10$ + Account Domain: WINDOMAIN + Logon ID: 0x3E7 + +Target Subject: + Security ID: S-1-0-0 + Account Name: - + Account Domain: - + Logon ID: 0x0 + +Process Information: + New Process ID: 0x3a8 + New Process Name: C:\Program Files (x86)\Microsoft\EdgeUpdate\MicrosoftEdgeUpdate.exe + Token Elevation Type: %%1936 + Mandatory Label: S-1-16-16384 + Creator Process ID: 0x240 + Creator Process Name: C:\Windows\System32\services.exe + Process Command Line: + +Token Elevation Type indicates the type of token that was assigned to the new process in accordance with User Account Control policy. + +Type 1 is a full token with no privileges removed or groups disabled. A full token is only used if User Account Control is disabled or if the user is the built-in Administrator account or a service account. + +Type 2 is an elevated token with no privileges removed or groups disabled. An elevated token is used when User Account Control is enabled and the user chooses to start the program using Run as administrator. An elevated token is also used when an application is configured to always require administrative privilege or to always require maximum privilege, and the user is a member of the Administrators group. + +Type 3 is a limited token with administrative privileges removed and administrative groups disabled. 
The limited token is used when User Account Control is enabled, the application does not require administrative privilege, and the user does not choose to start the program using Run as administrator.InformationProcess CreationInfoSecurityMicrosoft Windows security auditing.Audit Success"#; + + #[test] + fn test_raw_format_4688() { + // Generate metadata (which should be ignored) + let mut subscription_data = SubscriptionData::new("Test", ""); + subscription_data + .set_uuid(SubscriptionUuid( + Uuid::from_str("8B18D83D-2964-4F35-AC3B-6F4E6FFA727B").unwrap(), + )) + .set_uri(Some("/this/is/a/test".to_string())) + .set_revision(Some("1234".to_string())); + let subscription = Subscription::try_from(subscription_data).unwrap(); + + let mut metadata = EventMetadata::new( + &SocketAddr::from_str("192.168.58.100:5985").unwrap(), + "WIN10$@WINDOMAIN.LOCAL", + Some("openwec".to_owned()), + &subscription, + "188BB736-9441-5C66-188B-B73694415C66".to_string(), + Some("1234".to_string()) + ); + metadata.set_time_received( + chrono::DateTime::parse_from_rfc3339("2022-12-14T17:07:03.331+01:00") + .unwrap() + .with_timezone(&Utc), + ); + + // Parse and check event + + let event_data = EventData::new(Arc::new(EVENT_4688.to_string()), true); + assert!(event_data.event().unwrap().additional.error.is_none()); + + let formatter = RawFormat; + let result = formatter.format(&metadata, &event_data).unwrap(); + + assert_eq!(result.as_str(), EVENT_4688); + } +} diff --git a/server/src/formats/raw_json.rs b/server/src/formats/raw_json.rs new file mode 100644 index 0000000..46aad5d --- /dev/null +++ b/server/src/formats/raw_json.rs @@ -0,0 +1,171 @@ +use std::sync::Arc; + +use log::warn; +use serde::Serialize; + +use crate::{ + event::{EventData, EventMetadata}, + output::OutputFormat, +}; + +pub struct RawJsonFormat; + +#[derive(Serialize)] +struct RawJson { + meta: Metadata, + data: Arc, +} + +#[derive(Serialize)] +struct Metadata { + #[serde(rename = "IpAddress")] + addr: String, + 
#[serde(rename = "TimeReceived")] + time_received: String, + #[serde(rename = "Principal")] + principal: String, + #[serde(rename = "Subscription")] + subscription: SubscriptionType, + #[serde(rename = "Node", skip_serializing_if = "Option::is_none")] + node: Option, +} + +#[derive(Serialize)] +struct SubscriptionType { + #[serde(rename = "Uuid")] + uuid: String, + #[serde(rename = "Version")] + version: String, + #[serde(rename = "Name")] + name: String, + #[serde(rename = "Uri", skip_serializing_if = "Option::is_none")] + uri: Option, + #[serde(rename = "ClientRevision", skip_serializing_if = "Option::is_none")] + client_revision: Option, + #[serde(rename = "ServerRevision", skip_serializing_if = "Option::is_none")] + server_revision: Option, +} + +impl Metadata { + pub fn new(metadata: &EventMetadata) -> Self { + Self { + addr: metadata.addr().ip().to_string(), + principal: metadata.principal().to_owned(), // TODO : change to something that works for TLS as well (modify db and output) + node: metadata.node_name().cloned(), + time_received: metadata.time_received().to_rfc3339(), + subscription: SubscriptionType { + uuid: metadata.subscription_uuid().to_owned(), + version: metadata.subscription_version().to_owned(), + name: metadata.subscription_name().to_owned(), + uri: metadata.subscription_uri().cloned(), + client_revision: metadata.subscription_client_revision().cloned(), + server_revision: metadata.subscription_server_revision().cloned(), + }, + } + } +} + +impl OutputFormat for RawJsonFormat { + fn format(&self, metadata: &EventMetadata, data: &EventData) -> Option> { + let event = RawJson { + meta: Metadata::new(metadata), + data: data.raw(), + }; + match serde_json::to_string(&event) { + Ok(str) => Some(Arc::new(str)), + Err(e) => { + warn!("Failed to format event in Raw Json: {:?}.", e); + None + } + } + } +} + +#[cfg(test)] +mod tests { + use std::{net::SocketAddr, str::FromStr, sync::Arc}; + + use chrono::Utc; + use 
common::subscription::{SubscriptionData, SubscriptionUuid}; + use serde_json::Value; + use uuid::Uuid; + + use crate::{ + event::{EventData, EventMetadata}, + formats::raw_json::RawJsonFormat, + output::OutputFormat, + subscription::Subscription, + }; + + const EVENT_4688: &str = r#"4688201331200x8020000000000000114689Securitywin10.windomain.localS-1-5-18WIN10$WINDOMAIN0x3e70x3a8C:\Program Files (x86)\Microsoft\EdgeUpdate\MicrosoftEdgeUpdate.exe%%19360x240S-1-0-0--0x0C:\Windows\System32\services.exeS-1-16-16384A new process has been created. + +Creator Subject: + Security ID: S-1-5-18 + Account Name: WIN10$ + Account Domain: WINDOMAIN + Logon ID: 0x3E7 + +Target Subject: + Security ID: S-1-0-0 + Account Name: - + Account Domain: - + Logon ID: 0x0 + +Process Information: + New Process ID: 0x3a8 + New Process Name: C:\Program Files (x86)\Microsoft\EdgeUpdate\MicrosoftEdgeUpdate.exe + Token Elevation Type: %%1936 + Mandatory Label: S-1-16-16384 + Creator Process ID: 0x240 + Creator Process Name: C:\Windows\System32\services.exe + Process Command Line: + +Token Elevation Type indicates the type of token that was assigned to the new process in accordance with User Account Control policy. + +Type 1 is a full token with no privileges removed or groups disabled. A full token is only used if User Account Control is disabled or if the user is the built-in Administrator account or a service account. + +Type 2 is an elevated token with no privileges removed or groups disabled. An elevated token is used when User Account Control is enabled and the user chooses to start the program using Run as administrator. An elevated token is also used when an application is configured to always require administrative privilege or to always require maximum privilege, and the user is a member of the Administrators group. + +Type 3 is a limited token with administrative privileges removed and administrative groups disabled. 
The limited token is used when User Account Control is enabled, the application does not require administrative privilege, and the user does not choose to start the program using Run as administrator.InformationProcess CreationInfoSecurityMicrosoft Windows security auditing.Audit Success"#; + const EVENT_4688_JSON: &str = r#"{"meta":{"IpAddress":"192.168.58.100","TimeReceived":"2022-12-14T16:07:03.331+00:00","Principal":"WIN10$@WINDOMAIN.LOCAL","Subscription":{"Uuid":"8B18D83D-2964-4F35-AC3B-6F4E6FFA727B","Version":"188BB736-9441-5C66-188B-B73694415C66","Name":"Test","Uri":"/this/is/a/test","ClientRevision":"1234","ServerRevision":"tutu"},"Node":"openwec"},"data":"4688201331200x8020000000000000114689Securitywin10.windomain.localS-1-5-18WIN10$WINDOMAIN0x3e70x3a8C:\\Program Files (x86)\\Microsoft\\EdgeUpdate\\MicrosoftEdgeUpdate.exe%%19360x240S-1-0-0--0x0C:\\Windows\\System32\\services.exeS-1-16-16384A new process has been created.\n\nCreator Subject:\n\tSecurity ID:\t\tS-1-5-18\n\tAccount Name:\t\tWIN10$\n\tAccount Domain:\t\tWINDOMAIN\n\tLogon ID:\t\t0x3E7\n\nTarget Subject:\n\tSecurity ID:\t\tS-1-0-0\n\tAccount Name:\t\t-\n\tAccount Domain:\t\t-\n\tLogon ID:\t\t0x0\n\nProcess Information:\n\tNew Process ID:\t\t0x3a8\n\tNew Process Name:\tC:\\Program Files (x86)\\Microsoft\\EdgeUpdate\\MicrosoftEdgeUpdate.exe\n\tToken Elevation Type:\t%%1936\n\tMandatory Label:\t\tS-1-16-16384\n\tCreator Process ID:\t0x240\n\tCreator Process Name:\tC:\\Windows\\System32\\services.exe\n\tProcess Command Line:\t\n\nToken Elevation Type indicates the type of token that was assigned to the new process in accordance with User Account Control policy.\n\nType 1 is a full token with no privileges removed or groups disabled. A full token is only used if User Account Control is disabled or if the user is the built-in Administrator account or a service account.\n\nType 2 is an elevated token with no privileges removed or groups disabled. 
An elevated token is used when User Account Control is enabled and the user chooses to start the program using Run as administrator. An elevated token is also used when an application is configured to always require administrative privilege or to always require maximum privilege, and the user is a member of the Administrators group.\n\nType 3 is a limited token with administrative privileges removed and administrative groups disabled. The limited token is used when User Account Control is enabled, the application does not require administrative privilege, and the user does not choose to start the program using Run as administrator.InformationProcess CreationInfoSecurityMicrosoft Windows security auditing.Audit Success"}"#; + + #[test] + fn test_json_format_4688() { + // Generate metadata + let mut subscription_data = SubscriptionData::new("Test", ""); + subscription_data + .set_uuid(SubscriptionUuid( + Uuid::from_str("8B18D83D-2964-4F35-AC3B-6F4E6FFA727B").unwrap(), + )) + .set_uri(Some("/this/is/a/test".to_string())) + .set_revision(Some("tutu".to_string())); + let subscription = Subscription::try_from(subscription_data).unwrap(); + + let mut metadata = EventMetadata::new( + &SocketAddr::from_str("192.168.58.100:5985").unwrap(), + "WIN10$@WINDOMAIN.LOCAL", + Some("openwec".to_owned()), + &subscription, + "188BB736-9441-5C66-188B-B73694415C66".to_string(), + Some("1234".to_string()) + ); + metadata.set_time_received( + chrono::DateTime::parse_from_rfc3339("2022-12-14T17:07:03.331+01:00") + .unwrap() + .with_timezone(&Utc), + ); + + // Parse and check event + + let event_data = EventData::new(Arc::new(EVENT_4688.to_string()), true); + assert!(event_data.event().unwrap().additional.error.is_none()); + + let formatter = RawJsonFormat; + let result = formatter.format(&metadata, &event_data).unwrap(); + + let event_json_value: Value = serde_json::from_str(&result).unwrap(); + let expected_value: Value = serde_json::from_str(EVENT_4688_JSON).unwrap(); + + 
assert_eq!(event_json_value, expected_value); + } +} diff --git a/server/src/formatter.rs b/server/src/formatter.rs deleted file mode 100644 index a7da978..0000000 --- a/server/src/formatter.rs +++ /dev/null @@ -1,46 +0,0 @@ -use std::sync::Arc; - -use log::warn; - -use crate::event::{Event, EventMetadata}; -use common::subscription::SubscriptionOutputFormat; - -#[derive(Debug, Clone, Hash, PartialEq, Eq)] -pub enum Format { - Json, - Raw, -} - -impl From<&SubscriptionOutputFormat> for Format { - fn from(sof: &SubscriptionOutputFormat) -> Self { - match sof { - SubscriptionOutputFormat::Json => Format::Json, - SubscriptionOutputFormat::Raw => Format::Raw, - } - } -} - -impl Format { - pub fn format(&self, metadata: &EventMetadata, raw: Arc) -> Option> { - // Formatters are allowed to return None when they can't do - // anything else... - match &self { - Format::Json => format_json(metadata, raw), - Format::Raw => Some(raw), - } - } -} - -fn format_json(metadata: &EventMetadata, raw: Arc) -> Option> { - let event = Event::from_str(metadata, raw.as_ref()); - match serde_json::to_string(&event) { - Ok(str) => Some(Arc::new(str)), - Err(e) => { - warn!( - "Failed to serialize event in JSON: {:?}. 
Event was: {:?}", - e, event - ); - None - } - } -} diff --git a/server/src/kerberos.rs b/server/src/kerberos.rs index 1063389..8b98f56 100644 --- a/server/src/kerberos.rs +++ b/server/src/kerberos.rs @@ -1,9 +1,10 @@ use anyhow::{anyhow, bail, Context, Result}; use base64::Engine; use common::encoding::encode_utf16le; -use http::request::Parts; +use hyper::body::Incoming; use hyper::header::AUTHORIZATION; -use hyper::{body::Bytes, Body, Request}; +use hyper::http::request::Parts; +use hyper::{body::Bytes, Request}; use libgssapi::{ context::{CtxFlags, SecurityContext, ServerCtx}, credential::{Cred, CredUsage}, @@ -87,70 +88,83 @@ pub enum AuthenticationError { /// pub async fn authenticate( conn_state: &Arc>, - req: &Request, + req: &Request, ) -> Result { - let mut state = conn_state.lock().unwrap(); - let server_ctx = state - .context - .as_mut() - .ok_or_else(|| anyhow!("Kerberos server context is empty"))?; - - // Server context has already been established for this TCP connection - if server_ctx.is_complete() { - return Ok(AuthenticationData { - principal: server_ctx.source_name()?.to_string(), - token: None, - }); + { + let mut state = conn_state.lock().unwrap(); + let server_ctx = state + .context + .as_mut() + .ok_or_else(|| anyhow!("Kerberos server context is empty"))?; + + // Server context has already been established for this TCP connection + if server_ctx.is_complete() { + return Ok(AuthenticationData { + principal: server_ctx.source_name()?.to_string(), + token: None, + }); + } } - // TODO: return a specific error let auth_header = req .headers() .get(AUTHORIZATION) .ok_or_else(|| AuthenticationError::MissingAuthorizationHeader)? 
.to_str() - .context("Failed to convert authorization header to str")?; - - let b64_token = auth_header - .strip_prefix("Kerberos ") - .ok_or_else(|| anyhow!("Authorization header does not start with 'Kerberos '"))?; - let token = base64::engine::general_purpose::STANDARD - .decode(b64_token) - .context("Failed to decode authorization header token as base64")?; - match server_ctx - .step(&token) - .context("Failed to perform Kerberos operation")? - { - // TODO: should we return Ok in this case ? - None => Ok(AuthenticationData { - principal: server_ctx.source_name()?.to_string(), - token: None, - }), - Some(step) => { - // TODO: support multiple steps - // see RFC4559 "5. Negotiate Operation Example" - if !server_ctx.is_complete() { - return Err(anyhow!( - "Authentication is not complete after first round. Multiple rounds - are not supported" - ) - .into()); - } - let flags = server_ctx.flags().context("Error in server ctx")?; - let required_flags = CtxFlags::GSS_C_CONF_FLAG - | CtxFlags::GSS_C_MUTUAL_FLAG - | CtxFlags::GSS_C_INTEG_FLAG; - if flags & required_flags != required_flags { - return Err(anyhow!("Kerberos flags not compliant").into()); - } - - debug!("Server context info: {:?}", server_ctx.info()); - Ok(AuthenticationData { + .context("Failed to convert authorization header to str")? + .to_owned(); + let cloned_conn_state = conn_state.clone(); + + tokio::task::spawn_blocking(move || { + let b64_token = auth_header + .strip_prefix("Kerberos ") + .ok_or_else(|| anyhow!("Authorization header does not start with 'Kerberos '"))?; + let mut state = cloned_conn_state.lock().unwrap(); + let server_ctx = state + .context + .as_mut() + .ok_or_else(|| anyhow!("Kerberos server context is empty"))?; + let token = base64::engine::general_purpose::STANDARD + .decode(b64_token) + .context("Failed to decode authorization header token as base64")?; + + match server_ctx + .step(&token) + .context("Failed to perform Kerberos operation")? 
+ { + // TODO: should we return Ok in this case ? + None => Ok(AuthenticationData { principal: server_ctx.source_name()?.to_string(), - token: Some(base64::engine::general_purpose::STANDARD.encode(&*step)), - }) + token: None, + }), + Some(step) => { + // TODO: support multiple steps + // see RFC4559 "5. Negotiate Operation Example" + if !server_ctx.is_complete() { + return Err(anyhow!( + "Authentication is not complete after first round. Multiple rounds + are not supported" + ) + .into()); + } + let flags = server_ctx.flags().context("Error in server ctx")?; + let required_flags = CtxFlags::GSS_C_CONF_FLAG + | CtxFlags::GSS_C_MUTUAL_FLAG + | CtxFlags::GSS_C_INTEG_FLAG; + if flags & required_flags != required_flags { + return Err(anyhow!("Kerberos flags not compliant").into()); + } + + debug!("Server context info: {:?}", server_ctx.info()); + Ok(AuthenticationData { + principal: server_ctx.source_name()?.to_string(), + token: Some(base64::engine::general_purpose::STANDARD.encode(&*step)), + }) + } } - } + }) + .await + .map_err(|e| anyhow!("{}", e))? } fn get_boundary(mime: &Mime) -> Result { @@ -227,62 +241,80 @@ fn encrypt_payload(mut payload: Vec, server_ctx: &mut ServerCtx) -> Result>, + conn_state: Arc>, parts: Parts, data: Bytes, ) -> Result>> { - let content_type = match parts.headers.get("Content-Type") { - Some(content_type) => content_type, - None => bail!("Request does not contain 'Content-Type' header"), - }; - - let mime = content_type - .to_str()? 
- .parse::() - .context("Could not parse Content-Type header")?; - let boundary = get_boundary(&mime).context("Could not get multipart boundaries")?; - let encrypted_payload = multipart::read_multipart_body(&mut &*data, &boundary) - .context("Could not retrieve encrypted payload")?; - - let mut state = conn_state.lock().unwrap(); - let server_ctx = state - .context - .as_mut() - .ok_or_else(|| anyhow!("Kerberos server context is empty"))?; - - let decrypted_message = - decrypt_payload(encrypted_payload, server_ctx).context("Could not decrypt payload")?; - - let message = match parts.headers.get("Content-Encoding") { - Some(value) if value == "SLDC" => { - sldc::decompress(&decrypted_message).unwrap_or(decrypted_message) - } - None => decrypted_message, - value => bail!("Unsupported Content-Encoding {:?}", value), - }; + // Multiple blocking operations are done here: + // - retrieve encrypted payload from multipart request + // - decrypt payload + // - decompress payload + + let get_payload_task = tokio::task::spawn_blocking(move || { + let content_type = match parts.headers.get("Content-Type") { + Some(content_type) => content_type, + None => bail!("Request does not contain 'Content-Type' header"), + }; + + let mime = content_type + .to_str()? 
+ .parse::() + .context("Could not parse Content-Type header")?; + let boundary = get_boundary(&mime).context("Could not get multipart boundaries")?; + + let encrypted_payload = multipart::read_multipart_body(&mut &*data, &boundary) + .context("Could not retrieve encrypted payload")?; + let mut state = conn_state.lock().unwrap(); + let server_ctx = state + .context + .as_mut() + .ok_or_else(|| anyhow!("Kerberos server context is empty"))?; + + let decrypted_message = + decrypt_payload(encrypted_payload, server_ctx).context("Could not decrypt payload")?; + + let message = match parts.headers.get("Content-Encoding") { + Some(value) if value == "SLDC" => { + sldc::decompress(&decrypted_message).unwrap_or(decrypted_message) + } + None => decrypted_message, + value => bail!("Unsupported Content-Encoding {:?}", value), + }; + Ok(message) + }); + let message = get_payload_task.await??; Ok(Some(message)) } -pub fn get_response_payload( - conn_state: &Arc>, +pub async fn get_response_payload( + conn_state: Arc>, payload: String, - boundary: &str, + boundary: String, ) -> Result> { - let mut payload = encode_utf16le(payload).context("Failed to encode payload in utf16le")?; - - let cleartext_payload_len = payload.len(); - - let mut state = conn_state.lock().unwrap(); - let server_ctx = &mut state - .context - .as_mut() - .ok_or_else(|| anyhow!("Kerberos server context is empty"))?; - payload = encrypt_payload(payload, server_ctx).context("Failed to encrypt payload")?; - - Ok(multipart::get_multipart_body( - &payload, - cleartext_payload_len, - boundary, - )) + + // Multiple blocking operations are done here: + // - encode payload + // - encrypt payload + // - generate multipart body + + tokio::task::spawn_blocking(move || { + let mut payload = encode_utf16le(payload).context("Failed to encode payload in utf16le")?; + + let cleartext_payload_len = payload.len(); + + let mut state = conn_state.lock().unwrap(); + let server_ctx = &mut state + .context + .as_mut() + 
.ok_or_else(|| anyhow!("Kerberos server context is empty"))?; + payload = encrypt_payload(payload, server_ctx).context("Failed to encrypt payload")?; + + Ok(multipart::get_multipart_body( + &payload, + cleartext_payload_len, + &boundary, + )) + }) + .await? } diff --git a/server/src/lib.rs b/server/src/lib.rs index 785487f..563330a 100644 --- a/server/src/lib.rs +++ b/server/src/lib.rs @@ -1,14 +1,16 @@ #![allow(clippy::too_many_arguments)] +#![deny(unsafe_code)] +mod drivers; mod event; -mod formatter; +mod formats; mod heartbeat; mod kerberos; mod logging; mod logic; mod multipart; mod output; -mod outputs; +mod proxy_protocol; mod sldc; mod soap; mod subscription; @@ -21,71 +23,65 @@ use common::settings::{Authentication, Kerberos, Tls}; use common::settings::{Collector, Server as ServerSettings, Settings}; use core::pin::Pin; use futures::Future; -use futures_util::{future::join_all, StreamExt}; +use futures_util::future::join_all; use heartbeat::{heartbeat_task, WriteHeartbeatMessage}; -use http::response::Builder; -use http::status::StatusCode; -use hyper::body::{to_bytes, HttpBody}; +use http_body_util::combinators::BoxBody; +use http_body_util::{BodyExt, Empty, Full}; +use hyper::body::{Body, Bytes, Incoming}; use hyper::header::{CONTENT_TYPE, WWW_AUTHENTICATE}; -use hyper::server::accept; -use hyper::server::conn::AddrIncoming; -use hyper::server::conn::AddrStream; -use hyper::service::{make_service_fn, service_fn}; -use hyper::{Body, Request, Response, Server}; +use hyper::http::response::Builder; +use hyper::http::status::StatusCode; +use hyper::server::conn::http1; +use hyper::service::service_fn; +use hyper::{Request, Response}; +use hyper_util::rt::TokioIo; use kerberos::AuthenticationError; -use lazy_static::lazy_static; use libgssapi::error::MajorFlags; use log::{debug, error, info, trace, warn}; use quick_xml::writer::Writer; -use regex::Regex; use soap::Serializable; +use socket2::{SockRef, TcpKeepalive}; use std::boxed::Box; use 
std::collections::HashMap; use std::convert::Infallible; -use std::future::ready; use std::io::Cursor; use std::net::{IpAddr, SocketAddr}; use std::str::FromStr; use std::sync::Mutex; use std::sync::{Arc, RwLock}; use std::time::{Duration, Instant}; -use std::{env, mem}; +use std::{env, future, mem}; use subscription::{reload_subscriptions_task, Subscriptions}; -use tls_listener::TlsListener; +use tokio::io::AsyncRead; +use tokio::net::TcpListener; +use tokio::pin; +use tokio::runtime::Handle; use tokio::signal::unix::SignalKind; -use tokio::sync::{mpsc, oneshot}; -use tokio_rustls::server::TlsStream; +use tokio::sync::{mpsc, oneshot, watch}; use tokio_rustls::TlsAcceptor; use tokio_util::sync::CancellationToken; use crate::logging::ACCESS_LOGGER; +use crate::proxy_protocol::read_proxy_header; use crate::tls::{make_config, subject_from_cert}; pub enum RequestCategory { Enumerate(String), - Subscription(String), + Subscription, } -impl TryFrom<&Request> for RequestCategory { +impl TryFrom<&Request> for RequestCategory { type Error = anyhow::Error; - fn try_from(req: &Request) -> Result { + fn try_from(req: &Request) -> Result { if req.method() != "POST" { bail!("Invalid HTTP method {}", req.method()); } - lazy_static! { - static ref SUBSCRIPTION_RE: Regex = Regex::new(r"^/wsman/subscriptions/([0-9A-Fa-f]{8}\b-[0-9A-Fa-f]{4}\b-[0-9A-Fa-f]{4}\b-[0-9A-Fa-f]{4}\b-[0-9A-F]{12})$").expect("Failed to compile SUBSCRIPTION regular expression"); + if req.uri().path().starts_with("/wsman/subscriptions/") { + Ok(Self::Subscription) + } else { + Ok(Self::Enumerate(req.uri().to_string())) } - if let Some(c) = SUBSCRIPTION_RE.captures(req.uri().path()) { - return Ok(RequestCategory::Subscription( - c.get(1) - .ok_or_else(|| anyhow!("Could not get identifier from URI"))? 
- .as_str() - .to_owned(), - )); - } - - return Ok(Self::Enumerate(req.uri().to_string())); } } @@ -96,7 +92,7 @@ pub struct RequestData { } impl RequestData { - fn new(principal: &str, remote_addr: &SocketAddr, req: &Request) -> Result { + fn new(principal: &str, remote_addr: &SocketAddr, req: &Request) -> Result { Ok(RequestData { principal: principal.to_owned(), remote_addr: remote_addr.to_owned(), @@ -128,10 +124,21 @@ pub enum AuthenticationContext { Tls(String, String), } +fn empty() -> BoxBody { + // Empty::new().map_err(|never| match never {}).boxed() + Empty::new().boxed() +} + +fn full>(chunk: T) -> BoxBody { + Full::new(chunk.into()) + // .map_err(|never| match never {}) + .boxed() +} + async fn get_request_payload( collector: &Collector, auth_ctx: &AuthenticationContext, - req: Request, + req: Request, ) -> Result> { let (parts, body) = req.into_parts(); @@ -151,9 +158,11 @@ async fn get_request_payload( ); } - let data = to_bytes(body) + let data = body + .collect() .await - .context("Could not retrieve request body")?; + .context("Could not retrieve request body")? + .to_bytes(); if data.is_empty() { return Ok(None); @@ -161,44 +170,50 @@ async fn get_request_payload( let message = match auth_ctx { AuthenticationContext::Tls(_, _) => tls::get_request_payload(parts, data).await?, - AuthenticationContext::Kerberos(conn_state) => { - kerberos::get_request_payload(conn_state, parts, data).await? + AuthenticationContext::Kerberos(conn_state) => { + kerberos::get_request_payload(conn_state.to_owned(), parts, data).await? } }; match message { - Some(bytes) => Ok(Some(decode_utf16le(bytes)?)), + Some(bytes) => { + // Spawn a blocking task to decode utf16 + tokio::task::spawn_blocking(|| Ok(Some(decode_utf16le(bytes)?))).await? 
+ } _ => Ok(None), } } -fn create_response( +async fn create_response( auth_ctx: &AuthenticationContext, mut response: Builder, payload: Option, -) -> Result> { +) -> Result>> { match auth_ctx { AuthenticationContext::Tls(_, _) => { if payload.is_some() { response = response.header(CONTENT_TYPE, "application/soap+xml;charset=UTF-16"); } let body = match payload { - None => Body::empty(), - Some(payload) => Body::from( - tls::get_response_payload(payload).context("Failed to compute TLS payload")?, + None => empty(), + Some(payload) => full( + tls::get_response_payload(payload) + .await + .context("Failed to compute TLS payload")?, ), }; Ok(response.body(body)?) } AuthenticationContext::Kerberos(conn_state) => { - let boundary = "Encrypted Boundary"; + let boundary = "Encrypted Boundary".to_owned(); if payload.is_some() { - response = response.header(CONTENT_TYPE, "multipart/encrypted;protocol=\"application/HTTP-Kerberos-session-encrypted\";boundary=\"".to_owned() + boundary + "\""); + response = response.header(CONTENT_TYPE, "multipart/encrypted;protocol=\"application/HTTP-Kerberos-session-encrypted\";boundary=\"".to_owned() + &boundary + "\""); } let body = match payload { - None => Body::empty(), - Some(payload) => Body::from( - kerberos::get_response_payload(conn_state, payload, boundary) + None => empty(), + Some(payload) => full( + kerberos::get_response_payload(conn_state.clone(), payload, boundary) + .await .context("Failed to compute Kerberos encrypted payload")?, ), }; @@ -207,7 +222,7 @@ fn create_response( } } -fn log_auth_error(addr: &SocketAddr, req: &Request, err_str: String, do_warn: bool) { +fn log_auth_error(addr: &SocketAddr, req: &Request, err_str: String, do_warn: bool) { let str_format = format!( "Authentication failed for {}:{} ({}:{}): {}", addr.ip(), @@ -226,7 +241,7 @@ fn log_auth_error(addr: &SocketAddr, req: &Request, err_str: String, do_wa async fn authenticate( auth_ctx: &AuthenticationContext, - req: &Request, + req: &Request, addr: 
&SocketAddr, ) -> Result<(String, Builder)> { match auth_ctx { @@ -241,7 +256,6 @@ async fn authenticate( Ok((subject.to_owned(), response)) } AuthenticationContext::Kerberos(conn_state) => { - let mut response = Response::builder(); let auth_result = kerberos::authenticate(conn_state, req) .await .map_err(|err| { @@ -259,6 +273,9 @@ async fn authenticate( } err })?; + + let mut response = Response::builder(); + if let Some(token) = auth_result.token() { response = response.header(WWW_AUTHENTICATE, format!("Kerberos {}", token)) } @@ -280,7 +297,12 @@ async fn handle_payload( match request_payload { None => Ok((StatusCode::OK, None)), Some(payload) => { - let message = soap::parse(&payload).context("Failed to parse SOAP message")?; + // Parsing xml takes some time + let message = tokio::task::spawn_blocking(move || { + soap::parse(&payload).context("Failed to parse SOAP message") + }) + .await??; + trace!("Parsed request: {:?}", message); let response = logic::handle_message( server, @@ -300,13 +322,30 @@ async fn handle_payload( logic::Response::Ok(action, body) => { let payload = soap::Message::response_from(&message, &action, body) .context("Failed to build a response payload")?; - let mut writer = Writer::new(Cursor::new(Vec::new())); - payload - .serialize(&mut writer) - .context("Failed to serialize response payload")?; - let result = String::from_utf8(writer.into_inner().into_inner())?; - trace!("Response is: {}", result); - Ok((StatusCode::OK, Some(result))) + // If body is Some(), it means that we send EnumerationResponse + // In this case, message serialization takes some time and should be executed + // in a blocking task + let result: Result = if payload.body.is_some() { + tokio::task::spawn_blocking(move || { + let mut writer = Writer::new(Cursor::new(Vec::new())); + payload + .serialize(&mut writer) + .context("Failed to serialize response payload")?; + let result = String::from_utf8(writer.into_inner().into_inner())?; + Ok(result) + }) + .await? 
+ } else { + let mut writer = Writer::new(Cursor::new(Vec::new())); + payload + .serialize(&mut writer) + .context("Failed to serialize response payload")?; + let result = String::from_utf8(writer.into_inner().into_inner())?; + Ok(result) + }; + let response_payload = result?; + trace!("Response is: {}", response_payload); + Ok((StatusCode::OK, Some(response_payload))) } } } @@ -358,10 +397,10 @@ fn log_response( log_mdc::clear(); } -fn build_error_response(status: StatusCode) -> Response { +fn build_error_response(status: StatusCode) -> Response> { Response::builder() .status(status) - .body(Body::empty()) + .body(empty()) .expect("Failed to build HTTP response") } @@ -373,8 +412,8 @@ async fn handle( heartbeat_tx: mpsc::Sender, auth_ctx: AuthenticationContext, addr: SocketAddr, - req: Request, -) -> Result, Infallible> { + req: Request, +) -> Result>, Infallible> { let start = Instant::now(); debug!( @@ -556,7 +595,7 @@ async fn handle( response_builder = response_builder.status(status); // Create HTTP response - let response = match create_response(&auth_ctx, response_builder, response_payload) { + let response = match create_response(&auth_ctx, response_builder, response_payload).await { Ok(response) => response, Err(e) => { error!("Failed to build HTTP response: {:?}", e); @@ -586,6 +625,44 @@ async fn handle( Ok(response) } +fn create_keepalive_settings(collector_server_settings: &ServerSettings) -> TcpKeepalive { + let tcp_keepalive_time = Duration::from_secs(collector_server_settings.tcp_keepalive_time()); + let tcp_keepalive_interval = collector_server_settings + .tcp_keepalive_intvl() + .map(Duration::from_secs); + let tcp_keepalive_probes = collector_server_settings.tcp_keepalive_probes(); + + let keep_alive = TcpKeepalive::new().with_time(tcp_keepalive_time); + let keep_alive = if let Some(tcp_keepalive_interval) = tcp_keepalive_interval { + keep_alive.with_interval(tcp_keepalive_interval) + } else { + keep_alive + }; + if let 
Some(tcp_keepalive_retries) = tcp_keepalive_probes { + keep_alive.with_retries(tcp_keepalive_retries) + } else { + keep_alive + } +} + +async fn read_proxy_protocol_header(stream: I) -> Result +where + I: AsyncRead + Unpin, +{ + match read_proxy_header(stream).await { + Ok((_, addr_opt)) => match addr_opt { + Some(addr) => { + debug!("Real client address is {:?}", addr); + Ok(addr) + } + None => { + bail!("Failed to retrieve client address"); + } + }, + Err(err) => Err(anyhow!(err)), + } +} + fn create_kerberos_server( kerberos_settings: &Kerberos, collector_settings: Collector, @@ -593,68 +670,145 @@ fn create_kerberos_server( collector_subscriptions: Subscriptions, collector_heartbeat_tx: mpsc::Sender, collector_server_settings: ServerSettings, - addr: SocketAddr, -) -> Pin> + Send>> { - let principal = kerberos_settings.service_principal_name().to_owned(); + collector_shutdown_ct: CancellationToken, + server_addr: SocketAddr, +) -> Pin> + Send>> { + let server_principal = kerberos_settings.service_principal_name().to_owned(); // Try to initialize a security context. This is to be sure that an error in // Kerberos configuration will be reported as soon as possible. - let state = kerberos::State::new(&principal); + let state = kerberos::State::new(&server_principal); if state.context_is_none() { panic!("Could not initialize Kerberos context"); } - let tcp_keepalive_time = Duration::from_secs(collector_server_settings.tcp_keepalive_time()); - let tcp_keepalive_interval = collector_server_settings - .tcp_keepalive_intvl() - .map(Duration::from_secs); - let tcp_keepalive_probes = collector_server_settings.tcp_keepalive_probes(); + let server = async move { + let listener = TcpListener::bind(server_addr).await?; + info!("Server listenning on {}", server_addr); + + // Each accepted TCP connection gets a channel 'rx', which is closed when + // the connections ends (whether because the client closed the connection + // or if a shutdown signal has been received). 
+ // On shutdown, the server waits for all 'rx' to be dropped before + // resolving terminating using `close_tx.closed().await`. + let (close_tx, close_rx) = watch::channel(()); + loop { + let shutdown_ct = collector_shutdown_ct.clone(); + + // Accept new clients and wait for shutdown signal to stop accepting + let (mut stream, client_addr) = tokio::select! { + conn = listener.accept() => match conn { + Ok(conn) => conn, + Err(err) => { + warn!("Could not get client: {:?}", err); + continue; + } + }, + _ = shutdown_ct.cancelled() => { + debug!("Shutdown signal received, stop accepting new clients connections"); + break; + } + }; - // A `MakeService` that produces a `Service` to handle each connection. - let make_service = make_service_fn(move |conn: &AddrStream| { - // We have to clone the context to share it with each invocation of - // `make_service`. - - // Initialize Kerberos context once for each TCP connection - let collector_settings = collector_settings.clone(); - let svc_db = collector_db.clone(); - let svc_server_settings = collector_server_settings.clone(); - let auth_ctx = - AuthenticationContext::Kerberos(Arc::new(Mutex::new(kerberos::State::new(&principal)))); - let subscriptions = collector_subscriptions.clone(); - let collector_heartbeat_tx = collector_heartbeat_tx.clone(); - - let addr = conn.remote_addr(); - - debug!("Received TCP connection from {}", addr); - - // Create a `Service` for responding to the request. 
- let service = service_fn(move |req| { - handle( - svc_server_settings.clone(), - collector_settings.clone(), - svc_db.clone(), - subscriptions.clone(), - collector_heartbeat_tx.clone(), - auth_ctx.clone(), - addr, - req, - ) - }); + debug!("Received TCP connection from {}", client_addr); + + // Configure connected socket with keepalive parameters + let keep_alive = create_keepalive_settings(&collector_server_settings); + let socket_ref = SockRef::from(&stream); + socket_ref.set_tcp_keepalive(&keep_alive)?; + + // We have to clone the context to move it into the tokio task + // responsible for handling the client + let collector_settings = collector_settings.clone(); + let svc_db = collector_db.clone(); + let svc_server_settings = collector_server_settings.clone(); + let svc_server_principal = server_principal.clone(); + let subscriptions = collector_subscriptions.clone(); + let collector_heartbeat_tx = collector_heartbeat_tx.clone(); + + // Create a "rx" channel end for the task + let close_rx = close_rx.clone(); + + tokio::task::spawn(async move { + // Parse proxy protocol if enabled + let real_client_addr = if collector_settings.enable_proxy_protocol() { + match read_proxy_protocol_header(&mut stream).await { + Ok(addr) => addr, + Err(err) => { + bail!("Failed to read Proxy Protocol header: {}", err); + } + } + } else { + client_addr + }; - // Return the service to hyper. 
- async move { Ok::<_, Infallible>(service) } - }); + // Initialize Kerberos context once for each TCP connection + // This operation takes time so we run it in a blocking task + let auth_ctx = tokio::task::spawn_blocking(move || { + AuthenticationContext::Kerberos(Arc::new(Mutex::new(kerberos::State::new( + &svc_server_principal, + )))) + }) + .await?; + + // Hyper needs a wrapper for the stream + let io = TokioIo::new(stream); + + // Handle the connection using Hyper http1 + // conn is a Future that ends when the connection is closed + let conn = http1::Builder::new().serve_connection( + io, + service_fn(move |req| { + handle( + svc_server_settings.clone(), + collector_settings.clone(), + svc_db.clone(), + subscriptions.clone(), + collector_heartbeat_tx.clone(), + auth_ctx.clone(), + real_client_addr, + req, + ) + }), + ); + // conn needs to be pinned to be able to use tokio::select! + pin!(conn); + + // This loop is required to continue to poll the connection after calling + // graceful_shutdown(). + tokio::select! { + res = conn.as_mut() => { + if let Err(err) = res { + debug!("Error serving connection: {:?}", err); + } + }, + _ = shutdown_ct.cancelled() => { + debug!("Shutdown signal received, closing connection with {:?}", client_addr); + conn.as_mut().graceful_shutdown(); + if let Err(err) = conn.as_mut().await { + debug!("Error serving connection: {:?}", err); + }; + } + } + // Connection is closed, drop "task" rx to inform the server that this task + // is ending + drop(close_rx); - // Then bind and serve... 
- let server = Server::bind(&addr) - .tcp_keepalive(Some(tcp_keepalive_time)) - .tcp_keepalive_interval(tcp_keepalive_interval) - .tcp_keepalive_retries(tcp_keepalive_probes) - .serve(make_service) - .with_graceful_shutdown(http_shutdown_signal()); + Ok(()) + }); + } + + // Drop "server" rx to keep only "tasks" rx + drop(close_rx); + + info!( + "Waiting for {} task(s) to finish", + close_tx.receiver_count() + ); + close_tx.closed().await; + + Ok(()) + }; - info!("Server listenning on {}", addr); - // XXX : because the 2 closures have different types we use this, but may be better way to do this Box::pin(server) } @@ -665,92 +819,170 @@ fn create_tls_server( collector_subscriptions: Subscriptions, collector_heartbeat_tx: mpsc::Sender, collector_server_settings: ServerSettings, - addr: SocketAddr, -) -> Pin> + Send>> { + collector_shutdown_ct: CancellationToken, + server_addr: SocketAddr, +) -> Pin> + Send>> { // make TLS connection config let tls_config = make_config(tls_settings).expect("Error while configuring server"); - - let tcp_keepalive_time = Duration::from_secs(collector_server_settings.tcp_keepalive_time()); - let tcp_keepalive_interval = collector_server_settings - .tcp_keepalive_intvl() - .map(Duration::from_secs); - let tcp_keepalive_probes = collector_server_settings.tcp_keepalive_probes(); - - // create the service per connection - let make_service = make_service_fn(move |conn: &TlsStream| { - // get peer certificate (= user certificate) - let cert = conn - .get_ref() - .1 - .peer_certificates() - .expect("Peer certificate should exist") // client auth has to happen, so this should not fail - .first() - .expect("Peer certificate should not be empty") // client cert cannot be empty if authentication succeeded - .clone(); - - let subject = subject_from_cert(cert.as_ref()).expect("Could not parse client certificate"); - let thumbprint = tls_config.thumbprint.clone(); - - let collector_settings = collector_settings.clone(); - let svc_db = 
collector_db.clone(); - let svc_server_settings = collector_server_settings.clone(); - let subscriptions = collector_subscriptions.clone(); - let collector_heartbeat_tx = collector_heartbeat_tx.clone(); - - let addr = conn.get_ref().0.remote_addr(); - let auth_ctx = AuthenticationContext::Tls(subject, thumbprint); - - // create service per request - let service = service_fn(move |req| { - handle( - svc_server_settings.clone(), - collector_settings.clone(), - svc_db.clone(), - subscriptions.clone(), - collector_heartbeat_tx.clone(), - auth_ctx.clone(), - addr, - req, - ) - }); - - async move { Ok::<_, Infallible>(service) } - }); - // create acceptor from config let tls_acceptor: TlsAcceptor = tls_config.server.into(); - let mut addr_incoming = AddrIncoming::bind(&addr).expect("Could not bind address to listener"); - addr_incoming.set_keepalive(Some(tcp_keepalive_time)); - addr_incoming.set_keepalive_interval(tcp_keepalive_interval); - addr_incoming.set_keepalive_retries(tcp_keepalive_probes); - - // configure listener on the address to use the acceptor - let incoming = TlsListener::new(tls_acceptor, addr_incoming) - .connections() - .filter(|conn| { - if let Err(err) = &conn { - match err { - tls_listener::Error::TlsAcceptError { error, .. } - if error.to_string() == "tls handshake eof" => - { - // happens sometimes, not problematic - debug!("Error while establishing a connection: {:?}", err) + let server = async move { + let listener = TcpListener::bind(server_addr).await?; + info!("Server listenning on {}", server_addr); + + // Each accepted TCP connection gets a channel 'rx', which is closed when + // the connections ends (whether because the client closed the connection + // or if a shutdown signal has been received). + // On shutdown, the server waits for all 'rx' to be dropped before + // resolving terminating using `close_tx.closed().await`. 
+ let (close_tx, close_rx) = watch::channel(()); + loop { + let shutdown_ct = collector_shutdown_ct.clone(); + + // Accept new clients and wait for shutdown signal to stop accepting + let (mut stream, client_addr) = tokio::select! { + conn = listener.accept() => match conn { + Ok(conn) => conn, + Err(err) => { + warn!("Could not get client: {:?}", err); + continue; + } + }, + _ = shutdown_ct.cancelled() => { + debug!("Shutdown signal received, stop accepting new clients connections"); + break; + } + }; + + debug!("Received TCP connection from {}", client_addr); + + // Configure connected socket with keepalive parameters + let keep_alive = create_keepalive_settings(&collector_server_settings); + let socket_ref = SockRef::from(&stream); + socket_ref.set_tcp_keepalive(&keep_alive)?; + + // We have to clone the context to move it into the tokio task + // responsible for handling the client + let collector_settings = collector_settings.clone(); + let svc_db = collector_db.clone(); + let svc_server_settings = collector_server_settings.clone(); + let subscriptions = collector_subscriptions.clone(); + let collector_heartbeat_tx = collector_heartbeat_tx.clone(); + let thumbprint = tls_config.thumbprint.clone(); + let tls_acceptor = tls_acceptor.clone(); + + // Create a "rx" channel end for the task + let close_rx = close_rx.clone(); + + tokio::task::spawn(async move { + // Parse proxy protocol if enabled + let real_client_addr = if collector_settings.enable_proxy_protocol() { + match read_proxy_protocol_header(&mut stream).await { + Ok(addr) => addr, + Err(err) => { + debug!("Failed to read Proxy Protocol header: {}", err); + // Exit task + return; + } } - _ => warn!("Error while establishing a connection: {:?}", err), + } else { + client_addr }; - ready(false) - } else { - ready(true) - } - }); - let server = Server::builder(accept::from_stream(incoming)) - .serve(make_service) - .with_graceful_shutdown(http_shutdown_signal()); + let stream = match 
tls_acceptor.accept(stream).await { + Ok(stream) => stream, + Err(err) => { + match err.into_inner() { + Some(str) if str.to_string() == "tls handshake eof" => { + // happens sometimes, not problematic + debug!( + "Error while establishing a connection with '{}': {:?}", + real_client_addr, str + ) + } + other => warn!( + "Error while establishing a connection with '{}': {:?}", + real_client_addr, other + ), + }; + return; + } + }; + + // get peer certificate + let cert = stream + .get_ref() + .1 + .peer_certificates() + .expect("Peer certificate should exist") // client auth has to happen, so this should not fail + .first() + .expect("Peer certificate should not be empty") // client cert cannot be empty if authentication succeeded + .clone(); + + let subject = + subject_from_cert(cert.as_ref()).expect("Could not parse client certificate"); + + // Initialize Authentication context once for each TCP connection + let auth_ctx = AuthenticationContext::Tls(subject, thumbprint); + + // Hyper needs a wrapper for the stream + let io = TokioIo::new(stream); + + // Handle the connection using Hyper http1 + // conn is a Future that ends when the connection is closed + let conn = http1::Builder::new().serve_connection( + io, + service_fn(move |req| { + handle( + svc_server_settings.clone(), + collector_settings.clone(), + svc_db.clone(), + subscriptions.clone(), + collector_heartbeat_tx.clone(), + auth_ctx.clone(), + real_client_addr, + req, + ) + }), + ); + // conn needs to be pinned to be able to use tokio::select! + pin!(conn); + + // This loop is required to continue to poll the connection after calling + // graceful_shutdown(). + tokio::select! 
{ + res = conn.as_mut() => { + if let Err(err) = res { + debug!("Error serving connection: {:?}", err); + } + }, + _ = shutdown_ct.cancelled() => { + debug!("Shutdown signal received, closing connection with {:?}", client_addr); + conn.as_mut().graceful_shutdown(); + if let Err(err) = conn.as_mut().await { + debug!("Error serving connection: {:?}", err); + }; + } + } + // Connection is closed, drop "task" rx to inform the server that this task + // is ending + drop(close_rx); + }); + } + + // Drop "server" rx to keep only "tasks" rx + drop(close_rx); + + info!( + "Waiting for {} task(s) to finish", + close_tx.receiver_count() + ); + close_tx.closed().await; + + Ok(()) + }; - info!("Server listenning on {}", addr); - // XXX : because the 2 closures have different types we use this, but may be better way to do this Box::pin(server) } @@ -759,30 +991,38 @@ enum ShutdownReason { Sigterm, } -async fn shutdown_signal() -> ShutdownReason { +async fn shutdown_signal_task(ct: CancellationToken) { let ctrl_c = tokio::signal::ctrl_c(); let mut sigterm = tokio::signal::unix::signal(SignalKind::terminate()) .expect("failed to install SIGTERM handler"); tokio::select! 
{ - _ = ctrl_c => ShutdownReason::CtrlC, - _ = sigterm.recv() => ShutdownReason::Sigterm, - } -} + _ = ctrl_c => { + info!("Received CTRL+C"); + ShutdownReason::CtrlC + }, + _ = sigterm.recv() => { + info!("Received SIGTERM signal"); + ShutdownReason::Sigterm + } + }; -async fn http_shutdown_signal() { - shutdown_signal().await; + // Send the cancellation signal + ct.cancel(); } -async fn force_shutdown_timeout() { - match shutdown_signal().await { - ShutdownReason::CtrlC => { - info!("Received CTRL+C") - } - ShutdownReason::Sigterm => { - info!("Received SIGTERM signal") - } +fn monitoring_thread(rt_handle: Handle) { + info!("Monitoring thread started"); + loop { + std::thread::sleep(Duration::from_secs(3)); + debug!("Monitoring thread injected dummy task"); + rt_handle.spawn(future::ready(())); } +} + +async fn force_shutdown_timeout(ct: CancellationToken) { + // Wait for the shutdown signal + ct.cancelled().await; debug!("Start 10 secs timeout before killing HTTP servers"); tokio::time::sleep(Duration::from_secs(10)).await; } @@ -793,8 +1033,13 @@ pub async fn run(settings: Settings, verbosity: u8) { panic!("Failed to setup logging: {:?}", e); } - // XXX : because the 2 closures have different types we use this, but may be better way to do this - let mut servers: Vec> + Send>>> = Vec::new(); + let rt_handle = Handle::current(); + + // Start monitoring thread + // This ensures the whole progress does not get stop if the + // tokio runtime is accidently blocked by a "bad" task + // See https://github.com/tokio-rs/tokio/issues/4730 + std::thread::spawn(move || monitoring_thread(rt_handle)); let db: Db = db_from_settings(&settings) .await @@ -839,6 +1084,14 @@ pub async fn run(settings: Settings, verbosity: u8) { heartbeat_task(update_task_db, interval, heartbeat_rx, cloned_heartbaat_ct).await }); + let shutdown_ct = CancellationToken::new(); + let cloned_shutdown_ct = shutdown_ct.clone(); + + // Shutdown task: waits for shutdown signal and cancel the given 
CancellationToken + tokio::spawn(async move { + shutdown_signal_task(cloned_shutdown_ct).await; + }); + // Set KRB5_KTNAME env variable if necessary (i.e. if at least one collector uses // Kerberos authentication) if settings.collectors().iter().any(|x| { @@ -856,12 +1109,15 @@ pub async fn run(settings: Settings, verbosity: u8) { info!("Server settings: {:?}", settings.server()); + let mut servers: Vec> + Send>>> = Vec::new(); + for collector in settings.collectors() { let collector_db = db.clone(); let collector_subscriptions = subscriptions.clone(); let collector_settings = collector.clone(); let collector_heartbeat_tx = heartbeat_tx.clone(); let collector_server_settings = settings.server().clone(); + let collector_shutdown_ct = shutdown_ct.clone(); // Construct our SocketAddr to listen on... let addr = SocketAddr::from(( @@ -882,10 +1138,10 @@ pub async fn run(settings: Settings, verbosity: u8) { collector_subscriptions, collector_heartbeat_tx, collector_server_settings, + collector_shutdown_ct, addr, )); } - Authentication::Tls(tls) => { servers.push(create_tls_server( tls, @@ -894,6 +1150,7 @@ pub async fn run(settings: Settings, verbosity: u8) { collector_subscriptions, collector_heartbeat_tx, collector_server_settings, + collector_shutdown_ct, addr, )); } @@ -901,7 +1158,7 @@ pub async fn run(settings: Settings, verbosity: u8) { } tokio::select! 
{ - _ = force_shutdown_timeout() => { + _ = force_shutdown_timeout(shutdown_ct) => { warn!("HTTP servers graceful shutdown timed out."); }, result = join_all(servers) => { diff --git a/server/src/logic.rs b/server/src/logic.rs index 0a88855..b268206 100644 --- a/server/src/logic.rs +++ b/server/src/logic.rs @@ -1,23 +1,28 @@ use crate::{ - event::EventMetadata, - formatter::Format, + event::{EventData, EventMetadata}, heartbeat::{store_heartbeat, WriteHeartbeatMessage}, + output::get_formatter, soap::{ Body, Header, Message, OptionSetValue, Subscription as SoapSubscription, SubscriptionBody, ACTION_ACK, ACTION_END, ACTION_ENUMERATE, ACTION_ENUMERATE_RESPONSE, ACTION_EVENTS, ACTION_HEARTBEAT, ACTION_SUBSCRIBE, ACTION_SUBSCRIPTION_END, ANONYMOUS, RESOURCE_EVENT_LOG, }, subscription::{Subscription, Subscriptions}, - RequestCategory, RequestData, AuthenticationContext, + AuthenticationContext, RequestCategory, RequestData, }; use common::{ database::Db, settings::{Collector, Server}, + subscription::{SubscriptionOutputFormat, SubscriptionUuid}, }; -use http::status::StatusCode; +use hyper::http::status::StatusCode; use log::{debug, error, warn}; -use std::{collections::HashMap, sync::Arc}; +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, +}; use tokio::{sync::mpsc, task::JoinSet}; +use uuid::Uuid; use anyhow::{anyhow, bail, Context, Result}; @@ -36,24 +41,6 @@ impl Response { } } -fn check_sub_request_data(request_data: &RequestData, version: &str) -> bool { - let uri_version = if let RequestCategory::Subscription(version) = request_data.category() { - version - } else { - error!("Request URI is incoherent with body message"); - return false; - }; - - if version != uri_version { - error!( - "URI identifier and message identifier do not match: {} != {}", - uri_version, version - ); - return false; - } - true -} - async fn handle_enumerate( collector: &Collector, db: &Db, @@ -170,7 +157,7 @@ async fn handle_enumerate( ); let mut bookmark: Option = db - 
.get_bookmark(request_data.principal(), subscription_data.uuid()) + .get_bookmark(request_data.principal(), &subscription_data.uuid_string()) .await .context("Failed to retrieve current bookmark from database")?; @@ -186,24 +173,29 @@ async fn handle_enumerate( bookmark ); + let public_version = subscription.public_version_string(); + let identifier = subscription.uuid_string(); + let body = SubscriptionBody { heartbeat_interval: subscription_data.heartbeat_interval() as u64, - identifier: subscription_data.version().to_owned(), + identifier: identifier.clone(), + public_version: public_version.clone(), + revision: subscription_data.revision().cloned(), bookmark, query: subscription_data.query().to_owned(), address: match auth_ctx { AuthenticationContext::Kerberos(_) => format!( "http://{}:{}/wsman/subscriptions/{}", collector.hostname(), - collector.listen_port(), - subscription_data.version() + collector.advertized_port(), + identifier ), - AuthenticationContext::Tls(_,_) => format!( + AuthenticationContext::Tls(_, _) => format!( "https://{}:{}/wsman/subscriptions/{}", collector.hostname(), - collector.listen_port(), - subscription_data.version() - ) + collector.advertized_port(), + identifier + ), }, connection_retry_count: subscription_data.connection_retry_count(), connection_retry_interval: subscription_data.connection_retry_interval(), @@ -211,12 +203,14 @@ async fn handle_enumerate( max_envelope_size: subscription_data.max_envelope_size(), thumbprint: match auth_ctx { AuthenticationContext::Tls(_, thumbprint) => Some(thumbprint.clone()), - AuthenticationContext::Kerberos(_) => None - } + AuthenticationContext::Kerberos(_) => None, + }, + locale: subscription_data.locale().cloned(), + data_locale: subscription_data.data_locale().cloned(), }; res_subscriptions.push(SoapSubscription { - identifier: subscription_data.version().to_owned(), + version: public_version, header, body, }); @@ -234,18 +228,16 @@ async fn handle_heartbeat( request_data: &RequestData, 
message: &Message, ) -> Result { - let version = message - .header() - .identifier() - .ok_or_else(|| anyhow!("Missing field identifier"))?; - - if !check_sub_request_data(request_data, version) { + let subscription_uuid = if let Some(identifier) = message.header().identifier() { + SubscriptionUuid(Uuid::parse_str(identifier)?) + } else { + error!("Could not find identifier in message header"); return Ok(Response::err(StatusCode::BAD_REQUEST)); - } + }; let subscription = { let subscriptions = subscriptions.read().unwrap(); - match subscriptions.get(version) { + match subscriptions.get(&subscription_uuid) { Some(subscription) => subscription.to_owned(), None => { warn!( @@ -253,9 +245,9 @@ async fn handle_heartbeat( request_data.remote_addr().ip(), request_data.remote_addr().port(), request_data.principal(), - version + subscription_uuid ); - return Ok(Response::err(StatusCode::BAD_REQUEST)); + return Ok(Response::err(StatusCode::NOT_FOUND)); } } }; @@ -267,7 +259,7 @@ async fn handle_heartbeat( request_data.remote_addr().port(), request_data.principal(), subscription.data().name(), - subscription.uuid() + subscription.uuid_string() ); return Ok(Response::err(StatusCode::FORBIDDEN)); } @@ -278,14 +270,14 @@ async fn handle_heartbeat( request_data.remote_addr().port(), request_data.principal(), subscription.data().name(), - subscription.uuid(), + subscription.uuid_string(), ); store_heartbeat( heartbeat_tx, request_data.principal(), request_data.remote_addr().ip().to_string(), - subscription.uuid(), + &subscription.uuid_string(), false, ) .await @@ -293,6 +285,34 @@ async fn handle_heartbeat( Ok(Response::ok(ACTION_ACK, None)) } +fn get_formatted_events( + events: &[Arc], + need_to_parse_event: bool, + formats: &HashSet, + metadata: &Arc, +) -> HashMap>>> { + let mut events_data = Vec::with_capacity(events.len()); + for raw in events.iter() { + // EventData parses the raw event into an Event struct + // (once for all formatters). 
+ events_data.push(EventData::new(raw.clone(), need_to_parse_event)) + } + + let mut formatted_events: HashMap>>> = + HashMap::new(); + for format in formats { + let mut content = Vec::new(); + let formatter = get_formatter(format); + for event_data in events_data.iter() { + if let Some(str) = formatter.format(metadata, event_data) { + content.push(str.clone()) + } + } + formatted_events.insert(format.clone(), Arc::new(content)); + } + formatted_events +} + async fn handle_events( server: &Server, db: &Db, @@ -302,22 +322,20 @@ async fn handle_events( message: &Message, ) -> Result { if let Some(Body::Events(events)) = &message.body { - let version = message - .header() - .identifier() - .ok_or_else(|| anyhow!("Missing field identifier"))?; - - if !check_sub_request_data(request_data, version) { + let subscription_uuid = if let Some(identifier) = message.header().identifier() { + SubscriptionUuid(Uuid::parse_str(identifier)?) + } else { + error!("Could not find identifier in message header"); return Ok(Response::err(StatusCode::BAD_REQUEST)); - } + }; let subscription: Arc = { let subscriptions = subscriptions.read().unwrap(); - let subscription = subscriptions.get(version); + let subscription = subscriptions.get(&subscription_uuid); match subscription { Some(subscription) => subscription.to_owned(), None => { - warn!("Unknown subscription version {}", version); + warn!("Unknown subscription uuid {}", subscription_uuid); return Ok(Response::err(StatusCode::NOT_FOUND)); } } @@ -328,9 +346,9 @@ async fn handle_events( "Received Events from {}:{} ({}) for subscription {} ({}) but the principal is not allowed to use this subscription.", request_data.remote_addr().ip(), request_data.remote_addr().port(), - request_data.principal(), + request_data.principal(), subscription.data().name(), - subscription.uuid(), + subscription.uuid_string(), ); return Ok(Response::err(StatusCode::FORBIDDEN)); } @@ -341,44 +359,82 @@ async fn handle_events( 
request_data.remote_addr().port(), request_data.principal(), subscription.data().name(), - subscription.uuid() + subscription.uuid_string() ); + // Retrieve the public version sent by the client, not the one stored in memory + let public_version = if let Some(public_version) = message.header().version() { + public_version + } else { + warn!("Missing subscription version in message events"); + return Ok(Response::err(StatusCode::BAD_REQUEST)); + }; + let metadata = Arc::new(EventMetadata::new( request_data.remote_addr(), request_data.principal(), server.node_name().cloned(), &subscription, + public_version.clone(), + message.header().revision().cloned(), )); - // Build event strings for all formats - let mut formatted_events: HashMap>>> = HashMap::new(); - for format in subscription.formats() { - let mut content = Vec::new(); - for raw in events.iter() { - if let Some(str) = format.format(&metadata, raw.clone()) { - content.push(str.clone()) - } - } - formatted_events.insert(format.clone(), Arc::new(content)); - } + let need_to_parse_event = subscription + .formats() + .iter() + .any(|format| format.needs_parsed_event()); + + let formatted_events = if need_to_parse_event { + // Parsing events takes time. In addition, if a formatter needs parsed events, + // it probably performs some serialization which takes time and should be done in a + // blocking task. + let task_events = events.clone(); + let task_formats = subscription.formats().clone(); + let task_metadata = metadata.clone(); + tokio::task::spawn_blocking(move || { + get_formatted_events( + &task_events, + need_to_parse_event, + &task_formats, + &task_metadata, + ) + }) + .await? 
+ } else { + get_formatted_events( + events, + need_to_parse_event, + subscription.formats(), + &metadata, + ) + }; let mut handles = JoinSet::new(); // Spawn tasks to write events to every outputs of the subscription for output in subscription.outputs() { - let output = output.clone(); - let metadata = metadata.clone(); - let format = output.format(); + let output_cloned = output.clone(); + let metadata_cloned = metadata.clone(); let content = formatted_events - .get(format) - .ok_or_else(|| anyhow!("Could not get formatted event for format {:?}", format))? + .get(output_cloned.format()) + .ok_or_else(|| { + anyhow!( + "Could not get formatted event for format {:?}", + output_cloned.format() + ) + })? .clone(); handles.spawn(async move { - output.write(metadata, content).await.with_context(|| { - format!("Failed to write event to output {}", output.describe()) - }) + output_cloned + .write(metadata_cloned, content) + .await + .with_context(|| { + format!( + "Failed to write event to output {}", + output_cloned.describe() + ) + }) }); } @@ -407,9 +463,13 @@ async fn handle_events( .bookmarks() .ok_or_else(|| anyhow!("Missing bookmarks in request payload"))?; // Store bookmarks and heartbeats - db.store_bookmark(request_data.principal(), subscription.uuid(), bookmark) - .await - .context("Failed to store bookmarks")?; + db.store_bookmark( + request_data.principal(), + &subscription.uuid_string(), + bookmark, + ) + .await + .context("Failed to store bookmarks")?; debug!( "Store bookmark from {}:{} ({}) for subscription {} ({}): {}", @@ -417,14 +477,14 @@ async fn handle_events( request_data.remote_addr().port(), request_data.principal(), subscription.data().name(), - subscription.uuid(), + subscription.uuid_string(), bookmark ); store_heartbeat( heartbeat_tx, request_data.principal(), request_data.remote_addr().ip().to_string(), - subscription.uuid(), + &subscription.uuid_string(), true, ) .await diff --git a/server/src/output.rs b/server/src/output.rs index 
0b86047..58058df 100644 --- a/server/src/output.rs +++ b/server/src/output.rs @@ -2,54 +2,95 @@ use std::sync::Arc; use anyhow::Result; use async_trait::async_trait; -use common::subscription::{FileConfiguration, KafkaConfiguration, SubscriptionOutput, RedisConfiguration, UnixDatagramConfiguration}; +use common::subscription::{ + SubscriptionOutputDriver, SubscriptionOutputFormat, +}; -use crate::{event::EventMetadata, formatter::Format}; +use crate::{ + drivers::{ + files::OutputFiles, kafka::OutputKafka, redis::OutputRedis, tcp::OutputTcp, + unix::OutputUnixDatagram, + }, + event::{EventData, EventMetadata}, formats::{json::JsonFormat, raw::RawFormat, raw_json::RawJsonFormat}, -#[derive(Debug, Clone)] -pub enum OutputType { - Files(Format, FileConfiguration, bool), - Kafka(Format, KafkaConfiguration, bool), - Redis(Format, RedisConfiguration, bool), - Tcp(Format, String, u16, bool), - UnixDatagram(Format, UnixDatagramConfiguration, bool), +}; + +#[derive(Clone)] +pub struct Output { + format: SubscriptionOutputFormat, + driver: Arc, + // Only used for "describe()" + subscription_output_driver: SubscriptionOutputDriver, } -impl From<&SubscriptionOutput> for OutputType { - fn from(so: &SubscriptionOutput) -> Self { - match so { - SubscriptionOutput::Files(sof, config, enabled) => { - OutputType::Files(sof.into(), config.clone(), *enabled) - } - SubscriptionOutput::Kafka(sof, config, enabled) => { - OutputType::Kafka(sof.into(), config.clone(), *enabled) - } - SubscriptionOutput::Redis(sof, config, enabled) => { - OutputType::Redis(sof.into(), config.clone(), *enabled) - } - SubscriptionOutput::Tcp(sof, config, enabled) => OutputType::Tcp( - sof.into(), - config.addr().to_string(), - config.port(), - *enabled, - ), - SubscriptionOutput::UnixDatagram(sof, config, enabled) => { - OutputType::UnixDatagram(sof.into(), config.clone(), *enabled) - } - } +impl Output { + pub fn new( + format: &SubscriptionOutputFormat, + driver: &SubscriptionOutputDriver, + ) -> Result { + 
Ok(Self { + driver: match driver { + SubscriptionOutputDriver::Files(config) => { + Arc::new(OutputFiles::new(config)) + } + SubscriptionOutputDriver::Kafka(config) => { + Arc::new(OutputKafka::new(config)?) + } + SubscriptionOutputDriver::Tcp(config) => { + Arc::new(OutputTcp::new(config)?) + } + SubscriptionOutputDriver::Redis(config) => { + Arc::new(OutputRedis::new(config)?) + } + SubscriptionOutputDriver::UnixDatagram(config) => { + Arc::new(OutputUnixDatagram::new(config)?) + } + }, + format: format.clone(), + subscription_output_driver: driver.clone(), + }) + } + + pub fn describe(&self) -> String { + format!( + "format: {:?}, driver: {:?}", + self.format, self.subscription_output_driver + ) + } + + pub async fn write( + &self, + metadata: Arc, + events: Arc>>, + ) -> Result<()> { + self.driver.write(metadata, events).await + } + + pub fn format(&self) -> &SubscriptionOutputFormat { + &self.format } } -// async_trait is required to be able to use async functions -// in traits #[async_trait] -pub trait Output { +pub trait OutputDriver { + /// Write a batch of events and associated metadata async fn write( &self, metadata: Arc, events: Arc>>, ) -> Result<()>; +} - fn describe(&self) -> String; - fn format(&self) -> &Format; +pub trait OutputFormat { + /// Formats an event. + /// If something wrong happens, formatter is allowed to return None. 
+ fn format(&self, metadata: &EventMetadata, data: &EventData) -> Option>; } + +pub fn get_formatter(format :&SubscriptionOutputFormat) -> Box { + match format { + SubscriptionOutputFormat::Json => Box::new(JsonFormat), + SubscriptionOutputFormat::Raw => Box::new(RawFormat), + SubscriptionOutputFormat::RawJson => Box::new(RawJsonFormat) + } +} \ No newline at end of file diff --git a/server/src/proxy_protocol.rs b/server/src/proxy_protocol.rs new file mode 100644 index 0000000..a9db24e --- /dev/null +++ b/server/src/proxy_protocol.rs @@ -0,0 +1,195 @@ +// A lot of the following code comes from +// https://github.com/valorem-labs-inc/hyper-server. +// It was not used as a dependency because the read_proxy_header function cannot +// be used outside of the crate. + +// As stated by its license (MIT), we include below its copyright notice and +// permission notice: +// +// Copyright 2021 Axum Server Contributors +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +use ppp::v1; +use ppp::v2; +use ppp::HeaderResult; +use std::net::IpAddr; +use std::{io, net::SocketAddr}; +use tokio::io::AsyncRead; +use tokio::io::AsyncReadExt; + +/// The length of a v1 header in bytes. +const V1_PREFIX_LEN: usize = 5; +/// The maximum length of a v1 header in bytes. +const V1_MAX_LENGTH: usize = 107; +/// The terminator bytes of a v1 header. +const V1_TERMINATOR: &[u8] = b"\r\n"; +/// The prefix length of a v2 header in bytes. +const V2_PREFIX_LEN: usize = 12; +/// The minimum length of a v2 header in bytes. +const V2_MINIMUM_LEN: usize = 16; +/// The index of the start of the big-endian u16 length in the v2 header. +const V2_LENGTH_INDEX: usize = 14; +/// The length of the read buffer used to read the PROXY protocol header. 
+const READ_BUFFER_LEN: usize = 512; + +pub async fn read_proxy_header(mut stream: I) -> Result<(I, Option), io::Error> +where + I: AsyncRead + Unpin, +{ + // Mutable buffer for storing stream data + let mut buffer = [0; READ_BUFFER_LEN]; + // Dynamic in case v2 header is too long + let mut dynamic_buffer = None; + + // Read prefix to check for v1, v2, or kill + stream.read_exact(&mut buffer[..V1_PREFIX_LEN]).await?; + + if &buffer[..V1_PREFIX_LEN] == v1::PROTOCOL_PREFIX.as_bytes() { + read_v1_header(&mut stream, &mut buffer).await?; + } else { + stream + .read_exact(&mut buffer[V1_PREFIX_LEN..V2_MINIMUM_LEN]) + .await?; + if &buffer[..V2_PREFIX_LEN] == v2::PROTOCOL_PREFIX { + dynamic_buffer = read_v2_header(&mut stream, &mut buffer).await?; + } else { + return Err(io::Error::new( + io::ErrorKind::InvalidData, + "No valid Proxy Protocol header detected", + )); + } + } + + // Choose which buffer to parse + let buffer = dynamic_buffer.as_deref().unwrap_or(&buffer[..]); + + // Parse the header + let header = HeaderResult::parse(buffer); + match header { + HeaderResult::V1(Ok(header)) => { + let client_address = match header.addresses { + v1::Addresses::Tcp4(ip) => { + SocketAddr::new(IpAddr::V4(ip.source_address), ip.source_port) + } + v1::Addresses::Tcp6(ip) => { + SocketAddr::new(IpAddr::V6(ip.source_address), ip.source_port) + } + v1::Addresses::Unknown => { + // Return client address as `None` so that "unknown" is used in the http header + return Ok((stream, None)); + } + }; + + Ok((stream, Some(client_address))) + } + HeaderResult::V2(Ok(header)) => { + let client_address = match header.addresses { + v2::Addresses::IPv4(ip) => { + SocketAddr::new(IpAddr::V4(ip.source_address), ip.source_port) + } + v2::Addresses::IPv6(ip) => { + SocketAddr::new(IpAddr::V6(ip.source_address), ip.source_port) + } + v2::Addresses::Unix(unix) => { + return Err(io::Error::new( + io::ErrorKind::InvalidData, + format!( + "Unix socket addresses are not supported. 
Addresses: {:?}", + unix + ), + )); + } + v2::Addresses::Unspecified => { + // Return client address as `None` so that "unknown" is used in the http header + return Ok((stream, None)); + } + }; + + Ok((stream, Some(client_address))) + } + HeaderResult::V1(Err(_error)) => Err(io::Error::new( + io::ErrorKind::InvalidData, + "No valid V1 Proxy Protocol header received", + )), + HeaderResult::V2(Err(_error)) => Err(io::Error::new( + io::ErrorKind::InvalidData, + "No valid V2 Proxy Protocol header received", + )), + } +} + +async fn read_v2_header( + mut stream: I, + buffer: &mut [u8; READ_BUFFER_LEN], +) -> Result>, io::Error> +where + I: AsyncRead + Unpin, +{ + let length = + u16::from_be_bytes([buffer[V2_LENGTH_INDEX], buffer[V2_LENGTH_INDEX + 1]]) as usize; + let full_length = V2_MINIMUM_LEN + length; + + // Switch to dynamic buffer if header is too long; v2 has no maximum length + if full_length > READ_BUFFER_LEN { + let mut dynamic_buffer = Vec::with_capacity(full_length); + dynamic_buffer.extend_from_slice(&buffer[..V2_MINIMUM_LEN]); + + // Read the remaining header length + stream + .read_exact(&mut dynamic_buffer[V2_MINIMUM_LEN..full_length]) + .await?; + + Ok(Some(dynamic_buffer)) + } else { + // Read the remaining header length + stream + .read_exact(&mut buffer[V2_MINIMUM_LEN..full_length]) + .await?; + + Ok(None) + } +} + +async fn read_v1_header( + mut stream: I, + buffer: &mut [u8; READ_BUFFER_LEN], +) -> Result<(), io::Error> +where + I: AsyncRead + Unpin, +{ + // read one byte at a time until terminator found + let mut end_found = false; + for i in V1_PREFIX_LEN..V1_MAX_LENGTH { + buffer[i] = stream.read_u8().await?; + + if [buffer[i - 1], buffer[i]] == V1_TERMINATOR { + end_found = true; + break; + } + } + if !end_found { + return Err(io::Error::new( + io::ErrorKind::InvalidData, + "No valid Proxy Protocol header detected", + )); + } + + Ok(()) +} diff --git a/server/src/sldc.rs b/server/src/sldc.rs index d32f737..6368152 100644 --- 
a/server/src/sldc.rs +++ b/server/src/sldc.rs @@ -11,7 +11,7 @@ const CTRLSYMB_RESET_1: u16 = 0b1111111110101; const CTRLSYMB_RESET_2: u16 = 0b1111111110110; const CTRLSYMB_END_MARKER: u16 = 0b1111111111111; -pub fn decompress(compressed_bytes: &Vec) -> Result> { +pub fn decompress(compressed_bytes: &[u8]) -> Result> { // Implemented according to ECMA-321 debug!( "Try to decompress SLDC data ({} compressed bytes)", diff --git a/server/src/soap.rs b/server/src/soap.rs index 8f3cb7f..84df781 100644 --- a/server/src/soap.rs +++ b/server/src/soap.rs @@ -1,5 +1,4 @@ use anyhow::{anyhow, bail, ensure, Context, Result}; -use common::utils::new_uuid; use log::{debug, trace}; use quick_xml::events::{BytesText, Event}; use quick_xml::reader::Reader; @@ -7,6 +6,7 @@ use quick_xml::writer::Writer; use roxmltree::{Document, Node}; use std::collections::HashMap; use std::sync::Arc; +use uuid::Uuid; use xmlparser::XmlCharExt; const SOAP_ENVELOPE_NS: &str = "http://www.w3.org/2003/05/soap-envelope"; @@ -39,13 +39,17 @@ pub const ACTION_SUBSCRIPTION_END: &str = pub const ACTION_HEARTBEAT: &str = "http://schemas.dmtf.org/wbem/wsman/1/wsman/Heartbeat"; pub const ACTION_ACK: &str = "http://schemas.dmtf.org/wbem/wsman/1/wsman/Ack"; +pub fn new_uuid() -> String { + format!("uuid:{}", Uuid::new_v4().to_string().to_uppercase()) +} + pub trait Serializable { fn serialize(&self, writer: &mut Writer) -> quick_xml::Result<()>; } #[derive(Debug)] pub struct Subscription { - pub identifier: String, + pub version: String, pub header: Header, pub body: SubscriptionBody, } @@ -59,7 +63,7 @@ impl Serializable for Subscription { writer .create_element("m:Version") .write_text_content(BytesText::new( - format!("uuid:{}", self.identifier).as_str(), + format!("uuid:{}", self.version).as_str(), ))?; writer .create_element("s:Envelope") @@ -93,6 +97,10 @@ pub struct SubscriptionBody { pub max_time: u32, pub max_envelope_size: u32, pub thumbprint: Option, + pub public_version: String, + pub revision: 
Option, + pub locale: Option, + pub data_locale: Option, } impl Serializable for SubscriptionBody { @@ -110,11 +118,19 @@ impl Serializable for SubscriptionBody { .create_element("a:Address") .write_text_content(BytesText::new(&self.address))?; writer - .create_element("a:ReferenceProperties") + .create_element("a:ReferenceParameters") .write_inner_content(|writer| { writer .create_element("e:Identifier") .write_text_content(BytesText::new(&self.identifier))?; + writer + .create_element("Version") + .write_text_content(BytesText::new(&self.public_version))?; + if let Some(revision) = &self.revision { + writer + .create_element("Revision") + .write_text_content(BytesText::new(revision))?; + } Ok::<(), quick_xml::Error>(()) })?; Ok::<(), quick_xml::Error>(()) @@ -134,13 +150,19 @@ impl Serializable for SubscriptionBody { .create_element("a:Address") .write_text_content(BytesText::new(&self.address))?; writer - .create_element("a:ReferenceProperties") + .create_element("a:ReferenceParameters") .write_inner_content(|writer| { writer .create_element("e:Identifier") - .write_text_content(BytesText::new( - &self.identifier, - ))?; + .write_text_content(BytesText::new(&self.identifier))?; + writer + .create_element("Version") + .write_text_content(BytesText::new(&self.public_version))?; + if let Some(revision) = &self.revision { + writer + .create_element("Revision") + .write_text_content(BytesText::new(revision))?; + } Ok::<(), quick_xml::Error>(()) })?; writer @@ -229,16 +251,20 @@ impl Serializable for SubscriptionBody { .write_text_content(BytesText::new( format!("{}", self.max_envelope_size).as_str(), ))?; - writer - .create_element("w:Locale") - .with_attribute(("xml:lang", "en-US")) - .with_attribute(("s:mustUnderstand", "false")) - .write_empty()?; - writer - .create_element("p:DataLocale") - .with_attribute(("xml:lang", "en-US")) - .with_attribute(("s:mustUnderstand", "false")) - .write_empty()?; + if let Some(locale) = &self.locale { + writer + 
.create_element("w:Locale") + .with_attribute(("xml:lang", locale.as_str())) + .with_attribute(("s:mustUnderstand", "false")) + .write_empty()?; + } + if let Some(data_locale) = &self.data_locale { + writer + .create_element("p:DataLocale") + .with_attribute(("xml:lang", data_locale.as_str())) + .with_attribute(("s:mustUnderstand", "false")) + .write_empty()?; + } writer .create_element("w:ContentEncoding") .write_text_content(BytesText::new("UTF-16"))?; @@ -301,7 +327,6 @@ pub struct Header { action: Option, max_envelope_size: Option, message_id: Option, - // TODO: difference between Locale and DataLocale // Might be interesting to keep this data if you want to translate things ? // Locale: String, // DataLocale: String, @@ -318,6 +343,9 @@ pub struct Header { identifier: Option, bookmarks: Option, ack_requested: Option, + // Specific to Events and OpenWEC + version: Option, + revision: Option, } impl Header { @@ -338,6 +366,8 @@ impl Header { ack_requested: None, bookmarks: None, identifier: None, + version: None, + revision: None, } } pub fn new( @@ -367,6 +397,8 @@ impl Header { ack_requested: None, bookmarks: None, identifier: None, + version: None, + revision: None, } } @@ -375,10 +407,17 @@ impl Header { self.bookmarks.as_ref() } - /// Get a reference to the header's identifier. 
pub fn identifier(&self) -> Option<&String> { self.identifier.as_ref() } + + pub fn version(&self) -> Option<&String> { + self.version.as_ref() + } + + pub fn revision(&self) -> Option<&String> { + self.revision.as_ref() + } } impl Serializable for Header { @@ -427,17 +466,6 @@ impl Serializable for Header { .create_element("a:MessageID") .write_text_content(BytesText::new(message_id))?; } - - writer - .create_element("w:Locale") - .with_attribute(("xml:lang", "en-US")) - .with_attribute(("s:mustUnderstand", "false")) - .write_empty()?; - writer - .create_element("p:DataLocale") - .with_attribute(("xml:lang", "en-US")) - .with_attribute(("s:mustUnderstand", "false")) - .write_empty()?; if let Some(operation_id) = &self.operation_id { writer .create_element("p:OperationID") @@ -606,6 +634,8 @@ impl Message { ack_requested: None, bookmarks: None, identifier: None, + version: None, + revision: None, }, body, }) @@ -659,6 +689,12 @@ fn parse_header(header_node: Node) -> Result
{ )); } else if tag == (EVENTING_NS, "Identifier").into() { header.identifier = node.text().map(String::from) + } else if tag == "Version".into() { + // specific to OpenWEC + header.version = node.text().map(String::from) + } else if tag == "Revision".into() { + // specific to OpenWEC + header.revision = node.text().map(String::from) } } Ok(header) diff --git a/server/src/subscription.rs b/server/src/subscription.rs index e46f23c..eee1b84 100644 --- a/server/src/subscription.rs +++ b/server/src/subscription.rs @@ -1,7 +1,10 @@ use anyhow::Result; use common::{ database::Db, - subscription::{SubscriptionData, SubscriptionOutput}, + subscription::{ + InternalVersion, PublicVersion, SubscriptionData, SubscriptionOutputFormat, + SubscriptionUuid, + }, }; use itertools::Itertools; use log::{debug, info, warn}; @@ -15,74 +18,51 @@ use tokio::{ time, }; -use crate::{ - formatter::Format, - output::Output, - outputs::{file::OutputFile, kafka::OutputKafka, tcp::OutputTcp, unix::OutputUnixDatagram}, -}; - -use crate::outputs::redis::OutputRedis; +use crate::output::Output; pub struct Subscription { data: SubscriptionData, - outputs: Vec>>, - formats: HashSet, + // Subscription public version is a bit expensive to compute, so we + // store the result in memory + public_version: PublicVersion, + outputs: Vec, + formats: HashSet, } impl TryFrom for Subscription { type Error = anyhow::Error; fn try_from(data: SubscriptionData) -> Result { - let mut formats: HashSet = HashSet::new(); + let mut formats: HashSet = HashSet::new(); for output in data.outputs() { - formats.insert(output.format().into()); + formats.insert(output.format().clone()); } let mut subscription = Subscription { + public_version: data.public_version()?, data, outputs: Vec::new(), formats, }; - subscription.init()?; + subscription.create_outputs()?; Ok(subscription) } } impl Subscription { /// Get a reference to the subscription's uuid. 
- pub fn uuid(&self) -> &str { - self.data.uuid() - } - - /// Get a reference to the subscription's version. - pub fn version(&self) -> &str { - self.data.version() + pub fn uuid_string(&self) -> String { + self.data.uuid_string() } - /// Get a reference to the subscription's outputs. - pub fn outputs(&self) -> &[Arc>] { - self.outputs.as_ref() + /// Get a reference to the subscription's public version. + pub fn public_version_string(&self) -> String { + self.public_version.to_string().to_uppercase() } - fn init(&mut self) -> Result<()> { - // Initialize outputs + fn create_outputs(&mut self) -> Result<()> { for output_data in self.data.outputs() { - if output_data.is_enabled() { - self.outputs.push(match output_data { - SubscriptionOutput::Files(format, config, _) => { - Arc::new(Box::new(OutputFile::new(Format::from(format), config))) - } - SubscriptionOutput::Kafka(format, config, _) => { - Arc::new(Box::new(OutputKafka::new(Format::from(format), config)?)) - } - SubscriptionOutput::Tcp(format, config, _) => { - Arc::new(Box::new(OutputTcp::new(Format::from(format), config)?)) - } - SubscriptionOutput::Redis(format, config, _) => { - Arc::new(Box::new(OutputRedis::new(Format::from(format), config)?)) - } - SubscriptionOutput::UnixDatagram(format, config, _) => { - Arc::new(Box::new(OutputUnixDatagram::new(Format::from(format), config)?)) - } - }); + if output_data.enabled() { + self.outputs + .push(Output::new(output_data.format(), output_data.driver())?); } } Ok(()) @@ -92,12 +72,18 @@ impl Subscription { &self.data } - pub fn formats(&self) -> &HashSet { + pub fn formats(&self) -> &HashSet { &self.formats } + + pub fn outputs(&self) -> &[Output] { + &self.outputs + } } -pub type Subscriptions = Arc>>>; +/// In-memory map of currently active subscriptions +/// => +pub type Subscriptions = Arc>>>; pub async fn reload_subscriptions_task(db: Db, subscriptions: Subscriptions, interval: u64) { info!("reload_subscriptions task started"); @@ -131,7 +117,8 @@ async fn 
reload_subscriptions( ) -> Result<()> { let db_subscriptions = db.get_subscriptions().await?; - let mut active_subscriptions: HashSet = HashSet::with_capacity(db_subscriptions.len()); + let mut active_subscriptions: HashSet = + HashSet::with_capacity(db_subscriptions.len()); // Take a write lock on subscriptions // It will be released at the end of the function @@ -141,78 +128,93 @@ async fn reload_subscriptions( mem_subscriptions.clear(); } - for subscription_data in db_subscriptions { - let version = subscription_data.version(); + // mem_subscriptions is indexed on "public version" + // To know whether something has changed, we must rely "on internal version" + let mem_subscriptions_internal_version: HashSet = mem_subscriptions + .iter() + .map(|(_, subscription)| subscription.data().internal_version()) + .collect(); + for subscription_data in db_subscriptions { if !subscription_data.is_active() { debug!( "Subscription {} is disabled or have no enabled outputs", - subscription_data.uuid() + subscription_data.name(), ); continue; } - active_subscriptions.insert(version.to_string()); + let internal_version = subscription_data.internal_version(); + + active_subscriptions.insert(internal_version); // Update the in memory representation of this subscription if necessary - match mem_subscriptions.get(version) { + match mem_subscriptions_internal_version.get(&internal_version) { Some(_) => { // This subscription has not been changed. Nothing to do } None => { debug!( - "Subscription version {} not found in the in memory subscriptions", - version + "Subscription internal version {} not found in the in memory subscriptions", + internal_version ); - // The version of this subscription does not exist in the in-memory + // The internal version of this subscription does not exist in the in-memory // subscriptions HashMap. This may happen in 2 situations: // 1. This is a new subscription. We must add it to the in-memory subscriptions. // 2. The subscription has been updated. 
We must remove the old subscription and add the new one to the // in memory subscriptions. // `subscription.uuid()` stays the same after an update - let old_subscription = { + let old_subscription: Option> = { mem_subscriptions .values() .find(|old_subscription| { - subscription_data.uuid() == old_subscription.uuid() + subscription_data.uuid() == old_subscription.data().uuid() }) .cloned() }; if let Some(old_subscription) = old_subscription { - info!("Subscription {} has been updated", subscription_data.uuid()); - mem_subscriptions.remove(old_subscription.version()); + info!("Subscription {} has been updated", subscription_data.name()); + mem_subscriptions.remove(old_subscription.data().uuid()); } else { - info!("Subscription {} has been created", subscription_data.uuid()); + info!("Subscription {} has been created", subscription_data.name()); } // Initialize the new subscription and add it to in-memory subscriptions let new_subscription = Arc::new(Subscription::try_from(subscription_data.clone())?); - mem_subscriptions.insert(version.to_owned(), new_subscription); + // mem_subscriptions is indexed on public version + mem_subscriptions.insert(*new_subscription.data().uuid(), new_subscription); } } } - debug!("Active subscriptions are: {:?}", active_subscriptions); + debug!( + "Active subscriptions internal versions are: {:?}", + active_subscriptions + ); // Make a list of subscriptions that need to be removed from in-memory subscriptions // These subscriptions have been disabled or deleted - let mut to_delete = HashSet::new(); - for version in mem_subscriptions.keys() { - if !active_subscriptions.contains(version) { - debug!("Mark {} as 'to delete'", version); - to_delete.insert(version.to_string()); + let mut to_delete: HashSet = HashSet::new(); + for subscription in mem_subscriptions.values() { + if !active_subscriptions.contains(&subscription.data().internal_version()) { + debug!( + "Mark subscription {} as 'to delete' (public version: {})", + 
subscription.data().name(), + subscription.public_version + ); + to_delete.insert(*subscription.data().uuid()); } } // Remove listed subscriptions - for version in to_delete { + for subscription_uuid in to_delete { info!( - "Remove subscription {} from in memory subscriptions", - version + "Remove subscription uuid {} from in memory subscriptions", + subscription_uuid ); - mem_subscriptions.remove(&version); + mem_subscriptions.remove(&subscription_uuid); } if mem_subscriptions.is_empty() { @@ -223,9 +225,11 @@ async fn reload_subscriptions( mem_subscriptions .iter() .map(|(_, subscription)| format!( - "\"{}\" ({})", + "\"{}\" (uuid:{}, internal_version:{}, public_version:{})", subscription.data.name(), - subscription.data.uuid() + subscription.data.uuid(), + subscription.data().internal_version(), + subscription.public_version, )) .join(", ") ); diff --git a/server/src/tls.rs b/server/src/tls.rs index 3cfde94..c91f06c 100644 --- a/server/src/tls.rs +++ b/server/src/tls.rs @@ -2,32 +2,38 @@ use anyhow::{bail, Context, Result}; use common::encoding::encode_utf16le; use hex::ToHex; use log::{debug, info}; -use rustls::server::AllowAnyAuthenticatedClient; -use rustls::{PrivateKey, RootCertStore, ServerConfig}; use sha1::{Digest, Sha1}; use std::fs; use std::io::BufReader; use std::sync::Arc; +use tokio_rustls::rustls::crypto::aws_lc_rs::{default_provider, ALL_CIPHER_SUITES}; +use tokio_rustls::rustls::pki_types::{CertificateDer, PrivateKeyDer}; +use tokio_rustls::rustls::server::WebPkiClientVerifier; +use tokio_rustls::rustls::{RootCertStore, ServerConfig, ALL_VERSIONS}; use x509_parser::oid_registry::OidRegistry; use x509_parser::prelude::{FromDer, X509Certificate}; use crate::sldc; /// Load certificates contained inside a PEM file -fn load_certs(filename: &str) -> Result> { +fn load_certs(filename: &str) -> Result>> { let certfile = fs::File::open(filename)?; let mut reader = BufReader::new(certfile); debug!("Loaded certificate {:?}", filename); - 
Ok(rustls_pemfile::certs(&mut reader)? - .iter() - .map(|v| rustls::Certificate(v.clone())) - .collect()) + let mut certs = Vec::new(); + for cert_res in rustls_pemfile::certs(&mut reader) { + match cert_res { + Ok(cert) => certs.push(cert.clone()), + Err(error) => return Err(anyhow::anyhow!(error)), + } + } + Ok(certs) } /// Load private key contained inside a file -fn load_priv_key(filename: &str) -> Result { +fn load_priv_key(filename: &str) -> Result> { let keyfile = fs::File::open(filename).context("Cannot open private key file")?; let mut reader = BufReader::new(keyfile); @@ -35,9 +41,9 @@ fn load_priv_key(filename: &str) -> Result { loop { match rustls_pemfile::read_one(&mut reader).context("Cannot parse private key file")? { - Some(rustls_pemfile::Item::RSAKey(key)) => return Ok(PrivateKey(key)), - Some(rustls_pemfile::Item::PKCS8Key(key)) => return Ok(PrivateKey(key)), - Some(rustls_pemfile::Item::ECKey(key)) => return Ok(PrivateKey(key)), + Some(rustls_pemfile::Item::Pkcs1Key(key)) => return Ok(PrivateKeyDer::Pkcs1(key)), + Some(rustls_pemfile::Item::Pkcs8Key(key)) => return Ok(PrivateKeyDer::Pkcs8(key)), + Some(rustls_pemfile::Item::Sec1Key(key)) => return Ok(PrivateKeyDer::Sec1(key)), None => break, _ => {} } @@ -81,23 +87,24 @@ pub fn make_config(args: &common::settings::Tls) -> Result { let mut client_auth_roots = RootCertStore::empty(); - // stock all certificates from given CA certificate file into certificate store + // Put all certificates from given CA certificate file into certificate store for root in ca_certs { client_auth_roots - .add(&root) + .add(root) .context("Could not add certificate to root of trust")?; } // create verifier : does not allow unauthenticated clients // and authenticated clients must be certified by one of the listed CAs - let client_cert_verifier = AllowAnyAuthenticatedClient::new(client_auth_roots).boxed(); + let client_cert_verifier = + WebPkiClientVerifier::builder(Arc::new(client_auth_roots)).build()?; + // Allow 
everything available in rustls for maximum support + let mut crypto_provider = default_provider(); + crypto_provider.cipher_suites = ALL_CIPHER_SUITES.to_vec(); // make config - let mut config: ServerConfig = ServerConfig::builder() - // Allow everything available in rustls for maximum support - .with_cipher_suites(rustls::ALL_CIPHER_SUITES) - .with_safe_default_kx_groups() - .with_protocol_versions(rustls::ALL_VERSIONS) + let mut config: ServerConfig = ServerConfig::builder_with_provider(Arc::new(crypto_provider)) + .with_protocol_versions(ALL_VERSIONS) .context("Could not build configuration defaults")? .with_client_cert_verifier(client_cert_verifier) // add verifier .with_single_cert(cert, priv_key) // add server vertification @@ -153,13 +160,17 @@ pub fn subject_from_cert(cert: &[u8]) -> Result { /// Read and decode request payload pub async fn get_request_payload( - parts: http::request::Parts, + parts: hyper::http::request::Parts, data: hyper::body::Bytes, ) -> Result>> { let payload = data.to_vec(); let message = match parts.headers.get("Content-Encoding") { - Some(value) if value == "SLDC" => sldc::decompress(&payload).unwrap_or(payload), + Some(value) if value == "SLDC" => { + // Decompression is a blocking operation which can take a few milliseconds + tokio::task::spawn_blocking(move || sldc::decompress(&payload).unwrap_or(payload)) + .await? 
+ } None => payload, value => bail!("Unsupported Content-Encoding {:?}", value), }; @@ -168,8 +179,17 @@ pub async fn get_request_payload( } /// Encode payload for response -pub fn get_response_payload(payload: String) -> Result> { - encode_utf16le(payload).context("Failed to encode payload in utf16le") +pub async fn get_response_payload(payload: String) -> Result> { + // If the payload to encode is large, encoding takes time and should be run + in a blocking task + if payload.len() > 1000 { + tokio::task::spawn_blocking(move || { + encode_utf16le(payload).context("Failed to encode payload in utf16le") + }) + .await? + } else { + encode_utf16le(payload).context("Failed to encode payload in utf16le") + } } #[cfg(test)] @@ -289,7 +309,7 @@ mod tests { 163, 248, 217, 35, 131, 227, 14, 217, 59, 94, 135, 246, 176, 220, 152, 199, 89, 21, 119, 24, 16, 188, 17, 202, 8, 12, 162, 16, 250, 163, 241, 88, ]; - assert_eq!(key.unwrap(), PrivateKey(content.to_vec())); + assert_eq!(key.unwrap(), PrivateKeyDer::Sec1(content.to_vec().into())); // rsa // let mut file = std::env::var("CARGO_MANIFEST_DIR").unwrap(); @@ -366,6 +386,6 @@ mod tests { 132, 223, 102, 86, 216, 5, 218, 125, 237, 212, 218, 133, 165, 97, 62, 73, 27, 106, 224, 64, ]; - assert_eq!(key.unwrap(), PrivateKey(content.to_vec())); + assert_eq!(key.unwrap(), PrivateKeyDer::Pkcs8(content.to_vec().into())); } }