diff --git a/.github/workflows/rust-test.yml b/.github/workflows/rust-test.yml new file mode 100644 index 0000000..ea497d5 --- /dev/null +++ b/.github/workflows/rust-test.yml @@ -0,0 +1,24 @@ +name: Rust + +on: [push] + +env: + CARGO_TERM_COLOR: always + +jobs: + build: + + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v3 + - name: Install dependencies + run: | + sudo apt-get update && \ + sudo apt-get install -y --no-install-recommends \ + libkrb5-dev \ + libssl-dev + - name: Build + run: cargo build --verbose + - name: Run tests + run: cargo test --verbose -- --skip postgres diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..81cf465 --- /dev/null +++ b/.gitignore @@ -0,0 +1,2 @@ +/target +/.vscode diff --git a/Cargo.lock b/Cargo.lock new file mode 100644 index 0000000..3f5de2f --- /dev/null +++ b/Cargo.lock @@ -0,0 +1,2101 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 3 + +[[package]] +name = "ahash" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" +dependencies = [ + "getrandom", + "once_cell", + "version_check", +] + +[[package]] +name = "aho-corasick" +version = "0.7.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc936419f96fa211c1b9166887b38e5e40b19958e5b895be7c1f93adec7071ac" +dependencies = [ + "memchr", +] + +[[package]] +name = "android_system_properties" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" +dependencies = [ + "libc", +] + +[[package]] +name = "ansi_term" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" +dependencies = [ + "winapi", +] + +[[package]] +name = "anyhow" 
+version = "1.0.68" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2cb2f989d18dd141ab8ae82f64d1a8cdd37e0840f73a406896cf5e99502fab61" + +[[package]] +name = "async-trait" +version = "0.1.60" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d1d8ab452a3936018a687b20e6f7cf5363d713b732b8884001317b0e48aa3" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "atty" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" +dependencies = [ + "hermit-abi 0.1.19", + "libc", + "winapi", +] + +[[package]] +name = "autocfg" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" + +[[package]] +name = "base64" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" + +[[package]] +name = "bindgen" +version = "0.59.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bd2a9a458e8f4304c52c43ebb0cfbd520289f8379a52e329a38afda99bf8eb8" +dependencies = [ + "bitflags", + "cexpr", + "clang-sys", + "clap 2.34.0", + "env_logger", + "lazy_static", + "lazycell", + "log", + "peeking_take_while", + "proc-macro2", + "quote", + "regex", + "rustc-hash", + "shlex", + "which", +] + +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + +[[package]] +name = "bitreader" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d84ea71c85d1fe98fe67a9b9988b1695bc24c0b0d3bfb18d4c510f44b4b09941" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "block-buffer" +version = 
"0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69cce20737498f97b993470a6e536b8523f0af7892a4f928cceb1ac5e52ebe7e" +dependencies = [ + "generic-array", +] + +[[package]] +name = "buf-read-ext" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e2c71c44e5bbc64de4ecfac946e05f9bba5cc296ea7bab4d3eda242a3ffa73c" + +[[package]] +name = "bumpalo" +version = "3.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "572f695136211188308f16ad2ca5c851a712c464060ae6974944458eb83880ba" + +[[package]] +name = "byteorder" +version = "1.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" + +[[package]] +name = "bytes" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dfb24e866b15a1af2a1b663f10c6b6b8f397a84aadb828f12e5b289ec23a3a3c" + +[[package]] +name = "cc" +version = "1.0.78" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a20104e2335ce8a659d6dd92a51a767a0c062599c73b343fd152cb401e828c3d" +dependencies = [ + "jobserver", +] + +[[package]] +name = "cexpr" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" +dependencies = [ + "nom", +] + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "chrono" +version = "0.4.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16b0a3d9ed01224b22057780a37bb8c5dbfe1be8ba48678e7bf57ec4b385411f" +dependencies = [ + "iana-time-zone", + "js-sys", + "num-integer", + "num-traits", + "time", + "wasm-bindgen", + "winapi", +] + +[[package]] +name = "clang-sys" 
+version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa2e27ae6ab525c3d369ded447057bca5438d86dc3a68f6faafb8269ba82ebf3" +dependencies = [ + "glob", + "libc", + "libloading", +] + +[[package]] +name = "clap" +version = "2.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c" +dependencies = [ + "ansi_term", + "atty", + "bitflags", + "strsim 0.8.0", + "textwrap", + "unicode-width", + "vec_map", +] + +[[package]] +name = "clap" +version = "4.0.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7db700bc935f9e43e88d00b0850dae18a63773cfbec6d8e070fccf7fef89a39" +dependencies = [ + "bitflags", + "clap_lex", + "is-terminal", + "once_cell", + "strsim 0.10.0", + "termcolor", +] + +[[package]] +name = "clap_lex" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d4198f73e42b4936b35b5bb248d81d2b595ecb170da0bac7655c54eedfa8da8" +dependencies = [ + "os_str_bytes", +] + +[[package]] +name = "cli" +version = "0.1.0" +dependencies = [ + "anyhow", + "clap 4.0.32", + "common", + "env_logger", + "log", + "roxmltree", + "serde_json", + "tokio", +] + +[[package]] +name = "codespan-reporting" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3538270d33cc669650c4b093848450d380def10c331d38c768e34cac80576e6e" +dependencies = [ + "termcolor", + "unicode-width", +] + +[[package]] +name = "common" +version = "0.1.0" +dependencies = [ + "anyhow", + "async-trait", + "chrono", + "deadpool-postgres", + "deadpool-sqlite", + "encoding_rs", + "log", + "openssl", + "postgres-openssl", + "rusqlite", + "serde", + "serde_json", + "serial_test", + "tempfile", + "tokio", + "tokio-postgres", + "toml", + "uuid", +] + +[[package]] +name = "core-foundation-sys" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" + +[[package]] +name = "cpufeatures" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28d997bd5e24a5928dd43e46dc529867e207907fe0b239c3477d924f7f2ca320" +dependencies = [ + "libc", +] + +[[package]] +name = "crypto-common" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" +dependencies = [ + "generic-array", + "typenum", +] + +[[package]] +name = "cxx" +version = "1.0.85" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5add3fc1717409d029b20c5b6903fc0c0b02fa6741d820054f4a2efa5e5816fd" +dependencies = [ + "cc", + "cxxbridge-flags", + "cxxbridge-macro", + "link-cplusplus", +] + +[[package]] +name = "cxx-build" +version = "1.0.85" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4c87959ba14bc6fbc61df77c3fcfe180fc32b93538c4f1031dd802ccb5f2ff0" +dependencies = [ + "cc", + "codespan-reporting", + "once_cell", + "proc-macro2", + "quote", + "scratch", + "syn", +] + +[[package]] +name = "cxxbridge-flags" +version = "1.0.85" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69a3e162fde4e594ed2b07d0f83c6c67b745e7f28ce58c6df5e6b6bef99dfb59" + +[[package]] +name = "cxxbridge-macro" +version = "1.0.85" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e7e2adeb6a0d4a282e581096b06e1791532b7d576dcde5ccd9382acf55db8e6" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "dashmap" +version = "5.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "907076dfda823b0b36d2a1bb5f90c96660a5bbcd7729e10727f07858f22c4edc" +dependencies = [ + "cfg-if", + "hashbrown", + "lock_api", + "once_cell", + "parking_lot_core", +] + +[[package]] +name = "deadpool" +version = "0.9.5" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "421fe0f90f2ab22016f32a9881be5134fdd71c65298917084b0c7477cbc3856e" +dependencies = [ + "async-trait", + "deadpool-runtime", + "num_cpus", + "retain_mut", + "tokio", +] + +[[package]] +name = "deadpool-postgres" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "836a24a9d49deefe610b8b60c767a7412e9a931d79a89415cd2d2d71630ca8d7" +dependencies = [ + "deadpool", + "log", + "tokio", + "tokio-postgres", +] + +[[package]] +name = "deadpool-runtime" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eaa37046cc0f6c3cc6090fbdbf73ef0b8ef4cfcc37f6befc0020f63e8cf121e1" +dependencies = [ + "tokio", +] + +[[package]] +name = "deadpool-sqlite" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e026821eaacbce25ff0d54405e4421d71656fcae3e4a9323461280fcda6dbc7d" +dependencies = [ + "deadpool", + "deadpool-sync", + "rusqlite", +] + +[[package]] +name = "deadpool-sync" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1bea344b64b32537fde6e0f0179b1ede34d435636719dd40fe6a0f28218a61c" +dependencies = [ + "deadpool", +] + +[[package]] +name = "digest" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f" +dependencies = [ + "block-buffer", + "crypto-common", + "subtle", +] + +[[package]] +name = "either" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90e5c1c8368803113bf0c9584fc495a58b86dc8a29edbf8fe877d21d9507e797" + +[[package]] +name = "encoding_rs" +version = "0.8.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9852635589dc9f9ea1b6fe9f05b50ef208c85c834a562f0c6abb1c475736ec2b" +dependencies = [ + "cfg-if", +] + +[[package]] +name = 
"env_logger" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a12e6657c4c97ebab115a42dcee77225f7f482cdd841cf7088c657a42e9e00e7" +dependencies = [ + "atty", + "humantime", + "log", + "regex", + "termcolor", +] + +[[package]] +name = "errno" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f639046355ee4f37944e44f60642c6f3a7efa3cf6b78c78a0d989a8ce6c396a1" +dependencies = [ + "errno-dragonfly", + "libc", + "winapi", +] + +[[package]] +name = "errno-dragonfly" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf" +dependencies = [ + "cc", + "libc", +] + +[[package]] +name = "fallible-iterator" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7" + +[[package]] +name = "fallible-streaming-iterator" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" + +[[package]] +name = "fastrand" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7a407cfaa3385c4ae6b23e84623d48c2798d06e3e6a1878f7f59f17b3f86499" +dependencies = [ + "instant", +] + +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "foreign-types" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" +dependencies = [ + "foreign-types-shared", +] + +[[package]] +name = "foreign-types-shared" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" + +[[package]] +name = "futures" +version = "0.3.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "531ac96c6ff5fd7c62263c5e3c67a603af4fcaee2e1a0ae5565ba3a11e69e549" +dependencies = [ + "futures-channel", + "futures-core", + "futures-executor", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-channel" +version = "0.3.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "164713a5a0dcc3e7b4b1ed7d3b433cabc18025386f9339346e8daf15963cf7ac" +dependencies = [ + "futures-core", + "futures-sink", +] + +[[package]] +name = "futures-core" +version = "0.3.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86d7a0c1aa76363dac491de0ee99faf6941128376f1cf96f07db7603b7de69dd" + +[[package]] +name = "futures-executor" +version = "0.3.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1997dd9df74cdac935c76252744c1ed5794fac083242ea4fe77ef3ed60ba0f83" +dependencies = [ + "futures-core", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-io" +version = "0.3.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89d422fa3cbe3b40dca574ab087abb5bc98258ea57eea3fd6f1fa7162c778b91" + +[[package]] +name = "futures-macro" +version = "0.3.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3eb14ed937631bd8b8b8977f2c198443447a8355b6e3ca599f38c975e5a963b6" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "futures-sink" +version = "0.3.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec93083a4aecafb2a80a885c9de1f0ccae9dbd32c2bb54b0c3a65690e0b8d2f2" + +[[package]] +name = "futures-task" +version = "0.3.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"fd65540d33b37b16542a0438c12e6aeead10d4ac5d05bd3f805b8f35ab592879" + +[[package]] +name = "futures-util" +version = "0.3.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ef6b17e481503ec85211fed8f39d1970f128935ca1f814cd32ac4a6842e84ab" +dependencies = [ + "futures-channel", + "futures-core", + "futures-io", + "futures-macro", + "futures-sink", + "futures-task", + "memchr", + "pin-project-lite", + "pin-utils", + "slab", +] + +[[package]] +name = "generic-array" +version = "0.14.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bff49e947297f3312447abdca79f45f4738097cc82b06e72054d2223f601f1b9" +dependencies = [ + "typenum", + "version_check", +] + +[[package]] +name = "getrandom" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31" +dependencies = [ + "cfg-if", + "libc", + "wasi 0.11.0+wasi-snapshot-preview1", +] + +[[package]] +name = "glob" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" + +[[package]] +name = "h2" +version = "0.3.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f9f29bc9dda355256b2916cf526ab02ce0aeaaaf2bad60d65ef3f12f11dd0f4" +dependencies = [ + "bytes", + "fnv", + "futures-core", + "futures-sink", + "futures-util", + "http", + "indexmap", + "slab", + "tokio", + "tokio-util", + "tracing", +] + +[[package]] +name = "hashbrown" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" +dependencies = [ + "ahash", +] + +[[package]] +name = "hashlink" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69fe1fcf8b4278d860ad0548329f892a3631fb63f82574df68275f34cdbe0ffa" +dependencies 
= [ + "hashbrown", +] + +[[package]] +name = "hermit-abi" +version = "0.1.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" +dependencies = [ + "libc", +] + +[[package]] +name = "hermit-abi" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee512640fe35acbfb4bb779db6f0d80704c2cacfa2e39b601ef3e3f47d1ae4c7" +dependencies = [ + "libc", +] + +[[package]] +name = "hex" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" + +[[package]] +name = "hmac" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" +dependencies = [ + "digest", +] + +[[package]] +name = "http" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + +[[package]] +name = "http-body" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" +dependencies = [ + "bytes", + "http", + "pin-project-lite", +] + +[[package]] +name = "httparse" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" + +[[package]] +name = "httpdate" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" + +[[package]] +name = "humantime" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" + +[[package]] +name = "hyper" +version = "0.14.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "034711faac9d2166cb1baf1a2fb0b60b1f277f8492fd72176c17f3515e1abd3c" +dependencies = [ + "bytes", + "futures-channel", + "futures-core", + "futures-util", + "h2", + "http", + "http-body", + "httparse", + "httpdate", + "itoa", + "pin-project-lite", + "socket2", + "tokio", + "tower-service", + "tracing", + "want", +] + +[[package]] +name = "iana-time-zone" +version = "0.1.53" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64c122667b287044802d6ce17ee2ddf13207ed924c712de9a66a5814d5b64765" +dependencies = [ + "android_system_properties", + "core-foundation-sys", + "iana-time-zone-haiku", + "js-sys", + "wasm-bindgen", + "winapi", +] + +[[package]] +name = "iana-time-zone-haiku" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0703ae284fc167426161c2e3f1da3ea71d94b21bedbcc9494e92b28e334e3dca" +dependencies = [ + "cxx", + "cxx-build", +] + +[[package]] +name = "indexmap" +version = "1.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1885e79c1fc4b10f0e172c475f458b7f7b93061064d98c3293e98c5ba0c8b399" +dependencies = [ + "autocfg", + "hashbrown", +] + +[[package]] +name = "instant" +version = "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "io-lifetimes" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46112a93252b123d31a119a8d1a1ac19deac4fac6e0e8b0df58f0d4e5870e63c" +dependencies = [ + "libc", + "windows-sys", +] + +[[package]] +name = "is-terminal" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"28dfb6c8100ccc63462345b67d1bbc3679177c75ee4bf59bf29c8b1d110b8189" +dependencies = [ + "hermit-abi 0.2.6", + "io-lifetimes", + "rustix", + "windows-sys", +] + +[[package]] +name = "itertools" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" +dependencies = [ + "either", +] + +[[package]] +name = "itoa" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fad582f4b9e86b6caa621cabeb0963332d92eea04729ab12892c2533951e6440" + +[[package]] +name = "jobserver" +version = "0.1.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "936cfd212a0155903bcbc060e316fb6cc7cbf2e1907329391ebadc1fe0ce77c2" +dependencies = [ + "libc", +] + +[[package]] +name = "js-sys" +version = "0.3.60" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49409df3e3bf0856b916e2ceaca09ee28e6871cf7d9ce97a692cacfdb2a25a47" +dependencies = [ + "wasm-bindgen", +] + +[[package]] +name = "lazy_static" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" + +[[package]] +name = "lazycell" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" + +[[package]] +name = "libc" +version = "0.2.139" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "201de327520df007757c1f0adce6e827fe8562fbc28bfd9c15571c66ca1f5f79" + +[[package]] +name = "libgssapi" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42971038bb1f78bc88dd817b3aafa509be3d1b71e2042f93246654235e3592f5" +dependencies = [ + "bitflags", + "bytes", + "lazy_static", + "libgssapi-sys", +] + +[[package]] +name = "libgssapi-sys" +version = "0.2.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "1dd7d65e409c889f6c9d81ff079371d0d8fd88d7dca702ff187ef96fb0450fb7" +dependencies = [ + "bindgen", +] + +[[package]] +name = "libloading" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b67380fd3b2fbe7527a606e18729d21c6f3951633d0500574c4dc22d2d638b9f" +dependencies = [ + "cfg-if", + "winapi", +] + +[[package]] +name = "libsqlite3-sys" +version = "0.25.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29f835d03d717946d28b1d1ed632eb6f0e24a299388ee623d0c23118d3e8a7fa" +dependencies = [ + "cc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "libz-sys" +version = "1.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9702761c3935f8cc2f101793272e202c72b99da8f4224a19ddcf1279a6450bbf" +dependencies = [ + "cc", + "libc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "link-cplusplus" +version = "1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ecd207c9c713c34f95a097a5b029ac2ce6010530c7b49d7fea24d977dede04f5" +dependencies = [ + "cc", +] + +[[package]] +name = "linux-raw-sys" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f051f77a7c8e6957c0696eac88f26b0117e54f52d3fc682ab19397a8812846a4" + +[[package]] +name = "lock_api" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df" +dependencies = [ + "autocfg", + "scopeguard", +] + +[[package]] +name = "log" +version = "0.4.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "lz4-sys" +version = "1.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"57d27b317e207b10f69f5e75494119e391a96f48861ae870d1da6edac98ca900" +dependencies = [ + "cc", + "libc", +] + +[[package]] +name = "md-5" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6365506850d44bff6e2fbcb5176cf63650e48bd45ef2fe2665ae1570e0f4b9ca" +dependencies = [ + "digest", +] + +[[package]] +name = "memchr" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" + +[[package]] +name = "mime" +version = "0.3.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" + +[[package]] +name = "minimal-lexical" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" + +[[package]] +name = "mio" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5d732bc30207a6423068df043e3d02e0735b155ad7ce1a6f76fe2baa5b158de" +dependencies = [ + "libc", + "log", + "wasi 0.11.0+wasi-snapshot-preview1", + "windows-sys", +] + +[[package]] +name = "nom" +version = "7.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5507769c4919c998e69e49c839d9dc6e693ede4cc4290d6ad8b41d4f09c548c" +dependencies = [ + "memchr", + "minimal-lexical", +] + +[[package]] +name = "num-integer" +version = "0.1.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" +dependencies = [ + "autocfg", + "num-traits", +] + +[[package]] +name = "num-traits" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" +dependencies = [ + "autocfg", +] + +[[package]] +name = "num_cpus" +version = 
"1.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fac9e2da13b5eb447a6ce3d392f23a29d8694bff781bf03a16cd9ac8697593b" +dependencies = [ + "hermit-abi 0.2.6", + "libc", +] + +[[package]] +name = "num_enum" +version = "0.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf5395665662ef45796a4ff5486c5d41d29e0c09640af4c5f17fd94ee2c119c9" +dependencies = [ + "num_enum_derive", +] + +[[package]] +name = "num_enum_derive" +version = "0.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b0498641e53dd6ac1a4f22547548caa6864cc4933784319cd1775271c5a46ce" +dependencies = [ + "proc-macro-crate", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "once_cell" +version = "1.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f61fba1741ea2b3d6a1e3178721804bb716a68a6aeba1149b5d52e3d464ea66" + +[[package]] +name = "openssl" +version = "0.10.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b102428fd03bc5edf97f62620f7298614c45cedf287c271e7ed450bbaf83f2e1" +dependencies = [ + "bitflags", + "cfg-if", + "foreign-types", + "libc", + "once_cell", + "openssl-macros", + "openssl-sys", +] + +[[package]] +name = "openssl-macros" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b501e44f11665960c7e7fcf062c7d96a14ade4aa98116c004b2e37b5be7d736c" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "openssl-sys" +version = "0.9.80" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23bbbf7854cd45b83958ebe919f0e8e516793727652e27fda10a8384cfc790b7" +dependencies = [ + "autocfg", + "cc", + "libc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "os_str_bytes" +version = "6.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"9b7820b9daea5457c9f21c69448905d723fbd21136ccf521748f23fd49e723ee" + +[[package]] +name = "parking_lot" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" +dependencies = [ + "lock_api", + "parking_lot_core", +] + +[[package]] +name = "parking_lot_core" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ff9f3fef3968a3ec5945535ed654cb38ff72d7495a25619e2247fb15a2ed9ba" +dependencies = [ + "cfg-if", + "libc", + "redox_syscall", + "smallvec", + "windows-sys", +] + +[[package]] +name = "peeking_take_while" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" + +[[package]] +name = "percent-encoding" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" + +[[package]] +name = "phf" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "928c6535de93548188ef63bb7c4036bd415cd8f36ad25af44b9789b2ee72a48c" +dependencies = [ + "phf_shared", +] + +[[package]] +name = "phf_shared" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1fb5f6f826b772a8d4c0394209441e7d37cbbb967ae9c7e0e8134365c9ee676" +dependencies = [ + "siphasher", +] + +[[package]] +name = "pin-project-lite" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + +[[package]] +name = "pkg-config" +version = "0.3.26" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ac9a59f73473f1b8d852421e59e64809f025994837ef743615c6d0c5b305160" + +[[package]] +name = "postgres-openssl" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1de0ea6504e07ca78355a6fb88ad0f36cafe9e696cbc6717f16a207f3a60be72" +dependencies = [ + "futures", + "openssl", + "tokio", + "tokio-openssl", + "tokio-postgres", +] + +[[package]] +name = "postgres-protocol" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "878c6cbf956e03af9aa8204b407b9cbf47c072164800aa918c516cd4b056c50c" +dependencies = [ + "base64", + "byteorder", + "bytes", + "fallible-iterator", + "hmac", + "md-5", + "memchr", + "rand", + "sha2", + "stringprep", +] + +[[package]] +name = "postgres-types" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73d946ec7d256b04dfadc4e6a3292324e6f417124750fc5c0950f981b703a0f1" +dependencies = [ + "bytes", + "fallible-iterator", + "postgres-protocol", +] + +[[package]] +name = "ppv-lite86" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" + +[[package]] +name = "proc-macro-crate" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eda0fc3b0fb7c975631757e14d9049da17374063edb6ebbcbc54d880d4fe94e9" +dependencies = [ + "once_cell", + "thiserror", + "toml", +] + +[[package]] +name = "proc-macro2" +version = "1.0.49" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57a8eca9f9c4ffde41714334dee777596264c7825420f521abc92b5b5deb63a5" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quick-xml" +version = "0.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "58e21a144a0ffb5fad7b464babcdab934a325ad69b7c0373bcfef5cbd9799ca9" 
+dependencies = [ + "memchr", +] + +[[package]] +name = "quote" +version = "1.0.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8856d8364d252a14d474036ea1358d63c9e6965c8e5c1885c18f73d70bff9c7b" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc", + "rand_chacha", + "rand_core", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom", +] + +[[package]] +name = "rdkafka" +version = "0.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1de127f294f2dba488ed46760b129d5ecbeabbd337ccbf3739cb29d50db2161c" +dependencies = [ + "futures", + "libc", + "log", + "rdkafka-sys", + "serde", + "serde_derive", + "serde_json", + "slab", + "tokio", +] + +[[package]] +name = "rdkafka-sys" +version = "4.3.0+1.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d222a401698c7f2010e3967353eae566d9934dcda49c29910da922414ab4e3f4" +dependencies = [ + "libc", + "libz-sys", + "lz4-sys", + "num_enum", + "pkg-config", + "zstd-sys", +] + +[[package]] +name = "redox_syscall" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" +dependencies = [ + "bitflags", +] + +[[package]] +name = "regex" +version = "1.7.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "e076559ef8e241f2ae3479e36f97bd5741c0330689e217ad51ce2c76808b868a" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", +] + +[[package]] +name = "regex-syntax" +version = "0.6.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "456c603be3e8d448b072f410900c09faf164fbce2d480456f50eea6e25f9c848" + +[[package]] +name = "remove_dir_all" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" +dependencies = [ + "winapi", +] + +[[package]] +name = "retain_mut" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4389f1d5789befaf6029ebd9f7dac4af7f7e3d61b69d4f30e2ac02b57e7712b0" + +[[package]] +name = "roxmltree" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b9de9831a129b122e7e61f242db509fa9d0838008bf0b29bb0624669edfe48a" +dependencies = [ + "xmlparser", +] + +[[package]] +name = "rusqlite" +version = "0.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01e213bc3ecb39ac32e81e51ebe31fd888a940515173e3a18a35f8c6e896422a" +dependencies = [ + "bitflags", + "fallible-iterator", + "fallible-streaming-iterator", + "hashlink", + "libsqlite3-sys", + "smallvec", +] + +[[package]] +name = "rustc-hash" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" + +[[package]] +name = "rustix" +version = "0.36.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4feacf7db682c6c329c4ede12649cd36ecab0f3be5b7d74e6a20304725db4549" +dependencies = [ + "bitflags", + "errno", + "io-lifetimes", + "libc", + "linux-raw-sys", + "windows-sys", +] + +[[package]] +name = "ryu" +version = "1.0.12" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b4b9743ed687d4b4bcedf9ff5eaa7398495ae14e61cba0a295704edbc7decde" + +[[package]] +name = "scopeguard" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" + +[[package]] +name = "scratch" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ddccb15bcce173023b3fedd9436f882a0739b8dfb45e4f6b6002bee5929f61b2" + +[[package]] +name = "serde" +version = "1.0.152" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb7d1f0d3021d347a83e556fc4683dea2ea09d87bccdf88ff5c12545d89d5efb" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.152" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af487d118eecd09402d70a5d72551860e788df87b464af30e5ea6a38c75c541e" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_json" +version = "1.0.91" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "877c235533714907a8c2464236f5c4b2a17262ef1bd71f38f35ea592c8da6883" +dependencies = [ + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "serial_test" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c789ec87f4687d022a2405cf46e0cd6284889f1839de292cadeb6c6019506f2" +dependencies = [ + "dashmap", + "futures", + "lazy_static", + "log", + "parking_lot", + "serial_test_derive", +] + +[[package]] +name = "serial_test_derive" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b64f9e531ce97c88b4778aad0ceee079216071cffec6ac9b904277f8f92e7fe3" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "server" +version = "0.1.0" +dependencies = [ + "anyhow", + "async-trait", + "base64", + "bitreader", + 
"buf-read-ext", + "chrono", + "clap 4.0.32", + "common", + "env_logger", + "futures", + "futures-util", + "hex", + "http", + "httparse", + "hyper", + "itertools", + "itoa", + "lazy_static", + "libgssapi", + "log", + "mime", + "quick-xml", + "rdkafka", + "regex", + "roxmltree", + "serde", + "serde_json", + "tokio", + "uuid", + "xmlparser", +] + +[[package]] +name = "sha2" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "82e6b795fe2e3b1e845bafcb27aa35405c4d47cdfc92af5fc8d3002f76cebdc0" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest", +] + +[[package]] +name = "shlex" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3" + +[[package]] +name = "signal-hook-registry" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e51e73328dc4ac0c7ccbda3a494dfa03df1de2f46018127f60c693f2648455b0" +dependencies = [ + "libc", +] + +[[package]] +name = "siphasher" +version = "0.3.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7bd3e3206899af3f8b12af284fafc038cc1dc2b41d1b89dd17297221c5d225de" + +[[package]] +name = "slab" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4614a76b2a8be0058caa9dbbaf66d988527d86d003c11a94fbd335d7661edcef" +dependencies = [ + "autocfg", +] + +[[package]] +name = "smallvec" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" + +[[package]] +name = "socket2" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02e2d2db9033d13a1567121ddd7a095ee144db4e1ca1b1bda3419bc0da294ebd" +dependencies = [ + "libc", + "winapi", +] + +[[package]] +name = "stringprep" +version = "0.1.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ee348cb74b87454fff4b551cbf727025810a004f88aeacae7f85b87f4e9a1c1" +dependencies = [ + "unicode-bidi", + "unicode-normalization", +] + +[[package]] +name = "strsim" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" + +[[package]] +name = "strsim" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" + +[[package]] +name = "subtle" +version = "2.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" + +[[package]] +name = "syn" +version = "1.0.107" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f4064b5b16e03ae50984a5a8ed5d4f8803e6bc1fd170a3cda91a1be4b18e3f5" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "tempfile" +version = "3.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4" +dependencies = [ + "cfg-if", + "fastrand", + "libc", + "redox_syscall", + "remove_dir_all", + "winapi", +] + +[[package]] +name = "termcolor" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bab24d30b911b2376f3a13cc2cd443142f0c81dda04c118693e35b3835757755" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "textwrap" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" +dependencies = [ + "unicode-width", +] + +[[package]] +name = "thiserror" +version = "1.0.38" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"6a9cd18aa97d5c45c6603caea1da6628790b37f7a34b6ca89522331c5180fed0" +dependencies = [ + "thiserror-impl", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.38" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fb327af4685e4d03fa8cbcf1716380da910eeb2bb8be417e7f9fd3fb164f36f" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "time" +version = "0.1.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b797afad3f312d1c66a56d11d0316f916356d11bd158fbc6ca6389ff6bf805a" +dependencies = [ + "libc", + "wasi 0.10.0+wasi-snapshot-preview1", + "winapi", +] + +[[package]] +name = "tinyvec" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = "tinyvec_macros" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" + +[[package]] +name = "tokio" +version = "1.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eab6d665857cc6ca78d6e80303a02cea7a7851e85dfbd77cbdc09bd129f1ef46" +dependencies = [ + "autocfg", + "bytes", + "libc", + "memchr", + "mio", + "num_cpus", + "parking_lot", + "pin-project-lite", + "signal-hook-registry", + "socket2", + "tokio-macros", + "windows-sys", +] + +[[package]] +name = "tokio-macros" +version = "1.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d266c00fde287f55d3f1c3e96c500c362a2b8c695076ec180f27918820bc6df8" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tokio-openssl" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c08f9ffb7809f1b20c1b398d92acf4cc719874b3b2b2d9ea2f09b4a80350878a" +dependencies = [ + "futures-util", + 
"openssl", + "openssl-sys", + "tokio", +] + +[[package]] +name = "tokio-postgres" +version = "0.7.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29a12c1b3e0704ae7dfc25562629798b29c72e6b1d0a681b6f29ab4ae5e7f7bf" +dependencies = [ + "async-trait", + "byteorder", + "bytes", + "fallible-iterator", + "futures-channel", + "futures-util", + "log", + "parking_lot", + "percent-encoding", + "phf", + "pin-project-lite", + "postgres-protocol", + "postgres-types", + "socket2", + "tokio", + "tokio-util", +] + +[[package]] +name = "tokio-util" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bb2e075f03b3d66d8d8785356224ba688d2906a371015e225beeb65ca92c740" +dependencies = [ + "bytes", + "futures-core", + "futures-sink", + "pin-project-lite", + "tokio", + "tracing", +] + +[[package]] +name = "toml" +version = "0.5.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1333c76748e868a4d9d1017b5ab53171dfd095f70c712fdb4653a406547f598f" +dependencies = [ + "serde", +] + +[[package]] +name = "tower-service" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" + +[[package]] +name = "tracing" +version = "0.1.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" +dependencies = [ + "cfg-if", + "pin-project-lite", + "tracing-core", +] + +[[package]] +name = "tracing-core" +version = "0.1.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24eb03ba0eab1fd845050058ce5e616558e8f8d8fca633e6b163fe25c797213a" +dependencies = [ + "once_cell", +] + +[[package]] +name = "try-lock" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" + 
+[[package]] +name = "typenum" +version = "1.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" + +[[package]] +name = "unicode-bidi" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "099b7128301d285f79ddd55b9a83d5e6b9e97c92e0ea0daebee7263e932de992" + +[[package]] +name = "unicode-ident" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "84a22b9f218b40614adcb3f4ff08b703773ad44fa9423e4e0d346d5db86e4ebc" + +[[package]] +name = "unicode-normalization" +version = "0.1.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" +dependencies = [ + "tinyvec", +] + +[[package]] +name = "unicode-width" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b" + +[[package]] +name = "uuid" +version = "1.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "422ee0de9031b5b948b97a8fc04e3aa35230001a722ddd27943e0be31564ce4c" +dependencies = [ + "getrandom", + "rand", +] + +[[package]] +name = "vcpkg" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" + +[[package]] +name = "vec_map" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" + +[[package]] +name = "version_check" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" + +[[package]] +name = "want" +version = "0.3.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" +dependencies = [ + "log", + "try-lock", +] + +[[package]] +name = "wasi" +version = "0.10.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" + +[[package]] +name = "wasi" +version = "0.11.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" + +[[package]] +name = "wasm-bindgen" +version = "0.2.83" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eaf9f5aceeec8be17c128b2e93e031fb8a4d469bb9c4ae2d7dc1888b26887268" +dependencies = [ + "cfg-if", + "wasm-bindgen-macro", +] + +[[package]] +name = "wasm-bindgen-backend" +version = "0.2.83" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c8ffb332579b0557b52d268b91feab8df3615f265d5270fec2a8c95b17c1142" +dependencies = [ + "bumpalo", + "log", + "once_cell", + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.83" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "052be0f94026e6cbc75cdefc9bae13fd6052cdcaf532fa6c45e7ae33a1e6c810" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.83" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07bc0c051dc5f23e307b13285f9d75df86bfdf816c5721e573dec1f9b8aa193c" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-backend", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.83" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"1c38c045535d93ec4f0b4defec448e4291638ee608530863b1e2ba115d4fff7f" + +[[package]] +name = "which" +version = "4.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c831fbbee9e129a8cf93e7747a82da9d95ba8e16621cae60ec2cdc849bacb7b" +dependencies = [ + "either", + "libc", + "once_cell", +] + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-util" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" +dependencies = [ + "winapi", +] + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "windows-sys" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41d2aa71f6f0cbe00ae5167d90ef3cfe66527d6f613ca78ac8024c3ccab9a19e" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.42.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd0f252f5a35cac83d6311b2e795981f5ee6e67eb1f9a7f64eb4500fbc4dcdb4" + +[[package]] +name = "windows_i686_gnu" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fbeae19f6716841636c28d695375df17562ca208b2b7d0dc47635a50ae6c5de7" + +[[package]] +name = "windows_i686_msvc" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "84c12f65daa39dd2babe6e442988fc329d6243fdce47d7d2d155b8d874862246" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf7b1b21b5362cbc318f686150e5bcea75ecedc74dd157d874d754a2ca44b0ed" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09d525d2ba30eeb3297665bd434a54297e4170c7f1a44cad4ef58095b4cd2028" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f40009d85759725a34da6d89a94e63d7bdc50a862acf0dbc7c8e488f1edcb6f5" + +[[package]] +name = "xmlparser" +version = "0.13.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4d25c75bf9ea12c4040a97f829154768bbbce366287e2dc044af160cd79a13fd" + +[[package]] +name = "zstd-sys" +version = "2.0.7+zstd.1.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94509c3ba2fe55294d752b79842c530ccfab760192521df74a081a78d2b3c7f5" +dependencies = [ + "cc", + "libc", + "pkg-config", +] diff --git a/Cargo.toml b/Cargo.toml new file mode 100644 index 0000000..d36708b --- /dev/null +++ b/Cargo.toml @@ -0,0 +1,7 @@ +[workspace] + +members = [ + "common", + "server", + "cli" +] diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..f288702 --- /dev/null +++ b/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 
29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. 
+ + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. 
+ + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. 
+ + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. 
Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. 
+ + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. 
This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ + + Copyright (C) <year> <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + <program> Copyright (C) <year> <name of author> + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/licenses/why-not-lgpl.html>. 
diff --git a/README.md b/README.md new file mode 100644 index 0000000..22c1d23 --- /dev/null +++ b/README.md @@ -0,0 +1,40 @@ +# OpenWEC + +OpenWEC is a free and open source (GPLv3) implementation of a Windows Event Collector server running on GNU/Linux and written in Rust. + +OpenWEC collects Windows event logs from a Linux machine without the need for a third-party local agent running on Windows machines. + +OpenWEC implements the Windows Event Forwarding protocol ([MS-WSMV](https://winprotocoldoc.blob.core.windows.net/productionwindowsarchives/MS-WSMV/%5BMS-WSMV%5D.pdf)), which is derived from WS-Management ([DSP0226](https://www.dmtf.org/sites/default/files/standards/documents/DSP0226_1.0.0.pdf)). The same protocol is used by the built-in Windows Event Forwarding plugin. As it speaks the same protocol, OpenWEC can be used with the built-in Windows Event Forwarding plugin. Only the source-initiated mode (Push) is supported for now. + +OpenWEC is composed of two binaries: +- `openwecd`: OpenWEC server +- `openwec`: OpenWEC CLI, used to manage the OpenWEC server + +The OpenWEC configuration is read from a file (by default `/etc/openwec.conf.toml`). See available parameters in [openwec.conf.sample.toml](openwec.conf.sample.toml). +Subscriptions and their parameters are stored in a [database](doc/database.md) and can be managed using `openwec` (see [CLI](doc/cli.md) documentation). 
+ +# Documentation + +- [Getting started](doc/getting_started.md) +- [Command Line Interface](doc/cli.md) +- [Database](doc/database.md) +- [Subscription query](doc/query.md) +- [Outputs](doc/outputs.md) +- [Output formats](doc/formats.md) +- [How does OpenWEC work?](doc/how_it_works.md) +- [WEF protocol analysis](doc/protocol.md) +- [Monitoring](doc/monitoring.md) +- [Known issues](doc/issues.md) +- [Talk at SSTIC 2023 (in French)](https://www.sstic.org/2023/presentation/openwec/) + +# Contributing + +Any contribution is welcome, be it code, bug report, packaging, documentation or translation. + +# License + +OpenWEC is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. + +OpenWEC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. + +You should have received a copy of the GNU General Public License along with OpenWEC. If not, see the gnu.org web site. 
diff --git a/cli/Cargo.toml b/cli/Cargo.toml new file mode 100644 index 0000000..61192cc --- /dev/null +++ b/cli/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "cli" +version = "0.1.0" +edition = "2021" + +[[bin]] +name = "openwec" +path = "src/main.rs" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +anyhow = "1.0.59" +clap = { version = "4.0.14", features = ["cargo"] } +common = { path = "../common" } +env_logger = "0.9.1" +log = "0.4.17" +serde_json = "1.0.87" +tokio = { version = "1.22.0", features = ["full"] } +roxmltree = "0.15.0" \ No newline at end of file diff --git a/cli/src/bookmarks.rs b/cli/src/bookmarks.rs new file mode 100644 index 0000000..d0311b1 --- /dev/null +++ b/cli/src/bookmarks.rs @@ -0,0 +1,172 @@ +use anyhow::{anyhow, bail, Result}; +use clap::ArgMatches; +use common::database::Db; + +use crate::utils; + +pub async fn run(db: &Db, matches: &ArgMatches) -> Result<()> { + match matches.subcommand() { + Some(("show", matches)) => { + show(db, matches).await?; + } + Some(("delete", matches)) => { + delete(db, matches).await?; + } + Some(("copy", matches)) => { + copy(db, matches).await?; + } + _ => { + bail!("Invalid subcommand") + } + }; + Ok(()) +} + +async fn show(db: &Db, matches: &ArgMatches) -> Result<()> { + let machine = matches.get_one::("machine").cloned(); + let subscription_identifier = matches + .get_one::("subscription") + .expect("Required by clap") + .to_string(); + + let subscription = utils::find_subscription(db, &subscription_identifier) + .await? 
+ .ok_or_else(|| { + anyhow!( + "Could not find subscription with identifier {}", + subscription_identifier + ) + })?; + + if let Some(machine) = machine { + let bookmark = db.get_bookmark(&machine, subscription.uuid()).await?; + match bookmark { + Some(str) => { + println!("{}", str) + } + None => bail!( + "No bookmarks found for {} within subscription \"{}\"", + machine, + subscription.name() + ), + }; + } else { + for data in db.get_bookmarks(subscription.uuid()).await? { + println!("{}:{}", data.machine, data.bookmark); + } + } + + Ok(()) +} + +async fn delete(db: &Db, matches: &ArgMatches) -> Result<()> { + let machine = matches.get_one::("machine").cloned(); + let subscription_identifier = matches.get_one::("subscription").cloned(); + + let subscription = match subscription_identifier { + Some(identifier) => Some( + utils::find_subscription(db, &identifier) + .await? + .ok_or_else(|| { + anyhow!("Could not find subscription with identifier {}", identifier) + })?, + ), + None => None, + }; + + let message = match (&machine, &subscription) { + (Some(machine), Some(subscription)) => format!("You are about to delete the bookmark of {} within subscription \"{}\".\nWARNING: You may lose logs!\nAre you sure?", machine, subscription.name()), + (Some(machine), None) => format!("You are about to delete all bookmarks stored for {}.\nWARNING: You may lose logs!\nAre you sure?", machine), + (None, Some(subscription)) => format!("You are about to delete all bookmarks stored for subscription \"{}\".\nWARNING: You may lose logs!\nAre you sure?", subscription.name()), + (None, None) => "You are about to delete all stored bookmarks.\nWARNING: You may lose logs!\nAre you sure?".to_string(), + }; + + if utils::confirm(&message) { + db.delete_bookmarks(machine.as_deref(), subscription.as_ref().map(|x| x.uuid())) + .await?; + println!("Done"); + } else { + println!("Aborted"); + } + + Ok(()) +} + +async fn copy(db: &Db, matches: &ArgMatches) -> Result<()> { + let machine = 
matches.get_one::("machine").cloned(); + let source_id = matches + .get_one::("source") + .expect("Required by clap") + .to_string(); + let dest_id = matches + .get_one::("destination") + .expect("Required by clap") + .to_string(); + + if source_id == dest_id { + bail!("Source and destination are equal"); + } + + let source = utils::find_subscription(db, &source_id) + .await? + .ok_or_else(|| { + anyhow!( + "Could not find source subscription with identifier \"{}\"", + source_id + ) + })?; + let destination = utils::find_subscription(db, &dest_id) + .await? + .ok_or_else(|| { + anyhow!( + "Could not find destination subscription with identifier \"{}\"", + dest_id + ) + })?; + + if let Some(machine) = machine { + let existing_bookmark = db.get_bookmark(&machine, destination.uuid()).await?; + if existing_bookmark.is_some() { + println!( + "WARNING: A bookmark for {} already exists within subscription \"{}\"", + machine, + destination.name() + ) + } + + let bookmark = db + .get_bookmark(&machine, source.uuid()) + .await? + .ok_or_else(|| { + anyhow!( + "Could not find bookmark for {} in \"{}\"", + machine, + source.name() + ) + })?; + + if !utils::confirm(format!("You are about to copy bookmark for {} in \"{}\" to \"{}\".\nWARNING: You may lose logs!\nAre you sure?", machine, source.name(), destination.name()).as_str()) { + println!("Aborted"); + return Ok(()); + } + + db.store_bookmark(&machine, destination.uuid(), &bookmark) + .await?; + println!("1 bookmark copied"); + } else { + if !utils::confirm(format!("You are about to copy all bookmarks of subscription \"{}\" to \"{}\".\nWARNING: You may lose logs!\nAre you sure?", source.name(), destination.name()).as_str()) { + println!("Aborted"); + return Ok(()) + }; + + let mut counter: usize = 0; + for data in db.get_bookmarks(source.uuid()).await? 
{ + db.store_bookmark(&data.machine, destination.uuid(), &data.bookmark) + .await?; + counter += 1; + } + println!("{} bookmarks copied", counter); + }; + + Ok(()) +} diff --git a/cli/src/db.rs b/cli/src/db.rs new file mode 100644 index 0000000..406e8fc --- /dev/null +++ b/cli/src/db.rs @@ -0,0 +1,153 @@ +use anyhow::{Context, Result}; +use clap::ArgMatches; +use common::database::{ + schema::{Migrator, Version}, + Db, +}; + +use crate::utils::confirm; + +enum Direction { + Up, + Down, +} +pub async fn run(db: &Db, matches: &ArgMatches) -> Result<()> { + match matches.subcommand() { + Some(("init", _matches)) => { + init(db).await?; + } + Some(("upgrade", matches)) => { + upgrade(db, matches).await?; + } + Some(("downgrade", matches)) => { + downgrade(db, matches).await?; + } + _ => { + report(db, Direction::Up, None).await?; + } + } + + Ok(()) +} + +async fn init(db: &Db) -> Result<()> { + db.setup_schema().await.context("Failed to setup schema")?; + let migrator = Migrator::new(db.clone()); + migrator + .up(None, false) + .await + .context("Failed to apply migrations")?; + Ok(()) +} + +async fn upgrade(db: &Db, matches: &ArgMatches) -> Result<()> { + let to = matches.get_one::("to").copied(); + let work_to_do = report(db, Direction::Up, to) + .await + .context("Failed to print migrations report")?; + if !work_to_do { + return Ok(()); + } + let migrator = Migrator::new(db.clone()); + if confirm("Are you sure that you want to apply these migrations?") { + migrator + .up(to, false) + .await + .context("Failed to apply migrations")?; + } + Ok(()) +} + +async fn downgrade(db: &Db, matches: &ArgMatches) -> Result<()> { + let to = if let Some(version) = matches.get_one::("to") { + Some(*version) + } else { + let migrations = db.migrations().await; + let migrated_versions = db + .migrated_versions() + .await + .context("Failed to retrieve applied migrations")?; + let mut migrations = migrations + .iter() + // Rollback migrations from latest to oldest: + .rev() + // 
Rollback only the migrations that are actually already migrated (in the case that + // some intermediary migrations were never executed). + .filter(|&(v, _)| migrated_versions.contains(v)) + .skip(1); + // Exclude last migration + migrations.next().map(|(version, _)| version).copied() + }; + + let work_to_do = report(db, Direction::Down, to) + .await + .context("Failed to print migrations report")?; + if !work_to_do { + return Ok(()); + } + + let migrator = Migrator::new(db.clone()); + if confirm("Are you sure that you want to downgrade these migrations?") { + migrator + .down(to, false) + .await + .context("Failed to remove migrations")?; + } + Ok(()) +} + +async fn report(db: &Db, direction: Direction, to: Option) -> Result { + let migrations = db.migrations().await; + println!("Knowned migrations:"); + for (version, migration) in migrations.iter() { + println!("{}: {}", version, migration.description()); + } + + let migrated_versions = db + .migrated_versions() + .await + .context("Failed to retrieve applied migrations")?; + if migrated_versions.is_empty() { + println!("No migrations already applied"); + } else { + println!("Applied migrations:"); + for version in migrated_versions { + println!( + "{}: {}", + version, + migrations + .get(&version) + .map(|migration| migration.description()) + .unwrap_or_else(|| "unknown".to_string()) + ); + } + } + + let migrator = Migrator::new(db.clone()); + + let changes = match direction { + Direction::Up => migrator.up(to, true).await, + Direction::Down => migrator.down(to, true).await, + } + .context("Failed to simulate migrations")?; + if changes.is_empty() { + println!("Nothing to do"); + return Ok(false); + } else { + match direction { + Direction::Up => println!("Migrations to apply:"), + Direction::Down => println!("Migrations to remove:"), + }; + for version in changes { + println!( + "{}: {}", + version, + migrations + .get(&version) + .map(|migration| migration.description()) + .unwrap_or_else(|| 
"unknown".to_string()) + ); + } + } + Ok(true) +} diff --git a/cli/src/heartbeats.rs b/cli/src/heartbeats.rs new file mode 100644 index 0000000..fb43f0b --- /dev/null +++ b/cli/src/heartbeats.rs @@ -0,0 +1,80 @@ +use anyhow::{anyhow, Context, Result}; +use clap::ArgMatches; +use common::{ + database::Db, heartbeat::HeartbeatData, subscription::SubscriptionData, + utils::timestamp_to_local_date, +}; + +use crate::utils; + +async fn find_subscription(db: &Db, matches: &ArgMatches) -> Result> { + if let Some(identifier) = matches.get_one::("subscription") { + Ok(Some( + utils::find_subscription(db, identifier) + .await + .with_context(|| { + format!("Failed to find subscription with identifier {}", identifier) + })? + .ok_or_else(|| { + anyhow!("Subscription {} could not be found in database", identifier) + })?, + )) + } else { + Ok(None) + } +} + +pub async fn run(db: &Db, matches: &ArgMatches) -> Result<()> { + let subscription = find_subscription(db, matches).await?; + let subscription_uuid = subscription.map(|sub| sub.uuid().to_owned()); + + let heartbeats = if let Some(machine) = matches.get_one::("machine") { + db.get_heartbeats_by_machine(machine, subscription_uuid.as_deref()) + .await? + } else if let Some(address) = matches.get_one::("address") { + db.get_heartbeats_by_ip(address, subscription_uuid.as_deref()) + .await? + } else if let Some(uuid) = subscription_uuid { + db.get_heartbeats_by_subscription(&uuid).await? + } else { + db.get_heartbeats().await? 
+ }; + + match matches.get_one::("format") { + Some(fmt) if fmt == "text" => format_text(&heartbeats)?, + Some(fmt) if fmt == "json" => format_json(&heartbeats)?, + x => eprintln!("Invalid format {:?}", x), + } + Ok(()) +} + +fn format_text(heartbeats: &Vec) -> Result<()> { + for heartbeat in heartbeats { + let first_seen = timestamp_to_local_date(heartbeat.first_seen())?; + let last_seen = timestamp_to_local_date(heartbeat.last_seen())?; + let last_event_seen_sentence = if let Some(last_event_seen) = heartbeat.last_event_seen() { + format!( + "Last event received on {}", + timestamp_to_local_date(last_event_seen)?.to_rfc3339() + ) + } else { + "No events have ever been received.".to_string() + }; + println!( + "For subscription \"{}\" ({}), {} ({}) last heartbeat was sent on {}. First seen on {}. {}", + heartbeat.subscription().name(), + heartbeat.subscription().uuid(), + heartbeat.machine(), + heartbeat.ip(), + last_seen.to_rfc3339(), + first_seen.to_rfc3339(), + last_event_seen_sentence + ); + } + Ok(()) +} + +fn format_json(heartbeats: &Vec) -> Result<()> { + println!("{}", serde_json::to_string(heartbeats)?); + Ok(()) +} diff --git a/cli/src/lib.rs b/cli/src/lib.rs new file mode 100644 index 0000000..60dcfea --- /dev/null +++ b/cli/src/lib.rs @@ -0,0 +1,47 @@ +use clap::builder::StyledStr; +use common::database::schema_is_up_to_date; +use common::{database::db_from_settings, settings::Settings}; + +use clap::ArgMatches; + +use anyhow::{anyhow, bail, Context, Result}; + +mod bookmarks; +mod db; +mod heartbeats; +mod stats; +mod subscriptions; +mod utils; + +pub async fn run(matches: ArgMatches, help_str: StyledStr) -> Result<()> { + let settings = Settings::new(matches.get_one::("config")) + .map_err(|e| anyhow!("Failed to retrieve configuration: {}", e))?; + let db = db_from_settings(&settings) + .await + .context("Failed to retrieve a Database instance")?; + + if let Some(matches) = matches.subcommand_matches("db") { + db::run(&db, matches).await?; + return 
Ok(()); + } + + // Check that database schema is up to date + match schema_is_up_to_date(db.clone()).await.context("Failed to check schema version") { + Ok(true) => (), + Ok(false) => bail!("Schema needs to be updated. Please check migration guide and then run `openwec db upgrade`"), + Err(err) => bail!("{:?}.\nHelp: You may need to run `openwec db init` to setup your database.", err), + }; + + if let Some(matches) = matches.subcommand_matches("subscriptions") { + subscriptions::run(&db, matches).await?; + } else if let Some(matches) = matches.subcommand_matches("heartbeats") { + heartbeats::run(&db, matches).await?; + } else if let Some(matches) = matches.subcommand_matches("stats") { + stats::run(&db, matches).await?; + } else if let Some(matches) = matches.subcommand_matches("bookmarks") { + bookmarks::run(&db, matches).await?; + } else { + println!("{}", help_str); + } + Ok(()) +} diff --git a/cli/src/main.rs b/cli/src/main.rs new file mode 100644 index 0000000..ca0f332 --- /dev/null +++ b/cli/src/main.rs @@ -0,0 +1,322 @@ +use std::env; + +use common::{database::schema::Version, settings::DEFAULT_CONFIG_FILE}; + +use clap::{arg, command, value_parser, Arg, ArgAction, ArgGroup, Command}; + +#[tokio::main] +async fn main() { + let mut command = command!() // requires `cargo` feature + .name("openwec") + .arg( + arg!( + -c --config "Sets a custom config file" + ) + .default_value(DEFAULT_CONFIG_FILE) + .required(false) + .global(true), + ) + .arg(arg!(-v --verbosity ... "Sets the level of verbosity").global(true)) + .subcommand( + Command::new("subscriptions") + .about("deals with openwec subscriptions. Without arguments, it lists the current subscriptions.") + .arg(arg!(-e --enabled "Only show enabled subscriptions")) + .arg(arg!(-d --disabled "Only show disabled subscriptions")) + .group(ArgGroup::new("subscription_list_status").args(["enabled", "disabled"]).required(false)) + .subcommand( + Command::new("new") + .about("Creates a new subscription. 
The newly created subscription will have to be enabled afterward.") + .arg(arg!( "Name of the subscription")) + .arg(arg!( "File containing the query (XML format)")) + .arg( + arg!(-u --uri "URI linked to this subscription. \ + The subscription will only be presented to hosts using this URI to retrieve their subscriptions. \ + Don't set this flag to present the subscription regardless of the URI (default). \ + Example: /my/custom/uri.") + ) + .arg( + arg!(--"heartbeat-interval" "Heartbeat interval") + .value_parser(value_parser!(u32)) + ) + .arg( + arg!(--"connection-retry-count" "Connection retry count") + .value_parser(value_parser!(u16)) + ) + .arg( + arg!(--"connection-retry-interval" "Connection retry interval") + .value_parser(value_parser!(u32)) + ) + .arg( + arg!(--"max-time" "Max time") // TODO: improve help + .value_parser(value_parser!(u32)) + ) + .arg( + arg!(--"max-envelope-size" "Max envelope size") // TODO: improve help + .value_parser(value_parser!(u32)) + ) + .arg( + arg!(--"read-existing-events" "Subscription retrieves already existing events in addition to new ones") + ) + ) + .subcommand( + Command::new("edit") + .about("Edit an existing subscription") + .arg(arg!( "Name or UUID of the subscription")) + .subcommand( + Command::new("outputs") + .about("Manage the outputs of a subscription") + .subcommand( + Command::new("add") + .about("Add a new output for this subscription") + .arg(arg!(-f --format "Output format").value_parser(["json", "raw"]).required(true)) + .subcommand( + Command::new("tcp") + .about("TCP output") + .arg(arg!( "Remote IP address")) + .arg(arg!( "TCP port").value_parser(value_parser!(u16))) + ) + .subcommand( + Command::new("kafka") + .about("Kafka output") + .arg(arg!( "Kafka topic")) + .arg( + Arg::new("options") + .short('o') + .long("option") + .num_args(2) + .value_names(["KEY", "VALUE"]) + .help("Kafka configuration (bootstrap.servers for example)") + .action(clap::ArgAction::Append)) + ) + .subcommand( + 
Command::new("files") + .about("Configures a File output. Path template is ///[/], where is the string representation of the IP addr of the machine and its Kerberos principal. You may split the IP Address to make a hierarchical tree structure. is optional, and refers to the eponym server configuration.") + .arg(arg!( "Base path")) + .arg( + arg!(--"split-on-addr-index" "If specified, splits the IP address on the n-th segment. For example, with an IPv4 addr a.b.c.d, using --split-on-addr-index 1 will result in being \"a/a.b/a.b.c/a.b.c.d\"") + .value_parser(value_parser!(u8)) + ) + .arg( + arg!(--"append-node-name" "Append the configured node name at the end of the generated path (parent dir of )") + ) + .arg(arg!(--filename "Name of the file where logs will be written.").default_value("messages")) + ) + ) + .subcommand( + Command::new("delete") + .about("Deletes an output of this subscription") + .arg(arg!(-y --yes "Do not prompt for confirmation")) + .arg(arg!( "Index of the output to delete").value_parser(value_parser!(usize))) + ) + .subcommand( + Command::new("enable") + .about("Enables an output of this subscription") + .arg(arg!( "Index of the output to enable").value_parser(value_parser!(usize))) + ) + .subcommand( + Command::new("disable") + .about("Disables an output of this subscription") + .arg(arg!(-y --yes "Do not prompt for confirmation")) + .arg(arg!( "Index of the output to disable").value_parser(value_parser!(usize))) + ) + ) + .arg(arg!(-q --query "File containing the query (XML format)")) + .arg(arg!(-r --rename "Rename the subscription")) + .arg( + arg!(-u --uri [URI] "URI linked to this subscription. \ + The subscription will only be presented to hosts using this URI to retrieve their subscriptions. \ + Set this flag without value to present the subscription regardless of the URI (default). 
\ + Example: /my/custom/uri.") + ) + .arg( + arg!(--"heartbeat-interval" "Heartbeat interval") + .value_parser(value_parser!(u32)) + ) + .arg( + arg!(--"connection-retry-count" "Connection retry count") + .value_parser(value_parser!(u16)) + ) + .arg( + arg!(--"connection-retry-interval" "Connection retry interval") + .value_parser(value_parser!(u32)) + ) + .arg( + arg!(--"max-time" "Max time") // TODO: improve help + .value_parser(value_parser!(u32)) + ) + .arg( + arg!(--"max-envelope-size" "Max envelope size") // TODO: improve help + .value_parser(value_parser!(u32)) + ) + .arg( + arg!(--"enable" "Enable the subscription") + ) + .arg( + arg!(--"disable" "Disable the subscription") + ) + .group(ArgGroup::new("subscription_status").args(["enable", "disable"]).required(false)) + ) + .subcommand( + Command::new("show") + .about("Show an existing subscription") + .arg(arg!( "Name or UUID of the subscription")) + ) + .subcommand( + Command::new("duplicate") + .about("Duplicate an existing subscription. The newly created subscription will be disabled by default.") + .arg(arg!( "Name or UUID of the subscription to copy")) + .arg(arg!( "Name of the newly created subcription")) + ) + .subcommand( + Command::new("export") + .about("Export existing subscriptions in a json file") + .arg(arg!(-s --subscription "Name or UUID of a subscription if you want to export only one of them")) + ) + .subcommand( + Command::new("import") + .about("Import subscriptions from a json file") + .arg(arg!( + -f --format "Format of the file. `openwec` format is generated using `openwec export`. \ + `windows` format comes from an export of a Windows Event Collector subscription." 
+ ).value_parser(["openwec", "windows"]).default_value("openwec")) + .arg(arg!( + "file to import" + )) + ) + .subcommand( + Command::new("delete") + .about("Delete an existing subscription") + .arg(arg!(-y --yes "Do not prompt for confirmation")) + .arg(arg!( "Name or UUID of the subscription")) + ) + .subcommand( + Command::new("machines") + .about("Show subscribing machines. Defaults to all machines ever seen.") + .arg(arg!( "Name or UUID of the subscription")) + .arg(arg!(-a --active "Only show active machines")) + .arg(arg!(-l --alive "Only show machines that are alive but not active")) + .arg(arg!(-d --dead "Only show dead machines principal")) + .arg( + arg!(-i --interval "Duration after which a machine is considered alive if no events are received or dead if no heartbeats are received. \ + Defaults to heartbeat-interval") + .value_parser(value_parser!(u32)) + ) + .group(ArgGroup::new("subscription_machines_state").args(["active", "alive", "dead"]).required(false)) + ) + .subcommand( + Command::new("enable") + .about("Enable one or more subscriptions") + .arg(arg!(-a --all "Enable all subscriptions")) + .arg(arg!( ... "Name or UUID of subscriptions to enable").action(ArgAction::Append).required(false)) + ) + .subcommand( + Command::new("disable") + .about("Disable one or more subscriptions") + .arg(arg!(-a --all "Disable all subscriptions")) + .arg(arg!( ... "Name or UUID of subscriptions to disable").action(ArgAction::Append).required(false)) + ) + .subcommand( + Command::new("reload") + .about("Force openwec server to reload subscriptions outputs and clients to establish a new connection.") + .arg(arg!(-a --all "Reload all subscriptions")) + .arg(arg!( ... 
"Name or UUID of subscriptions").action(ArgAction::Append).required(false)) + ) + ) + .subcommand( + Command::new("heartbeats") + .about("Retrieve machine heartbeats") + .arg(arg!(-s --subscription "Name or UUID of a subscription")) + .arg(arg!(-m --machine "Name of a specific machine")) + .arg(arg!(-a --address "IP Address of a specific machine")) + .group(ArgGroup::new("host") + .args(["machine", "address"]) + .required(false)) + .arg(arg!(-f --format "Output format").value_parser(["text", "json"]).default_value("text")) + ) + .subcommand( + Command::new("bookmarks") + .about("Manipulate bookmarks") + .subcommand( + Command::new("show") + .about("Prints bookmarks") + .arg(arg!( "Name or UUID of a subscription")) + .arg(arg!(-m --machine "Name of a specific machine")) + ) + .subcommand( + Command::new("delete") + .about("Delete bookmarks (dangerous!)") + .arg(arg!(-s --subscription "Name or UUID of a subscription")) + .arg(arg!(-m --machine "Name of a specific machine")) + ) + .subcommand( + Command::new("copy") + .about("Copy bookmarks from a subscription to another subscription (dangerous!)") + .arg(arg!(-m --machine "Name of a specific machine")) + .arg(arg!( "Name or UUID of the source subscription")) + .arg(arg!( "Name or UUID of the destination subscription")) + ) + ) + .subcommand( + Command::new("stats") + .about("Retrieve usage statistics") + .arg(arg!(-s --subscription "Name or UUID of a subscription")) + .arg(arg!(-f --format "Output format").value_parser(["text", "json"]).default_value("text")) + .arg( + arg!(-i --interval "Duration after which a machine is considered alive if no events are received or dead if no heartbeats are received. 
\ + Defaults to heartbeat-interval") + .value_parser(value_parser!(u32)) + ) + ) + .subcommand( + Command::new("db") + .about("Database operations") + .subcommand( + Command::new("init") + .about("Initialize database schema") + ) + .subcommand( + Command::new("upgrade") + .about("Upgrade database schema") + .arg( + arg!(-t --to "Schema version to upgrade to. Defaults to last version.") + .value_parser(value_parser!(Version)) + ) + ) + .subcommand( + Command::new("downgrade") + .about("Downgrade database schema") + .arg( + arg!(-t --to "Schema version to downgrade to. Defaults to second to last version.") + .value_parser(value_parser!(Version)) + ) + ) + ); + + let help_str = command.render_help(); + let matches = command.get_matches(); + + if env::var("OPENWEC_LOG").is_err() { + env::set_var( + "OPENWEC_LOG", + match matches.get_count("verbosity") { + 0 => "warn", + 1 => "info", + 2 => "debug", + _ => "trace", + }, + ); + } + + env_logger::Builder::from_env("OPENWEC_LOG") + .format_module_path(false) + .format_timestamp(None) + .init(); + + match cli::run(matches, help_str).await { + Ok(_) => (), + Err(err) => { + eprintln!("An error occurred: {:?}", err); + std::process::exit(1); + } + }; +} diff --git a/cli/src/stats.rs b/cli/src/stats.rs new file mode 100644 index 0000000..dcc04f5 --- /dev/null +++ b/cli/src/stats.rs @@ -0,0 +1,100 @@ +use std::time::SystemTime; + +use anyhow::Result; +use clap::ArgMatches; +use common::{database::Db, subscription::SubscriptionData, utils::timestamp_to_local_date}; +use serde_json::json; + +use crate::utils; + +pub async fn run(db: &Db, matches: &ArgMatches) -> Result<()> { + let subscriptions = utils::find_subscriptions(db, matches, "subscription").await?; + let interval = matches.get_one::("interval").cloned(); + match matches.get_one::("format") { + Some(fmt) if fmt == "text" => stats_text(db, &subscriptions, interval).await?, + Some(fmt) if fmt == "json" => stats_json(db, &subscriptions, interval).await?, + x => 
eprintln!("Invalid format {:?}", x), + } + Ok(()) +} + +pub async fn stats_text( + db: &Db, + subscriptions: &[SubscriptionData], + interval: Option, +) -> Result<()> { + for subscription in subscriptions { + let uri_text = match subscription.uri() { + Some(uri) => uri, + None => "*", + }; + println!( + "Subscription {} ({}) - {}", + subscription.name(), + subscription.uuid(), + uri_text + ); + let now: i64 = SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH)? + .as_secs() + .try_into()?; + let interval = interval.unwrap_or_else(|| subscription.heartbeat_interval()) as i64; + let start_heartbeat_interval = now - interval; + let stats = db + .get_stats(subscription.uuid(), start_heartbeat_interval) + .await?; + + let start_heartbeat_interval_date = timestamp_to_local_date(start_heartbeat_interval)?; + println!("- {} machines ever seen", stats.total_machines_count()); + println!( + "- {} active machines (event received since {})", + stats.active_machines_count(), + start_heartbeat_interval_date.to_rfc3339(), + ); + println!( + "- {} alive machines (heartbeat received since {} but no events)", + stats.alive_machines_count(), + start_heartbeat_interval_date.to_rfc3339(), + ); + println!( + "- {} dead machines (no heartbeats nor events since {})", + stats.dead_machines_count(), + start_heartbeat_interval_date.to_rfc3339(), + ); + } + Ok(()) +} + +pub async fn stats_json( + db: &Db, + subscriptions: &[SubscriptionData], + interval: Option, +) -> Result<()> { + let mut stats_vec = Vec::new(); + for subscription in subscriptions { + let now: i64 = SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH)? 
+ .as_secs() + .try_into()?; + let interval = interval.unwrap_or_else(|| subscription.heartbeat_interval()) as i64; + let start_heartbeat_interval = now - interval; + let stats = db + .get_stats(subscription.uuid(), start_heartbeat_interval) + .await?; + + let start_heartbeat_interval_date = timestamp_to_local_date(start_heartbeat_interval)?; + + stats_vec.push(json!({ + "subscription_name": subscription.name(), + "subscription_uuid": subscription.uuid(), + "subscription_uri": subscription.uri(), + "since": start_heartbeat_interval_date.to_rfc3339(), + "total_machines_count": stats.total_machines_count(), + "alive_machines_count": stats.alive_machines_count(), + "active_machines_count": stats.active_machines_count(), + "dead_machines_count": stats.dead_machines_count(), + })); + } + println!("{}", serde_json::to_string(&stats_vec)?); + Ok(()) +} diff --git a/cli/src/subscriptions.rs b/cli/src/subscriptions.rs new file mode 100644 index 0000000..9b4f7f0 --- /dev/null +++ b/cli/src/subscriptions.rs @@ -0,0 +1,793 @@ +use common::{ + database::Db, + encoding::decode_utf16le, + subscription::{ + FileConfiguration, KafkaConfiguration, SubscriptionData, SubscriptionMachineState, + SubscriptionOutput, SubscriptionOutputFormat, TcpConfiguration, + }, +}; +use roxmltree::{Document, Node}; +use std::{ + collections::{HashMap, HashSet}, + fs::File, + io::{BufReader, Read}, + time::SystemTime, +}; + +use anyhow::{anyhow, bail, ensure, Context, Result}; +use clap::ArgMatches; +use log::{debug, info, warn}; + +use crate::utils::{self, confirm}; + +enum ImportFormat { + OpenWEC, + Windows, +} + +pub async fn run(db: &Db, matches: &ArgMatches) -> Result<()> { + match matches.subcommand() { + Some(("new", matches)) => { + new(db, matches).await?; + } + Some(("show", matches)) => { + show(db, matches).await?; + } + Some(("edit", matches)) => { + edit(db, matches).await?; + } + Some(("export", matches)) => { + export(db, matches).await?; + } + Some(("import", matches)) => { + 
import(db, matches).await?; + } + Some(("delete", matches)) => { + delete(db, matches).await?; + } + Some(("machines", matches)) => { + machines(db, matches).await?; + } + Some(("duplicate", matches)) => { + duplicate(db, matches).await?; + } + Some(("enable", matches)) => { + set_enable(db, matches, true).await?; + } + Some(("disable", matches)) => { + set_enable(db, matches, false).await?; + } + Some(("reload", matches)) => { + reload(db, matches).await?; + } + _ => { + list(db, matches).await?; + } + } + Ok(()) +} + +async fn list(db: &Db, matches: &ArgMatches) -> Result<()> { + let enabled = *matches.get_one::("enabled").unwrap_or(&false); + let disabled = *matches.get_one::("disabled").unwrap_or(&false); + if enabled && disabled { + bail!("Enabled and disabled both set"); + } + + for subscription in db + .get_subscriptions() + .await + .context("Failed to retrieve subscriptions from database")? + { + if enabled && subscription.enabled() + || (disabled && !subscription.enabled()) + || (!disabled && !enabled) + { + println!("{}", subscription.short()); + } + } + Ok(()) +} + +async fn show(db: &Db, matches: &ArgMatches) -> Result<()> { + let subscription = find_subscription(db, matches) + .await + .context("Failed to retrieve subscription from database")?; + + println!("{}", subscription); + println!("Event filter query:\n"); + println!("{}", subscription.query()); + Ok(()) +} + +async fn duplicate(db: &Db, matches: &ArgMatches) -> Result<()> { + let source = find_subscription(db, matches) + .await + .context("Failed to retrieve subscription from database")?; + + let mut new = source.clone(); + new.update_uuid(); + new.set_enabled(false); + new.set_name( + matches + .get_one::("name") + .expect("Required by clap") + .to_string(), + ); + db.store_subscription(new).await?; + + Ok(()) +} + +async fn export(db: &Db, matches: &ArgMatches) -> Result<()> { + let subscriptions = if matches.contains_id("subscription") { + vec![find_subscription(db, matches) + .await + 
.context("Failed to find subscription")?] + } else { + db.get_subscriptions() + .await + .context("Failed to retrieve subscriptions from database")? + }; + + let res = serde_json::to_string(&subscriptions)?; + println!("{}", res); + Ok(()) +} + +fn get_query_channels(root: Node) -> Result> { + let mut channels = HashSet::new(); + if !root.has_tag_name("QueryList") { + bail!( + "Expected root element to be QueryList, found {:?}", + root.tag_name() + ) + } + for query_node in root.children() { + if !query_node.has_tag_name("Query") { + continue; + } + for select_node in query_node.children() { + if !select_node.has_tag_name("Select") { + continue; + } + channels.insert( + select_node + .attribute("Path") + .ok_or_else(|| { + anyhow!( + "Could not find Path attribute in Select node: {:?}", + select_node + ) + })? + .to_owned(), + ); + } + } + Ok(channels) +} + +fn update_query_check(old_query: &str, new_query: &str) -> Result { + let old_doc = Document::parse(old_query).context("Failed to parse old query")?; + let old_query_channels = get_query_channels(old_doc.root_element()) + .context("Failed to get channels of old query")?; + + let new_doc = Document::parse(new_query).context("Failed to parse new query")?; + let new_query_channels = get_query_channels(new_doc.root_element()) + .context("Failed to get channels of new query")?; + + if new_query_channels.is_subset(&old_query_channels) { + Ok(true) + } else { + let diff = new_query_channels.difference(&old_query_channels); + println!("The new query contains new channels:"); + for channel in diff { + println!("- {}", channel); + } + println!("Because there is no bookmarks stored for these channels, you will receive all existing events for them (ignoring the read_existing_events configuration). 
Depending of the channels, it may cause a huge network trafic."); + Ok(utils::confirm( + "Do you want to ignore this warning and continue?", + )) + } +} + +fn check_query_size(query: &str) -> Result { + let doc = Document::parse(query).context("Failed to parse query")?; + let channels = get_query_channels(doc.root_element())?; + + // Windows clients seem to not like queries selecting more than 256 channels + if channels.len() >= 256 { + println!("The query selects more than 256 channels and will probably not work on Windows clients."); + Ok(utils::confirm( + "Do you want to ignore this warning and continue?", + )) + } else { + Ok(true) + } +} + +async fn edit(db: &Db, matches: &ArgMatches) -> Result<()> { + let mut subscription = find_subscription(db, matches).await?; + if let Some(("outputs", matches)) = matches.subcommand() { + outputs(&mut subscription, matches).await?; + } + if let Some(query) = matches.get_one::("query") { + let mut file = File::open(query)?; + let mut new_query = String::new(); + file.read_to_string(&mut new_query)?; + + // Check the new query size + if !check_query_size(&new_query).context("Failed to check query size")? { + println!("Aborted"); + return Ok(()); + } + + // We try to establish if the new query add sources compared to the + // old one. + // In that case, we warn the user that it can lead to "read_existing_event" + // being set to true for all these new sources. + if !update_query_check(subscription.query(), &new_query) + .context("Failed to compare old and new queries")? 
+ { + println!("Aborted"); + return Ok(()); + } + + debug!( + "Update query from {} to {}", + subscription.query(), + new_query + ); + subscription.set_query(new_query); + } + + if let Some(name) = matches.get_one::("rename") { + debug!("Update name from {} to {}", subscription.name(), name); + subscription.set_name(name.to_owned()); + } + + if matches.contains_id("uri") { + if let Some(uri) = matches.get_one::("uri") { + debug!( + "Update uri from {:?} to {:?}", + subscription.uri(), + Some(uri) + ); + subscription.set_uri(Some(uri.to_string())); + } else { + subscription.set_uri(None); + } + } + + if let Some(heartbeat_interval) = matches.get_one::("heartbeat-interval") { + debug!( + "Update heartbeat_interval from {} to {}", + subscription.heartbeat_interval(), + heartbeat_interval + ); + subscription.set_heartbeat_interval(*heartbeat_interval); + } + if let Some(connection_retry_count) = matches.get_one::("connection-retry-count") { + debug!( + "Update connection_retry_count from {} to {}", + subscription.connection_retry_count(), + connection_retry_count + ); + subscription.set_connection_retry_count(*connection_retry_count); + } + if let Some(connection_retry_interval) = matches.get_one::("connection-retry-interval") { + debug!( + "Update connection_retry_interval from {} to {}", + subscription.connection_retry_interval(), + connection_retry_interval + ); + subscription.set_connection_retry_interval(*connection_retry_interval); + } + if let Some(max_time) = matches.get_one::("max-time") { + debug!( + "Update max_time from {} to {}", + subscription.max_time(), + max_time + ); + subscription.set_max_time(*max_time); + } + if let Some(max_envelope_size) = matches.get_one::("max-envelope-size") { + debug!( + "Update max_envelope_size from {} to {}", + subscription.max_envelope_size(), + max_envelope_size + ); + subscription.set_max_envelope_size(*max_envelope_size); + } + if let Some(true) = matches.get_one::("enable") { + // Check that this subcription has 
outputs + if subscription.outputs().is_empty() { + bail!("Subscription must have at least one outputs configured to be enabled"); + } + debug!("Update enable from {} to true", subscription.enabled()); + subscription.set_enabled(true); + } else if let Some(true) = matches.get_one::("disable") { + debug!("Update enable from {} to true", subscription.enabled()); + subscription.set_enabled(false); + } + info!( + "Saving subscription {} ({})", + subscription.name(), + subscription.uuid() + ); + db.store_subscription(subscription).await?; + Ok(()) +} + +async fn new(db: &Db, matches: &ArgMatches) -> Result<()> { + let mut file = File::open( + matches + .get_one::("query") + .expect("Required by clap"), + )?; + let mut query = String::new(); + file.read_to_string(&mut query)?; + + // Check the new query size + if !check_query_size(&query).context("Failed to check query size")? { + println!("Aborted"); + return Ok(()); + } + + let subscription = SubscriptionData::new( + matches.get_one::("name").expect("Required by clap"), + matches.get_one::("uri").map(|e| e.as_str()), + &query, + matches.get_one::("heartbeat-interval"), + matches.get_one::("connection-retry-count"), + matches.get_one::("connection-retry-interval"), + matches.get_one::("max-time"), + matches.get_one::("max-envelope-size"), + false, + *matches + .get_one::("read-existing-events") + .expect("defaulted by clap"), + None, + ); + debug!( + "Subscription that is going to be inserted: {:?}", + subscription + ); + let name = subscription.name().to_owned(); + db.store_subscription(subscription).await?; + println!( + "Subscription {} has been created successfully. \ + You need to configure its outputs using `openwec subscriptions edit {} outputs add --help`. 
\ + When you are ready, you can enable it using `openwec subscriptions edit {} --enable", + name, name, name + ); + Ok(()) +} + +async fn import(db: &Db, matches: &ArgMatches) -> Result<()> { + let format: ImportFormat = match matches + .get_one::("format") + .expect("defaulted by clap") + { + x if x == "openwec" => ImportFormat::OpenWEC, + x if x == "windows" => ImportFormat::Windows, + _ => bail!("Invalid import format"), + }; + let file = File::open(matches.get_one::("file").expect("Required by clap"))?; + let reader = BufReader::new(file); + + let mut subscriptions = + match format { + ImportFormat::OpenWEC => import_openwec(reader) + .context("Failed to import subscriptions using OpenWEC format")?, + ImportFormat::Windows => import_windows(reader) + .context("Failed to import subscriptions using Windows format")?, + }; + + let count = subscriptions.len(); + while let Some(mut subscription) = subscriptions.pop() { + // Imported subscriptions are disabled. They must be enabled manually afterward. + subscription.set_enabled(false); + + debug!("Store {:?}", subscription); + db.store_subscription(subscription) + .await + .context("Failed to store subscription")?; + } + + match count { + 0 => println!("No subscription have been imported."), + 1 => println!("1 subscription has been imported. You may want to enable it using `openwec subscriptions edit --enable`."), + n => println!("{} subscriptions have been imported. They need to be enabled one by one using `openwec subscriptions edit --enable`.", n), + } + Ok(()) +} + +fn import_openwec(reader: BufReader) -> Result> { + Ok(serde_json::from_reader(reader)?) 
+} + +fn import_windows(mut reader: BufReader) -> Result> { + let mut content_bytes = Vec::new(); + reader.read_to_end(&mut content_bytes)?; + let content = decode_utf16le(content_bytes)?; + let doc = Document::parse(content.as_str())?; + let root = doc.root_element(); + ensure!( + root.has_tag_name(( + "http://schemas.microsoft.com/2006/03/windows/events/subscription", + "Subscription" + )), + "Invalid subscription format" + ); + + let mut data = SubscriptionData::empty(); + for node in root.children() { + if node.has_tag_name("SubscriptionId") && node.text().is_some() { + data.set_name(node.text().map(String::from).unwrap()) + } else if node.has_tag_name("SubscriptionType") && node.text().is_some() { + ensure!( + node.text().map(String::from).unwrap() == "SourceInitiated", + "Invalid subscription format: SubscriptionType must be SourceInitiated" + ); + } else if node.has_tag_name("Uri") && node.text().is_some() { + ensure!( + node.text().map(String::from).unwrap() + == "http://schemas.microsoft.com/wbem/wsman/1/windows/EventLog", + "Invalid subscription Uri" + ); + } else if node.has_tag_name("Delivery") { + ensure!( + node.attribute("Mode").unwrap_or_default() == "Push", + "Invalid delivery mode (should be push)" + ); + for delivery_node in node.children() { + if delivery_node.has_tag_name("Batching") { + for batching_node in delivery_node.children() { + if batching_node.has_tag_name("MaxLatencyTime") + && batching_node.text().is_some() + { + data.set_max_time(batching_node.text().unwrap().parse::()?) 
+ } + } + } else if delivery_node.has_tag_name("PushSettings") { + for settings in delivery_node.children() { + if settings.has_tag_name("Heartbeat") && settings.has_attribute("Interval") + { + data.set_heartbeat_interval( + settings.attribute("Interval").unwrap().parse::()?, + ); + } + } + } + } + } else if node.has_tag_name("Query") && node.text().is_some() { + data.set_query(node.text().map(String::from).unwrap()); + } else if node.has_tag_name("ReadExistingEvents") && node.text().is_some() { + data.set_read_existing_events(node.text().unwrap().parse()?); + } + } + + Ok(vec![data]) +} +async fn delete(db: &Db, matches: &ArgMatches) -> Result<()> { + let subscription = find_subscription(db, matches).await?; + + if !*matches.get_one::("yes").expect("defaulted by clap") + && !confirm(&format!( + "Are you sure that you want to delete \"{}\" ({}) ?", + subscription.name(), + subscription.uuid() + )) + { + return Ok(()); + } + db.delete_subscription(subscription.uuid()).await +} + +async fn outputs(subscription: &mut SubscriptionData, matches: &ArgMatches) -> Result<()> { + info!( + "Loading subscription {} ({})", + subscription.name(), + subscription.uuid() + ); + match matches.subcommand() { + Some(("add", matches)) => { + outputs_add(subscription, matches) + .await + .context("Failed to add output")?; + } + Some(("delete", matches)) => { + outputs_delete(subscription, matches) + .await + .context("Failed to delete output")?; + } + Some(("enable", matches)) => { + outputs_enable(subscription, matches) + .await + .context("Failed to delete output")?; + } + Some(("disable", matches)) => { + outputs_disable(subscription, matches) + .await + .context("Failed to delete output")?; + } + _ => { + outputs_list(subscription); + } + } + Ok(()) +} + +async fn outputs_add(subscription: &mut SubscriptionData, matches: &ArgMatches) -> Result<()> { + let format: SubscriptionOutputFormat = match matches + .get_one::("format") + .ok_or_else(|| anyhow!("Missing format argument"))? 
+ { + x if x == "raw" => SubscriptionOutputFormat::Raw, + x if x == "json" => SubscriptionOutputFormat::Json, + _ => bail!("Invalid output format"), + }; + let output = match matches.subcommand() { + Some(("tcp", matches)) => SubscriptionOutput::Tcp(format, outputs_add_tcp(matches)?, true), + Some(("kafka", matches)) => { + SubscriptionOutput::Kafka(format, outputs_add_kafka(matches)?, true) + } + Some(("files", matches)) => { + SubscriptionOutput::Files(format, outputs_add_files(matches)?, true) + } + _ => { + bail!("Missing output type") + } + }; + subscription.add_output(output); + Ok(()) +} + +fn outputs_add_tcp(matches: &ArgMatches) -> Result { + let addr = matches + .get_one::("addr") + .ok_or_else(|| anyhow!("Missing IP address"))?; + let port = matches + .get_one::("port") + .ok_or_else(|| anyhow!("Missing TCP port"))?; + + info!("Adding TCP output : {}:{}", addr, port); + Ok(TcpConfiguration::new(addr.clone(), *port)) +} + +fn outputs_add_kafka(matches: &ArgMatches) -> Result { + let topic = matches + .get_one::("topic") + .ok_or_else(|| anyhow!("Missing Kafka topic"))?; + + let options = matches.get_many::("options").unwrap().enumerate(); + + let mut options_hashmap = HashMap::new(); + let mut key = String::new(); + for (index, elt) in options { + if index % 2 == 0 { + key = elt.to_owned(); + } else { + options_hashmap.insert(key.clone(), elt.to_owned()); + } + } + + info!( + "Adding Kafka output with topic \"{}\" and the following options: {:?}", + topic, options_hashmap + ); + + Ok(KafkaConfiguration::new(topic.clone(), options_hashmap)) +} + +fn outputs_add_files(matches: &ArgMatches) -> Result { + let base = matches + .get_one::("base") + .ok_or_else(|| anyhow!("Missing files base path"))? 
+ .to_owned(); + + let split_on_addr_index = matches.get_one::("split-on-addr-index").copied(); + let append_node_name = *matches + .get_one::("append-node-name") + .expect("defaulted by clap"); + let filename = matches + .get_one::("filename") + .expect("defaulted by clap") + .to_owned(); + + let config = FileConfiguration::new(base, split_on_addr_index, append_node_name, filename); + info!("Adding Files output with config {:?}", config); + Ok(config) +} + +async fn outputs_delete(subscription: &mut SubscriptionData, matches: &ArgMatches) -> Result<()> { + let index = matches + .get_one::("index") + .ok_or_else(|| anyhow!("Missing index"))?; + let output = subscription + .outputs() + .get(*index) + .ok_or_else(|| anyhow!("index out of range"))?; + if !*matches.get_one::("yes").expect("defaulted by clap") + && !confirm(&format!( + "Are you sure that you want to delete output: ({}) ?", + output, + )) + { + return Ok(()); + } + subscription + .delete_output(*index) + .context("Failed to delete output")?; + Ok(()) +} + +async fn outputs_enable(subscription: &mut SubscriptionData, matches: &ArgMatches) -> Result<()> { + let index = matches + .get_one::("index") + .ok_or_else(|| anyhow!("Missing index"))?; + subscription + .set_output_enabled(*index, true) + .context("Failed to enable output")?; + Ok(()) +} + +async fn outputs_disable(subscription: &mut SubscriptionData, matches: &ArgMatches) -> Result<()> { + let index = matches + .get_one::("index") + .ok_or_else(|| anyhow!("Missing index"))?; + let output = subscription + .outputs() + .get(*index) + .ok_or_else(|| anyhow!("index out of range"))?; + if !*matches.get_one::("yes").expect("defaulted by clap") + && !confirm(&format!( + "Are you sure that you want to disable output : ({}) ?", + output, + )) + { + return Ok(()); + } + subscription + .set_output_enabled(*index, false) + .context("Failed to enable output")?; + Ok(()) +} + +fn outputs_list(subscription: &SubscriptionData) { + if 
subscription.outputs().is_empty() { + println!( + "Subscription {} does not have any outputs configured yet.", + subscription.name() + ); + } else { + for (index, output) in subscription.outputs().iter().enumerate() { + println!("{}: {}", index, output); + } + } +} + +async fn machines(db: &Db, matches: &ArgMatches) -> Result<()> { + let subscription = find_subscription(db, matches) + .await + .context("Failed to retrieve subscriptions from database")?; + let interval = matches + .get_one::("interval") + .cloned() + .unwrap_or_else(|| subscription.heartbeat_interval()) as i64; + let now: i64 = SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH)? + .as_secs() + .try_into()?; + let start_heartbeat_interval = now - interval; + + let state = if *matches + .get_one::("active") + .expect("defaulted by clap") + { + Some(SubscriptionMachineState::Active) + } else if *matches.get_one::("alive").expect("defaulted by clap") { + Some(SubscriptionMachineState::Alive) + } else if *matches.get_one::("dead").expect("defaulted by clap") { + Some(SubscriptionMachineState::Dead) + } else { + None + }; + + let machines = db + .get_machines(subscription.uuid(), start_heartbeat_interval, state) + .await + .context("Failed to retrieve machines for subscription")?; + + for machine in machines { + println!("{}:{}", machine.ip(), machine.name()); + } + Ok(()) +} + +async fn set_enable(db: &Db, matches: &ArgMatches, value: bool) -> Result<()> { + let mut subscriptions = find_subscriptions(db, matches).await?; + + let mut to_store = Vec::new(); + for subscription in subscriptions.iter_mut() { + // Check that this subcription has outputs + if value && subscription.outputs().is_empty() { + warn!( + "Subscription {} must have at least one outputs configured to be enabled", + subscription.name() + ); + continue; + } + + subscription.set_enabled(value); + to_store.push(subscription.clone()); + } + + if to_store.is_empty() { + bail!("Nothing to store, check previous warnings"); + } + + for 
subscription in to_store { + db.store_subscription(subscription.clone()) + .await + .context("Failed to store subscription in db")?; + if value { + println!("+ Subscription {} has been enabled", subscription.name()); + } else { + println!("+ Subscription {} has been disabled", subscription.name()); + } + } + + Ok(()) +} + +async fn reload(db: &Db, matches: &ArgMatches) -> Result<()> { + let mut subscriptions = find_subscriptions(db, matches).await?; + + for subscription in subscriptions.iter_mut() { + subscription.update_version(); + db.store_subscription(subscription.clone()) + .await + .context("Failed to store subscription in db")?; + println!("+ Subscription {} has been reloaded", subscription.name()); + } + + Ok(()) +} + +async fn find_subscription(db: &Db, matches: &ArgMatches) -> Result { + let identifier = matches + .get_one::("subscription") + .ok_or_else(|| anyhow!("Missing argument subscription"))?; + utils::find_subscription(db, identifier) + .await + .with_context(|| format!("Failed to find subscription with identifier {}", identifier))? 
+ .ok_or_else(|| { + anyhow!( + "Subscription \"{}\" could not be found in database", + identifier + ) + }) +} + +async fn find_subscriptions(db: &Db, matches: &ArgMatches) -> Result> { + match matches.get_many::("subscriptions") { + Some(identifiers) => { + let mut subscriptions = Vec::new(); + for identifier in identifiers { + subscriptions.push(utils::find_subscription(db, identifier).await?.ok_or_else( + || anyhow!("Failed to find subscription with identifier {}", identifier), + )?); + } + Ok(subscriptions) + } + None => { + if *matches.get_one::("all").expect("defaulted by clap") { + db.get_subscriptions().await + } else { + bail!("No subscription given") + } + } + } +} diff --git a/cli/src/utils.rs b/cli/src/utils.rs new file mode 100644 index 0000000..522e114 --- /dev/null +++ b/cli/src/utils.rs @@ -0,0 +1,42 @@ +use anyhow::{anyhow, Context, Result}; +use clap::ArgMatches; +use common::database::Db; +use common::subscription::SubscriptionData; +use std::io; +use std::io::Write; + +pub fn confirm(message: &str) -> bool { + for _ in 0..3 { + print!("{} [y/n] ", message); + io::stdout().flush().unwrap(); + let mut input = String::new(); + match io::stdin().read_line(&mut input) { + Ok(n) if n == 2 => return input.to_ascii_lowercase().trim() == "y", + _ => (), + }; + } + false +} + +pub async fn find_subscription(db: &Db, identifier: &str) -> Result> { + db.get_subscription_by_identifier(identifier) + .await + .with_context(|| format!("Failed to find subscription with identifier {}", identifier)) +} + +pub async fn find_subscriptions( + db: &Db, + matches: &ArgMatches, + field: &str, +) -> Result> { + if let Some(identifier) = matches.get_one::(field) { + Ok(vec![find_subscription(db, identifier) + .await + .with_context(|| format!("Failed to find subscription with identifier {}", identifier))? + .ok_or_else(|| { + anyhow!("Subscription {} could not be found in database", identifier) + })?]) + } else { + Ok(db.get_subscriptions().await?) 
+ } +} diff --git a/common/Cargo.toml b/common/Cargo.toml new file mode 100644 index 0000000..29d6f12 --- /dev/null +++ b/common/Cargo.toml @@ -0,0 +1,28 @@ +[package] +name = "common" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +anyhow = "1.0.59" +rusqlite = { version = "0.28.0", features = ["bundled"] } +uuid = { version = "1.1.2", features = ["v4", "fast-rng"] } +serde = { version = "1.0", features = ["derive"] } +toml = "0.5.9" +log = "0.4.17" +tokio = { version = "1.21.2", features = ["full"] } +serde_json = "1.0.87" +async-trait = "0.1.58" +tokio-postgres = "0.7" +chrono = "0.4.23" +encoding_rs = "0.8.31" +deadpool-postgres = "0.10.5" +deadpool-sqlite = "0.5.0" +openssl = "0.10.45" +postgres-openssl = "0.5.0" + +[dev-dependencies] +tempfile = "3.3.0" +serial_test = "0.10.0" diff --git a/common/src/bookmark.rs b/common/src/bookmark.rs new file mode 100644 index 0000000..887d8be --- /dev/null +++ b/common/src/bookmark.rs @@ -0,0 +1,6 @@ +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct BookmarkData { + pub subscription: String, + pub machine: String, + pub bookmark: String, +} diff --git a/common/src/database/mod.rs b/common/src/database/mod.rs new file mode 100644 index 0000000..bff72f7 --- /dev/null +++ b/common/src/database/mod.rs @@ -0,0 +1,971 @@ +use std::{ + collections::{BTreeMap, BTreeSet}, + sync::Arc, +}; + +use crate::{ + bookmark::BookmarkData, + database::postgres::PostgresDatabase, + database::sqlite::SQLiteDatabase, + heartbeat::{HeartbeatData, HeartbeatsCache}, + settings::Settings, + subscription::{ + SubscriptionData, SubscriptionMachine, SubscriptionMachineState, SubscriptionStatsCounters, + }, +}; +use anyhow::{Context, Result}; +use async_trait::async_trait; + +use self::schema::{Migration, Version}; + +pub mod postgres; +pub mod schema; +pub mod sqlite; + +pub type Db = Arc; + +pub async fn db_from_settings(settings: &Settings) 
-> Result { + match settings.database() { + crate::settings::Database::SQLite(sqlite) => { + let mut db = SQLiteDatabase::new(sqlite.path()) + .await + .context("Failed to initialize SQLite client")?; + schema::sqlite::register_migrations(&mut db); + Ok(Arc::new(db)) + } + crate::settings::Database::Postgres(postgres) => { + let mut db = PostgresDatabase::new(postgres) + .await + .context("Failed to initialize Postgres client")?; + schema::postgres::register_migrations(&mut db); + Ok(Arc::new(db)) + } + } +} + +#[async_trait] +pub trait Database { + async fn get_bookmark(&self, machine: &str, subscription: &str) -> Result>; + async fn get_bookmarks(&self, subscription: &str) -> Result>; + async fn store_bookmark(&self, machine: &str, subscription: &str, bookmark: &str) + -> Result<()>; + async fn delete_bookmarks( + &self, + machine: Option<&str>, + subscription: Option<&str>, + ) -> Result<()>; + + async fn get_heartbeats(&self) -> Result>; + async fn get_heartbeats_by_machine( + &self, + machine: &str, + subscription: Option<&str>, + ) -> Result>; + async fn get_heartbeats_by_ip( + &self, + ip: &str, + subscription: Option<&str>, + ) -> Result>; + async fn get_heartbeats_by_subscription( + &self, + subscription: &str, + ) -> Result>; + async fn store_heartbeat( + &self, + machine: &str, + ip: String, + subscription: &str, + is_event: bool, + ) -> Result<()>; + async fn store_heartbeats(&self, heartbeats: &HeartbeatsCache) -> Result<()>; + + async fn get_subscriptions(&self) -> Result>; + async fn get_subscription(&self, version: &str) -> Result>; + async fn get_subscription_by_identifier( + &self, + identifier: &str, + ) -> Result>; + async fn store_subscription(&self, subscription: SubscriptionData) -> Result<()>; + async fn delete_subscription(&self, uuid: &str) -> Result<()>; + + async fn setup_schema(&self) -> Result<()>; + async fn current_version(&self) -> Result>; + async fn migrated_versions(&self) -> Result>; + async fn apply_migration(&self, version: 
Version) -> Result<()>; + async fn revert_migration(&self, version: Version) -> Result<()>; + async fn migrations(&self) -> BTreeMap>; + + async fn get_stats( + &self, + subscription: &str, + start_time: i64, + ) -> Result; + async fn get_machines( + &self, + subscription: &str, + start_time: i64, + state: Option, + ) -> Result>; +} + +pub async fn schema_is_up_to_date(db: Db) -> Result { + let migrated_versions = db + .migrated_versions() + .await + .context("Failed to retrieve currently applied migrations")?; + let migrations = db.migrations().await.keys().copied().collect(); + + Ok(migrated_versions == migrations) +} + +#[cfg(test)] +pub mod tests { + use anyhow::ensure; + + use crate::{ + heartbeat::{HeartbeatKey, HeartbeatValue}, + subscription::{FileConfiguration, SubscriptionOutput, SubscriptionOutputFormat}, + }; + + use super::{schema::Migrator, *}; + use std::{thread::sleep, time::Duration, time::SystemTime}; + + async fn setup_db(db: Arc) -> Result<()> { + db.setup_schema().await?; + let migrator = Migrator::new(db.clone()); + migrator.down(None, false).await?; + migrator.up(None, false).await?; + Ok(()) + } + + async fn clean_db(db: Arc) -> Result<()> { + let migrator = Migrator::new(db.clone()); + migrator.down(None, false).await?; + Ok(()) + } + + pub async fn test_subscriptions(db: Arc) -> Result<()> { + setup_db(db.clone()).await?; + assert!(db.get_subscriptions().await?.is_empty(),); + assert!(db.get_subscription("toto").await?.is_none(),); + assert!(db.get_subscription_by_identifier("toto").await?.is_none()); + assert!(db.get_subscription_by_identifier("toto").await?.is_none()); + let subscription = SubscriptionData::new( + "toto", + Some("/test/1"), + "query", + None, + None, + None, + None, + None, + false, + false, + None, + ); + db.store_subscription(subscription.clone()).await?; + assert!(db.get_subscriptions().await?.len() == 1); + let toto = &db.get_subscriptions().await?[0]; + assert_eq!(toto.name(), "toto"); + assert_eq!(toto.uri(), 
Some(&"/test/1".to_string())); + assert_eq!(toto.query(), "query",); + assert_eq!(toto.enabled(), false); + assert_eq!(toto.read_existing_events(), false); + + let toto2 = db.get_subscription_by_identifier("toto").await?.unwrap(); + assert_eq!(toto, &toto2); + + let toto3 = db + .get_subscription_by_identifier(subscription.uuid()) + .await? + .unwrap(); + assert_eq!(toto, &toto3); + let toto4 = db.get_subscription(subscription.version()).await?.unwrap(); + assert_eq!(toto, &toto4); + + let file_config_1 = + FileConfiguration::new("/path1".to_string(), None, false, "messages".to_string()); + let file_config_2 = + FileConfiguration::new("/path2".to_string(), None, false, "messages".to_string()); + let subscription2 = SubscriptionData::new( + "tata", + None, + "query2", + None, + None, + None, + None, + None, + true, + true, + Some(vec![ + SubscriptionOutput::Files( + SubscriptionOutputFormat::Json, + file_config_1.clone(), + true, + ), + SubscriptionOutput::Files( + SubscriptionOutputFormat::Raw, + file_config_2.clone(), + false, + ), + ]), + ); + db.store_subscription(subscription2).await?; + + assert!(db.get_subscriptions().await?.len() == 2); + + let mut tata = db.get_subscription_by_identifier("tata").await?.unwrap(); + assert_eq!(tata.name(), "tata"); + assert_eq!(tata.uri(), None); + assert_eq!(tata.query(), "query2",); + assert_eq!(tata.enabled(), true); + assert_eq!(tata.read_existing_events(), true); + assert_eq!( + tata.outputs(), + vec![ + SubscriptionOutput::Files( + SubscriptionOutputFormat::Json, + file_config_1.clone(), + true + ), + SubscriptionOutput::Files( + SubscriptionOutputFormat::Raw, + file_config_2.clone(), + false, + ), + ] + ); + + let tata_save = tata.clone(); + tata.set_name("titi".to_string()); + tata.set_max_time(25000); + tata.set_read_existing_events(false); + db.store_subscription(tata.clone()).await?; + + ensure!(db.get_subscriptions().await?.len() == 2); + let tata2 = db + .get_subscription_by_identifier(tata.uuid()) + .await? 
+ .unwrap(); + assert_eq!(tata2.name(), "titi"); + assert_eq!(tata2.max_time(), 25000); + assert_eq!(tata2.read_existing_events(), false); + assert!(tata2.version() != tata_save.version()); + + db.delete_subscription(toto4.uuid()).await?; + ensure!( + db.get_subscription("toto").await?.is_none(), + "The subscription with version 'toto' should not exist yet" + ); + assert!(db.get_subscriptions().await?.len() == 1); + + let tata3 = &db.get_subscriptions().await?[0]; + assert_eq!(&tata, tata3, "toto"); + + db.delete_subscription(tata.uuid()).await?; + assert!(db.get_subscriptions().await?.is_empty()); + + clean_db(db.clone()).await?; + Ok(()) + } + + pub async fn test_bookmarks(db: Arc) -> Result<()> { + setup_db(db.clone()).await?; + let subscription_tutu = SubscriptionData::new( + "tutu", None, "query", None, None, None, None, None, false, false, None, + ); + db.store_subscription(subscription_tutu.clone()).await?; + let subscription_titi = SubscriptionData::new( + "titi", None, "query", None, None, None, None, None, false, false, None, + ); + db.store_subscription(subscription_titi.clone()).await?; + + // Test non existent bookmark + assert!(db + .get_bookmark("toto", subscription_tutu.uuid()) + .await? + .is_none(),); + + assert!(db.get_bookmarks(subscription_tutu.uuid()).await?.is_empty()); + + // Store a bookmark + db.store_bookmark("toto", subscription_tutu.uuid(), "titi") + .await?; + // Test if the bookmark is correctly remembered + assert_eq!( + db.get_bookmark("toto", subscription_tutu.uuid()) + .await? + .unwrap(), + "titi", + ); + assert_eq!( + db.get_bookmarks(subscription_tutu.uuid()).await?[0], + BookmarkData { + machine: "toto".to_owned(), + subscription: subscription_tutu.uuid().to_owned(), + bookmark: "titi".to_owned() + } + ); + + // Update the bookmark + db.store_bookmark("toto", subscription_tutu.uuid(), "toto") + .await?; + // Test if the bookmark is correctly remembered + assert_eq!( + db.get_bookmark("toto", subscription_tutu.uuid()) + .await? 
+ .unwrap(), + "toto", + ); + assert_eq!( + db.get_bookmarks(subscription_tutu.uuid()).await?[0], + BookmarkData { + machine: "toto".to_owned(), + subscription: subscription_tutu.uuid().to_owned(), + bookmark: "toto".to_owned() + } + ); + // Update another bookmark + db.store_bookmark("toto", subscription_titi.uuid(), "babar") + .await?; + // Test if the original bookmark is correctly remembered + assert_eq!( + db.get_bookmark("toto", subscription_tutu.uuid()) + .await? + .unwrap(), + "toto", + ); + assert_eq!( + db.get_bookmarks(subscription_tutu.uuid()).await?[0], + BookmarkData { + machine: "toto".to_owned(), + subscription: subscription_tutu.uuid().to_owned(), + bookmark: "toto".to_owned() + } + ); + assert_eq!( + db.get_bookmark("toto", subscription_titi.uuid()) + .await? + .unwrap(), + "babar", + ); + assert_eq!( + db.get_bookmarks(subscription_titi.uuid()).await?[0], + BookmarkData { + machine: "toto".to_owned(), + subscription: subscription_titi.uuid().to_owned(), + bookmark: "babar".to_owned() + } + ); + + // Test that bookmarks are deleted if subscription is deleted + db.delete_subscription(subscription_tutu.uuid()).await?; + assert!(db + .get_bookmark("toto", subscription_tutu.uuid()) + .await? + .is_none(),); + assert!(db.get_bookmarks(subscription_tutu.uuid()).await?.is_empty()); + assert_eq!( + db.get_bookmark("toto", subscription_titi.uuid()) + .await? + .unwrap(), + "babar", + ); + assert!(!db.get_bookmarks(subscription_titi.uuid()).await?.is_empty()); + db.delete_subscription(subscription_titi.uuid()).await?; + assert!(db + .get_bookmark("toto", subscription_titi.uuid()) + .await? 
+ .is_none(),); + assert!(db.get_bookmarks(subscription_titi.uuid()).await?.is_empty()); + + db.store_subscription(subscription_tutu.clone()).await?; + db.store_subscription(subscription_titi.clone()).await?; + + db.store_bookmark("m1", subscription_tutu.uuid(), "m1b1") + .await?; + db.store_bookmark("m2", subscription_tutu.uuid(), "m2b1") + .await?; + db.store_bookmark("m1", subscription_titi.uuid(), "m1b2") + .await?; + + // Test Retrieve bookmarks for subscription tutu + let bookmarks = db.get_bookmarks(subscription_tutu.uuid()).await?; + assert_eq!( + bookmarks + .iter() + .find(|b| b.machine == "m1") + .unwrap() + .bookmark, + "m1b1" + ); + assert_eq!( + bookmarks + .iter() + .find(|b| b.machine == "m2") + .unwrap() + .bookmark, + "m2b1" + ); + + db.delete_bookmarks(Some("m1"), Some(subscription_titi.uuid())) + .await?; + assert!(db + .get_bookmark("m1", subscription_titi.uuid()) + .await? + .is_none()); + + db.store_bookmark("m1", subscription_titi.uuid(), "m1b3") + .await?; + db.delete_bookmarks(None, Some(subscription_tutu.uuid())) + .await?; + assert!(db + .get_bookmark("m1", subscription_tutu.uuid()) + .await? + .is_none()); + assert!(db + .get_bookmark("m2", subscription_tutu.uuid()) + .await? + .is_none()); + assert_eq!( + db.get_bookmark("m1", subscription_titi.uuid()) + .await? + .unwrap(), + "m1b3" + ); + + db.store_bookmark("m1", subscription_tutu.uuid(), "m1b4") + .await?; + db.store_bookmark("m2", subscription_tutu.uuid(), "m2b2") + .await?; + db.delete_bookmarks(Some("m1"), None).await?; + assert_eq!( + db.get_bookmark("m2", subscription_tutu.uuid()) + .await? + .unwrap(), + "m2b2" + ); + assert!(db + .get_bookmark("m1", subscription_tutu.uuid()) + .await? + .is_none()); + assert!(db + .get_bookmark("m1", subscription_titi.uuid()) + .await? 
+ .is_none()); + + db.store_bookmark("m1", subscription_tutu.uuid(), "m1b5") + .await?; + db.store_bookmark("m2", subscription_titi.uuid(), "m2b3") + .await?; + db.delete_bookmarks(None, None).await?; + assert!(db + .get_bookmark("m1", subscription_tutu.uuid()) + .await? + .is_none()); + assert!(db + .get_bookmark("m2", subscription_titi.uuid()) + .await? + .is_none()); + + clean_db(db.clone()).await?; + Ok(()) + } + + pub async fn test_heartbeats(db: Arc) -> Result<()> { + setup_db(db.clone()).await?; + ensure!( + db.get_heartbeats_by_machine("toto", None).await?.is_empty(), + "Non existent heartbeat should be None" + ); + + assert!(db.get_heartbeats().await?.is_empty()); + + let subscription_tutu = SubscriptionData::new( + "tutu", None, "query", None, None, None, None, None, false, false, None, + ); + + db.store_subscription(subscription_tutu.clone()).await?; + + let before = SystemTime::now(); + sleep(Duration::from_secs(1)); + + // Store a heartbeat + db.store_heartbeat( + "toto", + "127.0.0.1".to_string(), + subscription_tutu.uuid(), + false, + ) + .await?; + let heartbeat = db + .get_heartbeats_by_machine("toto", Some(subscription_tutu.uuid())) + .await?[0] + .clone(); + assert_eq!( + heartbeat.first_seen(), + heartbeat.last_seen(), + "First seen and last seen should be equal" + ); + assert_eq!(heartbeat.last_event_seen(), None); + + // Store a heartbeat + let after = SystemTime::now(); + let time_first_seen = SystemTime::UNIX_EPOCH + + Duration::from_secs(heartbeat.first_seen().try_into().unwrap()); + assert!( + time_first_seen >= before && time_first_seen <= after, + "First seen should be correct" + ); + assert_eq!(heartbeat.ip(), "127.0.0.1"); + assert_eq!(heartbeat.machine(), "toto"); + assert_eq!(heartbeat.subscription(), &subscription_tutu); + + assert!(db.get_heartbeats().await?.len() == 1); + assert_eq!(db.get_heartbeats().await?[0], heartbeat); + assert_eq!( + db.get_heartbeats_by_ip("127.0.0.1", None).await?[0], + heartbeat + ); + 
assert!(db.get_heartbeats_by_ip("127.0.0.2", None).await?.is_empty(),); + assert_eq!( + db.get_heartbeats_by_ip("127.0.0.1", Some(subscription_tutu.uuid())) + .await?[0], + heartbeat + ); + + sleep(Duration::from_secs(1)); + + db.store_heartbeat( + "toto", + "127.0.0.1".to_string(), + subscription_tutu.uuid(), + true, + ) + .await?; + + let heartbeat = db + .get_heartbeats_by_machine("toto", Some(subscription_tutu.uuid())) + .await?[0] + .clone(); + assert!( + heartbeat.first_seen() < heartbeat.last_seen(), + "First seen and last seen should NOT be equal" + ); + assert_eq!(heartbeat.last_seen(), heartbeat.last_event_seen().unwrap()); + + db.store_heartbeat( + "tata", + "127.0.0.2".to_string(), + subscription_tutu.uuid(), + false, + ) + .await?; + + let heartbeats = db.get_heartbeats().await?; + assert_eq!(heartbeats.len(), 2); + + assert_eq!( + db.get_heartbeats_by_subscription(subscription_tutu.uuid()) + .await?, + heartbeats + ); + + db.store_heartbeat( + "tata", + "127.0.0.2".to_string(), + subscription_tutu.uuid(), + true, + ) + .await?; + assert!(!db.get_heartbeats_by_ip("127.0.0.2", None).await?.is_empty()); + + // Remove subscription and assert that heartbeats have been deleted + db.delete_subscription(subscription_tutu.uuid()).await?; + assert!(db.get_heartbeats().await?.is_empty()); + + clean_db(db.clone()).await?; + Ok(()) + } + + pub async fn test_heartbeats_cache(db: Arc) -> Result<()> { + setup_db(db.clone()).await?; + + let subscription_tutu = SubscriptionData::new( + "tutu", None, "query", None, None, None, None, None, false, false, None, + ); + + db.store_subscription(subscription_tutu.clone()).await?; + + let mut heartbeats = HeartbeatsCache::new(); + heartbeats.insert( + HeartbeatKey { + machine: "m1".to_string(), + subscription: subscription_tutu.uuid().to_owned(), + }, + HeartbeatValue { + ip: "127.0.0.1".to_string(), + last_seen: 1, + last_event_seen: None, + }, + ); + heartbeats.insert( + HeartbeatKey { + machine: "m2".to_string(), + 
subscription: subscription_tutu.uuid().to_owned(), + }, + HeartbeatValue { + ip: "127.0.0.2".to_string(), + last_seen: 2, + last_event_seen: Some(2), + }, + ); + db.store_heartbeats(&heartbeats).await?; + + let db_heartbeats = db.get_heartbeats().await?; + assert_eq!(db_heartbeats.len(), 2); + let m1_heartbeat = db_heartbeats + .iter() + .find(|e| e.machine() == "m1") + .cloned() + .expect("m1 heartbeat"); + assert_eq!(m1_heartbeat.first_seen(), 1); + assert_eq!(m1_heartbeat.last_seen(), 1); + assert_eq!(m1_heartbeat.last_event_seen(), None); + assert_eq!(m1_heartbeat.ip(), "127.0.0.1"); + + let m2_heartbeat = db_heartbeats + .iter() + .find(|e| e.machine() == "m2") + .cloned() + .expect("m2 heartbeat"); + assert_eq!(m2_heartbeat.first_seen(), 2); + assert_eq!(m2_heartbeat.last_seen(), 2); + assert_eq!(m2_heartbeat.last_event_seen(), Some(2)); + assert_eq!(m2_heartbeat.ip(), "127.0.0.2"); + + heartbeats.clear(); + + // Update heartbeat for m1, and change its IP address + heartbeats.insert( + HeartbeatKey { + machine: "m1".to_string(), + subscription: subscription_tutu.uuid().to_owned(), + }, + HeartbeatValue { + ip: "127.0.0.100".to_string(), + last_seen: 3, + last_event_seen: Some(3), + }, + ); + db.store_heartbeats(&heartbeats).await?; + + let db_heartbeats = db.get_heartbeats().await?; + assert_eq!(db_heartbeats.len(), 2); + let m1_heartbeat = db_heartbeats + .iter() + .find(|e| e.machine() == "m1") + .cloned() + .expect("m1 heartbeat"); + assert_eq!(m1_heartbeat.first_seen(), 1); + assert_eq!(m1_heartbeat.last_seen(), 3); + assert_eq!(m1_heartbeat.last_event_seen(), Some(3)); + assert_eq!(m1_heartbeat.ip(), "127.0.0.100"); + + // Nothing has changed for m2 + let m2_heartbeat = db_heartbeats + .iter() + .find(|e| e.machine() == "m2") + .cloned() + .expect("m2 heartbeat"); + assert_eq!(m2_heartbeat.first_seen(), 2); + assert_eq!(m2_heartbeat.last_seen(), 2); + assert_eq!(m2_heartbeat.last_event_seen(), Some(2)); + assert_eq!(m2_heartbeat.ip(), "127.0.0.2"); + + 
// Try to store a lot of heartbeats + let mut heartbeats = HeartbeatsCache::new(); + for i in 0..1020 { + heartbeats.insert( + HeartbeatKey { + machine: format!("machine${}", i * 2), + subscription: subscription_tutu.uuid().to_owned(), + }, + HeartbeatValue { + ip: "127.0.0.1".to_string(), + last_seen: 1, + last_event_seen: None, + }, + ); + heartbeats.insert( + HeartbeatKey { + machine: format!("machine${}", i * 2 + 1), + subscription: subscription_tutu.uuid().to_owned(), + }, + HeartbeatValue { + ip: "127.0.0.2".to_string(), + last_seen: 2, + last_event_seen: Some(2), + }, + ); + } + db.store_heartbeats(&heartbeats).await?; + let db_heartbeats = db.get_heartbeats().await?; + assert_eq!(db_heartbeats.len(), 1020 * 2 + 2); + clean_db(db.clone()).await?; + Ok(()) + } + + pub async fn test_stats_and_machines(db: Arc) -> Result<()> { + setup_db(db.clone()).await?; + + assert_eq!( + db.get_stats("", 0).await?, + SubscriptionStatsCounters::new(0, 0, 0, 0) + ); + + let subscription_tutu = SubscriptionData::new( + "tutu", None, "query", None, None, None, None, None, false, false, None, + ); + + db.store_subscription(subscription_tutu.clone()).await?; + assert_eq!( + db.get_stats(subscription_tutu.uuid(), 0).await?, + SubscriptionStatsCounters::new(0, 0, 0, 0) + ); + + assert!(db + .get_machines(subscription_tutu.uuid(), 0, None) + .await? + .is_empty()); + + assert!(db + .get_machines( + subscription_tutu.uuid(), + 0, + Some(SubscriptionMachineState::Alive) + ) + .await? + .is_empty()); + + assert!(db + .get_machines( + subscription_tutu.uuid(), + 0, + Some(SubscriptionMachineState::Active) + ) + .await? + .is_empty()); + + assert!(db + .get_machines( + subscription_tutu.uuid(), + 0, + Some(SubscriptionMachineState::Dead) + ) + .await? + .is_empty()); + + let now: i64 = SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH)? 
+ .as_secs() + .try_into()?; + + // Store a heartbeat + db.store_heartbeat( + "toto", + "127.0.0.1".to_string(), + subscription_tutu.uuid(), + false, + ) + .await?; + + println!("{:?}", db.get_heartbeats().await?); + + assert_eq!( + db.get_stats(subscription_tutu.uuid(), 0).await?, + // total:1, alive:1, active:0, dead:0 + SubscriptionStatsCounters::new(1, 1, 0, 0) + ); + + let alive_machines = db + .get_machines( + subscription_tutu.uuid(), + 0, + Some(SubscriptionMachineState::Alive), + ) + .await?; + println!("{:?}", alive_machines); + assert_eq!(alive_machines.len(), 1); + assert_eq!(alive_machines[0].name(), "toto"); + assert_eq!(alive_machines[0].ip(), "127.0.0.1"); + + let total_machines = db.get_machines(subscription_tutu.uuid(), 0, None).await?; + assert_eq!(total_machines.len(), 1); + assert_eq!(total_machines[0].name(), "toto"); + assert_eq!(total_machines[0].ip(), "127.0.0.1"); + + assert!(db + .get_machines( + subscription_tutu.uuid(), + 0, + Some(SubscriptionMachineState::Active) + ) + .await? + .is_empty()); + assert!(db + .get_machines( + subscription_tutu.uuid(), + 0, + Some(SubscriptionMachineState::Dead) + ) + .await? + .is_empty()); + + // Store an event heartbeat + db.store_heartbeat( + "toto", + "127.0.0.1".to_string(), + subscription_tutu.uuid(), + true, + ) + .await?; + + assert_eq!( + db.get_stats(subscription_tutu.uuid(), 0).await?, + // total:1, alive:0, active:1, dead:0 + SubscriptionStatsCounters::new(1, 0, 1, 0) + ); + + assert_eq!( + db.get_machines( + subscription_tutu.uuid(), + 0, + Some(SubscriptionMachineState::Active) + ) + .await? + .len(), + 1 + ); + assert!(db + .get_machines( + subscription_tutu.uuid(), + 0, + Some(SubscriptionMachineState::Alive) + ) + .await? + .is_empty()); + assert!(db + .get_machines( + subscription_tutu.uuid(), + 0, + Some(SubscriptionMachineState::Dead) + ) + .await? + .is_empty()); + assert_eq!( + db.get_machines(subscription_tutu.uuid(), 0, None) + .await? 
+ .len(), + 1 + ); + + sleep(Duration::from_secs(2)); + + // Store a heartbeat for another machine + db.store_heartbeat( + "tata", + "127.0.0.2".to_string(), + subscription_tutu.uuid(), + false, + ) + .await?; + + // We have waited 2 seconds and set heartbeat_interval_start at "now + 1", so + // only the last stored heartbeat is considered alive. + assert_eq!( + db.get_stats(subscription_tutu.uuid(), now + 1).await?, + // total:2, alive:1, active:0, dead:1 + SubscriptionStatsCounters::new(2, 1, 0, 1) + ); + + let total_machines = db + .get_machines(subscription_tutu.uuid(), now + 1, None) + .await?; + assert_eq!(total_machines.len(), 2); + + let alive_machines = db + .get_machines( + subscription_tutu.uuid(), + now + 1, + Some(SubscriptionMachineState::Alive), + ) + .await?; + assert_eq!(alive_machines.len(), 1); + assert_eq!(alive_machines[0].name(), "tata"); + assert_eq!(alive_machines[0].ip(), "127.0.0.2"); + + let dead_machines = db + .get_machines( + subscription_tutu.uuid(), + now + 1, + Some(SubscriptionMachineState::Dead), + ) + .await?; + assert_eq!(dead_machines.len(), 1); + assert_eq!(dead_machines[0].name(), "toto"); + assert_eq!(dead_machines[0].ip(), "127.0.0.1"); + + assert!(db + .get_machines( + subscription_tutu.uuid(), + now + 1, + Some(SubscriptionMachineState::Active) + ) + .await? 
+ .is_empty()); + + // Store an event heartbeat for first machine + db.store_heartbeat( + "toto", + "127.0.0.1".to_string(), + subscription_tutu.uuid(), + true, + ) + .await?; + + // First machine is active again + assert_eq!( + db.get_stats(subscription_tutu.uuid(), now + 1).await?, + // total:2, alive:1, active:1, dead:0 + SubscriptionStatsCounters::new(2, 1, 1, 0) + ); + + // Create another subscription + let subscription_tata = SubscriptionData::new( + "tata", None, "query", None, None, None, None, None, false, false, None, + ); + + db.store_subscription(subscription_tata.clone()).await?; + + // Store an heartbeat for this other subscription + db.store_heartbeat( + "toto", + "127.0.0.1".to_string(), + subscription_tata.uuid(), + true, + ) + .await?; + + // Nothing has changed for first subscription + assert_eq!( + db.get_stats(subscription_tutu.uuid(), now + 1).await?, + // total:2, alive:1, active:1, dead:0 + SubscriptionStatsCounters::new(2, 1, 1, 0) + ); + + clean_db(db.clone()).await?; + Ok(()) + } +} diff --git a/common/src/database/postgres.rs b/common/src/database/postgres.rs new file mode 100644 index 0000000..fcfd786 --- /dev/null +++ b/common/src/database/postgres.rs @@ -0,0 +1,1017 @@ +// Some of the following code comes from +// https://github.com/SkylerLipthay/schemamama_postgres. It was not used +// directly has some parts needed to be modify to be integrated to OpenWEC. 
As +// stated by its license (MIT), we include below its copyright notice and +// permission notice: +// +// The MIT License (MIT) +// +// Copyright (c) 2015 Skyler Lipthay +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. 
+// +// +use crate::bookmark::BookmarkData; +use crate::heartbeat::{HeartbeatKey, HeartbeatsCache}; +use crate::settings::PostgresSslMode; +use crate::subscription::{ + SubscriptionMachine, SubscriptionMachineState, SubscriptionStatsCounters, +}; +use crate::{ + database::Database, heartbeat::HeartbeatData, settings::Postgres, + subscription::SubscriptionData, +}; +use anyhow::{anyhow, bail, ensure, Context, Result}; +use async_trait::async_trait; +use deadpool_postgres::{Config, Pool, Runtime, SslMode, Transaction}; +use log::{error, warn}; +use openssl::ssl::{SslConnector, SslMethod}; +use postgres_openssl::MakeTlsConnector; +use std::collections::btree_map::Entry::Vacant; +use std::{ + collections::{BTreeMap, BTreeSet}, + sync::Arc, + time::SystemTime, +}; +use tokio_postgres::types::ToSql; +use tokio_postgres::{NoTls, Row}; + +use super::schema::{Migration, MigrationBase, Version}; + +const MIGRATION_TABLE_NAME: &str = "__schema_migrations"; + +/// A migration to be used within a PostgreSQL connection. +#[async_trait] +pub trait PostgresMigration: Migration { + /// Called when this migration is to be executed. + async fn up(&self, transaction: &mut Transaction) -> Result<()>; + + /// Called when this migration is to be reversed. 
+ async fn down(&self, transaction: &mut Transaction) -> Result<()>; + fn to_base(&self) -> Arc { + Arc::new(MigrationBase::new(self.version(), self.description())) + } +} + +struct PostgresHeartbeatEventValue { + pub ip: String, + pub last_seen: i64, + pub last_event_seen: i64, +} + +struct PostgresHeartbeatValue { + pub ip: String, + pub last_seen: i64, +} + +pub struct PostgresDatabase { + pool: Pool, + migrations: BTreeMap>, + max_chunk_size: usize, +} + +impl PostgresDatabase { + pub async fn new(settings: &Postgres) -> Result { + let mut config = Config::default(); + config.host = Some(settings.host().to_string()); + config.port = Some(settings.port()); + config.user = Some(settings.user().to_string()); + config.password = Some(settings.password().to_string()); + config.dbname = Some(settings.dbname().to_string()); + + let pool = if *settings.ssl_mode() == PostgresSslMode::Disable { + config + .create_pool(Some(Runtime::Tokio1), NoTls) + .context("Failed to create database pool (with NoTls)")? + } else { + config.ssl_mode = match settings.ssl_mode() { + PostgresSslMode::Prefer => Some(SslMode::Prefer), + PostgresSslMode::Require => Some(SslMode::Require), + _ => None, + }; + let mut builder = SslConnector::builder(SslMethod::tls()) + .context("Failed to initialize TLS context")?; + if let Some(ca_file) = settings.ca_file() { + builder + .set_ca_file(ca_file) + .context("Failed to configure CA cert file")?; + } + let connector = MakeTlsConnector::new(builder.build()); + config + .create_pool(Some(Runtime::Tokio1), connector) + .context("Failed to create database pool (with Tls)")? + }; + + let db = PostgresDatabase { + pool, + migrations: BTreeMap::new(), + max_chunk_size: settings.max_chunk_size(), + }; + + Ok(db) + } + + /// Register a migration. If a migration with the same version is already registered, a warning + /// is logged and the registration fails. 
+ pub fn register_migration(&mut self, migration: Arc) { + let version = migration.version(); + if let Vacant(e) = self.migrations.entry(version) { + e.insert(migration); + } else { + warn!("Migration with version {:?} is already registered", version); + } + } + + async fn get_subscription_by_field( + &self, + field: &str, + value: &str, + ) -> Result> { + let res = self + .pool + .get() + .await? + .query_opt( + format!( + r#"SELECT * + FROM subscriptions + WHERE {} = $1"#, + field + ) + .as_str(), + &[&value], + ) + .await?; + Ok(match res { + Some(row) => Some(row_to_subscription(&row)?), + None => None, + }) + } + + async fn get_heartbeats_by_field( + &self, + field: &str, + field_value: &str, + subscription: Option<&str>, + ) -> Result> { + let client = self.pool.get().await?; + + let rows = if let Some(value) = subscription { + client + .query( + format!( + r#"SELECT * + FROM heartbeats + JOIN subscriptions ON subscriptions.uuid = heartbeats.subscription + WHERE {} = $1 + AND subscription = $2"#, + field + ) + .as_str(), + &[&field_value, &value], + ) + .await? + } else { + client + .query( + format!( + r#"SELECT * + FROM heartbeats + JOIN subscriptions ON subscriptions.uuid = heartbeats.subscription + WHERE {} = $1"#, + field + ) + .as_str(), + &[&field_value], + ) + .await? + }; + + let mut heartbeats = Vec::new(); + for row in rows { + heartbeats.push(row_to_heartbeat(&row)?); + } + Ok(heartbeats) + } + + pub fn pool(&self) -> &Pool { + &self.pool + } +} + +fn row_to_subscription(row: &Row) -> Result { + let outputs_str: String = row.try_get("outputs")?; + let outputs = match serde_json::from_str(&outputs_str) { + Ok(outputs) => outputs, + Err(e) => { + error!( + "Failed to parse subscription output : {}. 
Subscription output is {}", + e, outputs_str + ); + bail!("Failed to parse subscription output"); + } + }; + let heartbeat_interval: i32 = row.try_get("heartbeat_interval")?; + let connection_retry_count: i32 = row.try_get("connection_retry_count")?; + let connection_retry_interval: i32 = row.try_get("connection_retry_interval")?; + let max_envelope_size: i32 = row.try_get("max_envelope_size")?; + let max_time: i32 = row.try_get("max_time")?; + Ok(SubscriptionData::from( + row.try_get("uuid")?, + row.try_get("version")?, + row.try_get("name")?, + row.try_get("uri")?, + row.try_get("query")?, + heartbeat_interval.try_into()?, + connection_retry_count.try_into()?, + connection_retry_interval.try_into()?, + max_time.try_into()?, + max_envelope_size.try_into()?, + row.try_get("enabled")?, + row.try_get("read_existing_events")?, + outputs, + )) +} + +fn row_to_heartbeat(row: &Row) -> Result { + let subscription = row_to_subscription(row)?; + let heartbeat = HeartbeatData::new( + row.try_get("machine")?, + row.try_get("ip")?, + subscription, + row.try_get("first_seen")?, + row.try_get("last_seen")?, + row.try_get("last_event_seen")?, + ); + Ok(heartbeat) +} + +fn gen_heartbeats_query(size: usize, with_event: bool) -> String { + let mut query = "INSERT INTO heartbeats(machine, ip, subscription, first_seen, last_seen, last_event_seen) VALUES ".to_string(); + + for i in 0..size { + let values = if with_event { + format!( + "(${}, ${}, ${}, ${}, ${}, ${}) ", + 6 * i + 1, + 6 * i + 2, + 6 * i + 3, + 6 * i + 4, + 6 * i + 5, + 6 * i + 6 + ) + } else { + format!( + "(${}, ${}, ${}, ${}, ${}, null) ", + 5 * i + 1, + 5 * i + 2, + 5 * i + 3, + 5 * i + 4, + 5 * i + 5, + ) + }; + if i == size - 1 { + query.push_str(&values); + } else { + query.push_str(&values); + query.push_str(", "); + } + } + + if with_event { + query.push_str( + r#"ON CONFLICT (machine, subscription) DO UPDATE SET + last_seen = excluded.last_seen, + last_event_seen = excluded.last_event_seen, + ip = 
excluded.ip"#, + ); + } else { + query.push_str( + r#"ON CONFLICT (machine, subscription) DO UPDATE SET + last_seen = excluded.last_seen, + ip = excluded.ip"#, + ); + } + query +} + +#[async_trait] +impl Database for PostgresDatabase { + async fn get_bookmark(&self, machine: &str, subscription: &str) -> Result> { + let res = self + .pool + .get() + .await? + .query_opt( + r#"SELECT bookmark + FROM bookmarks + WHERE machine = $1 + AND subscription = $2"#, + &[&machine, &subscription], + ) + .await?; + Ok(match res { + Some(row) => Some(row.try_get("bookmark")?), + None => None, + }) + } + + async fn get_bookmarks(&self, subscription: &str) -> Result> { + let client = self.pool.get().await?; + let rows = client + .query( + r#"SELECT machine, bookmark + FROM bookmarks + WHERE subscription = $1"#, + &[&subscription], + ) + .await?; + let mut bookmarks = Vec::new(); + for row in rows { + bookmarks.push(BookmarkData { + machine: row.try_get("machine")?, + subscription: subscription.to_owned(), + bookmark: row.try_get("bookmark")?, + }); + } + + Ok(bookmarks) + } + + async fn store_bookmark( + &self, + machine: &str, + subscription: &str, + bookmark: &str, + ) -> Result<()> { + let count = self + .pool + .get() + .await? 
+ .execute( + r#"INSERT INTO bookmarks(machine, subscription, bookmark) + VALUES ($1, $2, $3) + ON CONFLICT (machine, subscription) DO + UPDATE SET bookmark = excluded.bookmark"#, + &[&machine, &subscription, &bookmark], + ) + .await?; + + ensure!(count == 1, "Only one row must have been updated"); + + Ok(()) + } + + async fn delete_bookmarks( + &self, + machine: Option<&str>, + subscription: Option<&str>, + ) -> Result<()> { + let client = self.pool.get().await?; + match (machine, subscription) { + (Some(machine), Some(subscription)) => { + client + .execute( + "DELETE FROM bookmarks WHERE machine = $1 AND subscription = $2", + &[&machine, &subscription], + ) + .await?; + } + (Some(machine), None) => { + client + .execute("DELETE FROM bookmarks WHERE machine = $1", &[&machine]) + .await?; + } + (None, Some(subscription)) => { + client + .execute( + "DELETE FROM bookmarks WHERE subscription = $1", + &[&subscription], + ) + .await?; + } + (None, None) => { + client.execute("DELETE FROM bookmarks", &[]).await?; + } + }; + Ok(()) + } + + async fn get_heartbeats_by_machine( + &self, + machine: &str, + subscription: Option<&str>, + ) -> Result> { + self.get_heartbeats_by_field("machine", machine, subscription) + .await + } + + async fn get_heartbeats_by_ip( + &self, + ip: &str, + subscription: Option<&str>, + ) -> Result> { + self.get_heartbeats_by_field("ip", ip, subscription).await + } + + async fn get_heartbeats(&self) -> Result> { + let rows = self + .pool + .get() + .await? + .query( + r#"SELECT * + FROM heartbeats + JOIN subscriptions ON subscriptions.uuid = heartbeats.subscription"#, + &[], + ) + .await?; + let mut heartbeats = Vec::new(); + for row in rows { + heartbeats.push(row_to_heartbeat(&row)?); + } + + Ok(heartbeats) + } + + async fn get_heartbeats_by_subscription( + &self, + subscription: &str, + ) -> Result> { + let rows = self + .pool + .get() + .await? 
+ .query( + r#"SELECT * + FROM heartbeats + JOIN subscriptions ON subscriptions.uuid = heartbeats.subscription + WHERE subscription = $1"#, + &[&subscription], + ) + .await?; + + let mut heartbeats = Vec::new(); + for row in rows { + heartbeats.push(row_to_heartbeat(&row)?); + } + + Ok(heartbeats) + } + + async fn store_heartbeat( + &self, + machine: &str, + ip: String, + subscription: &str, + is_event: bool, + ) -> Result<()> { + let now: i64 = SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH)? + .as_secs() + .try_into()?; + + let query = if is_event { + r#"INSERT INTO heartbeats(machine, ip, subscription, first_seen, last_seen, last_event_seen) + VALUES ($1, $2, $3, $4, $4, $4) + ON CONFLICT (machine, subscription) DO + UPDATE SET last_seen = excluded.last_seen, + last_event_seen = excluded.last_event_seen"# + } else { + r#"INSERT INTO heartbeats(machine, ip, subscription, first_seen, last_seen, last_event_seen) + VALUES ($1, $2, $3, $4, $4, null) + ON CONFLICT (machine, subscription) DO + UPDATE SET last_seen = excluded.last_seen"# + }; + let count = self + .pool + .get() + .await? 
+ .execute(query, &[&machine, &ip, &subscription, &now]) + .await?; + + ensure!(count == 1, "Only one row must have been updated"); + Ok(()) + } + + async fn store_heartbeats(&self, heartbeats: &HeartbeatsCache) -> Result<()> { + let client = self.pool.get().await?; + + let mut with_event: Vec<(HeartbeatKey, PostgresHeartbeatEventValue)> = Vec::new(); + let mut without_event: Vec<(HeartbeatKey, PostgresHeartbeatValue)> = Vec::new(); + for (key, value) in heartbeats { + let last_seen: i64 = value.last_seen.try_into()?; + match value.last_event_seen { + Some(last_event_seen) => { + let last_event_seen: i64 = last_event_seen.try_into()?; + with_event.push(( + key.clone(), + PostgresHeartbeatEventValue { + last_seen, + ip: value.ip.clone(), + last_event_seen, + }, + )) + } + None => { + without_event.push(( + key.clone(), + PostgresHeartbeatValue { + last_seen, + ip: value.ip.clone(), + }, + )); + } + } + } + + for chunk in with_event.chunks(self.max_chunk_size) { + let query = gen_heartbeats_query(chunk.len(), true); + let mut params: Vec<&(dyn ToSql + Sync)> = Vec::new(); + for (key, value) in chunk { + params.push(&key.machine); + params.push(&value.ip); + params.push(&key.subscription); + params.push(&value.last_seen); + params.push(&value.last_seen); + params.push(&value.last_event_seen); + } + client.execute(&query, ¶ms[..]).await?; + } + + for chunk in without_event.chunks(self.max_chunk_size) { + let query = gen_heartbeats_query(chunk.len(), false); + let mut params: Vec<&(dyn ToSql + Sync)> = Vec::new(); + for (key, value) in chunk { + params.push(&key.machine); + params.push(&value.ip); + params.push(&key.subscription); + params.push(&value.last_seen); + params.push(&value.last_seen); + } + client.execute(&query, ¶ms[..]).await?; + } + + Ok(()) + } + + async fn get_subscriptions(&self) -> Result> { + let rows = self + .pool + .get() + .await? 
+ .query( + r#" + SELECT * + FROM subscriptions + "#, + &[], + ) + .await?; + + let mut subscriptions = Vec::new(); + for row in rows { + subscriptions.push(row_to_subscription(&row)?) + } + + Ok(subscriptions) + } + + async fn get_subscription(&self, version: &str) -> Result> { + self.get_subscription_by_field("version", version).await + } + + async fn get_subscription_by_identifier( + &self, + identifier: &str, + ) -> Result> { + let res = self + .pool + .get() + .await? + .query_opt( + r#"SELECT * + FROM subscriptions + WHERE uuid = $1 OR name = $1"#, + &[&identifier], + ) + .await?; + + Ok(match res { + Some(row) => Some(row_to_subscription(&row)?), + None => None, + }) + } + + async fn store_subscription(&self, subscription: SubscriptionData) -> Result<()> { + let heartbeat_interval: i32 = subscription.heartbeat_interval().try_into()?; + let connection_retry_count: i32 = subscription.connection_retry_count().try_into()?; + let connection_retry_interval: i32 = subscription.connection_retry_interval().try_into()?; + let max_time: i32 = subscription.max_time().try_into()?; + let max_envelope_size: i32 = subscription.max_envelope_size().try_into()?; + let count = self + .pool + .get() + .await? 
+ .execute( + r#"INSERT INTO subscriptions (uuid, version, name, uri, query, + heartbeat_interval, connection_retry_count, connection_retry_interval, + max_time, max_envelope_size, enabled, read_existing_events, outputs) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13) + ON CONFLICT (uuid) DO UPDATE SET + version = excluded.version, + name = excluded.name, + uri = excluded.uri, + query = excluded.query, + heartbeat_interval = excluded.heartbeat_interval, + connection_retry_count = excluded.connection_retry_count, + connection_retry_interval = excluded.connection_retry_interval, + max_time = excluded.max_time, + max_envelope_size = excluded.max_envelope_size, + enabled = excluded.enabled, + read_existing_events = excluded.read_existing_events, + outputs = excluded.outputs"#, + &[ + &subscription.uuid(), + &subscription.version(), + &subscription.name(), + &subscription.uri(), + &subscription.query(), + &heartbeat_interval, + &connection_retry_count, + &connection_retry_interval, + &max_time, + &max_envelope_size, + &subscription.enabled(), + &subscription.read_existing_events(), + &serde_json::to_string(subscription.outputs())?.as_str(), + ], + ) + .await?; + + ensure!(count == 1, "Only one row must have been updated"); + + Ok(()) + } + async fn delete_subscription(&self, uuid: &str) -> Result<()> { + let count = self + .pool + .get() + .await? + .execute(r#"DELETE FROM subscriptions WHERE uuid = $1"#, &[&uuid]) + .await?; + + ensure!(count == 1, "Only one row must have been deleted"); + + Ok(()) + } + /// Create the tables required to keep track of schema state. If the tables already + /// exist, this function has no operation. 
+ async fn setup_schema(&self) -> Result<()> { + let query = format!( + "CREATE TABLE IF NOT EXISTS {} (version BIGINT PRIMARY KEY);", + MIGRATION_TABLE_NAME, + ); + self.pool.get().await?.execute(query.as_str(), &[]).await?; + Ok(()) + } + + async fn current_version(&self) -> Result> { + let query = format!( + "SELECT version FROM {} ORDER BY version DESC LIMIT 1;", + MIGRATION_TABLE_NAME + ); + let conn = self.pool.get().await?; + let row = conn.query_opt(query.as_str(), &[]).await?; + let res = row.map(|r| r.get(0)); + + Ok(res) + } + + async fn migrated_versions(&self) -> Result> { + let query = format!("SELECT version FROM {};", MIGRATION_TABLE_NAME); + let conn = self.pool.get().await?; + let row = conn + .query(query.as_str(), &[]) + .await + .with_context(|| format!("Failed to execute query: \"{}\"", query))?; + Ok(row.iter().map(|r| r.get(0)).collect()) + } + + async fn apply_migration(&self, version: Version) -> Result<()> { + let migration = self + .migrations + .get(&version) + .ok_or_else(|| anyhow!("Could not retrieve migration with version {}", version))?; + let mut conn = self.pool.get().await?; + let mut inner_tx = conn.transaction().await?; + migration.up(&mut inner_tx).await?; + let query = format!( + "INSERT INTO {} (version) VALUES ($1);", + MIGRATION_TABLE_NAME + ); + let _count = inner_tx + .execute(query.as_str(), &[&migration.version()]) + .await?; + inner_tx.commit().await?; + Ok(()) + } + + async fn revert_migration(&self, version: Version) -> Result<()> { + let migration = self + .migrations + .get(&version) + .ok_or_else(|| anyhow!("Could not retrieve migration with version {}", version))?; + let mut conn = self.pool.get().await?; + let mut inner_tx = conn.transaction().await?; + migration.down(&mut inner_tx).await?; + let query = format!("DELETE FROM {} WHERE version = $1;", MIGRATION_TABLE_NAME); + let _count = inner_tx + .execute(query.as_str(), &[&migration.version()]) + .await?; + inner_tx.commit().await?; + Ok(()) + } + + async fn 
migrations(&self) -> BTreeMap> { + // TODO: Remove copy/paste between db backends + let mut base_migrations = BTreeMap::new(); + for (version, migration) in self.migrations.iter() { + base_migrations.insert(*version, migration.to_base()); + } + base_migrations + } + + async fn get_stats( + &self, + subscription: &str, + start_time: i64, + ) -> Result { + let client = self.pool.get().await?; + let total_machines_count = { + let query = "SELECT COUNT(machine) FROM heartbeats WHERE subscription = $1"; + client + .query_one(query, &[&subscription]) + .await + .with_context(|| format!("Failed to execute query: \"{}\"", query))? + .get(0) + }; + let alive_machines_count = { + let query = + "SELECT COUNT(machine) FROM heartbeats WHERE subscription = $1 AND last_seen > $2 AND (last_event_seen IS NULL OR last_event_seen <= $2)"; + client + .query_one(query, &[&subscription, &start_time]) + .await + .with_context(|| format!("Failed to execute query: \"{}\"", query))? + .get(0) + }; + let active_machines_count = { + let query = + "SELECT COUNT(machine) FROM heartbeats WHERE subscription = $1 AND last_event_seen > $2"; + client + .query_one(query, &[&subscription, &start_time]) + .await + .with_context(|| format!("Failed to execute query: \"{}\"", query))? + .get(0) + }; + let dead_machines_count = { + let query = + "SELECT COUNT(machine) FROM heartbeats WHERE subscription = $1 AND last_seen <= $2 AND (last_event_seen IS NULL OR last_event_seen <= $2)"; + client + .query_one(query, &[&subscription, &start_time]) + .await + .with_context(|| format!("Failed to execute query: \"{}\"", query))? 
+ .get(0) + }; + Ok(SubscriptionStatsCounters::new( + total_machines_count, + alive_machines_count, + active_machines_count, + dead_machines_count, + )) + } + + async fn get_machines( + &self, + subscription: &str, + start_time: i64, + state: Option, + ) -> Result> { + let client = self.pool.get().await?; + + let rows = match state { + None => { + let query = "SELECT machine, ip FROM heartbeats WHERE subscription = $1"; + client + .query(query, &[&subscription]) + .await + .with_context(|| format!("Failed to execute query: \"{}\"", query))? + } + Some(SubscriptionMachineState::Alive) => { + let query = + "SELECT machine, ip FROM heartbeats WHERE subscription = $1 AND (last_event_seen IS NULL OR last_event_seen <= $2) AND last_seen > $2"; + client + .query(query, &[&subscription, &start_time]) + .await + .with_context(|| format!("Failed to execute query: \"{}\"", query))? + } + Some(SubscriptionMachineState::Active) => { + let query = + "SELECT machine, ip FROM heartbeats WHERE subscription = $1 AND last_event_seen > $2"; + client + .query(query, &[&subscription, &start_time]) + .await + .with_context(|| format!("Failed to execute query: \"{}\"", query))? + } + Some(SubscriptionMachineState::Dead) => { + let query = + "SELECT machine, ip FROM heartbeats WHERE subscription = $1 AND (last_event_seen IS NULL OR last_event_seen <= $2) AND last_seen <= $2"; + client + .query(query, &[&subscription, &start_time]) + .await + .with_context(|| format!("Failed to execute query: \"{}\"", query))? + } + }; + + let mut result = Vec::new(); + for row in rows { + result.push(SubscriptionMachine::new( + row.try_get("machine")?, + row.try_get("ip")?, + )) + } + Ok(result) + } +} + +#[cfg(test)] +pub mod tests { + + use serial_test::serial; + + use crate::database::schema::{self, Migrator}; + use crate::migration; + + use super::*; + use std::env; + use std::str::FromStr; + + async fn drop_migrations_table(db: &PostgresDatabase) -> Result<()> { + db.pool + .get() + .await? 
+ .execute( + format!("DROP TABLE IF EXISTS {};", MIGRATION_TABLE_NAME).as_str(), + &[], + ) + .await?; + Ok(()) + } + + async fn db_with_migrations() -> Result> { + let mut db = PostgresDatabase::new(&get_config()) + .await + .expect("Could not connect to database"); + schema::postgres::register_migrations(&mut db); + drop_migrations_table(&db).await?; + Ok(Arc::new(db)) + } + + pub fn get_config() -> Postgres { + let host = env::var("POSTGRES_HOST") + .expect("$POSTGRES_HOST is not set") + .to_owned(); + let port = u16::from_str( + env::var("POSTGRES_PORT") + .expect("$POSTGRES_PORT is not set") + .to_owned() + .as_str(), + ) + .expect("Could not convert port string to u16"); + let user = env::var("POSTGRES_USER") + .expect("$POSTGRES_USER is not set") + .to_owned(); + let password = env::var("POSTGRES_PASSWORD") + .expect("$POSTGRES_PASSWORD is not set") + .to_owned(); + let dbname = env::var("POSTGRES_DBNAME") + .expect("$POSTGRES_DBNAME is not set") + .to_owned(); + let ssl_mode = PostgresSslMode::Disable; + let ca_file = None; + + Postgres::new( + &host, + port, + &dbname, + &user, + &password, + ssl_mode, + ca_file, + Some(50), + ) + } + + #[tokio::test] + #[serial] + async fn test_open_and_close() -> Result<()> { + PostgresDatabase::new(&get_config()) + .await + .expect("Could not connect to database"); + Ok(()) + } + + #[tokio::test] + #[serial] + async fn test_bookmarks() -> Result<()> { + crate::database::tests::test_bookmarks(db_with_migrations().await?).await?; + Ok(()) + } + + #[tokio::test] + #[serial] + async fn test_heartbeats() -> Result<()> { + crate::database::tests::test_heartbeats(db_with_migrations().await?).await?; + Ok(()) + } + + #[tokio::test] + #[serial] + async fn test_heartbeats_cache() -> Result<()> { + crate::database::tests::test_heartbeats_cache(db_with_migrations().await?).await?; + Ok(()) + } + + #[tokio::test] + #[serial] + async fn test_subscriptions() -> Result<()> { + 
crate::database::tests::test_subscriptions(db_with_migrations().await?).await?; + Ok(()) + } + + struct CreateUsers; + migration!(CreateUsers, 1, "create users table"); + + #[async_trait] + impl PostgresMigration for CreateUsers { + async fn up(&self, tx: &mut Transaction) -> Result<()> { + tx.execute("CREATE TABLE users (id BIGINT PRIMARY KEY);", &[]) + .await?; + Ok(()) + } + + async fn down(&self, tx: &mut Transaction) -> Result<()> { + tx.execute("DROP TABLE users;", &[]).await?; + Ok(()) + } + } + + #[tokio::test] + #[serial] + async fn test_register() -> Result<()> { + let mut db = PostgresDatabase::new(&get_config()) + .await + .expect("Could not connect to database"); + + drop_migrations_table(&db).await?; + db.register_migration(Arc::new(CreateUsers)); + + db.setup_schema().await.expect("Could not setup schema"); + + let db_arc = Arc::new(db); + + let migrator = Migrator::new(db_arc.clone()); + + migrator.up(None, false).await.unwrap(); + + assert_eq!(db_arc.current_version().await.unwrap(), Some(1)); + + migrator.down(None, false).await.unwrap(); + + assert_eq!(db_arc.current_version().await.unwrap(), None); + Ok(()) + } + + #[tokio::test] + #[serial] + async fn test_stats() -> Result<()> { + crate::database::tests::test_stats_and_machines(db_with_migrations().await?).await?; + Ok(()) + } +} diff --git a/common/src/database/schema/mod.rs b/common/src/database/schema/mod.rs new file mode 100644 index 0000000..dcee775 --- /dev/null +++ b/common/src/database/schema/mod.rs @@ -0,0 +1,195 @@ +// A lot of the following code comes from +// https://github.com/SkylerLipthay/schemamama. It was not used directly has +// some parts needed to be modify to be integrated to OpenWEC. 
As stated by its +// license (MIT), we include below its copyright notice and permission notice: +// +// The MIT License (MIT) +// +// Copyright (c) 2015 Skyler Lipthay +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. +// +// +use anyhow::Result; +use log::info; +use std::{collections::BTreeSet, sync::Arc}; + +use super::Database; + +pub mod postgres; +pub mod sqlite; + +/// The version type alias used to uniquely reference migrations. +pub type Version = i64; + +/// All migrations will implement this trait, and a migration trait specific to the chosen adapter. +/// This trait defines the metadata for tracking migration sequence and for human reference. +pub trait Migration { + /// An ordered (but not necessarily sequential), unique identifier for this migration. + /// Registered migrations will be applied in ascending order by version. + fn version(&self) -> Version; + + /// A message describing the effects of this migration. 
+ fn description(&self) -> String; +} + +pub struct MigrationBase { + version: Version, + description: String, +} +impl MigrationBase { + pub fn new(version: Version, description: String) -> Self { + MigrationBase { + version, + description, + } + } +} +impl Migration for MigrationBase { + fn version(&self) -> Version { + self.version + } + + fn description(&self) -> String { + self.description.clone() + } +} + +#[macro_export] +macro_rules! migration { + ($ty:ident, $version:expr, $description:expr) => { + impl $crate::database::schema::Migration for $ty { + fn version(&self) -> $crate::database::schema::Version { + $version + } + fn description(&self) -> String { + $description.into() + } + } + }; +} + +/// Maintains an ordered collection of migrations to utilize. +pub struct Migrator { + db: Arc, +} + +impl Migrator { + /// Create a migrator with a given adapter. + pub fn new(db: Arc) -> Self { + Migrator { db } + } + + /// Rollback to the specified version (exclusive), or rollback to the state before any + /// registered migrations were applied if `None` is specified. + pub async fn down(&self, to: Option, no_op: bool) -> Result> { + let mut rollbacked_migrations = BTreeSet::new(); + let from = self.db.current_version().await?; + if from.is_none() { + return Ok(rollbacked_migrations); + } + + let migrated_versions = self.db.migrated_versions().await?; + let migrations = self.db.migrations().await; + let targets = migrations + .iter() + // Rollback migrations from latest to oldest: + .rev() + // Rollback the current version, and all versions downwards until the specified version + // (exclusive): + .filter(|&(&v, _)| within_range(v, to, from)) + // Rollback only the migrations that are actually already migrated (in the case that + // some intermediary migrations were never executed). 
+ .filter(|&(v, _)| migrated_versions.contains(v)); + + for (version, migration) in targets { + let description = migration.description(); + rollbacked_migrations.insert(*version); + if !no_op { + info!("Reverting migration {:?}: {}", version, description); + self.db.revert_migration(*version).await?; + } + } + + Ok(rollbacked_migrations) + } + + /// Migrate to the specified version (inclusive). + pub async fn up(&self, to: Option, no_op: bool) -> Result> { + let migrated_versions = self.db.migrated_versions().await?; + let migrations = self.db.migrations().await; + let targets = migrations + .iter() + // Execute all versions upwards until the specified version (inclusive): + .filter(|&(&v, _)| within_range(v, None, to)) + // Execute only the migrations that are actually not already migrated (in the case that + // some intermediary migrations were previously executed). + .filter(|&(v, _)| !migrated_versions.contains(v)); + + let mut applied_migrations = BTreeSet::new(); + for (version, migration) in targets { + let description = migration.description(); + applied_migrations.insert(*version); + if !no_op { + info!("Applying migration {:?}: {}", version, description); + self.db.apply_migration(*version).await? + } + } + + Ok(applied_migrations) + } +} + +// Tests whether a `Version` is within a range defined by the exclusive `low` and the inclusive +// `high` bounds. 
+fn within_range(version: Version, low: Option, high: Option) -> bool { + match (low, high) { + (None, None) => true, + (Some(low), None) => version > low, + (None, Some(high)) => version <= high, + (Some(low), Some(high)) => version > low && version <= high, + } +} + +#[test] +fn test_within_range() { + // no lower or upper bound + assert!(within_range(0, None, None)); + assert!(within_range(42, None, None)); + assert!(within_range(100000, None, None)); + + // both lower and upper bounds + assert!(!within_range(1, Some(2), Some(5))); + assert!(!within_range(2, Some(2), Some(5))); + assert!(within_range(3, Some(2), Some(5))); + assert!(within_range(5, Some(2), Some(5))); + assert!(!within_range(6, Some(2), Some(5))); + + // lower bound only + assert!(!within_range(0, Some(5), None)); + assert!(!within_range(4, Some(5), None)); + assert!(!within_range(5, Some(5), None)); + assert!(within_range(6, Some(5), None)); + assert!(within_range(60, Some(5), None)); + + // upper bound only + assert!(within_range(0, None, Some(5))); + assert!(within_range(5, None, Some(5))); + assert!(!within_range(6, None, Some(5))); +} diff --git a/common/src/database/schema/postgres/_001_create_subscriptions_table.rs b/common/src/database/schema/postgres/_001_create_subscriptions_table.rs new file mode 100644 index 0000000..ddf2a76 --- /dev/null +++ b/common/src/database/schema/postgres/_001_create_subscriptions_table.rs @@ -0,0 +1,40 @@ +use anyhow::Result; +use async_trait::async_trait; +use deadpool_postgres::Transaction; + +use crate::{database::postgres::PostgresMigration, migration}; + +pub(super) struct CreateSubscriptionsTable; +migration!(CreateSubscriptionsTable, 1, "create subscriptions table"); + +#[async_trait] +impl PostgresMigration for CreateSubscriptionsTable { + async fn up(&self, tx: &mut Transaction) -> Result<()> { + tx.execute( + "CREATE TABLE IF NOT EXISTS subscriptions ( + uuid TEXT NOT NULL UNIQUE, + version TEXT NOT NULL UNIQUE, + name TEXT NOT NULL UNIQUE, + query 
TEXT NOT NULL, + heartbeat_interval INT4, + connection_retry_count INT4, + connection_retry_interval INT4, + max_time INT4, + max_envelope_size INT4, + enabled BOOLEAN, + read_existing_events BOOLEAN, + outputs TEXT NOT NULL, + PRIMARY KEY (uuid) + );", + &[], + ) + .await?; + Ok(()) + } + + async fn down(&self, tx: &mut Transaction) -> Result<()> { + tx.execute("DROP TABLE IF EXISTS subscriptions;", &[]) + .await?; + Ok(()) + } +} diff --git a/common/src/database/schema/postgres/_002_create_bookmarks_table.rs b/common/src/database/schema/postgres/_002_create_bookmarks_table.rs new file mode 100644 index 0000000..3a206d3 --- /dev/null +++ b/common/src/database/schema/postgres/_002_create_bookmarks_table.rs @@ -0,0 +1,35 @@ +use anyhow::Result; +use async_trait::async_trait; +use deadpool_postgres::Transaction; + +use crate::{database::postgres::PostgresMigration, migration}; + +pub(super) struct CreateBookmarksTable; +migration!(CreateBookmarksTable, 2, "create bookmarks table"); + +#[async_trait] +impl PostgresMigration for CreateBookmarksTable { + async fn up(&self, tx: &mut Transaction) -> Result<()> { + tx.execute( + "CREATE TABLE IF NOT EXISTS bookmarks ( + machine TEXT NOT NULL, + subscription TEXT NOT NULL, + bookmark TEXT NOT NULL, + PRIMARY KEY (machine, subscription), + CONSTRAINT fk_subscription + FOREIGN KEY (subscription) + REFERENCES subscriptions(uuid) + ON UPDATE CASCADE + ON DELETE CASCADE + );", + &[], + ) + .await?; + Ok(()) + } + + async fn down(&self, tx: &mut Transaction) -> Result<()> { + tx.execute("DROP TABLE IF EXISTS bookmarks;", &[]).await?; + Ok(()) + } +} diff --git a/common/src/database/schema/postgres/_003_create_heartbeats_table.rs b/common/src/database/schema/postgres/_003_create_heartbeats_table.rs new file mode 100644 index 0000000..02b3ddb --- /dev/null +++ b/common/src/database/schema/postgres/_003_create_heartbeats_table.rs @@ -0,0 +1,37 @@ +use anyhow::Result; +use async_trait::async_trait; +use 
deadpool_postgres::Transaction; + +use crate::{database::postgres::PostgresMigration, migration}; + +pub(super) struct CreateHeartbeatsTable; +migration!(CreateHeartbeatsTable, 3, "create heartbeats table"); + +#[async_trait] +impl PostgresMigration for CreateHeartbeatsTable { + async fn up(&self, tx: &mut Transaction) -> Result<()> { + tx.execute( + "CREATE TABLE IF NOT EXISTS heartbeats ( + machine TEXT NOT NULL, + ip TEXT NOT NULL, + subscription TEXT NOT NULL, + first_seen BIGINT, + last_seen BIGINT, + PRIMARY KEY (machine, subscription), + CONSTRAINT fk_subscription + FOREIGN KEY (subscription) + REFERENCES subscriptions(uuid) + ON UPDATE CASCADE + ON DELETE CASCADE + );", + &[], + ) + .await?; + Ok(()) + } + + async fn down(&self, tx: &mut Transaction) -> Result<()> { + tx.execute("DROP TABLE IF EXISTS heartbeats;", &[]).await?; + Ok(()) + } +} diff --git a/common/src/database/schema/postgres/_004_add_last_event_seen_field_in_heartbeats_table.rs b/common/src/database/schema/postgres/_004_add_last_event_seen_field_in_heartbeats_table.rs new file mode 100644 index 0000000..b73df3f --- /dev/null +++ b/common/src/database/schema/postgres/_004_add_last_event_seen_field_in_heartbeats_table.rs @@ -0,0 +1,33 @@ +use anyhow::Result; +use async_trait::async_trait; +use deadpool_postgres::Transaction; + +use crate::{database::postgres::PostgresMigration, migration}; + +pub(super) struct AddLastEventSeenFieldInHeartbeatsTable; +migration!( + AddLastEventSeenFieldInHeartbeatsTable, + 4, + "add last_event_seen field in heartbeats table" +); + +#[async_trait] +impl PostgresMigration for AddLastEventSeenFieldInHeartbeatsTable { + async fn up(&self, tx: &mut Transaction) -> Result<()> { + tx.execute( + "ALTER TABLE heartbeats ADD COLUMN IF NOT EXISTS last_event_seen BIGINT;", + &[], + ) + .await?; + Ok(()) + } + + async fn down(&self, tx: &mut Transaction) -> Result<()> { + tx.execute( + "ALTER TABLE heartbeats DROP COLUMN IF EXISTS last_event_seen", + &[], + ) + .await?; + 
Ok(()) + } +} diff --git a/common/src/database/schema/postgres/_005_add_uri_field_in_subscriptions_table.rs b/common/src/database/schema/postgres/_005_add_uri_field_in_subscriptions_table.rs new file mode 100644 index 0000000..d057014 --- /dev/null +++ b/common/src/database/schema/postgres/_005_add_uri_field_in_subscriptions_table.rs @@ -0,0 +1,30 @@ +use anyhow::Result; +use async_trait::async_trait; +use deadpool_postgres::Transaction; + +use crate::{database::postgres::PostgresMigration, migration}; + +pub(super) struct AddUriFieldInSubscriptionsTable; +migration!( + AddUriFieldInSubscriptionsTable, + 5, + "add uri field in subscriptions table" +); + +#[async_trait] +impl PostgresMigration for AddUriFieldInSubscriptionsTable { + async fn up(&self, tx: &mut Transaction) -> Result<()> { + tx.execute( + "ALTER TABLE subscriptions ADD COLUMN IF NOT EXISTS uri TEXT;", + &[], + ) + .await?; + Ok(()) + } + + async fn down(&self, tx: &mut Transaction) -> Result<()> { + tx.execute("ALTER TABLE subscriptions DROP COLUMN IF EXISTS uri", &[]) + .await?; + Ok(()) + } +} diff --git a/common/src/database/schema/postgres/mod.rs b/common/src/database/schema/postgres/mod.rs new file mode 100644 index 0000000..02bed3a --- /dev/null +++ b/common/src/database/schema/postgres/mod.rs @@ -0,0 +1,25 @@ +use std::sync::Arc; + +use crate::database::postgres::PostgresDatabase; + +use self::{ + _001_create_subscriptions_table::CreateSubscriptionsTable, + _002_create_bookmarks_table::CreateBookmarksTable, + _003_create_heartbeats_table::CreateHeartbeatsTable, + _004_add_last_event_seen_field_in_heartbeats_table::AddLastEventSeenFieldInHeartbeatsTable, + _005_add_uri_field_in_subscriptions_table::AddUriFieldInSubscriptionsTable, +}; + +mod _001_create_subscriptions_table; +mod _002_create_bookmarks_table; +mod _003_create_heartbeats_table; +mod _004_add_last_event_seen_field_in_heartbeats_table; +mod _005_add_uri_field_in_subscriptions_table; + +pub fn register_migrations(postgres_db: &mut 
PostgresDatabase) { + postgres_db.register_migration(Arc::new(CreateSubscriptionsTable)); + postgres_db.register_migration(Arc::new(CreateBookmarksTable)); + postgres_db.register_migration(Arc::new(CreateHeartbeatsTable)); + postgres_db.register_migration(Arc::new(AddLastEventSeenFieldInHeartbeatsTable)); + postgres_db.register_migration(Arc::new(AddUriFieldInSubscriptionsTable)); +} diff --git a/common/src/database/schema/sqlite/_001_create_subscriptions_table.rs b/common/src/database/schema/sqlite/_001_create_subscriptions_table.rs new file mode 100644 index 0000000..735ab83 --- /dev/null +++ b/common/src/database/schema/sqlite/_001_create_subscriptions_table.rs @@ -0,0 +1,39 @@ +use anyhow::{anyhow, Result}; +use rusqlite::Connection; + +use crate::database::sqlite::SQLiteMigration; +use crate::migration; + +pub(super) struct CreateSubscriptionsTable; +migration!(CreateSubscriptionsTable, 1, "create subscriptions table"); + +impl SQLiteMigration for CreateSubscriptionsTable { + fn up(&self, conn: &Connection) -> Result<()> { + conn.execute( + "CREATE TABLE IF NOT EXISTS subscriptions ( + uuid TEXT NOT NULL UNIQUE, + version TEXT NOT NULL UNIQUE, + name TEXT NOT NULL UNIQUE, + query TEXT NOT NULL, + heartbeat_interval INTEGER, + connection_retry_count INTEGER, + connection_retry_interval INTEGER, + max_time INTEGER, + max_envelope_size INTEGER, + enabled INTEGER, + read_existing_events INTEGER, + outputs TEXT NOT NULL, + PRIMARY KEY (uuid) + )", + [], + ) + .map_err(|err| anyhow!("SQLiteError: {}", err))?; + Ok(()) + } + + fn down(&self, conn: &Connection) -> Result<()> { + conn.execute("DROP TABLE subscriptions;", []) + .map_err(|err| anyhow!("SQLiteError: {}", err))?; + Ok(()) + } +} diff --git a/common/src/database/schema/sqlite/_002_create_bookmarks_table.rs b/common/src/database/schema/sqlite/_002_create_bookmarks_table.rs new file mode 100644 index 0000000..e17d291 --- /dev/null +++ b/common/src/database/schema/sqlite/_002_create_bookmarks_table.rs @@ -0,0 
+1,31 @@ +use anyhow::{anyhow, Result}; +use rusqlite::Connection; + +use crate::database::sqlite::SQLiteMigration; +use crate::migration; + +pub(super) struct CreateBookmarksTable; +migration!(CreateBookmarksTable, 2, "create bookmarks table"); + +impl SQLiteMigration for CreateBookmarksTable { + fn up(&self, conn: &Connection) -> Result<()> { + conn.execute( + "CREATE TABLE IF NOT EXISTS bookmarks ( + machine TEXT NOT NULL, + subscription TEXT NOT NULL + REFERENCES subscriptions(uuid) ON UPDATE CASCADE ON DELETE CASCADE, + bookmark TEXT NOT NULL, + PRIMARY KEY (machine, subscription) + )", + [], + ) + .map_err(|err| anyhow!("SQLiteError: {}", err))?; + Ok(()) + } + + fn down(&self, conn: &Connection) -> Result<()> { + conn.execute("DROP TABLE bookmarks;", []) + .map_err(|err| anyhow!("SQLiteError: {}", err))?; + Ok(()) + } +} diff --git a/common/src/database/schema/sqlite/_003_create_heartbeats_table.rs b/common/src/database/schema/sqlite/_003_create_heartbeats_table.rs new file mode 100644 index 0000000..b6a046c --- /dev/null +++ b/common/src/database/schema/sqlite/_003_create_heartbeats_table.rs @@ -0,0 +1,33 @@ +use anyhow::{anyhow, Result}; +use rusqlite::Connection; + +use crate::database::sqlite::SQLiteMigration; +use crate::migration; + +pub(super) struct CreateHeartbeatsTable; +migration!(CreateHeartbeatsTable, 3, "create heartbeats table"); + +impl SQLiteMigration for CreateHeartbeatsTable { + fn up(&self, conn: &Connection) -> Result<()> { + conn.execute( + "CREATE TABLE IF NOT EXISTS heartbeats ( + machine TEXT NOT NULL, + ip TEXT NOT NULL, + subscription TEXT NOT NULL + REFERENCES subscriptions(uuid) ON UPDATE CASCADE ON DELETE CASCADE, + first_seen INTEGER, + last_seen INTEGER, + PRIMARY KEY (machine, subscription) + )", + [], + ) + .map_err(|err| anyhow!("SQLiteError: {}", err))?; + Ok(()) + } + + fn down(&self, conn: &Connection) -> Result<()> { + conn.execute("DROP TABLE heartbeats;", []) + .map_err(|err| anyhow!("SQLiteError: {}", err))?; + 
Ok(()) + } +} diff --git a/common/src/database/schema/sqlite/_004_add_last_event_seen_field_in_heartbeats_table.rs b/common/src/database/schema/sqlite/_004_add_last_event_seen_field_in_heartbeats_table.rs new file mode 100644 index 0000000..e37eafe --- /dev/null +++ b/common/src/database/schema/sqlite/_004_add_last_event_seen_field_in_heartbeats_table.rs @@ -0,0 +1,29 @@ +use anyhow::{anyhow, Result}; +use rusqlite::Connection; + +use crate::database::sqlite::SQLiteMigration; +use crate::migration; + +pub(super) struct AddLastEventSeenFieldInHeartbeatsTable; +migration!( + AddLastEventSeenFieldInHeartbeatsTable, + 4, + "add last_event_seen field in heartbeats table" +); + +impl SQLiteMigration for AddLastEventSeenFieldInHeartbeatsTable { + fn up(&self, conn: &Connection) -> Result<()> { + conn.execute( + "ALTER TABLE heartbeats ADD COLUMN last_event_seen INTEGER", + [], + ) + .map_err(|err| anyhow!("SQLiteError: {}", err))?; + Ok(()) + } + + fn down(&self, conn: &Connection) -> Result<()> { + conn.execute("ALTER TABLE heartbeats DROP COLUMN last_event_seen", []) + .map_err(|err| anyhow!("SQLiteError: {}", err))?; + Ok(()) + } +} diff --git a/common/src/database/schema/sqlite/_005_add_uri_field_in_subscriptions_table.rs b/common/src/database/schema/sqlite/_005_add_uri_field_in_subscriptions_table.rs new file mode 100644 index 0000000..d1162b0 --- /dev/null +++ b/common/src/database/schema/sqlite/_005_add_uri_field_in_subscriptions_table.rs @@ -0,0 +1,26 @@ +use anyhow::{anyhow, Result}; +use rusqlite::Connection; + +use crate::database::sqlite::SQLiteMigration; +use crate::migration; + +pub(super) struct AddUriFieldInSubscriptionsTable; +migration!( + AddUriFieldInSubscriptionsTable, + 5, + "add uri field in subscriptions table" +); + +impl SQLiteMigration for AddUriFieldInSubscriptionsTable { + fn up(&self, conn: &Connection) -> Result<()> { + conn.execute("ALTER TABLE subscriptions ADD COLUMN uri TEXT", []) + .map_err(|err| anyhow!("SQLiteError: {}", err))?; + 
Ok(()) + } + + fn down(&self, conn: &Connection) -> Result<()> { + conn.execute("ALTER TABLE subscriptions DROP COLUMN uri", []) + .map_err(|err| anyhow!("SQLiteError: {}", err))?; + Ok(()) + } +} diff --git a/common/src/database/schema/sqlite/mod.rs b/common/src/database/schema/sqlite/mod.rs new file mode 100644 index 0000000..cf48f8f --- /dev/null +++ b/common/src/database/schema/sqlite/mod.rs @@ -0,0 +1,25 @@ +use std::sync::Arc; + +use crate::database::sqlite::SQLiteDatabase; + +use self::{ + _001_create_subscriptions_table::CreateSubscriptionsTable, + _002_create_bookmarks_table::CreateBookmarksTable, + _003_create_heartbeats_table::CreateHeartbeatsTable, + _004_add_last_event_seen_field_in_heartbeats_table::AddLastEventSeenFieldInHeartbeatsTable, + _005_add_uri_field_in_subscriptions_table::AddUriFieldInSubscriptionsTable, +}; + +mod _001_create_subscriptions_table; +mod _002_create_bookmarks_table; +mod _003_create_heartbeats_table; +mod _004_add_last_event_seen_field_in_heartbeats_table; +mod _005_add_uri_field_in_subscriptions_table; + +pub fn register_migrations(sqlite_db: &mut SQLiteDatabase) { + sqlite_db.register_migration(Arc::new(CreateSubscriptionsTable)); + sqlite_db.register_migration(Arc::new(CreateBookmarksTable)); + sqlite_db.register_migration(Arc::new(CreateHeartbeatsTable)); + sqlite_db.register_migration(Arc::new(AddLastEventSeenFieldInHeartbeatsTable)); + sqlite_db.register_migration(Arc::new(AddUriFieldInSubscriptionsTable)); +} diff --git a/common/src/database/sqlite.rs b/common/src/database/sqlite.rs new file mode 100644 index 0000000..dcc0250 --- /dev/null +++ b/common/src/database/sqlite.rs @@ -0,0 +1,1018 @@ +// Some of the following code is inspired from +// https://github.com/SkylerLipthay/schemamama_postgres. 
As stated by its +// license (MIT), we include below its copyright notice and permission notice: +// +// The MIT License (MIT) +// +// Copyright (c) 2015 Skyler Lipthay +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. 
+// +// +use anyhow::{anyhow, ensure, Context, Error, Result}; +use async_trait::async_trait; +use deadpool_sqlite::{Config, Pool, Runtime}; +use log::{error, warn}; +use rusqlite::types::Type; +use rusqlite::{named_params, params, Connection, OptionalExtension, Row}; +use std::collections::btree_map::Entry::Vacant; +use std::collections::{BTreeMap, BTreeSet}; +use std::sync::Arc; +use std::time::SystemTime; + +use crate::bookmark::BookmarkData; +use crate::database::Database; +use crate::heartbeat::{HeartbeatData, HeartbeatsCache}; +use crate::subscription::{ + SubscriptionData, SubscriptionMachine, SubscriptionMachineState, SubscriptionStatsCounters, +}; + +use super::schema::{Migration, MigrationBase, Version}; + +const MIGRATION_TABLE_NAME: &str = "__schema_migrations"; +/// A migration to be used within a Sqlite connection. +pub trait SQLiteMigration: Migration { + /// Called when this migration is to be executed. + fn up(&self, conn: &Connection) -> Result<()>; + + /// Called when this migration is to be reversed. + fn down(&self, conn: &Connection) -> Result<()>; + + fn to_base(&self) -> Arc { + Arc::new(MigrationBase::new(self.version(), self.description())) + } +} + +pub struct SQLiteDatabase { + pool: Pool, + migrations: BTreeMap>, +} + +impl SQLiteDatabase { + pub async fn new(path: &str) -> Result { + let config = Config::new(path); + let pool = config.create_pool(Runtime::Tokio1)?; + + let db = SQLiteDatabase { + pool, + migrations: BTreeMap::new(), + }; + + Ok(db) + } + + /// Register a migration. If a migration with the same version is already registered, a warning + /// is logged and the registration fails. 
+ pub fn register_migration(&mut self, migration: Arc) { + let version = migration.version(); + if let Vacant(e) = self.migrations.entry(version) { + e.insert(migration); + } else { + warn!("Migration with version {:?} is already registered", version); + } + } + + async fn get_subscription_by_field( + &self, + field: &'static str, + value: String, + ) -> Result> { + self.pool + .get() + .await? + .interact(move |conn| { + conn.query_row( + format!( + r#"SELECT * + FROM subscriptions + WHERE {} = :value"#, + field + ) + .as_str(), + &[(":value", &value)], + row_to_subscription, + ) + .optional() + .map_err(|err| anyhow!(err)) + }) + .await + .map_err(|err| anyhow!(format!("{}", err)))? + } + + async fn get_heartbeats_by_field( + &self, + field: &'static str, + field_value: String, + subscription: Option, + ) -> Result> { + self.pool + .get() + .await? + .interact(move |conn| { + if let Some(value) = subscription { + let mut statement = conn.prepare( + format!( + r#"SELECT * + FROM heartbeats + JOIN subscriptions ON subscriptions.uuid = heartbeats.subscription + WHERE {} = :field_value + AND subscription = :subscription"#, + field + ) + .as_str() + )?; + let rows = statement.query_map(&[(":field_value", &field_value), (":subscription", &value)], row_to_heartbeat)?; + + let mut heartbeats = Vec::new(); + for heartbeat in rows { + heartbeats.push(heartbeat?); + } + Ok(heartbeats) + } else { + let mut statement = conn.prepare( + format!( + r#"SELECT * + FROM heartbeats + JOIN subscriptions ON subscriptions.uuid = heartbeats.subscription + WHERE {} = :field_value"#, + field + ) + .as_str() + )?; + let rows = statement.query_map(&[(":field_value", &field_value)], row_to_heartbeat)?; + let mut heartbeats = Vec::new(); + for heartbeat in rows { + heartbeats.push(heartbeat?); + } + Ok(heartbeats) + } + }) + .await + .map_err(|err| anyhow!(format!("{}", err)))? 
+ } +} + +fn row_to_subscription(row: &Row) -> Result { + let outputs_str: String = row.get("outputs")?; + let outputs = match serde_json::from_str(&outputs_str) { + Ok(outputs) => outputs, + Err(e) => { + error!( + "Failed to parse subscription output : {}. Subscription output is {}", + e, outputs_str + ); + // We are forced to create a rusqlite::Error + return Err(rusqlite::Error::InvalidColumnType( + 9, + "outputs".to_owned(), + Type::Text, + )); + } + }; + Ok(SubscriptionData::from( + row.get("uuid")?, + row.get("version")?, + row.get("name")?, + row.get("uri")?, + row.get("query")?, + row.get("heartbeat_interval")?, + row.get("connection_retry_count")?, + row.get("connection_retry_interval")?, + row.get("max_time")?, + row.get("max_envelope_size")?, + row.get("enabled")?, + row.get("read_existing_events")?, + outputs, + )) +} + +fn row_to_heartbeat(row: &Row) -> Result { + let subscription = row_to_subscription(row)?; + let heartbeat = HeartbeatData::new( + row.get("machine")?, + row.get("ip")?, + subscription, + row.get("first_seen")?, + row.get("last_seen")?, + row.get("last_event_seen")?, + ); + Ok(heartbeat) +} + +#[async_trait] +impl Database for SQLiteDatabase { + async fn get_bookmark(&self, machine: &str, subscription: &str) -> Result> { + let machine_owned = machine.to_string(); + let subscription_owned = subscription.to_string(); + self.pool + .get() + .await? + .interact(move |conn| { + conn.query_row( + r#"SELECT bookmark FROM bookmarks + WHERE machine = :machine + AND subscription = :subscription"#, + &[ + (":machine", &machine_owned), + (":subscription", &subscription_owned), + ], + |row| row.get(0), + ) + .optional() + .map_err(|err| anyhow!(err)) + }) + .await + .map_err(|err| anyhow!(format!("{}", err)))? + } + + async fn get_bookmarks(&self, subscription: &str) -> Result> { + let subscription_owned = subscription.to_string(); + self.pool + .get() + .await? 
+ .interact(move |conn| { + let mut statement = conn.prepare( + r#"SELECT machine, bookmark FROM bookmarks + WHERE subscription = :subscription"#, + )?; + let rows = statement.query_map(&[ + (":subscription", &subscription_owned), + ], |row| Ok(BookmarkData { + machine: row.get(0)?, + bookmark: row.get(1)?, + subscription: subscription_owned.clone(), + }))?; + + let mut bookmarks = Vec::new(); + for bookmark in rows { + bookmarks.push(bookmark?); + } + Ok(bookmarks) + }) + .await + .map_err(|err| anyhow!(format!("{}", err)))? + } + + async fn store_bookmark( + &self, + machine: &str, + subscription: &str, + bookmark: &str, + ) -> Result<()> { + let machine_s = machine.to_string(); + let subscription_s = subscription.to_string(); + let bookmark_s = bookmark.to_string(); + let count = self + .pool + .get() + .await? + .interact(move |conn| { + conn.execute( + r#"INSERT INTO bookmarks(machine, subscription, bookmark) + VALUES (:machine, :subscription, :bookmark) + ON CONFLICT (machine, subscription) DO + UPDATE SET bookmark = excluded.bookmark"#, + &[ + (":machine", &machine_s), + (":subscription", &subscription_s), + (":bookmark", &bookmark_s), + ], + ) + }) + .await + .map_err(|err| anyhow!(format!("{}", err)))??; + + ensure!(count == 1, "Only one row must have been updated"); + + Ok(()) + } + async fn delete_bookmarks( + &self, + machine: Option<&str>, + subscription: Option<&str>, + ) -> Result<()> { + let client = self.pool.get().await?; + let future = match (machine, subscription) { + (Some(machine), Some(subscription)) => { + let machine = machine.to_owned(); + let subscription = subscription.to_owned(); + client.interact(move |conn| { + conn.execute("DELETE FROM bookmarks WHERE machine = ?1 AND subscription = ?2", params![machine, subscription]) + }).await + } + (Some(machine), None) => { + let machine = machine.to_owned(); + client.interact(move |conn| { + conn.execute("DELETE FROM bookmarks WHERE machine = ?1", params![machine]) + }).await + } + (None, 
Some(subscription)) => { + let subscription = subscription.to_owned(); + client.interact(move |conn| { + conn.execute("DELETE FROM bookmarks WHERE subscription = ?1", params![subscription]) + }).await + }, + (None, None) => { + client.interact(move |conn| { + conn.execute("DELETE FROM bookmarks", []) + }).await + } + }; + future.map_err(|err| anyhow!(format!("{}", err)))??; + Ok(()) + + } + + async fn get_heartbeats_by_machine( + &self, + machine: &str, + subscription: Option<&str>, + ) -> Result> { + self.get_heartbeats_by_field( + "machine", + machine.to_string(), + subscription.map(|s| s.to_owned()), + ) + .await + } + + async fn get_heartbeats_by_ip( + &self, + ip: &str, + subscription: Option<&str>, + ) -> Result> { + self.get_heartbeats_by_field("ip", ip.to_string(), subscription.map(|s| s.to_owned())) + .await + } + + async fn get_heartbeats(&self) -> Result> { + self.pool + .get() + .await? + .interact(move |conn| { + let mut statement = conn.prepare( + r#"SELECT * + FROM heartbeats + JOIN subscriptions ON subscriptions.uuid = heartbeats.subscription + "#, + )?; + let rows = statement.query_map((), row_to_heartbeat)?; + + let mut heartbeats = Vec::new(); + for heartbeat in rows { + heartbeats.push(heartbeat?); + } + Ok(heartbeats) + }) + .await + .map_err(|err| anyhow!(format!("{}", err)))? + } + + async fn get_heartbeats_by_subscription( + &self, + subscription: &str, + ) -> Result> { + let subscription_owned = subscription.to_string(); + self.pool + .get() + .await? + .interact(move |conn| { + let mut statement = conn.prepare( + r#"SELECT * + FROM heartbeats + JOIN subscriptions ON subscriptions.uuid = heartbeats.subscription + WHERE subscription = :subscription"#, + )?; + let rows = statement + .query_map(&[(":subscription", &subscription_owned)], row_to_heartbeat)?; + + let mut heartbeats = Vec::new(); + for heartbeat in rows { + heartbeats.push(heartbeat?); + } + Ok(heartbeats) + }) + .await + .map_err(|err| anyhow!(format!("{}", err)))? 
+ } + + async fn store_heartbeat( + &self, + machine: &str, + ip: String, + subscription: &str, + is_event: bool, + ) -> Result<()> { + let now = SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH)? + .as_secs(); + let machine_owned = machine.to_string(); + let subscription_owned = subscription.to_string(); + + let query = if is_event { + r#"INSERT INTO heartbeats(machine, ip, subscription, first_seen, last_seen, last_event_seen) + VALUES (?1, ?2, ?3, ?4, ?4, ?4) + ON CONFLICT (machine, subscription) DO + UPDATE SET last_seen = excluded.last_seen, + last_event_seen = excluded.last_event_seen"# + } else { + r#"INSERT INTO heartbeats(machine, ip, subscription, first_seen, last_seen, last_event_seen) + VALUES (?1, ?2, ?3, ?4, ?4, NULL) + ON CONFLICT (machine, subscription) DO + UPDATE SET last_seen = excluded.last_seen"# + }; + + let count = self + .pool + .get() + .await? + .interact(move |conn| { + conn.execute( + query, + params![&machine_owned, &ip, &subscription_owned, now], + ) + }) + .await + .map_err(|err| anyhow!(format!("{}", err)))??; + + ensure!(count == 1, "Only one row must have been updated"); + + Ok(()) + } + + async fn store_heartbeats(&self, heartbeats: &HeartbeatsCache) -> Result<()> { + let client = self.pool.get().await?; + // TODO: remove this clone, maybe use an Arc + let heartbeats_cloned = heartbeats.clone(); + + client.interact(move |conn| { + let transaction = conn.transaction()?; + + let mut query_with_event = transaction.prepare( + r#"INSERT INTO heartbeats(machine, ip, subscription, first_seen, last_seen, last_event_seen) + VALUES (?1, ?2, ?3, ?4, ?4, ?5) + ON CONFLICT (machine, subscription) DO + UPDATE SET last_seen = excluded.last_seen, + last_event_seen = excluded.last_event_seen"#)?; + let mut query_without_event = transaction.prepare( + r#"INSERT INTO heartbeats(machine, ip, subscription, first_seen, last_seen, last_event_seen) + VALUES (?1, ?2, ?3, ?4, ?4, NULL) + ON CONFLICT (machine, subscription) DO + UPDATE SET 
last_seen = excluded.last_seen"#)?; + + for (key, value) in heartbeats_cloned { + match value.last_event_seen { + Some(last_event_seen) => { + query_with_event + .execute( + params![ + &key.machine, + &value.ip, + &key.subscription, + &value.last_seen, + &last_event_seen, + ], + )?; + } + None => { + query_without_event + .execute( + params![&key.machine, &value.ip, &key.subscription, &value.last_seen], + )?; + } + } + } + + query_with_event.finalize()?; + query_without_event.finalize()?; + transaction.commit()?; + Ok::<(), rusqlite::Error>(()) + }).await + .map_err(|err| anyhow!(format!("{}", err)))??; + Ok(()) + } + + async fn get_subscriptions(&self) -> Result> { + self.pool + .get() + .await? + .interact(move |conn| { + let mut statement = conn.prepare( + r#"SELECT * + FROM subscriptions + "#, + )?; + let rows = statement.query_map((), row_to_subscription)?; + + let mut subscriptions = Vec::new(); + for subscription in rows { + subscriptions.push(subscription?); + } + Ok(subscriptions) + }) + .await + .map_err(|err| anyhow!(format!("{}", err)))? + } + + async fn get_subscription(&self, version: &str) -> Result> { + self.get_subscription_by_field("version", version.to_string()) + .await + } + + async fn get_subscription_by_identifier( + &self, + identifier: &str, + ) -> Result> { + let identifier = identifier.to_string(); + self.pool + .get() + .await? + .interact(move |conn| { + conn.query_row( + r#"SELECT * + FROM subscriptions + WHERE name = :identifier OR uuid = :identifier"#, + &[(":identifier", &identifier)], + row_to_subscription, + ) + .optional() + .map_err(|err| anyhow!(err)) + }) + .await + .map_err(|err| anyhow!(format!("{}", err)))? + } + + async fn store_subscription(&self, subscription: SubscriptionData) -> Result<()> { + let count = self + .pool + .get() + .await? 
+ .interact(move |conn| { + conn.execute( + r#"INSERT INTO subscriptions (uuid, version, name, uri, query, + heartbeat_interval, connection_retry_count, connection_retry_interval, + max_time, max_envelope_size, enabled, read_existing_events, outputs) + VALUES (:uuid, :version, :name, :uri, :query, + :heartbeat_interval, :connection_retry_count, :connection_retry_interval, + :max_time, :max_envelope_size, :enabled, :read_existing_events, :outputs) + ON CONFLICT (uuid) DO UPDATE SET + version = excluded.version, + name = excluded.name, + uri = excluded.uri, + query = excluded.query, + heartbeat_interval = excluded.heartbeat_interval, + connection_retry_count = excluded.connection_retry_count, + connection_retry_interval = excluded.connection_retry_interval, + max_time = excluded.max_time, + max_envelope_size = excluded.max_envelope_size, + enabled = excluded.enabled, + read_existing_events = excluded.read_existing_events, + outputs = excluded.outputs"#, + named_params! { + ":uuid": subscription.uuid(), + ":version": subscription.version(), + ":name": subscription.name(), + ":uri": subscription.uri(), + ":query": subscription.query(), + ":heartbeat_interval": subscription.heartbeat_interval(), + ":connection_retry_count": subscription.connection_retry_count(), + ":connection_retry_interval": subscription.connection_retry_interval(), + ":max_time": subscription.max_time(), + ":max_envelope_size": subscription.max_envelope_size(), + ":enabled": subscription.enabled(), + ":read_existing_events": subscription.read_existing_events(), + ":outputs": serde_json::to_string(subscription.outputs())?, + }, + ) + .map_err(|err| anyhow!(err)) + }) + .await + .map_err(|err| anyhow!(format!("{}", err)))??; + + ensure!(count == 1, "Only one row must have been updated"); + + Ok(()) + } + + async fn delete_subscription(&self, uuid: &str) -> Result<()> { + let uuid_owned = uuid.to_string(); + let count = self + .pool + .get() + .await? 
+ .interact(move |conn| { + conn.execute( + r#"DELETE FROM subscriptions WHERE uuid = :uuid"#, + &[(":uuid", &uuid_owned)], + ) + }) + .await + .map_err(|err| anyhow!(format!("{}", err)))??; + + ensure!(count == 1, "Only one row must have been deleted"); + + Ok(()) + } + + /// Fails if `setup_schema` hasn't previously been called or if the query otherwise fails. + async fn current_version(&self) -> Result> { + let query = format!( + "SELECT version FROM {} ORDER BY version DESC LIMIT 1;", + MIGRATION_TABLE_NAME + ); + self.pool + .get() + .await? + .interact( + move |conn| match conn.query_row(&query, [], |row| row.get("version")) { + Ok(version) => Ok(Some(version)), + Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None), + Err(e) => Err(e.into()), + }, + ) + .await + .map_err(|err| anyhow!(format!("{}", err)))? + } + + /// Fails if `setup_schema` hasn't previously been called or if the query otherwise fails. + async fn migrated_versions(&self) -> Result> { + let query = format!("SELECT version FROM {};", MIGRATION_TABLE_NAME); + // This clone is required if we want to be able to print the query in + // case of error. This function is called only in the start of programs so + // the overhead is acceptable. + let query_for_context_error = query.clone(); + + let versions = self + .pool + .get() + .await? + .interact(move |conn| { + let mut statement = conn.prepare(&query)?; + let result = + statement.query_map([], |row_result| row_result.get::<&str, i64>("version"))?; + let mut versions = BTreeSet::new(); + for vresult in result { + versions.insert(vresult?); + } + Ok::, rusqlite::Error>(versions) + }) + .await + .map_err(|err| anyhow!(format!("{}", err))) + .with_context(|| { + format!("Failed to execute query: \"{}\"", query_for_context_error) + })??; + + Ok(versions) + } + + /// Fails if `setup_schema` hasn't previously been called or if the migration otherwise fails. 
+ async fn apply_migration(&self, version: Version) -> Result<()> { + let migration = self + .migrations + .get(&version) + .ok_or_else(|| anyhow!("Could not retrieve migration with version {}", version))? + .clone(); + self.pool + .get() + .await? + .interact(move |conn| { + let tx = conn.transaction()?; + + migration.up(&tx)?; + let query = format!( + "INSERT INTO {} (version) VALUES ($1);", + MIGRATION_TABLE_NAME + ); + let _count = tx.execute(&query, [&migration.version()])?; + + tx.commit()?; + Ok::<_, Error>(()) + }) + .await + .map_err(|err| anyhow!(format!("{}", err)))??; + + Ok(()) + } + + /// Fails if `setup_schema` hasn't previously been called or if the migration otherwise fails. + async fn revert_migration(&self, version: Version) -> Result<()> { + let migration = self + .migrations + .get(&version) + .ok_or_else(|| anyhow!("Could not retrieve migration with version {}", version))? + .clone(); + self.pool + .get() + .await? + .interact(move |conn| { + let tx = conn.transaction()?; + migration.down(&tx)?; + + let query = format!("DELETE FROM {} WHERE version = $1;", MIGRATION_TABLE_NAME); + let _count = tx.execute(&query, [&migration.version()])?; + tx.commit()?; + Ok::<_, Error>(()) + }) + .await + .map_err(|err| anyhow!(format!("{}", err)))??; + + Ok(()) + } + + /// Create the tables required to keep track of schema state. If the tables already + /// exist, this function has no operation. + async fn setup_schema(&self) -> Result<()> { + let query = format!( + "CREATE TABLE IF NOT EXISTS {} (version BIGINT PRIMARY KEY);", + MIGRATION_TABLE_NAME + ); + self.pool + .get() + .await? 
+ .interact(move |conn| conn.execute(&query, [])) + .await + .map_err(|err| anyhow!(format!("{}", err)))??; + Ok(()) + } + + async fn migrations(&self) -> BTreeMap> { + // TODO: Remove copy/paste between db backends + let mut base_migrations = BTreeMap::new(); + for (version, migration) in self.migrations.iter() { + base_migrations.insert(*version, migration.to_base()); + } + base_migrations + } + + async fn get_stats( + &self, + subscription: &str, + start_time: i64, + ) -> Result<SubscriptionStatsCounters> { + let subscription_arc = Arc::new(subscription.to_string()); + let client = self.pool.get().await?; + let subscription_owned = subscription_arc.clone(); + let total_machines_count = client + .interact(move |conn| { + conn.query_row( + r#"SELECT COUNT(machine) + FROM heartbeats + WHERE subscription = :subscription"#, + &[(":subscription", &subscription_owned)], + |row| row.get(0), + ) + .map_err(|err| anyhow!(err)) + }) + .await + .map_err(|err| anyhow!(format!("{}", err)))??; + // subscription_owned has been moved into previous interact closure + let subscription_owned = subscription_arc.clone(); + let alive_machines_count = client + .interact(move |conn| { + conn.query_row( + r#"SELECT COUNT(machine) + FROM heartbeats + WHERE subscription = :subscription AND last_seen > :start_time AND (last_event_seen IS NULL OR last_event_seen <= :start_time)"#, + named_params! { + ":subscription": &subscription_owned, + ":start_time": &start_time, + }, + |row| row.get(0), + ) + .map_err(|err| anyhow!(err)) + }) + .await + .map_err(|err| anyhow!(format!("{}", err)))??; + // subscription_owned has been moved into previous interact closure + let subscription_owned = subscription_arc.clone(); + let active_machines_count = client + .interact(move |conn| { + conn.query_row( + r#"SELECT COUNT(machine) + FROM heartbeats + WHERE subscription = :subscription AND last_event_seen > :start_time"#, + named_params!
{ + ":subscription": &subscription_owned, + ":start_time": &start_time, + }, + |row| row.get(0), + ) + .map_err(|err| anyhow!(err)) + }) + .await + .map_err(|err| anyhow!(format!("{}", err)))??; + // subscription_owned has been moved into previous interact closure + let subscription_owned = subscription_arc.clone(); + let dead_machines_count = client + .interact(move |conn| { + conn.query_row( + r#"SELECT COUNT(machine) + FROM heartbeats + WHERE subscription = :subscription AND (last_event_seen IS NULL OR last_event_seen <= :start_time) AND last_seen <= :start_time"#, + named_params! { + ":subscription": &subscription_owned, + ":start_time": &start_time, + }, + |row| row.get(0), + ) + .map_err(|err| anyhow!(err)) + }) + .await + .map_err(|err| anyhow!(format!("{}", err)))??; + Ok(SubscriptionStatsCounters::new( + total_machines_count, + alive_machines_count, + active_machines_count, + dead_machines_count, + )) + } + + async fn get_machines( + &self, + subscription: &str, + start_time: i64, + stat_type: Option<SubscriptionMachineState>, + ) -> Result<Vec<SubscriptionMachine>> { + let subscription_owned = subscription.to_owned(); + let client = self.pool.get().await?; + client + .interact(move |conn| { + let mut result = Vec::new(); + match stat_type { + None => { + let mut statement = conn.prepare("SELECT * FROM heartbeats WHERE subscription = :subscription")?; + let rows = statement.query_map(named_params! { ":subscription": subscription_owned}, |row| { + Ok(SubscriptionMachine::new(row.get("machine")?, row.get("ip")?)) + })?; + for stat in rows { + result.push(stat?); + } + }, + Some(SubscriptionMachineState::Active) => { + let mut statement = conn.prepare("SELECT * FROM heartbeats WHERE subscription = :subscription AND last_event_seen > :start_time")?; + let rows = statement.query_map(named_params!
{ ":subscription": subscription_owned, ":start_time": start_time}, |row| { + Ok(SubscriptionMachine::new(row.get("machine")?, row.get("ip")?)) + })?; + for stat in rows { + result.push(stat?); + } + }, + Some(SubscriptionMachineState::Alive) => { + let mut statement = conn.prepare("SELECT * FROM heartbeats WHERE subscription = :subscription AND last_seen > :start_time AND (last_event_seen IS NULL OR last_event_seen <= :start_time)")?; + let rows = statement.query_map(named_params! { ":subscription": subscription_owned, ":start_time": start_time}, |row| { + Ok(SubscriptionMachine::new(row.get("machine")?, row.get("ip")?)) + })?; + for stat in rows { + result.push(stat?); + } + }, + Some(SubscriptionMachineState::Dead) => { + let mut statement = conn.prepare("SELECT * FROM heartbeats WHERE subscription = :subscription AND last_seen <= :start_time AND (last_event_seen IS NULL OR last_event_seen <= :start_time)")?; + let rows = statement.query_map(named_params! { ":subscription": subscription_owned, ":start_time": start_time}, |row| { + Ok(SubscriptionMachine::new(row.get("machine")?, row.get("ip")?)) + })?; + for stat in rows { + result.push(stat?); + } + } + }; + Ok::, anyhow::Error>(result) + }) + .await + .map_err(|err| anyhow!(format!("{}", err)))? 
+ } +} + +#[cfg(test)] +mod tests { + + use tempfile::TempPath; + + use crate::{ + database::schema::{self, Migrator}, + migration, + }; + + use super::*; + + async fn db_with_migrations(path: &TempPath) -> Result> { + let mut db = SQLiteDatabase::new(path.to_str().expect("Invalid temp file name")).await?; + schema::sqlite::register_migrations(&mut db); + Ok(Arc::new(db)) + } + + #[tokio::test] + async fn test_open_and_close() -> Result<()> { + let temp_file = tempfile::NamedTempFile::new()?; + let path = temp_file.into_temp_path(); + { + SQLiteDatabase::new(path.to_str().expect("Invalid temp file name")) + .await + .expect("Could not create database"); + } + path.close()?; + Ok(()) + } + + #[tokio::test] + async fn test_bookmarks() -> Result<()> { + let temp_file = tempfile::NamedTempFile::new()?; + let path = temp_file.into_temp_path(); + { + crate::database::tests::test_bookmarks(db_with_migrations(&path).await?).await?; + } + path.close()?; + Ok(()) + } + + #[tokio::test] + async fn test_heartbeats() -> Result<()> { + let temp_file = tempfile::NamedTempFile::new()?; + let path = temp_file.into_temp_path(); + { + crate::database::tests::test_heartbeats(db_with_migrations(&path).await?).await?; + } + path.close()?; + Ok(()) + } + + #[tokio::test] + async fn test_subscriptions() -> Result<()> { + let temp_file = tempfile::NamedTempFile::new()?; + let path = temp_file.into_temp_path(); + { + crate::database::tests::test_subscriptions(db_with_migrations(&path).await?).await?; + } + Ok(()) + } + + struct CreateUsers; + migration!(CreateUsers, 1, "create users table"); + + impl SQLiteMigration for CreateUsers { + fn up(&self, conn: &Connection) -> Result<()> { + conn.execute("CREATE TABLE users (id BIGINT PRIMARY KEY);", []) + .map_err(|err| anyhow!("SQLiteError: {}", err))?; + Ok(()) + } + + fn down(&self, conn: &Connection) -> Result<()> { + conn.execute("DROP TABLE users;", []) + .map_err(|err| anyhow!("SQLiteError: {}", err))?; + Ok(()) + } + } + + #[tokio::test] 
+ async fn test_register() -> Result<()> { + let temp_file = tempfile::NamedTempFile::new()?; + let path = temp_file.into_temp_path(); + { + let mut db = + SQLiteDatabase::new(path.to_str().expect("Invalid temp file name")).await?; + + db.register_migration(Arc::new(CreateUsers)); + db.setup_schema().await?; + + let arc_db = Arc::new(db); + let migrator = Migrator::new(arc_db.clone()); + + migrator.up(None, false).await.unwrap(); + + assert_eq!(arc_db.current_version().await.unwrap(), Some(1)); + + migrator.down(None, false).await.unwrap(); + + assert_eq!(arc_db.current_version().await.unwrap(), None); + } + path.close()?; + Ok(()) + } + + #[tokio::test] + async fn test_stats() -> Result<()> { + let temp_file = tempfile::NamedTempFile::new()?; + let path = temp_file.into_temp_path(); + { + crate::database::tests::test_stats_and_machines(db_with_migrations(&path).await?) + .await?; + } + Ok(()) + } +} diff --git a/common/src/encoding.rs b/common/src/encoding.rs new file mode 100644 index 0000000..5f0fddf --- /dev/null +++ b/common/src/encoding.rs @@ -0,0 +1,52 @@ +use anyhow::{anyhow, bail, Result}; +use log::warn; + +pub fn decode_utf16le(message: Vec) -> Result { + let mut decoder = encoding_rs::UTF_16LE.new_decoder(); + let mut decoded = String::with_capacity( + decoder + .max_utf8_buffer_length(message.len()) + .ok_or_else(|| anyhow!("Could not decode utf16 data"))?, + ); + let (result, _, replacements) = decoder.decode_to_string(&message, &mut decoded, true); + match result { + encoding_rs::CoderResult::InputEmpty => (), + _ => bail!("Failed to decode utf16 data"), + } + + if replacements { + warn!("Replacement character has been used to decode utf16"); + } + + Ok(decoded) +} + +pub fn encode_utf16le(message: String) -> Result> { + // encoding_rs does not support UTF16-LE encoding + // so we retrieve an iterator of u16 values using + // String::encode_utf16 and we expand it in little + // endian bytes by hand + // TODO: improve performances + let mut res: Vec = 
vec![0xff, 0xfe]; // BOM is mandatory + res.extend_from_slice( + &message + .encode_utf16() + .flat_map(|x| x.to_le_bytes()) + .collect::>(), + ); + Ok(res) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_encode_decode() -> Result<()> { + let message = "This is a super message with some àçcèént你好".to_owned(); + + assert_eq!(decode_utf16le(encode_utf16le(message.clone())?)?, message); + + Ok(()) + } +} diff --git a/common/src/heartbeat.rs b/common/src/heartbeat.rs new file mode 100644 index 0000000..e4a79a9 --- /dev/null +++ b/common/src/heartbeat.rs @@ -0,0 +1,90 @@ +use std::collections::HashMap; + +use serde::{ser::SerializeStruct, Serialize, Serializer}; + +use crate::{subscription::SubscriptionData, utils, utils::Timestamp}; + +#[derive(Debug, Serialize, PartialEq, Eq, Clone)] +pub struct HeartbeatData { + machine: String, + ip: String, + #[serde(flatten, serialize_with = "serialize_subscription_data")] + subscription: SubscriptionData, + #[serde(serialize_with = "utils::serialize_timestamp")] + first_seen: Timestamp, + #[serde(serialize_with = "utils::serialize_timestamp")] + last_seen: Timestamp, + #[serde(serialize_with = "utils::serialize_option_timestamp")] + last_event_seen: Option, +} + +fn serialize_subscription_data( + subscription: &SubscriptionData, + serializer: S, +) -> Result +where + S: Serializer, +{ + let mut state = serializer.serialize_struct("Subscription", 2)?; + state.serialize_field("subscription_uuid", subscription.uuid())?; + state.serialize_field("subscription_name", subscription.name())?; + state.end() +} + +impl HeartbeatData { + pub fn new( + machine: String, + ip: String, + subscription: SubscriptionData, + first_seen: i64, + last_seen: i64, + last_event_seen: Option, + ) -> Self { + HeartbeatData { + machine, + ip, + subscription, + first_seen, + last_seen, + last_event_seen, + } + } + pub fn first_seen(&self) -> i64 { + self.first_seen + } + + pub fn last_seen(&self) -> i64 { + self.last_seen + } + + pub fn 
machine(&self) -> &str { + self.machine.as_ref() + } + + pub fn ip(&self) -> &str { + self.ip.as_ref() + } + + pub fn subscription(&self) -> &SubscriptionData { + &self.subscription + } + + pub fn last_event_seen(&self) -> Option { + self.last_event_seen + } +} + +#[derive(Eq, Hash, PartialEq, Debug, Clone)] +pub struct HeartbeatKey { + pub machine: String, + pub subscription: String, +} + +#[derive(Debug, Clone)] +pub struct HeartbeatValue { + pub ip: String, + pub last_seen: u64, + pub last_event_seen: Option, +} + +pub type HeartbeatsCache = HashMap; diff --git a/common/src/lib.rs b/common/src/lib.rs new file mode 100644 index 0000000..2d80fbb --- /dev/null +++ b/common/src/lib.rs @@ -0,0 +1,7 @@ +pub mod bookmark; +pub mod database; +pub mod encoding; +pub mod heartbeat; +pub mod settings; +pub mod subscription; +pub mod utils; diff --git a/common/src/settings.rs b/common/src/settings.rs new file mode 100644 index 0000000..dae99c3 --- /dev/null +++ b/common/src/settings.rs @@ -0,0 +1,350 @@ +use anyhow::Result; +use serde::Deserialize; +use std::str::FromStr; +use std::{fs::File, io::Read}; + +pub const DEFAULT_CONFIG_FILE: &str = "/etc/openwec.conf.toml"; + +#[derive(Debug, Deserialize, Clone)] +#[serde(tag = "type")] +pub enum Authentication { + Kerberos(Kerberos), + Tls(Tls), +} + +#[derive(Debug, Deserialize, Clone)] +#[serde(tag = "type")] +pub enum Database { + SQLite(SQLite), + Postgres(Postgres), +} + +#[derive(Debug, Deserialize, Clone)] +pub struct Tls { + server_certificate: String, + server_private_key: String, + ca_certificate: String, +} + +impl Tls { + pub fn server_certificate(&self) -> &str { + self.server_certificate.as_ref() + } + + pub fn server_private_key(&self) -> &str { + self.server_private_key.as_ref() + } + + pub fn ca_certificate(&self) -> &str { + self.ca_certificate.as_ref() + } +} + +#[derive(Debug, Deserialize, Clone)] +pub struct Collector { + hostname: String, + listen_address: String, + listen_port: Option, + 
max_content_length: Option, + authentication: Authentication, +} + +impl Collector { + pub fn hostname(&self) -> &str { + &self.hostname + } + + pub fn listen_address(&self) -> &str { + &self.listen_address + } + + pub fn listen_port(&self) -> u16 { + self.listen_port.unwrap_or(5985) + } + + pub fn max_content_length(&self) -> u64 { + self.max_content_length.unwrap_or(512_000) + } + pub fn authentication(&self) -> &Authentication { + &self.authentication + } +} + +#[derive(Debug, Deserialize, Clone)] +pub struct Kerberos { + service_principal_name: String, + keytab: String, +} + +impl Kerberos { + pub fn service_principal_name(&self) -> &str { + &self.service_principal_name + } + + pub fn keytab(&self) -> &str { + &self.keytab + } +} + +#[derive(Debug, Deserialize, Clone)] +pub struct SQLite { + path: String, +} + +impl SQLite { + pub fn path(&self) -> &str { + &self.path + } +} + +#[derive(Debug, Deserialize, Clone, PartialEq, Eq)] +pub enum PostgresSslMode { + Disable, + Prefer, + Require, +} + +impl Default for PostgresSslMode { + fn default() -> Self { + PostgresSslMode::Prefer + } +} + +#[derive(Debug, Deserialize, Clone)] +pub struct Postgres { + host: String, + port: u16, + dbname: String, + user: String, + password: String, + #[serde(default)] + ssl_mode: PostgresSslMode, + ca_file: Option, + max_chunk_size: Option, +} + +impl Postgres { + pub fn new( + host: &str, + port: u16, + dbname: &str, + user: &str, + password: &str, + ssl_mode: PostgresSslMode, + ca_file: Option<&String>, + max_chunk_size: Option, + ) -> Postgres { + Postgres { + host: host.to_owned(), + port, + dbname: dbname.to_owned(), + user: user.to_owned(), + password: password.to_owned(), + ssl_mode, + ca_file: ca_file.cloned(), + max_chunk_size, + } + } + + pub fn host(&self) -> &str { + self.host.as_ref() + } + + pub fn port(&self) -> u16 { + self.port + } + + pub fn user(&self) -> &str { + self.user.as_ref() + } + + pub fn password(&self) -> &str { + self.password.as_ref() + } + + pub fn 
dbname(&self) -> &str { + self.dbname.as_ref() + } + + pub fn ssl_mode(&self) -> &PostgresSslMode { + &self.ssl_mode + } + + pub fn ca_file(&self) -> Option<&String> { + self.ca_file.as_ref() + } + + pub fn max_chunk_size(&self) -> usize { + self.max_chunk_size.unwrap_or(500) + } +} + +#[derive(Debug, Deserialize, Clone)] +pub struct Server { + verbosity: Option, + db_sync_interval: Option, + flush_heartbeats_interval: Option, + node_name: Option, +} + +impl Server { + pub fn verbosity(&self) -> Option<&String> { + self.verbosity.as_ref() + } + + pub fn db_sync_interval(&self) -> u64 { + self.db_sync_interval.unwrap_or(5) + } + + pub fn flush_heartbeats_interval(&self) -> u64 { + self.flush_heartbeats_interval.unwrap_or(5) + } + + pub fn node_name(&self) -> Option<&String> { + self.node_name.as_ref() + } +} + +#[derive(Debug, Deserialize, Clone)] +pub struct Settings { + collectors: Vec, + database: Database, + server: Server, +} + +impl std::str::FromStr for Settings { + type Err = anyhow::Error; + fn from_str(content: &str) -> Result { + toml::from_str(content).map_err(anyhow::Error::from) + } +} + +impl Settings { + pub fn new(config_file: Option<&String>) -> Result { + let default = DEFAULT_CONFIG_FILE.to_owned(); + let path = config_file.unwrap_or(&default); + let mut content = String::new(); + File::open(path)?.read_to_string(&mut content)?; + Settings::from_str(&content) + } + + pub fn collectors(&self) -> &[Collector] { + self.collectors.as_ref() + } + + pub fn database(&self) -> &Database { + &self.database + } + + pub fn server(&self) -> &Server { + &self.server + } +} + +#[cfg(test)] +mod tests { + use super::*; + + const CONFIG_KERBEROS_SQLITE: &str = r#" + [server] + verbosity = "debug" + + [database] + type = "SQLite" + path = "/tmp/toto.sqlite" + + [[collectors]] + hostname = "wec.windomain.local" + listen_address = "0.0.0.0" + listen_port = 5986 + max_content_length = 1000 + + [collectors.authentication] + type = "Kerberos" + service_principal_name 
= "http/wec.windomain.local@WINDOMAIN.LOCAL" + keytab = "wec.windomain.local.keytab" + "#; + + #[test] + fn test_settings_kerberos_sqlite() { + let s = Settings::from_str(CONFIG_KERBEROS_SQLITE).unwrap(); + assert_eq!(s.collectors().len(), 1); + let collector = &s.collectors()[0]; + assert_eq!(collector.hostname(), "wec.windomain.local"); + assert_eq!(collector.listen_address(), "0.0.0.0"); + assert_eq!(collector.listen_port(), 5986); + assert_eq!(collector.max_content_length(), 1000); + + let kerberos = match collector.authentication() { + Authentication::Kerberos(kerb) => kerb, + _ => panic!("Wrong authentication type"), + }; + assert_eq!(kerberos.keytab(), "wec.windomain.local.keytab"); + assert_eq!( + kerberos.service_principal_name(), + "http/wec.windomain.local@WINDOMAIN.LOCAL" + ); + + let sqlite = match s.database() { + Database::SQLite(sqlite) => sqlite, + _ => panic!("Wrong database type"), + }; + + assert_eq!(sqlite.path(), "/tmp/toto.sqlite"); + assert_eq!(s.server().verbosity().unwrap(), "debug"); + } + + const CONFIG_TLS_POSTGRES: &str = r#" + [server] + + [database] + type = "Postgres" + host = "localhost" + port = 26257 + dbname = "test" + user = "root" + password = "" + + [[collectors]] + hostname = "wec.windomain.local" + listen_address = "0.0.0.0" + + [collectors.authentication] + type = "Tls" + server_certificate = "/etc/server_certificate.pem" + server_private_key = "/etc/server_private_key.pem" + ca_certificate = "/etc/ca_certificate.pem" + "#; + + #[test] + fn test_settings_tls_postgres() { + let s = Settings::from_str(CONFIG_TLS_POSTGRES).unwrap(); + assert_eq!(s.collectors().len(), 1); + let collector = &s.collectors()[0]; + assert_eq!(collector.hostname(), "wec.windomain.local"); + assert_eq!(collector.listen_address(), "0.0.0.0"); + // Checks default values + assert_eq!(collector.listen_port(), 5985); + assert_eq!(collector.max_content_length(), 512_000); + + let tls = match collector.authentication() { + Authentication::Tls(tls) => tls, 
+ _ => panic!("Wrong authentication type"), + }; + assert_eq!(tls.server_certificate(), "/etc/server_certificate.pem"); + assert_eq!(tls.server_private_key(), "/etc/server_private_key.pem"); + assert_eq!(tls.ca_certificate(), "/etc/ca_certificate.pem"); + + let postgres = match s.database() { + Database::Postgres(postgres) => postgres, + _ => panic!("Wrong database type"), + }; + + assert_eq!(postgres.host(), "localhost"); + assert_eq!(postgres.port(), 26257); + assert_eq!(postgres.dbname(), "test"); + assert_eq!(postgres.user(), "root"); + assert_eq!(postgres.password(), ""); + assert!(s.server().verbosity().is_none()); + } +} diff --git a/common/src/subscription.rs b/common/src/subscription.rs new file mode 100644 index 0000000..7098e22 --- /dev/null +++ b/common/src/subscription.rs @@ -0,0 +1,596 @@ +use std::{ + collections::HashMap, + fmt::{Display, Formatter}, +}; + +use crate::utils::new_uuid; +use anyhow::{anyhow, bail, Error, Result}; +use log::info; +use serde::{Deserialize, Serialize}; +use uuid::Uuid; + +#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)] +pub struct KafkaConfiguration { + topic: String, + options: HashMap, +} + +impl KafkaConfiguration { + pub fn new(topic: String, options: HashMap) -> Self { + KafkaConfiguration { topic, options } + } + + /// Get a reference to the kafka configuration's topic. + pub fn topic(&self) -> &str { + self.topic.as_ref() + } + + /// Get a reference to the kafka configuration's options. 
+ pub fn options(&self) -> &HashMap { + &self.options + } +} + +#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)] +pub struct TcpConfiguration { + addr: String, + port: u16, +} + +impl TcpConfiguration { + pub fn new(addr: String, port: u16) -> Self { + TcpConfiguration { addr, port } + } + + pub fn addr(&self) -> &str { + self.addr.as_ref() + } + + pub fn port(&self) -> u16 { + self.port + } +} + +// File storage path format is: +// <base>/<ip>/<principal>/[<node_name>/]<filename> +// <ip> can be split (depends on split_on_addr_index) +#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)] +pub struct FileConfiguration { + base: String, + // None => don't split + // Some(n) => Split starting on the n-th segment (IPv4 and IPv6) + split_on_addr_index: Option, + // requires server.node_name to be configured + append_node_name: bool, + filename: String, +} + +impl FileConfiguration { + pub fn new( + base: String, + split_on_addr_index: Option, + append_node_name: bool, + filename: String, + ) -> Self { + Self { + base, + split_on_addr_index, + append_node_name, + filename, + } + } + + pub fn base(&self) -> &str { + self.base.as_ref() + } + + pub fn split_on_addr_index(&self) -> Option { + self.split_on_addr_index + } + + pub fn append_node_name(&self) -> bool { + self.append_node_name + } + + pub fn filename(&self) -> &str { + self.filename.as_ref() + } +} + +#[derive(Debug, Serialize, Clone, Eq, PartialEq, Deserialize)] +pub enum SubscriptionOutput { + // The last bool indicates whether the output is enabled or not.
+ Files(SubscriptionOutputFormat, FileConfiguration, bool), + Kafka(SubscriptionOutputFormat, KafkaConfiguration, bool), + Tcp(SubscriptionOutputFormat, TcpConfiguration, bool), +} + +impl SubscriptionOutput { + pub fn format(&self) -> &SubscriptionOutputFormat { + match self { + SubscriptionOutput::Files(format, _, _) => format, + SubscriptionOutput::Kafka(format, _, _) => format, + SubscriptionOutput::Tcp(format, _, _) => format, + } + } + + pub fn is_enabled(&self) -> bool { + match self { + SubscriptionOutput::Files(_, _, enabled) => *enabled, + SubscriptionOutput::Kafka(_, _, enabled) => *enabled, + SubscriptionOutput::Tcp(_, _, enabled) => *enabled, + } + } + + pub fn set_enabled(&self, value: bool) -> SubscriptionOutput { + match self { + SubscriptionOutput::Files(format, config, _) => { + SubscriptionOutput::Files(format.clone(), config.clone(), value) + } + SubscriptionOutput::Kafka(format, config, _) => { + SubscriptionOutput::Kafka(format.clone(), config.clone(), value) + } + SubscriptionOutput::Tcp(format, config, _) => { + SubscriptionOutput::Tcp(format.clone(), config.clone(), value) + } + } + } +} + +impl Display for SubscriptionOutput { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self { + SubscriptionOutput::Files(format, config, enabled) => { + write!( + f, + "Enabled: {:?}, Format: {}, Output: Files({:?})", + enabled, format, config + ) + } + SubscriptionOutput::Kafka(format, config, enabled) => { + write!( + f, + "Enabled: {:?}, Format: {}, Output: Kafka({:?})", + enabled, format, config + ) + } + SubscriptionOutput::Tcp(format, config, enabled) => { + write!( + f, + "Enabled: {:?}, Format: {}, Output: Tcp({}:{})", + enabled, format, config.addr, config.port + ) + } + } + } +} + +#[derive(Debug, Serialize, Clone, Eq, PartialEq, Deserialize)] +pub enum SubscriptionOutputFormat { + Json, + Raw, +} + +impl Display for SubscriptionOutputFormat { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match *self { + 
SubscriptionOutputFormat::Json => write!(f, "Json"), + SubscriptionOutputFormat::Raw => write!(f, "Raw"), + } + } +} + +impl TryFrom for SubscriptionOutputFormat { + type Error = Error; + fn try_from(value: u8) -> Result { + Ok(match value { + 0 => SubscriptionOutputFormat::Json, + 1 => SubscriptionOutputFormat::Raw, + _ => bail!("Unknown subscription output format {}", value), + }) + } +} + +#[derive(Debug, PartialEq, Clone, Eq, Serialize, Deserialize)] +pub struct SubscriptionData { + #[serde(default = "new_uuid")] + uuid: String, + #[serde(default = "new_uuid")] + version: String, + name: String, + uri: Option, + query: String, + heartbeat_interval: u32, + connection_retry_count: u16, + connection_retry_interval: u32, + max_time: u32, + max_envelope_size: u32, + enabled: bool, + read_existing_events: bool, + #[serde(default)] + outputs: Vec, +} + +impl Display for SubscriptionData { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + writeln!(f, "Subscription {}", self.name)?; + writeln!(f, "\tUUID: {}", self.uuid())?; + writeln!(f, "\tVersion: {}", self.version())?; + writeln!( + f, + "\tURI: {}", + match self.uri() { + Some(uri) => uri, + None => "None", + } + )?; + writeln!(f, "\tHeartbeat interval: {}s", self.heartbeat_interval())?; + writeln!( + f, + "\tConnection retry count: {}", + self.connection_retry_count() + )?; + writeln!( + f, + "\tConnection retry interval: {}s", + self.connection_retry_interval() + )?; + writeln!( + f, + "\tMax time without heartbeat/events: {}s", + self.max_time() + )?; + writeln!(f, "\tMax envelope size: {} bytes", self.max_envelope_size())?; + writeln!(f, "\tReadExistingEvents: {}", self.read_existing_events)?; + if self.outputs().is_empty() { + writeln!(f, "\tOutputs: None")?; + } else { + writeln!(f, "\tOutputs:")?; + for (index, output) in self.outputs().iter().enumerate() { + writeln!(f, "\t- {}: {}", index, output)?; + } + } + writeln!(f, "\tEnabled: {}", self.enabled) + } +} + +impl SubscriptionData { + pub fn 
empty() -> Self { + SubscriptionData { + uuid: Uuid::new_v4().to_string().to_ascii_uppercase(), + version: Uuid::new_v4().to_string().to_ascii_uppercase(), + name: String::new(), + uri: None, + query: String::new(), + heartbeat_interval: 3_600, + connection_retry_count: 5, + connection_retry_interval: 60, + max_time: 30, + max_envelope_size: 512_000, + enabled: true, + read_existing_events: false, + outputs: Vec::new(), + } + } + + pub fn new( + name: &str, + uri: Option<&str>, + query: &str, + heartbeat_interval: Option<&u32>, + connection_retry_count: Option<&u16>, + connection_retry_interval: Option<&u32>, + max_time: Option<&u32>, + max_envelope_size: Option<&u32>, + enabled: bool, + read_existing_events: bool, + outputs: Option>, + ) -> Self { + SubscriptionData { + uuid: Uuid::new_v4().to_string().to_ascii_uppercase(), + version: Uuid::new_v4().to_string().to_ascii_uppercase(), + name: name.to_owned(), + uri: uri.map(|e| e.to_string()), + query: query.to_owned(), + heartbeat_interval: *heartbeat_interval.unwrap_or(&3_600), + connection_retry_count: *connection_retry_count.unwrap_or(&5), + connection_retry_interval: *connection_retry_interval.unwrap_or(&60), + max_time: *max_time.unwrap_or(&30), + max_envelope_size: *max_envelope_size.unwrap_or(&512_000), + enabled, + read_existing_events, + outputs: outputs.unwrap_or_default(), + } + } + + pub fn from( + uuid: String, + version: String, + name: String, + uri: Option, + query: String, + heartbeat_interval: u32, + connection_retry_count: u16, + connection_retry_interval: u32, + max_time: u32, + max_envelope_size: u32, + enabled: bool, + read_existing_events: bool, + outputs: Vec, + ) -> Self { + SubscriptionData { + uuid, + version, + name, + uri, + query, + heartbeat_interval, + connection_retry_count, + connection_retry_interval, + max_time, + max_envelope_size, + enabled, + read_existing_events, + outputs, + } + } + + pub fn short(&self) -> String { + let mut res = String::new(); + if self.enabled { + 
res.push_str("[+] "); + } else { + res.push_str("[-] "); + } + + res.push_str(format!("{} ", self.name).as_str()); + if let Some(uri) = &self.uri { + res.push_str(format!("({})", uri).as_str()); + } else { + res.push_str("(*)"); + } + + res + } + + pub fn update_version(&mut self) { + self.version = Uuid::new_v4().to_string().to_ascii_uppercase(); + } + + pub fn update_uuid(&mut self) { + // This should only be used when duplicating an existing subscription + self.uuid = Uuid::new_v4().to_string().to_ascii_uppercase(); + } + + /// Get a reference to the subscription's uuid. + pub fn uuid(&self) -> &str { + self.uuid.as_ref() + } + + /// Get a reference to the subscription's version. + pub fn version(&self) -> &str { + self.version.as_ref() + } + + /// Get a reference to the subscription's name. + pub fn name(&self) -> &str { + self.name.as_ref() + } + + /// Get a reference to the subscription's heartbeat interval. + pub fn heartbeat_interval(&self) -> u32 { + self.heartbeat_interval + } + + /// Get a reference to the subscription's connection retry count. + pub fn connection_retry_count(&self) -> u16 { + self.connection_retry_count + } + + /// Get a reference to the subscription's connection retry interval. + pub fn connection_retry_interval(&self) -> u32 { + self.connection_retry_interval + } + + /// Get a reference to the subscription's max time. + pub fn max_time(&self) -> u32 { + self.max_time + } + + /// Get a reference to the subscription's max envelope size. + pub fn max_envelope_size(&self) -> u32 { + self.max_envelope_size + } + + /// Get a reference to the subscription's query. + pub fn query(&self) -> &str { + self.query.as_ref() + } + + /// Set the subscription's name. + pub fn set_name(&mut self, name: String) { + self.name = name; + self.update_version(); + } + + /// Set the subscription's query. + pub fn set_query(&mut self, query: String) { + self.query = query; + self.update_version(); + } + + /// Set the subscription's heartbeat interval. 
+ pub fn set_heartbeat_interval(&mut self, heartbeat_interval: u32) { + self.heartbeat_interval = heartbeat_interval; + self.update_version(); + } + + /// Set the subscription's connection retry count. + pub fn set_connection_retry_count(&mut self, connection_retry_count: u16) { + self.connection_retry_count = connection_retry_count; + self.update_version(); + } + + /// Set the subscription's connection retry interval. + pub fn set_connection_retry_interval(&mut self, connection_retry_interval: u32) { + self.connection_retry_interval = connection_retry_interval; + self.update_version(); + } + + /// Set the subscription's max time. + pub fn set_max_time(&mut self, max_time: u32) { + self.max_time = max_time; + self.update_version(); + } + + /// Set the subscription's max envelope size. + pub fn set_max_envelope_size(&mut self, max_envelope_size: u32) { + self.max_envelope_size = max_envelope_size; + self.update_version(); + } + + /// Get a reference to the subscription's outputs. + pub fn outputs(&self) -> &[SubscriptionOutput] { + self.outputs.as_ref() + } + + pub fn enabled(&self) -> bool { + self.enabled + } + + pub fn set_enabled(&mut self, enabled: bool) { + self.enabled = enabled; + self.update_version(); + } + + pub fn read_existing_events(&self) -> bool { + self.read_existing_events + } + + pub fn set_read_existing_events(&mut self, read_existing_events: bool) { + self.read_existing_events = read_existing_events; + self.update_version(); + } + + pub fn add_output(&mut self, output: SubscriptionOutput) { + self.outputs.push(output); + self.update_version(); + } + + pub fn delete_output(&mut self, index: usize) -> Result<()> { + if index >= self.outputs.len() { + bail!("Index out of range"); + } + let output = self.outputs.remove(index); + info!("Deleting output {:?}", output); + self.update_version(); + Ok(()) + } + + pub fn set_output_enabled(&mut self, index: usize, value: bool) -> Result<()> { + if index >= self.outputs.len() { + bail!("Index out of 
range"); + } + let output = self + .outputs + .get(index) + .ok_or_else(|| anyhow!("Index out of range"))?; + if value { + info!("Enabling output {:?}", output); + } else { + info!("Disabling output {:?}", output); + } + self.outputs[index] = output.set_enabled(value); + self.update_version(); + Ok(()) + } + + pub fn uri(&self) -> Option<&String> { + self.uri.as_ref() + } + + pub fn set_uri(&mut self, uri: Option) { + self.uri = uri; + self.update_version(); + } + + pub fn is_active(&self) -> bool { + self.enabled() + && self + .outputs() + .iter() + .find(|output| output.is_enabled()) + .is_some() + } +} + +#[derive(Debug, PartialEq, Eq)] +pub struct SubscriptionStatsCounters { + /// Total number of machines seen in the subscription + total_machines_count: i64, + /// Number of machines that have sent an heartbeat "recently" but no events + alive_machines_count: i64, + /// Number of machines that have sent events "recently" + active_machines_count: i64, + /// Number of machines that did not interact "recently" + dead_machines_count: i64, +} + +impl SubscriptionStatsCounters { + pub fn new( + total_machines_count: i64, + alive_machines_count: i64, + active_machines_count: i64, + dead_machines_count: i64, + ) -> Self { + Self { + total_machines_count, + alive_machines_count, + active_machines_count, + dead_machines_count, + } + } + + pub fn total_machines_count(&self) -> i64 { + self.total_machines_count + } + + pub fn active_machines_count(&self) -> i64 { + self.active_machines_count + } + + pub fn alive_machines_count(&self) -> i64 { + self.alive_machines_count + } + + pub fn dead_machines_count(&self) -> i64 { + self.dead_machines_count + } +} + +pub enum SubscriptionMachineState { + Alive, + Active, + Dead, +} + +#[derive(Debug)] +pub struct SubscriptionMachine { + name: String, + ip: String, +} + +impl SubscriptionMachine { + /// Creates a new [`SubscriptionStat`]. 
+ pub fn new(name: String, ip: String) -> Self { + Self { name, ip } + } + + pub fn name(&self) -> &str { + self.name.as_ref() + } + + pub fn ip(&self) -> &str { + self.ip.as_ref() + } +} diff --git a/common/src/utils.rs b/common/src/utils.rs new file mode 100644 index 0000000..b1e4e8c --- /dev/null +++ b/common/src/utils.rs @@ -0,0 +1,42 @@ +use anyhow::{anyhow, Result}; +use chrono::{DateTime, Local, TimeZone}; +use serde::{ser, Serializer}; +use uuid::Uuid; + +pub type Timestamp = i64; + +pub fn new_uuid() -> String { + format!("uuid:{}", Uuid::new_v4().to_string().to_uppercase()) +} + +pub fn timestamp_to_local_date(ts: i64) -> Result> { + Local + .timestamp_opt(ts, 0) + .single() + .ok_or_else(|| anyhow!("Invalid or ambiguous timestamp")) +} + +pub fn serialize_timestamp(timestamp: &Timestamp, serializer: S) -> Result +where + S: Serializer, +{ + let text = timestamp_to_local_date(*timestamp) + .map_err(|err| { + ser::Error::custom(format!("Could not retrieve date from timestamp: {}", err)) + })? + .to_rfc3339(); + serializer.serialize_str(&text) +} + +pub fn serialize_option_timestamp( + opt_ts: &Option, + serializer: S, +) -> Result +where + S: Serializer, +{ + match opt_ts { + Some(ts) => serialize_timestamp(ts, serializer), + None => serializer.serialize_none(), + } +} diff --git a/doc/cli.md b/doc/cli.md new file mode 100644 index 0000000..26c37e6 --- /dev/null +++ b/doc/cli.md @@ -0,0 +1,16 @@ +# Command Line Interface + +OpenWEC is composed of two binaries: +- `openwecd`: OpenWEC server +- `openwec`: OpenWEC CLI + +The CLI enables the user to manage its OpenWEC installation which means: +- modifying subscriptions +- getting statistics +- getting informations about machines activity +- doing database schema migrations +- ... + +OpenWEC works by storing a lot of information, notably its subscriptions, in an external [database](database.md). `openwec` CLI only interacts with this database, and **does not communicate directly with openwecd server**. 
+ +This design choice enables a multi-node OpenWEC cluster to be administered from only one of its nodes. Information retrieved using the `openwec` CLI and changes made using the CLI apply to the entire cluster at once. diff --git a/doc/database.md b/doc/database.md new file mode 100644 index 0000000..d4f6d5f --- /dev/null +++ b/doc/database.md @@ -0,0 +1,112 @@ +# Database + +OpenWEC subscriptions and their metadata are stored in an external database. To be more precise, data stored in the database are: +* subscriptions (parameters and outputs) +* bookmarks +* heartbeats + +OpenWEC server uses the database heavily: +- a bookmark is stored for each batch of events received from each client. +- a bookmark is retrieved each time a client enumerates subscriptions. +- heartbeats are stored in batch at a fixed interval (see OpenWEC configuration). + +The most precious information stored in the database is undoubtedly bookmarks. A *bookmark* represents a pointer to a location in the stream of events, for each client and each subscription. If you lose them, you will probably lose event logs. Therefore, you should definitely back up the database regularly. + +The size of the database is proportional to the number of clients and subscriptions, but should be relatively small compared to the size of collected events. + +OpenWEC database schema must be initialized manually on installation using `openwec db init`. + +OpenWEC database schema may be updated from one version to another. This is handled by a migration system. If you run an OpenWEC binary with an outdated database schema version, it will fail and tell you to run `openwec db upgrade`. This command will apply the required migrations to upgrade your database schema. + +## Available commands + +### `openwec db init` + +This command initializes the database schema. On SQL-based database backends, it creates tables and indexes.
+ +### `openwec db upgrade` + +This command upgrades the current database schema by applying required migrations. Database schema upgrades can change not only the schema but also stored data. **Before applying a migration, you should always check its related release note** (if not its code). + +### `openwec db downgrade` + +This command downgrades the current database schema by reversing previously applied migrations. This may be useful "one day" if an OpenWEC version that came with a database migration has critical bugs and a rollback is required, so better safe than sorry. + +## Available database backends + +### SQLite (on disk) + +SQLite is simple and yet powerful. It is great for testing and simple environments. However, redundancy and load balancing are not possible using SQLite. + +#### Configuration sample + +```toml +[database] +# [Required] +# Database type: SQLite | Postgres +type = "SQLite" + +# SQLite DB path +# The SQLite DB will be created and initialized if not already existing +path = "/var/db/openwec/openwec.sqlite" +``` + +### PostgreSQL + +For redundancy and/or scaling, you probably want to set up multiple OpenWEC nodes in different availability zones. To do that, you must use an external database storage backend such as PostgreSQL. Note that OpenWEC's PostgreSQL client is optimized to be used with [CockroachDB](https://github.com/cockroachdb/cockroach). + +#### Configuration sample + +```toml +[database] +# [Required] +# Database type: SQLite | Postgres +type = "Postgres" + +## Postgres configuration + +# [Required] +# Postgres database Hostname +host = "localhost" + +# [Required] +# Postgres database port +port = 5432 + +# [Required] +# Postgres database name. It must already exist and user should +# have all permissions on it. +dbname = "openwec" + +# [Required] +# Postgres database user. It must already exist and have all permissions +# on <dbname>.
+user = "openwec" + +# [Required] +# Postgres database user password +password = "" + +# [Optional] +# Postgres SSL mode. Possibles values are: +# - Disable: Do not use TLS +# - Prefer: Attempt to connect with TLS but allow sessions without +# - Require: Require the use of TLS +# ssl_mode = "Prefer" + +# [Optional] +# Custom SSL CA certificate file +# When ssl_mode is Prefer or Require, you may want to use a specific CA +# certificate file instead of the ones trusted by your system (default). +# ca_file = unset + +# [Optional] +# Max chunk size +# When performing bulk insert queries, this is the maximum number of +# entries that will be inserted in one query. +# max_chunk_size = 500 +``` + +## How to add a new database backend ? + +TODO diff --git a/doc/event.xsd b/doc/event.xsd new file mode 100644 index 0000000..a0a56ab --- /dev/null +++ b/doc/event.xsd @@ -0,0 +1,219 @@ + + + + + + + + + + + + + + + + + Hex 1-8 digits in size + + + + + + + + Hex 1-16 digits in size + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Classic eventlog binary data + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Generic event + + + + + Custom event + + + + + WPP debug event + + + + + + Non schematized event + + + + + + Instrumentation event + + + + + + + + + + + diff --git a/doc/formats.md b/doc/formats.md new file mode 100644 index 0000000..a19c9f3 --- /dev/null +++ b/doc/formats.md @@ -0,0 +1,190 @@ +# Formats + +Windows events received are in a XML format, the same one you can see in Windows Event Viewer. + +OpenWEC server can parse each event and format it differently. + +## Raw (XML) format + +Using this format, you get the exact event received by OpenWEC (no parsing happens). + +The XML schema is defined in the Windows SDK (see [event.xsd](event.xsd)). 
+ +### Json format + +Using this format, raw XML events are parsed and then serialized using Json. + +In addition, OpenWEC adds some data that may be useful: the Windows client IP address, its principal, the time when the event was received and the OpenWEC subscription. + +The JSON document generated uses the following structure: +```json +event := { + "System": system, + "EventData": event_data, + "DebugData": debug_data, + "UserData": string, + "ProcessingErrorData": processing_error_data, + "BinaryEventData": string, + "RenderingInfo": rendering_info, + "OpenWEC": openwec_data +} + +openwec_data := { + /* IP Address of the Windows client */ + "IpAddress": string, + /* Time when the event was received by OpenWEC */ + "TimeReceived": date, + /* Principal of the Windows client */ + "Principal": string, + "Subscription": { + "Name": string, + "Version": string, + "Uuid": string, + "Uri": string + } +} + +system := { + "Provider": { + "Name": string, + "Guid": string, + "EventSourceName": string + }, + "EventID": number, + "EventIDQualifiers": number, + "Vesion": number, + "Level": number, + "Task": number, + "Opcode": number, + "Keywords": string, + "TimeCreated": date, + "EventRecordID": number, + "Correlation": { + "ActivityID": string, + "RelatedActivityID": string + }, + "Execution": execution, + "Channel": string, + "Computer": string, + "Container": string, + "UserID": string +} + +execution := { + "ProcessID": number, + "ThreadID": number, + "ProcessorID": number, + "SessionID": number, + "KernelTime": number, + "UserTime": number, + "ProcessorTime": number +} + +rendering_info := { + "Message": string, + "Level": string, + "Task": string, + "Opcode": string, + "Channel": string, + "Provider": string, + "Keywords": array[string], + "Culture": string +} + +event_data := { + /* Depends of the event */ + string: any, + ..., + "Data": array[string], + "Binary": array[string] +} + +debug_data := { + "SequenceNumber": number, + "FlagsName": string, + "LevelName": 
string, + "Component": string, + "SubComponent": string, + "FileLine": string, + "Function": string, + "Message": string +} + +processing_error_data := { + "ErrorCode": number, + "DataItemName": string, + "EventPayload": string +} +``` + +#### Example + +```json +{ + "System": { + "Provider": { + "Name": "Microsoft-Windows-Security-Auditing", + "Guid": "{54849625-5478-4994-a5ba-3e3b0328c30d}" + }, + "EventID": 4688, + "Version": 2, + "Level": 0, + "Task": 13312, + "Opcode": 0, + "Keywords": "0x8020000000000000", + "TimeCreated": "2022-12-14T16:06:51.0643605Z", + "EventRecordID": 114689, + "Correlation": {}, + "Execution": { + "ProcessID": 4, + "ThreadID": 196 + }, + "Channel": "Security", + "Computer": "win10.windomain.local" + }, + "EventData": { + "SubjectLogonId": "0x3e7", + "SubjectUserName": "WIN10$", + "SubjectDomainName": "WINDOMAIN", + "ParentProcessName": "C:\\Windows\\System32\\services.exe", + "MandatoryLabel": "S-1-16-16384", + "SubjectUserSid": "S-1-5-18", + "NewProcessName": "C:\\Program Files (x86)\\Microsoft\\EdgeUpdate\\MicrosoftEdgeUpdate.exe", + "TokenElevationType": "%%1936", + "TargetUserSid": "S-1-0-0", + "TargetDomainName": "-", + "CommandLine": "", + "TargetUserName": "-", + "NewProcessId": "0x3a8", + "TargetLogonId": "0x0", + "ProcessId": "0x240" + }, + "RenderingInfo": { + "Message": "A new process has been created.\n\nCreator Subject:\n\tSecurity ID:\t\tS-1-5-18\n\tAccount Name:\t\tWIN10$\n\tAccount Domain:\t\tWINDOMAIN\n\tLogon ID:\t\t0x3E7\n\nTarget Subject:\n\tSecurity ID:\t\tS-1-0-0\n\tAccount Name:\t\t-\n\tAccount Domain:\t\t-\n\tLogon ID:\t\t0x0\n\nProcess Information:\n\tNew Process ID:\t\t0x3a8\n\tNew Process Name:\tC:\\Program Files (x86)\\Microsoft\\EdgeUpdate\\MicrosoftEdgeUpdate.exe\n\tToken Elevation Type:\t%%1936\n\tMandatory Label:\t\tS-1-16-16384\n\tCreator Process ID:\t0x240\n\tCreator Process Name:\tC:\\Windows\\System32\\services.exe\n\tProcess Command Line:\t\n\nToken Elevation Type indicates the type of token that was 
assigned to the new process in accordance with User Account Control policy.\n\nType 1 is a full token with no privileges removed or groups disabled. A full token is only used if User Account Control is disabled or if the user is the built-in Administrator account or a service account.\n\nType 2 is an elevated token with no privileges removed or groups disabled. An elevated token is used when User Account Control is enabled and the user chooses to start the program using Run as administrator. An elevated token is also used when an application is configured to always require administrative privilege or to always require maximum privilege, and the user is a member of the Administrators group.\n\nType 3 is a limited token with administrative privileges removed and administrative groups disabled. The limited token is used when User Account Control is enabled, the application does not require administrative privilege, and the user does not choose to start the program using Run as administrator.", + "Level": "Information", + "Task": "Process Creation", + "Opcode": "Info", + "Channel": "Security", + "Provider": "Microsoft Windows security auditing.", + "Keywords": [ + "Audit Success" + ], + "Culture": "en-US" + }, + "OpenWEC": { + "IpAddress": "192.168.58.100", + "TimeReceived": "2022-12-14T17:07:03.331+01:00", + "Principal": "WIN10$@WINDOMAIN.LOCAL", + "Subscription": { + "Uuid": "8B18D83D-2964-4F35-AC3B-6F4E6FFA727B", + "Version": "AD0D118F-31EF-4111-A0CA-D87249747278", + "Name": "Test", + "Uri": "/this/is/a/test" + } + } +} +``` + +## How to add a new formatter ? + +TODO diff --git a/doc/getting_started.md b/doc/getting_started.md new file mode 100644 index 0000000..dd50c85 --- /dev/null +++ b/doc/getting_started.md @@ -0,0 +1,216 @@ +# Getting started + +## Building OpenWEC + +OpenWEC is not yet packaged for any distribution we know about. Therefore, you need to build it from source or get a precompiled binary from the release page. 
+ +To build OpenWEC, you will need: +* cargo and rustc +* openssl-dev +* libgssapi + +And you will need to run: + +```bash +$ cargo build --release +$ strip target/release/openwec +$ strip target/release/openwecd +``` + +## Basic configuration example + +In an Active Directory domain `DC=windomain,DC=local`, let's configure OpenWEC on a machine named `wec.windomain.local` using an SQLite database. + +Requirements: +* A DNS entry for `wec.windomain.local` +* Authorise connections from your Windows machines to `wec.windomain.local` on TCP/5985 +* An Active Directory account for OpenWEC with `http/wec.windomain.local@WINDOMAIN.LOCAL` Service Principal Name. +* A keytab file containing keys for `http/wec.windomain.local@WINDOMAIN.LOCAL` SPN, available in `/etc/wec.windomain.local.keytab`. + +Write the following content in `/etc/openwec.conf.toml`: + +```toml +# /etc/openwec.conf.toml +[server] +verbosity = "info" +db_sync_interval = 5 +flush_heartbeats_interval = 5 + +[database] +type = "SQLite" +# You need to create /var/db/openwec yourself +path = "/var/db/openwec/db.sqlite" + +[[collectors]] +hostname = "wec.windomain.local" +listen_address = "0.0.0.0" + +[collectors.authentication] +type = "Kerberos" +service_principal_name = "http/wec.windomain.local@WINDOMAIN.LOCAL" +keytab = "/etc/wec.windomain.local.keytab" +``` + +See [openwec.conf.sample.toml](../openwec.conf.sample.toml) for further information on available parameters. + +We have configured OpenWEC to use the SQLite backend. The SQLite database will be stored on disk in `/var/db/openwec/db.sqlite`. You need to make sure that `/var/db/openwec` exists. + +We have set up a collector server. It listens on `0.0.0.0` (default port is `5985`) and can be contacted by Windows computers using `wec.windomain.local`. + +Authentication is made using Kerberos. A valid keytab containing credentials for `http/wec.windomain.local@WINDOMAIN.LOCAL` must be present in `/etc/wec.windomain.local.keytab`. 
+ +## System configuration + +You should run OpenWEC with an unprivileged user, for example `openwec`. + +You may want to create a *systemd* service: + +```ini +# openwec.service +[Unit] +Description=Windows Events Collector +After=network.target + +[Service] +Type=simple +User=openwec +Restart=always +RestartSec=5s +ExecStart=/usr/bin/openwecd + +[Install] +WantedBy=multi-user.target +``` + +## Initializing database + +Database schema needs to be initialized manually using: + +```bash +$ openwec db init +``` + +## Creating a new subscription + +You need to build a query to retrieve events you are interested in. Event queries syntax is described by Microsoft [here](https://learn.microsoft.com/en-us/previous-versions/bb399427(v=vs.90)). + +In this example, let's say we want to retrieve every events in *Security*, *System*, *Application* and *Setup* sources. + +Create a file `query.xml` containing: + +```xml + + + + + + + + + +``` + +You can then create the subscription: + +```bash +$ openwec subscriptions new my-test-subscription query.xml +``` + +You may provide additional arguments to customize the subscriptions settings (see [OpenWEC subscription settings](subscription.md)), but you will be able to edit it later. + +Your newly created subscription is not yet enabled. You need to configure at least one [output](outputs.md). + +## Configuring outputs for the subscription + +Let's say we want to: +- store events in JSON format in files in the path `/data/logs///messages`, where `` is the IP address of the machine who sent the log messages and `` its Kerberos principal +- and send them in a Kafka topic (`my-kafka-topic`) on `localhost:9092` for further processing. 
+ +We need to create 2 outputs: +* `Files` with base path `/data/logs` using the `json` formatter: + +```bash +$ openwec subscriptions edit my-test-subscription outputs add --format json files /data/logs +``` + +* `Kafka` also using the `Json` formatter: + +```bash +$ openwec subscriptions edit my-test-subscription outputs add --format json kafka my-kafka-topic -o bootstrap.servers localhost:9092 +``` + +## Enabling the subscription + +You may want to check your subscription configuration using: + +```bash +$ openwec subscriptions show my-test-subscription +``` + +If everything is OK, then you can enable the subscription: + +```bash +$ openwec subscriptions enable my-test-subscription +``` + +## Configuring Windows machines + +You can configure Windows machines using a GPO. + +This GPO will configure three things: +- start the WinRM service +- enable Windows Event Forwarding and configure it to look for subscriptions on your OpenWEC server +- authorise WinRM, i.e. the Network Service account, to read the wanted event channels + +1. Start the WinRM service + +Go to Computer Configuration > Policies > Windows Settings > Security Settings > System Services > Windows Remote Management (WS-Management), and select "Automatic" startup mode. + +2. Configure Event Forwarding + +- Go to Computer Configuration > Policies > Administrative Templates > Windows Components > Event Forwarding +- Double click on "Configure target Subscription Manager" +- Select "Enabled" +- Click on "Show" +- Add `Server=http://wec.windomain.local:5985/test,Refresh=30` which tells your Windows machines to + - fetch subscriptions from wec.windomain.local:5985 + - use URI "/test" + - look for subscriptions update every 30 seconds + +3. Set event channels permissions + +By default, WinRM is running as the Network Service account and therefore does not have the rights to read all event channels (such as the Security event channel). 
+ +In its configuration examples, Microsoft provides a GPO that adds Network Service account to the built-in Event Log Readers group: ["Minimum GPO for WEF Client configuration"](https://learn.microsoft.com/en-us/windows/security/threat-protection/use-windows-event-forwarding-to-assist-in-intrusion-detection#appendix-d---minimum-gpo-for-wef-client-configuration). + +This enables WinRM to read all event channels but for this configuration to really apply every Windows machines has to reboot (in order for the Network Service account access token to contains the Event Log Readers group). + +In order to have everything working as soon as the GPO applies we can also modify event channel security descriptors. + +For example, to give the right to read the "Security" channel to Network Service account: + +- Go to Computer Configuration > Policies > Administratives Templates > Windows Components > Event Log Service > Security +- Double click on "Configure log access" +- Select "Enabled" +- Add this security descriptor in SDDL format in the "Log Access" field: `O:BAG:SYD:(A;;0xf0005;;;SY)(A;;0x5;;;BA)(A;;0x1;;;S-1-5-32-573)(A;;0x1;;;NS)` +- Do the same for "Configure log access (legacy)" + +There is a lot of recommendations, explanations and tips on configuring WinRM and Windows Event Forwarding in the ANSSI guide ["Recommandations de sécurité pour la journalisation des systèmes Microsoft Windows en environnement Active Directory"](https://www.ssi.gouv.fr/uploads/2022/01/anssi-guide-recommandations_securite_journalisation_systemes_microsoft_windows_environnement_active_directory.pdf) (in french). We strongly recommend that you read it before deploying this GPO in a production environment. + +Link your GPO and wait until it is applied on all Windows machines. + +And that's it, you're done! 
:thumbsup: + +To be sure that everything works well, you can: +- look at `openwecd` logs to see if Windows machines are connecting to your OpenWEC server +- check your subscription outputs to see if some events are being received + +## Going further + +Now that you have a basic working collector, you have multiple ways to improve your setup: +* Add additional sources in your Event query +* Customize your subscriptions parameters +* Add multiple OpenWEC nodes for redundancy and scaling. You must use PostgreSQL backend to do that (we advise using CockroachDB). You need to setup a load balancer such as Nginx in front of OpenWEC nodes. +* Use a gMSA (group Managed Service Account) instead of a standard Active Directory account (you may use [gmsad](https://github.com/cea-sec/gmsad) and [msktutil](https://github.com/msktutil/msktutil)). +* Create multiple subscriptions with different URIs, for example one by tier. Thus, you can monitor efficiently that you always receive logs from Tier 0 servers. You need to link one GPO per tier with the subscription URI. + diff --git a/doc/how_it_works.md b/doc/how_it_works.md new file mode 100644 index 0000000..91688ab --- /dev/null +++ b/doc/how_it_works.md @@ -0,0 +1,82 @@ +# How does it work ? + +## Windows Event Forwarding + +The following is a quick summary of the Windows Event Forwarding protocol in source-initiated mode (more specifically, WSMAN *Events* mode). A more detailed analysis is available [here](protocol.md). + +Basically, a Windows host is configured (using a GPO, for example) to enumerate active [*subscriptions*](subscription.md) from a *collector* server. Each *subscription* is made to retrieve a specific set of events (defined by a [*query*](query.md)). The Windows host then sends the corresponding events to the endpoint defined in the *subscription*. All this information is exchanged using SOAP over HTTP(S). 
+ +Two transport protocols are available: +* Kerberos/HTTP (port 5985): SOAP messages are authenticated and encrypted using Kerberos and sent over HTTP. This is mainly used in Active Directory environments. +* HTTPS (port 5986): SOAP messages are authenticated and encrypted using HTTPS. Each Windows machine must have a valid client certificate. + +**OpenWEC only supports Kerberos/HTTP.** + +## Subscriptions + +Subscriptions are the heart of the Windows Event Forwarding protocol and thus of openwec :smile:. + +A subscription consists mainly of +* a name +* a query (XPath filter): only events matching this query will be sent +* a URI: if set, only computers enumerating using this URI will receive this subscription. This allows you to have different subscriptions for different sets of machines (OUs). *This is exclusive to OpenWEC. +* a boolean specifying whether you want to retrieve existing events or only new ones (defaults to only new ones). + +In OpenWEC, a subscription must be associated with at least one [*output*](outputs.md) that answers the question "*where should openwec put collected events and in what format?*". + +See the [documentation page about subscriptions](subscription.md) for more information. + +## Outputs + +Each output is actually made up of two elements: +1. an output type +2. a format + + +### Output Types + +Output types answer the question "*what should openwec do with collected events*". + +Currently there are several supported output types: +* `Files`: Events are stored in files in a tree architecture. You need to provide some information, such as the base path. +* `TCP`: Events are sent to a TCP server. You must specify a host and port. +* `Kafka`: Events are sent in a Kafka topic. You need to specify the name of the Kafka topic and the usual Kafka settings such as *bootstrap servers*. + +## Formats + +The OpenWEC server can parse each event and format it differently. 
There are currently two formatters available: +* `Raw`: as its name suggests, it does nothing to the events. It just writes raw XML data. *Warning: each event may contain EOL characters which are neither filtered nor transformed*. +* `Json`: format events in Json. Json schema is documented [there](formats.md). When using the `Json` formatter, OpenWEC parses XML events and is able to add useful data such as the Kerberos principal or the IP address that sent the event. + + + +## Bookmarks + +To achieve reliable delivery of events, Windows Event Forwarding uses a *bookmark* mechanism. A bookmark is a pointer to a location in the event stream of a Windows computer. The log forwarding service of a Windows computer sends a new bookmark with each event delivery. The *collector* server is responsible for persisting these *bookmarks* for each subscription and sending them during subscription enumeration. The Windows computer then sends all available events that match the *subscription* *query* since the last *bookmark*. + +When a subscription is created or a new computer starts sending its events, there are no bookmarks. The collector can choose to receive either all existing events matching filters and new events, or only new events (see `read_existing_events` parameter). + +OpenWEC needs a way to store these *bookmarks*: a database! + +## Database + +OpenWEC supports the use of two database storage systems: +* SQLite (on disk) +* PostgreSQL + +You need to configure one of them to run OpenWEC. *Subscriptions*, *bookmarks* and *heartbeats* are all stored in the database. + +SQLite is great for testing and simple environments. + +For redundancy and/or scaling, you will need to set up multiple OpenWEC nodes in different availability zones. To do this you will need to use an external database storage backend such as PostgreSQL. Note that OpenWEC's PostgreSQL client is optimised for use with [CockroachDB](https://github.com/cockroachdb/cockroach). 
+ +## Heartbeats + +To be able to distinguish between a lack of events and an outage, Windows machines are required to send real events periodically (see the `heartbeat_interval` parameter), or *heartbeats* if no events match. + +For each tuple `(subscription, host)`, OpenWEC stores several datetimes in its database: +* First event received +* Last event received +* Last heartbeat received + +This allows OpenWEC to display a summary of active (real event received "recently"), alive (heartbeat received "recently") and dead (nothing received "recently") hosts. diff --git a/doc/issues.md b/doc/issues.md new file mode 100644 index 0000000..9b84f43 --- /dev/null +++ b/doc/issues.md @@ -0,0 +1,65 @@ +# Known issues + +## Adding a new source to an existing query + +### Behavior + +When a source is added to the query of a subscription, machines send all existing events of this source regardless of the `read_existing_events` parameter. This behavior might create a huge network trafic (for example if you add the "Security" source...). + +### Explanation + +The behavior of "Bookmarks" is explained in DSP0226 10.2.6. + +When a subscription is created, there are no existing bookmarks for it. If `read_existing_event` parameter is set, OpenWEC sends the reserved bookmark `http://schemas.dmtf.org/wbem/wsman/1/wsman/bookmark/earliest`: +> If a subscription is received with this bookmark, the event source replays all possible events that match the filter and any events that subsequently occur for that event source. + +Otherwise, we don't send any bookmarks : +> The absence of any bookmark means "begin at the next available event". + +Let's suppose that the subscription query is updated __and__ we have existing bookmarks for this subscription. On a host enumeration, OpenWEC sends its stored bookmark. However, this bookmark will not contain information for the newly added sources. 
Windows event forwarder seems to interpret this lack of information as "replay all possible events that match the filter" for these sources. + +### Solution + +There is no known solutions. Users need to be aware of this behavior, which may be acceptable if the added source is "small", or rather create a brand new subscription with the new query (and maybe lose events during the transition). + +## Hunting rogue Windows Event Forwarder + +### Goal + +We would like to prevent a rogue machine, legitimately authenticated, to send events concerning other machines. Indeed, this could be used by an attacker to create fake events and maybe mislead defenders. + +### TLDR + +This seems difficult to achieve by OpenWEC. + +For now, the best we can do is to add metadata in the events that are formatted in JSON (raw formatter means _raw_, so no additions allowed): +- `OpenWEC.IpAddress`: contains the IP address of the machine who sent the event +- `OpenWEC.Principal`: contains the Kerberos principal of the machine who sent the event + +These informations may be post-processed later to search for "rogue" events. + +### Lack of link between events "Computer" and Kerberos principal + +The field `Computer` of an event seems to contain the `dNSHostName` or the Netbios name of the machine. During experiments, we have seen events with the two types of values for the same machine. In addition, we could not find any specifications of the content of this field. + +Furthermore, the Kerberos principal does not contain this information. It could be guessed by an heuristic that `MACHINE$@WINDOMAIN.LOCAL` should have a `Hostname` value of `machine.windomain.local` or `MACHINE`, but this is not reliable. We could use the Active Directory database to find the object of the computer and retrieve its `dNSHostName` attribute, but it would be costly and bring with it many other problems and undesirable behaviours. 
+ +### Intermediate Windows Event log forwarder + +In some environments, machines may send their events to an intermediate Windows Event Collector which would forward them to OpenWEC. In this situation, the forwarder, authenticated with his principal, would send events of other machines. + +To support this, we would need to enable users to configure a list of Kerberos principal with the ability to "impersonate" other machines (a little bit like unconstrained delegation in AD). + +# What is missing? + +## Limit subscriptions to a subset of machines + +In Microsoft implementation, subscriptions can be set to a group of machines. The only "clean" way to do this is to parse the PAC contained in the Kerberos service ticket provided by the machine, but this does not seem to be supported by GSSAPI. We could alternatively limit a subscription to a list of Kerberos principals. + +In any case, it won't work with an intermediate forwarder. + +## TLS support + +Windows clients support TLS for authentication and encryption. This is not currently supported by OpenWEC, but it should. For now OpenWEC only supports authentication and encryption with Kerberos. There are two possibilities in order to add the TLS support: +- implement TLS support within OpenWEC web server (powered by `hyper`). +- use a reverse proxy which handles TLS and then send cleartext messages to OpenWEC service. diff --git a/doc/monitoring.md b/doc/monitoring.md new file mode 100644 index 0000000..a954fa7 --- /dev/null +++ b/doc/monitoring.md @@ -0,0 +1,30 @@ +# Monitoring OpenWEC + +## Liveness statistics + +You should monitor OpenWEC to be sure that at any time you receive events from all your Windows machines. + +You can do that using `openwec stats`. 
+ +For each subscription, you will retrieve: +* subscription name +* subscription URI +* subscription UUID +* `since`: `now - heartbeat_interval` +* `active_machines_count`: count of machines for which at least one event has been received since `since` +* `alive_machines_count`: count of machines for which at least one heartbeat has been received since `since` +* `total_machines_count`: count of machines that have sent at least one event once. + +You may filter the output to only one subscription using `--subscription`. + +Two output formats are available: `text` (default) and `json` (use `--format`). + +## Heartbeats + +You may want to retrieve heartbeats data for a subscription (`--subscription`) or/and a hostname (`--hostname`) or/and an IP address (`--address`). For example, let's say we want to retrieve heartbeats data for `192.168.1.0` and subscription `my-test-subscription`. + +```bash +$ openwec heartbeats -a 192.168.1.0 -s my-test-subscription +``` + +Two formats are available: `text` (default) and `json` (`--format`). diff --git a/doc/outputs.md b/doc/outputs.md new file mode 100644 index 0000000..0e703a0 --- /dev/null +++ b/doc/outputs.md @@ -0,0 +1,151 @@ +# Outputs + +Outputs answer the question "*what should openwec do with collected events?*". For one subscription, you may configure multiple outputs. + +Each output is in fact composed of two elements: +1. an output type +2. a format + +The output type determines where the event will be sent or stored, whereas the format describes how it will be formatted. Formarts are described in [Formats](formats.md). + +When an event is received for one subscription, it must be processed successfully by all its outputs. If one output fails, for example if there is no space left on device for a `Files` type output, an error is returned to the client which will try to resend the event later. + +When OpenWEC server starts, it retrieves all currently active subscriptions from its database. 
For each subscription, every output is initialized. + +When a subscription is updated or reloaded, all its outputs instances are dropped and initialized again. + +Note: OpenWEC does not guarantee that an event will not be written multiple times. Indeed, if one output fails to write a batch of events, these events will not be acknowledged to the client that sent them and it will try to send them again later. + +## Commands + +For each subscription, you can manipulate its outputs using `openwec subscriptions edit outputs`. + +### `openwec subscriptions edit outputs` + +This command prints the current outputs of the subscription. + +#### Example + +``` +$ openwec subscriptions edit my-subscription outputs +0: Format: Json, Output: Files(FileConfiguration { base: "/var/events/", split_on_addr_index: None, append_node_name: false, filename: "messages" }) +1: Format: Json, Output: Tcp(dc.windomain.local:12000) +``` + +The subscription `my-subscription` has two outputs configured: +* the first one is a `Files` output using `Json` format. +* the second one is a `Tcp` output using `Json` format. + +The index number at the beginning of each line can be used to delete the corresponding output. + +### `openwec subscriptions edit outputs add` + +This command adds an output to a subscription. + +You must specify a format (see [Formats](formats.md)) and an output type (see below). + +#### Example + +``` +$ openwec subscriptions edit my-subscription outputs add --format json files [...] +``` + +This command adds a `Files` output using `Json` format. + +### `openwec subscriptions edit outputs delete` + +This command deletes an output of a subscription. + +You must specify the index of the output to delete, index shown in `openwec subscriptions edit outputs` command. + +##### Example + +``` +$ openwec subscriptions edit my-subscription outputs delete 0 +``` + +This command deletes the first output of the subscription `my-subscription`. 
+ + +## Output types + +### Files + +This output type stores events in files on the collector filesystem. + +For a given subscription, all events sent by a given Windows client will be stored in the following path: +``` +//[/]/ +``` +where: +* `base`: base path. It should be an absolute path. It must be configured in the output settings. +* `ip_path`: two formats can be configured in output settings: + * ``: only the Windows client IP address (default). + * The IP address of the client splitted on a given index to build a directory tree. For example, for an IPv4 address `A.B.C.D` and a split index equals to `1`, the resulting path will be `A/A.B/A.B.C/A.B.C.D`. +* `principal`: the Kerberos principal of the Windows client, without the `$` character. Example: `DC@WINDOMAIN.LOCAL`. +* `node_name` (optional): when you use a multi-node setup, you may want to add the node's name in the path. The node's name is configured in server settings, but you can choose to add it or not in each output settings. +* `filename`: the name of the file, configured in each output settings. It defaults to `messages`. + +When a `Files` output is initialized, it creates a blank hash table which will contains openned file descriptors. Therefore, each file is openned once. + +You may want to tell OpenWEC to close all its file descriptors and to open them again. This can be done using `openwec subscriptions reload `: the subscription outputs will be reloaded at the next "subscriptions reload" tick. You may want to reload subscriptions immediatly by sending a `SIGHUP` signal to `openwecd` process after executing the `openwec subscriptions reload` command. 
+ +#### Examples + +* Store events in `/var/events///messages` for subscription `my-subscription`: + +``` +$ openwec subscriptions edit my-subscription outputs add --format files /var/events/ +``` + +* With ` = A.B.C.D`, store events in `/var/events/A.B.C/A.B.C.D//messages` for subscription `my-subscription`: + +``` +$ openwec subscriptions edit my-subscription outputs add --format files /var/events/ --split-on-addr-index 3 +``` + +* With ` = A.B.C.D`, store events in `/var/events/A.B/A.B.C/A.B.C.D//my-events` for subscription `my-subscription`: + +``` +$ openwec subscriptions edit my-subscription outputs add --format files /var/events/ --split-on-addr-index 2 --filename my-events +``` + +* With ` = A.B.C.D`, store events in `/var/events/A/A.B/A.B.C/A.B.C.D///my-events` for subscription `my-subscription`: + +``` +$ openwec subscriptions edit my-subscription outputs add --format files /var/events/ --split-on-addr-index 1 --filename my-events --append-node-name +``` + +### Kafka + +This output type sends events in a Kafka topic. + +For a given subscription, all events will be sent in the configured Kafka topic. You may want to add additionnal options to the inner Kafka client, such as `bootstrap.servers`. + +#### Examples + +* Send events to a Kafka cluster with two bootstrap servers `kafka1:9092` and `kafka2:9092` in topic `my-topic`: + +``` +$ openwec subscriptions edit my-subscription outputs add --format kafka my-topic -o bootstrap.servers kafka1:9092,kafka2:9092 +``` + +### TCP + +This output type send events in a "raw" TCP connection. + +The TCP connection is established when the first event has to be sent. It is kept openned as long as possible, and re-established if required. + +You must provide an IP address or a hostname and a port to connect to. 
+ +#### Examples + +* Send events to a TCP server `my.server.windomain.local` using port `12000`: + +``` +$ openwec subscriptions edit my-subscription outputs add --format tcp my.server.windomain.local 12000 +``` + +## How to add a new output type ? + +TODO diff --git a/doc/protocol.md b/doc/protocol.md new file mode 100644 index 0000000..0722541 --- /dev/null +++ b/doc/protocol.md @@ -0,0 +1,1911 @@ +# Analysis of the Event Forwarding protocol in Push mode + + + + + +- [Lab](#lab) +- [Step by step](#step-by-step) + - [The client connects in TCP to srv.windomain.local:5985 (source port: 65091)](#the-client-connects-in-tcp-to-srv.windomain.local5985-source-port-65091) + - [The client sends a HTTP POST request](#the-client-sends-a-http-post-request) + - [The collector authenticates the client and sends its response](#the-collector-authenticates-the-client-and-sends-its-response) + - [The client sends a `Enumerate` request](#the-client-sends-a-enumerate-request) + - [The collector answers "EnumerateResponse"](#the-collector-answers-enumerateresponse) + - [The client sends an `End` request](#the-client-sends-an-end-request) + - [The collector sends back "No Content"](#the-collector-sends-back-no-content) + - [The client closes the TCP connection](#the-client-closes-the-tcp-connection) + - [The client opens a new TCP connection (source port 65092)](#the-client-opens-a-new-tcp-connection-source-port-65092) + - [The client sends a POST request to the URL found in `DeliveryTo/Address` and authenticate in Kerberos](#the-client-sends-a-post-request-to-the-url-found-in-deliverytoaddress-and-authenticate-in-kerberos) + - [The collector validates the authentication](#the-collector-validates-the-authentication) + - [The client sends a Heartbeat (may not always happen)](#the-client-sends-a-heartbeat-may-not-always-happen) + - [The collector acknowledges](#the-collector-acknowledges) + - [The client sends a POST request containing a batch of 
events](#the-client-sends-a-post-request-containing-a-batch-of-events) + - [The collector acknowledges](#the-collector-acknowledges) + - [And so on...](#and-so-on) + - [The client can end the subscription](#the-client-can-end-the-subscription) +- [Side note](#side-note) + + + +We analysed the protocol in Push mode. In this mode the client connects to the collector to send it its event. + +Documentation: +- [Web Services For Management (WS-Management) Specification (DSP0226)](https://www.dmtf.org/sites/default/files/standards/documents/DSP0226_1.0.0.pdf). Some interesting sections: + - 5 Addressing + - 8 WS-Enumeration + - 10 Eventing + - 12 Security + - 13 Transports and Message Encoding +- [MS-WSMV (Web Services Management Protocol Extensions for Windows Vista)](https://winprotocoldoc.blob.core.windows.net/productionwindowsarchives/MS-WSMV/%5BMS-WSMV%5D.pdf) which is a Microsoft WS-Management extension. The main interesting part is: + - 3.1.4.1.30 - Subscription + +## Lab + +For this analysis we used: + +- an Active Directory domain with one domain controller + - see +- a Windows Event Collector server + - see +- a Windows machine configured to send its logs to a Windows Event Collector + - see as well + +For our analysis we used the following client configuration: + +``` +[HKEY_LOCAL_MACHINE\SOFTWARE\Policies\Microsoft\Windows\EventLog\EventForwarding\SubscriptionManager] +"1"="Server=HTTP://srv.windomain.local:5985/wsman/SubscriptionManager/WEC,Refresh=3" +``` + +## Step by step + +When our test environment was ready, we captured every traffic between the Windows machine and the Windows Event collector server. + +Here is what we saw step by step. 
+ +### The client connects in TCP to srv.windomain.local:5985 (source port: 65091) + +### The client sends a HTTP POST request + +``` +Frame 3730: 90 bytes on wire (720 bits), 90 bytes captured (720 bits) on interface \Device\NPF_{D0B586C7-BCDD-4989-9F90-8AD183BA1268}, id 0 +Ethernet II, Src: PcsCompu_a6:35:47 (08:00:27:a6:35:47), Dst: PcsCompu_62:f7:34 (08:00:27:62:f7:34) +Internet Protocol Version 4, Src: 192.168.58.100, Dst: 192.168.58.103 +Transmission Control Protocol, Src Port: 65091, Dst Port: 5985, Seq: 2921, Ack: 1, Len: 36 +[3 Reassembled TCP Segments (2956 bytes): #3728(1460), #3729(1460), #3730(36)] +Hypertext Transfer Protocol + POST /wsman/SubscriptionManager/WEC HTTP/1.1\r\n + Connection: Keep-Alive\r\n + Content-Type: application/soap+xml;charset=UTF-16\r\n + [truncated]Authorization: Kerberos YIIH9AYJKoZIhvcSAQICAQBuggfjMIIH36ADAgEFoQMCAQ6iBwMFACAAAACjggYOYYIGCjCCBgagAwIBBaERGw9XSU5ET01BSU4uTE9DQUyiJjAkoAMCAQKhHTAbGwRIVFRQGxNzcnYud2luZG9tYWluLmxvY2Fso4IFwjCCBb6gAwIBEqEDAgEFooIFsASCBazWEk8V/B + User-Agent: Microsoft WinRM Client\r\n + Content-Length: 0\r\n + Host: srv.windomain.local:5985\r\n + \r\n + [Full request URI: http://srv.windomain.local:5985/wsman/SubscriptionManager/WEC] + [HTTP request 1/3] + [Response in frame: 3732] + [Next request in frame: 3736] +``` + +Within the `Authorization` blob we find a `KRB_AP_REQ`: + +``` + [truncated]Authorization: Kerberos YIIH9AYJKoZIhvcSAQICAQBuggfjMIIH36ADAgEFoQMCAQ6iBwMFACAAAACjggYOYYIGCjCCBgagAwIBBaERGw9XSU5ET01BSU4uTE9DQUyiJjAkoAMCAQKhHTAbGwRIVFRQGxNzcnYud2luZG9tYWluLmxvY2Fso4IFwjCCBb6gAwIBEqEDAgEFooIFsASCBazWEk8V/B + GSS-API Generic Security Service Application Program Interface + OID: 1.2.840.113554.1.2.2 (KRB5 - Kerberos 5) + krb5_blob: 01006e8207e3308207dfa003020105a10302010ea20703050020000000a382060e618206… + krb5_tok_id: KRB5_AP_REQ (0x0001) + Kerberos + ap-req + pvno: 5 + msg-type: krb-ap-req (14) + Padding: 0 + ap-options: 20000000 + 0... .... = reserved: False + .0.. .... 
= use-session-key: False + ..1. .... = mutual-required: True + ticket + tkt-vno: 5 + realm: WINDOMAIN.LOCAL + sname + name-type: kRB5-NT-SRV-INST (2) + sname-string: 2 items + SNameString: HTTP + SNameString: srv.windomain.local + enc-part + etype: eTYPE-AES256-CTS-HMAC-SHA1-96 (18) + kvno: 5 + cipher: d6124f15fc1864ea02cf6a5d15351620f470aaaa1059d073f17533c1f68c8926a0122380… + Decrypted keytype 18 usage 2 using keytab principal SRV$@WINDOMAIN.LOCAL (id=keytab.2 same=0) (b589d323...) + encTicketPart + Padding: 0 + flags: 40a10000 + 0... .... = reserved: False + .1.. .... = forwardable: True + ..0. .... = forwarded: False + ...0 .... = proxiable: False + .... 0... = proxy: False + .... .0.. = may-postdate: False + .... ..0. = postdated: False + .... ...0 = invalid: False + 1... .... = renewable: True + .0.. .... = initial: False + ..1. .... = pre-authent: True + ...0 .... = hw-authent: False + .... 0... = transited-policy-checked: False + .... .0.. = ok-as-delegate: False + .... ..0. = unused: False + .... ...1 = enc-pa-rep: True + 0... .... = anonymous: False + key + Learnt encTicketPart_key keytype 18 (id=3730.1) (3510bcf8...) + keytype: 18 + keyvalue: 3510bcf8db17bb605f95b3cd36f44a4e2a8e125b5c36bd3281cf0789401f95f7 + crealm: WINDOMAIN.LOCAL + cname + name-type: kRB5-NT-PRINCIPAL (1) + cname-string: 1 item + CNameString: WIN10$ + transited + tr-type: 1 + contents: + authtime: 2022-09-22 08:03:20 (UTC) + starttime: 2022-09-22 08:05:18 (UTC) + endtime: 2022-09-22 18:03:20 (UTC) + renew-till: 2022-09-29 08:03:20 (UTC) + authorization-data: 2 items + AuthorizationData item + ad-type: aD-IF-RELEVANT (1) + ad-data: 3082044a30820446a00402020080a182043c048204380900000000000000010000000802… + AuthorizationData item + ad-type: aD-WIN2K-PAC (128) + ad-data: 0900000000000000010000000802000098000000000000000e000000b8000000a0020000… + Verified Server checksum 16 keytype 18 using keytab principal SRV$@WINDOMAIN.LOCAL (id=keytab.2 same=0) (b589d323...) 
+ Num Entries: 9 + Version: 0 + Type: Logon Info (1) + Size: 520 + Offset: 152 + PAC_LOGON_INFO: 01100800ccccccccf801000000000000000002005c85b7c759ced801ffffffffffffff7f… + MES header + Version: 1 + DREP + Byte order: Little-endian (1) + HDR Length: 8 + Fill bytes: 0xcccccccc + Blob Length: 504 + PAC_LOGON_INFO: + Referent ID: 0x00020000 + Logon Time: Sep 22, 2022 10:03:20.553404400 CEST + Logoff Time: Infinity (absolute time) + Kickoff Time: Infinity (absolute time) + PWD Last Set: Sep 21, 2022 14:43:14.781129900 CEST + PWD Can Change: Sep 21, 2022 14:43:14.781129900 CEST + PWD Must Change: Infinity (absolute time) + Acct Name: WIN10$ + Length: 12 + Size: 12 + Character Array: WIN10$ + Referent ID: 0x00020004 + Max Count: 6 + Offset: 0 + Actual Count: 6 + Acct Name: WIN10$ + Full Name + Length: 0 + Size: 0 + Character Array + Referent ID: 0x00020008 + Max Count: 0 + Offset: 0 + Actual Count: 0 + Logon Script + Length: 0 + Size: 0 + Character Array + Referent ID: 0x0002000c + Max Count: 0 + Offset: 0 + Actual Count: 0 + Profile Path + Length: 0 + Size: 0 + Character Array + Referent ID: 0x00020010 + Max Count: 0 + Offset: 0 + Actual Count: 0 + Home Dir + Length: 0 + Size: 0 + Character Array + Referent ID: 0x00020014 + Max Count: 0 + Offset: 0 + Actual Count: 0 + Dir Drive + Length: 0 + Size: 0 + Character Array + Referent ID: 0x00020018 + Max Count: 0 + Offset: 0 + Actual Count: 0 + Logon Count: 175 + Bad PW Count: 0 + User RID: 1105 + Group RID: 515 + Num RIDs: 1 + GroupIDs + Referent ID: 0x0002001c + Max Count: 1 + GROUP_MEMBERSHIP: + Group RID: 515 + Attributes: 0x00000007 + .... .... .... .... .... .... .... .1.. = Enabled: The enabled bit is SET + .... .... .... .... .... .... .... ..1. = Enabled By Default: The ENABLED_BY_DEFAULT bit is SET + .... .... .... .... .... .... .... ...1 = Mandatory: The MANDATORY bit is SET + User Flags: 0x00000020 + .... .... .... .... .... ..0. .... .... = Resource Groups: The resource_groups is NOT set + .... .... .... .... 
.... .... ..1. .... = Extra SIDs: The EXTRA_SIDS bit is SET + User Session Key: 00000000000000000000000000000000 + Server: DC + Length: 4 + Size: 6 + Character Array: DC + Referent ID: 0x00020020 + Max Count: 3 + Offset: 0 + Actual Count: 2 + Server: DC + Domain: WINDOMAIN + Length: 18 + Size: 20 + Character Array: WINDOMAIN + Referent ID: 0x00020024 + Max Count: 10 + Offset: 0 + Actual Count: 9 + Domain: WINDOMAIN + SID pointer: + SID pointer + Referent ID: 0x00020028 + Count: 4 + Domain SID: S-1-5-21-3597817948-3716833002-415491962 (Domain SID) + Revision: 1 + Num Auth: 4 + Authority: 5 + Subauthorities: 21-3597817948-3716833002-415491962 + Dummy1 Long: 0x00000000 + Dummy2 Long: 0x00000000 + User Account Control: 0x00000080 + .... .... .... ...0 .... .... .... .... = Don't Require PreAuth: This account REQUIRES preauthentication + .... .... .... .... 0... .... .... .... = Use DES Key Only: This account does NOT have to use_des_key_only + .... .... .... .... .0.. .... .... .... = Not Delegated: This might have been delegated + .... .... .... .... ..0. .... .... .... = Trusted For Delegation: This account is NOT trusted_for_delegation + .... .... .... .... ...0 .... .... .... = SmartCard Required: This account does NOT require_smartcard to authenticate + .... .... .... .... .... 0... .... .... = Encrypted Text Password Allowed: This account does NOT allow encrypted_text_password + .... .... .... .... .... .0.. .... .... = Account Auto Locked: This account is NOT auto_locked + .... .... .... .... .... ..0. .... .... = Don't Expire Password: This account might expire_passwords + .... .... .... .... .... ...0 .... .... = Server Trust Account: This account is NOT a server_trust_account + .... .... .... .... .... .... 1... .... = Workstation Trust Account: This account is a WORKSTATION_TRUST_ACCOUNT + .... .... .... .... .... .... .0.. .... = Interdomain trust Account: This account is NOT an interdomain_trust_account + .... .... .... .... .... .... ..0. .... 
= MNS Logon Account: This account is NOT a mns_logon_account + .... .... .... .... .... .... ...0 .... = Normal Account: This account is NOT a normal_account + .... .... .... .... .... .... .... 0... = Temp Duplicate Account: This account is NOT a temp_duplicate_account + .... .... .... .... .... .... .... .0.. = Password Not Required: This account REQUIRES a password + .... .... .... .... .... .... .... ..0. = Home Directory Required: This account does NOT require_home_directory + .... .... .... .... .... .... .... ...0 = Account Disabled: This account is NOT disabled + Dummy4 Long: 0x00000000 + Dummy5 Long: 0x00000000 + Dummy6 Long: 0x00000000 + Dummy7 Long: 0x00000000 + Dummy8 Long: 0x00000000 + Dummy9 Long: 0x00000000 + Dummy10 Long: 0x00000000 + Num Extra SID: 3 + SID_AND_ATTRIBUTES_ARRAY: + Referent ID: 0x0002002c + SID_AND_ATTRIBUTES array: + Max Count: 3 + SID_AND_ATTRIBUTES: + SID pointer: + SID pointer + Referent ID: 0x00020030 + Count: 5 + Domain SID: S-1-5-21-0-0-0-497 (Domain SID-Domain RID) + Revision: 1 + Num Auth: 5 + Authority: 5 + Subauthorities: 21-0-0-0-497 + RID: 497 (Domain RID) + Attributes: 0x00000007 + SID_AND_ATTRIBUTES: + SID pointer: + SID pointer + Referent ID: 0x00020034 + Count: 1 + Domain SID: S-1-18-1 (Authentication Authority Asserted Identity) + Revision: 1 + Num Auth: 1 + Authority: 18 + Subauthorities: 1 + Attributes: 0x00000007 + SID_AND_ATTRIBUTES: + SID pointer: + SID pointer + Referent ID: 0x00020038 + Count: 5 + Domain SID: S-1-5-21-0-0-0-496 (Domain SID-Domain RID) + Revision: 1 + Num Auth: 5 + Authority: 5 + Subauthorities: 21-0-0-0-496 + RID: 496 (Domain RID) + Attributes: 0x00000007 + ResourceGroupIDs + SID pointer: + NULL Pointer: SID pointer + ResourceGroup count: 0 + NULL Pointer: GroupIDs + Type: Device Info (14) + Size: 184 + Offset: 672 + PAC_DEVICE_INFO: 01100800cccccccca8000000000000000000020051040000030200000400020001000000… + MES header + Version: 1 + DREP + Byte order: Little-endian (1) + HDR Length: 8 + Fill 
bytes: 0xcccccccc + Blob Length: 168 + PAC_DEVICE_INFO: + Referent ID: 0x00020000 + User RID: 1105 + Group RID: 515 + SID pointer: + SID pointer + Referent ID: 0x00020004 + Count: 4 + Domain SID: S-1-5-21-3597817948-3716833002-415491962 (Domain SID) + Revision: 1 + Num Auth: 4 + Authority: 5 + Subauthorities: 21-3597817948-3716833002-415491962 + AccountDomainGroup count: 1 + AccountDomainGroupIds + Referent ID: 0x00020008 + Max Count: 1 + GROUP_MEMBERSHIP: + Group RID: 515 + Attributes: 0x00000007 + .... .... .... .... .... .... .... .1.. = Enabled: The enabled bit is SET + .... .... .... .... .... .... .... ..1. = Enabled By Default: The ENABLED_BY_DEFAULT bit is SET + .... .... .... .... .... .... .... ...1 = Mandatory: The MANDATORY bit is SET + Num Extra SID: 1 + ExtraSids:SID_AND_ATTRIBUTES_ARRAY: + Referent ID: 0x0002000c + SID_AND_ATTRIBUTES array: + Max Count: 1 + SID_AND_ATTRIBUTES: + SID pointer: + SID pointer + Referent ID: 0x00020010 + Count: 1 + Domain SID: S-1-18-1 (Authentication Authority Asserted Identity) + Revision: 1 + Num Auth: 1 + Authority: 18 + Subauthorities: 1 + Attributes: 0x00000007 + ExtraDomain Membership Array + Membership Domains count: 1 + ExtraDomain Membership Array + Referent ID: 0x00020014 + Max Count: 1 + DomainGroupIDs + SID pointer: + SID pointer + Referent ID: 0x00020018 + Count: 4 + Domain SID: S-1-5-21-0-0-0 (Domain SID) + Revision: 1 + Num Auth: 4 + Authority: 5 + Subauthorities: 21-0-0-0 + DomainGroup count: 1 + GroupIDs + Referent ID: 0x0002001c + Max Count: 1 + GROUP_MEMBERSHIP: + Group RID: 497 + Attributes: 0x00000007 + .... .... .... .... .... .... .... .1.. = Enabled: The enabled bit is SET + .... .... .... .... .... .... .... ..1. = Enabled By Default: The ENABLED_BY_DEFAULT bit is SET + .... .... .... .... .... .... .... 
...1 = Mandatory: The MANDATORY bit is SET + Type: Client Claims Info (13) + Size: 0 + Offset: 856 + Type: Device Claims Info (15) + Size: 0 + Offset: 856 + Type: Client Info Type (10) + Size: 22 + Offset: 856 + PAC_CLIENT_INFO_TYPE: 001463c759ced8010c00570049004e00310030002400 + ClientID: Sep 22, 2022 10:03:20.000000000 CEST + Name Length: 12 + Name: WIN10$ + Type: UPN DNS Info (12) + Size: 152 + Offset: 880 + UPN_DNS_INFO: 2c0018001e004800030000000c0068001c00780000000000570049004e00310030002400… + UPN Len: 44 + UPN Offset: 24 + DNS Len: 30 + DNS Offset: 72 + Flags: 0x00000003 + UPN Name: WIN10$@windomain.local + DNS Name: WINDOMAIN.LOCAL + Type: Server Checksum (6) + Size: 16 + Offset: 1032 + PAC_SERVER_CHECKSUM: 100000000a21114e8d414728c22c1d70 + Type: 16 + Signature: 0a21114e8d414728c22c1d70 + Type: Privsvr Checksum (7) + Size: 16 + Offset: 1048 + PAC_PRIVSVR_CHECKSUM: 10000000489a91bb22cfa8a37c9280c2 + Type: 16 + Signature: 489a91bb22cfa8a37c9280c2 + Type: Ticket Checksum (16) + Size: 16 + Offset: 1064 + PAC_TICKET_CHECKSUM: 10000000751b39b96bc824a0f97d5876 + Type: 16 + Signature: 751b39b96bc824a0f97d5876 + AuthorizationData item + ad-type: aD-IF-RELEVANT (1) + ad-data: 305d303fa0040202008da137043530333031a003020100a12a04280000000000400000fe… + AuthorizationData item + ad-type: aD-TOKEN-RESTRICTIONS (141) + ad-data: 30333031a003020100a12a04280000000000400000fe4c6a1f9ca2986dae0a7734e70f88… + restriction-type: 0 + restriction: 0000000000400000fe4c6a1f9ca2986dae0a7734e70f88d17f92e0c31e2b578290e7dae6… + AuthorizationData item + ad-type: aD-LOCAL (142) + ad-data: 707c2305e3010000d5ed010000000000 + authenticator + etype: eTYPE-AES256-CTS-HMAC-SHA1-96 (18) + cipher: f258234f7f1156040f21bcb87650241ff2356fb9b90b88d4da2c421330d9ca1b265bcac8… + Decrypted keytype 18 usage 11 using learnt encTicketPart_key in frame 3714 (id=3714.1 same=41) (3510bcf8...) 
+ authenticator + authenticator-vno: 5 + crealm: WINDOMAIN.LOCAL + cname + name-type: kRB5-NT-PRINCIPAL (1) + cname-string: 1 item + CNameString: WIN10$ + cksum + cksumtype: cKSUMTYPE-GSSAPI (32771) + checksum: 10000000000000000000000000000000000000003e000000 + Length: 16 + Bnd: 00000000000000000000000000000000 + .... .... .... .... ...0 .... .... .... = DCE-style: Not using DCE-STYLE + .... .... .... .... .... .... ..1. .... = Integ: Integrity protection (signing) may be invoked + .... .... .... .... .... .... ...1 .... = Conf: Confidentiality (sealing) may be invoked + .... .... .... .... .... .... .... 1... = Sequence: Enable Out-of-sequence detection for sign or sealed messages + .... .... .... .... .... .... .... .1.. = Replay: Enable replay protection for signed or sealed messages + .... .... .... .... .... .... .... ..1. = Mutual: Request that remote peer authenticates itself + .... .... .... .... .... .... .... ...0 = Deleg: Do NOT delegate + cusec: 49 + ctime: 2022-09-22 08:05:18 (UTC) + subkey + Learnt authenticator_subkey keytype 18 (id=3730.2) (d992625b...) + keytype: 18 + keyvalue: d992625b56544efa41daff864c59dfa71ccdeba53aed7122d8aeee3b71c56d15 + seq-number: 267182760 + authorization-data: 1 item + AuthorizationData item + ad-type: aD-IF-RELEVANT (1) + ad-data: 3081c9303fa0040202008da137043530333031a003020100a12a04280000000000400000… + AuthorizationData item + ad-type: aD-TOKEN-RESTRICTIONS (141) + ad-data: 30333031a003020100a12a04280000000000400000fe4c6a1f9ca2986dae0a7734e70f88… + restriction-type: 0 + restriction: 0000000000400000fe4c6a1f9ca2986dae0a7734e70f88d17f92e0c31e2b578290e7dae6… + AuthorizationData item + ad-type: aD-LOCAL (142) + ad-data: 707c2305e3010000d5ed010000000000 + AuthorizationData item + ad-type: aD-AP-OPTIONS (143) + ad-data: 00400000 + AD-AP-Options: 0x00004000, ChannelBindings + .... .... .... .... .1.. .... .... .... 
= ChannelBindings: Set + AuthorizationData item + ad-type: aD-TARGET-PRINCIPAL (144) + ad-data: 48005400540050002f007300720076002e00770069006e0064006f006d00610069006e00… + Target Principal: HTTP/srv.windomain.local@WINDOMAIN.LOCAL + Provides learnt encTicketPart_key in frame 3730 keytype 18 (id=3730.1 same=0) (3510bcf8...) + Provides learnt authenticator_subkey in frame 3730 keytype 18 (id=3730.2 same=0) (d992625b...) + Used keytab principal SRV$@WINDOMAIN.LOCAL keytype 18 (id=keytab.2 same=0) (b589d323...) + Used learnt encTicketPart_key in frame 3714 keytype 18 (id=3714.1 same=41) (3510bcf8...) +``` + +### The collector authenticates the client and sends its response + +``` +Frame 3732: 395 bytes on wire (3160 bits), 395 bytes captured (3160 bits) on interface \Device\NPF_{D0B586C7-BCDD-4989-9F90-8AD183BA1268}, id 0 +Ethernet II, Src: PcsCompu_62:f7:34 (08:00:27:62:f7:34), Dst: PcsCompu_a6:35:47 (08:00:27:a6:35:47) +Internet Protocol Version 4, Src: 192.168.58.103, Dst: 192.168.58.100 +Transmission Control Protocol, Src Port: 5985, Dst Port: 65091, Seq: 1, Ack: 2957, Len: 341 +Hypertext Transfer Protocol + HTTP/1.1 200 \r\n + WWW-Authenticate: Kerberos YIGXBgkqhkiG9xIBAgICAG+BhzCBhKADAgEFoQMCAQ+ieDB2oAMCARKibwRtyHBO/0Ej2ORfOEzDCqgm3IaPELAcqCgJ62zkjooYA357Cq6E79YS7CgATl0SCCCCMXNiNzMpBB9aveT7rBbEr6zvG8wo76kiQalRVuBZF0MmmHArk8sIpckkSsxsHFYvfPr5XNY7YSp2U2V3zw==\r\n + Server: Microsoft-HTTPAPI/2.0\r\n + Date: Thu, 22 Sep 2022 08:05:18 GMT\r\n + Content-Length: 0\r\n + \r\n + [HTTP response 1/3] + [Time since request: 0.005113000 seconds] + [Request in frame: 3730] + [Next request in frame: 3736] + [Next response in frame: 3738] + [Request URI: http://srv.windomain.local:5985/wsman/SubscriptionManager/WEC] +``` + +The Kerberos blob is a `KRB5_AP_REP`: + +``` +WWW-Authenticate: Kerberos 
YIGXBgkqhkiG9xIBAgICAG+BhzCBhKADAgEFoQMCAQ+ieDB2oAMCARKibwRtyHBO/0Ej2ORfOEzDCqgm3IaPELAcqCgJ62zkjooYA357Cq6E79YS7CgATl0SCCCCMXNiNzMpBB9aveT7rBbEr6zvG8wo76kiQalRVuBZF0MmmHArk8sIpckkSsxsHFYvfPr5XNY7YSp2U2V3zw==\r\n + GSS-API Generic Security Service Application Program Interface + OID: 1.2.840.113554.1.2.2 (KRB5 - Kerberos 5) + krb5_blob: 02006f8187308184a003020105a10302010fa2783076a003020112a26f046dc8704eff41… + krb5_tok_id: KRB5_AP_REP (0x0002) + Kerberos + ap-rep + pvno: 5 + msg-type: krb-ap-rep (15) + enc-part + etype: eTYPE-AES256-CTS-HMAC-SHA1-96 (18) + cipher: c8704eff4123d8e45f384cc30aa826dc868f10b01ca82809eb6ce48e8a18037e7b0aae84… + Decrypted keytype 18 usage 12 using learnt encTicketPart_key in frame 3714 (id=3714.1 same=41) (3510bcf8...) + encAPRepPart + ctime: 2022-09-22 08:05:18 (UTC) + cusec: 49 + subkey + Learnt encAPRepPart_subkey keytype 18 (id=3732.1) (c77374ba...) + keytype: 18 + keyvalue: c77374bac25c16eb95cc99b2945788ca0da111af2aba4dba568752ffe25fd0d5 + seq-number: 261443413 + Provides learnt encAPRepPart_subkey in frame 3732 keytype 18 (id=3732.1 same=0) (c77374ba...) + Used learnt encTicketPart_key in frame 3714 keytype 18 (id=3714.1 same=41) (3510bcf8...) 
+``` + +### The client sends a `Enumerate` request + +The request body is encrypted using Kerberos: + +``` +Frame 3736: 676 bytes on wire (5408 bits), 676 bytes captured (5408 bits) on interface \Device\NPF_{D0B586C7-BCDD-4989-9F90-8AD183BA1268}, id 0 +Ethernet II, Src: PcsCompu_a6:35:47 (08:00:27:a6:35:47), Dst: PcsCompu_62:f7:34 (08:00:27:62:f7:34) +Internet Protocol Version 4, Src: 192.168.58.100, Dst: 192.168.58.103 +Transmission Control Protocol, Src Port: 65091, Dst Port: 5985, Seq: 6159, Ack: 342, Len: 622 +[4 Reassembled TCP Segments (3824 bytes): #3733(282), #3734(1460), #3735(1460), #3736(622)] +Hypertext Transfer Protocol + POST /wsman/SubscriptionManager/WEC HTTP/1.1\r\n + [Expert Info (Chat/Sequence): POST /wsman/SubscriptionManager/WEC HTTP/1.1\r\n] + Request Method: POST + Request URI: /wsman/SubscriptionManager/WEC + Request Version: HTTP/1.1 + Connection: Keep-Alive\r\n + Content-Type: multipart/encrypted;protocol="application/HTTP-Kerberos-session-encrypted";boundary="Encrypted Boundary"\r\n + User-Agent: Microsoft WinRM Client\r\n + Content-Length: 3542\r\n + Host: srv.windomain.local:5985\r\n + \r\n + [Full request URI: http://srv.windomain.local:5985/wsman/SubscriptionManager/WEC] + [HTTP request 2/3] + [Prev request in frame: 3730] + [Response in frame: 3738] + [Next request in frame: 3755] + File Data: 3542 bytes +MIME Multipart Media Encapsulation, Type: multipart/encrypted, Boundary: "Encrypted Boundary" + [Type: multipart/encrypted] + First boundary: --Encrypted Boundary\r\n + Encapsulated multipart part: (application/http-kerberos-session-encrypted) + Content-Type: application/HTTP-Kerberos-session-encrypted\r\n + OriginalContent: type=application/soap+xml;charset=UTF-16;Length=3240 + Boundary: \r\n--Encrypted Boundary\r\n + Encapsulated multipart part: (application/octet-stream) + Content-Type: application/octet-stream\r\n + Length of security token: 60 + GSS-API Generic Security Service Application Program Interface + krb5_blob: 
050406ff0000001c000000000fece2a8f629bb3f3c1bf9dfb3bc040b5f0cb9e637f383ce… + krb5_tok_id: KRB_TOKEN_CFX_WRAP (0x0405) + krb5_cfx_flags: 0x06, AcceptorSubkey, Sealed + .... .1.. = AcceptorSubkey: Set + .... ..1. = Sealed: Set + .... ...0 = SendByAcceptor: Not set + krb5_filler: ff + krb5_cfx_ec: 0 + krb5_cfx_rrc: 28 + krb5_cfx_seq: 267182760 + krb5_sgn_cksum: f629bb3f3c1bf9dfb3bc040b5f0cb9e637f383ce4db4c8a9dd4c37dc84317411768d7d3f… + Decrypted keytype 18 usage 24 using learnt encAPRepPart_subkey in frame 3732 (id=3732.1 same=0) (c77374ba...) + Media Type + Media type: application (3240 bytes) + Last boundary: --Encrypted Boundary--\r\n +``` + +If we provide a keytab of the WEC server, Wireshark is able to give us the cleartext of the body: + +``` +fffe3c0073003a0045006e00760065006c006f0070006500200078006d006c006e0073003a0073003d00220068007400740070003a002f002f007700770077002e00770033002e006f00720067002f0032003000300033002f00300035002f0073006f00610070002d0065006e00760065006c006f00700065002200200078006d006c006e0073003a0061003d00220068007400740070003a002f002f0073006300680065006d00610073002e0078006d006c0073006f00610070002e006f00720067002f00770073002f0032003000300034002f00300038002f00610064006400720065007300730069006e0067002200200078006d006c006e0073003a006e003d00220068007400740070003a002f002f0073006300680065006d00610073002e0078006d006c0073006f00610070002e006f00720067002f00770073002f0032003000300034002f00300039002f0065006e0075006d00650072006100740069006f006e002200200078006d006c006e0073003a0077003d00220068007400740070003a002f002f0073006300680065006d00610073002e0064006d00740066002e006f00720067002f007700620065006d002f00770073006d0061006e002f0031002f00770073006d0061006e002e007800730064002200200078006d006c006e0073003a0070003d00220068007400740070003a002f002f0073006300680065006d00610073002e006d006900630072006f0073006f00660074002e0063006f006d002f007700620065006d002f00770073006d0061006e002f0031002f00770073006d0061006e002e007800730064002200200078006d006c006e0073003a0062003d0022006800
7400740070003a002f002f0073006300680065006d00610073002e0064006d00740066002e006f00720067002f007700620065006d002f00770073006d0061006e002f0031002f00630069006d00620069006e00640069006e0067002e0078007300640022003e003c0073003a004800650061006400650072003e003c0061003a0054006f003e0068007400740070003a002f002f007300720076002e00770069006e0064006f006d00610069006e002e006c006f00630061006c003a0035003900380035002f00770073006d0061006e002f0053007500620073006300720069007000740069006f006e004d0061006e0061006700650072002f005700450043003c002f0061003a0054006f003e003c0077003a005200650073006f007500720063006500550052004900200073003a006d0075007300740055006e006400650072007300740061006e0064003d002200740072007500650022003e0068007400740070003a002f002f0073006300680065006d00610073002e006d006900630072006f0073006f00660074002e0063006f006d002f007700620065006d002f00770073006d0061006e002f0031002f0053007500620073006300720069007000740069006f006e004d0061006e0061006700650072002f0053007500620073006300720069007000740069006f006e003c002f0077003a005200650073006f0075007200630065005500520049003e003c006d003a004d0061006300680069006e00650049004400200078006d006c006e0073003a006d003d00220068007400740070003a002f002f0073006300680065006d00610073002e006d006900630072006f0073006f00660074002e0063006f006d002f007700620065006d002f00770073006d0061006e002f0031002f006d0061006300680069006e006500690064002200200073003a006d0075007300740055006e006400650072007300740061006e0064003d002200660061006c007300650022003e00770069006e00310030002e00770069006e0064006f006d00610069006e002e006c006f00630061006c003c002f006d003a004d0061006300680069006e006500490044003e003c0061003a005200650070006c00790054006f003e003c0061003a004100640064007200650073007300200073003a006d0075007300740055006e006400650072007300740061006e0064003d002200740072007500650022003e0068007400740070003a002f002f0073006300680065006d00610073002e0078006d006c0073006f00610070002e006f00720067002f00770073002f0032003000300034002f00300038002f00610064006400720065007300730069006e0067002f0072006f006c0065002f00
61006e006f006e0079006d006f00750073003c002f0061003a0041006400640072006500730073003e003c002f0061003a005200650070006c00790054006f003e003c0061003a0041006300740069006f006e00200073003a006d0075007300740055006e006400650072007300740061006e0064003d002200740072007500650022003e0068007400740070003a002f002f0073006300680065006d00610073002e0078006d006c0073006f00610070002e006f00720067002f00770073002f0032003000300034002f00300039002f0065006e0075006d00650072006100740069006f006e002f0045006e0075006d00650072006100740065003c002f0061003a0041006300740069006f006e003e003c0077003a004d006100780045006e00760065006c006f0070006500530069007a006500200073003a006d0075007300740055006e006400650072007300740061006e0064003d002200740072007500650022003e003500310032003000300030003c002f0077003a004d006100780045006e00760065006c006f0070006500530069007a0065003e003c0061003a004d00650073007300610067006500490044003e0075007500690064003a00450039003800300032003200350037002d0036004100370044002d0034004300300044002d0042004600410034002d004500380031004300370042003100430034003400370045003c002f0061003a004d00650073007300610067006500490044003e003c0077003a004c006f00630061006c006500200078006d006c003a006c0061006e0067003d00220065006e002d00550053002200200073003a006d0075007300740055006e006400650072007300740061006e0064003d002200660061006c0073006500220020002f003e003c0070003a0044006100740061004c006f00630061006c006500200078006d006c003a006c0061006e0067003d00220065006e002d00550053002200200073003a006d0075007300740055006e006400650072007300740061006e0064003d002200660061006c0073006500220020002f003e003c0070003a00530065007300730069006f006e0049006400200073003a006d0075007300740055006e006400650072007300740061006e0064003d002200660061006c007300650022003e0075007500690064003a00440032003500440044003000330033002d0043003400300036002d0034003400410042002d0038003400340033002d003800350045003100420043003700390034004600310030003c002f0070003a00530065007300730069006f006e00490064003e003c0070003a004f007000650072006100740069006f006e0049004400200073003a006d00750073007400
55006e006400650072007300740061006e0064003d002200660061006c007300650022003e0075007500690064003a00300033004100330044003100420042002d0039004200310036002d0034003800340037002d0039004600300034002d004300390041003800450044003300380045003100450034003c002f0070003a004f007000650072006100740069006f006e00490044003e003c0070003a00530065007100750065006e006300650049006400200073003a006d0075007300740055006e006400650072007300740061006e0064003d002200660061006c007300650022003e0031003c002f0070003a00530065007100750065006e0063006500490064003e003c0077003a004f007000650072006100740069006f006e00540069006d0065006f00750074003e0050005400360030002e0030003000300053003c002f0077003a004f007000650072006100740069006f006e00540069006d0065006f00750074003e003c002f0073003a004800650061006400650072003e003c0073003a0042006f00640079003e003c006e003a0045006e0075006d00650072006100740065003e003c0077003a004f007000740069006d0069007a00650045006e0075006d00650072006100740069006f006e002f003e003c0077003a004d006100780045006c0065006d0065006e00740073003e00330032003000300030003c002f0077003a004d006100780045006c0065006d0065006e00740073003e003c002f006e003a0045006e0075006d00650072006100740065003e003c002f0073003a0042006f00640079003e003c002f0073003a0045006e00760065006c006f00700065003e00 +``` + +This cleartext is a UTF-16 encoded string, starting with a BOM (Byte Order Mark: ). 
+ +``` +FE FF UTF-16, big-endian +FF FE UTF-16, little-endian +``` + +In our case, it is in little-endian :) + +The decoded text is: + +```xml + + + http://srv.windomain.local:5985/wsman/SubscriptionManager/WEC + http://schemas.microsoft.com/wbem/wsman/1/SubscriptionManager/Subscription + win10.windomain.local + + + http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous + + http://schemas.xmlsoap.org/ws/2004/09/enumeration/Enumerate + 512000 + uuid:E9802257-6A7D-4C0D-BFA4-E81C7B1C447E + + + uuid:D25DD033-C406-44AB-8443-85E1BC794F10 + uuid:03A3D1BB-9B16-4847-9F04-C9A8ED38E1E4 + 1 + PT60.000S + + + + + 32000 + + + +``` + +If we look at each field we have: +- `a:To`: the service address (which is the collector) to which the request was sent (DSP0266 - 5.3). Must be in **all messages**. +- `w:ResourceURI`: `http://schemas.microsoft.com/wbem/wsman/1/SubscriptionManager/Subscription`. According to DSP0266, tells "what we are talking about". The `mustUnderstand=true` attribute is mandatory. `w:ResourceURI` is required for some `wsa:Action` (DSP0266 - R5.1.2.1-3). +- `m:MachineID`: the client DNS name. +- `ReplyTo`: `http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous` indicates that the response should be sent on the same connection as the request (DSP0226 - 5.4.2). + > "The service shall fully duplicate the entire wsa:Address of the wsa:ReplyTo element in the wsa:To header of the reply, even if some of the information is not understood by the service" +- `a:Action`: `Enumerate`, indicates what to do (see table at DSP0226 - 5.4.5). +- `w:MaxEnvelopeSize`: the client needs a response with a SOAP envelope smaller than 5120000. The `mustUnderstand=true` indicates that this condition must be fulfilled and that a `wsman:EncodingLimit` error should be responded if the response is too big (DSP0226 - 6.2). +- `a:MessageID`: indicates that the format `uuid:xxxxxxxx-xxxx--xxxx--xxxx--xxxxxxxxxxxx` should be used. This field is required. 
If not present, a `wsa:InvalidMessageInformationHeader` error is returned (DSP0226 - 5.4.4). This field is case sensitive. If we received a `MessageID` in uppercase, then the `RelatesTo` must contain the ID in uppercase even if RFC 4122 says to use lowercase (see MS-WSMV 3.1.4.1.5). +- `w:Locale`: Specifies the language in which the client wants the response to be. MS-WSMV tells that `mustUnderstand` must be "false", otherwise it sends back a `wsman:UnsupportedFeature` error. The language is defined in `xml:lang` and must be a valid code according to RFC 3066. +- `p:DataLocale`: Microsoft specific. Indicates the language used for numeric data. `mustUnderstand` must be "false". +- `p:SessionId`: Microsoft specific. Unique session id. +- `p:OperationID`: Microsoft specific. Indicates that the client supports "Robust-Connection/full-duplex" (see MS-WSMV 3.1.4.1.39: the server caches request responses, enabling the client to retrieve previous responses even if there was a network issue). The server also has to specify this field to indicate that it is supported. If both the client and the server know this mode is supported, all future messages will be "Robust-Connections/full-duplex". If `mustUnderstand` is "true", it means it is a retransmission of a previous message, and in this case `p:SequenceId` must not be 1 (otherwise `wsa:InvalidMessageInformationHeader` error) and the server must send again its previous response. +- `p:SequenceId`: Microsoft specific, used in "Robust-Connections/full-duplex" mode (MS-WSMV 3.1.4.1.39) +- `w:OperationTimeout`: If not specified, Microsoft uses a default value in configuration. Specified in DSP0226 6.1. It uses the `xs:duration` format (an ISO 8601 duration). +- `w:OptimizeEnumeration`: "OptimizeEnumeration" means sending enumeration results immediately after the EnumerateResponse message, without using a `Pull` request (DSP0226 - 8.2.3). +- `w:MaxElements`: indicates the maximum number of elements to send using OptimizeEnumeration (DSP0226 - 8.2.3). 
+ +For more details see documentation: +- MS-WSMV - 3.1.4.8 Enumerate +- DSP0226 - 8.2 + +mustUnderstand = "must comply" (DSP0266 - 5.2) is implicit for: +- wsa:To +- wsa:MessageID +- wsa:RelatesTo +- wsa:Action + +### The collector answers "EnumerateResponse" + +``` +Frame 3738: 9255 bytes on wire (74040 bits), 9255 bytes captured (74040 bits) on interface \Device\NPF_{D0B586C7-BCDD-4989-9F90-8AD183BA1268}, id 0 +Ethernet II, Src: PcsCompu_62:f7:34 (08:00:27:62:f7:34), Dst: PcsCompu_a6:35:47 (08:00:27:a6:35:47) +Internet Protocol Version 4, Src: 192.168.58.103, Dst: 192.168.58.100 +Transmission Control Protocol, Src Port: 5985, Dst Port: 65091, Seq: 342, Ack: 6781, Len: 9201 +Hypertext Transfer Protocol + HTTP/1.1 200 \r\n + Content-Type: multipart/encrypted;protocol="application/HTTP-Kerberos-session-encrypted";boundary="Encrypted Boundary"\r\n + Server: Microsoft-HTTPAPI/2.0\r\n + Date: Thu, 22 Sep 2022 08:05:18 GMT\r\n + Content-Length: 8974\r\n + \r\n + [HTTP response 2/3] + [Time since request: 0.003282000 seconds] + [Prev request in frame: 3730] + [Prev response in frame: 3732] + [Request in frame: 3736] + [Next request in frame: 3755] + [Next response in frame: 3757] + [Request URI: http://srv.windomain.local:5985/wsman/SubscriptionManager/WEC] + File Data: 8974 bytes +MIME Multipart Media Encapsulation, Type: multipart/encrypted, Boundary: "Encrypted Boundary" + [Type: multipart/encrypted] + First boundary: --Encrypted Boundary\r\n + Encapsulated multipart part: (application/http-kerberos-session-encrypted) + Content-Type: application/HTTP-Kerberos-session-encrypted\r\n + OriginalContent: type=application/soap+xml;charset=UTF-16;Length=8672 + Boundary: \r\n--Encrypted Boundary\r\n + Encapsulated multipart part: (application/octet-stream) + Content-Type: application/octet-stream\r\n + Length of security token: 60 + GSS-API Generic Security Service Application Program Interface + Media Type + Media type: application (8672 bytes) + Last boundary: --Encrypted 
Boundary--\r\n + +``` + +The content is also encrypted with Kerberos. After deciphering and decoding it we have: + +```xml + + + http://schemas.xmlsoap.org/ws/2004/09/enumeration/EnumerateResponse + uuid:45697184-34FA-4722-BF84-AF362DAF7832 + http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous + uuid:03A3D1BB-9B16-4847-9F04-C9A8ED38E1E4 + 1 + uuid:E9802257-6A7D-4C0D-BFA4-E81C7B1C447E + + + + + + + uuid:219C5353-5F3D-4CD7-A644-F6B69E57C1C1 + + + http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous + http://schemas.microsoft.com/wbem/wsman/1/windows/EventLog + + http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous + + http://schemas.xmlsoap.org/ws/2004/08/eventing/Subscribe + 512000 + uuid:A666D835-B462-465E-ACEE-BA6354EA0E58 + + + uuid:24C4926E-F0EB-4F5A-A75F-5F1FA212F124 + 1 + + Toto + SLDC + + RenderedText + + + + + + + HTTP://srv.windomain.local:5985/wsman/subscriptions/B6BDBB59-FB07-4EE5-841F-EBEC9D67CDD4/1 + + 219C5353-5F3D-4CD7-A644-F6B69E57C1C1 + + + + PT3600.000S + + HTTP://srv.windomain.local:5985/wsman/subscriptions/B6BDBB59-FB07-4EE5-841F-EBEC9D67CDD4/1 + + 219C5353-5F3D-4CD7-A644-F6B69E57C1C1 + + + + + + + + + + PT60.0S + PT30.000S + 512000 + + + UTF-16 + + + + + + + + + + + + + + + + + + + + + + + +``` + +In the Header we have: +- `a:Action`: `EnumerateResponse`. According to DSP0266 8.2.3, if the client asks for an OptimizeEnumeration, the server will respond something like: + +```xml + + + ... + + ...same as for wsen:Items in wsen:PullResponse + + + + +``` + +- `a:MessageID`: message UUID. +- `p:OperationID`: the request UUID, implies that the server supports "Robust-Connection/full-duplex". +- `a:RelatesTo`: must contain the `a:MessageID` of the request (case sensitive). + +As specified in MS-WSMV, when the collector receives an Enumerate message, it must retrieve its enabled "subscriptions" and send a "Subscribe requests" list to the client. 
Items in this list are of type SubscriptionType (MS-WSMV 2.2.4.41): + +```xml + + + + + + +``` + +`Version` is a GUID which changes each time the Subscription is modified. +`Envelope` must contain a `SubscribeMsg`. + +The client then needs to extract those `SubscribeMsg`. + +In its Header we have: +- `a:Action`: `http://schemas.xmlsoap.org/ws/2004/08/eventing/Subscribe` (see MS-WSMV 3.1.4.6) +- `w:ResourceURI`: `http://schemas.microsoft.com/wbem/wsman/1/windows/EventLog` (only resource supported by `Subscribe` action) +- `a:MessageID`: a new UUID +- `p:OperationID`: again a new UUID +- `p:SequenceId`: 1 +- `w:OptionSet`: options specific to Subscribe as defined in MS-WSMV 3.1.4.1.30.1: + - `SubscriptionName` + - `Compression`: compression algorithm to use. The only authorised value is SLDC. + - `CDATA`: should the data be parsed/validated or sent without any processing. + - If `xsi:nil` is "true", data should not be parsed. In this case, data are only sent to "Event subscriber". + - `ContentFormat`: + - `RenderedText`: raw data and printable information are sent (in EventData and RenderingInfo). + - `Raw`: default, only raw data are sent. + - `IgnoreChannelError`: should we stop processing if there are errors in query options. If `xsi:nil` is true, we do not stop. + - `ReadExistingEvents`: if this option is "true", all already existing events that match query options should be sent. + +In its Body we have: +- `e:EndTo`: if there is an issue with the subscription, the client sends a `SubscriptionEnd` at the specified address. + - `a:Address`: a URL on the collector specific to the current client (ended with a specific UUID). + - `a:ReferenceProperties`/`e:Identifier`: the subscription version GUID. +- `e:Delivery`: According to Microsoft documentation, the following modes are supported: + - `http://schemas.xmlsoap.org/ws/2004/08/eventing/DeliveryModes/Push`: every SOAP message contains one event, without ACK or SOAP response. 
Event transmission is asynchronous. + - `http://schemas.dmtf.org/wbem/wsman/1/wsman/PushWithAck`: every SOAP message contains one event, each one needs to be acknowledged before the next one is sent. The sender has a waiting list of events to send. + - `http://schemas.dmtf.org/wbem/wsman/1/wsman/Events`: every SOAP message can contain multiple events, each batch of events needs to be acknowledged before the next one is sent. + - `http://schemas.dmtf.org/wbem/wsman/1/wsman/Pull`: every SOAP message can contain multiple events, each batch of events needs to be acknowledged before the next one is sent. This mode implies that the collector uses "Pull" to retrieve events so acknowledgment is implicit. + - The Windows collector uses the `Events` mode (detailed in DSP0226 - 10.2.9.4). In this mode, `e:Delivery` should look like: + ```xml + + + wsa:EndpointReferenceType + + xs:positiveInteger ? + xs:duration ? + + xs:positiveInteger + ? + + ``` + - `e:NotifyTo`: endpoint to send events to. + - `w:MaxTime`: max time between the moment the sender starts encoding the first event and the moment it sends the batch of events. PT30.000S is equivalent to "Minimize Latency" configuration. + - `w:MaxEnvelopeSize`: max size in bytes of SOAP envelopes. + - `@Policy`: defines what to do when events are too big: + - `CancelSubscription`: stop subscription. + - `Skip`: do not send those events. + - `Notify`: notify that events were deleted (default). + - `w:Heartbeats`: "Heartbeats" are sent periodically if there is no event to send. The collector should ensure it always receives either events or heartbeats. The Windows collector configures the heartbeat frequency to PT3600.000S, which means every hour (DSP0226 - 10.2.5). + - `w:ConnectionRetry`: if the subscriber is not reachable, retry "`@count` attribute" times every "`w:ConnectionRetry`" before giving up and considering the subscription as expired (DSP0226 - 10.2.3). 
+ - `w:ContentEncoding`: Windows collector uses UTF-16 (DSP0226 - 10.2.1-7). + - `c:Policy`: specify how the client should authenticate to send events. Syntax is defined in [WS-Policy](https://www.w3.org/Submission/2006/SUBM-WS-Policy-20060425/). MS-WSMV - 3.1.4.1.30.3 suggests to use this minimal version: + ```xml + + + + ... assertions ... + + + + ``` + Every assertion is an authentication element. + Schema of each assertion is specified in MS-WSMV - 2.2.41: + ```xml + + + + + + + + + + + + + + + + + + + ``` + `Profile` tells which security profil is used. It can be `http://schemas.dmtf.org/wbem/wsman/1/wsman/secprofile/https/mutual` or `http://schemas.dmtf.org/wbem/wsman/1/wsman/secprofile/http/mutual` for TLS authentication, and can be `http://schemas.dmtf.org/wbem/wsman/1/wsman/secprofile/http/spnego-kerberos` or `http://schemas.dmtf.org/wbem/wsman/1/wsman/secprofile/https/spnego-kerberos` for Kerberos authentication. In our example we have Kerberos authentication. +- `w:Filter`: defines the filter to fetch events. According to documentation, this is not always required. If it is required, we would receive a `FilteringRequired` error. + - By default, the filter uses XPath language. In our example it is `http://schemas.microsoft.com/win/2004/08/events/eventquery`. + - The filter contains a `QueryList` element of type `QueryListType` (MS-WSMV 2.2.4.24). Every `Query` contains an unique `Id` attribute, and its child elements are of type `SelectType`. `Select` element has a `Path` attribute which contains an event source. Its value should be a level 1 XPath query (DSP0226 - Annexe D.1). We can easily export a valid query using Event Viewer (in the XML tab). +- `w:Bookmark`: ensures we do not miss any event. The sender sends an updated bookmark each time it sends events. The collector needs to save the bookmark (kind of "cookie"). Then if something goes wrong (a network issue for example) the collector can tell the sender which is the last bookmark it received. 
The sender will then know which events were missed and need to be sent (DSP0226 10.2.6). +- `w:SendBookmarks`: indicates to sender to send bookmarks everytime it sends events. + +### The client sends an `End` request + +``` +Frame 3755: 330 bytes on wire (2640 bits), 330 bytes captured (2640 bits) on interface \Device\NPF_{D0B586C7-BCDD-4989-9F90-8AD183BA1268}, id 0 +Ethernet II, Src: PcsCompu_a6:35:47 (08:00:27:a6:35:47), Dst: PcsCompu_62:f7:34 (08:00:27:62:f7:34) +Internet Protocol Version 4, Src: 192.168.58.100, Dst: 192.168.58.103 +Transmission Control Protocol, Src Port: 65091, Dst Port: 5985, Seq: 8523, Ack: 9543, Len: 276 +[3 Reassembled TCP Segments (2018 bytes): #3753(282), #3754(1460), #3755(276)] +Hypertext Transfer Protocol + POST /wsman/SubscriptionManager/WEC HTTP/1.1\r\n + Connection: Keep-Alive\r\n + Content-Type: multipart/encrypted;protocol="application/HTTP-Kerberos-session-encrypted";boundary="Encrypted Boundary"\r\n + User-Agent: Microsoft WinRM Client\r\n + Content-Length: 1736\r\n + Host: srv.windomain.local:5985\r\n + \r\n + [Full request URI: http://srv.windomain.local:5985/wsman/SubscriptionManager/WEC] + [HTTP request 3/3] + [Prev request in frame: 3736] + [Response in frame: 3757] + File Data: 1736 bytes +MIME Multipart Media Encapsulation, Type: multipart/encrypted, Boundary: "Encrypted Boundary" + [Type: multipart/encrypted] + First boundary: --Encrypted Boundary\r\n + Encapsulated multipart part: (application/http-kerberos-session-encrypted) + Content-Type: application/HTTP-Kerberos-session-encrypted\r\n + OriginalContent: type=application/soap+xml;charset=UTF-16;Length=1434 + Boundary: \r\n--Encrypted Boundary\r\n + Encapsulated multipart part: (application/octet-stream) + Content-Type: application/octet-stream\r\n + Length of security token: 60 + GSS-API Generic Security Service Application Program Interface + Media Type + Media type: application (1434 bytes) + Last boundary: --Encrypted Boundary--\r\n +``` + +Deciphered and decoded 
content: + +``` + + + http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous + http://schemas.microsoft.com/wbem/wsman/1/wsman/FullDuplex + http://schemas.microsoft.com/wbem/wsman/1/wsman/End + uuid:5A622A71-B73A-426E-B1CB-048A6079EB23 + uuid:03A3D1BB-9B16-4847-9F04-C9A8ED38E1E4 + + + +``` + +- `a:Action`: `http://schemas.microsoft.com/wbem/wsman/1/wsman/End`, connection ends here (voir MS-WSMV 3.1.4.19). + +### The collector sends back "No Content" + +``` +Frame 3757: 139 bytes on wire (1112 bits), 139 bytes captured (1112 bits) on interface \Device\NPF_{D0B586C7-BCDD-4989-9F90-8AD183BA1268}, id 0 +Ethernet II, Src: PcsCompu_62:f7:34 (08:00:27:62:f7:34), Dst: PcsCompu_a6:35:47 (08:00:27:a6:35:47) +Internet Protocol Version 4, Src: 192.168.58.103, Dst: 192.168.58.100 +Transmission Control Protocol, Src Port: 5985, Dst Port: 65091, Seq: 9543, Ack: 8799, Len: 85 +Hypertext Transfer Protocol + HTTP/1.1 204 \r\n + Server: Microsoft-HTTPAPI/2.0\r\n + Date: Thu, 22 Sep 2022 08:05:18 GMT\r\n + \r\n + [HTTP response 3/3] + [Time since request: 0.000364000 seconds] + [Prev request in frame: 3736] + [Prev response in frame: 3738] + [Request in frame: 3755] + [Request URI: http://srv.windomain.local:5985/wsman/SubscriptionManager/WEC] +``` + +204 => No-Content + +### The client closes the TCP connection + +### The client opens a new TCP connection (source port 65092) + +### The client sends a POST request to the URL found in `DeliveryTo/Address` and authenticates in Kerberos + +``` +Frame 3745: 143 bytes on wire (1144 bits), 143 bytes captured (1144 bits) on interface \Device\NPF_{D0B586C7-BCDD-4989-9F90-8AD183BA1268}, id 0 +Ethernet II, Src: PcsCompu_a6:35:47 (08:00:27:a6:35:47), Dst: PcsCompu_62:f7:34 (08:00:27:62:f7:34) +Internet Protocol Version 4, Src: 192.168.58.100, Dst: 192.168.58.103 +Transmission Control Protocol, Src Port: 65092, Dst Port: 5985, Seq: 2921, Ack: 1, Len: 89 +[3 Reassembled TCP Segments (3009 bytes): #3743(1460), #3744(1460), #3745(89)] 
+Hypertext Transfer Protocol + POST /wsman/subscriptions/B6BDBB59-FB07-4EE5-841F-EBEC9D67CDD4/1 HTTP/1.1\r\n + Connection: Keep-Alive\r\n + Content-Type: application/soap+xml;charset=UTF-16\r\n + Content-Encoding: SLDC\r\n + [truncated]Authorization: Kerberos YIIH9AYJKoZIhvcSAQICAQBuggfjMIIH36ADAgEFoQMCAQ6iBwMFACAAAACjggYOYYIGCjCCBgagAwIBBaERGw9XSU5ET01BSU4uTE9DQUyiJjAkoAMCAQKhHTAbGwRIVFRQGxNzcnYud2luZG9tYWluLmxvY2Fso4IFwjCCBb6gAwIBEqEDAgEFooIFsASCBazWEk8V/Bh + User-Agent: Microsoft WinRM Client\r\n + Content-Length: 0\r\n + Host: srv.windomain.local:5985\r\n + \r\n + [Full request URI: http://srv.windomain.local:5985/wsman/subscriptions/B6BDBB59-FB07-4EE5-841F-EBEC9D67CDD4/1] + [HTTP request 1/12] + [Response in frame: 3747] + [Next request in frame: 3749] +``` + +### The collector validates the authentication + +``` +Frame 3747: 395 bytes on wire (3160 bits), 395 bytes captured (3160 bits) on interface \Device\NPF_{D0B586C7-BCDD-4989-9F90-8AD183BA1268}, id 0 +Ethernet II, Src: PcsCompu_62:f7:34 (08:00:27:62:f7:34), Dst: PcsCompu_a6:35:47 (08:00:27:a6:35:47) +Internet Protocol Version 4, Src: 192.168.58.103, Dst: 192.168.58.100 +Transmission Control Protocol, Src Port: 5985, Dst Port: 65092, Seq: 1, Ack: 3010, Len: 341 +Hypertext Transfer Protocol + HTTP/1.1 200 \r\n + WWW-Authenticate: Kerberos YIGXBgkqhkiG9xIBAgICAG+BhzCBhKADAgEFoQMCAQ+ieDB2oAMCARKibwRtZd61blB9k7a2pmdvwbUxdztFcuYUccZERJQkCicXOfvL75j/xnc6gq8StyY5Fw5aXbMcyJmVywrlH2bXPMTlNv8393/0KQ3iaQwxE9lU/uZqErxGqk/cUPho8AMuFk9kygcZArduQoM6A3eF3A==\r\n + Server: Microsoft-HTTPAPI/2.0\r\n + Date: Thu, 22 Sep 2022 08:05:18 GMT\r\n + Content-Length: 0\r\n + \r\n + [HTTP response 1/12] + [Time since request: 0.000689000 seconds] + [Request in frame: 3745] + [Next request in frame: 3749] + [Next response in frame: 3751] + [Request URI: http://srv.windomain.local:5985/wsman/subscriptions/B6BDBB59-FB07-4EE5-841F-EBEC9D67CDD4/1] +``` + +### The client sends a Heartbeat (may not always happen) + +``` +Frame 3749: 1362 
bytes on wire (10896 bits), 1362 bytes captured (10896 bits) on interface \Device\NPF_{D0B586C7-BCDD-4989-9F90-8AD183BA1268}, id 0 +Ethernet II, Src: PcsCompu_a6:35:47 (08:00:27:a6:35:47), Dst: PcsCompu_62:f7:34 (08:00:27:62:f7:34) +Internet Protocol Version 4, Src: 192.168.58.100, Dst: 192.168.58.103 +Transmission Control Protocol, Src Port: 65092, Dst Port: 5985, Seq: 3345, Ack: 342, Len: 1308 +[2 Reassembled TCP Segments (1643 bytes): #3748(335), #3749(1308)] +Hypertext Transfer Protocol + POST /wsman/subscriptions/B6BDBB59-FB07-4EE5-841F-EBEC9D67CDD4/1 HTTP/1.1\r\n + Connection: Keep-Alive\r\n + Content-Type: multipart/encrypted;protocol="application/HTTP-Kerberos-session-encrypted";boundary="Encrypted Boundary"\r\n + Content-Encoding: SLDC\r\n + User-Agent: Microsoft WinRM Client\r\n + Content-Length: 1308\r\n + Host: srv.windomain.local:5985\r\n + \r\n + [Full request URI: http://srv.windomain.local:5985/wsman/subscriptions/B6BDBB59-FB07-4EE5-841F-EBEC9D67CDD4/1] + [HTTP request 2/12] + [Prev request in frame: 3745] + [Response in frame: 3751] + [Next request in frame: 3778] + File Data: 1308 bytes +MIME Multipart Media Encapsulation, Type: multipart/encrypted, Boundary: "Encrypted Boundary" + [Type: multipart/encrypted] + First boundary: --Encrypted Boundary\r\n + Encapsulated multipart part: (application/http-kerberos-session-encrypted) + Content-Type: application/HTTP-Kerberos-session-encrypted\r\n + OriginalContent: type=application/soap+xml;charset=UTF-16;Length=1006 + Boundary: \r\n--Encrypted Boundary\r\n + Encapsulated multipart part: (application/octet-stream) + Content-Type: application/octet-stream\r\n + Length of security token: 60 + GSS-API Generic Security Service Application Program Interface + Media Type + Last boundary: --Encrypted Boundary--\r\n +``` + +The `Content-Encoding` header contains `SLDC`. This means the body is compressed with `SLDC` (https://www.ecma-international.org/wp-content/uploads/ECMA-321_1st_edition_june_2001.pdf). 
In this case, we first need to decipher it with Kerberos, only the cleartext is compressed (otherwise it would not be really useful :D). + +Wireshark does not understand that it is SLDC and that it need to be deciphered first. We just need to patch it to explain to him "no worry bro', it's only water": + +```diff +diff --git a/epan/dissectors/packet-http.c b/epan/dissectors/packet-http.c +index 1c9d5d5610..e71f4b3da7 100644 +--- a/epan/dissectors/packet-http.c ++++ b/epan/dissectors/packet-http.c +@@ -1813,7 +1813,8 @@ dissect_http_message(tvbuff_t *tvb, int offset, packet_info *pinfo, + * we handle it in any case). + */ + if (headers.content_encoding != NULL && +- g_ascii_strcasecmp(headers.content_encoding, "identity") != 0) { ++ g_ascii_strcasecmp(headers.content_encoding, "identity") != 0 && ++ g_ascii_strcasecmp(headers.content_encoding, "SLDC") != 0) { + /* + * We currently don't handle, for example, "compress"; + * just handle them as data for now. +``` + +With this patch, Wireshark deciphers the body: + +``` 
+ffabfdfc3c001cc003a00114006e001d80065001b0006f001c281a20001e0006da079bb2019cc003d000880068001d00074a098e8002f000bc0077001de8722ea1e8ce87e6f001c80067a1b8c80030000c28862fa298d68ae73a239868622da0af602845dc1730f8c2739800c748f32d1fcc349d176c7de03072118ef4b3e0270d322b8e29a661001900064a5f99698e73001a690667f08f265f9891665c8da6ea84f98ea3bfc0d01325634e80033754c4c5962f49d618e3756bc63d5afc24473acaf8162387c0d7336d56cc75a137e5293357b45d60b37edadf863543e00680224589f06e196c1b8948c36572a58747de17a4e5553c0818570c99874c39a2c5d91c4c76953659ec6a001cd53702e748b8e6003ad8970fc1cad2e70cab26fcbba2f001080036b6d912dbe42cd522d0011adca30000dedd634a03916dce2db568d2c7246b7f916dde450010edd244b6e8dee2244b8ae14818d99f06d709b0796b71d0009b72bc3f58a60ca0024dc7fb3ac6df9cbcb8eab968069dba3ad60eb7cb3a000ab9a3713163d4c75864ceba66cd4673c8ee3edd1231b78fc0288c651e6ce384cc52a1d9c284a79e397e04620f5377e0de51d290675dfeff2c41b12f721934ae0bf3c29b29e783a8dd9b4cf34bf37e475e1947618302c6920f409463e367fcc06e32569ce97efe8667c0a2617ee593956cf0e2186bdfc91ee4ab614c3bec2d3c5c3cdd68532e41d385884a76073d58a7e24b00d6bafa3706197e7e0509705309bb4b4c35bde01e1d6cbb8f10759be487937c8d7191a5c24836691bdabf17b10c0033b67e2ce22db1f17f10adb635b76e2e81a5b746b713215b8f83f5e4a83508133883f88cb45f0eae738940cf9954cb74116d904a7f15f1c41707c64186608997d4c3f4dc9ca7bc3f504126fd3f12f2ba56439a738c73778ced8646cdd645b68e2e01a51bc83469c0d041a2ee07a1b5194859b648945b20d24f03a10b382f9804e00013f3459cb3caf02c089f1f0648b9b3f15f0d690e36a428ca91e43a4be0721cd45c619b846f527c2498bb88b9b4f16e6120c0d832e24ff98c2e8001c732e9baf3f8c6be2c0c0c7a93fe06238641e2598a95fd36d7d4df5db3a57a4a16031b54dc5db9aca7c671f813e43cb228eb2f2e1dec47066b2fe232e1d41bb2cc9972dc9a2cba74cba22fb139cebc268b3b9b6c6a73b0dea6c39d91cc3683175a8ce57b7c45073b588cb30f8d2d6e30a2c8bed2276e32ce3b6c39032ae58e762ee2e616dba48d6eb22652fc12037ca7236b7c8d2e0246b8290ae1e39ca3a37ca578c7b1137a252e0f5106d1a6b0014b8baf86c0cb72fd7fd8a287400245d74c3b6a71b2636a215aacc8003cf1d048bc6cb39ef63f371de0dfb5dc116f5b8ea8d97bfc4603e007f
d0000e20b0000 +``` + +We can then install sldc lib implemented by Romain Carré (and the python module https://github.com/rom1sqr/sldc). + +A python script that eats the Wireshark "stream hex" and decompresses it: + +```python +#!/usr/bin/python3 +# decompress.py + +import sldc +import sys + +while line := sys.stdin.readline(): + data = bytes.fromhex(line) + print(sldc.decompress(data).decode('utf-16')) +``` + +``` +$ ./decompress.py +ffabfdfc3c001cc003a00114006e001d80065001b0006f001c281a20001e0006da079bb2019cc003d000880068001d00074a098e8002f000bc0077001de8722ea1e8ce87e6f001c80067a1b8c80030000c28862fa298d68ae73a239868622da0af602845dc1730f8c2739800c748f32d1fcc349d176c7de03072118ef4b3e0270d322b8e29a661001900064a5f99698e73001a690667f08f265f9891665c8da6ea84f98ea3bfc0d01325634e80033754c4c5962f49d618e3756bc63d5afc24473acaf8162387c0d7336d56cc75a137e5293357b45d60b37edadf863543e00680224589f06e196c1b8948c36572a58747de17a4e5553c0818570c99874c39a2c5d91c4c76953659ec6a001cd53702e748b8e6003ad8970fc1cad2e70cab26fcbba2f001080036b6d912dbe42cd522d0011adca30000dedd634a03916dce2db568d2c7246b7f916dde450010edd244b6e8dee2244b8ae14818d99f06d709b0796b71d0009b72bc3f58a60ca0024dc7fb3ac6df9cbcb8eab968069dba3ad60eb7cb3a000ab9a3713163d4c75864ceba66cd4673c8ee3edd1231b78fc0288c651e6ce384cc52a1d9c284a79e397e04620f5377e0de51d290675dfeff2c41b12f721934ae0bf3c29b29e783a8dd9b4cf34bf37e475e1947618302c6920f409463e367fcc06e32569ce97efe8667c0a2617ee593956cf0e2186bdfc91ee4ab614c3bec2d3c5c3cdd68532e41d385884a76073d58a7e24b00d6bafa3706197e7e0509705309bb4b4c35bde01e1d6cbb8f10759be487937c8d7191a5c24836691bdabf17b10c0033b67e2ce22db1f17f10adb635b76e2e81a5b746b713215b8f83f5e4a83508133883f88cb45f0eae738940cf9954cb74116d904a7f15f1c41707c64186608997d4c3f4dc9ca7bc3f504126fd3f12f2ba56439a738c73778ced8646cdd645b68e2e01a51bc83469c0d041a2ee07a1b5194859b648945b20d24f03a10b382f9804e00013f3459cb3caf02c089f1f0648b9b3f15f0d690e36a428ca91e43a4be0721cd45c619b846f527c2498bb88b9b4f16e6120c0d832e24ff98c2e8001c732e9baf3f8c6be2c0c0c7a93
fe06238641e2598a95fd36d7d4df5db3a57a4a16031b54dc5db9aca7c671f813e43cb228eb2f2e1dec47066b2fe232e1d41bb2cc9972dc9a2cba74cba22fb139cebc268b3b9b6c6a73b0dea6c39d91cc3683175a8ce57b7c45073b588cb30f8d2d6e30a2c8bed2276e32ce3b6c39032ae58e762ee2e616dba48d6eb22652fc12037ca7236b7c8d2e0246b8290ae1e39ca3a37ca578c7b1137a252e0f5106d1a6b0014b8baf86c0cb72fd7fd8a287400245d74c3b6a71b2636a215aacc8003cf1d048bc6cb39ef63f371de0dfb5dc116f5b8ea8d97bfc4603e007fd0000e20b0000 +http://srv.windomain.local:5985/wsman/subscriptions/B6BDBB59-FB07-4EE5-841F-EBEC9D67CDD4/1win10.windomain.localhttp://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymoushttp://schemas.dmtf.org/wbem/wsman/1/wsman/Heartbeat512000uuid:EEC04F74-A27D-4C3A-AEF5-BC5BF54359BAuuid:981C530F-BE2A-4AAB-BACB-6FB4CD1A14ABuuid:EA2EE566-2CC1-49A0-A726-BCE7DC356E221PT60.000S219C5353-5F3D-4CD7-A644-F6B69E57C1C1 +``` + +We get this first message: + +```xml + + + http://srv.windomain.local:5985/wsman/subscriptions/B6BDBB59-FB07-4EE5-841F-EBEC9D67CDD4/1 + win10.windomain.local + + + http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous + + http://schemas.dmtf.org/wbem/wsman/1/wsman/Heartbeat + 512000 + uuid:EEC04F74-A27D-4C3A-AEF5-BC5BF54359BA + + + uuid:981C530F-BE2A-4AAB-BACB-6FB4CD1A14AB + uuid:EA2EE566-2CC1-49A0-A726-BCE7DC356E22 + 1 + PT60.000S + 219C5353-5F3D-4CD7-A644-F6B69E57C1C1 + + + + + + + +``` + +- `a:To` : same address as `NotifyTo/Address` +- `m:MachineID`: again the machine DNS name +- `a:Action`: `http://schemas.dmtf.org/wbem/wsman/1/wsman/Heartbeat` (DSP0266 - 10.2.5) +- `p:SessionId`: a new UUID +- `p:OperationID`: a new UUID +- `e:Identifier`: identifier asked in `e:NotifyTo/a:Address/a:ReferenceProperties` (which the current subscription version) +- `w:AckRequested`: we need to respond a Ack + +So the first message we received was a Heartbeat and not a batch of events. 
+ +### The collector acknowledges + +``` +Frame 3751: 2013 bytes on wire (16104 bits), 2013 bytes captured (16104 bits) on interface \Device\NPF_{D0B586C7-BCDD-4989-9F90-8AD183BA1268}, id 0 +Ethernet II, Src: PcsCompu_62:f7:34 (08:00:27:62:f7:34), Dst: PcsCompu_a6:35:47 (08:00:27:a6:35:47) +Internet Protocol Version 4, Src: 192.168.58.103, Dst: 192.168.58.100 +Transmission Control Protocol, Src Port: 5985, Dst Port: 65092, Seq: 342, Ack: 4653, Len: 1959 +Hypertext Transfer Protocol + HTTP/1.1 200 \r\n + Content-Type: multipart/encrypted;protocol="application/HTTP-Kerberos-session-encrypted";boundary="Encrypted Boundary"\r\n + Server: Microsoft-HTTPAPI/2.0\r\n + Date: Thu, 22 Sep 2022 08:05:18 GMT\r\n + Content-Length: 1732\r\n + \r\n + [HTTP response 2/12] + [Time since request: 0.000993000 seconds] + [Prev request in frame: 3745] + [Prev response in frame: 3747] + [Request in frame: 3749] + [Next request in frame: 3778] + [Next response in frame: 3780] + [Request URI: http://srv.windomain.local:5985/wsman/subscriptions/B6BDBB59-FB07-4EE5-841F-EBEC9D67CDD4/1] + File Data: 1732 bytes +MIME Multipart Media Encapsulation, Type: multipart/encrypted, Boundary: "Encrypted Boundary" + [Type: multipart/encrypted] + First boundary: --Encrypted Boundary\r\n + Encapsulated multipart part: (application/http-kerberos-session-encrypted) + Content-Type: application/HTTP-Kerberos-session-encrypted\r\n + OriginalContent: type=application/soap+xml;charset=UTF-16;Length=1430 + Boundary: \r\n--Encrypted Boundary\r\n + Encapsulated multipart part: (application/octet-stream) + Content-Type: application/octet-stream\r\n + Length of security token: 60 + GSS-API Generic Security Service Application Program Interface + Media Type + Last boundary: --Encrypted Boundary--\r\n +``` + +This time, the content is not SLDC compressed: + +```xml + + + http://schemas.dmtf.org/wbem/wsman/1/wsman/Ack + uuid:12AAFC00-5BB5-42B5-BE85-077D7C02B8E9 + uuid:EA2EE566-2CC1-49A0-A726-BCE7DC356E22 + 1 + 
http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous + uuid:EEC04F74-A27D-4C3A-AEF5-BC5BF54359BA + + + +``` + +What is interesting: +- `a:Action`: `http://schemas.dmtf.org/wbem/wsman/1/wsman/Ack` (DSP0266 - 10.7) +- `p:OperationID`: same UUID as the previous message +- `a:RelatesTo`: contains the `MessageID` of the Heartbeat + +### The client sends a POST request containing a batch of events + +``` +Frame 3778: 932 bytes on wire (7456 bits), 932 bytes captured (7456 bits) on interface \Device\NPF_{D0B586C7-BCDD-4989-9F90-8AD183BA1268}, id 0 +Ethernet II, Src: PcsCompu_a6:35:47 (08:00:27:a6:35:47), Dst: PcsCompu_62:f7:34 (08:00:27:62:f7:34) +Internet Protocol Version 4, Src: 192.168.58.100, Dst: 192.168.58.103 +Transmission Control Protocol, Src Port: 65092, Dst Port: 5985, Seq: 23969, Ack: 2301, Len: 878 +[15 Reassembled TCP Segments (20194 bytes): #3761(336), #3762(1460), #3763(1460), #3764(1460), #3765(1460), #3766(1460), #3767(1460), #3768(1460), #3769(1460), #3770(1460), #3772(1460), #3773(1460), #3774(1460), #3777(1460), #3778(878)] +Hypertext Transfer Protocol + POST /wsman/subscriptions/B6BDBB59-FB07-4EE5-841F-EBEC9D67CDD4/1 HTTP/1.1\r\n + Connection: Keep-Alive\r\n + Content-Type: multipart/encrypted;protocol="application/HTTP-Kerberos-session-encrypted";boundary="Encrypted Boundary"\r\n + Content-Encoding: SLDC\r\n + User-Agent: Microsoft WinRM Client\r\n + Content-Length: 19858\r\n + Host: srv.windomain.local:5985\r\n + \r\n + [Full request URI: http://srv.windomain.local:5985/wsman/subscriptions/B6BDBB59-FB07-4EE5-841F-EBEC9D67CDD4/1] + [HTTP request 3/12] + [Prev request in frame: 3749] + [Response in frame: 3780] + [Next request in frame: 3972] + File Data: 19858 bytes +MIME Multipart Media Encapsulation, Type: multipart/encrypted, Boundary: "Encrypted Boundary" + [Type: multipart/encrypted] + First boundary: --Encrypted Boundary\r\n + Encapsulated multipart part: (application/http-kerberos-session-encrypted) + Content-Type: 
application/HTTP-Kerberos-session-encrypted\r\n + OriginalContent: type=application/soap+xml;charset=UTF-16;Length=19555 + Boundary: \r\n--Encrypted Boundary\r\n + Encapsulated multipart part: (application/octet-stream) + Content-Type: application/octet-stream\r\n + Length of security token: 60 + GSS-API Generic Security Service Application Program Interface + Media Type + Last boundary: --Encrypted Boundary--\r\n +``` + +It is again compressed in SLDC and encrypted with Kerberos. We decipher it and decompress it using `decompress.py` and get: + +```xml + + + http://srv.windomain.local:5985/wsman/subscriptions/B6BDBB59-FB07-4EE5-841F-EBEC9D67CDD4/1 + win10.windomain.local + + + http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous + + http://schemas.dmtf.org/wbem/wsman/1/wsman/Events + 512000 + uuid:31652DEB-C9E8-45D6-B3E8-90AC64D48422 + + + uuid:981C530F-BE2A-4AAB-BACB-6FB4CD1A14AB + uuid:C7F39CB2-8FFD-4DA3-A111-CDB303EEA098 + 1 + PT60.000S + 219C5353-5F3D-4CD7-A644-F6B69E57C1C1 + + + + + + + + + + + + 25404000x4000000000000026149141Microsoft-Windows-WinRM/Operationalwin10.windomain.localActivity TransferInformationInfoMicrosoft-Windows-WinRM/OperationalMicrosoft-Windows-Windows Remote ManagementClientServerActivity Transfer]]> + + + 16102700x400000000000000a149142Microsoft-Windows-WinRM/Operationalwin10.windomain.localThe client cannot connect to the destination specified in the request. Verify that the service on the destination is running and is accepting requests. Consult the logs and documentation for the WS-Management service running on the destination, most commonly IIS or WinRM. If the destination is the WinRM service, run the following command on the destination to analyze and configure the WinRM service: "winrm quickconfig".The client cannot connect to the destination specified in the request. Verify that the service on the destination is running and is accepting requests. 
Consult the logs and documentation for the WS-Management service running on the destination, most commonly IIS or WinRM. If the destination is the WinRM service, run the following command on the destination to analyze and configure the WinRM service: "winrm quickconfig".ErrorUser authenticationInfoMicrosoft-Windows-WinRM/OperationalMicrosoft-Windows-Windows Remote ManagementClientSecurity]]> + + + 142021020x4000000000000002149143Microsoft-Windows-WinRM/Operationalwin10.windomain.localEnumeration2150858770WSMan operation Enumeration failed, error code 2150858770ErrorResponse handlingStopMicrosoft-Windows-WinRM/OperationalMicrosoft-Windows-Windows Remote ManagementClient]]> + + + 14504510x4000000000000002149144Microsoft-Windows-WinRM/Operationalwin10.windomain.localEnumerationhttp://schemas.microsoft.com/wbem/wsman/1/SubscriptionManager/SubscriptionWSMan operation Enumeration started with resourceUri http://schemas.microsoft.com/wbem/wsman/1/SubscriptionManager/SubscriptionInformationWSMan API callStartMicrosoft-Windows-WinRM/OperationalMicrosoft-Windows-Windows Remote ManagementClient]]> + + + 14504510x4000000000000002149145Microsoft-Windows-WinRM/Operationalwin10.windomain.localEnumerationhttp://schemas.microsoft.com/wbem/wsman/1/SubscriptionManager/SubscriptionWSMan operation Enumeration started with resourceUri http://schemas.microsoft.com/wbem/wsman/1/SubscriptionManager/SubscriptionInformationWSMan API callStartMicrosoft-Windows-WinRM/OperationalMicrosoft-Windows-Windows Remote ManagementClient]]> + + + 132041020x4000000000000002149146Microsoft-Windows-WinRM/Operationalwin10.windomain.localEnumerationWSMan operation Enumeration completed successfullyInformationResponse handlingStopMicrosoft-Windows-WinRM/OperationalMicrosoft-Windows-Windows Remote ManagementClient]]> + + + 132041020x4000000000000002149147Microsoft-Windows-WinRM/Operationalwin10.windomain.localEventDeliveryWSMan operation EventDelivery completed successfullyInformationResponse 
handlingStopMicrosoft-Windows-WinRM/OperationalMicrosoft-Windows-Windows Remote ManagementClient]]> + + + 25404000x4000000000000026149148Microsoft-Windows-WinRM/Operationalwin10.windomain.localActivity TransferInformationInfoMicrosoft-Windows-WinRM/OperationalMicrosoft-Windows-Windows Remote ManagementClientServerActivity Transfer]]> + + + 16102700x400000000000000a149149Microsoft-Windows-WinRM/Operationalwin10.windomain.localThe client cannot connect to the destination specified in the request. Verify that the service on the destination is running and is accepting requests. Consult the logs and documentation for the WS-Management service running on the destination, most commonly IIS or WinRM. If the destination is the WinRM service, run the following command on the destination to analyze and configure the WinRM service: "winrm quickconfig".The client cannot connect to the destination specified in the request. Verify that the service on the destination is running and is accepting requests. Consult the logs and documentation for the WS-Management service running on the destination, most commonly IIS or WinRM. 
If the destination is the WinRM service, run the following command on the destination to analyze and configure the WinRM service: "winrm quickconfig".ErrorUser authenticationInfoMicrosoft-Windows-WinRM/OperationalMicrosoft-Windows-Windows Remote ManagementClientSecurity]]> + + + 142021020x4000000000000002149150Microsoft-Windows-WinRM/Operationalwin10.windomain.localEnumeration2150858770WSMan operation Enumeration failed, error code 2150858770ErrorResponse handlingStopMicrosoft-Windows-WinRM/OperationalMicrosoft-Windows-Windows Remote ManagementClient]]> + + + 14504510x4000000000000002149151Microsoft-Windows-WinRM/Operationalwin10.windomain.localEnumerationhttp://schemas.microsoft.com/wbem/wsman/1/SubscriptionManager/SubscriptionWSMan operation Enumeration started with resourceUri http://schemas.microsoft.com/wbem/wsman/1/SubscriptionManager/SubscriptionInformationWSMan API callStartMicrosoft-Windows-WinRM/OperationalMicrosoft-Windows-Windows Remote ManagementClient]]> + + + 14504510x4000000000000002149152Microsoft-Windows-WinRM/Operationalwin10.windomain.localEnumerationhttp://schemas.microsoft.com/wbem/wsman/1/SubscriptionManager/SubscriptionWSMan operation Enumeration started with resourceUri http://schemas.microsoft.com/wbem/wsman/1/SubscriptionManager/SubscriptionInformationWSMan API callStartMicrosoft-Windows-WinRM/OperationalMicrosoft-Windows-Windows Remote ManagementClient]]> + + + 132041020x4000000000000002149153Microsoft-Windows-WinRM/Operationalwin10.windomain.localEnumerationWSMan operation Enumeration completed successfullyInformationResponse handlingStopMicrosoft-Windows-WinRM/OperationalMicrosoft-Windows-Windows Remote ManagementClient]]> + + + 211041100x4000000000000004149154Microsoft-Windows-WinRM/Operationalwin10.windomain.localThe Winrm service is stoppingInformationWinrm service start/stopInfoMicrosoft-Windows-WinRM/OperationalMicrosoft-Windows-Windows Remote ManagementServer]]> + + + 
132041020x4000000000000002149155Microsoft-Windows-WinRM/Operationalwin10.windomain.localEventDeliveryWSMan operation EventDelivery completed successfullyInformationResponse handlingStopMicrosoft-Windows-WinRM/OperationalMicrosoft-Windows-Windows Remote ManagementClient]]> + + + 142021020x4000000000000002149156Microsoft-Windows-WinRM/Operationalwin10.windomain.localEnumeration995WSMan operation Enumeration failed, error code 995ErrorResponse handlingStopMicrosoft-Windows-WinRM/OperationalMicrosoft-Windows-Windows Remote ManagementClient]]> + + + 212041120x4000000000000004149157Microsoft-Windows-WinRM/Operationalwin10.windomain.localThe Winrm service was stopped successfullyInformationWinrm service start/stopStopMicrosoft-Windows-WinRM/OperationalMicrosoft-Windows-Windows Remote ManagementServer]]> + + + 14504510x4000000000000002149158Microsoft-Windows-WinRM/Operationalwin10.windomain.localEnumerationhttp://schemas.microsoft.com/wbem/wsman/1/config/listenerWSMan operation Enumeration started with resourceUri http://schemas.microsoft.com/wbem/wsman/1/config/listenerInformationWSMan API callStartMicrosoft-Windows-WinRM/OperationalMicrosoft-Windows-Windows Remote ManagementClient]]> + + + 25404000x4000000000000026149159Microsoft-Windows-WinRM/Operationalwin10.windomain.localActivity TransferInformationInfoMicrosoft-Windows-WinRM/OperationalMicrosoft-Windows-Windows Remote ManagementClientServerActivity Transfer]]> + + + 16102700x400000000000000a149160Microsoft-Windows-WinRM/Operationalwin10.windomain.localThe client cannot connect to the destination specified in the request. Verify that the service on the destination is running and is accepting requests. Consult the logs and documentation for the WS-Management service running on the destination, most commonly IIS or WinRM. 
If the destination is the WinRM service, run the following command on the destination to analyze and configure the WinRM service: "winrm quickconfig".The client cannot connect to the destination specified in the request. Verify that the service on the destination is running and is accepting requests. Consult the logs and documentation for the WS-Management service running on the destination, most commonly IIS or WinRM. If the destination is the WinRM service, run the following command on the destination to analyze and configure the WinRM service: "winrm quickconfig".ErrorUser authenticationInfoMicrosoft-Windows-WinRM/OperationalMicrosoft-Windows-Windows Remote ManagementClientSecurity]]> + + + 142021020x4000000000000002149161Microsoft-Windows-WinRM/Operationalwin10.windomain.localEnumeration2150858770WSMan operation Enumeration failed, error code 2150858770ErrorResponse handlingStopMicrosoft-Windows-WinRM/OperationalMicrosoft-Windows-Windows Remote ManagementClient]]> + + + +' +``` + +And at last we have the events :D + +In the Header we can find: +- `a:Action`: `http://schemas.dmtf.org/wbem/wsman/1/wsman/Events` (see DSP226 10.2.9.4) +- `w:Bookmark`: keeps track of the last events we received, used to know what to send next. It should be sent during the next EnumerateResponse. +- `w:AckRequested`: the client requires an acknowledgment from the collector (mandatory in this mode). + +In the body, there is table that contains events. Each event is a CDATA (as ask using `w:OptionSet`). 
+ +Inside one event we can see: + +```xml + + + + 142 + 0 + 2 + 10 + 2 + 0x4000000000000002 + + 149161 + + + Microsoft-Windows-WinRM/Operational + win10.windomain.local + + + + Enumeration + 2150858770 + + + WSMan operation Enumeration failed, error code 2150858770 + Error + Response handling + Stop + Microsoft-Windows-WinRM/Operational + Microsoft-Windows-Windows Remote Management + + Client + + + +``` + +As expected we have both `EventData` and `RenderingInfo` (because we specified `ContentFormat=RenderedText`). + +Some other examples: + +```xml + + + + 161 + 0 + 2 + 7 + 0 + 0x400000000000000a + + 149160 + + + Microsoft-Windows-WinRM/Operational + win10.windomain.local + + + + The client cannot connect to the destination specified in the request. Verify that the service on the destination is running and is accepting requests. Consult the logs and documentation for the WS-Management service running on the destination, most commonly IIS or WinRM. If the destination is the WinRM service, run the following command on the destination to analyze and configure the WinRM service: "winrm quickconfig". + + + The client cannot connect to the destination specified in the request. Verify that the service on the destination is running and is accepting requests. Consult the logs and documentation for the WS-Management service running on the destination, most commonly IIS or WinRM. If the destination is the WinRM service, run the following command on the destination to analyze and configure the WinRM service: "winrm quickconfig". 
+ Error + User authentication + Info + Microsoft-Windows-WinRM/Operational + Microsoft-Windows-Windows Remote Management + + Client + Security + + + +``` + +```xml + + + + 208 + 0 + 4 + 11 + 1 + 0x4000000000000004 + + 149162 + + + Microsoft-Windows-WinRM/Operational + win10.windomain.local + + + + + The Winrm service is starting + Information + Winrm service start/stop + Start + Microsoft-Windows-WinRM/Operational + Microsoft-Windows-Windows Remote Management + + Server + + + +``` + +```xml + + + + 161 + 0 + 2 + 7 + 0 + 0x400000000000000a + + 149166 + + + Microsoft-Windows-WinRM/Operational + win10.windomain.local + + + + WinRM cannot process the request. The following error with errorcode 0x80090311 occurred while using Kerberos authentication: We can"t sign you in with this credential because your domain isn"t available. Make sure your device is connected to your organization"s network and try again. If you previously signed in on this device with another credential, you can sign in with that credential. \r\n Possible causes are:\r\n -The user name or password specified are invalid.\r\n -Kerberos is used when no authentication method and no user name are specified.\r\n -Kerberos accepts domain user names, but not local user names.\r\n -The Service Principal Name (SPN) for the remote computer name and port does not exist.\r\n -The client and remote computers are in different domains and there is no trust between the two domains.\r\n After checking for the above issues, try the following:\r\n -Check the Event Viewer for events related to authentication.\r\n -Change the authentication method; add the destination computer to the WinRM TrustedHosts configuration setting or use HTTPS transport.\r\n Note that computers in the TrustedHosts list might not be authenticated.\r\n -For more information about WinRM configuration, run the following command: winrm help config. + + + WinRM cannot process the request. 
The following error with errorcode 0x80090311 occurred while using Kerberos authentication: We can"t sign you in with this credential because your domain isn"t available. Make sure your device is connected to your organization"s network and try again. If you previously signed in on this device with another credential, you can sign in with that credential. \r\n Possible causes are:\r\n -The user name or password specified are invalid.\r\n -Kerberos is used when no authentication method and no user name are specified.\r\n -Kerberos accepts domain user names, but not local user names.\r\n -The Service Principal Name (SPN) for the remote computer name and port does not exist.\r\n -The client and remote computers are in different domains and there is no trust between the two domains.\r\n After checking for the above issues, try the following:\r\n -Check the Event Viewer for events related to authentication.\r\n -Change the authentication method; add the destination computer to the WinRM TrustedHosts configuration setting or use HTTPS transport.\r\n Note that computers in the TrustedHosts list might not be authenticated.\r\n -For more information about WinRM configuration, run the following command: winrm help config. 
+ Error + User authentication + Info + Microsoft-Windows-WinRM/Operational + Microsoft-Windows-Windows Remote Management + + Client + Security + + + +``` + +```xml + + + + 4688 + 2 + 0 + 13312 + 0 + 0x8020000000000000 + + 72206 + + + Security + win10.windomain.local + + + + S-1-5-18 + WIN10$ + WINDOMAIN + 0x3e7 + 0x7ec + C:\\Windows\\System32\\svchost.exe + %%1936 + 0x244 + + S-1-0-0 + WIN10$ + WINDOMAIN + 0x3e4 + C:\\Windows\\System32\\services.exe + S-1-16-16384 + + + A new process has been created.\r\n\r\nCreator Subject:\r\n\tSecurity ID:\t\tS-1-5-18\r\n\tAccount Name:\t\tWIN10$\r\n\tAccount Domain:\t\tWINDOMAIN\r\n\tLogon ID:\t\t0x3E7\r\n\r\nTarget Subject:\r\n\tSecurity ID:\t\tS-1-0-0\r\n\tAccount Name:\t\tWIN10$\r\n\tAccount Domain:\t\tWINDOMAIN\r\n\tLogon ID:\t\t0x3E4\r\n\r\nProcess Information:\r\n\tNew Process ID:\t\t0x7ec\r\n\tNew Process Name:\tC:\\Windows\\System32\\svchost.exe\r\n\tToken Elevation Type:\t%%1936\r\n\tMandatory Label:\t\tS-1-16-16384\r\n\tCreator Process ID:\t0x244\r\n\tCreator Process Name:\tC:\\Windows\\System32\\services.exe\r\n\tProcess Command Line:\t\r\n\r\nToken Elevation Type indicates the type of token that was assigned to the new process in accordance with User Account Control policy.\r\n\r\nType 1 is a full token with no privileges removed or groups disabled. A full token is only used if User Account Control is disabled or if the user is the built-in Administrator account or a service account.\r\n\r\nType 2 is an elevated token with no privileges removed or groups disabled. An elevated token is used when User Account Control is enabled and the user chooses to start the program using Run as administrator. An elevated token is also used when an application is configured to always require administrative privilege or to always require maximum privilege, and the user is a member of the Administrators group.\r\n\r\nType 3 is a limited token with administrative privileges removed and administrative groups disabled. 
The limited token is used when User Account Control is enabled, the application does not require administrative privilege, and the user does not choose to start the program using Run as administrator. + Information + Process Creation + Info + Security + Microsoft Windows security auditing. + + Audit Success + + + +``` + +```xml + + + + 4624 + 2 + 0 + 12544 + 0 + 0x8020000000000000 + + 72207 + + + Security + win10.windomain.local + + + + S-1-5-18 + WIN10$ + WINDOMAIN + 0x3e7 + S-1-5-18 + SYSTEM + NT AUTHORITY + 0x3e7 + 5 + Advapi + Negotiate + - + {00000000-0000-0000-0000-000000000000} + - + - + 0 + 0x244 + C:\\Windows\\System32\\services.exe + - + - + %%1833 + - + - + - + %%1843 + 0x0 + %%1842 + + + An account was successfully logged on.\r\n\r\nSubject:\r\n\tSecurity ID:\t\tS-1-5-18\r\n\tAccount Name:\t\tWIN10$\r\n\tAccount Domain:\t\tWINDOMAIN\r\n\tLogon ID:\t\t0x3E7\r\n\r\nLogon Information:\r\n\tLogon Type:\t\t5\r\n\tRestricted Admin Mode:\t-\r\n\tVirtual Account:\t\tNo\r\n\tElevated Token:\t\tYes\r\n\r\nImpersonation Level:\t\tImpersonation\r\n\r\nNew Logon:\r\n\tSecurity ID:\t\tS-1-5-18\r\n\tAccount Name:\t\tSYSTEM\r\n\tAccount Domain:\t\tNT AUTHORITY\r\n\tLogon ID:\t\t0x3E7\r\n\tLinked Logon ID:\t\t0x0\r\n\tNetwork Account Name:\t-\r\n\tNetwork Account Domain:\t-\r\n\tLogon GUID:\t\t{00000000-0000-0000-0000-000000000000}\r\n\r\nProcess Information:\r\n\tProcess ID:\t\t0x244\r\n\tProcess Name:\t\tC:\\Windows\\System32\\services.exe\r\n\r\nNetwork Information:\r\n\tWorkstation Name:\t-\r\n\tSource Network Address:\t-\r\n\tSource Port:\t\t-\r\n\r\nDetailed Authentication Information:\r\n\tLogon Process:\t\tAdvapi \r\n\tAuthentication Package:\tNegotiate\r\n\tTransited Services:\t-\r\n\tPackage Name (NTLM only):\t-\r\n\tKey Length:\t\t0\r\n\r\nThis event is generated when a logon session is created. It is generated on the computer that was accessed.\r\n\r\nThe subject fields indicate the account on the local system which requested the logon. 
This is most commonly a service such as the Server service, or a local process such as Winlogon.exe or Services.exe.\r\n\r\nThe logon type field indicates the kind of logon that occurred. The most common types are 2 (interactive) and 3 (network).\r\n\r\nThe New Logon fields indicate the account for whom the new logon was created, i.e. the account that was logged on.\r\n\r\nThe network fields indicate where a remote logon request originated. Workstation name is not always available and may be left blank in some cases.\r\n\r\nThe impersonation level field indicates the extent to which a process in the logon session can impersonate.\r\n\r\nThe authentication information fields provide detailed information about this specific logon request.\r\n\t- Logon GUID is a unique identifier that can be used to correlate this event with a KDC event.\r\n\t- Transited services indicate which intermediate services have participated in this logon request.\r\n\t- Package name indicates which sub-protocol was used among the NTLM protocols.\r\n\t- Key length indicates the length of the generated session key. This will be 0 if no session key was requested. + Information + Logon + Info + Security + Microsoft Windows security auditing. 
+ + Audit Success + + + +``` + +### The collector acknowledges + +``` +Frame 3780: 2013 bytes on wire (16104 bits), 2013 bytes captured (16104 bits) on interface \Device\NPF_{D0B586C7-BCDD-4989-9F90-8AD183BA1268}, id 0 +Ethernet II, Src: PcsCompu_62:f7:34 (08:00:27:62:f7:34), Dst: PcsCompu_a6:35:47 (08:00:27:a6:35:47) +Internet Protocol Version 4, Src: 192.168.58.103, Dst: 192.168.58.100 +Transmission Control Protocol, Src Port: 5985, Dst Port: 65092, Seq: 2301, Ack: 24847, Len: 1959 +Hypertext Transfer Protocol + HTTP/1.1 200 \r\n + Content-Type: multipart/encrypted;protocol="application/HTTP-Kerberos-session-encrypted";boundary="Encrypted Boundary"\r\n + Server: Microsoft-HTTPAPI/2.0\r\n + Date: Thu, 22 Sep 2022 08:05:18 GMT\r\n + Content-Length: 1732\r\n + \r\n + [HTTP response 3/12] + [Time since request: 0.000914000 seconds] + [Prev request in frame: 3749] + [Prev response in frame: 3751] + [Request in frame: 3778] + [Next request in frame: 3972] + [Next response in frame: 3977] + [Request URI: http://srv.windomain.local:5985/wsman/subscriptions/B6BDBB59-FB07-4EE5-841F-EBEC9D67CDD4/1] + File Data: 1732 bytes +MIME Multipart Media Encapsulation, Type: multipart/encrypted, Boundary: "Encrypted Boundary" + [Type: multipart/encrypted] + First boundary: --Encrypted Boundary\r\n + Encapsulated multipart part: (application/http-kerberos-session-encrypted) + Content-Type: application/HTTP-Kerberos-session-encrypted\r\n + OriginalContent: type=application/soap+xml;charset=UTF-16;Length=1430 + Boundary: \r\n--Encrypted Boundary\r\n + Encapsulated multipart part: (application/octet-stream) + Content-Type: application/octet-stream\r\n + Length of security token: 60 + GSS-API Generic Security Service Application Program Interface + Media Type + Last boundary: --Encrypted Boundary--\r\n +``` + +Deciphered, it gives us: + +```xml + + + http://schemas.dmtf.org/wbem/wsman/1/wsman/Ack + uuid:1FE15160-9BB5-4CAB-B200-CDCC83F77FCB + uuid:C7F39CB2-8FFD-4DA3-A111-CDB303EEA098 + 1 + 
http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous + uuid:31652DEB-C9E8-45D6-B3E8-90AC64D48422 + + + +``` + +### And so on... + +The client keeps sending POST requests containing events and/or hearbeat. + +At the same time, every "Refresh" secondes the client connects to the collector to check if subscriptions configuration has changed (is there new subscriptions ? has an existing subscription been updated ?). + +### The client can end the subscription + +This can happen, for example, when the computer is turned off. + +In this case, the client sends a `SubscriptionEnd` to the collector: + +``` +Received HTTP request from 192.168.58.100:56842: POST /wsman/subscriptions/0C98CAE1-EDA4-4C92-82D9-A8A20EB518D2 +``` +```xml + + + http://wec.windomain.local:5985/wsman/subscriptions/0C98CAE1-EDA4-4C92-82D9-A8A20EB518D2 + win10.windomain.local + + + http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous + + http://schemas.xmlsoap.org/ws/2004/08/eventing/SubscriptionEnd + 512000 + uuid:21DF76D2-A1ED-4333-BEAB-378AFB012DEE + + + uuid:B2FA79EC-9D29-4DFF-9BB4-D12809DC935D + uuid:50485065-8F54-44F2-8525-DBCBE293E317 + 1 + PT0.500S + 0C98CAE1-EDA4-4C92-82D9-A8A20EB518D2 + + + + + + + http://wec.windomain.local:5985/wsman/subscriptions/0C98CAE1-EDA4-4C92-82D9-A8A20EB518D2 + + 820FEC3F-E3BD-4064-A4FD-BA8D550C4432 + + + http://schemas.xmlsoap.org/ws/2004/08/eventing/SourceCancelling + + + + Windows Event Forward Plugin failed to read events. + + + + + + + +``` + +The next message is a `http://schemas.microsoft.com/wbem/wsman/1/wsman/End`. Fun fact: the client indicates that it is SLDC compressed but it is not. 
+ +```xml + + + http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous + http://schemas.microsoft.com/wbem/wsman/1/wsman/FullDuplex + http://schemas.microsoft.com/wbem/wsman/1/wsman/End + uuid:3244F96B-8ECA-4A06-90DE-8A1DED23AB1F + uuid:50485065-8F54-44F2-8525-DBCBE293E317 + + + +``` + +# Side note + +We always had trouble understanding where to put WinRM while reading Microsoft documentation. + +We understood that the built-in Windows Event Forwarding plugin and the Windows Event Collector use the WinRM service and it is this service that implements [MS-WSMV]. + +We thought it was worth mentioning it here as we could not find this information clearly stated anywhere else. diff --git a/doc/query.md b/doc/query.md new file mode 100644 index 0000000..5969c04 --- /dev/null +++ b/doc/query.md @@ -0,0 +1,59 @@ +# Event filter query + +Each subscription requires an event filter query. It defines the events to be collected within that subscription. + +The event filter query syntax is [documented by Microsoft](https://learn.microsoft.com/en-us/windows/win32/wes/consuming-events). + + +Note that in OpenWEC an event filter query must be in the following format: +```xml + + [query] + ... + +``` + +## Example + +```xml + + + + + + + + +``` + +## Known limitations + +### Retrieving all channels + +In some cases, you may want to retrieve events from all existing channels for each client without having to explain them. However, this does not seem to be possible using event filter queries. + +We recommend that you generate a list of the channels available in your environment by other means and set your event filter query accordingly. + +### Editing the query of an existing subscription + +When editing the query of an existing subscription, **you should not add a new channel**. If you do, for each client for which OpenWEC has a bookmark, you will retrieve all existing events for the newly added channel **regardless** of the subscription's `read_existing_events' parameter. 
You have two (bad) options: +* Either accept that you will retrieve a lot of events (the number depends on the added channel). +* Either delete all existing bookmarks for this subscription. If you do this, you will probably lose logs. + +This is because a bookmark already exists for the client, so it will be sent when the client enumerates the subscription. However, this bookmark does not contain the newly added channel. Therefore, the Windows client (strangely) assumes that you want to retrieve all its events, including the existing ones, regardless of the `read_existing_events' setting. + +### Query size + +We understand that the event filter query must retrieve events from a maximum of 256 different channels. + +If your query contains more channels, it will be considered invalid by Windows clients. + +### Channel permissions + +The Windows Event Log Forwarder runs as `NETWORK SERVICE` within the `WinRM` service. This means that **by default** the forwarder is not authorised to read all channels (e.g. `Security`). + +If you want to collect event logs from these channels (you should!), you must either add the `WinRM service` (SID) to the local `Event Log Readers` group of each Windows client, or authorise the `WinRM service` (SID) to read these channels. Alternatively you can do the same with the `NETWORK SERVICE` account. + +If the event log forwarder does not have permission to read a channel selected in an event filter query, it will still send events according to the rest of the query. Therefore, **you must ensure that the Windows Event Log Forwarder is allowed to read all the channels selected in the event filter query BEFORE enabling the subscription**. + +If you don't, the client will send bookmarks without the forbidden channels. This means that if the forwarder is later allowed to read events in one of these channels, it will send all existing events in that channel, regardless of the `read_existing_event` subscription parameter. 
In the case of `Security` this can represent a lot of data, causing the network to become congested and the OpenWEC server to use the CPU heavily. diff --git a/doc/subscription.md b/doc/subscription.md new file mode 100644 index 0000000..a81d0ec --- /dev/null +++ b/doc/subscription.md @@ -0,0 +1,312 @@ +# Subscription + +A subscription enables a Windows Event Collector to retrieve a set of events from a set of machines using a dedicated configuration. + +The set of events is defined by a list of XPath filter queries. For example, here is a query list composed of a single query which retrieves all event logs within channels `Application`, `Security`, `Setup` and `System`: +```xml + + + + + + + + +``` + +In Windows Event Forwarding protocol, a subscription is identified by its `version`, a GUID which must be updated each time changes are made to the subscription. + +In OpenWEC, each subscription has a `version`, but because `version` is updated at each modification, each subscription is actually identified by another attribute called `uuid`, which is another GUID unique to a subscription and never updated. A subscription can also be identified using its `name` (user defined). + +Each Windows machine configured to contact a Windows Event Collector server will send an `Enumerate` request to get a list of subscriptions. It will then create locally these subscriptions and fullfill them. + +## Parameters + +Subscriptions and their parameters are not defined in OpenWEC configuration file but in OpenWEC database. Therefore, you **must** use `openwec` cli to edit them. You should **never update subscription parameters directly in database**. + + +| Parameter | Required | Default value | Description +|---|---|---|---| +| `name` | **Yes** | - | the name of subscription. This name can be used to identify a subscription using openwec cli, but it also identifies the subscription for Windows machines. 
When analyzing Microsoft-Windows-Forwarding event logs, you may search for events with `EventData.Id` element text equal to `name`. | +| `query` | **Yes** | - | the XPath filter queries of the subscription, defining the set of events retrieved. See [query](query.md). | +| `uri` | No | *Undefined* | when configuring Windows machines to connect to the collector, you define a Server URL, for example `Server=http://wec.windomain.local:5985/this/is/my/custom/uri`. As shown here, you may choose a custom URI. Each Windows machine will regularly send an Enumerate request to the collector to retrieve the set of Subscriptions that it must fulfill.
  • If the `uri` parameter is `Undefined` (default), the subscription will always be sent.
  • If the `uri` parameter is defined, the subscription will be sent only if the request URI matches the subscription uri.
| +| `heartbeat_interval` | No | 3600 | The maximum allowable time, in seconds, before the client will send a heartbeat message if it has no new events to send. This is used by OpenWEC to determine the "status" of each machine. | +| `connection_retry_count` | No | 5 | Number of times the client will attempt to connect if the subscriber is unreachable. | +| `connection_retry_interval` | No | 60 | Interval observed between each connection attempt if the subscriber is unreachable. | +| `max_time` | No | 30 | The maximum time, in seconds, that the client should aggregate new events before sending them. | +| `max_envelope_size` | No | 512000 | The maximum number of bytes in the SOAP envelope used to deliver the events. | +| `enabled` | No | `False` | Whether the subscription is enabled or not. Note that a new subscription is **disabled** by default, and **can not** be enabled unless you configure at least one output. As a safeguard, subscriptions without outputs are ignored by openwec server. | +| `read_existing_events` | No | `False` | If `True`, the event source should replay all possible events that match the filter and any events that subsequently occur for that event source. + +## Subscription management + +On launch, OpenWEC retrieves currently active subscriptions from database and instantiates tasks for managing their outputs. Parameters of these subscriptions are cached. A refresh of active subscriptions is done regularly (see [`db_sync_interval` setting](../openwec.conf.sample.toml)) to react to any changes. During a refresh, subscriptions that have been changed (parameters, outputs, ...) will get their outputs instances dropped and created again. + +When you edit a subscription, it is not applied immediately but only at the next "refresh" of the active subscriptions. The refresh interval is quite short by default (a few seconds) so this seems fine, but if required you can force a refresh by sending a `SIGHUP` signal to `openwecd` process.
+ +Sometimes, you may want to force all subscriptions to drop and recreate their outputs. This can be done using `openwec subscriptions reload` (see [Available Commands](subscription.md#available-commands)). Same as any change, it will be applied on the next refresh (or `SIGHUP` signal received). + +## Event Delivery Optimization Options + +In its configuration UI, Microsoft WEC enables users to choose between three event delivery optimization options. These options are pre-defined sets of subscription parameters which can be reproduced on OpenWEC. + + +| Option | Microsoft description | Parameters | +|---|---|---| +| Normal | This option ensures reliable delivery of events and does not attempt to conserve bandwidth. It is the appropriate choice unless you need tighter control over bandwidth usage or need forwarded events delivered as quickly as possible. |
  • `max_time`: 900 (15 minutes)
  • `heartbeat_interval`: 900 (15 minutes)
  • TODO: Batch item max size!
| +| Minimize Bandwidth | This option ensures that the use of network bandwidth for event delivery is strictly controlled. It is an appropriate choice if you want to limit the frequency of network connections made to deliver events. |
  • `max_time`: 21600 (6 hours)
  • `heartbeat_interval`: 21600 (6 hours)
| +| Minimize Latency | This option ensures that events are delivered with minimal delay. It is an appropriate choice if you are collecting alerts or critical events. |
  • `max_time`: 30 (30 seconds)
  • `heartbeat_interval`: 3600 (1 hour)
| + + +In its documentation, Microsoft states that Normal mode uses a Pull delivery mode (meaning that its the collector who connects to Windows machines and retrieve their event logs). It seems to be a mistake, as the exported configuration of a subscription configured in Normal mode clearly specifies that it is SourceInitiated in Push mode. + +## Available commands + +### `openwec subscriptions` + +List subscriptions in a "short" format. Each line represents a subscription, with its status (enabled or not), its name and its URI. + +#### Usage + +``` +$ openwec subscriptions +[-] Old subscription (*) +[+] My-new-subscription (*) +[+] Subscription-toto (/toto) +``` + +There are 3 subscriptions: +- A subscription named `Old subscription`, disabled with no URI defined. +- A subscription named `My-new-subscription`, enabled with no URI defined. +- A subscription named `Subscription-toto`, enabled with a URI set to `/toto`. + +It means that when a Windows machine sends an Enumerate request using URI `/hello-world`, it gets an Enumerate reponse containing only the subscription `My-new-subscription`. +Otherwise, if a Windows machine sends an Enumerate request using URI `/toto`, it gets an Enumerate response containing subscriptions `My-new-subscription` **and** `Subscription-toto`. + +### `openwec subscriptions new` + +This command enables you to create a new subscription. + +There are 2 required parameters: +- `name`: the name of the subscription. Must be unique. +- `query`: the path a file containing the xml query list of this subscription. + +You can optionnally set all subscription parameters, except the `enabled` one because a newly created subscription is **always** `disabled`. + +#### Usage + +``` +$ openwec subscriptions new my-super-subscription query_simple.xml --uri /super --max-time 600 --heartbeat-interval 600 +Subscription my-super-subscription has been created successfully. 
You need to configure its outputs using `openwec subscriptions edit my-super-subscription outputs add --help`. When you are ready, you can enable it using `openwec subscriptions edit my-super-subscription --enable` +``` + +This command creates a new subscription named `my-super-subscription`, based on the query list contained in `query_simple.xml`, with URI `/super` and its `max_time` and `heartbeat_interval` configured both to `600`. Other parameters will get their default values. The newly created subscription is **disabled** and contains no outputs. + +You may add some using `openwec subscriptions output`, which is detailed in [Outputs documentation](outputs.md). + +### `openwec subscriptions edit` + +This command enables you to edit an already existing subscription. + +You must provide the identifier of the subscription to edit, which can be either its `name` or its `uuid`. + +You may edit every parameters of the subscription, even its name. + +You should be very careful when editing a subscription query, especially when adding new event log channels (see [Query known issues](query.md#known-issues)). + +Subscriptions update are not immediatly applied. openwec server maintains an in-memory cache of the current subscriptions, and refreshes its cache regularly. Your changes will only be applied when the cache is refreshed. You can force a cache refresh by sending a SIGHUP signal to openwec server process. + +#### Usage + +``` +$ openwec subscriptions edit my-super-subscription --uri /new-uri --connection-retry-count 10 +``` + +This commad edits the subscription named `my-super-subscription`, changing its uri to `/new-uri` and its `connection_retry_count` parameter to `10`. + + +### `openwec subscriptions show` + +This command prints all parameters of a subscription, including its query. 
+ +#### Usage + +``` +$ openwec subscriptions show my-super-subscription +Subscription my-super-subscription + UUID: 27D8CE0B-CAFE-44CA-9FE1-4B9D6EE45AE8 + Version: 3366A5BD-9E71-482E-9359-9505EA1F8400 + URI: /new-uri + Heartbeat interval: 600s + Connection retry count: 10 + Connection retry interval: 60s + Max time without heartbeat/events: 600s + Max envelope size: 512000 bytes + ReadExistingEvents: false + Outputs: None + Enabled: false + +Event filter query: + + + + + + + + + +``` + +### `openwec subscriptions duplicate` + +This command duplicates an existing subscription. + +The newly created subscriptions will inherit all the parameters and outputs of its parent, but : +- it will be disabled. +- it will get a new unique `uuid`. +- it will get a new `version`. + + +#### Usage + +``` +$ openwec subscriptions duplicate my-super-subscription this-is-a-clone + +$ openwec subscriptions show this-is-a-clone +Subscription this-is-a-clone + UUID: 88C9BADD-BCB1-4324-98DC-2D56E4A893DA + Version: C460B829-C50F-42E1-8275-F9AB62A5058C + URI: /new-uri + Heartbeat interval: 600s + Connection retry count: 10 + Connection retry interval: 60s + Max time without heartbeat/events: 600s + Max envelope size: 512000 bytes + ReadExistingEvents: false + Outputs: None + Enabled: false + +Event filter query: + + + + + + + + + +``` + +### `openwec subscriptions export` + +This command exports the currently configured subscriptions in a `json` format. You may export only one subscription using `--subscription `. + +These subscriptions can be imported in another openwec installation. 
+ +**Warning: Importing subscriptions exported from another openwec version might not work.** + + +#### Usage + +``` +$ openwec subscriptions export +[{"uuid":"27D8CE0B-CAFE-44CA-9FE1-4B9D6EE45AE8","version":"3366A5BD-9E71-482E-9359-9505EA1F8400","name":"my-super-subscription","uri":"/new-uri","query":"\n \n \n \n \n \n \n\n","heartbeat_interval":600,"connection_retry_count":10,"connection_retry_interval":60,"max_time":600,"max_envelope_size":512000,"enabled":false,"read_existing_events":false,"outputs":[]},[...]] +``` + +### `openwec subscriptions import` + +This command imports subscriptions from a file. Two formats are supported: +* `openwec`: the format generated by `openwec subscriptions export`. **Importing subscriptions exported from another openwec version might not work.** +* `windows`: the format generated on a Windows Server Windows Event Collector with `wecutil.exe /gs /format:xml`. Note that `openwec` only supports source initiated mode and does not support client filtering. + +Imported subscriptions are disabled by default. + +#### Usage + +``` +$ openwec subscriptions import -f windows windows-subscription.xml +1 subscription has been imported. You may want to enable it using `openwec subscriptions edit --enable`. +``` + +### `openwec subscriptions delete` + +This command deletes subscriptions, and all associated bookmarks and heartbeats. There is no way to undo this action (unless you backup your database, and **you should definitely do it**). + +#### Usage + +``` +$ openwec subscriptions delete windows-subscription +Are you sure that you want to delete "windows-subscription" (92A7836D-96FC-4EE5-9E45-03D0618607DE) ? [y/n] y +``` + +### `openwec subscriptions machines` + +This command enables you to retrieve the list of clients attached to a subscription. + +You may filter on status: +* `--active`: only show active clients, that is to say clients that sent events since `--interval` seconds ago (defaults to `heartbeat-interval`). 
+* `--alive`: only show alive clients, that is to say clients that sent heartbeats since `--interval` seconds ago, but no events. This probably means that these machines did not produce events matching the filter query of the subscription. +* `--dead`: only show dead clients, that is to say clients that did not send heartbeats or events since at least `--interval` seconds ago. Most of the time, this means that the machine is turned off or can not reach the collector due to network outage. + +If you only want numbers, check `openwec stats` command. + +The output format is `:`. + +#### Usage + +``` +$ openwec subscriptions machines my-super-subscription +192.168.58.102:DC$@WINDOMAIN.LOCAL +192.168.58.100:WIN10$@WINDOMAIN.LOCAL +``` + +### `openwec subscriptions enable` + +This command enables one or many subscriptions. You may also want to enable all configured subscriptions without listing them using `--all`. + +For one subscription (`openwec subscriptions enable `), you can alternatively use `openwec subscriptions edit --enable`. + +Subscriptions with no outputs configured can not be enabled and will not be enabled by this command. However, this command will fail only if no subscriptions could be enabled, and print warnings otherwise. + +Subscription updates are not immediately applied. openwec server maintains an in-memory cache of the current subscriptions, and refreshes its cache regularly. Your changes will only be applied when the cache is refreshed. You can force a cache refresh by sending a SIGHUP signal to openwec server process. + +#### Usage + +``` +$ openwec subscriptions enable my-super-subscription this-is-a-clone +``` + +### `openwec subscriptions disable` + +This command disables one or many subscriptions. You may also want to disable all configured subscriptions without listing them using `--all`. + +For one subscription (`openwec subscriptions disable `), you can alternatively use `openwec subscriptions edit --disable`.
+ +Subscriptions update are not immediatly applied. openwec server maintains an in-memory cache of the current subscriptions, and refreshes its cache regularly. Your changes will only be applied when the cache is refreshed. You can force a cache refresh by sending a SIGHUP signal to openwec server process. + +#### Usage + +``` +$ openwec subscriptions disable my-super-subscription this-is-a-clone +``` + + +### `openwec subscriptions reload` + +This command updates the version of one or many subscriptions. You may also want to reload all configured subscriptions without listing them using `--all`. + +This has two main effects: +* all running outputs will be killed and started again. +* Windows clients will close their "events" TCP connection and open a new one. + +In case of a multi-node setup, this command may be useful to "balance" clients between openwec nodes after a load balancing configuration change. It may also be used with `Files` output to close and re-open all file descriptors. + + +#### Usage + +``` +$ openwec subscriptions reload my-super-subscription this-is-a-clone +$ openwec subscriptions reload --all +``` diff --git a/openwec.conf.sample.toml b/openwec.conf.sample.toml new file mode 100644 index 0000000..41015dc --- /dev/null +++ b/openwec.conf.sample.toml @@ -0,0 +1,153 @@ +####################### +## Server settings ## +####################### + +[server] +# [Optional] +# Set server logging verbosity +# This parameter is overwritten by --verbosity argument and +# the OPENWEC_LOG environment variable. +# Default value is warn +# verbosity = "warn" + +# [Optional] +# Set subscriptions refresh interval +# Subscriptions are stored in the database. When you edit them using the cli, +# you only modify the database. +# The server retrieves its subscriptions from the database and cache +# them in memory. The in-memory subscriptions are refreshed at a fixed +# interval: `db_sync_interval`. +# Default value is 5 seconds. 
+# db_sync_interval = 5 + +# [Optional] +# Set heartbeat cache flush interval +# Heartbeat messages are cached in memory before being stored in database +# to improve performances. +# A flush operation saves all cached heartbeats in database at a fixed +# interval: `flush_heartbeats_interval` +# Default value is 5 seconds. +# flush_heartbeats_interval = 5 + +# [Optional] +# Set node name +# This may be used by outputs. Unset by default. +# node_name = unsef + +########################## +## Databases settings ## +########################## + +# Two database backends are currently supported: SQLite and Postgres. +# Note that Postgres implementation is made to work well with CockroachDB. + +[database] +# [Required] +# Database type: SQLite | Postgres +# type = "SQLite" + +## SQLite configuration + +# SQLite DB path +# The SQLite DB will be created and initialised if it does not already exist. +# path = "/var/db/openwec/openwec.sqlite" + +## End of SQLite + +## Postgres configuration + +# [Required] +# Postgres database Hostname +# host = "localhost" + +# [Required] +# Postgres database port +# port = 5432 + +# [Required] +# Postgres database name. It must already exist and user should +# have all permissions on it. +# dbname = "openwec" + +# [Required] +# Postgres database user. It must already exist and have all permissions +# on . +# user = "openwec" + +# [Required] +# Postgres database user password +# password = "" + +# [Optional] +# Postgres SSL mode. Possibles values are: +# - Disable: Do not use TLS +# - Prefer: Attempt to connect with TLS but allow sessions without +# - Require: Require the use of TLS +# ssl_mode = "Prefer" + +# [Optional] +# Custom SSL CA certificate file +# When ssl_mode is Prefer or Require, you may want to use a specific CA +# certificate file instead of the ones trusted by your system (default). 
+# ca_file = unset + +# [Optional] +# Max chunk size +# When performing bulk insert queries, this is the maximum number of +# entries that will be inserted in one query. +# max_chunk_size = 500 + +## End of Postgres configuration + +########################### +## Collectors settings ## +########################### + +# You may define multiple collect servers. +# Each collector must listen on a different (address, port) pair. +# All collectors share the same database. +# This is useful if you want to support both Kerberos and TLS authentication +# (TLS authentication is not yet supported). + +# This defines one collector +[[collectors]] +# [Required] +# Local Hostname +# Clients will contact this hostname to send their events +# hostname = "openwec.mydomain.local" + +# [Required] +# Server listen address +# listen_address = "127.0.0.1" + +# [Optional] +# Server listenning port +# If missing in config, default value is 5985 +# listen_port = 5985 + +# [Optional] +# Maximum content size allowed in client messages. +# If missing in config, default value is 512000 (which should be safe) +# max_content_length = 512000 + +# Authentication settings for this collector +# For now, the only available authentication method is Kerberos. +[collectors.authentication] +# [Required] +# Authentication method: Kerberos +# type = "Kerberos" + +## Kerberos configuration + +# [Required] +# Service Principal Name of the openwec account +# Should be something like "HTTP/openwec.mydomain.local@MYDOMAIN.LOCAL" +# service_principal_name = "" + +# [Required] +# Keytab file that contains secrets of the openwec account. +# It must contain an entry for the principal . +# It may contains other entries, which won't be used by openwec. 
+# keytab = "/etc/krb5.keytab" + +## End of Kerberos configuration diff --git a/server/Cargo.toml b/server/Cargo.toml new file mode 100644 index 0000000..0b16b54 --- /dev/null +++ b/server/Cargo.toml @@ -0,0 +1,45 @@ +[package] +name = "server" +version = "0.1.0" +edition = "2021" + +[[bin]] +name = "openwecd" +path = "src/main.rs" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +common = { path = "../common" } +env_logger = "0.9.1" +anyhow = "1.0.59" +base64 = "0.13.0" +buf-read-ext = "0.4.0" +http = "0.2.8" +httparse = "1.7.1" +hyper = { version = "0.14.20", features = ["full"] } +itoa = "1.0.3" +libgssapi = { version = "0.6.3", features = ["iov"] } +log = "0.4.17" +mime = "0.3.16" +quick-xml = "0.25.0" +roxmltree = "0.15.0" +tokio = { version = "1.20.1", features = ["full"] } +rdkafka = { version = "0.28.0", features = ["zstd", "libz", "external-lz4"] } +regex = "1.6.0" +lazy_static = "1.4.0" +uuid = { version = "1.1.2", features = ["v4", "fast-rng"] } +serde = { version = "1.0.147", features = ["derive"] } +serde_json = "1.0.87" +async-trait = "0.1.58" +chrono = "0.4.22" +clap = { version = "4.0.29", features = ["cargo"] } +futures-util = "0.3.25" +xmlparser = "0.13.5" +itertools = "0.10.5" +futures = "0.3.27" +bitreader = "0.3.6" + + +[dev-dependencies] +hex = "0.4.3" diff --git a/server/src/event.rs b/server/src/event.rs new file mode 100644 index 0000000..85f480b --- /dev/null +++ b/server/src/event.rs @@ -0,0 +1,1038 @@ +use anyhow::{anyhow, bail, Context, Result}; +use log::{info, trace}; +use roxmltree::{Document, Node}; +use serde::Serialize; +use std::{collections::HashMap, net::SocketAddr}; + +#[derive(Debug, Default, Serialize, Clone)] +pub struct EventDataType { + #[serde(flatten, skip_serializing_if = "HashMap::is_empty")] + named_data: HashMap, + #[serde(rename = "Data", skip_serializing_if = "Vec::is_empty")] + unamed_data: Vec, + #[serde(rename = "Binary", skip_serializing_if = 
"Option::is_none")] + binary: Option, +} + +#[derive(Debug, Default, Serialize, Clone)] +pub struct DebugDataType { + #[serde(rename = "SequenceNumber", skip_serializing_if = "Option::is_none")] + sequence_number: Option, + #[serde(rename = "FlagsName", skip_serializing_if = "Option::is_none")] + flags_name: Option, + #[serde(rename = "LevelName", skip_serializing_if = "Option::is_none")] + level_name: Option, + #[serde(rename = "Component")] + component: String, + #[serde(rename = "SubComponent", skip_serializing_if = "Option::is_none")] + sub_component: Option, + #[serde(rename = "FileLine", skip_serializing_if = "Option::is_none")] + file_line: Option, + #[serde(rename = "Function", skip_serializing_if = "Option::is_none")] + function: Option, + #[serde(rename = "Message")] + message: String, +} + +#[derive(Debug, Default, Serialize, Clone)] +pub struct ProcessingErrorDataType { + #[serde(rename = "ErrorCode")] + error_code: u32, + #[serde(rename = "DataItemName")] + data_item_name: String, + #[serde(rename = "EventPayload")] + event_payload: String, +} + +pub type UserDataType = String; +pub type BinaryEventDataType = String; + +#[derive(Debug, Default, Serialize, Clone)] +pub enum DataType { + EventData(EventDataType), + UserData(UserDataType), + DebugData(DebugDataType), + ProcessingErrorData(ProcessingErrorDataType), + BinaryEventData(BinaryEventDataType), + #[default] + Unknown, +} + +impl DataType { + fn is_unknown(&self) -> bool { + matches!(self, DataType::Unknown) + } +} + +#[derive(Debug, Default, Serialize, Clone)] +pub struct Event { + #[serde(rename = "System")] + system: System, + #[serde(flatten, skip_serializing_if = "DataType::is_unknown")] + data: DataType, + #[serde(rename = "RenderingInfo")] + rendering_info: RenderingInfo, + #[serde(rename = "OpenWEC")] + additional: Additional, +} + +impl Event { + pub fn from_str( + addr: &str, + principal: &str, + time_received: &str, + subscription_uuid: &str, + subscription_version: &str, + 
subscription_name: &str, + subscription_uri: Option<&String>, + content: &str, + ) -> Result { + let doc = Document::parse(content).context("Failed to parse event XML")?; + let mut event = Event::default(); + event.additional = Additional { + addr: addr.to_owned(), + principal: principal.to_owned(), + time_received: time_received.to_owned(), + subscription: SubscriptionType { + uuid: subscription_uuid.to_owned(), + version: subscription_version.to_owned(), + name: subscription_name.to_owned(), + uri: subscription_uri.cloned(), + }, + }; + let root = doc.root_element(); + for node in root.children() { + if node.tag_name().name() == "System" { + event.system = System::from(&node).context("Parsing failure in System")? + } else if node.tag_name().name() == "EventData" { + event.data = parse_event_data(&node).context("Parsing failure in EventData")? + } else if node.tag_name().name() == "UserData" { + event.data = parse_user_data(&node).context("Parsing failure in UserData")? + } else if node.tag_name().name() == "BinaryEventData" { + event.data = DataType::BinaryEventData(node.text().unwrap_or_default().to_owned()); + } else if node.tag_name().name() == "DebugData" { + event.data = parse_debug_data(&node).context("Parsing failure in DebugData")? + } else if node.tag_name().name() == "ProcessingErrorData" { + event.data = parse_processing_error_data(&node) + .context("Parsing failure in ProcessingErrorData")? + } else if node.tag_name().name() == "RenderingInfo" { + event.rendering_info = + RenderingInfo::from(&node).context("Parsing failure in RenderingInfo")? 
+ } else if node.tag_name().name() == "SubscriptionBookmarkEvent" { + // Nothing to do, this node is present in the first received event (EventID 111) + } else { + info!("Unknown node {} when parsing Event", node.tag_name().name()); + trace!("Event was: {}", content); + } + } + + Ok(event) + } +} + +fn parse_event_data(event_data_node: &Node) -> Result { + let mut named_data = HashMap::new(); + let mut unamed_data = Vec::new(); + let mut binary: Option = None; + for node in event_data_node.children() { + if node.tag_name().name() == "Data" { + let name = node.attribute("Name").map(str::to_string); + let value = node.text().unwrap_or_default().to_owned(); + + match name { + Some(n) => { + named_data.insert(n, value); + } + None if !value.is_empty() => unamed_data.push(value), + _ => (), + }; + } + if node.tag_name().name() == "Binary" { + binary = node.text().map(str::to_string); + } + } + Ok(DataType::EventData(EventDataType { + named_data, + unamed_data, + binary, + })) +} + +fn parse_debug_data(debug_data_node: &Node) -> Result { + let mut debug_data = DebugDataType::default(); + for node in debug_data_node.children() { + if node.tag_name().name() == "SequenceNumber" { + debug_data.sequence_number = node.text().and_then(|s| s.parse().ok()); + } else if node.tag_name().name() == "FlagsName" { + debug_data.flags_name = node.text().map(str::to_string); + } else if node.tag_name().name() == "LevelName" { + debug_data.level_name = node.text().map(str::to_string); + } else if node.tag_name().name() == "Component" { + debug_data.component = node.text().unwrap_or_default().to_owned(); + } else if node.tag_name().name() == "SubComponent" { + debug_data.sub_component = node.text().map(str::to_string); + } else if node.tag_name().name() == "FileLine" { + debug_data.file_line = node.text().map(str::to_string); + } else if node.tag_name().name() == "Function" { + debug_data.function = node.text().map(str::to_string); + } else if node.tag_name().name() == "Message" { + 
debug_data.message = node.text().unwrap_or_default().to_owned(); + } + } + Ok(DataType::DebugData(debug_data)) +} + +fn parse_processing_error_data(processing_error_data_node: &Node) -> Result { + let mut processing_error_data = ProcessingErrorDataType::default(); + for node in processing_error_data_node.children() { + if node.tag_name().name() == "ErrorCode" { + processing_error_data.error_code = node.text().unwrap_or_default().parse()?; + } else if node.tag_name().name() == "DataItemName" { + processing_error_data.data_item_name = node.text().unwrap_or_default().to_owned(); + } else if node.tag_name().name() == "EventPayload" { + processing_error_data.event_payload = node.text().unwrap_or_default().to_owned(); + } + } + Ok(DataType::ProcessingErrorData(processing_error_data)) +} + +fn parse_user_data(user_data_node: &Node) -> Result { + // We don't try to parse UserData node content as XML since its content + // is not specified. Instead, we retrieve its content as text. + let mut data = String::new(); + for node in user_data_node.children() { + data.push_str(node.document().input_text()[node.range()].as_ref()) + } + Ok(DataType::UserData(data)) +} + +#[derive(Debug, Default, Serialize, Clone)] +struct Additional { + #[serde(rename = "IpAddress")] + addr: String, + #[serde(rename = "TimeReceived")] + time_received: String, + #[serde(rename = "Principal")] + principal: String, + #[serde(rename = "Subscription")] + subscription: SubscriptionType, +} + +#[derive(Debug, Default, Serialize, Clone)] +struct SubscriptionType { + #[serde(rename = "Uuid")] + uuid: String, + #[serde(rename = "Version")] + version: String, + #[serde(rename = "Name")] + name: String, + #[serde(rename = "Uri", skip_serializing_if = "Option::is_none")] + uri: Option, +} + +#[derive(Debug, Default, Serialize, Clone)] +pub struct Provider { + #[serde(skip_serializing_if = "Option::is_none")] + #[serde(rename = "Name")] + pub name: Option, + #[serde(skip_serializing_if = "Option::is_none")] + 
#[serde(rename = "Guid")] + pub guid: Option, + #[serde(skip_serializing_if = "Option::is_none")] + #[serde(rename = "EventSourceName")] + pub event_source_name: Option, +} + +#[derive(Debug, Default, Serialize, Clone)] +pub struct Correlation { + #[serde(skip_serializing_if = "Option::is_none")] + #[serde(rename = "ActivityID")] + pub activity_id: Option, + #[serde(skip_serializing_if = "Option::is_none")] + #[serde(rename = "RelatedActivityID")] + pub related_activity_id: Option, +} + +#[derive(Debug, Default, Serialize, Clone)] +pub struct Execution { + #[serde(rename = "ProcessID")] + pub process_id: u32, + + #[serde(rename = "ThreadID")] + pub thread_id: u32, + + #[serde(rename = "ProcessorID")] + #[serde(skip_serializing_if = "Option::is_none")] + pub processor_id: Option, + + #[serde(rename = "SessionID")] + #[serde(skip_serializing_if = "Option::is_none")] + pub session_id: Option, + + #[serde(rename = "KernelTime")] + #[serde(skip_serializing_if = "Option::is_none")] + pub kernel_time: Option, + + #[serde(rename = "UserTime")] + #[serde(skip_serializing_if = "Option::is_none")] + pub user_time: Option, + + #[serde(rename = "ProcessorTime")] + #[serde(skip_serializing_if = "Option::is_none")] + pub processor_time: Option, +} + +#[derive(Debug, Default, Serialize, Clone)] +struct System { + #[serde(rename = "Provider")] + provider: Provider, + #[serde(rename = "EventID")] + event_id: u32, + #[serde(rename = "EventIDQualifiers", skip_serializing_if = "Option::is_none")] + event_id_qualifiers: Option, + #[serde(rename = "Version", skip_serializing_if = "Option::is_none")] + version: Option, + #[serde(rename = "Level", skip_serializing_if = "Option::is_none")] + level: Option, + #[serde(rename = "Task", skip_serializing_if = "Option::is_none")] + task: Option, + #[serde(rename = "Opcode", skip_serializing_if = "Option::is_none")] + opcode: Option, + #[serde(rename = "Keywords", skip_serializing_if = "Option::is_none")] + keywords: Option, + #[serde(rename = 
"TimeCreated", skip_serializing_if = "Option::is_none")] + time_created: Option, + #[serde(rename = "EventRecordID", skip_serializing_if = "Option::is_none")] + event_record_id: Option, + #[serde(rename = "Correlation", skip_serializing_if = "Option::is_none")] + correlation: Option, + #[serde(rename = "Execution", skip_serializing_if = "Option::is_none")] + execution: Option, + #[serde(rename = "Channel", skip_serializing_if = "Option::is_none")] + channel: Option, + #[serde(rename = "Computer")] + computer: String, + #[serde(rename = "Container", skip_serializing_if = "Option::is_none")] + container: Option, + #[serde(rename = "UserID", skip_serializing_if = "Option::is_none")] + user_id: Option, +} + +impl System { + fn from(system_node: &Node) -> Result { + let mut system = System::default(); + let mut computer_opt = None; + let mut event_id_opt = None; + for node in system_node.children() { + let tag = node.tag_name(); + if tag.name() == "Provider" { + system.provider = Provider { + name: node.attribute("Name").map(str::to_string), + guid: node.attribute("Guid").map(str::to_string), + event_source_name: node.attribute("EventSourceName").map(str::to_string), + }; + } else if tag.name() == "EventID" { + event_id_opt = node.text().and_then(|s| s.parse().ok()); + system.event_id_qualifiers = node + .attribute("Qualifiers") + .unwrap_or_default() + .parse() + .ok(); + } else if tag.name() == "Version" { + system.version = node.text().and_then(|s| s.parse().ok()); + } else if tag.name() == "Level" { + system.level = node.text().and_then(|s| s.parse().ok()); + } else if tag.name() == "Task" { + system.task = node.text().and_then(|s| s.parse().ok()); + } else if tag.name() == "Opcode" { + system.opcode = node.text().and_then(|s| s.parse().ok()); + } else if tag.name() == "Keywords" { + system.keywords = node.text().map(str::to_string); + } else if tag.name() == "TimeCreated" { + system.time_created = Some( + node.attribute("SystemTime") + .ok_or_else(|| { + 
anyhow!("SystemTime attribute of TimeCreated field is missing") + })? + .to_owned(), + ); + } else if tag.name() == "EventRecordID" { + system.event_record_id = node.text().and_then(|s| s.parse().ok()); + } else if tag.name() == "Correlation" { + system.correlation = Some(Correlation { + activity_id: node.attribute("ActivityID").map(str::to_string), + related_activity_id: node.attribute("RelatedActivityID").map(str::to_string), + }); + } else if tag.name() == "Execution" { + system.execution = Some(Execution { + process_id: node + .attribute("ProcessID") + .ok_or_else(|| anyhow!("ProcessID field is missing"))? + .parse()?, + thread_id: node + .attribute("ThreadID") + .ok_or_else(|| anyhow!("ThreadID is missing"))? + .parse()?, + processor_id: node.attribute("ProcessorID").and_then(|s| s.parse().ok()), + session_id: node.attribute("SessionID").and_then(|s| s.parse().ok()), + kernel_time: node.attribute("KernelTime").and_then(|s| s.parse().ok()), + user_time: node.attribute("UserTime").and_then(|s| s.parse().ok()), + processor_time: node.attribute("ProcessorTime").and_then(|s| s.parse().ok()), + }); + } else if tag.name() == "Channel" { + system.channel = node.text().map(str::to_string); + } else if tag.name() == "Computer" { + computer_opt = node.text().map(str::to_string); + } else if tag.name() == "Container" { + system.container = node.text().map(str::to_string); + } else if tag.name() == "Security" { + system.user_id = node.attribute("UserID").and_then(|s| s.parse().ok()); + } + } + + if let Some(computer) = computer_opt { + system.computer = computer; + } else { + bail!("Computer field is missing or invalid"); + } + + if let Some(event_id) = event_id_opt { + system.event_id = event_id; + } else { + bail!("EventID field is missing or invalid"); + } + + Ok(system) + } +} + +#[derive(Debug, Default, Serialize, Clone)] +struct RenderingInfo { + #[serde(rename = "Message", skip_serializing_if = "Option::is_none")] + message: Option, + #[serde(rename = "Level", 
skip_serializing_if = "Option::is_none")] + level: Option, + #[serde(rename = "Task", skip_serializing_if = "Option::is_none")] + task: Option, + #[serde(rename = "Opcode", skip_serializing_if = "Option::is_none")] + opcode: Option, + #[serde(rename = "Channel", skip_serializing_if = "Option::is_none")] + channel: Option, + #[serde(rename = "Provider", skip_serializing_if = "Option::is_none")] + // Microsoft schema states that this field should be called "Publisher" + // but this is not what has been observed in practice + provider: Option, + #[serde(rename = "Keywords", skip_serializing_if = "Option::is_none")] + keywords: Option>, + #[serde(rename = "Culture")] + culture: String, +} + +impl RenderingInfo { + fn from(rendering_info_node: &Node) -> Result { + let mut rendering_info = RenderingInfo::default(); + + rendering_info.culture = rendering_info_node + .attribute("Culture") + .unwrap_or_default() + .to_owned(); + for node in rendering_info_node.children() { + let tag = node.tag_name(); + if tag.name() == "Message" { + rendering_info.message = node.text().map(str::to_string); + } else if tag.name() == "Level" { + rendering_info.level = node.text().map(str::to_string); + } else if tag.name() == "Task" { + rendering_info.task = node.text().map(str::to_string); + } else if tag.name() == "Opcode" { + rendering_info.opcode = node.text().map(str::to_string); + } else if tag.name() == "Channel" { + rendering_info.channel = node.text().map(str::to_string); + } else if tag.name() == "Provider" { + rendering_info.provider = node.text().map(str::to_string); + } else if tag.name() == "Keywords" { + let mut keywords = Vec::new(); + for keyword_node in node.children() { + if keyword_node.tag_name().name() == "Keyword" && keyword_node.text().is_some() + { + keywords.push(keyword_node.text().unwrap_or_default().to_owned()); + } + } + rendering_info.keywords = Some(keywords); + } + } + + Ok(rendering_info) + } +} + +#[derive(Debug, Clone)] +pub struct EventMetadata { + addr: 
SocketAddr, + principal: String, + node_name: Option, +} + +impl EventMetadata { + pub fn new(addr: &SocketAddr, principal: &str, node_name: Option) -> Self { + EventMetadata { + addr: *addr, + principal: principal.to_owned(), + node_name, + } + } + + /// Get a reference to the event metadata's addr. + pub fn addr(&self) -> SocketAddr { + self.addr + } + + pub fn principal(&self) -> &str { + self.principal.as_ref() + } + + pub fn node_name(&self) -> Option<&String> { + self.node_name.as_ref() + } +} + +#[cfg(test)] +mod tests { + use serde_json::Value; + + use super::*; + + const EVENT_142: &str = r#" + + + + 142 + 0 + 2 + 10 + 2 + 0x4000000000000002 + + 149161 + + + Microsoft-Windows-WinRM/Operational + win10.windomain.local + + + + Enumeration + 2150858770 + + + WSMan operation Enumeration failed, error code 2150858770 + Error + Response handling + Stop + Microsoft-Windows-WinRM/Operational + Microsoft-Windows-Windows Remote Management + + Client + + + "#; + + #[test] + fn test_142_system_parsing() { + let doc = Document::parse(EVENT_142).expect("Failed to parse Event"); + let root = doc.root_element(); + for node in root.children() { + if node.tag_name().name() == "System" { + let system = System::from(&node).expect("Failed to parse System node"); + assert_eq!(system.provider.name.unwrap(), "Microsoft-Windows-WinRM"); + assert_eq!( + system.provider.guid.unwrap(), + "{a7975c8f-ac13-49f1-87da-5a984a4ab417}" + ); + assert_eq!(system.event_id, 142); + assert_eq!(system.version.unwrap(), 0); + assert_eq!(system.level.unwrap(), 2); + assert_eq!(system.task.unwrap(), 10); + assert_eq!(system.opcode.unwrap(), 2); + assert_eq!(system.keywords.unwrap(), "0x4000000000000002"); + assert_eq!(system.time_created.unwrap(), "2022-09-22T07:49:32.0356778Z"); + assert_eq!(system.event_record_id.unwrap(), 149161); + assert_eq!( + system.correlation.unwrap().activity_id.unwrap(), + "{8cb1229f-ce57-0000-8437-b18c57ced801}" + ); + 
assert_eq!(system.execution.as_ref().unwrap().process_id, 352); + assert_eq!(system.execution.as_ref().unwrap().thread_id, 2468); + assert_eq!( + system.channel.unwrap(), + "Microsoft-Windows-WinRM/Operational" + ); + assert_eq!(system.computer, "win10.windomain.local"); + assert_eq!(system.user_id.unwrap(), "S-1-5-18"); + } + } + } + + #[test] + fn test_142_event_data_parsing() { + let doc = Document::parse(EVENT_142).expect("Failed to parse Event"); + let root = doc.root_element(); + for node in root.children() { + if node.tag_name().name() == "EventData" { + let data = parse_event_data(&node).expect("Failed to parse EventData node"); + match data { + DataType::EventData(event_data) => { + assert_eq!( + event_data.named_data.get("operationName"), + Some(&"Enumeration".to_string()) + ); + assert_eq!( + event_data.named_data.get("errorCode"), + Some(&"2150858770".to_string()) + ); + } + _ => panic!("Wrong EventData node"), + } + } + } + } + + #[test] + fn test_142_rendering_info_parsing() { + let doc = Document::parse(EVENT_142).expect("Failed to parse Event"); + let root = doc.root_element(); + for node in root.children() { + if node.tag_name().name() == "RenderingInfo" { + let rendering_info = + RenderingInfo::from(&node).expect("Failed to parse RenderingInfo node"); + assert_eq!(rendering_info.culture, "en-US"); + assert_eq!( + rendering_info.message.unwrap(), + "WSMan operation Enumeration failed, error code 2150858770" + ); + assert_eq!(rendering_info.level.unwrap(), "Error"); + assert_eq!(rendering_info.task.unwrap(), "Response handling"); + assert_eq!(rendering_info.opcode.unwrap(), "Stop"); + assert_eq!( + rendering_info.channel.unwrap(), + "Microsoft-Windows-WinRM/Operational" + ); + assert_eq!( + rendering_info.provider.unwrap(), + "Microsoft-Windows-Windows Remote Management" + ); + assert_eq!(rendering_info.keywords.unwrap(), ["Client"]); + } + } + } + + const EVENT_4624: &str = r#" + + + + 4624 + 2 + 0 + 12544 + 0 + 0x8020000000000000 + + 72207 + + + 
Security + win10.windomain.local + + + + S-1-5-18 + WIN10$ + WINDOMAIN + 0x3e7 + S-1-5-18 + SYSTEM + NT AUTHORITY + 0x3e7 + 5 + Advapi + Negotiate + - + {00000000-0000-0000-0000-000000000000} + - + - + 0 + 0x244 + C:\\Windows\\System32\\services.exe + - + - + %%1833 + - + - + - + %%1843 + 0x0 + %%1842 + + + An account was successfully logged on.\r\n\r\nSubject:\r\n\tSecurity ID:\t\tS-1-5-18\r\n\tAccount Name:\t\tWIN10$\r\n\tAccount Domain:\t\tWINDOMAIN\r\n\tLogon ID:\t\t0x3E7\r\n\r\nLogon Information:\r\n\tLogon Type:\t\t5\r\n\tRestricted Admin Mode:\t-\r\n\tVirtual Account:\t\tNo\r\n\tElevated Token:\t\tYes\r\n\r\nImpersonation Level:\t\tImpersonation\r\n\r\nNew Logon:\r\n\tSecurity ID:\t\tS-1-5-18\r\n\tAccount Name:\t\tSYSTEM\r\n\tAccount Domain:\t\tNT AUTHORITY\r\n\tLogon ID:\t\t0x3E7\r\n\tLinked Logon ID:\t\t0x0\r\n\tNetwork Account Name:\t-\r\n\tNetwork Account Domain:\t-\r\n\tLogon GUID:\t\t{00000000-0000-0000-0000-000000000000}\r\n\r\nProcess Information:\r\n\tProcess ID:\t\t0x244\r\n\tProcess Name:\t\tC:\\Windows\\System32\\services.exe\r\n\r\nNetwork Information:\r\n\tWorkstation Name:\t-\r\n\tSource Network Address:\t-\r\n\tSource Port:\t\t-\r\n\r\nDetailed Authentication Information:\r\n\tLogon Process:\t\tAdvapi \r\n\tAuthentication Package:\tNegotiate\r\n\tTransited Services:\t-\r\n\tPackage Name (NTLM only):\t-\r\n\tKey Length:\t\t0\r\n\r\nThis event is generated when a logon session is created. It is generated on the computer that was accessed.\r\n\r\nThe subject fields indicate the account on the local system which requested the logon. This is most commonly a service such as the Server service, or a local process such as Winlogon.exe or Services.exe.\r\n\r\nThe logon type field indicates the kind of logon that occurred. The most common types are 2 (interactive) and 3 (network).\r\n\r\nThe New Logon fields indicate the account for whom the new logon was created, i.e. 
the account that was logged on.\r\n\r\nThe network fields indicate where a remote logon request originated. Workstation name is not always available and may be left blank in some cases.\r\n\r\nThe impersonation level field indicates the extent to which a process in the logon session can impersonate.\r\n\r\nThe authentication information fields provide detailed information about this specific logon request.\r\n\t- Logon GUID is a unique identifier that can be used to correlate this event with a KDC event.\r\n\t- Transited services indicate which intermediate services have participated in this logon request.\r\n\t- Package name indicates which sub-protocol was used among the NTLM protocols.\r\n\t- Key length indicates the length of the generated session key. This will be 0 if no session key was requested. + Information + Logon + Info + Security + Microsoft Windows security auditing. + + Audit Success + + + "#; + #[test] + fn test_4624_system_parsing() { + let doc = Document::parse(EVENT_4624).expect("Failed to parse Event"); + let root = doc.root_element(); + for node in root.children() { + if node.tag_name().name() == "System" { + let system = System::from(&node).expect("Failed to parse System node"); + assert_eq!( + system.provider.name.unwrap(), + "Microsoft-Windows-Security-Auditing" + ); + assert_eq!( + system.provider.guid.unwrap(), + "{54849625-5478-4994-a5ba-3e3b0328c30d}" + ); + assert_eq!(system.event_id, 4624); + assert_eq!(system.version.unwrap(), 2); + assert_eq!(system.level.unwrap(), 0); + assert_eq!(system.task.unwrap(), 12544); + assert_eq!(system.opcode.unwrap(), 0); + assert_eq!(system.keywords.unwrap(), "0x8020000000000000"); + assert_eq!(system.time_created.unwrap(), "2022-09-23T11:53:47.9077543Z"); + assert_eq!(system.event_record_id.unwrap(), 72207); + assert_eq!( + system.correlation.unwrap().activity_id.unwrap(), + "{d88ee832-cf42-0000-26e9-8ed842cfd801}" + ); + assert_eq!(system.execution.as_ref().unwrap().process_id, 588); + 
assert_eq!(system.execution.as_ref().unwrap().thread_id, 652); + assert_eq!(system.channel.unwrap(), "Security"); + assert_eq!(system.computer, "win10.windomain.local"); + assert!(system.user_id.is_none()); + } + } + } + + #[test] + fn test_4624_event_data_parsing() { + let doc = Document::parse(EVENT_4624).expect("Failed to parse Event"); + let root = doc.root_element(); + for node in root.children() { + if node.tag_name().name() == "EventData" { + let data = parse_event_data(&node).expect("Failed to parse EventData node"); + match data { + DataType::EventData(event) => { + assert_eq!(event.named_data.get("SubjectUserSid").unwrap(), "S-1-5-18"); + assert_eq!(event.named_data.get("SubjectUserName").unwrap(), "WIN10$"); + assert_eq!( + event.named_data.get("SubjectDomainName").unwrap(), + "WINDOMAIN" + ); + assert_eq!(event.named_data.get("SubjectLogonId").unwrap(), "0x3e7"); + assert_eq!(event.named_data.get("TargetUserSid").unwrap(), "S-1-5-18"); + assert_eq!(event.named_data.get("TargetUserName").unwrap(), "SYSTEM"); + assert_eq!( + event.named_data.get("TargetDomainName").unwrap(), + "NT AUTHORITY" + ); + assert_eq!(event.named_data.get("TargetLogonId").unwrap(), "0x3e7"); + assert_eq!(event.named_data.get("LogonType").unwrap(), "5"); + assert_eq!( + event.named_data.get("LogonProcessName").unwrap(), + "Advapi " + ); + assert_eq!( + event.named_data.get("AuthenticationPackageName").unwrap(), + "Negotiate" + ); + assert_eq!(event.named_data.get("WorkstationName").unwrap(), "-"); + assert_eq!( + event.named_data.get("LogonGuid").unwrap(), + "{00000000-0000-0000-0000-000000000000}" + ); + assert_eq!(event.named_data.get("TransmittedServices").unwrap(), "-"); + assert_eq!(event.named_data.get("LmPackageName").unwrap(), "-"); + assert_eq!(event.named_data.get("KeyLength").unwrap(), "0"); + assert_eq!(event.named_data.get("ProcessId").unwrap(), "0x244"); + assert_eq!( + event.named_data.get("ProcessName").unwrap(), + r#"C:\\Windows\\System32\\services.exe"# + ); + 
assert_eq!(event.named_data.get("IpAddress").unwrap(), "-"); + assert_eq!(event.named_data.get("IpPort").unwrap(), "-"); + assert_eq!( + event.named_data.get("ImpersonationLevel").unwrap(), + "%%1833" + ); + assert_eq!(event.named_data.get("RestrictedAdminMode").unwrap(), "-"); + assert_eq!(event.named_data.get("TargetOutboundUserName").unwrap(), "-"); + assert_eq!( + event.named_data.get("TargetOutboundDomainName").unwrap(), + "-" + ); + assert_eq!(event.named_data.get("VirtualAccount").unwrap(), "%%1843"); + assert_eq!(event.named_data.get("TargetLinkedLogonId").unwrap(), "0x0"); + assert_eq!(event.named_data.get("ElevatedToken").unwrap(), "%%1842"); + } + _ => panic!("Wrong EventData node"), + } + } + } + } + + #[test] + fn test_4624_rendering_info_parsing() { + let doc = Document::parse(EVENT_4624).expect("Failed to parse Event"); + let root = doc.root_element(); + for node in root.children() { + if node.tag_name().name() == "RenderingInfo" { + let rendering_info = + RenderingInfo::from(&node).expect("Failed to parse RenderingInfo node"); + assert_eq!(rendering_info.culture, "en-US"); + assert_eq!( + rendering_info.message.unwrap(), + r#"An account was successfully logged on.\r\n\r\nSubject:\r\n\tSecurity ID:\t\tS-1-5-18\r\n\tAccount Name:\t\tWIN10$\r\n\tAccount Domain:\t\tWINDOMAIN\r\n\tLogon ID:\t\t0x3E7\r\n\r\nLogon Information:\r\n\tLogon Type:\t\t5\r\n\tRestricted Admin Mode:\t-\r\n\tVirtual Account:\t\tNo\r\n\tElevated Token:\t\tYes\r\n\r\nImpersonation Level:\t\tImpersonation\r\n\r\nNew Logon:\r\n\tSecurity ID:\t\tS-1-5-18\r\n\tAccount Name:\t\tSYSTEM\r\n\tAccount Domain:\t\tNT AUTHORITY\r\n\tLogon ID:\t\t0x3E7\r\n\tLinked Logon ID:\t\t0x0\r\n\tNetwork Account Name:\t-\r\n\tNetwork Account Domain:\t-\r\n\tLogon GUID:\t\t{00000000-0000-0000-0000-000000000000}\r\n\r\nProcess Information:\r\n\tProcess ID:\t\t0x244\r\n\tProcess Name:\t\tC:\\Windows\\System32\\services.exe\r\n\r\nNetwork Information:\r\n\tWorkstation Name:\t-\r\n\tSource Network 
Address:\t-\r\n\tSource Port:\t\t-\r\n\r\nDetailed Authentication Information:\r\n\tLogon Process:\t\tAdvapi \r\n\tAuthentication Package:\tNegotiate\r\n\tTransited Services:\t-\r\n\tPackage Name (NTLM only):\t-\r\n\tKey Length:\t\t0\r\n\r\nThis event is generated when a logon session is created. It is generated on the computer that was accessed.\r\n\r\nThe subject fields indicate the account on the local system which requested the logon. This is most commonly a service such as the Server service, or a local process such as Winlogon.exe or Services.exe.\r\n\r\nThe logon type field indicates the kind of logon that occurred. The most common types are 2 (interactive) and 3 (network).\r\n\r\nThe New Logon fields indicate the account for whom the new logon was created, i.e. the account that was logged on.\r\n\r\nThe network fields indicate where a remote logon request originated. Workstation name is not always available and may be left blank in some cases.\r\n\r\nThe impersonation level field indicates the extent to which a process in the logon session can impersonate.\r\n\r\nThe authentication information fields provide detailed information about this specific logon request.\r\n\t- Logon GUID is a unique identifier that can be used to correlate this event with a KDC event.\r\n\t- Transited services indicate which intermediate services have participated in this logon request.\r\n\t- Package name indicates which sub-protocol was used among the NTLM protocols.\r\n\t- Key length indicates the length of the generated session key. This will be 0 if no session key was requested."# + ); + assert_eq!(rendering_info.level.unwrap(), "Information"); + assert_eq!(rendering_info.task.unwrap(), "Logon"); + assert_eq!(rendering_info.opcode.unwrap(), "Info"); + assert_eq!(rendering_info.channel.unwrap(), "Security"); + assert_eq!( + rendering_info.provider.unwrap(), + "Microsoft Windows security auditing." 
+ ); + assert_eq!(rendering_info.keywords.unwrap(), ["Audit Success"]); + } + } + } + + const EVENT_4689: &str = r#"4689001331300x802000000000000094071Securitywin10.windomain.localS-1-5-21-2892044109-3067629140-1698523921-1000vagrantWIN100x391d20x00x10fcC:\\Windows\\System32\\RuntimeBroker.exeA process has exited.\r\n\r\nSubject:\r\n\tSecurity ID:\t\tS-1-5-21-2892044109-3067629140-1698523921-1000\r\n\tAccount Name:\t\tvagrant\r\n\tAccount Domain:\t\tWIN10\r\n\tLogon ID:\t\t0x391D2\r\n\r\nProcess Information:\r\n\tProcess ID:\t0x10fc\r\n\tProcess Name:\tC:\\Windows\\System32\\RuntimeBroker.exe\r\n\tExit Status:\t0x0InformationProcess TerminationInfoSecurityMicrosoft Windows security auditing.Audit Success"#; + + #[test] + fn test_4689_parsing() { + Event::from_str( + "192.168.0.1", + "win10.windomain.local", + "2022-11-07T17:08:27.169805+01:00", + "8B18D83D-2964-4F35-AC3B-6F4E6FFA727B", + "AD0D118F-31EF-4111-A0CA-D87249747278", + "Test", + Some(&"/this/is/a/test".to_string()), + EVENT_4689, + ) + .expect("Failed to parse Event"); + } + + const EVENT_4688: &str = r#"4688201331200x8020000000000000114689Securitywin10.windomain.localS-1-5-18WIN10$WINDOMAIN0x3e70x3a8C:\Program Files (x86)\Microsoft\EdgeUpdate\MicrosoftEdgeUpdate.exe%%19360x240S-1-0-0--0x0C:\Windows\System32\services.exeS-1-16-16384A new process has been created. + +Creator Subject: + Security ID: S-1-5-18 + Account Name: WIN10$ + Account Domain: WINDOMAIN + Logon ID: 0x3E7 + +Target Subject: + Security ID: S-1-0-0 + Account Name: - + Account Domain: - + Logon ID: 0x0 + +Process Information: + New Process ID: 0x3a8 + New Process Name: C:\Program Files (x86)\Microsoft\EdgeUpdate\MicrosoftEdgeUpdate.exe + Token Elevation Type: %%1936 + Mandatory Label: S-1-16-16384 + Creator Process ID: 0x240 + Creator Process Name: C:\Windows\System32\services.exe + Process Command Line: + +Token Elevation Type indicates the type of token that was assigned to the new process in accordance with User Account Control policy. 
+ +Type 1 is a full token with no privileges removed or groups disabled. A full token is only used if User Account Control is disabled or if the user is the built-in Administrator account or a service account. + +Type 2 is an elevated token with no privileges removed or groups disabled. An elevated token is used when User Account Control is enabled and the user chooses to start the program using Run as administrator. An elevated token is also used when an application is configured to always require administrative privilege or to always require maximum privilege, and the user is a member of the Administrators group. + +Type 3 is a limited token with administrative privileges removed and administrative groups disabled. The limited token is used when User Account Control is enabled, the application does not require administrative privilege, and the user does not choose to start the program using Run as administrator.InformationProcess CreationInfoSecurityMicrosoft Windows security auditing.Audit Success"#; + const EVENT_4688_JSON: &str = r#"{"System":{"Provider":{"Name":"Microsoft-Windows-Security-Auditing","Guid":"{54849625-5478-4994-a5ba-3e3b0328c30d}"},"EventID":4688,"Version":2,"Level":0,"Task":13312,"Opcode":0,"Keywords":"0x8020000000000000","TimeCreated":"2022-12-14T16:06:51.0643605Z","EventRecordID":114689,"Correlation":{},"Execution":{"ProcessID":4,"ThreadID":196},"Channel":"Security","Computer":"win10.windomain.local"},"EventData":{"SubjectLogonId":"0x3e7","SubjectUserName":"WIN10$","SubjectDomainName":"WINDOMAIN","ParentProcessName":"C:\\Windows\\System32\\services.exe","MandatoryLabel":"S-1-16-16384","SubjectUserSid":"S-1-5-18","NewProcessName":"C:\\Program Files (x86)\\Microsoft\\EdgeUpdate\\MicrosoftEdgeUpdate.exe","TokenElevationType":"%%1936","TargetUserSid":"S-1-0-0","TargetDomainName":"-","CommandLine":"","TargetUserName":"-","NewProcessId":"0x3a8","TargetLogonId":"0x0","ProcessId":"0x240"},"RenderingInfo":{"Message":"A new process has been 
created.\n\nCreator Subject:\n\tSecurity ID:\t\tS-1-5-18\n\tAccount Name:\t\tWIN10$\n\tAccount Domain:\t\tWINDOMAIN\n\tLogon ID:\t\t0x3E7\n\nTarget Subject:\n\tSecurity ID:\t\tS-1-0-0\n\tAccount Name:\t\t-\n\tAccount Domain:\t\t-\n\tLogon ID:\t\t0x0\n\nProcess Information:\n\tNew Process ID:\t\t0x3a8\n\tNew Process Name:\tC:\\Program Files (x86)\\Microsoft\\EdgeUpdate\\MicrosoftEdgeUpdate.exe\n\tToken Elevation Type:\t%%1936\n\tMandatory Label:\t\tS-1-16-16384\n\tCreator Process ID:\t0x240\n\tCreator Process Name:\tC:\\Windows\\System32\\services.exe\n\tProcess Command Line:\t\n\nToken Elevation Type indicates the type of token that was assigned to the new process in accordance with User Account Control policy.\n\nType 1 is a full token with no privileges removed or groups disabled. A full token is only used if User Account Control is disabled or if the user is the built-in Administrator account or a service account.\n\nType 2 is an elevated token with no privileges removed or groups disabled. An elevated token is used when User Account Control is enabled and the user chooses to start the program using Run as administrator. An elevated token is also used when an application is configured to always require administrative privilege or to always require maximum privilege, and the user is a member of the Administrators group.\n\nType 3 is a limited token with administrative privileges removed and administrative groups disabled. 
The limited token is used when User Account Control is enabled, the application does not require administrative privilege, and the user does not choose to start the program using Run as administrator.","Level":"Information","Task":"Process Creation","Opcode":"Info","Channel":"Security","Provider":"Microsoft Windows security auditing.","Keywords":["Audit Success"],"Culture":"en-US"},"OpenWEC":{"IpAddress":"192.168.58.100","TimeReceived":"2022-12-14T17:07:03.331+01:00","Principal":"WIN10$@WINDOMAIN.LOCAL","Subscription":{"Uuid":"8B18D83D-2964-4F35-AC3B-6F4E6FFA727B","Version":"AD0D118F-31EF-4111-A0CA-D87249747278","Name":"Test","Uri":"/this/is/a/test"}}}"#; + + #[test] + fn test_serialize_4688_event_data() { + let event = Event::from_str( + "192.168.58.100", + "WIN10$@WINDOMAIN.LOCAL", + "2022-12-14T17:07:03.331+01:00", + "8B18D83D-2964-4F35-AC3B-6F4E6FFA727B", + "AD0D118F-31EF-4111-A0CA-D87249747278", + "Test", + Some(&"/this/is/a/test".to_string()), + EVENT_4688, + ) + .expect("Failed to parse Event"); + + let event_json = serde_json::to_string(&event).unwrap(); + + let event_json_value: Value = serde_json::from_str(&event_json).unwrap(); + let expected_value: Value = serde_json::from_str(EVENT_4688_JSON).unwrap(); + + println!("{}", event_json_value); + println!("{}", expected_value); + assert_eq!(event_json_value, expected_value); + } + + const EVENT_1003: &str = r#"100304000x800000000000007603Applicationwin10.windomain.local55c92734-d682-4d71-983e-d6ec3f16059f +1: 3f4c0546-36c6-46a8-a37f-be13cdd0cf25, 1, 1 [(0 [0xC004E003, 0, 0], [( 9 0xC004FC07 90 0)( 1 0x00000000)(?)( 2 0x00000000 0 0 msft:rm/algorithm/hwid/4.0 0x00000000 0)(?)( 9 0xC004FC07 90 0)( 10 0x00000000 msft:rm/algorithm/flags/1.0)(?)])(1 )(2 )(3 [0x00000000, 0, 0], [( 6 0xC004F009 0 0)( 1 0x00000000)( 6 0xC004F009 0 0)(?)(?)(?)( 10 0x00000000 msft:rm/algorithm/flags/1.0)( 11 0x00000000 0xC004FC07)])] + +The Software Protection service has completed licensing status check. 
+Application Id=55c92734-d682-4d71-983e-d6ec3f16059f +Licensing Status= +1: 3f4c0546-36c6-46a8-a37f-be13cdd0cf25, 1, 1 [(0 [0xC004E003, 0, 0], [( 9 0xC004FC07 90 0)( 1 0x00000000)(?)( 2 0x00000000 0 0 msft:rm/algorithm/hwid/4.0 0x00000000 0)(?)( 9 0xC004FC07 90 0)( 10 0x00000000 msft:rm/algorithm/flags/1.0)(?)])(1 )(2 )(3 [0x00000000, 0, 0], [( 6 0xC004F009 0 0)( 1 0x00000000)( 6 0xC004F009 0 0)(?)(?)(?)( 10 0x00000000 msft:rm/algorithm/flags/1.0)( 11 0x00000000 0xC004FC07)])] + +InformationMicrosoft-Windows-Security-SPPClassic + "#; + const EVENT_1003_JSON: &str = r#"{"System":{"Provider":{"Name":"Microsoft-Windows-Security-SPP","Guid":"{E23B33B0-C8C9-472C-A5F9-F2BDFEA0F156}","EventSourceName":"Software Protection Platform Service"},"EventID":1003,"EventIDQualifiers":16384,"Version":0,"Level":4,"Task":0,"Opcode":0,"Keywords":"0x80000000000000","TimeCreated":"2022-12-14T16:05:59.7074374Z","EventRecordID":7603,"Correlation":{},"Execution":{"ProcessID":0,"ThreadID":0},"Channel":"Application","Computer":"win10.windomain.local"},"EventData":{"Data":["55c92734-d682-4d71-983e-d6ec3f16059f","\n1: 3f4c0546-36c6-46a8-a37f-be13cdd0cf25, 1, 1 [(0 [0xC004E003, 0, 0], [( 9 0xC004FC07 90 0)( 1 0x00000000)(?)( 2 0x00000000 0 0 msft:rm/algorithm/hwid/4.0 0x00000000 0)(?)( 9 0xC004FC07 90 0)( 10 0x00000000 msft:rm/algorithm/flags/1.0)(?)])(1 )(2 )(3 [0x00000000, 0, 0], [( 6 0xC004F009 0 0)( 1 0x00000000)( 6 0xC004F009 0 0)(?)(?)(?)( 10 0x00000000 msft:rm/algorithm/flags/1.0)( 11 0x00000000 0xC004FC07)])]\n\n"]},"RenderingInfo":{"Message":"The Software Protection service has completed licensing status check.\nApplication Id=55c92734-d682-4d71-983e-d6ec3f16059f\nLicensing Status=\n1: 3f4c0546-36c6-46a8-a37f-be13cdd0cf25, 1, 1 [(0 [0xC004E003, 0, 0], [( 9 0xC004FC07 90 0)( 1 0x00000000)(?)( 2 0x00000000 0 0 msft:rm/algorithm/hwid/4.0 0x00000000 0)(?)( 9 0xC004FC07 90 0)( 10 0x00000000 msft:rm/algorithm/flags/1.0)(?)])(1 )(2 )(3 [0x00000000, 0, 0], [( 6 0xC004F009 0 0)( 1 0x00000000)( 
6 0xC004F009 0 0)(?)(?)(?)( 10 0x00000000 msft:rm/algorithm/flags/1.0)( 11 0x00000000 0xC004FC07)])]\n\n","Level":"Information","Provider":"Microsoft-Windows-Security-SPP","Keywords":["Classic"],"Culture":"en-US"},"OpenWEC":{"IpAddress":"192.168.58.100","TimeReceived":"2022-12-14T17:07:03.324+01:00","Principal":"WIN10$@WINDOMAIN.LOCAL","Subscription":{"Uuid":"8B18D83D-2964-4F35-AC3B-6F4E6FFA727B","Version":"AD0D118F-31EF-4111-A0CA-D87249747278","Name":"Test"}}}"#; + + #[test] + fn test_serialize_1003_event_data_unamed() { + let event = Event::from_str( + "192.168.58.100", + "WIN10$@WINDOMAIN.LOCAL", + "2022-12-14T17:07:03.324+01:00", + "8B18D83D-2964-4F35-AC3B-6F4E6FFA727B", + "AD0D118F-31EF-4111-A0CA-D87249747278", + "Test", + None, + EVENT_1003, + ) + .expect("Failed to parse Event"); + + let event_json = serde_json::to_string(&event).unwrap(); + + let event_json_value: Value = serde_json::from_str(&event_json).unwrap(); + let expected_value: Value = serde_json::from_str(EVENT_1003_JSON).unwrap(); + + println!("{}", event_json_value); + println!("{}", expected_value); + assert_eq!(event_json_value, expected_value); + } + + const EVENT_5719: &str = r#"571902000x800000000000009466Systemwin10.windomain.localWINDOMAIN%%13115E0000C0This computer was not able to set up a secure session with a domain controller in domain WINDOMAIN due to the following: +We can't sign you in with this credential because your domain isn't available. Make sure your device is connected to your organization's network and try again. If you previously signed in on this device with another credential, you can sign in with that credential. +This may lead to authentication problems. Make sure that this computer is connected to the network. If the problem persists, please contact your domain administrator. + +ADDITIONAL INFO +If this computer is a domain controller for the specified domain, it sets up the secure session to the primary domain controller emulator in the specified domain. 
Otherwise, this computer sets up the secure session to any domain controller in the specified domain.ErrorInfoClassic"#; + const EVENT_5719_JSON: &str = r#"{"System":{"Provider":{"Name":"NETLOGON"},"EventID":5719,"EventIDQualifiers":0,"Version":0,"Level":2,"Task":0,"Opcode":0,"Keywords":"0x80000000000000","TimeCreated":"2022-12-14T16:04:59.0817047Z","EventRecordID":9466,"Correlation":{},"Execution":{"ProcessID":0,"ThreadID":0},"Channel":"System","Computer":"win10.windomain.local"},"EventData":{"Data":["WINDOMAIN","%%1311"],"Binary":"5E0000C0"},"RenderingInfo":{"Message":"This computer was not able to set up a secure session with a domain controller in domain WINDOMAIN due to the following: \nWe can't sign you in with this credential because your domain isn't available. Make sure your device is connected to your organization's network and try again. If you previously signed in on this device with another credential, you can sign in with that credential. \nThis may lead to authentication problems. Make sure that this computer is connected to the network. If the problem persists, please contact your domain administrator. \n\nADDITIONAL INFO \nIf this computer is a domain controller for the specified domain, it sets up the secure session to the primary domain controller emulator in the specified domain. 
Otherwise, this computer sets up the secure session to any domain controller in the specified domain.","Level":"Error","Opcode":"Info","Keywords":["Classic"],"Culture":"en-US"},"OpenWEC":{"IpAddress":"192.168.58.100","TimeReceived":"2022-12-14T17:07:02.919+01:00","Principal":"WIN10$@WINDOMAIN.LOCAL","Subscription":{"Uuid":"8B18D83D-2964-4F35-AC3B-6F4E6FFA727B","Version":"AD0D118F-31EF-4111-A0CA-D87249747278","Name":"Test","Uri":"/this/is/a/test"}}}"#; + + #[test] + fn test_serialize_5719_event_data_binary() { + let event = Event::from_str( + "192.168.58.100", + "WIN10$@WINDOMAIN.LOCAL", + "2022-12-14T17:07:02.919+01:00", + "8B18D83D-2964-4F35-AC3B-6F4E6FFA727B", + "AD0D118F-31EF-4111-A0CA-D87249747278", + "Test", + Some(&"/this/is/a/test".to_string()), + EVENT_5719, + ) + .expect("Failed to parse Event"); + + let event_json = serde_json::to_string(&event).unwrap(); + + let event_json_value: Value = serde_json::from_str(&event_json).unwrap(); + let expected_value: Value = serde_json::from_str(EVENT_5719_JSON).unwrap(); + + println!("{}", event_json_value); + println!("{}", expected_value); + assert_eq!(event_json_value, expected_value); + } + + const EVENT_6013: &str = r#"601304000x800000000000009427Systemwin10.windomain.local6600 Coordinated Universal 
Time31002E003100000030000000570069006E0064006F0077007300200031003000200045006E007400650072007000720069007300650020004500760061006C0075006100740069006F006E000000310030002E0030002E003100390030003400330020004200750069006C0064002000310039003000340033002000200000004D0075006C0074006900700072006F0063006500730073006F007200200046007200650065000000310039003000340031002E00760062005F00720065006C0065006100730065002E003100390031003200300036002D00310034003000360000003600320031003400640066003100630000004E006F007400200041007600610069006C00610062006C00650000004E006F007400200041007600610069006C00610062006C00650000003900000031000000320030003400380000003400300039000000770069006E00310030002E00770069006E0064006F006D00610069006E002E006C006F00630061006C0000000000The system uptime is 6 seconds.InformationClassic"#; + const EVENT_6013_JSON: &str = r#"{"System":{"Provider":{"Name":"EventLog"},"EventID":6013,"EventIDQualifiers":32768,"Version":0,"Level":4,"Task":0,"Opcode":0,"Keywords":"0x80000000000000","TimeCreated":"2022-12-14T16:04:43.7965565Z","EventRecordID":9427,"Correlation":{},"Execution":{"ProcessID":0,"ThreadID":0},"Channel":"System","Computer":"win10.windomain.local"},"EventData":{"Data":["6","60","0 Coordinated Universal 
Time"],"Binary":"31002E003100000030000000570069006E0064006F0077007300200031003000200045006E007400650072007000720069007300650020004500760061006C0075006100740069006F006E000000310030002E0030002E003100390030003400330020004200750069006C0064002000310039003000340033002000200000004D0075006C0074006900700072006F0063006500730073006F007200200046007200650065000000310039003000340031002E00760062005F00720065006C0065006100730065002E003100390031003200300036002D00310034003000360000003600320031003400640066003100630000004E006F007400200041007600610069006C00610062006C00650000004E006F007400200041007600610069006C00610062006C00650000003900000031000000320030003400380000003400300039000000770069006E00310030002E00770069006E0064006F006D00610069006E002E006C006F00630061006C0000000000"},"RenderingInfo":{"Message":"The system uptime is 6 seconds.","Level":"Information","Keywords":["Classic"],"Culture":"en-US"},"OpenWEC":{"IpAddress":"192.168.58.100","TimeReceived":"2022-12-14T17:07:02.524+01:00","Principal":"WIN10$@WINDOMAIN.LOCAL","Subscription":{"Uuid":"8B18D83D-2964-4F35-AC3B-6F4E6FFA727B","Version":"AD0D118F-31EF-4111-A0CA-D87249747278","Name":"Test","Uri":"/this/is/a/test"}}}"#; + + #[test] + fn test_serialize_6013_event_data_unamed_empty() { + let event = Event::from_str( + "192.168.58.100", + "WIN10$@WINDOMAIN.LOCAL", + "2022-12-14T17:07:02.524+01:00", + "8B18D83D-2964-4F35-AC3B-6F4E6FFA727B", + "AD0D118F-31EF-4111-A0CA-D87249747278", + "Test", + Some(&"/this/is/a/test".to_string()), + EVENT_6013, + ) + .expect("Failed to parse Event"); + + let event_json = serde_json::to_string(&event).unwrap(); + + let event_json_value: Value = serde_json::from_str(&event_json).unwrap(); + let expected_value: Value = serde_json::from_str(EVENT_6013_JSON).unwrap(); + + println!("{}", event_json_value); + println!("{}", expected_value); + assert_eq!(event_json_value, expected_value); + } + + const EVENT_1100: &str = r#"11000410300x4020000000000000114371Securitywin10.windomain.localThe event logging service 
has shut down.InformationService shutdownInfoSecurityMicrosoft-Windows-EventlogAudit Success"#; + const EVENT_1100_JSON: &str = r#"{"System":{"Provider":{"Name":"Microsoft-Windows-Eventlog","Guid":"{fc65ddd8-d6ef-4962-83d5-6e5cfe9ce148}"},"EventID":1100,"Version":0,"Level":4,"Task":103,"Opcode":0,"Keywords":"0x4020000000000000","TimeCreated":"2022-12-14T14:39:07.1686183Z","EventRecordID":114371,"Correlation":{},"Execution":{"ProcessID":496,"ThreadID":204},"Channel":"Security","Computer":"win10.windomain.local"},"UserData":"","RenderingInfo":{"Message":"The event logging service has shut down.","Level":"Information","Task":"Service shutdown","Opcode":"Info","Channel":"Security","Provider":"Microsoft-Windows-Eventlog","Keywords":["Audit Success"],"Culture":"en-US"},"OpenWEC":{"IpAddress":"192.168.58.100","TimeReceived":"2022-12-14T17:07:02.156+01:00","Principal":"WIN10$@WINDOMAIN.LOCAL","Subscription":{"Uuid":"8B18D83D-2964-4F35-AC3B-6F4E6FFA727B","Version":"AD0D118F-31EF-4111-A0CA-D87249747278","Name":"Test","Uri":"/this/is/a/test"}}}"#; + + #[test] + fn test_serialize_1100_user_data() { + let event = Event::from_str( + "192.168.58.100", + "WIN10$@WINDOMAIN.LOCAL", + "2022-12-14T17:07:02.156+01:00", + "8B18D83D-2964-4F35-AC3B-6F4E6FFA727B", + "AD0D118F-31EF-4111-A0CA-D87249747278", + "Test", + Some(&"/this/is/a/test".to_string()), + EVENT_1100, + ) + .expect("Failed to parse Event"); + + let event_json = serde_json::to_string(&event).unwrap(); + + let event_json_value: Value = serde_json::from_str(&event_json).unwrap(); + let expected_value: Value = serde_json::from_str(EVENT_1100_JSON).unwrap(); + + println!("{}", event_json_value); + println!("{}", expected_value); + assert_eq!(event_json_value, expected_value); + } + + const EVENT_111: &str = r#"111win10.windomain.local"#; + const EVENT_111_JSON: &str = 
r#"{"System":{"Provider":{"Name":"Microsoft-Windows-EventForwarder"},"EventID":111,"TimeCreated":"2023-02-14T09:14:23.175Z","Computer":"win10.windomain.local"},"RenderingInfo":{"Culture":""},"OpenWEC":{"IpAddress":"192.168.58.100","TimeReceived":"2022-12-14T17:07:02.156+01:00","Principal":"WIN10$@WINDOMAIN.LOCAL","Subscription":{"Uuid":"8B18D83D-2964-4F35-AC3B-6F4E6FFA727B","Version":"AD0D118F-31EF-4111-A0CA-D87249747278","Name":"Test","Uri":"/this/is/a/test"}}}"#; + + #[test] + fn test_serialize_111() { + let event = Event::from_str( + "192.168.58.100", + "WIN10$@WINDOMAIN.LOCAL", + "2022-12-14T17:07:02.156+01:00", + "8B18D83D-2964-4F35-AC3B-6F4E6FFA727B", + "AD0D118F-31EF-4111-A0CA-D87249747278", + "Test", + Some(&"/this/is/a/test".to_string()), + EVENT_111, + ) + .expect("Failed to parse Event"); + + let event_json = serde_json::to_string(&event).expect("Failed to serialize event"); + + let event_json_value: Value = serde_json::from_str(&event_json).unwrap(); + let expected_value: Value = serde_json::from_str(EVENT_111_JSON).unwrap(); + + assert_eq!(event_json_value, expected_value); + } +} diff --git a/server/src/formatter.rs b/server/src/formatter.rs new file mode 100644 index 0000000..27ae946 --- /dev/null +++ b/server/src/formatter.rs @@ -0,0 +1,61 @@ +use std::sync::Arc; + +use anyhow::{Context, Result}; +use chrono::{Local, SecondsFormat}; + +use crate::{event::Event, subscription::Subscription, RequestData}; +use common::subscription::SubscriptionOutputFormat; + +#[derive(Debug, Clone, Hash, PartialEq, Eq)] +pub enum Format { + Json, + Raw, +} + +impl From<&SubscriptionOutputFormat> for Format { + fn from(sof: &SubscriptionOutputFormat) -> Self { + match sof { + SubscriptionOutputFormat::Json => Format::Json, + SubscriptionOutputFormat::Raw => Format::Raw, + } + } +} + +impl Format { + pub fn format( + &self, + subscription: Arc, + request_data: &RequestData, + raw: Arc, + ) -> Result> { + match &self { + Format::Json => format_json(subscription, 
request_data, raw), + Format::Raw => format_raw(raw), + } + } +} + +fn format_json( + subscription: Arc, + request_data: &RequestData, + raw: Arc, +) -> Result> { + let event = Event::from_str( + &request_data.remote_addr().ip().to_string(), + request_data.principal(), + &Local::now().to_rfc3339_opts(SecondsFormat::Millis, true), + subscription.uuid(), + subscription.version(), + subscription.data().name(), + subscription.data().uri(), + raw.as_ref(), + ) + .with_context(|| format!("Failed to parse event: {:?}", raw))?; + Ok(Arc::new(serde_json::to_string(&event).with_context( + || format!("Failed to format event: {:?}", event), + )?)) +} + +fn format_raw(raw: Arc) -> Result> { + Ok(raw) +} diff --git a/server/src/heartbeat.rs b/server/src/heartbeat.rs new file mode 100644 index 0000000..711c866 --- /dev/null +++ b/server/src/heartbeat.rs @@ -0,0 +1,142 @@ +use std::{ + collections::HashMap, + time::{Duration, SystemTime}, +}; + +use anyhow::{Context, Result}; +use common::{ + database::Db, + heartbeat::{HeartbeatKey, HeartbeatValue, HeartbeatsCache}, +}; +use log::{debug, error, info}; +use tokio::{ + select, + sync::{mpsc, oneshot}, + time, +}; + +pub async fn store_heartbeat( + heartbeat_tx: mpsc::Sender, + machine: &str, + ip: String, + subscription: &str, + is_event: bool, +) -> Result<()> { + if is_event { + debug!( + "Store event heartbeat for {} ({}) with subscription {}", + machine, ip, subscription + ) + } else { + debug!( + "Store heartbeat for {} ({}) with subscription {}", + machine, ip, subscription + ) + } + let now = SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH)? 
+ .as_secs(); + heartbeat_tx + .send(WriteHeartbeatMessage { + machine: machine.to_owned(), + ip, + subscription: subscription.to_owned(), + timestamp: now, + is_event, + }) + .await + .context("Failed to send WriteHeartbeatMessage")?; + Ok(()) +} + +#[derive(Debug)] +pub struct WriteHeartbeatMessage { + pub machine: String, + pub ip: String, + pub subscription: String, + pub timestamp: u64, + pub is_event: bool, +} + +pub async fn heartbeat_task( + db: Db, + interval: u64, + mut task_rx: mpsc::Receiver, + mut task_exit_rx: oneshot::Receiver>, +) { + info!("Heartbeat task started"); + let mut interval = time::interval(Duration::from_secs(interval)); + let mut heartbeats: HeartbeatsCache = HashMap::new(); + + loop { + select! { + Some(heartbeat) = task_rx.recv() => { + // Store heartbeat in cache storage + + let key = HeartbeatKey { + machine: heartbeat.machine.clone(), + subscription: heartbeat.subscription.clone() + }; + + let value = if !heartbeat.is_event { + // If we just received a heartbeat, we just want to + // update "last_seen" value. + + let mut value = HeartbeatValue { + ip: heartbeat.ip.clone(), + last_seen: heartbeat.timestamp, + last_event_seen: None + }; + let old_opt = heartbeats.get(&key); + if let Some(old) = old_opt { + value.last_event_seen = old.last_event_seen; + } + value + } else { + HeartbeatValue { + ip: heartbeat.ip.clone(), + last_seen: heartbeat.timestamp, + last_event_seen: Some(heartbeat.timestamp), + } + }; + debug!( + "Cache heartbeat for {} ({}) with subscription {}. 
last_seen = {}, last_event_seen = {:?}", + key.machine, value.ip, key.subscription, value.last_seen, value.last_event_seen + ); + heartbeats.insert(key, value); + }, + _ = interval.tick() => { + if !heartbeats.is_empty() { + info!("Flush heartbeat cache"); + if let Err(e) = db.store_heartbeats(&heartbeats).await { + error!("Could not store heartbeats in database: {:?}", e); + } + + // Clear the cache to be ready to accept new heartbeats + heartbeats.clear(); + info!("Heartbeat cache flushed and cleared"); + } + }, + sender = &mut task_exit_rx => { + if !heartbeats.is_empty() { + info!("Flush heartbeat cache before killing the task"); + if let Err(e) = db.store_heartbeats(&heartbeats).await { + error!("Could not store heartbeats in database: {:?}", e); + } + } + + match sender { + Ok(sender) => { + if let Err(e) = sender.send(()) { + error!("Failed to respond to kill order: {:?}", e); + } + }, + Err(e) => { + error!("Could not respond to kill order: {:?}", e); + } + } + break; + } + } + } +} diff --git a/server/src/kerberos.rs b/server/src/kerberos.rs new file mode 100644 index 0000000..c4c4544 --- /dev/null +++ b/server/src/kerberos.rs @@ -0,0 +1,286 @@ +use anyhow::{anyhow, bail, Context, Result}; +use common::encoding::{decode_utf16le, encode_utf16le}; +use common::settings::Collector; +use hyper::body::HttpBody; +use hyper::header::AUTHORIZATION; +use hyper::{Body, Request}; +use libgssapi::{ + context::{CtxFlags, SecurityContext, ServerCtx}, + credential::{Cred, CredUsage}, + error::Error, + name::Name, + oid::{OidSet, GSS_MECH_KRB5, GSS_NT_KRB5_PRINCIPAL}, + util::{GssIov, GssIovType}, +}; +use log::{debug, error}; +use mime::Mime; +use std::sync::Arc; +use std::sync::Mutex; + +use crate::multipart; +use crate::sldc; +use crate::AuthenticationResult; + +#[derive(Debug)] +pub struct State { + context: Option, +} + +impl State { + pub fn new(principal: &str) -> Self { + let context = setup_server_ctx(principal.as_bytes()); + + match context { + Ok(ctx) => State 
{ context: Some(ctx) }, + Err(e) => { + error!("Could not setup Kerberos server context: {:?}", e); + State { context: None } + } + } + } + + pub fn context_is_none(&self) -> bool { + self.context.is_none() + } +} + +fn setup_server_ctx(principal: &[u8]) -> Result { + let desired_mechs = { + let mut s = OidSet::new()?; + s.add(&GSS_MECH_KRB5)?; + s + }; + let name = Name::new(principal, Some(&GSS_NT_KRB5_PRINCIPAL))?; + let cname = name.canonicalize(Some(&GSS_MECH_KRB5))?; + let server_cred = Cred::acquire(Some(&cname), None, CredUsage::Accept, Some(&desired_mechs))?; + debug!("Acquired server credentials: {:?}", server_cred.info()); + Ok(ServerCtx::new(server_cred)) +} + +/// +/// Perform Kerberos authentication +/// +pub async fn authenticate( + conn_state: &Arc>, + req: &Request, +) -> Result { + let mut state = conn_state.lock().unwrap(); + let server_ctx = state + .context + .as_mut() + .ok_or_else(|| anyhow!("Kerberos server context is empty"))?; + + // Server context has already been established for this TCP connection + if server_ctx.is_complete() { + return Ok(AuthenticationResult { + principal: server_ctx.source_name()?.to_string(), + token: None, + }); + } + + // TODO: return a specific error + let auth_header = req + .headers() + .get(AUTHORIZATION) + .ok_or_else(|| anyhow!("Client request does not contain authorization header"))? + .to_str() + .context("Failed to convert authorization header to str")?; + + let b64_token = auth_header + .strip_prefix("Kerberos ") + .ok_or_else(|| anyhow!("Authorization header does not start with 'Kerberos '"))?; + let token = base64::decode(b64_token) + .context("Failed to decode authorization header token as base64")?; + match server_ctx + .step(&token) + .context("Failed to perform Kerberos operation")? + { + // TODO: should we return Ok in this case ? 
+ None => Ok(AuthenticationResult { + principal: server_ctx.source_name()?.to_string(), + token: None, + }), + Some(step) => { + // TODO: support multiple steps + // see RFC4559 "5. Negotiate Operation Example" + if !server_ctx.is_complete() { + bail!( + "Authentication is not complete after first round. Multiple rounds + are not supported" + ); + } + let flags = server_ctx.flags().context("Error in server ctx")?; + let required_flags = CtxFlags::GSS_C_CONF_FLAG + | CtxFlags::GSS_C_MUTUAL_FLAG + | CtxFlags::GSS_C_INTEG_FLAG; + if flags & required_flags != required_flags { + bail!("Kerberos flags not compliant"); + } + + debug!("Server context info: {:?}", server_ctx.info()); + Ok(AuthenticationResult { + principal: server_ctx.source_name()?.to_string(), + token: Some(base64::encode(&*step)), + }) + } + } +} + +fn get_boundary(mime: &Mime) -> Result { + if mime.type_() != "multipart" { + bail!("Top level media type must be multipart"); + } + + if mime.subtype() != "encrypted" { + bail!("Sub media type must be encrypted"); + } + + match mime.get_param("protocol") { + Some(protocol) if protocol == "application/HTTP-Kerberos-session-encrypted" => {} + _ => bail!("Invalid or missing parameter 'protocol' in Content-Type"), + } + + match mime.get_param("boundary") { + Some(boundary) => Ok(boundary.to_string()), + _ => bail!("Missing parameter 'boundary' in Content-Type"), + } +} + +fn decrypt_payload(encrypted_payload: Vec, server_ctx: &mut ServerCtx) -> Result> { + log::debug!("Try to decrypt Kerberos payload"); + let i32_size = std::mem::size_of::(); + let (signature_length_bytes, _) = encrypted_payload.split_at(i32_size); + let signature_length = i32::from_le_bytes(signature_length_bytes.try_into()?) 
as usize; + let mut signature = Vec::with_capacity(signature_length); + signature.extend_from_slice( + encrypted_payload + .get(i32_size..signature_length + i32_size) + .ok_or_else(|| anyhow!("Failed to retrieve encrypted message signature"))?, + ); + let mut encrypted_message = + Vec::with_capacity(encrypted_payload.len() - signature_length - i32_size); + encrypted_message.extend_from_slice( + encrypted_payload + .get(i32_size + signature_length..) + .ok_or_else(|| anyhow!("Failed to retrieve encrypted message payload"))?, + ); + let mut iovs = [ + GssIov::new(GssIovType::Header, &mut signature), + GssIov::new(GssIovType::Data, &mut encrypted_message), + ]; + server_ctx.unwrap_iov(&mut iovs)?; + drop(iovs); + + log::debug!("Kerberos payload decrypted successfully"); + Ok(encrypted_message) +} + +fn encrypt_payload(mut payload: Vec, server_ctx: &mut ServerCtx) -> Result> { + let mut iovs = [ + GssIov::new_alloc(GssIovType::Header), + GssIov::new(GssIovType::Data, &mut payload), + GssIov::new_alloc(GssIovType::Padding), + // TODO: should we add a trailer + // see https://web.mit.edu/kerberos/krb5-1.18/doc/appdev/gssapi.html + // and https://learn.microsoft.com/en-us/windows/win32/secauthn/sspi-kerberos-interoperability-with-gssapi + ]; + server_ctx.wrap_iov(true, &mut iovs)?; + + let mut encrypted_payload = Vec::with_capacity( + std::mem::size_of::() + iovs[0].len() + iovs[1].len() + iovs[2].len(), + ); + + encrypted_payload.extend_from_slice(&i32::try_from(iovs[0].len())?.to_le_bytes()); + encrypted_payload.extend_from_slice(&iovs[0]); + encrypted_payload.extend_from_slice(&iovs[1]); + encrypted_payload.extend_from_slice(&iovs[2]); + drop(iovs); + + Ok(encrypted_payload) +} + +pub async fn get_request_payload( + settings: &Collector, + conn_state: &Arc>, + req: Request, +) -> Result> { + let (parts, body) = req.into_parts(); + + let response_content_length = body + .size_hint() + .upper() + .ok_or_else(|| anyhow!("Header Content-Length is not present")) + 
.context("Could not check Content-Length header of request")?; + + let max_content_length = settings.max_content_length(); + + if response_content_length > max_content_length { + bail!( + "HTTP request body is too large ({} bytes larger than the maximum allowed {} bytes).", + response_content_length, + max_content_length + ); + } + + let data = hyper::body::to_bytes(body) + .await + .context("Could not retrieve request body")?; + if data.is_empty() { + return Ok(None); + } + + let content_type = match parts.headers.get("Content-Type") { + Some(content_type) => content_type, + None => bail!("Request does not contain 'Content-Type' header"), + }; + + let mime = content_type + .to_str()? + .parse::() + .context("Could not parse Content-Type header")?; + let boundary = get_boundary(&mime).context("Could not get multipart boundaries")?; + let encrypted_payload = multipart::read_multipart_body(&mut &*data, &boundary) + .context("Could not retrieve encrypted payload")?; + + let mut state = conn_state.lock().unwrap(); + let server_ctx = state + .context + .as_mut() + .ok_or_else(|| anyhow!("Kerberos server context is empty"))?; + + let decrypted_message = + decrypt_payload(encrypted_payload, server_ctx).context("Could not decrypt payload")?; + + let message = match parts.headers.get("Content-Encoding") { + Some(value) if value == "SLDC" => { + sldc::decompress(&decrypted_message).unwrap_or(decrypted_message) + } + None => decrypted_message, + value => bail!("Unsupported Content-Encoding {:?}", value), + }; + + Ok(Some(decode_utf16le(message)?)) +} + +pub fn get_response_payload( + conn_state: &Arc>, + payload: String, + boundary: &str, +) -> Result> { + let mut payload = encode_utf16le(payload).context("Failed to encode payload in utf16le")?; + + let cleartext_payload_len = payload.len(); + + let mut state = conn_state.lock().unwrap(); + let server_ctx = &mut state + .context + .as_mut() + .ok_or_else(|| anyhow!("Kerberos server context is empty"))?; + payload = 
encrypt_payload(payload, server_ctx).context("Failed to encrypt payload")?; + + Ok(multipart::get_multipart_body( + &payload, + cleartext_payload_len, + boundary, + )) +} diff --git a/server/src/lib.rs b/server/src/lib.rs new file mode 100644 index 0000000..51fc760 --- /dev/null +++ b/server/src/lib.rs @@ -0,0 +1,532 @@ +mod event; +mod formatter; +mod heartbeat; +mod kerberos; +mod logic; +mod multipart; +mod output; +mod outputs; +mod sldc; +mod soap; +mod subscription; + +use anyhow::{anyhow, bail, Context, Result}; +use common::database::{db_from_settings, schema_is_up_to_date, Db}; +use common::settings::{Collector, Server as ServerSettings, Settings}; +use futures_util::future::join_all; +use heartbeat::{heartbeat_task, WriteHeartbeatMessage}; +use http::response::Builder; +use http::status::StatusCode; +use hyper::header::{CONTENT_TYPE, WWW_AUTHENTICATE}; +use hyper::server::conn::AddrStream; +use hyper::service::{make_service_fn, service_fn}; +use hyper::{Body, Request, Response, Server}; +use lazy_static::lazy_static; +use libgssapi::error::MajorFlags; +use log::{debug, error, info, trace, warn}; +use quick_xml::writer::Writer; +use regex::Regex; +use soap::Serializable; +use std::collections::HashMap; +use std::convert::Infallible; +use std::env; +use std::io::Cursor; +use std::net::{IpAddr, SocketAddr}; +use std::str::FromStr; +use std::sync::Mutex; +use std::sync::{Arc, RwLock}; +use std::time::Instant; +use subscription::{reload_subscriptions_task, Subscriptions}; +use tokio::signal::unix::SignalKind; +use tokio::sync::{mpsc, oneshot}; + +#[derive(Copy, Clone)] +pub enum AuthenticationMechanism { + Kerberos, + Tls, +} + +pub enum RequestCategory { + Enumerate(String), + Subscription(String), +} + +impl TryFrom<&Request> for RequestCategory { + type Error = anyhow::Error; + fn try_from(req: &Request) -> Result { + if req.method() != "POST" { + bail!("Invalid HTTP method {}", req.method()); + } + + lazy_static! 
{ + static ref SUBSCRIPTION_RE: Regex = Regex::new(r"^/wsman/subscriptions/([0-9A-Fa-f]{8}\b-[0-9A-Fa-f]{4}\b-[0-9A-Fa-f]{4}\b-[0-9A-Fa-f]{4}\b-[0-9A-F]{12})$").expect("Failed to compile SUBSCRIPTION regular expression"); + } + if let Some(c) = SUBSCRIPTION_RE.captures(req.uri().path()) { + return Ok(RequestCategory::Subscription( + c.get(1) + .ok_or_else(|| anyhow!("Could not get identifier from URI"))? + .as_str() + .to_owned(), + )); + } + + return Ok(Self::Enumerate(req.uri().to_string())); + } +} + +pub struct RequestData { + principal: String, + remote_addr: SocketAddr, + category: RequestCategory, +} + +impl RequestData { + fn new(principal: &str, remote_addr: &SocketAddr, req: &Request) -> Result { + Ok(RequestData { + principal: principal.to_owned(), + remote_addr: remote_addr.to_owned(), + category: RequestCategory::try_from(req)?, + }) + } + + /// Get a reference to the request data's principal. + pub fn principal(&self) -> &str { + self.principal.as_ref() + } + + /// Get a reference to the request data's remote addr. + pub fn remote_addr(&self) -> &SocketAddr { + &self.remote_addr + } + + /// Get a reference to the request data's category. 
+ pub fn category(&self) -> &RequestCategory { + &self.category + } +} + +pub struct AuthenticationResult { + principal: String, + token: Option, +} + +async fn get_request_payload( + auth_mech: AuthenticationMechanism, + collector: &Collector, + conn_state: &Arc>, + req: Request, +) -> Result> { + match auth_mech { + AuthenticationMechanism::Tls => bail!("TLS is not supported yet"), + AuthenticationMechanism::Kerberos => { + kerberos::get_request_payload(collector, conn_state, req).await + } + } +} + +fn create_response( + auth_mech: AuthenticationMechanism, + conn_state: &Arc>, + mut response: Builder, + payload: Option, +) -> Result> { + match auth_mech { + AuthenticationMechanism::Tls => bail!("TLS is not supported yet"), + AuthenticationMechanism::Kerberos => { + let boundary = "Encrypted Boundary"; + if payload.is_some() { + response = response.header(CONTENT_TYPE, "multipart/encrypted;protocol=\"application/HTTP-Kerberos-session-encrypted\";boundary=\"".to_owned() + boundary + "\""); + } + let body = match payload { + None => Body::empty(), + Some(payload) => Body::from( + kerberos::get_response_payload(conn_state, payload, boundary) + .context("Failed to compute Kerberos encrypted payload")?, + ), + }; + Ok(response.body(body)?) 
+ } + } +} + +async fn authenticate( + auth_mech: AuthenticationMechanism, + conn_state: &Arc>, + req: &Request, + addr: &SocketAddr, +) -> Result<(String, Builder)> { + match auth_mech { + AuthenticationMechanism::Tls => { + error!("TLS is not supported yet"); + bail!("TLS is not supported yet") + } + AuthenticationMechanism::Kerberos => { + let mut response = Response::builder(); + let auth_result = kerberos::authenticate(conn_state, req) + .await + .map_err(|err| { + match err.root_cause().downcast_ref::() { + Some(e) if e.major == MajorFlags::GSS_S_CONTEXT_EXPIRED => (), + _ => warn!( + "Authentication failed for {}:{} ({}:{}): {:?}", + addr.ip(), + addr.port(), + req.method(), + req.uri(), + err + ), + }; + err + })?; + if let Some(token) = auth_result.token { + response = response.header(WWW_AUTHENTICATE, format!("Kerberos {}", token)) + } + Ok((auth_result.principal, response)) + } + } +} + +async fn handle_payload( + server: &ServerSettings, + collector: &Collector, + db: Db, + subscriptions: Subscriptions, + heartbeat_tx: mpsc::Sender, + request_data: RequestData, + request_payload: Option, +) -> Result<(StatusCode, Option)> { + match request_payload { + None => Ok((StatusCode::OK, None)), + Some(payload) => { + let message = soap::parse(&payload).context("Failed to parse SOAP message")?; + trace!("Parsed request: {:?}", message); + let response = logic::handle_message( + server, + collector, + db, + subscriptions, + heartbeat_tx, + request_data, + &message, + ) + .await + .context("Failed to handle SOAP message")?; + + match response { + logic::Response::Err(status_code) => Ok((status_code, None)), + logic::Response::Ok(action, body) => { + let payload = soap::Message::response_from(&message, &action, body) + .context("Failed to build a response payload")?; + let mut writer = Writer::new(Cursor::new(Vec::new())); + payload + .serialize(&mut writer) + .context("Failed to serialize response payload")?; + let result = 
String::from_utf8(writer.into_inner().into_inner())?; + trace!("Response is: {}", result); + Ok((StatusCode::OK, Some(result))) + } + } + } + } +} + +fn log_response(addr: &SocketAddr, method: &str, uri: &str, start: &Instant, status: StatusCode) { + let duration: f32 = start.elapsed().as_micros() as f32; + info!( + "Responded status {} to {}:{} (request was {}:{}) in {:.3}ms", + status, + addr.ip(), + addr.port(), + method, + uri, + duration / 1000.0 + ); +} + +async fn handle( + server: ServerSettings, + collector: Collector, + db: Db, + subscriptions: Subscriptions, + heartbeat_tx: mpsc::Sender, + auth_mech: AuthenticationMechanism, + conn_state: Arc>, + addr: SocketAddr, + req: Request, +) -> Result, Infallible> { + let start = Instant::now(); + + debug!( + "Received HTTP request from {}:{}: {} {}", + addr.ip(), + addr.port(), + req.method(), + req.uri() + ); + + let method = req.method().to_string(); + let uri = req.uri().to_string(); + + // Check authentication + let (principal, mut response_builder) = + match authenticate(auth_mech, &conn_state, &req, &addr).await { + Ok((principal, builder)) => (principal, builder), + Err(e) => { + debug!( + "Authentication failed for {}:{} ({}:{}): {:?}", + addr.ip(), + addr.port(), + &method, + &uri, + e + ); + let status = StatusCode::UNAUTHORIZED; + log_response(&addr, &method, &uri, &start, status); + return Ok(Response::builder() + .status(status) + .body(Body::empty()) + .expect("Failed to build HTTP response")); + } + }; + + debug!("Successfully authenticated {}", principal); + + let request_data = match RequestData::new(&principal, &addr, &req) { + Ok(request_data) => request_data, + Err(e) => { + error!("Failed to compute request data: {:?}", e); + let status = StatusCode::NOT_FOUND; + log_response(&addr, &method, &uri, &start, status); + return Ok(Response::builder() + .status(status) + .body(Body::empty()) + .expect("Failed to build HTTP response")); + } + }; + + // Get request payload + let request_payload = 
match get_request_payload(auth_mech, &collector, &conn_state, req).await { + Ok(payload) => payload, + Err(e) => { + error!("Failed to retrieve request payload: {:?}", e); + let status = StatusCode::BAD_REQUEST; + log_response(&addr, &method, &uri, &start, status); + return Ok(Response::builder() + .status(status) + .body(Body::empty()) + .expect("Failed to build HTTP response")); + } + }; + + trace!( + "Received payload: {:?}", + request_payload.as_ref().unwrap_or(&String::from("")) + ); + + // Handle request payload, and retrieves response payload + let (status, response_payload) = match handle_payload( + &server, + &collector, + db, + subscriptions, + heartbeat_tx, + request_data, + request_payload, + ) + .await + { + Ok((status, response_payload)) => (status, response_payload), + Err(e) => { + error!("Failed to compute a response payload to request: {:?}", e); + let status = StatusCode::INTERNAL_SERVER_ERROR; + log_response(&addr, &method, &uri, &start, status); + return Ok(Response::builder() + .status(status) + .body(Body::empty()) + .expect("Failed to build HTTP response")); + } + }; + + response_builder = response_builder.status(status); + // Create HTTP response + let response = match create_response(auth_mech, &conn_state, response_builder, response_payload) + { + Ok(response) => response, + Err(e) => { + error!("Failed to build HTTP response: {:?}", e); + let status = StatusCode::INTERNAL_SERVER_ERROR; + log_response(&addr, &method, &uri, &start, status); + return Ok(Response::builder() + .status(status) + .body(Body::empty()) + .expect("Failed to build HTTP response")); + } + }; + + log_response(&addr, &method, &uri, &start, response.status()); + // debug!("Send response: {:?}", response); + Ok(response) +} + +async fn shutdown_signal() { + let ctrl_c = tokio::signal::ctrl_c(); + let mut sigterm = tokio::signal::unix::signal(SignalKind::terminate()) + .expect("failed to install SIGTERM handler"); + + tokio::select! 
{ + _ = ctrl_c => { info!("Received CTRL+C") }, + _ = sigterm.recv() => { info!("Received SIGTERM signal") }, + }; +} + +pub async fn run(settings: Settings) { + let mut servers = Vec::new(); + + let db: Db = db_from_settings(&settings) + .await + .expect("Failed to initialize database"); + + // Check that database schema is up to date + match schema_is_up_to_date(db.clone()).await { + Ok(true) => (), + Ok(false) => panic!("Schema needs to be updated. Please check migration guide and then run `openwec db upgrade`"), + Err(err) => panic!("An error occurred while checking schema version: {:?}.\nHelp: You may need to run `openwec db init` to setup your database.", err), + }; + + let subscriptions = Arc::new(RwLock::new(HashMap::new())); + + let interval = settings.server().db_sync_interval(); + let update_task_db = db.clone(); + let update_task_subscriptions = subscriptions.clone(); + let (update_task_subscription_exit_tx, update_task_subscription_exit_rx) = oneshot::channel(); + // Launch a task responsible for updating subscriptions + tokio::spawn(async move { + reload_subscriptions_task( + update_task_db, + update_task_subscriptions, + interval, + update_task_subscription_exit_rx, + ) + .await + }); + + let interval = settings.server().flush_heartbeats_interval(); + let update_task_db = db.clone(); + // Channel to communicate with heartbeat task + // TODO: why 32? 
+ let (heartbeat_tx, heartbeat_rx) = mpsc::channel(32); + let (heartbeat_exit_tx, heartbeat_exit_rx) = oneshot::channel(); + + // Launch a task responsible for managing heartbeats + tokio::spawn(async move { + heartbeat_task(update_task_db, interval, heartbeat_rx, heartbeat_exit_rx).await + }); + + for collector in settings.collectors() { + let collector_db = db.clone(); + let collector_subscriptions = subscriptions.clone(); + let collector_settings = collector.clone(); + let collector_heartbeat_tx = heartbeat_tx.clone(); + let collector_server_settings = settings.server().clone(); + + // Construct our SocketAddr to listen on... + let addr = SocketAddr::from(( + IpAddr::from_str(collector.listen_address()) + .expect("Failed to parse server.listen_address"), + collector.listen_port(), + )); + + trace!("Listen address is {}", addr); + + // FIXME + let kerberos = match collector.authentication() { + common::settings::Authentication::Kerberos(kerberos) => kerberos, + _ => panic!("Unsupported authentication type"), + }; + + env::set_var("KRB5_KTNAME", kerberos.keytab()); + + let principal = kerberos.service_principal_name().to_owned(); + // Try to initialize a security context. This is to be sure that an error in + // Kerberos configuration will be reported as soon as possible. + let state = kerberos::State::new(&principal); + if state.context_is_none() { + panic!("Could not initialize Kerberos context"); + } + + // A `MakeService` that produces a `Service` to handle each connection. + let make_service = make_service_fn(move |conn: &AddrStream| { + // We have to clone the context to share it with each invocation of + // `make_service`. 
+ + // Initialise Kerberos context once for each TCP connection + let conn_state = Arc::new(Mutex::new(kerberos::State::new(&principal))); + let collector_settings = collector_settings.clone(); + let svc_db = collector_db.clone(); + let svc_server_settings = collector_server_settings.clone(); + let auth_mec = AuthenticationMechanism::Kerberos; + let subscriptions = collector_subscriptions.clone(); + let collector_heartbeat_tx = collector_heartbeat_tx.clone(); + + let addr = conn.remote_addr(); + + debug!("Received TCP connection from {}", addr); + + // Create a `Service` for responding to the request. + let service = service_fn(move |req| { + handle( + svc_server_settings.clone(), + collector_settings.clone(), + svc_db.clone(), + subscriptions.clone(), + collector_heartbeat_tx.clone(), + auth_mec, + conn_state.clone(), + addr, + req, + ) + }); + + // Return the service to hyper. + async move { Ok::<_, Infallible>(service) } + }); + + // Then bind and serve... + let server = Server::bind(&addr) + .serve(make_service) + .with_graceful_shutdown(shutdown_signal()); + + info!("Server listenning on {}", addr); + servers.push(server); + } + + let result = join_all(servers).await; + + for server in result { + if let Err(e) = server { + error!("Server error: {}", e); + } + } + + info!("HTTP server has been shutdown."); + + let (task_ended_tx, task_ended_rx) = oneshot::channel(); + if let Err(e) = heartbeat_exit_tx.send(task_ended_tx) { + error!("Failed to shutdown heartbeat task: {:?}", e); + }; + if let Err(e) = task_ended_rx.await { + error!("Failed to wait for heartbeat task shutdown: {:?}", e); + } + + info!("Heartbeat task has been terminated."); + + let (task_ended_tx, task_ended_rx) = oneshot::channel(); + if let Err(e) = update_task_subscription_exit_tx.send(task_ended_tx) { + error!("Failed to shutdown update subscription task: {:?}", e); + } + if let Err(e) = task_ended_rx.await { + error!("Failed to wait for heartbeat task shutdown: {:?}", e); + } + + 
info!("Subscription update task has been terminated."); +} diff --git a/server/src/logic.rs b/server/src/logic.rs new file mode 100644 index 0000000..dea1cc7 --- /dev/null +++ b/server/src/logic.rs @@ -0,0 +1,426 @@ +use crate::{ + event::EventMetadata, + formatter::Format, + heartbeat::{store_heartbeat, WriteHeartbeatMessage}, + soap::{ + Body, Header, Message, OptionSetValue, Subscription as SoapSubscription, SubscriptionBody, + ACTION_ACK, ACTION_END, ACTION_ENUMERATE, ACTION_ENUMERATE_RESPONSE, ACTION_EVENTS, + ACTION_HEARTBEAT, ACTION_SUBSCRIBE, ACTION_SUBSCRIPTION_END, ANONYMOUS, RESOURCE_EVENT_LOG, + }, + subscription::{Subscription, Subscriptions}, + RequestCategory, RequestData, +}; +use common::{ + database::Db, + settings::{Collector, Server}, +}; +use http::status::StatusCode; +use log::{debug, error, info, warn}; +use std::{collections::HashMap, sync::Arc}; +use tokio::{sync::mpsc, task::JoinSet}; + +use anyhow::{anyhow, bail, Context, Result}; + +pub enum Response { + Ok(String, Option), + Err(StatusCode), +} + +impl Response { + pub fn ok(action: &str, body: Option) -> Self { + Response::Ok(action.to_owned(), body) + } + + pub fn err(status_code: StatusCode) -> Self { + Response::Err(status_code) + } +} + +fn check_sub_request_data(request_data: &RequestData, version: &str) -> bool { + let uri_version = if let RequestCategory::Subscription(version) = request_data.category() { + version + } else { + error!("Request URI is incoherent with body message"); + return false; + }; + + if version != uri_version { + error!( + "URI identifier and message identifier do not match: {} != {}", + uri_version, version + ); + return false; + } + true +} + +async fn handle_enumerate( + collector: &Collector, + db: &Db, + subscriptions: Subscriptions, + request_data: &RequestData, +) -> Result { + // Check that URI corresponds to an enumerate Request + let uri = match request_data.category() { + RequestCategory::Enumerate(uri) => uri, + _ => { + error!("Invalid URI for 
Enumerate request"); + return Ok(Response::err(StatusCode::BAD_REQUEST)); + } + }; + + info!( + "Received Enumerate request from {}:{} ({}) with URI {}", + request_data.remote_addr.ip(), + request_data.remote_addr.port(), + request_data.principal(), + uri + ); + + // Clone subscriptions references into a new vec + let current_subscriptions = { + let subscriptions_unlocked = subscriptions.read().unwrap(); + let mut current = Vec::with_capacity(subscriptions_unlocked.len()); + for (_, subscription) in subscriptions.read().unwrap().iter() { + current.push(subscription.clone()); + } + current + }; + + // Build Enumerate Response + let mut res_subscriptions = Vec::new(); + for subscription in current_subscriptions { + let subscription_data = subscription.data(); + // Skip disabled subscriptions or subscriptions without enabled outputs + if !subscription_data.is_active() { + continue; + } + + // Skip subscriptions that are not linked with the current URI (or with subscription.uri = None) + match subscription_data.uri() { + Some(subscription_uri) if uri != subscription_uri => { + debug!( + "Skip subscription \"{}\" ({}) which uri {} does not match with {}", + subscription_data.name(), + subscription_data.uuid(), + subscription_uri, + uri + ); + continue; + } + _ => (), + } + + debug!( + "Include subscription \"{}\" ({})", + subscription_data.name(), + subscription_data.uuid() + ); + + let mut options = HashMap::new(); + options.insert( + "SubscriptionName".to_string(), + OptionSetValue::String(subscription_data.name().to_string()), + ); + options.insert( + "Compression".to_string(), + OptionSetValue::String("SLDC".to_string()), + ); + // TODO: Make content format an option + options.insert( + "ContentFormat".to_string(), + OptionSetValue::String("RenderedText".to_string()), + ); + options.insert( + "IgnoreChannelError".to_string(), + OptionSetValue::Boolean(true), + ); + options.insert("CDATA".to_string(), OptionSetValue::Boolean(true)); + + // Add ReadExistingEvents 
option + if subscription_data.read_existing_events() { + options.insert( + "ReadExistingEvents".to_string(), + OptionSetValue::Boolean(true), + ); + } + + let header = Header::new( + ANONYMOUS.to_string(), + RESOURCE_EVENT_LOG.to_string(), + ACTION_SUBSCRIBE.to_string(), + subscription_data.max_envelope_size(), + None, + None, + None, + Some(1), + options, + ); + + let mut bookmark: Option = db + .get_bookmark(request_data.principal(), subscription_data.uuid()) + .await + .context("Failed to retrieve current bookmark from database")?; + + if bookmark.is_none() && subscription_data.read_existing_events() { + bookmark = + Some("http://schemas.dmtf.org/wbem/wsman/1/wsman/bookmark/earliest".to_string()) + } + + debug!( + "Load bookmark of {} for subscription {}: {:?}", + request_data.principal(), + subscription_data.uuid(), + bookmark + ); + + let body = SubscriptionBody { + heartbeat_interval: subscription_data.heartbeat_interval() as u64, + identifier: subscription_data.version().to_owned(), + bookmark, + query: subscription_data.query().to_owned(), + address: format!( + "http://{}:{}/wsman/subscriptions/{}", + collector.hostname(), + collector.listen_port(), + subscription_data.version() + ), + connection_retry_count: subscription_data.connection_retry_count(), + connection_retry_interval: subscription_data.connection_retry_interval(), + max_time: subscription_data.max_time(), + max_envelope_size: subscription_data.max_envelope_size(), + }; + + res_subscriptions.push(SoapSubscription { + identifier: subscription_data.version().to_owned(), + header, + body, + }); + } + + Ok(Response::ok( + ACTION_ENUMERATE_RESPONSE, + Some(Body::EnumerateResponse(res_subscriptions)), + )) +} + +async fn handle_heartbeat( + subscriptions: Subscriptions, + heartbeat_tx: mpsc::Sender, + request_data: &RequestData, + message: &Message, +) -> Result { + let version = message + .header() + .identifier() + .ok_or_else(|| anyhow!("Missing field identifier"))?; + + if 
!check_sub_request_data(request_data, version) { + return Ok(Response::err(StatusCode::BAD_REQUEST)); + } + + let subscription = { + let subscriptions = subscriptions.read().unwrap(); + match subscriptions.get(version) { + Some(subscription) => subscription.to_owned(), + None => { + warn!( + "Received Heartbeat of {}:{} ({}) for unknown subscription {}", + request_data.remote_addr().ip(), + request_data.remote_addr().port(), + request_data.principal(), + version + ); + return Ok(Response::err(StatusCode::BAD_REQUEST)); + } + } + }; + + info!( + "Received Heartbeat of {}:{} ({:?}) for subscription {} ({})", + request_data.remote_addr().ip(), + request_data.remote_addr().port(), + request_data.principal(), + subscription.data().name(), + subscription.uuid(), + ); + + store_heartbeat( + heartbeat_tx, + request_data.principal(), + request_data.remote_addr().ip().to_string(), + subscription.uuid(), + false, + ) + .await + .context("Failed to store heartbeat")?; + Ok(Response::ok(ACTION_ACK, None)) +} + +async fn handle_events( + server: &Server, + db: &Db, + subscriptions: Subscriptions, + heartbeat_tx: mpsc::Sender, + request_data: &RequestData, + message: &Message, +) -> Result { + if let Some(Body::Events(events)) = &message.body { + let version = message + .header() + .identifier() + .ok_or_else(|| anyhow!("Missing field identifier"))?; + + if !check_sub_request_data(request_data, version) { + return Ok(Response::err(StatusCode::BAD_REQUEST)); + } + + let subscription: Arc = { + let subscriptions = subscriptions.read().unwrap(); + let subscription = subscriptions.get(version); + match subscription { + Some(subscription) => subscription.to_owned(), + None => { + warn!("Unknown subscription version {}", version); + return Ok(Response::err(StatusCode::NOT_FOUND)); + } + } + }; + info!( + "Received Events from {}:{} ({}) for subscription {} ({})", + request_data.remote_addr().ip(), + request_data.remote_addr().port(), + request_data.principal(), + 
subscription.data().name(), + subscription.uuid() + ); + + let metadata = Arc::new(EventMetadata::new( + request_data.remote_addr(), + request_data.principal(), + server.node_name().cloned(), + )); + + // Build event strings for all formats + let mut formatted_events: HashMap>>> = HashMap::new(); + for format in subscription.formats() { + let mut content = Vec::new(); + for raw in events.iter() { + content.push( + format + .format(subscription.clone(), request_data, raw.clone()) + .with_context(|| format!("Failed to format event with {:?}", format))?, + ); + } + formatted_events.insert(format.clone(), Arc::new(content)); + } + + let mut handles = JoinSet::new(); + + // Spawn tasks to write events to every outputs of the subscription + for output in subscription.outputs() { + let output = output.clone(); + let metadata = metadata.clone(); + let format = output.format(); + let content = formatted_events + .get(format) + .ok_or_else(|| anyhow!("Could not get formatted event for format {:?}", format))? 
+ .clone(); + + handles.spawn(async move { + output.write(metadata, content).await.with_context(|| { + format!("Failed to write event to output {}", output.describe()) + }) + }); + } + + // Wait for all tasks to finish + let mut succeed = true; + while let Some(res) = handles.join_next().await { + match res { + Ok(Ok(())) => (), + Ok(Err(err)) => { + succeed = false; + warn!("Failed to process output and send event: {:?}", err); + } + Err(err) => { + succeed = false; + warn!("Something bad happened with a process task: {:?}", err) + } + } + } + + if !succeed { + return Ok(Response::err(StatusCode::SERVICE_UNAVAILABLE)); + } + + let bookmark = message + .header() + .bookmarks() + .ok_or_else(|| anyhow!("Missing bookmarks in request payload"))?; + // Store bookmarks and heartbeats + db.store_bookmark(request_data.principal(), subscription.uuid(), bookmark) + .await + .context("Failed to store bookmarks")?; + + debug!( + "Store bookmark from {}:{} ({}) for subscription {} ({}): {}", + request_data.remote_addr().ip(), + request_data.remote_addr().port(), + request_data.principal(), + subscription.data().name(), + subscription.uuid(), + bookmark + ); + store_heartbeat( + heartbeat_tx, + request_data.principal(), + request_data.remote_addr().ip().to_string(), + subscription.uuid(), + true, + ) + .await + .context("Failed to store heartbeat")?; + Ok(Response::ok(ACTION_ACK, None)) + } else { + bail!("Invalid events message"); + } +} + +pub async fn handle_message( + server: &Server, + collector: &Collector, + db: Db, + subscriptions: Subscriptions, + heartbeat_tx: mpsc::Sender, + request_data: RequestData, + message: &Message, +) -> Result { + let action = message.action()?; + debug!("Received {} request", action); + + if action == ACTION_ENUMERATE { + handle_enumerate(collector, &db, subscriptions, &request_data) + .await + .context("Failed to handle Enumerate action") + } else if action == ACTION_END || action == ACTION_SUBSCRIPTION_END { + 
Ok(Response::err(StatusCode::OK)) + } else if action == ACTION_HEARTBEAT { + handle_heartbeat(subscriptions, heartbeat_tx, &request_data, message) + .await + .context("Failed to handle Heartbeat action") + } else if action == ACTION_EVENTS { + handle_events( + server, + &db, + subscriptions, + heartbeat_tx, + &request_data, + message, + ) + .await + .context("Failed to handle Events action") + } else { + Err(anyhow!("Unsupported message {}", action)) + } +} diff --git a/server/src/main.rs b/server/src/main.rs new file mode 100644 index 0000000..e6528b7 --- /dev/null +++ b/server/src/main.rs @@ -0,0 +1,50 @@ +use clap::{arg, command}; +use common::settings::{Settings, DEFAULT_CONFIG_FILE}; +use server::run; +use std::env; + +#[tokio::main] +async fn main() { + let matches = command!() + .name("openwecd") + .arg( + arg!(-c --config "Sets a custom config file") + .default_value(DEFAULT_CONFIG_FILE) + .required(false), + ) + .arg(arg!(-v --verbosity ... "Sets the level of verbosity")) + .get_matches(); + + let config_file = matches.get_one::("config"); + let settings = match Settings::new(config_file) { + Ok(settings) => settings, + Err(err) => { + eprintln!("Could not load config: {}", err); + std::process::exit(1); + } + }; + + if env::var("OPENWEC_LOG").is_err() { + if matches.get_count("verbosity") > 0 { + env::set_var( + "OPENWEC_LOG", + match matches.get_count("verbosity") { + 1 => "info", + 2 => "debug", + _ => "trace", + }, + ); + } else if let Some(verbosity) = settings.server().verbosity() { + env::set_var("OPENWEC_LOG", verbosity); + } else { + env::set_var("OPENWEC_LOG", "warn"); + } + } + + env_logger::Builder::from_env("OPENWEC_LOG") + .format_module_path(false) + .format_timestamp(None) + .init(); + + run(settings).await; +} diff --git a/server/src/multipart.rs b/server/src/multipart.rs new file mode 100644 index 0000000..5f0d23a --- /dev/null +++ b/server/src/multipart.rs @@ -0,0 +1,158 @@ +use anyhow::{bail, Result}; +use buf_read_ext::BufReadExt; +use 
hyper::header::CONTENT_TYPE; +use log::debug; +use mime::Mime; +use std::io::{BufReader, Read}; + +pub fn read_multipart_body(stream: &mut S, boundary: &str) -> Result> { + let mut reader = BufReader::with_capacity(4096, stream); + + let mut buf: Vec = Vec::new(); + + let middle_boundary = "--".to_owned() + boundary + "\r\n"; + let end_boundary = "--".to_owned() + boundary + "--\r\n"; + let lt = vec![b'\r', b'\n']; + + // Read past the initial boundary + let (_, found) = reader.stream_until_token(middle_boundary.as_bytes(), &mut buf)?; + if !found { + bail!("EoF found before first boundary"); + } + + // Read first part which contains control information according + // to RFC 1847 + + // Read the headers (which should end in 2 line terminators, but do not + // for unknown reasons). But there are only headers in this part so this + // is fine :) + buf.truncate(0); // start fresh + let (_, found) = reader.stream_until_token(middle_boundary.as_bytes(), &mut buf)?; + if !found { + bail!("EofInPartHeaders"); + } + + // Keep the 2 line terminators as httparse will expect it + buf.extend(lt.iter().cloned()); + + // Parse the headers + let mut header_memory = [httparse::EMPTY_HEADER; 4]; + match httparse::parse_headers(&buf, &mut header_memory)? { + httparse::Status::Complete((_, raw_headers)) => { + for header in raw_headers { + debug!("Header found: {:?}", header); + if header.name == CONTENT_TYPE { + let mime = std::str::from_utf8(header.value)?.parse::()?; + if mime.type_() != "application" { + bail!("Wrong encapsulated multipart type"); + } + if mime.subtype() != "HTTP-Kerberos-session-encrypted" { + bail!("Wrong encapsulated multipart sub type"); + } + } + if header.name == "OriginalContent" { + // This should be checked later: first we decrypt, then we + // try to understand what is inside ? + // TODO: store charset somewhere and use it to decode + // decrypted bytes + // TODO: check something with Length ? 
+ } + } + } + httparse::Status::Partial => bail!("PartialHeaders"), + } + + // Read Content-Type header + buf.truncate(0); // start fresh + let (_, found) = reader.stream_until_token(<, &mut buf)?; + if !found { + bail!("No cr lf after headers"); + } + + // Keep the 2 line terminators as httparse will expect it + buf.extend(lt.iter().cloned()); + buf.extend(lt.iter().cloned()); + + let mut header_memory = [httparse::EMPTY_HEADER; 4]; + match httparse::parse_headers(&buf, &mut header_memory)? { + httparse::Status::Complete((_, raw_headers)) => { + for header in raw_headers { + debug!("Header found: {:?}", header); + if header.name == CONTENT_TYPE { + let mime = std::str::from_utf8(header.value)?.parse::()?; + if mime.type_() != "application" { + bail!("Wrong encapsulated multipart type"); + } + if mime.subtype() != "octet-stream" { + bail!("Wrong encapsulated multipart sub type"); + } + } + } + } + httparse::Status::Partial => bail!("PartialHeaders"), + } + + // Read interesting data + buf.truncate(0); // start fresh + let (size, found) = reader.stream_until_token(end_boundary.as_bytes(), &mut buf)?; + if !found { + log::error!( + "Could not find end boundary in {} bytes: {:?}", + size, + String::from_utf8_lossy(&buf) + ); + bail!("EofInPart"); + } + + Ok(buf) +} + +pub fn get_multipart_body( + encrypted_payload: &[u8], + cleartext_payload_len: usize, + boundary: &str, +) -> Vec { + let mut body = Vec::with_capacity(4096); + + let middle_boundary = "--".to_owned() + boundary + "\r\n"; + let end_boundary = "--".to_owned() + boundary + "--\r\n"; + + body.extend_from_slice(middle_boundary.as_bytes()); + body.extend_from_slice( + "Content-Type: application/HTTP-Kerberos-session-encrypted\r\n".as_bytes(), + ); + + let mut buffer = itoa::Buffer::new(); + body.extend_from_slice( + ("OriginalContent: type=application/soap+xml;charset=UTF-16;Length=".to_owned() + + buffer.format(cleartext_payload_len) + + "\r\n") + .as_bytes(), + ); + + 
body.extend_from_slice(middle_boundary.as_bytes()); + body.extend_from_slice("Content-Type: application/octet-stream\r\n".as_bytes()); + body.extend_from_slice(encrypted_payload); + body.extend_from_slice(end_boundary.as_bytes()); + + body +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_multipart() -> Result<()> { + let payload = "this is a very good payload".to_owned(); + let length = payload.len(); + let boundary = "super cool boundary"; + + let body = get_multipart_body(&payload.as_bytes(), length, boundary); + + let received_payload = read_multipart_body(&mut &*body, boundary)?; + assert_eq!(payload.as_bytes(), received_payload); + + Ok(()) + } +} diff --git a/server/src/output.rs b/server/src/output.rs new file mode 100644 index 0000000..e064e42 --- /dev/null +++ b/server/src/output.rs @@ -0,0 +1,47 @@ +use std::sync::Arc; + +use anyhow::Result; +use async_trait::async_trait; +use common::subscription::{FileConfiguration, KafkaConfiguration, SubscriptionOutput}; + +use crate::{event::EventMetadata, formatter::Format}; + +#[derive(Debug, Clone)] +pub enum OutputType { + Files(Format, FileConfiguration, bool), + Kafka(Format, KafkaConfiguration, bool), + Tcp(Format, String, u16, bool), +} + +impl From<&SubscriptionOutput> for OutputType { + fn from(so: &SubscriptionOutput) -> Self { + match so { + SubscriptionOutput::Files(sof, config, enabled) => { + OutputType::Files(sof.into(), config.clone(), *enabled) + } + SubscriptionOutput::Kafka(sof, config, enabled) => { + OutputType::Kafka(sof.into(), config.clone(), *enabled) + } + SubscriptionOutput::Tcp(sof, config, enabled) => OutputType::Tcp( + sof.into(), + config.addr().to_string(), + config.port(), + *enabled, + ), + } + } +} + +// async_trait is required to be able to use async functions +// in traits +#[async_trait] +pub trait Output { + async fn write( + &self, + metadata: Arc, + events: Arc>>, + ) -> Result<()>; + + fn describe(&self) -> String; + fn format(&self) -> &Format; +} diff 
--git a/server/src/outputs/file.rs b/server/src/outputs/file.rs new file mode 100644 index 0000000..b6b1c68 --- /dev/null +++ b/server/src/outputs/file.rs @@ -0,0 +1,375 @@ +use async_trait::async_trait; +use log::{debug, info, warn}; +use tokio::fs::OpenOptions; +use tokio::sync::{mpsc, oneshot}; + +use crate::event::EventMetadata; +use crate::formatter::Format; +use crate::output::Output; +use common::subscription::FileConfiguration; +use std::collections::HashMap; +use std::sync::Arc; +use std::{path::PathBuf, str::FromStr}; +use tokio::fs::{create_dir_all, File}; +use tokio::io::AsyncWriteExt; +use std::net::{IpAddr}; +use anyhow::{anyhow, Context, Result, bail}; + +#[derive(Debug)] +pub struct WriteFileMessage { + path: PathBuf, + content: String, + resp: oneshot::Sender>, +} + +async fn handle_message( + file_handles: &mut HashMap, + message: &mut WriteFileMessage, +) -> Result<()> { + let parent = message + .path + .parent() + .ok_or_else(|| anyhow!("Failed to retrieve messages parent folder"))?; + let path = &message.path; + let file = match file_handles.get_mut(path) { + Some(file) => { + debug!("File {} is already opened", path.display()); + // The path already exists in file_handles map + file + } + None => { + // Create directory (if it does not already exist) + debug!("Create directory {}", parent.display()); + create_dir_all(parent).await?; + // Open file + debug!("Open file {}", path.display()); + let file = OpenOptions::new() + .create(true) + .append(true) + .open(path) + .await + .with_context(|| format!("Failed to open file {}", path.display()))?; + // Insert it into file_buffers map + file_handles.insert(path.clone(), file); + // Retrieve it + file_handles + .get_mut(path) + .ok_or_else(|| anyhow!("Could not find newly inserted File in file handles"))? 
+ } + }; + file.write_all(message.content.as_bytes()).await?; + Ok(()) +} + +pub async fn run( + mut task_rx: mpsc::Receiver, + mut task_exit_rx: oneshot::Receiver<()>, +) { + info!("File output task started"); + let mut file_handles: HashMap = HashMap::new(); + loop { + tokio::select! { + Some(mut message) = task_rx.recv() => { + let result = handle_message(&mut file_handles, &mut message).await; + if let Err(e) = message + .resp + .send(result) { + warn!("Failed to send File write result because the receiver dropped. Result was: {:?}", e); + } + }, + _ = &mut task_exit_rx => { + break + }, + }; + } + info!("Exiting File output task"); +} + +pub struct OutputFile { + format: Format, + config: FileConfiguration, + task_tx: mpsc::Sender, + task_exit_tx: Option>, +} + +impl OutputFile { + pub fn new(format: Format, config: &FileConfiguration) -> Self { + debug!( + "Initialize file output with format {:?} and config {:?}", + format, config + ); + // Create a communication channel with the task responsible for file management + // TODO: Why 32? 
+ let (task_tx, task_rx) = mpsc::channel(32); + + // Create a oneshot channel to ask to the ask to end itself + let (task_exit_tx, task_exit_rx) = oneshot::channel(); + + // Launch the task responsible for handling file system operations + tokio::spawn(async move { + run(task_rx, task_exit_rx).await; + }); + OutputFile { + format, + config: config.clone(), + task_tx, + task_exit_tx: Some(task_exit_tx), + } + } + + fn build_path(&self, ip: &IpAddr, principal: &str, node_name: Option<&String>) -> Result { + let mut path: PathBuf = PathBuf::from_str(self.config.base())?; + + match self.config.split_on_addr_index() { + Some(index) => { + match ip { + IpAddr::V4(ipv4) => { + // Sanitize index + let index = if index < 1 { + warn!("File configuration split_on_addr_index can not be inferior as 1: found {}", index); + 1 + } else if index > 4 { + warn!("File configuration split_on_addr_index can not be superior as 4 for IPv4: found {}", index); + 4 + } else { + index + }; + + // Split on "." + // a.b.c.d + // 1 => a/a.b/a.b.c/a.b.c.d + // 2 => a.b/a.b.c/a.b.c.d + // 3 => a.b.c/a.b.c.d + // 4 => a.b.c.d + let octets = ipv4.octets(); + for i in index..5 { + let mut fname = String::new(); + for j in 0..i { + fname.push_str(format!("{}", octets.get(j as usize).ok_or_else(|| anyhow!("Could not get segment {} of ipv4 addr {:?}", j, ipv4))?).as_ref()); + // There is probably a better way to write this + if j != i-1 { + fname.push('.'); + } + } + path.push(&fname); + } + }, + IpAddr::V6(ipv6) => { + // Sanitize index + let index = if index < 1 { + warn!("File configuration split_on_addr_index can not be inferior as 1: found {}", index); + 1 + } else if index > 8 { + warn!("File configuration split_on_addr_index can not be superior as 8 for IPv6: found {}", index); + 8 + } else { + index + }; + + // Split on ":" + // a:b:c:d:e:f:g:h + // 1 => a/a:b/a:b:c/... + // 2 => a:b/a:b:c/... + // 3 => a:b:c/a:b:c:d/... + // 4 => a:b:c:d/... + // 5 => a:b:c:d:e/... + // 6 => a:b:c:d:e:f/... 
+ // 7 => a:b:c:d:e:f:g/ + // 8 => a:b:c:d:e:f:g:h + let segments = ipv6.segments(); + for i in index..9 { + let mut fname = String::new(); + for j in 0..i { + fname.push_str(format!("{:x}", segments.get(j as usize).ok_or_else(|| anyhow!("Could not get segment {} of ipv6 addr {:?}", j, ipv6))?).as_ref()); + // There is probably a better way to write this + if j != i-1 { + fname.push(':'); + } + } + path.push(&fname); + } + } + } + + }, + None => { + path.push(ip.to_string()); + } + } + + path.push(sanitize_name(principal)); + + if self.config.append_node_name() { + match node_name { + Some(name) => path.push(name), + None => bail!("Could not append node name to path because it is unset"), + } + } + + path.push(self.config.filename()); + Ok(path) + } +} + +#[async_trait] +impl Output for OutputFile { + async fn write( + &self, + metadata: Arc, + events: Arc>>, + ) -> Result<()> { + // Build path + let path = self.build_path(&metadata.addr().ip(), metadata.principal(), metadata.node_name())?; + debug!("Computed path is {}", path.display()); + + // Build the "content" string to write + let mut content = String::new(); + for event in events.iter() { + content.push_str(event); + content.push('\n'); + } + + // Create a oneshot channel to retrieve the result of the operation + let (tx, rx) = oneshot::channel(); + self.task_tx + .send(WriteFileMessage { + path, + content, + resp: tx, + }) + .await?; + + // Wait for the result + rx.await??; + + Ok(()) + } + + fn describe(&self) -> String { + format!("Files ({:?})", self.config) + } + + fn format(&self) -> &Format { + &self.format + } +} + +impl Drop for OutputFile { + fn drop(&mut self) { + if let Some(sender) = self.task_exit_tx.take() { + // Using `let _ =` to ignore send errors. 
+ let _ = sender.send(()); + } + } +} + +fn sanitize_name(name: &str) -> String { + // We only allow strings containing at most 255 chars within [a-z][A-Z][0-9][.-_@] + let mut new_str = String::with_capacity(name.len()); + + let mut count: usize = 0; + for ch in name.chars() { + if count >= 255 { + warn!( + "The string is too long. Keeping only 255 first chars: \"{}\"", + new_str + ); + break; + } + + if ch.is_ascii_alphanumeric() || ch == '.' || ch == '-' || ch == '_' || ch == '@' { + new_str.push(ch); + count += 1; + } else if ch == '$' { + // Discard silently '$' + // TODO: Are we sure that we want to do this? + // The idea is to remove '$' from principals because it + // may cause bugs with badly written shell scripts. + } else { + warn!( + "An invalid char '{}' in \"{}\" has been removed", + ch, new_str + ); + } + } + + new_str +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_sanitize() { + assert_eq!(sanitize_name("test"), "test"); + assert_eq!(sanitize_name("wec.windomain.local"), "wec.windomain.local"); + assert_eq!(sanitize_name("/bad/dir"), "baddir"); + assert_eq!( + sanitize_name("AcceptLettersANDN3293Mbers"), + "AcceptLettersANDN3293Mbers" + ); + assert_eq!( + sanitize_name("test_underscore-and-hyphen"), + "test_underscore-and-hyphen" + ); + let too_long = sanitize_name("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"); + assert_eq!(too_long.len(), 255); + assert_eq!(sanitize_name("DC$@WINDOMAIN.LOCAL"), "DC@WINDOMAIN.LOCAL"); + } + + #[tokio::test] + async fn test_build_path() -> Result<()> { + let config = FileConfiguration::new("/base".to_string(), None, false, "messages".to_string()); + let ip: IpAddr = "127.0.0.1".parse()?; + + let output_file = OutputFile::new(Format::Json, &config); + + 
assert_eq!(output_file.build_path(&ip, "princ", None)?,PathBuf::from_str("/base/127.0.0.1/princ/messages")?); + + let config = FileConfiguration::new("/base".to_string(), None, true, "messages".to_string()); + let output_file = OutputFile::new(Format::Json, &config); + + assert_eq!(output_file.build_path(&ip, "princ", Some(&"node".to_string()))?,PathBuf::from_str("/base/127.0.0.1/princ/node/messages")?); + + let config = FileConfiguration::new("/base".to_string(), Some(1), true, "messages".to_string()); + let output_file = OutputFile::new(Format::Json, &config); + + assert_eq!(output_file.build_path(&ip, "princ", Some(&"node".to_string()))?,PathBuf::from_str("/base/127/127.0/127.0.0/127.0.0.1/princ/node/messages")?); + + let config = FileConfiguration::new("/base".to_string(), Some(2), false, "other".to_string()); + let output_file = OutputFile::new(Format::Json, &config); + + assert_eq!(output_file.build_path(&ip, "princ", Some(&"node".to_string()))?,PathBuf::from_str("/base/127.0/127.0.0/127.0.0.1/princ/other")?); + + let config = FileConfiguration::new("/base".to_string(), Some(3), false, "messages".to_string()); + let output_file = OutputFile::new(Format::Json, &config); + + assert_eq!(output_file.build_path(&ip, "princ", Some(&"node".to_string()))?,PathBuf::from_str("/base/127.0.0/127.0.0.1/princ/messages")?); + + let config = FileConfiguration::new("/base".to_string(), Some(4), false, "messages".to_string()); + let output_file = OutputFile::new(Format::Json, &config); + + assert_eq!(output_file.build_path(&ip, "princ", Some(&"node".to_string()))?,PathBuf::from_str("/base/127.0.0.1/princ/messages")?); + + let config = FileConfiguration::new("/base".to_string(), Some(5), false, "messages".to_string()); + let output_file = OutputFile::new(Format::Json, &config); + + assert_eq!(output_file.build_path(&ip, "princ", Some(&"node".to_string()))?,PathBuf::from_str("/base/127.0.0.1/princ/messages")?); + + let config = FileConfiguration::new("/base".to_string(), None, 
true, "messages".to_string()); + let output_file = OutputFile::new(Format::Json, &config); + + assert!(output_file.build_path(&ip, "princ", None).is_err()); + + let ip: IpAddr = "1:2:3:4:5:6:7:8".parse()?; + let config = FileConfiguration::new("/base".to_string(), None, false, "messages".to_string()); + let output_file = OutputFile::new(Format::Json, &config); + assert_eq!(output_file.build_path(&ip, "princ", Some(&"node".to_string()))?,PathBuf::from_str("/base/1:2:3:4:5:6:7:8/princ/messages")?); + + let config = FileConfiguration::new("/base".to_string(), Some(3), false, "messages".to_string()); + let output_file = OutputFile::new(Format::Json, &config); + assert_eq!(output_file.build_path(&ip, "princ", Some(&"node".to_string()))?,PathBuf::from_str("/base/1:2:3/1:2:3:4/1:2:3:4:5/1:2:3:4:5:6/1:2:3:4:5:6:7/1:2:3:4:5:6:7:8/princ/messages")?); + Ok(()) + } +} diff --git a/server/src/outputs/kafka.rs b/server/src/outputs/kafka.rs new file mode 100644 index 0000000..2cc4683 --- /dev/null +++ b/server/src/outputs/kafka.rs @@ -0,0 +1,78 @@ +use anyhow::{bail, Result}; +use async_trait::async_trait; +use common::subscription::KafkaConfiguration; +use futures::future::join_all; +use log::debug; +use rdkafka::{ + producer::{FutureProducer, FutureRecord}, + util::Timeout, + ClientConfig, +}; +use std::{sync::Arc, time::Duration}; + +use crate::{event::EventMetadata, formatter::Format, output::Output}; + +pub struct OutputKafka { + format: Format, + config: KafkaConfiguration, + producer: FutureProducer, +} + +impl OutputKafka { + pub fn new(format: Format, config: &KafkaConfiguration) -> Result { + let mut client_config = ClientConfig::new(); + // Set a default value for Kafka delivery timeout + // This can be overwritten in Kafka configuration + client_config.set("delivery.timeout.ms", "30000"); + for (key, value) in config.options() { + client_config.set(key, value); + } + debug!( + "Initialize kafka output with format {:?} and config {:?}", + format, config + ); + 
Ok(OutputKafka { + format, + config: config.clone(), + producer: client_config.create()?, + }) + } +} + +#[async_trait] +impl Output for OutputKafka { + async fn write( + &self, + _metadata: Arc, + events: Arc>>, + ) -> Result<()> { + let mut futures = Vec::new(); + for event in events.iter() { + // We need to explicitly assign the Key type as () + futures.push(self.producer.send::<(), _, _>( + FutureRecord::to(self.config.topic()).payload(event.as_ref()), + Timeout::After(Duration::from_secs(30)), + )); + } + + // Wait for all events to be sent and ack + let results = join_all(futures).await; + + for result in results { + match result { + Ok(delivery) => debug!("Kafka message sent: {:?}", delivery), + Err((e, _)) => bail!(e), + } + } + + Ok(()) + } + + fn describe(&self) -> String { + format!("Kafka (topic {})", self.config.topic()) + } + + fn format(&self) -> &Format { + &self.format + } +} diff --git a/server/src/outputs/mod.rs b/server/src/outputs/mod.rs new file mode 100644 index 0000000..d8511b1 --- /dev/null +++ b/server/src/outputs/mod.rs @@ -0,0 +1,3 @@ +pub mod file; +pub mod kafka; +pub mod tcp; diff --git a/server/src/outputs/tcp.rs b/server/src/outputs/tcp.rs new file mode 100644 index 0000000..b70358e --- /dev/null +++ b/server/src/outputs/tcp.rs @@ -0,0 +1,162 @@ +use std::sync::Arc; + +use crate::{event::EventMetadata, formatter::Format, output::Output}; +use anyhow::{anyhow, Result}; +use async_trait::async_trait; +use common::subscription::TcpConfiguration; +use log::{debug, info, warn}; +use tokio::{ + net::TcpStream, + sync::{mpsc, oneshot}, +}; + +use tokio::io::AsyncWriteExt; + +#[derive(Debug)] +pub struct WriteTCPMessage { + content: String, + resp: oneshot::Sender>, +} + +fn send_response(sender: oneshot::Sender>, msg: Result<()>) { + if let Err(e) = sender.send(msg) { + warn!( + "Failed to send TCP write result because the receiver dropped. 
Result was: {:?}", + e + ); + } +} + +pub async fn run( + addr: String, + port: u16, + mut task_rx: mpsc::Receiver, + mut task_exit_rx: oneshot::Receiver<()>, +) { + let mut stream_opt: Option = None; + loop { + tokio::select! { + Some(message) = task_rx.recv() => { + // Establish TCP connection if not already done + if stream_opt.is_none() { + match TcpStream::connect((addr.as_str(), port)).await { + Ok(stream) => { + stream_opt = Some(stream); + }, + Err(e) => { + warn!("Failed to connect to {}:{}: {}", addr, port, e); + send_response(message.resp, Err(anyhow!(format!("Failed to connect to {}:{}: {}", addr, port, e)))); + continue; + } + }; + } + // This should never fail + let stream = match stream_opt.as_mut() { + Some(stream) => stream, + None => { + warn!("TCP stream is unset !"); + send_response(message.resp, Err(anyhow!(format!("TCP stream of {}:{} is unset!", addr, port)))); + continue; + } + }; + + // Write data to stream + if let Err(e) = stream.write_all(message.content.as_bytes()).await { + stream_opt = None; + send_response(message.resp, Err(anyhow!(format!("Failed to write in TCP connection ({}:{}): {}", addr, port, e)))); + continue; + } + + send_response(message.resp, Ok(())); + }, + _ = &mut task_exit_rx => { + break + }, + }; + } + info!("Exiting TCP output task ({}:{})", addr, port); +} + +pub struct OutputTcp { + format: Format, + addr: String, + port: u16, + task_tx: mpsc::Sender, + task_exit_tx: Option>, +} + +impl OutputTcp { + pub fn new(format: Format, config: &TcpConfiguration) -> Result { + debug!( + "Initialize TCP output with format {:?} and peer {}:{}", + format, + config.addr(), + config.port() + ); + + // Create a communication channel with the task responsible for file management + // TODO: Why 32? 
+ let (task_tx, task_rx) = mpsc::channel(32); + + // Create a oneshot channel to ask to the ask to end itself + let (task_exit_tx, task_exit_rx) = oneshot::channel(); + + let addr = config.addr().to_string(); + let port = config.port(); + + // Launch the task responsible for handling the TCP connection + tokio::spawn(async move { run(addr, port, task_rx, task_exit_rx).await }); + + Ok(OutputTcp { + format, + addr: config.addr().to_string(), + port: config.port(), + task_tx, + task_exit_tx: Some(task_exit_tx), + }) + } +} + +#[async_trait] +impl Output for OutputTcp { + async fn write( + &self, + _metadata: Arc, + events: Arc>>, + ) -> Result<()> { + // Build the "content" string to write + let mut content = String::new(); + for event in events.iter() { + content.push_str(event); + content.push('\n'); + } + + // Create a oneshot channel to retrieve the result of the operation + let (tx, rx) = oneshot::channel(); + self.task_tx + .send(WriteTCPMessage { content, resp: tx }) + .await?; + + // Wait for the result + rx.await??; + + Ok(()) + } + + fn describe(&self) -> String { + format!("TCP ({}:{})", self.addr, self.port) + } + + fn format(&self) -> &Format { + &self.format + } +} + +impl Drop for OutputTcp { + fn drop(&mut self) { + if let Some(sender) = self.task_exit_tx.take() { + // Using `let _ =` to ignore send errors. 
+ let _ = sender.send(()); + } + } +} diff --git a/server/src/sldc.rs b/server/src/sldc.rs new file mode 100644 index 0000000..d32f737 --- /dev/null +++ b/server/src/sldc.rs @@ -0,0 +1,160 @@ +use anyhow::{bail, Context, Result}; +use bitreader::BitReader; +use log::debug; + +const CTRLSYMB_FLUSH: u16 = 0b1111111110000; +const CTRLSYMB_SCHEME_1: u16 = 0b1111111110001; +const CTRLSYMB_SCHEME_2: u16 = 0b1111111110010; +const CTRLSYMB_FILE_MARK: u16 = 0b1111111110011; +const CTRLSYMB_END_OF_RECORD: u16 = 0b1111111110100; +const CTRLSYMB_RESET_1: u16 = 0b1111111110101; +const CTRLSYMB_RESET_2: u16 = 0b1111111110110; +const CTRLSYMB_END_MARKER: u16 = 0b1111111111111; + +pub fn decompress(compressed_bytes: &Vec) -> Result> { + // Implemented according to ECMA-321 + debug!( + "Try to decompress SLDC data ({} compressed bytes)", + compressed_bytes.len() + ); + + let mut reader = BitReader::new(compressed_bytes); + let mut res: Vec = Vec::new(); + + let mut scheme_1 = false; + let mut scheme_2 = false; + let mut history_buffer: Vec = vec![0; 1024]; + let mut history_index: usize = 0; + while reader.remaining() > 0 { + // Try to find a control symbol + if reader.peek_u16(9)? 
== 0x1FF { + // 0b111111111 + let symb = reader.read_u16(13)?; + match symb { + CTRLSYMB_FLUSH => (), + CTRLSYMB_SCHEME_1 => scheme_1 = true, + CTRLSYMB_SCHEME_2 => scheme_2 = true, + CTRLSYMB_FILE_MARK => (), + CTRLSYMB_END_OF_RECORD => { + debug!( + "SLDC decompression succeed ({} uncompressed bytes)", + res.len() + ); + return Ok(res); + } + CTRLSYMB_RESET_1 => { + scheme_1 = true; + history_buffer = vec![0; 1024]; + } + CTRLSYMB_RESET_2 => { + scheme_2 = true; + history_buffer = vec![0; 1024]; + } + CTRLSYMB_END_MARKER => (), + _ => bail!("Found invalid control symbol"), + } + } else if scheme_1 { + let is_literal = !reader.read_bool()?; + if is_literal { + // This is a Literal 1 Data Symbol + // The next 8 bits represent the data byte + let c = reader.read_u8(8)?; + // Store c in result buffer + res.push(c); + // Store c in rotating history buffer + history_buffer[history_index] = c; + history_index = (history_index + 1) % 1024; + } else { + // This a Copy Pointer Data Symbol + // We need to read the MCF (Match Count Field) in order + // to find the size of the Match String. + let next_4_bits = reader.peek_u16(4)?; + let size: u16 = if (next_4_bits & 0b1000) == 0 { + reader.skip(2)?; + // If the next bit is 0, then the MCF is `0b00` or `0b01` + // and the size is respectively `2` or `3` + 2 + ((next_4_bits & 0b0100) >> 2) + } else if (next_4_bits & 0b0100) == 0 { + // If the next first bits are 10, then the MCF is `0b10xy` + reader.skip(4)?; + 4 + (next_4_bits & 0b0011) + } else if (next_4_bits & 0b0010) == 0 { + // If the next first bits are 110, then the MCF is `0b110xyz` + reader.skip(3)?; + 8 + reader.read_u16(3)? + } else if (next_4_bits & 0b0001) == 0 { + // If the next first bits are 1110, then the MCF is `0b1110xyza` + reader.skip(4)?; + 16 + reader.read_u16(4)? 
+ } else if (next_4_bits & 0b0001) == 1 { + // Check for reserved and control symbols + reader.skip(4)?; + let next = reader.read_u16(8)?; + if next & 0b11110000 == 0b11110000 { + bail!("Found a Reserved or a Control Symbol instead of a Copy Pointer Data Symbol"); + } + // the MCF is `0b1111 abcdefgh` + 32 + next + } else { + bail!("Found invalid Match Count Field value"); + }; + + let displacement_field = reader.read_u16(10)?; + + for k in displacement_field..displacement_field + size { + // Find c in history_buffer + let c = *history_buffer + .get((k % 1024) as usize) + .context("Index not found in history buffer")?; + // Store c in result buffer + res.push(c); + // Store c in rotating history buffer + history_buffer[history_index] = c; + history_index = (history_index + 1) % 1024; + } + } + } else if scheme_2 { + let c = reader.read_u8(8)?; + if c == 0xff { + reader.skip(1)?; + } + // Store c in result buffer + res.push(c); + // Store c in rotating history buffer + history_buffer[history_index] = c; + history_index = (history_index + 1) % 1024; + } else { + bail!("Could not uncompress data"); + } + } + + bail!("Missing END_OF_RECORD control symbol !"); +} + +// We only need to decompress data. Therefore, compress function +// is not to be implemented for now. 
+// pub fn compress(bytes: &Vec) -> Vec {} + +#[cfg(test)] +mod tests { + use super::*; + use hex::FromHex; + + #[test] + fn decompress_test_string() -> Result<()> { + let test_string_compressed = Vec::from_hex("ffb3a32b9ba1039ba3934b733ffd0000")?; + let test_string = decompress(&test_string_compressed)?; + + assert_eq!(test_string, "test string".as_bytes()); + Ok(()) + } + + #[test] + fn decompress_heartbeat() -> Result<()> { + let heartbeat_compressed = Vec::from_hex("ffabfdfc3c001cc003a00114006e001d80065001b0006f001c281a20001e0006da079bb2019cc003d000880068001d00074a098e8002f000bc0077001de8722ea1e8ce87e6f001c80067a1b8c80030000c28862fa298d68ae73a239868622da0af602845dc1730f8c2739800c748f32d1fcc349d176c7de03072118ef4b3e0270d322b8e29a661001900064a5f99698e73001a690667f08f265f9891665c8da6ea84f98ea3bfc0d01325634e80033754c4c5962f49d618e3756bc63d5afc24473acaf8162387c0d7336d56cc75a137e5293357b45d60b37edadf863543e00680224589f06e196c1b8948c36572a58747de17a4e5553c0818570c99874c39a2c5d91c4c76953659ec6a001cd53702e748b8e6003ad8970fc1cad2e70cab26fcbba2f001080036b6d912dbe42cd522d0011adca30000dedd634a03916dce2db568d2c7246b7f916dde450010edd244b6e8dee2244b8ae14818d99f06d709b0796b71d0009b72bc3f58a60ca0024dc7fb3ac6df9cbcb8eab968069dba3ad60eb7cb3a000ab9a3713163d4c75864ceba66cd4673c8ee3edd1231b78fc0288c651e6ce384cc52a1d9c284a79e397e04620f5377e0de51d290675dfeff2c41b12f721934ae0bf3c29b29e783a8dd9b4cf34bf37e475e1947618302c6920f409463e367fcc06e32569ce97efe8667c0a2617ee593956cf0e2186bdfc91ee4ab614c3bec2d3c5c3cdd68532e41d385884a76073d58a7e24b00d6bafa3706197e7e0509705309bb4b4c35bde01e1d6cbb8f10759be487937c8d7191a5c24836691bdabf17b10c0033b67e2ce22db1f17f10adb635b76e2e81a5b746b713215b8f83f5e4a83508133883f88cb45f0eae738940cf9954cb74116d904a7f15f1c41707c64186608997d4c3f4dc9ca7bc3f504126fd3f12f2ba56439a738c73778ced8646cdd645b68e2e01a51bc83469c0d041a2ee07a1b5194859b648945b20d24f03a10b382f9804e00013f3459cb3caf02c089f1f0648b9b3f15f0d690e36a428ca91e43a4be0721cd45c619b846f527c2498bb88b9b4f16e6120c0d832e24
ff98c2e8001c732e9baf3f8c6be2c0c0c7a93fe06238641e2598a95fd36d7d4df5db3a57a4a16031b54dc5db9aca7c671f813e43cb228eb2f2e1dec47066b2fe232e1d41bb2cc9972dc9a2cba74cba22fb139cebc268b3b9b6c6a73b0dea6c39d91cc3683175a8ce57b7c45073b588cb30f8d2d6e30a2c8bed2276e32ce3b6c39032ae58e762ee2e616dba48d6eb22652fc12037ca7236b7c8d2e0246b8290ae1e39ca3a37ca578c7b1137a252e0f5106d1a6b0014b8baf86c0cb72fd7fd8a287400245d74c3b6a71b2636a215aacc8003cf1d048bc6cb39ef63f371de0dfb5dc116f5b8ea8d97bfc4603e007fd0000e20b0000")?; + let heartbeat = Vec::from_hex("fffe3c0073003a0045006e00760065006c006f0070006500200078006d006c006e0073003a0073003d00220068007400740070003a002f002f007700770077002e00770033002e006f00720067002f0032003000300033002f00300035002f0073006f00610070002d0065006e00760065006c006f00700065002200200078006d006c006e0073003a0061003d00220068007400740070003a002f002f0073006300680065006d00610073002e0078006d006c0073006f00610070002e006f00720067002f00770073002f0032003000300034002f00300038002f00610064006400720065007300730069006e0067002200200078006d006c006e0073003a0065003d00220068007400740070003a002f002f0073006300680065006d00610073002e0078006d006c0073006f00610070002e006f00720067002f00770073002f0032003000300034002f00300038002f006500760065006e00740069006e0067002200200078006d006c006e0073003a0077003d00220068007400740070003a002f002f0073006300680065006d00610073002e0064006d00740066002e006f00720067002f007700620065006d002f00770073006d0061006e002f0031002f00770073006d0061006e002e007800730064002200200078006d006c006e0073003a0070003d00220068007400740070003a002f002f0073006300680065006d00610073002e006d006900630072006f0073006f00660074002e0063006f006d002f007700620065006d002f00770073006d0061006e002f0031002f00770073006d0061006e002e0078007300640022003e003c0073003a004800650061006400650072003e003c0061003a0054006f003e0068007400740070003a002f002f007300720076002e00770069006e0064006f006d00610069006e002e006c006f00630061006c003a0035003900380035002f00770073006d0061006e002f0073007500620073006300720069007000740069006f006e0073002f0042003600420
0440042004200350039002d0046004200300037002d0034004500450035002d0038003400310046002d004500420045004300390044003600370043004400440034002f0031003c002f0061003a0054006f003e003c006d003a004d0061006300680069006e00650049004400200078006d006c006e0073003a006d003d00220068007400740070003a002f002f0073006300680065006d00610073002e006d006900630072006f0073006f00660074002e0063006f006d002f007700620065006d002f00770073006d0061006e002f0031002f006d0061006300680069006e006500690064002200200073003a006d0075007300740055006e006400650072007300740061006e0064003d002200660061006c007300650022003e00770069006e00310030002e00770069006e0064006f006d00610069006e002e006c006f00630061006c003c002f006d003a004d0061006300680069006e006500490044003e003c0061003a005200650070006c00790054006f003e003c0061003a004100640064007200650073007300200073003a006d0075007300740055006e006400650072007300740061006e0064003d002200740072007500650022003e0068007400740070003a002f002f0073006300680065006d00610073002e0078006d006c0073006f00610070002e006f00720067002f00770073002f0032003000300034002f00300038002f00610064006400720065007300730069006e0067002f0072006f006c0065002f0061006e006f006e0079006d006f00750073003c002f0061003a0041006400640072006500730073003e003c002f0061003a005200650070006c00790054006f003e003c0061003a0041006300740069006f006e00200073003a006d0075007300740055006e006400650072007300740061006e0064003d002200740072007500650022003e0068007400740070003a002f002f0073006300680065006d00610073002e0064006d00740066002e006f00720067002f007700620065006d002f00770073006d0061006e002f0031002f00770073006d0061006e002f004800650061007200740062006500610074003c002f0061003a0041006300740069006f006e003e003c0077003a004d006100780045006e00760065006c006f0070006500530069007a006500200073003a006d0075007300740055006e006400650072007300740061006e0064003d002200740072007500650022003e003500310032003000300030003c002f0077003a004d006100780045006e00760065006c006f0070006500530069007a0065003e003c0061003a004d00650073007300610067006500490044003e0075007500690064003a0045004500430030003400460
0370034002d0041003200370044002d0034004300330041002d0041004500460035002d004200430035004200460035003400330035003900420041003c002f0061003a004d00650073007300610067006500490044003e003c0077003a004c006f00630061006c006500200078006d006c003a006c0061006e0067003d00220065006e002d00550053002200200073003a006d0075007300740055006e006400650072007300740061006e0064003d002200660061006c0073006500220020002f003e003c0070003a0044006100740061004c006f00630061006c006500200078006d006c003a006c0061006e0067003d00220065006e002d00550053002200200073003a006d0075007300740055006e006400650072007300740061006e0064003d002200660061006c0073006500220020002f003e003c0070003a00530065007300730069006f006e0049006400200073003a006d0075007300740055006e006400650072007300740061006e0064003d002200660061006c007300650022003e0075007500690064003a00390038003100430035003300300046002d0042004500320041002d0034004100410042002d0042004100430042002d003600460042003400430044003100410031003400410042003c002f0070003a00530065007300730069006f006e00490064003e003c0070003a004f007000650072006100740069006f006e0049004400200073003a006d0075007300740055006e006400650072007300740061006e0064003d002200660061006c007300650022003e0075007500690064003a00450041003200450045003500360036002d0032004300430031002d0034003900410030002d0041003700320036002d004200430045003700440043003300350036004500320032003c002f0070003a004f007000650072006100740069006f006e00490044003e003c0070003a00530065007100750065006e006300650049006400200073003a006d0075007300740055006e006400650072007300740061006e0064003d002200660061006c007300650022003e0031003c002f0070003a00530065007100750065006e0063006500490064003e003c0077003a004f007000650072006100740069006f006e00540069006d0065006f00750074003e0050005400360030002e0030003000300053003c002f0077003a004f007000650072006100740069006f006e00540069006d0065006f00750074003e003c0065003a004900640065006e00740069006600690065007200200078006d006c006e0073003a0065003d00220068007400740070003a002f002f0073006300680065006d00610073002e0078006d006c0073006f00610070002e006f007200670
02f00770073002f0032003000300034002f00300038002f006500760065006e00740069006e00670022003e00320031003900430035003300350033002d0035004600330044002d0034004300440037002d0041003600340034002d004600360042003600390045003500370043003100430031003c002f0065003a004900640065006e007400690066006900650072003e003c0077003a00410063006b005200650071007500650073007400650064002f003e003c002f0073003a004800650061006400650072003e003c0073003a0042006f00640079003e003c0077003a004500760065006e00740073003e003c002f0077003a004500760065006e00740073003e003c002f0073003a0042006f00640079003e003c002f0073003a0045006e00760065006c006f00700065003e00")?; + + assert_eq!(decompress(&heartbeat_compressed)?, heartbeat); + Ok(()) + } +} diff --git a/server/src/soap.rs b/server/src/soap.rs new file mode 100644 index 0000000..2d6ab87 --- /dev/null +++ b/server/src/soap.rs @@ -0,0 +1,781 @@ +use anyhow::{anyhow, bail, ensure, Context, Result}; +use common::utils::new_uuid; +use log::{debug, trace}; +use quick_xml::events::{BytesText, Event}; +use quick_xml::reader::Reader; +use quick_xml::writer::Writer; +use roxmltree::{Document, Node}; +use std::collections::HashMap; +use std::sync::Arc; +use xmlparser::XmlCharExt; + +const SOAP_ENVELOPE_NS: &str = "http://www.w3.org/2003/05/soap-envelope"; +const MACHINE_ID_NS: &str = "http://schemas.microsoft.com/wbem/wsman/1/machineid"; +const ADDRESSING_NS: &str = "http://schemas.xmlsoap.org/ws/2004/08/addressing"; +const WSMAN_NS: &str = "http://schemas.dmtf.org/wbem/wsman/1/wsman.xsd"; +const MS_WSMAN_NS: &str = "http://schemas.microsoft.com/wbem/wsman/1/wsman.xsd"; +const SUBSCRIPTION_NS: &str = "http://schemas.microsoft.com/wbem/wsman/1/subscription"; +const EVENTING_NS: &str = "http://schemas.xmlsoap.org/ws/2004/08/eventing"; +const ENUMERATION_NS: &str = "http://schemas.xmlsoap.org/ws/2004/09/enumeration"; +const POLICY_NS: &str = "http://schemas.xmlsoap.org/ws/2002/12/policy"; +const AUTH_NS: &str = "http://schemas.microsoft.com/wbem/wsman/1/authentication"; +const XSI_NS: 
&str = "http://www.w3.org/2001/XMLSchema-instance"; + +pub const ANONYMOUS: &str = "http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous"; +pub const RESOURCE_EVENT_LOG: &str = "http://schemas.microsoft.com/wbem/wsman/1/windows/EventLog"; +pub const SPNEGO_KERBEROS: &str = + "http://schemas.dmtf.org/wbem/wsman/1/wsman/secprofile/http/spnego-kerberos"; +pub const EVENT_QUERY: &str = "http://schemas.microsoft.com/win/2004/08/events/eventquery"; + +pub const ACTION_EVENTS: &str = "http://schemas.dmtf.org/wbem/wsman/1/wsman/Events"; +pub const ACTION_SUBSCRIBE: &str = "http://schemas.xmlsoap.org/ws/2004/08/eventing/Subscribe"; +pub const ACTION_ENUMERATE: &str = "http://schemas.xmlsoap.org/ws/2004/09/enumeration/Enumerate"; +pub const ACTION_ENUMERATE_RESPONSE: &str = + "http://schemas.xmlsoap.org/ws/2004/09/enumeration/EnumerateResponse"; +pub const ACTION_END: &str = "http://schemas.microsoft.com/wbem/wsman/1/wsman/End"; +pub const ACTION_SUBSCRIPTION_END: &str = + "http://schemas.xmlsoap.org/ws/2004/08/eventing/SubscriptionEnd"; +pub const ACTION_HEARTBEAT: &str = "http://schemas.dmtf.org/wbem/wsman/1/wsman/Heartbeat"; +pub const ACTION_ACK: &str = "http://schemas.dmtf.org/wbem/wsman/1/wsman/Ack"; + +pub trait Serializable { + fn serialize(&self, writer: &mut Writer) -> quick_xml::Result<()>; +} + +#[derive(Debug)] +pub struct Subscription { + pub identifier: String, + pub header: Header, + pub body: SubscriptionBody, +} + +impl Serializable for Subscription { + fn serialize(&self, writer: &mut Writer) -> quick_xml::Result<()> { + writer + .create_element("m:Subscription") + .with_attribute(("xmlns:m", SUBSCRIPTION_NS)) + .write_inner_content(|writer| { + writer + .create_element("m:Version") + .write_text_content(BytesText::new( + format!("uuid:{}", self.identifier).as_str(), + ))?; + writer + .create_element("s:Envelope") + .with_attribute(("xmlns:s", SOAP_ENVELOPE_NS)) + .with_attribute(("xmlns:a", ADDRESSING_NS)) + .with_attribute(("xmlns:e", 
EVENTING_NS)) + .with_attribute(("xmlns:n", ENUMERATION_NS)) + .with_attribute(("xmlns:w", WSMAN_NS)) + .with_attribute(("xmlns:p", MS_WSMAN_NS)) + .write_inner_content(|writer| { + self.header.serialize(writer)?; + self.body.serialize(writer)?; + Ok(()) + })?; + Ok(()) + })?; + Ok(()) + } +} + +#[derive(Debug)] +pub struct SubscriptionBody { + // Heartbeats interval in seconds + pub heartbeat_interval: u64, + pub identifier: String, + pub bookmark: Option, + pub query: String, + pub address: String, + pub connection_retry_interval: u32, + pub connection_retry_count: u16, + pub max_time: u32, + pub max_envelope_size: u32, +} + +impl Serializable for SubscriptionBody { + fn serialize(&self, writer: &mut Writer) -> quick_xml::Result<()> { + writer + .create_element("s:Body") + .write_inner_content(|writer| { + writer + .create_element("e:Subscribe") + .write_inner_content(|writer| { + writer + .create_element("e:EndTo") + .write_inner_content(|writer| { + writer + .create_element("a:Address") + .write_text_content(BytesText::new(&self.address))?; + writer + .create_element("a:ReferenceProperties") + .write_inner_content(|writer| { + writer + .create_element("e:Identifier") + .write_text_content(BytesText::new(&self.identifier))?; + Ok(()) + })?; + Ok(()) + })?; + writer + .create_element("e:Delivery") + .with_attribute(("Mode", ACTION_EVENTS)) + .write_inner_content(|writer| { + writer.create_element("w:Heartbeats").write_text_content( + BytesText::new( + format!("PT{}.000S", &self.heartbeat_interval).as_str(), + ), + )?; + writer.create_element("e:NotifyTo").write_inner_content( + |writer| { + writer + .create_element("a:Address") + .write_text_content(BytesText::new(&self.address))?; + writer + .create_element("a:ReferenceProperties") + .write_inner_content(|writer| { + writer + .create_element("e:Identifier") + .write_text_content(BytesText::new( + &self.identifier, + ))?; + Ok(()) + })?; + writer + .create_element("c:Policy") + .with_attribute(("xmlns:c", 
POLICY_NS)) + .with_attribute(("xmlns:auth", AUTH_NS)) + .write_inner_content(|writer| { + writer + .create_element("c:ExactlyOne") + .write_inner_content(|writer| { + writer + .create_element("c:All") + .write_inner_content(|writer| { + writer + .create_element( + "auth:Authentication", + ) + .with_attribute(( + "Profile", + SPNEGO_KERBEROS, + )) + .write_empty()?; + Ok(()) + })?; + Ok(()) + })?; + Ok(()) + })?; + Ok(()) + }, + )?; + writer + .create_element("w:ConnectionRetry") + .with_attribute(( + "Total", + format!("{}", self.connection_retry_count).as_str(), + )) + .write_text_content(BytesText::new( + format!("PT{}.0S", self.connection_retry_interval).as_str(), + ))?; + writer.create_element("w:MaxTime").write_text_content( + BytesText::new(format!("PT{}.000S", self.max_time).as_str()), + )?; + writer + .create_element("w:MaxEnvelopeSize") + .with_attribute(("Policy", "Notify")) + .write_text_content(BytesText::new( + format!("{}", self.max_envelope_size).as_str(), + ))?; + writer + .create_element("w:Locale") + .with_attribute(("xml:lang", "en-US")) + .with_attribute(("s:mustUnderstand", "false")) + .write_empty()?; + writer + .create_element("p:DataLocale") + .with_attribute(("xml:lang", "en-US")) + .with_attribute(("s:mustUnderstand", "false")) + .write_empty()?; + writer + .create_element("w:ContentEncoding") + .write_text_content(BytesText::new("UTF-16"))?; + Ok(()) + })?; + writer + .create_element("w:Filter") + .with_attribute(("Dialect", EVENT_QUERY)) + .write_inner_content(|writer| { + // Copy filter from "query" field + let mut reader = Reader::from_str(&self.query); + reader.trim_text(true); + loop { + match reader.read_event() { + Ok(Event::Eof) => break, + Ok(e) => writer.write_event(e)?, + _ => (), + }; + } + Ok(()) + })?; + if let Some(bookmark) = &self.bookmark { + writer + .create_element("w:Bookmark") + .write_inner_content(|writer| { + let mut reader = Reader::from_str(bookmark); + reader.trim_text(true); + loop { + match 
reader.read_event() { + Ok(Event::Eof) => break, + Ok(e) => writer.write_event(e)?, + _ => (), + }; + } + Ok(()) + })?; + } + writer.create_element("w:SendBookmarks").write_empty()?; + Ok(()) + })?; + Ok(()) + })?; + Ok(()) + } +} + +#[derive(Debug)] +pub enum OptionSetValue { + String(String), + Boolean(bool), +} + +#[derive(Debug)] +pub struct Header { + to: Option, + resource_uri: Option, + // Hostname of the source + machine_id: Option, + reply_to: Option, + action: Option, + max_envelope_size: Option, + message_id: Option, + // TODO: difference between Locale and DataLocale + // Might be interesting to keep this data if you want to translate things ? + // Locale: String, + // DataLocale: String, + session_id: Option, + operation_id: Option, + sequence_id: Option, + // OperationTimeout: String // Unused ? + + // Responses field + relates_to: Option, + + option_set: HashMap, + // Specific to Events + identifier: Option, + bookmarks: Option, + ack_requested: Option, +} + +impl Header { + fn empty() -> Self { + Header { + to: None, + resource_uri: None, + machine_id: None, + reply_to: None, + action: None, + max_envelope_size: None, + message_id: None, + session_id: None, + operation_id: None, + sequence_id: None, + relates_to: None, + option_set: HashMap::new(), + ack_requested: None, + bookmarks: None, + identifier: None, + } + } + pub fn new( + to: String, + uri: String, + action: String, + max_envelope_size: u32, + message_id: Option, + session_id: Option, + operation_id: Option, + sequence_id: Option, + options: HashMap, + ) -> Self { + Header { + to: Some(to), + resource_uri: Some(uri), + machine_id: None, + reply_to: Some(ANONYMOUS.to_string()), + action: Some(action), + max_envelope_size: Some(max_envelope_size), + message_id: Some(message_id.unwrap_or_else(new_uuid)), + session_id, + operation_id: Some(operation_id.unwrap_or_else(new_uuid)), + sequence_id, + relates_to: None, + option_set: options, + ack_requested: None, + bookmarks: None, + identifier: 
None, + } + } + + /// Get a reference to the header's bookmarks. + pub fn bookmarks(&self) -> Option<&String> { + self.bookmarks.as_ref() + } + + /// Get a reference to the header's identifier. + pub fn identifier(&self) -> Option<&String> { + self.identifier.as_ref() + } +} + +impl Serializable for Header { + fn serialize(&self, writer: &mut Writer) -> quick_xml::Result<()> { + writer + .create_element("s:Header") + .write_inner_content(|writer| { + if let Some(to) = &self.to { + writer + .create_element("a:To") + .write_text_content(BytesText::new(to))?; + } + if let Some(uri) = &self.resource_uri { + writer + .create_element("w:ResourceURI") + .with_attribute(("s:mustUnderstand", "true")) + .write_text_content(BytesText::new(uri))?; + } + if let Some(reply_to) = &self.reply_to { + writer + .create_element("a:ReplyTo") + .write_inner_content(|writer| { + writer + .create_element("a:Address") + .with_attribute(("s:mustUnderstand", "true")) + .write_text_content(BytesText::new(reply_to))?; + Ok(()) + })?; + } + if let Some(action) = &self.action { + writer + .create_element("a:Action") + .with_attribute(("s:mustUnderstand", "true")) + .write_text_content(BytesText::new(action))?; + } + if let Some(max_envelope_size) = &self.max_envelope_size { + writer + .create_element("w:MaxEnvelopeSize") + .with_attribute(("s:mustUnderstand", "true")) + .write_text_content(BytesText::new( + format!("{}", max_envelope_size).as_str(), + ))?; + } + if let Some(message_id) = &self.message_id { + writer + .create_element("a:MessageID") + .write_text_content(BytesText::new(message_id))?; + } + + writer + .create_element("w:Locale") + .with_attribute(("xml:lang", "en-US")) + .with_attribute(("s:mustUnderstand", "false")) + .write_empty()?; + writer + .create_element("p:DataLocale") + .with_attribute(("xml:lang", "en-US")) + .with_attribute(("s:mustUnderstand", "false")) + .write_empty()?; + if let Some(operation_id) = &self.operation_id { + writer + .create_element("p:OperationID") + 
.with_attribute(("s:mustUnderstand", "false")) + .write_text_content(BytesText::new(operation_id))?; + } + if let Some(sequence_id) = self.sequence_id { + writer + .create_element("p:SequenceId") + .with_attribute(("s:mustUnderstand", "false")) + .write_text_content(BytesText::new(format!("{}", sequence_id).as_str()))?; + } + if let Some(relates_to) = &self.relates_to { + writer + .create_element("a:RelatesTo") + .write_text_content(BytesText::new(relates_to))?; + } + if !self.option_set.is_empty() { + writer + .create_element("w:OptionSet") + .with_attribute(("xmlns:xsi", XSI_NS)) + .write_inner_content(|writer| { + for (name, value) in &self.option_set { + match value { + OptionSetValue::String(value) => writer + .create_element("w:Option") + .with_attribute(("Name", name.as_str())) + .write_text_content(BytesText::new(value))?, + OptionSetValue::Boolean(value) => writer + .create_element("w:Option") + .with_attribute(("Name", name.as_str())) + .with_attribute(( + "xsi:nil", + if *value { "true" } else { "false" }, + )) + .write_empty()?, + }; + } + Ok(()) + })?; + } + Ok(()) + })?; + Ok(()) + } +} + +#[derive(Debug)] +pub enum Body { + EnumerateResponse(Vec), + Events(Vec>), +} + +impl Serializable for Body { + fn serialize(&self, writer: &mut Writer) -> quick_xml::Result<()> { + match self { + Body::EnumerateResponse(subscriptions) => { + writer + .create_element("s:Body") + .write_inner_content(|writer| { + writer + .create_element("n:EnumerateResponse") + .write_inner_content(|writer| { + writer + .create_element("n:EnumerationContext") + .write_empty()?; + writer + .create_element("w:Items") + .write_inner_content(|writer| { + for subscription in subscriptions { + subscription.serialize(writer)?; + } + Ok(()) + })?; + writer.create_element("w:EndOfSequence").write_empty()?; + Ok(()) + })?; + Ok(()) + })?; + } + x => { + return Err(quick_xml::Error::UnexpectedToken(format!( + "Can not serialize body of {:?}", + x + ))) + } // anyhow!("Can not serialize body 
of {:?}", x), + } + Ok(()) + } +} + +#[derive(Debug)] +pub struct Message { + header: Header, + pub body: Option, +} + +impl Serializable for Message { + fn serialize(&self, writer: &mut Writer) -> quick_xml::Result<()> { + writer + .create_element("s:Envelope") + .with_attribute(("xml:lang", "en-US")) + .with_attribute(("xmlns:s", SOAP_ENVELOPE_NS)) + .with_attribute(("xmlns:a", ADDRESSING_NS)) + .with_attribute(("xmlns:n", ENUMERATION_NS)) + .with_attribute(("xmlns:w", WSMAN_NS)) + .with_attribute(("xmlns:p", MS_WSMAN_NS)) + .write_inner_content(|writer| { + self.header.serialize(writer)?; + match &self.body { + Some(body) => { + body.serialize(writer)?; + } + _ => { + writer.create_element("s:Body").write_empty()?; + } + } + Ok(()) + })?; + Ok(()) + } +} + +impl Message { + pub fn action(&self) -> Result<&str> { + Ok(self + .header + .action + .as_ref() + .ok_or_else(|| anyhow!("Missing Action in message"))?) + } + + pub fn header(&self) -> &Header { + &self.header + } + + pub fn response_from(message: &Message, action: &str, body: Option) -> Result { + Ok(Message { + header: Header { + to: Some( + message + .header + .reply_to + .as_ref() + .unwrap_or(&ANONYMOUS.to_owned()) + .to_string(), + ), + resource_uri: None, + machine_id: None, + reply_to: None, + action: Some(action.to_owned()), + max_envelope_size: None, + message_id: Some(new_uuid()), + session_id: None, + operation_id: message.header.operation_id.clone(), + sequence_id: Some(1), + relates_to: Some( + message + .header + .message_id + .as_ref() + .ok_or_else(|| anyhow!("Missing Message ID in original message"))? + .clone(), + ), + option_set: HashMap::new(), + ack_requested: None, + bookmarks: None, + identifier: None, + }, + body, + }) + } +} + +fn parse_header(header_node: Node) -> Result
{ + let mut header = Header::empty(); + for node in header_node.children() { + let tag = node.tag_name(); + if tag == (WSMAN_NS, "ResourceURI").into() { + header.resource_uri = node.text().map(String::from); + } + if tag == (MACHINE_ID_NS, "MachineID").into() { + header.machine_id = node.text().map(String::from); + } else if tag == (ADDRESSING_NS, "To").into() { + header.to = node.text().map(String::from); + } else if tag == (ADDRESSING_NS, "ReplyTo").into() { + for reply_to_node in node.children() { + if reply_to_node.tag_name() == (ADDRESSING_NS, "Address").into() { + header.reply_to = reply_to_node.text().map(String::from); + break; + } + } + } else if tag == (ADDRESSING_NS, "Action").into() { + header.action = node.text().map(String::from); + } else if tag == (WSMAN_NS, "MaxEnvelopeSize").into() { + header.max_envelope_size = match node.text() { + Some(text) => Some(text.parse()?), + None => None, + }; + } else if tag == (ADDRESSING_NS, "MessageID").into() { + header.message_id = node.text().map(String::from); + } else if tag == (MS_WSMAN_NS, "SessionId").into() { + header.session_id = node.text().map(String::from); + } else if tag == (MS_WSMAN_NS, "OperationID").into() { + header.operation_id = node.text().map(String::from); + } else if tag == (MS_WSMAN_NS, "SequenceId").into() { + header.sequence_id = match node.text() { + Some(text) => Some(text.parse()?), + None => None, + }; + } else if tag == (WSMAN_NS, "AckRequested").into() { + header.ack_requested = Some(true) + } else if tag == (WSMAN_NS, "Bookmark").into() { + header.bookmarks = Some(String::from( + &node.document().input_text()[node + .first_child() + .ok_or_else(|| anyhow!("No bookmarks!"))? 
+ .range()], + )); + } else if tag == (EVENTING_NS, "Identifier").into() { + header.identifier = node.text().map(String::from) + } + } + Ok(header) +} + +fn parse_body_events(node: Node) -> Result>> { + let mut events = Vec::new(); + ensure!( + node.has_tag_name((SOAP_ENVELOPE_NS, "Body")), + "Invalid Body tag" + ); + for event in node + .first_element_child() + .ok_or_else(|| anyhow!("Malformed Events body"))? + .children() + { + events.push(Arc::new( + event + .text() + .ok_or_else(|| anyhow!("Missing Event body"))? + .to_owned(), + )) + } + Ok(events) +} + +pub fn parse(payload: &str) -> Result { + // This is only used if we need to replace invalid XML characters, but it must + // be declared here because of scope level. + let mut sanitized_payload = String::new(); + let doc = { + let doc_res = Document::parse(payload); + // Some events contain invalid XML characters (such as \u0x5 or \u0x4). + // In that case, we try to replace these bad characters so that + // the XML parsing can succeed. + match doc_res { + Ok(doc) => doc, + Err(roxmltree::Error::ParserError(xmlparser::Error::InvalidCdata( + xmlparser::StreamError::NonXmlChar(c, pos), + _, + ))) => { + debug!("Could not parse payload because of a non-XML character {:?} in CDATA at pos {}. Try to sanitize payload.", c, pos); + trace!("Payload was {:?}", payload); + sanitized_payload.reserve(payload.len()); + for c in payload.chars() { + if c.is_xml_char() { + sanitized_payload.push(c); + } else { + trace!( + "Character '{:?}' has been replaced by the string \"{:?}\"", + c, + c + ); + sanitized_payload.push_str(format!("\\u{{{:x}}}", c as u32).as_ref()); + } + } + Document::parse(&sanitized_payload) + .context("Could not parse SOAP message even with non-XML character removed")? + } + Err(err) => bail!( + "Could not parse SOAP message: {:?}. 
Payload was {:?}", + err, + payload + ), + } + }; + + let root = doc.root_element(); + ensure!( + root.has_tag_name((SOAP_ENVELOPE_NS, "Envelope")), + "Invalid Envelope" + ); + + let mut header_opt: Option
= None; + let mut body_node_opt: Option = None; + for node in root.children() { + let tag = node.tag_name(); + if tag == (SOAP_ENVELOPE_NS, "Header").into() { + header_opt = Some(parse_header(node).context("Failed to parse Header section")?); + } else if tag == (SOAP_ENVELOPE_NS, "Body").into() { + body_node_opt = Some(node) + } + } + + let header = header_opt.ok_or_else(|| anyhow!("Could not parse SOAP headers"))?; + let mut body = None; + + // Parse body depending on Action field + if header + .action + .as_ref() + .ok_or_else(|| anyhow!("Missing Action header"))? + == ACTION_EVENTS + { + body = Some(Body::Events( + parse_body_events( + body_node_opt.ok_or_else(|| anyhow!("Missing Body for Events message"))?, + ) + .context("Failed to parse Body section for Events action")?, + )); + } + + Ok(Message { header, body }) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn non_xml_char() { + let payload = " + + http://srv.windomain.local:5985/wsman/subscriptions/B6BDBB59-FB07-4EE5-841F-EBEC9D67CDD4/1 + win10.windomain.local + + + http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous + + http://schemas.dmtf.org/wbem/wsman/1/wsman/Events + 512000 + uuid:31652DEB-C9E8-45D6-B3E8-90AC64D48422 + + + uuid:981C530F-BE2A-4AAB-BACB-6FB4CD1A14AB + uuid:C7F39CB2-8FFD-4DA3-A111-CDB303EEA098 + 1 + PT60.000S + 219C5353-5F3D-4CD7-A644-F6B69E57C1C1 + + + + + + + + + + 25404000x4000000000000026149141Microsoft-Windows-WinRM/Operationalwin10.windomain.localActivity TransferInformationInfoMicrosoft-Windows-WinRM/OperationalMicrosoft-Windows-Windows Remote ManagementClientServerActivity Transfer]]> + + "; + assert!(Document::parse(payload).is_err()); + let doc = parse(payload).unwrap(); + match doc.body { + Some(Body::Events(events)) => { + println!("{:?}", events); + assert_eq!(1, events.len()); + let event = events[0].clone(); + assert_eq!(event, Arc::new("25404000x4000000000000026149141Microsoft-Windows-WinRM/Operationalwin10.windomain.localActivity 
TransferInformationInfoMicrosoft-Windows-WinRM/OperationalMicrosoft-Windows-Windows Remote ManagementClientServerActivity Transfer".to_owned())); + } + _ => panic!("Wrong body type"), + } + } +} diff --git a/server/src/subscription.rs b/server/src/subscription.rs new file mode 100644 index 0000000..ae74a60 --- /dev/null +++ b/server/src/subscription.rs @@ -0,0 +1,275 @@ +use anyhow::Result; +use common::{ + database::Db, + subscription::{SubscriptionData, SubscriptionOutput}, +}; +use itertools::Itertools; +use log::{debug, error, info, warn}; +use std::{ + collections::{HashMap, HashSet}, + sync::{Arc, RwLock}, + time::Duration, +}; +use tokio::{ + signal::unix::{signal, SignalKind}, + sync::oneshot, + time, +}; + +use crate::{ + formatter::Format, + output::Output, + outputs::{file::OutputFile, kafka::OutputKafka, tcp::OutputTcp}, +}; + +pub struct Subscription { + data: SubscriptionData, + outputs: Vec>>, + formats: HashSet, +} + +impl TryFrom for Subscription { + type Error = anyhow::Error; + fn try_from(data: SubscriptionData) -> Result { + let mut formats: HashSet = HashSet::new(); + for output in data.outputs() { + formats.insert(output.format().into()); + } + let mut subscription = Subscription { + data, + outputs: Vec::new(), + formats, + }; + subscription.init()?; + Ok(subscription) + } +} + +impl Subscription { + /// Get a reference to the subscription's uuid. + pub fn uuid(&self) -> &str { + self.data.uuid() + } + + /// Get a reference to the subscription's version. + pub fn version(&self) -> &str { + self.data.version() + } + + /// Get a reference to the subscription's outputs. 
+ pub fn outputs(&self) -> &[Arc>] { + self.outputs.as_ref() + } + + fn init(&mut self) -> Result<()> { + // Initialize outputs + for output_data in self.data.outputs() { + if output_data.is_enabled() { + self.outputs.push(match output_data { + SubscriptionOutput::Files(format, config, _) => { + Arc::new(Box::new(OutputFile::new(Format::from(format), config))) + } + SubscriptionOutput::Kafka(format, config, _) => { + Arc::new(Box::new(OutputKafka::new(Format::from(format), config)?)) + } + SubscriptionOutput::Tcp(format, config, _) => { + Arc::new(Box::new(OutputTcp::new(Format::from(format), config)?)) + } + }); + } + } + Ok(()) + } + + pub fn data(&self) -> &SubscriptionData { + &self.data + } + + pub fn formats(&self) -> &HashSet { + &self.formats + } +} + +pub type Subscriptions = Arc>>>; + +pub async fn reload_subscriptions_task( + db: Db, + subscriptions: Subscriptions, + interval: u64, + mut task_exit_rx: oneshot::Receiver>, +) { + let mut interval = time::interval(Duration::from_secs(interval)); + let sig_opt = match signal(SignalKind::hangup()) { + Ok(sig) => Some(sig), + Err(e) => { + warn!("Could not listen to SIGHUP: {:?}", e); + None + } + }; + + if let Some(mut sig) = sig_opt { + loop { + tokio::select! 
{ + _ = interval.tick() => { + debug!("Update subscriptions from db (interval tick)"); + if let Err(e) = reload_subscriptions(db.clone(), subscriptions.clone()).await { + warn!("Failed to update subscriptions on interval tick: {:?}", e); + continue; + } + }, + _ = sig.recv() => { + debug!("Update subscriptions from db (signal)"); + if let Err(e) = reload_subscriptions(db.clone(), subscriptions.clone()).await { + warn!("Failed to update subscriptions on SIGHUP: {:?}", e); + continue; + } + } + sender = &mut task_exit_rx => { + info!("Exit task reload_subscriptions"); + match sender { + Ok(sender) => { + if let Err(e) = sender.send(()) { + error!("Failed to respond to kill order: {:?}", e); + } + }, + Err(e) => { + error!("Could not respond to kill order: {:?}", e); + } + } + break; + } + } + } + } else { + loop { + tokio::select! { + _ = interval.tick() => { + debug!("Update subscriptions from db (interval tick)"); + if let Err(e) = reload_subscriptions(db.clone(), subscriptions.clone()).await { + warn!("Failed to update subscriptions on interval tick: {:?}", e); + continue; + } + }, + sender = &mut task_exit_rx => { + info!("Exit task reload_subscriptions"); + match sender { + Ok(sender) => { + if let Err(e) = sender.send(()) { + error!("Failed to respond to kill order: {:?}", e); + } + }, + Err(e) => { + error!("Could not respond to kill order: {:?}", e); + } + } + break; + } + } + } + } +} + +async fn reload_subscriptions(db: Db, mem_subscriptions: Subscriptions) -> Result<()> { + let db_subscriptions = db.get_subscriptions().await?; + + let mut active_subscriptions: HashSet = HashSet::with_capacity(db_subscriptions.len()); + + // Take a write lock other subscriptions + // This will be release at the end of the function + let mut mem_subscriptions = mem_subscriptions.write().unwrap(); + + for subscription_data in db_subscriptions { + let version = subscription_data.version(); + + if !subscription_data.is_active() { + debug!( + "Subscription {} is disabled or have 
no enabled outputs", + subscription_data.uuid() + ); + continue; + } + + active_subscriptions.insert(version.to_string()); + + // Update the in memory representation of this subscription if necessary + match mem_subscriptions.get(version) { + Some(_) => { + // This subscription has not been changed. Nothing to do + } + None => { + debug!( + "Subscription version {} not found in the in memory subscriptions", + version + ); + // The version of this subscription does not exist in the in-memory + // subscriptions HashMap. This may happen in 2 situations: + // 1. This is a new subscription. We must add it to the in-memory subscriptions. + // 2. The subscription has been updated. We must remove the old subscription and add the new one to the + // in memory subscriptions. + + // `subscription.uuid()` stays the same after an update + let old_subscription = { + mem_subscriptions + .values() + .find(|old_subscription| { + subscription_data.uuid() == old_subscription.uuid() + }) + .cloned() + }; + + if let Some(old_subscription) = old_subscription { + info!("Subscription {} has been updated", subscription_data.uuid()); + mem_subscriptions.remove(old_subscription.version()); + } else { + info!( + "New subscription {} has been created", + subscription_data.uuid() + ); + } + + // Initialize the new subscription and add it to in-memory subscriptions + let new_subscription = Arc::new(Subscription::try_from(subscription_data.clone())?); + mem_subscriptions.insert(version.to_owned(), new_subscription); + } + } + } + + debug!("Active subscriptions are: {:?}", active_subscriptions); + + // Make a list of subscriptions that need to be removed from in-memory subscriptions + // These subscriptions have been disabled or deleted + let mut to_delete = HashSet::new(); + for version in mem_subscriptions.keys() { + if !active_subscriptions.contains(version) { + debug!("Mark {} as 'to delete'", version); + to_delete.insert(version.to_string()); + } + } + + // Remove listed subscriptions + for 
version in to_delete { + info!( + "Remove subscription {} from in memory subscriptions", + version + ); + mem_subscriptions.remove(&version); + } + + if mem_subscriptions.is_empty() { + warn!("There are no active subscriptions!"); + } else { + info!( + "Active subscriptions are: {}", + mem_subscriptions + .iter() + .map(|(_, subscription)| format!( + "\"{}\" ({})", + subscription.data.name(), + subscription.data.uuid() + )) + .join(", ") + ); + } + + Ok(()) +}