diff --git a/.env-example b/.env-example index 8a7f0c7..17a572d 100644 --- a/.env-example +++ b/.env-example @@ -2,3 +2,7 @@ # These should be updated to more secure values outside of testing environments. GF_SECURITY_ADMIN_USER=admin GF_SECURITY_ADMIN_PASSWORD=admin + +# Optional Parameters Required for metrics export to F5 DataFabric +SENSOR_SECRET_TOKEN="YOUR_TOKEN" +SENSOR_ID="YOUR_ID" \ No newline at end of file diff --git a/.github/workflows/deploy-pages.yml b/.github/workflows/deploy-pages.yml new file mode 100644 index 0000000..202dec3 --- /dev/null +++ b/.github/workflows/deploy-pages.yml @@ -0,0 +1,26 @@ +name: GitHub Pages +run-name: ${{ github.actor }} is running this workflow +on: + push: + tags: + - 'v[0-9]+.[0-9]+.[0-9]+' +jobs: + # Single deploy job no building + deploy: + environment: + name: github-pages + url: ${{steps.deployment.outputs.page_url}} + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: Setup Pages + uses: actions/configure-pages@v3 + - name: Upload Artifact + uses: actions/upload-pages-artifact@v2 + with: + # upload pages directory + path: './pages' + - name: Deploy to GitHub Pages + id: deployment + uses: actions/deploy-pages@v2 \ No newline at end of file diff --git a/README.md b/README.md index 1c4add2..aebe7c7 100644 --- a/README.md +++ b/README.md @@ -1,11 +1,16 @@ # Application Study Tool +> ๐Ÿšจ๐Ÿšจ**Notice**๐Ÿšจ๐Ÿšจ +> +> Configuration for the Application Study Tool has changed significantly in the v0.6.0 release. To +update a legacy configuration, see [docs/config_migration.md](docs/config_migration.md). + ## Overview The Application Study Tool is intended to provide enhanced insights into (classic) BIG-IP products, leveraging best in class open source telemetry tools. The full installation includes: -* Custom Instance of OpenTelemetry Collector with enhanced BIG-IP data receivers (data fetched via iControlRest). 
+* Custom Instance of OpenTelemetry Collector with enhanced BIG-IP data receivers (data fetched via iControlRest) [Full List of Metrics Collected](docs/receiver_metrics.md). * Prometheus timeseries database for storing and querying collected data. * Grafana Instance with pre-configured dashboards for quick insights at the device and "fleet" levels. @@ -34,42 +39,102 @@ cd application-study-tool cp .env-example .env # Edit the following file with device secrets as required (see "Configure Device Secrets" below) cp .env.device-secrets-example .env.device-secrets -# Edit the config file with device / connection info (see "Configure Devices To Scrape" below) -vi ./config/big-ips.json +# Edit the default settings for your environment as required +# (see "Configure Default Device Settings" below) +vi ./config/ast_defaults.yaml +# Edit the config file with device / connection info +# (see "Configure Devices To Scrape" below) +vi ./config/bigip_receivers.yaml +# Run the configuration generator +docker run --rm -it -w /app -v ${PWD}:/app --entrypoint /app/src/bin/init_entrypoint.sh python:3.12.6-slim-bookworm --generate-config # Start the tool docker-compose up ``` -### Configure Devices To Scrape -Application Study Tool includes an init container which builds an OpenTelemetry -Collector Configuration file based on a provided list of BIG-IPs in JSON format. - -Edit config/big-ips.json to reflect your list of BIG-IPs and their access credentials: -```json -[ - { - // Set this to the management IP for the device. This must be - // reachable from the Application Study Tool host. - "endpoint": "https://10.0.0.1", - // Set this to the desired account's user name - "username": "admin", - // This field tells the collector the name of an environment variable - // which contains the password for the device. - // This field does not contain the password itself. 
- "password_env_ref": "BIGIP_PASSWORD_1", - // Secure TLS communication requires mounting the certificate bundle - // used to sign the BigIP certificates. Though not recommended, in the - // case of self-signed certificates or for testing purposes, you can skip - // this check by setting this field to true. - "tls_insecure_skip_verify": false, - // The path to a CA File used to validate BIG-IP certificates. This is required - // if tls_insecure_skip_verify is set to false. See below for details. - "ca_file": "", - } -] -``` - -### Configure Device Secrets +## Configuration + +For additional configuration management background, see +[docs/config-management.md](docs/config-management.md). +The below assumes you're using the config_helper script for assisted management. + + +Application Study Tool config management relies on default configs in +[/configs/ast_defaults.yaml](/configs/ast_defaults.yaml) and device specific information in +[/configs/bigip_receivers.yaml](/configs/bigip_receivers.yaml). + +Settings in the bigip_receivers.yaml override those in ast_defaults.yaml. + +To update a legacy (pre v0.6.0) configuration, to the new scheme see +[docs/config_migration.md](docs/config_migration.md) + +## Configure Default Device Settings + +Edit config/ast_defaults.yaml to reflect common values for your BIG-IPs: +```yaml +# These configs are applied to each entry in the bigip_receivers file +# where they don't contain an equivalent / overriding entry. +bigip_receiver_defaults: + # The time to wait between metric collection runs + collection_interval: 60s + # The username to login to the device with + username: admin + # The password (not recommended) or a reference to an env variable (recommended) + # Below tells the collector to look for an environment variable named + # BIGIP_PASSWORD_1 + password: "${env:BIGIP_PASSWORD_1}" + # The data_types that should be enabled or disabled. 
+ # DNS and GTM are disabled by default and users can enable those modules + # on all devices by setting the below to true. + # A full list of data_types is in /docs/receiver_readme.md. + data_types: + f5.dns: + enabled: false + f5.gtm: + enabled: false + # The TLS settings to use. Either a CA file must be specified or + # insecure_skip_verify set to true (not recommended). + tls: + # Secure TLS communication requires mounting the certificate bundle + # used to sign the BigIP certificates. Though not recommended, in the + # case of self-signed certificates or for testing purposes, you can skip + # this check by setting this field to true. + insecure_skip_verify: false + # The path to a CA File used to validate BIG-IP certificates. This is required + # if tls_insecure_skip_verify is set to false. See below for details. + ca_file: "" +``` + +## Configure Devices To Scrape +Edit the device list in config/bigip_receivers.yaml: +```yaml +#### Values not explicitly configured here inherit values in ast_defaults.yaml. +#### Each entry must have a unique name, starting with bigip/ +#### (e.g. bigip/1, bigip/2) +bigip/1: + #### Endpoint must be specified for each device + #### because there's no rational default. + #### Set this to the management IP for the device. This must be + #### reachable from the Application Study Tool host. + endpoint: https://10.0.0.1 + #### Override some default settings with device specific values + username: SOME_OVERRIDE_ACCOUNT_NAME + password: "${SOME_OTHER_ENV_VAR_WITH_ANOTHER_PASSWORD}" + #### Everything commented out here gets the value from default + # collection_interval: 30s + # data_types: + # f5.dns: + # enabled: false + # f5.gtm: + # enabled: false + # tls: + # insecure_skip_verify: true + # ca_file: +bigip/2: + endpoint: https://10.0.0.2 +``` + + +## Configure Device Secrets The application study tool default configuration relies on environment variables which contain device access credentials. 
There are a number of ways to manage and inject secrets into a container environment (modifications to the docker-compose file @@ -84,12 +149,78 @@ BIGIP_PASSWORD_2=bar-foo123! ``` The variable name (the part on the left of the equal sign) must match the configured -value for the devices that use this password in config/big-ips.json. +value for the devices that use this password in config/ast_defaults.yaml or device specific +config in config/bigip_receivers.yaml. In the following example, bigip/1 uses BIGIP_PASSWORD_1 +from the defaults and bigip/2 uses BIGIP_PASSWORD_2 from the device settings: + +``` +############################## +## config/ast_defaults.yaml +############################## + +bigip_receiver_defaults: +  ... +  password: "${env:BIGIP_PASSWORD_1}" +  ... + +############################## +## config/bigip_receivers.yaml +############################## + +# This gets the default "${env:BIGIP_PASSWORD_1}" +bigip/1: +  endpoint: https://10.0.0.1 -#### Account Permissions +# This overrides it with "${env:BIGIP_PASSWORD_2}" +bigip/2: +  endpoint: https://10.0.0.1 +  password: ${env:BIGIP_PASSWORD_2} + +``` + + +## Configure Periodic Metric Data Export To F5 +The application study tool can be configured to periodically (every 5 minutes) export a snapshot of your +BigIP metrics to F5. Contact your F5 Sales Representative for a "Sensor ID" (a unique string used to associate +your metrics with your Organization) and a "Sensor Secret Token" (used to authenticate to the F5 Datafabric as +an authorized data sender for your Org). + +This functionality is enabled as follows: + +1. Enable the flag in [config/ast_defaults.yaml](config/ast_defaults.yaml) file as follows: + +```yaml +# Set this true to enable periodic metric export to F5 DataFabric. +# Requires adding your sensor ID and secret token to the container environment (see .env-example). +# Contact your F5 sales rep to obtain the ID / secret token. +f5_data_export: true +``` + +2. 
Add the Sensor ID and Secret Token to the .env file, or otherwise attach it to the Opentelemetry Collector container +as SENSOR_ID and SENSOR_SECRET_TOKEN (see [.env-example](./.env-example) for example). + +3. Run the configuration helper script (see below). + +## Run The Configuration Helper +The config helper script can be run natively or via docker to merge the default and device +level configs into the final OTEL Collector config as follows: +```shell +# Run the configuration generator +docker run --rm -it -w /app -v ${PWD}:/app --entrypoint /app/src/bin/init_entrypoint.sh python:3.12.6-slim-bookworm --generate-config +``` + +This will write 2 new files in the services/otel_collector directory: + +* `receivers.yaml` - The final list of scraper configs and their settings. +* `pipelines.yaml` - The final pipeline configs that map receivers to output destinations +(prometheus, and optionally F5). + +## Account Permissions The vast majority of telemetry data can be collected with read-only access to the BigIP. Some -granular stats are only available as output to a iControl Rest 'bash' shell command, and these require -read-write access. If a read-only account is used, the following metrics are unavailable: +granular stats are only available as output to an iControl Rest 'bash' shell command, and +these require read-write access. + +If a read-only account is used, the following metrics are unavailable: ``` f5_virtual_server_profile_client_ssl_connection_count{} @@ -117,9 +248,10 @@ f5_plane_cpu_utilization_5s{} This will impact data output in several dashboards/panels (denoted with description fields indicating as such). ### Configure CA File -AST expects a valid TLS cert bundle unless `tls_insecure_skip_verify` is +AST expects a valid TLS cert bundle unless `tls.insecure_skip_verify` is set to true for each device. 
In order to mount and use your CA file, you must -configure the docker-compose.yaml file in this directory, and set the `ca_file` parameter to the resulting path. Example: +configure the docker-compose.yaml file in this directory, and set the `ca_file` parameter to +the resulting path. Example: docker-compose.yaml: ```yaml @@ -127,29 +259,24 @@ docker-compose.yaml: otel-collector: ... volumes: - - otel_collector:/etc/otel-collector-config + - ./services/otel_collector:/etc/otel-collector-config - ./config/ca_bundle.pem:/etc/ssl/ca_bundle.pem ``` -big-ips.json: -```json -[ - { // device 1 - ... - "ca_file": "/etc/ssl/ca_bundle.pem", - }, - { // device 2 - ... - "ca_file": "/etc/ssl/ca_bundle.pem", - }, -] +config/ast_defaults.yaml (or the tls section of each device in config/bigip_receivers.yaml): +```yaml +bigip_receiver_defaults: + ... + tls: + insecure_skip_verify: false + ca_file: "/etc/ssl/ca_bundle.pem" ``` -The configuration parameter `tls_insecure_skip_verify` defaults to false. Installers - that would like to opt-in to run in an insecure TLS mode must set - `tls_insecure_skip_verify: true` for each BIG-IP in the config array and understand - that the connection between the OTEL collector and the BIG-IP does not have secure - TLS termination. +The configuration parameter `tls.insecure_skip_verify` defaults to false. Installers +who would like to opt-in to run in an insecure TLS mode must set +`tls.insecure_skip_verify: true` and understand +that the connection between the OTEL collector and the BIG-IP does not have secure +TLS termination. 
### Configure Grafana The Grafana instance can be configured via environment variables using their standard diff --git a/VERSION b/VERSION index 48080b4..e07d136 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -v0.5.0 \ No newline at end of file +v0.6.0 \ No newline at end of file diff --git a/_config.yml b/_config.yml new file mode 100644 index 0000000..911bafd --- /dev/null +++ b/_config.yml @@ -0,0 +1 @@ +markdown: kramdown \ No newline at end of file diff --git a/config/README.md b/config/README.md new file mode 100644 index 0000000..8116e83 --- /dev/null +++ b/config/README.md @@ -0,0 +1,8 @@ +# Configuration + +Files in this directory can be used to configure aspects of the Application Study Tool. + +For additional detail, see: + +* [Configuration Management](/docs/config_management.md) (new users start here) +* [Pre v0.6.0 Config Migration](/docs/config_migration.md) \ No newline at end of file diff --git a/config/ast_defaults.yaml b/config/ast_defaults.yaml new file mode 100644 index 0000000..c4df056 --- /dev/null +++ b/config/ast_defaults.yaml @@ -0,0 +1,57 @@ +# These configs are applied to each entry in the bigip_receivers file +# where they don't contain an equivalent / overriding entry. +bigip_receiver_defaults: + # The time to wait between metric collection runs + collection_interval: 60s + # The username to login to the device with + username: admin + # The password (not recommended) or a reference to an env variable (recommended, shown) + # Below tells the collector to look for an environment variable named BIGIP_PASSWORD_1 + password: "${env:BIGIP_PASSWORD_1}" + # The data_types that should be enabled or disabled. DNS and GTM users can enable those modules + # by setting the below to true. These will apply to all devices and may be better specified on the + # per-reciever settings file below. + data_types: + f5.dns: + enabled: false + f5.gtm: + enabled: false + # The TLS settings to use. 
Either a CA file must be specified or insecure_skip_verify + # set to true (not recommended) + tls: + insecure_skip_verify: false + ca_file: "" + +# Set to true to enable periodic metric export to F5 DataFabric. +# Requires adding your Sensor ID and secret token to the container environment (see .env-example). +# Contact your F5 sales rep to obtain the ID / secret token. +f5_data_export: false + + +# Most people should not need to modify settings below this line + + +# The default local pipeline to use if one isn't specified in the per-device configs. +pipeline_default: metrics/local +# The default pipeline to use if metric export to F5 is enabled (if f5_data_export.sensor_id field above is set) +f5_pipeline_default: metrics/f5-datafabric + +pipelines: + + # These pipeline configs are written to the OTEL config after having the configured receivers + # added to the dictionary in accordance with the "pipeline_default" field above and "pipeline" + # field on the per-receiver config file. Otel Collector documentation explains the syntax in more + # detail. + metrics/local: + #receivers list are generated via the config helper script + processors: [batch] + exporters: [otlphttp/metrics-local, debug/bigip] + + # These pipeline configs are written to the OTEL config after having the configured receivers + # added to the dictionary in accordance with the "f5_pipeline_default" field above and "f5_pipeline" + # field on the per-receiver config file. Otel Collector documentation explains the syntax in more + # detail. 
+ metrics/f5-datafabric: + #receivers list are generated via the config helper script + processors: [batch, interval/f5-datafabric, attributes/f5-datafabric] + exporters: [otlp/f5-datafabric, debug/bigip] \ No newline at end of file diff --git a/config/bigip_receivers.yaml b/config/bigip_receivers.yaml new file mode 100644 index 0000000..f2beb84 --- /dev/null +++ b/config/bigip_receivers.yaml @@ -0,0 +1,23 @@ +# Your bigip targets +# Values not explicitly configured here inherit values in +# the ast_defaults.yaml bigip_receiver_defaults section. +# Each entry must have a unique name, starting with bigip/ +# (e.g. bigip/1, bigip/2) +bigip/1: + # Endpoint must be specified for each device + # Set this to the management IP for the device. This must be + # reachable from the Application Study Tool host. + endpoint: https://10.0.0.1 + ## Uncommenting any of the following lines will override the defaults in + ## ast_defaults.yaml bigip_receiver_defaults section. + # username: SOME_OVERRIDE_ACCOUNT_NAME + # password: "${SOME_OTHER_ENV_VAR_WITH_ANOTHER_PASSWORD}" + # collection_interval: 30s + # data_types: + # f5.dns: + # enabled: false + # f5.gtm: + # enabled: false + # tls: + # insecure_skip_verify: true + # ca_file: \ No newline at end of file diff --git a/docker-compose.yaml b/docker-compose.yaml index 28044fc..497b887 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -2,7 +2,6 @@ version: '3' volumes: prometheus: - otel_collector: grafana: services: @@ -27,33 +26,19 @@ services: networks: - 7lc_network - otel-init: - image: python:3.12.4-slim - env_file: - - ".env" - volumes: - - otel_collector:/app/otel-collector-config - - ./config/big-ips.json:/app/config.json - - ./slc/bin/generate_otel_config.py:/app/generate_otel_config.py - - ./slc/bin/init_entrypoint.sh:/app/init_entrypoint.sh - - ./requirements.txt:/app/requirements.txt - - ./slc/data/templates:/app/templates - command: ["/bin/sh", "/app/init_entrypoint.sh"] - otel-collector: - image: 
ghcr.io/f5devcentral/application-study-tool/otel_custom_collector:v0.5.0 + image: ghcr.io/f5devcentral/application-study-tool/otel_custom_collector:v0.6.0 restart: unless-stopped volumes: - - otel_collector:/etc/otel-collector-config - command: ["--config=/etc/otel-collector-config/config.yaml"] + - ./services/otel_collector:/etc/otel-collector-config + command: + - "--config=/etc/otel-collector-config/defaults/default-config.yaml" + - "--config=/etc/otel-collector-config/defaults/bigip-scraper-config.yaml" env_file: - ".env" - ".env.device-secrets" networks: - 7lc_network - depends_on: - otel-init: - condition: service_completed_successfully grafana: image: grafana/grafana:11.2.0 @@ -61,7 +46,6 @@ services: restart: unless-stopped ports: - 3000:3000 - - 4317:4317 volumes: - grafana:/var/lib/grafana - ./services/grafana/provisioning/:/etc/grafana/provisioning diff --git a/docs/config_management.md b/docs/config_management.md new file mode 100644 index 0000000..d0ea36a --- /dev/null +++ b/docs/config_management.md @@ -0,0 +1,201 @@ +# AST Configuration Management Detail + +## Config Management Options +In the post v0.6.0 management scheme, users can choose from one of the below options to manage +the AST Otel Collector configs: + +1. Using the [/src/config_helper.py](/src/config_helper.py) script to generate full Otel Collector +Config files from a small set of configuration for each device plus a set of defaults (recommended for +most users, and includes migration path from old big-ips.json configs) + +2. Manual maintenance of the Otel Collector Config files in +[/services/otel_collector/pipelines.yaml](/services/otel_collector/pipelines.yaml) +and [/services/otel_collector/receivers.yaml](/services/otel_collector/receivers.yaml) + +3. 
Manual maintenance of the Otel Collector Config files in +[/services/otel_collector/defaults](/services/otel_collector/defaults) + +### Using config_helper.py (Recommended For Most Users) +With the included python script in [/src/config_helper.py](/src/config_helper.py), AST collector +configuration is managed through 2 primary files: + +1. A default file which contains settings that should be applied for each BigIP unless overriden (see below) +in [/config/ast_defaults.yaml](/config/ast_defaults.yaml): + +```yaml +# These configs are applied to each entry in the bigip_receivers file +# where they don't contain an equivalent / overriding entry. +bigip_receiver_defaults: + # The time to wait between metric collection runs + collection_interval: 60s + # The username to login to the device with + username: admin + # The password (not recommended) or a reference to an env variable (recommended) + # Below tells the collector to look for an environment variable named BIGIP_PASSWORD_1 + password: "${env:BIGIP_PASSWORD_1}" + # The data_types that should be enabled or disabled. DNS and GTM users can enable those modules + # by setting the below to true. These will apply to all devices and may be better specified on the + # per-reciever settings file below. + data_types: + f5.dns: + enabled: false + f5.gtm: + enabled: false + # The TLS settings to use. Either a CA file must be specified or insecure_skip_verify + # set to true (not recommended) + tls: + insecure_skip_verify: false + ca_file: "" + + +# Set to true to enable periodic metric export to F5 DataFabric. +# Requires adding your Sensor ID and secret token to the container environment (see .env-example). +# Contact your F5 sales rep to obtain the ID / secret token. +f5_data_export: false + + +# Most people should not need to modify settings below this line + +# The default local pipeline to use if one isn't specified in the per-device configs. 
+pipeline_default: metrics/local +# The default pipeline to use if metric export to F5 is enabled (if f5_data_export.sensor_id field above is set) +f5_pipeline_default: metrics/f5-datafabric + +pipelines: + + # These pipeline configs are written to the OTEL config after having the configured receivers + # added to the dictionary in accordance with the "pipeline_default" field above and "pipeline" + # field on the per-receiver config file. Otel Collector documentation explains the syntax in more + # detail. + metrics/local: + #receivers list are generated via the config helper script + processors: [batch] + exporters: [otlphttp/metrics-local, debug/bigip] + + # These pipeline configs are written to the OTEL config after having the configured receivers + # added to the dictionary in accordance with the "f5_pipeline_default" field above and "f5_pipeline" + # field on the per-receiver config file. Otel Collector documentation explains the syntax in more + # detail. + metrics/f5-datafabric: + #receivers list are generated via the config helper script + processors: [batch, interval/f5-datafabric, attributes/f5-datafabric] + exporters: [otlp/f5-datafabric, debug/bigip] +``` + +2. A file which contains settings that should override (or have no default) at the individual bigip level +in [/config/bigip_receivers.yaml](/config/bigip_receivers.yaml): +```yaml +# This file contains the list of BigIP receivers (scrape jobs). +# Each item must have a unique key (e.g. bigip/1, bigip/2, etc). +# Values not explicitly configured here inherit values in ast_defaults.yaml. +bigip/1: + # Endpoint must be specified for each device because there's no rational default. + endpoint: https://10.0.0.1 + ## Pipeline is used to tell the config_helper script which pipeline to attach it to. 
+ ## Most users shouldn't configure this (and it will inherit from the value in ast_defaults.yaml) + # pipeline: metrics/bigip + ## Anything below here could be uncommented to override the default value + # collection_interval: 30s + username: SOME_OVERRIDE_ACCOUNT_NAME + password: "${SOME_OTHER_ENV_VAR_WITH_ANOTHER_PASSWORD}" + # data_types: + # f5.dns: + # enabled: false + # f5.gtm: + # enabled: false + # tls: + # insecure_skip_verify: false + # ca_file: +``` + +When the config_helper script is run with the --generate-configs option, 2 new files are written out +to the [/services/otel_collector](/services/otel_collector) directory: + +1. The first contains the OTEL Collector pipelines configuration [/services/otel_collector/pipelines.yaml](/services/otel_collector/pipelines.yaml) which is basically the contents of the default config pipelines section plus the list of receivers (bigip scrape jobs): +```yaml +metrics/bigip: + exporters: + - otlphttp/metrics-bigip + - debug/bigip + processors: + - batch + receivers: + # This was inserted here because the pipeline / default_pipeline for this device was + # set to "metrics/bigip" + - bigip/1 +``` + +2. The second contains the OTEL Collector receivers configuration +[/services/otel_collector/receivers.yaml](/services/otel_collector/receivers.yaml) +which is the merged contents of the default config settings and the per-device settings: +```yaml +bigip/1: + collection_interval: 30s + data_types: + f5.dns: + enabled: false + f5.gtm: + enabled: false + endpoint: https://10.0.0.1 + password: ${SOME_OTHER_ENV_VAR_WITH_ANOTHER_PASSWORD} + tls: + ca_file: "" + insecure_skip_verify: false + username: SOME_OVERRIDE_ACCOUNT_NAME +``` + +When the OTEL container is run, the default configs in +[/services/otel_collector/defaults/](/services/otel_collector/defaults/) merge these files into +the final configuration the OTEL Collector needs to run correctly. 
+ +### Manual Maintenance Of Receiver and Pipeline Files +The files mentioned above can be managed directly by users if they want to skip the config_helper +script in favor of their own automation / templating. In this case, you just need to update the files: + +* [/services/otel_collector/receivers.yaml](/services/otel_collector/receivers.yaml) +* [/services/otel_collector/pipelines.yaml](/services/otel_collector/pipelines.yaml) + +These are mapped into the final OTEL Collector config via the "file" directives in the +receivers and services.pipelines section of the +[/services/otel_collector/defaults/bigip-scraper-config.yaml]() file: + +``` +receivers: ${file:/etc/otel-collector-config/receivers.yaml} + +processors: + batch: + +exporters: + otlphttp/metrics-bigip: + endpoint: http://prometheus:9090/api/v1/otlp + debug/bigip: + verbosity: basic + sampling_initial: 5 + sampling_thereafter: 200 + +service: + pipelines: ${file:/etc/otel-collector-config/pipelines.yaml} + +``` + +Any of these files can be modified directly to update settings as desired (e.g. adding additional +logging levels). + + +### Manual Maintenance Of The OTEL Collector Config +You can also forgo all of the above config structure in favor of your own management scheme. If you're +running with the base docker-compose file, you may need to modify the commands section to point at your +own config files: + +```yaml + otel-collector: + ... + # Update these as needed + volumes: + - ./services/otel_collector:/etc/otel-collector-config + # Update these as needed + command: + - "--config=/etc/otel-collector-config/defaults/default-config.yaml" + - "--config=/etc/otel-collector-config/defaults/bigip-scraper-config.yaml" + ... 
+``` \ No newline at end of file diff --git a/docs/config_migration.md b/docs/config_migration.md new file mode 100644 index 0000000..1f6639c --- /dev/null +++ b/docs/config_migration.md @@ -0,0 +1,150 @@ +# AST Config Migration for Pre v0.6.0 Deployments + +## Background +The configuration management for AST Otel Collector is being updated to allow for more flexible +configuration, and to simplify configuration for advanced use cases. + +The old configuration process relied on a docker container that would run each time the AST +docker-compose instance was started. The process wrote the generated configs to an internal volume +where users were unable to view and modify the files to tune parameters for their deployment. + +In the new process, the raw otel configs are exposed in the /services/otel_collector directory where +they can be managed manually, or through continued use of a refactored config_helper script. + +For additional detail on configuration management options in the post v0.6.0 scheme, please see [/docs/config_management.md](/docs/config_management.md) + +## Migrating From pre v0.6.0 Configs +There's a python script in /src/config_helper.py which will convert the original big-ips.json schema +into the new management format. Assuming you have an existing list of configured BigIPs in +/config/big-ips.json, migration is a 2 step process: + +1. Make sure the default values in [/config/ast_defaults.yaml](/config/ast_defaults.yaml) match your +desired default settings. +2. Run the migration script. + + +## Verify Default Settings +The default settings in [/config/ast_defaults.yaml](/config/ast_defaults.yaml) are merged with your existing values in big-ips.json by the script. + +You can reduce the amount of repetitive yaml in the output by making sure these values match +your common values (e.g. 
if you use the username: "telemetry", updating that value in the defaults +file will prevent each per-bigip config stanza from containing that value to overide the "admin" +value). + +The script will intelligently merge values that are logically equivalent but not identical. +In particular: + +* password_env_ref in big-ips.json will be converted to the password field with the OTEL Collector +compatible env escaping "\${env:NAME}". Be sure to use the "\${env:BIGIP_PASSWORD_1}" format in the +ast_defaults file. +* collection_interval in big-ips.json was an int - it will be converted to string with the "s" suffix. +* tls_insecure_skip_verify in big-ips.json is converted to the nested tls.insecure_skip_verify setting. +* ca_file in big-ips.json is converted to the nested tls.ca_file setting. + +```yaml +# These configs are applied to each entry in the bigip_receivers file +# where they don't contain an equivalent / overriding entry. +bigip_receiver_defaults: + # The time to wait between metric collection runs + collection_interval: 60s + # The username to login to the device with + username: admin + # The password (not recommended) or a reference to an env variable (recommended) + # Below tells the collector to look for an environment variable named BIGIP_PASSWORD_1 + password: "${env:BIGIP_PASSWORD_1}" + # The data_types that should be enabled or disabled. DNS and GTM users can enable those modules + # by setting the below to true. These will apply to all devices and may be better specified on the + # per-reciever settings file below. + data_types: + f5.dns: + enabled: false + f5.gtm: + enabled: false + # The TLS settings to use. Either a CA file must be specified or insecure_skip_verify + # set to true (not recommended) + tls: + insecure_skip_verify: false + ca_file: "" + +# Most people should not need to modify settings below this line +# The default pipeline to use if one isn't specified in the per-device configs. 
+pipeline_default: metrics/bigip + +# These pipeline configs are written to the OTEL config after having the configured receivers +# added to the dictionary in accordance with the "pipeline_default" field above and "pipeline" +# field on the per-receiver config file. Otel Collector documentation explains the syntax in more +# detail. +pipelines: + metrics/bigip: + #receivers list are generated via the config helper script + processors: [batch] + exporters: [otlphttp/metrics-bigip, debug/bigip] +``` + +## Run The Conversion Script + +The /src/config_helper.py script can be run on a system with python or via docker image as follows: + +### Conversion Run Via Docker +If you don't have an environment with python handy, you can run the script via +docker as follows: + +```shell +docker run --rm -it -w /app -v ${PWD}:/app --entrypoint /app/src/bin/init_entrypoint.sh python:3.12.6-slim-bookworm --convert-legacy-config --dry-run +``` + +You should see output similar to: +``` +... +2024-09-25 17:04:46,420 - INFO - Converted the legacy config to the following bigip_receivers.yaml output: + +bigip/1: + endpoint: https://10.0.0.1 +bigip/2: + endpoint: https://10.0.0.2 + password: ${env:BIGIP_PASSWORD_2} +``` + +If the planned output looks correct, you can run again without the --dry-run to update +the contents of [./config/bigip_receivers.yaml](./config/bigip_receivers.yaml) +```shell +docker run --rm -it -w /app -v ${PWD}:/app --entrypoint /app/src/bin/init_entrypoint.sh python:3.12.6-slim-bookworm --convert-legacy-config +``` +Output: +``` +... +2024-09-25 17:06:29,897 - INFO - Successfully wrote data to './config/bigip_receivers.yaml'. +``` + +### Conversion Via System Python + +If you have a recent version of python available, you can install dependencies +and run the config helper to view expected from the project root as follows. 
+```shell +pip install -r requirements.txt +python ./src/config_helper.py --convert-legacy-config --dry-run +``` +You should see output similar to: +``` +... +2024-09-25 17:04:46,420 - INFO - Converted the legacy config to the following bigip_receivers.yaml output: + +bigip/1: + endpoint: https://10.0.0.1 +bigip/2: + endpoint: https://10.0.0.2 + password: ${env:BIGIP_PASSWORD_2} +``` + +If the planned output looks correct, you can run again without the --dry-run to update +the contents of [./config/bigip_receivers.yaml](./config/bigip_receivers.yaml) +```shell +python ./src/config_helper.py --convert-legacy-config +``` +Output: +``` +... +2024-09-25 17:06:29,897 - INFO - Successfully wrote data to './config/bigip_receivers.yaml'. +``` + +### Adding New Devices \ No newline at end of file diff --git a/docs/metric_obfuscation.md b/docs/metric_obfuscation.md new file mode 100644 index 0000000..669fd72 --- /dev/null +++ b/docs/metric_obfuscation.md @@ -0,0 +1,47 @@ +# Metric Obfuscation + +Metric data can be obfuscated before sending to storage systems (local Prometheus, F5 Datafabric, +or your Org metrics store) using the built-in functionality in the Opentelemetry Collector processors. 
+ +The Application Study Tool Opentelemetry Collector includes the following data processors which can +be used to manipulate data before it's exported: + +* `Transform Processor` [README](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/transformprocessor/README.md) +* `Resource Processor` - [README](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/resourceprocessor/README.md) +* `Metrics Transform Processor` - [README](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/metricstransformprocessor/README.md) +* `Attributes Processor` - [README](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/attributesprocessor/README.md) +* `Filter Processor` - [README](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/filterprocessor/README.md) + +## Masking Attributes Example +The attributes processor can be used to hash metric attributes before they're exported. + +To enable this functionality for data being exported to F5: + +1. Add an attributes processor config as shown to +[services/otel_collector/defaults/bigip-scraper-config.yaml](services/otel_collector/defaults/bigip-scraper-config.yaml) + + +(This example will mask the f5.instance.management_ip and f5.node.ip_address fields): +```yaml +processors: +... + attributes/mask-some-attributes: + actions: + - key: f5.instance.management_ip + action: hash + - key: f5.node.ip_address + action: hash +``` + +2. Edit the [config/ast_defaults.yaml](config/ast_defaults.yaml) file to include the new processor +on the F5 Datafabric pipeline: +```yaml +pipelines: +... + metrics/f5-datafabric: + # receivers list are generated via the config helper script + # Adding attributes/mask-some-attributes to the list of enabled processors. 
+ processors: [batch, interval/f5-datafabric, attributes/mask-some-attributes, attributes/f5-datafabric] + exporters: [otlp/f5-datafabric, debug/bigip] +... +``` \ No newline at end of file diff --git a/docs/receiver_readme.md b/docs/receiver_readme.md index 1891441..6d16490 100644 --- a/docs/receiver_readme.md +++ b/docs/receiver_readme.md @@ -31,6 +31,8 @@ The following settings are optional: - `collection_interval` (default = `10s`): This receiver collects metrics on an interval. Valid time units are `ns`, `us` (or `ยตs`), `ms`, `s`, `m`, `h`. - `tls` (defaults defined [here](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/configtls/README.md)): TLS control. By default insecure settings are rejected and certificate verification is on. - `enable_http_client_metrics` (default = `false`): Enable collection of metrics for http client requests to the device. +- `page_item_limit` (default = 100): The number of objects per page for paginated api requests +- `concurrent_workers` (default = 2): The number of concurrent API requests per receiver. - `data_types` (default: all enabled): This map allows you to enable / disable collection and sending of data by type. The list of available data types can be found in `./config.go`, in the DataTypesConfig struct definition. ### Example Configuration @@ -45,14 +47,52 @@ receivers: tls: insecure_skip_verify: true enable_http_client_metrics: true + concurrent_workers: 2 + page_item_limit: 100 data_types: f5.profile.web_acceleration: enabled: false attribute_name: some_alternative_data_type_name ``` -The full list of settings exposed for this receiver are documented [here](./config.go) with detailed sample configurations [here](./testdata/config.yaml). TLS config is documented further under the [opentelemetry collector's configtls package](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/configtls/README.md). 
+Available data_types: +``` +f5.collector +f5.license +f5.module +f5.node +f5.plane +f5.policy.eviction +f5.policy.firewall +f5.policy.ip_intelligence +f5.policy.api_protection +f5.policy.bandwidth_control +f5.policy.nat +f5.pool +f5.profile.client_ssl +f5.profile.server_ssl +f5.profile.dos +f5.profile.fasthttp +f5.profile.fastl4 +f5.profile.http +f5.profile.one_connect +f5.profile.quic +f5.profile.udp +f5.profile.tcp +f5.profile.http2 +f5.profile.http3 +f5.profile.web_acceleration +f5.rule +f5.ssl_certificate +f5.system +f5.virtual_server +f5.policy.asm +f5.dns ### DEFAULT DISABLE +f5.gtm ### DEFAULT DISABLE +``` + +TLS config is documented further under the [opentelemetry collector's configtls package](https://github.com/open-telemetry/opentelemetry-collector/blob/main/config/configtls/README.md). ## Metrics -Details about the metrics produced by this receiver can be found in [documentation.md](./documentation.md) +Details about the metrics produced by this receiver can be found in [/docs/receiver_metrics.md](/docs/receiver_metrics.md) diff --git a/pages/index.md b/pages/index.md new file mode 100644 index 0000000..7e3ff42 --- /dev/null +++ b/pages/index.md @@ -0,0 +1,20 @@ +# Seven Layer Cake + +### Overview +The Application Study Tool is intended to provide enhanced insights into (classic) BIG-IP products, leveraging best in class +open source telemetry tools. The full installation includes: + +* Custom Instance of OpenTelemetry Collector with enhanced BIG-IP data receivers (data fetched via iControlRest) [Full List of Metrics Collected](/docs/receiver_metrics.md). +* Prometheus timeseries database for storing and querying collected data. +* Grafana Instance with pre-configured dashboards for quick insights at the device and "fleet" levels. + +The Application Study Tool has everything needed to quickly get up and running with application insights at less than +production levels of reliability. 
For production/operational use cases, you can build on the included components,
+accounting for things like high availability, enhanced security via e.g. Grafana OIDC integration, and similar. Alternatively,
+the Opentelemetry Collector can be configured to send data to existing production ops monitoring tools as desired.
+
+![](../diagrams/ui.gif)
+
+### Table of Contents
+
+- [Quick Start](./quickstart.md)
\ No newline at end of file
diff --git a/pages/quickstart.md b/pages/quickstart.md
new file mode 100644
index 0000000..93ab0ed
--- /dev/null
+++ b/pages/quickstart.md
@@ -0,0 +1,210 @@
+# Quick Start
+
+## Getting Started
+
+### Prerequisites
+
+docker (or compatible) - [Installation Instructions](https://docs.docker.com/engine/install/)
+
+### Installation
+
+Clone the repo or download source tarball from the [release](https://github.com/f5devcentral/application-study-tool/releases) section.
+
+```shell
+# Clone the repo
+git clone https://github.com/f5devcentral/application-study-tool.git
+cd application-study-tool
+# Edit the following file with Grafana variables as required
+cp .env-example .env
+# Edit the following file with device secrets as required (see "Configure Device Secrets" below)
+cp .env.device-secrets-example .env.device-secrets
+# Edit the default settings for your environment as required
+# (see "Configure Default Device Settings" below)
+vi ./config/ast_defaults.yaml
+# Edit the config file with device / connection info
+# (see "Configure Devices To Scrape" below)
+vi ./config/bigip_receivers.yaml
+# Run the configuration generator
+docker run --rm -it -w /app -v ${PWD}:/app --entrypoint /app/src/bin/init_entrypoint.sh python:3.12.6-slim-bookworm --generate-config
+# Start the tool
+docker-compose up
+```
+
+## Configure Default Device Settings
+
+Edit config/ast_defaults.yaml to reflect common values for your BIG-IPs:
+```yaml
+# These configs are applied to each entry in the bigip_receivers file
+# where they don't contain an equivalent / 
overriding entry. +bigip_receiver_defaults: + # The time to wait between metric collection runs + collection_interval: 60s + # The username to login to the device with + username: admin + # The password (not recommended) or a reference to an env variable (recommended) + # Below tells the collector to look for an environment variable named + # BIGIP_PASSWORD_1 + password: "${env:BIGIP_PASSWORD_1}" + # The data_types that should be enabled or disabled. + # DNS and GTM are disabled by default and users can enable those modules + # on all devices by setting the below to true. + # A full list of data_types is in /docs/receiver_readme.md. + data_types: + f5.dns: + enabled: false + f5.gtm: + enabled: false + # The TLS settings to use. Either a CA file must be specified or + # insecure_skip_verify set to true (not recommended). + tls: + # Secure TLS communication requires mounting the certificate bundle + # used to sign the BigIP certificates. Though not recommended, in the + # case of self-signed certificates or for testing purposes, you can skip + # this check by setting this field to true. + insecure_skip_verify: false + # The path to a CA File used to validate BIG-IP certificates. This is required + # if tls_insecure_skip_verify is set to false. See below for details. + ca_file: "" +``` + +## Configure Devices To Scrape +Edit the device list in config/bigip_receivers.yaml: +```yaml +#### Values not explicitly configured here inherit values in ast_defaults.yaml. +#### Each entry must have a unique name, starting with bigip/ +#### (e.g. bigip/1, bigip/2) +bigip/1: + #### Endpoint must be specified for each device + #### because there's no rational default. + #### Set this to the management IP for the device. This must be + #### reachable from the Application Study Tool host. 
+  endpoint: https://10.0.0.1
+  #### Override some default settings with device specific values
+  username: SOME_OVERRIDE_ACCOUNT_NAME
+  password: "${SOME_OTHER_ENV_VAR_WITH_ANOTHER_PASSWORD}"
+  #### Everything commented out here gets the value from default
+  # collection_interval: 30s
+  # data_types:
+  #   f5.dns:
+  #     enabled: false
+  #   f5.gtm:
+  #     enabled: false
+  # tls:
+  #   insecure_skip_verify: true
+  #   ca_file:
+bigip/2:
+  endpoint: https://10.0.0.2
+```
+
+
+## Configure Device Secrets
+The application study tool default configuration relies on environment variables
+which contain device access credentials. There are a number of ways to manage and
+inject secrets into a container environment (modifications to the docker-compose file
+to support your preferred management process are encouraged), but for simplicity,
+if there is a file named .env.device-secrets in the root project directory they will be
+mounted.
+
+Create a file called .env.device-secrets, and add your BIG-IP passwords like so:
+```
+BIGIP_PASSWORD_1=foo-bar123!
+BIGIP_PASSWORD_2=bar-foo123!
+```
+
+The variable name (the part on the left of the equal sign) must match the configured
+value for the devices that use this password in config/ast_defaults.yaml or device specific
+config in config/bigip_receivers.yaml. In the following example, bigip/1 uses BIGIP_PASSWORD_1
+from the defaults and bigip/2 uses BIGIP_PASSWORD_2 from the device settings:
+
+```
+##############################
+## config/ast_defaults.yaml
+##############################
+
+bigip_receiver_defaults:
+  ...
+  password: "${env:BIGIP_PASSWORD_1}"
+  ... 
+
+##############################
+## config/bigip_receivers.yaml
+##############################
+
+# This gets the default "${env:BIGIP_PASSWORD_1}"
+bigip/1:
+  endpoint: https://10.0.0.1
+
+# This overrides it with "${env:BIGIP_PASSWORD_2}"
+bigip/2:
+  endpoint: https://10.0.0.1
+  password: ${env:BIGIP_PASSWORD_2}
+
+```
+
+## Run The Configuration Helper
+The config helper script can be run natively or via docker to merge the default and device
+level configs into the final OTEL Collector config as follows:
+```shell
+# Run the configuration generator
+docker run --rm -it -w /app -v ${PWD}:/app --entrypoint /app/src/bin/init_entrypoint.sh python:3.12.6-slim-bookworm --generate-config
+```
+
+This will write 2 new files in the services/otel_collector directory:
+
+* `receivers.yaml` - The final list of scraper configs and their settings.
+* `pipelines.yaml` - The final pipeline configs that map receivers to output destinations
+(prometheus).
+
+### Configure CA File
+AST expects a valid TLS cert bundle unless `tls.insecure_skip_verify` is
+set to true for each device. In order to mount and use your CA file, you must
+configure the docker-compose.yaml file in this directory, and set the `ca_file` parameter to
+the resulting path. Example:
+
+docker-compose.yaml:
+```yaml
+  ...
+  otel-collector:
+    ...
+    volumes:
+      - ./services/otel_collector:/etc/otel-collector-config
+      - ./config/ca_bundle.pem:/etc/ssl/ca_bundle.pem
+```
+
+config/ast_defaults.yaml (or the tls section of each device in config/bigip_receivers.yaml):
+```yaml
+bigip_receiver_defaults:
+  ...
+  tls:
+    insecure_skip_verify: false
+    ca_file: "/etc/ssl/ca_bundle.pem"
+```
+
+The configuration parameter `tls.insecure_skip_verify` defaults to false. Installers
+who would like to opt-in to run in an insecure TLS mode must set
+`tls.insecure_skip_verify: true` and understand
+that the connection between the OTEL collector and the BIG-IP does not have secure
+TLS termination. 
+ +### Configure Grafana +The Grafana instance can be configured via environment variables using their standard +[options](https://grafana.com/docs/grafana/latest/setup-grafana/configure-grafana/#override-configuration-with-environment-variables). + +The included .env-example can be copied over and modified to set the initial admin +password to a value you select: + +``` +cp .env-example .env + +``` + +### Run Application Study Tool +Once the above configurations have been made, the tool can be started with: + +``` +docker compose up +``` + +#### View The Dashboards +The default Grafana user/pass is `admin/admin`, and can be accessed at +`http://:3000`. diff --git a/requirements.txt b/requirements.txt index 1f62827..8392d54 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1 @@ -flake8==7.0.0 -black==24.4.2 -jinja2==3.1.4 -validators==0.28.3 +PyYAML==6.0.2 diff --git a/services/README.md b/services/README.md index fe0c000..c5e22ce 100644 --- a/services/README.md +++ b/services/README.md @@ -1,4 +1,3 @@ # Services -This folder contains static configuration files for 7LC services. For dynamically generated -and runtime storage volumes, check ../db_graf/. +This folder contains configuration files for AST services. 
diff --git a/services/grafana/provisioning/dashboards/bigip/device/device-gtm.json b/services/grafana/provisioning/dashboards/bigip/device/device-gtm.json index a1071c5..c8834f6 100644 --- a/services/grafana/provisioning/dashboards/bigip/device/device-gtm.json +++ b/services/grafana/provisioning/dashboards/bigip/device/device-gtm.json @@ -29,6 +29,12 @@ "name": "Table", "version": "" }, + { + "type": "panel", + "id": "text", + "name": "Text", + "version": "" + }, { "type": "panel", "id": "timeseries", @@ -58,6 +64,32 @@ "id": null, "links": [], "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "gridPos": { + "h": 13, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 9, + "options": { + "code": { + "language": "plaintext", + "showLineNumbers": false, + "showMiniMap": false + }, + "content": "GTM Metrics are not enabled in the Opentelemtry Collector by default\n(due to the relatively large number of requests required to gather the data).\n\n\nFor data to populate in this dashboard, you need to enable them as follows (and then you might want\nto delete this panel):\n\n## Enable For All BigIPs\nYou can enable GTM metrics for all BigIPs by editing the\nconfig/ast_defaults.yaml file in the AST directory and setting\n\n```yaml\nbigip_receiver_defaults:\n...\n data_types:\n...\n f5.gtm:\n enabled: true\n```\n\n## Enable For Select BigIPs\nYou can enable GTM metrics for specific BigIPs by editing the\nconfig/bigip_receivers.yaml file and setting the flag for the\nspecific devices you need:\n\n```yaml\nbigip/1:\n endpoint: https://10.0.0.1\n data_types:\n f5.gtm:\n enabled: true\n```", + "mode": "markdown" + }, + "pluginVersion": "11.2.0", + "title": "Enabling GTM Metrics", + "type": "text" + }, { "datasource": { "name": "${DS_PROMETHEUS}", @@ -97,7 +129,7 @@ "h": 4, "w": 24, "x": 0, - "y": 0 + "y": 13 }, "id": 8, "options": { @@ -416,7 +448,7 @@ "h": 12, "w": 24, "x": 0, - "y": 4 + "y": 17 }, "id": 6, "options": { @@ 
-504,6 +536,6 @@ "timezone": "browser", "title": "Device GTM", "uid": "fdyi5w8d0yayoa", - "version": 3, + "version": 1, "weekStart": "" } \ No newline at end of file diff --git a/services/grafana/provisioning/dashboards/bigip/profile/ltm-dns.json b/services/grafana/provisioning/dashboards/bigip/profile/ltm-dns.json index 614f7f8..368996b 100644 --- a/services/grafana/provisioning/dashboards/bigip/profile/ltm-dns.json +++ b/services/grafana/provisioning/dashboards/bigip/profile/ltm-dns.json @@ -29,6 +29,12 @@ "name": "Table", "version": "" }, + { + "type": "panel", + "id": "text", + "name": "Text", + "version": "" + }, { "type": "panel", "id": "timeseries", @@ -58,13 +64,39 @@ "id": null, "links": [], "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "gridPos": { + "h": 14, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 37, + "options": { + "code": { + "language": "plaintext", + "showLineNumbers": false, + "showMiniMap": false + }, + "content": "DNS Metrics are not enabled in the Opentelemtry Collector by default\n(due to the relatively large number of requests required to gather the data).\n\n\nFor data to populate in this dashboard, you need to enable them as follows (and then you might want\nto delete this panel):\n\n## Enable For All BigIPs\nYou can enable GTM metrics for all BigIPs by editing the\nconfig/ast_defaults.yaml file in the AST directory and setting\n\n```yaml\nbigip_receiver_defaults:\n...\n data_types:\n...\n f5.dns:\n enabled: true\n```\n\n## Enable For Select BigIPs\nYou can enable DNS metrics for specific BigIPs by editing the\nconfig/bigip_receivers.yaml file and setting the flag for the\nspecific devices you need:\n\n```yaml\nbigip/1:\n endpoint: https://10.0.0.1\n data_types:\n f5.dns:\n enabled: true\n```", + "mode": "markdown" + }, + "pluginVersion": "11.2.0", + "title": "Enable DNS Metrics", + "type": "text" + }, { "collapsed": false, "gridPos": { "h": 1, "w": 24, "x": 0, - "y": 0 
+ "y": 14 }, "id": 3, "panels": [], @@ -110,7 +142,7 @@ "h": 7, "w": 24, "x": 0, - "y": 1 + "y": 15 }, "id": 1, "options": { @@ -280,7 +312,7 @@ "h": 8, "w": 12, "x": 0, - "y": 8 + "y": 22 }, "id": 5, "options": { @@ -382,7 +414,7 @@ "h": 8, "w": 12, "x": 12, - "y": 8 + "y": 22 }, "id": 6, "options": { @@ -425,7 +457,7 @@ "h": 1, "w": 24, "x": 0, - "y": 16 + "y": 30 }, "id": 2, "panels": [ @@ -477,8 +509,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -494,7 +525,7 @@ "h": 8, "w": 12, "x": 0, - "y": 17 + "y": 25 }, "id": 10, "options": { @@ -579,8 +610,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -596,7 +626,7 @@ "h": 8, "w": 12, "x": 12, - "y": 17 + "y": 25 }, "id": 9, "options": { @@ -681,8 +711,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -698,7 +727,7 @@ "h": 8, "w": 12, "x": 0, - "y": 25 + "y": 33 }, "id": 4, "options": { @@ -783,8 +812,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -800,7 +828,7 @@ "h": 8, "w": 12, "x": 12, - "y": 25 + "y": 33 }, "id": 7, "options": { @@ -885,8 +913,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -902,7 +929,7 @@ "h": 8, "w": 12, "x": 12, - "y": 33 + "y": 41 }, "id": 8, "options": { @@ -949,7 +976,7 @@ "h": 1, "w": 24, "x": 0, - "y": 17 + "y": 31 }, "id": 12, "panels": [ @@ -1001,8 +1028,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -1017,7 +1043,7 @@ "h": 8, "w": 12, "x": 0, - "y": 18 + "y": 26 }, "id": 14, "options": { @@ -1136,8 +1162,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -1152,7 +1177,7 @@ "h": 8, "w": 12, "x": 12, - "y": 18 + 
"y": 26 }, "id": 17, "options": { @@ -1236,8 +1261,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -1252,7 +1276,7 @@ "h": 8, "w": 12, "x": 0, - "y": 26 + "y": 34 }, "id": 16, "options": { @@ -1336,8 +1360,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -1352,7 +1375,7 @@ "h": 8, "w": 12, "x": 12, - "y": 26 + "y": 34 }, "id": 15, "options": { @@ -1398,7 +1421,7 @@ "h": 1, "w": 24, "x": 0, - "y": 18 + "y": 32 }, "id": 11, "panels": [ @@ -1450,8 +1473,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -1467,7 +1489,7 @@ "h": 8, "w": 12, "x": 0, - "y": 19 + "y": 27 }, "id": 13, "options": { @@ -1551,8 +1573,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -1567,7 +1588,7 @@ "h": 8, "w": 12, "x": 12, - "y": 19 + "y": 27 }, "id": 18, "options": { @@ -1625,8 +1646,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -1641,7 +1661,7 @@ "h": 8, "w": 12, "x": 0, - "y": 27 + "y": 35 }, "id": 19, "options": { @@ -1729,8 +1749,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -1745,7 +1764,7 @@ "h": 8, "w": 12, "x": 12, - "y": 27 + "y": 35 }, "id": 23, "options": { @@ -1821,7 +1840,7 @@ "h": 1, "w": 24, "x": 0, - "y": 19 + "y": 33 }, "id": 34, "panels": [ @@ -1873,8 +1892,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -1890,7 +1908,7 @@ "h": 8, "w": 12, "x": 0, - "y": 20 + "y": 28 }, "id": 22, "options": { @@ -1988,8 +2006,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -2005,7 +2022,7 @@ "h": 8, "w": 12, "x": 12, - "y": 20 + "y": 28 }, "id": 
24, "options": { @@ -2102,8 +2119,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -2119,7 +2135,7 @@ "h": 8, "w": 12, "x": 0, - "y": 28 + "y": 36 }, "id": 25, "options": { @@ -2178,7 +2194,7 @@ "h": 1, "w": 24, "x": 0, - "y": 20 + "y": 34 }, "id": 35, "panels": [ @@ -2231,8 +2247,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -2248,7 +2263,7 @@ "h": 8, "w": 12, "x": 0, - "y": 21 + "y": 29 }, "id": 27, "options": { @@ -2333,8 +2348,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -2350,7 +2364,7 @@ "h": 8, "w": 12, "x": 12, - "y": 21 + "y": 29 }, "id": 30, "options": { @@ -2435,8 +2449,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -2452,7 +2465,7 @@ "h": 8, "w": 12, "x": 0, - "y": 29 + "y": 37 }, "id": 28, "options": { @@ -2537,8 +2550,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -2554,7 +2566,7 @@ "h": 8, "w": 12, "x": 12, - "y": 29 + "y": 37 }, "id": 29, "options": { @@ -2600,7 +2612,7 @@ "h": 1, "w": 24, "x": 0, - "y": 21 + "y": 35 }, "id": 36, "panels": [ @@ -2653,8 +2665,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -2670,7 +2681,7 @@ "h": 8, "w": 12, "x": 0, - "y": 22 + "y": 30 }, "id": 31, "options": { @@ -2755,8 +2766,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -2772,7 +2782,7 @@ "h": 8, "w": 12, "x": 12, - "y": 22 + "y": 30 }, "id": 33, "options": { @@ -2831,8 +2841,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -2847,7 +2856,7 @@ "h": 8, "w": 12, "x": 0, - "y": 30 + "y": 38 }, "id": 32, "options": { @@ 
-2992,6 +3001,6 @@ "timezone": "browser", "title": "LTM - DNS Profile", "uid": "cdyemmjm65d6of", - "version": 6, + "version": 2, "weekStart": "" } \ No newline at end of file diff --git a/services/grafana/provisioning/dashboards/bigip/profile/ltm-http.json b/services/grafana/provisioning/dashboards/bigip/profile/ltm-http.json index 4d167fb..6240b00 100644 --- a/services/grafana/provisioning/dashboards/bigip/profile/ltm-http.json +++ b/services/grafana/provisioning/dashboards/bigip/profile/ltm-http.json @@ -737,7 +737,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -776,7 +777,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "rate(f5_virtual_server_profile_http_requests_total{f5_profile_http_name=~\"$profile_name\", job=\"$device_name\"}[$__rate_interval])", + "expr": "rate(f5_virtual_server_profile_http_requests_by_method_total{f5_profile_http_name=~\"$profile_name\", job=\"$device_name\"}[$__rate_interval])", "fullMetaSearch": false, "includeNullMetadata": true, "instant": false, @@ -838,7 +839,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -877,7 +879,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "rate(f5_virtual_server_profile_http_responses_total{f5_profile_http_name=~\"$profile_name\", job=\"$device_name\"}[$__rate_interval])", + "expr": "rate(f5_virtual_server_profile_http_responses_by_status_total{f5_profile_http_name=~\"$profile_name\", job=\"$device_name\"}[$__rate_interval])", "fullMetaSearch": false, "includeNullMetadata": true, "instant": false, @@ -959,13 +961,13 @@ ] }, "time": { - "from": "now-6h", + "from": "now-1h", "to": "now" }, "timepicker": {}, "timezone": "browser", "title": "LTM - HTTP Profile", "uid": "cdu8euqz7w0zkc", - "version": 6, + "version": 8, "weekStart": "" } \ No newline at end of file diff --git 
a/services/otel_collector/defaults/bigip-scraper-config.yaml b/services/otel_collector/defaults/bigip-scraper-config.yaml
new file mode 100644
index 0000000..14e9f22
--- /dev/null
+++ b/services/otel_collector/defaults/bigip-scraper-config.yaml
@@ -0,0 +1,33 @@
+receivers: ${file:/etc/otel-collector-config/receivers.yaml}
+
+processors:
+  batch:
+  # Only export data to f5 (if enabled) every 300s
+  interval/f5-datafabric:
+    interval: 300s
+  # Apply the following transformations to metrics bound for F5 Datafabric
+  attributes/f5-datafabric:
+    actions:
+      - key: dataType
+        action: upsert
+        value: bigip-ast-metric
+
+exporters:
+  otlphttp/metrics-local:
+    endpoint: http://prometheus:9090/api/v1/otlp
+  otlp/f5-datafabric:
+    endpoint: us.edge.df.f5.com:443
+    headers:
+      # Requires Sensor ID and Token to authenticate.
+      Authorization: "kovacs ${env:SENSOR_ID} ${env:SENSOR_SECRET_TOKEN}"
+      X-F5-OTEL: "GRPC"
+    tls:
+      insecure: false
+      ca_file: /etc/ssl/certs/ca-certificates.pem
+  debug/bigip:
+    verbosity: basic
+    sampling_initial: 5
+    sampling_thereafter: 200
+
+service:
+  pipelines: ${file:/etc/otel-collector-config/pipelines.yaml}
diff --git a/services/otel_collector/defaults/default-config.yaml b/services/otel_collector/defaults/default-config.yaml
new file mode 100644
index 0000000..61923f1
--- /dev/null
+++ b/services/otel_collector/defaults/default-config.yaml
@@ -0,0 +1,37 @@
+# This file contains default / non-BigIP specific settings that allow
+# the otel collector to monitor itself. Most people won't need to configure
+# anything in this file. 
+receivers: + prometheus/collector: + config: + scrape_configs: + - job_name: 'opentelemetry-collector' + static_configs: + - targets: ['localhost:8888'] + +processors: + batch: + interval/xcdf-interval: + interval: 300s + attributes/xcdf-datatype: + actions: + - key: dataType + action: upsert + value: bigip-ast-metric + +exporters: + otlphttp/prometheus-default: + endpoint: http://prometheus:9090/api/v1/otlp + # Support sending downsampled metrics to F5 Data Fabric if enabled. + + debug/default: + verbosity: basic + sampling_initial: 5 + sampling_thereafter: 200 + +service: + pipelines: + metrics/prometheus: + receivers: [prometheus/collector] + processors: [batch] + exporters: [otlphttp/prometheus-default, debug/default] \ No newline at end of file diff --git a/services/otel_collector/pipelines.yaml b/services/otel_collector/pipelines.yaml new file mode 100644 index 0000000..5ed48b8 --- /dev/null +++ b/services/otel_collector/pipelines.yaml @@ -0,0 +1,8 @@ +metrics/bigip: + exporters: + - otlphttp/metrics-bigip + - debug/bigip + processors: + - batch + receivers: + - bigip/1 diff --git a/services/otel_collector/receivers.yaml b/services/otel_collector/receivers.yaml new file mode 100644 index 0000000..34475dd --- /dev/null +++ b/services/otel_collector/receivers.yaml @@ -0,0 +1,13 @@ +bigip/1: + collection_interval: 60s + data_types: + f5.dns: + enabled: false + f5.gtm: + enabled: false + endpoint: https://10.0.0.1 + password: ${env:BIGIP_PASSWORD_1} + tls: + ca_file: '' + insecure_skip_verify: false + username: admin diff --git a/src/bin/init_entrypoint.sh b/src/bin/init_entrypoint.sh new file mode 100755 index 0000000..a5ca0f3 --- /dev/null +++ b/src/bin/init_entrypoint.sh @@ -0,0 +1,3 @@ +#!/usr/bin/bash +pip install PyYAML==6.0.2 +python /app/src/config_helper.py "$@" \ No newline at end of file diff --git a/src/config_helper.py b/src/config_helper.py new file mode 100755 index 0000000..9ee4bbc --- /dev/null +++ b/src/config_helper.py @@ -0,0 +1,628 @@ +""" 
+config_helper.py
+
+A command-line tool for helping simplify application study tool configurations. It takes 2 input files,
+one containing defaults that should be applied to each bigip receiver configuration, and a second with
+the individual bigip targets and any non-default values to use as overrides.
+
+The output is written to ./services/otel_collector/receivers.yaml (and pipelines.yaml) where the AST
+Otel Instance merges them with the base configuration templates.
+
+Key Features:
+- Convert legacy JSON configurations to a new YAML format.
+- Generate output configurations based on default settings and per-device inputs.
+- Supports dry-run mode to preview changes without writing to files.
+
+Command-Line Interface:
+The tool can be executed from the command line with the following options:
+- --convert-legacy-config: Convert the legacy configuration file to the new format.
+- --generate-configs: Generate new configurations based on the input files.
+- --dry-run: Preview changes without writing to files.
+
+These additional flags can also be specified (but probably shouldn't be):
+- --legacy-config-file: Specify the path to the legacy configuration file (default: ./config/big-ips.json).
+- --default-config-file: Specify the path to the default settings file (default: ./config/ast_defaults.yaml).
+- --receiver-input-file: Specify the path to the receiver input file (default: ./config/bigip_receivers.yaml).
+- --receiver-output-file: Specify the output path for the receiver configuration file (default: ./services/otel_collector/receivers.yaml).
+- --pipelines-output-file: Specify the output path for the pipeline configuration file (default: ./services/otel_collector/pipelines.yaml).
+
+Usage Example:
+To convert a legacy configuration in the default ./config/big-ips.json file:
+    python ./src/config_helper.py --convert-legacy-config
+
+To generate configurations:
+    python ./src/config_helper.py --generate-configs
+
+To re-generate configurations (e.g. after adding new devices or changing default settings), re-run the above command.
+"""
+
+import argparse
+import json
+import logging
+
+import yaml
+from copy import deepcopy
+
+
+# Timestamped, leveled log lines; this tool is run interactively (see
+# init_entrypoint.sh), so INFO-level progress goes straight to the console.
+logging.basicConfig(
+    level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
+)
+
+
+def load_yaml(path):
+    """Load a YAML file from the specified path.
+
+    This function reads a YAML file and parses its content into a Python dictionary.
+    It logs the status of the loading operation, including success and various error cases.
+
+    Parameters:
+        path (str): The file path to the YAML file to be loaded.
+
+    Returns:
+        dict or None: The content of the YAML file as a dictionary if loading is successful;
+                      None if an error occurs (e.g., file not found, permission denied,
+                      or invalid YAML format).
+    """
+    try:
+        with open(path, "r") as f:
+            content = yaml.safe_load(f)
+            logging.info("Successfully loaded '%s'.", path)
+            return content
+    except FileNotFoundError:
+        logging.error("Error: The file '%s' does not exist.", path)
+        return None
+    except PermissionError:
+        logging.error("Error: Permission denied when trying to open '%s'.", path)
+        return None
+    except yaml.YAMLError as e:
+        logging.error("Error reading YAML file '%s': %s", path, e)
+        return None
+
+
+def load_json(path):
+    """Load a JSON file from the specified path.
+
+    This function reads a JSON file and parses its content into a Python dictionary.
+    It logs the status of the loading operation, including success and various error cases.
+
+    Parameters:
+        path (str): The file path to the JSON file to be loaded.
+
+    Returns:
+        dict or None: The content of the JSON file as a dictionary if loading is successful;
+                      None if an error occurs (e.g., file not found, permission denied,
+                      or invalid JSON format).
+    """
+    try:
+        with open(path, "r") as f:
+            content = json.loads(f.read())
+            logging.info("Successfully loaded '%s'.", path)
+            return content
+    except FileNotFoundError:
+        logging.error("Error: The file '%s' does not exist.", path)
+        return None
+    except PermissionError:
+        logging.error("Error: Permission denied when trying to open '%s'.", path)
+        return None
+    except json.JSONDecodeError as e:
+        logging.error("Error reading JSON file '%s': %s", path, e)
+        return None
+
+
+def write_yaml_to_file(data, path):
+    """Write a dictionary to a YAML file.
+
+    This function serializes a given dictionary and writes it to a specified YAML file.
+    It logs the success or failure of the write operation.
+
+    Parameters:
+        data (dict): The dictionary to be written to the YAML file.
+        path (str): The file path where the YAML data will be saved.
+
+    Returns:
+        None: This function does not return a value. It either successfully writes the data
+              to the file or logs an error if the operation fails.
+    """
+    try:
+        with open(path, "w") as f:
+            # default_flow_style=False emits block-style (indented) YAML rather
+            # than inline {}/[] mappings, matching the hand-edited config files.
+            yaml.dump(data, f, default_flow_style=False)
+        logging.info("Successfully wrote data to '%s'.", path)
+    except IOError as e:
+        logging.error("Error writing to YAML file '%s': %s", path, e)
+
+
+def load_default_config(args):
+    """Load the default configuration settings from a YAML file.
+
+    This function retrieves the default settings for the application study tool
+    by loading a YAML configuration file specified in the command-line arguments.
+
+    Parameters:
+        args (argparse.Namespace): The command-line arguments that include the
+                                   path to the default configuration file.
+
+    Returns:
+        dict or None: The content of the YAML file as a dictionary if loading is successful;
+                      None if an error occurs while loading the file.
+ """ + logging.info("Loading AST Default Settings in %s...", args.default_config_file) + return load_yaml(args.default_config_file) + + +def load_receiver_config(args): + """Load the per-receiver / bigip Device configuration settings from a YAML file. + + This function retrieves the receiver settings for the application study tool + by loading a YAML configuration file specified in the command-line arguments. + + Parameters: + args (argparse.Namespace): The command-line arguments that include the + path to the default configuration file. + + Returns: + dict or None: The content of the YAML file as a dictionary if loading is successful; + None if an error occurs while loading the file. + """ + logging.info( + "Loading Per-Receiver (BigIP) Settings in %s...", args.receiver_input_file + ) + return load_yaml(args.receiver_input_file) + + +def load_legacy_config(args): + """Load the legacy config from a JSON file. + + This function retrieves the legacy configuration for the application study tool + by loading a JSON configuration file specified in the command-line arguments. + + Parameters: + args (argparse.Namespace): The command-line arguments that include the + path to the legacy configuration file. + + Returns: + dict or None: The content of the JSON file as a dictionary if loading is successful; + None if an error occurs while loading the file. + """ + logging.info("Loading legacy configuration in %s...", args.legacy_config_file) + return load_json(args.legacy_config_file) + + +def convert_legacy_config(args): + """Convert legacy configuration to the new format. + + This function loads the default configuration and legacy configuration files, then transforms + the legacy configuration into a new format using the defaults specified in the default configuration. + + Args: + args (argparse.Namespace): Command-line arguments containing paths to configuration files. 
+ + Returns: + dict or None: A dictionary representing the transformed receiver configurations, or None if any error occurs during loading or processing. + """ + logging.info("Converting legacy configuration in %s...", args.legacy_config_file) + + default_config = load_default_config(args) + if not default_config: + return None + + default_receiver_configs = default_config.get("bigip_receiver_defaults") + if not default_receiver_configs: + logging.error( + "Error: Default receiver configs not found in default settings file." + ) + return None + + legacy_config = load_legacy_config(args) + if not legacy_config: + return None + + return transform_receiver_configs(legacy_config, default_receiver_configs) + + +def transform_receiver_configs(legacy_configs, default_configs): + """Transform legacy receiver configurations into the new format. + + This function takes a list of legacy receiver configurations and transforms each configuration + into a new format based on the provided default configurations. It generates a dictionary where + each key is a unique identifier for a receiver (e.g., "bigip/1") and the value is the transformed + receiver configuration. + + Args: + legacy_configs (list): A list of legacy receiver configuration dictionaries. + default_configs (dict): A dictionary containing default configuration values for the receivers. + + Returns: + dict: A dictionary containing the transformed receiver configurations, + where keys are formatted as "bigip/{index}" and values are the transformed configurations. 
+ """ + new_receiver_configs = {} + for idx, receiver_config in enumerate(legacy_configs): + new_receiver_configs[f"bigip/{idx + 1}"] = transform_single_receiver( + receiver_config, default_configs + ) + return new_receiver_configs + + +def handle_collection_interval(value, default_value): + """Handle collection interval formatting.""" + with_seconds = f"{value}s" + return with_seconds if with_seconds != default_value else None + + +def handle_password_env_ref(value, default_value): + """Handle password environment reference formatting.""" + escaped_value = f"${{env:{value}}}" + return escaped_value if escaped_value != default_value else None + + +def handle_tls_settings(new_receiver_config, key, value, default_configs): + """Handle TLS settings for the receiver configuration.""" + if key == "tls_insecure_skip_verify": + default_value = default_configs.get("tls", {}).get("insecure_skip_verify") + key = "insecure_skip_verify" + else: # key == "ca_file" + default_value = default_configs.get("tls", {}).get("ca_file") + + if value != default_value: + if "tls" not in new_receiver_config: + new_receiver_config["tls"] = {} + new_receiver_config["tls"][key] = value + + +def transform_single_receiver(receiver_config, default_configs): + """Transform a single receiver configuration. + + This function takes a legacy receiver configuration and transforms it into the new format + based on the provided default configurations. It processes specific keys differently and + incorporates logic for handling collection intervals, passwords, and TLS settings. + + Args: + receiver_config (dict): A dictionary representing the legacy receiver configuration. + default_configs (dict): A dictionary containing default configuration values for comparison. + + Returns: + dict: A dictionary containing the transformed receiver configuration, including any updates + based on the provided defaults and specific handling for certain keys. 
+ + The following transformations are applied: + - **collection_interval**: Formatted with seconds using the `handle_collection_interval` function. + - **password_env_ref**: Processed using the `handle_password_env_ref` function. + - **TLS settings**: Handled through the `handle_tls_settings` function. + - If a key's value matches the default value, it is skipped. + """ + new_receiver_config = {} + for key, value in receiver_config.items(): + default_value = default_configs.get(key) + + if key == "collection_interval": + interval = handle_collection_interval(value, default_value) + if interval: + new_receiver_config[key] = interval + elif key == "password_env_ref": + pw = handle_password_env_ref(value, default_value) + if pw: + new_receiver_config["password"] = pw + elif key in ["tls_insecure_skip_verify", "ca_file"]: + handle_tls_settings(new_receiver_config, key, value, default_configs) + elif default_value and default_value == value: + continue # Skip if value is the same as the default + else: + new_receiver_config[key] = value + + return new_receiver_config + + +def deep_merge(dict1, dict2): + """Deep merge two dictionaries.""" + for key, value in dict2.items(): + if key in dict1: + # If both values are dicts, merge them + if isinstance(dict1[key], dict) and isinstance(value, dict): + deep_merge(dict1[key], value) + else: + dict1[key] = value # Overwrite with dict2's value + else: + dict1[key] = value # Add new key from dict2 + return dict1 + + +def generate_receiver_configs(receiver_input_configs, default_config): + """Generate merged receiver configurations from input and defaults. + + This function takes a dictionary of receiver input configurations and a default configuration, + and generates a merged configuration for each receiver. It ensures that any specific receiver + configurations override the default values while maintaining the overall structure defined by + the defaults. 
+
+    Args:
+        receiver_input_configs (dict): A dictionary where keys are receiver identifiers and values
+        are their corresponding configurations.
+        default_config (dict): A dictionary containing default configuration values, particularly
+        under the key 'bigip_receiver_defaults'.
+
+    Returns:
+        dict: A dictionary containing the merged receiver configurations, where each key corresponds
+        to a receiver identifier and each value is the resulting merged configuration.
+
+    The following operations are performed for each receiver:
+    - Deep copies of the default configurations and the receiver-specific configurations are created.
+    - The 'pipeline' and 'f5_pipeline' routing keys are removed; they are consumed by
+    generate_pipeline_configs and are not valid fields on the otel receiver itself.
+    - The merged configuration is generated by deep merging the defaults with the specific receiver
+    configuration, ensuring that specific values take precedence over defaults.
+    """
+    merged_config = {}
+    for k, v in receiver_input_configs.items():
+        defaults = deepcopy(default_config.get("bigip_receiver_defaults"))
+        this_cfg = deepcopy(v)
+        # Strip pipeline routing keys so they don't leak into receivers.yaml:
+        # generate_pipeline_configs reads both "pipeline" and "f5_pipeline"
+        # from these configs, and the otel receiver rejects unknown fields.
+        for routing_key in ("pipeline", "f5_pipeline"):
+            if this_cfg.get(routing_key):
+                del this_cfg[routing_key]
+        merged_config[k] = deep_merge(defaults, this_cfg)
+    return merged_config
+
+
+def assemble_pipelines(
+    pipeline_key, default_pipeline, receiver_input_configs, pipelines, filename
+):
+    """
+    Assembles pipeline configurations by linking receivers to their respective pipelines.
+
+    This function iterates over the provided receiver input configurations and associates each receiver
+    with its corresponding pipeline. If a specified pipeline is not found in the defined pipelines,
+    an error is logged and assembly stops early.
+
+    Parameters:
+    - pipeline_key (str): The key used to retrieve the pipeline name from the receiver's configuration.
+    - default_pipeline (str): The default pipeline to use if none is specified in the receiver's config.
+    - receiver_input_configs (dict): A dictionary mapping receiver names to their configuration settings.
+    - pipelines (dict): A dictionary of available pipelines, where each pipeline is identified by its name.
+    - filename (str): The name of the configuration file being processed, used for logging errors.
+
+    Returns:
+    - bool: True when every receiver was linked to a known pipeline; False when any
+      receiver references a pipeline that does not exist.
+    """
+    for receiver, config in receiver_input_configs.items():
+        pipeline = config.get(pipeline_key, default_pipeline)
+        this_pipeline = pipelines.get(pipeline)
+        if not this_pipeline:
+            logging.error(
+                "Pipeline %s on Receiver %s is not found in config pipelines section of %s...",
+                pipeline,
+                receiver,
+                filename,
+            )
+            return False
+        if not this_pipeline.get("receivers"):
+            this_pipeline["receivers"] = []
+        this_pipeline["receivers"].append(receiver)
+    return True
+
+
+def generate_pipeline_configs(receiver_input_configs, default_config, args):
+    """Generate pipeline configurations based on receiver inputs and default settings.
+
+    This function constructs pipeline configurations by associating receivers with their respective
+    pipelines based on the provided receiver input configurations and default settings. It validates
+    the existence of default pipelines and the pipelines specified in the default configuration.
+
+    Args:
+        receiver_input_configs (dict): A dictionary where keys are receiver identifiers and values
+        are their corresponding configurations.
+        default_config (dict): A dictionary containing default configuration values, particularly
+        under the keys 'pipeline_default' and 'pipelines'.
+        args (argparse.Namespace): The parsed command-line arguments, used for logging context.
+
+    Returns:
+        dict or None: A dictionary containing the updated pipeline configurations, or None if
+        there are errors (e.g., missing default pipelines or pipelines for receivers).
+
+    The function performs the following steps:
+    - Retrieves the default pipeline from the default configuration.
+    - Validates the existence of pipelines in the default configuration.
+    - Iterates over each receiver in the input configurations to determine its associated pipeline.
+    - If the specified pipeline does not exist, logs an error and returns None.
+    - Appends the receiver to the appropriate pipeline's receivers list, creating the list if it does not exist.
+    """
+    pipelines = default_config.get("pipelines")
+    if not pipelines:
+        logging.error(
+            "No pipelines set in default config file:\n\n%s", yaml.dump(default_config)
+        )
+        return None
+
+    default_pipeline = default_config.get("pipeline_default")
+    if not default_pipeline:
+        logging.error(
+            "No default pipeline set in default config file:\n\n%s",
+            yaml.dump(default_config),
+        )
+        return None
+
+    if not assemble_pipelines(
+        "pipeline",
+        default_pipeline,
+        receiver_input_configs,
+        pipelines,
+        args.receiver_input_file,
+    ):
+        # A receiver referenced an unknown pipeline; don't emit a partial config.
+        return None
+
+    f5_pipeline_default = default_config.get("f5_pipeline_default")
+    enabled = default_config.get("f5_data_export", False)
+    f5_export_enabled = f5_pipeline_default and enabled
+    if not f5_export_enabled:
+        logging.warning(
+            "The f5_data_export=true and f5_pipeline_default fields are required to "
+            "export metrics periodically to F5. Contact your F5 Sales Rep to provision a "
+            "Sensor ID and Access Token.",
+        )
+    else:
+        if not assemble_pipelines(
+            "f5_pipeline",
+            f5_pipeline_default,
+            receiver_input_configs,
+            pipelines,
+            args.receiver_input_file,
+        ):
+            return None
+
+    # Drop pipelines with no attached receivers so the generated otel config
+    # does not contain empty (and therefore invalid) pipeline entries.
+    final_pipelines = {}
+    for pipeline, settings in pipelines.items():
+        receivers = settings.get("receivers", [])
+        if len(receivers) == 0:
+            continue
+        final_pipelines[pipeline] = settings
+    return final_pipelines
+
+
+def generate_configs(args):
+    """Generate configuration files for receivers and pipelines.
+
+    This function orchestrates the generation of configuration files by loading default settings and
+    receiver-specific configurations. It logs the process of generating both receiver and pipeline
+    configurations based on the provided arguments.
+
+    Args:
+        args (argparse.Namespace): The parsed command-line arguments containing file paths for
+        default configurations and receiver inputs.
+
+    Returns:
+        tuple: A tuple containing two dictionaries:
+            - receiver_output_configs (dict): The generated receiver configurations.
+            - pipeline_output_configs (dict): The generated pipeline configurations.
+            Both elements are None when either input file fails to load.
+    """
+    logging.info(
+        "Generating configs from %s and %s...",
+        args.default_config_file,
+        args.receiver_input_file,
+    )
+    default_config = load_default_config(args)
+    receiver_input_configs = load_receiver_config(args)
+    # Bail out before generation if either file failed to load (the loaders
+    # have already logged the specific cause); without this guard the
+    # generators would raise on a None config instead of failing gracefully.
+    if not default_config or not receiver_input_configs:
+        return None, None
+    logging.info("Generating receiver configs...")
+    receiver_output_configs = generate_receiver_configs(
+        receiver_input_configs, default_config
+    )
+    logging.info("Generating pipeline configs...")
+    pipeline_output_configs = generate_pipeline_configs(
+        receiver_input_configs, default_config, args
+    )
+    return receiver_output_configs, pipeline_output_configs
+
+
+def get_args():
+    """Initialize the argument parser.
+
+    Returns:
+        parser: argparse.ArgumentParser object with config_helper arguments specified.
+    """
+    parser = argparse.ArgumentParser(
+        description="A tool for helping with application study tool configurations."
+    )
+
+    parser.add_argument(
+        "--convert-legacy-config",
+        action="store_true",
+        help="Convert the legacy big-ips.json to the new format.",
+    )
+
+    parser.add_argument(
+        "--legacy-config-file",
+        type=str,
+        default="./config/big-ips.json",
+        help="Path to the legacy big-ips.json file to convert (default: ./config/big-ips.json).",
+    )
+
+    parser.add_argument(
+        "--dry-run", action="store_true", help="Don't write output to files"
+    )
+
+    parser.add_argument(
+        "--default-config-file",
+        type=str,
+        default="./config/ast_defaults.yaml",
+        help="Path to the default settings file to generate configs from (default: ./config/ast_defaults.yaml).",
+    )
+
+    parser.add_argument(
+        "--receiver-input-file",
+        type=str,
+        default="./config/bigip_receivers.yaml",
+        help="Path to the receiver settings input file (bigIP Configs) to generate configs from (default: ./config/bigip_receivers.yaml).",
+    )
+
+    parser.add_argument(
+        "--generate-configs",
+        action="store_true",
+        help="Read files in config directory and write AST Otel Config",
+    )
+
+    parser.add_argument(
+        "--receiver-output-file",
+        type=str,
+        default="./services/otel_collector/receivers.yaml",
+        help="Path to the receiver settings otel file (default: ./services/otel_collector/receivers.yaml).",
+    )
+
+    parser.add_argument(
+        "--pipelines-output-file",
+        type=str,
+        default="./services/otel_collector/pipelines.yaml",
+        help="Path to the pipeline settings otel config file (default: ./services/otel_collector/pipelines.yaml).",
+    )
+    return parser
+
+
+def main():
+    """Main entry point for the configuration management tool.
+
+    This function orchestrates the command-line interface for the application, handling user input
+    and executing the appropriate actions based on the specified command-line arguments. It supports
+    converting legacy configurations and generating new configuration files.
+
+    Steps performed by this function:
+    - Parses command-line arguments using `get_args`.
+    - If the `--convert-legacy-config` flag is provided:
+        - Calls `convert_legacy_config` to convert the legacy configuration file.
+        - Logs the converted output and writes it to a specified YAML file unless in dry-run mode.
+    - If the `--generate-configs` flag is specified:
+        - Calls `generate_configs` to create new receiver and pipeline configurations.
+        - Logs the generated configurations and writes them to their respective output files unless in dry-run mode.
+    - If neither action is specified, logs an informational message prompting the user to choose an action.
+    """
+    parser = get_args()
+
+    args = parser.parse_args()
+
+    if args.convert_legacy_config:
+        new_receivers = convert_legacy_config(args)
+        if not new_receivers:
+            return
+        logging.info(
+            "Converted the legacy config to the following "
+            "bigip_receivers.yaml output:\n\n%s",
+            yaml.dump(new_receivers, default_flow_style=False),
+        )
+        if not args.dry_run:
+            write_yaml_to_file(new_receivers, args.receiver_input_file)
+        return
+
+    if args.generate_configs:
+        receiver_config, pipeline_config = generate_configs(args)
+        if not receiver_config or not pipeline_config:
+            return
+        logging.info(
+            "Built the following pipeline file:\n\n%s",
+            yaml.dump(pipeline_config, default_flow_style=False),
+        )
+        logging.info(
+            "Built the following receiver file:\n\n%s",
+            yaml.dump(receiver_config, default_flow_style=False),
+        )
+        if not args.dry_run:
+            write_yaml_to_file(pipeline_config, args.pipelines_output_file)
+            write_yaml_to_file(receiver_config, args.receiver_output_file)
+        return
+
+    logging.info(
+        "Found nothing to do... Try running with --convert-legacy-config or --generate-configs..."
+    )
+
+
+if __name__ == "__main__":
+    main()
diff --git a/src/config_helper_test.py b/src/config_helper_test.py
new file mode 100644
index 0000000..8f2961e
--- /dev/null
+++ b/src/config_helper_test.py
@@ -0,0 +1,283 @@
+import unittest
+from unittest.mock import patch, MagicMock
+import logging
+import yaml
+
+# Functions under test live in config_helper.py alongside this file (src/
+# must be on sys.path when running these tests).
+# NOTE(review): the logging and yaml imports appear unused in this module.
+from config_helper import (
+    convert_legacy_config,
+    deep_merge,
+    generate_receiver_configs,
+    generate_pipeline_configs,
+    generate_configs,
+)
+
+
+class TestConvertLegacyConfig(unittest.TestCase):
+    """Tests for convert_legacy_config (legacy big-ips.json -> new YAML format)."""
+
+    @patch("config_helper.load_default_config")
+    @patch("config_helper.load_legacy_config")
+    def test_convert_legacy_config_success(self, mock_load_legacy, mock_load_defaults):
+        # Setup mock return values
+        mock_load_defaults.return_value = {
+            "bigip_receiver_defaults": {
+                "collection_interval": "10s",
+                "password": "${env:default_password}",
+                "tls": {"insecure_skip_verify": False, "ca_file": "/path/to/ca.crt"},
+            }
+        }
+        mock_load_legacy.return_value = [
+            {
+                "collection_interval": 10,
+                "password_env_ref": "secret_password",
+                "tls_insecure_skip_verify": True,
+            },
+            {"collection_interval": 15, "ca_file": "/path/to/new_ca.crt"},
+        ]
+
+        # Mock args
+        class Args:
+            legacy_config_file = "path/to/legacy_config.json"
+            default_config_file = "path/to/default_config.yaml"
+
+        args = Args()
+
+        result = convert_legacy_config(args)
+
+        # Values matching the defaults (e.g. device 1's 10s interval) are
+        # expected to be omitted from the converted output.
+        expected_output = {
+            "bigip/1": {
+                "password": "${env:secret_password}",
+                "tls": {"insecure_skip_verify": True},
+            },
+            "bigip/2": {
+                "collection_interval": "15s",
+                "tls": {"ca_file": "/path/to/new_ca.crt"},
+            },
+        }
+
+        self.assertEqual(result, expected_output)
+
+    @patch("config_helper.load_default_config")
+    @patch("config_helper.load_legacy_config")
+    def test_convert_legacy_config_no_default(
+        self, mock_load_legacy, mock_load_defaults
+    ):
+        mock_load_defaults.return_value = None
+        mock_load_legacy.return_value = []
+
+        class Args:
+            legacy_config_file = "path/to/legacy_config.json"
+            default_config_file = "path/to/default_config.yaml"
+
+        args = Args()
+
+        result = convert_legacy_config(args)
+
+        self.assertIsNone(result)
+
+    @patch("config_helper.load_default_config")
+    @patch("config_helper.load_legacy_config")
+    def test_convert_legacy_config_no_receiver_defaults(
+        self, mock_load_legacy, mock_load_defaults
+    ):
+        mock_load_defaults.return_value = {}
+        mock_load_legacy.return_value = []
+
+        class Args:
+            legacy_config_file = "path/to/legacy_config.json"
+            default_config_file = "path/to/default_config.yaml"
+
+        args = Args()
+
+        result = convert_legacy_config(args)
+
+        self.assertIsNone(result)
+
+    @patch("config_helper.load_default_config")
+    @patch("config_helper.load_legacy_config")
+    def test_convert_legacy_config_no_legacy_config(
+        self, mock_load_legacy, mock_load_defaults
+    ):
+        mock_load_defaults.return_value = {
+            "bigip_receiver_defaults": {
+                "collection_interval": "10s",
+                "password": "${env:default_password}",
+            }
+        }
+        mock_load_legacy.return_value = None
+
+        class Args:
+            legacy_config_file = "path/to/legacy_config.json"
+            default_config_file = "path/to/default_config.yaml"
+
+        args = Args()
+
+        result = convert_legacy_config(args)
+
+        self.assertIsNone(result)
+
+
+class TestConfigFunctions(unittest.TestCase):
+    """Tests for deep_merge and the receiver/pipeline config generators."""
+
+    def test_deep_merge(self):
+        dict1 = {"key1": {"subkey1": "value1", "subkey2": "value2"}, "key2": "value3"}
+        dict2 = {
+            "key1": {"subkey2": "new_value2", "subkey3": "value4"},
+            "key3": "value5",
+        }
+
+        expected_merged = {
+            "key1": {"subkey1": "value1", "subkey2": "new_value2", "subkey3": "value4"},
+            "key2": "value3",
+            "key3": "value5",
+        }
+
+        result = deep_merge(dict1, dict2)
+        self.assertEqual(result, expected_merged)
+
+    def test_generate_receiver_configs(self):
+        receiver_input_configs = {
+            "receiver1": {"setting1": "value1", "pipeline": "some_pipeline"},
+            "receiver2": {"setting2": "value2"},
+        }
+
+        default_config = {
+            "bigip_receiver_defaults": {
+                "setting1": "default_value1",
+                "setting2": "default_value2",
+            }
+        }
+
+        expected_output = {
+            "receiver1": {"setting1": "value1", "setting2": "default_value2"},
+            "receiver2": {"setting2": "value2", "setting1": "default_value1"},
+        }
+
+        result = generate_receiver_configs(receiver_input_configs, default_config)
+        self.assertEqual(result, expected_output)
+
+    @patch("config_helper.logging.error")
+    def test_generate_pipeline_configs_no_pipeline(self, mock_error):
+        receiver_input_configs = {"receiver1": {"pipeline": "pipeline1"}}
+        default_config = {}
+        args = MagicMock()
+        args.receiver_input_file = "dummy_file.yaml"
+
+        result = generate_pipeline_configs(receiver_input_configs, default_config, args)
+        self.assertIsNone(result)
+        mock_error.assert_called_once()
+
+    @patch("config_helper.load_default_config")
+    @patch("config_helper.load_receiver_config")
+    @patch("config_helper.logging.info")
+    def test_generate_configs(self, mock_info, mock_load_receiver, mock_load_default):
+        mock_load_default.return_value = {
+            "bigip_receiver_defaults": {},
+            "pipeline_default": "default_pipeline",
+            "pipelines": {"default_pipeline": {"receivers": []}},
+        }
+
+        mock_load_receiver.return_value = {
+            "receiver1": {"pipeline": "default_pipeline"}
+        }
+
+        args = MagicMock()
+        args.default_config_file = "default.yaml"
+        args.receiver_input_file = "input.yaml"
+
+        receiver_output, pipeline_output = generate_configs(args)
+
+        self.assertIsNotNone(receiver_output)
+        self.assertIsNotNone(pipeline_output)
+        self.assertIn("receiver1", pipeline_output["default_pipeline"]["receivers"])
+        mock_info.assert_any_call(
+            "Generating configs from %s and %s...",
+            args.default_config_file,
+            args.receiver_input_file,
+        )
+
+    @patch("config_helper.load_default_config")
+    @patch("config_helper.load_receiver_config")
+    @patch("config_helper.logging.info")
+    def test_generate_configs_f5_export(
+        self, mock_info, mock_load_receiver, mock_load_default
+    ):
+        # f5_data_export=True plus f5_pipeline_default should route receivers
+        # into the F5 export pipeline as well as their primary pipeline.
+        mock_load_default.return_value = {
+            "bigip_receiver_defaults": {},
+            "f5_data_export": True,
+            "pipeline_default": "default_pipeline",
+            "f5_pipeline_default": "default_pipeline2",
+            "pipelines": {
+                "default_pipeline": {"receivers": []},
+                "default_pipeline2": {"receivers": []},
+            },
+        }
+
+        mock_load_receiver.return_value = {
+            "receiver1": {
+                "pipeline": "default_pipeline",
+                "f5_pipeline": "default_pipeline2",
+            },
+            "receiver2": {"pipeline": "default_pipeline"},
+        }
+
+        args = MagicMock()
+        args.default_config_file = "default.yaml"
+        args.receiver_input_file = "input.yaml"
+
+        receiver_output, pipeline_output = generate_configs(args)
+
+        self.assertIsNotNone(receiver_output)
+        self.assertIsNotNone(pipeline_output)
+        self.assertIn("receiver1", pipeline_output["default_pipeline"]["receivers"])
+        self.assertIn("receiver1", pipeline_output["default_pipeline2"]["receivers"])
+        self.assertIn("receiver2", pipeline_output["default_pipeline2"]["receivers"])
+        mock_info.assert_any_call(
+            "Generating configs from %s and %s...",
+            args.default_config_file,
+            args.receiver_input_file,
+        )
+
+    @patch("config_helper.load_default_config")
+    @patch("config_helper.load_receiver_config")
+    @patch("config_helper.logging.info")
+    def test_generate_configs_f5_export_not_true(
+        self, mock_info, mock_load_receiver, mock_load_default
+    ):
+        # Without f5_data_export=True the F5 pipeline collects no receivers
+        # and must be dropped from the generated output entirely.
+        mock_load_default.return_value = {
+            "bigip_receiver_defaults": {},
+            "pipeline_default": "default_pipeline",
+            "f5_pipeline_default": "default_pipeline2",
+            "pipelines": {
+                "default_pipeline": {"receivers": []},
+                "default_pipeline2": {"receivers": []},
+            },
+        }
+
+        mock_load_receiver.return_value = {
+            "receiver1": {
+                "pipeline": "default_pipeline",
+                "f5_pipeline": "default_pipeline2",
+            },
+            "receiver2": {"pipeline": "default_pipeline"},
+        }
+
+        args = MagicMock()
+        args.default_config_file = "default.yaml"
+        args.receiver_input_file = "input.yaml"
+
+        receiver_output, pipeline_output = generate_configs(args)
+
+        self.assertIsNotNone(receiver_output)
+        self.assertIsNotNone(pipeline_output)
+        self.assertIn("receiver1", pipeline_output["default_pipeline"]["receivers"])
+        self.assertNotIn("default_pipeline2", pipeline_output)
+        mock_info.assert_any_call(
+            "Generating configs from %s and %s...",
+            args.default_config_file,
+            args.receiver_input_file,
+        )
+
+
+if __name__ == "__main__":
+    unittest.main()