From 8f6e95f1a3ff9db369613134d2deb4ee92e8d7e4 Mon Sep 17 00:00:00 2001 From: Sarah Chavis <62406755+schavis@users.noreply.github.com> Date: Thu, 19 Dec 2024 16:21:33 -0800 Subject: [PATCH 01/15] [DOCS] SEO updates for config docs (#29242) * seo updates for config docs * fix content errors * Make KMS title consistent with other stanza titles --- .../adaptive-overload-protection.mdx | 10 +- .../configuration/entropy-augmentation.mdx | 8 +- website/content/docs/configuration/index.mdx | 7 +- .../docs/configuration/kms-library.mdx | 6 +- .../docs/configuration/listener/index.mdx | 8 +- .../docs/configuration/listener/tcp/index.mdx | 7 +- .../docs/configuration/listener/unix.mdx | 8 +- .../docs/configuration/log-requests-level.mdx | 8 +- .../prevent-lease-explosions.mdx | 2 +- .../configuration/programmatic-management.mdx | 2 +- .../docs/configuration/replication.mdx | 2 +- .../content/docs/configuration/reporting.mdx | 7 +- .../docs/configuration/seal/alicloudkms.mdx | 9 +- .../docs/configuration/seal/awskms.mdx | 9 +- .../docs/configuration/seal/azurekeyvault.mdx | 9 +- .../docs/configuration/seal/gcpckms.mdx | 9 +- .../content/docs/configuration/seal/index.mdx | 5 +- .../docs/configuration/seal/ocikms.mdx | 9 +- .../docs/configuration/seal/pkcs11.mdx | 9 +- .../seal/seal-best-practices.mdx | 6 +- .../docs/configuration/seal/seal-ha.mdx | 9 +- .../seal/transit-best-practices.mdx | 5 +- .../docs/configuration/seal/transit.mdx | 9 +- .../content/docs/configuration/sentinel.mdx | 4 +- .../service-registration/consul.mdx | 9 +- .../service-registration/index.mdx | 8 +- .../service-registration/kubernetes.mdx | 7 +- .../docs/configuration/storage/aerospike.mdx | 9 +- .../configuration/storage/alicloudoss.mdx | 9 +- .../docs/configuration/storage/azure.mdx | 10 +- .../docs/configuration/storage/cassandra.mdx | 9 +- .../configuration/storage/cockroachdb.mdx | 7 +- .../docs/configuration/storage/consul.mdx | 11 +- .../docs/configuration/storage/couchdb.mdx | 9 +- 
.../docs/configuration/storage/dynamodb.mdx | 9 +- .../docs/configuration/storage/etcd.mdx | 10 +- .../docs/configuration/storage/filesystem.mdx | 11 +- .../configuration/storage/foundationdb.mdx | 9 +- .../storage/google-cloud-spanner.mdx | 10 +- .../storage/google-cloud-storage.mdx | 9 +- .../docs/configuration/storage/in-memory.mdx | 11 +- .../docs/configuration/storage/index.mdx | 11 +- .../docs/configuration/storage/manta.mdx | 9 +- .../docs/configuration/storage/mssql.mdx | 9 +- .../docs/configuration/storage/mysql.mdx | 9 +- .../storage/oci-object-storage.mdx | 4 +- .../docs/configuration/storage/postgresql.mdx | 9 +- .../docs/configuration/storage/raft.mdx | 10 +- .../content/docs/configuration/storage/s3.mdx | 9 +- .../docs/configuration/storage/swift.mdx | 9 +- .../docs/configuration/storage/zookeeper.mdx | 7 +- .../content/docs/configuration/telemetry.mdx | 6 +- website/content/docs/configuration/ui.mdx | 32 ++-- .../docs/configuration/user-lockout.mdx | 9 +- website/data/docs-nav-data.json | 154 +++++++++--------- 55 files changed, 283 insertions(+), 328 deletions(-) diff --git a/website/content/docs/configuration/adaptive-overload-protection.mdx b/website/content/docs/configuration/adaptive-overload-protection.mdx index 208413f7769f..d9f19b8a7cad 100644 --- a/website/content/docs/configuration/adaptive-overload-protection.mdx +++ b/website/content/docs/configuration/adaptive-overload-protection.mdx @@ -1,12 +1,12 @@ --- layout: docs -page_title: Adaptive overload protection - Configuration -description: |- - Use adaptive overload protection with Vault Enterprise to automatically - prevent workloads from overloading different resources of your Vault servers. +page_title: adaptive_overload_protection - Configuration +description: >- + Configure the adaptive_overload_protection stanza to customize automatic + prevention for resource overloads in Vault Enterprise servers. 
--- -# `adaptive_overload_protection` +# `adaptive_overload_protection` stanza @include 'alerts/enterprise-only.mdx' diff --git a/website/content/docs/configuration/entropy-augmentation.mdx b/website/content/docs/configuration/entropy-augmentation.mdx index 70c81d2a2cb3..c4a3f2442d69 100644 --- a/website/content/docs/configuration/entropy-augmentation.mdx +++ b/website/content/docs/configuration/entropy-augmentation.mdx @@ -1,12 +1,12 @@ --- layout: docs -page_title: Entropy Augmentation - Configuration +page_title: Configure entropy augmentation description: >- - Entropy augmentation enables Vault to sample entropy from external - cryptographic modules. + Configure entropy augmentation to sample entropy from external cryptographic + modules when sealing Vault. --- -# `Entropy augmentation` seal +# Configure entropy augmentation Entropy augmentation enables Vault to sample entropy from external cryptographic modules. Sourcing external entropy is done by configuring a supported [Seal](/vault/docs/configuration/seal) type which diff --git a/website/content/docs/configuration/index.mdx b/website/content/docs/configuration/index.mdx index edf1907a2c7c..590742bec792 100644 --- a/website/content/docs/configuration/index.mdx +++ b/website/content/docs/configuration/index.mdx @@ -1,10 +1,11 @@ --- layout: docs -page_title: Server Configuration -description: Vault server configuration reference. +page_title: Vault configuration parameters +description: >- + Example Vault configuration file and high-level parameter reference. --- -# Vault configuration +# Vault configuration parameters Outside of development mode, Vault servers are configured using a file. The format of this file is [HCL](https://github.com/hashicorp/hcl) or JSON. 
diff --git a/website/content/docs/configuration/kms-library.mdx b/website/content/docs/configuration/kms-library.mdx index d83655abb5c1..68adbff10b22 100644 --- a/website/content/docs/configuration/kms-library.mdx +++ b/website/content/docs/configuration/kms-library.mdx @@ -1,9 +1,9 @@ --- layout: docs -page_title: Kms Library - Configuration +page_title: kms_library - Configuration description: >- - The kms_library stanza allows node specific configuration for access to - KMS access libraries + Configure the kms_library stanza to customize node-specific access to KMS + access libraries --- # `kms_library` stanza diff --git a/website/content/docs/configuration/listener/index.mdx b/website/content/docs/configuration/listener/index.mdx index ec188c9d78d4..d480d382681a 100644 --- a/website/content/docs/configuration/listener/index.mdx +++ b/website/content/docs/configuration/listener/index.mdx @@ -1,9 +1,9 @@ --- layout: docs -page_title: Listeners - Configuration -description: |- - The listener stanza configures the addresses and ports on which Vault will - respond to requests. +page_title: listener - Configuration +description: >- + Configure the listener stanza to customize the addresses and ports where Vault + listens for, and responds to, requests. --- # `listener` stanza diff --git a/website/content/docs/configuration/listener/tcp/index.mdx b/website/content/docs/configuration/listener/tcp/index.mdx index aed9b722ac71..2fe9fd3166ba 100644 --- a/website/content/docs/configuration/listener/tcp/index.mdx +++ b/website/content/docs/configuration/listener/tcp/index.mdx @@ -1,12 +1,11 @@ --- layout: docs -page_title: TCP - Listeners - Configuration +page_title: TCP listener configuration description: >- - The TCP listener configures Vault to listen on the specified TCP address and - port. + Configure Vault to listen on a TCP address and port. 
--- -# `tcp` listener +# TCP listener configuration @include 'alerts/ipv6-compliance.mdx' diff --git a/website/content/docs/configuration/listener/unix.mdx b/website/content/docs/configuration/listener/unix.mdx index f3c15151da30..44671c8cb5c2 100644 --- a/website/content/docs/configuration/listener/unix.mdx +++ b/website/content/docs/configuration/listener/unix.mdx @@ -1,11 +1,11 @@ --- layout: docs -page_title: Unix - Listeners - Configuration -description: |- - The Unix listener configures Vault to listen on the specified Unix domain socket. +page_title: Unix listener configuration +description: >- + Configure Vault to listen on a Unix domain socket. --- -# `unix` listener +# Unix listener configuration The Unix listener configures Vault to listen on the specified Unix domain socket. diff --git a/website/content/docs/configuration/log-requests-level.mdx b/website/content/docs/configuration/log-requests-level.mdx index 856f8bac6e37..35cdb3b7a4b9 100644 --- a/website/content/docs/configuration/log-requests-level.mdx +++ b/website/content/docs/configuration/log-requests-level.mdx @@ -1,11 +1,11 @@ --- layout: docs -page_title: Log Completed Requests - Configuration -description: |- - Vault can be configured to log completed requests. +page_title: Configure completed request logging +description: >- + Configure the logging level for completed requests in Vault. --- -# Log completed requests +# Configure completed request logging Vault can be configured to log completed requests using the `log_requests_level` configuration parameter. 
diff --git a/website/content/docs/configuration/prevent-lease-explosions.mdx b/website/content/docs/configuration/prevent-lease-explosions.mdx index 3029c7ed45e2..86cc32a1cf9a 100644 --- a/website/content/docs/configuration/prevent-lease-explosions.mdx +++ b/website/content/docs/configuration/prevent-lease-explosions.mdx @@ -2,7 +2,7 @@ layout: docs page_title: Prevent lease explosions description: >- - Learn how to prevent lease explosions in Vault. + Best practices for avoiding, and dealing with, lease explosions in Vault. --- # Prevent lease explosions diff --git a/website/content/docs/configuration/programmatic-management.mdx b/website/content/docs/configuration/programmatic-management.mdx index 45f65ac9d355..4851db56bfb9 100644 --- a/website/content/docs/configuration/programmatic-management.mdx +++ b/website/content/docs/configuration/programmatic-management.mdx @@ -1,6 +1,6 @@ --- layout: docs -page_title: Manage Vault resources programmatically +page_title: Manage Vault programmatically with Terraform description: >- Step-by-step instructions for managing Vault resources programmatically with Terraform diff --git a/website/content/docs/configuration/replication.mdx b/website/content/docs/configuration/replication.mdx index a858f9f80efd..2a05d585a310 100644 --- a/website/content/docs/configuration/replication.mdx +++ b/website/content/docs/configuration/replication.mdx @@ -2,7 +2,7 @@ layout: docs page_title: Replication - Configuration description: |- - The replication stanza specifies various parameters for tuning replication related values. + Configure the replication stanza to tune replication behavior in Vault. 
--- # `replication` stanza diff --git a/website/content/docs/configuration/reporting.mdx b/website/content/docs/configuration/reporting.mdx index 4978d6dd3654..792940007a28 100644 --- a/website/content/docs/configuration/reporting.mdx +++ b/website/content/docs/configuration/reporting.mdx @@ -1,11 +1,12 @@ --- layout: docs page_title: Reporting - Configuration -description: |- - The reporting stanza specifies various parameters for tuning reporting and licensing related values. +description: >- + Configure the reporting stanza to customize the reporting behavior for tuning + and licensing in Vault. --- -# `replication` stanza +# `reporting` stanza @include 'alerts/enterprise-only.mdx' diff --git a/website/content/docs/configuration/seal/alicloudkms.mdx b/website/content/docs/configuration/seal/alicloudkms.mdx index 1f3f55c58ab1..28eff85cd439 100644 --- a/website/content/docs/configuration/seal/alicloudkms.mdx +++ b/website/content/docs/configuration/seal/alicloudkms.mdx @@ -1,14 +1,11 @@ --- layout: docs -page_title: AliCloud KMS - Seals - Configuration +page_title: AliCloud KMS seal configuration description: >- - The AliCloud KMS seal configures Vault to use AliCloud KMS as the seal - wrapping - - mechanism. + Configure Vault sealing to use AliCloud KMS. --- -# `alicloudkms` seal +# AliCloud KMS seal configuration diff --git a/website/content/docs/configuration/seal/awskms.mdx b/website/content/docs/configuration/seal/awskms.mdx index 66d51f8fc830..0c0b73c72f30 100644 --- a/website/content/docs/configuration/seal/awskms.mdx +++ b/website/content/docs/configuration/seal/awskms.mdx @@ -1,12 +1,11 @@ --- layout: docs -page_title: AWS KMS - Seals - Configuration -description: |- - The AWS KMS seal configures Vault to use AWS KMS as the seal wrapping - mechanism. +page_title: AWS KMS seal configuration +description: >- + Configure Vault sealing to use AWS KMS. 
--- -# `awskms` seal +# AWS KMS seal configuration diff --git a/website/content/docs/configuration/seal/azurekeyvault.mdx b/website/content/docs/configuration/seal/azurekeyvault.mdx index 43b5d7dc275c..40b62f372169 100644 --- a/website/content/docs/configuration/seal/azurekeyvault.mdx +++ b/website/content/docs/configuration/seal/azurekeyvault.mdx @@ -1,14 +1,11 @@ --- layout: docs -page_title: Azure Key Vault - Seals - Configuration +page_title: Azure Key Vault seal configuration description: >- - The Azure Key Vault seal configures Vault to use Azure Key Vault as the seal - wrapping - - mechanism. + Configure Vault sealing to use Azure Key Vault. --- -# `azurekeyvault` seal +# Azure Key Vault seal configuration diff --git a/website/content/docs/configuration/seal/gcpckms.mdx b/website/content/docs/configuration/seal/gcpckms.mdx index 01f2bf6f01f3..8483a0b7e11b 100644 --- a/website/content/docs/configuration/seal/gcpckms.mdx +++ b/website/content/docs/configuration/seal/gcpckms.mdx @@ -1,14 +1,11 @@ --- layout: docs -page_title: GCP Cloud KMS - Seals - Configuration +page_title: GCP Cloud KMS seal configuration description: >- - The GCP Cloud KMS seal configures Vault to use GCP Cloud KMS as the seal - wrapping - - mechanism. + Configure Vault sealing to use GCP Cloud KMS. --- -# `gcpckms` seal +# GCP Cloud KMS seal configuration diff --git a/website/content/docs/configuration/seal/index.mdx b/website/content/docs/configuration/seal/index.mdx index 1f5dd3e6a481..405abfb95780 100644 --- a/website/content/docs/configuration/seal/index.mdx +++ b/website/content/docs/configuration/seal/index.mdx @@ -1,9 +1,8 @@ --- layout: docs -page_title: Seals - Configuration +page_title: seal - Configuration description: >- - The seal stanza configures the seal type to use for additional data - protection. + Configure the seal stanza to customize the sealing behavior for Vault. 
--- # `seal` stanza diff --git a/website/content/docs/configuration/seal/ocikms.mdx b/website/content/docs/configuration/seal/ocikms.mdx index ac5c2b5f59be..1697fe21be16 100644 --- a/website/content/docs/configuration/seal/ocikms.mdx +++ b/website/content/docs/configuration/seal/ocikms.mdx @@ -1,12 +1,11 @@ --- layout: docs -page_title: OCI KMS - Seals - Configuration -description: |- - The OCI KMS seal configures Vault to use OCI KMS as the seal wrapping - mechanism. +page_title: OCI KMS seal configuration +description: >- + Configure Vault sealing to use OCI KMS. --- -# `ocikms` seal +# OCI KMS seal configuration diff --git a/website/content/docs/configuration/seal/pkcs11.mdx b/website/content/docs/configuration/seal/pkcs11.mdx index 67371ae4a074..4f7fa3f87ead 100644 --- a/website/content/docs/configuration/seal/pkcs11.mdx +++ b/website/content/docs/configuration/seal/pkcs11.mdx @@ -1,12 +1,11 @@ --- layout: docs -page_title: PKCS11 - Seals - Configuration -description: |- - The PKCS11 seal configures Vault to use an HSM with PKCS11 as the seal - wrapping mechanism. +page_title: HSM PKCS11 seal configuration +description: >- + Configure Vault sealing to use PKCS11. --- -# `pkcs11` seal +# HSM PKCS11 seal configuration diff --git a/website/content/docs/configuration/seal/seal-best-practices.mdx b/website/content/docs/configuration/seal/seal-best-practices.mdx index c07bb4c8a2fc..9e8cd40d1933 100644 --- a/website/content/docs/configuration/seal/seal-best-practices.mdx +++ b/website/content/docs/configuration/seal/seal-best-practices.mdx @@ -1,11 +1,11 @@ --- layout: docs -page_title: Seal best practices +page_title: Sealing best practices description: >- - The recommended pattern and best practices for unsealing a production Vault cluster. + Best practices for configuring seal and unseal behavior in Vault. 
--- -# Seal best practices +# Sealing best practices This documentation explains the concepts, options, and considerations for unsealing a production Vault cluster. It builds on the [Reference Architecture](/vault/tutorials/raft/raft-reference-architecture) and [Deployment Guide](/vault/tutorials/day-one-raft/raft-deployment-guide) for Vault to deliver a pattern for a common Vault use case. diff --git a/website/content/docs/configuration/seal/seal-ha.mdx b/website/content/docs/configuration/seal/seal-ha.mdx index 77b19dd8a4fb..3e95d6033e7f 100644 --- a/website/content/docs/configuration/seal/seal-ha.mdx +++ b/website/content/docs/configuration/seal/seal-ha.mdx @@ -1,11 +1,12 @@ --- layout: docs -page_title: Seal High Availability - Seals - Configuration -description: |- - How to configure multiple Seals for high availability. +page_title: Configure HA for Vault seals +description: >- + Customize the seal stanza to support multiple seals in high availability + deployments. --- -# Seal High Availability +# Configure high availability for Vault seals @include 'alerts/enterprise-only.mdx' diff --git a/website/content/docs/configuration/seal/transit-best-practices.mdx b/website/content/docs/configuration/seal/transit-best-practices.mdx index 840115c07e18..afcd4f021144 100644 --- a/website/content/docs/configuration/seal/transit-best-practices.mdx +++ b/website/content/docs/configuration/seal/transit-best-practices.mdx @@ -2,10 +2,11 @@ layout: docs page_title: Transit auto-unseal best practices description: >- - Recommendations and best practices for using Vault's transit secrets engine to auto-unseal your production Vault clusters. + Best practices for using the Transit plugin for auto-unsealing production + Vault clusters. 
--- -# Transit auto-unseal best practices +# Auto-unseal best practices for the Transit plugin This document provides a framework for creating a usable solution for auto-unseal using Vault when HSM or cloud-based KMS auto-unseal mechanism is not available for your environment, such as in an internal data center deployment. diff --git a/website/content/docs/configuration/seal/transit.mdx b/website/content/docs/configuration/seal/transit.mdx index 29fc3fca1384..d6b9d666bbb5 100644 --- a/website/content/docs/configuration/seal/transit.mdx +++ b/website/content/docs/configuration/seal/transit.mdx @@ -1,12 +1,11 @@ --- layout: docs -page_title: Vault Transit - Seals - Configuration -description: |- - The Transit seal configures Vault to use Vault's Transit Secret Engine as the - autoseal mechanism. +page_title: Transit seal configuration +description: >- + Configure Vault sealing to use the Transit secrets plugin. --- -# `transit` seal +# Transit seal configuration diff --git a/website/content/docs/configuration/sentinel.mdx b/website/content/docs/configuration/sentinel.mdx index 8cbc8e27d8bd..72869dc0a961 100644 --- a/website/content/docs/configuration/sentinel.mdx +++ b/website/content/docs/configuration/sentinel.mdx @@ -1,8 +1,8 @@ --- layout: docs page_title: Sentinel - Configuration -description: |- - The sentinel stanza specifies configurations for Vault's Sentinel integration. +description: >- + Configure the sentinel stanza to customize your Sentinel integration. 
--- # `sentinel` stanza diff --git a/website/content/docs/configuration/service-registration/consul.mdx b/website/content/docs/configuration/service-registration/consul.mdx index b956d748fe6d..0efbe7da30ec 100644 --- a/website/content/docs/configuration/service-registration/consul.mdx +++ b/website/content/docs/configuration/service-registration/consul.mdx @@ -1,14 +1,11 @@ --- layout: docs -page_title: Consul - Service Registration - Configuration +page_title: Consul service registration description: >- - Consul Service Registration registers Vault as a service in Consul with a - default - - health check. + Configure Vault to use Consul to manage service registration. --- -# Consul service registration +# Configure Consul service registration Consul Service Registration registers Vault as a service in [Consul][consul] with a default health check. When Consul is configured as the storage backend, the stanza diff --git a/website/content/docs/configuration/service-registration/index.mdx b/website/content/docs/configuration/service-registration/index.mdx index 0882fd56d6df..1c95b105d1a6 100644 --- a/website/content/docs/configuration/service-registration/index.mdx +++ b/website/content/docs/configuration/service-registration/index.mdx @@ -1,9 +1,9 @@ --- layout: docs -page_title: Service Registration - Configuration -description: |- - The optional `service_registration` stanza configures Vault's mechanism for - service registration. +page_title: service_registration - Configuration +description: >- + Configure the `service_registration` stanza to customize the service + registration mechanism for Vault. 
--- # `service_registration` stanza diff --git a/website/content/docs/configuration/service-registration/kubernetes.mdx b/website/content/docs/configuration/service-registration/kubernetes.mdx index e34742893ebc..7088750cecd6 100644 --- a/website/content/docs/configuration/service-registration/kubernetes.mdx +++ b/website/content/docs/configuration/service-registration/kubernetes.mdx @@ -1,12 +1,11 @@ --- layout: docs -page_title: Kubernetes - Service Registration - Configuration +page_title: Kubernetes service registration description: >- - Kubernetes Service Registration labels Vault pods with their current status - for use with selectors. + Configure Vault to use Kubernetes to manage service registration. --- -# Kubernetes service registration +# Configure Kubernetes service registration Kubernetes Service Registration tags Vault pods with their current status for use with selectors. Service registration is only available when Vault is running in diff --git a/website/content/docs/configuration/storage/aerospike.mdx b/website/content/docs/configuration/storage/aerospike.mdx index c9bf68cef7d2..7974171f6676 100644 --- a/website/content/docs/configuration/storage/aerospike.mdx +++ b/website/content/docs/configuration/storage/aerospike.mdx @@ -1,12 +1,11 @@ --- layout: docs -page_title: Aerospike - Storage Backends - Configuration -description: |- - The Aerospike storage backend is used to persist Vault's data in an Aerospike - cluster. +page_title: Aerospike configuration +description: >- + Configure Vault backend storage to use Aerospike clusters. --- -# Aerospike storage backend +# Aerospike configuration for Vault backend storage The Aerospike storage backend is used to persist Vault's data in an [Aerospike][aerospike] cluster. 
diff --git a/website/content/docs/configuration/storage/alicloudoss.mdx b/website/content/docs/configuration/storage/alicloudoss.mdx index 5ff1197e1445..3188f07f5ed8 100644 --- a/website/content/docs/configuration/storage/alicloudoss.mdx +++ b/website/content/docs/configuration/storage/alicloudoss.mdx @@ -1,12 +1,11 @@ --- layout: docs -page_title: Alicloud OSS - Storage Backends - Configuration -description: |- - The Alicloud OSS storage backend is used to persist Vault's data in - an Alicloud OSS bucket. +page_title: Alicloud OSS configuration +description: >- + Configure Vault backend storage to use Alicloud OSS buckets. --- -# Alicloud OSS storage backend +# Alicloud OSS configuration for Vault backend storage The Alicloud OSS storage backend is used to persist Vault's data in an [Alicloud OSS][alicloudoss] bucket. diff --git a/website/content/docs/configuration/storage/azure.mdx b/website/content/docs/configuration/storage/azure.mdx index ab0b418496e2..bb8bf4864538 100644 --- a/website/content/docs/configuration/storage/azure.mdx +++ b/website/content/docs/configuration/storage/azure.mdx @@ -1,13 +1,11 @@ --- layout: docs -page_title: Azure - Storage Backends - Configuration -description: |- - The Azure storage backend is used to persist Vault's data in an Azure Storage - Container. The storage container must already exist and the provided account - credentials must have read and write permissions to the storage container. +page_title: Azure configuration +description: >- + Configure Vault backend storage to use Azure storage containers. --- -# Azure storage backend +# Azure configuration for Vault backend storage The Azure storage backend is used to persist Vault's data in an [Azure Storage Container][azure-storage]. 
The storage container must already diff --git a/website/content/docs/configuration/storage/cassandra.mdx b/website/content/docs/configuration/storage/cassandra.mdx index 1d0dd4ad3901..80d2c5e071dd 100644 --- a/website/content/docs/configuration/storage/cassandra.mdx +++ b/website/content/docs/configuration/storage/cassandra.mdx @@ -1,12 +1,11 @@ --- layout: docs -page_title: Cassandra - Storage Backends - Configuration -description: |- - The Cassandra storage backend is used to persist Vault's data in an Apache - Cassandra cluster. +page_title: Cassandra configuration +description: >- + Configure Vault backend storage to use an Apache Cassandra cluster. --- -# Cassandra storage backend +# Cassandra configuration for Vault backend storage The Cassandra storage backend is used to persist Vault's data in an [Apache Cassandra][cassandra] cluster. diff --git a/website/content/docs/configuration/storage/cockroachdb.mdx b/website/content/docs/configuration/storage/cockroachdb.mdx index 16f2b6766d36..07c86165caf6 100644 --- a/website/content/docs/configuration/storage/cockroachdb.mdx +++ b/website/content/docs/configuration/storage/cockroachdb.mdx @@ -1,12 +1,11 @@ --- layout: docs -page_title: CockroachDB - Storage Backends - Configuration +page_title: CockroachDB configuration description: >- - The CockroachDB storage backend is used to persist Vault's data in a - CockroachDB server or cluster. + Configure Vault backend storage to use CockroachDB servers or clusters. --- -# CockroachDB storage backend +# CockroachDB configuration for Vault backend storage The CockroachDB storage backend is used to persist Vault's data in a [CockroachDB][cockroachdb] server or cluster. 
diff --git a/website/content/docs/configuration/storage/consul.mdx b/website/content/docs/configuration/storage/consul.mdx index 939db3971ba1..3ac756b384ec 100644 --- a/website/content/docs/configuration/storage/consul.mdx +++ b/website/content/docs/configuration/storage/consul.mdx @@ -1,14 +1,11 @@ --- layout: docs -page_title: Consul - Storage Backends - Configuration -description: |- - The Consul storage backend is used to persist Vault's data in Consul's - key-value store. In addition to providing durable storage, inclusion of this - backend will also register Vault as a service in Consul with a default health - check. +page_title: Consul configuration +description: >- + Configure Vault backend storage to use a Consul key-value store. --- -# Consul storage backend +# Consul configuration for Vault backend storage The Consul storage backend is used to persist Vault's data in [Consul's][consul] key-value store. In addition to providing durable storage, inclusion of this diff --git a/website/content/docs/configuration/storage/couchdb.mdx b/website/content/docs/configuration/storage/couchdb.mdx index f4a5540f71d8..35f834cff06c 100644 --- a/website/content/docs/configuration/storage/couchdb.mdx +++ b/website/content/docs/configuration/storage/couchdb.mdx @@ -1,12 +1,11 @@ --- layout: docs -page_title: CouchDB - Storage Backends - Configuration -description: |- - The CouchDB storage backend is used to persist Vault's data in a CouchDB - database. +page_title: CouchDB configuration +description: >- + Configure Vault backend storage to use CouchDB. --- -# CouchDB storage backend +# CouchDB configuration for Vault backend storage The CouchDB storage backend is used to persist Vault's data in [CouchDB][couchdb] table. 
diff --git a/website/content/docs/configuration/storage/dynamodb.mdx b/website/content/docs/configuration/storage/dynamodb.mdx index ba5d7eabf70c..973c663de293 100644 --- a/website/content/docs/configuration/storage/dynamodb.mdx +++ b/website/content/docs/configuration/storage/dynamodb.mdx @@ -1,12 +1,11 @@ --- layout: docs -page_title: DynamoDB - Storage Backends - Configuration -description: |- - The DynamoDB storage backend is used to persist Vault's data in DynamoDB - table. +page_title: DynamoDB configuration +description: >- + Configure Vault backend storage to use DynamoDB tables. --- -# DynamoDB storage backend +# DynamoDB configuration for Vault backend storage The DynamoDB storage backend is used to persist Vault's data in [DynamoDB][dynamodb] table. diff --git a/website/content/docs/configuration/storage/etcd.mdx b/website/content/docs/configuration/storage/etcd.mdx index 6dbc234a8ce6..807a76136027 100644 --- a/website/content/docs/configuration/storage/etcd.mdx +++ b/website/content/docs/configuration/storage/etcd.mdx @@ -1,13 +1,11 @@ --- layout: docs -page_title: Etcd - Storage Backends - Configuration -description: |- - The Etcd storage backend is used to persist Vault's data in Etcd. It supports - both the v2 and v3 Etcd APIs, and the version is automatically detected based - on the version of the Etcd cluster. +page_title: Etcd configuration +description: >- + Configure Vault backend storage to use Etcd clusters. --- -# Etcd storage backend +# Etcd configuration for Vault backend storage The Etcd storage backend is used to persist Vault's data in [Etcd][etcd]. 
It supports both the v2 and v3 Etcd APIs, and the version is automatically detected diff --git a/website/content/docs/configuration/storage/filesystem.mdx b/website/content/docs/configuration/storage/filesystem.mdx index 8c4c2498f070..9ca1484101ec 100644 --- a/website/content/docs/configuration/storage/filesystem.mdx +++ b/website/content/docs/configuration/storage/filesystem.mdx @@ -1,13 +1,12 @@ --- layout: docs -page_title: Filesystem - Storage Backends - Configuration -description: |- - The Filesystem storage backend stores Vault's data on the filesystem using a - standard directory structure. It can be used for durable single server - situations, or to develop locally where durability is not critical. +page_title: File system configuration +description: >- + Configure Vault backend storage to use a standard directory structure on the + local filesystem. --- -# Filesystem storage backend +# File system configuration for Vault backend storage The Filesystem storage backend stores Vault's data on the filesystem using a standard directory structure. It can be used for durable single server diff --git a/website/content/docs/configuration/storage/foundationdb.mdx b/website/content/docs/configuration/storage/foundationdb.mdx index 2eeacb6b8c7d..c9cfc6a5bc3c 100644 --- a/website/content/docs/configuration/storage/foundationdb.mdx +++ b/website/content/docs/configuration/storage/foundationdb.mdx @@ -1,12 +1,11 @@ --- layout: docs -page_title: FoundationDB - Storage Backends - Configuration -description: |- - The FoundationDB storage backend is used to persist Vault's data in the - FoundationDB KV store. +page_title: FoundationDB configuration +description: >- + Configure Vault backend storage to use the FoundationDB KV store. --- -# FoundationDB storage backend +# FoundationDB configuration for Vault backend storage The FoundationDB storage backend is used to persist Vault's data in [FoundationDB][foundationdb]. 
diff --git a/website/content/docs/configuration/storage/google-cloud-spanner.mdx b/website/content/docs/configuration/storage/google-cloud-spanner.mdx index e5511fb07209..682835ffc45a 100644 --- a/website/content/docs/configuration/storage/google-cloud-spanner.mdx +++ b/website/content/docs/configuration/storage/google-cloud-spanner.mdx @@ -1,13 +1,11 @@ --- layout: docs -page_title: Google Cloud Spanner - Storage Backends - Configuration -description: |- - The Google Cloud Spanner storage backend is used to persist Vault's data in - Spanner, a fully managed, mission-critical, relational database service that - offers transactional consistency at global scale. +page_title: Google Cloud Spanner configuration +description: >- + Configure Vault backend storage to use Google Cloud Spanner. --- -# Google Cloud spanner storage backend +# Google Cloud Spanner configuration for Vault backend storage The Google Cloud Spanner storage backend is used to persist Vault's data in [Spanner][spanner-docs], a fully managed, mission-critical, relational database diff --git a/website/content/docs/configuration/storage/google-cloud-storage.mdx b/website/content/docs/configuration/storage/google-cloud-storage.mdx index eddbf93f661d..c967c78277f3 100644 --- a/website/content/docs/configuration/storage/google-cloud-storage.mdx +++ b/website/content/docs/configuration/storage/google-cloud-storage.mdx @@ -1,12 +1,11 @@ --- layout: docs -page_title: Google Cloud Storage - Storage Backends - Configuration -description: |- - The Google Cloud Storage storage backend is used to persist Vault's data in - Google Cloud Storage. +page_title: Google Cloud Storage configuration +description: >- + Configure Vault backend storage to use Google Cloud Storage. --- -# Google Cloud storage storage backend +# Google Cloud Storage configuration for Vault backend storage The Google Cloud Storage storage backend is used to persist Vault's data in [Google Cloud Storage][gcs-docs]. 
diff --git a/website/content/docs/configuration/storage/in-memory.mdx b/website/content/docs/configuration/storage/in-memory.mdx index 0f8f1f6f1d8f..5638094c5f74 100644 --- a/website/content/docs/configuration/storage/in-memory.mdx +++ b/website/content/docs/configuration/storage/in-memory.mdx @@ -1,14 +1,11 @@ --- layout: docs -page_title: In-Memory - Storage Backends - Configuration -description: |- - The In-Memory storage backend is used to persist Vault's data entirely - in-memory on the same machine in which Vault is running. This is useful for - development and experimentation, but use of this backend is highly discouraged - in production except in very specific use-cases. +page_title: In-memory storage configuration +description: >- + Configure Vault backend storage to use in-memory storage. --- -# In-Memory storage backend +# In-memory storage configuration for Vault backend storage The In-Memory storage backend is used to persist Vault's data entirely in-memory on the same machine in which Vault is running. This is useful for development diff --git a/website/content/docs/configuration/storage/index.mdx b/website/content/docs/configuration/storage/index.mdx index 6e81927243f7..5bc07e8f1f35 100644 --- a/website/content/docs/configuration/storage/index.mdx +++ b/website/content/docs/configuration/storage/index.mdx @@ -1,12 +1,9 @@ --- layout: docs -page_title: Storage Backends - Configuration -description: |- - The storage stanza configures the storage backend, which represents the - location for the durable storage of Vault's information. Each backend has - pros, cons, advantages, and trade-offs. For example, some backends support - high availability while others provide a more robust backup and restoration - process. +page_title: storage - Configuration +description: >- + Configure the storage stanza to customize persistent, backend storage for + Vault. 
--- # `storage` stanza diff --git a/website/content/docs/configuration/storage/manta.mdx b/website/content/docs/configuration/storage/manta.mdx index c208e5618617..4037013cceaf 100644 --- a/website/content/docs/configuration/storage/manta.mdx +++ b/website/content/docs/configuration/storage/manta.mdx @@ -1,14 +1,11 @@ --- layout: docs -page_title: Manta - Storage Backends - Configuration +page_title: Manta configuration description: >- - The Manta storage backend is used to persist Vault's data in Triton's Manta - Object - - Storage. The storage folder must already exist. + Configure Vault backend storage to use Manta Object Storage from Triton. --- -# Manta storage backend +# Manta Object Storage configuration for Vault backend storage The Manta storage backend is used to persist Vault's data in [Triton's Manta Object Storage][manta-object-store]. The storage folder must already exist. diff --git a/website/content/docs/configuration/storage/mssql.mdx b/website/content/docs/configuration/storage/mssql.mdx index db5f5b5eeaf4..955bc876aa27 100644 --- a/website/content/docs/configuration/storage/mssql.mdx +++ b/website/content/docs/configuration/storage/mssql.mdx @@ -1,12 +1,11 @@ --- layout: docs -page_title: MSSQL - Storage Backends - Configuration -description: >- - The MSSQL storage backend is used to persist Vault's data in a Microsoft SQL - Server. +page_title: MSSQL configuration +description: >- + Configure Vault backend storage to use Microsoft SQL Server. --- -# MSSQL storage backend +# Microsoft SQL Server configuration for Vault backend storage The MSSQL storage backend is used to persist Vault's data in a Microsoft SQL Server. 
diff --git a/website/content/docs/configuration/storage/mysql.mdx b/website/content/docs/configuration/storage/mysql.mdx index 70b97f7a5ff7..2e08cb1e0761 100644 --- a/website/content/docs/configuration/storage/mysql.mdx +++ b/website/content/docs/configuration/storage/mysql.mdx @@ -1,12 +1,11 @@ --- layout: docs -page_title: MySQL - Storage Backends - Configuration -description: |- - The MySQL storage backend is used to persist Vault's data in a MySQL server or - cluster. +page_title: MySQL configuration +description: >- + Configure Vault backend storage to use a MySQL server or cluster. --- -# MySQL storage backend +# MySQL configuration for Vault backend storage The MySQL storage backend is used to persist Vault's data in a [MySQL][mysql] server or cluster. diff --git a/website/content/docs/configuration/storage/oci-object-storage.mdx b/website/content/docs/configuration/storage/oci-object-storage.mdx index 2cdcdaf55e16..a656e19da9e7 100644 --- a/website/content/docs/configuration/storage/oci-object-storage.mdx +++ b/website/content/docs/configuration/storage/oci-object-storage.mdx @@ -1,12 +1,12 @@ --- layout: docs -page_title: OCI Object Storage - Storage Backends - Configuration +page_title: OCI Object Storage configuration description: >- The OCI Object Storage backend is used to persist Vault's data in OCI Object Storage. --- -# OCI object storage storage backend +# OCI Object Storage configuration for Vault backend storage The OCI Object Storage backend is used to persist Vault's data in OCI Object Storage. 
diff --git a/website/content/docs/configuration/storage/postgresql.mdx b/website/content/docs/configuration/storage/postgresql.mdx index 85a8399bf413..8977a055f845 100644 --- a/website/content/docs/configuration/storage/postgresql.mdx +++ b/website/content/docs/configuration/storage/postgresql.mdx @@ -1,12 +1,11 @@ --- layout: docs -page_title: PostgreSQL - Storage Backends - Configuration -description: |- - The PostgreSQL storage backend is used to persist Vault's data in a PostgreSQL - server or cluster. +page_title: PostgreSQL configuration +description: >- + Configure Vault backend storage to use a PostgreSQL server or cluster. --- -# PostgreSQL storage backend +# PostgreSQL configuration for Vault backend storage The PostgreSQL storage backend is used to persist Vault's data in a [PostgreSQL][postgresql] server or cluster. diff --git a/website/content/docs/configuration/storage/raft.mdx b/website/content/docs/configuration/storage/raft.mdx index 25f5cc1f5fef..e184ab49d98a 100644 --- a/website/content/docs/configuration/storage/raft.mdx +++ b/website/content/docs/configuration/storage/raft.mdx @@ -1,13 +1,11 @@ --- layout: docs -page_title: Integrated Storage - Storage Backends - Configuration +page_title: Integrated storage configuration description: >- - The Integrated Storage (Raft) backend is used to persist Vault's data. Unlike all the other - storage backends, this backend does not operate from a single source for the - data. Instead all the nodes in a Vault cluster will have a replicated copy of - the entire data. The data is replicated across the nodes using the Raft - Consensus Algorithm. + Configure Vault backend storage to use the integrated storage backend so that + all the nodes in a Vault cluster have a replicated copy of persistent storage + managed by the Raft consensus algorithm. 
--- # Integrated storage (Raft) backend diff --git a/website/content/docs/configuration/storage/s3.mdx b/website/content/docs/configuration/storage/s3.mdx index 14526bc051c5..a03924212a10 100644 --- a/website/content/docs/configuration/storage/s3.mdx +++ b/website/content/docs/configuration/storage/s3.mdx @@ -1,12 +1,11 @@ --- layout: docs -page_title: S3 - Storage Backends - Configuration -description: |- - The S3 storage backend is used to persist Vault's data in an Amazon S3 - bucket. +page_title: S3 configuration +description: >- + Configure Vault backend storage to use Amazon S3 buckets. --- -# S3 storage backend +# S3 configuration for Vault backend storage The S3 storage backend is used to persist Vault's data in an [Amazon S3][s3] bucket. diff --git a/website/content/docs/configuration/storage/swift.mdx b/website/content/docs/configuration/storage/swift.mdx index 6f16e8ad4a05..b4d19f360c24 100644 --- a/website/content/docs/configuration/storage/swift.mdx +++ b/website/content/docs/configuration/storage/swift.mdx @@ -1,12 +1,11 @@ --- layout: docs -page_title: Swift - Storage Backends - Configuration -description: |- - The Swift storage backend is used to persist Vault's data in an OpenStack - Swift Container. +page_title: Swift configuration +description: >- + Configure Vault backend storage to use OpenStack Swift containers. --- -# Swift storage backend +# Swift configuration for Vault backend storage The Swift storage backend is used to persist Vault's data in an [OpenStack Swift Container][swift]. 
diff --git a/website/content/docs/configuration/storage/zookeeper.mdx b/website/content/docs/configuration/storage/zookeeper.mdx index e2120e6a19e9..e595687ca76b 100644 --- a/website/content/docs/configuration/storage/zookeeper.mdx +++ b/website/content/docs/configuration/storage/zookeeper.mdx @@ -1,10 +1,11 @@ --- layout: docs -page_title: Zookeeper - Storage Backends - Configuration -description: The Zookeeper storage backend is used to persist Vault's data in Zookeeper. +page_title: Zookeeper configuration +description: >- + Configure Vault backend storage to use Zookeeper. --- -# Zookeeper storage backend +# Zookeeper configuration for Vault backend storage The Zookeeper storage backend is used to persist Vault's data in [Zookeeper][zk]. diff --git a/website/content/docs/configuration/telemetry.mdx b/website/content/docs/configuration/telemetry.mdx index 939f25d9b620..5f3da4cf2672 100644 --- a/website/content/docs/configuration/telemetry.mdx +++ b/website/content/docs/configuration/telemetry.mdx @@ -1,9 +1,9 @@ --- layout: docs page_title: Telemetry - Configuration -description: |- - The telemetry stanza specifies various configurations for Vault to publish - metrics to upstream systems. +description: >- + Configure the telemetry stanza to publish Vault usage metrics to upstream + systems. --- # `telemetry` stanza diff --git a/website/content/docs/configuration/ui.mdx b/website/content/docs/configuration/ui.mdx index c2e6b859c8f2..d5ebe4150159 100644 --- a/website/content/docs/configuration/ui.mdx +++ b/website/content/docs/configuration/ui.mdx @@ -1,23 +1,19 @@ --- layout: docs -page_title: UI - Configuration -description: |- - Vault features a user interface (web interface) for interacting with Vault. - Easily create, read, update, and delete secrets, authenticate, unseal, and - more with the Vault UI. +page_title: GUI - Configuration +description: >- + Configure the web interface (GUI) for Vault. 
--- -# Vault UI +# Vault GUI configuration Vault features a user interface (web interface) for interacting with Vault. Easily create, read, update, and delete secrets, authenticate, unseal, and -more with the Vault UI. +more with the Vault GUI. --> The UI requires **Vault 0.10 or higher** or Vault Enterprise. +## Activating the Vault GUI -## Activating the Vault UI - -The Vault UI is not activated by default. To activate the UI, set the `ui` +The Vault GUI is not activated by default. To activate the UI, set the `ui` configuration option in the Vault server configuration. Vault clients do not need to set this option, since they will not be serving the UI. @@ -32,7 +28,7 @@ listener "tcp" { For more information, please see the [Vault configuration options](/vault/docs/configuration). -## Accessing the Vault UI +## Accessing the Vault GUI The UI runs on the same port as the Vault listener. As such, you must configure at least one `listener` stanza in order to access the UI. @@ -41,7 +37,7 @@ at least one `listener` stanza in order to access the UI. listener "tcp" { address = "10.0.1.35:8200" - # If bound to localhost, the Vault UI is only + # If bound to localhost, the Vault GUI is only # accessible from the local machine! # address = "127.0.0.1:8200" } @@ -64,16 +60,16 @@ https://vault.service.consul:8200/ui/ ### Note on TLS When using TLS (recommended), the certificate must be valid for all DNS entries -you will be accessing the Vault UI on, and any IP addresses on the SAN. If you +you will be accessing the Vault GUI on, and any IP addresses on the SAN. If you are running Vault with a self-signed certificate, any browsers that access the -Vault UI will need to have the root CA installed. Failure to do so may result in +Vault GUI will need to have the root CA installed. Failure to do so may result in the browser displaying a warning that the site is "untrusted". 
It is highly -recommended that client browsers accessing the Vault UI install the proper CA +recommended that client browsers accessing the Vault GUI install the proper CA root for validation to reduce the chance of a MITM attack. -## Vault UI Web REPL +## CLI emulation in the Vault GUI -The Vault UI includes an interactive Web REPL to interact with Vault's API much +The Vault GUI includes an interactive Web REPL to interact with Vault's API much like the Vault CLI. For more on that, see the [Web REPL documentation](/vault/docs/commands/web). diff --git a/website/content/docs/configuration/user-lockout.mdx b/website/content/docs/configuration/user-lockout.mdx index f3c81c80522f..e08f2d347e0f 100644 --- a/website/content/docs/configuration/user-lockout.mdx +++ b/website/content/docs/configuration/user-lockout.mdx @@ -1,10 +1,11 @@ --- layout: docs -page_title: User Lockout - Configuration -description: |- - The user_lockout stanza specifies various configurations for user lockout behaviour for - failed logins in vault. +page_title: User lockout - Configuration +description: >- + Configure the user_lockout stanza to customize lockout behavior for failed + logins in vault. 
--- + # User lockout @include 'user-lockout.mdx' diff --git a/website/data/docs-nav-data.json b/website/data/docs-nav-data.json index d472488ea166..6a12ed1bd2cc 100644 --- a/website/data/docs-nav-data.json +++ b/website/data/docs-nav-data.json @@ -320,14 +320,14 @@ "path": "concepts/duration-format" }, { - "title": "User Lockout", + "title": "User lockout", "path": "concepts/user-lockout" }, { "title": "Events", "path": "concepts/events", "badge": { - "text": "ENTERPRISE", + "text": "ENT", "type": "outlined", "color": "neutral" } @@ -336,7 +336,7 @@ "title": "Filtering", "path": "concepts/filtering", "badge": { - "text": "ENTERPRISE", + "text": "ENT", "type": "outlined", "color": "neutral" } @@ -344,7 +344,7 @@ { "title": "Adaptive overload protection", "badge": { - "text": "ENTERPRISE", + "text": "ENT", "type": "outlined", "color": "neutral" }, @@ -365,9 +365,13 @@ "title": "Configuration", "routes": [ { - "title": "Overview", + "title": "Parameter overview ", "path": "configuration" }, + { + "title": "GUI configuration", + "path": "configuration/ui" + }, { "title": "Programmatic best practices", "path": "configuration/programmatic-best-practices" @@ -384,6 +388,40 @@ "title": "Create a lease count quota", "path": "configuration/create-lease-count-quota" }, + { + "title": "Configure completed request logging", + "path": "configuration/log-requests-level" + }, + { + "title": "Configure entropy augmentation", + "badge": { + "text": "ENT", + "type": "outlined", + "color": "neutral" + }, + "path": "configuration/entropy-augmentation" + }, + + { "heading": "Configuration stanzas" }, + + { + "title": "adaptive_overload_protection", + "badge": { + "text": "ENT", + "type": "outlined", + "color": "neutral" + }, + "path": "configuration/adaptive-overload-protection" + }, + { + "title": "kms_library", + "badge": { + "text": "ENT", + "type": "outlined", + "color": "neutral" + }, + "path": "configuration/kms-library" + }, { "title": "listener", "routes": [ @@ -392,10 +430,10 
@@ "path": "configuration/listener" }, { - "title": "TCP", + "title": "TCP listener configuration", "routes": [ { - "title": "Overview", + "title": "Basic configuration", "path": "configuration/listener/tcp" }, { @@ -405,7 +443,7 @@ ] }, { - "title": "Unix", + "title": "Unix listener configuration", "path": "configuration/listener/unix" } ] @@ -413,7 +451,7 @@ { "title": "replication", "badge": { - "text": "ENTERPRISE", + "text": "ENT", "type": "outlined", "color": "neutral" }, @@ -422,7 +460,7 @@ { "title": "reporting", "badge": { - "text": "ENTERPRISE", + "text": "ENT", "type": "outlined", "color": "neutral" }, @@ -436,18 +474,19 @@ "path": "configuration/seal" }, { - "title": "Seal best practices", + "title": "Sealing best practices", "path": "configuration/seal/seal-best-practices" }, { - "title": "High Availability", + "title": "Configure HA for seals", "badge": { - "text": "ENTERPRISE", + "text": "ENT", "type": "outlined", "color": "neutral" }, "path": "configuration/seal/seal-ha" }, + { "heading": "Seal wrapper options" }, { "title": "AliCloud KMS", "path": "configuration/seal/alicloudkms" @@ -471,18 +510,18 @@ { "title": "HSM PKCS11", "badge": { - "text": "ENTERPRISE", + "text": "ENT", "type": "outlined", "color": "neutral" }, "path": "configuration/seal/pkcs11" }, { - "title": "Vault Transit", + "title": "Transit plugin", "path": "configuration/seal/transit" }, { - "title": "Transit best practices", + "title": "Auto-unseal best practices for Transit", "path": "configuration/seal/transit-best-practices" } ] @@ -490,7 +529,7 @@ { "title": "sentinel", "badge": { - "text": "ENTERPRISE", + "text": "ENT", "type": "outlined", "color": "neutral" }, @@ -503,6 +542,7 @@ "title": "Overview", "path": "configuration/service-registration" }, + { "heading": "Service registration options" }, { "title": "Consul", "path": "configuration/service-registration/consul" @@ -517,9 +557,10 @@ "title": "storage", "routes": [ { - "title": "Overview", + "title": "Backend storage for 
Vault", "path": "configuration/storage" }, + { "heading": "Storage options" }, { "title": "Aerospike", "path": "configuration/storage/aerospike" @@ -557,7 +598,7 @@ "path": "configuration/storage/etcd" }, { - "title": "Filesystem", + "title": "File system", "path": "configuration/storage/filesystem" }, { @@ -573,9 +614,13 @@ "path": "configuration/storage/google-cloud-storage" }, { - "title": "In-Memory", + "title": "In-memory storage", "path": "configuration/storage/in-memory" }, + { + "title": "Integrated storage (Raft)", + "path": "configuration/storage/raft" + }, { "title": "Manta", "path": "configuration/storage/manta" @@ -596,10 +641,6 @@ "title": "PostgreSQL", "path": "configuration/storage/postgresql" }, - { - "title": "Integrated Storage (Raft)", - "path": "configuration/storage/raft" - }, { "title": "S3", "path": "configuration/storage/s3" @@ -618,44 +659,9 @@ "title": "telemetry", "path": "configuration/telemetry" }, - { - "title": "Adaptive overload protection", - "badge": { - "text": "ENTERPRISE", - "type": "outlined", - "color": "neutral" - }, - "path": "configuration/adaptive-overload-protection" - }, - { - "title": "ui", - "path": "configuration/ui" - }, { "title": "user_lockout", "path": "configuration/user-lockout" - }, - { - "title": "Log Completed Requests", - "path": "configuration/log-requests-level" - }, - { - "title": "Entropy Augmentation", - "badge": { - "text": "ENTERPRISE", - "type": "outlined", - "color": "neutral" - }, - "path": "configuration/entropy-augmentation" - }, - { - "title": "kms_library", - "badge": { - "text": "ENTERPRISE", - "type": "outlined", - "color": "neutral" - }, - "path": "configuration/kms-library" } ] }, @@ -1193,7 +1199,7 @@ "title": "Custom Messages", "path": "ui/custom-messages", "badge": { - "text": "ENTERPRISE", + "text": "ENT", "type": "outlined", "color": "neutral" } @@ -1308,7 +1314,7 @@ "title": "Improve Vault traffic resiliency", "path": "agent-and-proxy/proxy/caching/static-secret-caching", "badge": { 
- "text": "ENTERPRISE", + "text": "ENT", "type": "outlined", "color": "neutral" } @@ -1539,7 +1545,7 @@ { "title": "Key Management", "badge": { - "text": "ENTERPRISE", + "text": "ENT", "type": "outlined", "color": "neutral" }, @@ -1658,7 +1664,7 @@ { "title": "KMIP", "badge": { - "text": "ENTERPRISE", + "text": "ENT", "type": "outlined", "color": "neutral" }, @@ -1715,7 +1721,7 @@ { "title": "Certificate Issuance External Policy Service (CIEPS)", "badge": { - "text": "ENTERPRISE", + "text": "ENT", "type": "outlined", "color": "neutral" }, @@ -1725,7 +1731,7 @@ "title": "Enrollment over Secure Transport (EST)", "path": "secrets/pki/est", "badge": { - "text": "ENTERPRISE", + "text": "ENT", "type": "outlined", "color": "neutral" }, @@ -1734,7 +1740,7 @@ { "title": "Certificate Management Protocol (CMPv2)", "badge": { - "text": "ENTERPRISE", + "text": "ENT", "type": "outlined", "color": "neutral" }, @@ -1778,7 +1784,7 @@ { "title": "Transform", "badge": { - "text": "ENTERPRISE", + "text": "ENT", "type": "outlined", "color": "neutral" }, @@ -1794,7 +1800,7 @@ { "title": "Tokenization Transform", "badge": { - "text": "ENTERPRISE", + "text": "ENT", "type": "outlined", "color": "neutral" }, @@ -1824,7 +1830,7 @@ { "title": "Secrets Sync", "badge": { - "text": "ENTERPRISE", + "text": "ENT", "type": "outlined", "color": "neutral" }, @@ -1858,7 +1864,7 @@ { "title": "Secrets Import", "badge": { - "text": "ENTERPRISE", + "text": "ENT", "type": "outlined", "color": "neutral" }, @@ -2028,7 +2034,7 @@ { "title": "Use SAML authentication", "badge": { - "text": "ENTERPRISE", + "text": "ENT", "type": "outlined", "color": "neutral" }, @@ -2125,7 +2131,7 @@ "title": "Plugin Development — Event Notifications", "path": "plugins/plugin-development-event-notifications", "badge": { - "text": "ENTERPRISE", + "text": "ENT", "type": "outlined", "color": "neutral" } @@ -3055,7 +3061,7 @@ "title": "Run Vault with many namespaces", "path": "enterprise/namespaces/namespace-limits", "badge": { - 
"text": "ENTERPRISE", + "text": "ENT", "type": "outlined", "color": "neutral" } @@ -3064,7 +3070,7 @@ "title": "Configure cross namespace access", "path": "enterprise/namespaces/configure-cross-namespace-access", "badge": { - "text": "ENTERPRISE", + "text": "ENT", "type": "outlined", "color": "neutral" } From 796a565f4c38b4daf029d61e5bd670cbacb8cd12 Mon Sep 17 00:00:00 2001 From: Rachel Culpepper <84159930+rculpepper@users.noreply.github.com> Date: Fri, 20 Dec 2024 10:35:51 -0600 Subject: [PATCH 02/15] fix key type for hybrid key creation tests (#29135) --- builtin/logical/transit/path_keys_test.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/builtin/logical/transit/path_keys_test.go b/builtin/logical/transit/path_keys_test.go index ed6e84583469..038125afe677 100644 --- a/builtin/logical/transit/path_keys_test.go +++ b/builtin/logical/transit/path_keys_test.go @@ -267,27 +267,27 @@ func TestTransit_CreateKey(t *testing.T) { entOnly: true, }, "Hybrid ML-DSA-65-ECDSA-P256": { - creationParams: map[string]interface{}{"type": "ml-dsa", "parameter_set": "65", "hybrid_key_type_ec": "ecdsa-p256", "hybrid_key_type_pqc": "ml-dsa"}, + creationParams: map[string]interface{}{"type": "hybrid", "parameter_set": "65", "hybrid_key_type_ec": "ecdsa-p256", "hybrid_key_type_pqc": "ml-dsa"}, entOnly: true, }, "Hybrid ML-DSA-65-ECDSA-P384": { - creationParams: map[string]interface{}{"type": "ml-dsa", "parameter_set": "65", "hybrid_key_type_ec": "ecdsa-p384", "hybrid_key_type_pqc": "ml-dsa"}, + creationParams: map[string]interface{}{"type": "hybrid", "parameter_set": "65", "hybrid_key_type_ec": "ecdsa-p384", "hybrid_key_type_pqc": "ml-dsa"}, entOnly: true, }, "Hybrid ML-DSA-65-ECDSA-P521": { - creationParams: map[string]interface{}{"type": "ml-dsa", "parameter_set": "65", "hybrid_key_type_ec": "ecdsa-p521", "hybrid_key_type_pqc": "ml-dsa"}, + creationParams: map[string]interface{}{"type": "hybrid", "parameter_set": "65", "hybrid_key_type_ec": 
"ecdsa-p521", "hybrid_key_type_pqc": "ml-dsa"}, entOnly: true, }, "Hybrid ML-DSA-87-ECDSA-P256": { - creationParams: map[string]interface{}{"type": "ml-dsa", "parameter_set": "87", "hybrid_key_type_ec": "ecdsa-p256", "hybrid_key_type_pqc": "ml-dsa"}, + creationParams: map[string]interface{}{"type": "hybrid", "parameter_set": "87", "hybrid_key_type_ec": "ecdsa-p256", "hybrid_key_type_pqc": "ml-dsa"}, entOnly: true, }, "Hybrid ML-DSA-87-ECDSA-P384": { - creationParams: map[string]interface{}{"type": "ml-dsa", "parameter_set": "87", "hybrid_key_type_ec": "ecdsa-p384", "hybrid_key_type_pqc": "ml-dsa"}, + creationParams: map[string]interface{}{"type": "hybrid", "parameter_set": "87", "hybrid_key_type_ec": "ecdsa-p384", "hybrid_key_type_pqc": "ml-dsa"}, entOnly: true, }, "Hybrid ML-DSA-87-ECDSA-P521": { - creationParams: map[string]interface{}{"type": "ml-dsa", "parameter_set": "87", "hybrid_key_type_ec": "ecdsa-p521", "hybrid_key_type_pqc": "ml-dsa"}, + creationParams: map[string]interface{}{"type": "hybrid", "parameter_set": "87", "hybrid_key_type_ec": "ecdsa-p521", "hybrid_key_type_pqc": "ml-dsa"}, entOnly: true, }, "bad key type": { From 70325d2dde6c462012aadfcd02129a43e5ff81b2 Mon Sep 17 00:00:00 2001 From: Evan Moncuso <46458931+emoncuso@users.noreply.github.com> Date: Fri, 20 Dec 2024 10:27:59 -0800 Subject: [PATCH 03/15] update changelog for GH-28670 (#29240) --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a7fd87f7cac6..f680750853f8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -28,6 +28,7 @@ seal unwrapper was performing the read twice, and would also issue an unnecessar * secret/db: Update static role rotation to generate a new password after 2 failed attempts. [[GH-28989](https://github.com/hashicorp/vault/pull/28989)] * ui: Allow users to search the full json object within the json code-editor edit/create view. 
[[GH-28808](https://github.com/hashicorp/vault/pull/28808)] * ui: Decode `connection_url` to fix database connection updates (i.e. editing connection config, deleting roles) failing when urls include template variables. [[GH-29114](https://github.com/hashicorp/vault/pull/29114)] +* ui: Fix Swagger explorer bug where requests with path params were not working. [[GH-28670](https://github.com/hashicorp/vault/issues/28670)] * vault/diagnose: Fix time to expiration reporting within the TLS verification to not be a month off. [[GH-29128](https://github.com/hashicorp/vault/pull/29128)] ## 1.18.2 @@ -339,6 +340,7 @@ BUG FIXES: * secret/db: Update static role rotation to generate a new password after 2 failed attempts. [[GH-28989](https://github.com/hashicorp/vault/pull/28989)] * ui: Allow users to search the full json object within the json code-editor edit/create view. [[GH-28808](https://github.com/hashicorp/vault/pull/28808)] * ui: Decode `connection_url` to fix database connection updates (i.e. editing connection config, deleting roles) failing when urls include template variables. [[GH-29114](https://github.com/hashicorp/vault/pull/29114)] +* ui: Fix Swagger explorer bug where requests with path params were not working. [[GH-28670](https://github.com/hashicorp/vault/issues/28670)] * vault/diagnose: Fix time to expiration reporting within the TLS verification to not be a month off. 
[[GH-29128](https://github.com/hashicorp/vault/pull/29128)] ## 1.17.9 Enterprise From e349c998912277b9a9ce117e3734a9259adc8336 Mon Sep 17 00:00:00 2001 From: "Shannon Roberts (Beagin)" Date: Fri, 20 Dec 2024 10:47:02 -0800 Subject: [PATCH 04/15] [VAULT-33146] Update tutorial link for creating a policy (#29226) * [VAULT-33146] Update vault tutorial link * add changelog * update changelog * remove changelog --------- Co-authored-by: Tony Wittinger --- ui/app/templates/vault/cluster/policies/index.hbs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ui/app/templates/vault/cluster/policies/index.hbs b/ui/app/templates/vault/cluster/policies/index.hbs index fc010973279e..b0c0d5baeea4 100644 --- a/ui/app/templates/vault/cluster/policies/index.hbs +++ b/ui/app/templates/vault/cluster/policies/index.hbs @@ -155,7 +155,7 @@ @iconPosition="trailing" @icon="learn-link" @text="Getting started with policies" - @href={{doc-link "/vault/tutorials/getting-started/getting-started-policies"}} + @href={{doc-link "/vault/tutorials/get-started/introduction-policies"}} /> {{else}} Date: Fri, 20 Dec 2024 10:47:34 -0800 Subject: [PATCH 05/15] [VAULT-19467] UI Breadcrumb Title Case (#29206) * [VAULT-19467] Vault UI Breadcrumb Title Case * should use Title Case * update changelog * rename changelog * update tests * more test updates * update tests --------- Co-authored-by: Tony Wittinger --- changelog/29206.txt | 3 +++ ui/lib/ldap/addon/routes/libraries/library/check-out.ts | 4 ++-- ui/lib/ldap/addon/routes/libraries/library/details.ts | 2 +- ui/lib/ldap/addon/routes/libraries/library/edit.ts | 4 ++-- ui/lib/pki/addon/components/page/pki-role-details.ts | 2 +- ui/lib/pki/addon/routes/keys/index.js | 2 +- ui/lib/pki/addon/routes/keys/key/details.js | 2 +- ui/lib/pki/addon/routes/keys/key/edit.js | 2 +- ui/lib/pki/addon/routes/roles/create.js | 4 ++-- ui/lib/pki/addon/routes/roles/role/details.js | 2 +- ui/lib/pki/addon/routes/roles/role/edit.js | 4 ++-- 
ui/lib/pki/addon/routes/roles/role/generate.js | 4 ++-- ui/lib/pki/addon/routes/roles/role/sign.js | 4 ++-- ui/lib/pki/addon/routes/tidy/auto/configure.js | 8 ++++---- ui/lib/pki/addon/routes/tidy/auto/index.js | 4 ++-- ui/lib/pki/addon/routes/tidy/manual.js | 6 +++--- .../acceptance/secrets/backend/ldap/libraries-test.js | 2 +- .../components/kubernetes/page/configure-test.js | 2 +- .../components/kubernetes/page/credentials-test.js | 4 ++-- .../kubernetes/page/role/create-and-edit-test.js | 4 ++-- .../components/kubernetes/page/role/details-test.js | 4 ++-- ui/tests/integration/components/kv/kv-page-header-test.js | 2 +- .../components/kv/page/kv-page-configuration-test.js | 2 +- .../components/kv/page/kv-page-secret-edit-test.js | 2 +- .../components/kv/page/kv-page-secrets-create-test.js | 2 +- .../components/ldap/page/library/check-out-test.js | 8 ++++---- .../components/ldap/page/library/details-test.js | 4 ++-- .../components/ldap/page/role/credentials-test.js | 8 ++++---- .../integration/components/ldap/page/role/details-test.js | 4 ++-- .../components/pki/page/pki-configure-create-test.js | 2 +- .../components/pki/page/pki-tidy-auto-settings-test.js | 4 ++-- 31 files changed, 57 insertions(+), 54 deletions(-) create mode 100644 changelog/29206.txt diff --git a/changelog/29206.txt b/changelog/29206.txt new file mode 100644 index 000000000000..26cda21a9cbc --- /dev/null +++ b/changelog/29206.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Application static breadcrumbs should be formatted in title case. 
+``` \ No newline at end of file diff --git a/ui/lib/ldap/addon/routes/libraries/library/check-out.ts b/ui/lib/ldap/addon/routes/libraries/library/check-out.ts index 174c60034ae6..23025f96892b 100644 --- a/ui/lib/ldap/addon/routes/libraries/library/check-out.ts +++ b/ui/lib/ldap/addon/routes/libraries/library/check-out.ts @@ -49,9 +49,9 @@ export default class LdapLibraryCheckOutRoute extends Route { const library = this.modelFor('libraries.library') as LdapLibraryModel; controller.breadcrumbs = [ { label: library.backend, route: 'overview' }, - { label: 'libraries', route: 'libraries' }, + { label: 'Libraries', route: 'libraries' }, { label: library.name, route: 'libraries.library' }, - { label: 'check-out' }, + { label: 'Check-Out' }, ]; } diff --git a/ui/lib/ldap/addon/routes/libraries/library/details.ts b/ui/lib/ldap/addon/routes/libraries/library/details.ts index 7f8c579dc0bb..233835b602c8 100644 --- a/ui/lib/ldap/addon/routes/libraries/library/details.ts +++ b/ui/lib/ldap/addon/routes/libraries/library/details.ts @@ -25,7 +25,7 @@ export default class LdapLibraryDetailsRoute extends Route { controller.breadcrumbs = [ { label: resolvedModel.backend, route: 'overview' }, - { label: 'libraries', route: 'libraries' }, + { label: 'Libraries', route: 'libraries' }, { label: resolvedModel.name }, ]; } diff --git a/ui/lib/ldap/addon/routes/libraries/library/edit.ts b/ui/lib/ldap/addon/routes/libraries/library/edit.ts index 329983097b24..191190cee140 100644 --- a/ui/lib/ldap/addon/routes/libraries/library/edit.ts +++ b/ui/lib/ldap/addon/routes/libraries/library/edit.ts @@ -25,9 +25,9 @@ export default class LdapLibraryEditRoute extends Route { controller.breadcrumbs = [ { label: resolvedModel.backend, route: 'overview' }, - { label: 'libraries', route: 'libraries' }, + { label: 'Libraries', route: 'libraries' }, { label: resolvedModel.name, route: 'libraries.library.details' }, - { label: 'edit' }, + { label: 'Edit' }, ]; } } diff --git 
a/ui/lib/pki/addon/components/page/pki-role-details.ts b/ui/lib/pki/addon/components/page/pki-role-details.ts index e8f3865dbfc7..28ed6d7dbac8 100644 --- a/ui/lib/pki/addon/components/page/pki-role-details.ts +++ b/ui/lib/pki/addon/components/page/pki-role-details.ts @@ -25,7 +25,7 @@ export default class DetailsPage extends Component { return [ { label: 'Secrets', route: 'secrets', linkExternal: true }, { label: this.secretMountPath.currentPath, route: 'overview' }, - { label: 'roles', route: 'roles.index' }, + { label: 'Roles', route: 'roles.index' }, { label: this.args.role.id }, ]; } diff --git a/ui/lib/pki/addon/routes/keys/index.js b/ui/lib/pki/addon/routes/keys/index.js index 432438223a1b..9f8bc35f1e8c 100644 --- a/ui/lib/pki/addon/routes/keys/index.js +++ b/ui/lib/pki/addon/routes/keys/index.js @@ -48,7 +48,7 @@ export default class PkiKeysIndexRoute extends Route { controller.breadcrumbs = [ { label: 'Secrets', route: 'secrets', linkExternal: true }, { label: this.secretMountPath.currentPath, route: 'overview', model: resolvedModel.parentModel.id }, - { label: 'keys', route: 'keys.index', model: resolvedModel.parentModel.id }, + { label: 'Keys', route: 'keys.index', model: resolvedModel.parentModel.id }, ]; controller.notConfiguredMessage = PKI_DEFAULT_EMPTY_STATE_MSG; } diff --git a/ui/lib/pki/addon/routes/keys/key/details.js b/ui/lib/pki/addon/routes/keys/key/details.js index a768be577ce0..df01d91a5aea 100644 --- a/ui/lib/pki/addon/routes/keys/key/details.js +++ b/ui/lib/pki/addon/routes/keys/key/details.js @@ -17,7 +17,7 @@ export default class PkiKeyDetailsRoute extends Route { controller.breadcrumbs = [ { label: 'Secrets', route: 'secrets', linkExternal: true }, { label: this.secretMountPath.currentPath, route: 'overview', model: resolvedModel.backend }, - { label: 'keys', route: 'keys.index', model: resolvedModel.backend }, + { label: 'Keys', route: 'keys.index', model: resolvedModel.backend }, { label: resolvedModel.id }, ]; } diff --git 
a/ui/lib/pki/addon/routes/keys/key/edit.js b/ui/lib/pki/addon/routes/keys/key/edit.js index 1414fa76f69e..8ba5b148963c 100644 --- a/ui/lib/pki/addon/routes/keys/key/edit.js +++ b/ui/lib/pki/addon/routes/keys/key/edit.js @@ -20,7 +20,7 @@ export default class PkiKeyEditRoute extends Route { controller.breadcrumbs = [ { label: 'Secrets', route: 'secrets', linkExternal: true }, { label: this.secretMountPath.currentPath, route: 'overview', model: this.secretMountPath.currentPath }, - { label: 'keys', route: 'keys.index', model: this.secretMountPath.currentPath }, + { label: 'Keys', route: 'keys.index', model: this.secretMountPath.currentPath }, { label: resolvedModel.id }, ]; } diff --git a/ui/lib/pki/addon/routes/roles/create.js b/ui/lib/pki/addon/routes/roles/create.js index c3a08a5cd58c..ea9a40c11200 100644 --- a/ui/lib/pki/addon/routes/roles/create.js +++ b/ui/lib/pki/addon/routes/roles/create.js @@ -32,8 +32,8 @@ export default class PkiRolesCreateRoute extends Route { controller.breadcrumbs = [ { label: 'Secrets', route: 'secrets', linkExternal: true }, { label: this.secretMountPath.currentPath, route: 'overview', model: this.secretMountPath.currentPath }, - { label: 'roles', route: 'roles.index', model: this.secretMountPath.currentPath }, - { label: 'create' }, + { label: 'Roles', route: 'roles.index', model: this.secretMountPath.currentPath }, + { label: 'Create' }, ]; } diff --git a/ui/lib/pki/addon/routes/roles/role/details.js b/ui/lib/pki/addon/routes/roles/role/details.js index 3b084789b489..64eb510049ae 100644 --- a/ui/lib/pki/addon/routes/roles/role/details.js +++ b/ui/lib/pki/addon/routes/roles/role/details.js @@ -24,7 +24,7 @@ export default class RolesRoleDetailsRoute extends Route { controller.breadcrumbs = [ { label: 'Secrets', route: 'secrets', linkExternal: true }, { label: this.secretMountPath.currentPath, route: 'overview', model: this.secretMountPath.currentPath }, - { label: 'roles', route: 'roles.index', model: this.secretMountPath.currentPath 
}, + { label: 'Roles', route: 'roles.index', model: this.secretMountPath.currentPath }, { label: id }, ]; } diff --git a/ui/lib/pki/addon/routes/roles/role/edit.js b/ui/lib/pki/addon/routes/roles/role/edit.js index ef0b739da1f4..128c2f6a2169 100644 --- a/ui/lib/pki/addon/routes/roles/role/edit.js +++ b/ui/lib/pki/addon/routes/roles/role/edit.js @@ -40,9 +40,9 @@ export default class PkiRoleEditRoute extends Route { controller.breadcrumbs = [ { label: 'Secrets', route: 'secrets', linkExternal: true }, { label: this.secretMountPath.currentPath, route: 'overview', model: this.secretMountPath.currentPath }, - { label: 'roles', route: 'roles.index', model: this.secretMountPath.currentPath }, + { label: 'Roles', route: 'roles.index', model: this.secretMountPath.currentPath }, { label: id, route: 'roles.role.details', models: [this.secretMountPath.currentPath, id] }, - { label: 'edit' }, + { label: 'Edit' }, ]; } } diff --git a/ui/lib/pki/addon/routes/roles/role/generate.js b/ui/lib/pki/addon/routes/roles/role/generate.js index 3c7ba5933d39..ef4d4862710e 100644 --- a/ui/lib/pki/addon/routes/roles/role/generate.js +++ b/ui/lib/pki/addon/routes/roles/role/generate.js @@ -25,9 +25,9 @@ export default class PkiRoleGenerateRoute extends Route { controller.breadcrumbs = [ { label: 'Secrets', route: 'secrets', linkExternal: true }, { label: this.secretMountPath.currentPath, route: 'overview', model: this.secretMountPath.currentPath }, - { label: 'roles', route: 'roles.index', model: this.secretMountPath.currentPath }, + { label: 'Roles', route: 'roles.index', model: this.secretMountPath.currentPath }, { label: role, route: 'roles.role.details', models: [this.secretMountPath.currentPath, role] }, - { label: 'generate certificate' }, + { label: 'Generate Certificate' }, ]; // This is updated on successful generate in the controller controller.hasSubmitted = false; diff --git a/ui/lib/pki/addon/routes/roles/role/sign.js b/ui/lib/pki/addon/routes/roles/role/sign.js index 
6b8414f8dd44..cd9f1d5b48ea 100644 --- a/ui/lib/pki/addon/routes/roles/role/sign.js +++ b/ui/lib/pki/addon/routes/roles/role/sign.js @@ -25,9 +25,9 @@ export default class PkiRoleSignRoute extends Route { controller.breadcrumbs = [ { label: 'Secrets', route: 'secrets', linkExternal: true }, { label: this.secretMountPath.currentPath, route: 'overview', model: this.secretMountPath.currentPath }, - { label: 'roles', route: 'roles.index', model: this.secretMountPath.currentPath }, + { label: 'Roles', route: 'roles.index', model: this.secretMountPath.currentPath }, { label: role, route: 'roles.role.details', models: [this.secretMountPath.currentPath, role] }, - { label: 'sign certificate' }, + { label: 'Sign Certificate' }, ]; // This is updated on successful generate in the controller controller.hasSubmitted = false; diff --git a/ui/lib/pki/addon/routes/tidy/auto/configure.js b/ui/lib/pki/addon/routes/tidy/auto/configure.js index ba9f84778e81..83b80875fa8c 100644 --- a/ui/lib/pki/addon/routes/tidy/auto/configure.js +++ b/ui/lib/pki/addon/routes/tidy/auto/configure.js @@ -21,10 +21,10 @@ export default class PkiTidyAutoConfigureRoute extends Route { controller.breadcrumbs = [ { label: 'Secrets', route: 'secrets', linkExternal: true }, { label: this.secretMountPath.currentPath, route: 'overview', model: backend }, - { label: 'configuration', route: 'configuration.index', model: backend }, - { label: 'tidy', route: 'tidy', model: backend }, - { label: 'auto', route: 'tidy.auto', model: backend }, - { label: 'configure' }, + { label: 'Configuration', route: 'configuration.index', model: backend }, + { label: 'Tidy', route: 'tidy', model: backend }, + { label: 'Auto', route: 'tidy.auto', model: backend }, + { label: 'Configure' }, ]; } } diff --git a/ui/lib/pki/addon/routes/tidy/auto/index.js b/ui/lib/pki/addon/routes/tidy/auto/index.js index c5dddb1d2fc6..1ad5cf99de39 100644 --- a/ui/lib/pki/addon/routes/tidy/auto/index.js +++ b/ui/lib/pki/addon/routes/tidy/auto/index.js @@ 
-19,8 +19,8 @@ export default class TidyAutoIndexRoute extends Route { controller.breadcrumbs = [ { label: 'Secrets', route: 'secrets', linkExternal: true }, { label: this.secretMountPath.currentPath, route: 'overview', model: backend }, - { label: 'tidy', route: 'tidy.index', model: backend }, - { label: 'auto' }, + { label: 'Tidy', route: 'tidy.index', model: backend }, + { label: 'Auto' }, ]; controller.title = this.secretMountPath.currentPath; } diff --git a/ui/lib/pki/addon/routes/tidy/manual.js b/ui/lib/pki/addon/routes/tidy/manual.js index d4d631776c08..156680f14fe6 100644 --- a/ui/lib/pki/addon/routes/tidy/manual.js +++ b/ui/lib/pki/addon/routes/tidy/manual.js @@ -21,9 +21,9 @@ export default class PkiTidyManualRoute extends Route { controller.breadcrumbs = [ { label: 'Secrets', route: 'secrets', linkExternal: true }, { label: this.secretMountPath.currentPath, route: 'overview', model: resolvedModel.backend }, - { label: 'configuration', route: 'configuration.index', model: resolvedModel.backend }, - { label: 'tidy', route: 'tidy', model: resolvedModel.backend }, - { label: 'manual' }, + { label: 'Configuration', route: 'configuration.index', model: resolvedModel.backend }, + { label: 'Tidy', route: 'tidy', model: resolvedModel.backend }, + { label: 'Manual' }, ]; } } diff --git a/ui/tests/acceptance/secrets/backend/ldap/libraries-test.js b/ui/tests/acceptance/secrets/backend/ldap/libraries-test.js index 4e9cc5a57059..fee09ad7f859 100644 --- a/ui/tests/acceptance/secrets/backend/ldap/libraries-test.js +++ b/ui/tests/acceptance/secrets/backend/ldap/libraries-test.js @@ -69,7 +69,7 @@ module('Acceptance | ldap | libraries', function (hooks) { isURL(`libraries/test-library/${uri}`, this.backend), `Transitions to ${action} route on list item action menu click` ); - await click('[data-test-breadcrumb="libraries"] a'); + await click('[data-test-breadcrumb="Libraries"] a'); } }); diff --git a/ui/tests/integration/components/kubernetes/page/configure-test.js 
b/ui/tests/integration/components/kubernetes/page/configure-test.js index 9fae07022f7d..5d862e1f1d87 100644 --- a/ui/tests/integration/components/kubernetes/page/configure-test.js +++ b/ui/tests/integration/components/kubernetes/page/configure-test.js @@ -36,7 +36,7 @@ module('Integration | Component | kubernetes | Page::Configure', function (hooks this.breadcrumbs = [ { label: 'Secrets', route: 'secrets', linkExternal: true }, { label: 'kubernetes', route: 'overview' }, - { label: 'configure' }, + { label: 'Configure' }, ]; this.expectedInferred = { disable_local_ca_jwt: false, diff --git a/ui/tests/integration/components/kubernetes/page/credentials-test.js b/ui/tests/integration/components/kubernetes/page/credentials-test.js index 9f1760cbb079..beb6bf22e118 100644 --- a/ui/tests/integration/components/kubernetes/page/credentials-test.js +++ b/ui/tests/integration/components/kubernetes/page/credentials-test.js @@ -38,9 +38,9 @@ module('Integration | Component | kubernetes | Page::Credentials', function (hoo }; this.breadcrumbs = [ { label: this.backend, route: 'overview' }, - { label: 'roles', route: 'roles' }, + { label: 'Roles', route: 'roles' }, { label: this.roleName, route: 'roles.role.details' }, - { label: 'credentials' }, + { label: 'Credentials' }, ]; this.renderComponent = () => { return render( diff --git a/ui/tests/integration/components/kubernetes/page/role/create-and-edit-test.js b/ui/tests/integration/components/kubernetes/page/role/create-and-edit-test.js index daeb6ba4c6a0..99f87eaa1193 100644 --- a/ui/tests/integration/components/kubernetes/page/role/create-and-edit-test.js +++ b/ui/tests/integration/components/kubernetes/page/role/create-and-edit-test.js @@ -40,8 +40,8 @@ module('Integration | Component | kubernetes | Page::Role::CreateAndEdit', funct this.newModel = store.createRecord('kubernetes/role', { backend: 'kubernetes-test' }); this.breadcrumbs = [ { label: this.newModel.backend, route: 'overview' }, - { label: 'roles', route: 'roles' 
}, - { label: 'create' }, + { label: 'Roles', route: 'roles' }, + { label: 'Create' }, ]; setRunOptions({ rules: { diff --git a/ui/tests/integration/components/kubernetes/page/role/details-test.js b/ui/tests/integration/components/kubernetes/page/role/details-test.js index 91e3912ad773..c9db002cedc0 100644 --- a/ui/tests/integration/components/kubernetes/page/role/details-test.js +++ b/ui/tests/integration/components/kubernetes/page/role/details-test.js @@ -45,7 +45,7 @@ module('Integration | Component | kubernetes | Page::Role::Details', function (h this.model = store.peekRecord('kubernetes/role', data.name); this.breadcrumbs = [ { label: this.model.backend, route: 'overview' }, - { label: 'roles', route: 'roles' }, + { label: 'Roles', route: 'roles' }, { label: this.model.name }, ]; return render(hbs``, { @@ -86,7 +86,7 @@ module('Integration | Component | kubernetes | Page::Role::Details', function (h assert .dom('[data-test-breadcrumbs] li:nth-child(1)') .containsText(this.model.backend, 'Overview breadcrumb renders'); - assert.dom('[data-test-breadcrumbs] li:nth-child(2) a').containsText('roles', 'Roles breadcrumb renders'); + assert.dom('[data-test-breadcrumbs] li:nth-child(2) a').containsText('Roles', 'Roles breadcrumb renders'); assert .dom('[data-test-breadcrumbs] li:nth-child(3)') .containsText(this.model.name, 'Role breadcrumb renders'); diff --git a/ui/tests/integration/components/kv/kv-page-header-test.js b/ui/tests/integration/components/kv/kv-page-header-test.js index 687b9dc9df8b..7f19e27975d7 100644 --- a/ui/tests/integration/components/kv/kv-page-header-test.js +++ b/ui/tests/integration/components/kv/kv-page-header-test.js @@ -37,7 +37,7 @@ module('Integration | Component | kv | kv-page-header', function (hooks) { { label: 'Secrets', route: 'secrets', linkExternal: true }, { label: this.model.backend, route: 'secrets' }, { label: this.model.path, route: 'secrets.secret.details', model: this.model.path }, - { label: 'edit' }, + { label: 'Edit' }, 
]; }); diff --git a/ui/tests/integration/components/kv/page/kv-page-configuration-test.js b/ui/tests/integration/components/kv/page/kv-page-configuration-test.js index d36a7fcfae25..392be73b69dc 100644 --- a/ui/tests/integration/components/kv/page/kv-page-configuration-test.js +++ b/ui/tests/integration/components/kv/page/kv-page-configuration-test.js @@ -54,7 +54,7 @@ module('Integration | Component | kv-v2 | Page::Configuration', function (hooks) this.breadcrumbs = [ { label: 'Secrets', route: 'secrets', linkExternal: true }, { label: this.model.mountConfig.path, route: 'list' }, - { label: 'configuration' }, + { label: 'Configuration' }, ]; }); diff --git a/ui/tests/integration/components/kv/page/kv-page-secret-edit-test.js b/ui/tests/integration/components/kv/page/kv-page-secret-edit-test.js index bd8ebfca82a2..99fde6735df5 100644 --- a/ui/tests/integration/components/kv/page/kv-page-secret-edit-test.js +++ b/ui/tests/integration/components/kv/page/kv-page-secret-edit-test.js @@ -35,7 +35,7 @@ module('Integration | Component | kv-v2 | Page::Secret::Edit', function (hooks) this.breadcrumbs = [ { label: 'Secrets', route: 'secrets', linkExternal: true }, { label: this.backend, route: 'list' }, - { label: 'edit' }, + { label: 'Edit' }, ]; setRunOptions({ rules: { diff --git a/ui/tests/integration/components/kv/page/kv-page-secrets-create-test.js b/ui/tests/integration/components/kv/page/kv-page-secrets-create-test.js index c48b544f2472..686118bc0b39 100644 --- a/ui/tests/integration/components/kv/page/kv-page-secrets-create-test.js +++ b/ui/tests/integration/components/kv/page/kv-page-secrets-create-test.js @@ -32,7 +32,7 @@ module('Integration | Component | kv-v2 | Page::Secrets::Create', function (hook this.breadcrumbs = [ { label: 'Secrets', route: 'secrets', linkExternal: true }, { label: this.backend, route: 'list' }, - { label: 'create' }, + { label: 'Create' }, ]; setRunOptions({ rules: { diff --git 
a/ui/tests/integration/components/ldap/page/library/check-out-test.js b/ui/tests/integration/components/ldap/page/library/check-out-test.js index 6280a20b869c..ea33162fc107 100644 --- a/ui/tests/integration/components/ldap/page/library/check-out-test.js +++ b/ui/tests/integration/components/ldap/page/library/check-out-test.js @@ -26,9 +26,9 @@ module('Integration | Component | ldap | Page::Library::CheckOut', function (hoo }; this.breadcrumbs = [ { label: 'ldap-test', route: 'overview' }, - { label: 'libraries', route: 'libraries' }, + { label: 'Libraries', route: 'libraries' }, { label: 'test-library', route: 'libraries.library' }, - { label: 'check-out' }, + { label: 'Check-Out' }, ]; this.renderComponent = () => { @@ -48,13 +48,13 @@ module('Integration | Component | ldap | Page::Library::CheckOut', function (hoo .containsText('ldap-test', 'Overview breadcrumb renders'); assert .dom('[data-test-breadcrumbs] li:nth-child(2) a') - .containsText('libraries', 'Libraries breadcrumb renders'); + .containsText('Libraries', 'Libraries breadcrumb renders'); assert .dom('[data-test-breadcrumbs] li:nth-child(3)') .containsText('test-library', 'Library breadcrumb renders'); assert .dom('[data-test-breadcrumbs] li:nth-child(4)') - .containsText('check-out', 'Check-out breadcrumb renders'); + .containsText('Check-Out', 'Check-out breadcrumb renders'); }); test('it should render check out information and credentials', async function (assert) { diff --git a/ui/tests/integration/components/ldap/page/library/details-test.js b/ui/tests/integration/components/ldap/page/library/details-test.js index ae9e634f6e2e..c7ef6515b6f7 100644 --- a/ui/tests/integration/components/ldap/page/library/details-test.js +++ b/ui/tests/integration/components/ldap/page/library/details-test.js @@ -34,7 +34,7 @@ module('Integration | Component | ldap | Page::Library::Details', function (hook this.breadcrumbs = [ { label: 'ldap-test', route: 'overview' }, - { label: 'libraries', route: 'libraries' }, + { 
label: 'Libraries', route: 'libraries' }, { label: 'test-library' }, ]; }); @@ -57,7 +57,7 @@ module('Integration | Component | ldap | Page::Library::Details', function (hook .containsText(this.model.backend, 'Overview breadcrumb renders'); assert .dom('[data-test-breadcrumbs] li:nth-child(2) a') - .containsText('libraries', 'Libraries breadcrumb renders'); + .containsText('Libraries', 'Libraries breadcrumb renders'); assert .dom('[data-test-breadcrumbs] li:nth-child(3)') .containsText(this.model.name, 'Library breadcrumb renders'); diff --git a/ui/tests/integration/components/ldap/page/role/credentials-test.js b/ui/tests/integration/components/ldap/page/role/credentials-test.js index 579d1292373c..15e29109ef1c 100644 --- a/ui/tests/integration/components/ldap/page/role/credentials-test.js +++ b/ui/tests/integration/components/ldap/page/role/credentials-test.js @@ -21,9 +21,9 @@ module('Integration | Component | ldap | Page::Role::Credentials', function (hoo hooks.beforeEach(function () { this.breadcrumbs = [ { label: 'ldap-test', route: 'overview' }, - { label: 'roles', route: 'roles' }, + { label: 'Roles', route: 'roles' }, { label: 'test-role', route: 'roles.role' }, - { label: 'credentials' }, + { label: 'Credentials' }, ]; this.transitionStub = sinon.stub(this.owner.lookup('service:router'), 'transitionTo'); }); @@ -39,13 +39,13 @@ module('Integration | Component | ldap | Page::Role::Credentials', function (hoo assert .dom('[data-test-breadcrumbs] li:nth-child(1)') .containsText('ldap-test', 'Overview breadcrumb renders'); - assert.dom('[data-test-breadcrumbs] li:nth-child(2) a').containsText('roles', 'Roles breadcrumb renders'); + assert.dom('[data-test-breadcrumbs] li:nth-child(2) a').containsText('Roles', 'Roles breadcrumb renders'); assert .dom('[data-test-breadcrumbs] li:nth-child(3)') .containsText('test-role', 'Role breadcrumb renders'); assert .dom('[data-test-breadcrumbs] li:nth-child(4)') - .containsText('credentials', 'Credentials breadcrumb 
renders'); + .containsText('Credentials', 'Credentials breadcrumb renders'); }); test('it should render error', async function (assert) { diff --git a/ui/tests/integration/components/ldap/page/role/details-test.js b/ui/tests/integration/components/ldap/page/role/details-test.js index 6d2709cd28bf..6425e9a62dec 100644 --- a/ui/tests/integration/components/ldap/page/role/details-test.js +++ b/ui/tests/integration/components/ldap/page/role/details-test.js @@ -37,7 +37,7 @@ module('Integration | Component | ldap | Page::Role::Details', function (hooks) this.model = store.peekRecord('ldap/role', ldapRoleID(type, data.name)); this.breadcrumbs = [ { label: this.model.backend, route: 'overview' }, - { label: 'roles', route: 'roles' }, + { label: 'Roles', route: 'roles' }, { label: this.model.name }, ]; return render(hbs``, { @@ -52,7 +52,7 @@ module('Integration | Component | ldap | Page::Role::Details', function (hooks) assert .dom('[data-test-breadcrumbs] li:nth-child(1)') .containsText(this.model.backend, 'Overview breadcrumb renders'); - assert.dom('[data-test-breadcrumbs] li:nth-child(2) a').containsText('roles', 'Roles breadcrumb renders'); + assert.dom('[data-test-breadcrumbs] li:nth-child(2) a').containsText('Roles', 'Roles breadcrumb renders'); assert .dom('[data-test-breadcrumbs] li:nth-child(3)') .containsText(this.model.name, 'Role breadcrumb renders'); diff --git a/ui/tests/integration/components/pki/page/pki-configure-create-test.js b/ui/tests/integration/components/pki/page/pki-configure-create-test.js index 4007f3e70986..49d90f143354 100644 --- a/ui/tests/integration/components/pki/page/pki-configure-create-test.js +++ b/ui/tests/integration/components/pki/page/pki-configure-create-test.js @@ -23,7 +23,7 @@ module('Integration | Component | page/pki-configure-create', function (hooks) { this.breadcrumbs = [ { label: 'Secrets', route: 'secrets', linkExternal: true }, { label: 'pki', route: 'overview', model: 'pki' }, - { label: 'configure' }, + { label: 
'Configure' }, ]; this.config = this.store.createRecord('pki/action'); this.urls = this.store.createRecord('pki/config/urls'); diff --git a/ui/tests/integration/components/pki/page/pki-tidy-auto-settings-test.js b/ui/tests/integration/components/pki/page/pki-tidy-auto-settings-test.js index 658a91573889..9c44f479ce75 100644 --- a/ui/tests/integration/components/pki/page/pki-tidy-auto-settings-test.js +++ b/ui/tests/integration/components/pki/page/pki-tidy-auto-settings-test.js @@ -23,8 +23,8 @@ module('Integration | Component | page/pki-tidy-auto-settings', function (hooks) this.breadcrumbs = [ { label: 'Secrets', route: 'secrets', linkExternal: true }, { label: backend, route: 'overview', model: backend }, - { label: 'tidy', route: 'tidy.index', model: backend }, - { label: 'auto' }, + { label: 'Tidy', route: 'tidy.index', model: backend }, + { label: 'Auto' }, ]; }); From 3754c67abf7f813e6a8b4c1c04b2ba1d8692576a Mon Sep 17 00:00:00 2001 From: Scott Miller Date: Fri, 20 Dec 2024 13:03:34 -0600 Subject: [PATCH 06/15] Enable seal wrapping for approle secret storage (#28703) * Enable seal wrapping for approle secret storage * changelog --- builtin/credential/approle/backend.go | 4 ++++ changelog/28703.txt | 3 +++ 2 files changed, 7 insertions(+) create mode 100644 changelog/28703.txt diff --git a/builtin/credential/approle/backend.go b/builtin/credential/approle/backend.go index 4afdd596078c..1ce71dfa6e29 100644 --- a/builtin/credential/approle/backend.go +++ b/builtin/credential/approle/backend.go @@ -110,6 +110,10 @@ func Backend(conf *logical.BackendConfig) (*backend, error) { secretIDLocalPrefix, secretIDAccessorLocalPrefix, }, + SealWrapStorage: []string{ + secretIDPrefix, + secretIDLocalPrefix, + }, }, Paths: framework.PathAppend( rolePaths(b), diff --git a/changelog/28703.txt b/changelog/28703.txt new file mode 100644 index 000000000000..c4283eccb2e5 --- /dev/null +++ b/changelog/28703.txt @@ -0,0 +1,3 @@ +```release-note:improvement +auth/approle: seal wrap 
approle secrets if seal wrap is enabled. +``` \ No newline at end of file From 357a13fbb0e818fd9715e11ccfb71048745dad04 Mon Sep 17 00:00:00 2001 From: akshya96 <87045294+akshya96@users.noreply.github.com> Date: Fri, 20 Dec 2024 11:28:10 -0800 Subject: [PATCH 07/15] Revert "OSS-Changes Patch (#29193)" (#29249) This reverts commit 1fab64e9c6bd1b7b7c52e87e4346e9ba50038aa8. --- vault/activity_log.go | 12 +++--------- vault/activity_log_util_common.go | 23 +++++++++++++++++++++++ 2 files changed, 26 insertions(+), 9 deletions(-) diff --git a/vault/activity_log.go b/vault/activity_log.go index 820834b8a592..aa15828c6274 100644 --- a/vault/activity_log.go +++ b/vault/activity_log.go @@ -4095,10 +4095,11 @@ func (a *ActivityLog) writeExport(ctx context.Context, rw http.ResponseWriter, f func (c *Core) activityLogMigrationTask(ctx context.Context) { manager := c.activityLog if !c.IsPerfSecondary() { - // If no migrations tasks have been run, kick off the migration task - if !manager.hasDedupClientsUpgrade(ctx) { + // If the oldest version is less than 1.19 and no migrations tasks have been run, kick off the migration task + if !manager.OldestVersionHasDeduplicatedClients(ctx) && !manager.hasDedupClientsUpgrade(ctx) { go c.primaryDuplicateClientMigrationWorker(ctx) } else { + // Store that upgrade processes have already been completed manager.writeDedupClientsUpgrade(ctx) } } else { @@ -4107,8 +4108,6 @@ func (c *Core) activityLogMigrationTask(ctx context.Context) { // already upgraded primary if !manager.hasDedupClientsUpgrade(ctx) { go c.secondaryDuplicateClientMigrationWorker(ctx) - } else { - manager.writeDedupClientsUpgrade(ctx) } } } @@ -4119,12 +4118,7 @@ func (c *Core) activityLogMigrationTask(ctx context.Context) { // current cluster. This method wil only exit once all connected secondary clusters have // upgraded to 1.19, and this cluster receives global data from all of them. 
func (c *Core) primaryDuplicateClientMigrationWorker(ctx context.Context) error { - c.activityLogLock.Lock() a := c.activityLog - c.activityLogLock.Unlock() - if a == nil { - return fmt.Errorf("activity log not configured") - } a.logger.Trace("started primary activity log migration worker") ctx, cancel := context.WithCancel(ctx) defer cancel() diff --git a/vault/activity_log_util_common.go b/vault/activity_log_util_common.go index e8b85bf20daf..8b1bea9bcbb0 100644 --- a/vault/activity_log_util_common.go +++ b/vault/activity_log_util_common.go @@ -15,6 +15,7 @@ import ( "time" "github.com/axiomhq/hyperloglog" + semver "github.com/hashicorp/go-version" "github.com/hashicorp/vault/helper/timeutil" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/vault/activity" @@ -639,6 +640,28 @@ func (a *ActivityLog) getAllEntitySegmentsForMonth(ctx context.Context, path str return segments, nil } +// OldestVersionHasDeduplicatedClients returns whether this cluster is 1.19+, and +// hence supports deduplicated clients +func (a *ActivityLog) OldestVersionHasDeduplicatedClients(ctx context.Context) bool { + oldestVersionIsDedupClients := a.core.IsNewInstall(ctx) + if !oldestVersionIsDedupClients { + if v, _, err := a.core.FindOldestVersionTimestamp(); err == nil { + oldestVersion, err := semver.NewSemver(v) + if err != nil { + a.core.logger.Debug("could not extract version instance", "version", v) + return false + } + dedupChangeVersion, err := semver.NewSemver(DeduplicatedClientMinimumVersion) + if err != nil { + a.core.logger.Debug("could not extract version instance", "version", DeduplicatedClientMinimumVersion) + return false + } + oldestVersionIsDedupClients = oldestVersionIsDedupClients || oldestVersion.GreaterThanOrEqual(dedupChangeVersion) + } + } + return oldestVersionIsDedupClients +} + func (a *ActivityLog) loadClientDataIntoSegment(ctx context.Context, pathPrefix string, startTime time.Time, seqNum uint64, currentSegment *segmentInfo) 
([]*activity.EntityRecord, error) { path := pathPrefix + activityEntityBasePath + fmt.Sprint(startTime.Unix()) + "/" + strconv.FormatUint(seqNum, 10) out, err := a.readEntitySegmentAtPath(ctx, path) From 6501a562f661b1dabb02de5bcd6e184498bdd012 Mon Sep 17 00:00:00 2001 From: divyaac Date: Fri, 20 Dec 2024 11:35:02 -0800 Subject: [PATCH 08/15] Revert "OSS Changes Patch (#29154)" (#29250) This reverts commit 46c69d739def76f9c436119f9e6a193c1c02cbb3. --- vault/logical_system_activity.go | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/vault/logical_system_activity.go b/vault/logical_system_activity.go index cfa67d078910..56f9ecb66532 100644 --- a/vault/logical_system_activity.go +++ b/vault/logical_system_activity.go @@ -29,8 +29,6 @@ const ( // WarningCurrentMonthIsAnEstimate is a warning string that is used to let the customer know that for this query, the current month's data is estimated. WarningCurrentMonthIsAnEstimate = "Since this usage period includes both the current month and at least one historical month, counts returned in this usage period are an estimate. Client counts for this period will no longer be estimated at the start of the next month." 
- - ErrorUpgradeInProgress = "Upgrade to 1.19+ is in progress; the activity log is not queryable until the upgrade is complete" ) // activityQueryPath is available in every namespace @@ -295,9 +293,6 @@ func (b *SystemBackend) handleClientExport(ctx context.Context, req *logical.Req if a == nil { return logical.ErrorResponse("no activity log present"), nil } - if !a.hasDedupClientsUpgrade(ctx) { - return logical.ErrorResponse(ErrorUpgradeInProgress), nil - } startTime, endTime, err := parseStartEndTimes(d, b.Core.BillingStart()) if err != nil { @@ -344,9 +339,6 @@ func (b *SystemBackend) handleClientMetricQuery(ctx context.Context, req *logica if a == nil { return logical.ErrorResponse("no activity log present"), nil } - if !a.hasDedupClientsUpgrade(ctx) { - return logical.ErrorResponse(ErrorUpgradeInProgress), nil - } warnings := make([]string, 0) @@ -393,9 +385,6 @@ func (b *SystemBackend) handleMonthlyActivityCount(ctx context.Context, req *log if a == nil { return logical.ErrorResponse("no activity log present"), nil } - if !a.hasDedupClientsUpgrade(ctx) { - return logical.ErrorResponse(ErrorUpgradeInProgress), nil - } results, err := a.partialMonthClientCount(ctx) if err != nil { @@ -417,9 +406,6 @@ func (b *SystemBackend) handleActivityConfigRead(ctx context.Context, req *logic if a == nil { return logical.ErrorResponse("no activity log present"), nil } - if !a.hasDedupClientsUpgrade(ctx) { - return logical.ErrorResponse(ErrorUpgradeInProgress), nil - } config, err := a.loadConfigOrDefault(ctx) if err != nil { @@ -454,9 +440,6 @@ func (b *SystemBackend) handleActivityConfigUpdate(ctx context.Context, req *log if a == nil { return logical.ErrorResponse("no activity log present"), nil } - if !a.hasDedupClientsUpgrade(ctx) { - return logical.ErrorResponse(ErrorUpgradeInProgress), nil - } warnings := make([]string, 0) From df73491763981fb7abb938cc43e274370fbd1f35 Mon Sep 17 00:00:00 2001 From: divyaac Date: Fri, 20 Dec 2024 11:38:59 -0800 Subject: [PATCH 09/15] 
Revert "Added LocalMount field to Export API (#29145)" (#29251) This reverts commit af6d983404ad0b16bdd8f6d2d6d23f9be9987a84. --- changelog/29145.txt | 4 ---- vault/activity_log.go | 11 +++-------- .../activity_testonly/activity_testonly_test.go | 3 +-- 3 files changed, 4 insertions(+), 14 deletions(-) delete mode 100644 changelog/29145.txt diff --git a/changelog/29145.txt b/changelog/29145.txt deleted file mode 100644 index dc5a78944612..000000000000 --- a/changelog/29145.txt +++ /dev/null @@ -1,4 +0,0 @@ -```release-note:improvement -activity: Add a "local_mount" field to the Export API response. This field is true if the client is a token or created on a -local mount. -``` diff --git a/vault/activity_log.go b/vault/activity_log.go index aa15828c6274..90ebffdd3887 100644 --- a/vault/activity_log.go +++ b/vault/activity_log.go @@ -336,9 +336,6 @@ type ActivityLogExportRecord struct { // MountPath is the path of the auth mount associated with the token used MountPath string `json:"mount_path" mapstructure:"mount_path"` - // LocalMount indicates if the mount only belongs to the current cluster - LocalMount bool `json:"local_mount" mapstructure:"local_mount"` - // Timestamp denotes the time at which the activity occurred formatted using RFC3339 Timestamp string `json:"timestamp" mapstructure:"timestamp"` @@ -919,7 +916,7 @@ func (a *ActivityLog) getLastSegmentNumberByEntityPath(ctx context.Context, enti } // WalkEntitySegments loads each of the entity segments for a particular start time -func (a *ActivityLog) WalkEntitySegments(ctx context.Context, startTime time.Time, hll *hyperloglog.Sketch, walkFn func(*activity.EntityActivityLog, time.Time, bool) error) error { +func (a *ActivityLog) WalkEntitySegments(ctx context.Context, startTime time.Time, hll *hyperloglog.Sketch, walkFn func(*activity.EntityActivityLog, time.Time, *hyperloglog.Sketch) error) error { baseGlobalPath := activityGlobalPathPrefix + activityEntityBasePath + fmt.Sprint(startTime.Unix()) + "/" 
baseLocalPath := activityLocalPathPrefix + activityEntityBasePath + fmt.Sprint(startTime.Unix()) + "/" @@ -943,7 +940,7 @@ func (a *ActivityLog) WalkEntitySegments(ctx context.Context, startTime time.Tim if err != nil { return fmt.Errorf("unable to parse segment %v%v: %w", basePath, path, err) } - err = walkFn(out, startTime, basePath == baseLocalPath) + err = walkFn(out, startTime, hll) if err != nil { return fmt.Errorf("unable to walk entities: %w", err) } @@ -3837,7 +3834,7 @@ func (a *ActivityLog) writeExport(ctx context.Context, rw http.ResponseWriter, f return err } - walkEntities := func(l *activity.EntityActivityLog, startTime time.Time, isLocal bool) error { + walkEntities := func(l *activity.EntityActivityLog, startTime time.Time, hll *hyperloglog.Sketch) error { for _, e := range l.Clients { if _, ok := dedupIDs[e.ClientID]; ok { continue @@ -3869,7 +3866,6 @@ func (a *ActivityLog) writeExport(ctx context.Context, rw http.ResponseWriter, f NamespacePath: nsDisplayPath, Timestamp: ts.UTC().Format(time.RFC3339), MountAccessor: e.MountAccessor, - LocalMount: isLocal, // Default following to empty versus nil, will be overwritten if necessary Policies: []string{}, @@ -4265,7 +4261,6 @@ func baseActivityExportCSVHeader() []string { "client_id", "client_type", "local_entity_alias", - "local_mount", "namespace_id", "namespace_path", "mount_accessor", diff --git a/vault/external_tests/activity_testonly/activity_testonly_test.go b/vault/external_tests/activity_testonly/activity_testonly_test.go index 4fcca08c04aa..cd9dfb21574b 100644 --- a/vault/external_tests/activity_testonly/activity_testonly_test.go +++ b/vault/external_tests/activity_testonly/activity_testonly_test.go @@ -1,7 +1,7 @@ // Copyright (c) HashiCorp, Inc. 
// SPDX-License-Identifier: BUSL-1.1 -//go:build testonly +////go:build testonly package activity_testonly @@ -549,7 +549,6 @@ func getCSVExport(t *testing.T, client *api.Client, monthsPreviousTo int, now ti boolFields := map[string]struct{}{ "local_entity_alias": {}, - "local_mount": {}, } mapFields := map[string]struct{}{ From b9e949bf7330fb8b5048171bdc5b144989eeeb06 Mon Sep 17 00:00:00 2001 From: Victor Rodriguez Date: Fri, 20 Dec 2024 20:55:25 +0100 Subject: [PATCH 10/15] Support all fields of the name constraints extension when generating CA certificates (#29245) Support all fields of the name constraints extension when generating CA certs. The PKI secrets engine only provided parameter permitted_dns_domains to create the name constraints extension when generating CA certificates. Add the following parameters to provide full support for the extension: * permitted_email_addresses * permitted_ip_ranges * permitted_uri_domains * excluded_dns_domains * excluded_email_addresses * excluded_ip_ranges * excluded_uri_domains Specifying any combination of these parameters will trigger the creation of the name constraints extension as per RFC 5280 section 4.2.1.10. 
--- builtin/logical/pki/cert_util.go | 56 +++ builtin/logical/pki/cert_util_test.go | 468 +++++++++++------- builtin/logical/pki/fields.go | 54 ++ builtin/logical/pki/issuing/sign_cert.go | 53 ++ changelog/29245.txt | 3 + sdk/helper/certutil/helpers.go | 98 ++-- sdk/helper/certutil/types.go | 11 +- website/content/api-docs/secret/pki/index.mdx | 38 ++ .../docs/commands/pki/health-check.mdx | 7 + .../docs/secrets/pki/considerations.mdx | 18 +- 10 files changed, 588 insertions(+), 218 deletions(-) create mode 100644 changelog/29245.txt diff --git a/builtin/logical/pki/cert_util.go b/builtin/logical/pki/cert_util.go index ae5d3504d465..880468212293 100644 --- a/builtin/logical/pki/cert_util.go +++ b/builtin/logical/pki/cert_util.go @@ -348,6 +348,19 @@ func generateCert(sc *storageContext, if isCA { data.Params.IsCA = isCA data.Params.PermittedDNSDomains = input.apiData.Get("permitted_dns_domains").([]string) + data.Params.ExcludedDNSDomains = input.apiData.Get("excluded_dns_domains").([]string) + data.Params.PermittedIPRanges, err = convertIpRanges(input.apiData.Get("permitted_ip_ranges").([]string)) + if err != nil { + return nil, nil, errutil.UserError{Err: fmt.Sprintf("invalid permitted_ip_ranges value: %s", err)} + } + data.Params.ExcludedIPRanges, err = convertIpRanges(input.apiData.Get("excluded_ip_ranges").([]string)) + if err != nil { + return nil, nil, errutil.UserError{Err: fmt.Sprintf("invalid excluded_ip_ranges value: %s", err)} + } + data.Params.PermittedEmailAddresses = input.apiData.Get("permitted_email_addresses").([]string) + data.Params.ExcludedEmailAddresses = input.apiData.Get("excluded_email_addresses").([]string) + data.Params.PermittedURIDomains = input.apiData.Get("permitted_uri_domains").([]string) + data.Params.ExcludedURIDomains = input.apiData.Get("excluded_uri_domains").([]string) if data.SigningBundle == nil { // Generating a self-signed root certificate. 
Since we have no @@ -399,6 +412,21 @@ func generateCert(sc *storageContext, return parsedBundle, warnings, nil } +// convertIpRanges parses each string in the input slice as an IP network. Input +// strings are expected to be in the CIDR notation of IP address and prefix length +// like "192.0.2.0/24" or "2001:db8::/32", as defined in RFC 4632 and RFC 4291. +func convertIpRanges(ipRanges []string) ([]*net.IPNet, error) { + var ret []*net.IPNet + for _, ipRange := range ipRanges { + _, ipnet, err := net.ParseCIDR(ipRange) + if err != nil { + return nil, fmt.Errorf("error parsing IP range %q: %w", ipRange, err) + } + ret = append(ret, ipnet) + } + return ret, nil +} + // N.B.: This is only meant to be used for generating intermediate CAs. // It skips some sanity checks. func generateIntermediateCSR(sc *storageContext, input *inputBundle, randomSource io.Reader) (*certutil.ParsedCSRBundle, []string, error) { @@ -472,6 +500,34 @@ func (i SignCertInputFromDataFields) GetPermittedDomains() []string { return i.data.Get("permitted_dns_domains").([]string) } +func (i SignCertInputFromDataFields) GetExcludedDomains() []string { + return i.data.Get("excluded_dns_domains").([]string) +} + +func (i SignCertInputFromDataFields) GetPermittedIpRanges() ([]*net.IPNet, error) { + return convertIpRanges(i.data.Get("permitted_ip_ranges").([]string)) +} + +func (i SignCertInputFromDataFields) GetExcludedIpRanges() ([]*net.IPNet, error) { + return convertIpRanges(i.data.Get("excluded_ip_ranges").([]string)) +} + +func (i SignCertInputFromDataFields) GetPermittedEmailAddresses() []string { + return i.data.Get("permitted_email_addresses").([]string) +} + +func (i SignCertInputFromDataFields) GetExcludedEmailAddresses() []string { + return i.data.Get("excluded_email_addresses").([]string) +} + +func (i SignCertInputFromDataFields) GetPermittedUriDomains() []string { + return i.data.Get("permitted_uri_domains").([]string) +} + +func (i SignCertInputFromDataFields) GetExcludedUriDomains() 
[]string { + return i.data.Get("excluded_uri_domains").([]string) +} + func (i SignCertInputFromDataFields) IgnoreCSRSignature() bool { return false } diff --git a/builtin/logical/pki/cert_util_test.go b/builtin/logical/pki/cert_util_test.go index 212d5bda709c..30f0f71c7715 100644 --- a/builtin/logical/pki/cert_util_test.go +++ b/builtin/logical/pki/cert_util_test.go @@ -267,13 +267,13 @@ func TestPki_PermitFQDNs(t *testing.T) { } type parseCertificateTestCase struct { - name string - data map[string]interface{} - roleData map[string]interface{} // if a role is to be created - ttl time.Duration - wantParams certutil.CreationParameters - wantFields map[string]interface{} - wantErr bool + name string + data map[string]interface{} + roleData map[string]interface{} // if a role is to be created + ttl time.Duration + wantParams certutil.CreationParameters + wantFields map[string]interface{} + wantIssuanceErr string // If not empty, require.ErrorContains will be used on this string } // TestDisableVerifyCertificateEnvVar verifies that env var VAULT_DISABLE_PKI_CONSTRAINTS_VERIFICATION @@ -387,12 +387,16 @@ func TestParseCertificate(t *testing.T) { parseURL := func(s string) *url.URL { u, err := url.Parse(s) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) return u } + convertIps := func(ipRanges ...string) []*net.IPNet { + ret, err := convertIpRanges(ipRanges) + require.NoError(t, err) + return ret + } + tests := []*parseCertificateTestCase{ { name: "simple CA", @@ -434,56 +438,69 @@ func TestParseCertificate(t *testing.T) { SKID: []byte("We'll assert that it is not nil as an special case"), }, wantFields: map[string]interface{}{ - "common_name": "the common name", - "alt_names": "", - "ip_sans": "", - "uri_sans": "", - "other_sans": "", - "signature_bits": 384, - "exclude_cn_from_sans": true, - "ou": "", - "organization": "", - "country": "", - "locality": "", - "province": "", - "street_address": "", - "postal_code": "", - "serial_number": "", - "ttl": 
"1h0m30s", - "max_path_length": -1, - "permitted_dns_domains": "", - "use_pss": false, - "key_type": "ec", - "key_bits": 384, - "skid": "We'll assert that it is not nil as an special case", + "common_name": "the common name", + "alt_names": "", + "ip_sans": "", + "uri_sans": "", + "other_sans": "", + "signature_bits": 384, + "exclude_cn_from_sans": true, + "ou": "", + "organization": "", + "country": "", + "locality": "", + "province": "", + "street_address": "", + "postal_code": "", + "serial_number": "", + "ttl": "1h0m30s", + "max_path_length": -1, + "permitted_dns_domains": "", + "excluded_dns_domains": "", + "permitted_ip_ranges": "", + "excluded_ip_ranges": "", + "permitted_email_addresses": "", + "excluded_email_addresses": "", + "permitted_uri_domains": "", + "excluded_uri_domains": "", + "use_pss": false, + "key_type": "ec", + "key_bits": 384, + "skid": "We'll assert that it is not nil as an special case", }, - wantErr: false, }, { // Note that this test's data is used to create the internal CA used by test "full non CA cert" name: "full CA", data: map[string]interface{}{ // using the same order as in https://developer.hashicorp.com/vault/api-docs/secret/pki#sign-certificate - "common_name": "the common name", - "alt_names": "user@example.com,admin@example.com,example.com,www.example.com", - "ip_sans": "1.2.3.4,1.2.3.5", - "uri_sans": "https://example.com,https://www.example.com", - "other_sans": "1.3.6.1.4.1.311.20.2.3;utf8:caadmin@example.com", - "ttl": "2h", - "max_path_length": 2, - "permitted_dns_domains": "example.com,.example.com,.www.example.com", - "ou": "unit1, unit2", - "organization": "org1, org2", - "country": "US, CA", - "locality": "locality1, locality2", - "province": "province1, province2", - "street_address": "street_address1, street_address2", - "postal_code": "postal_code1, postal_code2", - "not_before_duration": "45s", - "key_type": "rsa", - "use_pss": true, - "key_bits": 2048, - "signature_bits": 384, + "common_name": "the common 
name", + "alt_names": "user@example.com,admin@example.com,example.com,www.example.com", + "ip_sans": "1.2.3.4,1.2.3.5", + "uri_sans": "https://example.com,https://www.example.com", + "other_sans": "1.3.6.1.4.1.311.20.2.3;utf8:caadmin@example.com", + "ttl": "2h", + "max_path_length": 2, + "permitted_dns_domains": "example.com,.example.com,.www.example.com", + "excluded_dns_domains": "bad.example.com,reallybad.com", + "permitted_ip_ranges": "192.0.2.1/24,76.76.21.21/24,2001:4860:4860::8889/32", // Note that while an IP address if specified here, it is the network address that will be stored + "excluded_ip_ranges": "127.0.0.1/16,2001:4860:4860::8888/32", + "permitted_email_addresses": "info@example.com,user@example.com,admin@example.com", + "excluded_email_addresses": "root@example.com,robots@example.com", + "permitted_uri_domains": "example.com,www.example.com", + "excluded_uri_domains": "ftp.example.com,gopher.www.example.com", + "ou": "unit1, unit2", + "organization": "org1, org2", + "country": "US, CA", + "locality": "locality1, locality2", + "province": "province1, province2", + "street_address": "street_address1, street_address2", + "postal_code": "postal_code1, postal_code2", + "not_before_duration": "45s", + "key_type": "rsa", + "use_pss": true, + "key_bits": 2048, + "signature_bits": 384, // TODO(kitography): Specify key usage }, ttl: 2 * time.Hour, @@ -517,36 +534,49 @@ func TestParseCertificate(t *testing.T) { ForceAppendCaChain: false, UseCSRValues: false, PermittedDNSDomains: []string{"example.com", ".example.com", ".www.example.com"}, + ExcludedDNSDomains: []string{"bad.example.com", "reallybad.com"}, + PermittedIPRanges: convertIps("192.0.2.0/24", "76.76.21.0/24", "2001:4860::/32"), // Note that we stored the network address rather than the specific IP address + ExcludedIPRanges: convertIps("127.0.0.0/16", "2001:4860::/32"), + PermittedEmailAddresses: []string{"info@example.com", "user@example.com", "admin@example.com"}, + ExcludedEmailAddresses: 
[]string{"root@example.com", "robots@example.com"}, + PermittedURIDomains: []string{"example.com", "www.example.com"}, + ExcludedURIDomains: []string{"ftp.example.com", "gopher.www.example.com"}, URLs: nil, MaxPathLength: 2, NotBeforeDuration: 45 * time.Second, SKID: []byte("We'll assert that it is not nil as an special case"), }, wantFields: map[string]interface{}{ - "common_name": "the common name", - "alt_names": "example.com,www.example.com,admin@example.com,user@example.com", - "ip_sans": "1.2.3.4,1.2.3.5", - "uri_sans": "https://example.com,https://www.example.com", - "other_sans": "1.3.6.1.4.1.311.20.2.3;UTF-8:caadmin@example.com", - "signature_bits": 384, - "exclude_cn_from_sans": true, - "ou": "unit1,unit2", - "organization": "org1,org2", - "country": "CA,US", - "locality": "locality1,locality2", - "province": "province1,province2", - "street_address": "street_address1,street_address2", - "postal_code": "postal_code1,postal_code2", - "serial_number": "", - "ttl": "2h0m45s", - "max_path_length": 2, - "permitted_dns_domains": "example.com,.example.com,.www.example.com", - "use_pss": true, - "key_type": "rsa", - "key_bits": 2048, - "skid": "We'll assert that it is not nil as an special case", + "common_name": "the common name", + "alt_names": "example.com,www.example.com,admin@example.com,user@example.com", + "ip_sans": "1.2.3.4,1.2.3.5", + "uri_sans": "https://example.com,https://www.example.com", + "other_sans": "1.3.6.1.4.1.311.20.2.3;UTF-8:caadmin@example.com", + "signature_bits": 384, + "exclude_cn_from_sans": true, + "ou": "unit1,unit2", + "organization": "org1,org2", + "country": "CA,US", + "locality": "locality1,locality2", + "province": "province1,province2", + "street_address": "street_address1,street_address2", + "postal_code": "postal_code1,postal_code2", + "serial_number": "", + "ttl": "2h0m45s", + "max_path_length": 2, + "permitted_dns_domains": "example.com,.example.com,.www.example.com", + "excluded_dns_domains": 
"bad.example.com,reallybad.com", + "permitted_ip_ranges": "192.0.2.0/24,76.76.21.0/24,2001:4860::/32", + "excluded_ip_ranges": "127.0.0.0/16,2001:4860::/32", + "permitted_email_addresses": "info@example.com,user@example.com,admin@example.com", + "excluded_email_addresses": "root@example.com,robots@example.com", + "permitted_uri_domains": "example.com,www.example.com", + "excluded_uri_domains": "ftp.example.com,gopher.www.example.com", + "use_pss": true, + "key_type": "rsa", + "key_bits": 2048, + "skid": "We'll assert that it is not nil as an special case", }, - wantErr: false, }, { // Note that we use the data of test "full CA" to create the internal CA needed for this test @@ -555,7 +585,7 @@ func TestParseCertificate(t *testing.T) { // using the same order as in https://developer.hashicorp.com/vault/api-docs/secret/pki#generate-certificate-and-key "common_name": "the common name non ca", "alt_names": "user@example.com,admin@example.com,example.com,www.example.com", - "ip_sans": "1.2.3.4,1.2.3.5", + "ip_sans": "192.0.2.1,192.0.2.2", // These must be permitted by the full CA "uri_sans": "https://example.com,https://www.example.com", "other_sans": "1.3.6.1.4.1.311.20.2.3;utf8:caadmin@example.com", "ttl": "2h", @@ -589,7 +619,7 @@ func TestParseCertificate(t *testing.T) { }, DNSNames: []string{"example.com", "www.example.com"}, EmailAddresses: []string{"admin@example.com", "user@example.com"}, - IPAddresses: []net.IP{[]byte{1, 2, 3, 4}, []byte{1, 2, 3, 5}}, + IPAddresses: []net.IP{[]byte{192, 0, 2, 1}, []byte{192, 0, 2, 2}}, URIs: []*url.URL{parseURL("https://example.com"), parseURL("https://www.example.com")}, OtherSANs: map[string][]string{"1.3.6.1.4.1.311.20.2.3": {"caadmin@example.com"}}, IsCA: false, @@ -612,30 +642,120 @@ func TestParseCertificate(t *testing.T) { SKID: []byte("We'll assert that it is not nil as an special case"), }, wantFields: map[string]interface{}{ - "common_name": "the common name non ca", - "alt_names": 
"example.com,www.example.com,admin@example.com,user@example.com", - "ip_sans": "1.2.3.4,1.2.3.5", - "uri_sans": "https://example.com,https://www.example.com", - "other_sans": "1.3.6.1.4.1.311.20.2.3;UTF-8:caadmin@example.com", - "signature_bits": 384, - "exclude_cn_from_sans": true, - "ou": "", - "organization": "", - "country": "", - "locality": "", - "province": "", - "street_address": "", - "postal_code": "", - "serial_number": "", - "ttl": "2h0m45s", - "max_path_length": 0, - "permitted_dns_domains": "", - "use_pss": false, - "key_type": "rsa", - "key_bits": 2048, - "skid": "We'll assert that it is not nil as an special case", + "common_name": "the common name non ca", + "alt_names": "example.com,www.example.com,admin@example.com,user@example.com", + "ip_sans": "192.0.2.1,192.0.2.2", + "uri_sans": "https://example.com,https://www.example.com", + "other_sans": "1.3.6.1.4.1.311.20.2.3;UTF-8:caadmin@example.com", + "signature_bits": 384, + "exclude_cn_from_sans": true, + "ou": "", + "organization": "", + "country": "", + "locality": "", + "province": "", + "street_address": "", + "postal_code": "", + "serial_number": "", + "ttl": "2h0m45s", + "max_path_length": 0, + "permitted_dns_domains": "", + "excluded_dns_domains": "", + "permitted_ip_ranges": "", + "excluded_ip_ranges": "", + "permitted_email_addresses": "", + "excluded_email_addresses": "", + "permitted_uri_domains": "", + "excluded_uri_domains": "", + "use_pss": false, + "key_type": "rsa", + "key_bits": 2048, + "skid": "We'll assert that it is not nil as an special case", + }, + }, + { + name: "DNS domain not permitted", + data: map[string]interface{}{ + "common_name": "the common name non ca", + "alt_names": "badexample.com", + "ttl": "2h", + }, + ttl: 2 * time.Hour, + roleData: map[string]interface{}{ + "allow_any_name": true, + "cn_validations": "disabled", + }, + wantIssuanceErr: `DNS name "badexample.com" is not permitted by any constraint`, + }, + { + name: "DNS domain explicitly excluded", + data: 
map[string]interface{}{ + "common_name": "the common name non ca", + "alt_names": "bad.example.com", + "ttl": "2h", + }, + ttl: 2 * time.Hour, + roleData: map[string]interface{}{ + "allow_any_name": true, + "cn_validations": "disabled", + }, + wantIssuanceErr: `DNS name "bad.example.com" is excluded by constraint "bad.example.com"`, + }, + { + name: "IP address not permitted", + data: map[string]interface{}{ + "common_name": "the common name non ca", + "ip_sans": "192.0.3.1", + "ttl": "2h", + }, + ttl: 2 * time.Hour, + roleData: map[string]interface{}{ + "allow_any_name": true, + "cn_validations": "disabled", + }, + wantIssuanceErr: `IP address "192.0.3.1" is not permitted by any constraint`, + }, + { + name: "IP address explicitly excluded", + data: map[string]interface{}{ + "common_name": "the common name non ca", + "ip_sans": "127.0.0.123", + "ttl": "2h", + }, + ttl: 2 * time.Hour, + roleData: map[string]interface{}{ + "allow_any_name": true, + "cn_validations": "disabled", + }, + wantIssuanceErr: `IP address "127.0.0.123" is excluded by constraint "127.0.0.0/16"`, + }, + { + name: "email address not permitted", + data: map[string]interface{}{ + "common_name": "the common name non ca", + "alt_names": "random@example.com", + "ttl": "2h", + }, + ttl: 2 * time.Hour, + roleData: map[string]interface{}{ + "allow_any_name": true, + "cn_validations": "disabled", }, - wantErr: false, + wantIssuanceErr: `email address "random@example.com" is not permitted by any constraint`, + }, + { + name: "email address explicitly excluded", + data: map[string]interface{}{ + "common_name": "the common name non ca", + "alt_names": "root@example.com", + "ttl": "2h", + }, + ttl: 2 * time.Hour, + roleData: map[string]interface{}{ + "allow_any_name": true, + "cn_validations": "disabled", + }, + wantIssuanceErr: `email address "root@example.com" is excluded by constraint "root@example.com"`, }, } for _, tt := range tests { @@ -668,15 +788,22 @@ func TestParseCertificate(t *testing.T) { // 
create the cert resp, err = CBWrite(b, s, "issue/test", tt.data) - require.NoError(t, err) - require.NotNil(t, resp) - - certData := resp.Data["certificate"].(string) - cert, err = parsing.ParseCertificateFromString(certData) - require.NoError(t, err) - require.NotNil(t, cert) + if tt.wantIssuanceErr != "" { + require.ErrorContains(t, err, tt.wantIssuanceErr) + } else { + require.NoError(t, err) + require.NotNil(t, resp) + + certData := resp.Data["certificate"].(string) + cert, err = parsing.ParseCertificateFromString(certData) + require.NoError(t, err) + require.NotNil(t, cert) + } } + if tt.wantIssuanceErr != "" { + return + } t.Run(tt.name+" parameters", func(t *testing.T) { testParseCertificateToCreationParameters(t, issueTime, tt, cert) }) @@ -690,72 +817,64 @@ func TestParseCertificate(t *testing.T) { func testParseCertificateToCreationParameters(t *testing.T, issueTime time.Time, tt *parseCertificateTestCase, cert *x509.Certificate) { params, err := certutil.ParseCertificateToCreationParameters(*cert) - if tt.wantErr { - require.Error(t, err) - } else { - require.NoError(t, err) + require.NoError(t, err) - ignoreBasicConstraintsValidForNonCA := tt.wantParams.IsCA - - var diff []string - for _, d := range deep.Equal(tt.wantParams, params) { - switch { - case strings.HasPrefix(d, "SKID"): - continue - case strings.HasPrefix(d, "BasicConstraintsValidForNonCA") && ignoreBasicConstraintsValidForNonCA: - continue - case strings.HasPrefix(d, "NotBeforeDuration"): - continue - case strings.HasPrefix(d, "NotAfter"): - continue - } - diff = append(diff, d) - } - if diff != nil { - t.Errorf("testParseCertificateToCreationParameters() diff: %s", strings.Join(diff, "\n")) + ignoreBasicConstraintsValidForNonCA := tt.wantParams.IsCA + + var diff []string + for _, d := range deep.Equal(tt.wantParams, params) { + switch { + case strings.HasPrefix(d, "SKID"): + continue + case strings.HasPrefix(d, "BasicConstraintsValidForNonCA") && ignoreBasicConstraintsValidForNonCA: + 
continue + case strings.HasPrefix(d, "NotBeforeDuration"): + continue + case strings.HasPrefix(d, "NotAfter"): + continue } + diff = append(diff, d) + } + if diff != nil { + t.Errorf("testParseCertificateToCreationParameters() diff: %s", strings.Join(diff, "\n")) + } - require.NotNil(t, params.SKID) - require.GreaterOrEqual(t, params.NotBeforeDuration, tt.wantParams.NotBeforeDuration, - "NotBeforeDuration want: %s got: %s", tt.wantParams.NotBeforeDuration, params.NotBeforeDuration) + require.NotNil(t, params.SKID) + require.GreaterOrEqual(t, params.NotBeforeDuration, tt.wantParams.NotBeforeDuration, + "NotBeforeDuration want: %s got: %s", tt.wantParams.NotBeforeDuration, params.NotBeforeDuration) - require.GreaterOrEqual(t, params.NotAfter, issueTime.Add(tt.ttl).Add(-1*time.Minute), - "NotAfter want: %s got: %s", tt.wantParams.NotAfter, params.NotAfter) - require.LessOrEqual(t, params.NotAfter, issueTime.Add(tt.ttl).Add(1*time.Minute), - "NotAfter want: %s got: %s", tt.wantParams.NotAfter, params.NotAfter) - } + require.GreaterOrEqual(t, params.NotAfter, issueTime.Add(tt.ttl).Add(-1*time.Minute), + "NotAfter want: %s got: %s", tt.wantParams.NotAfter, params.NotAfter) + require.LessOrEqual(t, params.NotAfter, issueTime.Add(tt.ttl).Add(1*time.Minute), + "NotAfter want: %s got: %s", tt.wantParams.NotAfter, params.NotAfter) } func testParseCertificateToFields(t *testing.T, issueTime time.Time, tt *parseCertificateTestCase, cert *x509.Certificate) { fields, err := certutil.ParseCertificateToFields(*cert) - if tt.wantErr { - require.Error(t, err) - } else { - require.NoError(t, err) + require.NoError(t, err) - require.NotNil(t, fields["skid"]) - delete(fields, "skid") - delete(tt.wantFields, "skid") + require.NotNil(t, fields["skid"]) + delete(fields, "skid") + delete(tt.wantFields, "skid") - { - // Sometimes TTL comes back as 1s off, so we'll allow that - expectedTTL, err := parseutil.ParseDurationSecond(tt.wantFields["ttl"].(string)) - require.NoError(t, err) - 
actualTTL, err := parseutil.ParseDurationSecond(fields["ttl"].(string)) - require.NoError(t, err) - - diff := expectedTTL - actualTTL - require.LessOrEqual(t, actualTTL, expectedTTL, // NotAfter is generated before NotBefore so the time.Now of notBefore may be later, shrinking our calculated TTL during very slow tests - "ttl should be, if off, smaller than expected want: %s got: %s", tt.wantFields["ttl"], fields["ttl"]) - require.LessOrEqual(t, diff, 30*time.Second, // Test can be slow, allow more off in the other direction - "ttl must be at most 30s off, want: %s got: %s", tt.wantFields["ttl"], fields["ttl"]) - delete(fields, "ttl") - delete(tt.wantFields, "ttl") - } + { + // Sometimes TTL comes back as 1s off, so we'll allow that + expectedTTL, err := parseutil.ParseDurationSecond(tt.wantFields["ttl"].(string)) + require.NoError(t, err) + actualTTL, err := parseutil.ParseDurationSecond(fields["ttl"].(string)) + require.NoError(t, err) - if diff := deep.Equal(tt.wantFields, fields); diff != nil { - t.Errorf("testParseCertificateToFields() diff: %s", strings.ReplaceAll(strings.Join(diff, "\n"), "map", "\nmap")) - } + diff := expectedTTL - actualTTL + require.LessOrEqual(t, actualTTL, expectedTTL, // NotAfter is generated before NotBefore so the time.Now of notBefore may be later, shrinking our calculated TTL during very slow tests + "ttl should be, if off, smaller than expected want: %s got: %s", tt.wantFields["ttl"], fields["ttl"]) + require.LessOrEqual(t, diff, 30*time.Second, // Test can be slow, allow more off in the other direction + "ttl must be at most 30s off, want: %s got: %s", tt.wantFields["ttl"], fields["ttl"]) + delete(fields, "ttl") + delete(tt.wantFields, "ttl") + } + + if diff := deep.Equal(tt.wantFields, fields); diff != nil { + t.Errorf("testParseCertificateToFields() diff: %s", strings.ReplaceAll(strings.Join(diff, "\n"), "map", "\nmap")) } } @@ -831,7 +950,6 @@ func TestParseCsr(t *testing.T) { "serial_number": "", "add_basic_constraints": 
false, }, - wantErr: false, }, { name: "full CSR with basic constraints", @@ -918,7 +1036,6 @@ func TestParseCsr(t *testing.T) { "serial_number": "37:60:16:e4:85:d5:96:38:3a:ed:31:06:8d:ed:7a:46:d4:22:63:d8", "add_basic_constraints": true, }, - wantErr: false, }, { name: "full CSR without basic constraints", @@ -1005,7 +1122,6 @@ func TestParseCsr(t *testing.T) { "serial_number": "37:60:16:e4:85:d5:96:38:3a:ed:31:06:8d:ed:7a:46:d4:22:63:d8", "add_basic_constraints": false, }, - wantErr: false, }, } for _, tt := range tests { @@ -1034,26 +1150,18 @@ func TestParseCsr(t *testing.T) { func testParseCsrToCreationParameters(t *testing.T, issueTime time.Time, tt *parseCertificateTestCase, csr *x509.CertificateRequest) { params, err := certutil.ParseCsrToCreationParameters(*csr) - if tt.wantErr { - require.Error(t, err) - } else { - require.NoError(t, err) + require.NoError(t, err) - if diff := deep.Equal(tt.wantParams, params); diff != nil { - t.Errorf("testParseCertificateToCreationParameters() diff: %s", strings.ReplaceAll(strings.Join(diff, "\n"), "map", "\nmap")) - } + if diff := deep.Equal(tt.wantParams, params); diff != nil { + t.Errorf("testParseCertificateToCreationParameters() diff: %s", strings.ReplaceAll(strings.Join(diff, "\n"), "map", "\nmap")) } } func testParseCsrToFields(t *testing.T, issueTime time.Time, tt *parseCertificateTestCase, csr *x509.CertificateRequest) { fields, err := certutil.ParseCsrToFields(*csr) - if tt.wantErr { - require.Error(t, err) - } else { - require.NoError(t, err) + require.NoError(t, err) - if diff := deep.Equal(tt.wantFields, fields); diff != nil { - t.Errorf("testParseCertificateToFields() diff: %s", strings.ReplaceAll(strings.Join(diff, "\n"), "map", "\nmap")) - } + if diff := deep.Equal(tt.wantFields, fields); diff != nil { + t.Errorf("testParseCertificateToFields() diff: %s", strings.ReplaceAll(strings.Join(diff, "\n"), "map", "\nmap")) } } diff --git a/builtin/logical/pki/fields.go b/builtin/logical/pki/fields.go index 
dfeec9df4bb3..24d030576f56 100644 --- a/builtin/logical/pki/fields.go +++ b/builtin/logical/pki/fields.go @@ -390,6 +390,60 @@ func addCAIssueFields(fields map[string]*framework.FieldSchema) map[string]*fram Name: "Permitted DNS Domains", }, } + fields["excluded_dns_domains"] = &framework.FieldSchema{ + Type: framework.TypeCommaStringSlice, + Description: `Domains for which this certificate is not allowed to sign or issue child certificates (see https://tools.ietf.org/html/rfc5280#section-4.2.1.10).`, + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Excluded DNS Domains", + }, + } + + fields["permitted_ip_ranges"] = &framework.FieldSchema{ + Type: framework.TypeCommaStringSlice, + Description: `IP ranges for which this certificate is allowed to sign or issue child certificates (see https://tools.ietf.org/html/rfc5280#section-4.2.1.10). +Ranges must be specified in the notation of IP address and prefix length, like "192.0.2.0/24" or "2001:db8::/32", as defined in RFC 4632 and RFC 4291.`, + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Permitted IP ranges", + }, + } + fields["excluded_ip_ranges"] = &framework.FieldSchema{ + Type: framework.TypeCommaStringSlice, + Description: `IP ranges for which this certificate is not allowed to sign or issue child certificates (see https://tools.ietf.org/html/rfc5280#section-4.2.1.10). 
+Ranges must be specified in the notation of IP address and prefix length, like "192.0.2.0/24" or "2001:db8::/32", as defined in RFC 4632 and RFC 4291.`, + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Excluded IP ranges", + }, + } + + fields["permitted_email_addresses"] = &framework.FieldSchema{ + Type: framework.TypeCommaStringSlice, + Description: `Email addresses for which this certificate is allowed to sign or issue child certificates (see https://tools.ietf.org/html/rfc5280#section-4.2.1.10).`, + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Permitted email adresses", + }, + } + fields["excluded_email_addresses"] = &framework.FieldSchema{ + Type: framework.TypeCommaStringSlice, + Description: `Email addresses for which this certificate is not allowed to sign or issue child certificates (see https://tools.ietf.org/html/rfc5280#section-4.2.1.10).`, + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Excluded email addresses", + }, + } + + fields["permitted_uri_domains"] = &framework.FieldSchema{ + Type: framework.TypeCommaStringSlice, + Description: `URI domains for which this certificate is allowed to sign or issue child certificates (see https://tools.ietf.org/html/rfc5280#section-4.2.1.10).`, + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Permitted URI domains", + }, + } + fields["excluded_uri_domains"] = &framework.FieldSchema{ + Type: framework.TypeCommaStringSlice, + Description: `URI domains for which this certificate is not allowed to sign or issue child certificates (see https://tools.ietf.org/html/rfc5280#section-4.2.1.10).`, + DisplayAttrs: &framework.DisplayAttributes{ + Name: "Excluded URI domains", + }, + } fields = addIssuerNameField(fields) diff --git a/builtin/logical/pki/issuing/sign_cert.go b/builtin/logical/pki/issuing/sign_cert.go index c6548f69521e..8894f2def8b4 100644 --- a/builtin/logical/pki/issuing/sign_cert.go +++ b/builtin/logical/pki/issuing/sign_cert.go @@ -9,6 +9,7 @@ import ( "crypto/rsa" "crypto/x509" 
"fmt" + "net" "github.com/hashicorp/vault/sdk/helper/certutil" "github.com/hashicorp/vault/sdk/helper/errutil" @@ -21,6 +22,13 @@ type SignCertInput interface { IsCA() bool UseCSRValues() bool GetPermittedDomains() []string + GetExcludedDomains() []string + GetPermittedIpRanges() ([]*net.IPNet, error) + GetExcludedIpRanges() ([]*net.IPNet, error) + GetPermittedEmailAddresses() []string + GetExcludedEmailAddresses() []string + GetPermittedUriDomains() []string + GetExcludedUriDomains() []string } func NewBasicSignCertInput(csr *x509.CertificateRequest, isCA, useCSRValues bool) BasicSignCertInput { @@ -113,6 +121,38 @@ func (b BasicSignCertInput) GetPermittedDomains() []string { return []string{} } +func (b BasicSignCertInput) GetExcludedDomains() []string { + return []string{} +} + +// GetPermittedIpRanges returns the permitted IP ranges for the name constraints extension. +// ignore-nil-nil-function-check +func (b BasicSignCertInput) GetPermittedIpRanges() ([]*net.IPNet, error) { + return nil, nil +} + +// GetExcludedIpRanges returns the excluded IP ranges for the name constraints extension. 
+// ignore-nil-nil-function-check +func (b BasicSignCertInput) GetExcludedIpRanges() ([]*net.IPNet, error) { + return nil, nil +} + +func (b BasicSignCertInput) GetPermittedEmailAddresses() []string { + return []string{} +} + +func (b BasicSignCertInput) GetExcludedEmailAddresses() []string { + return []string{} +} + +func (b BasicSignCertInput) GetPermittedUriDomains() []string { + return []string{} +} + +func (b BasicSignCertInput) GetExcludedUriDomains() []string { + return []string{} +} + func SignCert(b logical.SystemView, role *RoleEntry, entityInfo EntityInfo, caSign *certutil.CAInfoBundle, signInput SignCertInput) (*certutil.ParsedCertBundle, []string, error) { if role == nil { return nil, nil, errutil.InternalError{Err: "no role found in data bundle"} @@ -284,6 +324,19 @@ func SignCert(b logical.SystemView, role *RoleEntry, entityInfo EntityInfo, caSi if signInput.IsCA() { creation.Params.PermittedDNSDomains = signInput.GetPermittedDomains() + creation.Params.ExcludedDNSDomains = signInput.GetExcludedDomains() + creation.Params.PermittedIPRanges, err = signInput.GetPermittedIpRanges() + if err != nil { + return nil, nil, errutil.UserError{Err: fmt.Sprintf("error parsinng permitted IP ranges: %v", err)} + } + creation.Params.ExcludedIPRanges, err = signInput.GetExcludedIpRanges() + if err != nil { + return nil, nil, errutil.UserError{Err: fmt.Sprintf("error parsinng excluded IP ranges: %v", err)} + } + creation.Params.PermittedEmailAddresses = signInput.GetPermittedEmailAddresses() + creation.Params.ExcludedEmailAddresses = signInput.GetExcludedEmailAddresses() + creation.Params.PermittedURIDomains = signInput.GetPermittedUriDomains() + creation.Params.ExcludedURIDomains = signInput.GetExcludedUriDomains() } else { for _, ext := range csr.Extensions { if ext.Id.Equal(certutil.ExtensionBasicConstraintsOID) { diff --git a/changelog/29245.txt b/changelog/29245.txt new file mode 100644 index 000000000000..17d6b0837d4c --- /dev/null +++ b/changelog/29245.txt @@ 
-0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Complete the set of name constraints parameters by adding permitted_email_addresses, permitted_ip_ranges, permitted_uri_domains, excluded_dns_domains, excluded_email_addresses, excluded_ip_ranges, and excluded_uri_domains; this makes it possible for the name constraints extension to be fully specified when creating root and intermediate CA certificates. +``` diff --git a/sdk/helper/certutil/helpers.go b/sdk/helper/certutil/helpers.go index 7f34da6168ae..2bfa302a1f61 100644 --- a/sdk/helper/certutil/helpers.go +++ b/sdk/helper/certutil/helpers.go @@ -939,10 +939,18 @@ func createCertificate(data *CreationBundle, randReader io.Reader, privateKeyGen } // This will only be filled in from the generation paths - if len(data.Params.PermittedDNSDomains) > 0 { - certTemplate.PermittedDNSDomains = data.Params.PermittedDNSDomains - certTemplate.PermittedDNSDomainsCritical = true - } + certTemplate.PermittedDNSDomains = append(certTemplate.PermittedDNSDomains, data.Params.PermittedDNSDomains...) + certTemplate.ExcludedDNSDomains = append(certTemplate.ExcludedDNSDomains, data.Params.ExcludedDNSDomains...) + certTemplate.PermittedIPRanges = append(certTemplate.PermittedIPRanges, data.Params.PermittedIPRanges...) + certTemplate.ExcludedIPRanges = append(certTemplate.ExcludedIPRanges, data.Params.ExcludedIPRanges...) + certTemplate.PermittedEmailAddresses = append(certTemplate.PermittedEmailAddresses, data.Params.PermittedEmailAddresses...) + certTemplate.ExcludedEmailAddresses = append(certTemplate.ExcludedEmailAddresses, data.Params.ExcludedEmailAddresses...) + certTemplate.PermittedURIDomains = append(certTemplate.PermittedURIDomains, data.Params.PermittedURIDomains...) + certTemplate.ExcludedURIDomains = append(certTemplate.ExcludedURIDomains, data.Params.ExcludedURIDomains...) 
+ // Note that it is harmless to set PermittedDNSDomainsCritical even if all other + // permitted or excluded fields are empty, as the name constraints extension won't be created in + // that case + certTemplate.PermittedDNSDomainsCritical = true AddPolicyIdentifiers(data, certTemplate) @@ -1352,10 +1360,15 @@ func signCertificate(data *CreationBundle, randReader io.Reader) (*ParsedCertBun certTemplate.IsCA = false } - if len(data.Params.PermittedDNSDomains) > 0 { - certTemplate.PermittedDNSDomains = data.Params.PermittedDNSDomains - certTemplate.PermittedDNSDomainsCritical = true - } + certTemplate.ExcludedDNSDomains = append(certTemplate.ExcludedDNSDomains, data.Params.ExcludedDNSDomains...) + certTemplate.PermittedIPRanges = append(certTemplate.PermittedIPRanges, data.Params.PermittedIPRanges...) + certTemplate.ExcludedIPRanges = append(certTemplate.ExcludedIPRanges, data.Params.ExcludedIPRanges...) + certTemplate.PermittedEmailAddresses = append(certTemplate.PermittedEmailAddresses, data.Params.PermittedEmailAddresses...) + certTemplate.ExcludedEmailAddresses = append(certTemplate.ExcludedEmailAddresses, data.Params.ExcludedEmailAddresses...) + certTemplate.PermittedURIDomains = append(certTemplate.PermittedURIDomains, data.Params.PermittedURIDomains...) + certTemplate.ExcludedURIDomains = append(certTemplate.ExcludedURIDomains, data.Params.ExcludedURIDomains...) 
+ // Note that it is harmless to set PermittedDNSDomainsCritical even if all other permitted/excluded fields are empty + certTemplate.PermittedDNSDomainsCritical = true certBytes, err = x509.CreateCertificate(randReader, certTemplate, caCert, data.CSR.PublicKey, data.SigningBundle.PrivateKey) if err != nil { @@ -1792,7 +1805,15 @@ func ParseCertificateToCreationParameters(certificate x509.Certificate) (creatio // The following two values are on creation parameters, but are impossible to parse from the certificate // ForceAppendCaChain // UseCSRValues - PermittedDNSDomains: certificate.PermittedDNSDomains, + PermittedDNSDomains: certificate.PermittedDNSDomains, + ExcludedDNSDomains: certificate.ExcludedDNSDomains, + PermittedIPRanges: certificate.PermittedIPRanges, + ExcludedIPRanges: certificate.ExcludedIPRanges, + PermittedEmailAddresses: certificate.PermittedEmailAddresses, + ExcludedEmailAddresses: certificate.ExcludedEmailAddresses, + PermittedURIDomains: certificate.PermittedURIDomains, + ExcludedURIDomains: certificate.ExcludedURIDomains, + // URLs: punting on this for now MaxPathLength: certificate.MaxPathLen, NotBeforeDuration: time.Now().Sub(certificate.NotBefore), // Assumes Certificate was created this moment @@ -1934,33 +1955,48 @@ func ParseCertificateToFields(certificate x509.Certificate) (map[string]interfac } templateData := map[string]interface{}{ - "common_name": certificate.Subject.CommonName, - "alt_names": MakeAltNamesCommaSeparatedString(certificate.DNSNames, certificate.EmailAddresses), - "ip_sans": MakeIpAddressCommaSeparatedString(certificate.IPAddresses), - "uri_sans": MakeUriCommaSeparatedString(certificate.URIs), - "other_sans": otherSans, - "signature_bits": FindSignatureBits(certificate.SignatureAlgorithm), - "exclude_cn_from_sans": DetermineExcludeCnFromCertSans(certificate), - "ou": makeCommaSeparatedString(certificate.Subject.OrganizationalUnit), - "organization": makeCommaSeparatedString(certificate.Subject.Organization), - 
"country": makeCommaSeparatedString(certificate.Subject.Country), - "locality": makeCommaSeparatedString(certificate.Subject.Locality), - "province": makeCommaSeparatedString(certificate.Subject.Province), - "street_address": makeCommaSeparatedString(certificate.Subject.StreetAddress), - "postal_code": makeCommaSeparatedString(certificate.Subject.PostalCode), - "serial_number": certificate.Subject.SerialNumber, - "ttl": (certificate.NotAfter.Sub(certificate.NotBefore)).String(), - "max_path_length": certificate.MaxPathLen, - "permitted_dns_domains": strings.Join(certificate.PermittedDNSDomains, ","), - "use_pss": IsPSS(certificate.SignatureAlgorithm), - "skid": hex.EncodeToString(certificate.SubjectKeyId), - "key_type": GetKeyType(certificate.PublicKeyAlgorithm.String()), - "key_bits": FindBitLength(certificate.PublicKey), + "common_name": certificate.Subject.CommonName, + "alt_names": MakeAltNamesCommaSeparatedString(certificate.DNSNames, certificate.EmailAddresses), + "ip_sans": MakeIpAddressCommaSeparatedString(certificate.IPAddresses), + "uri_sans": MakeUriCommaSeparatedString(certificate.URIs), + "other_sans": otherSans, + "signature_bits": FindSignatureBits(certificate.SignatureAlgorithm), + "exclude_cn_from_sans": DetermineExcludeCnFromCertSans(certificate), + "ou": makeCommaSeparatedString(certificate.Subject.OrganizationalUnit), + "organization": makeCommaSeparatedString(certificate.Subject.Organization), + "country": makeCommaSeparatedString(certificate.Subject.Country), + "locality": makeCommaSeparatedString(certificate.Subject.Locality), + "province": makeCommaSeparatedString(certificate.Subject.Province), + "street_address": makeCommaSeparatedString(certificate.Subject.StreetAddress), + "postal_code": makeCommaSeparatedString(certificate.Subject.PostalCode), + "serial_number": certificate.Subject.SerialNumber, + "ttl": (certificate.NotAfter.Sub(certificate.NotBefore)).String(), + "max_path_length": certificate.MaxPathLen, + "permitted_dns_domains": 
strings.Join(certificate.PermittedDNSDomains, ","), + "excluded_dns_domains": strings.Join(certificate.ExcludedDNSDomains, ","), + "permitted_ip_ranges": strings.Join(ipRangesToStrings(certificate.PermittedIPRanges), ","), + "excluded_ip_ranges": strings.Join(ipRangesToStrings(certificate.ExcludedIPRanges), ","), + "permitted_email_addresses": strings.Join(certificate.PermittedEmailAddresses, ","), + "excluded_email_addresses": strings.Join(certificate.ExcludedEmailAddresses, ","), + "permitted_uri_domains": strings.Join(certificate.PermittedURIDomains, ","), + "excluded_uri_domains": strings.Join(certificate.ExcludedURIDomains, ","), + "use_pss": IsPSS(certificate.SignatureAlgorithm), + "skid": hex.EncodeToString(certificate.SubjectKeyId), + "key_type": GetKeyType(certificate.PublicKeyAlgorithm.String()), + "key_bits": FindBitLength(certificate.PublicKey), } return templateData, nil } +func ipRangesToStrings(ipRanges []*net.IPNet) []string { + var ret []string + for _, ipRange := range ipRanges { + ret = append(ret, ipRange.String()) + } + return ret +} + func getBasicConstraintsFromExtension(exts []pkix.Extension) (found bool, isCA bool, maxPathLength int, err error) { for _, ext := range exts { if ext.Id.Equal(ExtensionBasicConstraintsOID) { diff --git a/sdk/helper/certutil/types.go b/sdk/helper/certutil/types.go index 4f16659c1dd6..398a58e210df 100644 --- a/sdk/helper/certutil/types.go +++ b/sdk/helper/certutil/types.go @@ -807,8 +807,15 @@ type CreationParameters struct { ForceAppendCaChain bool // Only used when signing a CA cert - UseCSRValues bool - PermittedDNSDomains []string + UseCSRValues bool + PermittedDNSDomains []string + ExcludedDNSDomains []string + PermittedIPRanges []*net.IPNet + ExcludedIPRanges []*net.IPNet + PermittedEmailAddresses []string + ExcludedEmailAddresses []string + PermittedURIDomains []string + ExcludedURIDomains []string // URLs to encode into the certificate URLs *URLEntries diff --git 
a/website/content/api-docs/secret/pki/index.mdx b/website/content/api-docs/secret/pki/index.mdx index e76829bfb150..abe7c34af202 100644 --- a/website/content/api-docs/secret/pki/index.mdx +++ b/website/content/api-docs/secret/pki/index.mdx @@ -774,6 +774,44 @@ intermediary goes beyond the prescribed length. the domain, as per [RFC 5280 Section 4.2.1.10 - Name Constraints](https://tools.ietf.org/html/rfc5280#section-4.2.1.10) +- `excluded_dns_domains` `(string: "")` - A comma separated string (or, string + array) containing DNS domains for which certificates are not allowed to be issued + or signed by this CA certificate. Supports subdomains via a `.` in front of + the domain, as per [RFC 5280 Section 4.2.1.10 - Name + Constraints](https://tools.ietf.org/html/rfc5280#section-4.2.1.10) + +- `permitted_ip_ranges` `(string: "")` - A comma separated string (or, string + array) containing IP ranges for which certificates are allowed to be issued or + signed by this CA certificate. IP ranges must be in the CIDR notation of IP + address and prefix length like "192.0.2.0/24" or "2001:db8::/32", as defined + in RFC 4632 and RFC 4291. + +- `excluded_ip_ranges` `(string: "")` - A comma separated string (or, string + array) containing IP ranges for which certificates are not allowed to be + issued or signed by this CA certificate. IP ranges must be in the CIDR + notation of IP address and prefix length like "192.0.2.0/24" or + "2001:db8::/32", as defined in RFC 4632 and RFC 4291. + +- `permitted_email_addresses` `(string: "")` - A comma separated string (or, string + array) containing email addresses for which certificates are allowed to be issued or + signed by this CA certificate. + +- `excluded_email_addresses` `(string: "")` - A comma separated string (or, + string array) containing email addresses for which certificates are not + allowed to be issued or signed by this CA certificate. 
+ +- `permitted_uri_domains` `(string: "")` - A comma separated string (or, string + array) containing fully qualified domain names for which certificates are + allowed to be issued or signed by this CA certificate. Supports subdomains via + a `.` in front of the domain, as per [RFC 5280 Section 4.2.1.10 - Name + Constraints](https://tools.ietf.org/html/rfc5280#section-4.2.1.10) + +- `excluded_uri_domains` `(string: "")` - A comma separated string (or, string + array) containing fully qualified domain names for which certificates are not + allowed to be issued or signed by this CA certificate. Supports subdomains via + a `.` in front of the domain, as per [RFC 5280 Section 4.2.1.10 - Name + Constraints](https://tools.ietf.org/html/rfc5280#section-4.2.1.10) + - `ou` `(string: "")` - Specifies the OU (OrganizationalUnit) values in the subject field of the resulting certificate. This is a comma-separated string or JSON array. diff --git a/website/content/docs/commands/pki/health-check.mdx b/website/content/docs/commands/pki/health-check.mdx index 568a9d407827..4b237db091ec 100644 --- a/website/content/docs/commands/pki/health-check.mdx +++ b/website/content/docs/commands/pki/health-check.mdx @@ -346,6 +346,13 @@ $ vault secrets tune \ -audit-non-hmac-request-keys=street_address \ -audit-non-hmac-request-keys=postal_code \ -audit-non-hmac-request-keys=permitted_dns_domains \ + -audit-non-hmac-request-keys=permitted_email_addresses \ + -audit-non-hmac-request-keys=permitted_ip_ranges \ + -audit-non-hmac-request-keys=permitted_uri_domains \ + -audit-non-hmac-request-keys=excluded_dns_domains \ + -audit-non-hmac-request-keys=excluded_email_addresses \ + -audit-non-hmac-request-keys=excluded_ip_ranges \ + -audit-non-hmac-request-keys=excluded_uri_domains \ -audit-non-hmac-request-keys=policy_identifiers \ -audit-non-hmac-request-keys=ext_key_usage_oids \ -audit-non-hmac-request-keys=csr \ diff --git a/website/content/docs/secrets/pki/considerations.mdx 
b/website/content/docs/secrets/pki/considerations.mdx index 4d57eeb3becb..abe4175f8fb3 100644 --- a/website/content/docs/secrets/pki/considerations.mdx +++ b/website/content/docs/secrets/pki/considerations.mdx @@ -2,7 +2,7 @@ layout: docs page_title: 'PKI secrets engine considerations' description: >- - Understand the important considerations and guidance before using the PKI secrets engine to generate certificates before using the PKI secrets engine. + Understand the important considerations and guidance before using the PKI secrets engine to generate certificates. --- # PKI secrets engine considerations @@ -223,10 +223,11 @@ performance of easier-to-rotate intermediates and certificates (such as TLS intermediates). Vault supports the use of both the [`allowed_domains` parameter on -Roles](/vault/api-docs/secret/pki#allowed_domains) and the [`permitted_dns_domains` -parameter to set the Name Constraints extension](/vault/api-docs/secret/pki#permitted_dns_domains) -on root and intermediate generation. This allows for several layers of -separation of concerns between TLS-based services. +Roles](/vault/api-docs/secret/pki#allowed_domains) and the set of parameters for +permitted and excluded DNS domains, IP ranges, email addresses and URI domains +to set the [Name Constraints extension](/vault/api-docs/secret/pki#permitted_dns_domains) +on root and intermediate generation. This allows for several layers of separation of +concerns between TLS-based services.
### Cross-Signed intermediates @@ -780,6 +781,13 @@ Some suggested keys to un-HMAC for requests are as follows: - `street_address` - the subject's street address, - `postal_code` - the subject's postal code, - `permitted_dns_domains` - permitted DNS domains, + - `permitted_ip_ranges` - permitted IP ranges, + - `permitted_email_addresses` - permitted email addresses, + - `permitted_uri_domains` - permitted URI domains, + - `excluded_dns_domains` - excluded DNS domains, + - `excluded_email_addresses` - excluded email addresses, + - `excluded_ip_ranges` - excluded IP ranges, + - `excluded_uri_domains` - excluded URI domains, - `policy_identifiers` - the requested policy identifiers when creating a role, and - `ext_key_usage_oids` - the extended key usage OIDs for the requested certificate. From 30e853da9dbf9501c8719d53bde5f61ccf83507c Mon Sep 17 00:00:00 2001 From: divyaac Date: Fri, 20 Dec 2024 13:46:17 -0800 Subject: [PATCH 11/15] Revert "Send Global Data From Secondary to Primary During Upgrade (#29137)" (#29252) This reverts commit 537fc0f3eacdce6f291bb4def847880339ebc260. --- vault/activity_log.go | 314 ++++++--------- vault/activity_log_test.go | 526 ++++++++++++------------- vault/activity_log_testing_util.go | 203 +++------- vault/activity_log_util.go | 12 - vault/activity_log_util_common.go | 69 +--- vault/activity_log_util_common_test.go | 16 - 6 files changed, 440 insertions(+), 700 deletions(-) diff --git a/vault/activity_log.go b/vault/activity_log.go index 90ebffdd3887..1a9b23f4038d 100644 --- a/vault/activity_log.go +++ b/vault/activity_log.go @@ -36,20 +36,18 @@ import ( const ( // activitySubPath is the directory under the system view where // the log will be stored. 
- activitySubPath = "counters/activity/" - activityEntityBasePath = "log/entity/" - activityTokenBasePath = "log/directtokens/" - activityTokenLocalBasePath = "local/" + activityTokenBasePath - activityQueryBasePath = "queries/" - activityConfigKey = "config" - activityIntentLogKey = "endofmonth" - activityGlobalPathPrefix = "global/" - activityLocalPathPrefix = "local/" - activitySecondaryTempDataPathPrefix = "secondary/" + activitySubPath = "counters/activity/" + activityEntityBasePath = "log/entity/" + activityTokenBasePath = "log/directtokens/" + activityTokenLocalBasePath = "local/" + activityTokenBasePath + activityQueryBasePath = "queries/" + activityConfigKey = "config" + activityIntentLogKey = "endofmonth" + activityGlobalPathPrefix = "global/" + activityLocalPathPrefix = "local/" activityACMERegenerationKey = "acme-regeneration" activityDeduplicationUpgradeKey = "deduplication-upgrade" - activitySecondaryDataRecCount = "secondary-data-received" // sketch for each month that stores hash of client ids distinctClientsBasePath = "log/distinctclients/" @@ -204,6 +202,8 @@ type ActivityLog struct { // Channel to signal global clients have received by the primary from the secondary, during upgrade to 1.19 dedupUpgradeGlobalClientsReceivedCh chan struct{} + // track whether the current cluster is in the middle of an upgrade to 1.19 + dedupClientsUpgradeComplete *atomic.Bool // track metadata and contents of the most recent log segment currentSegment segmentInfo @@ -237,6 +237,9 @@ type ActivityLog struct { // This channel is relevant for upgrades to 1.17. It indicates whether precomputed queries have been // generated for ACME clients. 
computationWorkerDone chan struct{} + // This channel is relevant for upgrades to 1.19+ (version with deduplication of clients) + // This indicates that paths that were used before 1.19 to store clients have been cleaned + oldStoragePathsCleaned chan struct{} // channel to indicate that a global clients have been // sent to the primary from a secondary @@ -253,9 +256,6 @@ type ActivityLog struct { globalPartialMonthClientTracker map[string]*activity.EntityRecord inprocessExport *atomic.Bool - // RetryUntilFalse is a test only attribute that allows us to run the sendPreviousMonthGlobalClientsWorker - // for as long as the test wants - RetryUntilFalse *atomic.Bool // clock is used to support manipulating time in unit and integration tests clock timeutil.Clock @@ -427,8 +427,8 @@ func NewActivityLog(core *Core, logger log.Logger, view *BarrierView, metrics me standbyGlobalFragmentsReceived: make([]*activity.LogFragment, 0), secondaryGlobalClientFragments: make([]*activity.LogFragment, 0), inprocessExport: atomic.NewBool(false), - RetryUntilFalse: atomic.NewBool(false), precomputedQueryWritten: make(chan struct{}), + dedupClientsUpgradeComplete: atomic.NewBool(false), } config, err := a.loadConfigOrDefault(core.activeContext) @@ -497,14 +497,18 @@ func (a *ActivityLog) saveCurrentSegmentToStorageLocked(ctx context.Context, for {"type", "client"}, }) - if a.hasDedupClientsUpgrade(ctx) { - // Since we are the primary, store global clients - // Create fragments from global clients and store the segment - if ret := a.createCurrentSegmentFromFragments(ctx, globalFragments, &a.currentGlobalSegment, force, activityGlobalPathPrefix); ret != nil { - return ret - } + // Since we are the primary, store global clients + // Create fragments from global clients and store the segment + if ret := a.createCurrentSegmentFromFragments(ctx, globalFragments, &a.currentGlobalSegment, force, activityGlobalPathPrefix); ret != nil { + return ret } + } else if 
!a.dedupClientsUpgradeComplete.Load() { + // We are the secondary, and an upgrade is in progress. In this case we will temporarily store the data at this old path + // This data will be garbage collected after the upgrade has completed + if ret := a.createCurrentSegmentFromFragments(ctx, globalFragments, &a.currentSegment, force, ""); ret != nil { + return ret + } } // If segment start time is zero, do not update or write @@ -536,17 +540,8 @@ func (a *ActivityLog) saveCurrentSegmentToStorageLocked(ctx context.Context, for }) } - allLocalFragments := append(standbyLocalFragments, localFragment) - - if !a.hasDedupClientsUpgrade(ctx) { - // In case an upgrade is in progress we will temporarily store the data at this old path - // This data will be garbage collected after the upgrade has completed - a.logger.Debug("upgrade to 1.19 or above is in progress. storing data at old storage path until upgrade is complete") - return a.createCurrentSegmentFromFragments(ctx, append(globalFragments, allLocalFragments...), &a.currentSegment, force, "") - } - // store local fragments - if ret := a.createCurrentSegmentFromFragments(ctx, allLocalFragments, &a.currentLocalSegment, force, activityLocalPathPrefix); ret != nil { + if ret := a.createCurrentSegmentFromFragments(ctx, append(standbyLocalFragments, localFragment), &a.currentLocalSegment, force, activityLocalPathPrefix); ret != nil { return ret } @@ -640,7 +635,7 @@ func (a *ActivityLog) createCurrentSegmentFromFragments(ctx context.Context, fra return nil } -func (a *ActivityLog) savePreviousTokenSegments(ctx context.Context, startTime int64, fragments []*activity.LogFragment) error { +func (a *ActivityLog) savePreviousTokenSegments(ctx context.Context, startTime int64, pathPrefix string, fragments []*activity.LogFragment) error { tokenByNamespace := make(map[string]uint64) for _, fragment := range fragments { // As of 1.9, a fragment should no longer have any NonEntityTokens. 
However @@ -665,7 +660,7 @@ func (a *ActivityLog) savePreviousTokenSegments(ctx context.Context, startTime i tokenCount: &activity.TokenCount{CountByNamespaceID: tokenByNamespace}, } - if _, err := a.saveSegmentTokensInternal(ctx, segmentToStore, false); err != nil { + if _, err := a.saveSegmentEntitiesInternal(ctx, segmentToStore, false, pathPrefix); err != nil { return err } return nil @@ -851,9 +846,9 @@ func (a *ActivityLog) availableTimesAtPath(ctx context.Context, onlyIncludeTimes return nil, err } out := make([]time.Time, 0) - for _, pathTime := range paths { + for _, path := range paths { // generate a set of unique start times - segmentTime, err := timeutil.ParseTimeFromPath(pathTime) + segmentTime, err := timeutil.ParseTimeFromPath(path) if err != nil { return nil, err } @@ -1040,21 +1035,56 @@ func (a *ActivityLog) loadCurrentClientSegment(ctx context.Context, startTime ti a.currentSegment.startTimestamp = startTime.Unix() // load current global segment - clients, err := a.loadClientDataIntoSegment(ctx, activityGlobalPathPrefix, startTime, globalSegmentSequenceNumber, &a.currentGlobalSegment) - if err != nil { + path := activityGlobalPathPrefix + activityEntityBasePath + fmt.Sprint(startTime.Unix()) + "/" + strconv.FormatUint(globalSegmentSequenceNumber, 10) + + out, err := a.readEntitySegmentAtPath(ctx, path) + if err != nil && !errors.Is(err, ErrEmptyResponse) { return err } - for _, entity := range clients { - a.globalPartialMonthClientTracker[entity.ClientID] = entity + if out != nil { + if !a.core.perfStandby { + a.currentGlobalSegment = segmentInfo{ + startTimestamp: startTime.Unix(), + currentClients: &activity.EntityActivityLog{ + Clients: out.Clients, + }, + tokenCount: &activity.TokenCount{ + CountByNamespaceID: make(map[string]uint64), + }, + clientSequenceNumber: globalSegmentSequenceNumber, + } + } else { + // populate this for edge case checking (if end of month passes while background loading on standby) + 
a.currentGlobalSegment.startTimestamp = startTime.Unix() + } + for _, client := range out.Clients { + a.globalPartialMonthClientTracker[client.ClientID] = client + } } // load current local segment - clients, err = a.loadClientDataIntoSegment(ctx, activityLocalPathPrefix, startTime, localSegmentSequenceNumber, &a.currentLocalSegment) - if err != nil { + path = activityLocalPathPrefix + activityEntityBasePath + fmt.Sprint(startTime.Unix()) + "/" + strconv.FormatUint(localSegmentSequenceNumber, 10) + out, err = a.readEntitySegmentAtPath(ctx, path) + if err != nil && !errors.Is(err, ErrEmptyResponse) { return err } - for _, entity := range clients { - a.partialMonthLocalClientTracker[entity.ClientID] = entity + if out != nil { + if !a.core.perfStandby { + a.currentLocalSegment = segmentInfo{ + startTimestamp: startTime.Unix(), + currentClients: &activity.EntityActivityLog{ + Clients: out.Clients, + }, + tokenCount: a.currentLocalSegment.tokenCount, + clientSequenceNumber: localSegmentSequenceNumber, + } + } else { + // populate this for edge case checking (if end of month passes while background loading on standby) + a.currentLocalSegment.startTimestamp = startTime.Unix() + } + for _, client := range out.Clients { + a.partialMonthLocalClientTracker[client.ClientID] = client + } } return nil @@ -1111,7 +1141,7 @@ func (a *ActivityLog) tokenCountExists(ctx context.Context, startTime time.Time) // loadTokenCount populates the in-memory representation of activity token count // this function should be called with the lock held -func (a *ActivityLog) loadTokenCount(ctx context.Context, startTime time.Time, segment *segmentInfo) error { +func (a *ActivityLog) loadTokenCount(ctx context.Context, startTime time.Time) error { tokenCountExists, err := a.tokenCountExists(ctx, startTime) if err != nil { return err @@ -1143,7 +1173,7 @@ func (a *ActivityLog) loadTokenCount(ctx context.Context, startTime time.Time, s // We must load the tokenCount of the current segment into the 
activity log // so that TWEs counted before the introduction of a client ID for TWEs are // still reported in the partial client counts. - segment.tokenCount = out + a.currentLocalSegment.tokenCount = out return nil } @@ -1172,8 +1202,8 @@ func (a *ActivityLog) entityBackgroundLoader(ctx context.Context, wg *sync.WaitG // Call with fragmentLock, globalFragmentLock, localFragmentLock and l held. func (a *ActivityLog) startNewCurrentLogLocked(now time.Time) { a.logger.Trace("initializing new log") - // We will normalize times to start of the month to avoid errors - a.newMonthCurrentLogLocked(now) + a.resetCurrentLog() + a.setCurrentSegmentTimeLocked(now) } // Should be called with fragmentLock, globalFragmentLock, localFragmentLock and l held. @@ -1209,10 +1239,6 @@ func (a *ActivityLog) setCurrentSegmentTimeLocked(t time.Time) { func (a *ActivityLog) resetCurrentLog() { // setting a.currentSegment timestamp to support upgrades a.currentSegment.startTimestamp = 0 - a.currentSegment.currentClients = &activity.EntityActivityLog{ - Clients: make([]*activity.EntityRecord, 0), - } - a.currentSegment.clientSequenceNumber = 0 // global segment a.currentGlobalSegment.startTimestamp = 0 @@ -1263,19 +1289,18 @@ func (a *ActivityLog) deleteLogWorker(ctx context.Context, startTimestamp int64, } func (a *ActivityLog) deleteOldStoragePathWorker(ctx context.Context, pathPrefix string) { - times, err := a.availableTimesAtPath(ctx, time.Now(), pathPrefix) + pathTimes, err := a.view.List(ctx, pathPrefix) if err != nil { a.logger.Error("could not list segment paths", "error", err) return } - for _, pathTime := range times { - pathWithTime := fmt.Sprintf("%s%d/", pathPrefix, pathTime.Unix()) - segments, err := a.view.List(ctx, pathWithTime) + for _, pathTime := range pathTimes { + segments, err := a.view.List(ctx, pathPrefix+pathTime) if err != nil { a.logger.Error("could not list segment path", "error", err) } for _, seqNum := range segments { - err = a.view.Delete(ctx, 
pathWithTime+seqNum) + err = a.view.Delete(ctx, pathPrefix+pathTime+seqNum) if err != nil { a.logger.Error("could not delete log", "error", err) } @@ -1310,19 +1335,6 @@ func (a *ActivityLog) refreshFromStoredLog(ctx context.Context, wg *sync.WaitGro a.localFragmentLock.Lock() defer a.localFragmentLock.Unlock() - // Garbage collect data at old storage paths - if a.hasDedupClientsUpgrade(ctx) { - a.deleteOldStoragePathWorker(ctx, activityEntityBasePath) - a.deleteOldStoragePathWorker(ctx, activityTokenBasePath) - secondaryIds, err := a.view.List(ctx, activitySecondaryTempDataPathPrefix) - if err != nil { - return err - } - for _, secondaryId := range secondaryIds { - a.deleteOldStoragePathWorker(ctx, activitySecondaryTempDataPathPrefix+secondaryId+activityEntityBasePath) - } - } - decreasingLogTimes, err := a.getMostRecentActivityLogSegment(ctx, now) if err != nil { return err @@ -1337,35 +1349,7 @@ func (a *ActivityLog) refreshFromStoredLog(ctx context.Context, wg *sync.WaitGro a.startNewCurrentLogLocked(now) } } - } - // If we have not finished upgrading, we will refresh currentSegment so data - // can be stored at the old paths until the upgrade is complete. 
- if !a.hasDedupClientsUpgrade(ctx) && !a.core.perfStandby { - times, err := a.availableTimesAtPath(ctx, now, activityEntityBasePath) - if err != nil { - return err - } - if len(times) > 0 { - mostRecentTimeOldEntityPath := times[len(times)-1] - // The most recent time is either the current month or the next month (if we missed the rotation perhaps) - if timeutil.IsCurrentMonth(mostRecentTimeOldEntityPath, now) { - // setting a.currentSegment timestamp to support upgrades - a.currentSegment.startTimestamp = mostRecentTimeOldEntityPath.Unix() - // This follows the logic in loadCurrentClientSegment - // We do not want need to set a clientSeq number of perf nodes because no client data is written on perf nodes, it is forwarded to the active node - if !a.core.perfStandby { - segmentNum, exists, err := a.getLastSegmentNumberByEntityPath(ctx, activityEntityBasePath+fmt.Sprint(mostRecentTimeOldEntityPath.Unix())+"/") - if err == nil && exists { - a.loadClientDataIntoSegment(ctx, "", mostRecentTimeOldEntityPath, segmentNum, &a.currentSegment) - } - } - } - } - } - - // We can exit before doing any further refreshing if we are in the middle of an upgrade or there are no logs - if len(decreasingLogTimes) == 0 || !a.hasDedupClientsUpgrade(ctx) { return nil } @@ -1411,7 +1395,7 @@ func (a *ActivityLog) refreshFromStoredLog(ctx context.Context, wg *sync.WaitGro // is still required since without it, we would lose replicated TWE counts for the // current segment. 
if !a.core.perfStandby { - err = a.loadTokenCount(ctx, mostRecent, &a.currentLocalSegment) + err = a.loadTokenCount(ctx, mostRecent) if err != nil { return err } @@ -1681,21 +1665,17 @@ func (c *Core) secondaryDuplicateClientMigrationWorker(ctx context.Context) { manager := c.activityLog manager.logger.Trace("started secondary activity log migration worker") storageMigrationComplete := atomic.NewBool(false) - globalClientDataSent := atomic.NewBool(false) wg := &sync.WaitGroup{} wg.Add(1) go func() { - defer wg.Done() - _, err := manager.sendPreviousMonthGlobalClientsWorker(ctx) - if err != nil { - manager.logger.Debug("failed to send previous months client data to primary", "error", err) - return + if !c.IsPerfSecondary() { + // TODO: Create function for the secondary to continuously attempt to send data to the primary } - globalClientDataSent.Store(true) + + wg.Done() }() wg.Add(1) go func() { - defer wg.Done() localClients, _, err := manager.extractLocalGlobalClientsDeprecatedStoragePath(ctx) if err != nil { return @@ -1710,46 +1690,31 @@ func (c *Core) secondaryDuplicateClientMigrationWorker(ctx context.Context) { return } } - - // Get tokens from previous months at old storage paths - clusterTokens, err := manager.extractTokensDeprecatedStoragePath(ctx) - - // Store tokens at new path - for month, tokenCount := range clusterTokens { - // Combine all token counts from all clusters - logFragments := make([]*activity.LogFragment, len(tokenCount)) - for i, tokens := range tokenCount { - logFragments[i] = &activity.LogFragment{NonEntityTokens: tokens} - } - if err = manager.savePreviousTokenSegments(ctx, month, logFragments); err != nil { - manager.logger.Error("failed to write token segment", "error", err, "month", month) - return - } - } - storageMigrationComplete.Store(true) // TODO: generate/store PCQs for these local clients + wg.Done() }() wg.Wait() if !storageMigrationComplete.Load() { manager.logger.Error("could not complete migration of duplicate clients on 
cluster") return } - if !globalClientDataSent.Load() { - manager.logger.Error("could not send global clients to the primary") - return - } // We have completed the vital portions of the storage migration if err := manager.writeDedupClientsUpgrade(ctx); err != nil { manager.logger.Error("could not complete migration of duplicate clients on cluster") return } - // TODO: Delete old PCQs - - // Refresh activity log and load current month entities into memory - manager.refreshFromStoredLog(ctx, wg, time.Now().UTC()) + // Now that all the clients have been migrated and PCQs have been created, remove all clients at old storage paths + manager.oldStoragePathsCleaned = make(chan struct{}) + go func() { + defer close(manager.oldStoragePathsCleaned) + manager.deleteOldStoragePathWorker(ctx, activityEntityBasePath) + manager.deleteOldStoragePathWorker(ctx, activityTokenBasePath) + // TODO: Delete old PCQs + }() + manager.dedupClientsUpgradeComplete.Store(true) manager.logger.Trace("completed secondary activity log migration worker") } @@ -1787,31 +1752,6 @@ func (a *ActivityLog) writeDedupClientsUpgrade(ctx context.Context) error { return a.view.Put(ctx, regeneratedEntry) } -func (a *ActivityLog) incrementSecondaryClientRecCount(ctx context.Context) error { - val, _ := a.getSecondaryClientRecCount(ctx) - val += 1 - regeneratedEntry, err := logical.StorageEntryJSON(activitySecondaryDataRecCount, val) - if err != nil { - return err - } - return a.view.Put(ctx, regeneratedEntry) -} - -func (a *ActivityLog) getSecondaryClientRecCount(ctx context.Context) (int, error) { - out, err := a.view.Get(ctx, activitySecondaryDataRecCount) - if err != nil { - return 0, err - } - if out == nil { - return 0, nil - } - var data int - if err = out.DecodeJSON(&data); err != nil { - return 0, err - } - return data, err -} - func (a *ActivityLog) regeneratePrecomputedQueries(ctx context.Context) error { ctx, cancel := context.WithCancel(ctx) defer cancel() @@ -1980,7 +1920,7 @@ func (a 
*ActivityLog) secondaryFragmentWorker(ctx context.Context) { } // Only send data if no upgrade is in progress. Else, the active worker will // store the data in a temporary location until it is garbage collected - if a.hasDedupClientsUpgrade(ctx) { + if a.dedupClientsUpgradeComplete.Load() { sendFunc() } @@ -1995,7 +1935,7 @@ func (a *ActivityLog) secondaryFragmentWorker(ctx context.Context) { } // If an upgrade is in progress, don't do anything // The active fragmentWorker will take care of flushing the clients to a temporary location - if a.hasDedupClientsUpgrade(ctx) { + if a.dedupClientsUpgradeComplete.Load() { sendFunc() // clear active entity set a.globalFragmentLock.Lock() @@ -4097,6 +4037,7 @@ func (c *Core) activityLogMigrationTask(ctx context.Context) { } else { // Store that upgrade processes have already been completed manager.writeDedupClientsUpgrade(ctx) + manager.dedupClientsUpgradeComplete.Store(true) } } else { // We kick off the secondary migration worker in any chance that the primary has not yet upgraded. 
@@ -4104,6 +4045,11 @@ func (c *Core) activityLogMigrationTask(ctx context.Context) { // already upgraded primary if !manager.hasDedupClientsUpgrade(ctx) { go c.secondaryDuplicateClientMigrationWorker(ctx) + } else { + // Store that upgrade processes have already been completed + manager.writeDedupClientsUpgrade(ctx) + manager.dedupClientsUpgradeComplete.Store(true) + } } } @@ -4116,11 +4062,10 @@ func (c *Core) activityLogMigrationTask(ctx context.Context) { func (c *Core) primaryDuplicateClientMigrationWorker(ctx context.Context) error { a := c.activityLog a.logger.Trace("started primary activity log migration worker") - ctx, cancel := context.WithCancel(ctx) - defer cancel() // Collect global clients from secondary - if err := a.waitForSecondaryGlobalClients(ctx); err != nil { + err := a.waitForSecondaryGlobalClients(ctx) + if err != nil { return err } @@ -4132,36 +4077,8 @@ func (c *Core) primaryDuplicateClientMigrationWorker(ctx context.Context) error } // Get tokens from previous months at old storage paths clusterTokens, err := a.extractTokensDeprecatedStoragePath(ctx) - if err != nil { - return nil - } - // Collect global clients from secondaries and put them in the clusterGlobalClients map - secondaryIds, err := a.view.List(ctx, activitySecondaryTempDataPathPrefix) - if err != nil { - return err - } - for _, secondaryId := range secondaryIds { - times, err := a.availableTimesAtPath(ctx, time.Now(), activitySecondaryTempDataPathPrefix+secondaryId+activityEntityBasePath) - if err != nil { - a.logger.Error("could not list secondary cluster clients until for cluster", "cluster", secondaryId) - return err - } - for _, time := range times { - segments, err := a.getAllEntitySegmentsForMonth(ctx, activitySecondaryTempDataPathPrefix+secondaryId+activityEntityBasePath, time.Unix()) - if err != nil { - return err - } - for _, segment := range segments { - for _, entity := range segment.GetClients() { - if _, ok := clusterGlobalClients[time.Unix()]; !ok { - 
clusterGlobalClients[time.Unix()] = make([]*activity.EntityRecord, 0) - } - clusterGlobalClients[time.Unix()] = append(clusterGlobalClients[time.Unix()], entity) - } - } - } - } + // TODO: Collect clients from secondaries into slice of fragments // Store global clients at new path for month, entitiesForMonth := range clusterGlobalClients { @@ -4190,7 +4107,7 @@ func (c *Core) primaryDuplicateClientMigrationWorker(ctx context.Context) error for i, tokens := range tokenCount { logFragments[i] = &activity.LogFragment{NonEntityTokens: tokens} } - if err = a.savePreviousTokenSegments(ctx, month, logFragments); err != nil { + if err = a.savePreviousTokenSegments(ctx, month, activityLocalPathPrefix+activityTokenBasePath, logFragments); err != nil { a.logger.Error("failed to write token segment", "error", err, "month", month) return err } @@ -4202,12 +4119,15 @@ func (c *Core) primaryDuplicateClientMigrationWorker(ctx context.Context) error a.logger.Error("could not complete migration of duplicate clients on cluster") return err } - - // TODO: We will also need to delete old PCQs - - // Refresh activity log and load current month entities into memory - a.refreshFromStoredLog(ctx, &sync.WaitGroup{}, time.Now().UTC()) - + // Garbage collect data at old paths + a.oldStoragePathsCleaned = make(chan struct{}) + go func() { + defer close(a.oldStoragePathsCleaned) + a.deleteOldStoragePathWorker(ctx, activityEntityBasePath) + a.deleteOldStoragePathWorker(ctx, activityTokenBasePath) + // We will also need to delete old PCQs + }() + a.dedupClientsUpgradeComplete.Store(true) a.logger.Trace("completed primary activity log migration worker") return nil } diff --git a/vault/activity_log_test.go b/vault/activity_log_test.go index 314cb22c2c46..8599592d8007 100644 --- a/vault/activity_log_test.go +++ b/vault/activity_log_test.go @@ -12,7 +12,9 @@ import ( "io" "net/http" "reflect" + "sort" "strconv" + "strings" "sync" "testing" "time" @@ -1370,6 +1372,69 @@ func 
TestActivityLog_tokenCountExists(t *testing.T) { } } +// entityRecordsEqual compares the parts we care about from two activity entity record slices +// note: this makes a copy of the []*activity.EntityRecord so that misordered slices won't fail the comparison, +// but the function won't modify the order of the slices to compare +func entityRecordsEqual(t *testing.T, record1, record2 []*activity.EntityRecord) bool { + t.Helper() + + if record1 == nil { + return record2 == nil + } + if record2 == nil { + return record1 == nil + } + + if len(record1) != len(record2) { + return false + } + + // sort first on namespace, then on ID, then on timestamp + entityLessFn := func(e []*activity.EntityRecord, i, j int) bool { + ei := e[i] + ej := e[j] + + nsComp := strings.Compare(ei.NamespaceID, ej.NamespaceID) + if nsComp == -1 { + return true + } + if nsComp == 1 { + return false + } + + idComp := strings.Compare(ei.ClientID, ej.ClientID) + if idComp == -1 { + return true + } + if idComp == 1 { + return false + } + + return ei.Timestamp < ej.Timestamp + } + + entitiesCopy1 := make([]*activity.EntityRecord, len(record1)) + entitiesCopy2 := make([]*activity.EntityRecord, len(record2)) + copy(entitiesCopy1, record1) + copy(entitiesCopy2, record2) + + sort.Slice(entitiesCopy1, func(i, j int) bool { + return entityLessFn(entitiesCopy1, i, j) + }) + sort.Slice(entitiesCopy2, func(i, j int) bool { + return entityLessFn(entitiesCopy2, i, j) + }) + + for i, a := range entitiesCopy1 { + b := entitiesCopy2[i] + if a.ClientID != b.ClientID || a.NamespaceID != b.NamespaceID || a.Timestamp != b.Timestamp { + return false + } + } + + return true +} + func (a *ActivityLog) resetEntitiesInMemory(t *testing.T) { t.Helper() @@ -1521,7 +1586,7 @@ func TestActivityLog_loadCurrentClientSegment(t *testing.T) { } currentGlobalEntities := a.GetCurrentGlobalEntities() - if !EntityRecordsEqual(t, currentGlobalEntities.Clients, tc.entities.Clients) { + if !entityRecordsEqual(t, 
currentGlobalEntities.Clients, tc.entities.Clients) { t.Errorf("bad data loaded. expected: %v, got: %v for path %q", tc.entities.Clients, currentGlobalEntities, tc.path) } @@ -1677,7 +1742,7 @@ func TestActivityLog_loadTokenCount(t *testing.T) { } for _, tc := range testCases { - err := a.loadTokenCount(ctx, time.Unix(tc.time, 0), &a.currentLocalSegment) + err := a.loadTokenCount(ctx, time.Unix(tc.time, 0)) if err != nil { t.Fatalf("got error loading data for %q: %v", tc.path, err) } @@ -1745,99 +1810,13 @@ func TestActivityLog_StopAndRestart(t *testing.T) { } } -func addActivityRecordsOldStoragePath(t *testing.T, core *Core, base time.Time, includeEntities, includeTokens bool) (*ActivityLog, []*activity.EntityRecord, map[string]uint64) { - t.Helper() - - monthsAgo := base.AddDate(0, -3, 0) - a := core.activityLog - var entityRecords []*activity.EntityRecord - if includeEntities { - entityRecords = []*activity.EntityRecord{ - { - ClientID: "11111111-1111-1111-1111-111111111111", - NamespaceID: namespace.RootNamespaceID, - Timestamp: time.Now().Unix(), - }, - { - ClientID: "22222222-2222-2222-2222-222222222222", - NamespaceID: namespace.RootNamespaceID, - Timestamp: time.Now().Unix(), - }, - { - ClientID: "33333333-2222-2222-2222-222222222222", - NamespaceID: namespace.RootNamespaceID, - Timestamp: time.Now().Unix(), - }, - } - if constants.IsEnterprise { - entityRecords = append(entityRecords, []*activity.EntityRecord{ - { - ClientID: "44444444-1111-1111-1111-111111111111", - NamespaceID: "ns1", - Timestamp: time.Now().Unix(), - }, - }...) 
- } - - // append some local entity data - entityRecords = append(entityRecords, &activity.EntityRecord{ - ClientID: "44444444-4444-4444-4444-444444444444", - NamespaceID: namespace.RootNamespaceID, - Timestamp: time.Now().Unix(), - }) - - for i, entityRecord := range entityRecords { - entityData, err := proto.Marshal(&activity.EntityActivityLog{ - Clients: []*activity.EntityRecord{entityRecord}, - }) - if err != nil { - t.Fatalf(err.Error()) - } - switch i { - case 0: - WriteToStorage(t, core, ActivityPrefix+activityEntityBasePath+fmt.Sprint(monthsAgo.Unix())+"/0", entityData) - - case len(entityRecords) - 1: - // local data - WriteToStorage(t, core, ActivityPrefix+activityEntityBasePath+fmt.Sprint(base.Unix())+"/"+strconv.Itoa(i-1), entityData) - default: - WriteToStorage(t, core, ActivityPrefix+activityEntityBasePath+fmt.Sprint(base.Unix())+"/"+strconv.Itoa(i-1), entityData) - } - } - } - - var tokenRecords map[string]uint64 - if includeTokens { - tokenRecords = make(map[string]uint64) - tokenRecords[namespace.RootNamespaceID] = uint64(1) - if constants.IsEnterprise { - for i := 1; i < 4; i++ { - nsID := "ns" + strconv.Itoa(i) - tokenRecords[nsID] = uint64(i) - } - } - tokenCount := &activity.TokenCount{ - CountByNamespaceID: tokenRecords, - } - - tokenData, err := proto.Marshal(tokenCount) - if err != nil { - t.Fatalf(err.Error()) - } - - WriteToStorage(t, core, ActivityPrefix+activityTokenBasePath+fmt.Sprint(base.Unix())+"/0", tokenData) - } - - return a, entityRecords, tokenRecords -} - // :base: is the timestamp to start from for the setup logic (use to simulate newest log from past or future) // entity records returned include [0] data from a previous month and [1:] data from the current month // token counts returned are from the current month -func setupActivityRecordsInStorage(t *testing.T, base time.Time, includeEntities, includeTokens, addOldStoragePathData bool) (*ActivityLog, []*activity.EntityRecord, map[string]uint64) { +func 
setupActivityRecordsInStorage(t *testing.T, base time.Time, includeEntities, includeTokens bool) (*ActivityLog, []*activity.EntityRecord, map[string]uint64) { t.Helper() - core, _, _ := TestCoreUnsealedWithConfig(t, &CoreConfig{ActivityLogConfig: ActivityLogCoreConfig{ForceEnable: true}}) + core, _, _ := TestCoreUnsealed(t) a := core.activityLog monthsAgo := base.AddDate(0, -3, 0) @@ -1919,17 +1898,13 @@ func setupActivityRecordsInStorage(t *testing.T, base time.Time, includeEntities WriteToStorage(t, core, ActivityLogLocalPrefix+"directtokens/"+fmt.Sprint(base.Unix())+"/0", tokenData) } - if addOldStoragePathData { - return addActivityRecordsOldStoragePath(t, core, base, includeEntities, includeTokens) - } return a, entityRecords, tokenRecords } -// TestActivityLog_refreshFromStoredLog_DedupUpgradeComplete writes records for 3 months ago and this month, then calls refreshFromStoredLog. -// The system believes the upgrade to 1.19+ is already complete. It should not refresh data from old storage paths, only data at the new storage paths. +// TestActivityLog_refreshFromStoredLog writes records for 3 months ago and this month, then calls refreshFromStoredLog. // The test verifies that current entities and current tokens are correct. 
-func TestActivityLog_refreshFromStoredLog_DedupUpgradeComplete(t *testing.T) { - a, expectedClientRecords, expectedTokenCounts := setupActivityRecordsInStorage(t, time.Now().UTC(), true, true, true) +func TestActivityLog_refreshFromStoredLog(t *testing.T) { + a, expectedClientRecords, expectedTokenCounts := setupActivityRecordsInStorage(t, time.Now().UTC(), true, true) a.SetEnable(true) var wg sync.WaitGroup @@ -1958,101 +1933,13 @@ func TestActivityLog_refreshFromStoredLog_DedupUpgradeComplete(t *testing.T) { } currentEntities := a.GetCurrentGlobalEntities() - if !EntityRecordsEqual(t, currentEntities.Clients, expectedCurrent.Clients) { - // we only expect the newest entity segment to be loaded (for the current month) - t.Errorf("bad activity entity logs loaded. expected: %v got: %v", expectedCurrent, currentEntities) - } - - currentLocalEntities := a.GetCurrentLocalEntities() - if !EntityRecordsEqual(t, currentLocalEntities.Clients, expectedCurrentLocal.Clients) { - // we only expect the newest local entity segment to be loaded (for the current month) - t.Errorf("bad activity entity logs loaded. expected: %v got: %v", expectedCurrentLocal, currentLocalEntities) - } - - nsCount := a.GetStoredTokenCountByNamespaceID() - require.Equal(t, nsCount, expectedTokenCounts) - - activeClients := a.core.GetActiveClientsList() - if err := ActiveEntitiesEqual(activeClients, expectedActive.Clients); err != nil { - // we expect activeClients to be loaded for the entire month - t.Errorf("bad data loaded into active entities. expected only set of EntityID from %v in %v: %v", expectedActive.Clients, activeClients, err) - } - - // verify active global clients list - activeGlobalClients := a.core.GetActiveGlobalClientsList() - if err := ActiveEntitiesEqual(activeGlobalClients, expectedActiveGlobal.Clients); err != nil { - // we expect activeClients to be loaded for the entire month - t.Errorf("bad data loaded into active global entities. 
expected only set of EntityID from %v in %v: %v", expectedActiveGlobal.Clients, activeGlobalClients, err) - } - // verify active local clients list - activeLocalClients := a.core.GetActiveLocalClientsList() - if err := ActiveEntitiesEqual(activeLocalClients, expectedCurrentLocal.Clients); err != nil { - // we expect activeClients to be loaded for the entire month - t.Errorf("bad data loaded into active local entities. expected only set of EntityID from %v in %v: %v", expectedCurrentLocal.Clients, activeLocalClients, err) - } - - // No data from the old storage paths should have been loaded because the system believes that the upgrade was already complete - a.ExpectOldSegmentRefreshed(t, time.Now().UTC().Unix(), false, []*activity.EntityRecord{}, map[string]uint64{}) -} - -// TestActivityLog_refreshFromStoredLog_DedupUpgradeIncomplete writes records for 3 months ago and this month, then calls refreshFromStoredLog. -// The system thinks the upgrade to 1.19+ is incomplete. It should not refresh data from new storage paths, only data at the old storage paths. -// The test verifies that current entities and current tokens are correct. 
-func TestActivityLog_refreshFromStoredLog_DedupUpgradeIncomplete(t *testing.T) { - a, expectedClientRecords, expectedTokenCounts := setupActivityRecordsInStorage(t, time.Now().UTC(), true, true, true) - a.SetEnable(true) - - // Reset the system to state where the upgrade is incomplete - a.ResetDedupUpgrade(context.Background()) - - var wg sync.WaitGroup - now := time.Now().UTC() - err := a.refreshFromStoredLog(context.Background(), &wg, now) - if err != nil { - t.Fatalf("got error loading stored activity logs: %v", err) - } - wg.Wait() - - // active clients for the entire month - expectedActive := &activity.EntityActivityLog{ - Clients: expectedClientRecords[1:], - } - - // global clients added to the newest local entity segment - expectedCurrent := &activity.EntityActivityLog{ - Clients: expectedClientRecords[len(expectedClientRecords)-2 : len(expectedClientRecords)-1], - } - - expectedActiveGlobal := &activity.EntityActivityLog{ - Clients: expectedClientRecords[1 : len(expectedClientRecords)-1], - } - - // local client is only added to the newest segment for the current month. This should also appear in the active clients for the entire month. 
- expectedCurrentLocal := &activity.EntityActivityLog{ - Clients: expectedClientRecords[len(expectedClientRecords)-1:], - } - - // Data should be loaded into the old segment - a.ExpectOldSegmentRefreshed(t, now.Unix(), false, expectedCurrentLocal.GetClients(), map[string]uint64{}) - a.ExpectCurrentSegmentsRefreshed(t, timeutil.StartOfMonth(now).Unix(), false) - - // Simulate the completion of an upgrade - a.writeDedupClientsUpgrade(context.Background()) - - err = a.refreshFromStoredLog(context.Background(), &wg, now) - if err != nil { - t.Fatalf("got error loading stored activity logs: %v", err) - } - wg.Wait() - - currentEntities := a.GetCurrentGlobalEntities() - if !EntityRecordsEqual(t, currentEntities.Clients, expectedCurrent.Clients) { + if !entityRecordsEqual(t, currentEntities.Clients, expectedCurrent.Clients) { // we only expect the newest entity segment to be loaded (for the current month) t.Errorf("bad activity entity logs loaded. expected: %v got: %v", expectedCurrent, currentEntities) } currentLocalEntities := a.GetCurrentLocalEntities() - if !EntityRecordsEqual(t, currentLocalEntities.Clients, expectedCurrentLocal.Clients) { + if !entityRecordsEqual(t, currentLocalEntities.Clients, expectedCurrentLocal.Clients) { // we only expect the newest local entity segment to be loaded (for the current month) t.Errorf("bad activity entity logs loaded. expected: %v got: %v", expectedCurrentLocal, currentLocalEntities) } @@ -2087,7 +1974,7 @@ func TestActivityLog_refreshFromStoredLog_DedupUpgradeIncomplete(t *testing.T) { // test closes a.doneCh and calls refreshFromStoredLog, which will not do any processing because the doneCh is closed. // The test verifies that the current data is not loaded. 
func TestActivityLog_refreshFromStoredLogWithBackgroundLoadingCancelled(t *testing.T) { - a, expectedClientRecords, expectedTokenCounts := setupActivityRecordsInStorage(t, time.Now().UTC(), true, true, false) + a, expectedClientRecords, expectedTokenCounts := setupActivityRecordsInStorage(t, time.Now().UTC(), true, true) a.SetEnable(true) var wg sync.WaitGroup @@ -2120,13 +2007,13 @@ func TestActivityLog_refreshFromStoredLogWithBackgroundLoadingCancelled(t *testi } currentEntities := a.GetCurrentGlobalEntities() - if !EntityRecordsEqual(t, currentEntities.Clients, expectedCurrent.Clients) { + if !entityRecordsEqual(t, currentEntities.Clients, expectedCurrent.Clients) { // we only expect the newest entity segment to be loaded (for the current month) t.Errorf("bad activity entity logs loaded. expected: %v got: %v", expectedCurrent, currentEntities) } currentLocalEntities := a.GetCurrentLocalEntities() - if !EntityRecordsEqual(t, currentLocalEntities.Clients, expectedCurrentLocal.Clients) { + if !entityRecordsEqual(t, currentLocalEntities.Clients, expectedCurrentLocal.Clients) { // we only expect the newest local entity segment to be loaded (for the current month) t.Errorf("bad activity entity logs loaded. expected: %v got: %v", expectedCurrentLocal, currentLocalEntities) } @@ -2159,7 +2046,7 @@ func TestActivityLog_refreshFromStoredLogWithBackgroundLoadingCancelled(t *testi // TestActivityLog_refreshFromStoredLogContextCancelled writes data from 3 months ago to this month and calls // refreshFromStoredLog with a canceled context, verifying that the function errors because of the canceled context. 
func TestActivityLog_refreshFromStoredLogContextCancelled(t *testing.T) { - a, _, _ := setupActivityRecordsInStorage(t, time.Now().UTC(), true, true, false) + a, _, _ := setupActivityRecordsInStorage(t, time.Now().UTC(), true, true) var wg sync.WaitGroup ctx, cancelFn := context.WithCancel(context.Background()) @@ -2174,7 +2061,7 @@ func TestActivityLog_refreshFromStoredLogContextCancelled(t *testing.T) { // TestActivityLog_refreshFromStoredLogNoTokens writes only entities from 3 months ago to today, then calls // refreshFromStoredLog. It verifies that there are no tokens loaded. func TestActivityLog_refreshFromStoredLogNoTokens(t *testing.T) { - a, expectedClientRecords, _ := setupActivityRecordsInStorage(t, time.Now().UTC(), true, false, false) + a, expectedClientRecords, _ := setupActivityRecordsInStorage(t, time.Now().UTC(), true, false) a.SetEnable(true) var wg sync.WaitGroup @@ -2195,13 +2082,13 @@ func TestActivityLog_refreshFromStoredLogNoTokens(t *testing.T) { } currentGlobalEntities := a.GetCurrentGlobalEntities() - if !EntityRecordsEqual(t, currentGlobalEntities.Clients, expectedCurrentGlobal.Clients) { + if !entityRecordsEqual(t, currentGlobalEntities.Clients, expectedCurrentGlobal.Clients) { // we only expect the newest entity segment to be loaded (for the current month) t.Errorf("bad activity entity logs loaded. expected: %v got: %v", expectedCurrentGlobal, currentGlobalEntities) } currentLocalEntities := a.GetCurrentLocalEntities() - if !EntityRecordsEqual(t, currentLocalEntities.Clients, expectedCurrentLocal.Clients) { + if !entityRecordsEqual(t, currentLocalEntities.Clients, expectedCurrentLocal.Clients) { // we only expect the newest local entity segment to be loaded (for the current month) t.Errorf("bad activity entity logs loaded. 
expected: %v got: %v", expectedCurrentLocal, currentLocalEntities) } @@ -2221,7 +2108,7 @@ func TestActivityLog_refreshFromStoredLogNoTokens(t *testing.T) { // TestActivityLog_refreshFromStoredLogNoEntities writes only direct tokens from 3 months ago to today, and runs // refreshFromStoredLog. It verifies that there are no entities or clients loaded. func TestActivityLog_refreshFromStoredLogNoEntities(t *testing.T) { - a, _, expectedTokenCounts := setupActivityRecordsInStorage(t, time.Now().UTC(), false, true, false) + a, _, expectedTokenCounts := setupActivityRecordsInStorage(t, time.Now().UTC(), false, true) a.SetEnable(true) var wg sync.WaitGroup @@ -2251,29 +2138,17 @@ func TestActivityLog_refreshFromStoredLogNoEntities(t *testing.T) { // current segment counts are zero. func TestActivityLog_refreshFromStoredLogNoData(t *testing.T) { now := time.Now().UTC() - a, _, _ := setupActivityRecordsInStorage(t, now, false, false, true) + a, _, _ := setupActivityRecordsInStorage(t, now, false, false) a.SetEnable(true) - // Simulate an upgrade that is incomplete - a.ResetDedupUpgrade(context.Background()) var wg sync.WaitGroup err := a.refreshFromStoredLog(context.Background(), &wg, now) if err != nil { t.Fatalf("got error loading stored activity logs: %v", err) } wg.Wait() - a.ExpectOldSegmentRefreshed(t, timeutil.StartOfMonth(now).Unix(), false, []*activity.EntityRecord{}, map[string]uint64{}) - a.ExpectCurrentSegmentsRefreshed(t, timeutil.StartOfMonth(now).Unix(), false) - // Simulate an upgrade that is complete - require.NoError(t, a.writeDedupClientsUpgrade(context.Background())) - err = a.refreshFromStoredLog(context.Background(), &wg, now) - if err != nil { - t.Fatalf("got error loading stored activity logs: %v", err) - } - wg.Wait() - a.ExpectOldSegmentRefreshed(t, timeutil.StartOfMonth(now).Unix(), false, []*activity.EntityRecord{}, map[string]uint64{}) - a.ExpectCurrentSegmentsRefreshed(t, timeutil.StartOfMonth(now).Unix(), false) + 
a.ExpectCurrentSegmentRefreshed(t, now.Unix(), false) } // TestActivityLog_refreshFromStoredLogTwoMonthsPrevious creates segment data from 5 months ago to 2 months ago and @@ -2282,29 +2157,17 @@ func TestActivityLog_refreshFromStoredLogTwoMonthsPrevious(t *testing.T) { // test what happens when the most recent data is from month M-2 (or earlier - same effect) now := time.Now().UTC() twoMonthsAgoStart := timeutil.StartOfPreviousMonth(timeutil.StartOfPreviousMonth(now)) - a, _, _ := setupActivityRecordsInStorage(t, twoMonthsAgoStart, true, true, true) + a, _, _ := setupActivityRecordsInStorage(t, twoMonthsAgoStart, true, true) a.SetEnable(true) - // Simulate an upgrade that is incomplete - a.ResetDedupUpgrade(context.Background()) var wg sync.WaitGroup err := a.refreshFromStoredLog(context.Background(), &wg, now) if err != nil { t.Fatalf("got error loading stored activity logs: %v", err) } wg.Wait() - a.ExpectCurrentSegmentsRefreshed(t, timeutil.StartOfMonth(now).Unix(), false) - a.ExpectOldSegmentRefreshed(t, timeutil.StartOfMonth(now).Unix(), false, []*activity.EntityRecord{}, map[string]uint64{}) - // Simulate an upgrade that is complete - a.writeDedupClientsUpgrade(context.Background()) - err = a.refreshFromStoredLog(context.Background(), &wg, now) - if err != nil { - t.Fatalf("got error loading stored activity logs: %v", err) - } - wg.Wait() - a.ExpectCurrentSegmentsRefreshed(t, timeutil.StartOfMonth(now).Unix(), false) - a.ExpectOldSegmentRefreshed(t, timeutil.StartOfMonth(now).Unix(), false, []*activity.EntityRecord{}, map[string]uint64{}) + a.ExpectCurrentSegmentRefreshed(t, now.Unix(), false) } // TestActivityLog_refreshFromStoredLogPreviousMonth creates segment data from 4 months ago to 1 month ago, then calls @@ -2315,12 +2178,9 @@ func TestActivityLog_refreshFromStoredLogPreviousMonth(t *testing.T) { // can handle end of month rotations monthStart := timeutil.StartOfMonth(time.Now().UTC()) oneMonthAgoStart := timeutil.StartOfPreviousMonth(monthStart) - 
a, expectedClientRecords, expectedTokenCounts := setupActivityRecordsInStorage(t, oneMonthAgoStart, true, true, true) + a, expectedClientRecords, expectedTokenCounts := setupActivityRecordsInStorage(t, oneMonthAgoStart, true, true) a.SetEnable(true) - // Reset upgrade attributes to simulate startup - a.ResetDedupUpgrade(context.Background()) - var wg sync.WaitGroup err := a.refreshFromStoredLog(context.Background(), &wg, time.Now().UTC()) if err != nil { @@ -2328,18 +2188,6 @@ func TestActivityLog_refreshFromStoredLogPreviousMonth(t *testing.T) { } wg.Wait() - // Previous month data should not be loaded into the currentSegment - a.ExpectOldSegmentRefreshed(t, monthStart.Unix(), false, []*activity.EntityRecord{}, map[string]uint64{}) - a.ExpectCurrentSegmentsRefreshed(t, monthStart.Unix(), false) - - // Simulate completion of upgrade - require.NoError(t, a.writeDedupClientsUpgrade(context.Background())) - - // With a refresh after upgrade is complete, the currentGlobalSegment and currentLocalSegment should contain data - err = a.refreshFromStoredLog(context.Background(), &wg, time.Now().UTC()) - require.NoError(t, err) - wg.Wait() - expectedActive := &activity.EntityActivityLog{ Clients: expectedClientRecords[1:], } @@ -2348,13 +2196,16 @@ func TestActivityLog_refreshFromStoredLogPreviousMonth(t *testing.T) { } currentEntities := a.GetCurrentGlobalEntities() - if !EntityRecordsEqual(t, currentEntities.Clients, expectedCurrent.Clients) { + if !entityRecordsEqual(t, currentEntities.Clients, expectedCurrent.Clients) { // we only expect the newest entity segment to be loaded (for the current month) t.Errorf("bad activity entity logs loaded. expected: %v got: %v", expectedCurrent, currentEntities) } nsCount := a.GetStoredTokenCountByNamespaceID() - require.Equal(t, expectedTokenCounts, nsCount) + if !reflect.DeepEqual(nsCount, expectedTokenCounts) { + // we expect all token counts to be loaded + t.Errorf("bad activity token counts loaded. 
expected: %v got: %v", expectedTokenCounts, nsCount) + } activeClients := a.core.GetActiveClientsList() if err := ActiveEntitiesEqual(activeClients, expectedActive.Clients); err != nil { @@ -2582,7 +2433,7 @@ func TestActivityLog_EnableDisable(t *testing.T) { } expectMissingSegment(t, core, path) - a.ExpectCurrentSegmentsRefreshed(t, 0, false) + a.ExpectCurrentSegmentRefreshed(t, 0, false) // enable (if not already) which force-writes an empty segment enableRequest() @@ -4301,7 +4152,7 @@ func TestActivityLog_partialMonthClientCount(t *testing.T) { ctx := namespace.RootContext(nil) now := time.Now().UTC() - a, clients, _ := setupActivityRecordsInStorage(t, timeutil.StartOfMonth(now), true, true, false) + a, clients, _ := setupActivityRecordsInStorage(t, timeutil.StartOfMonth(now), true, true) // clients[0] belongs to previous month clients = clients[1:] @@ -4372,7 +4223,7 @@ func TestActivityLog_partialMonthClientCountUsingHandleQuery(t *testing.T) { ctx := namespace.RootContext(nil) now := time.Now().UTC() - a, clients, _ := setupActivityRecordsInStorage(t, timeutil.StartOfMonth(now), true, true, false) + a, clients, _ := setupActivityRecordsInStorage(t, timeutil.StartOfMonth(now), true, true) // clients[0] belongs to previous month clients = clients[1:] @@ -5980,7 +5831,7 @@ func TestActivityLog_PrimaryDuplicateClientMigrationWorker(t *testing.T) { a.SetEnable(true) ctx := context.Background() - timeStamp := time.Now().UTC() + timeStamp := time.Now() startOfMonth := timeutil.StartOfMonth(timeStamp) oneMonthAgo := timeutil.StartOfPreviousMonth(timeStamp) twoMonthsAgo := timeutil.StartOfPreviousMonth(oneMonthAgo) @@ -6014,28 +5865,13 @@ func TestActivityLog_PrimaryDuplicateClientMigrationWorker(t *testing.T) { a.savePreviousEntitySegments(ctx, oneMonthAgo.Unix(), "", []*activity.LogFragment{{Clients: append(clientRecordsLocal[1:], clientRecordsGlobal[1:]...)}}) a.savePreviousEntitySegments(ctx, startOfMonth.Unix(), "", []*activity.LogFragment{{Clients: 
append(clientRecordsLocal[2:], clientRecordsGlobal[2:]...)}}) - // Write tokens to old path. We write twice to simulate multiple segments for each month - for i := 0; i < 2; i++ { - writeTokenSegmentOldPath(t, core, twoMonthsAgo, i, &activity.TokenCount{CountByNamespaceID: tokenCounts}) - writeTokenSegmentOldPath(t, core, oneMonthAgo, i, &activity.TokenCount{CountByNamespaceID: tokenCounts}) - writeTokenSegmentOldPath(t, core, startOfMonth, i, &activity.TokenCount{CountByNamespaceID: tokenCounts}) - } - - // Write secondary cluster data. This is to make sure that the data at these paths are garbage collected at the end of the migration routine - numSecondarySegments := 4 - secondaryIds := make([]string, 0) - for i := 0; i < numSecondarySegments; i++ { - writeSecondaryClusterSegment(t, core, twoMonthsAgo, i, fmt.Sprintf("cluster_%d", i), &activity.EntityActivityLog{Clients: clientRecordsGlobal[:ActivitySegmentClientCapacity]}) - writeSecondaryClusterSegment(t, core, oneMonthAgo, i, fmt.Sprintf("cluster_%d", i), &activity.EntityActivityLog{Clients: clientRecordsGlobal[1:ActivitySegmentClientCapacity]}) - writeSecondaryClusterSegment(t, core, startOfMonth, i, fmt.Sprintf("cluster_%d", i), &activity.EntityActivityLog{Clients: clientRecordsGlobal[2:ActivitySegmentClientCapacity]}) - secondaryIds = append(secondaryIds, fmt.Sprintf("cluster_%d", i)) - } - // Assert that the migration workers have not been run require.True(t, a.hasDedupClientsUpgrade(ctx)) + require.True(t, a.dedupClientsUpgradeComplete.Load()) // Resetting this to false so that we can // verify that after the migrations is completed, the correct values have been stored + a.dedupClientsUpgradeComplete.Store(false) require.NoError(t, a.view.Delete(ctx, activityDeduplicationUpgradeKey)) // Forcefully run the primary migration worker @@ -6055,7 +5891,6 @@ func TestActivityLog_PrimaryDuplicateClientMigrationWorker(t *testing.T) { require.NoError(t, err) globalClients = append(globalClients, 
segment.GetClients()...) } - // We've added duplicate clients from secondaries, so this should not affect the count of the global clients require.Equal(t, len(clientRecordsGlobal)-index, len(globalClients)) } @@ -6079,23 +5914,31 @@ func TestActivityLog_PrimaryDuplicateClientMigrationWorker(t *testing.T) { for _, time := range times { reader, err := a.NewSegmentFileReader(ctx, time) require.NoError(t, err) - numTokenSegments := 0 for { segment, err := reader.ReadToken(ctx) if errors.Is(err, io.EOF) { break } - numTokenSegments += 1 require.NoError(t, err) // Verify that the data is correct deep.Equal(segment.GetCountByNamespaceID(), tokenCounts) } - // All tokens should have been combined into one segment - require.Equal(t, 1, numTokenSegments) } // Check that the storage key has been updated require.True(t, a.hasDedupClientsUpgrade(ctx)) + // Check that the bool has been updated + require.True(t, a.dedupClientsUpgradeComplete.Load()) + + // Wait for the deletion of old logs to complete + timeout := time.After(25 * time.Second) + // Wait for channel indicating deletion to be written + select { + case <-timeout: + t.Fatal("timed out waiting for deletion to complete") + case <-a.oldStoragePathsCleaned: + break + } // Verify there is no data at the old paths times, err := a.availableTimesAtPath(ctx, time.Now(), activityEntityBasePath) @@ -6106,11 +5949,152 @@ func TestActivityLog_PrimaryDuplicateClientMigrationWorker(t *testing.T) { times, err = a.availableTimesAtPath(ctx, time.Now(), activityTokenBasePath) require.NoError(t, err) require.Equal(t, 0, len(times)) +} + +// TestActivityLog_SecondaryDuplicateClientMigrationWorker verifies that the secondary +// migration worker correctly moves local data from old location to the new location +func TestActivityLog_SecondaryDuplicateClientMigrationWorker(t *testing.T) { + cluster := NewTestCluster(t, nil, nil) + core := cluster.Cores[0].Core + a := core.activityLog + a.SetEnable(true) + + ctx := context.Background() + 
timeStamp := time.Now() + startOfMonth := timeutil.StartOfMonth(timeStamp) + oneMonthAgo := timeutil.StartOfPreviousMonth(timeStamp) + twoMonthsAgo := timeutil.StartOfPreviousMonth(oneMonthAgo) + + clientRecordsGlobal := make([]*activity.EntityRecord, ActivitySegmentClientCapacity*2+1) + for i := range clientRecordsGlobal { + clientRecordsGlobal[i] = &activity.EntityRecord{ + ClientID: fmt.Sprintf("111122222-3333-4444-5555-%012v", i), + Timestamp: timeStamp.Unix(), + NonEntity: false, + } + } + clientRecordsLocal := make([]*activity.EntityRecord, ActivitySegmentClientCapacity*2+1) + for i := range clientRecordsGlobal { + clientRecordsLocal[i] = &activity.EntityRecord{ + ClientID: fmt.Sprintf("011122222-3333-4444-5555-%012v", i), + Timestamp: timeStamp.Unix(), + // This is to trick the system into believing this a local client when parsing data + ClientType: nonEntityTokenActivityType, + } + } + + tokenCounts := map[string]uint64{ + "ns1": 10, + "ns2": 11, + "ns3": 12, + } + + // Write global and local clients to old path + a.savePreviousEntitySegments(ctx, twoMonthsAgo.Unix(), "", []*activity.LogFragment{{Clients: append(clientRecordsLocal, clientRecordsGlobal...)}}) + a.savePreviousEntitySegments(ctx, oneMonthAgo.Unix(), "", []*activity.LogFragment{{Clients: append(clientRecordsLocal[1:], clientRecordsGlobal[1:]...)}}) + a.savePreviousEntitySegments(ctx, startOfMonth.Unix(), "", []*activity.LogFragment{{Clients: append(clientRecordsLocal[2:], clientRecordsGlobal[2:]...)}}) - // Verify there is no data at the secondary cluster paths - for _, secondaryId := range secondaryIds { - times, err = a.availableTimesAtPath(ctx, time.Now(), activitySecondaryTempDataPathPrefix+secondaryId+activityEntityBasePath) + // Write tokens to old path + a.savePreviousTokenSegments(ctx, twoMonthsAgo.Unix(), "", []*activity.LogFragment{{NonEntityTokens: tokenCounts}}) + a.savePreviousTokenSegments(ctx, oneMonthAgo.Unix(), "", []*activity.LogFragment{{NonEntityTokens: tokenCounts}}) + 
a.savePreviousTokenSegments(ctx, startOfMonth.Unix(), "", []*activity.LogFragment{{NonEntityTokens: tokenCounts}}) + + // Assert that the migration workers have not been run + require.True(t, a.hasDedupClientsUpgrade(ctx)) + require.True(t, a.dedupClientsUpgradeComplete.Load()) + + // Resetting this to false so that we can + // verify that after the migrations is completed, the correct values have been stored + a.dedupClientsUpgradeComplete.Store(false) + require.NoError(t, a.view.Delete(ctx, activityDeduplicationUpgradeKey)) + + // Forcefully run the secondary migration worker + core.secondaryDuplicateClientMigrationWorker(ctx) + + // Wait for the storage migration to complete + ticker := time.NewTicker(100 * time.Millisecond) + timeout := time.After(25 * time.Second) + for { + select { + case <-timeout: + t.Fatal("timed out waiting for migration to complete") + case <-ticker.C: + } + if a.dedupClientsUpgradeComplete.Load() { + break + } + } + + // Verify that no global clients have been migrated + times := []time.Time{twoMonthsAgo, oneMonthAgo, startOfMonth} + for _, time := range times { + reader, err := a.NewSegmentFileReader(ctx, time) require.NoError(t, err) - require.Equal(t, 0, len(times)) + globalClients := make([]*activity.EntityRecord, 0) + for { + segment, err := reader.ReadGlobalEntity(ctx) + if errors.Is(err, io.EOF) { + break + } + require.NoError(t, err) + globalClients = append(globalClients, segment.GetClients()...) + } + require.Equal(t, 0, len(globalClients)) } + + // Verify local clients have been correctly migrated + for index, time := range times { + reader, err := a.NewSegmentFileReader(ctx, time) + require.NoError(t, err) + localClients := make([]*activity.EntityRecord, 0) + for { + segment, err := reader.ReadLocalEntity(ctx) + if errors.Is(err, io.EOF) { + break + } + require.NoError(t, err) + localClients = append(localClients, segment.GetClients()...) 
+ } + require.Equal(t, len(clientRecordsLocal)-index, len(localClients)) + } + + // Verify non-entity tokens have been correctly migrated + for _, time := range times { + reader, err := a.NewSegmentFileReader(ctx, time) + require.NoError(t, err) + for { + segment, err := reader.ReadToken(ctx) + if errors.Is(err, io.EOF) { + break + } + require.NoError(t, err) + // Verify that the data is correct + deep.Equal(segment.GetCountByNamespaceID(), tokenCounts) + } + } + + // Check that the storage key has been updated + require.True(t, a.hasDedupClientsUpgrade(ctx)) + // Check that the bool has been updated + require.True(t, a.dedupClientsUpgradeComplete.Load()) + + // Wait for the deletion of old logs to complete + timeout = time.After(25 * time.Second) + // Wait for channel indicating deletion to be written + select { + case <-timeout: + t.Fatal("timed out waiting for deletion to complete") + case <-a.oldStoragePathsCleaned: + break + } + + // Verify there is no data at the old entity paths + times, err := a.availableTimesAtPath(ctx, time.Now(), activityEntityBasePath) + require.NoError(t, err) + require.Equal(t, 0, len(times)) + + // Verify there is no data at the old token paths + times, err = a.availableTimesAtPath(ctx, time.Now(), activityTokenBasePath) + require.NoError(t, err) + require.Equal(t, 0, len(times)) } diff --git a/vault/activity_log_testing_util.go b/vault/activity_log_testing_util.go index 42566bb9201e..d0fd4b7b35ae 100644 --- a/vault/activity_log_testing_util.go +++ b/vault/activity_log_testing_util.go @@ -7,18 +7,13 @@ import ( "context" "fmt" "math/rand" - "sort" - "strings" - "sync" "testing" - "time" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "github.com/hashicorp/vault/helper/constants" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/vault/activity" - "github.com/stretchr/testify/require" "google.golang.org/protobuf/testing/protocmp" ) @@ -192,134 +187,74 @@ func RandStringBytes(n int) string { 
return string(b) } -// ExpectOldSegmentRefreshed verifies that the old current segment structure has been refreshed -// non-nil empty components and updated with the `expectedStart` timestamp. This is expected when -// an upgrade has not yet completed. +// ExpectCurrentSegmentRefreshed verifies that the current segment has been refreshed +// non-nil empty components and updated with the `expectedStart` timestamp // Note: if `verifyTimeNotZero` is true, ignore `expectedStart` and just make sure the timestamp isn't 0 -func (a *ActivityLog) ExpectOldSegmentRefreshed(t *testing.T, expectedStart int64, verifyTimeNotZero bool, expectedEntities []*activity.EntityRecord, directTokens map[string]uint64) { +func (a *ActivityLog) ExpectCurrentSegmentRefreshed(t *testing.T, expectedStart int64, verifyTimeNotZero bool) { t.Helper() a.l.RLock() defer a.l.RUnlock() a.fragmentLock.RLock() defer a.fragmentLock.RUnlock() - require.NotNil(t, a.currentSegment.currentClients) - require.NotNil(t, a.currentSegment.currentClients.Clients) - require.NotNil(t, a.currentSegment.tokenCount) - require.NotNil(t, a.currentSegment.tokenCount.CountByNamespaceID) - if !EntityRecordsEqual(t, a.currentSegment.currentClients.Clients, expectedEntities) { - // we only expect the newest entity segment to be loaded (for the current month) - t.Errorf("bad activity entity logs loaded. 
expected: %v got: %v", a.currentSegment.currentClients.Clients, expectedEntities) + if a.currentGlobalSegment.currentClients == nil { + t.Fatalf("expected non-nil currentSegment.currentClients") } - require.Equal(t, directTokens, a.currentSegment.tokenCount.CountByNamespaceID) - if verifyTimeNotZero { - require.NotEqual(t, a.currentSegment.startTimestamp, 0) - } else { - require.Equal(t, a.currentSegment.startTimestamp, expectedStart) + if a.currentGlobalSegment.currentClients.Clients == nil { + t.Errorf("expected non-nil currentSegment.currentClients.Entities") } -} - -// ExpectCurrentSegmentsRefreshed verifies that the current segment has been refreshed -// non-nil empty components and updated with the `expectedStart` timestamp -// Note: if `verifyTimeNotZero` is true, ignore `expectedStart` and just make sure the timestamp isn't 0 -func (a *ActivityLog) ExpectCurrentSegmentsRefreshed(t *testing.T, expectedStart int64, verifyTimeNotZero bool) { - t.Helper() - - a.l.RLock() - defer a.l.RUnlock() - a.fragmentLock.RLock() - defer a.fragmentLock.RUnlock() - require.NotNil(t, a.currentGlobalSegment.currentClients) - require.NotNil(t, a.currentGlobalSegment.currentClients.Clients) - require.NotNil(t, a.currentGlobalSegment.tokenCount) - require.NotNil(t, a.currentGlobalSegment.tokenCount.CountByNamespaceID) - - require.NotNil(t, a.currentLocalSegment.currentClients) - require.NotNil(t, a.currentLocalSegment.currentClients.Clients) - require.NotNil(t, a.currentLocalSegment.tokenCount) - require.NotNil(t, a.currentLocalSegment.tokenCount.CountByNamespaceID) - - require.NotNil(t, a.partialMonthLocalClientTracker) - require.NotNil(t, a.globalPartialMonthClientTracker) - - require.Equal(t, 0, len(a.currentGlobalSegment.currentClients.Clients)) - require.Equal(t, 0, len(a.currentLocalSegment.currentClients.Clients)) - require.Equal(t, 0, len(a.currentLocalSegment.tokenCount.CountByNamespaceID)) - - require.Equal(t, 0, len(a.partialMonthLocalClientTracker)) - require.Equal(t, 
0, len(a.globalPartialMonthClientTracker)) - - if verifyTimeNotZero { - require.NotEqual(t, 0, a.currentGlobalSegment.startTimestamp) - require.NotEqual(t, 0, a.currentLocalSegment.startTimestamp) - require.NotEqual(t, 0, a.currentSegment.startTimestamp) - } else { - require.Equal(t, expectedStart, a.currentGlobalSegment.startTimestamp) - require.Equal(t, expectedStart, a.currentLocalSegment.startTimestamp) + if a.currentGlobalSegment.tokenCount == nil { + t.Fatalf("expected non-nil currentSegment.tokenCount") } -} - -// EntityRecordsEqual compares the parts we care about from two activity entity record slices -// note: this makes a copy of the []*activity.EntityRecord so that misordered slices won't fail the comparison, -// but the function won't modify the order of the slices to compare -func EntityRecordsEqual(t *testing.T, record1, record2 []*activity.EntityRecord) bool { - t.Helper() - - if record1 == nil { - return record2 == nil + if a.currentGlobalSegment.tokenCount.CountByNamespaceID == nil { + t.Errorf("expected non-nil currentSegment.tokenCount.CountByNamespaceID") } - if record2 == nil { - return record1 == nil + if a.currentLocalSegment.currentClients == nil { + t.Fatalf("expected non-nil currentSegment.currentClients") } - - if len(record1) != len(record2) { - return false + if a.currentLocalSegment.currentClients.Clients == nil { + t.Errorf("expected non-nil currentSegment.currentClients.Entities") } - - // sort first on namespace, then on ID, then on timestamp - entityLessFn := func(e []*activity.EntityRecord, i, j int) bool { - ei := e[i] - ej := e[j] - - nsComp := strings.Compare(ei.NamespaceID, ej.NamespaceID) - if nsComp == -1 { - return true - } - if nsComp == 1 { - return false - } - - idComp := strings.Compare(ei.ClientID, ej.ClientID) - if idComp == -1 { - return true - } - if idComp == 1 { - return false - } - - return ei.Timestamp < ej.Timestamp + if a.currentLocalSegment.tokenCount == nil { + t.Fatalf("expected non-nil 
currentSegment.tokenCount") + } + if a.currentLocalSegment.tokenCount.CountByNamespaceID == nil { + t.Errorf("expected non-nil currentSegment.tokenCount.CountByNamespaceID") + } + if a.partialMonthLocalClientTracker == nil { + t.Errorf("expected non-nil partialMonthLocalClientTracker") + } + if a.globalPartialMonthClientTracker == nil { + t.Errorf("expected non-nil globalPartialMonthClientTracker") + } + if len(a.currentGlobalSegment.currentClients.Clients) > 0 { + t.Errorf("expected no current entity segment to be loaded. got: %v", a.currentGlobalSegment.currentClients) + } + if len(a.currentLocalSegment.currentClients.Clients) > 0 { + t.Errorf("expected no current entity segment to be loaded. got: %v", a.currentLocalSegment.currentClients) + } + if len(a.currentLocalSegment.tokenCount.CountByNamespaceID) > 0 { + t.Errorf("expected no token counts to be loaded. got: %v", a.currentLocalSegment.tokenCount.CountByNamespaceID) + } + if len(a.partialMonthLocalClientTracker) > 0 { + t.Errorf("expected no active entity segment to be loaded. got: %v", a.partialMonthLocalClientTracker) + } + if len(a.globalPartialMonthClientTracker) > 0 { + t.Errorf("expected no active entity segment to be loaded. got: %v", a.globalPartialMonthClientTracker) } - entitiesCopy1 := make([]*activity.EntityRecord, len(record1)) - entitiesCopy2 := make([]*activity.EntityRecord, len(record2)) - copy(entitiesCopy1, record1) - copy(entitiesCopy2, record2) - - sort.Slice(entitiesCopy1, func(i, j int) bool { - return entityLessFn(entitiesCopy1, i, j) - }) - sort.Slice(entitiesCopy2, func(i, j int) bool { - return entityLessFn(entitiesCopy2, i, j) - }) - - for i, a := range entitiesCopy1 { - b := entitiesCopy2[i] - if a.ClientID != b.ClientID || a.NamespaceID != b.NamespaceID || a.Timestamp != b.Timestamp { - return false + if verifyTimeNotZero { + if a.currentGlobalSegment.startTimestamp == 0 { + t.Error("bad start timestamp. 
expected no reset but timestamp was reset") + } + if a.currentLocalSegment.startTimestamp == 0 { + t.Error("bad start timestamp. expected no reset but timestamp was reset") } + } else if a.currentGlobalSegment.startTimestamp != expectedStart { + t.Errorf("bad start timestamp. expected: %v got: %v", expectedStart, a.currentGlobalSegment.startTimestamp) + } else if a.currentLocalSegment.startTimestamp != expectedStart { + t.Errorf("bad start timestamp. expected: %v got: %v", expectedStart, a.currentLocalSegment.startTimestamp) } - - return true } // ActiveEntitiesEqual checks that only the set of `test` exists in `active` @@ -349,7 +284,6 @@ func (a *ActivityLog) SetStartTimestamp(timestamp int64) { defer a.l.Unlock() a.currentGlobalSegment.startTimestamp = timestamp a.currentLocalSegment.startTimestamp = timestamp - a.currentSegment.startTimestamp = timestamp } // GetStoredTokenCountByNamespaceID returns the count of tokens by namespace ID @@ -436,38 +370,3 @@ func (c *Core) DeleteLogsAtPath(ctx context.Context, t *testing.T, storagePath s } } } - -// SaveEntitySegment is a test helper function to keep the savePreviousEntitySegments function internal -func (a *ActivityLog) SaveEntitySegment(ctx context.Context, startTime int64, pathPrefix string, fragments []*activity.LogFragment) error { - return a.savePreviousEntitySegments(ctx, startTime, pathPrefix, fragments) -} - -// LaunchMigrationWorker is a test only helper function that launches the migration workers. 
-// This allows us to keep the migration worker methods internal -func (a *ActivityLog) LaunchMigrationWorker(ctx context.Context, isSecondary bool) { - if isSecondary { - go a.core.secondaryDuplicateClientMigrationWorker(ctx) - } else { - go a.core.primaryDuplicateClientMigrationWorker(ctx) - } -} - -// DedupUpgradeComplete is a test helper function that indicates whether the -// all correct states have been set after completing upgrade processes to 1.19+ -func (a *ActivityLog) DedupUpgradeComplete(ctx context.Context) bool { - return a.hasDedupClientsUpgrade(ctx) -} - -// ResetDedupUpgrade is a test helper function that resets the state to reflect -// how the system should look before running/completing any upgrade process to 1.19+ -func (a *ActivityLog) ResetDedupUpgrade(ctx context.Context) { - a.view.Delete(ctx, activityDeduplicationUpgradeKey) - a.view.Delete(ctx, activitySecondaryDataRecCount) -} - -// RefreshActivityLog is a test helper functions that refreshes the activity logs -// segments and current month data. 
This allows us to keep the refreshFromStoredLog -// function internal -func (a *ActivityLog) RefreshActivityLog(ctx context.Context) { - a.refreshFromStoredLog(ctx, &sync.WaitGroup{}, time.Now().UTC()) -} diff --git a/vault/activity_log_util.go b/vault/activity_log_util.go index 8c2585717c92..890af5533fad 100644 --- a/vault/activity_log_util.go +++ b/vault/activity_log_util.go @@ -7,21 +7,9 @@ package vault import ( "context" - - "github.com/hashicorp/vault/vault/activity" ) // sendCurrentFragment is a no-op on OSS func (a *ActivityLog) sendCurrentFragment(ctx context.Context) error { return nil } - -// receiveSecondaryPreviousMonthGlobalData is a no-op on OSS -func (a *ActivityLog) receiveSecondaryPreviousMonthGlobalData(ctx context.Context, month int64, clients *activity.LogFragment) error { - return nil -} - -// sendPreviousMonthGlobalClientsWorker is a no-op on OSS -func (a *ActivityLog) sendPreviousMonthGlobalClientsWorker(ctx context.Context) (map[int64][]*activity.EntityRecord, error) { - return map[int64][]*activity.EntityRecord{}, nil -} diff --git a/vault/activity_log_util_common.go b/vault/activity_log_util_common.go index 8b1bea9bcbb0..86c824adebab 100644 --- a/vault/activity_log_util_common.go +++ b/vault/activity_log_util_common.go @@ -10,7 +10,6 @@ import ( "io" "slices" "sort" - "strconv" "strings" "time" @@ -566,25 +565,32 @@ func (a *ActivityLog) extractLocalGlobalClientsDeprecatedStoragePath(ctx context return clusterLocalClients, clusterGlobalClients, fmt.Errorf("could not list available logs on the cluster") } for _, time := range times { - segments, err := a.getAllEntitySegmentsForMonth(ctx, activityEntityBasePath, time.Unix()) + entityPath := activityEntityBasePath + fmt.Sprint(time.Unix()) + "/" + segmentPaths, err := a.view.List(ctx, entityPath) if err != nil { return nil, nil, err } - for _, segment := range segments { + for _, seqNumber := range segmentPaths { + segment, err := a.readEntitySegmentAtPath(ctx, entityPath+seqNumber) + if 
segment == nil { + continue + } + if err != nil { + a.logger.Warn("failed to read segment", "error", err) + return clusterLocalClients, clusterGlobalClients, err + } for _, entity := range segment.GetClients() { // If the client is not local, then add it to a map - // Normalize month value to the beginning of the month to avoid multiple storage entries for the same month - startOfMonth := timeutil.StartOfMonth(time.UTC()) if local, _ := a.isClientLocal(entity); !local { - if _, ok := clusterGlobalClients[startOfMonth.Unix()]; !ok { - clusterGlobalClients[startOfMonth.Unix()] = make([]*activity.EntityRecord, 0) + if _, ok := clusterGlobalClients[time.Unix()]; !ok { + clusterGlobalClients[time.Unix()] = make([]*activity.EntityRecord, 0) } - clusterGlobalClients[startOfMonth.Unix()] = append(clusterGlobalClients[startOfMonth.Unix()], entity) + clusterGlobalClients[time.Unix()] = append(clusterGlobalClients[time.Unix()], entity) } else { - if _, ok := clusterLocalClients[startOfMonth.Unix()]; !ok { - clusterLocalClients[startOfMonth.Unix()] = make([]*activity.EntityRecord, 0) + if _, ok := clusterLocalClients[time.Unix()]; !ok { + clusterLocalClients[time.Unix()] = make([]*activity.EntityRecord, 0) } - clusterLocalClients[startOfMonth.Unix()] = append(clusterLocalClients[startOfMonth.Unix()], entity) + clusterLocalClients[time.Unix()] = append(clusterLocalClients[time.Unix()], entity) } } } @@ -621,25 +627,6 @@ func (a *ActivityLog) extractTokensDeprecatedStoragePath(ctx context.Context) (m return tokensByMonth, nil } -func (a *ActivityLog) getAllEntitySegmentsForMonth(ctx context.Context, path string, time int64) ([]*activity.EntityActivityLog, error) { - entityPathWithTime := fmt.Sprintf("%s%d/", path, time) - segments := make([]*activity.EntityActivityLog, 0) - segmentPaths, err := a.view.List(ctx, entityPathWithTime) - if err != nil { - return segments, err - } - for _, seqNum := range segmentPaths { - segment, err := a.readEntitySegmentAtPath(ctx, 
entityPathWithTime+seqNum) - if err != nil { - return segments, err - } - if segment != nil { - segments = append(segments, segment) - } - } - return segments, nil -} - // OldestVersionHasDeduplicatedClients returns whether this cluster is 1.19+, and // hence supports deduplicated clients func (a *ActivityLog) OldestVersionHasDeduplicatedClients(ctx context.Context) bool { @@ -661,25 +648,3 @@ func (a *ActivityLog) OldestVersionHasDeduplicatedClients(ctx context.Context) b } return oldestVersionIsDedupClients } - -func (a *ActivityLog) loadClientDataIntoSegment(ctx context.Context, pathPrefix string, startTime time.Time, seqNum uint64, currentSegment *segmentInfo) ([]*activity.EntityRecord, error) { - path := pathPrefix + activityEntityBasePath + fmt.Sprint(startTime.Unix()) + "/" + strconv.FormatUint(seqNum, 10) - out, err := a.readEntitySegmentAtPath(ctx, path) - if err != nil && !errors.Is(err, ErrEmptyResponse) { - return nil, err - } - if out != nil { - if !a.core.perfStandby { - a.logger.Debug(fmt.Sprintf("loading client data from %s into segment", path)) - currentSegment.startTimestamp = startTime.Unix() - currentSegment.currentClients = &activity.EntityActivityLog{Clients: out.Clients} - currentSegment.clientSequenceNumber = seqNum - - } else { - // populate this for edge case checking (if end of month passes while background loading on standby) - currentSegment.startTimestamp = startTime.Unix() - } - return out.GetClients(), nil - } - return []*activity.EntityRecord{}, nil -} diff --git a/vault/activity_log_util_common_test.go b/vault/activity_log_util_common_test.go index 482589972447..2d0a0c4ceee2 100644 --- a/vault/activity_log_util_common_test.go +++ b/vault/activity_log_util_common_test.go @@ -991,22 +991,6 @@ func Test_ActivityLog_ComputeCurrentMonth_NamespaceMounts(t *testing.T) { } } -// writeOldEntityPathSegment writes a single segment to the old storage path with the given time and index for an entity -func writeOldEntityPathSegment(t *testing.T, 
core *Core, ts time.Time, index int, item *activity.EntityActivityLog) { - t.Helper() - protoItem, err := proto.Marshal(item) - require.NoError(t, err) - WriteToStorage(t, core, makeSegmentPath(t, activityEntityBasePath, ts, index), protoItem) -} - -// writeSecondaryClusterSegment writes a single secondary global segment file with the given time and index for an entity -func writeSecondaryClusterSegment(t *testing.T, core *Core, ts time.Time, index int, clusterId string, item *activity.EntityActivityLog) { - t.Helper() - protoItem, err := proto.Marshal(item) - require.NoError(t, err) - WriteToStorage(t, core, makeSegmentPath(t, fmt.Sprintf("%s%s/%s", activitySecondaryTempDataPathPrefix, clusterId, activityEntityBasePath), ts, index), protoItem) -} - // writeGlobalEntitySegment writes a single global segment file with the given time and index for an entity func writeGlobalEntitySegment(t *testing.T, core *Core, ts time.Time, index int, item *activity.EntityActivityLog) { t.Helper() From 88f0710e2649bd82b5aba9ffce9d4e2c264ce1e1 Mon Sep 17 00:00:00 2001 From: divyaac Date: Fri, 20 Dec 2024 16:06:41 -0800 Subject: [PATCH 12/15] =?UTF-8?q?Revert=20"Migrate=20Clients=20From=20Old?= =?UTF-8?q?=20Storage=20Paths=20to=20New=20Paths=20During=20Upgrade=20#7?= =?UTF-8?q?=E2=80=A6"=20(#29253)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit 9ba62bec6f1005782c508a6dc45e6cc79c06beb6. 
--- vault/activity_log.go | 574 ++++++------------------- vault/activity_log_stubs_oss.go | 13 +- vault/activity_log_test.go | 277 ------------ vault/activity_log_util_common.go | 96 ----- vault/activity_log_util_common_test.go | 56 --- 5 files changed, 121 insertions(+), 895 deletions(-) diff --git a/vault/activity_log.go b/vault/activity_log.go index 1a9b23f4038d..757165f3e1f1 100644 --- a/vault/activity_log.go +++ b/vault/activity_log.go @@ -46,9 +46,7 @@ const ( activityGlobalPathPrefix = "global/" activityLocalPathPrefix = "local/" - activityACMERegenerationKey = "acme-regeneration" - activityDeduplicationUpgradeKey = "deduplication-upgrade" - + activityACMERegenerationKey = "acme-regeneration" // sketch for each month that stores hash of client ids distinctClientsBasePath = "log/distinctclients/" @@ -116,8 +114,6 @@ const ( // CSV encoder. Indexes will be generated to ensure that values are slotted into the // correct column. This initial value is used prior to finalizing the CSV header. exportCSVFlatteningInitIndex = -1 - - DeduplicatedClientMinimumVersion = "1.19.0" ) var ( @@ -200,11 +196,6 @@ type ActivityLog struct { // Channel to stop background processing doneCh chan struct{} - // Channel to signal global clients have received by the primary from the secondary, during upgrade to 1.19 - dedupUpgradeGlobalClientsReceivedCh chan struct{} - // track whether the current cluster is in the middle of an upgrade to 1.19 - dedupClientsUpgradeComplete *atomic.Bool - // track metadata and contents of the most recent log segment currentSegment segmentInfo @@ -233,18 +224,8 @@ type ActivityLog struct { // channel closed when deletion at startup is done // (for unit test robustness) - retentionDone chan struct{} - // This channel is relevant for upgrades to 1.17. It indicates whether precomputed queries have been - // generated for ACME clients. 
+ retentionDone chan struct{} computationWorkerDone chan struct{} - // This channel is relevant for upgrades to 1.19+ (version with deduplication of clients) - // This indicates that paths that were used before 1.19 to store clients have been cleaned - oldStoragePathsCleaned chan struct{} - - // channel to indicate that a global clients have been - // sent to the primary from a secondary - globalClientsSent chan struct{} - clientsReceivedForMigration map[int64][]*activity.LogFragment // for testing: is config currently being invalidated. protected by l configInvalidationInProgress bool @@ -369,21 +350,19 @@ func NewActivityLog(core *Core, logger log.Logger, view *BarrierView, metrics me clock = timeutil.DefaultClock{} } a := &ActivityLog{ - core: core, - configOverrides: &core.activityLogConfig, - logger: logger, - view: view, - metrics: metrics, - nodeID: hostname, - newFragmentCh: make(chan struct{}, 1), - sendCh: make(chan struct{}, 1), // buffered so it can be triggered by fragment size - doneCh: make(chan struct{}, 1), - partialMonthLocalClientTracker: make(map[string]*activity.EntityRecord), - newGlobalClientFragmentCh: make(chan struct{}, 1), - dedupUpgradeGlobalClientsReceivedCh: make(chan struct{}, 1), - clientsReceivedForMigration: make(map[int64][]*activity.LogFragment), - globalPartialMonthClientTracker: make(map[string]*activity.EntityRecord), - clock: clock, + core: core, + configOverrides: &core.activityLogConfig, + logger: logger, + view: view, + metrics: metrics, + nodeID: hostname, + newFragmentCh: make(chan struct{}, 1), + sendCh: make(chan struct{}, 1), // buffered so it can be triggered by fragment size + doneCh: make(chan struct{}, 1), + partialMonthLocalClientTracker: make(map[string]*activity.EntityRecord), + newGlobalClientFragmentCh: make(chan struct{}, 1), + globalPartialMonthClientTracker: make(map[string]*activity.EntityRecord), + clock: clock, currentSegment: segmentInfo{ startTimestamp: 0, currentClients: &activity.EntityActivityLog{ 
@@ -428,7 +407,6 @@ func NewActivityLog(core *Core, logger log.Logger, view *BarrierView, metrics me secondaryGlobalClientFragments: make([]*activity.LogFragment, 0), inprocessExport: atomic.NewBool(false), precomputedQueryWritten: make(chan struct{}), - dedupClientsUpgradeComplete: atomic.NewBool(false), } config, err := a.loadConfigOrDefault(core.activeContext) @@ -481,8 +459,6 @@ func (a *ActivityLog) saveCurrentSegmentToStorageLocked(ctx context.Context, for a.currentGlobalFragment = nil a.globalFragmentLock.Unlock() - globalFragments := append(append(secondaryGlobalClients, globalClients), standbyGlobalClients...) - if !a.core.IsPerfSecondary() { if a.currentGlobalFragment != nil { a.metrics.IncrCounterWithLabels([]string{"core", "activity", "global_fragment_size"}, @@ -491,24 +467,19 @@ func (a *ActivityLog) saveCurrentSegmentToStorageLocked(ctx context.Context, for {"type", "client"}, }) } + var globalReceivedFragmentTotal int + for _, globalReceivedFragment := range secondaryGlobalClients { + globalReceivedFragmentTotal += len(globalReceivedFragment.Clients) + } + for _, globalReceivedFragment := range standbyGlobalClients { + globalReceivedFragmentTotal += len(globalReceivedFragment.Clients) + } a.metrics.IncrCounterWithLabels([]string{"core", "activity", "global_received_fragment_size"}, - float32(len(globalFragments)), + float32(globalReceivedFragmentTotal), []metricsutil.Label{ {"type", "client"}, }) - // Since we are the primary, store global clients - // Create fragments from global clients and store the segment - if ret := a.createCurrentSegmentFromFragments(ctx, globalFragments, &a.currentGlobalSegment, force, activityGlobalPathPrefix); ret != nil { - return ret - } - - } else if !a.dedupClientsUpgradeComplete.Load() { - // We are the secondary, and an upgrade is in progress. 
In this case we will temporarily store the data at this old path - // This data will be garbage collected after the upgrade has completed - if ret := a.createCurrentSegmentFromFragments(ctx, globalFragments, &a.currentSegment, force, ""); ret != nil { - return ret - } } // If segment start time is zero, do not update or write @@ -518,6 +489,15 @@ func (a *ActivityLog) saveCurrentSegmentToStorageLocked(ctx context.Context, for return nil } + // If we are the primary, store global clients + // Create fragments from global clients and store the segment + if !a.core.IsPerfSecondary() { + globalFragments := append(append(secondaryGlobalClients, globalClients), standbyGlobalClients...) + if ret := a.createCurrentSegmentFromFragments(ctx, globalFragments, &a.currentGlobalSegment, force, activityGlobalPathPrefix); ret != nil { + return ret + } + } + // Swap out the pending local fragments a.localFragmentLock.Lock() localFragment := a.localFragment @@ -635,74 +615,6 @@ func (a *ActivityLog) createCurrentSegmentFromFragments(ctx context.Context, fra return nil } -func (a *ActivityLog) savePreviousTokenSegments(ctx context.Context, startTime int64, pathPrefix string, fragments []*activity.LogFragment) error { - tokenByNamespace := make(map[string]uint64) - for _, fragment := range fragments { - // As of 1.9, a fragment should no longer have any NonEntityTokens. However - // in order to not lose any information about the current segment during the - // month when the client upgrades to 1.9, we must retain this functionality. - for ns, val := range fragment.NonEntityTokens { - // We track these pre-1.9 values in the old location, which is - // a.currentSegment.tokenCount, as opposed to the counter that stores tokens - // without entities that have client IDs, namely - // a.partialMonthClientTracker.nonEntityCountByNamespaceID. This preserves backward - // compatibility for the precomputedQueryWorkers and the segment storing - // logic. 
- tokenByNamespace[ns] += val - } - } - segmentToStore := segmentInfo{ - startTimestamp: startTime, - clientSequenceNumber: 0, - currentClients: &activity.EntityActivityLog{ - Clients: make([]*activity.EntityRecord, 0), - }, - tokenCount: &activity.TokenCount{CountByNamespaceID: tokenByNamespace}, - } - - if _, err := a.saveSegmentEntitiesInternal(ctx, segmentToStore, false, pathPrefix); err != nil { - return err - } - return nil -} - -func (a *ActivityLog) savePreviousEntitySegments(ctx context.Context, startTime int64, pathPrefix string, allFragments []*activity.LogFragment) error { - deduplicatedClients := make(map[string]*activity.EntityRecord) - for _, f := range allFragments { - for _, entity := range f.GetClients() { - deduplicatedClients[entity.ClientID] = entity - } - } - - segmentToStore := segmentInfo{ - startTimestamp: startTime, - clientSequenceNumber: 0, - currentClients: &activity.EntityActivityLog{ - Clients: make([]*activity.EntityRecord, 0), - }, - } - incrementSegmentNum := func() { - segmentToStore.clientSequenceNumber = segmentToStore.clientSequenceNumber + 1 - segmentToStore.currentClients.Clients = make([]*activity.EntityRecord, 0) - } - numAddedClients := 0 - for _, entity := range deduplicatedClients { - segmentToStore.currentClients.Clients = append(segmentToStore.currentClients.Clients, entity) - numAddedClients++ - if numAddedClients%ActivitySegmentClientCapacity == 0 { - if _, err := a.saveSegmentEntitiesInternal(ctx, segmentToStore, false, pathPrefix); err != nil { - return err - } - incrementSegmentNum() - } - } - // Store any remaining clients if they exist - if _, err := a.saveSegmentEntitiesInternal(ctx, segmentToStore, false, pathPrefix); err != nil { - return err - } - return nil -} - // :force: forces a save of tokens/entities even if the in-memory log is empty func (a *ActivityLog) saveCurrentSegmentInternal(ctx context.Context, force bool, currentSegment segmentInfo, storagePathPrefix string) error { _, err := 
a.saveSegmentEntitiesInternal(ctx, currentSegment, force, storagePathPrefix) @@ -799,30 +711,28 @@ func parseSegmentNumberFromPath(path string) (int, bool) { // availableLogs returns the start_time(s) (in UTC) associated with months for which logs exist, // sorted last to first func (a *ActivityLog) availableLogs(ctx context.Context, upTo time.Time) ([]time.Time, error) { - pathSet := make(map[time.Time]struct{}) - out := make([]time.Time, 0) - availableTimes := make([]time.Time, 0) - - times, err := a.availableTimesAtPath(ctx, upTo, activityTokenLocalBasePath) - if err != nil { - return nil, err - } - availableTimes = append(availableTimes, times...) + paths := make([]string, 0) + for _, basePath := range []string{activityLocalPathPrefix + activityEntityBasePath, activityGlobalPathPrefix + activityEntityBasePath, activityTokenLocalBasePath} { + p, err := a.view.List(ctx, basePath) + if err != nil { + return nil, err + } - times, err = a.availableTimesAtPath(ctx, upTo, activityGlobalPathPrefix+activityEntityBasePath) - if err != nil { - return nil, err + paths = append(paths, p...) } - availableTimes = append(availableTimes, times...) - times, err = a.availableTimesAtPath(ctx, upTo, activityLocalPathPrefix+activityEntityBasePath) - if err != nil { - return nil, err - } - availableTimes = append(availableTimes, times...) 
+ pathSet := make(map[time.Time]struct{}) + out := make([]time.Time, 0) + for _, path := range paths { + // generate a set of unique start times + segmentTime, err := timeutil.ParseTimeFromPath(path) + if err != nil { + return nil, err + } + if segmentTime.After(upTo) { + continue + } - // Remove duplicate start times - for _, segmentTime := range availableTimes { if _, present := pathSet[segmentTime]; !present { pathSet[segmentTime] = struct{}{} out = append(out, segmentTime) @@ -839,27 +749,6 @@ func (a *ActivityLog) availableLogs(ctx context.Context, upTo time.Time) ([]time return out, nil } -// availableTimesAtPath returns a sorted list of all available times at the pathPrefix up until the provided time. -func (a *ActivityLog) availableTimesAtPath(ctx context.Context, onlyIncludeTimesUpTo time.Time, path string) ([]time.Time, error) { - paths, err := a.view.List(ctx, path) - if err != nil { - return nil, err - } - out := make([]time.Time, 0) - for _, path := range paths { - // generate a set of unique start times - segmentTime, err := timeutil.ParseTimeFromPath(path) - if err != nil { - return nil, err - } - if segmentTime.After(onlyIncludeTimesUpTo) { - continue - } - out = append(out, segmentTime) - } - return out, nil -} - // getMostRecentActivityLogSegment gets the times (in UTC) associated with the most recent // contiguous set of activity logs, sorted in decreasing order (latest to earliest) func (a *ActivityLog) getMostRecentActivityLogSegment(ctx context.Context, now time.Time) ([]time.Time, error) { @@ -988,42 +877,54 @@ func (a *ActivityLog) loadPriorEntitySegment(ctx context.Context, startTime time // load all the active global clients if !isLocal { globalPath := activityGlobalPathPrefix + activityEntityBasePath + fmt.Sprint(startTime.Unix()) + "/" + strconv.FormatUint(sequenceNum, 10) - out, err := a.readEntitySegmentAtPath(ctx, globalPath) - if err != nil && !errors.Is(err, ErrEmptyResponse) { + data, err := a.view.Get(ctx, globalPath) + if err != 
nil { return err } - if out != nil { - a.globalFragmentLock.Lock() - // Handle the (unlikely) case where the end of the month has been reached while background loading. - // Or the feature has been disabled. - if a.enabled && startTime.Unix() == a.currentGlobalSegment.startTimestamp { - for _, ent := range out.Clients { - a.globalPartialMonthClientTracker[ent.ClientID] = ent - } - } - a.globalFragmentLock.Unlock() + if data == nil { + return nil } - - } else { - // load all the active local clients - localPath := activityLocalPathPrefix + activityEntityBasePath + fmt.Sprint(startTime.Unix()) + "/" + strconv.FormatUint(sequenceNum, 10) - out, err := a.readEntitySegmentAtPath(ctx, localPath) - if err != nil && !errors.Is(err, ErrEmptyResponse) { + out := &activity.EntityActivityLog{} + err = proto.Unmarshal(data.Value, out) + if err != nil { return err } - if out != nil { - a.localFragmentLock.Lock() - // Handle the (unlikely) case where the end of the month has been reached while background loading. - // Or the feature has been disabled. - if a.enabled && startTime.Unix() == a.currentLocalSegment.startTimestamp { - for _, ent := range out.Clients { - a.partialMonthLocalClientTracker[ent.ClientID] = ent - } + a.globalFragmentLock.Lock() + // Handle the (unlikely) case where the end of the month has been reached while background loading. + // Or the feature has been disabled. 
+ if a.enabled && startTime.Unix() == a.currentGlobalSegment.startTimestamp { + for _, ent := range out.Clients { + a.globalPartialMonthClientTracker[ent.ClientID] = ent } - a.localFragmentLock.Unlock() } + a.globalFragmentLock.Unlock() + return nil + } + // load all the active local clients + localPath := activityLocalPathPrefix + activityEntityBasePath + fmt.Sprint(startTime.Unix()) + "/" + strconv.FormatUint(sequenceNum, 10) + data, err := a.view.Get(ctx, localPath) + if err != nil { + return err } + if data == nil { + return nil + } + out := &activity.EntityActivityLog{} + err = proto.Unmarshal(data.Value, out) + if err != nil { + return err + } + a.localFragmentLock.Lock() + // Handle the (unlikely) case where the end of the month has been reached while background loading. + // Or the feature has been disabled. + if a.enabled && startTime.Unix() == a.currentLocalSegment.startTimestamp { + for _, ent := range out.Clients { + a.partialMonthLocalClientTracker[ent.ClientID] = ent + } + } + a.localFragmentLock.Unlock() + return nil } @@ -1031,17 +932,23 @@ func (a *ActivityLog) loadPriorEntitySegment(ctx context.Context, startTime time // into memory (to append new entries), and to the globalPartialMonthClientTracker and partialMonthLocalClientTracker to // avoid duplication call with fragmentLock, globalFragmentLock, localFragmentLock and l held. 
func (a *ActivityLog) loadCurrentClientSegment(ctx context.Context, startTime time.Time, localSegmentSequenceNumber uint64, globalSegmentSequenceNumber uint64) error { - // setting a.currentSegment timestamp to support upgrades - a.currentSegment.startTimestamp = startTime.Unix() - // load current global segment path := activityGlobalPathPrefix + activityEntityBasePath + fmt.Sprint(startTime.Unix()) + "/" + strconv.FormatUint(globalSegmentSequenceNumber, 10) - out, err := a.readEntitySegmentAtPath(ctx, path) - if err != nil && !errors.Is(err, ErrEmptyResponse) { + // setting a.currentSegment timestamp to support upgrades + a.currentSegment.startTimestamp = startTime.Unix() + + data, err := a.view.Get(ctx, path) + if err != nil { return err } - if out != nil { + if data != nil { + out := &activity.EntityActivityLog{} + err = proto.Unmarshal(data.Value, out) + if err != nil { + return err + } + if !a.core.perfStandby { a.currentGlobalSegment = segmentInfo{ startTimestamp: startTime.Unix(), @@ -1064,11 +971,17 @@ func (a *ActivityLog) loadCurrentClientSegment(ctx context.Context, startTime ti // load current local segment path = activityLocalPathPrefix + activityEntityBasePath + fmt.Sprint(startTime.Unix()) + "/" + strconv.FormatUint(localSegmentSequenceNumber, 10) - out, err = a.readEntitySegmentAtPath(ctx, path) - if err != nil && !errors.Is(err, ErrEmptyResponse) { + data, err = a.view.Get(ctx, path) + if err != nil { return err } - if out != nil { + if data != nil { + out := &activity.EntityActivityLog{} + err = proto.Unmarshal(data.Value, out) + if err != nil { + return err + } + if !a.core.perfStandby { a.currentLocalSegment = segmentInfo{ startTimestamp: startTime.Unix(), @@ -1085,41 +998,10 @@ func (a *ActivityLog) loadCurrentClientSegment(ctx context.Context, startTime ti for _, client := range out.Clients { a.partialMonthLocalClientTracker[client.ClientID] = client } - } - - return nil -} -func (a *ActivityLog) readEntitySegmentAtPath(ctx context.Context, 
path string) (*activity.EntityActivityLog, error) { - data, err := a.view.Get(ctx, path) - if err != nil { - return nil, err - } - if data == nil { - return nil, ErrEmptyResponse } - out := &activity.EntityActivityLog{} - err = proto.Unmarshal(data.Value, out) - if err != nil { - return nil, err - } - return out, nil -} -func (a *ActivityLog) readTokenSegmentAtPath(ctx context.Context, path string) (*activity.TokenCount, error) { - data, err := a.view.Get(ctx, path) - if err != nil { - return nil, err - } - if data == nil { - return nil, ErrEmptyResponse - } - out := &activity.TokenCount{} - err = proto.Unmarshal(data.Value, out) - if err != nil { - return nil, err - } - return out, nil + return nil } // tokenCountExists checks if there's a token log for :startTime: @@ -1288,26 +1170,6 @@ func (a *ActivityLog) deleteLogWorker(ctx context.Context, startTimestamp int64, close(whenDone) } -func (a *ActivityLog) deleteOldStoragePathWorker(ctx context.Context, pathPrefix string) { - pathTimes, err := a.view.List(ctx, pathPrefix) - if err != nil { - a.logger.Error("could not list segment paths", "error", err) - return - } - for _, pathTime := range pathTimes { - segments, err := a.view.List(ctx, pathPrefix+pathTime) - if err != nil { - a.logger.Error("could not list segment path", "error", err) - } - for _, seqNum := range segments { - err = a.view.Delete(ctx, pathPrefix+pathTime+seqNum) - if err != nil { - a.logger.Error("could not delete log", "error", err) - } - } - } -} - func (a *ActivityLog) WaitForDeletion() { a.l.Lock() // May be nil, if never set @@ -1646,76 +1508,9 @@ func (c *Core) setupActivityLogLocked(ctx context.Context, wg *sync.WaitGroup, r manager.retentionWorker(ctx, manager.clock.Now(), months) close(manager.retentionDone) }(manager.retentionMonths) - - // We do not want to hold up unseal, and we need access to - // the replicationRpcClient in order for the secondary to migrate data. - // This client is only reliable preset after unseal. 
- c.postUnsealFuncs = append(c.postUnsealFuncs, func() { - c.activityLogMigrationTask(ctx) - }) - - } - return nil -} - -// secondaryDuplicateClientMigrationWorker will attempt to send global data living on the -// current cluster to the primary cluster. This routine will only exit when its connected primary -// has reached version 1.19+, and this cluster has completed sending any global data that lives at the old storage paths -func (c *Core) secondaryDuplicateClientMigrationWorker(ctx context.Context) { - manager := c.activityLog - manager.logger.Trace("started secondary activity log migration worker") - storageMigrationComplete := atomic.NewBool(false) - wg := &sync.WaitGroup{} - wg.Add(1) - go func() { - if !c.IsPerfSecondary() { - // TODO: Create function for the secondary to continuously attempt to send data to the primary - } - - wg.Done() - }() - wg.Add(1) - go func() { - localClients, _, err := manager.extractLocalGlobalClientsDeprecatedStoragePath(ctx) - if err != nil { - return - } - // Store local clients at new path - for month, entitiesForMonth := range localClients { - logFragments := []*activity.LogFragment{{ - Clients: entitiesForMonth, - }} - if err = manager.savePreviousEntitySegments(ctx, month, activityLocalPathPrefix, logFragments); err != nil { - manager.logger.Error("failed to write local segment", "error", err, "month", month) - return - } - } - storageMigrationComplete.Store(true) - // TODO: generate/store PCQs for these local clients - wg.Done() - }() - wg.Wait() - if !storageMigrationComplete.Load() { - manager.logger.Error("could not complete migration of duplicate clients on cluster") - return } - // We have completed the vital portions of the storage migration - if err := manager.writeDedupClientsUpgrade(ctx); err != nil { - manager.logger.Error("could not complete migration of duplicate clients on cluster") - return - } - - // Now that all the clients have been migrated and PCQs have been created, remove all clients at old storage 
paths - manager.oldStoragePathsCleaned = make(chan struct{}) - go func() { - defer close(manager.oldStoragePathsCleaned) - manager.deleteOldStoragePathWorker(ctx, activityEntityBasePath) - manager.deleteOldStoragePathWorker(ctx, activityTokenBasePath) - // TODO: Delete old PCQs - }() - manager.dedupClientsUpgradeComplete.Store(true) - manager.logger.Trace("completed secondary activity log migration worker") + return nil } func (a *ActivityLog) hasRegeneratedACME(ctx context.Context) bool { @@ -1727,15 +1522,6 @@ func (a *ActivityLog) hasRegeneratedACME(ctx context.Context) bool { return regenerated != nil } -func (a *ActivityLog) hasDedupClientsUpgrade(ctx context.Context) bool { - regenerated, err := a.view.Get(ctx, activityDeduplicationUpgradeKey) - if err != nil { - a.logger.Error("unable to access deduplication regeneration key") - return false - } - return regenerated != nil -} - func (a *ActivityLog) writeRegeneratedACME(ctx context.Context) error { regeneratedEntry, err := logical.StorageEntryJSON(activityACMERegenerationKey, true) if err != nil { @@ -1744,14 +1530,6 @@ func (a *ActivityLog) writeRegeneratedACME(ctx context.Context) error { return a.view.Put(ctx, regeneratedEntry) } -func (a *ActivityLog) writeDedupClientsUpgrade(ctx context.Context) error { - regeneratedEntry, err := logical.StorageEntryJSON(activityDeduplicationUpgradeKey, true) - if err != nil { - return err - } - return a.view.Put(ctx, regeneratedEntry) -} - func (a *ActivityLog) regeneratePrecomputedQueries(ctx context.Context) error { ctx, cancel := context.WithCancel(ctx) defer cancel() @@ -1918,12 +1696,7 @@ func (a *ActivityLog) secondaryFragmentWorker(ctx context.Context) { <-timer.C } } - // Only send data if no upgrade is in progress. 
Else, the active worker will - // store the data in a temporary location until it is garbage collected - if a.dedupClientsUpgradeComplete.Load() { - sendFunc() - } - + sendFunc() case <-endOfMonth.C: a.logger.Trace("sending global fragment on end of month") // Flush the current fragment, if any @@ -1933,16 +1706,13 @@ func (a *ActivityLog) secondaryFragmentWorker(ctx context.Context) { <-timer.C } } - // If an upgrade is in progress, don't do anything - // The active fragmentWorker will take care of flushing the clients to a temporary location - if a.dedupClientsUpgradeComplete.Load() { - sendFunc() - // clear active entity set - a.globalFragmentLock.Lock() - a.globalPartialMonthClientTracker = make(map[string]*activity.EntityRecord) - - a.globalFragmentLock.Unlock() - } + sendFunc() + + // clear active entity set + a.globalFragmentLock.Lock() + a.globalPartialMonthClientTracker = make(map[string]*activity.EntityRecord) + + a.globalFragmentLock.Unlock() // Set timer for next month. // The current segment *probably* hasn't been set yet (via invalidation), @@ -4028,110 +3798,6 @@ func (a *ActivityLog) writeExport(ctx context.Context, rw http.ResponseWriter, f return nil } -func (c *Core) activityLogMigrationTask(ctx context.Context) { - manager := c.activityLog - if !c.IsPerfSecondary() { - // If the oldest version is less than 1.19 and no migrations tasks have been run, kick off the migration task - if !manager.OldestVersionHasDeduplicatedClients(ctx) && !manager.hasDedupClientsUpgrade(ctx) { - go c.primaryDuplicateClientMigrationWorker(ctx) - } else { - // Store that upgrade processes have already been completed - manager.writeDedupClientsUpgrade(ctx) - manager.dedupClientsUpgradeComplete.Store(true) - } - } else { - // We kick off the secondary migration worker in any chance that the primary has not yet upgraded. 
- // If we have already completed the migration task, it indicates that the cluster has completed sending data to an - // already upgraded primary - if !manager.hasDedupClientsUpgrade(ctx) { - go c.secondaryDuplicateClientMigrationWorker(ctx) - } else { - // Store that upgrade processes have already been completed - manager.writeDedupClientsUpgrade(ctx) - manager.dedupClientsUpgradeComplete.Store(true) - - } - } -} - -// primaryDuplicateClientMigrationWorker will attempt to receive global data living on the -// connected secondary clusters. Once the data has been received, it will combine it with -// its own global data at old storage paths, and migrate all of it to new storage paths on the -// current cluster. This method wil only exit once all connected secondary clusters have -// upgraded to 1.19, and this cluster receives global data from all of them. -func (c *Core) primaryDuplicateClientMigrationWorker(ctx context.Context) error { - a := c.activityLog - a.logger.Trace("started primary activity log migration worker") - - // Collect global clients from secondary - err := a.waitForSecondaryGlobalClients(ctx) - if err != nil { - return err - } - - // Get local and global entities from previous months - clusterLocalClients, clusterGlobalClients, err := a.extractLocalGlobalClientsDeprecatedStoragePath(ctx) - if err != nil { - a.logger.Error("could not extract local and global clients from storage", "error", err) - return err - } - // Get tokens from previous months at old storage paths - clusterTokens, err := a.extractTokensDeprecatedStoragePath(ctx) - - // TODO: Collect clients from secondaries into slice of fragments - - // Store global clients at new path - for month, entitiesForMonth := range clusterGlobalClients { - logFragments := []*activity.LogFragment{{ - Clients: entitiesForMonth, - }} - if err = a.savePreviousEntitySegments(ctx, month, activityGlobalPathPrefix, logFragments); err != nil { - a.logger.Error("failed to write global segment", "error", err, 
"month", month) - return err - } - } - // Store local clients at new path - for month, entitiesForMonth := range clusterLocalClients { - logFragments := []*activity.LogFragment{{ - Clients: entitiesForMonth, - }} - if err = a.savePreviousEntitySegments(ctx, month, activityLocalPathPrefix, logFragments); err != nil { - a.logger.Error("failed to write local segment", "error", err, "month", month) - return err - } - } - // Store tokens at new path - for month, tokenCount := range clusterTokens { - // Combine all token counts from all clusters - logFragments := make([]*activity.LogFragment, len(tokenCount)) - for i, tokens := range tokenCount { - logFragments[i] = &activity.LogFragment{NonEntityTokens: tokens} - } - if err = a.savePreviousTokenSegments(ctx, month, activityLocalPathPrefix+activityTokenBasePath, logFragments); err != nil { - a.logger.Error("failed to write token segment", "error", err, "month", month) - return err - } - } - - // TODO: After data has been migrated to new locations, we will regenerate all the global and local PCQs - - if err := a.writeDedupClientsUpgrade(ctx); err != nil { - a.logger.Error("could not complete migration of duplicate clients on cluster") - return err - } - // Garbage collect data at old paths - a.oldStoragePathsCleaned = make(chan struct{}) - go func() { - defer close(a.oldStoragePathsCleaned) - a.deleteOldStoragePathWorker(ctx, activityEntityBasePath) - a.deleteOldStoragePathWorker(ctx, activityTokenBasePath) - // We will also need to delete old PCQs - }() - a.dedupClientsUpgradeComplete.Store(true) - a.logger.Trace("completed primary activity log migration worker") - return nil -} - type encoder interface { Encode(*ActivityLogExportRecord) error Flush() diff --git a/vault/activity_log_stubs_oss.go b/vault/activity_log_stubs_oss.go index e7115d41e475..7d2457360563 100644 --- a/vault/activity_log_stubs_oss.go +++ b/vault/activity_log_stubs_oss.go @@ -5,22 +5,11 @@ package vault -import ( - "context" - "errors" -) +import 
"context" //go:generate go run github.com/hashicorp/vault/tools/stubmaker -// ErrEmptyResponse error is used to avoid returning "nil, nil" from a function -var ErrEmptyResponse = errors.New("empty response; the system encountered a statement that exclusively returns nil values") - // sendGlobalClients is a no-op on CE func (a *ActivityLog) sendGlobalClients(ctx context.Context) error { return nil } - -// waitForSecondaryGlobalClients is a no-op on CE -func (a *ActivityLog) waitForSecondaryGlobalClients(ctx context.Context) error { - return nil -} diff --git a/vault/activity_log_test.go b/vault/activity_log_test.go index 8599592d8007..1f36a7856582 100644 --- a/vault/activity_log_test.go +++ b/vault/activity_log_test.go @@ -5821,280 +5821,3 @@ func TestCreateSegment_StoreSegment(t *testing.T) { }) } } - -// TestActivityLog_PrimaryDuplicateClientMigrationWorker verifies that the primary -// migration worker correctly moves data from old location to the new location -func TestActivityLog_PrimaryDuplicateClientMigrationWorker(t *testing.T) { - cluster := NewTestCluster(t, nil, nil) - core := cluster.Cores[0].Core - a := core.activityLog - a.SetEnable(true) - - ctx := context.Background() - timeStamp := time.Now() - startOfMonth := timeutil.StartOfMonth(timeStamp) - oneMonthAgo := timeutil.StartOfPreviousMonth(timeStamp) - twoMonthsAgo := timeutil.StartOfPreviousMonth(oneMonthAgo) - - clientRecordsGlobal := make([]*activity.EntityRecord, ActivitySegmentClientCapacity*2+1) - for i := range clientRecordsGlobal { - clientRecordsGlobal[i] = &activity.EntityRecord{ - ClientID: fmt.Sprintf("111122222-3333-4444-5555-%012v", i), - Timestamp: timeStamp.Unix(), - NonEntity: false, - } - } - clientRecordsLocal := make([]*activity.EntityRecord, ActivitySegmentClientCapacity*2+1) - for i := range clientRecordsGlobal { - clientRecordsLocal[i] = &activity.EntityRecord{ - ClientID: fmt.Sprintf("011122222-3333-4444-5555-%012v", i), - Timestamp: timeStamp.Unix(), - // This is to trick the 
system into believing this a local client when parsing data - ClientType: nonEntityTokenActivityType, - } - } - - tokenCounts := map[string]uint64{ - "ns1": 10, - "ns2": 11, - "ns3": 12, - } - - // Write global and local clients to old path - a.savePreviousEntitySegments(ctx, twoMonthsAgo.Unix(), "", []*activity.LogFragment{{Clients: append(clientRecordsLocal, clientRecordsGlobal...)}}) - a.savePreviousEntitySegments(ctx, oneMonthAgo.Unix(), "", []*activity.LogFragment{{Clients: append(clientRecordsLocal[1:], clientRecordsGlobal[1:]...)}}) - a.savePreviousEntitySegments(ctx, startOfMonth.Unix(), "", []*activity.LogFragment{{Clients: append(clientRecordsLocal[2:], clientRecordsGlobal[2:]...)}}) - - // Assert that the migration workers have not been run - require.True(t, a.hasDedupClientsUpgrade(ctx)) - require.True(t, a.dedupClientsUpgradeComplete.Load()) - - // Resetting this to false so that we can - // verify that after the migrations is completed, the correct values have been stored - a.dedupClientsUpgradeComplete.Store(false) - require.NoError(t, a.view.Delete(ctx, activityDeduplicationUpgradeKey)) - - // Forcefully run the primary migration worker - core.primaryDuplicateClientMigrationWorker(ctx) - - // Verify that we have the correct number of global clients at the new storage paths - times := []time.Time{twoMonthsAgo, oneMonthAgo, startOfMonth} - for index, time := range times { - reader, err := a.NewSegmentFileReader(ctx, time) - require.NoError(t, err) - globalClients := make([]*activity.EntityRecord, 0) - for { - segment, err := reader.ReadGlobalEntity(ctx) - if errors.Is(err, io.EOF) { - break - } - require.NoError(t, err) - globalClients = append(globalClients, segment.GetClients()...) 
- } - require.Equal(t, len(clientRecordsGlobal)-index, len(globalClients)) - } - - // Verify local clients - for index, time := range times { - reader, err := a.NewSegmentFileReader(ctx, time) - require.NoError(t, err) - localClients := make([]*activity.EntityRecord, 0) - for { - segment, err := reader.ReadLocalEntity(ctx) - if errors.Is(err, io.EOF) { - break - } - require.NoError(t, err) - localClients = append(localClients, segment.GetClients()...) - } - require.Equal(t, len(clientRecordsLocal)-index, len(localClients)) - } - - // Verify non-entity tokens have been correctly migrated - for _, time := range times { - reader, err := a.NewSegmentFileReader(ctx, time) - require.NoError(t, err) - for { - segment, err := reader.ReadToken(ctx) - if errors.Is(err, io.EOF) { - break - } - require.NoError(t, err) - // Verify that the data is correct - deep.Equal(segment.GetCountByNamespaceID(), tokenCounts) - } - } - - // Check that the storage key has been updated - require.True(t, a.hasDedupClientsUpgrade(ctx)) - // Check that the bool has been updated - require.True(t, a.dedupClientsUpgradeComplete.Load()) - - // Wait for the deletion of old logs to complete - timeout := time.After(25 * time.Second) - // Wait for channel indicating deletion to be written - select { - case <-timeout: - t.Fatal("timed out waiting for deletion to complete") - case <-a.oldStoragePathsCleaned: - break - } - - // Verify there is no data at the old paths - times, err := a.availableTimesAtPath(ctx, time.Now(), activityEntityBasePath) - require.NoError(t, err) - require.Equal(t, 0, len(times)) - - // Verify there is no data at the old token paths - times, err = a.availableTimesAtPath(ctx, time.Now(), activityTokenBasePath) - require.NoError(t, err) - require.Equal(t, 0, len(times)) -} - -// TestActivityLog_SecondaryDuplicateClientMigrationWorker verifies that the secondary -// migration worker correctly moves local data from old location to the new location -func 
TestActivityLog_SecondaryDuplicateClientMigrationWorker(t *testing.T) { - cluster := NewTestCluster(t, nil, nil) - core := cluster.Cores[0].Core - a := core.activityLog - a.SetEnable(true) - - ctx := context.Background() - timeStamp := time.Now() - startOfMonth := timeutil.StartOfMonth(timeStamp) - oneMonthAgo := timeutil.StartOfPreviousMonth(timeStamp) - twoMonthsAgo := timeutil.StartOfPreviousMonth(oneMonthAgo) - - clientRecordsGlobal := make([]*activity.EntityRecord, ActivitySegmentClientCapacity*2+1) - for i := range clientRecordsGlobal { - clientRecordsGlobal[i] = &activity.EntityRecord{ - ClientID: fmt.Sprintf("111122222-3333-4444-5555-%012v", i), - Timestamp: timeStamp.Unix(), - NonEntity: false, - } - } - clientRecordsLocal := make([]*activity.EntityRecord, ActivitySegmentClientCapacity*2+1) - for i := range clientRecordsGlobal { - clientRecordsLocal[i] = &activity.EntityRecord{ - ClientID: fmt.Sprintf("011122222-3333-4444-5555-%012v", i), - Timestamp: timeStamp.Unix(), - // This is to trick the system into believing this a local client when parsing data - ClientType: nonEntityTokenActivityType, - } - } - - tokenCounts := map[string]uint64{ - "ns1": 10, - "ns2": 11, - "ns3": 12, - } - - // Write global and local clients to old path - a.savePreviousEntitySegments(ctx, twoMonthsAgo.Unix(), "", []*activity.LogFragment{{Clients: append(clientRecordsLocal, clientRecordsGlobal...)}}) - a.savePreviousEntitySegments(ctx, oneMonthAgo.Unix(), "", []*activity.LogFragment{{Clients: append(clientRecordsLocal[1:], clientRecordsGlobal[1:]...)}}) - a.savePreviousEntitySegments(ctx, startOfMonth.Unix(), "", []*activity.LogFragment{{Clients: append(clientRecordsLocal[2:], clientRecordsGlobal[2:]...)}}) - - // Write tokens to old path - a.savePreviousTokenSegments(ctx, twoMonthsAgo.Unix(), "", []*activity.LogFragment{{NonEntityTokens: tokenCounts}}) - a.savePreviousTokenSegments(ctx, oneMonthAgo.Unix(), "", []*activity.LogFragment{{NonEntityTokens: tokenCounts}}) - 
a.savePreviousTokenSegments(ctx, startOfMonth.Unix(), "", []*activity.LogFragment{{NonEntityTokens: tokenCounts}}) - - // Assert that the migration workers have not been run - require.True(t, a.hasDedupClientsUpgrade(ctx)) - require.True(t, a.dedupClientsUpgradeComplete.Load()) - - // Resetting this to false so that we can - // verify that after the migrations is completed, the correct values have been stored - a.dedupClientsUpgradeComplete.Store(false) - require.NoError(t, a.view.Delete(ctx, activityDeduplicationUpgradeKey)) - - // Forcefully run the secondary migration worker - core.secondaryDuplicateClientMigrationWorker(ctx) - - // Wait for the storage migration to complete - ticker := time.NewTicker(100 * time.Millisecond) - timeout := time.After(25 * time.Second) - for { - select { - case <-timeout: - t.Fatal("timed out waiting for migration to complete") - case <-ticker.C: - } - if a.dedupClientsUpgradeComplete.Load() { - break - } - } - - // Verify that no global clients have been migrated - times := []time.Time{twoMonthsAgo, oneMonthAgo, startOfMonth} - for _, time := range times { - reader, err := a.NewSegmentFileReader(ctx, time) - require.NoError(t, err) - globalClients := make([]*activity.EntityRecord, 0) - for { - segment, err := reader.ReadGlobalEntity(ctx) - if errors.Is(err, io.EOF) { - break - } - require.NoError(t, err) - globalClients = append(globalClients, segment.GetClients()...) - } - require.Equal(t, 0, len(globalClients)) - } - - // Verify local clients have been correctly migrated - for index, time := range times { - reader, err := a.NewSegmentFileReader(ctx, time) - require.NoError(t, err) - localClients := make([]*activity.EntityRecord, 0) - for { - segment, err := reader.ReadLocalEntity(ctx) - if errors.Is(err, io.EOF) { - break - } - require.NoError(t, err) - localClients = append(localClients, segment.GetClients()...) 
- } - require.Equal(t, len(clientRecordsLocal)-index, len(localClients)) - } - - // Verify non-entity tokens have been correctly migrated - for _, time := range times { - reader, err := a.NewSegmentFileReader(ctx, time) - require.NoError(t, err) - for { - segment, err := reader.ReadToken(ctx) - if errors.Is(err, io.EOF) { - break - } - require.NoError(t, err) - // Verify that the data is correct - deep.Equal(segment.GetCountByNamespaceID(), tokenCounts) - } - } - - // Check that the storage key has been updated - require.True(t, a.hasDedupClientsUpgrade(ctx)) - // Check that the bool has been updated - require.True(t, a.dedupClientsUpgradeComplete.Load()) - - // Wait for the deletion of old logs to complete - timeout = time.After(25 * time.Second) - // Wait for channel indicating deletion to be written - select { - case <-timeout: - t.Fatal("timed out waiting for deletion to complete") - case <-a.oldStoragePathsCleaned: - break - } - - // Verify there is no data at the old entity paths - times, err := a.availableTimesAtPath(ctx, time.Now(), activityEntityBasePath) - require.NoError(t, err) - require.Equal(t, 0, len(times)) - - // Verify there is no data at the old token paths - times, err = a.availableTimesAtPath(ctx, time.Now(), activityTokenBasePath) - require.NoError(t, err) - require.Equal(t, 0, len(times)) -} diff --git a/vault/activity_log_util_common.go b/vault/activity_log_util_common.go index 86c824adebab..f3cd616ed99a 100644 --- a/vault/activity_log_util_common.go +++ b/vault/activity_log_util_common.go @@ -14,7 +14,6 @@ import ( "time" "github.com/axiomhq/hyperloglog" - semver "github.com/hashicorp/go-version" "github.com/hashicorp/vault/helper/timeutil" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/vault/activity" @@ -553,98 +552,3 @@ func (a *ActivityLog) namespaceRecordToCountsResponse(record *activity.Namespace ACMEClients: int(record.ACMEClients), } } - -func (a *ActivityLog) extractLocalGlobalClientsDeprecatedStoragePath(ctx 
context.Context) (map[int64][]*activity.EntityRecord, map[int64][]*activity.EntityRecord, error) { - clusterGlobalClients := make(map[int64][]*activity.EntityRecord) - clusterLocalClients := make(map[int64][]*activity.EntityRecord) - - // Extract global clients on the current cluster per month store them in a map - times, err := a.availableTimesAtPath(ctx, time.Now(), activityEntityBasePath) - if err != nil { - a.logger.Error("could not list available logs until now") - return clusterLocalClients, clusterGlobalClients, fmt.Errorf("could not list available logs on the cluster") - } - for _, time := range times { - entityPath := activityEntityBasePath + fmt.Sprint(time.Unix()) + "/" - segmentPaths, err := a.view.List(ctx, entityPath) - if err != nil { - return nil, nil, err - } - for _, seqNumber := range segmentPaths { - segment, err := a.readEntitySegmentAtPath(ctx, entityPath+seqNumber) - if segment == nil { - continue - } - if err != nil { - a.logger.Warn("failed to read segment", "error", err) - return clusterLocalClients, clusterGlobalClients, err - } - for _, entity := range segment.GetClients() { - // If the client is not local, then add it to a map - if local, _ := a.isClientLocal(entity); !local { - if _, ok := clusterGlobalClients[time.Unix()]; !ok { - clusterGlobalClients[time.Unix()] = make([]*activity.EntityRecord, 0) - } - clusterGlobalClients[time.Unix()] = append(clusterGlobalClients[time.Unix()], entity) - } else { - if _, ok := clusterLocalClients[time.Unix()]; !ok { - clusterLocalClients[time.Unix()] = make([]*activity.EntityRecord, 0) - } - clusterLocalClients[time.Unix()] = append(clusterLocalClients[time.Unix()], entity) - } - } - } - } - - return clusterLocalClients, clusterGlobalClients, nil -} - -func (a *ActivityLog) extractTokensDeprecatedStoragePath(ctx context.Context) (map[int64][]map[string]uint64, error) { - tokensByMonth := make(map[int64][]map[string]uint64) - times, err := a.availableTimesAtPath(ctx, time.Now(), 
activityTokenBasePath) - if err != nil { - return nil, err - } - for _, monthTime := range times { - tokenPath := activityTokenBasePath + fmt.Sprint(monthTime.Unix()) + "/" - segmentPaths, err := a.view.List(ctx, tokenPath) - if err != nil { - return nil, err - } - tokensByMonth[monthTime.Unix()] = make([]map[string]uint64, 0) - for _, seqNum := range segmentPaths { - tokenCount, err := a.readTokenSegmentAtPath(ctx, tokenPath+seqNum) - if tokenCount == nil { - a.logger.Error("data at path has been unexpectedly deleted", "path", tokenPath+seqNum) - continue - } - if err != nil { - return nil, err - } - tokensByMonth[monthTime.Unix()] = append(tokensByMonth[monthTime.Unix()], tokenCount.CountByNamespaceID) - } - } - return tokensByMonth, nil -} - -// OldestVersionHasDeduplicatedClients returns whether this cluster is 1.19+, and -// hence supports deduplicated clients -func (a *ActivityLog) OldestVersionHasDeduplicatedClients(ctx context.Context) bool { - oldestVersionIsDedupClients := a.core.IsNewInstall(ctx) - if !oldestVersionIsDedupClients { - if v, _, err := a.core.FindOldestVersionTimestamp(); err == nil { - oldestVersion, err := semver.NewSemver(v) - if err != nil { - a.core.logger.Debug("could not extract version instance", "version", v) - return false - } - dedupChangeVersion, err := semver.NewSemver(DeduplicatedClientMinimumVersion) - if err != nil { - a.core.logger.Debug("could not extract version instance", "version", DeduplicatedClientMinimumVersion) - return false - } - oldestVersionIsDedupClients = oldestVersionIsDedupClients || oldestVersion.GreaterThanOrEqual(dedupChangeVersion) - } - } - return oldestVersionIsDedupClients -} diff --git a/vault/activity_log_util_common_test.go b/vault/activity_log_util_common_test.go index 2d0a0c4ceee2..f84775da3fc2 100644 --- a/vault/activity_log_util_common_test.go +++ b/vault/activity_log_util_common_test.go @@ -13,7 +13,6 @@ import ( "time" "github.com/axiomhq/hyperloglog" - "github.com/go-test/deep" 
"github.com/hashicorp/vault/helper/timeutil" "github.com/hashicorp/vault/vault/activity" "github.com/stretchr/testify/require" @@ -1015,14 +1014,6 @@ func writeTokenSegment(t *testing.T, core *Core, ts time.Time, index int, item * WriteToStorage(t, core, makeSegmentPath(t, activityTokenLocalBasePath, ts, index), protoItem) } -// writeTokenSegmentOldPath writes a single segment file with the given time and index for a token at the old path -func writeTokenSegmentOldPath(t *testing.T, core *Core, ts time.Time, index int, item *activity.TokenCount) { - t.Helper() - protoItem, err := proto.Marshal(item) - require.NoError(t, err) - WriteToStorage(t, core, makeSegmentPath(t, activityTokenBasePath, ts, index), protoItem) -} - // makeSegmentPath formats the path for a segment at a particular time and index func makeSegmentPath(t *testing.T, typ string, ts time.Time, index int) string { t.Helper() @@ -1222,50 +1213,3 @@ func TestSegmentFileReader(t *testing.T) { require.True(t, proto.Equal(gotTokens[i], tokens[i])) } } - -// TestExtractTokens_OldStoragePaths verifies that the correct tokens are extracted -// from the old token paths in storage. These old storage paths were used in <=1.9 to -// store tokens without clientIds (non-entity tokens). 
-func TestExtractTokens_OldStoragePaths(t *testing.T) { - core, _, _ := TestCoreUnsealed(t) - now := time.Now() - - // write token at index 3 - token := &activity.TokenCount{CountByNamespaceID: map[string]uint64{ - "ns": 10, - "ns3": 1, - "ns1": 2, - }} - - lastMonth := timeutil.StartOfPreviousMonth(now) - twoMonthsAgo := timeutil.StartOfPreviousMonth(lastMonth) - - thisMonthData := []map[string]uint64{token.CountByNamespaceID, token.CountByNamespaceID} - lastMonthData := []map[string]uint64{token.CountByNamespaceID, token.CountByNamespaceID, token.CountByNamespaceID, token.CountByNamespaceID} - twoMonthsAgoData := []map[string]uint64{token.CountByNamespaceID} - - expected := map[int64][]map[string]uint64{ - now.Unix(): thisMonthData, - lastMonth.Unix(): lastMonthData, - twoMonthsAgo.Unix(): twoMonthsAgoData, - } - - // This month's token data is at broken segment sequences - writeTokenSegmentOldPath(t, core, now, 1, token) - writeTokenSegmentOldPath(t, core, now, 3, token) - // Last months token data is at normal segment sequences - writeTokenSegmentOldPath(t, core, lastMonth, 0, token) - writeTokenSegmentOldPath(t, core, lastMonth, 1, token) - writeTokenSegmentOldPath(t, core, lastMonth, 2, token) - writeTokenSegmentOldPath(t, core, lastMonth, 3, token) - // Month before is at only one random segment sequence - writeTokenSegmentOldPath(t, core, twoMonthsAgo, 2, token) - - tokens, err := core.activityLog.extractTokensDeprecatedStoragePath(context.Background()) - require.NoError(t, err) - require.Equal(t, 3, len(tokens)) - - if diff := deep.Equal(expected, tokens); diff != nil { - t.Fatal(diff) - } -} From f6910bbb2e947b354905836f33912fe3194dbcc5 Mon Sep 17 00:00:00 2001 From: Victor Rodriguez Date: Mon, 23 Dec 2024 20:56:41 +0100 Subject: [PATCH 13/15] Sort CA chain into root and intermediates on VerifyCertificate. (#29255) Sort CA chain into root and intermediates on VerifyCertificate. 
In order for the Certificate.Verify method to work correctly, the certificates in the CA chain need to be sorted into separate root and intermediate certificate pools. Add unit tests to verify that name constraints in both the root and intermediate certificates are checked. --- builtin/logical/pki/cert_util_test.go | 113 +++++++++++++++++++++ builtin/logical/pki/issuing/cert_verify.go | 24 +++-- changelog/29255.txt | 3 + 3 files changed, 133 insertions(+), 7 deletions(-) create mode 100644 changelog/29255.txt diff --git a/builtin/logical/pki/cert_util_test.go b/builtin/logical/pki/cert_util_test.go index 30f0f71c7715..90b8a12f438f 100644 --- a/builtin/logical/pki/cert_util_test.go +++ b/builtin/logical/pki/cert_util_test.go @@ -8,6 +8,7 @@ import ( "crypto/x509" "crypto/x509/pkix" "fmt" + "github.com/hashicorp/vault/sdk/helper/testhelpers/schema" "net" "net/url" "os" @@ -1165,3 +1166,115 @@ func testParseCsrToFields(t *testing.T, issueTime time.Time, tt *parseCertificat t.Errorf("testParseCertificateToFields() diff: %s", strings.ReplaceAll(strings.Join(diff, "\n"), "map", "\nmap")) } } + +// TestVerify_chained_name_constraints verifies that we perform name constraints certificate validation using the +// entire CA chain. +// +// This test constructs a root CA that +// - allows: .example.com +// - excludes: bad.example.com +// +// and an intermediate that +// - forbids alsobad.example.com +// +// It verifies that the intermediate +// - can issue certs like good.example.com +// - rejects names like notanexample.com since they are not in the namespace of names permitted by the root CA +// - rejects bad.example.com, since the root CA excludes it +// - rejects alsobad.example.com, since the intermediate CA excludes it. 
+func TestVerify_chained_name_constraints(t *testing.T) { + t.Parallel() + bRoot, sRoot := CreateBackendWithStorage(t) + + //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + // Setup + + var bInt *backend + var sInt logical.Storage + { + resp, err := CBWrite(bRoot, sRoot, "root/generate/internal", map[string]interface{}{ + "ttl": "40h", + "common_name": "myvault.com", + "permitted_dns_domains": ".example.com", + "excluded_dns_domains": "bad.example.com", + }) + require.NoError(t, err) + require.NotNil(t, resp) + + // Create the CSR + bInt, sInt = CreateBackendWithStorage(t) + resp, err = CBWrite(bInt, sInt, "intermediate/generate/internal", map[string]interface{}{ + "common_name": "myint.com", + }) + require.NoError(t, err) + schema.ValidateResponse(t, schema.GetResponseSchema(t, bRoot.Route("intermediate/generate/internal"), logical.UpdateOperation), resp, true) + csr := resp.Data["csr"] + + // Sign the CSR + resp, err = CBWrite(bRoot, sRoot, "root/sign-intermediate", map[string]interface{}{ + "common_name": "myint.com", + "csr": csr, + "ttl": "60h", + "excluded_dns_domains": "alsobad.example.com", + }) + require.NoError(t, err) + require.NotNil(t, resp) + + // Import the New Signed Certificate into the Intermediate Mount. + // Note that we append the root CA certificate to the signed intermediate, so that + // the entire chain is stored by set-signed. 
+ resp, err = CBWrite(bInt, sInt, "intermediate/set-signed", map[string]interface{}{ + "certificate": strings.Join(resp.Data["ca_chain"].([]string), "\n"), + }) + require.NoError(t, err) + + // Create a Role in the Intermediate Mount + resp, err = CBWrite(bInt, sInt, "roles/test", map[string]interface{}{ + "allow_bare_domains": true, + "allow_subdomains": true, + "allow_any_name": true, + }) + require.NoError(t, err) + require.NotNil(t, resp) + } + + //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + // Tests + + testCases := []struct { + commonName string + wantError string + }{ + { + commonName: "good.example.com", + }, + { + commonName: "notanexample.com", + wantError: "should not be permitted by root CA", + }, + { + commonName: "bad.example.com", + wantError: "should be rejected by the root CA", + }, + { + commonName: "alsobad.example.com", + wantError: "should be rejected by the intermediate CA", + }, + } + + for _, tc := range testCases { + t.Run(tc.commonName, func(t *testing.T) { + resp, err := CBWrite(bInt, sInt, "issue/test", map[string]any{ + "common_name": tc.commonName, + }) + if tc.wantError != "" { + require.Error(t, err, tc.wantError) + require.ErrorContains(t, err, "certificate is not authorized to sign for this name") + require.Nil(t, resp) + } else { + require.NoError(t, err) + require.NoError(t, resp.Error()) + } + }) + } +} diff --git a/builtin/logical/pki/issuing/cert_verify.go b/builtin/logical/pki/issuing/cert_verify.go index f17cd7de087d..a97f67bc5d43 100644 --- a/builtin/logical/pki/issuing/cert_verify.go +++ b/builtin/logical/pki/issuing/cert_verify.go @@ -4,6 +4,7 @@ package issuing import ( + "bytes" "context" "fmt" "os" @@ -42,26 +43,35 @@ func VerifyCertificate(ctx context.Context, storage logical.Storage, issuerId Is return nil } - certChainPool := ctx509.NewCertPool() + rootCertPool := ctx509.NewCertPool() + intermediateCertPool := ctx509.NewCertPool() + for _, 
certificate := range parsedBundle.CAChain { cert, err := convertCertificate(certificate.Bytes) if err != nil { return err } - certChainPool.AddCert(cert) + if bytes.Equal(cert.RawIssuer, cert.RawSubject) { + rootCertPool.AddCert(cert) + } else { + intermediateCertPool.AddCert(cert) + } + } + if len(rootCertPool.Subjects()) < 1 { + // Alright, this is weird, since we don't have the root CA, we'll treat the intermediate as + // the root, otherwise we'll get a "x509: certificate signed by unknown authority" error. + rootCertPool, intermediateCertPool = intermediateCertPool, rootCertPool } - - // Validation Code, assuming we need to validate the entire chain of constraints // Note that we use github.com/google/certificate-transparency-go/x509 to perform certificate verification, // since that library provides options to disable checks that the standard library does not. options := ctx509.VerifyOptions{ - Intermediates: nil, // We aren't verifying the chain here, this would do more work - Roots: certChainPool, + Roots: rootCertPool, + Intermediates: intermediateCertPool, CurrentTime: time.Time{}, KeyUsages: nil, - MaxConstraintComparisions: 0, // This means infinite + MaxConstraintComparisions: 0, // Use the library's 'sensible default' DisableTimeChecks: true, DisableEKUChecks: true, DisableCriticalExtensionChecks: false, diff --git a/changelog/29255.txt b/changelog/29255.txt new file mode 100644 index 000000000000..2d9eda7b28af --- /dev/null +++ b/changelog/29255.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: Fix a bug that prevented the full CA chain to be used when enforcing name constraints. 
+``` From 847597fbae0663870d492bc93ad7d57eb44b7627 Mon Sep 17 00:00:00 2001 From: claire bontempo <68122737+hellobontempo@users.noreply.github.com> Date: Mon, 23 Dec 2024 18:22:21 -0600 Subject: [PATCH 14/15] add open api params (#29257) --- .../acceptance/open-api-path-help-test.js | 2 +- .../helpers/openapi/expected-secret-attrs.js | 49 +++++++++++++++++++ 2 files changed, 50 insertions(+), 1 deletion(-) diff --git a/ui/tests/acceptance/open-api-path-help-test.js b/ui/tests/acceptance/open-api-path-help-test.js index e48316d9fe31..621d36326a84 100644 --- a/ui/tests/acceptance/open-api-path-help-test.js +++ b/ui/tests/acceptance/open-api-path-help-test.js @@ -80,7 +80,7 @@ function secretEngineHelper(test, secretEngine) { assert.deepEqual( Object.keys(result).sort(), Object.keys(expected).sort(), - `getProps returns expected attributes for ${modelName}` + `getProps returns expected attributes for ${modelName} (help url: "${helpUrl}")` ); Object.keys(expected).forEach((attrName) => { assert.deepEqual(result[attrName], expected[attrName], `${attrName} attribute details match`); diff --git a/ui/tests/helpers/openapi/expected-secret-attrs.js b/ui/tests/helpers/openapi/expected-secret-attrs.js index 1fb27c2eba76..c72160e1a963 100644 --- a/ui/tests/helpers/openapi/expected-secret-attrs.js +++ b/ui/tests/helpers/openapi/expected-secret-attrs.js @@ -1234,6 +1234,34 @@ const pki = { label: 'Exclude Common Name from Subject Alternative Names (SANs)', type: 'boolean', }, + excludedDnsDomains: { + editType: 'stringArray', + fieldGroup: 'default', + helpText: + 'Domains for which this certificate is not allowed to sign or issue child certificates (see https://tools.ietf.org/html/rfc5280#section-4.2.1.10).', + label: 'Excluded DNS Domains', + }, + excludedEmailAddresses: { + editType: 'stringArray', + fieldGroup: 'default', + helpText: + 'Email addresses for which this certificate is not allowed to sign or issue child certificates (see 
https://tools.ietf.org/html/rfc5280#section-4.2.1.10).', + label: 'Excluded email addresses', + }, + excludedIpRanges: { + editType: 'stringArray', + fieldGroup: 'default', + helpText: + 'IP ranges for which this certificate is not allowed to sign or issue child certificates (see https://tools.ietf.org/html/rfc5280#section-4.2.1.10). Ranges must be specified in the notation of IP address and prefix length, like "192.0.2.0/24" or "2001:db8::/32", as defined in RFC 4632 and RFC 4291.', + label: 'Excluded IP ranges', + }, + excludedUriDomains: { + editType: 'stringArray', + fieldGroup: 'default', + helpText: + 'URI domains for which this certificate is not allowed to sign or issue child certificates (see https://tools.ietf.org/html/rfc5280#section-4.2.1.10).', + label: 'Excluded URI domains', + }, format: { editType: 'string', helpText: @@ -1312,6 +1340,27 @@ const pki = { fieldGroup: 'default', label: 'Permitted DNS Domains', }, + permittedEmailAddresses: { + editType: 'stringArray', + fieldGroup: 'default', + helpText: + 'Email addresses for which this certificate is allowed to sign or issue child certificates (see https://tools.ietf.org/html/rfc5280#section-4.2.1.10).', + label: 'Permitted email adresses', + }, + permittedIpRanges: { + editType: 'stringArray', + fieldGroup: 'default', + helpText: + 'IP ranges for which this certificate is allowed to sign or issue child certificates (see https://tools.ietf.org/html/rfc5280#section-4.2.1.10). 
Ranges must be specified in the notation of IP address and prefix length, like "192.0.2.0/24" or "2001:db8::/32", as defined in RFC 4632 and RFC 4291.', + label: 'Permitted IP ranges', + }, + permittedUriDomains: { + editType: 'stringArray', + fieldGroup: 'default', + helpText: + 'URI domains for which this certificate is allowed to sign or issue child certificates (see https://tools.ietf.org/html/rfc5280#section-4.2.1.10).', + label: 'Permitted URI domains', + }, postalCode: { editType: 'stringArray', helpText: 'If set, Postal Code will be set to this value.', From 28768d5b5b48a13709ddf457e4e8768805e2fdd2 Mon Sep 17 00:00:00 2001 From: "Shannon Roberts (Beagin)" Date: Thu, 26 Dec 2024 09:24:24 -0800 Subject: [PATCH 15/15] [VAULT-33207] Update Policy Modal links to use HDS, add ACL Policies "get started" link (#29254) * [VAULT-33207] Update Policy Modal links to use HDS, add ACL Policies "get started" link * fix failing test --- .../core/addon/components/policy-example.hbs | 20 +++++++++++-------- .../components/policy-example-test.js | 2 +- 2 files changed, 13 insertions(+), 9 deletions(-) diff --git a/ui/lib/core/addon/components/policy-example.hbs b/ui/lib/core/addon/components/policy-example.hbs index 638089f7522c..4c9ac3759871 100644 --- a/ui/lib/core/addon/components/policy-example.hbs +++ b/ui/lib/core/addon/components/policy-example.hbs @@ -6,15 +6,16 @@
{{#if (eq @policyType "acl")}}

- ACL Policies are written in Hashicorp Configuration Language ( - HCL + ACL Policies + are written in Hashicorp Configuration Language ( + HCL ) or JSON and describe which paths in Vault a user or machine is allowed to access. Here is an example policy:

{{else if (eq @policyType "rgp")}}

Role Governing Policies (RGPs) are tied to client tokens or identities which is similar to - ACL policies. They use - Sentinel + ACL policies. They use + Sentinel as a language framework to enable fine-grained policy decisions.

@@ -31,9 +32,9 @@ Endpoint Governing Policies (EGPs) are tied to particular paths (e.g. aws/creds/ ) instead of tokens. They use - Sentinel + Sentinel as a language to access - properties + properties of the incoming requests.

@@ -55,8 +56,11 @@ More information about {{uppercase @policyType}} policies can be found - + here. - +

\ No newline at end of file diff --git a/ui/tests/integration/components/policy-example-test.js b/ui/tests/integration/components/policy-example-test.js index 37659b0585c8..831f5c659fd2 100644 --- a/ui/tests/integration/components/policy-example-test.js +++ b/ui/tests/integration/components/policy-example-test.js @@ -50,7 +50,7 @@ module('Integration | Component | policy-example', function (hooks) { assert .dom(SELECTORS.policyDescription('rgp')) .hasText( - 'Role Governing Policies (RGPs) are tied to client tokens or identities which is similar to ACL policies . They use Sentinel as a language framework to enable fine-grained policy decisions.' + 'Role Governing Policies (RGPs) are tied to client tokens or identities which is similar to ACL policies. They use Sentinel as a language framework to enable fine-grained policy decisions.' ); });