Skip to content

Commit

Permalink
feat(redis): Allow configuring Redis pools individually (#3859)
Browse files Browse the repository at this point in the history
Second attempt at #3843.
Implements #3829.

This time around, if there is only one Redis pool configuration, we only
create one pool and share it via four clones (instead of creating four
identical pools like last time). This means that the single-configuration
case preserves the current behavior, except for the bug fix related
to `DEFAULT_MIN_MAX_CONNECTIONS`.
  • Loading branch information
loewenheim authored Aug 1, 2024
1 parent 3cfb9f0 commit 824cdb2
Show file tree
Hide file tree
Showing 10 changed files with 506 additions and 108 deletions.
2 changes: 2 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,8 @@

- "Cardinality limit" outcomes now report which limit was exceeded. ([#3825](https://github.com/getsentry/relay/pull/3825))
- Derive span browser name from user agent. ([#3834](https://github.com/getsentry/relay/pull/3834))
- Redis pools for the `project_configs`, `cardinality`, `quotas`, and `misc` use cases
  can now be configured individually. ([#3859](https://github.com/getsentry/relay/pull/3859))

**Internal**:

Expand Down
42 changes: 11 additions & 31 deletions relay-config/src/config.rs
Original file line number Diff line number Diff line change
Expand Up @@ -16,15 +16,14 @@ use relay_kafka::{
};
use relay_metrics::aggregator::{AggregatorConfig, FlushBatching};
use relay_metrics::MetricNamespace;
use relay_redis::RedisConfigOptions;
use serde::de::{DeserializeOwned, Unexpected, Visitor};
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use uuid::Uuid;

use crate::aggregator::{AggregatorServiceConfig, ScopedAggregatorConfig};
use crate::byte_size::ByteSize;
use crate::upstream::UpstreamDescriptor;
use crate::{RedisConfig, RedisConnection};
use crate::{create_redis_pools, RedisConfig, RedisConfigs, RedisPoolConfigs};

const DEFAULT_NETWORK_OUTAGE_GRACE_PERIOD: u64 = 10;

Expand Down Expand Up @@ -1076,7 +1075,7 @@ pub struct Processing {
pub kafka_validate_topics: bool,
/// Redis hosts to connect to for storing state for rate limits.
#[serde(default)]
pub redis: Option<RedisConfig>,
pub redis: Option<RedisConfigs>,
/// Maximum chunk size of attachments for Kafka.
#[serde(default = "default_chunk_size")]
pub attachment_chunk_size: ByteSize,
Expand Down Expand Up @@ -1637,7 +1636,7 @@ impl Config {
}

if let Some(redis) = overrides.redis_url {
processing.redis = Some(RedisConfig::single(redis))
processing.redis = Some(RedisConfigs::Unified(RedisConfig::single(redis)))
}

if let Some(kafka_url) = overrides.kafka_url {
Expand Down Expand Up @@ -2356,34 +2355,15 @@ impl Config {
&self.values.processing.topics.unused
}

/// Redis servers to connect to, for rate limiting.
pub fn redis(&self) -> Option<(&RedisConnection, RedisConfigOptions)> {
let cpu_concurrency = self.cpu_concurrency();
/// Redis servers to connect to for project configs, cardinality limits,
/// rate limiting, and metrics metadata.
pub fn redis(&self) -> Option<RedisPoolConfigs> {
let redis_configs = self.values.processing.redis.as_ref()?;

let redis = self.values.processing.redis.as_ref()?;

let max_connections = redis
.options
.max_connections
.unwrap_or(cpu_concurrency as u32 * 2)
.max(crate::redis::DEFAULT_MIN_MAX_CONNECTIONS);

let min_idle = redis
.options
.min_idle
.unwrap_or_else(|| max_connections.div_ceil(crate::redis::DEFAULT_MIN_IDLE_RATIO));

let options = RedisConfigOptions {
max_connections,
min_idle: Some(min_idle),
connection_timeout: redis.options.connection_timeout,
max_lifetime: redis.options.max_lifetime,
idle_timeout: redis.options.idle_timeout,
read_timeout: redis.options.read_timeout,
write_timeout: redis.options.write_timeout,
};

Some((&redis.connection, options))
Some(create_redis_pools(
redis_configs,
self.cpu_concurrency() as u32,
))
}

/// Chunk size of attachments in bytes.
Expand Down
Loading

0 comments on commit 824cdb2

Please sign in to comment.