From 130050d1435c212020dd652c1f611801fd3721bb Mon Sep 17 00:00:00 2001 From: Zahari Dichev Date: Mon, 21 Oct 2024 13:57:35 +0000 Subject: [PATCH 01/11] serve EgressNetwork responses Signed-off-by: Zahari Dichev --- Cargo.lock | 3 +- Cargo.toml | 3 + policy-controller/core/src/outbound.rs | 91 +- policy-controller/grpc/src/outbound.rs | 276 +++-- policy-controller/grpc/src/outbound/grpc.rs | 307 ++++-- policy-controller/grpc/src/outbound/http.rs | 299 ++++-- policy-controller/grpc/src/outbound/tcp.rs | 247 +++++ policy-controller/grpc/src/outbound/tls.rs | 253 +++++ policy-controller/grpc/src/routes.rs | 15 +- .../k8s/api/src/policy/network.rs | 33 + policy-controller/k8s/index/Cargo.toml | 1 + .../k8s/index/src/inbound/index/grpc.rs | 2 +- .../k8s/index/src/inbound/index/http.rs | 4 +- policy-controller/k8s/index/src/outbound.rs | 2 +- .../k8s/index/src/outbound/index.rs | 949 +++++++++++++++--- .../src/outbound/index/egress_network.rs | 284 ++++++ .../k8s/index/src/outbound/index/grpc.rs | 12 +- .../k8s/index/src/outbound/index/http.rs | 124 +-- .../k8s/index/src/outbound/index/metrics.rs | 4 +- .../k8s/index/src/outbound/index/tcp.rs | 106 ++ .../k8s/index/src/outbound/index/tls.rs | 43 + .../k8s/index/src/outbound/tests.rs | 29 +- .../k8s/index/src/outbound/tests/routes.rs | 7 + .../index/src/outbound/tests/routes/grpc.rs | 117 ++- .../index/src/outbound/tests/routes/http.rs | 118 ++- .../index/src/outbound/tests/routes/tcp.rs | 241 +++++ .../index/src/outbound/tests/routes/tls.rs | 242 +++++ policy-controller/k8s/index/src/routes.rs | 16 +- .../k8s/index/src/routes/http.rs | 14 - policy-controller/src/lib.rs | 66 +- policy-controller/src/main.rs | 12 +- policy-test/tests/outbound_api_linkerd.rs | 1 + 32 files changed, 3357 insertions(+), 564 deletions(-) create mode 100644 policy-controller/grpc/src/outbound/tcp.rs create mode 100644 policy-controller/grpc/src/outbound/tls.rs create mode 100644 policy-controller/k8s/index/src/outbound/index/egress_network.rs 
create mode 100644 policy-controller/k8s/index/src/outbound/index/tcp.rs create mode 100644 policy-controller/k8s/index/src/outbound/index/tls.rs create mode 100644 policy-controller/k8s/index/src/outbound/tests/routes/tcp.rs create mode 100644 policy-controller/k8s/index/src/outbound/tests/routes/tls.rs diff --git a/Cargo.lock b/Cargo.lock index 87840eb7d6cac..f7414c5abbace 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1325,8 +1325,7 @@ dependencies = [ [[package]] name = "linkerd2-proxy-api" version = "0.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26c72fb98d969e1e94e95d52a6fcdf4693764702c369e577934256e72fb5bc61" +source = "git+https://github.com/linkerd/linkerd2-proxy-api?branch=zd/add-error-type-to-tls-and-tcp-route#d7385b43d087da05ef4356e51d81be0a1a736a14" dependencies = [ "http", "ipnet", diff --git a/Cargo.toml b/Cargo.toml index 2bd3689b03f4d..c2d19445f2243 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -12,3 +12,6 @@ members = [ [profile.release] lto = "thin" + +[patch.crates-io] +linkerd2-proxy-api = { git = 'https://github.com/linkerd/linkerd2-proxy-api', branch = 'zd/add-error-type-to-tls-and-tcp-route' } \ No newline at end of file diff --git a/policy-controller/core/src/outbound.rs b/policy-controller/core/src/outbound.rs index c429b0e218a99..aa1e7d102ef0f 100644 --- a/policy-controller/core/src/outbound.rs +++ b/policy-controller/core/src/outbound.rs @@ -6,7 +6,16 @@ use ahash::AHashMap as HashMap; use anyhow::Result; use chrono::{offset::Utc, DateTime}; use futures::prelude::*; -use std::{net::IpAddr, num::NonZeroU16, pin::Pin, time}; +use std::{ + net::{IpAddr, SocketAddr}, + num::NonZeroU16, + pin::Pin, + time, +}; + +pub trait Route { + fn creation_timestamp(&self) -> Option>; +} /// Models outbound policy discovery. 
#[async_trait::async_trait] @@ -22,20 +31,40 @@ pub type OutboundPolicyStream = Pin + Send pub type HttpRoute = OutboundRoute; pub type GrpcRoute = OutboundRoute; + pub type RouteSet = HashMap; +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub enum TrafficPolicy { + Allow, + Deny, +} + +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub enum Kind { + EgressNetwork { + original_dst: SocketAddr, + traffic_policy: TrafficPolicy, + }, + Service, +} + +#[derive(Clone, Debug)] pub struct OutboundDiscoverTarget { - pub service_name: String, - pub service_namespace: String, - pub service_port: NonZeroU16, + pub name: String, + pub namespace: String, + pub port: NonZeroU16, pub source_namespace: String, + pub kind: Kind, } #[derive(Clone, Debug, PartialEq)] pub struct OutboundPolicy { pub http_routes: RouteSet, pub grpc_routes: RouteSet, - pub authority: String, + pub tls_routes: RouteSet, + pub tcp_routes: RouteSet, + pub service_authority: String, pub name: String, pub namespace: String, pub port: NonZeroU16, @@ -56,6 +85,24 @@ pub struct OutboundRoute { pub creation_timestamp: Option>, } +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct TlsRoute { + pub hostnames: Vec, + pub rule: TcpRouteRule, + /// This is required for ordering returned routes + /// by their creation timestamp. + pub creation_timestamp: Option>, +} + +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct TcpRoute { + pub rule: TcpRouteRule, + + /// This is required for ordering returned routes + /// by their creation timestamp. 
+ pub creation_timestamp: Option>, +} + #[derive(Clone, Debug, PartialEq, Eq)] pub struct OutboundRouteRule { pub matches: Vec, @@ -65,10 +112,16 @@ pub struct OutboundRouteRule { pub filters: Vec, } +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct TcpRouteRule { + pub backends: Vec, +} + #[derive(Clone, Debug, PartialEq, Eq)] pub enum Backend { Addr(WeightedAddr), Service(WeightedService), + EgressNetwork(WeightedEgressNetwork), Invalid { weight: u32, message: String }, } @@ -90,6 +143,16 @@ pub struct WeightedService { pub exists: bool, } +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct WeightedEgressNetwork { + pub weight: u32, + pub name: String, + pub namespace: String, + pub port: Option, + pub filters: Vec, + pub exists: bool, +} + #[derive(Copy, Clone, Debug, PartialEq)] pub enum FailureAccrual { Consecutive { max_failures: u32, backoff: Backoff }, @@ -138,3 +201,21 @@ pub enum GrpcRetryCondition { Internal, Unavailable, } + +impl Route for OutboundRoute { + fn creation_timestamp(&self) -> Option> { + self.creation_timestamp + } +} + +impl Route for TcpRoute { + fn creation_timestamp(&self) -> Option> { + self.creation_timestamp + } +} + +impl Route for TlsRoute { + fn creation_timestamp(&self) -> Option> { + self.creation_timestamp + } +} diff --git a/policy-controller/grpc/src/outbound.rs b/policy-controller/grpc/src/outbound.rs index 7a8d69fb1893a..7fb3261b497b4 100644 --- a/policy-controller/grpc/src/outbound.rs +++ b/policy-controller/grpc/src/outbound.rs @@ -4,8 +4,8 @@ use crate::workload; use futures::prelude::*; use http_crate::uri::Authority; use linkerd2_proxy_api::{ - self as api, - meta::{metadata, Metadata}, + self as api, destination, + meta::{metadata, Metadata, Resource}, outbound::{ self, outbound_policies_server::{OutboundPolicies, OutboundPoliciesServer}, @@ -13,8 +13,8 @@ use linkerd2_proxy_api::{ }; use linkerd_policy_controller_core::{ outbound::{ - DiscoverOutboundPolicy, OutboundDiscoverTarget, OutboundPolicy, 
OutboundPolicyStream, - OutboundRoute, + DiscoverOutboundPolicy, Kind, OutboundDiscoverTarget, OutboundPolicy, OutboundPolicyStream, + Route, WeightedEgressNetwork, WeightedService, }, routes::GroupKindNamespaceName, }; @@ -22,6 +22,8 @@ use std::{num::NonZeroU16, str::FromStr, sync::Arc, time}; mod grpc; mod http; +mod tcp; +mod tls; #[derive(Clone, Debug)] pub struct OutboundPolicyServer { @@ -62,14 +64,15 @@ where let target = match target { outbound::traffic_spec::Target::Addr(target) => target, outbound::traffic_spec::Target::Authority(auth) => { - return self.lookup_authority(&auth).map( - |(service_namespace, service_name, service_port)| OutboundDiscoverTarget { - service_name, - service_namespace, - service_port, + return self.lookup_authority(&auth).map(|(namespace, name, port)| { + OutboundDiscoverTarget { + kind: Kind::Service, + name, + namespace, + port, source_namespace, - }, - ) + } + }) } }; @@ -146,19 +149,19 @@ where req: tonic::Request, ) -> Result, tonic::Status> { let service = self.lookup(req.into_inner())?; - let policy = self .index - .get_outbound_policy(service) + .get_outbound_policy(service.clone()) .await .map_err(|error| { tonic::Status::internal(format!("failed to get outbound policy: {error}")) })?; if let Some(policy) = policy { - Ok(tonic::Response::new(to_service( + Ok(tonic::Response::new(to_proto( policy, self.allow_l5d_request_headers, + service, ))) } else { Err(tonic::Status::not_found("No such policy")) @@ -172,11 +175,12 @@ where req: tonic::Request, ) -> Result, tonic::Status> { let service = self.lookup(req.into_inner())?; + let drain = self.drain.clone(); let rx = self .index - .watch_outbound_policy(service) + .watch_outbound_policy(service.clone()) .await .map_err(|e| tonic::Status::internal(format!("lookup failed: {e}")))? 
.ok_or_else(|| tonic::Status::not_found("unknown server"))?; @@ -184,6 +188,7 @@ where drain, rx, self.allow_l5d_request_headers, + service, ))) } } @@ -196,6 +201,7 @@ fn response_stream( drain: drain::Watch, mut rx: OutboundPolicyStream, allow_l5d_request_headers: bool, + target: OutboundDiscoverTarget, ) -> BoxWatchStream { Box::pin(async_stream::try_stream! { tokio::pin! { @@ -207,7 +213,7 @@ fn response_stream( // When the port is updated with a new server, update the server watch. res = rx.next() => match res { Some(policy) => { - yield to_service(policy, allow_l5d_request_headers); + yield to_proto(policy, allow_l5d_request_headers, target.clone()); } None => return, }, @@ -222,15 +228,23 @@ fn response_stream( }) } -fn to_service( +fn no_explicit_routes(outbound: &OutboundPolicy) -> bool { + outbound.http_routes.is_empty() + && outbound.grpc_routes.is_empty() + && outbound.tls_routes.is_empty() + && outbound.tcp_routes.is_empty() +} + +fn to_proto( outbound: OutboundPolicy, allow_l5d_request_headers: bool, + target: OutboundDiscoverTarget, ) -> outbound::OutboundPolicy { - let backend: outbound::Backend = default_backend(&outbound); + let backend: outbound::Backend = default_backend(&outbound, target.kind); let kind = if outbound.opaque { outbound::proxy_protocol::Kind::Opaque(outbound::proxy_protocol::Opaque { - routes: vec![default_outbound_opaq_route(backend)], + routes: vec![default_outbound_opaq_route(backend, target.kind)], }) } else { let accrual = outbound.accrual.map(|accrual| outbound::FailureAccrual { @@ -251,20 +265,9 @@ fn to_service( }), }); - let mut http_routes = outbound.http_routes.into_iter().collect::>(); - let mut grpc_routes = outbound.grpc_routes.into_iter().collect::>(); - - if !grpc_routes.is_empty() { - grpc_routes.sort_by(timestamp_then_name); - grpc::protocol( - backend, - grpc_routes.into_iter(), - accrual, - outbound.grpc_retry, - outbound.timeouts, - allow_l5d_request_headers, - ) - } else { + // if we have no explicit routes 
attached to the parent, always attempt protocol detection + if no_explicit_routes(&outbound) { + let mut http_routes = outbound.http_routes.into_iter().collect::>(); http_routes.sort_by(timestamp_then_name); http::protocol( backend, @@ -273,14 +276,55 @@ fn to_service( outbound.http_retry, outbound.timeouts, allow_l5d_request_headers, + target.clone(), ) + } else { + let mut grpc_routes = outbound.grpc_routes.into_iter().collect::>(); + let mut http_routes = outbound.http_routes.into_iter().collect::>(); + let mut tls_routes = outbound.tls_routes.into_iter().collect::>(); + let mut tcp_routes = outbound.tcp_routes.into_iter().collect::>(); + + if !grpc_routes.is_empty() { + grpc_routes.sort_by(timestamp_then_name); + grpc::protocol( + backend, + grpc_routes.into_iter(), + accrual, + outbound.grpc_retry, + outbound.timeouts, + allow_l5d_request_headers, + target.clone(), + ) + } else if !http_routes.is_empty() { + http_routes.sort_by(timestamp_then_name); + http::protocol( + backend, + http_routes.into_iter(), + accrual, + outbound.http_retry, + outbound.timeouts, + allow_l5d_request_headers, + target.clone(), + ) + } else if !tls_routes.is_empty() { + tls_routes.sort_by(timestamp_then_name); + tls::protocol(backend, tls_routes.into_iter(), target.clone()) + } else { + tcp_routes.sort_by(timestamp_then_name); + tcp::protocol(backend, tcp_routes.into_iter(), target.clone()) + } } }; + let (parent_group, parent_kind) = match target.kind { + Kind::EgressNetwork { .. 
} => ("policy.linkerd.io", "EgressNetwork"), + Kind::Service => ("core", "Service"), + }; + let metadata = Metadata { kind: Some(metadata::Kind::Resource(api::meta::Resource { - group: "core".to_string(), - kind: "Service".to_string(), + group: parent_group.into(), + kind: parent_kind.into(), namespace: outbound.namespace, name: outbound.name, port: u16::from(outbound.port).into(), @@ -294,13 +338,13 @@ fn to_service( } } -fn timestamp_then_name( - (left_id, left_route): &(GroupKindNamespaceName, OutboundRoute), - (right_id, right_route): &(GroupKindNamespaceName, OutboundRoute), +fn timestamp_then_name( + (left_id, left_route): &(GroupKindNamespaceName, L), + (right_id, right_route): &(GroupKindNamespaceName, R), ) -> std::cmp::Ordering { let by_ts = match ( - &left_route.creation_timestamp, - &right_route.creation_timestamp, + &left_route.creation_timestamp(), + &right_route.creation_timestamp(), ) { (Some(left_ts), Some(right_ts)) => left_ts.cmp(right_ts), (None, None) => std::cmp::Ordering::Equal, @@ -312,50 +356,87 @@ fn timestamp_then_name( by_ts.then_with(|| left_id.name.cmp(&right_id.name)) } -fn default_backend(outbound: &OutboundPolicy) -> outbound::Backend { - outbound::Backend { - metadata: Some(Metadata { - kind: Some(metadata::Kind::Resource(api::meta::Resource { - group: "core".to_string(), - kind: "Service".to_string(), - name: outbound.name.clone(), - namespace: outbound.namespace.clone(), - section: Default::default(), - port: u16::from(outbound.port).into(), - })), - }), - queue: Some(default_queue_config()), - kind: Some(outbound::backend::Kind::Balancer( - outbound::backend::BalanceP2c { - discovery: Some(outbound::backend::EndpointDiscovery { - kind: Some(outbound::backend::endpoint_discovery::Kind::Dst( - outbound::backend::endpoint_discovery::DestinationGet { - path: outbound.authority.clone(), +fn default_backend(outbound: &OutboundPolicy, parent_kind: Kind) -> outbound::Backend { + match parent_kind { + Kind::Service => outbound::Backend { 
+ metadata: Some(Metadata { + kind: Some(metadata::Kind::Resource(api::meta::Resource { + group: "core".to_string(), + kind: "Service".to_string(), + name: outbound.name.clone(), + namespace: outbound.namespace.clone(), + section: Default::default(), + port: u16::from(outbound.port).into(), + })), + }), + queue: Some(default_queue_config()), + kind: Some(outbound::backend::Kind::Balancer( + outbound::backend::BalanceP2c { + discovery: Some(outbound::backend::EndpointDiscovery { + kind: Some(outbound::backend::endpoint_discovery::Kind::Dst( + outbound::backend::endpoint_discovery::DestinationGet { + path: outbound.service_authority.clone(), + }, + )), + }), + load: Some(default_balancer_config()), + }, + )), + }, + Kind::EgressNetwork { original_dst, .. } => outbound::Backend { + metadata: Some(Metadata { + kind: Some(metadata::Kind::Resource(api::meta::Resource { + group: "policy.linkerd.io".to_string(), + kind: "EgressNetwork".to_string(), + name: outbound.name.clone(), + namespace: outbound.namespace.clone(), + section: Default::default(), + port: u16::from(outbound.port).into(), + })), + }), + queue: Some(default_queue_config()), + kind: Some(outbound::backend::Kind::Forward( + destination::WeightedAddr { + addr: Some(original_dst.into()), + weight: 1, + ..Default::default() + }, + )), + }, + } +} + +fn default_outbound_opaq_route( + backend: outbound::Backend, + parent_kind: Kind, +) -> outbound::OpaqueRoute { + match parent_kind { + Kind::EgressNetwork { traffic_policy, .. 
} => { + tcp::default_outbound_egress_route(backend, traffic_policy) + } + Kind::Service => { + let metadata = Some(Metadata { + kind: Some(metadata::Kind::Default("opaq".to_string())), + }); + let rules = vec![outbound::opaque_route::Rule { + backends: Some(outbound::opaque_route::Distribution { + kind: Some(outbound::opaque_route::distribution::Kind::FirstAvailable( + outbound::opaque_route::distribution::FirstAvailable { + backends: vec![outbound::opaque_route::RouteBackend { + backend: Some(backend), + }], }, )), }), - load: Some(default_balancer_config()), - }, - )), - } -} + }]; -fn default_outbound_opaq_route(backend: outbound::Backend) -> outbound::OpaqueRoute { - let metadata = Some(Metadata { - kind: Some(metadata::Kind::Default("opaq".to_string())), - }); - let rules = vec![outbound::opaque_route::Rule { - backends: Some(outbound::opaque_route::Distribution { - kind: Some(outbound::opaque_route::distribution::Kind::FirstAvailable( - outbound::opaque_route::distribution::FirstAvailable { - backends: vec![outbound::opaque_route::RouteBackend { - backend: Some(backend), - }], - }, - )), - }), - }]; - outbound::OpaqueRoute { metadata, rules } + outbound::OpaqueRoute { + metadata, + rules, + error: None, + } + } + } } fn default_balancer_config() -> outbound::backend::balance_p2c::Load { @@ -395,3 +476,38 @@ pub(crate) fn convert_duration( }) .ok() } + +pub(crate) fn service_meta(svc: WeightedService) -> Metadata { + Metadata { + kind: Some(metadata::Kind::Resource(Resource { + group: "core".to_string(), + kind: "Service".to_string(), + name: svc.name, + namespace: svc.namespace, + section: Default::default(), + port: u16::from(svc.port).into(), + })), + } +} + +pub(crate) fn egress_net_meta( + egress_net: WeightedEgressNetwork, + original_dst_port: Option, +) -> Metadata { + let port = egress_net + .port + .map(NonZeroU16::get) + .or(original_dst_port) + .unwrap_or_default(); + + Metadata { + kind: Some(metadata::Kind::Resource(Resource { + group: 
"policy.linkerd.io".to_string(), + kind: "EgressNetwork".to_string(), + name: egress_net.name, + namespace: egress_net.namespace, + section: Default::default(), + port: port.into(), + })), + } +} diff --git a/policy-controller/grpc/src/outbound/grpc.rs b/policy-controller/grpc/src/outbound/grpc.rs index 472eb714daea0..91d6fc7d0e9ec 100644 --- a/policy-controller/grpc/src/outbound/grpc.rs +++ b/policy-controller/grpc/src/outbound/grpc.rs @@ -5,8 +5,8 @@ use crate::routes::{ use linkerd2_proxy_api::{destination, grpc_route, http_route, meta, outbound}; use linkerd_policy_controller_core::{ outbound::{ - Backend, Filter, GrpcRetryCondition, GrpcRoute, OutboundRoute, OutboundRouteRule, - RouteRetry, RouteTimeouts, + Backend, Filter, GrpcRetryCondition, GrpcRoute, Kind, OutboundDiscoverTarget, + OutboundRoute, OutboundRouteRule, RouteRetry, RouteTimeouts, TrafficPolicy, }, routes::{FailureInjectorFilter, GroupKindNamespaceName}, }; @@ -19,8 +19,9 @@ pub(crate) fn protocol( service_retry: Option>, service_timeouts: RouteTimeouts, allow_l5d_request_headers: bool, + target: OutboundDiscoverTarget, ) -> outbound::proxy_protocol::Kind { - let routes = routes + let mut routes = routes .map(|(gknn, route)| { convert_outbound_route( gknn, @@ -29,9 +30,20 @@ pub(crate) fn protocol( service_retry.clone(), service_timeouts.clone(), allow_l5d_request_headers, + target.clone(), ) }) .collect::>(); + + if let Kind::EgressNetwork { traffic_policy, .. } = target.kind { + routes.push(default_outbound_egress_route( + default_backend, + service_retry, + service_timeouts, + traffic_policy, + )); + } + outbound::proxy_protocol::Kind::Grpc(outbound::proxy_protocol::Grpc { routes, failure_accrual, @@ -49,6 +61,7 @@ fn convert_outbound_route( service_retry: Option>, service_timeouts: RouteTimeouts, allow_l5d_request_headers: bool, + target: OutboundDiscoverTarget, ) -> outbound::GrpcRoute { // This encoder sets deprecated timeouts for older proxies. 
#![allow(deprecated)] @@ -77,7 +90,7 @@ fn convert_outbound_route( }| { let backends = backends .into_iter() - .map(convert_backend) + .map(|b| convert_backend(b, target.clone())) .collect::>(); let dist = if backends.is_empty() { outbound::grpc_route::distribution::Kind::FirstAvailable( @@ -158,7 +171,15 @@ fn convert_outbound_route( } } -fn convert_backend(backend: Backend) -> outbound::grpc_route::WeightedRouteBackend { +fn convert_backend( + backend: Backend, + target: OutboundDiscoverTarget, +) -> outbound::grpc_route::WeightedRouteBackend { + let original_dst_port = match target.kind { + Kind::EgressNetwork { original_dst, .. } => Some(original_dst.port()), + Kind::Service => None, + }; + match backend { Backend::Addr(addr) => { let socket_addr = SocketAddr::new(addr.addr, addr.port.get()); @@ -181,88 +202,214 @@ fn convert_backend(backend: Backend) -> outbound::grpc_route::WeightedRouteBacke }), } } - Backend::Service(svc) => { - if svc.exists { - let filters = svc.filters.into_iter().map(convert_to_filter).collect(); - outbound::grpc_route::WeightedRouteBackend { - weight: svc.weight, - backend: Some(outbound::grpc_route::RouteBackend { - backend: Some(outbound::Backend { - metadata: Some(meta::Metadata { - kind: Some(meta::metadata::Kind::Resource(meta::Resource { - group: "core".to_string(), - kind: "Service".to_string(), - name: svc.name, - namespace: svc.namespace, - section: Default::default(), - port: u16::from(svc.port).into(), - })), - }), - queue: Some(default_queue_config()), - kind: Some(outbound::backend::Kind::Balancer( - outbound::backend::BalanceP2c { - discovery: Some(outbound::backend::EndpointDiscovery { - kind: Some(outbound::backend::endpoint_discovery::Kind::Dst( - outbound::backend::endpoint_discovery::DestinationGet { - path: svc.authority, - }, - )), - }), - load: Some(default_balancer_config()), - }, - )), - }), - filters, - ..Default::default() + Backend::Service(svc) if svc.exists => { + let filters = svc + .filters + .clone() + 
.into_iter() + .map(convert_to_filter) + .collect(); + outbound::grpc_route::WeightedRouteBackend { + weight: svc.weight, + backend: Some(outbound::grpc_route::RouteBackend { + backend: Some(outbound::Backend { + metadata: Some(super::service_meta(svc.clone())), + queue: Some(default_queue_config()), + kind: Some(outbound::backend::Kind::Balancer( + outbound::backend::BalanceP2c { + discovery: Some(outbound::backend::EndpointDiscovery { + kind: Some(outbound::backend::endpoint_discovery::Kind::Dst( + outbound::backend::endpoint_discovery::DestinationGet { + path: svc.authority, + }, + )), + }), + load: Some(default_balancer_config()), + }, + )), }), - } - } else { - outbound::grpc_route::WeightedRouteBackend { - weight: svc.weight, - backend: Some(outbound::grpc_route::RouteBackend { - backend: Some(outbound::Backend { - metadata: Some(meta::Metadata { - kind: Some(meta::metadata::Kind::Default("invalid".to_string())), + filters, + ..Default::default() + }), + } + } + Backend::Service(svc) => invalid_backend( + svc.weight, + format!("Service not found {}", svc.name), + super::service_meta(svc), + ), + Backend::EgressNetwork(egress_net) if egress_net.exists => match target.kind { + Kind::EgressNetwork { original_dst, .. 
} => { + if target.name == egress_net.name && target.namespace == egress_net.namespace { + let filters = egress_net + .filters + .clone() + .into_iter() + .map(convert_to_filter) + .collect(); + + outbound::grpc_route::WeightedRouteBackend { + weight: egress_net.weight, + backend: Some(outbound::grpc_route::RouteBackend { + backend: Some(outbound::Backend { + metadata: Some(super::egress_net_meta( + egress_net.clone(), + original_dst_port, + )), + queue: Some(default_queue_config()), + kind: Some(outbound::backend::Kind::Forward( + destination::WeightedAddr { + addr: Some(original_dst.into()), + weight: egress_net.weight, + ..Default::default() + }, + )), }), - queue: Some(default_queue_config()), - kind: None, + filters, + ..Default::default() }), - filters: vec![outbound::grpc_route::Filter { - kind: Some(outbound::grpc_route::filter::Kind::FailureInjector( - grpc_route::GrpcFailureInjector { - code: 500, - message: format!("Service not found {}", svc.name), - ratio: None, - }, - )), - }], - ..Default::default() - }), + } + } else { + let weight = egress_net.weight; + let message = "Route with EgressNetwork backend needs to have the same EgressNetwork as a parent".to_string(); + invalid_backend( + weight, + message, + super::egress_net_meta(egress_net, original_dst_port), + ) } } - } - Backend::Invalid { weight, message } => outbound::grpc_route::WeightedRouteBackend { + Kind::Service => invalid_backend( + egress_net.weight, + "EgressNetwork backends attach to EgressNetwork parents only".to_string(), + super::egress_net_meta(egress_net, original_dst_port), + ), + }, + Backend::EgressNetwork(egress_net) => invalid_backend( + egress_net.weight, + format!("EgressNetwork not found {}", egress_net.name), + super::egress_net_meta(egress_net, original_dst_port), + ), + Backend::Invalid { weight, message } => invalid_backend( weight, - backend: Some(outbound::grpc_route::RouteBackend { - backend: Some(outbound::Backend { - metadata: Some(meta::Metadata { - kind: 
Some(meta::metadata::Kind::Default("invalid".to_string())), - }), - queue: Some(default_queue_config()), - kind: None, - }), - filters: vec![outbound::grpc_route::Filter { - kind: Some(outbound::grpc_route::filter::Kind::FailureInjector( - grpc_route::GrpcFailureInjector { - code: 500, - message, - ratio: None, - }, - )), - }], - ..Default::default() + message, + meta::Metadata { + kind: Some(meta::metadata::Kind::Default("invalid".to_string())), + }, + ), + } +} + +fn invalid_backend( + weight: u32, + message: String, + meta: meta::Metadata, +) -> outbound::grpc_route::WeightedRouteBackend { + outbound::grpc_route::WeightedRouteBackend { + weight, + backend: Some(outbound::grpc_route::RouteBackend { + backend: Some(outbound::Backend { + metadata: Some(meta), + queue: Some(default_queue_config()), + kind: None, }), - }, + filters: vec![outbound::grpc_route::Filter { + kind: Some(outbound::grpc_route::filter::Kind::FailureInjector( + grpc_route::GrpcFailureInjector { + code: 500, + message, + ratio: None, + }, + )), + }], + ..Default::default() + }), + } +} + +pub(crate) fn default_outbound_egress_route( + backend: outbound::Backend, + service_retry: Option>, + service_timeouts: RouteTimeouts, + traffic_policy: TrafficPolicy, +) -> outbound::GrpcRoute { + #![allow(deprecated)] + let (filters, name) = match traffic_policy { + TrafficPolicy::Allow => (Vec::default(), "grpc-egress-allow"), + TrafficPolicy::Deny => ( + vec![outbound::grpc_route::Filter { + kind: Some(outbound::grpc_route::filter::Kind::FailureInjector( + grpc_route::GrpcFailureInjector { + code: 7, + message: "traffic not allowed".to_string(), + ratio: None, + }, + )), + }], + "grpc-egress-deny", + ), + }; + + // This encoder sets deprecated timeouts for older proxies. 
+ let metadata = Some(meta::Metadata { + kind: Some(meta::metadata::Kind::Default(name.to_string())), + }); + let rules = vec![outbound::grpc_route::Rule { + matches: vec![grpc_route::GrpcRouteMatch::default()], + backends: Some(outbound::grpc_route::Distribution { + kind: Some(outbound::grpc_route::distribution::Kind::FirstAvailable( + outbound::grpc_route::distribution::FirstAvailable { + backends: vec![outbound::grpc_route::RouteBackend { + backend: Some(backend), + ..Default::default() + }], + }, + )), + }), + request_timeout: service_timeouts + .request + .and_then(|d| convert_duration("request timeout", d)), + timeouts: Some(http_route::Timeouts { + request: service_timeouts + .request + .and_then(|d| convert_duration("stream timeout", d)), + idle: service_timeouts + .idle + .and_then(|d| convert_duration("idle timeout", d)), + response: service_timeouts + .response + .and_then(|d| convert_duration("response timeout", d)), + }), + retry: service_retry.map(|r| outbound::grpc_route::Retry { + max_retries: r.limit.into(), + max_request_bytes: 64 * 1024, + backoff: Some(outbound::ExponentialBackoff { + min_backoff: Some(time::Duration::from_millis(25).try_into().unwrap()), + max_backoff: Some(time::Duration::from_millis(250).try_into().unwrap()), + jitter_ratio: 1.0, + }), + conditions: Some(r.conditions.iter().flatten().fold( + outbound::grpc_route::retry::Conditions::default(), + |mut cond, c| { + match c { + GrpcRetryCondition::Cancelled => cond.cancelled = true, + GrpcRetryCondition::DeadlineExceeded => cond.deadine_exceeded = true, + GrpcRetryCondition::Internal => cond.internal = true, + GrpcRetryCondition::ResourceExhausted => cond.resource_exhausted = true, + GrpcRetryCondition::Unavailable => cond.unavailable = true, + }; + cond + }, + )), + timeout: r.timeout.and_then(|d| convert_duration("retry timeout", d)), + }), + filters, + ..Default::default() + }]; + outbound::GrpcRoute { + metadata, + rules, + ..Default::default() } } diff --git 
a/policy-controller/grpc/src/outbound/http.rs b/policy-controller/grpc/src/outbound/http.rs index 75410081e1e8e..c8d0969aa4cea 100644 --- a/policy-controller/grpc/src/outbound/http.rs +++ b/policy-controller/grpc/src/outbound/http.rs @@ -9,8 +9,8 @@ use crate::routes::{ use linkerd2_proxy_api::{destination, http_route, meta, outbound}; use linkerd_policy_controller_core::{ outbound::{ - Backend, Filter, HttpRetryCondition, HttpRoute, OutboundRouteRule, RouteRetry, - RouteTimeouts, + Backend, Filter, HttpRetryCondition, HttpRoute, Kind, OutboundDiscoverTarget, + OutboundRouteRule, RouteRetry, RouteTimeouts, TrafficPolicy, }, routes::GroupKindNamespaceName, }; @@ -23,8 +23,9 @@ pub(crate) fn protocol( service_retry: Option>, service_timeouts: RouteTimeouts, allow_l5d_request_headers: bool, + target: OutboundDiscoverTarget, ) -> outbound::proxy_protocol::Kind { - let opaque_route = default_outbound_opaq_route(default_backend.clone()); + let opaque_route = default_outbound_opaq_route(default_backend.clone(), target.kind); let mut routes = routes .map(|(gknn, route)| { convert_outbound_route( @@ -34,16 +35,31 @@ pub(crate) fn protocol( service_retry.clone(), service_timeouts.clone(), allow_l5d_request_headers, + target.clone(), ) }) .collect::>(); - if routes.is_empty() { - routes.push(default_outbound_route( - default_backend, - service_retry.clone(), - service_timeouts.clone(), - )); + + match target.kind { + Kind::Service => { + if routes.is_empty() { + routes.push(default_outbound_service_route( + default_backend, + service_retry.clone(), + service_timeouts.clone(), + )); + } + } + Kind::EgressNetwork { traffic_policy, .. 
} => { + routes.push(default_outbound_egress_route( + default_backend, + service_retry.clone(), + service_timeouts.clone(), + traffic_policy, + )); + } } + outbound::proxy_protocol::Kind::Detect(outbound::proxy_protocol::Detect { timeout: Some( time::Duration::from_secs(10) @@ -76,10 +92,10 @@ fn convert_outbound_route( service_retry: Option>, service_timeouts: RouteTimeouts, allow_l5d_request_headers: bool, + target: OutboundDiscoverTarget, ) -> outbound::HttpRoute { // This encoder sets deprecated timeouts for older proxies. #![allow(deprecated)] - let metadata = Some(meta::Metadata { kind: Some(meta::metadata::Kind::Resource(meta::Resource { group: gknn.group.to_string(), @@ -104,7 +120,7 @@ fn convert_outbound_route( }| { let backends = backends .into_iter() - .map(convert_backend) + .map(|b| convert_backend(b, target.clone())) .collect::>(); let dist = if backends.is_empty() { outbound::http_route::distribution::Kind::FirstAvailable( @@ -149,7 +165,15 @@ fn convert_outbound_route( } } -fn convert_backend(backend: Backend) -> outbound::http_route::WeightedRouteBackend { +fn convert_backend( + backend: Backend, + target: OutboundDiscoverTarget, +) -> outbound::http_route::WeightedRouteBackend { + let original_dst_port = match target.kind { + Kind::EgressNetwork { original_dst, .. 
} => Some(original_dst.port()), + Kind::Service => None, + }; + match backend { Backend::Addr(addr) => { let socket_addr = SocketAddr::new(addr.addr, addr.port.get()); @@ -172,92 +196,132 @@ fn convert_backend(backend: Backend) -> outbound::http_route::WeightedRouteBacke }), } } - Backend::Service(svc) => { - if svc.exists { - let filters = svc.filters.into_iter().map(convert_to_filter).collect(); - outbound::http_route::WeightedRouteBackend { - weight: svc.weight, - backend: Some(outbound::http_route::RouteBackend { - backend: Some(outbound::Backend { - metadata: Some(meta::Metadata { - kind: Some(meta::metadata::Kind::Resource(meta::Resource { - group: "core".to_string(), - kind: "Service".to_string(), - name: svc.name, - namespace: svc.namespace, - section: Default::default(), - port: u16::from(svc.port).into(), - })), - }), - queue: Some(default_queue_config()), - kind: Some(outbound::backend::Kind::Balancer( - outbound::backend::BalanceP2c { - discovery: Some(outbound::backend::EndpointDiscovery { - kind: Some(outbound::backend::endpoint_discovery::Kind::Dst( - outbound::backend::endpoint_discovery::DestinationGet { - path: svc.authority, - }, - )), - }), - load: Some(default_balancer_config()), - }, - )), - }), - filters, - ..Default::default() + Backend::Service(svc) if svc.exists => { + let filters = svc + .filters + .clone() + .into_iter() + .map(convert_to_filter) + .collect(); + outbound::http_route::WeightedRouteBackend { + weight: svc.weight, + backend: Some(outbound::http_route::RouteBackend { + backend: Some(outbound::Backend { + metadata: Some(super::service_meta(svc.clone())), + queue: Some(default_queue_config()), + kind: Some(outbound::backend::Kind::Balancer( + outbound::backend::BalanceP2c { + discovery: Some(outbound::backend::EndpointDiscovery { + kind: Some(outbound::backend::endpoint_discovery::Kind::Dst( + outbound::backend::endpoint_discovery::DestinationGet { + path: svc.authority, + }, + )), + }), + load: 
Some(default_balancer_config()), + }, + )), }), - } - } else { - outbound::http_route::WeightedRouteBackend { - weight: svc.weight, - backend: Some(outbound::http_route::RouteBackend { - backend: Some(outbound::Backend { - metadata: Some(meta::Metadata { - kind: Some(meta::metadata::Kind::Default("invalid".to_string())), + filters, + ..Default::default() + }), + } + } + Backend::Service(svc) => invalid_backend( + svc.weight, + format!("Service not found {}", svc.name), + super::service_meta(svc), + ), + Backend::EgressNetwork(egress_net) if egress_net.exists => match target.kind { + Kind::EgressNetwork { original_dst, .. } => { + if target.name == egress_net.name && target.namespace == egress_net.namespace { + let filters = egress_net + .filters + .clone() + .into_iter() + .map(convert_to_filter) + .collect(); + + outbound::http_route::WeightedRouteBackend { + weight: egress_net.weight, + backend: Some(outbound::http_route::RouteBackend { + backend: Some(outbound::Backend { + metadata: Some(super::egress_net_meta( + egress_net.clone(), + original_dst_port, + )), + queue: Some(default_queue_config()), + kind: Some(outbound::backend::Kind::Forward( + destination::WeightedAddr { + addr: Some(original_dst.into()), + weight: egress_net.weight, + ..Default::default() + }, + )), }), - queue: Some(default_queue_config()), - kind: None, + filters, + ..Default::default() }), - filters: vec![outbound::http_route::Filter { - kind: Some(outbound::http_route::filter::Kind::FailureInjector( - http_route::HttpFailureInjector { - status: 500, - message: format!("Service not found {}", svc.name), - ratio: None, - }, - )), - }], - ..Default::default() - }), + } + } else { + let weight = egress_net.weight; + let message = "Route with EgressNetwork backend needs to have the same EgressNetwork as a parent".to_string(); + invalid_backend( + weight, + message, + super::egress_net_meta(egress_net, original_dst_port), + ) } } - } - Backend::Invalid { weight, message } => 
outbound::http_route::WeightedRouteBackend { + Kind::Service => invalid_backend( + egress_net.weight, + "EgressNetwork backends attach to EgressNetwork parents only".to_string(), + super::egress_net_meta(egress_net, original_dst_port), + ), + }, + Backend::EgressNetwork(egress_net) => invalid_backend( + egress_net.weight, + format!("EgressNetwork not found {}", egress_net.name), + super::egress_net_meta(egress_net, original_dst_port), + ), + Backend::Invalid { weight, message } => invalid_backend( weight, - backend: Some(outbound::http_route::RouteBackend { - backend: Some(outbound::Backend { - metadata: Some(meta::Metadata { - kind: Some(meta::metadata::Kind::Default("invalid".to_string())), - }), - queue: Some(default_queue_config()), - kind: None, - }), - filters: vec![outbound::http_route::Filter { - kind: Some(outbound::http_route::filter::Kind::FailureInjector( - http_route::HttpFailureInjector { - status: 500, - message, - ratio: None, - }, - )), - }], - ..Default::default() + message, + meta::Metadata { + kind: Some(meta::metadata::Kind::Default("invalid".to_string())), + }, + ), + } +} + +fn invalid_backend( + weight: u32, + message: String, + meta: meta::Metadata, +) -> outbound::http_route::WeightedRouteBackend { + outbound::http_route::WeightedRouteBackend { + weight, + backend: Some(outbound::http_route::RouteBackend { + backend: Some(outbound::Backend { + metadata: Some(meta), + queue: Some(default_queue_config()), + kind: None, }), - }, + filters: vec![outbound::http_route::Filter { + kind: Some(outbound::http_route::filter::Kind::FailureInjector( + http_route::HttpFailureInjector { + status: 500, + message, + ratio: None, + }, + )), + }], + ..Default::default() + }), } } -pub(crate) fn default_outbound_route( +pub(crate) fn default_outbound_service_route( backend: outbound::Backend, service_retry: Option>, service_timeouts: RouteTimeouts, @@ -299,6 +363,65 @@ pub(crate) fn default_outbound_route( } } +pub(crate) fn default_outbound_egress_route( + 
backend: outbound::Backend, + service_retry: Option>, + service_timeouts: RouteTimeouts, + traffic_policy: TrafficPolicy, +) -> outbound::HttpRoute { + #![allow(deprecated)] + let (filters, name) = match traffic_policy { + TrafficPolicy::Allow => (Vec::default(), "http-egress-allow"), + TrafficPolicy::Deny => ( + vec![outbound::http_route::Filter { + kind: Some(outbound::http_route::filter::Kind::FailureInjector( + http_route::HttpFailureInjector { + status: 403, + message: "traffic not allowed".to_string(), + ratio: None, + }, + )), + }], + "http-egress-deny", + ), + }; + + // This encoder sets deprecated timeouts for older proxies. + let metadata = Some(meta::Metadata { + kind: Some(meta::metadata::Kind::Default(name.to_string())), + }); + let rules = vec![outbound::http_route::Rule { + matches: vec![http_route::HttpRouteMatch { + path: Some(http_route::PathMatch { + kind: Some(http_route::path_match::Kind::Prefix("/".to_string())), + }), + ..Default::default() + }], + backends: Some(outbound::http_route::Distribution { + kind: Some(outbound::http_route::distribution::Kind::FirstAvailable( + outbound::http_route::distribution::FirstAvailable { + backends: vec![outbound::http_route::RouteBackend { + backend: Some(backend), + ..Default::default() + }], + }, + )), + }), + retry: service_retry.map(convert_retry), + request_timeout: service_timeouts + .request + .and_then(|d| convert_duration("request timeout", d)), + timeouts: Some(convert_timeouts(service_timeouts)), + filters, + ..Default::default() + }]; + outbound::HttpRoute { + metadata, + rules, + ..Default::default() + } +} + fn convert_to_filter(filter: Filter) -> outbound::http_route::Filter { use outbound::http_route::filter::Kind; diff --git a/policy-controller/grpc/src/outbound/tcp.rs b/policy-controller/grpc/src/outbound/tcp.rs new file mode 100644 index 0000000000000..fff83202699b8 --- /dev/null +++ b/policy-controller/grpc/src/outbound/tcp.rs @@ -0,0 +1,247 @@ +use super::{default_balancer_config, 
default_queue_config}; +use linkerd2_proxy_api::{destination, meta, outbound}; +use linkerd_policy_controller_core::{ + outbound::{Backend, Kind, OutboundDiscoverTarget, TcpRoute, TrafficPolicy}, + routes::GroupKindNamespaceName, +}; +use std::net::SocketAddr; + +pub(crate) fn protocol( + default_backend: outbound::Backend, + routes: impl Iterator, + target: OutboundDiscoverTarget, +) -> outbound::proxy_protocol::Kind { + let mut routes = routes + .map(|(gknn, route)| { + convert_outbound_route(gknn, route, default_backend.clone(), target.clone()) + }) + .collect::>(); + + if let Kind::EgressNetwork { traffic_policy, .. } = target.kind { + routes.push(default_outbound_egress_route( + default_backend, + traffic_policy, + )); + } + + outbound::proxy_protocol::Kind::Opaque(outbound::proxy_protocol::Opaque { routes }) +} + +fn convert_outbound_route( + gknn: GroupKindNamespaceName, + TcpRoute { + rule, + creation_timestamp: _, + }: TcpRoute, + backend: outbound::Backend, + target: OutboundDiscoverTarget, +) -> outbound::OpaqueRoute { + // This encoder sets deprecated timeouts for older proxies. 
+ #![allow(deprecated)] + + let metadata = Some(meta::Metadata { + kind: Some(meta::metadata::Kind::Resource(meta::Resource { + group: gknn.group.to_string(), + kind: gknn.kind.to_string(), + namespace: gknn.namespace.to_string(), + name: gknn.name.to_string(), + ..Default::default() + })), + }); + + let backends = rule + .backends + .into_iter() + .map(|b| convert_backend(b, target.clone())) + .collect::>(); + + let dist = if backends.is_empty() { + outbound::opaque_route::distribution::Kind::FirstAvailable( + outbound::opaque_route::distribution::FirstAvailable { + backends: vec![outbound::opaque_route::RouteBackend { + backend: Some(backend.clone()), + }], + }, + ) + } else { + outbound::opaque_route::distribution::Kind::RandomAvailable( + outbound::opaque_route::distribution::RandomAvailable { backends }, + ) + }; + + let rules = vec![outbound::opaque_route::Rule { + backends: Some(outbound::opaque_route::Distribution { kind: Some(dist) }), + }]; + + outbound::OpaqueRoute { + metadata, + rules, + error: None, + } +} + +fn convert_backend( + backend: Backend, + target: OutboundDiscoverTarget, +) -> outbound::opaque_route::WeightedRouteBackend { + let original_dst_port = match target.kind { + Kind::EgressNetwork { original_dst, .. 
} => Some(original_dst.port()), + Kind::Service => None, + }; + + match backend { + Backend::Addr(addr) => { + let socket_addr = SocketAddr::new(addr.addr, addr.port.get()); + outbound::opaque_route::WeightedRouteBackend { + weight: addr.weight, + backend: Some(outbound::opaque_route::RouteBackend { + backend: Some(outbound::Backend { + metadata: None, + queue: Some(default_queue_config()), + kind: Some(outbound::backend::Kind::Forward( + destination::WeightedAddr { + addr: Some(socket_addr.into()), + weight: addr.weight, + ..Default::default() + }, + )), + }), + }), + error: None, + } + } + Backend::Service(svc) if svc.exists => outbound::opaque_route::WeightedRouteBackend { + weight: svc.weight, + backend: Some(outbound::opaque_route::RouteBackend { + backend: Some(outbound::Backend { + metadata: Some(super::service_meta(svc.clone())), + queue: Some(default_queue_config()), + kind: Some(outbound::backend::Kind::Balancer( + outbound::backend::BalanceP2c { + discovery: Some(outbound::backend::EndpointDiscovery { + kind: Some(outbound::backend::endpoint_discovery::Kind::Dst( + outbound::backend::endpoint_discovery::DestinationGet { + path: svc.authority, + }, + )), + }), + load: Some(default_balancer_config()), + }, + )), + }), + }), + error: None, + }, + Backend::Service(svc) => invalid_backend( + svc.weight, + format!("Service not found {}", svc.name), + super::service_meta(svc), + ), + Backend::EgressNetwork(egress_net) if egress_net.exists => match target.kind { + Kind::EgressNetwork { original_dst, .. 
} => { + if target.name == egress_net.name && target.namespace == egress_net.namespace { + outbound::opaque_route::WeightedRouteBackend { + weight: egress_net.weight, + backend: Some(outbound::opaque_route::RouteBackend { + backend: Some(outbound::Backend { + metadata: Some(super::egress_net_meta( + egress_net.clone(), + original_dst_port, + )), + queue: Some(default_queue_config()), + kind: Some(outbound::backend::Kind::Forward( + destination::WeightedAddr { + addr: Some(original_dst.into()), + weight: egress_net.weight, + ..Default::default() + }, + )), + }), + }), + error: None, + } + } else { + let weight = egress_net.weight; + let message = "Route with EgressNetwork backend needs to have the same EgressNetwork as a parent".to_string(); + invalid_backend( + weight, + message, + super::egress_net_meta(egress_net, original_dst_port), + ) + } + } + Kind::Service { .. } => invalid_backend( + egress_net.weight, + "EgressNetwork backends attach to EgressNetwork parents only".to_string(), + super::egress_net_meta(egress_net, original_dst_port), + ), + }, + Backend::EgressNetwork(egress_net) => invalid_backend( + egress_net.weight, + format!("EgressNetwork not found {}", egress_net.name), + super::egress_net_meta(egress_net, original_dst_port), + ), + Backend::Invalid { weight, message } => invalid_backend( + weight, + message, + meta::Metadata { + kind: Some(meta::metadata::Kind::Default("invalid".to_string())), + }, + ), + } +} + +fn invalid_backend( + weight: u32, + message: String, + meta: meta::Metadata, +) -> outbound::opaque_route::WeightedRouteBackend { + outbound::opaque_route::WeightedRouteBackend { + weight, + backend: Some(outbound::opaque_route::RouteBackend { + backend: Some(outbound::Backend { + metadata: Some(meta), + queue: Some(default_queue_config()), + kind: None, + }), + }), + error: Some(outbound::BackendError { message }), + } +} + +pub(crate) fn default_outbound_egress_route( + backend: outbound::Backend, + traffic_policy: TrafficPolicy, +) -> 
outbound::OpaqueRoute { + #![allow(deprecated)] + let (error, name) = match traffic_policy { + TrafficPolicy::Allow => (None, "tcp-egress-allow"), + TrafficPolicy::Deny => ( + Some(outbound::RouteError { + message: "traffic not allowed".to_string(), + }), + "tcp-egress-deny", + ), + }; + + // This encoder sets deprecated timeouts for older proxies. + let metadata = Some(meta::Metadata { + kind: Some(meta::metadata::Kind::Default(name.to_string())), + }); + let rules = vec![outbound::opaque_route::Rule { + backends: Some(outbound::opaque_route::Distribution { + kind: Some(outbound::opaque_route::distribution::Kind::FirstAvailable( + outbound::opaque_route::distribution::FirstAvailable { + backends: vec![outbound::opaque_route::RouteBackend { + backend: Some(backend), + }], + }, + )), + }), + }]; + outbound::OpaqueRoute { + metadata, + rules, + error, + } +} diff --git a/policy-controller/grpc/src/outbound/tls.rs b/policy-controller/grpc/src/outbound/tls.rs new file mode 100644 index 0000000000000..fbbbda15328c2 --- /dev/null +++ b/policy-controller/grpc/src/outbound/tls.rs @@ -0,0 +1,253 @@ +use super::{default_balancer_config, default_queue_config}; +use crate::routes::convert_sni_match; +use linkerd2_proxy_api::{destination, meta, outbound}; +use linkerd_policy_controller_core::{ + outbound::{Backend, Kind, OutboundDiscoverTarget, TlsRoute, TrafficPolicy}, + routes::GroupKindNamespaceName, +}; +use std::net::SocketAddr; + +pub(crate) fn protocol( + default_backend: outbound::Backend, + routes: impl Iterator, + target: OutboundDiscoverTarget, +) -> outbound::proxy_protocol::Kind { + let mut routes = routes + .map(|(gknn, route)| { + convert_outbound_route(gknn, route, default_backend.clone(), target.clone()) + }) + .collect::>(); + + if let Kind::EgressNetwork { traffic_policy, .. 
} = target.kind { + routes.push(default_outbound_egress_route( + default_backend, + traffic_policy, + )); + } + + outbound::proxy_protocol::Kind::Tls(outbound::proxy_protocol::Tls { routes }) +} + +fn convert_outbound_route( + gknn: GroupKindNamespaceName, + TlsRoute { + hostnames, + rule, + creation_timestamp: _, + }: TlsRoute, + backend: outbound::Backend, + target: OutboundDiscoverTarget, +) -> outbound::TlsRoute { + // This encoder sets deprecated timeouts for older proxies. + #![allow(deprecated)] + + let metadata = Some(meta::Metadata { + kind: Some(meta::metadata::Kind::Resource(meta::Resource { + group: gknn.group.to_string(), + kind: gknn.kind.to_string(), + namespace: gknn.namespace.to_string(), + name: gknn.name.to_string(), + ..Default::default() + })), + }); + + let snis = hostnames.into_iter().map(convert_sni_match).collect(); + + let backends = rule + .backends + .into_iter() + .map(|b| convert_backend(b, target.clone())) + .collect::>(); + + let dist = if backends.is_empty() { + outbound::tls_route::distribution::Kind::FirstAvailable( + outbound::tls_route::distribution::FirstAvailable { + backends: vec![outbound::tls_route::RouteBackend { + backend: Some(backend.clone()), + }], + }, + ) + } else { + outbound::tls_route::distribution::Kind::RandomAvailable( + outbound::tls_route::distribution::RandomAvailable { backends }, + ) + }; + + let rules = vec![outbound::tls_route::Rule { + backends: Some(outbound::tls_route::Distribution { kind: Some(dist) }), + }]; + + outbound::TlsRoute { + metadata, + snis, + rules, + error: None, + } +} + +fn convert_backend( + backend: Backend, + target: OutboundDiscoverTarget, +) -> outbound::tls_route::WeightedRouteBackend { + let original_dst_port = match target.kind { + Kind::EgressNetwork { original_dst, .. 
} => Some(original_dst.port()), + Kind::Service => None, + }; + + match backend { + Backend::Addr(addr) => { + let socket_addr = SocketAddr::new(addr.addr, addr.port.get()); + outbound::tls_route::WeightedRouteBackend { + weight: addr.weight, + backend: Some(outbound::tls_route::RouteBackend { + backend: Some(outbound::Backend { + metadata: None, + queue: Some(default_queue_config()), + kind: Some(outbound::backend::Kind::Forward( + destination::WeightedAddr { + addr: Some(socket_addr.into()), + weight: addr.weight, + ..Default::default() + }, + )), + }), + }), + error: None, + } + } + Backend::Service(svc) if svc.exists => outbound::tls_route::WeightedRouteBackend { + weight: svc.weight, + backend: Some(outbound::tls_route::RouteBackend { + backend: Some(outbound::Backend { + metadata: Some(super::service_meta(svc.clone())), + queue: Some(default_queue_config()), + kind: Some(outbound::backend::Kind::Balancer( + outbound::backend::BalanceP2c { + discovery: Some(outbound::backend::EndpointDiscovery { + kind: Some(outbound::backend::endpoint_discovery::Kind::Dst( + outbound::backend::endpoint_discovery::DestinationGet { + path: svc.authority, + }, + )), + }), + load: Some(default_balancer_config()), + }, + )), + }), + }), + error: None, + }, + Backend::Service(svc) => invalid_backend( + svc.weight, + format!("Service not found {}", svc.name), + super::service_meta(svc), + ), + Backend::EgressNetwork(egress_net) if egress_net.exists => match target.kind { + Kind::EgressNetwork { original_dst, .. 
} => { + if target.name == egress_net.name && target.namespace == egress_net.namespace { + outbound::tls_route::WeightedRouteBackend { + weight: egress_net.weight, + backend: Some(outbound::tls_route::RouteBackend { + backend: Some(outbound::Backend { + metadata: Some(super::egress_net_meta( + egress_net.clone(), + original_dst_port, + )), + queue: Some(default_queue_config()), + kind: Some(outbound::backend::Kind::Forward( + destination::WeightedAddr { + addr: Some(original_dst.into()), + weight: egress_net.weight, + ..Default::default() + }, + )), + }), + }), + error: None, + } + } else { + let weight = egress_net.weight; + let message = "Route with EgressNetwork backend needs to have the same EgressNetwork as a parent".to_string(); + invalid_backend( + weight, + message, + super::egress_net_meta(egress_net, original_dst_port), + ) + } + } + Kind::Service { .. } => invalid_backend( + egress_net.weight, + "EgressNetwork backends attach to EgressNetwork parents only".to_string(), + super::egress_net_meta(egress_net, original_dst_port), + ), + }, + Backend::EgressNetwork(egress_net) => invalid_backend( + egress_net.weight, + format!("EgressNetwork not found {}", egress_net.name), + super::egress_net_meta(egress_net, original_dst_port), + ), + Backend::Invalid { weight, message } => invalid_backend( + weight, + message, + meta::Metadata { + kind: Some(meta::metadata::Kind::Default("invalid".to_string())), + }, + ), + } +} + +fn invalid_backend( + weight: u32, + message: String, + meta: meta::Metadata, +) -> outbound::tls_route::WeightedRouteBackend { + outbound::tls_route::WeightedRouteBackend { + weight, + backend: Some(outbound::tls_route::RouteBackend { + backend: Some(outbound::Backend { + metadata: Some(meta), + queue: Some(default_queue_config()), + kind: None, + }), + }), + error: Some(outbound::BackendError { message }), + } +} + +pub(crate) fn default_outbound_egress_route( + backend: outbound::Backend, + traffic_policy: TrafficPolicy, +) -> 
outbound::TlsRoute { + #![allow(deprecated)] + let (error, name) = match traffic_policy { + TrafficPolicy::Allow => (None, "tls-egress-allow"), + TrafficPolicy::Deny => ( + Some(outbound::RouteError { + message: "traffic not allowed".to_string(), + }), + "tls-egress-deny", + ), + }; + + // This encoder sets deprecated timeouts for older proxies. + let metadata = Some(meta::Metadata { + kind: Some(meta::metadata::Kind::Default(name.to_string())), + }); + let rules = vec![outbound::tls_route::Rule { + backends: Some(outbound::tls_route::Distribution { + kind: Some(outbound::tls_route::distribution::Kind::FirstAvailable( + outbound::tls_route::distribution::FirstAvailable { + backends: vec![outbound::tls_route::RouteBackend { + backend: Some(backend), + }], + }, + )), + }), + }]; + outbound::TlsRoute { + metadata, + rules, + error, + ..Default::default() + } +} diff --git a/policy-controller/grpc/src/routes.rs b/policy-controller/grpc/src/routes.rs index 86d679cacb305..b960c6cebb18b 100644 --- a/policy-controller/grpc/src/routes.rs +++ b/policy-controller/grpc/src/routes.rs @@ -1,4 +1,4 @@ -use linkerd2_proxy_api::{http_route as proto, http_types}; +use linkerd2_proxy_api::{http_route as proto, http_types, tls_route as tls_proto}; use linkerd_policy_controller_core::routes::{ HeaderModifierFilter, HostMatch, PathModifier, RequestRedirectFilter, }; @@ -19,6 +19,19 @@ pub(crate) fn convert_host_match(h: HostMatch) -> proto::HostMatch { } } +pub(crate) fn convert_sni_match(h: HostMatch) -> tls_proto::SniMatch { + tls_proto::SniMatch { + r#match: Some(match h { + HostMatch::Exact(host) => tls_proto::sni_match::Match::Exact(host), + HostMatch::Suffix { reverse_labels } => { + tls_proto::sni_match::Match::Suffix(tls_proto::sni_match::Suffix { + reverse_labels: reverse_labels.to_vec(), + }) + } + }), + } +} + pub(crate) fn convert_request_header_modifier_filter( HeaderModifierFilter { add, set, remove }: HeaderModifierFilter, ) -> proto::RequestHeaderModifier { diff --git 
a/policy-controller/k8s/api/src/policy/network.rs b/policy-controller/k8s/api/src/policy/network.rs index 7448664e50ecc..d0da7ea8be0e0 100644 --- a/policy-controller/k8s/api/src/policy/network.rs +++ b/policy-controller/k8s/api/src/policy/network.rs @@ -1,3 +1,5 @@ +use std::net::IpAddr; + use ipnet::IpNet; #[derive( @@ -19,6 +21,26 @@ impl Network { intersect && !cidr_is_exception } + + #[inline] + pub fn contains(&self, addr: IpAddr) -> bool { + let addr = Cidr::Addr(addr); + let addr_is_exception = self.except.iter().flatten().any(|ex| ex.contains(&addr)); + if addr_is_exception { + return false; + } + + self.cidr.contains(&addr) + } + + /// Returns the size of this Network. The size is the + /// cidr size - the sum of the exception sizes. We assume + /// that exceptions do not overlap. + #[inline] + pub fn block_size(&self) -> usize { + let except_size: usize = self.except.iter().flatten().map(|c| c.block_size()).sum(); + self.cidr.block_size() - except_size + } } #[derive( @@ -55,6 +77,17 @@ impl Cidr { Self::Net(IpNet::V6(_)) => true, } } + + /// Returns the size of this CIDR block. + /// + /// Returns `1` if this represents a single address. 
+ #[inline] + pub fn block_size(&self) -> usize { + match self { + Cidr::Net(net) => net.hosts().count(), + Cidr::Addr(_) => 1, + } + } } impl std::str::FromStr for Cidr { diff --git a/policy-controller/k8s/index/Cargo.toml b/policy-controller/k8s/index/Cargo.toml index cf074fc3343d1..b123401611677 100644 --- a/policy-controller/k8s/index/Cargo.toml +++ b/policy-controller/k8s/index/Cargo.toml @@ -8,6 +8,7 @@ publish = false [dependencies] ahash = "0.8" anyhow = "1" +chrono = { version = "0.4.38", default_features = false } futures = { version = "0.3", default-features = false } http = "0.2" kube = { version = "0.87.1", default-features = false, features = [ diff --git a/policy-controller/k8s/index/src/inbound/index/grpc.rs b/policy-controller/k8s/index/src/inbound/index/grpc.rs index 7908816ac1a39..cd020a03563f4 100644 --- a/policy-controller/k8s/index/src/inbound/index/grpc.rs +++ b/policy-controller/k8s/index/src/inbound/index/grpc.rs @@ -19,7 +19,7 @@ impl TryFrom for RouteBinding { .hostnames .into_iter() .flatten() - .map(crate::routes::http::host_match) + .map(crate::routes::host_match) .collect(); let rules = route diff --git a/policy-controller/k8s/index/src/inbound/index/http.rs b/policy-controller/k8s/index/src/inbound/index/http.rs index fa52a1a699aa3..4b9fa2a82cd5d 100644 --- a/policy-controller/k8s/index/src/inbound/index/http.rs +++ b/policy-controller/k8s/index/src/inbound/index/http.rs @@ -20,7 +20,7 @@ impl TryFrom for RouteBinding { .hostnames .into_iter() .flatten() - .map(crate::routes::http::host_match) + .map(crate::routes::host_match) .collect(); let rules = route @@ -66,7 +66,7 @@ impl TryFrom for RouteBinding { .hostnames .into_iter() .flatten() - .map(crate::routes::http::host_match) + .map(crate::routes::host_match) .collect(); let rules = route diff --git a/policy-controller/k8s/index/src/outbound.rs b/policy-controller/k8s/index/src/outbound.rs index eeffd5a10e107..ff51cea36fefe 100644 --- a/policy-controller/k8s/index/src/outbound.rs 
+++ b/policy-controller/k8s/index/src/outbound.rs @@ -1,6 +1,6 @@ pub mod index; -pub use index::{metrics, Index, ServiceRef, SharedIndex}; +pub use index::{metrics, Index, ResourceRef, SharedIndex}; #[cfg(test)] mod tests; diff --git a/policy-controller/k8s/index/src/outbound/index.rs b/policy-controller/k8s/index/src/outbound/index.rs index e84e00122532c..2b632f4b446a8 100644 --- a/policy-controller/k8s/index/src/outbound/index.rs +++ b/policy-controller/k8s/index/src/outbound/index.rs @@ -5,36 +5,53 @@ use crate::{ }; use ahash::AHashMap as HashMap; use anyhow::{bail, ensure, Result}; +use egress_network::EgressNetwork; use linkerd_policy_controller_core::{ outbound::{ Backend, Backoff, FailureAccrual, GrpcRetryCondition, GrpcRoute, HttpRetryCondition, - HttpRoute, OutboundPolicy, RouteRetry, RouteSet, RouteTimeouts, + HttpRoute, Kind, OutboundDiscoverTarget, OutboundPolicy, RouteRetry, RouteSet, + RouteTimeouts, TcpRoute, TlsRoute, TrafficPolicy, }, routes::GroupKindNamespaceName, }; use linkerd_policy_controller_k8s_api::{ gateway::{self as k8s_gateway_api, ParentReference}, - policy as linkerd_k8s_api, ResourceExt, Service, + policy::{self as linkerd_k8s_api, Cidr}, + ResourceExt, Service, }; use parking_lot::RwLock; use std::{hash::Hash, net::IpAddr, num::NonZeroU16, sync::Arc, time}; use tokio::sync::watch; +#[allow(dead_code)] #[derive(Debug)] pub struct Index { namespaces: NamespaceIndex, - services_by_ip: HashMap, - service_info: HashMap, + services_by_ip: HashMap, + egress_networks_by_ref: HashMap, + // holds information about resources. 
currently EgressNetworks and Services + resource_info: HashMap, + cluster_networks: Vec, } +pub mod egress_network; pub mod grpc; pub mod http; pub mod metrics; +pub mod tcp; +pub(crate) mod tls; pub type SharedIndex = Arc>; #[derive(Debug, Clone, Hash, PartialEq, Eq)] -pub struct ServiceRef { +pub enum ResourceKind { + EgressNetwork, + Service, +} + +#[derive(Debug, Clone, Hash, PartialEq, Eq)] +pub struct ResourceRef { + pub kind: ResourceKind, pub name: String, pub namespace: String, } @@ -48,19 +65,23 @@ struct NamespaceIndex { #[derive(Debug)] struct Namespace { - /// Stores an observable handle for each known service:port, + /// Stores an observable handle for each known resource:port, /// as well as any route resources in the cluster that specify /// a port. - service_port_routes: HashMap, + resource_port_routes: HashMap, /// Stores the route resources (by service name) that do not - /// explicitly target a port. + /// explicitly target a port. These are only valid for Service + /// as EgressNetworks cannot be parents without an explicit + /// port declaration service_http_routes: HashMap>, service_grpc_routes: HashMap>, + service_tls_routes: HashMap>, + service_tcp_routes: HashMap>, namespace: Arc, } -#[derive(Debug, Default)] -struct ServiceInfo { +#[derive(Debug)] +struct ResourceInfo { opaque_ports: PortSet, accrual: Option, http_retry: Option>, @@ -69,17 +90,18 @@ struct ServiceInfo { } #[derive(Clone, Debug, PartialEq, Eq, Hash)] -struct ServicePort { - service: String, +struct ResourcePort { + kind: ResourceKind, + name: String, port: NonZeroU16, } #[derive(Debug)] -struct ServiceRoutes { +struct ResourceRoutes { namespace: Arc, name: String, port: NonZeroU16, - authority: String, + authority: Option, // present only on services watches_by_ns: HashMap, opaque: bool, accrual: Option, @@ -97,6 +119,8 @@ struct RoutesWatch { timeouts: RouteTimeouts, http_routes: RouteSet, grpc_routes: RouteSet, + tls_routes: RouteSet, + tcp_routes: RouteSet, watch: 
watch::Sender, } @@ -146,6 +170,36 @@ impl kubert::index::IndexNamespacedResource for Inde } } +impl kubert::index::IndexNamespacedResource for Index { + fn apply(&mut self, route: k8s_gateway_api::TlsRoute) { + self.apply_tls(route) + } + + fn delete(&mut self, namespace: String, name: String) { + let gknn = name + .gkn::() + .namespaced(namespace); + for ns_index in self.namespaces.by_ns.values_mut() { + ns_index.delete_tls_route(&gknn); + } + } +} + +impl kubert::index::IndexNamespacedResource for Index { + fn apply(&mut self, route: k8s_gateway_api::TcpRoute) { + self.apply_tcp(route) + } + + fn delete(&mut self, namespace: String, name: String) { + let gknn = name + .gkn::() + .namespaced(namespace); + for ns_index in self.namespaces.by_ns.values_mut() { + ns_index.delete_tcp_route(&gknn); + } + } +} + impl kubert::index::IndexNamespacedResource for Index { fn apply(&mut self, service: Service) { let name = service.name_unchecked(); @@ -180,7 +234,8 @@ impl kubert::index::IndexNamespacedResource for Index { } match cluster_ip.parse() { Ok(addr) => { - let service_ref = ServiceRef { + let service_ref = ResourceRef { + kind: ResourceKind::Service, name: name.clone(), namespace: ns.clone(), }; @@ -193,7 +248,7 @@ impl kubert::index::IndexNamespacedResource for Index { } } - let service_info = ServiceInfo { + let service_info = ResourceInfo { opaque_ports, accrual, http_retry, @@ -207,90 +262,206 @@ impl kubert::index::IndexNamespacedResource for Index { .or_insert_with(|| Namespace { service_http_routes: Default::default(), service_grpc_routes: Default::default(), - service_port_routes: Default::default(), + service_tls_routes: Default::default(), + service_tcp_routes: Default::default(), + resource_port_routes: Default::default(), namespace: Arc::new(ns), }) - .update_service(service.name_unchecked(), &service_info); + .update_resource(service.name_unchecked(), &service_info); - self.service_info.insert( - ServiceRef { + self.resource_info.insert( + ResourceRef 
{ + kind: ResourceKind::Service, name: service.name_unchecked(), namespace: service.namespace().expect("Service must have Namespace"), }, service_info, ); - self.reindex_services() + self.reindex_resources() } fn delete(&mut self, namespace: String, name: String) { tracing::debug!(name, namespace, "deleting service"); - let service_ref = ServiceRef { name, namespace }; - self.service_info.remove(&service_ref); + let service_ref = ResourceRef { + kind: ResourceKind::Service, + name, + namespace, + }; + self.resource_info.remove(&service_ref); self.services_by_ip.retain(|_, v| *v != service_ref); - self.reindex_services() + self.reindex_resources() + } +} + +impl kubert::index::IndexNamespacedResource for Index { + fn apply(&mut self, egress_network: linkerd_k8s_api::EgressNetwork) { + let name = egress_network.name_unchecked(); + let ns = egress_network + .namespace() + .expect("EgressNetwork must have a namespace"); + tracing::debug!(name, ns, "indexing EgressNetwork"); + let accrual = parse_accrual_config(egress_network.annotations()) + .map_err(|error| tracing::error!(%error, service=name, namespace=ns, "failed to parse accrual config")) + .unwrap_or_default(); + let opaque_ports = ports_annotation( + egress_network.annotations(), + "config.linkerd.io/opaque-ports", + ) + .unwrap_or_else(|| self.namespaces.cluster_info.default_opaque_ports.clone()); + + let timeouts = parse_timeouts(egress_network.annotations()) + .map_err(|error| tracing::error!(%error, service=name, namespace=ns, "failed to parse timeouts")) + .unwrap_or_default(); + + let http_retry = http::parse_http_retry(egress_network.annotations()).map_err(|error| { + tracing::error!(%error, service=name, namespace=ns, "failed to parse http retry") + }).unwrap_or_default(); + let grpc_retry = grpc::parse_grpc_retry(egress_network.annotations()).map_err(|error| { + tracing::error!(%error, service=name, namespace=ns, "failed to parse grpc retry") + }).unwrap_or_default(); + + let egress_net_ref = 
ResourceRef { + kind: ResourceKind::EgressNetwork, + name: name.clone(), + namespace: ns.clone(), + }; + let egress_net = + EgressNetwork::from_resource(&egress_network, self.cluster_networks.clone()); + self.egress_networks_by_ref + .insert(egress_net_ref.clone(), egress_net); + + let egress_network_info = ResourceInfo { + opaque_ports, + accrual, + http_retry, + grpc_retry, + timeouts, + }; + + self.namespaces + .by_ns + .entry(ns.clone()) + .or_insert_with(|| Namespace { + service_http_routes: Default::default(), + service_grpc_routes: Default::default(), + service_tls_routes: Default::default(), + service_tcp_routes: Default::default(), + resource_port_routes: Default::default(), + namespace: Arc::new(ns), + }) + .update_resource(egress_network.name_unchecked(), &egress_network_info); + + self.resource_info + .insert(egress_net_ref, egress_network_info); + + self.reindex_resources() + } + + fn delete(&mut self, namespace: String, name: String) { + tracing::debug!(name, namespace, "deleting EgressNetwork"); + let egress_net_ref = ResourceRef { + kind: ResourceKind::EgressNetwork, + name, + namespace, + }; + self.egress_networks_by_ref.remove(&egress_net_ref); + + self.reindex_resources() } } impl Index { pub fn shared(cluster_info: Arc) -> SharedIndex { + let cluster_networks = cluster_info.networks.clone(); Arc::new(RwLock::new(Self { namespaces: NamespaceIndex { by_ns: HashMap::default(), cluster_info, }, services_by_ip: HashMap::default(), - service_info: HashMap::default(), + egress_networks_by_ref: HashMap::default(), + resource_info: HashMap::default(), + cluster_networks: cluster_networks.into_iter().map(Cidr::from).collect(), })) } pub fn outbound_policy_rx( &mut self, - service_name: String, - service_namespace: String, - service_port: NonZeroU16, - source_namespace: String, + target: OutboundDiscoverTarget, ) -> Result> { + let OutboundDiscoverTarget { + name, + namespace, + port, + source_namespace, + kind, + } = target; + + let kind = match kind { + 
Kind::EgressNetwork { .. } => ResourceKind::EgressNetwork, + Kind::Service { .. } => ResourceKind::Service, + }; + let ns = self .namespaces .by_ns - .entry(service_namespace.clone()) + .entry(namespace.clone()) .or_insert_with(|| Namespace { - namespace: Arc::new(service_namespace.to_string()), + namespace: Arc::new(namespace.to_string()), service_http_routes: Default::default(), service_grpc_routes: Default::default(), - service_port_routes: Default::default(), + service_tls_routes: Default::default(), + service_tcp_routes: Default::default(), + resource_port_routes: Default::default(), }); - let key = ServicePort { - service: service_name, - port: service_port, - }; + let key = ResourcePort { kind, name, port }; - tracing::debug!(?key, "subscribing to service port"); + tracing::debug!(?key, "subscribing to resource port"); let routes = - ns.service_routes_or_default(key, &self.namespaces.cluster_info, &self.service_info); + ns.resource_routes_or_default(key, &self.namespaces.cluster_info, &self.resource_info); let watch = routes.watch_for_ns_or_default(source_namespace); Ok(watch.watch.subscribe()) } - pub fn lookup_service(&self, addr: IpAddr) -> Option { - self.services_by_ip.get(&addr).cloned() + pub fn lookup_service(&self, addr: IpAddr) -> Option<(String, String)> { + self.services_by_ip + .get(&addr) + .cloned() + .map(|r| (r.namespace, r.name)) + } + + pub fn lookup_egress_network( + &self, + addr: IpAddr, + source_namespace: String, + ) -> Option<(String, String, TrafficPolicy)> { + egress_network::resolve_egress_network( + addr, + source_namespace, + self.egress_networks_by_ref.values(), + ) + .map(|(r, p)| (r.namespace, r.name, p)) } fn apply_http(&mut self, route: HttpRouteResource) { tracing::debug!(name = route.name(), "indexing httproute"); for parent_ref in route.inner().parent_refs.iter().flatten() { - if !is_parent_service(parent_ref) { + let parent_kind = if is_parent_service(parent_ref) { + ResourceKind::Service + } else if 
is_parent_egress_network(parent_ref) { + ResourceKind::EgressNetwork + } else { continue; - } + }; - if !route_accepted_by_service(route.status(), &parent_ref.name) { + if !route_accepted_by_parent(route.status(), &parent_ref.name) { continue; } @@ -306,13 +477,16 @@ impl Index { namespace: Arc::new(ns), service_http_routes: Default::default(), service_grpc_routes: Default::default(), - service_port_routes: Default::default(), + service_tls_routes: Default::default(), + service_tcp_routes: Default::default(), + resource_port_routes: Default::default(), }) .apply_http_route( route.clone(), parent_ref, + parent_kind, &self.namespaces.cluster_info, - &self.service_info, + &self.resource_info, ); } } @@ -321,11 +495,15 @@ impl Index { tracing::debug!(name = route.name_unchecked(), "indexing grpcroute"); for parent_ref in route.spec.inner.parent_refs.iter().flatten() { - if !is_parent_service(parent_ref) { + let parent_kind = if is_parent_service(parent_ref) { + ResourceKind::Service + } else if is_parent_egress_network(parent_ref) { + ResourceKind::EgressNetwork + } else { continue; - } + }; - if !route_accepted_by_service(route.status.as_ref().map(|s| &s.inner), &parent_ref.name) + if !route_accepted_by_parent(route.status.as_ref().map(|s| &s.inner), &parent_ref.name) { continue; } @@ -342,20 +520,109 @@ impl Index { namespace: Arc::new(ns), service_http_routes: Default::default(), service_grpc_routes: Default::default(), - service_port_routes: Default::default(), + service_tls_routes: Default::default(), + service_tcp_routes: Default::default(), + resource_port_routes: Default::default(), }) .apply_grpc_route( route.clone(), parent_ref, + parent_kind, + &self.namespaces.cluster_info, + &self.resource_info, + ); + } + } + + fn apply_tls(&mut self, route: k8s_gateway_api::TlsRoute) { + tracing::debug!(name = route.name_unchecked(), "indexing tlsroute"); + + for parent_ref in route.spec.inner.parent_refs.iter().flatten() { + let parent_kind = if 
is_parent_service(parent_ref) { + ResourceKind::Service + } else if is_parent_egress_network(parent_ref) { + ResourceKind::EgressNetwork + } else { + continue; + }; + + if !route_accepted_by_parent(route.status.as_ref().map(|s| &s.inner), &parent_ref.name) + { + continue; + } + + let ns = parent_ref + .namespace + .clone() + .unwrap_or_else(|| route.namespace().expect("TlsRoute must have a namespace")); + + self.namespaces + .by_ns + .entry(ns.clone()) + .or_insert_with(|| Namespace { + namespace: Arc::new(ns), + service_http_routes: Default::default(), + service_grpc_routes: Default::default(), + service_tls_routes: Default::default(), + service_tcp_routes: Default::default(), + resource_port_routes: Default::default(), + }) + .apply_tls_route( + route.clone(), + parent_ref, + parent_kind, + &self.namespaces.cluster_info, + &self.resource_info, + ); + } + } + + fn apply_tcp(&mut self, route: k8s_gateway_api::TcpRoute) { + tracing::debug!(name = route.name_unchecked(), "indexing tcproute"); + + for parent_ref in route.spec.inner.parent_refs.iter().flatten() { + let parent_kind = if is_parent_service(parent_ref) { + ResourceKind::Service + } else if is_parent_egress_network(parent_ref) { + ResourceKind::EgressNetwork + } else { + continue; + }; + + if !route_accepted_by_parent(route.status.as_ref().map(|s| &s.inner), &parent_ref.name) + { + continue; + } + + let ns = parent_ref + .namespace + .clone() + .unwrap_or_else(|| route.namespace().expect("TcpRoute must have a namespace")); + + self.namespaces + .by_ns + .entry(ns.clone()) + .or_insert_with(|| Namespace { + namespace: Arc::new(ns), + service_http_routes: Default::default(), + service_grpc_routes: Default::default(), + service_tls_routes: Default::default(), + service_tcp_routes: Default::default(), + resource_port_routes: Default::default(), + }) + .apply_tcp_route( + route.clone(), + parent_ref, + parent_kind, &self.namespaces.cluster_info, - &self.service_info, + &self.resource_info, ); } } - fn 
reindex_services(&mut self) { + fn reindex_resources(&mut self) { for ns in self.namespaces.by_ns.values_mut() { - ns.reindex_services(&self.service_info); + ns.reindex_resources(&self.resource_info); } } } @@ -365,53 +632,59 @@ impl Namespace { &mut self, route: HttpRouteResource, parent_ref: &ParentReference, + parent_kind: ResourceKind, cluster_info: &ClusterInfo, - service_info: &HashMap, + resource_info: &HashMap, ) { tracing::debug!(?route); - let outbound_route = - match http::convert_route(&self.namespace, route.clone(), cluster_info, service_info) { - Ok(route) => route, - Err(error) => { - tracing::error!(%error, "failed to convert route"); - return; - } - }; + let outbound_route = match http::convert_route( + &self.namespace, + route.clone(), + cluster_info, + resource_info, + ) { + Ok(route) => route, + Err(error) => { + tracing::error!(%error, "failed to convert route"); + return; + } + }; tracing::debug!(?outbound_route); let port = parent_ref.port.and_then(NonZeroU16::new); if let Some(port) = port { - let service_port = ServicePort { + let resource_port = ResourcePort { + kind: parent_kind, port, - service: parent_ref.name.clone(), + name: parent_ref.name.clone(), }; tracing::debug!( - ?service_port, + ?resource_port, route = route.name(), - "inserting httproute for service" + "inserting httproute for resource" ); let service_routes = - self.service_routes_or_default(service_port, cluster_info, service_info); + self.resource_routes_or_default(resource_port, cluster_info, resource_info); service_routes.apply_http_route(route.gknn(), outbound_route); } else { // If the parent_ref doesn't include a port, apply this route - // to all ServiceRoutes which match the Service name. - self.service_port_routes.iter_mut().for_each( - |(ServicePort { service, port: _ }, routes)| { - if service == &parent_ref.name { + // to all ResourceRoutes which match the resource name. + self.resource_port_routes.iter_mut().for_each( + |(ResourcePort { name, port: _, .. 
}, routes)| { + if name == &parent_ref.name { routes.apply_http_route(route.gknn(), outbound_route.clone()); } }, ); // Also add the route to the list of routes that target the - // Service without specifying a port. + // resource without specifying a port. self.service_http_routes .entry(parent_ref.name.clone()) .or_default() @@ -423,13 +696,79 @@ impl Namespace { &mut self, route: k8s_gateway_api::GrpcRoute, parent_ref: &ParentReference, + parent_kind: ResourceKind, cluster_info: &ClusterInfo, - service_info: &HashMap, + resource_info: &HashMap, ) { tracing::debug!(?route); + let outbound_route = match grpc::convert_route( + &self.namespace, + route.clone(), + cluster_info, + resource_info, + ) { + Ok(route) => route, + Err(error) => { + tracing::error!(%error, "failed to convert route"); + return; + } + }; + + tracing::debug!(?outbound_route); + + let gknn = route + .gkn() + .namespaced(route.namespace().expect("Route must have namespace")); + + let port = parent_ref.port.and_then(NonZeroU16::new); + + if let Some(port) = port { + let port = ResourcePort { + kind: parent_kind, + port, + name: parent_ref.name.clone(), + }; + + tracing::debug!( + ?port, + route = route.name_unchecked(), + "inserting grpcroute for resource" + ); + + let service_routes = self.resource_routes_or_default(port, cluster_info, resource_info); + service_routes.apply_grpc_route(gknn, outbound_route); + } else { + // If the parent_ref doesn't include a port, apply this route + // to all ResourceRoutes which match the resource name. + self.resource_port_routes.iter_mut().for_each( + |(ResourcePort { name, port: _, .. }, routes)| { + if name == &parent_ref.name { + routes.apply_grpc_route(gknn.clone(), outbound_route.clone()); + } + }, + ); + + // Also add the route to the list of routes that target the + // resource without specifying a port. 
+ self.service_grpc_routes + .entry(parent_ref.name.clone()) + .or_default() + .insert(gknn, outbound_route); + } + } + + fn apply_tls_route( + &mut self, + route: k8s_gateway_api::TlsRoute, + parent_ref: &ParentReference, + parent_kind: ResourceKind, + cluster_info: &ClusterInfo, + resource_info: &HashMap, + ) { + tracing::debug!(?route); let outbound_route = - match grpc::convert_route(&self.namespace, route.clone(), cluster_info, service_info) { + match tls::convert_route(&self.namespace, route.clone(), cluster_info, resource_info) { Ok(route) => route, Err(error) => { tracing::error!(%error, "failed to convert route"); @@ -446,53 +785,130 @@ impl Namespace { let port = parent_ref.port.and_then(NonZeroU16::new); if let Some(port) = port { - let service_port = ServicePort { + let port = ResourcePort { + kind: parent_kind, port, - service: parent_ref.name.clone(), + name: parent_ref.name.clone(), }; tracing::debug!( - ?service_port, + ?port, route = route.name_unchecked(), - "inserting grpcroute for service" + "inserting tlsroute for resource" ); - let service_routes = - self.service_routes_or_default(service_port, cluster_info, service_info); + let resource_routes = + self.resource_routes_or_default(port, cluster_info, resource_info); - service_routes.apply_grpc_route(gknn, outbound_route); + resource_routes.apply_tls_route(gknn, outbound_route); } else { // If the parent_ref doesn't include a port, apply this route - // to all ServiceRoutes which match the Service name. - self.service_port_routes.iter_mut().for_each( - |(ServicePort { service, port: _ }, routes)| { - if service == &parent_ref.name { - routes.apply_grpc_route(gknn.clone(), outbound_route.clone()); + // to all ResourceRoutes which match the resource name. + self.resource_port_routes.iter_mut().for_each( + |(ResourcePort { name, port: _, .. 
}, routes)| { + if name == &parent_ref.name { + routes.apply_tls_route(gknn.clone(), outbound_route.clone()); } }, ); // Also add the route to the list of routes that target the - // Service without specifying a port. - self.service_grpc_routes + // resource without specifying a port. + self.service_tls_routes .entry(parent_ref.name.clone()) .or_default() .insert(gknn, outbound_route); } } - fn reindex_services(&mut self, service_info: &HashMap) { - let update_service = |backend: &mut Backend| { - if let Backend::Service(svc) = backend { - let service_ref = ServiceRef { - name: svc.name.clone(), - namespace: svc.namespace.clone(), - }; - svc.exists = service_info.contains_key(&service_ref); - } + fn apply_tcp_route( + &mut self, + route: k8s_gateway_api::TcpRoute, + parent_ref: &ParentReference, + parent_kind: ResourceKind, + cluster_info: &ClusterInfo, + resource_info: &HashMap, + ) { + tracing::debug!(?route); + let outbound_route = + match tcp::convert_route(&self.namespace, route.clone(), cluster_info, resource_info) { + Ok(route) => route, + Err(error) => { + tracing::error!(%error, "failed to convert route"); + return; + } + }; + + tracing::debug!(?outbound_route); + + let gknn = route + .gkn() + .namespaced(route.namespace().expect("Route must have namespace")); + + let port = parent_ref.port.and_then(NonZeroU16::new); + + if let Some(port) = port { + let port = ResourcePort { + kind: parent_kind, + port, + name: parent_ref.name.clone(), + }; + + tracing::debug!( + ?port, + route = route.name_unchecked(), + "inserting tcproute for resource" + ); + + let resource_routes = + self.resource_routes_or_default(port, cluster_info, resource_info); + + resource_routes.apply_tcp_route(gknn, outbound_route); + } else { + // If the parent_ref doesn't include a port, apply this route + // to all ResourceRoutes which match the resource name. + self.resource_port_routes.iter_mut().for_each( + |(ResourcePort { name, port: _, .. 
}, routes)| { + if name == &parent_ref.name { + routes.apply_tcp_route(gknn.clone(), outbound_route.clone()); + } + }, + ); + + // Also add the route to the list of routes that target the + // resource without specifying a port. + self.service_tcp_routes + .entry(parent_ref.name.clone()) + .or_default() + .insert(gknn, outbound_route); + } + } + + fn reindex_resources(&mut self, resource_info: &HashMap) { + let update_backend = |backend: &mut Backend| { + match backend { + Backend::Service(svc) => { + let service_ref = ResourceRef { + kind: ResourceKind::Service, + name: svc.name.clone(), + namespace: svc.namespace.clone(), + }; + svc.exists = resource_info.contains_key(&service_ref); + } + Backend::EgressNetwork(egress_net) => { + let egress_net_ref = ResourceRef { + kind: ResourceKind::EgressNetwork, + name: egress_net.name.clone(), + namespace: egress_net.namespace.clone(), + }; + egress_net.exists = resource_info.contains_key(&egress_net_ref); + } + + _ => {} + }; }; - for routes in self.service_port_routes.values_mut() { + for routes in self.resource_port_routes.values_mut() { for watch in routes.watches_by_ns.values_mut() { let http_backends = watch .http_routes @@ -504,36 +920,49 @@ impl Namespace { .values_mut() .flat_map(|route| route.rules.iter_mut()) .flat_map(|rule| rule.backends.iter_mut()); + let tls_backends = watch + .tls_routes + .values_mut() + .flat_map(|route| route.rule.backends.iter_mut()); + let tcp_backends = watch + .tcp_routes + .values_mut() + .flat_map(|route| route.rule.backends.iter_mut()); + + http_backends + .chain(grpc_backends) + .chain(tls_backends) + .chain(tcp_backends) + .for_each(update_backend); - http_backends.chain(grpc_backends).for_each(update_service); watch.send_if_modified(); } } } - fn update_service(&mut self, name: String, service: &ServiceInfo) { - tracing::debug!(?name, ?service, "updating service"); + fn update_resource(&mut self, name: String, resource: &ResourceInfo) { + tracing::debug!(?name, ?resource, 
"updating resource"); - for (svc_port, svc_routes) in self.service_port_routes.iter_mut() { - if svc_port.service != name { + for (resource_port, resource_routes) in self.resource_port_routes.iter_mut() { + if resource_port.name != name { continue; } - let opaque = service.opaque_ports.contains(&svc_port.port); + let opaque = resource.opaque_ports.contains(&resource_port.port); - svc_routes.update_service( + resource_routes.update_resource( opaque, - service.accrual, - service.http_retry.clone(), - service.grpc_retry.clone(), - service.timeouts.clone(), + resource.accrual, + resource.http_retry.clone(), + resource.grpc_retry.clone(), + resource.timeouts.clone(), ); } } fn delete_http_route(&mut self, gknn: &GroupKindNamespaceName) { - for service in self.service_port_routes.values_mut() { - service.delete_http_route(gknn); + for resource in self.resource_port_routes.values_mut() { + resource.delete_http_route(gknn); } self.service_http_routes.retain(|_, routes| { @@ -543,8 +972,8 @@ impl Namespace { } fn delete_grpc_route(&mut self, gknn: &GroupKindNamespaceName) { - for service in self.service_port_routes.values_mut() { - service.delete_grpc_route(gknn); + for resource in self.resource_port_routes.values_mut() { + resource.delete_grpc_route(gknn); } self.service_grpc_routes.retain(|_, routes| { @@ -553,21 +982,48 @@ impl Namespace { }); } - fn service_routes_or_default( + fn delete_tls_route(&mut self, gknn: &GroupKindNamespaceName) { + for resource in self.resource_port_routes.values_mut() { + resource.delete_tls_route(gknn); + } + + self.service_tls_routes.retain(|_, routes| { + routes.remove(gknn); + !routes.is_empty() + }); + } + + fn delete_tcp_route(&mut self, gknn: &GroupKindNamespaceName) { + for resource in self.resource_port_routes.values_mut() { + resource.delete_tcp_route(gknn); + } + + self.service_tcp_routes.retain(|_, routes| { + routes.remove(gknn); + !routes.is_empty() + }); + } + + fn resource_routes_or_default( &mut self, - sp: ServicePort, + 
rp: ResourcePort, cluster: &ClusterInfo, - service_info: &HashMap, - ) -> &mut ServiceRoutes { - self.service_port_routes - .entry(sp.clone()) + resource_info: &HashMap, + ) -> &mut ResourceRoutes { + self.resource_port_routes + .entry(rp.clone()) .or_insert_with(|| { - let authority = - cluster.service_dns_authority(&self.namespace, &sp.service, sp.port); - - let service_ref = ServiceRef { - name: sp.service.clone(), + let resource_ref = ResourceRef { + name: rp.name.clone(), namespace: self.namespace.to_string(), + kind: rp.kind.clone(), + }; + + let authority = match rp.kind { + ResourceKind::EgressNetwork => None, + ResourceKind::Service => { + Some(cluster.service_dns_authority(&self.namespace, &rp.name, rp.port)) + } }; let mut opaque = false; @@ -575,36 +1031,46 @@ impl Namespace { let mut http_retry = None; let mut grpc_retry = None; let mut timeouts = Default::default(); - if let Some(svc) = service_info.get(&service_ref) { - opaque = svc.opaque_ports.contains(&sp.port); - accrual = svc.accrual; - http_retry = svc.http_retry.clone(); - grpc_retry = svc.grpc_retry.clone(); - timeouts = svc.timeouts.clone(); + if let Some(resource) = resource_info.get(&resource_ref) { + opaque = resource.opaque_ports.contains(&rp.port); + accrual = resource.accrual; + http_retry = resource.http_retry.clone(); + grpc_retry = resource.grpc_retry.clone(); + timeouts = resource.timeouts.clone(); } - // The routes which target this Service but don't specify + // The routes which target this Resource but don't specify // a port apply to all ports. Therefore, we include them. 
let http_routes = self .service_http_routes - .get(&sp.service) + .get(&rp.name) .cloned() .unwrap_or_default(); let grpc_routes = self .service_grpc_routes - .get(&sp.service) + .get(&rp.name) + .cloned() + .unwrap_or_default(); + let tls_routes = self + .service_tls_routes + .get(&rp.name) + .cloned() + .unwrap_or_default(); + let tcp_routes = self + .service_tcp_routes + .get(&rp.name) .cloned() .unwrap_or_default(); - let mut service_routes = ServiceRoutes { + let mut resource_routes = ResourceRoutes { opaque, accrual, http_retry, grpc_retry, timeouts, authority, - port: sp.port, - name: sp.service, + port: rp.port, + name: rp.name, namespace: self.namespace.clone(), watches_by_ns: Default::default(), }; @@ -618,34 +1084,57 @@ impl Namespace { let (producer_grpc_routes, consumer_grpc_routes): (Vec<_>, Vec<_>) = grpc_routes .into_iter() .partition(|(gknn, _)| gknn.namespace == *self.namespace); + let (producer_tls_routes, consumer_tls_routes): (Vec<_>, Vec<_>) = tls_routes + .into_iter() + .partition(|(gknn, _)| gknn.namespace == *self.namespace); + let (producer_tcp_routes, consumer_tcp_routes): (Vec<_>, Vec<_>) = tcp_routes + .into_iter() + .partition(|(gknn, _)| gknn.namespace == *self.namespace); for (consumer_gknn, consumer_route) in consumer_http_routes { // Consumer routes should only apply to watches from the // consumer namespace. - let consumer_watch = - service_routes.watch_for_ns_or_default(consumer_gknn.namespace.to_string()); + let consumer_watch = resource_routes + .watch_for_ns_or_default(consumer_gknn.namespace.to_string()); consumer_watch.insert_http_route(consumer_gknn.clone(), consumer_route.clone()); } for (consumer_gknn, consumer_route) in consumer_grpc_routes { // Consumer routes should only apply to watches from the // consumer namespace. 
- let consumer_watch = - service_routes.watch_for_ns_or_default(consumer_gknn.namespace.to_string()); + let consumer_watch = resource_routes + .watch_for_ns_or_default(consumer_gknn.namespace.to_string()); consumer_watch.insert_grpc_route(consumer_gknn.clone(), consumer_route.clone()); } + for (consumer_gknn, consumer_route) in consumer_tls_routes { + // Consumer routes should only apply to watches from the + // consumer namespace. + let consumer_watch = resource_routes + .watch_for_ns_or_default(consumer_gknn.namespace.to_string()); + + consumer_watch.insert_tls_route(consumer_gknn.clone(), consumer_route.clone()); + } + + for (consumer_gknn, consumer_route) in consumer_tcp_routes { + // Consumer routes should only apply to watches from the + // consumer namespace. + let consumer_watch = resource_routes + .watch_for_ns_or_default(consumer_gknn.namespace.to_string()); + + consumer_watch.insert_tcp_route(consumer_gknn.clone(), consumer_route.clone()); + } for (producer_gknn, producer_route) in producer_http_routes { // Insert the route into the producer namespace. - let producer_watch = - service_routes.watch_for_ns_or_default(producer_gknn.namespace.to_string()); + let producer_watch = resource_routes + .watch_for_ns_or_default(producer_gknn.namespace.to_string()); producer_watch.insert_http_route(producer_gknn.clone(), producer_route.clone()); // Producer routes apply to clients in all namespaces, so // apply it to watches for all other namespaces too. - service_routes + resource_routes .watches_by_ns .iter_mut() .filter(|(namespace, _)| { @@ -658,14 +1147,14 @@ impl Namespace { for (producer_gknn, producer_route) in producer_grpc_routes { // Insert the route into the producer namespace. 
- let producer_watch = - service_routes.watch_for_ns_or_default(producer_gknn.namespace.to_string()); + let producer_watch = resource_routes + .watch_for_ns_or_default(producer_gknn.namespace.to_string()); producer_watch.insert_grpc_route(producer_gknn.clone(), producer_route.clone()); // Producer routes apply to clients in all namespaces, so // apply it to watches for all other namespaces too. - service_routes + resource_routes .watches_by_ns .iter_mut() .filter(|(namespace, _)| { @@ -676,7 +1165,47 @@ impl Namespace { }); } - service_routes + for (producer_gknn, producer_route) in producer_tls_routes { + // Insert the route into the producer namespace. + let producer_watch = resource_routes + .watch_for_ns_or_default(producer_gknn.namespace.to_string()); + + producer_watch.insert_tls_route(producer_gknn.clone(), producer_route.clone()); + + // Producer routes apply to clients in all namespaces, so + // apply it to watches for all other namespaces too. + resource_routes + .watches_by_ns + .iter_mut() + .filter(|(namespace, _)| { + namespace.as_str() != producer_gknn.namespace.as_ref() + }) + .for_each(|(_, watch)| { + watch.insert_tls_route(producer_gknn.clone(), producer_route.clone()) + }); + } + + for (producer_gknn, producer_route) in producer_tcp_routes { + // Insert the route into the producer namespace. + let producer_watch = resource_routes + .watch_for_ns_or_default(producer_gknn.namespace.to_string()); + + producer_watch.insert_tcp_route(producer_gknn.clone(), producer_route.clone()); + + // Producer routes apply to clients in all namespaces, so + // apply it to watches for all other namespaces too. 
+ resource_routes + .watches_by_ns + .iter_mut() + .filter(|(namespace, _)| { + namespace.as_str() != producer_gknn.namespace.as_ref() + }) + .for_each(|(_, watch)| { + watch.insert_tcp_route(producer_gknn.clone(), producer_route.clone()) + }); + } + + resource_routes }) } } @@ -725,7 +1254,7 @@ pub fn is_parent_service_or_egress_network(parent: &ParentReference) -> bool { } #[inline] -fn route_accepted_by_service( +fn route_accepted_by_parent( route_status: Option<&k8s_gateway_api::RouteStatus>, service: &str, ) -> bool { @@ -743,7 +1272,7 @@ fn route_accepted_by_service( }) } -impl ServiceRoutes { +impl ResourceRoutes { fn watch_for_ns_or_default(&mut self, namespace: String) -> &mut RoutesWatch { // The routes from the producer namespace apply to watches in all // namespaces, so we copy them. @@ -758,6 +1287,18 @@ impl ServiceRoutes { .map(|watch| watch.grpc_routes.clone()) .unwrap_or_default(); + let tls_routes = self + .watches_by_ns + .get(self.namespace.as_ref()) + .map(|watch| watch.tls_routes.clone()) + .unwrap_or_default(); + + let tcp_routes = self + .watches_by_ns + .get(self.namespace.as_ref()) + .map(|watch| watch.tcp_routes.clone()) + .unwrap_or_default(); + self.watches_by_ns.entry(namespace).or_insert_with(|| { let (sender, _) = watch::channel(OutboundPolicy { port: self.port, @@ -768,14 +1309,18 @@ impl ServiceRoutes { timeouts: self.timeouts.clone(), http_routes: http_routes.clone(), grpc_routes: grpc_routes.clone(), + tls_routes: tls_routes.clone(), + tcp_routes: tcp_routes.clone(), name: self.name.to_string(), - authority: self.authority.clone(), + service_authority: self.authority.clone().unwrap_or("".into()), namespace: self.namespace.to_string(), }); RoutesWatch { http_routes, grpc_routes, + tls_routes, + tcp_routes, watch: sender, opaque: self.opaque, accrual: self.accrual, @@ -830,7 +1375,51 @@ impl ServiceRoutes { } } - fn update_service( + fn apply_tls_route(&mut self, gknn: GroupKindNamespaceName, route: TlsRoute) { + if *gknn.namespace 
== *self.namespace { + // This is a producer namespace route. + let watch = self.watch_for_ns_or_default(gknn.namespace.to_string()); + + watch.insert_tls_route(gknn.clone(), route.clone()); + + // Producer routes apply to clients in all namespaces, so + // apply it to watches for all other namespaces too. + for (ns, ns_watch) in self.watches_by_ns.iter_mut() { + if ns != &gknn.namespace { + ns_watch.insert_tls_route(gknn.clone(), route.clone()); + } + } + } else { + // This is a consumer namespace route and should only apply to + // watches from that namespace. + let watch = self.watch_for_ns_or_default(gknn.namespace.to_string()); + watch.insert_tls_route(gknn, route); + } + } + + fn apply_tcp_route(&mut self, gknn: GroupKindNamespaceName, route: TcpRoute) { + if *gknn.namespace == *self.namespace { + // This is a producer namespace route. + let watch = self.watch_for_ns_or_default(gknn.namespace.to_string()); + + watch.insert_tcp_route(gknn.clone(), route.clone()); + + // Producer routes apply to clients in all namespaces, so + // apply it to watches for all other namespaces too. + for (ns, ns_watch) in self.watches_by_ns.iter_mut() { + if ns != &gknn.namespace { + ns_watch.insert_tcp_route(gknn.clone(), route.clone()); + } + } + } else { + // This is a consumer namespace route and should only apply to + // watches from that namespace. 
+ let watch = self.watch_for_ns_or_default(gknn.namespace.to_string()); + watch.insert_tcp_route(gknn, route); + } + } + + fn update_resource( &mut self, opaque: bool, accrual: Option, @@ -864,6 +1453,18 @@ impl ServiceRoutes { watch.remove_grpc_route(gknn); } } + + fn delete_tls_route(&mut self, gknn: &GroupKindNamespaceName) { + for watch in self.watches_by_ns.values_mut() { + watch.remove_tls_route(gknn); + } + } + + fn delete_tcp_route(&mut self, gknn: &GroupKindNamespaceName) { + for watch in self.watches_by_ns.values_mut() { + watch.remove_tcp_route(gknn); + } + } } impl RoutesWatch { @@ -881,6 +1482,16 @@ impl RoutesWatch { modified = true; } + if self.tls_routes != policy.tls_routes { + policy.tls_routes = self.tls_routes.clone(); + modified = true; + } + + if self.tcp_routes != policy.tcp_routes { + policy.tcp_routes = self.tcp_routes.clone(); + modified = true; + } + if self.opaque != policy.opaque { policy.opaque = self.opaque; modified = true; @@ -922,6 +1533,18 @@ impl RoutesWatch { self.send_if_modified(); } + fn insert_tls_route(&mut self, gknn: GroupKindNamespaceName, route: TlsRoute) { + self.tls_routes.insert(gknn, route); + + self.send_if_modified(); + } + + fn insert_tcp_route(&mut self, gknn: GroupKindNamespaceName, route: TcpRoute) { + self.tcp_routes.insert(gknn, route); + + self.send_if_modified(); + } + fn remove_http_route(&mut self, gknn: &GroupKindNamespaceName) { self.http_routes.remove(gknn); self.send_if_modified(); @@ -931,6 +1554,16 @@ impl RoutesWatch { self.grpc_routes.remove(gknn); self.send_if_modified(); } + + fn remove_tls_route(&mut self, gknn: &GroupKindNamespaceName) { + self.tls_routes.remove(gknn); + self.send_if_modified(); + } + + fn remove_tcp_route(&mut self, gknn: &GroupKindNamespaceName) { + self.tcp_routes.remove(gknn); + self.send_if_modified(); + } } pub fn parse_accrual_config( @@ -1036,3 +1669,19 @@ fn parse_duration(s: &str) -> Result { .ok_or_else(|| anyhow::anyhow!("Timeout value {} overflows when converted 
to 'ms'", s))?; Ok(time::Duration::from_millis(ms)) } + +#[inline] +pub(crate) fn backend_kind( + backend: &k8s_gateway_api::BackendObjectReference, +) -> Option { + let group = backend.group.as_deref(); + // Backends default to `Service` if no kind is specified. + let kind = backend.kind.as_deref().unwrap_or("Service"); + if is_service(group, kind) { + Some(ResourceKind::Service) + } else if is_egress_network(group, kind) { + Some(ResourceKind::EgressNetwork) + } else { + None + } +} diff --git a/policy-controller/k8s/index/src/outbound/index/egress_network.rs b/policy-controller/k8s/index/src/outbound/index/egress_network.rs new file mode 100644 index 0000000000000..9d11b4dff56ac --- /dev/null +++ b/policy-controller/k8s/index/src/outbound/index/egress_network.rs @@ -0,0 +1,284 @@ +use chrono::{offset::Utc, DateTime}; +use linkerd_policy_controller_core::outbound; +use linkerd_policy_controller_k8s_api::policy::{Cidr, Network, TrafficPolicy}; +use linkerd_policy_controller_k8s_api::{policy as linkerd_k8s_api, ResourceExt}; +use std::net::IpAddr; + +#[derive(Debug)] +pub(crate) struct EgressNetwork { + pub networks: Vec, + pub name: String, + pub namespace: String, + pub creation_timestamp: Option>, + pub traffic_policy: TrafficPolicy, +} + +#[derive(Debug, PartialEq, Eq)] +struct MatchedEgressNetwork { + matched_network_size: usize, + name: String, + namespace: String, + creation_timestamp: Option>, + pub traffic_policy: TrafficPolicy, +} + +// === impl EgressNetwork === + +impl EgressNetwork { + pub(crate) fn from_resource( + r: &linkerd_k8s_api::EgressNetwork, + cluster_networks: Vec, + ) -> Self { + let name = r.name_unchecked(); + let namespace = r.namespace().expect("EgressNetwork must have a namespace"); + let creation_timestamp = r.creation_timestamp().map(|d| d.0); + let traffic_policy = r.spec.traffic_policy.clone(); + + let networks = r.spec.networks.clone().unwrap_or_else(|| { + let (v6, v4) = cluster_networks.iter().cloned().partition(Cidr::is_ipv6); 
+ + vec![ + Network { + cidr: "0.0.0.0/0".parse().expect("should parse"), + except: Some(v4), + }, + Network { + cidr: "::/0".parse().expect("should parse"), + except: Some(v6), + }, + ] + }); + + EgressNetwork { + name, + namespace, + networks, + creation_timestamp, + traffic_policy, + } + } +} + +// Attempts to find the best matching network for a certain discovery look-up. +// Logic is: +// 1. if there are Egress networks in the source_namespace, only these are considered +// 2. the target IP is matched against the networks of the EgressNetwork +// 3. ambiguity is resolved as by comparing the networks using compare_matched_egress_network +pub(crate) fn resolve_egress_network<'n>( + addr: IpAddr, + source_namespace: String, + nets: impl Iterator, +) -> Option<(super::ResourceRef, outbound::TrafficPolicy)> { + let (same_ns, rest): (Vec<_>, Vec<_>) = nets.partition(|un| un.namespace == source_namespace); + let to_pick_from = if !same_ns.is_empty() { same_ns } else { rest }; + + to_pick_from + .iter() + .filter_map(|egress_network| { + let matched_network_size = match_network(&egress_network.networks, addr)?; + Some(MatchedEgressNetwork { + name: egress_network.name.clone(), + namespace: egress_network.namespace.clone(), + matched_network_size, + creation_timestamp: egress_network.creation_timestamp, + traffic_policy: egress_network.traffic_policy.clone(), + }) + }) + .max_by(compare_matched_egress_network) + .map(|m| { + ( + super::ResourceRef { + kind: super::ResourceKind::EgressNetwork, + name: m.name, + namespace: m.namespace, + }, + match m.traffic_policy { + TrafficPolicy::Allow => outbound::TrafficPolicy::Allow, + TrafficPolicy::Deny => outbound::TrafficPolicy::Deny, + }, + ) + }) +} + +// Finds a CIDR that contains the given IpAddr. When there are +// multiple CIDRS that match this criteria, the CIDR that is most +// specific (as in having the smallest address space) wins. 
+fn match_network(networks: &[Network], addr: IpAddr) -> Option { + networks + .iter() + .filter(|c| c.contains(addr)) + .min_by(|a, b| a.block_size().cmp(&b.block_size())) + .map(Network::block_size) +} + +// This logic compares two MatchedEgressNetwork objects with the purpose +// of picking the one that is more specific. The disambiguation rules are +// as follows: +// 1. prefer the more specific network match (smaller address space size) +// 2. prefer older resource +// 3. all being equal, rely on alphabetical sort of namespace/name +fn compare_matched_egress_network( + a: &MatchedEgressNetwork, + b: &MatchedEgressNetwork, +) -> std::cmp::Ordering { + b.matched_network_size + .cmp(&a.matched_network_size) + .then_with(|| a.creation_timestamp.cmp(&b.creation_timestamp).reverse()) + .then_with(|| a.namespace.cmp(&b.namespace).reverse()) + .then_with(|| a.name.cmp(&b.name).reverse()) +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn test_picks_smallest_cidr() { + let ip_addr = "192.168.0.4".parse().unwrap(); + let networks = vec![ + EgressNetwork { + networks: vec![Network { + cidr: "192.168.0.1/16".parse().unwrap(), + except: None, + }], + name: "net-1".to_string(), + namespace: "ns".to_string(), + creation_timestamp: None, + traffic_policy: TrafficPolicy::Allow, + }, + EgressNetwork { + networks: vec![Network { + cidr: "192.168.0.1/24".parse().unwrap(), + except: None, + }], + name: "net-2".to_string(), + namespace: "ns".to_string(), + creation_timestamp: None, + traffic_policy: TrafficPolicy::Allow, + }, + ]; + + let resolved = resolve_egress_network(ip_addr, "ns".into(), networks.iter()); + assert_eq!(resolved.unwrap().0.name, "net-2".to_string()) + } + + #[test] + fn test_picks_local_ns() { + let ip_addr = "192.168.0.4".parse().unwrap(); + let networks = vec![ + EgressNetwork { + networks: vec![Network { + cidr: "192.168.0.1/16".parse().unwrap(), + except: None, + }], + name: "net-1".to_string(), + namespace: "ns-1".to_string(), + 
creation_timestamp: None, + traffic_policy: TrafficPolicy::Allow, + }, + EgressNetwork { + networks: vec![Network { + cidr: "192.168.0.1/24".parse().unwrap(), + except: None, + }], + name: "net-2".to_string(), + namespace: "ns".to_string(), + creation_timestamp: None, + traffic_policy: TrafficPolicy::Allow, + }, + ]; + + let resolved = resolve_egress_network(ip_addr, "ns-1".into(), networks.iter()); + assert_eq!(resolved.unwrap().0.name, "net-1".to_string()) + } + + #[test] + fn test_picks_older_resource() { + let ip_addr = "192.168.0.4".parse().unwrap(); + let networks = vec![ + EgressNetwork { + networks: vec![Network { + cidr: "192.168.0.1/16".parse().unwrap(), + except: None, + }], + name: "net-1".to_string(), + namespace: "ns".to_string(), + creation_timestamp: Some(DateTime::::MAX_UTC), + traffic_policy: TrafficPolicy::Allow, + }, + EgressNetwork { + networks: vec![Network { + cidr: "192.168.0.1/16".parse().unwrap(), + except: None, + }], + name: "net-2".to_string(), + namespace: "ns".to_string(), + creation_timestamp: Some(DateTime::::MIN_UTC), + traffic_policy: TrafficPolicy::Allow, + }, + ]; + + let resolved = resolve_egress_network(ip_addr, "ns".into(), networks.iter()); + assert_eq!(resolved.unwrap().0.name, "net-2".to_string()) + } + + #[test] + fn test_picks_alphabetical_order() { + let ip_addr = "192.168.0.4".parse().unwrap(); + let networks = vec![ + EgressNetwork { + networks: vec![Network { + cidr: "192.168.0.1/16".parse().unwrap(), + except: None, + }], + name: "b".to_string(), + namespace: "a".to_string(), + creation_timestamp: None, + traffic_policy: TrafficPolicy::Allow, + }, + EgressNetwork { + networks: vec![Network { + cidr: "192.168.0.1/16".parse().unwrap(), + except: None, + }], + name: "d".to_string(), + namespace: "c".to_string(), + creation_timestamp: None, + traffic_policy: TrafficPolicy::Allow, + }, + ]; + + let resolved = resolve_egress_network(ip_addr, "ns".into(), networks.iter()); + assert_eq!(resolved.unwrap().0.name, 
"b".to_string()) + } + + #[test] + fn test_respects_exception() { + let ip_addr = "192.168.0.4".parse().unwrap(); + let networks = vec![ + EgressNetwork { + networks: vec![Network { + cidr: "192.168.0.1/16".parse().unwrap(), + except: Some(vec!["192.168.0.4".parse().unwrap()]), + }], + name: "b".to_string(), + namespace: "a".to_string(), + creation_timestamp: None, + traffic_policy: TrafficPolicy::Allow, + }, + EgressNetwork { + networks: vec![Network { + cidr: "192.168.0.1/16".parse().unwrap(), + except: None, + }], + name: "d".to_string(), + namespace: "c".to_string(), + creation_timestamp: None, + traffic_policy: TrafficPolicy::Allow, + }, + ]; + + let resolved = resolve_egress_network(ip_addr, "ns".into(), networks.iter()); + assert_eq!(resolved.unwrap().0.name, "d".to_string()) + } +} diff --git a/policy-controller/k8s/index/src/outbound/index/grpc.rs b/policy-controller/k8s/index/src/outbound/index/grpc.rs index f9346686831cb..e4143c18d2d41 100644 --- a/policy-controller/k8s/index/src/outbound/index/grpc.rs +++ b/policy-controller/k8s/index/src/outbound/index/grpc.rs @@ -1,7 +1,7 @@ use std::time; use super::http::{convert_backend, convert_gateway_filter}; -use super::{parse_duration, parse_timeouts, ServiceInfo, ServiceRef}; +use super::{parse_duration, parse_timeouts, ResourceInfo, ResourceRef}; use crate::{routes, ClusterInfo}; use ahash::AHashMap as HashMap; use anyhow::{bail, Result}; @@ -16,7 +16,7 @@ pub(super) fn convert_route( ns: &str, route: gateway::GrpcRoute, cluster: &ClusterInfo, - service_info: &HashMap, + resource_info: &HashMap, ) -> Result { let timeouts = parse_timeouts(route.annotations())?; let retry = parse_grpc_retry(route.annotations())?; @@ -26,7 +26,7 @@ pub(super) fn convert_route( .hostnames .into_iter() .flatten() - .map(routes::http::host_match) + .map(routes::host_match) .collect(); let rules = route @@ -39,7 +39,7 @@ pub(super) fn convert_route( ns, rule, cluster, - service_info, + resource_info, timeouts.clone(), 
retry.clone(), ) @@ -59,7 +59,7 @@ fn convert_rule( ns: &str, rule: gateway::GrpcRouteRule, cluster: &ClusterInfo, - service_info: &HashMap, + resource_info: &HashMap, timeouts: RouteTimeouts, retry: Option>, ) -> Result> { @@ -74,7 +74,7 @@ fn convert_rule( .backend_refs .into_iter() .flatten() - .filter_map(|b| convert_backend(ns, b, cluster, service_info)) + .filter_map(|b| convert_backend(ns, b, cluster, resource_info)) .collect(); let filters = rule diff --git a/policy-controller/k8s/index/src/outbound/index/http.rs b/policy-controller/k8s/index/src/outbound/index/http.rs index 8351d1136f255..b394e7b413246 100644 --- a/policy-controller/k8s/index/src/outbound/index/http.rs +++ b/policy-controller/k8s/index/src/outbound/index/http.rs @@ -1,6 +1,6 @@ use std::{num::NonZeroU16, time}; -use super::{is_service, parse_duration, parse_timeouts, ServiceInfo, ServiceRef}; +use super::{parse_duration, parse_timeouts, ResourceInfo, ResourceKind, ResourceRef}; use crate::{ routes::{self, HttpRouteResource}, ClusterInfo, @@ -11,7 +11,7 @@ use kube::ResourceExt; use linkerd_policy_controller_core::{ outbound::{ Backend, Filter, HttpRetryCondition, OutboundRoute, OutboundRouteRule, RouteRetry, - RouteTimeouts, WeightedService, + RouteTimeouts, WeightedEgressNetwork, WeightedService, }, routes::HttpRouteMatch, }; @@ -21,7 +21,7 @@ pub(super) fn convert_route( ns: &str, route: HttpRouteResource, cluster: &ClusterInfo, - service_info: &HashMap, + resource_info: &HashMap, ) -> Result> { match route { HttpRouteResource::LinkerdHttp(route) => { @@ -33,7 +33,7 @@ pub(super) fn convert_route( .hostnames .into_iter() .flatten() - .map(routes::http::host_match) + .map(routes::host_match) .collect(); let rules = route @@ -46,7 +46,7 @@ pub(super) fn convert_route( ns, r, cluster, - service_info, + resource_info, timeouts.clone(), retry.clone(), ) @@ -70,7 +70,7 @@ pub(super) fn convert_route( .hostnames .into_iter() .flatten() - .map(routes::http::host_match) + .map(routes::host_match) 
.collect(); let rules = route @@ -83,7 +83,7 @@ pub(super) fn convert_route( ns, r, cluster, - service_info, + resource_info, timeouts.clone(), retry.clone(), ) @@ -105,7 +105,7 @@ fn convert_linkerd_rule( ns: &str, rule: policy::httproute::HttpRouteRule, cluster: &ClusterInfo, - service_info: &HashMap, + resource_info: &HashMap, mut timeouts: RouteTimeouts, retry: Option>, ) -> Result> { @@ -120,7 +120,7 @@ fn convert_linkerd_rule( .backend_refs .into_iter() .flatten() - .filter_map(|b| convert_backend(ns, b, cluster, service_info)) + .filter_map(|b| convert_backend(ns, b, cluster, resource_info)) .collect(); let filters = rule @@ -156,7 +156,7 @@ fn convert_gateway_rule( ns: &str, rule: gateway::HttpRouteRule, cluster: &ClusterInfo, - service_info: &HashMap, + resource_info: &HashMap, timeouts: RouteTimeouts, retry: Option>, ) -> Result> { @@ -171,7 +171,7 @@ fn convert_gateway_rule( .backend_refs .into_iter() .flatten() - .filter_map(|b| convert_backend(ns, b, cluster, service_info)) + .filter_map(|b| convert_backend(ns, b, cluster, resource_info)) .collect(); let filters = rule @@ -194,46 +194,35 @@ pub(super) fn convert_backend>( ns: &str, backend: BackendRef, cluster: &ClusterInfo, - services: &HashMap, + resources: &HashMap, ) -> Option { let backend = backend.into(); let filters = backend.filters; let backend = backend.backend_ref?; - if !is_backend_service(&backend.inner) { - return Some(Backend::Invalid { - weight: backend.weight.unwrap_or(1).into(), - message: format!( - "unsupported backend type {group} {kind}", - group = backend.inner.group.as_deref().unwrap_or("core"), - kind = backend.inner.kind.as_deref().unwrap_or(""), - ), - }); - } - - let name = backend.inner.name; - let weight = backend.weight.unwrap_or(1); - // The gateway API dictates: - // - // Port is required when the referent is a Kubernetes Service. 
- let port = match backend - .inner - .port - .and_then(|p| NonZeroU16::try_from(p).ok()) - { - Some(port) => port, + let backend_kind = match super::backend_kind(&backend.inner) { + Some(backend_kind) => backend_kind, None => { return Some(Backend::Invalid { - weight: weight.into(), - message: format!("missing port for backend Service {name}"), - }) + weight: backend.weight.unwrap_or(1).into(), + message: format!( + "unsupported backend type {group} {kind}", + group = backend.inner.group.as_deref().unwrap_or("core"), + kind = backend.inner.kind.as_deref().unwrap_or(""), + ), + }); } }; - let service_ref = ServiceRef { - name: name.clone(), + + let backend_ref = ResourceRef { + name: backend.inner.name.clone(), namespace: backend.inner.namespace.unwrap_or_else(|| ns.to_string()), + kind: backend_kind.clone(), }; + let name = backend.inner.name; + let weight = backend.weight.unwrap_or(1); + let filters = match filters .into_iter() .flatten() @@ -249,15 +238,45 @@ pub(super) fn convert_backend>( } }; - Some(Backend::Service(WeightedService { - weight: weight.into(), - authority: cluster.service_dns_authority(&service_ref.namespace, &name, port), - name, - namespace: service_ref.namespace.to_string(), - port, - filters, - exists: services.contains_key(&service_ref), - })) + let port = backend + .inner + .port + .and_then(|p| NonZeroU16::try_from(p).ok()); + + match backend_kind { + ResourceKind::Service => { + // The gateway API dictates: + // + // Port is required when the referent is a Kubernetes Service. 
+ let port = match port { + Some(port) => port, + None => { + return Some(Backend::Invalid { + weight: weight.into(), + message: format!("missing port for backend Service {name}"), + }) + } + }; + + Some(Backend::Service(WeightedService { + weight: weight.into(), + authority: cluster.service_dns_authority(&backend_ref.namespace, &name, port), + name, + namespace: backend_ref.namespace.to_string(), + port, + filters, + exists: resources.contains_key(&backend_ref), + })) + } + ResourceKind::EgressNetwork => Some(Backend::EgressNetwork(WeightedEgressNetwork { + weight: weight.into(), + name, + namespace: backend_ref.namespace.to_string(), + port, + filters, + exists: resources.contains_key(&backend_ref), + })), + } } fn convert_linkerd_filter(filter: policy::httproute::HttpRouteFilter) -> Result { @@ -320,15 +339,6 @@ pub(crate) fn convert_gateway_filter Ok(filter) } -#[inline] -fn is_backend_service(backend: &gateway::BackendObjectReference) -> bool { - is_service( - backend.group.as_deref(), - // Backends default to `Service` if no kind is specified. 
- backend.kind.as_deref().unwrap_or("Service"), - ) -} - pub fn parse_http_retry( annotations: &std::collections::BTreeMap, ) -> Result>> { diff --git a/policy-controller/k8s/index/src/outbound/index/metrics.rs b/policy-controller/k8s/index/src/outbound/index/metrics.rs index d7f698a78cc0d..55e6730344100 100644 --- a/policy-controller/k8s/index/src/outbound/index/metrics.rs +++ b/policy-controller/k8s/index/src/outbound/index/metrics.rs @@ -33,7 +33,7 @@ impl Collector for Instrumented { None, MetricType::Gauge, )?; - let service_infos = ConstGauge::new(this.service_info.len() as u32); + let service_infos = ConstGauge::new(this.resource_info.len() as u32); service_infos.encode(service_info_encoder)?; let mut service_route_encoder = encoder.encode_descriptor( @@ -57,7 +57,7 @@ impl Collector for Instrumented { )?; for (ns, index) in &this.namespaces.by_ns { let labels = vec![("namespace", ns.as_str())]; - let service_port_routes = ConstGauge::new(index.service_port_routes.len() as u32); + let service_port_routes = ConstGauge::new(index.resource_port_routes.len() as u32); let service_port_route_encoder = service_port_route_encoder.encode_family(&labels)?; service_port_routes.encode(service_port_route_encoder)?; } diff --git a/policy-controller/k8s/index/src/outbound/index/tcp.rs b/policy-controller/k8s/index/src/outbound/index/tcp.rs new file mode 100644 index 0000000000000..4736a9137c3a5 --- /dev/null +++ b/policy-controller/k8s/index/src/outbound/index/tcp.rs @@ -0,0 +1,106 @@ +use std::num::NonZeroU16; + +use super::{ResourceInfo, ResourceKind, ResourceRef}; +use crate::ClusterInfo; +use ahash::AHashMap as HashMap; +use anyhow::{bail, Result}; +use linkerd_policy_controller_core::outbound::{Backend, WeightedEgressNetwork, WeightedService}; +use linkerd_policy_controller_core::outbound::{TcpRoute, TcpRouteRule}; +use linkerd_policy_controller_k8s_api::{gateway, Time}; + +pub(super) fn convert_route( + ns: &str, + route: gateway::TcpRoute, + cluster: &ClusterInfo, + 
resource_info: &HashMap, +) -> Result { + if route.spec.rules.len() != 1 { + bail!("TCPRoute needs to have one rule"); + } + + let rule = route.spec.rules.first().expect("already checked"); + + let backends = rule + .backend_refs + .clone() + .into_iter() + .filter_map(|b| convert_backend(ns, b, cluster, resource_info)) + .collect(); + + let creation_timestamp = route.metadata.creation_timestamp.map(|Time(t)| t); + + Ok(TcpRoute { + rule: TcpRouteRule { backends }, + creation_timestamp, + }) +} + +pub(super) fn convert_backend( + ns: &str, + backend: gateway::BackendRef, + cluster: &ClusterInfo, + resources: &HashMap, +) -> Option { + let backend_kind = match super::backend_kind(&backend.inner) { + Some(backend_kind) => backend_kind, + None => { + return Some(Backend::Invalid { + weight: backend.weight.unwrap_or(1).into(), + message: format!( + "unsupported backend type {group} {kind}", + group = backend.inner.group.as_deref().unwrap_or("core"), + kind = backend.inner.kind.as_deref().unwrap_or(""), + ), + }); + } + }; + + let backend_ref = ResourceRef { + name: backend.inner.name.clone(), + namespace: backend.inner.namespace.unwrap_or_else(|| ns.to_string()), + kind: backend_kind.clone(), + }; + + let name = backend.inner.name; + let weight = backend.weight.unwrap_or(1); + + let port = backend + .inner + .port + .and_then(|p| NonZeroU16::try_from(p).ok()); + + match backend_kind { + ResourceKind::Service => { + // The gateway API dictates: + // + // Port is required when the referent is a Kubernetes Service. 
+ let port = match port { + Some(port) => port, + None => { + return Some(Backend::Invalid { + weight: weight.into(), + message: format!("missing port for backend Service {name}"), + }) + } + }; + + Some(Backend::Service(WeightedService { + weight: weight.into(), + authority: cluster.service_dns_authority(&backend_ref.namespace, &name, port), + name, + namespace: backend_ref.namespace.to_string(), + port, + filters: vec![], + exists: resources.contains_key(&backend_ref), + })) + } + ResourceKind::EgressNetwork => Some(Backend::EgressNetwork(WeightedEgressNetwork { + weight: weight.into(), + name, + namespace: backend_ref.namespace.to_string(), + port, + filters: vec![], + exists: resources.contains_key(&backend_ref), + })), + } +} diff --git a/policy-controller/k8s/index/src/outbound/index/tls.rs b/policy-controller/k8s/index/src/outbound/index/tls.rs new file mode 100644 index 0000000000000..393c5ef9db3c9 --- /dev/null +++ b/policy-controller/k8s/index/src/outbound/index/tls.rs @@ -0,0 +1,43 @@ +use super::tcp::convert_backend; +use super::{ResourceInfo, ResourceRef}; +use crate::{routes, ClusterInfo}; +use ahash::AHashMap as HashMap; +use anyhow::{bail, Result}; +use linkerd_policy_controller_core::outbound::{TcpRouteRule, TlsRoute}; +use linkerd_policy_controller_k8s_api::{gateway, Time}; + +pub(super) fn convert_route( + ns: &str, + route: gateway::TlsRoute, + cluster: &ClusterInfo, + resource_info: &HashMap, +) -> Result { + if route.spec.rules.len() != 1 { + bail!("TLSRoute needs to have one rule"); + } + + let rule = route.spec.rules.first().expect("already checked"); + + let hostnames = route + .spec + .hostnames + .into_iter() + .flatten() + .map(routes::host_match) + .collect(); + + let backends = rule + .backend_refs + .clone() + .into_iter() + .filter_map(|b| convert_backend(ns, b, cluster, resource_info)) + .collect(); + + let creation_timestamp = route.metadata.creation_timestamp.map(|Time(t)| t); + + Ok(TlsRoute { + hostnames, + rule: TcpRouteRule { 
backends }, + creation_timestamp, + }) +} diff --git a/policy-controller/k8s/index/src/outbound/tests.rs b/policy-controller/k8s/index/src/outbound/tests.rs index 552eaf05d1be4..d11a9ed8947b0 100644 --- a/policy-controller/k8s/index/src/outbound/tests.rs +++ b/policy-controller/k8s/index/src/outbound/tests.rs @@ -1,13 +1,14 @@ -use std::sync::Arc; +use std::{sync::Arc, vec}; use crate::{ defaults::DefaultPolicy, outbound::index::{Index, SharedIndex}, ClusterInfo, }; +use k8s_openapi::chrono::Utc; use kubert::index::IndexNamespacedResource; use linkerd_policy_controller_core::IpNet; -use linkerd_policy_controller_k8s_api::{self as k8s}; +use linkerd_policy_controller_k8s_api::{self as k8s, policy}; use tokio::time; mod routes; @@ -34,6 +35,30 @@ pub fn mk_service(ns: impl ToString, name: impl ToString, port: i32) -> k8s::Ser } } +pub fn mk_egress_network(ns: impl ToString, name: impl ToString) -> policy::EgressNetwork { + policy::EgressNetwork { + metadata: k8s::ObjectMeta { + namespace: Some(ns.to_string()), + name: Some(name.to_string()), + ..Default::default() + }, + spec: policy::EgressNetworkSpec { + traffic_policy: policy::TrafficPolicy::Allow, + networks: None, + }, + status: Some(policy::EgressNetworkStatus { + conditions: vec![k8s::Condition { + last_transition_time: k8s::Time(Utc::now()), + message: "".to_string(), + observed_generation: None, + reason: "Accepted".to_string(), + status: "True".to_string(), + type_: "Accepted".to_string(), + }], + }), + } +} + impl TestConfig { fn from_default_policy(default_policy: DefaultPolicy) -> Self { Self::from_default_policy_with_probes(default_policy, vec![]) diff --git a/policy-controller/k8s/index/src/outbound/tests/routes.rs b/policy-controller/k8s/index/src/outbound/tests/routes.rs index f4e23ff7ca636..ce47732b3c5c5 100644 --- a/policy-controller/k8s/index/src/outbound/tests/routes.rs +++ b/policy-controller/k8s/index/src/outbound/tests/routes.rs @@ -1,2 +1,9 @@ mod grpc; mod http; +mod tcp; +mod tls; + +enum 
BackendKind { + Egress, + Service, +} diff --git a/policy-controller/k8s/index/src/outbound/tests/routes/grpc.rs b/policy-controller/k8s/index/src/outbound/tests/routes/grpc.rs index d8245bdb10bd4..ae93457e652c9 100644 --- a/policy-controller/k8s/index/src/outbound/tests/routes/grpc.rs +++ b/policy-controller/k8s/index/src/outbound/tests/routes/grpc.rs @@ -1,6 +1,8 @@ use kube::Resource; use linkerd_policy_controller_core::{ - outbound::{Backend, WeightedService}, + outbound::{ + self, Backend, Kind, OutboundDiscoverTarget, WeightedEgressNetwork, WeightedService, + }, routes::GroupKindNamespaceName, POLICY_CONTROLLER_NAME, }; @@ -22,18 +24,26 @@ fn backend_service() { test.index.write().apply(apex); // Create httproute. - let route = mk_route("ns", "route", 8080, "apex", "backend"); + let route = mk_route( + "ns", + "route", + 8080, + "apex", + "backend", + super::BackendKind::Service, + ); test.index.write().apply(route); let mut rx = test .index .write() - .outbound_policy_rx( - "apex".to_string(), - "ns".to_string(), - 8080.try_into().unwrap(), - "ns".to_string(), - ) + .outbound_policy_rx(OutboundDiscoverTarget { + name: "apex".to_string(), + namespace: "ns".to_string(), + port: 8080.try_into().unwrap(), + source_namespace: "ns".to_string(), + kind: Kind::Service, + }) .expect("apex.ns should exist"); { @@ -96,15 +106,88 @@ fn backend_service() { } } +#[test] +fn backend_egress_network() { + tracing_subscriber::fmt() + .with_max_level(Level::TRACE) + .try_init() + .ok(); + + let test = TestConfig::default(); + // Create apex service. + let apex = mk_egress_network("ns", "apex"); + test.index.write().apply(apex); + + // Create httproute. 
+ let route = mk_route( + "ns", + "route", + 8080, + "apex", + "apex", + super::BackendKind::Egress, + ); + test.index.write().apply(route); + + let mut rx = test + .index + .write() + .outbound_policy_rx(OutboundDiscoverTarget { + name: "apex".to_string(), + namespace: "ns".to_string(), + port: 8080.try_into().unwrap(), + source_namespace: "ns".to_string(), + kind: Kind::EgressNetwork { + original_dst: "192.168.0.1:8080".parse().unwrap(), + traffic_policy: outbound::TrafficPolicy::Allow, + }, + }) + .expect("apex.ns should exist"); + + { + let policy = rx.borrow_and_update(); + let backend = policy + .grpc_routes + .get(&GroupKindNamespaceName { + group: k8s_gateway_api::GrpcRoute::group(&()), + kind: k8s_gateway_api::GrpcRoute::kind(&()), + namespace: "ns".into(), + name: "route".into(), + }) + .expect("route should exist") + .rules + .first() + .expect("rule should exist") + .backends + .first() + .expect("backend should exist"); + + let exists = match backend { + Backend::Invalid { .. } => &false, + Backend::EgressNetwork(WeightedEgressNetwork { exists, .. }) => exists, + _ => panic!("backend should be an egress network, but got {backend:?}"), + }; + + // Backend should exist. 
+ assert!(exists); + } +} + fn mk_route( ns: impl ToString, name: impl ToString, port: u16, parent: impl ToString, - backend: impl ToString, + backend_name: impl ToString, + backend: super::BackendKind, ) -> k8s_gateway_api::GrpcRoute { - use chrono::Utc; use k8s::{policy::httproute::*, Time}; + let (group, kind) = match backend { + super::BackendKind::Service => ("core".to_string(), "Service".to_string()), + super::BackendKind::Egress => { + ("policy.linkerd.io".to_string(), "EgressNetwork".to_string()) + } + }; k8s_gateway_api::GrpcRoute { metadata: k8s::ObjectMeta { @@ -116,8 +199,8 @@ fn mk_route( spec: k8s_gateway_api::GrpcRouteSpec { inner: CommonRouteSpec { parent_refs: Some(vec![ParentReference { - group: Some("core".to_string()), - kind: Some("Service".to_string()), + group: Some(group.clone()), + kind: Some(kind.clone()), namespace: Some(ns.to_string()), name: parent.to_string(), section_name: None, @@ -138,10 +221,10 @@ fn mk_route( filters: None, weight: None, inner: BackendObjectReference { - group: Some("core".to_string()), - kind: Some("Service".to_string()), + group: Some(group.clone()), + kind: Some(kind.clone()), namespace: Some(ns.to_string()), - name: backend.to_string(), + name: backend_name.to_string(), port: Some(port), }, }]), @@ -151,8 +234,8 @@ fn mk_route( inner: RouteStatus { parents: vec![k8s::gateway::RouteParentStatus { parent_ref: ParentReference { - group: Some("core".to_string()), - kind: Some("Service".to_string()), + group: Some(group.clone()), + kind: Some(kind.clone()), namespace: Some(ns.to_string()), name: parent.to_string(), section_name: None, diff --git a/policy-controller/k8s/index/src/outbound/tests/routes/http.rs b/policy-controller/k8s/index/src/outbound/tests/routes/http.rs index dc672b339781f..1db3a45dfadd1 100644 --- a/policy-controller/k8s/index/src/outbound/tests/routes/http.rs +++ b/policy-controller/k8s/index/src/outbound/tests/routes/http.rs @@ -1,6 +1,8 @@ use kube::Resource; use 
linkerd_policy_controller_core::{ - outbound::{Backend, WeightedService}, + outbound::{ + self, Backend, Kind, OutboundDiscoverTarget, WeightedEgressNetwork, WeightedService, + }, routes::GroupKindNamespaceName, POLICY_CONTROLLER_NAME, }; @@ -23,18 +25,26 @@ fn backend_service() { test.index.write().apply(apex); // Create httproute. - let route = mk_route("ns", "route", 8080, "apex", "backend"); + let route = mk_route( + "ns", + "route", + 8080, + "apex", + "backend", + super::BackendKind::Service, + ); test.index.write().apply(route); let mut rx = test .index .write() - .outbound_policy_rx( - "apex".to_string(), - "ns".to_string(), - 8080.try_into().unwrap(), - "ns".to_string(), - ) + .outbound_policy_rx(OutboundDiscoverTarget { + name: "apex".to_string(), + namespace: "ns".to_string(), + port: 8080.try_into().unwrap(), + source_namespace: "ns".to_string(), + kind: Kind::Service, + }) .expect("apex.ns should exist"); { @@ -98,15 +108,89 @@ fn backend_service() { } } +#[test] +fn backend_egress_network() { + tracing_subscriber::fmt() + .with_max_level(Level::TRACE) + .try_init() + .ok(); + + let test = TestConfig::default(); + + // Create apex service. + let apex = mk_egress_network("ns", "apex"); + test.index.write().apply(apex); + + // Create httproute. 
+ let route = mk_route( + "ns", + "route", + 8080, + "apex", + "apex", + super::BackendKind::Egress, + ); + test.index.write().apply(route); + + let mut rx = test + .index + .write() + .outbound_policy_rx(OutboundDiscoverTarget { + name: "apex".to_string(), + namespace: "ns".to_string(), + port: 8080.try_into().unwrap(), + source_namespace: "ns".to_string(), + kind: Kind::EgressNetwork { + original_dst: "192.168.0.1:8080".parse().unwrap(), + traffic_policy: outbound::TrafficPolicy::Allow, + }, + }) + .expect("apex.ns should exist"); + + { + let policy = rx.borrow_and_update(); + let backend = policy + .http_routes + .get(&GroupKindNamespaceName { + group: k8s::policy::HttpRoute::group(&()), + kind: k8s::policy::HttpRoute::kind(&()), + namespace: "ns".into(), + name: "route".into(), + }) + .expect("route should exist") + .rules + .first() + .expect("rule should exist") + .backends + .first() + .expect("backend should exist"); + + let exists = match backend { + Backend::Invalid { .. } => &false, + Backend::EgressNetwork(WeightedEgressNetwork { exists, .. }) => exists, + _ => panic!("backend should be an egress network, but got {backend:?}"), + }; + + // Backend should exist. 
+ assert!(exists); + } +} + fn mk_route( ns: impl ToString, name: impl ToString, port: u16, parent: impl ToString, - backend: impl ToString, + backend_name: impl ToString, + backend: super::BackendKind, ) -> k8s::policy::HttpRoute { - use chrono::Utc; use k8s::{policy::httproute::*, Time}; + let (group, kind) = match backend { + super::BackendKind::Service => ("core".to_string(), "Service".to_string()), + super::BackendKind::Egress => { + ("policy.linkerd.io".to_string(), "EgressNetwork".to_string()) + } + }; HttpRoute { metadata: k8s::ObjectMeta { @@ -118,8 +202,8 @@ fn mk_route( spec: HttpRouteSpec { inner: CommonRouteSpec { parent_refs: Some(vec![ParentReference { - group: Some("core".to_string()), - kind: Some("Service".to_string()), + group: Some(group.clone()), + kind: Some(kind.clone()), namespace: Some(ns.to_string()), name: parent.to_string(), section_name: None, @@ -141,10 +225,10 @@ fn mk_route( backend_ref: Some(BackendRef { weight: None, inner: BackendObjectReference { - group: Some("core".to_string()), - kind: Some("Service".to_string()), + group: Some(group.clone()), + kind: Some(kind.clone()), namespace: Some(ns.to_string()), - name: backend.to_string(), + name: backend_name.to_string(), port: Some(port), }, }), @@ -157,8 +241,8 @@ fn mk_route( inner: RouteStatus { parents: vec![k8s::gateway::RouteParentStatus { parent_ref: ParentReference { - group: Some("core".to_string()), - kind: Some("Service".to_string()), + group: Some(group), + kind: Some(kind), namespace: Some(ns.to_string()), name: parent.to_string(), section_name: None, diff --git a/policy-controller/k8s/index/src/outbound/tests/routes/tcp.rs b/policy-controller/k8s/index/src/outbound/tests/routes/tcp.rs new file mode 100644 index 0000000000000..16823d41dbf67 --- /dev/null +++ b/policy-controller/k8s/index/src/outbound/tests/routes/tcp.rs @@ -0,0 +1,241 @@ +use kube::Resource; +use linkerd_policy_controller_core::{ + outbound::{ + self, Backend, Kind, OutboundDiscoverTarget, 
WeightedEgressNetwork, WeightedService, + }, + routes::GroupKindNamespaceName, + POLICY_CONTROLLER_NAME, +}; +use linkerd_policy_controller_k8s_api::gateway as k8s_gateway_api; +use tracing::Level; + +use super::super::*; + +#[test] +fn backend_service() { + tracing_subscriber::fmt() + .with_max_level(Level::TRACE) + .try_init() + .ok(); + + let test = TestConfig::default(); + // Create apex service. + let apex = mk_service("ns", "apex", 8080); + test.index.write().apply(apex); + + // Create tcproute. + let route = mk_route( + "ns", + "route", + 8080, + "apex", + "backend", + super::BackendKind::Service, + ); + test.index.write().apply(route); + + let mut rx = test + .index + .write() + .outbound_policy_rx(OutboundDiscoverTarget { + name: "apex".to_string(), + namespace: "ns".to_string(), + port: 8080.try_into().unwrap(), + source_namespace: "ns".to_string(), + kind: Kind::Service, + }) + .expect("apex.ns should exist"); + + { + let policy = rx.borrow_and_update(); + let backend = policy + .tcp_routes + .get(&GroupKindNamespaceName { + group: k8s_gateway_api::TcpRoute::group(&()), + kind: k8s_gateway_api::TcpRoute::kind(&()), + namespace: "ns".into(), + name: "route".into(), + }) + .expect("route should exist") + .rule + .backends + .first() + .expect("backend should exist"); + + let exists = match backend { + Backend::Service(WeightedService { exists, .. }) => exists, + _ => panic!("backend should be a service"), + }; + + // Backend should not exist. + assert!(!exists); + } + + // Create backend service. 
+ let backend = mk_service("ns", "backend", 8080); + test.index.write().apply(backend); + assert!(rx.has_changed().unwrap()); + + { + let policy = rx.borrow_and_update(); + let backend = policy + .tcp_routes + .get(&GroupKindNamespaceName { + group: k8s_gateway_api::TcpRoute::group(&()), + kind: k8s_gateway_api::TcpRoute::kind(&()), + namespace: "ns".into(), + name: "route".into(), + }) + .expect("route should exist") + .rule + .backends + .first() + .expect("backend should exist"); + + let exists = match backend { + Backend::Service(WeightedService { exists, .. }) => exists, + _ => panic!("backend should be a service"), + }; + + // Backend should exist. + assert!(exists); + } +} + +#[test] +fn backend_egress_network() { + tracing_subscriber::fmt() + .with_max_level(Level::TRACE) + .try_init() + .ok(); + + let test = TestConfig::default(); + // Create apex service. + let apex = mk_egress_network("ns", "apex"); + test.index.write().apply(apex); + + // Create tcproute. + let route = mk_route( + "ns", + "route", + 8080, + "apex", + "apex", + super::BackendKind::Egress, + ); + test.index.write().apply(route); + + let mut rx = test + .index + .write() + .outbound_policy_rx(OutboundDiscoverTarget { + name: "apex".to_string(), + namespace: "ns".to_string(), + port: 8080.try_into().unwrap(), + source_namespace: "ns".to_string(), + kind: Kind::EgressNetwork { + original_dst: "192.168.0.1:8080".parse().unwrap(), + traffic_policy: outbound::TrafficPolicy::Allow, + }, + }) + .expect("apex.ns should exist"); + + { + let policy = rx.borrow_and_update(); + let backend = policy + .tcp_routes + .get(&GroupKindNamespaceName { + group: k8s_gateway_api::TcpRoute::group(&()), + kind: k8s_gateway_api::TcpRoute::kind(&()), + namespace: "ns".into(), + name: "route".into(), + }) + .expect("route should exist") + .rule + .backends + .first() + .expect("backend should exist"); + + let exists = match backend { + Backend::Invalid { .. 
} => &false, + Backend::EgressNetwork(WeightedEgressNetwork { exists, .. }) => exists, + _ => panic!("backend should be an egress network, but got {backend:?}"), + }; + + // Backend should exist. + assert!(exists); + } +} + +fn mk_route( + ns: impl ToString, + name: impl ToString, + port: u16, + parent: impl ToString, + backend_name: impl ToString, + backend: super::BackendKind, +) -> k8s_gateway_api::TcpRoute { + use k8s::{policy::httproute::*, Time}; + let (group, kind) = match backend { + super::BackendKind::Service => ("core".to_string(), "Service".to_string()), + super::BackendKind::Egress => { + ("policy.linkerd.io".to_string(), "EgressNetwork".to_string()) + } + }; + + k8s_gateway_api::TcpRoute { + metadata: k8s::ObjectMeta { + namespace: Some(ns.to_string()), + name: Some(name.to_string()), + creation_timestamp: Some(Time(Utc::now())), + ..Default::default() + }, + spec: k8s_gateway_api::TcpRouteSpec { + inner: CommonRouteSpec { + parent_refs: Some(vec![ParentReference { + group: Some(group.clone()), + kind: Some(kind.clone()), + namespace: Some(ns.to_string()), + name: parent.to_string(), + section_name: None, + port: Some(port), + }]), + }, + rules: vec![k8s_gateway_api::TcpRouteRule { + backend_refs: vec![k8s_gateway_api::BackendRef { + weight: None, + inner: BackendObjectReference { + group: Some(group.clone()), + kind: Some(kind.clone()), + namespace: Some(ns.to_string()), + name: backend_name.to_string(), + port: Some(port), + }, + }], + }], + }, + status: Some(k8s_gateway_api::TcpRouteStatus { + inner: RouteStatus { + parents: vec![k8s::gateway::RouteParentStatus { + parent_ref: ParentReference { + group: Some(group.clone()), + kind: Some(kind.clone()), + namespace: Some(ns.to_string()), + name: parent.to_string(), + section_name: None, + port: Some(port), + }, + controller_name: POLICY_CONTROLLER_NAME.to_string(), + conditions: vec![k8s::Condition { + last_transition_time: Time(chrono::DateTime::::MIN_UTC), + message: "".to_string(), + 
observed_generation: None, + reason: "Accepted".to_string(), + status: "True".to_string(), + type_: "Accepted".to_string(), + }], + }], + }, + }), + } +} diff --git a/policy-controller/k8s/index/src/outbound/tests/routes/tls.rs b/policy-controller/k8s/index/src/outbound/tests/routes/tls.rs new file mode 100644 index 0000000000000..0673b2d53d612 --- /dev/null +++ b/policy-controller/k8s/index/src/outbound/tests/routes/tls.rs @@ -0,0 +1,242 @@ +use kube::Resource; +use linkerd_policy_controller_core::{ + outbound::{ + self, Backend, Kind, OutboundDiscoverTarget, WeightedEgressNetwork, WeightedService, + }, + routes::GroupKindNamespaceName, + POLICY_CONTROLLER_NAME, +}; +use linkerd_policy_controller_k8s_api::gateway as k8s_gateway_api; +use tracing::Level; + +use super::super::*; + +#[test] +fn backend_service() { + tracing_subscriber::fmt() + .with_max_level(Level::TRACE) + .try_init() + .ok(); + + let test = TestConfig::default(); + // Create apex service. + let apex = mk_service("ns", "apex", 8080); + test.index.write().apply(apex); + + // Create tlsroute. + let route = mk_route( + "ns", + "route", + 8080, + "apex", + "backend", + super::BackendKind::Service, + ); + test.index.write().apply(route); + + let mut rx = test + .index + .write() + .outbound_policy_rx(OutboundDiscoverTarget { + name: "apex".to_string(), + namespace: "ns".to_string(), + port: 8080.try_into().unwrap(), + source_namespace: "ns".to_string(), + kind: Kind::Service, + }) + .expect("apex.ns should exist"); + + { + let policy = rx.borrow_and_update(); + let backend = policy + .tls_routes + .get(&GroupKindNamespaceName { + group: k8s_gateway_api::TlsRoute::group(&()), + kind: k8s_gateway_api::TlsRoute::kind(&()), + namespace: "ns".into(), + name: "route".into(), + }) + .expect("route should exist") + .rule + .backends + .first() + .expect("backend should exist"); + + let exists = match backend { + Backend::Service(WeightedService { exists, .. 
}) => exists, + _ => panic!("backend should be a service"), + }; + + // Backend should not exist. + assert!(!exists); + } + + // Create backend service. + let backend = mk_service("ns", "backend", 8080); + test.index.write().apply(backend); + assert!(rx.has_changed().unwrap()); + + { + let policy = rx.borrow_and_update(); + let backend = policy + .tls_routes + .get(&GroupKindNamespaceName { + group: k8s_gateway_api::TlsRoute::group(&()), + kind: k8s_gateway_api::TlsRoute::kind(&()), + namespace: "ns".into(), + name: "route".into(), + }) + .expect("route should exist") + .rule + .backends + .first() + .expect("backend should exist"); + + let exists = match backend { + Backend::Service(WeightedService { exists, .. }) => exists, + _ => panic!("backend should be a service"), + }; + + // Backend should exist. + assert!(exists); + } +} + +#[test] +fn backend_egress_network() { + tracing_subscriber::fmt() + .with_max_level(Level::TRACE) + .try_init() + .ok(); + + let test = TestConfig::default(); + // Create apex service. + let apex = mk_egress_network("ns", "apex"); + test.index.write().apply(apex); + + // Create tlsroute. 
+ let route = mk_route( + "ns", + "route", + 8080, + "apex", + "apex", + super::BackendKind::Egress, + ); + test.index.write().apply(route); + + let mut rx = test + .index + .write() + .outbound_policy_rx(OutboundDiscoverTarget { + name: "apex".to_string(), + namespace: "ns".to_string(), + port: 8080.try_into().unwrap(), + source_namespace: "ns".to_string(), + kind: Kind::EgressNetwork { + original_dst: "192.168.0.1:8080".parse().unwrap(), + traffic_policy: outbound::TrafficPolicy::Allow, + }, + }) + .expect("apex.ns should exist"); + + { + let policy = rx.borrow_and_update(); + let backend = policy + .tls_routes + .get(&GroupKindNamespaceName { + group: k8s_gateway_api::TlsRoute::group(&()), + kind: k8s_gateway_api::TlsRoute::kind(&()), + namespace: "ns".into(), + name: "route".into(), + }) + .expect("route should exist") + .rule + .backends + .first() + .expect("backend should exist"); + + let exists = match backend { + Backend::Invalid { .. } => &false, + Backend::EgressNetwork(WeightedEgressNetwork { exists, .. }) => exists, + _ => panic!("backend should be an egress network, but got {backend:?}"), + }; + + // Backend should exist. 
+ assert!(exists); + } +} + +fn mk_route( + ns: impl ToString, + name: impl ToString, + port: u16, + parent: impl ToString, + backend_name: impl ToString, + backend: super::BackendKind, +) -> k8s_gateway_api::TlsRoute { + use k8s::{policy::httproute::*, Time}; + let (group, kind) = match backend { + super::BackendKind::Service => ("core".to_string(), "Service".to_string()), + super::BackendKind::Egress => { + ("policy.linkerd.io".to_string(), "EgressNetwork".to_string()) + } + }; + + k8s_gateway_api::TlsRoute { + metadata: k8s::ObjectMeta { + namespace: Some(ns.to_string()), + name: Some(name.to_string()), + creation_timestamp: Some(Time(Utc::now())), + ..Default::default() + }, + spec: k8s_gateway_api::TlsRouteSpec { + inner: CommonRouteSpec { + parent_refs: Some(vec![ParentReference { + group: Some(group.clone()), + kind: Some(kind.clone()), + namespace: Some(ns.to_string()), + name: parent.to_string(), + section_name: None, + port: Some(port), + }]), + }, + hostnames: None, + rules: vec![k8s_gateway_api::TlsRouteRule { + backend_refs: vec![k8s_gateway_api::BackendRef { + weight: None, + inner: BackendObjectReference { + group: Some(group.clone()), + kind: Some(kind.clone()), + namespace: Some(ns.to_string()), + name: backend_name.to_string(), + port: Some(port), + }, + }], + }], + }, + status: Some(k8s_gateway_api::TlsRouteStatus { + inner: RouteStatus { + parents: vec![k8s::gateway::RouteParentStatus { + parent_ref: ParentReference { + group: Some(group.clone()), + kind: Some(kind.clone()), + namespace: Some(ns.to_string()), + name: parent.to_string(), + section_name: None, + port: Some(port), + }, + controller_name: POLICY_CONTROLLER_NAME.to_string(), + conditions: vec![k8s::Condition { + last_transition_time: Time(chrono::DateTime::::MIN_UTC), + message: "".to_string(), + observed_generation: None, + reason: "Accepted".to_string(), + status: "True".to_string(), + type_: "Accepted".to_string(), + }], + }], + }, + }), + } +} diff --git 
a/policy-controller/k8s/index/src/routes.rs b/policy-controller/k8s/index/src/routes.rs index 538d2bf4e8513..37715717cf3af 100644 --- a/policy-controller/k8s/index/src/routes.rs +++ b/policy-controller/k8s/index/src/routes.rs @@ -1,4 +1,4 @@ -use linkerd_policy_controller_core::routes::{GroupKindName, GroupKindNamespaceName}; +use linkerd_policy_controller_core::routes::{GroupKindName, GroupKindNamespaceName, HostMatch}; use linkerd_policy_controller_k8s_api::{gateway as api, policy, Resource, ResourceExt}; pub mod grpc; @@ -77,3 +77,17 @@ impl ExplicitGKN for str { GroupKindName { group, kind, name } } } + +pub fn host_match(hostname: api::Hostname) -> HostMatch { + if hostname.starts_with("*.") { + let mut reverse_labels = hostname + .split('.') + .skip(1) + .map(|label| label.to_string()) + .collect::>(); + reverse_labels.reverse(); + HostMatch::Suffix { reverse_labels } + } else { + HostMatch::Exact(hostname) + } +} diff --git a/policy-controller/k8s/index/src/routes/http.rs b/policy-controller/k8s/index/src/routes/http.rs index d01839f6f566a..4f24405906919 100644 --- a/policy-controller/k8s/index/src/routes/http.rs +++ b/policy-controller/k8s/index/src/routes/http.rs @@ -54,20 +54,6 @@ pub fn path_match(path_match: api::HttpPathMatch) -> Result { } } -pub fn host_match(hostname: api::Hostname) -> routes::HostMatch { - if hostname.starts_with("*.") { - let mut reverse_labels = hostname - .split('.') - .skip(1) - .map(|label| label.to_string()) - .collect::>(); - reverse_labels.reverse(); - routes::HostMatch::Suffix { reverse_labels } - } else { - routes::HostMatch::Exact(hostname) - } -} - pub fn header_match(header_match: api::HttpHeaderMatch) -> Result { match header_match { api::HttpHeaderMatch::Exact { name, value } => { diff --git a/policy-controller/src/lib.rs b/policy-controller/src/lib.rs index d524a6578ed51..dc5e367dcafcb 100644 --- a/policy-controller/src/lib.rs +++ b/policy-controller/src/lib.rs @@ -9,12 +9,13 @@ use 
linkerd_policy_controller_core::inbound::{ DiscoverInboundServer, InboundServer, InboundServerStream, }; use linkerd_policy_controller_core::outbound::{ - DiscoverOutboundPolicy, OutboundDiscoverTarget, OutboundPolicy, OutboundPolicyStream, + DiscoverOutboundPolicy, Kind, OutboundDiscoverTarget, OutboundPolicy, OutboundPolicyStream, }; pub use linkerd_policy_controller_core::IpNet; pub use linkerd_policy_controller_grpc as grpc; pub use linkerd_policy_controller_k8s_api as k8s; pub use linkerd_policy_controller_k8s_index::{inbound, outbound, ClusterInfo, DefaultPolicy}; +use std::net::SocketAddr; use std::{net::IpAddr, num::NonZeroU16}; #[derive(Clone, Debug)] @@ -87,19 +88,9 @@ impl DiscoverInboundServer<(grpc::workload::Workload, NonZeroU16)> for InboundDi impl DiscoverOutboundPolicy for OutboundDiscover { async fn get_outbound_policy( &self, - OutboundDiscoverTarget { - service_name, - service_namespace, - service_port, - source_namespace, - }: OutboundDiscoverTarget, + target: OutboundDiscoverTarget, ) -> Result> { - let rx = match self.0.write().outbound_policy_rx( - service_name, - service_namespace, - service_port, - source_namespace, - ) { + let rx = match self.0.write().outbound_policy_rx(target) { Ok(rx) => rx, Err(error) => { tracing::error!(%error, "failed to get outbound policy rx"); @@ -112,19 +103,9 @@ impl DiscoverOutboundPolicy for OutboundDiscover { async fn watch_outbound_policy( &self, - OutboundDiscoverTarget { - service_name, - service_namespace, - service_port, - source_namespace, - }: OutboundDiscoverTarget, + target: OutboundDiscoverTarget, ) -> Result> { - match self.0.write().outbound_policy_rx( - service_name, - service_namespace, - service_port, - source_namespace, - ) { + match self.0.write().outbound_policy_rx(target) { Ok(rx) => Ok(Some(Box::pin(tokio_stream::wrappers::WatchStream::new(rx)))), Err(_) => Ok(None), } @@ -136,16 +117,31 @@ impl DiscoverOutboundPolicy for OutboundDiscover { port: NonZeroU16, source_namespace: String, ) 
-> Option { - self.0 - .read() - .lookup_service(addr) - .map( - |outbound::ServiceRef { name, namespace }| OutboundDiscoverTarget { - service_name: name, - service_namespace: namespace, - service_port: port, + let index = self.0.read(); + if let Some((namespace, name)) = index.lookup_service(addr) { + return Some(OutboundDiscoverTarget { + name, + namespace, + port, + source_namespace, + kind: Kind::Service, + }); + } + + index + .lookup_egress_network(addr, source_namespace.clone()) + .map(|(namespace, name, traffic_policy)| { + let original_dst = SocketAddr::new(addr, port.into()); + OutboundDiscoverTarget { + name, + namespace, + port, source_namespace, - }, - ) + kind: Kind::EgressNetwork { + original_dst, + traffic_policy, + }, + } + }) } } diff --git a/policy-controller/src/main.rs b/policy-controller/src/main.rs index 36eaca3d24c52..3606283e757af 100644 --- a/policy-controller/src/main.rs +++ b/policy-controller/src/main.rs @@ -296,7 +296,9 @@ async fn main() -> Result<()> { if api_resource_exists::(&runtime.client()).await { let tls_routes = runtime.watch_all::(watcher::Config::default()); - let tls_routes_indexes = IndexList::new(status_index.clone()).shared(); + let tls_routes_indexes = IndexList::new(status_index.clone()) + .push(outbound_index.clone()) + .shared(); tokio::spawn( kubert::index::namespaced(tls_routes_indexes.clone(), tls_routes) .instrument(info_span!("tlsroutes.gateway.networking.k8s.io")), @@ -309,7 +311,9 @@ async fn main() -> Result<()> { if api_resource_exists::(&runtime.client()).await { let tcp_routes = runtime.watch_all::(watcher::Config::default()); - let tcp_routes_indexes = IndexList::new(status_index.clone()).shared(); + let tcp_routes_indexes = IndexList::new(status_index.clone()) + .push(outbound_index.clone()) + .shared(); tokio::spawn( kubert::index::namespaced(tcp_routes_indexes.clone(), tcp_routes) .instrument(info_span!("tcproutes.gateway.networking.k8s.io")), @@ -330,7 +334,9 @@ async fn main() -> Result<()> { let 
egress_networks = runtime.watch_all::(watcher::Config::default()); - let egress_networks_indexes = IndexList::new(status_index.clone()).shared(); + let egress_networks_indexes = IndexList::new(status_index.clone()) + .push(outbound_index.clone()) + .shared(); tokio::spawn( kubert::index::namespaced(egress_networks_indexes, egress_networks) .instrument(info_span!("egressnetworks")), diff --git a/policy-test/tests/outbound_api_linkerd.rs b/policy-test/tests/outbound_api_linkerd.rs index 4278e15af2451..f4e24cd255bc8 100644 --- a/policy-test/tests/outbound_api_linkerd.rs +++ b/policy-test/tests/outbound_api_linkerd.rs @@ -26,6 +26,7 @@ async fn service_does_not_exist() { let mut policy_api = grpc::OutboundPolicyClient::port_forwarded(&client).await; let rsp = policy_api.watch(&ns, &svc, 4191).await; + println!("{:?}", rsp); assert!(rsp.is_err()); assert_eq!(rsp.err().unwrap().code(), tonic::Code::NotFound); }) From 4144b190f8aad2745633628a194c3aa792fb50f6 Mon Sep 17 00:00:00 2001 From: Zahari Dichev Date: Wed, 23 Oct 2024 14:21:48 +0000 Subject: [PATCH 02/11] handle egress network changes Signed-off-by: Zahari Dichev --- policy-controller/core/src/outbound.rs | 53 +--- policy-controller/core/src/outbound/policy.rs | 67 +++++ policy-controller/core/src/outbound/target.rs | 26 ++ policy-controller/grpc/src/outbound.rs | 267 +++++++++++------- policy-controller/grpc/src/outbound/grpc.rs | 44 +-- policy-controller/grpc/src/outbound/http.rs | 44 +-- policy-controller/grpc/src/outbound/tcp.rs | 38 +-- policy-controller/grpc/src/outbound/tls.rs | 38 +-- .../k8s/index/src/outbound/index.rs | 118 ++++++-- .../src/outbound/index/egress_network.rs | 29 +- .../k8s/index/src/outbound/tests.rs | 123 +++++++- .../index/src/outbound/tests/routes/grpc.rs | 13 +- .../index/src/outbound/tests/routes/http.rs | 13 +- .../index/src/outbound/tests/routes/tcp.rs | 13 +- .../index/src/outbound/tests/routes/tls.rs | 13 +- policy-controller/src/lib.rs | 139 +++++++-- policy-test/src/lib.rs | 2 
+- 17 files changed, 708 insertions(+), 332 deletions(-) create mode 100644 policy-controller/core/src/outbound/policy.rs create mode 100644 policy-controller/core/src/outbound/target.rs diff --git a/policy-controller/core/src/outbound.rs b/policy-controller/core/src/outbound.rs index aa1e7d102ef0f..5cd4dd8fb935f 100644 --- a/policy-controller/core/src/outbound.rs +++ b/policy-controller/core/src/outbound.rs @@ -6,11 +6,14 @@ use ahash::AHashMap as HashMap; use anyhow::Result; use chrono::{offset::Utc, DateTime}; use futures::prelude::*; -use std::{ - net::{IpAddr, SocketAddr}, - num::NonZeroU16, - pin::Pin, - time, +use std::{net::IpAddr, num::NonZeroU16, pin::Pin, time}; + +mod policy; +mod target; + +pub use self::{ + policy::{OutboundPolicy, OutboundPolicyKind, ParentMeta, ResourceOutboundPolicy}, + target::{Kind, OutboundDiscoverTarget, ResourceTarget}, }; pub trait Route { @@ -20,14 +23,15 @@ pub trait Route { /// Models outbound policy discovery. #[async_trait::async_trait] pub trait DiscoverOutboundPolicy { - async fn get_outbound_policy(&self, target: T) -> Result>; + async fn get_outbound_policy(&self, target: T) -> Result>; async fn watch_outbound_policy(&self, target: T) -> Result>; fn lookup_ip(&self, addr: IpAddr, port: NonZeroU16, source_namespace: String) -> Option; } -pub type OutboundPolicyStream = Pin + Send + Sync + 'static>>; +pub type OutboundPolicyStream = + Pin + Send + Sync + 'static>>; pub type HttpRoute = OutboundRoute; pub type GrpcRoute = OutboundRoute; @@ -40,41 +44,6 @@ pub enum TrafficPolicy { Deny, } -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub enum Kind { - EgressNetwork { - original_dst: SocketAddr, - traffic_policy: TrafficPolicy, - }, - Service, -} - -#[derive(Clone, Debug)] -pub struct OutboundDiscoverTarget { - pub name: String, - pub namespace: String, - pub port: NonZeroU16, - pub source_namespace: String, - pub kind: Kind, -} - -#[derive(Clone, Debug, PartialEq)] -pub struct OutboundPolicy { - pub http_routes: 
RouteSet, - pub grpc_routes: RouteSet, - pub tls_routes: RouteSet, - pub tcp_routes: RouteSet, - pub service_authority: String, - pub name: String, - pub namespace: String, - pub port: NonZeroU16, - pub opaque: bool, - pub accrual: Option, - pub http_retry: Option>, - pub grpc_retry: Option>, - pub timeouts: RouteTimeouts, -} - #[derive(Clone, Debug, PartialEq, Eq)] pub struct OutboundRoute { pub hostnames: Vec, diff --git a/policy-controller/core/src/outbound/policy.rs b/policy-controller/core/src/outbound/policy.rs new file mode 100644 index 0000000000000..c04e2d07ff4a0 --- /dev/null +++ b/policy-controller/core/src/outbound/policy.rs @@ -0,0 +1,67 @@ +use super::{ + FailureAccrual, GrpcRetryCondition, GrpcRoute, HttpRetryCondition, HttpRoute, RouteRetry, + RouteSet, RouteTimeouts, TcpRoute, TlsRoute, TrafficPolicy, +}; + +use std::{net::SocketAddr, num::NonZeroU16}; + +/// OutboundPolicyKind describes a resolved outbound policy that is +/// either attributed to a resource or is a fallback one. +#[allow(clippy::large_enum_variant)] +#[derive(Clone, Debug, PartialEq)] +pub enum OutboundPolicyKind { + Fallback(SocketAddr), + Resource(ResourceOutboundPolicy), +} + +/// ResourceOutboundPolicy expresses the known resource types +/// that can be parents for outbound policy. They each come with +/// specific metadata that is used when putting together the final +/// policy response. +#[derive(Clone, Debug, PartialEq)] +pub enum ResourceOutboundPolicy { + Service { + authority: String, + policy: OutboundPolicy, + }, + Egress { + traffic_policy: TrafficPolicy, + original_dst: SocketAddr, + policy: OutboundPolicy, + }, +} + +// ParentMeta carries information resource-specific +// information about the parent to which outbound policy +// is associated. 
+#[derive(Clone, Debug, Hash, PartialEq, Eq)] +pub enum ParentMeta { + Service { authority: String }, + EgressNetwork(TrafficPolicy), +} + +#[derive(Clone, Debug, PartialEq)] +pub struct OutboundPolicy { + pub parent_meta: ParentMeta, + pub http_routes: RouteSet, + pub grpc_routes: RouteSet, + pub tls_routes: RouteSet, + pub tcp_routes: RouteSet, + pub name: String, + pub namespace: String, + pub port: NonZeroU16, + pub opaque: bool, + pub accrual: Option, + pub http_retry: Option>, + pub grpc_retry: Option>, + pub timeouts: RouteTimeouts, +} + +impl ResourceOutboundPolicy { + pub fn policy(&self) -> &OutboundPolicy { + match self { + Self::Egress { policy, .. } => policy, + Self::Service { policy, .. } => policy, + } + } +} diff --git a/policy-controller/core/src/outbound/target.rs b/policy-controller/core/src/outbound/target.rs new file mode 100644 index 0000000000000..16840c488eb1b --- /dev/null +++ b/policy-controller/core/src/outbound/target.rs @@ -0,0 +1,26 @@ +use std::{net::SocketAddr, num::NonZeroU16}; + +/// OutboundDiscoverTarget allows us to express the fact that +/// a policy resolution can be fulfilled by either a resource +/// we know about (a specific EgressNetwork or a Service) or +/// by our fallback mechanism. 
+#[derive(Clone, Debug)] +pub enum OutboundDiscoverTarget { + Resource(ResourceTarget), + Fallback(SocketAddr), +} + +#[derive(Clone, Debug)] +pub struct ResourceTarget { + pub name: String, + pub namespace: String, + pub port: NonZeroU16, + pub source_namespace: String, + pub kind: Kind, +} + +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub enum Kind { + EgressNetwork(SocketAddr), + Service, +} diff --git a/policy-controller/grpc/src/outbound.rs b/policy-controller/grpc/src/outbound.rs index 7fb3261b497b4..6d4da0cd96080 100644 --- a/policy-controller/grpc/src/outbound.rs +++ b/policy-controller/grpc/src/outbound.rs @@ -13,12 +13,13 @@ use linkerd2_proxy_api::{ }; use linkerd_policy_controller_core::{ outbound::{ - DiscoverOutboundPolicy, Kind, OutboundDiscoverTarget, OutboundPolicy, OutboundPolicyStream, - Route, WeightedEgressNetwork, WeightedService, + DiscoverOutboundPolicy, Kind, OutboundDiscoverTarget, OutboundPolicyKind, + OutboundPolicyStream, ResourceOutboundPolicy, ResourceTarget, Route, WeightedEgressNetwork, + WeightedService, }, routes::GroupKindNamespaceName, }; -use std::{num::NonZeroU16, str::FromStr, sync::Arc, time}; +use std::{net::SocketAddr, num::NonZeroU16, str::FromStr, sync::Arc, time}; mod grpc; mod http; @@ -65,13 +66,13 @@ where outbound::traffic_spec::Target::Addr(target) => target, outbound::traffic_spec::Target::Authority(auth) => { return self.lookup_authority(&auth).map(|(namespace, name, port)| { - OutboundDiscoverTarget { + OutboundDiscoverTarget::Resource(ResourceTarget { kind: Kind::Service, name, namespace, port, source_namespace, - } + }) }) } }; @@ -148,24 +149,27 @@ where &self, req: tonic::Request, ) -> Result, tonic::Status> { - let service = self.lookup(req.into_inner())?; + let target = self.lookup(req.into_inner())?; + let policy = self .index - .get_outbound_policy(service.clone()) + .get_outbound_policy(target) .await .map_err(|error| { tonic::Status::internal(format!("failed to get outbound policy: {error}")) 
})?; - if let Some(policy) = policy { - Ok(tonic::Response::new(to_proto( - policy, - self.allow_l5d_request_headers, - service, - ))) - } else { - Err(tonic::Status::not_found("No such policy")) - } + let message = match policy { + Some(OutboundPolicyKind::Fallback(original_dst)) => fallback(original_dst), + Some(OutboundPolicyKind::Resource(resource)) => { + to_proto(resource, self.allow_l5d_request_headers) + } + None => { + return Err(tonic::Status::not_found("No such policy")); + } + }; + + Ok(tonic::Response::new(message)) } type WatchStream = BoxWatchStream; @@ -180,7 +184,7 @@ where let rx = self .index - .watch_outbound_policy(service.clone()) + .watch_outbound_policy(service) .await .map_err(|e| tonic::Status::internal(format!("lookup failed: {e}")))? .ok_or_else(|| tonic::Status::not_found("unknown server"))?; @@ -188,7 +192,6 @@ where drain, rx, self.allow_l5d_request_headers, - service, ))) } } @@ -201,7 +204,6 @@ fn response_stream( drain: drain::Watch, mut rx: OutboundPolicyStream, allow_l5d_request_headers: bool, - target: OutboundDiscoverTarget, ) -> BoxWatchStream { Box::pin(async_stream::try_stream! { tokio::pin! { @@ -212,8 +214,11 @@ fn response_stream( tokio::select! { // When the port is updated with a new server, update the server watch. 
res = rx.next() => match res { - Some(policy) => { - yield to_proto(policy, allow_l5d_request_headers, target.clone()); + Some(OutboundPolicyKind::Resource(resource)) => { + yield to_proto(resource, allow_l5d_request_headers); + } + Some(OutboundPolicyKind::Fallback(original_dst)) => { + yield fallback(original_dst); } None => return, }, @@ -228,106 +233,146 @@ fn response_stream( }) } -fn no_explicit_routes(outbound: &OutboundPolicy) -> bool { - outbound.http_routes.is_empty() - && outbound.grpc_routes.is_empty() - && outbound.tls_routes.is_empty() - && outbound.tcp_routes.is_empty() +fn fallback(original_dst: SocketAddr) -> outbound::OutboundPolicy { + outbound::OutboundPolicy { + metadata: Some(Metadata { + kind: Some(metadata::Kind::Default("egress-fallback".to_string())), + }), + + protocol: Some(outbound::ProxyProtocol { + kind: Some(outbound::proxy_protocol::Kind::Opaque( + outbound::proxy_protocol::Opaque { + routes: vec![outbound::OpaqueRoute { + metadata: Some(Metadata { + kind: Some(metadata::Kind::Default("egress-fallback".to_string())), + }), + rules: vec![outbound::opaque_route::Rule { + backends: Some(outbound::opaque_route::Distribution { + kind: Some( + outbound::opaque_route::distribution::Kind::FirstAvailable( + outbound::opaque_route::distribution::FirstAvailable { + backends: vec![outbound::opaque_route::RouteBackend { + backend: Some(outbound::Backend { + metadata: Some(Metadata { + kind: Some(metadata::Kind::Default( + "egress-fallback".to_string(), + )), + }), + queue: Some(default_queue_config()), + kind: Some(outbound::backend::Kind::Forward( + destination::WeightedAddr { + addr: Some(original_dst.into()), + weight: 1, + ..Default::default() + }, + )), + }), + }], + }, + ), + ), + }), + }], + error: None, + }], + }, + )), + }), + } } fn to_proto( - outbound: OutboundPolicy, + resource_policy: ResourceOutboundPolicy, allow_l5d_request_headers: bool, - target: OutboundDiscoverTarget, ) -> outbound::OutboundPolicy { - let backend: 
outbound::Backend = default_backend(&outbound, target.kind); + let policy = resource_policy.policy(); + let backend: outbound::Backend = default_backend(&resource_policy); - let kind = if outbound.opaque { + let kind = if policy.opaque { outbound::proxy_protocol::Kind::Opaque(outbound::proxy_protocol::Opaque { - routes: vec![default_outbound_opaq_route(backend, target.kind)], + routes: vec![default_outbound_opaq_route(backend, &resource_policy)], }) } else { - let accrual = outbound.accrual.map(|accrual| outbound::FailureAccrual { - kind: Some(match accrual { - linkerd_policy_controller_core::outbound::FailureAccrual::Consecutive { - max_failures, - backoff, - } => outbound::failure_accrual::Kind::ConsecutiveFailures( - outbound::failure_accrual::ConsecutiveFailures { + let accrual = resource_policy + .policy() + .accrual + .map(|accrual| outbound::FailureAccrual { + kind: Some(match accrual { + linkerd_policy_controller_core::outbound::FailureAccrual::Consecutive { max_failures, - backoff: Some(outbound::ExponentialBackoff { - min_backoff: convert_duration("min_backoff", backoff.min_penalty), - max_backoff: convert_duration("max_backoff", backoff.max_penalty), - jitter_ratio: backoff.jitter, - }), - }, - ), - }), - }); + backoff, + } => outbound::failure_accrual::Kind::ConsecutiveFailures( + outbound::failure_accrual::ConsecutiveFailures { + max_failures, + backoff: Some(outbound::ExponentialBackoff { + min_backoff: convert_duration("min_backoff", backoff.min_penalty), + max_backoff: convert_duration("max_backoff", backoff.max_penalty), + jitter_ratio: backoff.jitter, + }), + }, + ), + }), + }); + + let mut grpc_routes = policy.grpc_routes.clone().into_iter().collect::>(); + let mut http_routes = policy.http_routes.clone().into_iter().collect::>(); + let mut tls_routes = policy.tls_routes.clone().into_iter().collect::>(); + let mut tcp_routes = policy.tcp_routes.clone().into_iter().collect::>(); - // if we have no explicit routes attached to the parent, always 
attempt protocol detection - if no_explicit_routes(&outbound) { - let mut http_routes = outbound.http_routes.into_iter().collect::>(); + if !grpc_routes.is_empty() { + grpc_routes.sort_by(timestamp_then_name); + grpc::protocol( + backend, + grpc_routes.into_iter(), + accrual, + policy.grpc_retry.clone(), + policy.timeouts.clone(), + allow_l5d_request_headers, + &resource_policy, + ) + } else if !http_routes.is_empty() { http_routes.sort_by(timestamp_then_name); http::protocol( backend, http_routes.into_iter(), accrual, - outbound.http_retry, - outbound.timeouts, + policy.http_retry.clone(), + policy.timeouts.clone(), allow_l5d_request_headers, - target.clone(), + &resource_policy, ) + } else if !tls_routes.is_empty() { + tls_routes.sort_by(timestamp_then_name); + tls::protocol(backend, tls_routes.into_iter(), &resource_policy) + } else if !tcp_routes.is_empty() { + tcp_routes.sort_by(timestamp_then_name); + tcp::protocol(backend, tcp_routes.into_iter(), &resource_policy) } else { - let mut grpc_routes = outbound.grpc_routes.into_iter().collect::>(); - let mut http_routes = outbound.http_routes.into_iter().collect::>(); - let mut tls_routes = outbound.tls_routes.into_iter().collect::>(); - let mut tcp_routes = outbound.tcp_routes.into_iter().collect::>(); - - if !grpc_routes.is_empty() { - grpc_routes.sort_by(timestamp_then_name); - grpc::protocol( - backend, - grpc_routes.into_iter(), - accrual, - outbound.grpc_retry, - outbound.timeouts, - allow_l5d_request_headers, - target.clone(), - ) - } else if !http_routes.is_empty() { - http_routes.sort_by(timestamp_then_name); - http::protocol( - backend, - http_routes.into_iter(), - accrual, - outbound.http_retry, - outbound.timeouts, - allow_l5d_request_headers, - target.clone(), - ) - } else if !tls_routes.is_empty() { - tls_routes.sort_by(timestamp_then_name); - tls::protocol(backend, tls_routes.into_iter(), target.clone()) - } else { - tcp_routes.sort_by(timestamp_then_name); - tcp::protocol(backend, 
tcp_routes.into_iter(), target.clone()) - } + http_routes.sort_by(timestamp_then_name); + http::protocol( + backend, + http_routes.into_iter(), + accrual, + policy.http_retry.clone(), + policy.timeouts.clone(), + allow_l5d_request_headers, + &resource_policy, + ) } }; - let (parent_group, parent_kind) = match target.kind { - Kind::EgressNetwork { .. } => ("policy.linkerd.io", "EgressNetwork"), - Kind::Service => ("core", "Service"), + let (parent_group, parent_kind) = match resource_policy { + ResourceOutboundPolicy::Egress { .. } => ("policy.linkerd.io", "EgressNetwork"), + ResourceOutboundPolicy::Service { .. } => ("core", "Service"), }; let metadata = Metadata { kind: Some(metadata::Kind::Resource(api::meta::Resource { group: parent_group.into(), kind: parent_kind.into(), - namespace: outbound.namespace, - name: outbound.name, - port: u16::from(outbound.port).into(), + namespace: policy.namespace.clone(), + name: policy.name.clone(), + port: u16::from(policy.port).into(), ..Default::default() })), }; @@ -356,17 +401,17 @@ fn timestamp_then_name( by_ts.then_with(|| left_id.name.cmp(&right_id.name)) } -fn default_backend(outbound: &OutboundPolicy, parent_kind: Kind) -> outbound::Backend { - match parent_kind { - Kind::Service => outbound::Backend { +fn default_backend(policy: &ResourceOutboundPolicy) -> outbound::Backend { + match policy { + ResourceOutboundPolicy::Service { authority, policy } => outbound::Backend { metadata: Some(Metadata { kind: Some(metadata::Kind::Resource(api::meta::Resource { group: "core".to_string(), kind: "Service".to_string(), - name: outbound.name.clone(), - namespace: outbound.namespace.clone(), + name: policy.name.clone(), + namespace: policy.namespace.clone(), section: Default::default(), - port: u16::from(outbound.port).into(), + port: u16::from(policy.port).into(), })), }), queue: Some(default_queue_config()), @@ -375,7 +420,7 @@ fn default_backend(outbound: &OutboundPolicy, parent_kind: Kind) -> outbound::Ba discovery: 
Some(outbound::backend::EndpointDiscovery { kind: Some(outbound::backend::endpoint_discovery::Kind::Dst( outbound::backend::endpoint_discovery::DestinationGet { - path: outbound.service_authority.clone(), + path: authority.clone(), }, )), }), @@ -383,21 +428,25 @@ fn default_backend(outbound: &OutboundPolicy, parent_kind: Kind) -> outbound::Ba }, )), }, - Kind::EgressNetwork { original_dst, .. } => outbound::Backend { + ResourceOutboundPolicy::Egress { + original_dst, + policy, + .. + } => outbound::Backend { metadata: Some(Metadata { kind: Some(metadata::Kind::Resource(api::meta::Resource { group: "policy.linkerd.io".to_string(), kind: "EgressNetwork".to_string(), - name: outbound.name.clone(), - namespace: outbound.namespace.clone(), + name: policy.name.clone(), + namespace: policy.namespace.clone(), section: Default::default(), - port: u16::from(outbound.port).into(), + port: u16::from(policy.port).into(), })), }), queue: Some(default_queue_config()), kind: Some(outbound::backend::Kind::Forward( destination::WeightedAddr { - addr: Some(original_dst.into()), + addr: Some((*original_dst).into()), weight: 1, ..Default::default() }, @@ -408,13 +457,13 @@ fn default_backend(outbound: &OutboundPolicy, parent_kind: Kind) -> outbound::Ba fn default_outbound_opaq_route( backend: outbound::Backend, - parent_kind: Kind, + resource_policy: &ResourceOutboundPolicy, ) -> outbound::OpaqueRoute { - match parent_kind { - Kind::EgressNetwork { traffic_policy, .. } => { + match resource_policy { + ResourceOutboundPolicy::Egress { traffic_policy, .. } => { tcp::default_outbound_egress_route(backend, traffic_policy) } - Kind::Service => { + ResourceOutboundPolicy::Service { .. 
} => { let metadata = Some(Metadata { kind: Some(metadata::Kind::Default("opaq".to_string())), }); diff --git a/policy-controller/grpc/src/outbound/grpc.rs b/policy-controller/grpc/src/outbound/grpc.rs index 91d6fc7d0e9ec..bc2510ae548b8 100644 --- a/policy-controller/grpc/src/outbound/grpc.rs +++ b/policy-controller/grpc/src/outbound/grpc.rs @@ -2,11 +2,15 @@ use super::{convert_duration, default_balancer_config, default_queue_config}; use crate::routes::{ convert_host_match, convert_request_header_modifier_filter, grpc::convert_match, }; -use linkerd2_proxy_api::{destination, grpc_route, http_route, meta, outbound}; +use linkerd2_proxy_api::{ + destination, grpc_route, http_route, + meta::{self}, + outbound, +}; use linkerd_policy_controller_core::{ outbound::{ - Backend, Filter, GrpcRetryCondition, GrpcRoute, Kind, OutboundDiscoverTarget, - OutboundRoute, OutboundRouteRule, RouteRetry, RouteTimeouts, TrafficPolicy, + Backend, Filter, GrpcRetryCondition, GrpcRoute, OutboundRoute, OutboundRouteRule, + ResourceOutboundPolicy, RouteRetry, RouteTimeouts, TrafficPolicy, }, routes::{FailureInjectorFilter, GroupKindNamespaceName}, }; @@ -19,7 +23,7 @@ pub(crate) fn protocol( service_retry: Option>, service_timeouts: RouteTimeouts, allow_l5d_request_headers: bool, - target: OutboundDiscoverTarget, + policy: &ResourceOutboundPolicy, ) -> outbound::proxy_protocol::Kind { let mut routes = routes .map(|(gknn, route)| { @@ -30,12 +34,12 @@ pub(crate) fn protocol( service_retry.clone(), service_timeouts.clone(), allow_l5d_request_headers, - target.clone(), + policy, ) }) .collect::>(); - if let Kind::EgressNetwork { traffic_policy, .. } = target.kind { + if let ResourceOutboundPolicy::Egress { traffic_policy, .. 
} = policy { routes.push(default_outbound_egress_route( default_backend, service_retry, @@ -61,7 +65,7 @@ fn convert_outbound_route( service_retry: Option>, service_timeouts: RouteTimeouts, allow_l5d_request_headers: bool, - target: OutboundDiscoverTarget, + policy: &ResourceOutboundPolicy, ) -> outbound::GrpcRoute { // This encoder sets deprecated timeouts for older proxies. #![allow(deprecated)] @@ -90,7 +94,7 @@ fn convert_outbound_route( }| { let backends = backends .into_iter() - .map(|b| convert_backend(b, target.clone())) + .map(|b| convert_backend(b, policy)) .collect::>(); let dist = if backends.is_empty() { outbound::grpc_route::distribution::Kind::FirstAvailable( @@ -173,11 +177,11 @@ fn convert_outbound_route( fn convert_backend( backend: Backend, - target: OutboundDiscoverTarget, + policy: &ResourceOutboundPolicy, ) -> outbound::grpc_route::WeightedRouteBackend { - let original_dst_port = match target.kind { - Kind::EgressNetwork { original_dst, .. } => Some(original_dst.port()), - Kind::Service => None, + let original_dst_port = match policy { + ResourceOutboundPolicy::Egress { original_dst, .. } => Some(original_dst.port()), + _ => None, }; match backend { @@ -238,9 +242,13 @@ fn convert_backend( format!("Service not found {}", svc.name), super::service_meta(svc), ), - Backend::EgressNetwork(egress_net) if egress_net.exists => match target.kind { - Kind::EgressNetwork { original_dst, .. } => { - if target.name == egress_net.name && target.namespace == egress_net.namespace { + Backend::EgressNetwork(egress_net) if egress_net.exists => match policy { + ResourceOutboundPolicy::Egress { + original_dst, + policy, + .. 
+ } => { + if policy.name == egress_net.name && policy.namespace == egress_net.namespace { let filters = egress_net .filters .clone() @@ -259,7 +267,7 @@ fn convert_backend( queue: Some(default_queue_config()), kind: Some(outbound::backend::Kind::Forward( destination::WeightedAddr { - addr: Some(original_dst.into()), + addr: Some((*original_dst).into()), weight: egress_net.weight, ..Default::default() }, @@ -279,7 +287,7 @@ fn convert_backend( ) } } - Kind::Service => invalid_backend( + ResourceOutboundPolicy::Service { .. } => invalid_backend( egress_net.weight, "EgressNetwork backends attach to EgressNetwork parents only".to_string(), super::egress_net_meta(egress_net, original_dst_port), @@ -331,7 +339,7 @@ pub(crate) fn default_outbound_egress_route( backend: outbound::Backend, service_retry: Option>, service_timeouts: RouteTimeouts, - traffic_policy: TrafficPolicy, + traffic_policy: &TrafficPolicy, ) -> outbound::GrpcRoute { #![allow(deprecated)] let (filters, name) = match traffic_policy { diff --git a/policy-controller/grpc/src/outbound/http.rs b/policy-controller/grpc/src/outbound/http.rs index c8d0969aa4cea..cb7439d9fac5c 100644 --- a/policy-controller/grpc/src/outbound/http.rs +++ b/policy-controller/grpc/src/outbound/http.rs @@ -9,8 +9,8 @@ use crate::routes::{ use linkerd2_proxy_api::{destination, http_route, meta, outbound}; use linkerd_policy_controller_core::{ outbound::{ - Backend, Filter, HttpRetryCondition, HttpRoute, Kind, OutboundDiscoverTarget, - OutboundRouteRule, RouteRetry, RouteTimeouts, TrafficPolicy, + Backend, Filter, HttpRetryCondition, HttpRoute, OutboundRouteRule, ResourceOutboundPolicy, + RouteRetry, RouteTimeouts, TrafficPolicy, }, routes::GroupKindNamespaceName, }; @@ -23,9 +23,9 @@ pub(crate) fn protocol( service_retry: Option>, service_timeouts: RouteTimeouts, allow_l5d_request_headers: bool, - target: OutboundDiscoverTarget, + policy: &ResourceOutboundPolicy, ) -> outbound::proxy_protocol::Kind { - let opaque_route = 
default_outbound_opaq_route(default_backend.clone(), target.kind); + let opaque_route = default_outbound_opaq_route(default_backend.clone(), policy); let mut routes = routes .map(|(gknn, route)| { convert_outbound_route( @@ -35,13 +35,13 @@ pub(crate) fn protocol( service_retry.clone(), service_timeouts.clone(), allow_l5d_request_headers, - target.clone(), + policy, ) }) .collect::>(); - match target.kind { - Kind::Service => { + match policy { + ResourceOutboundPolicy::Service { .. } => { if routes.is_empty() { routes.push(default_outbound_service_route( default_backend, @@ -50,7 +50,7 @@ pub(crate) fn protocol( )); } } - Kind::EgressNetwork { traffic_policy, .. } => { + ResourceOutboundPolicy::Egress { traffic_policy, .. } => { routes.push(default_outbound_egress_route( default_backend, service_retry.clone(), @@ -92,7 +92,7 @@ fn convert_outbound_route( service_retry: Option>, service_timeouts: RouteTimeouts, allow_l5d_request_headers: bool, - target: OutboundDiscoverTarget, + policy: &ResourceOutboundPolicy, ) -> outbound::HttpRoute { // This encoder sets deprecated timeouts for older proxies. #![allow(deprecated)] @@ -120,7 +120,7 @@ fn convert_outbound_route( }| { let backends = backends .into_iter() - .map(|b| convert_backend(b, target.clone())) + .map(|b| convert_backend(b, policy)) .collect::>(); let dist = if backends.is_empty() { outbound::http_route::distribution::Kind::FirstAvailable( @@ -167,11 +167,11 @@ fn convert_outbound_route( fn convert_backend( backend: Backend, - target: OutboundDiscoverTarget, + policy: &ResourceOutboundPolicy, ) -> outbound::http_route::WeightedRouteBackend { - let original_dst_port = match target.kind { - Kind::EgressNetwork { original_dst, .. } => Some(original_dst.port()), - Kind::Service => None, + let original_dst_port = match policy { + ResourceOutboundPolicy::Egress { original_dst, .. } => Some(original_dst.port()), + ResourceOutboundPolicy::Service { .. 
} => None, }; match backend { @@ -232,9 +232,13 @@ fn convert_backend( format!("Service not found {}", svc.name), super::service_meta(svc), ), - Backend::EgressNetwork(egress_net) if egress_net.exists => match target.kind { - Kind::EgressNetwork { original_dst, .. } => { - if target.name == egress_net.name && target.namespace == egress_net.namespace { + Backend::EgressNetwork(egress_net) if egress_net.exists => match policy { + ResourceOutboundPolicy::Egress { + original_dst, + policy, + .. + } => { + if policy.name == egress_net.name && policy.namespace == egress_net.namespace { let filters = egress_net .filters .clone() @@ -253,7 +257,7 @@ fn convert_backend( queue: Some(default_queue_config()), kind: Some(outbound::backend::Kind::Forward( destination::WeightedAddr { - addr: Some(original_dst.into()), + addr: Some((*original_dst).into()), weight: egress_net.weight, ..Default::default() }, @@ -273,7 +277,7 @@ fn convert_backend( ) } } - Kind::Service => invalid_backend( + ResourceOutboundPolicy::Service { .. 
} => invalid_backend( egress_net.weight, "EgressNetwork backends attach to EgressNetwork parents only".to_string(), super::egress_net_meta(egress_net, original_dst_port), @@ -367,7 +371,7 @@ pub(crate) fn default_outbound_egress_route( backend: outbound::Backend, service_retry: Option>, service_timeouts: RouteTimeouts, - traffic_policy: TrafficPolicy, + traffic_policy: &TrafficPolicy, ) -> outbound::HttpRoute { #![allow(deprecated)] let (filters, name) = match traffic_policy { diff --git a/policy-controller/grpc/src/outbound/tcp.rs b/policy-controller/grpc/src/outbound/tcp.rs index fff83202699b8..26c14ee7c05ab 100644 --- a/policy-controller/grpc/src/outbound/tcp.rs +++ b/policy-controller/grpc/src/outbound/tcp.rs @@ -1,7 +1,7 @@ use super::{default_balancer_config, default_queue_config}; use linkerd2_proxy_api::{destination, meta, outbound}; use linkerd_policy_controller_core::{ - outbound::{Backend, Kind, OutboundDiscoverTarget, TcpRoute, TrafficPolicy}, + outbound::{Backend, ResourceOutboundPolicy, TcpRoute, TrafficPolicy}, routes::GroupKindNamespaceName, }; use std::net::SocketAddr; @@ -9,15 +9,13 @@ use std::net::SocketAddr; pub(crate) fn protocol( default_backend: outbound::Backend, routes: impl Iterator, - target: OutboundDiscoverTarget, + policy: &ResourceOutboundPolicy, ) -> outbound::proxy_protocol::Kind { let mut routes = routes - .map(|(gknn, route)| { - convert_outbound_route(gknn, route, default_backend.clone(), target.clone()) - }) + .map(|(gknn, route)| convert_outbound_route(gknn, route, default_backend.clone(), policy)) .collect::>(); - if let Kind::EgressNetwork { traffic_policy, .. } = target.kind { + if let ResourceOutboundPolicy::Egress { traffic_policy, .. 
} = policy { routes.push(default_outbound_egress_route( default_backend, traffic_policy, @@ -34,7 +32,7 @@ fn convert_outbound_route( creation_timestamp: _, }: TcpRoute, backend: outbound::Backend, - target: OutboundDiscoverTarget, + policy: &ResourceOutboundPolicy, ) -> outbound::OpaqueRoute { // This encoder sets deprecated timeouts for older proxies. #![allow(deprecated)] @@ -52,7 +50,7 @@ fn convert_outbound_route( let backends = rule .backends .into_iter() - .map(|b| convert_backend(b, target.clone())) + .map(|b| convert_backend(b, policy)) .collect::>(); let dist = if backends.is_empty() { @@ -82,11 +80,11 @@ fn convert_outbound_route( fn convert_backend( backend: Backend, - target: OutboundDiscoverTarget, + policy: &ResourceOutboundPolicy, ) -> outbound::opaque_route::WeightedRouteBackend { - let original_dst_port = match target.kind { - Kind::EgressNetwork { original_dst, .. } => Some(original_dst.port()), - Kind::Service => None, + let original_dst_port = match policy { + ResourceOutboundPolicy::Egress { original_dst, .. } => Some(original_dst.port()), + ResourceOutboundPolicy::Service { .. } => None, }; match backend { @@ -137,9 +135,13 @@ fn convert_backend( format!("Service not found {}", svc.name), super::service_meta(svc), ), - Backend::EgressNetwork(egress_net) if egress_net.exists => match target.kind { - Kind::EgressNetwork { original_dst, .. } => { - if target.name == egress_net.name && target.namespace == egress_net.namespace { + Backend::EgressNetwork(egress_net) if egress_net.exists => match policy { + ResourceOutboundPolicy::Egress { + original_dst, + policy, + .. 
+ } => { + if policy.name == egress_net.name && policy.namespace == egress_net.namespace { outbound::opaque_route::WeightedRouteBackend { weight: egress_net.weight, backend: Some(outbound::opaque_route::RouteBackend { @@ -151,7 +153,7 @@ fn convert_backend( queue: Some(default_queue_config()), kind: Some(outbound::backend::Kind::Forward( destination::WeightedAddr { - addr: Some(original_dst.into()), + addr: Some((*original_dst).into()), weight: egress_net.weight, ..Default::default() }, @@ -170,7 +172,7 @@ fn convert_backend( ) } } - Kind::Service { .. } => invalid_backend( + ResourceOutboundPolicy::Service { .. } => invalid_backend( egress_net.weight, "EgressNetwork backends attach to EgressNetwork parents only".to_string(), super::egress_net_meta(egress_net, original_dst_port), @@ -211,7 +213,7 @@ fn invalid_backend( pub(crate) fn default_outbound_egress_route( backend: outbound::Backend, - traffic_policy: TrafficPolicy, + traffic_policy: &TrafficPolicy, ) -> outbound::OpaqueRoute { #![allow(deprecated)] let (error, name) = match traffic_policy { diff --git a/policy-controller/grpc/src/outbound/tls.rs b/policy-controller/grpc/src/outbound/tls.rs index fbbbda15328c2..9a436e7cf59a5 100644 --- a/policy-controller/grpc/src/outbound/tls.rs +++ b/policy-controller/grpc/src/outbound/tls.rs @@ -2,7 +2,7 @@ use super::{default_balancer_config, default_queue_config}; use crate::routes::convert_sni_match; use linkerd2_proxy_api::{destination, meta, outbound}; use linkerd_policy_controller_core::{ - outbound::{Backend, Kind, OutboundDiscoverTarget, TlsRoute, TrafficPolicy}, + outbound::{Backend, ResourceOutboundPolicy, TlsRoute, TrafficPolicy}, routes::GroupKindNamespaceName, }; use std::net::SocketAddr; @@ -10,15 +10,13 @@ use std::net::SocketAddr; pub(crate) fn protocol( default_backend: outbound::Backend, routes: impl Iterator, - target: OutboundDiscoverTarget, + policy: &ResourceOutboundPolicy, ) -> outbound::proxy_protocol::Kind { let mut routes = routes - .map(|(gknn, 
route)| { - convert_outbound_route(gknn, route, default_backend.clone(), target.clone()) - }) + .map(|(gknn, route)| convert_outbound_route(gknn, route, default_backend.clone(), policy)) .collect::>(); - if let Kind::EgressNetwork { traffic_policy, .. } = target.kind { + if let ResourceOutboundPolicy::Egress { traffic_policy, .. } = policy { routes.push(default_outbound_egress_route( default_backend, traffic_policy, @@ -36,7 +34,7 @@ fn convert_outbound_route( creation_timestamp: _, }: TlsRoute, backend: outbound::Backend, - target: OutboundDiscoverTarget, + policy: &ResourceOutboundPolicy, ) -> outbound::TlsRoute { // This encoder sets deprecated timeouts for older proxies. #![allow(deprecated)] @@ -56,7 +54,7 @@ fn convert_outbound_route( let backends = rule .backends .into_iter() - .map(|b| convert_backend(b, target.clone())) + .map(|b| convert_backend(b, policy)) .collect::>(); let dist = if backends.is_empty() { @@ -87,11 +85,11 @@ fn convert_outbound_route( fn convert_backend( backend: Backend, - target: OutboundDiscoverTarget, + policy: &ResourceOutboundPolicy, ) -> outbound::tls_route::WeightedRouteBackend { - let original_dst_port = match target.kind { - Kind::EgressNetwork { original_dst, .. } => Some(original_dst.port()), - Kind::Service => None, + let original_dst_port = match policy { + ResourceOutboundPolicy::Egress { original_dst, .. } => Some(original_dst.port()), + ResourceOutboundPolicy::Service { .. } => None, }; match backend { @@ -142,9 +140,13 @@ fn convert_backend( format!("Service not found {}", svc.name), super::service_meta(svc), ), - Backend::EgressNetwork(egress_net) if egress_net.exists => match target.kind { - Kind::EgressNetwork { original_dst, .. } => { - if target.name == egress_net.name && target.namespace == egress_net.namespace { + Backend::EgressNetwork(egress_net) if egress_net.exists => match policy { + ResourceOutboundPolicy::Egress { + original_dst, + policy, + .. 
+ } => { + if policy.name == egress_net.name && policy.namespace == egress_net.namespace { outbound::tls_route::WeightedRouteBackend { weight: egress_net.weight, backend: Some(outbound::tls_route::RouteBackend { @@ -156,7 +158,7 @@ fn convert_backend( queue: Some(default_queue_config()), kind: Some(outbound::backend::Kind::Forward( destination::WeightedAddr { - addr: Some(original_dst.into()), + addr: Some((*original_dst).into()), weight: egress_net.weight, ..Default::default() }, @@ -175,7 +177,7 @@ fn convert_backend( ) } } - Kind::Service { .. } => invalid_backend( + ResourceOutboundPolicy::Service { .. } => invalid_backend( egress_net.weight, "EgressNetwork backends attach to EgressNetwork parents only".to_string(), super::egress_net_meta(egress_net, original_dst_port), @@ -216,7 +218,7 @@ fn invalid_backend( pub(crate) fn default_outbound_egress_route( backend: outbound::Backend, - traffic_policy: TrafficPolicy, + traffic_policy: &TrafficPolicy, ) -> outbound::TlsRoute { #![allow(deprecated)] let (error, name) = match traffic_policy { diff --git a/policy-controller/k8s/index/src/outbound/index.rs b/policy-controller/k8s/index/src/outbound/index.rs index 2b632f4b446a8..337290105ae0b 100644 --- a/policy-controller/k8s/index/src/outbound/index.rs +++ b/policy-controller/k8s/index/src/outbound/index.rs @@ -9,7 +9,7 @@ use egress_network::EgressNetwork; use linkerd_policy_controller_core::{ outbound::{ Backend, Backoff, FailureAccrual, GrpcRetryCondition, GrpcRoute, HttpRetryCondition, - HttpRoute, Kind, OutboundDiscoverTarget, OutboundPolicy, RouteRetry, RouteSet, + HttpRoute, Kind, OutboundPolicy, ParentMeta, ResourceTarget, RouteRetry, RouteSet, RouteTimeouts, TcpRoute, TlsRoute, TrafficPolicy, }, routes::GroupKindNamespaceName, @@ -32,6 +32,7 @@ pub struct Index { // holds information about resources. 
currently EgressNetworks and Services resource_info: HashMap, cluster_networks: Vec, + fallback_polcy_tx: watch::Sender<()>, } pub mod egress_network; @@ -87,6 +88,7 @@ struct ResourceInfo { http_retry: Option>, grpc_retry: Option>, timeouts: RouteTimeouts, + traffic_policy: Option, } #[derive(Clone, Debug, PartialEq, Eq, Hash)] @@ -98,10 +100,10 @@ struct ResourcePort { #[derive(Debug)] struct ResourceRoutes { + parent_meta: ParentMeta, namespace: Arc, name: String, port: NonZeroU16, - authority: Option, // present only on services watches_by_ns: HashMap, opaque: bool, accrual: Option, @@ -112,6 +114,7 @@ struct ResourceRoutes { #[derive(Debug)] struct RoutesWatch { + parent_meta: ParentMeta, opaque: bool, accrual: Option, http_retry: Option>, @@ -254,6 +257,7 @@ impl kubert::index::IndexNamespacedResource for Index { http_retry, grpc_retry, timeouts, + traffic_policy: None, }; self.namespaces @@ -267,7 +271,11 @@ impl kubert::index::IndexNamespacedResource for Index { resource_port_routes: Default::default(), namespace: Arc::new(ns), }) - .update_resource(service.name_unchecked(), &service_info); + .update_resource( + service.name_unchecked(), + ResourceKind::Service, + &service_info, + ); self.resource_info.insert( ResourceRef { @@ -278,7 +286,7 @@ impl kubert::index::IndexNamespacedResource for Index { service_info, ); - self.reindex_resources() + self.reindex_resources(); } fn delete(&mut self, namespace: String, name: String) { @@ -291,7 +299,7 @@ impl kubert::index::IndexNamespacedResource for Index { self.resource_info.remove(&service_ref); self.services_by_ip.retain(|_, v| *v != service_ref); - self.reindex_resources() + self.reindex_resources(); } } @@ -327,8 +335,15 @@ impl kubert::index::IndexNamespacedResource for name: name.clone(), namespace: ns.clone(), }; + let egress_net = EgressNetwork::from_resource(&egress_network, self.cluster_networks.clone()); + + let traffic_policy = Some(match egress_net.traffic_policy { + 
linkerd_k8s_api::TrafficPolicy::Allow => TrafficPolicy::Allow, + linkerd_k8s_api::TrafficPolicy::Deny => TrafficPolicy::Deny, + }); + self.egress_networks_by_ref .insert(egress_net_ref.clone(), egress_net); @@ -338,6 +353,7 @@ impl kubert::index::IndexNamespacedResource for http_retry, grpc_retry, timeouts, + traffic_policy, }; self.namespaces @@ -351,12 +367,18 @@ impl kubert::index::IndexNamespacedResource for resource_port_routes: Default::default(), namespace: Arc::new(ns), }) - .update_resource(egress_network.name_unchecked(), &egress_network_info); + .update_resource( + egress_network.name_unchecked(), + ResourceKind::EgressNetwork, + &egress_network_info, + ); self.resource_info .insert(egress_net_ref, egress_network_info); - self.reindex_resources() + self.reindex_resources(); + self.reinitialize_egress_watches(); + self.reinitialize_fallback_watches() } fn delete(&mut self, namespace: String, name: String) { @@ -368,13 +390,16 @@ impl kubert::index::IndexNamespacedResource for }; self.egress_networks_by_ref.remove(&egress_net_ref); - self.reindex_resources() + self.reindex_resources(); + self.reinitialize_egress_watches(); + self.reinitialize_fallback_watches() } } impl Index { pub fn shared(cluster_info: Arc) -> SharedIndex { let cluster_networks = cluster_info.networks.clone(); + let (fallback_polcy_tx, _) = watch::channel(()); Arc::new(RwLock::new(Self { namespaces: NamespaceIndex { by_ns: HashMap::default(), @@ -384,14 +409,30 @@ impl Index { egress_networks_by_ref: HashMap::default(), resource_info: HashMap::default(), cluster_networks: cluster_networks.into_iter().map(Cidr::from).collect(), + fallback_polcy_tx, })) } + pub fn is_address_in_cluster(&self, addr: IpAddr) -> bool { + self.cluster_networks + .iter() + .any(|net| net.contains(&addr.into())) + } + + pub fn fallback_policy_rx(&self) -> watch::Receiver<()> { + self.fallback_polcy_tx.subscribe() + } + + fn reinitialize_fallback_watches(&mut self) { + let (new_fallback_tx, _) = 
watch::channel(()); + self.fallback_polcy_tx = new_fallback_tx; + } + pub fn outbound_policy_rx( &mut self, - target: OutboundDiscoverTarget, + target: ResourceTarget, ) -> Result> { - let OutboundDiscoverTarget { + let ResourceTarget { name, namespace, port, @@ -440,13 +481,13 @@ impl Index { &self, addr: IpAddr, source_namespace: String, - ) -> Option<(String, String, TrafficPolicy)> { + ) -> Option<(String, String)> { egress_network::resolve_egress_network( addr, source_namespace, self.egress_networks_by_ref.values(), ) - .map(|(r, p)| (r.namespace, r.name, p)) + .map(|r| (r.namespace, r.name)) } fn apply_http(&mut self, route: HttpRouteResource) { @@ -625,6 +666,12 @@ impl Index { ns.reindex_resources(&self.resource_info); } } + + fn reinitialize_egress_watches(&mut self) { + for ns in self.namespaces.by_ns.values_mut() { + ns.reinitialize_egress_watches(); + } + } } impl Namespace { @@ -940,11 +987,19 @@ impl Namespace { } } - fn update_resource(&mut self, name: String, resource: &ResourceInfo) { + fn reinitialize_egress_watches(&mut self) { + for routes in self.resource_port_routes.values_mut() { + if let ParentMeta::EgressNetwork(_) = routes.parent_meta { + routes.reinitialize_watches(); + } + } + } + + fn update_resource(&mut self, name: String, kind: ResourceKind, resource: &ResourceInfo) { tracing::debug!(?name, ?resource, "updating resource"); for (resource_port, resource_routes) in self.resource_port_routes.iter_mut() { - if resource_port.name != name { + if resource_port.name != name || kind != resource_port.kind { continue; } @@ -1019,13 +1074,14 @@ impl Namespace { kind: rp.kind.clone(), }; - let authority = match rp.kind { - ResourceKind::EgressNetwork => None, + let mut parent_meta = match rp.kind { + ResourceKind::EgressNetwork => ParentMeta::EgressNetwork(TrafficPolicy::Deny), ResourceKind::Service => { - Some(cluster.service_dns_authority(&self.namespace, &rp.name, rp.port)) + let authority = + cluster.service_dns_authority(&self.namespace, 
&rp.name, rp.port); + ParentMeta::Service { authority } } }; - let mut opaque = false; let mut accrual = None; let mut http_retry = None; @@ -1037,6 +1093,10 @@ impl Namespace { http_retry = resource.http_retry.clone(); grpc_retry = resource.grpc_retry.clone(); timeouts = resource.timeouts.clone(); + + if let Some(traffic_policy) = resource.traffic_policy { + parent_meta = ParentMeta::EgressNetwork(traffic_policy) + } } // The routes which target this Resource but don't specify @@ -1063,12 +1123,12 @@ impl Namespace { .unwrap_or_default(); let mut resource_routes = ResourceRoutes { + parent_meta, opaque, accrual, http_retry, grpc_retry, timeouts, - authority, port: rp.port, name: rp.name, namespace: self.namespace.clone(), @@ -1273,6 +1333,12 @@ fn route_accepted_by_parent( } impl ResourceRoutes { + fn reinitialize_watches(&mut self) { + for watch in self.watches_by_ns.values_mut() { + watch.reinitialize_watch(); + } + } + fn watch_for_ns_or_default(&mut self, namespace: String) -> &mut RoutesWatch { // The routes from the producer namespace apply to watches in all // namespaces, so we copy them. 
@@ -1301,6 +1367,7 @@ impl ResourceRoutes { self.watches_by_ns.entry(namespace).or_insert_with(|| { let (sender, _) = watch::channel(OutboundPolicy { + parent_meta: self.parent_meta.clone(), port: self.port, opaque: self.opaque, accrual: self.accrual, @@ -1312,11 +1379,11 @@ impl ResourceRoutes { tls_routes: tls_routes.clone(), tcp_routes: tcp_routes.clone(), name: self.name.to_string(), - service_authority: self.authority.clone().unwrap_or("".into()), namespace: self.namespace.to_string(), }); RoutesWatch { + parent_meta: self.parent_meta.clone(), http_routes, grpc_routes, tls_routes, @@ -1468,10 +1535,21 @@ impl ResourceRoutes { } impl RoutesWatch { + fn reinitialize_watch(&mut self) { + let current_policy = self.watch.borrow().clone(); + let (new_sender, _) = watch::channel(current_policy); + self.watch = new_sender; + } + fn send_if_modified(&mut self) { self.watch.send_if_modified(|policy| { let mut modified = false; + if self.parent_meta != policy.parent_meta { + policy.parent_meta = self.parent_meta.clone(); + modified = true; + } + if self.http_routes != policy.http_routes { policy.http_routes = self.http_routes.clone(); modified = true; diff --git a/policy-controller/k8s/index/src/outbound/index/egress_network.rs b/policy-controller/k8s/index/src/outbound/index/egress_network.rs index 9d11b4dff56ac..ef961e16e1b0b 100644 --- a/policy-controller/k8s/index/src/outbound/index/egress_network.rs +++ b/policy-controller/k8s/index/src/outbound/index/egress_network.rs @@ -1,5 +1,4 @@ use chrono::{offset::Utc, DateTime}; -use linkerd_policy_controller_core::outbound; use linkerd_policy_controller_k8s_api::policy::{Cidr, Network, TrafficPolicy}; use linkerd_policy_controller_k8s_api::{policy as linkerd_k8s_api, ResourceExt}; use std::net::IpAddr; @@ -68,7 +67,7 @@ pub(crate) fn resolve_egress_network<'n>( addr: IpAddr, source_namespace: String, nets: impl Iterator, -) -> Option<(super::ResourceRef, outbound::TrafficPolicy)> { +) -> Option { let (same_ns, rest): 
(Vec<_>, Vec<_>) = nets.partition(|un| un.namespace == source_namespace); let to_pick_from = if !same_ns.is_empty() { same_ns } else { rest }; @@ -85,18 +84,10 @@ pub(crate) fn resolve_egress_network<'n>( }) }) .max_by(compare_matched_egress_network) - .map(|m| { - ( - super::ResourceRef { - kind: super::ResourceKind::EgressNetwork, - name: m.name, - namespace: m.namespace, - }, - match m.traffic_policy { - TrafficPolicy::Allow => outbound::TrafficPolicy::Allow, - TrafficPolicy::Deny => outbound::TrafficPolicy::Deny, - }, - ) + .map(|m| super::ResourceRef { + kind: super::ResourceKind::EgressNetwork, + name: m.name, + namespace: m.namespace, }) } @@ -159,7 +150,7 @@ mod test { ]; let resolved = resolve_egress_network(ip_addr, "ns".into(), networks.iter()); - assert_eq!(resolved.unwrap().0.name, "net-2".to_string()) + assert_eq!(resolved.unwrap().name, "net-2".to_string()) } #[test] @@ -189,7 +180,7 @@ mod test { ]; let resolved = resolve_egress_network(ip_addr, "ns-1".into(), networks.iter()); - assert_eq!(resolved.unwrap().0.name, "net-1".to_string()) + assert_eq!(resolved.unwrap().name, "net-1".to_string()) } #[test] @@ -219,7 +210,7 @@ mod test { ]; let resolved = resolve_egress_network(ip_addr, "ns".into(), networks.iter()); - assert_eq!(resolved.unwrap().0.name, "net-2".to_string()) + assert_eq!(resolved.unwrap().name, "net-2".to_string()) } #[test] @@ -249,7 +240,7 @@ mod test { ]; let resolved = resolve_egress_network(ip_addr, "ns".into(), networks.iter()); - assert_eq!(resolved.unwrap().0.name, "b".to_string()) + assert_eq!(resolved.unwrap().name, "b".to_string()) } #[test] @@ -279,6 +270,6 @@ mod test { ]; let resolved = resolve_egress_network(ip_addr, "ns".into(), networks.iter()); - assert_eq!(resolved.unwrap().0.name, "d".to_string()) + assert_eq!(resolved.unwrap().name, "d".to_string()) } } diff --git a/policy-controller/k8s/index/src/outbound/tests.rs b/policy-controller/k8s/index/src/outbound/tests.rs index d11a9ed8947b0..2a1b794dd0ad4 100644 --- 
a/policy-controller/k8s/index/src/outbound/tests.rs +++ b/policy-controller/k8s/index/src/outbound/tests.rs @@ -7,9 +7,14 @@ use crate::{ }; use k8s_openapi::chrono::Utc; use kubert::index::IndexNamespacedResource; +use linkerd_policy_controller_core::outbound::{Kind, ResourceTarget}; use linkerd_policy_controller_core::IpNet; -use linkerd_policy_controller_k8s_api::{self as k8s, policy}; +use linkerd_policy_controller_k8s_api::{ + self as k8s, + policy::{self, EgressNetwork}, +}; use tokio::time; +use tracing::Level; mod routes; @@ -93,3 +98,119 @@ impl Default for TestConfig { }) } } + +#[test] +fn switch_to_another_egress_network_parent() { + tracing_subscriber::fmt() + .with_max_level(Level::TRACE) + .try_init() + .ok(); + + let test = TestConfig::default(); + // Create network b. + let network_b = mk_egress_network("ns", "b"); + test.index.write().apply(network_b); + + let (ns, name) = test + .index + .write() + .lookup_egress_network("192.168.0.1".parse().unwrap(), "ns".to_string()) + .expect("should resolve"); + + assert_eq!(ns, "ns".to_string()); + assert_eq!(name, "b".to_string()); + + let mut rx_b = test + .index + .write() + .outbound_policy_rx(ResourceTarget { + name, + namespace: ns.clone(), + port: 8080.try_into().unwrap(), + source_namespace: ns, + kind: Kind::EgressNetwork("192.168.0.1:8080".parse().unwrap()), + }) + .expect("b.ns should exist"); + + // first resolution is for network B + let policy_b = rx_b.borrow_and_update(); + assert_eq!(policy_b.namespace, "ns".to_string()); + assert_eq!(policy_b.name, "b".to_string()); + drop(policy_b); + + // Create network a. 
+ let network_a = mk_egress_network("ns", "a"); + test.index.write().apply(network_a); + + // watch should be dropped at this point + assert!(rx_b.has_changed().is_err()); + + // now a new resolution should resolve network a + + let (ns, name) = test + .index + .write() + .lookup_egress_network("192.168.0.1".parse().unwrap(), "ns".to_string()) + .expect("should resolve"); + + let mut rx_a = test + .index + .write() + .outbound_policy_rx(ResourceTarget { + name, + namespace: ns.clone(), + port: 8080.try_into().unwrap(), + source_namespace: ns, + kind: Kind::EgressNetwork("192.168.0.1:8080".parse().unwrap()), + }) + .expect("a.ns should exist"); + + // second resolution is for network A + let policy_b = rx_a.borrow_and_update(); + assert_eq!(policy_b.namespace, "ns".to_string()); + assert_eq!(policy_b.name, "a".to_string()); +} + +#[test] +fn fallback_rx_closed_when_egress_net_created() { + tracing_subscriber::fmt() + .with_max_level(Level::TRACE) + .try_init() + .ok(); + + let test = TestConfig::default(); + + let fallback_rx = test.index.read().fallback_policy_rx(); + assert!(fallback_rx.has_changed().is_ok()); + + // Create network. + let network = mk_egress_network("ns", "egress-net"); + test.index.write().apply(network); + + assert!(fallback_rx.has_changed().is_err()); +} + +#[test] +fn fallback_rx_closed_when_egress_net_deleted() { + tracing_subscriber::fmt() + .with_max_level(Level::TRACE) + .try_init() + .ok(); + + let test = TestConfig::default(); + + // Create network. 
+ let network = mk_egress_network("ns", "egress-net"); + test.index.write().apply(network); + + let fallback_rx = test.index.read().fallback_policy_rx(); + assert!(fallback_rx.has_changed().is_ok()); + + >::delete( + &mut test.index.write(), + "ns".into(), + "egress-net".into(), + ); + + assert!(fallback_rx.has_changed().is_err()); +} diff --git a/policy-controller/k8s/index/src/outbound/tests/routes/grpc.rs b/policy-controller/k8s/index/src/outbound/tests/routes/grpc.rs index ae93457e652c9..ff2051b8e60d1 100644 --- a/policy-controller/k8s/index/src/outbound/tests/routes/grpc.rs +++ b/policy-controller/k8s/index/src/outbound/tests/routes/grpc.rs @@ -1,8 +1,6 @@ use kube::Resource; use linkerd_policy_controller_core::{ - outbound::{ - self, Backend, Kind, OutboundDiscoverTarget, WeightedEgressNetwork, WeightedService, - }, + outbound::{Backend, Kind, ResourceTarget, WeightedEgressNetwork, WeightedService}, routes::GroupKindNamespaceName, POLICY_CONTROLLER_NAME, }; @@ -37,7 +35,7 @@ fn backend_service() { let mut rx = test .index .write() - .outbound_policy_rx(OutboundDiscoverTarget { + .outbound_policy_rx(ResourceTarget { name: "apex".to_string(), namespace: "ns".to_string(), port: 8080.try_into().unwrap(), @@ -132,15 +130,12 @@ fn backend_egress_network() { let mut rx = test .index .write() - .outbound_policy_rx(OutboundDiscoverTarget { + .outbound_policy_rx(ResourceTarget { name: "apex".to_string(), namespace: "ns".to_string(), port: 8080.try_into().unwrap(), source_namespace: "ns".to_string(), - kind: Kind::EgressNetwork { - original_dst: "192.168.0.1:8080".parse().unwrap(), - traffic_policy: outbound::TrafficPolicy::Allow, - }, + kind: Kind::EgressNetwork("192.168.0.1:8080".parse().unwrap()), }) .expect("apex.ns should exist"); diff --git a/policy-controller/k8s/index/src/outbound/tests/routes/http.rs b/policy-controller/k8s/index/src/outbound/tests/routes/http.rs index 1db3a45dfadd1..96107e51d8fb0 100644 --- 
a/policy-controller/k8s/index/src/outbound/tests/routes/http.rs +++ b/policy-controller/k8s/index/src/outbound/tests/routes/http.rs @@ -1,8 +1,6 @@ use kube::Resource; use linkerd_policy_controller_core::{ - outbound::{ - self, Backend, Kind, OutboundDiscoverTarget, WeightedEgressNetwork, WeightedService, - }, + outbound::{Backend, Kind, ResourceTarget, WeightedEgressNetwork, WeightedService}, routes::GroupKindNamespaceName, POLICY_CONTROLLER_NAME, }; @@ -38,7 +36,7 @@ fn backend_service() { let mut rx = test .index .write() - .outbound_policy_rx(OutboundDiscoverTarget { + .outbound_policy_rx(ResourceTarget { name: "apex".to_string(), namespace: "ns".to_string(), port: 8080.try_into().unwrap(), @@ -135,15 +133,12 @@ fn backend_egress_network() { let mut rx = test .index .write() - .outbound_policy_rx(OutboundDiscoverTarget { + .outbound_policy_rx(ResourceTarget { name: "apex".to_string(), namespace: "ns".to_string(), port: 8080.try_into().unwrap(), source_namespace: "ns".to_string(), - kind: Kind::EgressNetwork { - original_dst: "192.168.0.1:8080".parse().unwrap(), - traffic_policy: outbound::TrafficPolicy::Allow, - }, + kind: Kind::EgressNetwork("192.168.0.1:8080".parse().unwrap()), }) .expect("apex.ns should exist"); diff --git a/policy-controller/k8s/index/src/outbound/tests/routes/tcp.rs b/policy-controller/k8s/index/src/outbound/tests/routes/tcp.rs index 16823d41dbf67..f9d12e2e6eed3 100644 --- a/policy-controller/k8s/index/src/outbound/tests/routes/tcp.rs +++ b/policy-controller/k8s/index/src/outbound/tests/routes/tcp.rs @@ -1,8 +1,6 @@ use kube::Resource; use linkerd_policy_controller_core::{ - outbound::{ - self, Backend, Kind, OutboundDiscoverTarget, WeightedEgressNetwork, WeightedService, - }, + outbound::{Backend, Kind, ResourceTarget, WeightedEgressNetwork, WeightedService}, routes::GroupKindNamespaceName, POLICY_CONTROLLER_NAME, }; @@ -37,7 +35,7 @@ fn backend_service() { let mut rx = test .index .write() - .outbound_policy_rx(OutboundDiscoverTarget { + 
.outbound_policy_rx(ResourceTarget { name: "apex".to_string(), namespace: "ns".to_string(), port: 8080.try_into().unwrap(), @@ -128,15 +126,12 @@ fn backend_egress_network() { let mut rx = test .index .write() - .outbound_policy_rx(OutboundDiscoverTarget { + .outbound_policy_rx(ResourceTarget { name: "apex".to_string(), namespace: "ns".to_string(), port: 8080.try_into().unwrap(), source_namespace: "ns".to_string(), - kind: Kind::EgressNetwork { - original_dst: "192.168.0.1:8080".parse().unwrap(), - traffic_policy: outbound::TrafficPolicy::Allow, - }, + kind: Kind::EgressNetwork("192.168.0.1:8080".parse().unwrap()), }) .expect("apex.ns should exist"); diff --git a/policy-controller/k8s/index/src/outbound/tests/routes/tls.rs b/policy-controller/k8s/index/src/outbound/tests/routes/tls.rs index 0673b2d53d612..914d7307ed79b 100644 --- a/policy-controller/k8s/index/src/outbound/tests/routes/tls.rs +++ b/policy-controller/k8s/index/src/outbound/tests/routes/tls.rs @@ -1,8 +1,6 @@ use kube::Resource; use linkerd_policy_controller_core::{ - outbound::{ - self, Backend, Kind, OutboundDiscoverTarget, WeightedEgressNetwork, WeightedService, - }, + outbound::{Backend, Kind, ResourceTarget, WeightedEgressNetwork, WeightedService}, routes::GroupKindNamespaceName, POLICY_CONTROLLER_NAME, }; @@ -37,7 +35,7 @@ fn backend_service() { let mut rx = test .index .write() - .outbound_policy_rx(OutboundDiscoverTarget { + .outbound_policy_rx(ResourceTarget { name: "apex".to_string(), namespace: "ns".to_string(), port: 8080.try_into().unwrap(), @@ -128,15 +126,12 @@ fn backend_egress_network() { let mut rx = test .index .write() - .outbound_policy_rx(OutboundDiscoverTarget { + .outbound_policy_rx(ResourceTarget { name: "apex".to_string(), namespace: "ns".to_string(), port: 8080.try_into().unwrap(), source_namespace: "ns".to_string(), - kind: Kind::EgressNetwork { - original_dst: "192.168.0.1:8080".parse().unwrap(), - traffic_policy: outbound::TrafficPolicy::Allow, - }, + kind: 
Kind::EgressNetwork("192.168.0.1:8080".parse().unwrap()), }) .expect("apex.ns should exist"); diff --git a/policy-controller/src/lib.rs b/policy-controller/src/lib.rs index dc5e367dcafcb..1cf817fd9ed4c 100644 --- a/policy-controller/src/lib.rs +++ b/policy-controller/src/lib.rs @@ -5,11 +5,13 @@ pub mod index_list; mod validation; pub use self::admission::Admission; use anyhow::Result; +use futures::StreamExt; use linkerd_policy_controller_core::inbound::{ DiscoverInboundServer, InboundServer, InboundServerStream, }; use linkerd_policy_controller_core::outbound::{ - DiscoverOutboundPolicy, Kind, OutboundDiscoverTarget, OutboundPolicy, OutboundPolicyStream, + DiscoverOutboundPolicy, Kind, OutboundDiscoverTarget, OutboundPolicyKind, OutboundPolicyStream, + ParentMeta, ResourceOutboundPolicy, ResourceTarget, }; pub use linkerd_policy_controller_core::IpNet; pub use linkerd_policy_controller_grpc as grpc; @@ -89,25 +91,99 @@ impl DiscoverOutboundPolicy for OutboundDiscover { async fn get_outbound_policy( &self, target: OutboundDiscoverTarget, - ) -> Result> { - let rx = match self.0.write().outbound_policy_rx(target) { - Ok(rx) => rx, - Err(error) => { - tracing::error!(%error, "failed to get outbound policy rx"); - return Ok(None); + ) -> Result> { + match target { + OutboundDiscoverTarget::Fallback(original_dst) => { + Ok(Some(OutboundPolicyKind::Fallback(original_dst))) } - }; - let policy = (*rx.borrow()).clone(); - Ok(Some(policy)) + OutboundDiscoverTarget::Resource(resource) => { + let rx = match self.0.write().outbound_policy_rx(resource.clone()) { + Ok(rx) => rx, + Err(error) => { + tracing::error!(%error, "failed to get outbound policy rx"); + return Ok(None); + } + }; + let policy = (*rx.borrow()).clone(); + + let resource = match (&policy.parent_meta, &resource.kind) { + ( + ParentMeta::EgressNetwork(traffic_policy), + Kind::EgressNetwork(original_dst), + ) => ResourceOutboundPolicy::Egress { + traffic_policy: *traffic_policy, + original_dst: *original_dst, 
+ policy: policy.clone(), + }, + + (ParentMeta::Service { authority }, Kind::EgressNetwork(_)) => { + ResourceOutboundPolicy::Service { + authority: authority.clone(), + policy, + } + } + (policy_kind, resource_kind) => { + anyhow::bail!( + "policy kind {:?} incorrect for resource kind: {:?}", + policy_kind, + resource_kind + ); + } + }; + Ok(Some(OutboundPolicyKind::Resource(resource))) + } + } } async fn watch_outbound_policy( &self, target: OutboundDiscoverTarget, ) -> Result> { - match self.0.write().outbound_policy_rx(target) { - Ok(rx) => Ok(Some(Box::pin(tokio_stream::wrappers::WatchStream::new(rx)))), - Err(_) => Ok(None), + match target { + OutboundDiscoverTarget::Fallback(original_dst) => { + let rx = self.0.write().fallback_policy_rx(); + let stream = tokio_stream::wrappers::WatchStream::new(rx) + .map(move |_| OutboundPolicyKind::Fallback(original_dst)); + Ok(Some(Box::pin(stream))) + } + + OutboundDiscoverTarget::Resource(resource) => { + match self.0.write().outbound_policy_rx(resource.clone()) { + Ok(rx) => { + let stream = tokio_stream::wrappers::WatchStream::new(rx).filter_map( + move |policy| { + let resource = match (policy.parent_meta.clone(), resource.kind) { + ( + ParentMeta::EgressNetwork(traffic_policy), + Kind::EgressNetwork(original_dst), + ) => Some(ResourceOutboundPolicy::Egress { + traffic_policy, + original_dst, + policy: policy.clone(), + }), + + (ParentMeta::Service { authority }, Kind::EgressNetwork(_)) => { + Some(ResourceOutboundPolicy::Service { authority, policy }) + } + (policy_kind, resource_kind) => { + tracing::error!( + "policy kind {:?} incorrect for resource kind: {:?}", + policy_kind, + resource_kind + ); + None + } + } + .map(OutboundPolicyKind::Resource); + + futures::future::ready(resource) + }, + ); + Ok(Some(Box::pin(stream))) + } + Err(_) => Ok(None), + } + } } } @@ -119,29 +195,32 @@ impl DiscoverOutboundPolicy for OutboundDiscover { ) -> Option { let index = self.0.read(); if let Some((namespace, name)) = 
index.lookup_service(addr) { - return Some(OutboundDiscoverTarget { + return Some(OutboundDiscoverTarget::Resource(ResourceTarget { name, namespace, port, source_namespace, kind: Kind::Service, - }); + })); } - index - .lookup_egress_network(addr, source_namespace.clone()) - .map(|(namespace, name, traffic_policy)| { - let original_dst = SocketAddr::new(addr, port.into()); - OutboundDiscoverTarget { - name, - namespace, - port, - source_namespace, - kind: Kind::EgressNetwork { - original_dst, - traffic_policy, - }, - } - }) + if let Some((namespace, name)) = index.lookup_egress_network(addr, source_namespace.clone()) + { + let original_dst = SocketAddr::new(addr, port.into()); + return Some(OutboundDiscoverTarget::Resource(ResourceTarget { + name, + namespace, + port, + source_namespace, + kind: Kind::EgressNetwork(original_dst), + })); + } + + if !index.is_address_in_cluster(addr) { + let original_dst = SocketAddr::new(addr, port.into()); + return Some(OutboundDiscoverTarget::Fallback(original_dst)); + } + + None } } diff --git a/policy-test/src/lib.rs b/policy-test/src/lib.rs index b2dca83b24a38..19121e3634b0b 100644 --- a/policy-test/src/lib.rs +++ b/policy-test/src/lib.rs @@ -435,7 +435,7 @@ where drop(_tracing); } - if std::env::var("POLICY_TEST_NO_CLEANUP").is_err() { + if std::env::var("POLICY_TEST_NO_CLEANUP").is_ok() { tracing::debug!(ns = %ns.name_unchecked(), "Deleting"); api.delete(&ns.name_unchecked(), &kube::api::DeleteParams::background()) .await From 903005e7811e9a774b74dba0d5e5e52991ecf55f Mon Sep 17 00:00:00 2001 From: Zahari Dichev Date: Wed, 23 Oct 2024 14:40:05 +0000 Subject: [PATCH 03/11] simplify types Signed-off-by: Zahari Dichev --- policy-controller/grpc/src/outbound.rs | 4 ++-- policy-controller/src/lib.rs | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/policy-controller/grpc/src/outbound.rs b/policy-controller/grpc/src/outbound.rs index 6d4da0cd96080..bc9a499a266db 100644 --- 
a/policy-controller/grpc/src/outbound.rs +++ b/policy-controller/grpc/src/outbound.rs @@ -383,8 +383,8 @@ fn to_proto( } } -fn timestamp_then_name( - (left_id, left_route): &(GroupKindNamespaceName, L), +fn timestamp_then_name( + (left_id, left_route): &(GroupKindNamespaceName, R), (right_id, right_route): &(GroupKindNamespaceName, R), ) -> std::cmp::Ordering { let by_ts = match ( diff --git a/policy-controller/src/lib.rs b/policy-controller/src/lib.rs index 1cf817fd9ed4c..99faab0e987a6 100644 --- a/policy-controller/src/lib.rs +++ b/policy-controller/src/lib.rs @@ -116,7 +116,7 @@ impl DiscoverOutboundPolicy for OutboundDiscover { policy: policy.clone(), }, - (ParentMeta::Service { authority }, Kind::EgressNetwork(_)) => { + (ParentMeta::Service { authority }, Kind::Service) => { ResourceOutboundPolicy::Service { authority: authority.clone(), policy, @@ -162,7 +162,7 @@ impl DiscoverOutboundPolicy for OutboundDiscover { policy: policy.clone(), }), - (ParentMeta::Service { authority }, Kind::EgressNetwork(_)) => { + (ParentMeta::Service { authority }, Kind::Service) => { Some(ResourceOutboundPolicy::Service { authority, policy }) } (policy_kind, resource_kind) => { From c2888744dc23642d02cc60a4967f05aaaca3bdff Mon Sep 17 00:00:00 2001 From: Zahari Dichev Date: Wed, 23 Oct 2024 19:12:46 +0000 Subject: [PATCH 04/11] fix not found integration test Signed-off-by: Zahari Dichev --- policy-test/tests/outbound_api_gateway.rs | 2 +- policy-test/tests/outbound_api_linkerd.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/policy-test/tests/outbound_api_gateway.rs b/policy-test/tests/outbound_api_gateway.rs index f70560a312ea8..cb2f5325df247 100644 --- a/policy-test/tests/outbound_api_gateway.rs +++ b/policy-test/tests/outbound_api_gateway.rs @@ -20,7 +20,7 @@ async fn service_does_not_exist() { // Build a service but don't apply it to the cluster. let mut svc = mk_service(&ns, "my-svc", 4191); // Give it a bogus cluster ip. 
- svc.spec.as_mut().unwrap().cluster_ip = Some("1.1.1.1".to_string()); + svc.spec.as_mut().unwrap().cluster_ip = Some("192.168.0.2".to_string()); let mut policy_api = grpc::OutboundPolicyClient::port_forwarded(&client).await; let rsp = policy_api.watch(&ns, &svc, 4191).await; diff --git a/policy-test/tests/outbound_api_linkerd.rs b/policy-test/tests/outbound_api_linkerd.rs index f4e24cd255bc8..7c42fd60f8ddf 100644 --- a/policy-test/tests/outbound_api_linkerd.rs +++ b/policy-test/tests/outbound_api_linkerd.rs @@ -21,7 +21,7 @@ async fn service_does_not_exist() { // Build a service but don't apply it to the cluster. let mut svc = mk_service(&ns, "my-svc", 4191); // Give it a bogus cluster ip. - svc.spec.as_mut().unwrap().cluster_ip = Some("1.1.1.1".to_string()); + svc.spec.as_mut().unwrap().cluster_ip = Some("192.168.0.2".to_string()); let mut policy_api = grpc::OutboundPolicyClient::port_forwarded(&client).await; let rsp = policy_api.watch(&ns, &svc, 4191).await; From a53f573dd5d90dc1580f362225705b6b30b2f2a1 Mon Sep 17 00:00:00 2001 From: Zahari Dichev Date: Thu, 24 Oct 2024 07:50:53 +0000 Subject: [PATCH 05/11] address feedback Signed-off-by: Zahari Dichev --- policy-controller/grpc/src/outbound.rs | 119 ++++++++++++------ policy-controller/grpc/src/outbound/tcp.rs | 5 - policy-controller/grpc/src/outbound/tls.rs | 5 - .../k8s/index/src/outbound/index.rs | 24 ++++ policy-test/src/lib.rs | 2 +- policy-test/tests/outbound_api_linkerd.rs | 1 - 6 files changed, 105 insertions(+), 51 deletions(-) diff --git a/policy-controller/grpc/src/outbound.rs b/policy-controller/grpc/src/outbound.rs index bc9a499a266db..858cca9320cea 100644 --- a/policy-controller/grpc/src/outbound.rs +++ b/policy-controller/grpc/src/outbound.rs @@ -234,47 +234,88 @@ fn response_stream( } fn fallback(original_dst: SocketAddr) -> outbound::OutboundPolicy { - outbound::OutboundPolicy { - metadata: Some(Metadata { - kind: Some(metadata::Kind::Default("egress-fallback".to_string())), - }), + // This 
encoder sets deprecated timeouts for older proxies. + #![allow(deprecated)] + + let metadata = Some(Metadata { + kind: Some(metadata::Kind::Default("egress-fallback".to_string())), + }); + + let backend = outbound::Backend { + metadata: metadata.clone(), + queue: Some(default_queue_config()), + kind: Some(outbound::backend::Kind::Forward( + destination::WeightedAddr { + addr: Some(original_dst.into()), + weight: 1, + ..Default::default() + }, + )), + }; - protocol: Some(outbound::ProxyProtocol { - kind: Some(outbound::proxy_protocol::Kind::Opaque( - outbound::proxy_protocol::Opaque { - routes: vec![outbound::OpaqueRoute { - metadata: Some(Metadata { - kind: Some(metadata::Kind::Default("egress-fallback".to_string())), - }), - rules: vec![outbound::opaque_route::Rule { - backends: Some(outbound::opaque_route::Distribution { - kind: Some( - outbound::opaque_route::distribution::Kind::FirstAvailable( - outbound::opaque_route::distribution::FirstAvailable { - backends: vec![outbound::opaque_route::RouteBackend { - backend: Some(outbound::Backend { - metadata: Some(Metadata { - kind: Some(metadata::Kind::Default( - "egress-fallback".to_string(), - )), - }), - queue: Some(default_queue_config()), - kind: Some(outbound::backend::Kind::Forward( - destination::WeightedAddr { - addr: Some(original_dst.into()), - weight: 1, - ..Default::default() - }, - )), - }), - }], - }, - ), - ), - }), + let opaque = outbound::proxy_protocol::Opaque { + routes: vec![outbound::OpaqueRoute { + metadata: Some(Metadata { + kind: Some(metadata::Kind::Default("egress-fallback".to_string())), + }), + rules: vec![outbound::opaque_route::Rule { + backends: Some(outbound::opaque_route::Distribution { + kind: Some(outbound::opaque_route::distribution::Kind::FirstAvailable( + outbound::opaque_route::distribution::FirstAvailable { + backends: vec![outbound::opaque_route::RouteBackend { + backend: Some(backend.clone()), + }], + }, + )), + }), + }], + error: None, + }], + }; + + let http_routes = 
vec![outbound::HttpRoute { + hosts: Vec::default(), + metadata: metadata.clone(), + rules: vec![outbound::http_route::Rule { + backends: Some(outbound::http_route::Distribution { + kind: Some(outbound::http_route::distribution::Kind::FirstAvailable( + outbound::http_route::distribution::FirstAvailable { + backends: vec![outbound::http_route::RouteBackend { + backend: Some(backend), + filters: Vec::default(), + request_timeout: None, }], - error: None, - }], + }, + )), + }), + matches: vec![api::http_route::HttpRouteMatch::default()], + filters: Vec::default(), + request_timeout: None, + timeouts: None, + retry: None, + allow_l5d_request_headers: false, + }], + }]; + + outbound::OutboundPolicy { + metadata, + protocol: Some(outbound::ProxyProtocol { + kind: Some(outbound::proxy_protocol::Kind::Detect( + outbound::proxy_protocol::Detect { + timeout: Some( + time::Duration::from_secs(10) + .try_into() + .expect("failed to convert detect timeout to protobuf"), + ), + opaque: Some(opaque), + http1: Some(outbound::proxy_protocol::Http1 { + routes: http_routes.clone(), + failure_accrual: None, + }), + http2: Some(outbound::proxy_protocol::Http2 { + routes: http_routes, + failure_accrual: None, + }), }, )), }), diff --git a/policy-controller/grpc/src/outbound/tcp.rs b/policy-controller/grpc/src/outbound/tcp.rs index 26c14ee7c05ab..dbc7c40293dfd 100644 --- a/policy-controller/grpc/src/outbound/tcp.rs +++ b/policy-controller/grpc/src/outbound/tcp.rs @@ -34,9 +34,6 @@ fn convert_outbound_route( backend: outbound::Backend, policy: &ResourceOutboundPolicy, ) -> outbound::OpaqueRoute { - // This encoder sets deprecated timeouts for older proxies. 
- #![allow(deprecated)] - let metadata = Some(meta::Metadata { kind: Some(meta::metadata::Kind::Resource(meta::Resource { group: gknn.group.to_string(), @@ -215,7 +212,6 @@ pub(crate) fn default_outbound_egress_route( backend: outbound::Backend, traffic_policy: &TrafficPolicy, ) -> outbound::OpaqueRoute { - #![allow(deprecated)] let (error, name) = match traffic_policy { TrafficPolicy::Allow => (None, "tcp-egress-allow"), TrafficPolicy::Deny => ( @@ -226,7 +222,6 @@ pub(crate) fn default_outbound_egress_route( ), }; - // This encoder sets deprecated timeouts for older proxies. let metadata = Some(meta::Metadata { kind: Some(meta::metadata::Kind::Default(name.to_string())), }); diff --git a/policy-controller/grpc/src/outbound/tls.rs b/policy-controller/grpc/src/outbound/tls.rs index 9a436e7cf59a5..8860b695b0e39 100644 --- a/policy-controller/grpc/src/outbound/tls.rs +++ b/policy-controller/grpc/src/outbound/tls.rs @@ -36,9 +36,6 @@ fn convert_outbound_route( backend: outbound::Backend, policy: &ResourceOutboundPolicy, ) -> outbound::TlsRoute { - // This encoder sets deprecated timeouts for older proxies. - #![allow(deprecated)] - let metadata = Some(meta::Metadata { kind: Some(meta::metadata::Kind::Resource(meta::Resource { group: gknn.group.to_string(), @@ -220,7 +217,6 @@ pub(crate) fn default_outbound_egress_route( backend: outbound::Backend, traffic_policy: &TrafficPolicy, ) -> outbound::TlsRoute { - #![allow(deprecated)] let (error, name) = match traffic_policy { TrafficPolicy::Allow => (None, "tls-egress-allow"), TrafficPolicy::Deny => ( @@ -231,7 +227,6 @@ pub(crate) fn default_outbound_egress_route( ), }; - // This encoder sets deprecated timeouts for older proxies. 
let metadata = Some(meta::Metadata { kind: Some(meta::metadata::Kind::Default(name.to_string())), }); diff --git a/policy-controller/k8s/index/src/outbound/index.rs b/policy-controller/k8s/index/src/outbound/index.rs index 337290105ae0b..e620ac5a0375f 100644 --- a/policy-controller/k8s/index/src/outbound/index.rs +++ b/policy-controller/k8s/index/src/outbound/index.rs @@ -1011,6 +1011,7 @@ impl Namespace { resource.http_retry.clone(), resource.grpc_retry.clone(), resource.timeouts.clone(), + resource.traffic_policy, ); } } @@ -1493,22 +1494,35 @@ impl ResourceRoutes { http_retry: Option>, grpc_retry: Option>, timeouts: RouteTimeouts, + traffic_policy: Option, ) { self.opaque = opaque; self.accrual = accrual; self.http_retry = http_retry.clone(); self.grpc_retry = grpc_retry.clone(); self.timeouts = timeouts.clone(); + self.update_traffic_policy(traffic_policy); for watch in self.watches_by_ns.values_mut() { watch.opaque = opaque; watch.accrual = accrual; watch.http_retry = http_retry.clone(); watch.grpc_retry = grpc_retry.clone(); watch.timeouts = timeouts.clone(); + watch.update_traffic_policy(traffic_policy); watch.send_if_modified(); } } + fn update_traffic_policy(&mut self, traffic_policy: Option) { + if let (ParentMeta::EgressNetwork(current), Some(new)) = + (self.parent_meta.clone(), traffic_policy) + { + if current != new { + self.parent_meta = ParentMeta::EgressNetwork(new) + } + } + } + fn delete_http_route(&mut self, gknn: &GroupKindNamespaceName) { for watch in self.watches_by_ns.values_mut() { watch.remove_http_route(gknn); @@ -1541,6 +1555,16 @@ impl RoutesWatch { self.watch = new_sender; } + fn update_traffic_policy(&mut self, traffic_policy: Option) { + if let (ParentMeta::EgressNetwork(current), Some(new)) = + (self.parent_meta.clone(), traffic_policy) + { + if current != new { + self.parent_meta = ParentMeta::EgressNetwork(new) + } + } + } + fn send_if_modified(&mut self) { self.watch.send_if_modified(|policy| { let mut modified = false; diff --git 
a/policy-test/src/lib.rs b/policy-test/src/lib.rs index 19121e3634b0b..b2dca83b24a38 100644 --- a/policy-test/src/lib.rs +++ b/policy-test/src/lib.rs @@ -435,7 +435,7 @@ where drop(_tracing); } - if std::env::var("POLICY_TEST_NO_CLEANUP").is_ok() { + if std::env::var("POLICY_TEST_NO_CLEANUP").is_err() { tracing::debug!(ns = %ns.name_unchecked(), "Deleting"); api.delete(&ns.name_unchecked(), &kube::api::DeleteParams::background()) .await diff --git a/policy-test/tests/outbound_api_linkerd.rs b/policy-test/tests/outbound_api_linkerd.rs index 7c42fd60f8ddf..38d19bc68ec76 100644 --- a/policy-test/tests/outbound_api_linkerd.rs +++ b/policy-test/tests/outbound_api_linkerd.rs @@ -26,7 +26,6 @@ async fn service_does_not_exist() { let mut policy_api = grpc::OutboundPolicyClient::port_forwarded(&client).await; let rsp = policy_api.watch(&ns, &svc, 4191).await; - println!("{:?}", rsp); assert!(rsp.is_err()); assert_eq!(rsp.err().unwrap().code(), tonic::Code::NotFound); }) From 501c646d048572f71508f834a87b2799a4faff6f Mon Sep 17 00:00:00 2001 From: Zahari Dichev Date: Thu, 24 Oct 2024 10:10:21 +0000 Subject: [PATCH 06/11] remove PolicyKind type Signed-off-by: Zahari Dichev --- policy-controller/core/src/outbound.rs | 16 +- policy-controller/core/src/outbound/policy.rs | 58 ++-- policy-controller/core/src/outbound/target.rs | 9 + policy-controller/grpc/src/outbound.rs | 283 +++++++++++------- policy-controller/grpc/src/outbound/grpc.rs | 125 ++++---- policy-controller/grpc/src/outbound/http.rs | 134 +++++---- policy-controller/grpc/src/outbound/tcp.rs | 116 +++---- policy-controller/grpc/src/outbound/tls.rs | 116 +++---- .../k8s/index/src/outbound/index.rs | 58 ++-- .../k8s/index/src/outbound/tests.rs | 8 +- policy-controller/src/lib.rs | 116 ++----- 11 files changed, 572 insertions(+), 467 deletions(-) diff --git a/policy-controller/core/src/outbound.rs b/policy-controller/core/src/outbound.rs index 5cd4dd8fb935f..6dde8cab36422 100644 --- 
a/policy-controller/core/src/outbound.rs +++ b/policy-controller/core/src/outbound.rs @@ -11,8 +11,10 @@ use std::{net::IpAddr, num::NonZeroU16, pin::Pin, time}; mod policy; mod target; +type FallbackPolicy = (); + pub use self::{ - policy::{OutboundPolicy, OutboundPolicyKind, ParentMeta, ResourceOutboundPolicy}, + policy::{OutboundPolicy, ParentInfo, ResourceOutboundPolicy}, target::{Kind, OutboundDiscoverTarget, ResourceTarget}, }; @@ -22,16 +24,18 @@ pub trait Route { /// Models outbound policy discovery. #[async_trait::async_trait] -pub trait DiscoverOutboundPolicy { - async fn get_outbound_policy(&self, target: T) -> Result>; +pub trait DiscoverOutboundPolicy { + async fn get_outbound_policy(&self, target: R) -> Result>; + + async fn watch_outbound_policy(&self, target: R) -> Result>; - async fn watch_outbound_policy(&self, target: T) -> Result>; + async fn watch_fallback_policy(&self) -> FallbackPolicyStream; fn lookup_ip(&self, addr: IpAddr, port: NonZeroU16, source_namespace: String) -> Option; } -pub type OutboundPolicyStream = - Pin + Send + Sync + 'static>>; +pub type OutboundPolicyStream = Pin + Send + Sync + 'static>>; +pub type FallbackPolicyStream = Pin + Send + Sync + 'static>>; pub type HttpRoute = OutboundRoute; pub type GrpcRoute = OutboundRoute; diff --git a/policy-controller/core/src/outbound/policy.rs b/policy-controller/core/src/outbound/policy.rs index c04e2d07ff4a0..cb308e47987fa 100644 --- a/policy-controller/core/src/outbound/policy.rs +++ b/policy-controller/core/src/outbound/policy.rs @@ -5,15 +5,6 @@ use super::{ use std::{net::SocketAddr, num::NonZeroU16}; -/// OutboundPolicyKind describes a resolved outbound policy that is -/// either attributed to a resource or is a fallback one. 
-#[allow(clippy::large_enum_variant)] -#[derive(Clone, Debug, PartialEq)] -pub enum OutboundPolicyKind { - Fallback(SocketAddr), - Resource(ResourceOutboundPolicy), -} - /// ResourceOutboundPolicy expresses the known resource types /// that can be parents for outbound policy. They each come with /// specific metadata that is used when putting together the final @@ -31,24 +22,29 @@ pub enum ResourceOutboundPolicy { }, } -// ParentMeta carries information resource-specific -// information about the parent to which outbound policy -// is associated. +// ParentInfo carries resource-specific information about +// the parent to which outbound policy is associated. #[derive(Clone, Debug, Hash, PartialEq, Eq)] -pub enum ParentMeta { - Service { authority: String }, - EgressNetwork(TrafficPolicy), +pub enum ParentInfo { + Service { + name: String, + namespace: String, + authority: String, + }, + EgressNetwork { + name: String, + namespace: String, + traffic_policy: TrafficPolicy, + }, } #[derive(Clone, Debug, PartialEq)] pub struct OutboundPolicy { - pub parent_meta: ParentMeta, + pub parent_info: ParentInfo, pub http_routes: RouteSet, pub grpc_routes: RouteSet, pub tls_routes: RouteSet, pub tcp_routes: RouteSet, - pub name: String, - pub namespace: String, pub port: NonZeroU16, pub opaque: bool, pub accrual: Option, @@ -57,6 +53,32 @@ pub struct OutboundPolicy { pub timeouts: RouteTimeouts, } +impl ParentInfo { + pub fn name(&self) -> &str { + match self { + Self::EgressNetwork { name, .. } => name, + Self::Service { name, .. } => name, + } + } + + pub fn namespace(&self) -> &str { + match self { + Self::EgressNetwork { namespace, .. } => namespace, + Self::Service { namespace, .. 
} => namespace, + } + } +} + +impl OutboundPolicy { + pub fn parent_name(&self) -> &str { + self.parent_info.name() + } + + pub fn parent_namespace(&self) -> &str { + self.parent_info.namespace() + } +} + impl ResourceOutboundPolicy { pub fn policy(&self) -> &OutboundPolicy { match self { diff --git a/policy-controller/core/src/outbound/target.rs b/policy-controller/core/src/outbound/target.rs index 16840c488eb1b..24e977c629a77 100644 --- a/policy-controller/core/src/outbound/target.rs +++ b/policy-controller/core/src/outbound/target.rs @@ -24,3 +24,12 @@ pub enum Kind { EgressNetwork(SocketAddr), Service, } + +impl ResourceTarget { + pub fn original_dst(&self) -> Option { + match self.kind { + Kind::EgressNetwork(original_dst) => Some(original_dst), + Kind::Service => None, + } + } +} diff --git a/policy-controller/grpc/src/outbound.rs b/policy-controller/grpc/src/outbound.rs index 858cca9320cea..5c18e92a91489 100644 --- a/policy-controller/grpc/src/outbound.rs +++ b/policy-controller/grpc/src/outbound.rs @@ -1,7 +1,7 @@ extern crate http as http_crate; use crate::workload; -use futures::prelude::*; +use futures::{prelude::*, StreamExt}; use http_crate::uri::Authority; use linkerd2_proxy_api::{ self as api, destination, @@ -13,8 +13,8 @@ use linkerd2_proxy_api::{ }; use linkerd_policy_controller_core::{ outbound::{ - DiscoverOutboundPolicy, Kind, OutboundDiscoverTarget, OutboundPolicyKind, - OutboundPolicyStream, ResourceOutboundPolicy, ResourceTarget, Route, WeightedEgressNetwork, + DiscoverOutboundPolicy, FallbackPolicyStream, Kind, OutboundDiscoverTarget, OutboundPolicy, + OutboundPolicyStream, ParentInfo, ResourceTarget, Route, WeightedEgressNetwork, WeightedService, }, routes::GroupKindNamespaceName, @@ -37,7 +37,7 @@ pub struct OutboundPolicyServer { impl OutboundPolicyServer where - T: DiscoverOutboundPolicy + Send + Sync + 'static, + T: DiscoverOutboundPolicy + Send + Sync + 'static, { pub fn new( discover: T, @@ -143,7 +143,7 @@ where 
#[async_trait::async_trait] impl OutboundPolicies for OutboundPolicyServer where - T: DiscoverOutboundPolicy + Send + Sync + 'static, + T: DiscoverOutboundPolicy + Send + Sync + 'static, { async fn get( &self, @@ -151,25 +151,32 @@ where ) -> Result, tonic::Status> { let target = self.lookup(req.into_inner())?; - let policy = self - .index - .get_outbound_policy(target) - .await - .map_err(|error| { - tonic::Status::internal(format!("failed to get outbound policy: {error}")) - })?; - - let message = match policy { - Some(OutboundPolicyKind::Fallback(original_dst)) => fallback(original_dst), - Some(OutboundPolicyKind::Resource(resource)) => { - to_proto(resource, self.allow_l5d_request_headers) - } - None => { - return Err(tonic::Status::not_found("No such policy")); + match target.clone() { + OutboundDiscoverTarget::Resource(resource) => { + let original_dst = resource.original_dst(); + let policy = self + .index + .get_outbound_policy(resource) + .await + .map_err(|error| { + tonic::Status::internal(format!("failed to get outbound policy: {error}")) + })?; + + if let Some(policy) = policy { + Ok(tonic::Response::new(to_proto( + policy, + self.allow_l5d_request_headers, + original_dst, + ))) + } else { + Err(tonic::Status::not_found("No such policy")) + } } - }; - Ok(tonic::Response::new(message)) + OutboundDiscoverTarget::Fallback(original_dst) => { + Ok(tonic::Response::new(fallback(original_dst))) + } + } } type WatchStream = BoxWatchStream; @@ -178,21 +185,35 @@ where &self, req: tonic::Request, ) -> Result, tonic::Status> { - let service = self.lookup(req.into_inner())?; - + let target = self.lookup(req.into_inner())?; let drain = self.drain.clone(); - let rx = self - .index - .watch_outbound_policy(service) - .await - .map_err(|e| tonic::Status::internal(format!("lookup failed: {e}")))? 
- .ok_or_else(|| tonic::Status::not_found("unknown server"))?; - Ok(tonic::Response::new(response_stream( - drain, - rx, - self.allow_l5d_request_headers, - ))) + match target.clone() { + OutboundDiscoverTarget::Resource(resource) => { + let original_dst = resource.original_dst(); + let rx = self + .index + .watch_outbound_policy(resource) + .await + .map_err(|e| tonic::Status::internal(format!("lookup failed: {e}")))? + .ok_or_else(|| tonic::Status::not_found("unknown server"))?; + Ok(tonic::Response::new(response_stream( + drain, + rx, + self.allow_l5d_request_headers, + original_dst, + ))) + } + + OutboundDiscoverTarget::Fallback(original_dst) => { + let rx = self.index.watch_fallback_policy().await; + Ok(tonic::Response::new(fallback_stream( + drain, + rx, + original_dst, + ))) + } + } } } @@ -204,6 +225,7 @@ fn response_stream( drain: drain::Watch, mut rx: OutboundPolicyStream, allow_l5d_request_headers: bool, + original_dst: Option, ) -> BoxWatchStream { Box::pin(async_stream::try_stream! { tokio::pin! { @@ -214,10 +236,36 @@ fn response_stream( tokio::select! { // When the port is updated with a new server, update the server watch. res = rx.next() => match res { - Some(OutboundPolicyKind::Resource(resource)) => { - yield to_proto(resource, allow_l5d_request_headers); + Some(policy) => { + yield to_proto(policy, allow_l5d_request_headers, original_dst); } - Some(OutboundPolicyKind::Fallback(original_dst)) => { + None => return, + }, + + // If the server starts shutting down, close the stream so that it doesn't hold the + // server open. + _ = &mut shutdown => { + return; + } + } + } + }) +} + +fn fallback_stream( + drain: drain::Watch, + mut rx: FallbackPolicyStream, + original_dst: SocketAddr, +) -> BoxWatchStream { + Box::pin(async_stream::try_stream! { + tokio::pin! { + let shutdown = drain.signaled(); + } + + loop { + tokio::select! 
{ + res = rx.next() => match res { + Some(_) => { yield fallback(original_dst); } None => return, @@ -323,37 +371,34 @@ fn fallback(original_dst: SocketAddr) -> outbound::OutboundPolicy { } fn to_proto( - resource_policy: ResourceOutboundPolicy, + policy: OutboundPolicy, allow_l5d_request_headers: bool, + original_dst: Option, ) -> outbound::OutboundPolicy { - let policy = resource_policy.policy(); - let backend: outbound::Backend = default_backend(&resource_policy); + let backend: outbound::Backend = default_backend(&policy, original_dst); let kind = if policy.opaque { outbound::proxy_protocol::Kind::Opaque(outbound::proxy_protocol::Opaque { - routes: vec![default_outbound_opaq_route(backend, &resource_policy)], + routes: vec![default_outbound_opaq_route(backend, &policy.parent_info)], }) } else { - let accrual = resource_policy - .policy() - .accrual - .map(|accrual| outbound::FailureAccrual { - kind: Some(match accrual { - linkerd_policy_controller_core::outbound::FailureAccrual::Consecutive { + let accrual = policy.accrual.map(|accrual| outbound::FailureAccrual { + kind: Some(match accrual { + linkerd_policy_controller_core::outbound::FailureAccrual::Consecutive { + max_failures, + backoff, + } => outbound::failure_accrual::Kind::ConsecutiveFailures( + outbound::failure_accrual::ConsecutiveFailures { max_failures, - backoff, - } => outbound::failure_accrual::Kind::ConsecutiveFailures( - outbound::failure_accrual::ConsecutiveFailures { - max_failures, - backoff: Some(outbound::ExponentialBackoff { - min_backoff: convert_duration("min_backoff", backoff.min_penalty), - max_backoff: convert_duration("max_backoff", backoff.max_penalty), - jitter_ratio: backoff.jitter, - }), - }, - ), - }), - }); + backoff: Some(outbound::ExponentialBackoff { + min_backoff: convert_duration("min_backoff", backoff.min_penalty), + max_backoff: convert_duration("max_backoff", backoff.max_penalty), + jitter_ratio: backoff.jitter, + }), + }, + ), + }), + }); let mut grpc_routes = 
policy.grpc_routes.clone().into_iter().collect::>(); let mut http_routes = policy.http_routes.clone().into_iter().collect::>(); @@ -369,7 +414,8 @@ fn to_proto( policy.grpc_retry.clone(), policy.timeouts.clone(), allow_l5d_request_headers, - &resource_policy, + &policy.parent_info, + original_dst, ) } else if !http_routes.is_empty() { http_routes.sort_by(timestamp_then_name); @@ -380,14 +426,25 @@ fn to_proto( policy.http_retry.clone(), policy.timeouts.clone(), allow_l5d_request_headers, - &resource_policy, + &policy.parent_info, + original_dst, ) } else if !tls_routes.is_empty() { tls_routes.sort_by(timestamp_then_name); - tls::protocol(backend, tls_routes.into_iter(), &resource_policy) + tls::protocol( + backend, + tls_routes.into_iter(), + &policy.parent_info, + original_dst, + ) } else if !tcp_routes.is_empty() { tcp_routes.sort_by(timestamp_then_name); - tcp::protocol(backend, tcp_routes.into_iter(), &resource_policy) + tcp::protocol( + backend, + tcp_routes.into_iter(), + &policy.parent_info, + original_dst, + ) } else { http_routes.sort_by(timestamp_then_name); http::protocol( @@ -397,22 +454,27 @@ fn to_proto( policy.http_retry.clone(), policy.timeouts.clone(), allow_l5d_request_headers, - &resource_policy, + &policy.parent_info, + original_dst, ) } }; - let (parent_group, parent_kind) = match resource_policy { - ResourceOutboundPolicy::Egress { .. } => ("policy.linkerd.io", "EgressNetwork"), - ResourceOutboundPolicy::Service { .. } => ("core", "Service"), + let (parent_group, parent_kind, namespace, name) = match policy.parent_info { + ParentInfo::EgressNetwork { + namespace, name, .. + } => ("policy.linkerd.io", "EgressNetwork", namespace, name), + ParentInfo::Service { + name, namespace, .. 
+ } => ("core", "Service", namespace, name), }; let metadata = Metadata { kind: Some(metadata::Kind::Resource(api::meta::Resource { group: parent_group.into(), kind: parent_kind.into(), - namespace: policy.namespace.clone(), - name: policy.name.clone(), + namespace, + name, port: u16::from(policy.port).into(), ..Default::default() })), @@ -442,15 +504,20 @@ fn timestamp_then_name( by_ts.then_with(|| left_id.name.cmp(&right_id.name)) } -fn default_backend(policy: &ResourceOutboundPolicy) -> outbound::Backend { - match policy { - ResourceOutboundPolicy::Service { authority, policy } => outbound::Backend { +fn default_backend(policy: &OutboundPolicy, original_dst: Option) -> outbound::Backend { + match policy.parent_info.clone() { + ParentInfo::Service { + authority, + namespace, + name, + .. + } => outbound::Backend { metadata: Some(Metadata { kind: Some(metadata::Kind::Resource(api::meta::Resource { group: "core".to_string(), kind: "Service".to_string(), - name: policy.name.clone(), - namespace: policy.namespace.clone(), + name, + namespace, section: Default::default(), port: u16::from(policy.port).into(), })), @@ -469,42 +536,52 @@ fn default_backend(policy: &ResourceOutboundPolicy) -> outbound::Backend { }, )), }, - ResourceOutboundPolicy::Egress { - original_dst, - policy, - .. - } => outbound::Backend { - metadata: Some(Metadata { - kind: Some(metadata::Kind::Resource(api::meta::Resource { - group: "policy.linkerd.io".to_string(), - kind: "EgressNetwork".to_string(), - name: policy.name.clone(), - namespace: policy.namespace.clone(), - section: Default::default(), - port: u16::from(policy.port).into(), - })), - }), - queue: Some(default_queue_config()), - kind: Some(outbound::backend::Kind::Forward( - destination::WeightedAddr { - addr: Some((*original_dst).into()), - weight: 1, - ..Default::default() - }, - )), + ParentInfo::EgressNetwork { + namespace, name, .. 
+ } => match original_dst { + Some(original_dst) => outbound::Backend { + metadata: Some(Metadata { + kind: Some(metadata::Kind::Resource(api::meta::Resource { + group: "policy.linkerd.io".to_string(), + kind: "EgressNetwork".to_string(), + name, + namespace, + section: Default::default(), + port: u16::from(policy.port).into(), + })), + }), + queue: Some(default_queue_config()), + kind: Some(outbound::backend::Kind::Forward( + destination::WeightedAddr { + addr: Some(original_dst.into()), + weight: 1, + ..Default::default() + }, + )), + }, + None => { + tracing::error!("no original_dst for Egresspolicy"); + outbound::Backend { + metadata: Some(Metadata { + kind: Some(metadata::Kind::Default("invalid".to_string())), + }), + queue: None, + kind: None, + } + } }, } } fn default_outbound_opaq_route( backend: outbound::Backend, - resource_policy: &ResourceOutboundPolicy, + parent_info: &ParentInfo, ) -> outbound::OpaqueRoute { - match resource_policy { - ResourceOutboundPolicy::Egress { traffic_policy, .. } => { + match parent_info { + ParentInfo::EgressNetwork { traffic_policy, .. } => { tcp::default_outbound_egress_route(backend, traffic_policy) } - ResourceOutboundPolicy::Service { .. } => { + ParentInfo::Service { .. 
} => { let metadata = Some(Metadata { kind: Some(metadata::Kind::Default("opaq".to_string())), }); diff --git a/policy-controller/grpc/src/outbound/grpc.rs b/policy-controller/grpc/src/outbound/grpc.rs index bc2510ae548b8..791191e3eca1f 100644 --- a/policy-controller/grpc/src/outbound/grpc.rs +++ b/policy-controller/grpc/src/outbound/grpc.rs @@ -10,12 +10,13 @@ use linkerd2_proxy_api::{ use linkerd_policy_controller_core::{ outbound::{ Backend, Filter, GrpcRetryCondition, GrpcRoute, OutboundRoute, OutboundRouteRule, - ResourceOutboundPolicy, RouteRetry, RouteTimeouts, TrafficPolicy, + ParentInfo, RouteRetry, RouteTimeouts, TrafficPolicy, }, routes::{FailureInjectorFilter, GroupKindNamespaceName}, }; use std::{net::SocketAddr, time}; +#[allow(clippy::too_many_arguments)] pub(crate) fn protocol( default_backend: outbound::Backend, routes: impl Iterator, @@ -23,7 +24,8 @@ pub(crate) fn protocol( service_retry: Option>, service_timeouts: RouteTimeouts, allow_l5d_request_headers: bool, - policy: &ResourceOutboundPolicy, + parent_info: &ParentInfo, + original_dst: Option, ) -> outbound::proxy_protocol::Kind { let mut routes = routes .map(|(gknn, route)| { @@ -34,12 +36,13 @@ pub(crate) fn protocol( service_retry.clone(), service_timeouts.clone(), allow_l5d_request_headers, - policy, + parent_info, + original_dst, ) }) .collect::>(); - if let ResourceOutboundPolicy::Egress { traffic_policy, .. } = policy { + if let ParentInfo::EgressNetwork { traffic_policy, .. 
} = parent_info { routes.push(default_outbound_egress_route( default_backend, service_retry, @@ -54,6 +57,7 @@ pub(crate) fn protocol( }) } +#[allow(clippy::too_many_arguments)] fn convert_outbound_route( gknn: GroupKindNamespaceName, OutboundRoute { @@ -65,7 +69,8 @@ fn convert_outbound_route( service_retry: Option>, service_timeouts: RouteTimeouts, allow_l5d_request_headers: bool, - policy: &ResourceOutboundPolicy, + parent_info: &ParentInfo, + original_dst: Option, ) -> outbound::GrpcRoute { // This encoder sets deprecated timeouts for older proxies. #![allow(deprecated)] @@ -94,7 +99,7 @@ fn convert_outbound_route( }| { let backends = backends .into_iter() - .map(|b| convert_backend(b, policy)) + .map(|b| convert_backend(b, parent_info, original_dst)) .collect::>(); let dist = if backends.is_empty() { outbound::grpc_route::distribution::Kind::FirstAvailable( @@ -177,12 +182,10 @@ fn convert_outbound_route( fn convert_backend( backend: Backend, - policy: &ResourceOutboundPolicy, + parent_info: &ParentInfo, + original_dst: Option, ) -> outbound::grpc_route::WeightedRouteBackend { - let original_dst_port = match policy { - ResourceOutboundPolicy::Egress { original_dst, .. } => Some(original_dst.port()), - _ => None, - }; + let original_dst_port = original_dst.map(|o| o.port()); match backend { Backend::Addr(addr) => { @@ -242,57 +245,65 @@ fn convert_backend( format!("Service not found {}", svc.name), super::service_meta(svc), ), - Backend::EgressNetwork(egress_net) if egress_net.exists => match policy { - ResourceOutboundPolicy::Egress { - original_dst, - policy, - .. - } => { - if policy.name == egress_net.name && policy.namespace == egress_net.namespace { - let filters = egress_net - .filters - .clone() - .into_iter() - .map(convert_to_filter) - .collect(); + Backend::EgressNetwork(egress_net) if egress_net.exists => { + match (parent_info, original_dst) { + ( + ParentInfo::EgressNetwork { + name, namespace, .. 
+ }, + Some(original_dst), + ) => { + if *name == egress_net.name && *namespace == egress_net.namespace { + let filters = egress_net + .filters + .clone() + .into_iter() + .map(convert_to_filter) + .collect(); - outbound::grpc_route::WeightedRouteBackend { - weight: egress_net.weight, - backend: Some(outbound::grpc_route::RouteBackend { - backend: Some(outbound::Backend { - metadata: Some(super::egress_net_meta( - egress_net.clone(), - original_dst_port, - )), - queue: Some(default_queue_config()), - kind: Some(outbound::backend::Kind::Forward( - destination::WeightedAddr { - addr: Some((*original_dst).into()), - weight: egress_net.weight, - ..Default::default() - }, - )), + outbound::grpc_route::WeightedRouteBackend { + weight: egress_net.weight, + backend: Some(outbound::grpc_route::RouteBackend { + backend: Some(outbound::Backend { + metadata: Some(super::egress_net_meta( + egress_net.clone(), + original_dst_port, + )), + queue: Some(default_queue_config()), + kind: Some(outbound::backend::Kind::Forward( + destination::WeightedAddr { + addr: Some(original_dst.into()), + weight: egress_net.weight, + ..Default::default() + }, + )), + }), + filters, + ..Default::default() }), - filters, - ..Default::default() - }), + } + } else { + let weight = egress_net.weight; + let message = "Route with EgressNetwork backend needs to have the same EgressNetwork as a parent".to_string(); + invalid_backend( + weight, + message, + super::egress_net_meta(egress_net, original_dst_port), + ) } - } else { - let weight = egress_net.weight; - let message = "Route with EgressNetwork backend needs to have the same EgressNetwork as a parent".to_string(); - invalid_backend( - weight, - message, - super::egress_net_meta(egress_net, original_dst_port), - ) } + (ParentInfo::EgressNetwork { .. 
}, None) => invalid_backend( + egress_net.weight, + "EgressNetwork can be resolved from an ip:port combo only".to_string(), + super::egress_net_meta(egress_net, original_dst_port), + ), + (ParentInfo::Service { .. }, _) => invalid_backend( + egress_net.weight, + "EgressNetwork backends attach to EgressNetwork parents only".to_string(), + super::egress_net_meta(egress_net, original_dst_port), + ), } - ResourceOutboundPolicy::Service { .. } => invalid_backend( - egress_net.weight, - "EgressNetwork backends attach to EgressNetwork parents only".to_string(), - super::egress_net_meta(egress_net, original_dst_port), - ), - }, + } Backend::EgressNetwork(egress_net) => invalid_backend( egress_net.weight, format!("EgressNetwork not found {}", egress_net.name), diff --git a/policy-controller/grpc/src/outbound/http.rs b/policy-controller/grpc/src/outbound/http.rs index cb7439d9fac5c..ad461a0aeb07f 100644 --- a/policy-controller/grpc/src/outbound/http.rs +++ b/policy-controller/grpc/src/outbound/http.rs @@ -9,13 +9,14 @@ use crate::routes::{ use linkerd2_proxy_api::{destination, http_route, meta, outbound}; use linkerd_policy_controller_core::{ outbound::{ - Backend, Filter, HttpRetryCondition, HttpRoute, OutboundRouteRule, ResourceOutboundPolicy, - RouteRetry, RouteTimeouts, TrafficPolicy, + Backend, Filter, HttpRetryCondition, HttpRoute, OutboundRouteRule, ParentInfo, RouteRetry, + RouteTimeouts, TrafficPolicy, }, routes::GroupKindNamespaceName, }; use std::{net::SocketAddr, time}; +#[allow(clippy::too_many_arguments)] pub(crate) fn protocol( default_backend: outbound::Backend, routes: impl Iterator, @@ -23,9 +24,10 @@ pub(crate) fn protocol( service_retry: Option>, service_timeouts: RouteTimeouts, allow_l5d_request_headers: bool, - policy: &ResourceOutboundPolicy, + parent_info: &ParentInfo, + original_dst: Option, ) -> outbound::proxy_protocol::Kind { - let opaque_route = default_outbound_opaq_route(default_backend.clone(), policy); + let opaque_route = 
default_outbound_opaq_route(default_backend.clone(), parent_info); let mut routes = routes .map(|(gknn, route)| { convert_outbound_route( @@ -35,13 +37,14 @@ pub(crate) fn protocol( service_retry.clone(), service_timeouts.clone(), allow_l5d_request_headers, - policy, + parent_info, + original_dst, ) }) .collect::>(); - match policy { - ResourceOutboundPolicy::Service { .. } => { + match parent_info { + ParentInfo::Service { .. } => { if routes.is_empty() { routes.push(default_outbound_service_route( default_backend, @@ -50,7 +53,7 @@ pub(crate) fn protocol( )); } } - ResourceOutboundPolicy::Egress { traffic_policy, .. } => { + ParentInfo::EgressNetwork { traffic_policy, .. } => { routes.push(default_outbound_egress_route( default_backend, service_retry.clone(), @@ -81,6 +84,7 @@ pub(crate) fn protocol( }) } +#[allow(clippy::too_many_arguments)] fn convert_outbound_route( gknn: GroupKindNamespaceName, HttpRoute { @@ -92,7 +96,8 @@ fn convert_outbound_route( service_retry: Option>, service_timeouts: RouteTimeouts, allow_l5d_request_headers: bool, - policy: &ResourceOutboundPolicy, + parent_info: &ParentInfo, + original_dst: Option, ) -> outbound::HttpRoute { // This encoder sets deprecated timeouts for older proxies. #![allow(deprecated)] @@ -120,7 +125,7 @@ fn convert_outbound_route( }| { let backends = backends .into_iter() - .map(|b| convert_backend(b, policy)) + .map(|b| convert_backend(b, parent_info, original_dst)) .collect::>(); let dist = if backends.is_empty() { outbound::http_route::distribution::Kind::FirstAvailable( @@ -167,13 +172,10 @@ fn convert_outbound_route( fn convert_backend( backend: Backend, - policy: &ResourceOutboundPolicy, + parent_info: &ParentInfo, + original_dst: Option, ) -> outbound::http_route::WeightedRouteBackend { - let original_dst_port = match policy { - ResourceOutboundPolicy::Egress { original_dst, .. } => Some(original_dst.port()), - ResourceOutboundPolicy::Service { .. 
} => None, - }; - + let original_dst_port = original_dst.map(|o| o.port()); match backend { Backend::Addr(addr) => { let socket_addr = SocketAddr::new(addr.addr, addr.port.get()); @@ -232,57 +234,65 @@ fn convert_backend( format!("Service not found {}", svc.name), super::service_meta(svc), ), - Backend::EgressNetwork(egress_net) if egress_net.exists => match policy { - ResourceOutboundPolicy::Egress { - original_dst, - policy, - .. - } => { - if policy.name == egress_net.name && policy.namespace == egress_net.namespace { - let filters = egress_net - .filters - .clone() - .into_iter() - .map(convert_to_filter) - .collect(); + Backend::EgressNetwork(egress_net) if egress_net.exists => { + match (parent_info, original_dst) { + ( + ParentInfo::EgressNetwork { + name, namespace, .. + }, + Some(original_dst), + ) => { + if *name == egress_net.name && *namespace == egress_net.namespace { + let filters = egress_net + .filters + .clone() + .into_iter() + .map(convert_to_filter) + .collect(); - outbound::http_route::WeightedRouteBackend { - weight: egress_net.weight, - backend: Some(outbound::http_route::RouteBackend { - backend: Some(outbound::Backend { - metadata: Some(super::egress_net_meta( - egress_net.clone(), - original_dst_port, - )), - queue: Some(default_queue_config()), - kind: Some(outbound::backend::Kind::Forward( - destination::WeightedAddr { - addr: Some((*original_dst).into()), - weight: egress_net.weight, - ..Default::default() - }, - )), + outbound::http_route::WeightedRouteBackend { + weight: egress_net.weight, + backend: Some(outbound::http_route::RouteBackend { + backend: Some(outbound::Backend { + metadata: Some(super::egress_net_meta( + egress_net.clone(), + original_dst_port, + )), + queue: Some(default_queue_config()), + kind: Some(outbound::backend::Kind::Forward( + destination::WeightedAddr { + addr: Some(original_dst.into()), + weight: egress_net.weight, + ..Default::default() + }, + )), + }), + filters, + ..Default::default() }), - filters, - 
..Default::default() - }), + } + } else { + let weight = egress_net.weight; + let message = "Route with EgressNetwork backend needs to have the same EgressNetwork as a parent".to_string(); + invalid_backend( + weight, + message, + super::egress_net_meta(egress_net, original_dst_port), + ) } - } else { - let weight = egress_net.weight; - let message = "Route with EgressNetwork backend needs to have the same EgressNetwork as a parent".to_string(); - invalid_backend( - weight, - message, - super::egress_net_meta(egress_net, original_dst_port), - ) } + (ParentInfo::EgressNetwork { .. }, None) => invalid_backend( + egress_net.weight, + "EgressNetwork can be resolved from an ip:port combo only".to_string(), + super::egress_net_meta(egress_net, original_dst_port), + ), + (ParentInfo::Service { .. }, _) => invalid_backend( + egress_net.weight, + "EgressNetwork backends attach to EgressNetwork parents only".to_string(), + super::egress_net_meta(egress_net, original_dst_port), + ), } - ResourceOutboundPolicy::Service { .. 
} => invalid_backend( - egress_net.weight, - "EgressNetwork backends attach to EgressNetwork parents only".to_string(), - super::egress_net_meta(egress_net, original_dst_port), - ), - }, + } Backend::EgressNetwork(egress_net) => invalid_backend( egress_net.weight, format!("EgressNetwork not found {}", egress_net.name), diff --git a/policy-controller/grpc/src/outbound/tcp.rs b/policy-controller/grpc/src/outbound/tcp.rs index dbc7c40293dfd..4c8baabdae024 100644 --- a/policy-controller/grpc/src/outbound/tcp.rs +++ b/policy-controller/grpc/src/outbound/tcp.rs @@ -1,7 +1,7 @@ use super::{default_balancer_config, default_queue_config}; use linkerd2_proxy_api::{destination, meta, outbound}; use linkerd_policy_controller_core::{ - outbound::{Backend, ResourceOutboundPolicy, TcpRoute, TrafficPolicy}, + outbound::{Backend, ParentInfo, TcpRoute, TrafficPolicy}, routes::GroupKindNamespaceName, }; use std::net::SocketAddr; @@ -9,13 +9,22 @@ use std::net::SocketAddr; pub(crate) fn protocol( default_backend: outbound::Backend, routes: impl Iterator, - policy: &ResourceOutboundPolicy, + parent_info: &ParentInfo, + original_dst: Option, ) -> outbound::proxy_protocol::Kind { let mut routes = routes - .map(|(gknn, route)| convert_outbound_route(gknn, route, default_backend.clone(), policy)) + .map(|(gknn, route)| { + convert_outbound_route( + gknn, + route, + default_backend.clone(), + parent_info, + original_dst, + ) + }) .collect::>(); - if let ResourceOutboundPolicy::Egress { traffic_policy, .. } = policy { + if let ParentInfo::EgressNetwork { traffic_policy, .. 
} = parent_info { routes.push(default_outbound_egress_route( default_backend, traffic_policy, @@ -32,7 +41,8 @@ fn convert_outbound_route( creation_timestamp: _, }: TcpRoute, backend: outbound::Backend, - policy: &ResourceOutboundPolicy, + parent_info: &ParentInfo, + original_dst: Option, ) -> outbound::OpaqueRoute { let metadata = Some(meta::Metadata { kind: Some(meta::metadata::Kind::Resource(meta::Resource { @@ -47,7 +57,7 @@ fn convert_outbound_route( let backends = rule .backends .into_iter() - .map(|b| convert_backend(b, policy)) + .map(|b| convert_backend(b, parent_info, original_dst)) .collect::>(); let dist = if backends.is_empty() { @@ -77,12 +87,10 @@ fn convert_outbound_route( fn convert_backend( backend: Backend, - policy: &ResourceOutboundPolicy, + parent_info: &ParentInfo, + original_dst: Option, ) -> outbound::opaque_route::WeightedRouteBackend { - let original_dst_port = match policy { - ResourceOutboundPolicy::Egress { original_dst, .. } => Some(original_dst.port()), - ResourceOutboundPolicy::Service { .. } => None, - }; + let original_dst_port = original_dst.map(|o| o.port()); match backend { Backend::Addr(addr) => { @@ -132,49 +140,57 @@ fn convert_backend( format!("Service not found {}", svc.name), super::service_meta(svc), ), - Backend::EgressNetwork(egress_net) if egress_net.exists => match policy { - ResourceOutboundPolicy::Egress { - original_dst, - policy, - .. 
- } => { - if policy.name == egress_net.name && policy.namespace == egress_net.namespace { - outbound::opaque_route::WeightedRouteBackend { - weight: egress_net.weight, - backend: Some(outbound::opaque_route::RouteBackend { - backend: Some(outbound::Backend { - metadata: Some(super::egress_net_meta( - egress_net.clone(), - original_dst_port, - )), - queue: Some(default_queue_config()), - kind: Some(outbound::backend::Kind::Forward( - destination::WeightedAddr { - addr: Some((*original_dst).into()), - weight: egress_net.weight, - ..Default::default() - }, - )), + Backend::EgressNetwork(egress_net) if egress_net.exists => { + match (parent_info, original_dst) { + ( + ParentInfo::EgressNetwork { + name, namespace, .. + }, + Some(original_dst), + ) => { + if *name == egress_net.name && *namespace == egress_net.namespace { + outbound::opaque_route::WeightedRouteBackend { + weight: egress_net.weight, + backend: Some(outbound::opaque_route::RouteBackend { + backend: Some(outbound::Backend { + metadata: Some(super::egress_net_meta( + egress_net.clone(), + original_dst_port, + )), + queue: Some(default_queue_config()), + kind: Some(outbound::backend::Kind::Forward( + destination::WeightedAddr { + addr: Some(original_dst.into()), + weight: egress_net.weight, + ..Default::default() + }, + )), + }), }), - }), - error: None, + error: None, + } + } else { + let weight = egress_net.weight; + let message = "Route with EgressNetwork backend needs to have the same EgressNetwork as a parent".to_string(); + invalid_backend( + weight, + message, + super::egress_net_meta(egress_net, original_dst_port), + ) } - } else { - let weight = egress_net.weight; - let message = "Route with EgressNetwork backend needs to have the same EgressNetwork as a parent".to_string(); - invalid_backend( - weight, - message, - super::egress_net_meta(egress_net, original_dst_port), - ) } + (ParentInfo::EgressNetwork { .. 
}, None) => invalid_backend( + egress_net.weight, + "EgressNetwork can be resolved from an ip:port combo only".to_string(), + super::egress_net_meta(egress_net, original_dst_port), + ), + (ParentInfo::Service { .. }, _) => invalid_backend( + egress_net.weight, + "EgressNetwork backends attach to EgressNetwork parents only".to_string(), + super::egress_net_meta(egress_net, original_dst_port), + ), } - ResourceOutboundPolicy::Service { .. } => invalid_backend( - egress_net.weight, - "EgressNetwork backends attach to EgressNetwork parents only".to_string(), - super::egress_net_meta(egress_net, original_dst_port), - ), - }, + } Backend::EgressNetwork(egress_net) => invalid_backend( egress_net.weight, format!("EgressNetwork not found {}", egress_net.name), diff --git a/policy-controller/grpc/src/outbound/tls.rs b/policy-controller/grpc/src/outbound/tls.rs index 8860b695b0e39..911aac2533a80 100644 --- a/policy-controller/grpc/src/outbound/tls.rs +++ b/policy-controller/grpc/src/outbound/tls.rs @@ -2,7 +2,7 @@ use super::{default_balancer_config, default_queue_config}; use crate::routes::convert_sni_match; use linkerd2_proxy_api::{destination, meta, outbound}; use linkerd_policy_controller_core::{ - outbound::{Backend, ResourceOutboundPolicy, TlsRoute, TrafficPolicy}, + outbound::{Backend, ParentInfo, TlsRoute, TrafficPolicy}, routes::GroupKindNamespaceName, }; use std::net::SocketAddr; @@ -10,13 +10,22 @@ use std::net::SocketAddr; pub(crate) fn protocol( default_backend: outbound::Backend, routes: impl Iterator, - policy: &ResourceOutboundPolicy, + parent_info: &ParentInfo, + original_dst: Option, ) -> outbound::proxy_protocol::Kind { let mut routes = routes - .map(|(gknn, route)| convert_outbound_route(gknn, route, default_backend.clone(), policy)) + .map(|(gknn, route)| { + convert_outbound_route( + gknn, + route, + default_backend.clone(), + parent_info, + original_dst, + ) + }) .collect::>(); - if let ResourceOutboundPolicy::Egress { traffic_policy, .. 
} = policy { + if let ParentInfo::EgressNetwork { traffic_policy, .. } = parent_info { routes.push(default_outbound_egress_route( default_backend, traffic_policy, @@ -34,7 +43,8 @@ fn convert_outbound_route( creation_timestamp: _, }: TlsRoute, backend: outbound::Backend, - policy: &ResourceOutboundPolicy, + parent_info: &ParentInfo, + original_dst: Option, ) -> outbound::TlsRoute { let metadata = Some(meta::Metadata { kind: Some(meta::metadata::Kind::Resource(meta::Resource { @@ -51,7 +61,7 @@ fn convert_outbound_route( let backends = rule .backends .into_iter() - .map(|b| convert_backend(b, policy)) + .map(|b| convert_backend(b, parent_info, original_dst)) .collect::>(); let dist = if backends.is_empty() { @@ -82,12 +92,10 @@ fn convert_outbound_route( fn convert_backend( backend: Backend, - policy: &ResourceOutboundPolicy, + parent_info: &ParentInfo, + original_dst: Option, ) -> outbound::tls_route::WeightedRouteBackend { - let original_dst_port = match policy { - ResourceOutboundPolicy::Egress { original_dst, .. } => Some(original_dst.port()), - ResourceOutboundPolicy::Service { .. } => None, - }; + let original_dst_port = original_dst.map(|o| o.port()); match backend { Backend::Addr(addr) => { @@ -137,49 +145,57 @@ fn convert_backend( format!("Service not found {}", svc.name), super::service_meta(svc), ), - Backend::EgressNetwork(egress_net) if egress_net.exists => match policy { - ResourceOutboundPolicy::Egress { - original_dst, - policy, - .. 
- } => { - if policy.name == egress_net.name && policy.namespace == egress_net.namespace { - outbound::tls_route::WeightedRouteBackend { - weight: egress_net.weight, - backend: Some(outbound::tls_route::RouteBackend { - backend: Some(outbound::Backend { - metadata: Some(super::egress_net_meta( - egress_net.clone(), - original_dst_port, - )), - queue: Some(default_queue_config()), - kind: Some(outbound::backend::Kind::Forward( - destination::WeightedAddr { - addr: Some((*original_dst).into()), - weight: egress_net.weight, - ..Default::default() - }, - )), + Backend::EgressNetwork(egress_net) if egress_net.exists => { + match (parent_info, original_dst) { + ( + ParentInfo::EgressNetwork { + name, namespace, .. + }, + Some(original_dst), + ) => { + if *name == egress_net.name && *namespace == egress_net.namespace { + outbound::tls_route::WeightedRouteBackend { + weight: egress_net.weight, + backend: Some(outbound::tls_route::RouteBackend { + backend: Some(outbound::Backend { + metadata: Some(super::egress_net_meta( + egress_net.clone(), + original_dst_port, + )), + queue: Some(default_queue_config()), + kind: Some(outbound::backend::Kind::Forward( + destination::WeightedAddr { + addr: Some(original_dst.into()), + weight: egress_net.weight, + ..Default::default() + }, + )), + }), }), - }), - error: None, + error: None, + } + } else { + let weight = egress_net.weight; + let message = "Route with EgressNetwork backend needs to have the same EgressNetwork as a parent".to_string(); + invalid_backend( + weight, + message, + super::egress_net_meta(egress_net, original_dst_port), + ) } - } else { - let weight = egress_net.weight; - let message = "Route with EgressNetwork backend needs to have the same EgressNetwork as a parent".to_string(); - invalid_backend( - weight, - message, - super::egress_net_meta(egress_net, original_dst_port), - ) } + (ParentInfo::EgressNetwork { .. 
}, None) => invalid_backend( + egress_net.weight, + "EgressNetwork can be resolved from an ip:port combo only".to_string(), + super::egress_net_meta(egress_net, original_dst_port), + ), + (ParentInfo::Service { .. }, _) => invalid_backend( + egress_net.weight, + "EgressNetwork backends attach to EgressNetwork parents only".to_string(), + super::egress_net_meta(egress_net, original_dst_port), + ), } - ResourceOutboundPolicy::Service { .. } => invalid_backend( - egress_net.weight, - "EgressNetwork backends attach to EgressNetwork parents only".to_string(), - super::egress_net_meta(egress_net, original_dst_port), - ), - }, + } Backend::EgressNetwork(egress_net) => invalid_backend( egress_net.weight, format!("EgressNetwork not found {}", egress_net.name), diff --git a/policy-controller/k8s/index/src/outbound/index.rs b/policy-controller/k8s/index/src/outbound/index.rs index e620ac5a0375f..8f06ce34e1e75 100644 --- a/policy-controller/k8s/index/src/outbound/index.rs +++ b/policy-controller/k8s/index/src/outbound/index.rs @@ -9,7 +9,7 @@ use egress_network::EgressNetwork; use linkerd_policy_controller_core::{ outbound::{ Backend, Backoff, FailureAccrual, GrpcRetryCondition, GrpcRoute, HttpRetryCondition, - HttpRoute, Kind, OutboundPolicy, ParentMeta, ResourceTarget, RouteRetry, RouteSet, + HttpRoute, Kind, OutboundPolicy, ParentInfo, ResourceTarget, RouteRetry, RouteSet, RouteTimeouts, TcpRoute, TlsRoute, TrafficPolicy, }, routes::GroupKindNamespaceName, @@ -100,9 +100,8 @@ struct ResourcePort { #[derive(Debug)] struct ResourceRoutes { - parent_meta: ParentMeta, + parent_info: ParentInfo, namespace: Arc, - name: String, port: NonZeroU16, watches_by_ns: HashMap, opaque: bool, @@ -114,7 +113,7 @@ struct ResourceRoutes { #[derive(Debug)] struct RoutesWatch { - parent_meta: ParentMeta, + parent_info: ParentInfo, opaque: bool, accrual: Option, http_retry: Option>, @@ -989,7 +988,7 @@ impl Namespace { fn reinitialize_egress_watches(&mut self) { for routes in 
self.resource_port_routes.values_mut() { - if let ParentMeta::EgressNetwork(_) = routes.parent_meta { + if let ParentInfo::EgressNetwork { .. } = routes.parent_info { routes.reinitialize_watches(); } } @@ -1075,12 +1074,20 @@ impl Namespace { kind: rp.kind.clone(), }; - let mut parent_meta = match rp.kind { - ResourceKind::EgressNetwork => ParentMeta::EgressNetwork(TrafficPolicy::Deny), + let mut parent_info = match rp.kind { + ResourceKind::EgressNetwork => ParentInfo::EgressNetwork { + traffic_policy: TrafficPolicy::Deny, + name: resource_ref.name.clone(), + namespace: resource_ref.namespace.clone(), + }, ResourceKind::Service => { let authority = cluster.service_dns_authority(&self.namespace, &rp.name, rp.port); - ParentMeta::Service { authority } + ParentInfo::Service { + authority, + name: resource_ref.name.clone(), + namespace: resource_ref.namespace.clone(), + } } }; let mut opaque = false; @@ -1096,7 +1103,11 @@ impl Namespace { timeouts = resource.timeouts.clone(); if let Some(traffic_policy) = resource.traffic_policy { - parent_meta = ParentMeta::EgressNetwork(traffic_policy) + parent_info = ParentInfo::EgressNetwork { + traffic_policy, + name: resource_ref.name, + namespace: resource_ref.namespace, + } } } @@ -1124,14 +1135,13 @@ impl Namespace { .unwrap_or_default(); let mut resource_routes = ResourceRoutes { - parent_meta, + parent_info, opaque, accrual, http_retry, grpc_retry, timeouts, port: rp.port, - name: rp.name, namespace: self.namespace.clone(), watches_by_ns: Default::default(), }; @@ -1368,7 +1378,7 @@ impl ResourceRoutes { self.watches_by_ns.entry(namespace).or_insert_with(|| { let (sender, _) = watch::channel(OutboundPolicy { - parent_meta: self.parent_meta.clone(), + parent_info: self.parent_info.clone(), port: self.port, opaque: self.opaque, accrual: self.accrual, @@ -1379,12 +1389,10 @@ impl ResourceRoutes { grpc_routes: grpc_routes.clone(), tls_routes: tls_routes.clone(), tcp_routes: tcp_routes.clone(), - name: self.name.to_string(), - 
namespace: self.namespace.to_string(), }); RoutesWatch { - parent_meta: self.parent_meta.clone(), + parent_info: self.parent_info.clone(), http_routes, grpc_routes, tls_routes, @@ -1514,11 +1522,11 @@ impl ResourceRoutes { } fn update_traffic_policy(&mut self, traffic_policy: Option) { - if let (ParentMeta::EgressNetwork(current), Some(new)) = - (self.parent_meta.clone(), traffic_policy) + if let (ParentInfo::EgressNetwork { traffic_policy, .. }, Some(new)) = + (&mut self.parent_info, traffic_policy) { - if current != new { - self.parent_meta = ParentMeta::EgressNetwork(new) + if *traffic_policy != new { + *traffic_policy = new; } } } @@ -1556,11 +1564,11 @@ impl RoutesWatch { } fn update_traffic_policy(&mut self, traffic_policy: Option) { - if let (ParentMeta::EgressNetwork(current), Some(new)) = - (self.parent_meta.clone(), traffic_policy) + if let (ParentInfo::EgressNetwork { traffic_policy, .. }, Some(new)) = + (&mut self.parent_info, traffic_policy) { - if current != new { - self.parent_meta = ParentMeta::EgressNetwork(new) + if *traffic_policy != new { + *traffic_policy = new; } } } @@ -1569,8 +1577,8 @@ impl RoutesWatch { self.watch.send_if_modified(|policy| { let mut modified = false; - if self.parent_meta != policy.parent_meta { - policy.parent_meta = self.parent_meta.clone(); + if self.parent_info != policy.parent_info { + policy.parent_info = self.parent_info.clone(); modified = true; } diff --git a/policy-controller/k8s/index/src/outbound/tests.rs b/policy-controller/k8s/index/src/outbound/tests.rs index 2a1b794dd0ad4..6994c2a96fa7d 100644 --- a/policy-controller/k8s/index/src/outbound/tests.rs +++ b/policy-controller/k8s/index/src/outbound/tests.rs @@ -134,8 +134,8 @@ fn switch_to_another_egress_network_parent() { // first resolution is for network B let policy_b = rx_b.borrow_and_update(); - assert_eq!(policy_b.namespace, "ns".to_string()); - assert_eq!(policy_b.name, "b".to_string()); + assert_eq!(policy_b.parent_namespace(), "ns"); + 
assert_eq!(policy_b.parent_name(), "b"); drop(policy_b); // Create network a. @@ -167,8 +167,8 @@ fn switch_to_another_egress_network_parent() { // second resolution is for network A let policy_b = rx_a.borrow_and_update(); - assert_eq!(policy_b.namespace, "ns".to_string()); - assert_eq!(policy_b.name, "a".to_string()); + assert_eq!(policy_b.parent_namespace(), "ns"); + assert_eq!(policy_b.parent_name(), "a"); } #[test] diff --git a/policy-controller/src/lib.rs b/policy-controller/src/lib.rs index 99faab0e987a6..db6693eb4e23a 100644 --- a/policy-controller/src/lib.rs +++ b/policy-controller/src/lib.rs @@ -5,13 +5,12 @@ pub mod index_list; mod validation; pub use self::admission::Admission; use anyhow::Result; -use futures::StreamExt; use linkerd_policy_controller_core::inbound::{ DiscoverInboundServer, InboundServer, InboundServerStream, }; use linkerd_policy_controller_core::outbound::{ - DiscoverOutboundPolicy, Kind, OutboundDiscoverTarget, OutboundPolicyKind, OutboundPolicyStream, - ParentMeta, ResourceOutboundPolicy, ResourceTarget, + DiscoverOutboundPolicy, FallbackPolicyStream, Kind, OutboundDiscoverTarget, OutboundPolicy, + OutboundPolicyStream, ResourceTarget, }; pub use linkerd_policy_controller_core::IpNet; pub use linkerd_policy_controller_grpc as grpc; @@ -87,106 +86,39 @@ impl DiscoverInboundServer<(grpc::workload::Workload, NonZeroU16)> for InboundDi } #[async_trait::async_trait] -impl DiscoverOutboundPolicy for OutboundDiscover { +impl DiscoverOutboundPolicy for OutboundDiscover { async fn get_outbound_policy( &self, - target: OutboundDiscoverTarget, - ) -> Result> { - match target { - OutboundDiscoverTarget::Fallback(original_dst) => { - Ok(Some(OutboundPolicyKind::Fallback(original_dst))) + resource: ResourceTarget, + ) -> Result> { + let rx = match self.0.write().outbound_policy_rx(resource.clone()) { + Ok(rx) => rx, + Err(error) => { + tracing::error!(%error, "failed to get outbound policy rx"); + return Ok(None); } - 
OutboundDiscoverTarget::Resource(resource) => { - let rx = match self.0.write().outbound_policy_rx(resource.clone()) { - Ok(rx) => rx, - Err(error) => { - tracing::error!(%error, "failed to get outbound policy rx"); - return Ok(None); - } - }; - let policy = (*rx.borrow()).clone(); - - let resource = match (&policy.parent_meta, &resource.kind) { - ( - ParentMeta::EgressNetwork(traffic_policy), - Kind::EgressNetwork(original_dst), - ) => ResourceOutboundPolicy::Egress { - traffic_policy: *traffic_policy, - original_dst: *original_dst, - policy: policy.clone(), - }, - - (ParentMeta::Service { authority }, Kind::Service) => { - ResourceOutboundPolicy::Service { - authority: authority.clone(), - policy, - } - } - (policy_kind, resource_kind) => { - anyhow::bail!( - "policy kind {:?} incorrect for resource kind: {:?}", - policy_kind, - resource_kind - ); - } - }; - Ok(Some(OutboundPolicyKind::Resource(resource))) - } - } + }; + + let policy = (*rx.borrow()).clone(); + Ok(Some(policy)) } async fn watch_outbound_policy( &self, - target: OutboundDiscoverTarget, + target: ResourceTarget, ) -> Result> { - match target { - OutboundDiscoverTarget::Fallback(original_dst) => { - let rx = self.0.write().fallback_policy_rx(); - let stream = tokio_stream::wrappers::WatchStream::new(rx) - .map(move |_| OutboundPolicyKind::Fallback(original_dst)); - Ok(Some(Box::pin(stream))) - } - - OutboundDiscoverTarget::Resource(resource) => { - match self.0.write().outbound_policy_rx(resource.clone()) { - Ok(rx) => { - let stream = tokio_stream::wrappers::WatchStream::new(rx).filter_map( - move |policy| { - let resource = match (policy.parent_meta.clone(), resource.kind) { - ( - ParentMeta::EgressNetwork(traffic_policy), - Kind::EgressNetwork(original_dst), - ) => Some(ResourceOutboundPolicy::Egress { - traffic_policy, - original_dst, - policy: policy.clone(), - }), - - (ParentMeta::Service { authority }, Kind::Service) => { - Some(ResourceOutboundPolicy::Service { authority, policy }) - } - 
(policy_kind, resource_kind) => { - tracing::error!( - "policy kind {:?} incorrect for resource kind: {:?}", - policy_kind, - resource_kind - ); - None - } - } - .map(OutboundPolicyKind::Resource); - - futures::future::ready(resource) - }, - ); - Ok(Some(Box::pin(stream))) - } - Err(_) => Ok(None), - } - } + match self.0.write().outbound_policy_rx(target) { + Ok(rx) => Ok(Some(Box::pin(tokio_stream::wrappers::WatchStream::new(rx)))), + Err(_) => Ok(None), } } + async fn watch_fallback_policy(&self) -> FallbackPolicyStream { + Box::pin(tokio_stream::wrappers::WatchStream::new( + self.0.read().fallback_policy_rx(), + )) + } + fn lookup_ip( &self, addr: IpAddr, From 8b2950168592146cea30ecb0fec0b50ca85384f4 Mon Sep 17 00:00:00 2001 From: Zahari Dichev Date: Thu, 24 Oct 2024 10:26:17 +0000 Subject: [PATCH 07/11] update proxy api Signed-off-by: Zahari Dichev --- Cargo.lock | 2 +- Cargo.toml | 2 +- policy-controller/grpc/src/outbound.rs | 2 ++ policy-controller/grpc/src/outbound/tcp.rs | 14 ++++++++------ policy-controller/grpc/src/outbound/tls.rs | 14 ++++++++------ 5 files changed, 20 insertions(+), 14 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f7414c5abbace..b079b8936f2c5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1325,7 +1325,7 @@ dependencies = [ [[package]] name = "linkerd2-proxy-api" version = "0.14.0" -source = "git+https://github.com/linkerd/linkerd2-proxy-api?branch=zd/add-error-type-to-tls-and-tcp-route#d7385b43d087da05ef4356e51d81be0a1a736a14" +source = "git+https://github.com/linkerd/linkerd2-proxy-api?branch=zd/make-route-error-optional#5493beff7e0713e048c845fbb92777cfe1a58274" dependencies = [ "http", "ipnet", diff --git a/Cargo.toml b/Cargo.toml index c2d19445f2243..99c65e9db49ea 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,4 +14,4 @@ members = [ lto = "thin" [patch.crates-io] -linkerd2-proxy-api = { git = 'https://github.com/linkerd/linkerd2-proxy-api', branch = 'zd/add-error-type-to-tls-and-tcp-route' } \ No newline at end of file 
+linkerd2-proxy-api = { git = 'https://github.com/linkerd/linkerd2-proxy-api', branch = 'zd/make-route-error-optional' } \ No newline at end of file diff --git a/policy-controller/grpc/src/outbound.rs b/policy-controller/grpc/src/outbound.rs index 5c18e92a91489..5cf3c2f9b0f0d 100644 --- a/policy-controller/grpc/src/outbound.rs +++ b/policy-controller/grpc/src/outbound.rs @@ -312,6 +312,7 @@ fn fallback(original_dst: SocketAddr) -> outbound::OutboundPolicy { outbound::opaque_route::distribution::FirstAvailable { backends: vec![outbound::opaque_route::RouteBackend { backend: Some(backend.clone()), + invalid: None, }], }, )), @@ -591,6 +592,7 @@ fn default_outbound_opaq_route( outbound::opaque_route::distribution::FirstAvailable { backends: vec![outbound::opaque_route::RouteBackend { backend: Some(backend), + invalid: None, }], }, )), diff --git a/policy-controller/grpc/src/outbound/tcp.rs b/policy-controller/grpc/src/outbound/tcp.rs index 4c8baabdae024..50821364d9207 100644 --- a/policy-controller/grpc/src/outbound/tcp.rs +++ b/policy-controller/grpc/src/outbound/tcp.rs @@ -65,6 +65,7 @@ fn convert_outbound_route( outbound::opaque_route::distribution::FirstAvailable { backends: vec![outbound::opaque_route::RouteBackend { backend: Some(backend.clone()), + invalid: None, }], }, ) @@ -109,8 +110,8 @@ fn convert_backend( }, )), }), + invalid: None, }), - error: None, } } Backend::Service(svc) if svc.exists => outbound::opaque_route::WeightedRouteBackend { @@ -132,8 +133,8 @@ fn convert_backend( }, )), }), + invalid: None, }), - error: None, }, Backend::Service(svc) => invalid_backend( svc.weight, @@ -166,8 +167,8 @@ fn convert_backend( }, )), }), + invalid: None, }), - error: None, } } else { let weight = egress_net.weight; @@ -219,8 +220,8 @@ fn invalid_backend( queue: Some(default_queue_config()), kind: None, }), + invalid: Some(outbound::opaque_route::route_backend::Invalid { message }), }), - error: Some(outbound::BackendError { message }), } } @@ -231,8 +232,8 @@ 
pub(crate) fn default_outbound_egress_route( let (error, name) = match traffic_policy { TrafficPolicy::Allow => (None, "tcp-egress-allow"), TrafficPolicy::Deny => ( - Some(outbound::RouteError { - message: "traffic not allowed".to_string(), + Some(outbound::opaque_route::RouteError { + kind: outbound::opaque_route::route_error::Kind::Forbidden as i32, }), "tcp-egress-deny", ), @@ -247,6 +248,7 @@ pub(crate) fn default_outbound_egress_route( outbound::opaque_route::distribution::FirstAvailable { backends: vec![outbound::opaque_route::RouteBackend { backend: Some(backend), + invalid: None, }], }, )), diff --git a/policy-controller/grpc/src/outbound/tls.rs b/policy-controller/grpc/src/outbound/tls.rs index 911aac2533a80..a49d756c51863 100644 --- a/policy-controller/grpc/src/outbound/tls.rs +++ b/policy-controller/grpc/src/outbound/tls.rs @@ -69,6 +69,7 @@ fn convert_outbound_route( outbound::tls_route::distribution::FirstAvailable { backends: vec![outbound::tls_route::RouteBackend { backend: Some(backend.clone()), + invalid: None, }], }, ) @@ -114,8 +115,8 @@ fn convert_backend( }, )), }), + invalid: None, }), - error: None, } } Backend::Service(svc) if svc.exists => outbound::tls_route::WeightedRouteBackend { @@ -137,8 +138,8 @@ fn convert_backend( }, )), }), + invalid: None, }), - error: None, }, Backend::Service(svc) => invalid_backend( svc.weight, @@ -171,8 +172,8 @@ fn convert_backend( }, )), }), + invalid: None, }), - error: None, } } else { let weight = egress_net.weight; @@ -224,8 +225,8 @@ fn invalid_backend( queue: Some(default_queue_config()), kind: None, }), + invalid: Some(outbound::tls_route::route_backend::Invalid { message }), }), - error: Some(outbound::BackendError { message }), } } @@ -236,8 +237,8 @@ pub(crate) fn default_outbound_egress_route( let (error, name) = match traffic_policy { TrafficPolicy::Allow => (None, "tls-egress-allow"), TrafficPolicy::Deny => ( - Some(outbound::RouteError { - message: "traffic not allowed".to_string(), + 
Some(outbound::tls_route::RouteError { + kind: outbound::tls_route::route_error::Kind::Forbidden as i32, }), "tls-egress-deny", ), @@ -252,6 +253,7 @@ pub(crate) fn default_outbound_egress_route( outbound::tls_route::distribution::FirstAvailable { backends: vec![outbound::tls_route::RouteBackend { backend: Some(backend), + invalid: None, }], }, )), From 3e1573aca72b3623a9a03eebef2af53ea63614e5 Mon Sep 17 00:00:00 2001 From: Zahari Dichev Date: Thu, 24 Oct 2024 11:42:32 +0000 Subject: [PATCH 08/11] switching test Signed-off-by: Zahari Dichev --- policy-controller/core/src/outbound.rs | 4 +- policy-controller/core/src/outbound/target.rs | 2 +- policy-controller/grpc/src/outbound.rs | 14 +++--- .../k8s/index/src/outbound/index.rs | 5 +++ policy-controller/src/lib.rs | 6 +-- policy-test/src/grpc.rs | 13 +++++- policy-test/src/lib.rs | 44 +++++++++++++++++++ policy-test/tests/outbound_api_gateway.rs | 44 ++++++++++++++++++- 8 files changed, 115 insertions(+), 17 deletions(-) diff --git a/policy-controller/core/src/outbound.rs b/policy-controller/core/src/outbound.rs index 6dde8cab36422..8e41b4c43415e 100644 --- a/policy-controller/core/src/outbound.rs +++ b/policy-controller/core/src/outbound.rs @@ -29,13 +29,13 @@ pub trait DiscoverOutboundPolicy { async fn watch_outbound_policy(&self, target: R) -> Result>; - async fn watch_fallback_policy(&self) -> FallbackPolicyStream; + async fn watch_external_policy(&self) -> ExternalPolicyStream; fn lookup_ip(&self, addr: IpAddr, port: NonZeroU16, source_namespace: String) -> Option; } pub type OutboundPolicyStream = Pin + Send + Sync + 'static>>; -pub type FallbackPolicyStream = Pin + Send + Sync + 'static>>; +pub type ExternalPolicyStream = Pin + Send + Sync + 'static>>; pub type HttpRoute = OutboundRoute; pub type GrpcRoute = OutboundRoute; diff --git a/policy-controller/core/src/outbound/target.rs b/policy-controller/core/src/outbound/target.rs index 24e977c629a77..d03cfd870aae9 100644 --- 
a/policy-controller/core/src/outbound/target.rs +++ b/policy-controller/core/src/outbound/target.rs @@ -7,7 +7,7 @@ use std::{net::SocketAddr, num::NonZeroU16}; #[derive(Clone, Debug)] pub enum OutboundDiscoverTarget { Resource(ResourceTarget), - Fallback(SocketAddr), + External(SocketAddr), } #[derive(Clone, Debug)] diff --git a/policy-controller/grpc/src/outbound.rs b/policy-controller/grpc/src/outbound.rs index 5cf3c2f9b0f0d..b38f6ef6395c9 100644 --- a/policy-controller/grpc/src/outbound.rs +++ b/policy-controller/grpc/src/outbound.rs @@ -13,7 +13,7 @@ use linkerd2_proxy_api::{ }; use linkerd_policy_controller_core::{ outbound::{ - DiscoverOutboundPolicy, FallbackPolicyStream, Kind, OutboundDiscoverTarget, OutboundPolicy, + DiscoverOutboundPolicy, ExternalPolicyStream, Kind, OutboundDiscoverTarget, OutboundPolicy, OutboundPolicyStream, ParentInfo, ResourceTarget, Route, WeightedEgressNetwork, WeightedService, }, @@ -173,7 +173,7 @@ where } } - OutboundDiscoverTarget::Fallback(original_dst) => { + OutboundDiscoverTarget::External(original_dst) => { Ok(tonic::Response::new(fallback(original_dst))) } } @@ -205,9 +205,9 @@ where ))) } - OutboundDiscoverTarget::Fallback(original_dst) => { - let rx = self.index.watch_fallback_policy().await; - Ok(tonic::Response::new(fallback_stream( + OutboundDiscoverTarget::External(original_dst) => { + let rx = self.index.watch_external_policy().await; + Ok(tonic::Response::new(external_stream( drain, rx, original_dst, @@ -252,9 +252,9 @@ fn response_stream( }) } -fn fallback_stream( +fn external_stream( drain: drain::Watch, - mut rx: FallbackPolicyStream, + mut rx: ExternalPolicyStream, original_dst: SocketAddr, ) -> BoxWatchStream { Box::pin(async_stream::try_stream! 
{ diff --git a/policy-controller/k8s/index/src/outbound/index.rs b/policy-controller/k8s/index/src/outbound/index.rs index 8f06ce34e1e75..553809012828e 100644 --- a/policy-controller/k8s/index/src/outbound/index.rs +++ b/policy-controller/k8s/index/src/outbound/index.rs @@ -32,6 +32,11 @@ pub struct Index { // holds information about resources. currently EgressNetworks and Services resource_info: HashMap, cluster_networks: Vec, + + // holds a no-op sender to which all clients that have been returned + // a Fallback policy are subscribed. It is used to force these clients + // to reconnect and obtain new policy once the current one may no longer + // be valid fallback_polcy_tx: watch::Sender<()>, } diff --git a/policy-controller/src/lib.rs b/policy-controller/src/lib.rs index db6693eb4e23a..4a0fc24080b3d 100644 --- a/policy-controller/src/lib.rs +++ b/policy-controller/src/lib.rs @@ -9,7 +9,7 @@ use linkerd_policy_controller_core::inbound::{ DiscoverInboundServer, InboundServer, InboundServerStream, }; use linkerd_policy_controller_core::outbound::{ - DiscoverOutboundPolicy, FallbackPolicyStream, Kind, OutboundDiscoverTarget, OutboundPolicy, + DiscoverOutboundPolicy, ExternalPolicyStream, Kind, OutboundDiscoverTarget, OutboundPolicy, OutboundPolicyStream, ResourceTarget, }; pub use linkerd_policy_controller_core::IpNet; @@ -113,7 +113,7 @@ impl DiscoverOutboundPolicy for Outbound } } - async fn watch_fallback_policy(&self) -> FallbackPolicyStream { + async fn watch_external_policy(&self) -> ExternalPolicyStream { Box::pin(tokio_stream::wrappers::WatchStream::new( self.0.read().fallback_policy_rx(), )) @@ -150,7 +150,7 @@ impl DiscoverOutboundPolicy for Outbound if !index.is_address_in_cluster(addr) { let original_dst = SocketAddr::new(addr, port.into()); - return Some(OutboundDiscoverTarget::Fallback(original_dst)); + return Some(OutboundDiscoverTarget::External(original_dst)); } None diff --git a/policy-test/src/grpc.rs b/policy-test/src/grpc.rs index 
0a84464ff5462..a25a1800e02f2 100644 --- a/policy-test/src/grpc.rs +++ b/policy-test/src/grpc.rs @@ -295,7 +295,6 @@ impl OutboundPolicyClient { svc: &k8s::Service, port: u16, ) -> Result, tonic::Status> { - use std::net::Ipv4Addr; let address = svc .spec .as_ref() @@ -303,7 +302,17 @@ impl OutboundPolicyClient { .cluster_ip .as_ref() .expect("Service must have a cluster ip"); - let ip = address.parse::().unwrap(); + self.watch_ip(ns, address, port).await + } + + pub async fn watch_ip( + &mut self, + ns: &str, + addr: &str, + port: u16, + ) -> Result, tonic::Status> { + use std::net::Ipv4Addr; + let ip = addr.parse::().unwrap(); let rsp = self .client .watch(tonic::Request::new(outbound::TrafficSpec { diff --git a/policy-test/src/lib.rs b/policy-test/src/lib.rs index b2dca83b24a38..e9838284f0448 100644 --- a/policy-test/src/lib.rs +++ b/policy-test/src/lib.rs @@ -75,6 +75,25 @@ where .expect("failed to create resource") } +/// Deletes a namespace-scoped resource. +pub async fn delete(client: &kube::Client, obj: T) +where + T: kube::Resource, + T: serde::Serialize + serde::de::DeserializeOwned + Clone + std::fmt::Debug, + T::DynamicType: Default, +{ + let params = kube::api::DeleteParams::default(); + let api = obj + .namespace() + .map(|ns| kube::Api::::namespaced(client.clone(), &ns)) + .unwrap_or_else(|| kube::Api::::default_namespaced(client.clone())); + + tracing::trace!(?obj, "Deleting"); + api.delete(&obj.name_any(), ¶ms) + .await + .expect("failed to delete resource"); +} + /// Updates a namespace-scoped resource. pub async fn update(client: &kube::Client, mut new: T) -> T where @@ -238,6 +257,16 @@ pub async fn create_service( create(client, svc).await } +/// Creates an egress network resource. +pub async fn create_egress_network( + client: &kube::Client, + ns: &str, + name: &str, +) -> k8s::policy::EgressNetwork { + let en = mk_egress_net(ns, name); + create(client, en).await +} + /// Creates a service resource. 
pub async fn create_opaque_service( client: &kube::Client, @@ -300,6 +329,21 @@ pub fn mk_service(ns: &str, name: &str, port: i32) -> k8s::Service { } } +pub fn mk_egress_net(ns: &str, name: &str) -> k8s::policy::EgressNetwork { + k8s::policy::EgressNetwork { + metadata: k8s::ObjectMeta { + namespace: Some(ns.to_string()), + name: Some(name.to_string()), + ..Default::default() + }, + spec: k8s::policy::EgressNetworkSpec { + networks: None, + traffic_policy: k8s::policy::egress_network::TrafficPolicy::Allow, + }, + status: None, + } +} + #[track_caller] pub fn assert_svc_meta(meta: &Option, svc: &k8s::Service, port: u16) { tracing::debug!(?meta, ?svc, port, "Asserting service metadata"); diff --git a/policy-test/tests/outbound_api_gateway.rs b/policy-test/tests/outbound_api_gateway.rs index cb2f5325df247..e91150a65f8e1 100644 --- a/policy-test/tests/outbound_api_gateway.rs +++ b/policy-test/tests/outbound_api_gateway.rs @@ -1,10 +1,11 @@ use futures::prelude::*; use kube::ResourceExt; +use linkerd2_proxy_api::meta; use linkerd_policy_controller_k8s_api as k8s; use linkerd_policy_test::{ assert_default_accrual_backoff, assert_svc_meta, create, create_annotated_service, - create_cluster_scoped, create_opaque_service, create_service, delete_cluster_scoped, grpc, - mk_service, outbound_api::*, with_temp_ns, + create_cluster_scoped, create_egress_network, create_opaque_service, create_service, delete, + delete_cluster_scoped, grpc, mk_service, outbound_api::*, with_temp_ns, }; use maplit::{btreemap, convert_args}; use std::{collections::BTreeMap, time::Duration}; @@ -31,6 +32,45 @@ async fn service_does_not_exist() { .await; } +#[tokio::test(flavor = "current_thread")] +async fn egress_switches_to_fallback() { + with_temp_ns(|client, ns| async move { + let egress_net = create_egress_network(&client, &ns, "egress-net").await; + + let mut policy_api = grpc::OutboundPolicyClient::port_forwarded(&client).await; + let mut rsp = policy_api.watch_ip(&ns, "1.1.1.1", 
80).await.unwrap(); + + let policy = rsp.next().await.unwrap().unwrap(); + let meta = policy.metadata.unwrap(); + + let expected_meta = meta::Metadata { + kind: Some(meta::metadata::Kind::Resource(meta::Resource { + group: "policy.linkerd.io".to_string(), + port: 80, + kind: "EgressNetwork".to_string(), + name: "egress-net".to_string(), + namespace: ns.clone(), + section: "".to_string(), + })), + }; + + assert_eq!(meta, expected_meta); + + delete(&client, egress_net).await; + assert!(rsp.next().await.is_none()); + + let mut rsp = policy_api.watch_ip(&ns, "1.1.1.1", 80).await.unwrap(); + + let policy = rsp.next().await.unwrap().unwrap(); + let meta = policy.metadata.unwrap(); + let expected_meta = meta::Metadata { + kind: Some(meta::metadata::Kind::Default("egress-fallback".to_string())), + }; + assert_eq!(meta, expected_meta); + }) + .await; +} + #[tokio::test(flavor = "current_thread")] async fn service_with_no_http_routes() { with_temp_ns(|client, ns| async move { From 1ffaa3bad2817763c6563562865e02949cadfec7 Mon Sep 17 00:00:00 2001 From: Zahari Dichev Date: Sun, 27 Oct 2024 18:31:48 +0000 Subject: [PATCH 09/11] more feedback Signed-off-by: Zahari Dichev --- policy-controller/core/src/outbound/policy.rs | 17 ----------------- policy-controller/grpc/src/outbound.rs | 10 ++-------- 2 files changed, 2 insertions(+), 25 deletions(-) diff --git a/policy-controller/core/src/outbound/policy.rs b/policy-controller/core/src/outbound/policy.rs index cb308e47987fa..41d8a72600c51 100644 --- a/policy-controller/core/src/outbound/policy.rs +++ b/policy-controller/core/src/outbound/policy.rs @@ -5,23 +5,6 @@ use super::{ use std::{net::SocketAddr, num::NonZeroU16}; -/// ResourceOutboundPolicy expresses the known resource types -/// that can be parents for outbound policy. They each come with -/// specific metadata that is used when putting together the final -/// policy response. 
-#[derive(Clone, Debug, PartialEq)] -pub enum ResourceOutboundPolicy { - Service { - authority: String, - policy: OutboundPolicy, - }, - Egress { - traffic_policy: TrafficPolicy, - original_dst: SocketAddr, - policy: OutboundPolicy, - }, -} - // ParentInfo carries resource-specific information about // the parent to which outbound policy is associated. #[derive(Clone, Debug, Hash, PartialEq, Eq)] diff --git a/policy-controller/grpc/src/outbound.rs b/policy-controller/grpc/src/outbound.rs index b38f6ef6395c9..cba161908b4e3 100644 --- a/policy-controller/grpc/src/outbound.rs +++ b/policy-controller/grpc/src/outbound.rs @@ -283,8 +283,6 @@ fn external_stream( fn fallback(original_dst: SocketAddr) -> outbound::OutboundPolicy { // This encoder sets deprecated timeouts for older proxies. - #![allow(deprecated)] - let metadata = Some(Metadata { kind: Some(metadata::Kind::Default("egress-fallback".to_string())), }); @@ -331,18 +329,14 @@ fn fallback(original_dst: SocketAddr) -> outbound::OutboundPolicy { outbound::http_route::distribution::FirstAvailable { backends: vec![outbound::http_route::RouteBackend { backend: Some(backend), - filters: Vec::default(), - request_timeout: None, + ..Default::default() }], }, )), }), matches: vec![api::http_route::HttpRouteMatch::default()], filters: Vec::default(), - request_timeout: None, - timeouts: None, - retry: None, - allow_l5d_request_headers: false, + ..Default::default() }], }]; From e0b58fe3271c75386f18c3ee1d01f5d461f8d8c7 Mon Sep 17 00:00:00 2001 From: Zahari Dichev Date: Sun, 27 Oct 2024 18:34:43 +0000 Subject: [PATCH 10/11] depend on proxy-api c5648ae2a1e405cc6b8aca20522356ebdf20f1ea Signed-off-by: Zahari Dichev --- Cargo.lock | 2 +- Cargo.toml | 3 ++- deny.toml | 1 + 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b079b8936f2c5..41a25a03dbc71 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1325,7 +1325,7 @@ dependencies = [ [[package]] name = "linkerd2-proxy-api" version = "0.14.0" 
-source = "git+https://github.com/linkerd/linkerd2-proxy-api?branch=zd/make-route-error-optional#5493beff7e0713e048c845fbb92777cfe1a58274" +source = "git+https://github.com/linkerd/linkerd2-proxy-api?rev=c5648ae2a1e405cc6b8aca20522356ebdf20f1ea#c5648ae2a1e405cc6b8aca20522356ebdf20f1ea" dependencies = [ "http", "ipnet", diff --git a/Cargo.toml b/Cargo.toml index 99c65e9db49ea..9ba219c0c9972 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,4 +14,5 @@ members = [ lto = "thin" [patch.crates-io] -linkerd2-proxy-api = { git = 'https://github.com/linkerd/linkerd2-proxy-api', branch = 'zd/make-route-error-optional' } \ No newline at end of file +# TODO(Zahari): switch released version once TLS protocol support is out. +linkerd2-proxy-api = { git = 'https://github.com/linkerd/linkerd2-proxy-api', rev = 'c5648ae2a1e405cc6b8aca20522356ebdf20f1ea' } \ No newline at end of file diff --git a/deny.toml b/deny.toml index e5014532f6b9b..680f9dabe2880 100644 --- a/deny.toml +++ b/deny.toml @@ -77,6 +77,7 @@ skip-tree = [ unknown-registry = "deny" unknown-git = "deny" allow-registry = ["https://github.com/rust-lang/crates.io-index"] +allow-git = ["https://github.com/linkerd/linkerd2-proxy-api"] [sources.allow-org] github = [] From 06e2d3a30f633d1f68e69e867d1d7790a6eb09db Mon Sep 17 00:00:00 2001 From: Zahari Dichev Date: Sun, 27 Oct 2024 19:15:36 +0000 Subject: [PATCH 11/11] clippy Signed-off-by: Zahari Dichev --- policy-controller/core/src/outbound.rs | 2 +- policy-controller/core/src/outbound/policy.rs | 11 +---------- 2 files changed, 2 insertions(+), 11 deletions(-) diff --git a/policy-controller/core/src/outbound.rs b/policy-controller/core/src/outbound.rs index 8e41b4c43415e..dcde37fbf3ee4 100644 --- a/policy-controller/core/src/outbound.rs +++ b/policy-controller/core/src/outbound.rs @@ -14,7 +14,7 @@ mod target; type FallbackPolicy = (); pub use self::{ - policy::{OutboundPolicy, ParentInfo, ResourceOutboundPolicy}, + policy::{OutboundPolicy, ParentInfo}, target::{Kind, 
OutboundDiscoverTarget, ResourceTarget}, }; diff --git a/policy-controller/core/src/outbound/policy.rs b/policy-controller/core/src/outbound/policy.rs index 41d8a72600c51..37c4cbc9e7f6d 100644 --- a/policy-controller/core/src/outbound/policy.rs +++ b/policy-controller/core/src/outbound/policy.rs @@ -3,7 +3,7 @@ use super::{ RouteSet, RouteTimeouts, TcpRoute, TlsRoute, TrafficPolicy, }; -use std::{net::SocketAddr, num::NonZeroU16}; +use std::num::NonZeroU16; // ParentInfo carries resource-specific information about // the parent to which outbound policy is associated. @@ -61,12 +61,3 @@ impl OutboundPolicy { self.parent_info.namespace() } } - -impl ResourceOutboundPolicy { - pub fn policy(&self) -> &OutboundPolicy { - match self { - Self::Egress { policy, .. } => policy, - Self::Service { policy, .. } => policy, - } - } -}