From e225351a73a665b3d91f56950040790f864e0c23 Mon Sep 17 00:00:00 2001 From: NANI <57297120+UJX6N@users.noreply.github.com> Date: Sun, 2 Jul 2023 22:06:20 +0800 Subject: [PATCH] Add files via upload --- ..._official_linux-6.4.x_src_to_bbrplus.patch | 1337 +++++++++++++++++ 1 file changed, 1337 insertions(+) create mode 100644 convert_official_linux-6.4.x_src_to_bbrplus.patch diff --git a/convert_official_linux-6.4.x_src_to_bbrplus.patch b/convert_official_linux-6.4.x_src_to_bbrplus.patch new file mode 100644 index 0000000..086580d --- /dev/null +++ b/convert_official_linux-6.4.x_src_to_bbrplus.patch @@ -0,0 +1,1337 @@ +diff -ruaN a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h +--- a/include/net/inet_connection_sock.h 2023-05-11 22:17:39.000000000 +0800 ++++ b/include/net/inet_connection_sock.h 2023-05-14 06:18:47.170341635 +0800 +@@ -135,8 +135,8 @@ + u32 icsk_probes_tstamp; + u32 icsk_user_timeout; + +- u64 icsk_ca_priv[104 / sizeof(u64)]; +-#define ICSK_CA_PRIV_SIZE sizeof_field(struct inet_connection_sock, icsk_ca_priv) ++ u64 icsk_ca_priv[112 / sizeof(u64)]; ++#define ICSK_CA_PRIV_SIZE (14 * sizeof(u64)) + }; + + #define ICSK_TIME_RETRANS 1 /* Retransmit timer */ +diff -ruaN a/include/net/tcp.h b/include/net/tcp.h +--- a/include/net/tcp.h 2023-05-11 22:17:39.000000000 +0800 ++++ b/include/net/tcp.h 2023-05-14 06:18:47.170341635 +0800 +@@ -591,6 +591,8 @@ + #endif + /* tcp_output.c */ + ++u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now, ++ int min_tso_segs); + void tcp_skb_entail(struct sock *sk, struct sk_buff *skb); + void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb); + void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss, +@@ -1088,6 +1090,9 @@ + /* override sysctl_tcp_min_tso_segs */ + u32 (*min_tso_segs)(struct sock *sk); + ++ /* suggest number of segments for each skb to transmit (optional) */ ++ u32 (*tso_segs_goal)(struct sock *sk); ++ + /* call when packets are delivered to update cwnd and pacing rate, + * after all the ca_state processing. (optional) + */ +diff -ruaN a/net/ipv4/Kconfig b/net/ipv4/Kconfig +--- a/net/ipv4/Kconfig 2023-05-11 22:17:39.000000000 +0800 ++++ b/net/ipv4/Kconfig 2023-05-14 06:18:47.170341635 +0800 +@@ -495,7 +495,7 @@ + + config TCP_CONG_CUBIC + tristate "CUBIC TCP" +- default y ++ default m + help + This is version 2.0 of BIC-TCP which uses a cubic growth function + among other techniques. +@@ -678,9 +678,18 @@ + AQM schemes that do not provide a delay signal. It requires the fq + ("Fair Queue") pacing packet scheduler. + ++config TCP_CONG_BBRPLUS ++ tristate "BBRPLUS TCP" ++ default y ++ help ++ ++ BBRplus is an enhanced version of BBR (Bottleneck Bandwidth and RTT). ++ Originally introduced by dog250 & cx9208. ++ Same as BBR, requires the fq ("Fair Queue") pacing packet scheduler. ++ + choice + prompt "Default TCP congestion control" +- default DEFAULT_CUBIC ++ default DEFAULT_BBRPLUS + help + Select the TCP congestion control that will be used by default + for all connections. 
+@@ -715,6 +724,9 @@ + config DEFAULT_BBR + bool "BBR" if TCP_CONG_BBR=y + ++ config DEFAULT_BBRPLUS ++ bool "BBRPLUS" if TCP_CONG_BBRPLUS=y ++ + config DEFAULT_RENO + bool "Reno" + endchoice +@@ -739,7 +751,8 @@ + default "dctcp" if DEFAULT_DCTCP + default "cdg" if DEFAULT_CDG + default "bbr" if DEFAULT_BBR +- default "cubic" ++ default "bbrplus" if DEFAULT_BBRPLUS ++ default "bbrplus" + + config TCP_MD5SIG + bool "TCP: MD5 Signature Option support (RFC2385)" +diff -ruaN a/net/ipv4/Makefile b/net/ipv4/Makefile +--- a/net/ipv4/Makefile 2023-05-11 22:17:39.000000000 +0800 ++++ b/net/ipv4/Makefile 2023-05-14 06:18:47.170341635 +0800 +@@ -47,6 +47,7 @@ + obj-$(CONFIG_INET_UDP_DIAG) += udp_diag.o + obj-$(CONFIG_INET_RAW_DIAG) += raw_diag.o + obj-$(CONFIG_TCP_CONG_BBR) += tcp_bbr.o ++obj-$(CONFIG_TCP_CONG_BBRPLUS) += tcp_bbrplus.o + obj-$(CONFIG_TCP_CONG_BIC) += tcp_bic.o + obj-$(CONFIG_TCP_CONG_CDG) += tcp_cdg.o + obj-$(CONFIG_TCP_CONG_CUBIC) += tcp_cubic.o +diff -ruaN a/net/ipv4/tcp_bbrplus.c b/net/ipv4/tcp_bbrplus.c +--- a/net/ipv4/tcp_bbrplus.c 1970-01-01 08:00:00.000000000 +0800 ++++ b/net/ipv4/tcp_bbrplus.c 2023-05-14 06:33:53.264897515 +0800 +@@ -0,0 +1,1214 @@ ++/* Bottleneck Bandwidth and RTT (BBR) congestion control ++ * ++ * BBR congestion control computes the sending rate based on the delivery ++ * rate (throughput) estimated from ACKs. In a nutshell: ++ * ++ * On each ACK, update our model of the network path: ++ * bottleneck_bandwidth = windowed_max(delivered / elapsed, 10 round trips) ++ * min_rtt = windowed_min(rtt, 10 seconds) ++ * pacing_rate = pacing_gain * bottleneck_bandwidth ++ * cwnd = max(cwnd_gain * bottleneck_bandwidth * min_rtt, 4) ++ * ++ * The core algorithm does not react directly to packet losses or delays, ++ * although BBR may adjust the size of next send per ACK when loss is ++ * observed, or adjust the sending rate if it estimates there is a ++ * traffic policer, in order to keep the drop rate reasonable. ++ * ++ * Here is a state transition diagram for BBR: ++ * ++ * | ++ * V ++ * +---> STARTUP ----+ ++ * | | | ++ * | V | ++ * | DRAIN ----+ ++ * | | | ++ * | V | ++ * +---> PROBE_BW ----+ ++ * | ^ | | ++ * | | | | ++ * | +----+ | ++ * | | ++ * +---- PROBE_RTT <--+ ++ * ++ * A BBR flow starts in STARTUP, and ramps up its sending rate quickly. ++ * When it estimates the pipe is full, it enters DRAIN to drain the queue. ++ * In steady state a BBR flow only uses PROBE_BW and PROBE_RTT. ++ * A long-lived BBR flow spends the vast majority of its time remaining ++ * (repeatedly) in PROBE_BW, fully probing and utilizing the pipe's bandwidth ++ * in a fair manner, with a small, bounded queue. *If* a flow has been ++ * continuously sending for the entire min_rtt window, and hasn't seen an RTT ++ * sample that matches or decreases its min_rtt estimate for 10 seconds, then ++ * it briefly enters PROBE_RTT to cut inflight to a minimum value to re-probe ++ * the path's two-way propagation delay (min_rtt). When exiting PROBE_RTT, if ++ * we estimated that we reached the full bw of the pipe then we enter PROBE_BW; ++ * otherwise we enter STARTUP to try to fill the pipe. ++ * ++ * BBR is described in detail in: ++ * "BBR: Congestion-Based Congestion Control", ++ * Neal Cardwell, Yuchung Cheng, C. Stephen Gunn, Soheil Hassas Yeganeh, ++ * Van Jacobson. ACM Queue, Vol. 14 No. 5, September-October 2016. 
++ * ++ * There is a public e-mail list for discussing BBR development and testing: ++ * https://groups.google.com/forum/#!forum/bbr-dev ++ * ++ * NOTE: BBR might be used with the fq qdisc ("man tc-fq") with pacing enabled, ++ * otherwise TCP stack falls back to an internal pacing using one high ++ * resolution timer per TCP socket and may use more resources. ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/* Scale factor for rate in pkt/uSec unit to avoid truncation in bandwidth ++ * estimation. The rate unit ~= (1500 bytes / 1 usec / 2^24) ~= 715 bps. ++ * This handles bandwidths from 0.06pps (715bps) to 256Mpps (3Tbps) in a u32. ++ * Since the minimum window is >=4 packets, the lower bound isn't ++ * an issue. The upper bound isn't an issue with existing technologies. ++ */ ++#define BW_SCALE 24 ++#define BW_UNIT (1 << BW_SCALE) ++ ++#define BBR_SCALE 8 /* scaling factor for fractions in BBR (e.g. gains) */ ++#define BBR_UNIT (1 << BBR_SCALE) ++ ++/* BBR has the following modes for deciding how fast to send: */ ++enum bbr_mode { ++ BBR_STARTUP, /* ramp up sending rate rapidly to fill pipe */ ++ BBR_DRAIN, /* drain any queue created during startup */ ++ BBR_PROBE_BW, /* discover, share bw: pace around estimated bw */ ++ BBR_PROBE_RTT, /* cut inflight to min to probe min_rtt */ ++}; ++ ++/* BBR congestion control block */ ++struct bbr { ++ u32 min_rtt_us; /* min RTT in min_rtt_win_sec window */ ++ u32 min_rtt_stamp; /* timestamp of min_rtt_us */ ++ u32 probe_rtt_done_stamp; /* end time for BBR_PROBE_RTT mode */ ++ struct minmax bw; /* Max recent delivery rate in pkts/uS << 24 */ ++ u32 rtt_cnt; /* count of packet-timed rounds elapsed */ ++ u32 next_rtt_delivered; /* scb->tx.delivered at end of round */ ++ u64 cycle_mstamp; /* time of this cycle phase start */ ++ u32 mode:3, /* current bbr_mode in state machine */ ++ prev_ca_state:3, /* CA state on previous ACK */ ++ packet_conservation:1, /* use packet conservation? */ ++ restore_cwnd:1, /* decided to revert cwnd to old value */ ++ round_start:1, /* start of packet-timed tx->ack round? */ ++ cycle_len:4, /* phases in this PROBE_BW gain cycle */ ++ tso_segs_goal:7, /* segments we want in each skb we send */ ++ idle_restart:1, /* restarting after idle? */ ++ probe_rtt_round_done:1, /* a BBR_PROBE_RTT round at 4 pkts? */ ++ unused:8, ++ lt_is_sampling:1, /* taking long-term ("LT") samples now? */ ++ lt_rtt_cnt:7, /* round trips in long-term interval */ ++ lt_use_bw:1; /* use lt_bw as our bw estimate? */ ++ u32 lt_bw; /* LT est delivery rate in pkts/uS << 24 */ ++ u32 lt_last_delivered; /* LT intvl start: tp->delivered */ ++ u32 lt_last_stamp; /* LT intvl start: tp->delivered_mstamp */ ++ u32 lt_last_lost; /* LT intvl start: tp->lost */ ++ u32 pacing_gain:10, /* current gain for setting pacing rate */ ++ cwnd_gain:10, /* current gain for setting cwnd */ ++ full_bw_reached:1, /* reached full bw in Startup? */ ++ full_bw_cnt:2, /* number of rounds without large bw gains */ ++ cycle_idx:3, /* current index in pacing_gain cycle array */ ++ has_seen_rtt:1, /* have we seen an RTT sample yet? 
*/ ++ unused_b:5; ++ u32 prior_cwnd; /* prior cwnd upon entering loss recovery */ ++ u32 full_bw; /* recent bw, to estimate if pipe is full */ ++ /* For tracking ACK aggregation: */ ++ u64 ack_epoch_mstamp; ++ /* start of ACK sampling epoch */ ++ u16 extra_acked[2]; ++ /* max excess data ACKed in epoch */ ++ u32 ack_epoch_acked:20, /* packets (S)ACKed in sampling epoch */ ++ extra_acked_win_rtts:5, /* age of extra_acked, in round trips */ ++ extra_acked_win_idx:1, /* current index in extra_acked array */ ++ unused1:6; ++}; ++ ++#define CYCLE_LEN 8 /* number of phases in a pacing gain cycle */ ++ ++/* Window length of bw filter (in rounds): */ ++static const int bbr_bw_rtts = CYCLE_LEN + 2; ++/* Window length of min_rtt filter (in sec): */ ++static const u32 bbr_min_rtt_win_sec = 10; ++/* Minimum time (in ms) spent at bbr_cwnd_min_target in BBR_PROBE_RTT mode: */ ++static const u32 bbr_probe_rtt_mode_ms = 200; ++/* Skip TSO below the following bandwidth (bits/sec): */ ++static const int bbr_min_tso_rate = 1200000; ++ ++/* We use a high_gain value of 2/ln(2) because it's the smallest pacing gain ++ * that will allow a smoothly increasing pacing rate that will double each RTT ++ * and send the same number of packets per RTT that an un-paced, slow-starting ++ * Reno or CUBIC flow would: ++ */ ++static const int bbr_high_gain = BBR_UNIT * 2885 / 1000 + 1; ++/* The pacing gain of 1/high_gain in BBR_DRAIN is calculated to typically drain ++ * the queue created in BBR_STARTUP in a single round: ++ */ ++static const int bbr_drain_gain = BBR_UNIT * 1000 / 2885; ++/* The gain for deriving steady-state cwnd tolerates delayed/stretched ACKs: */ ++static const int bbr_cwnd_gain = BBR_UNIT * 2; ++ ++enum bbr_pacing_gain_phase { ++ BBR_BW_PROBE_UP = 0, ++ BBR_BW_PROBE_DOWN = 1, ++ BBR_BW_PROBE_CRUISE = 2, ++}; ++ ++/* The pacing_gain values for the PROBE_BW gain cycle, to discover/share bw: */ ++static const int bbr_pacing_gain[] = { ++ BBR_UNIT * 5 / 4, /* probe for more available bw */ ++ BBR_UNIT * 3 / 4, /* drain queue and/or yield bw to other flows */ ++ BBR_UNIT, BBR_UNIT, BBR_UNIT, /* cruise at 1.0*bw to utilize pipe, */ ++ BBR_UNIT, BBR_UNIT, BBR_UNIT /* without creating excess queue... */ ++}; ++/* Randomize the starting gain cycling phase over N phases: */ ++static const u32 bbr_cycle_rand = 7; ++ ++/* Try to keep at least this many packets in flight, if things go smoothly. For ++ * smooth functioning, a sliding window protocol ACKing every other packet ++ * needs at least 4 packets in flight: ++ */ ++static const u32 bbr_cwnd_min_target = 4; ++ ++/* To estimate if BBR_STARTUP mode (i.e. high_gain) has filled pipe... */ ++/* If bw has increased significantly (1.25x), there may be more bw available: */ ++static const u32 bbr_full_bw_thresh = BBR_UNIT * 5 / 4; ++/* But after 3 rounds w/o significant bw growth, estimate pipe is full: */ ++static const u32 bbr_full_bw_cnt = 3; ++ ++/* "long-term" ("LT") bandwidth estimator parameters... 
*/ ++/* The minimum number of rounds in an LT bw sampling interval: */ ++static const u32 bbr_lt_intvl_min_rtts = 4; ++/* If lost/delivered ratio > 20%, interval is "lossy" and we may be policed: */ ++static const u32 bbr_lt_loss_thresh = 50; ++/* If 2 intervals have a bw ratio <= 1/8, their bw is "consistent": */ ++static const u32 bbr_lt_bw_ratio = BBR_UNIT / 8; ++/* If 2 intervals have a bw diff <= 4 Kbit/sec their bw is "consistent": */ ++static const u32 bbr_lt_bw_diff = 4000 / 8; ++/* If we estimate we're policed, use lt_bw for this many round trips: */ ++static const u32 bbr_lt_bw_max_rtts = 48; ++ ++/* Gain factor for adding extra_acked to target cwnd: */ ++static const int bbr_extra_acked_gain = BBR_UNIT; ++/* Window length of extra_acked window. Max allowed val is 31. */ ++static const u32 bbr_extra_acked_win_rtts = 10; ++/* Max allowed val for ack_epoch_acked, after which sampling epoch is reset */ ++static const u32 bbr_ack_epoch_acked_reset_thresh = 1U << 20; ++/* Time period for clamping cwnd increment due to ack aggregation */ ++static const u32 bbr_extra_acked_max_us = 100 * 1000; ++ ++/* Each cycle, try to hold sub-unity gain until inflight <= BDP. */ ++static const bool bbr_drain_to_target = true; /* default: enabled */ ++ ++static bool tcp_snd_wnd_test(const struct tcp_sock *tp, ++ const struct sk_buff *skb, ++ unsigned int cur_mss) ++{ ++ u32 end_seq = TCP_SKB_CB(skb)->end_seq; ++ ++ if (skb->len > cur_mss) ++ end_seq = TCP_SKB_CB(skb)->seq + cur_mss; ++ ++ return !after(end_seq, tcp_wnd_end(tp)); ++} ++ ++/* Do we estimate that STARTUP filled the pipe? */ ++static bool bbr_full_bw_reached(const struct sock *sk) ++{ ++ const struct bbr *bbr = inet_csk_ca(sk); ++ ++ return bbr->full_bw_reached; ++} ++ ++static void bbr_set_cycle_idx(struct sock *sk, int cycle_idx) ++{ ++ struct bbr *bbr = inet_csk_ca(sk); ++ bbr->cycle_idx = cycle_idx; ++ bbr->pacing_gain = bbr->lt_use_bw ? ++ BBR_UNIT : bbr_pacing_gain[bbr->cycle_idx]; ++} ++ ++u32 bbr_max_bw(const struct sock *sk); ++u32 bbr_inflight(struct sock *sk, u32 bw, int gain); ++u32 bbr_max_bw(const struct sock *sk); ++ ++static void bbr_drain_to_target_cycling(struct sock *sk, ++ const struct rate_sample *rs) ++{ ++ struct tcp_sock *tp = tcp_sk(sk); ++ struct bbr *bbr = inet_csk_ca(sk); ++ u32 elapsed_us = ++ tcp_stamp_us_delta(tp->delivered_mstamp, bbr->cycle_mstamp); ++ u32 inflight, bw; ++ if (bbr->mode != BBR_PROBE_BW) ++ return; ++ ++ /* Always need to probe for bw before we forget good bw estimate. */ ++ if (elapsed_us > bbr->cycle_len * bbr->min_rtt_us) { ++ /* Start a new PROBE_BW probing cycle of [2 to 8] x min_rtt. */ ++ bbr->cycle_mstamp = tp->delivered_mstamp; ++ bbr->cycle_len = CYCLE_LEN - get_random_u32_below(bbr_cycle_rand); ++ bbr_set_cycle_idx(sk, BBR_BW_PROBE_UP); /* probe bandwidth */ ++ return; ++ } ++ /* The pacing_gain of 1.0 paces at the estimated bw to try to fully ++ * use the pipe without increasing the queue. ++ */ ++ if (bbr->pacing_gain == BBR_UNIT) ++ return; ++ inflight = rs->prior_in_flight; /* what was in-flight before ACK? */ ++ bw = bbr_max_bw(sk); ++ /* A pacing_gain < 1.0 tries to drain extra queue we added if bw ++ * probing didn't find more bw. If inflight falls to match BDP then we ++ * estimate queue is drained; persisting would underutilize the pipe. 
++ */ ++ if (bbr->pacing_gain < BBR_UNIT) { ++ if (inflight <= bbr_inflight(sk, bw, BBR_UNIT)) ++ bbr_set_cycle_idx(sk, BBR_BW_PROBE_CRUISE); /* cruise */ ++ return; ++ } ++ /* A pacing_gain > 1.0 probes for bw by trying to raise inflight to at ++ * least pacing_gain*BDP; this may take more than min_rtt if min_rtt is ++ * small (e.g. on a LAN). We do not persist if packets are lost, since ++ * a path with small buffers may not hold that much. Similarly we exit ++ * if we were prevented by app/recv-win from reaching the target. ++ */ ++ if (elapsed_us > bbr->min_rtt_us && ++ (inflight >= bbr_inflight(sk, bw, bbr->pacing_gain) || ++ rs->losses || /* perhaps pacing_gain*BDP won't fit */ ++ rs->is_app_limited || /* previously app-limited */ ++ !tcp_send_head(sk) || /* currently app/rwin-limited */ ++ !tcp_snd_wnd_test(tp, tcp_send_head(sk), tp->mss_cache))) { ++ bbr_set_cycle_idx(sk, BBR_BW_PROBE_DOWN); /* drain queue */ ++ return; ++ } ++} ++ ++ ++/* Return maximum extra acked in past k-2k round trips, ++ * where k = bbr_extra_acked_win_rtts. ++ */ ++static u16 bbr_extra_acked(const struct sock *sk) ++{ ++ struct bbr *bbr = inet_csk_ca(sk); ++ return max(bbr->extra_acked[0], bbr->extra_acked[1]); ++} ++ ++ ++/* Return the windowed max recent bandwidth sample, in pkts/uS << BW_SCALE. */ ++u32 bbr_max_bw(const struct sock *sk) ++{ ++ struct bbr *bbr = inet_csk_ca(sk); ++ ++ return minmax_get(&bbr->bw); ++} ++ ++/* Return the estimated bandwidth of the path, in pkts/uS << BW_SCALE. */ ++static u32 bbr_bw(const struct sock *sk) ++{ ++ struct bbr *bbr = inet_csk_ca(sk); ++ ++ return bbr->lt_use_bw ? bbr->lt_bw : bbr_max_bw(sk); ++} ++ ++/* Return rate in bytes per second, optionally with a gain. ++ * The order here is chosen carefully to avoid overflow of u64. This should ++ * work for input rates of up to 2.9Tbit/sec and gain of 2.89x. ++ */ ++static u64 bbr_rate_bytes_per_sec(struct sock *sk, u64 rate, int gain) ++{ ++ rate *= tcp_mss_to_mtu(sk, tcp_sk(sk)->mss_cache); ++ rate *= gain; ++ rate >>= BBR_SCALE; ++ rate *= USEC_PER_SEC; ++ return rate >> BW_SCALE; ++} ++ ++/* Convert a BBR bw and gain factor to a pacing rate in bytes per second. */ ++static u32 bbr_bw_to_pacing_rate(struct sock *sk, u32 bw, int gain) ++{ ++ u64 rate = bw; ++ ++ rate = bbr_rate_bytes_per_sec(sk, rate, gain); ++ rate = min_t(u64, rate, sk->sk_max_pacing_rate); ++ return rate; ++} ++ ++/* Initialize pacing rate to: high_gain * init_cwnd / RTT. */ ++static void bbr_init_pacing_rate_from_rtt(struct sock *sk) ++{ ++ struct tcp_sock *tp = tcp_sk(sk); ++ struct bbr *bbr = inet_csk_ca(sk); ++ u64 bw; ++ u32 rtt_us; ++ ++ if (tp->srtt_us) { /* any RTT sample yet? */ ++ rtt_us = max(tp->srtt_us >> 3, 1U); ++ bbr->has_seen_rtt = 1; ++ } else { /* no RTT sample yet */ ++ rtt_us = USEC_PER_MSEC; /* use nominal default RTT */ ++ } ++ bw = (u64)tcp_snd_cwnd(tp) * BW_UNIT; ++ do_div(bw, rtt_us); ++ sk->sk_pacing_rate = bbr_bw_to_pacing_rate(sk, bw, bbr_high_gain); ++} ++ ++/* Pace using current bw estimate and a gain factor. In order to help drive the ++ * network toward lower queues while maintaining high utilization and low ++ * latency, the average pacing rate aims to be slightly (~1%) lower than the ++ * estimated bandwidth. This is an important aspect of the design. In this ++ * implementation this slightly lower pacing rate is achieved implicitly by not ++ * including link-layer headers in the packet size used for the pacing rate. 
++ */ ++static void bbr_set_pacing_rate(struct sock *sk, u32 bw, int gain) ++{ ++ struct tcp_sock *tp = tcp_sk(sk); ++ struct bbr *bbr = inet_csk_ca(sk); ++ u32 rate = bbr_bw_to_pacing_rate(sk, bw, gain); ++ ++ if (unlikely(!bbr->has_seen_rtt && tp->srtt_us)) ++ bbr_init_pacing_rate_from_rtt(sk); ++ if (bbr_full_bw_reached(sk) || rate > sk->sk_pacing_rate) ++ sk->sk_pacing_rate = rate; ++} ++ ++/* Return count of segments we want in the skbs we send, or 0 for default. */ ++static u32 bbr_tso_segs_goal(struct sock *sk) ++{ ++ struct bbr *bbr = inet_csk_ca(sk); ++ ++ return bbr->tso_segs_goal; ++} ++ ++static void bbr_set_tso_segs_goal(struct sock *sk) ++{ ++ struct tcp_sock *tp = tcp_sk(sk); ++ struct bbr *bbr = inet_csk_ca(sk); ++ u32 min_segs; ++ ++ min_segs = sk->sk_pacing_rate < (bbr_min_tso_rate >> 3) ? 1 : 2; ++ bbr->tso_segs_goal = min(tcp_tso_autosize(sk, tp->mss_cache, min_segs), ++ 0x7FU); ++} ++ ++/* Save "last known good" cwnd so we can restore it after losses or PROBE_RTT */ ++static void bbr_save_cwnd(struct sock *sk) ++{ ++ struct tcp_sock *tp = tcp_sk(sk); ++ struct bbr *bbr = inet_csk_ca(sk); ++ ++ if (bbr->prev_ca_state < TCP_CA_Recovery && bbr->mode != BBR_PROBE_RTT) ++ bbr->prior_cwnd = tcp_snd_cwnd(tp); /* this cwnd is good enough */ ++ else /* loss recovery or BBR_PROBE_RTT have temporarily cut cwnd */ ++ bbr->prior_cwnd = max(bbr->prior_cwnd, tcp_snd_cwnd(tp)); ++} ++ ++__bpf_kfunc static void bbr_cwnd_event(struct sock *sk, enum tcp_ca_event event) ++{ ++ struct tcp_sock *tp = tcp_sk(sk); ++ struct bbr *bbr = inet_csk_ca(sk); ++ ++ if (event == CA_EVENT_TX_START && tp->app_limited) { ++ bbr->idle_restart = 1; ++ bbr->ack_epoch_mstamp = tp->tcp_mstamp; ++ bbr->ack_epoch_acked = 0; ++ ++ /* Avoid pointless buffer overflows: pace at est. bw if we don't ++ * need more speed (we're restarting from idle and app-limited). ++ */ ++ if (bbr->mode == BBR_PROBE_BW) ++ bbr_set_pacing_rate(sk, bbr_bw(sk), BBR_UNIT); ++ } ++} ++ ++/* Find target cwnd. Right-size the cwnd based on min RTT and the ++ * estimated bottleneck bandwidth: ++ * ++ * cwnd = bw * min_rtt * gain = BDP * gain ++ * ++ * The key factor, gain, controls the amount of queue. While a small gain ++ * builds a smaller queue, it becomes more vulnerable to noise in RTT ++ * measurements (e.g., delayed ACKs or other ACK compression effects). This ++ * noise may cause BBR to under-estimate the rate. ++ * ++ * To achieve full performance in high-speed paths, we budget enough cwnd to ++ * fit full-sized skbs in-flight on both end hosts to fully utilize the path: ++ * - one skb in sending host Qdisc, ++ * - one skb in sending host TSO/GSO engine ++ * - one skb being received by receiver host LRO/GRO/delayed-ACK engine ++ * Don't worry, at low rates (bbr_min_tso_rate) this won't bloat cwnd because ++ * in such cases tso_segs_goal is 1. The minimum cwnd is 4 packets, ++ * which allows 2 outstanding 2-packet sequences, to try to keep pipe ++ * full even with ACK-every-other-packet delayed ACKs. ++ */ ++static u32 bbr_bdp(struct sock *sk, u32 bw, int gain) ++{ ++ struct bbr *bbr = inet_csk_ca(sk); ++ u32 bdp; ++ u64 w; ++ ++ /* If we've never had a valid RTT sample, cap cwnd at the initial ++ * default. This should only happen when the connection is not using TCP ++ * timestamps and has retransmitted all of the SYN/SYNACK/data packets ++ * ACKed so far. In this case, an RTO can cut cwnd to 1, in which ++ * case we need to slow-start up toward something safe: TCP_INIT_CWND. 
++ */ ++ if (unlikely(bbr->min_rtt_us == ~0U)) /* no valid RTT samples yet? */ ++ return TCP_INIT_CWND; /* be safe: cap at default initial cwnd*/ ++ ++ w = (u64)bw * bbr->min_rtt_us; ++ ++ /* Apply a gain to the given value, then remove the BW_SCALE shift. */ ++ bdp = (((w * gain) >> BBR_SCALE) + BW_UNIT - 1) / BW_UNIT; ++ ++ return bdp; ++} ++ ++static u32 bbr_quantization_budget(struct sock *sk, u32 cwnd, int gain) ++{ ++ ++ /* Allow enough full-sized skbs in flight to utilize end systems. */ ++ cwnd += 3 * bbr_tso_segs_goal(sk); ++ ++ return cwnd; ++} ++ ++/* Find inflight based on min RTT and the estimated bottleneck bandwidth. */ ++u32 bbr_inflight(struct sock *sk, u32 bw, int gain) ++{ ++ u32 inflight; ++ inflight = bbr_bdp(sk, bw, gain); ++ inflight = bbr_quantization_budget(sk, inflight, gain); ++ return inflight; ++ ++} ++ ++/* Find the cwnd increment based on estimate of ack aggregation */ ++static u32 bbr_ack_aggregation_cwnd(struct sock *sk) ++{ ++ u32 max_aggr_cwnd, aggr_cwnd = 0; ++ if (bbr_extra_acked_gain && bbr_full_bw_reached(sk)) { ++ max_aggr_cwnd = ((u64)bbr_bw(sk) * bbr_extra_acked_max_us) ++ / BW_UNIT; ++ aggr_cwnd = (bbr_extra_acked_gain * bbr_extra_acked(sk)) ++ >> BBR_SCALE; ++ aggr_cwnd = min(aggr_cwnd, max_aggr_cwnd); ++ } ++ return aggr_cwnd; ++} ++ ++ ++/* An optimization in BBR to reduce losses: On the first round of recovery, we ++ * follow the packet conservation principle: send P packets per P packets acked. ++ * After that, we slow-start and send at most 2*P packets per P packets acked. ++ * After recovery finishes, or upon undo, we restore the cwnd we had when ++ * recovery started (capped by the target cwnd based on estimated BDP). ++ * ++ * TODO(ycheng/ncardwell): implement a rate-based approach. ++ */ ++static bool bbr_set_cwnd_to_recover_or_restore( ++ struct sock *sk, const struct rate_sample *rs, u32 acked, u32 *new_cwnd) ++{ ++ struct tcp_sock *tp = tcp_sk(sk); ++ struct bbr *bbr = inet_csk_ca(sk); ++ u8 prev_state = bbr->prev_ca_state, state = inet_csk(sk)->icsk_ca_state; ++ u32 cwnd = tcp_snd_cwnd(tp); ++ ++ /* An ACK for P pkts should release at most 2*P packets. We do this ++ * in two steps. First, here we deduct the number of lost packets. ++ * Then, in bbr_set_cwnd() we slow start up toward the target cwnd. ++ */ ++ if (rs->losses > 0) ++ cwnd = max_t(s32, cwnd - rs->losses, 1); ++ ++ if (state == TCP_CA_Recovery && prev_state != TCP_CA_Recovery) { ++ /* Starting 1st round of Recovery, so do packet conservation. */ ++ bbr->packet_conservation = 1; ++ bbr->next_rtt_delivered = tp->delivered; /* start round now */ ++ /* Cut unused cwnd from app behavior, TSQ, or TSO deferral: */ ++ cwnd = tcp_packets_in_flight(tp) + acked; ++ } else if (prev_state >= TCP_CA_Recovery && state < TCP_CA_Recovery) { ++ /* Exiting loss recovery; restore cwnd saved before recovery. */ ++ bbr->restore_cwnd = 1; ++ bbr->packet_conservation = 0; ++ } ++ bbr->prev_ca_state = state; ++ ++ if (bbr->restore_cwnd) { ++ /* Restore cwnd after exiting loss recovery or PROBE_RTT. */ ++ cwnd = max(cwnd, bbr->prior_cwnd); ++ bbr->restore_cwnd = 0; ++ } ++ ++ if (bbr->packet_conservation) { ++ *new_cwnd = max(cwnd, tcp_packets_in_flight(tp) + acked); ++ return true; /* yes, using packet conservation */ ++ } ++ *new_cwnd = cwnd; ++ return false; ++} ++ ++/* Slow-start up toward target cwnd (if bw estimate is growing, or packet loss ++ * has drawn us down below target), or snap down to target if we're above it. 
++ */ ++static void bbr_set_cwnd(struct sock *sk, const struct rate_sample *rs, ++ u32 acked, u32 bw, int gain) ++{ ++ struct tcp_sock *tp = tcp_sk(sk); ++ struct bbr *bbr = inet_csk_ca(sk); ++ u32 cwnd = 0, target_cwnd = 0; ++ ++ if (!acked) ++ return; ++ ++ if (bbr_set_cwnd_to_recover_or_restore(sk, rs, acked, &cwnd)) ++ goto done; ++ ++ /* If we're below target cwnd, slow start cwnd toward target cwnd. */ ++ target_cwnd = bbr_bdp(sk, bw, gain); ++ //// ++ /* Increment the cwnd to account for excess ACKed data that seems ++ * due to aggregation (of data and/or ACKs) visible in the ACK stream. ++ */ ++ target_cwnd += bbr_ack_aggregation_cwnd(sk); ++ //// ++ target_cwnd = bbr_quantization_budget(sk, target_cwnd, gain); ++ if (bbr_full_bw_reached(sk)) /* only cut cwnd if we filled the pipe */ ++ cwnd = min(cwnd + acked, target_cwnd); ++ else if (cwnd < target_cwnd || tp->delivered < TCP_INIT_CWND) ++ cwnd = cwnd + acked; ++ cwnd = max(cwnd, bbr_cwnd_min_target); ++ ++done: ++ tcp_snd_cwnd_set(tp, min(cwnd, tp->snd_cwnd_clamp)); /* apply global cap */ ++ if (bbr->mode == BBR_PROBE_RTT) /* drain queue, refresh min_rtt */ ++ tcp_snd_cwnd_set(tp, min(tcp_snd_cwnd(tp), bbr_cwnd_min_target)); ++} ++ ++/* End cycle phase if it's time and/or we hit the phase's in-flight target. */ ++static bool bbr_is_next_cycle_phase(struct sock *sk, ++ const struct rate_sample *rs) ++{ ++ struct tcp_sock *tp = tcp_sk(sk); ++ struct bbr *bbr = inet_csk_ca(sk); ++ bool is_full_length = ++ tcp_stamp_us_delta(tp->delivered_mstamp, bbr->cycle_mstamp) > ++ bbr->min_rtt_us; ++ u32 inflight, bw; ++ ++ /* The pacing_gain of 1.0 paces at the estimated bw to try to fully ++ * use the pipe without increasing the queue. ++ */ ++ if (bbr->pacing_gain == BBR_UNIT) ++ return is_full_length; /* just use wall clock time */ ++ ++ inflight = rs->prior_in_flight; /* what was in-flight before ACK? */ ++ bw = bbr_max_bw(sk); ++ ++ /* A pacing_gain > 1.0 probes for bw by trying to raise inflight to at ++ * least pacing_gain*BDP; this may take more than min_rtt if min_rtt is ++ * small (e.g. on a LAN). We do not persist if packets are lost, since ++ * a path with small buffers may not hold that much. ++ */ ++ if (bbr->pacing_gain > BBR_UNIT) ++ return is_full_length && ++ (rs->losses || /* perhaps pacing_gain*BDP won't fit */ ++ inflight >= bbr_inflight(sk, bw, bbr->pacing_gain)); ++ ++ /* A pacing_gain < 1.0 tries to drain extra queue we added if bw ++ * probing didn't find more bw. If inflight falls to match BDP then we ++ * estimate queue is drained; persisting would underutilize the pipe. ++ */ ++ return is_full_length || ++ inflight <= bbr_inflight(sk, bw, BBR_UNIT); ++} ++ ++static void bbr_advance_cycle_phase(struct sock *sk) ++{ ++ struct tcp_sock *tp = tcp_sk(sk); ++ struct bbr *bbr = inet_csk_ca(sk); ++ ++ ++ bbr->cycle_idx = (bbr->cycle_idx + 1) & (CYCLE_LEN - 1); ++ bbr->cycle_mstamp = tp->delivered_mstamp; ++ bbr->pacing_gain = bbr->lt_use_bw ? BBR_UNIT : ++ bbr_pacing_gain[bbr->cycle_idx]; ++} ++ ++/* Gain cycling: cycle pacing gain to converge to fair share of available bw. 
*/ ++static void bbr_update_cycle_phase(struct sock *sk, ++ const struct rate_sample *rs) ++{ ++ struct bbr *bbr = inet_csk_ca(sk); ++ ++ if (bbr_drain_to_target) { ++ bbr_drain_to_target_cycling(sk, rs); ++ return; ++ } ++ ++ if (bbr->mode == BBR_PROBE_BW && bbr_is_next_cycle_phase(sk, rs)) ++ bbr_advance_cycle_phase(sk); ++} ++ ++static void bbr_reset_startup_mode(struct sock *sk) ++{ ++ struct bbr *bbr = inet_csk_ca(sk); ++ ++ bbr->mode = BBR_STARTUP; ++ bbr->pacing_gain = bbr_high_gain; ++ bbr->cwnd_gain = bbr_high_gain; ++} ++ ++static void bbr_reset_probe_bw_mode(struct sock *sk) ++{ ++ struct bbr *bbr = inet_csk_ca(sk); ++ ++ bbr->mode = BBR_PROBE_BW; ++ bbr->pacing_gain = BBR_UNIT; ++ bbr->cwnd_gain = bbr_cwnd_gain; ++ bbr->cycle_idx = CYCLE_LEN - 1 - get_random_u32_below(bbr_cycle_rand); ++ bbr_advance_cycle_phase(sk); /* flip to next phase of gain cycle */ ++} ++ ++static void bbr_reset_mode(struct sock *sk) ++{ ++ if (!bbr_full_bw_reached(sk)) ++ bbr_reset_startup_mode(sk); ++ else ++ bbr_reset_probe_bw_mode(sk); ++} ++ ++/* Start a new long-term sampling interval. */ ++static void bbr_reset_lt_bw_sampling_interval(struct sock *sk) ++{ ++ struct tcp_sock *tp = tcp_sk(sk); ++ struct bbr *bbr = inet_csk_ca(sk); ++ ++ bbr->lt_last_stamp = div_u64(tp->delivered_mstamp, USEC_PER_MSEC); ++ bbr->lt_last_delivered = tp->delivered; ++ bbr->lt_last_lost = tp->lost; ++ bbr->lt_rtt_cnt = 0; ++} ++ ++/* Completely reset long-term bandwidth sampling. */ ++static void bbr_reset_lt_bw_sampling(struct sock *sk) ++{ ++ struct bbr *bbr = inet_csk_ca(sk); ++ ++ bbr->lt_bw = 0; ++ bbr->lt_use_bw = 0; ++ bbr->lt_is_sampling = false; ++ bbr_reset_lt_bw_sampling_interval(sk); ++} ++ ++/* Long-term bw sampling interval is done. Estimate whether we're policed. */ ++static void bbr_lt_bw_interval_done(struct sock *sk, u32 bw) ++{ ++ struct bbr *bbr = inet_csk_ca(sk); ++ u32 diff; ++ ++ if (bbr->lt_bw) { /* do we have bw from a previous interval? */ ++ /* Is new bw close to the lt_bw from the previous interval? */ ++ diff = abs(bw - bbr->lt_bw); ++ if ((diff * BBR_UNIT <= bbr_lt_bw_ratio * bbr->lt_bw) || ++ (bbr_rate_bytes_per_sec(sk, diff, BBR_UNIT) <= ++ bbr_lt_bw_diff)) { ++ /* All criteria are met; estimate we're policed. */ ++ bbr->lt_bw = (bw + bbr->lt_bw) >> 1; /* avg 2 intvls */ ++ bbr->lt_use_bw = 1; ++ bbr->pacing_gain = BBR_UNIT; /* try to avoid drops */ ++ bbr->lt_rtt_cnt = 0; ++ return; ++ } ++ } ++ bbr->lt_bw = bw; ++ bbr_reset_lt_bw_sampling_interval(sk); ++} ++ ++/* Token-bucket traffic policers are common (see "An Internet-Wide Analysis of ++ * Traffic Policing", SIGCOMM 2016). BBR detects token-bucket policers and ++ * explicitly models their policed rate, to reduce unnecessary losses. We ++ * estimate that we're policed if we see 2 consecutive sampling intervals with ++ * consistent throughput and high packet loss. If we think we're being policed, ++ * set lt_bw to the "long-term" average delivery rate from those 2 intervals. ++ */ ++static void bbr_lt_bw_sampling(struct sock *sk, const struct rate_sample *rs) ++{ ++ struct tcp_sock *tp = tcp_sk(sk); ++ struct bbr *bbr = inet_csk_ca(sk); ++ u32 lost, delivered; ++ u64 bw; ++ u32 t; ++ ++ if (bbr->lt_use_bw) { /* already using long-term rate, lt_bw? 
*/ ++ if (bbr->mode == BBR_PROBE_BW && bbr->round_start && ++ ++bbr->lt_rtt_cnt >= bbr_lt_bw_max_rtts) { ++ bbr_reset_lt_bw_sampling(sk); /* stop using lt_bw */ ++ bbr_reset_probe_bw_mode(sk); /* restart gain cycling */ ++ } ++ return; ++ } ++ ++ /* Wait for the first loss before sampling, to let the policer exhaust ++ * its tokens and estimate the steady-state rate allowed by the policer. ++ * Starting samples earlier includes bursts that over-estimate the bw. ++ */ ++ if (!bbr->lt_is_sampling) { ++ if (!rs->losses) ++ return; ++ bbr_reset_lt_bw_sampling_interval(sk); ++ bbr->lt_is_sampling = true; ++ } ++ ++ /* To avoid underestimates, reset sampling if we run out of data. */ ++ if (rs->is_app_limited) { ++ bbr_reset_lt_bw_sampling(sk); ++ return; ++ } ++ ++ if (bbr->round_start) ++ bbr->lt_rtt_cnt++; /* count round trips in this interval */ ++ if (bbr->lt_rtt_cnt < bbr_lt_intvl_min_rtts) ++ return; /* sampling interval needs to be longer */ ++ if (bbr->lt_rtt_cnt > 4 * bbr_lt_intvl_min_rtts) { ++ bbr_reset_lt_bw_sampling(sk); /* interval is too long */ ++ return; ++ } ++ ++ /* End sampling interval when a packet is lost, so we estimate the ++ * policer tokens were exhausted. Stopping the sampling before the ++ * tokens are exhausted under-estimates the policed rate. ++ */ ++ if (!rs->losses) ++ return; ++ ++ /* Calculate packets lost and delivered in sampling interval. */ ++ lost = tp->lost - bbr->lt_last_lost; ++ delivered = tp->delivered - bbr->lt_last_delivered; ++ /* Is loss rate (lost/delivered) >= lt_loss_thresh? If not, wait. */ ++ if (!delivered || (lost << BBR_SCALE) < bbr_lt_loss_thresh * delivered) ++ return; ++ ++ /* Find average delivery rate in this sampling interval. */ ++ t = div_u64(tp->delivered_mstamp, USEC_PER_MSEC) - bbr->lt_last_stamp; ++ if ((s32)t < 1) ++ return; /* interval is less than one ms, so wait */ ++ /* Check if can multiply without overflow */ ++ if (t >= ~0U / USEC_PER_MSEC) { ++ bbr_reset_lt_bw_sampling(sk); /* interval too long; reset */ ++ return; ++ } ++ t *= USEC_PER_MSEC; ++ bw = (u64)delivered * BW_UNIT; ++ do_div(bw, t); ++ bbr_lt_bw_interval_done(sk, bw); ++} ++ ++/* Estimate the bandwidth based on how fast packets are delivered */ ++static void bbr_update_bw(struct sock *sk, const struct rate_sample *rs) ++{ ++ struct tcp_sock *tp = tcp_sk(sk); ++ struct bbr *bbr = inet_csk_ca(sk); ++ u64 bw; ++ ++ bbr->round_start = 0; ++ if (rs->delivered < 0 || rs->interval_us <= 0) ++ return; /* Not a valid observation */ ++ ++ /* See if we've reached the next RTT */ ++ if (!before(rs->prior_delivered, bbr->next_rtt_delivered)) { ++ bbr->next_rtt_delivered = tp->delivered; ++ bbr->rtt_cnt++; ++ bbr->round_start = 1; ++ bbr->packet_conservation = 0; ++ } ++ ++ bbr_lt_bw_sampling(sk, rs); ++ ++ /* Divide delivered by the interval to find a (lower bound) bottleneck ++ * bandwidth sample. Delivered is in packets and interval_us in uS and ++ * ratio will be <<1 for most connections. So delivered is first scaled. ++ */ ++ bw = div64_long((u64)rs->delivered * BW_UNIT, rs->interval_us); ++ ++ /* If this sample is application-limited, it is likely to have a very ++ * low delivered count that represents application behavior rather than ++ * the available network rate. Such a sample could drag down estimated ++ * bw, causing needless slow-down. Thus, to continue to send at the ++ * last measured network rate, we filter out app-limited samples unless ++ * they describe the path bw at least as well as our bw model. 
++ * ++ * So the goal during app-limited phase is to proceed with the best ++ * network rate no matter how long. We automatically leave this ++ * phase when app writes faster than the network can deliver :) ++ */ ++ if (!rs->is_app_limited || bw >= bbr_max_bw(sk)) { ++ /* Incorporate new sample into our max bw filter. */ ++ minmax_running_max(&bbr->bw, bbr_bw_rtts, bbr->rtt_cnt, bw); ++ } ++} ++ ++/* Estimate when the pipe is full, using the change in delivery rate: BBR ++ * estimates that STARTUP filled the pipe if the estimated bw hasn't changed by ++ * at least bbr_full_bw_thresh (25%) after bbr_full_bw_cnt (3) non-app-limited ++ * rounds. Why 3 rounds: 1: rwin autotuning grows the rwin, 2: we fill the ++ * higher rwin, 3: we get higher delivery rate samples. Or transient ++ * cross-traffic or radio noise can go away. CUBIC Hystart shares a similar ++ * design goal, but uses delay and inter-ACK spacing instead of bandwidth. ++ */ ++static void bbr_check_full_bw_reached(struct sock *sk, ++ const struct rate_sample *rs) ++{ ++ struct bbr *bbr = inet_csk_ca(sk); ++ u32 bw_thresh; ++ ++ if (bbr_full_bw_reached(sk) || !bbr->round_start || rs->is_app_limited) ++ return; ++ ++ bw_thresh = (u64)bbr->full_bw * bbr_full_bw_thresh >> BBR_SCALE; ++ if (bbr_max_bw(sk) >= bw_thresh) { ++ bbr->full_bw = bbr_max_bw(sk); ++ bbr->full_bw_cnt = 0; ++ return; ++ } ++ ++bbr->full_bw_cnt; ++ bbr->full_bw_reached = bbr->full_bw_cnt >= bbr_full_bw_cnt; ++} ++ ++/* If pipe is probably full, drain the queue and then enter steady-state. */ ++static void bbr_check_drain(struct sock *sk, const struct rate_sample *rs) ++{ ++ struct bbr *bbr = inet_csk_ca(sk); ++ ++ if (bbr->mode == BBR_STARTUP && bbr_full_bw_reached(sk)) { ++ bbr->mode = BBR_DRAIN; /* drain queue we created */ ++ bbr->pacing_gain = bbr_drain_gain; /* pace slow to drain */ ++ bbr->cwnd_gain = bbr_high_gain; /* maintain cwnd */ ++ } /* fall through to check if in-flight is already small: */ ++ if (bbr->mode == BBR_DRAIN && ++ tcp_packets_in_flight(tcp_sk(sk)) <= bbr_inflight(sk, bbr_max_bw(sk), BBR_UNIT)) ++ bbr_reset_probe_bw_mode(sk); /* we estimate queue is drained */ ++} ++ ++ ++/* Estimates the windowed max degree of ack aggregation. ++ * This is used to provision extra in-flight data to keep sending during ++ * inter-ACK silences. ++ * ++ * Degree of ack aggregation is estimated as extra data acked beyond expected. ++ * ++ * max_extra_acked = "maximum recent excess data ACKed beyond max_bw * interval" ++ * cwnd += max_extra_acked ++ * ++ * Max extra_acked is clamped by cwnd and bw * bbr_extra_acked_max_us (100 ms). ++ * Max filter is an approximate sliding window of 10-20 (packet timed) round ++ * trips. ++ */ ++ static void bbr_update_ack_aggregation(struct sock *sk, ++ const struct rate_sample *rs) ++ { ++ u32 epoch_us, expected_acked, extra_acked; ++ struct bbr *bbr = inet_csk_ca(sk); ++ struct tcp_sock *tp = tcp_sk(sk); ++ if (!bbr_extra_acked_gain || rs->acked_sacked <= 0 || ++ rs->delivered < 0 || rs->interval_us <= 0) ++ return; ++ if (bbr->round_start) { ++ bbr->extra_acked_win_rtts = min(0x1F, ++ bbr->extra_acked_win_rtts + 1); ++ if (bbr->extra_acked_win_rtts >= bbr_extra_acked_win_rtts) { ++ bbr->extra_acked_win_rtts = 0; ++ bbr->extra_acked_win_idx = bbr->extra_acked_win_idx ?0 : 1; ++ bbr->extra_acked[bbr->extra_acked_win_idx] = 0; ++ } ++ } ++ /* Compute how many packets we expected to be delivered over epoch. 
*/ ++ epoch_us = tcp_stamp_us_delta(tp->delivered_mstamp, ++ bbr->ack_epoch_mstamp); ++ expected_acked = ((u64)bbr_bw(sk) * epoch_us) / BW_UNIT; ++ /* Reset the aggregation epoch if ACK rate is below expected rate or ++ * significantly large no. of ack received since epoch (potentially ++ * quite old epoch). ++ */ ++ if (bbr->ack_epoch_acked <= expected_acked || ++ (bbr->ack_epoch_acked + rs->acked_sacked >= ++ bbr_ack_epoch_acked_reset_thresh)) { ++ bbr->ack_epoch_acked = 0; ++ bbr->ack_epoch_mstamp = tp->delivered_mstamp; ++ expected_acked = 0; ++ } ++ /* Compute excess data delivered, beyond what was expected. */ ++ bbr->ack_epoch_acked = min(0xFFFFFU, ++ bbr->ack_epoch_acked + rs->acked_sacked); ++ extra_acked = bbr->ack_epoch_acked - expected_acked; ++ extra_acked = min(extra_acked, tcp_snd_cwnd(tp)); ++ if (extra_acked > bbr->extra_acked[bbr->extra_acked_win_idx]) ++ bbr->extra_acked[bbr->extra_acked_win_idx] = extra_acked; ++} ++ ++/* The goal of PROBE_RTT mode is to have BBR flows cooperatively and ++ * periodically drain the bottleneck queue, to converge to measure the true ++ * min_rtt (unloaded propagation delay). This allows the flows to keep queues ++ * small (reducing queuing delay and packet loss) and achieve fairness among ++ * BBR flows. ++ * ++ * The min_rtt filter window is 10 seconds. When the min_rtt estimate expires, ++ * we enter PROBE_RTT mode and cap the cwnd at bbr_cwnd_min_target=4 packets. ++ * After at least bbr_probe_rtt_mode_ms=200ms and at least one packet-timed ++ * round trip elapsed with that flight size <= 4, we leave PROBE_RTT mode and ++ * re-enter the previous mode. BBR uses 200ms to approximately bound the ++ * performance penalty of PROBE_RTT's cwnd capping to roughly 2% (200ms/10s). ++ * ++ * Note that flows need only pay 2% if they are busy sending over the last 10 ++ * seconds. Interactive applications (e.g., Web, RPCs, video chunks) often have ++ * natural silences or low-rate periods within 10 seconds where the rate is low ++ * enough for long enough to drain its queue in the bottleneck. We pick up ++ * these min RTT measurements opportunistically with our min_rtt filter. :-) ++ */ ++ ++static void bbr_update_min_rtt(struct sock *sk, const struct rate_sample *rs) ++{ ++ struct tcp_sock *tp = tcp_sk(sk); ++ struct bbr *bbr = inet_csk_ca(sk); ++ bool filter_expired; ++ ++ /* Track min RTT seen in the min_rtt_win_sec filter window: */ ++ filter_expired = after(tcp_jiffies32, ++ bbr->min_rtt_stamp + bbr_min_rtt_win_sec * HZ); ++ if (rs->rtt_us >= 0 && ++ (rs->rtt_us < bbr->min_rtt_us || filter_expired)) { ++ bbr->min_rtt_us = rs->rtt_us; ++ bbr->min_rtt_stamp = tcp_jiffies32; ++ } ++ ++ if (bbr_probe_rtt_mode_ms > 0 && filter_expired && ++ !bbr->idle_restart && bbr->mode != BBR_PROBE_RTT) { ++ bbr->mode = BBR_PROBE_RTT; /* dip, drain queue */ ++ bbr->pacing_gain = BBR_UNIT; ++ bbr->cwnd_gain = BBR_UNIT; ++ bbr_save_cwnd(sk); /* note cwnd so we can restore it */ ++ bbr->probe_rtt_done_stamp = 0; ++ } ++ ++ if (bbr->mode == BBR_PROBE_RTT) { ++ /* Ignore low rate samples during this mode. */ ++ tp->app_limited = ++ (tp->delivered + tcp_packets_in_flight(tp)) ? : 1; ++ /* Maintain min packets in flight for max(200 ms, 1 round). 
*/ ++ if (!bbr->probe_rtt_done_stamp && ++ tcp_packets_in_flight(tp) <= bbr_cwnd_min_target) { ++ bbr->probe_rtt_done_stamp = tcp_jiffies32 + ++ msecs_to_jiffies(bbr_probe_rtt_mode_ms); ++ bbr->probe_rtt_round_done = 0; ++ bbr->next_rtt_delivered = tp->delivered; ++ } else if (bbr->probe_rtt_done_stamp) { ++ if (bbr->round_start) ++ bbr->probe_rtt_round_done = 1; ++ if (bbr->probe_rtt_round_done && ++ after(tcp_jiffies32, bbr->probe_rtt_done_stamp)) { ++ bbr->min_rtt_stamp = tcp_jiffies32; ++ bbr->restore_cwnd = 1; /* snap to prior_cwnd */ ++ bbr_reset_mode(sk); ++ } ++ } ++ } ++ /* Restart after idle ends only once we process a new S/ACK for data */ ++ if (rs->delivered > 0) ++ bbr->idle_restart = 0; ++} ++ ++static void bbr_update_model(struct sock *sk, const struct rate_sample *rs) ++{ ++ bbr_update_bw(sk, rs); ++ bbr_update_ack_aggregation(sk, rs); ++ bbr_update_cycle_phase(sk, rs); ++ bbr_check_full_bw_reached(sk, rs); ++ bbr_check_drain(sk, rs); ++ bbr_update_min_rtt(sk, rs); ++} ++ ++__bpf_kfunc static void bbr_main(struct sock *sk, const struct rate_sample *rs) ++{ ++ struct bbr *bbr = inet_csk_ca(sk); ++ u32 bw; ++ ++ bbr_update_model(sk, rs); ++ ++ bw = bbr_bw(sk); ++ bbr_set_pacing_rate(sk, bw, bbr->pacing_gain); ++ bbr_set_tso_segs_goal(sk); ++ bbr_set_cwnd(sk, rs, rs->acked_sacked, bw, bbr->cwnd_gain); ++} ++ ++__bpf_kfunc static void bbr_init(struct sock *sk) ++{ ++ struct tcp_sock *tp = tcp_sk(sk); ++ struct bbr *bbr = inet_csk_ca(sk); ++ ++ bbr->prior_cwnd = 0; ++ bbr->tso_segs_goal = 0; /* default segs per skb until first ACK */ ++ bbr->rtt_cnt = 0; ++ bbr->next_rtt_delivered = tp->delivered; ++ bbr->prev_ca_state = TCP_CA_Open; ++ bbr->packet_conservation = 0; ++ ++ bbr->probe_rtt_done_stamp = 0; ++ bbr->probe_rtt_round_done = 0; ++ bbr->min_rtt_us = tcp_min_rtt(tp); ++ bbr->min_rtt_stamp = tcp_jiffies32; ++ ++ minmax_reset(&bbr->bw, bbr->rtt_cnt, 0); /* init max bw to 0 */ ++ ++ bbr->has_seen_rtt = 0; ++ bbr_init_pacing_rate_from_rtt(sk); ++ ++ bbr->restore_cwnd = 0; ++ bbr->round_start = 0; ++ bbr->idle_restart = 0; ++ bbr->full_bw_reached = 0; ++ bbr->full_bw = 0; ++ bbr->full_bw_cnt = 0; ++ bbr->cycle_mstamp = 0; ++ bbr->cycle_idx = 0; ++ bbr->cycle_len = 0; ++ bbr_reset_lt_bw_sampling(sk); ++ bbr_reset_startup_mode(sk); ++ bbr->ack_epoch_mstamp = tp->tcp_mstamp; ++ bbr->ack_epoch_acked = 0; ++ bbr->extra_acked_win_rtts = 0; ++ bbr->extra_acked_win_idx = 0; ++ bbr->extra_acked[0] = 0; ++ bbr->extra_acked[1] = 0; ++ ++ cmpxchg(&sk->sk_pacing_status, SK_PACING_NONE, SK_PACING_NEEDED); ++} ++ ++__bpf_kfunc static u32 bbr_sndbuf_expand(struct sock *sk) ++{ ++ /* Provision 3 * cwnd since BBR may slow-start even during recovery. */ ++ return 3; ++} ++ ++/* In theory BBR does not need to undo the cwnd since it does not ++ * always reduce cwnd on losses (see bbr_main()). Keep it for now. ++ */ ++__bpf_kfunc static u32 bbr_undo_cwnd(struct sock *sk) ++{ ++ struct bbr *bbr = inet_csk_ca(sk); ++ ++ bbr->full_bw = 0; /* spurious slow-down; reset full pipe detection */ ++ bbr->full_bw_cnt = 0; ++ bbr_reset_lt_bw_sampling(sk); ++ return tcp_snd_cwnd(tcp_sk(sk)); ++} ++ ++/* Entering loss recovery, so save cwnd for when we exit or undo recovery. 
*/ ++__bpf_kfunc static u32 bbr_ssthresh(struct sock *sk) ++{ ++ bbr_save_cwnd(sk); ++ return TCP_INFINITE_SSTHRESH; /* BBR does not use ssthresh */ ++} ++ ++static size_t bbr_get_info(struct sock *sk, u32 ext, int *attr, ++ union tcp_cc_info *info) ++{ ++ if (ext & (1 << (INET_DIAG_BBRINFO - 1)) || ++ ext & (1 << (INET_DIAG_VEGASINFO - 1))) { ++ struct tcp_sock *tp = tcp_sk(sk); ++ struct bbr *bbr = inet_csk_ca(sk); ++ u64 bw = bbr_bw(sk); ++ ++ bw = bw * tp->mss_cache * USEC_PER_SEC >> BW_SCALE; ++ memset(&info->bbr, 0, sizeof(info->bbr)); ++ info->bbr.bbr_bw_lo = (u32)bw; ++ info->bbr.bbr_bw_hi = (u32)(bw >> 32); ++ info->bbr.bbr_min_rtt = bbr->min_rtt_us; ++ info->bbr.bbr_pacing_gain = bbr->pacing_gain; ++ info->bbr.bbr_cwnd_gain = bbr->cwnd_gain; ++ *attr = INET_DIAG_BBRINFO; ++ return sizeof(info->bbr); ++ } ++ return 0; ++} ++ ++__bpf_kfunc static void bbr_set_state(struct sock *sk, u8 new_state) ++{ ++ struct bbr *bbr = inet_csk_ca(sk); ++ ++ if (new_state == TCP_CA_Loss) { ++ struct rate_sample rs = { .losses = 1 }; ++ ++ bbr->prev_ca_state = TCP_CA_Loss; ++ bbr->full_bw = 0; ++ bbr->round_start = 1; /* treat RTO like end of a round */ ++ bbr_lt_bw_sampling(sk, &rs); ++ } ++} ++ ++static struct tcp_congestion_ops tcp_bbr_cong_ops __read_mostly = { ++ .flags = TCP_CONG_NON_RESTRICTED, ++ .name = "bbrplus", ++ .owner = THIS_MODULE, ++ .init = bbr_init, ++ .cong_control = bbr_main, ++ .sndbuf_expand = bbr_sndbuf_expand, ++ .undo_cwnd = bbr_undo_cwnd, ++ .cwnd_event = bbr_cwnd_event, ++ .ssthresh = bbr_ssthresh, ++ .tso_segs_goal = bbr_tso_segs_goal, ++ .get_info = bbr_get_info, ++ .set_state = bbr_set_state, ++}; ++ ++BTF_SET8_START(tcp_bbr_check_kfunc_ids) ++#ifdef CONFIG_X86 ++#ifdef CONFIG_DYNAMIC_FTRACE ++BTF_ID_FLAGS(func, bbr_init) ++BTF_ID_FLAGS(func, bbr_main) ++BTF_ID_FLAGS(func, bbr_sndbuf_expand) ++BTF_ID_FLAGS(func, bbr_undo_cwnd) ++BTF_ID_FLAGS(func, bbr_cwnd_event) ++BTF_ID_FLAGS(func, bbr_ssthresh) ++BTF_ID_FLAGS(func, bbr_set_state) ++#endif ++#endif ++BTF_SET8_END(tcp_bbr_check_kfunc_ids) ++ ++static const struct btf_kfunc_id_set tcp_bbr_kfunc_set = { ++ .owner = THIS_MODULE, ++ .set = &tcp_bbr_check_kfunc_ids, ++}; ++ ++static int __init bbr_register(void) ++{ ++ int ret; ++ ++ BUILD_BUG_ON(sizeof(struct bbr) > ICSK_CA_PRIV_SIZE); ++ ++ ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &tcp_bbr_kfunc_set); ++ if (ret < 0) ++ return ret; ++ return tcp_register_congestion_control(&tcp_bbr_cong_ops); ++} ++ ++static void __exit bbr_unregister(void) ++{ ++ tcp_unregister_congestion_control(&tcp_bbr_cong_ops); ++} ++ ++module_init(bbr_register); ++module_exit(bbr_unregister); ++ ++MODULE_AUTHOR("Van Jacobson "); ++MODULE_AUTHOR("Neal Cardwell "); ++MODULE_AUTHOR("Yuchung Cheng "); ++MODULE_AUTHOR("Soheil Hassas Yeganeh "); ++MODULE_LICENSE("Dual BSD/GPL"); ++MODULE_DESCRIPTION("TCP BBR (Bottleneck Bandwidth and RTT)"); +diff -ruaN a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c +--- a/net/ipv4/tcp_output.c 2023-05-11 22:17:39.000000000 +0800 ++++ b/net/ipv4/tcp_output.c 2023-05-14 06:18:47.170341635 +0800 +@@ -1967,7 +1967,7 @@ + * for every 2^9 usec (aka 512 us) of RTT, so that the RTT-based allowance + * is below 1500 bytes after 6 * ~500 usec = 3ms. 
+ */ +-static u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now, ++u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now, + int min_tso_segs) + { + unsigned long bytes; +@@ -1983,6 +1983,7 @@ + + return max_t(u32, bytes / mss_now, min_tso_segs); + } ++EXPORT_SYMBOL(tcp_tso_autosize); + + /* Return the number of segments we want in the skb we are transmitting. + * See if congestion control module wants to decide; otherwise, autosize.
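
For readers following the model arithmetic in tcp_bbrplus.c above: the BW_SCALE/BBR_SCALE fixed-point steps in bbr_bdp() and bbr_rate_bytes_per_sec() are easy to misread, so here is a minimal standalone userspace sketch of the same calculation. It is not part of the patch; the 100 Mbit/s bottleneck, 1448-byte MSS and 20 ms min_rtt are invented example inputs, and the tcp_mss_to_mtu() header adjustment the kernel applies is skipped for brevity.

#include <stdint.h>
#include <stdio.h>

#define BW_SCALE	24
#define BW_UNIT		(1 << BW_SCALE)
#define BBR_SCALE	8
#define BBR_UNIT	(1 << BBR_SCALE)
#define USEC_PER_SEC	1000000ULL

int main(void)
{
	uint64_t link_bps   = 100000000;	/* example: 100 Mbit/s bottleneck */
	uint64_t mss        = 1448;		/* example: bytes per segment */
	uint64_t min_rtt_us = 20000;		/* example: 20 ms min_rtt */

	/* Delivery rate in BBR's unit: packets per usec, scaled by 2^BW_SCALE. */
	uint64_t bw = (link_bps / 8 / mss) * BW_UNIT / USEC_PER_SEC;

	/* bbr_bdp(): target cwnd = gain * bw * min_rtt, rounded up to whole packets. */
	int gain = BBR_UNIT * 2;		/* bbr_cwnd_gain, i.e. 2.0 */
	uint64_t w = bw * min_rtt_us;
	uint64_t bdp = (((w * gain) >> BBR_SCALE) + BW_UNIT - 1) / BW_UNIT;

	/* bbr_rate_bytes_per_sec() with a pacing gain of 1.0 (PROBE_BW cruise). */
	uint64_t rate = bw * mss;
	rate = (rate * BBR_UNIT) >> BBR_SCALE;		/* apply gain, drop BBR_SCALE */
	rate = (rate * USEC_PER_SEC) >> BW_SCALE;	/* per-usec -> per-sec, drop BW_SCALE */

	printf("bw sample   : %llu (pkts/usec << 24)\n", (unsigned long long)bw);
	printf("target cwnd : %llu packets\n", (unsigned long long)bdp);
	printf("pacing rate : %llu bytes/sec\n", (unsigned long long)rate);
	return 0;
}

For these inputs the sketch prints a target cwnd of roughly 346 packets (about twice the ~173-packet BDP, as bbr_cwnd_gain intends) and a pacing rate of about 12.5 MB/s, matching the 100 Mbit/s input rate.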
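
The ACK-aggregation provisioning in bbr_update_ack_aggregation() and bbr_ack_aggregation_cwnd() follows the same pattern: credit the cwnd with data ACKed beyond what the bandwidth model predicts for the sampling epoch. Below is a short sketch with invented sample numbers, again not part of the patch; the real code additionally resets the epoch when ACKs fall below expectation and caps the credit by cwnd and by bw * bbr_extra_acked_max_us.

#include <stdint.h>
#include <stdio.h>

#define BW_SCALE	24
#define BW_UNIT		(1 << BW_SCALE)

int main(void)
{
	uint64_t bw       = 144820;	/* example: pkts/usec << 24 (~8632 pkts/sec) */
	uint64_t epoch_us = 5000;	/* example: 5 ms since the sampling epoch began */
	uint64_t acked    = 60;		/* example: packets (S)ACKed in this epoch */

	/* bbr_update_ack_aggregation(): packets the bw model expected by now. */
	uint64_t expected = (bw * epoch_us) / BW_UNIT;
	uint64_t extra = acked > expected ? acked - expected : 0;

	printf("expected %llu pkts, extra_acked %llu pkts\n",
	       (unsigned long long)expected, (unsigned long long)extra);
	return 0;
}

Here the model expects about 43 packets over the 5 ms epoch, so 17 of the 60 ACKed packets are treated as aggregation and feed the extra_acked max-filter that bbr_set_cwnd() adds to the target cwnd.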
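
A few practical notes on the patch as a whole. The icsk_ca_priv area grows from 104 to 112 bytes (14 u64 slots) so that the bbrplus variant of struct bbr fits, which the BUILD_BUG_ON() in bbr_register() verifies at build time. tcp_tso_autosize() loses its static qualifier, gains a declaration in tcp.h and an EXPORT_SYMBOL() so that bbr_set_tso_segs_goal() in the module can call it through the new tso_segs_goal hook. The Kconfig changes build bbrplus in by default, switch CUBIC's default from built-in to module, and make "bbrplus" the default congestion control; the algorithm can also be selected at runtime with sysctl net.ipv4.tcp_congestion_control=bbrplus. As the Kconfig help and the header comment in tcp_bbrplus.c note, it is meant to run with fq pacing (for example net.core.default_qdisc=fq), with the TCP stack falling back to its internal per-socket pacing timer otherwise.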