// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */

/* WARNING: This implementation is not necessarily the same
 * as tcp_dctcp.c.  The purpose is mainly to test
 * the kernel BPF logic.
 */

#include <stddef.h>
#include <linux/bpf.h>
#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/tcp.h>
#include <errno.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "bpf_tcp_helpers.h"

char _license[] SEC("license") = "GPL";

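/* Globals shared with the user-space side of the selftest: "fallback" is a
 * rodata string the test may set before loading, while cc_res, tcp_cdg_res,
 * stg_result and ebusy_cnt report results back (presumably read by the test
 * after the run).
 */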
volatile const char fallback[TCP_CA_NAME_MAX];
const char bpf_dctcp[] = "bpf_dctcp";
const char tcp_cdg[] = "cdg";
char cc_res[TCP_CA_NAME_MAX];
int tcp_cdg_res = 0;
int stg_result = 0;
int ebusy_cnt = 0;

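/* Per-socket storage used by the test: dctcp_init() below reads the stored
 * value into stg_result and then deletes it.
 */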
struct {
	__uint(type, BPF_MAP_TYPE_SK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, int);
} sk_stg_map SEC(".maps");

#define DCTCP_MAX_ALPHA	1024U

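/* Per-connection DCTCP state, kept in the socket's inet_csk_ca() area. */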
struct dctcp {
	__u32 old_delivered;
	__u32 old_delivered_ce;
	__u32 prior_rcv_nxt;
	__u32 dctcp_alpha;
	__u32 next_seq;
	__u32 ce_state;
	__u32 loss_cwnd;
};

static unsigned int dctcp_shift_g = 4; /* g = 1/2^4 */
static unsigned int dctcp_alpha_on_init = DCTCP_MAX_ALPHA;

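/* Start a new measurement window for alpha: remember snd_nxt and the
 * current delivered/delivered_ce counters.
 */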
static __always_inline void dctcp_reset(const struct tcp_sock *tp,
					struct dctcp *ca)
{
	ca->next_seq = tp->snd_nxt;

	ca->old_delivered = tp->delivered;
	ca->old_delivered_ce = tp->delivered_ce;
}

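/* .init: either initialize the DCTCP state or, when ECN was not negotiated
 * and a fallback CC name was provided by the test, exercise switching the
 * socket's congestion control with bpf_setsockopt(TCP_CONGESTION).
 */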
SEC("struct_ops/dctcp_init")
void BPF_PROG(dctcp_init, struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct dctcp *ca = inet_csk_ca(sk);
	int *stg;

	if (!(tp->ecn_flags & TCP_ECN_OK) && fallback[0]) {
		/* Switch to fallback */
		if (bpf_setsockopt(sk, SOL_TCP, TCP_CONGESTION,
				   (void *)fallback, sizeof(fallback)) == -EBUSY)
			ebusy_cnt++;

		/* Switch back to myself; the recursively invoked dctcp_init()
		 * will get -EBUSY for all of its bpf_setsockopt(TCP_CONGESTION)
		 * calls, except the last "cdg" one.
		 */
		if (bpf_setsockopt(sk, SOL_TCP, TCP_CONGESTION,
				   (void *)bpf_dctcp, sizeof(bpf_dctcp)) == -EBUSY)
			ebusy_cnt++;

		/* Switch back to fallback */
		if (bpf_setsockopt(sk, SOL_TCP, TCP_CONGESTION,
				   (void *)fallback, sizeof(fallback)) == -EBUSY)
			ebusy_cnt++;

		/* Expecting -ENOTSUPP for tcp_cdg_res */
		tcp_cdg_res = bpf_setsockopt(sk, SOL_TCP, TCP_CONGESTION,
					     (void *)tcp_cdg, sizeof(tcp_cdg));
		bpf_getsockopt(sk, SOL_TCP, TCP_CONGESTION,
			       (void *)cc_res, sizeof(cc_res));
		return;
	}

	ca->prior_rcv_nxt = tp->rcv_nxt;
	ca->dctcp_alpha = min(dctcp_alpha_on_init, DCTCP_MAX_ALPHA);
	ca->loss_cwnd = 0;
	ca->ce_state = 0;

	stg = bpf_sk_storage_get(&sk_stg_map, (void *)tp, NULL, 0);
	if (stg) {
		stg_result = *stg;
		bpf_sk_storage_delete(&sk_stg_map, (void *)tp);
	}
	dctcp_reset(tp, ca);
}

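/* On congestion, scale cwnd by the ECN-marked fraction:
 * ssthresh = cwnd * (1 - a/2), where a = dctcp_alpha / DCTCP_MAX_ALPHA,
 * clamped to at least 2.
 */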
SEC("struct_ops/dctcp_ssthresh")
__u32 BPF_PROG(dctcp_ssthresh, struct sock *sk)
{
	struct dctcp *ca = inet_csk_ca(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	ca->loss_cwnd = tp->snd_cwnd;
	return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->dctcp_alpha) >> 11U), 2U);
}

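/* Once per RTT (when snd_una passes next_seq), fold the fraction of
 * CE-marked packets observed in the last window into the EWMA of alpha.
 */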
SEC("struct_ops/dctcp_update_alpha")
void BPF_PROG(dctcp_update_alpha, struct sock *sk, __u32 flags)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct dctcp *ca = inet_csk_ca(sk);

	/* Expired RTT */
	if (!before(tp->snd_una, ca->next_seq)) {
		__u32 delivered_ce = tp->delivered_ce - ca->old_delivered_ce;
		__u32 alpha = ca->dctcp_alpha;

		/* alpha = (1 - g) * alpha + g * F */

		alpha -= min_not_zero(alpha, alpha >> dctcp_shift_g);
		if (delivered_ce) {
			__u32 delivered = tp->delivered - ca->old_delivered;

			/* If dctcp_shift_g == 1, a 32-bit value would overflow
			 * after ~8M packets.
			 */
			delivered_ce <<= (10 - dctcp_shift_g);
			delivered_ce /= max(1U, delivered);

			alpha = min(alpha + delivered_ce, DCTCP_MAX_ALPHA);
		}
		ca->dctcp_alpha = alpha;
		dctcp_reset(tp, ca);
	}
}

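/* Record the cwnd at loss and halve ssthresh (never below 2). */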
static __always_inline void dctcp_react_to_loss(struct sock *sk)
{
	struct dctcp *ca = inet_csk_ca(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	ca->loss_cwnd = tp->snd_cwnd;
	tp->snd_ssthresh = max(tp->snd_cwnd >> 1U, 2U);
}

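/* React to a loss only on the first transition into Recovery. */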
SEC("struct_ops/dctcp_state")
void BPF_PROG(dctcp_state, struct sock *sk, __u8 new_state)
{
	if (new_state == TCP_CA_Recovery &&
	    new_state != BPF_CORE_READ_BITFIELD(inet_csk(sk), icsk_ca_state))
		dctcp_react_to_loss(sk);
	/* We handle RTO in dctcp_cwnd_event to ensure that we perform only
	 * one loss-adjustment per RTT.
	 */
}

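/* Mirror the CE state into TCP_ECN_DEMAND_CWR: while the last packet was
 * CE-marked, keep signalling ECE on ACKs (demand CWR from the peer);
 * otherwise stop.
 */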
static __always_inline void dctcp_ece_ack_cwr(struct sock *sk, __u32 ce_state)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (ce_state == 1)
		tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
	else
		tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
}

/* Minimal DCTCP CE state machine:
 *
 * S:	0 <- last pkt was non-CE
 *	1 <- last pkt was CE
 */
static __always_inline
void dctcp_ece_ack_update(struct sock *sk, enum tcp_ca_event evt,
			  __u32 *prior_rcv_nxt, __u32 *ce_state)
{
	__u32 new_ce_state = (evt == CA_EVENT_ECN_IS_CE) ? 1 : 0;

	if (*ce_state != new_ce_state) {
		/* CE state has changed, force an immediate ACK to
		 * reflect the new CE state. If an ACK was delayed,
		 * send that first to reflect the prior CE state.
		 */
		if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) {
			dctcp_ece_ack_cwr(sk, *ce_state);
			bpf_tcp_send_ack(sk, *prior_rcv_nxt);
		}
		inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
	}
	*prior_rcv_nxt = tcp_sk(sk)->rcv_nxt;
	*ce_state = new_ce_state;
	dctcp_ece_ack_cwr(sk, new_ce_state);
}

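/* Dispatch congestion-control events: track CE transitions and treat
 * CA_EVENT_LOSS (RTO) as a loss reaction.
 */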
SEC("struct_ops/dctcp_cwnd_event")
void BPF_PROG(dctcp_cwnd_event, struct sock *sk, enum tcp_ca_event ev)
{
	struct dctcp *ca = inet_csk_ca(sk);

	switch (ev) {
	case CA_EVENT_ECN_IS_CE:
	case CA_EVENT_ECN_NO_CE:
		dctcp_ece_ack_update(sk, ev, &ca->prior_rcv_nxt, &ca->ce_state);
		break;
	case CA_EVENT_LOSS:
		dctcp_react_to_loss(sk);
		break;
	default:
		/* Don't care for the rest. */
		break;
	}
}

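/* Undo a cwnd reduction: return the larger of the current cwnd and the
 * cwnd recorded at the last loss.
 */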
SEC("struct_ops/dctcp_cwnd_undo")
__u32 BPF_PROG(dctcp_cwnd_undo, struct sock *sk)
{
	const struct dctcp *ca = inet_csk_ca(sk);

	return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
}

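/* Reuse the kernel's Reno congestion avoidance through a kfunc declared with
 * __ksym; DCTCP only changes the reaction to ECN marks, not the basic
 * additive increase.
 */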
extern void tcp_reno_cong_avoid(struct sock *sk, __u32 ack, __u32 acked) __ksym;

SEC("struct_ops/dctcp_reno_cong_avoid")
void BPF_PROG(dctcp_cong_avoid, struct sock *sk, __u32 ack, __u32 acked)
{
	tcp_reno_cong_avoid(sk, ack, acked);
}

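/* Two congestion-control struct_ops are registered below; judging by its
 * name, "bpf_dctcp_nouse" is only registered and never selected, while
 * "bpf_dctcp" is the one the test actually exercises.
 */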
SEC(".struct_ops")
struct tcp_congestion_ops dctcp_nouse = {
	.init		= (void *)dctcp_init,
	.set_state	= (void *)dctcp_state,
	.flags		= TCP_CONG_NEEDS_ECN,
	.name		= "bpf_dctcp_nouse",
};

SEC(".struct_ops")
struct tcp_congestion_ops dctcp = {
	.init		= (void *)dctcp_init,
	.in_ack_event   = (void *)dctcp_update_alpha,
	.cwnd_event	= (void *)dctcp_cwnd_event,
	.ssthresh	= (void *)dctcp_ssthresh,
	.cong_avoid	= (void *)dctcp_cong_avoid,
	.undo_cwnd	= (void *)dctcp_cwnd_undo,
	.set_state	= (void *)dctcp_state,
	.flags		= TCP_CONG_NEEDS_ECN,
	.name		= "bpf_dctcp",
};

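/* A minimal, hypothetical user-space sketch for attaching this CA with
 * libbpf, assuming a generated skeleton "bpf_dctcp.skel.h" (names below are
 * illustrative, not part of this file):
 *
 *	struct bpf_dctcp *skel = bpf_dctcp__open_and_load();
 *	struct bpf_link *link;
 *
 *	link = bpf_map__attach_struct_ops(skel->maps.dctcp);
 *	// A TCP socket then selects this CA with
 *	// setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, "bpf_dctcp", 9).
 */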