// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */

/* WARNING: This implementation is not necessarily the same
 * as the tcp_dctcp.c.  The purpose is mainly for testing
 * the kernel BPF logic.
 */

#include <stddef.h>
#include <linux/bpf.h>
#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/tcp.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>	/* for BPF_CORE_READ_BITFIELD() below */
#include "bpf_tcp_helpers.h"

char _license[] SEC("license") = "GPL";

int stg_result = 0;

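/* Per-socket storage used by the selftest: the loader seeds a value into
 * this socket's storage, dctcp_init() copies it into the global stg_result
 * and deletes it, so userspace can verify that init ran on the right socket.
 */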
struct {
	__uint(type, BPF_MAP_TYPE_SK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, int);
} sk_stg_map SEC(".maps");

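/* alpha is kept in fixed point: DCTCP_MAX_ALPHA (1024) represents 1.0,
 * so e.g. alpha == 512 means half of the last window was CE-marked.
 */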
#define DCTCP_MAX_ALPHA	1024U

struct dctcp {
	__u32 old_delivered;
	__u32 old_delivered_ce;
	__u32 prior_rcv_nxt;
	__u32 dctcp_alpha;
	__u32 next_seq;
	__u32 ce_state;
	__u32 loss_cwnd;
};

static unsigned int dctcp_shift_g = 4; /* g = 1/2^4 */
static unsigned int dctcp_alpha_on_init = DCTCP_MAX_ALPHA;

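/* Start a new observation window: remember where it ends (snd_nxt) and
 * snapshot the delivered/delivered_ce counters it will be measured against.
 */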
static __always_inline void dctcp_reset(const struct tcp_sock *tp,
					struct dctcp *ca)
{
	ca->next_seq = tp->snd_nxt;

	ca->old_delivered = tp->delivered;
	ca->old_delivered_ce = tp->delivered_ce;
}

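/* .init: seed the per-connection DCTCP state. The sk_stg_map lookup below
 * exists only for the selftest; a production CC would not report back this way.
 */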
SEC("struct_ops/dctcp_init")
void BPF_PROG(dctcp_init, struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct dctcp *ca = inet_csk_ca(sk);
	int *stg;

	ca->prior_rcv_nxt = tp->rcv_nxt;
	ca->dctcp_alpha = min(dctcp_alpha_on_init, DCTCP_MAX_ALPHA);
	ca->loss_cwnd = 0;
	ca->ce_state = 0;

	stg = bpf_sk_storage_get(&sk_stg_map, (void *)tp, NULL, 0);
	if (stg) {
		stg_result = *stg;
		bpf_sk_storage_delete(&sk_stg_map, (void *)tp);
	}
	dctcp_reset(tp, ca);
}

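/* .ssthresh: back off in proportion to the observed marking rate. With
 * alpha scaled by 1024, (cwnd * alpha) >> 11 == cwnd * (alpha/1024) / 2,
 * i.e. the DCTCP rule cwnd <- cwnd * (1 - alpha/2), floored at 2 packets.
 */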
SEC("struct_ops/dctcp_ssthresh")
__u32 BPF_PROG(dctcp_ssthresh, struct sock *sk)
{
	struct dctcp *ca = inet_csk_ca(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	ca->loss_cwnd = tp->snd_cwnd;
	return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->dctcp_alpha) >> 11U), 2U);
}

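/* .in_ack_event: once per window of data (roughly one RTT), fold the
 * fraction F of CE-marked packets into the moving average
 * alpha = (1 - g) * alpha + g * F.
 */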
SEC("struct_ops/dctcp_update_alpha")
void BPF_PROG(dctcp_update_alpha, struct sock *sk, __u32 flags)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct dctcp *ca = inet_csk_ca(sk);

	/* Expired RTT */
	if (!before(tp->snd_una, ca->next_seq)) {
		__u32 delivered_ce = tp->delivered_ce - ca->old_delivered_ce;
		__u32 alpha = ca->dctcp_alpha;

		/* alpha = (1 - g) * alpha + g * F */

		alpha -= min_not_zero(alpha, alpha >> dctcp_shift_g);
		if (delivered_ce) {
			__u32 delivered = tp->delivered - ca->old_delivered;

			/* If dctcp_shift_g == 1, a 32bit value would overflow
			 * after 8 M packets.
			 */
			delivered_ce <<= (10 - dctcp_shift_g);
			delivered_ce /= max(1U, delivered);

			alpha = min(alpha + delivered_ce, DCTCP_MAX_ALPHA);
		}
		ca->dctcp_alpha = alpha;
		dctcp_reset(tp, ca);
	}
}

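/* On packet loss fall back to a Reno-style multiplicative decrease:
 * halve cwnd (ssthresh = cwnd/2, at least 2) instead of using alpha.
 */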
static __always_inline void dctcp_react_to_loss(struct sock *sk)
{
	struct dctcp *ca = inet_csk_ca(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	ca->loss_cwnd = tp->snd_cwnd;
	tp->snd_ssthresh = max(tp->snd_cwnd >> 1U, 2U);
}

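/* .set_state: entering Recovery (fast retransmit) counts as one loss event;
 * comparing against the current ca_state keeps us from reacting twice
 * within the same episode.
 */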
SEC("struct_ops/dctcp_state")
void BPF_PROG(dctcp_state, struct sock *sk, __u8 new_state)
{
	if (new_state == TCP_CA_Recovery &&
	    new_state != BPF_CORE_READ_BITFIELD(inet_csk(sk), icsk_ca_state))
		dctcp_react_to_loss(sk);
	/* We handle RTO in dctcp_cwnd_event to ensure that we perform only
	 * one loss-adjustment per cwnd.
	 */
}

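/* Echo the current CE state to the sender: while TCP_ECN_DEMAND_CWR is set,
 * outgoing ACKs keep ECE set until the sender responds with CWR.
 */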
static __always_inline void dctcp_ece_ack_cwr(struct sock *sk, __u32 ce_state)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (ce_state == 1)
		tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
	else
		tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
}

/* Minimal DCTCP CE state machine:
 *
 * S:	0 <- last pkt was non-CE
 *	1 <- last pkt was CE
 */
static __always_inline
void dctcp_ece_ack_update(struct sock *sk, enum tcp_ca_event evt,
			  __u32 *prior_rcv_nxt, __u32 *ce_state)
{
	__u32 new_ce_state = (evt == CA_EVENT_ECN_IS_CE) ? 1 : 0;

	if (*ce_state != new_ce_state) {
		/* CE state has changed, force an immediate ACK to
		 * reflect the new CE state. If an ACK was delayed,
		 * send that first to reflect the prior CE state.
		 */
		if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) {
			dctcp_ece_ack_cwr(sk, *ce_state);
			bpf_tcp_send_ack(sk, *prior_rcv_nxt);
		}
		inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
	}
	*prior_rcv_nxt = tcp_sk(sk)->rcv_nxt;
	*ce_state = new_ce_state;
	dctcp_ece_ack_cwr(sk, new_ce_state);
}

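/* .cwnd_event: route CE/no-CE transitions into the CE state machine and
 * treat an explicit loss event (RTO) as a loss reaction.
 */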
SEC("struct_ops/dctcp_cwnd_event")
void BPF_PROG(dctcp_cwnd_event, struct sock *sk, enum tcp_ca_event ev)
{
	struct dctcp *ca = inet_csk_ca(sk);

	switch (ev) {
	case CA_EVENT_ECN_IS_CE:
	case CA_EVENT_ECN_NO_CE:
		dctcp_ece_ack_update(sk, ev, &ca->prior_rcv_nxt, &ca->ce_state);
		break;
	case CA_EVENT_LOSS:
		dctcp_react_to_loss(sk);
		break;
	default:
		/* Don't care for the rest. */
		break;
	}
}

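/* .undo_cwnd: if the loss turns out to be spurious, restore the cwnd saved
 * in loss_cwnd rather than staying at the reduced value.
 */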
SEC("struct_ops/dctcp_cwnd_undo")
__u32 BPF_PROG(dctcp_cwnd_undo, struct sock *sk)
{
	const struct dctcp *ca = inet_csk_ca(sk);

	return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
}

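/* kfunc declaration: __ksym lets this BPF program call the kernel's own
 * tcp_reno_cong_avoid() instead of reimplementing Reno growth in BPF.
 */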
extern void tcp_reno_cong_avoid(struct sock *sk, __u32 ack, __u32 acked) __ksym;

SEC("struct_ops/dctcp_reno_cong_avoid")
void BPF_PROG(dctcp_cong_avoid, struct sock *sk, __u32 ack, __u32 acked)
{
	tcp_reno_cong_avoid(sk, ack, acked);
}

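/* A second, minimal instance registered as "bpf_dctcp_nouse", presumably to
 * exercise registering a struct_ops that no socket ever switches to.
 */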
SEC(".struct_ops")
struct tcp_congestion_ops dctcp_nouse = {
	.init		= (void *)dctcp_init,
	.set_state	= (void *)dctcp_state,
	.flags		= TCP_CONG_NEEDS_ECN,
	.name		= "bpf_dctcp_nouse",
};

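/* The fully wired-up DCTCP instance, selectable from userspace by the name
 * "bpf_dctcp" via setsockopt(TCP_CONGESTION) once attached.
 */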
SEC(".struct_ops")
struct tcp_congestion_ops dctcp = {
	.init		= (void *)dctcp_init,
	.in_ack_event	= (void *)dctcp_update_alpha,
	.cwnd_event	= (void *)dctcp_cwnd_event,
	.ssthresh	= (void *)dctcp_ssthresh,
	.cong_avoid	= (void *)dctcp_cong_avoid,
	.undo_cwnd	= (void *)dctcp_cwnd_undo,
	.set_state	= (void *)dctcp_state,
	.flags		= TCP_CONG_NEEDS_ECN,
	.name		= "bpf_dctcp",
};

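/* Usage sketch from the loader side (assumptions: a libbpf skeleton named
 * bpf_dctcp.skel.h and a hypothetical TCP socket fd; not part of this file):
 *
 *	struct bpf_dctcp *skel = bpf_dctcp__open_and_load();
 *	struct bpf_link *link = bpf_map__attach_struct_ops(skel->maps.dctcp);
 *	setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION,
 *		   "bpf_dctcp", sizeof("bpf_dctcp") - 1);
 */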