// SPDX-License-Identifier: GPL-2.0-only
/*
 * Pluggable TCP congestion control support and newReno
 * congestion control.
 * Based on ideas from I/O scheduler support and Web100
 *
 * Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org>
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/jhash.h>
#include <net/tcp.h>
#include <trace/events/tcp.h>

static DEFINE_SPINLOCK(tcp_cong_list_lock);
static LIST_HEAD(tcp_cong_list);
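
/* Simple linear search, don't expect many entries! */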
struct tcp_congestion_ops *tcp_ca_find(const char *name)
{
	struct tcp_congestion_ops *e;

	list_for_each_entry_rcu(e, &tcp_cong_list, list) {
		if (strcmp(e->name, name) == 0)
			return e;
	}

	return NULL;
}

void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	trace_tcp_cong_state_set(sk, ca_state);

	if (icsk->icsk_ca_ops->set_state)
		icsk->icsk_ca_ops->set_state(sk, ca_state);
	icsk->icsk_ca_state = ca_state;
}
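
/* Must be called with rcu lock held */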
static struct tcp_congestion_ops *tcp_ca_find_autoload(struct net *net,
						       const char *name)
{
	struct tcp_congestion_ops *ca = tcp_ca_find(name);

#ifdef CONFIG_MODULES
	if (!ca && capable(CAP_NET_ADMIN)) {
		rcu_read_unlock();
		request_module("tcp_%s", name);
		rcu_read_lock();
		ca = tcp_ca_find(name);
	}
#endif
	return ca;
}
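
/* Simple linear search, not much in here. */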
struct tcp_congestion_ops *tcp_ca_find_key(u32 key)
{
	struct tcp_congestion_ops *e;

	list_for_each_entry_rcu(e, &tcp_cong_list, list) {
		if (e->key == key)
			return e;
	}

	return NULL;
}
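
/*
 * Attach new congestion control algorithm to the list
 * of available options.
 */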
int tcp_register_congestion_control(struct tcp_congestion_ops *ca)
{
	int ret = 0;

	/* All algorithms must implement the ssthresh and undo_cwnd ops,
	 * plus either cong_avoid or cong_control.
	 */
	if (!ca->ssthresh || !ca->undo_cwnd ||
	    !(ca->cong_avoid || ca->cong_control)) {
		pr_err("%s does not implement required ops\n", ca->name);
		return -EINVAL;
	}

	ca->key = jhash(ca->name, sizeof(ca->name), strlen(ca->name));

	spin_lock(&tcp_cong_list_lock);
	if (ca->key == TCP_CA_UNSPEC || tcp_ca_find_key(ca->key)) {
		pr_notice("%s already registered or non-unique key\n",
			  ca->name);
		ret = -EEXIST;
	} else {
		list_add_tail_rcu(&ca->list, &tcp_cong_list);
		pr_debug("%s registered\n", ca->name);
	}
	spin_unlock(&tcp_cong_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(tcp_register_congestion_control);
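
/*
 * Remove congestion control algorithm, called from
 * the module's remove function.  Module ref counts are used
 * to ensure that module is not removed while it is used.
 */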
void tcp_unregister_congestion_control(struct tcp_congestion_ops *ca)
{
	spin_lock(&tcp_cong_list_lock);
	list_del_rcu(&ca->list);
	spin_unlock(&tcp_cong_list_lock);

	/* Wait for outstanding readers to complete before the
	 * module gets removed entirely.
	 *
	 * A try_module_get() should fail by now as our module is
	 * in "going" state since no refs are held anymore and
	 * module_exit() handler being called.
	 */
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(tcp_unregister_congestion_control);
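
/* Look up a congestion control by name and return its key; may load
 * the module via tcp_ca_find_autoload(), hence the might_sleep().
 */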
u32 tcp_ca_get_key_by_name(struct net *net, const char *name, bool *ecn_ca)
{
	const struct tcp_congestion_ops *ca;
	u32 key = TCP_CA_UNSPEC;

	might_sleep();

	rcu_read_lock();
	ca = tcp_ca_find_autoload(net, name);
	if (ca) {
		key = ca->key;
		*ecn_ca = ca->flags & TCP_CONG_NEEDS_ECN;
	}
	rcu_read_unlock();

	return key;
}

char *tcp_ca_get_name_by_key(u32 key, char *buffer)
{
	const struct tcp_congestion_ops *ca;
	char *ret = NULL;

	rcu_read_lock();
	ca = tcp_ca_find_key(key);
	if (ca)
		ret = strncpy(buffer, ca->name,
			      TCP_CA_NAME_MAX);
	rcu_read_unlock();

	return ret;
}
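
/* Assign choice of congestion control. */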
void tcp_assign_congestion_control(struct sock *sk)
{
	struct net *net = sock_net(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_congestion_ops *ca;

	rcu_read_lock();
	ca = rcu_dereference(net->ipv4.tcp_congestion_control);
	if (unlikely(!bpf_try_module_get(ca, ca->owner)))
		ca = &tcp_reno;
	icsk->icsk_ca_ops = ca;
	rcu_read_unlock();

	memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
	if (ca->flags & TCP_CONG_NEEDS_ECN)
		INET_ECN_xmit(sk);
	else
		INET_ECN_dontxmit(sk);
}
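
/* Initialize the congestion control module attached to the socket. */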
void tcp_init_congestion_control(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_sk(sk)->prior_ssthresh = 0;
	if (icsk->icsk_ca_ops->init)
		icsk->icsk_ca_ops->init(sk);
	if (tcp_ca_needs_ecn(sk))
		INET_ECN_xmit(sk);
	else
		INET_ECN_dontxmit(sk);
	icsk->icsk_ca_initialized = 1;
}
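
/* Switch to a new congestion control and reinitialize it, unless the
 * socket is closed or listening.
 */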
static void tcp_reinit_congestion_control(struct sock *sk,
					  const struct tcp_congestion_ops *ca)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_cleanup_congestion_control(sk);
	icsk->icsk_ca_ops = ca;
	icsk->icsk_ca_setsockopt = 1;
	memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));

	if (ca->flags & TCP_CONG_NEEDS_ECN)
		INET_ECN_xmit(sk);
	else
		INET_ECN_dontxmit(sk);

	if (!((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
		tcp_init_congestion_control(sk);
}
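
/* Manage refcounts on socket close. */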
void tcp_cleanup_congestion_control(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->release)
		icsk->icsk_ca_ops->release(sk);
	bpf_module_put(icsk->icsk_ca_ops, icsk->icsk_ca_ops->owner);
}
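
/* Used by sysctl to change default congestion control */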
int tcp_set_default_congestion_control(struct net *net, const char *name)
{
	struct tcp_congestion_ops *ca;
	const struct tcp_congestion_ops *prev;
	int ret;

	rcu_read_lock();
	ca = tcp_ca_find_autoload(net, name);
	if (!ca) {
		ret = -ENOENT;
	} else if (!bpf_try_module_get(ca, ca->owner)) {
		ret = -EBUSY;
	} else if (!net_eq(net, &init_net) &&
		   !(ca->flags & TCP_CONG_NON_RESTRICTED)) {
		/* Only the init netns may set a restricted algorithm
		 * as its default.
		 */
		ret = -EPERM;
	} else {
		prev = xchg(&net->ipv4.tcp_congestion_control, ca);
		if (prev)
			bpf_module_put(prev, prev->owner);

		/* The default must be usable in every netns from now on. */
		ca->flags |= TCP_CONG_NON_RESTRICTED;
		ret = 0;
	}
	rcu_read_unlock();

	return ret;
}
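
/* Set default value from kernel configuration at bootup */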
static int __init tcp_congestion_default(void)
{
	return tcp_set_default_congestion_control(&init_net,
						  CONFIG_DEFAULT_TCP_CONG);
}
late_initcall(tcp_congestion_default);
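
/* Build string with list of available congestion control values */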
void tcp_get_available_congestion_control(char *buf, size_t maxlen)
{
	struct tcp_congestion_ops *ca;
	size_t offs = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
		offs += snprintf(buf + offs, maxlen - offs,
				 "%s%s",
				 offs == 0 ? "" : " ", ca->name);

		if (WARN_ON_ONCE(offs >= maxlen))
			break;
	}
	rcu_read_unlock();
}
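
/* Get current default congestion control */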
void tcp_get_default_congestion_control(struct net *net, char *name)
{
	const struct tcp_congestion_ops *ca;

	rcu_read_lock();
	ca = rcu_dereference(net->ipv4.tcp_congestion_control);
	strncpy(name, ca->name, TCP_CA_NAME_MAX);
	rcu_read_unlock();
}
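
/* Build string with list of non-restricted congestion control values */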
void tcp_get_allowed_congestion_control(char *buf, size_t maxlen)
{
	struct tcp_congestion_ops *ca;
	size_t offs = 0;

	*buf = '\0';
	rcu_read_lock();
	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
		if (!(ca->flags & TCP_CONG_NON_RESTRICTED))
			continue;
		offs += snprintf(buf + offs, maxlen - offs,
				 "%s%s",
				 offs == 0 ? "" : " ", ca->name);

		if (WARN_ON_ONCE(offs >= maxlen))
			break;
	}
	rcu_read_unlock();
}
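
/* Change list of non-restricted congestion control */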
int tcp_set_allowed_congestion_control(char *val)
{
	struct tcp_congestion_ops *ca;
	char *saved_clone, *clone, *name;
	int ret = 0;

	saved_clone = clone = kstrdup(val, GFP_USER);
	if (!clone)
		return -ENOMEM;

	spin_lock(&tcp_cong_list_lock);

	/* pass 1: check for bad entries */
	while ((name = strsep(&clone, " ")) && *name) {
		ca = tcp_ca_find(name);
		if (!ca) {
			ret = -ENOENT;
			goto out;
		}
	}

	/* pass 2: clear old values */
	list_for_each_entry_rcu(ca, &tcp_cong_list, list)
		ca->flags &= ~TCP_CONG_NON_RESTRICTED;

	/* pass 3: mark as allowed */
	while ((name = strsep(&val, " ")) && *name) {
		ca = tcp_ca_find(name);
		WARN_ON(!ca);
		if (ca)
			ca->flags |= TCP_CONG_NON_RESTRICTED;
	}
out:
	spin_unlock(&tcp_cong_list_lock);
	kfree(saved_clone);

	return ret;
}
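
/* Change congestion control for socket. If load is false, then it is the
 * responsibility of the caller to call tcp_init_congestion_control or
 * tcp_reinit_congestion_control (if the current congestion control was
 * already initialized).
 */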
int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
			       bool cap_net_admin)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_congestion_ops *ca;
	int err = 0;

	if (icsk->icsk_ca_dst_locked)
		return -EPERM;

	rcu_read_lock();
	if (!load)
		ca = tcp_ca_find(name);
	else
		ca = tcp_ca_find_autoload(sock_net(sk), name);

	/* No change asking for existing value */
	if (ca == icsk->icsk_ca_ops) {
		icsk->icsk_ca_setsockopt = 1;
		goto out;
	}

	if (!ca)
		err = -ENOENT;
	else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) || cap_net_admin))
		err = -EPERM;
	else if (!bpf_try_module_get(ca, ca->owner))
		err = -EBUSY;
	else
		tcp_reinit_congestion_control(sk, ca);
 out:
	rcu_read_unlock();
	return err;
}
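
/* Slow start is used when congestion window is no greater than the slow start
 * threshold. We base on RFC2581 and also handle stretch ACKs properly.
 * We do not implement RFC3465 Appropriate Byte Counting (ABC) per se but
 * something better;) a packet is only considered (s)acked in its entirety to
 * defend the ACK attacks described in the RFC. Slow start processes a stretch
 * ACK of degree N as if N acks of degree 1 are received back to back except
 * ABC caps N to 2. Slow start exits when cwnd grows over ssthresh and
 * returns the leftover acks to adjust cwnd in congestion avoidance mode.
 */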
u32 tcp_slow_start(struct tcp_sock *tp, u32 acked)
{
	u32 cwnd = min(tcp_snd_cwnd(tp) + acked, tp->snd_ssthresh);

	acked -= cwnd - tcp_snd_cwnd(tp);
	tcp_snd_cwnd_set(tp, min(cwnd, tp->snd_cwnd_clamp));

	return acked;
}
EXPORT_SYMBOL_GPL(tcp_slow_start);
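
/* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd (or alternative w),
 * for every packet that was ACKed.
 */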
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked)
{
	/* If credits accumulated at a higher w, apply them gently now. */
	if (tp->snd_cwnd_cnt >= w) {
		tp->snd_cwnd_cnt = 0;
		tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1);
	}

	tp->snd_cwnd_cnt += acked;
	if (tp->snd_cwnd_cnt >= w) {
		u32 delta = tp->snd_cwnd_cnt / w;

		tp->snd_cwnd_cnt -= delta * w;
		tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + delta);
	}
	tcp_snd_cwnd_set(tp, min(tcp_snd_cwnd(tp), tp->snd_cwnd_clamp));
}
EXPORT_SYMBOL_GPL(tcp_cong_avoid_ai);
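
/*
 * TCP Reno congestion control
 * This is special case used for fallback as well.
 */
/* This is Jacobson's slow start and congestion avoidance.
 * SIGCOMM '88, p. 328.
 */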
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tcp_is_cwnd_limited(sk))
		return;

	/* In "safe" area, increase. */
	if (tcp_in_slow_start(tp)) {
		acked = tcp_slow_start(tp, acked);
		if (!acked)
			return;
	}
	/* In dangerous area, increase slowly. */
	tcp_cong_avoid_ai(tp, tcp_snd_cwnd(tp), acked);
}
EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid);
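
/* Slow start threshold is half the congestion window (min 2) */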
u32 tcp_reno_ssthresh(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	return max(tcp_snd_cwnd(tp) >> 1U, 2U);
}
EXPORT_SYMBOL_GPL(tcp_reno_ssthresh);
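
/* Undo a cwnd reduction: restore the larger of the current and
 * the pre-loss congestion window.
 */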
u32 tcp_reno_undo_cwnd(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	return max(tcp_snd_cwnd(tp), tp->prior_cwnd);
}
EXPORT_SYMBOL_GPL(tcp_reno_undo_cwnd);

struct tcp_congestion_ops tcp_reno = {
	.flags		= TCP_CONG_NON_RESTRICTED,
	.name		= "reno",
	.owner		= THIS_MODULE,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.undo_cwnd	= tcp_reno_undo_cwnd,
};