// SPDX-License-Identifier: GPL-2.0-only
/*
 *  net/dccp/output.c
 *
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 */

#include <linux/dccp.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>

#include <net/inet_sock.h>
#include <net/sock.h>

#include "ackvec.h"
#include "ccid.h"
#include "dccp.h"

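/* An Ack has just gone out: the delayed-Ack timer has nothing left to do. */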
static inline void dccp_event_ack_sent(struct sock *sk)
{
	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}

/* enqueue @skb on sk_send_head for retransmission, return clone to send now */
static struct sk_buff *dccp_skb_entail(struct sock *sk, struct sk_buff *skb)
{
	skb_set_owner_w(skb, sk);
	WARN_ON(sk->sk_send_head);
	sk->sk_send_head = skb;
	return skb_clone(sk->sk_send_head, gfp_any());
}

/*
 * All SKB's seen here are completely headerless. It is our
 * job to build the DCCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 */
static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
{
	if (likely(skb != NULL)) {
		struct inet_sock *inet = inet_sk(sk);
		const struct inet_connection_sock *icsk = inet_csk(sk);
		struct dccp_sock *dp = dccp_sk(sk);
		struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
		struct dccp_hdr *dh;
		/* XXX For now we're using only 48 bits sequence numbers */
		const u32 dccp_header_size = sizeof(*dh) +
					     sizeof(struct dccp_hdr_ext) +
					  dccp_packet_hdr_len(dcb->dccpd_type);
		int err, set_ack = 1;
		u64 ackno = dp->dccps_gsr;

		/*
		 * Increment GSS here already in case the option code needs it.
		 * Update GSS for real only if option processing below succeeds.
		 */
		dcb->dccpd_seq = ADD48(dp->dccps_gss, 1);

		switch (dcb->dccpd_type) {
		case DCCP_PKT_DATA:
			set_ack = 0;
			fallthrough;
		case DCCP_PKT_DATAACK:
		case DCCP_PKT_RESET:
			break;

		case DCCP_PKT_REQUEST:
			set_ack = 0;
			/* Use ISS on the first (non-retransmitted) Request. */
			if (icsk->icsk_retransmits == 0)
				dcb->dccpd_seq = dp->dccps_iss;
			fallthrough;

		case DCCP_PKT_SYNC:
		case DCCP_PKT_SYNCACK:
			ackno = dcb->dccpd_ack_seq;
			fallthrough;
		default:
			/*
			 * Set owner/destructor: some skbs are allocated via
			 * alloc_skb (e.g. when retransmission is done), and
			 * because of this, we need to set the owner here.
			 */
			WARN_ON(skb->sk);
			skb_set_owner_w(skb, sk);
			break;
		}

		if (dccp_insert_options(sk, skb)) {
			kfree_skb(skb);
			return -EPROTO;
		}

		/* Build DCCP header and checksum it. */
		dh = dccp_zeroed_hdr(skb, dccp_header_size);
		dh->dccph_type	= dcb->dccpd_type;
		dh->dccph_sport	= inet->inet_sport;
		dh->dccph_dport	= inet->inet_dport;
		dh->dccph_doff	= (dccp_header_size + dcb->dccpd_opt_len) / 4;
		dh->dccph_ccval	= dcb->dccpd_ccval;
		dh->dccph_cscov	= dp->dccps_pcslen;
		/* XXX For now we're using only 48 bits sequence numbers */
		dh->dccph_x	= 1;

		dccp_update_gss(sk, dcb->dccpd_seq);
		dccp_hdr_set_seq(dh, dp->dccps_gss);
		if (set_ack)
			dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), ackno);

		switch (dcb->dccpd_type) {
		case DCCP_PKT_REQUEST:
			dccp_hdr_request(skb)->dccph_req_service =
							dp->dccps_service;
			/*
			 * Limit Ack window to ISS <= P.ackno <= GSS, so that
			 * only Responses to Requests we sent are considered.
			 */
			dp->dccps_awl = dp->dccps_iss;
			break;
		case DCCP_PKT_RESET:
			dccp_hdr_reset(skb)->dccph_reset_code =
						dcb->dccpd_reset_code;
			break;
		}

		icsk->icsk_af_ops->send_check(sk, skb);

		if (set_ack)
			dccp_event_ack_sent(sk);

		DCCP_INC_STATS(DCCP_MIB_OUTSEGS);

		err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl);
		return net_xmit_eval(err);
	}
	return -ENOBUFS;
}

/**
 * dccp_determine_ccmps  -  Find out about CCID-specific packet-size limits
 * @dp: socket to find packet size limits of
 *
 * We only consider the HC-sender CCID for setting the CCMPS (RFC 4340, 14.),
 * since the RX CCID is restricted to feedback packets (Acks), which are small
 * in comparison with the data traffic. A value of 0 means "no current CCMPS".
 */
static u32 dccp_determine_ccmps(const struct dccp_sock *dp)
{
	const struct ccid *tx_ccid = dp->dccps_hc_tx_ccid;

	if (tx_ccid == NULL || tx_ccid->ccid_ops == NULL)
		return 0;
	return tx_ccid->ccid_ops->ccid_ccmps;
}

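/*
 * Compute and cache the current Maximum Packet Size: start from the path MTU
 * (capped by the CCID's CCMPS where set), subtract IP/DCCP header lengths,
 * then reserve headroom for options that may ride on DCCP-Data packets.
 */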
unsigned int dccp_sync_mss(struct sock *sk, u32 pmtu)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct dccp_sock *dp = dccp_sk(sk);
	u32 ccmps = dccp_determine_ccmps(dp);
	u32 cur_mps = ccmps ? min(pmtu, ccmps) : pmtu;

	/* Account for header lengths and IPv4/v6 option overhead */
	cur_mps -= (icsk->icsk_af_ops->net_header_len + icsk->icsk_ext_hdr_len +
		    sizeof(struct dccp_hdr) + sizeof(struct dccp_hdr_ext));

	/*
	 * Leave enough headroom for common DCCP header options.
	 * This only considers options which may appear on DCCP-Data packets, as
	 * per table 3 in RFC 4340, 5.8. When running out of space for other
	 * options (eg. Ack Vector which can take up to 255 bytes), it is better
	 * to schedule a separate Ack. Thus we leave headroom for the following:
	 *  - 1 byte for Slow Receiver (11.6)
	 *  - 6 bytes for Timestamp (13.1)
	 *  - 10 bytes for Timestamp Echo (13.3)
	 *  - 8 bytes for NDP count (7.7, when activated)
	 *  - 6 bytes for Data Checksum (9.3)
	 *  - 4 bytes for Ack Vector mandatory-element (11.4)
	 */
	cur_mps -= roundup(1 + 6 + 10 + dp->dccps_send_ndp_count * 8 + 6 +
			   (dp->dccps_hc_rx_ackvec ? DCCPAV_MIN_OPTLEN : 0), 4);

	/* And store cached results */
	icsk->icsk_pmtu_cookie = pmtu;
	dp->dccps_mss_cache = cur_mps;

	return cur_mps;
}

EXPORT_SYMBOL_GPL(dccp_sync_mss);

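/* Wake up threads blocked in poll/write; notify async waiters if writeable. */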
void dccp_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible(&wq->wait);
	/* Should agree with poll, otherwise some programs break */
	if (sock_writeable(sk))
		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);

	rcu_read_unlock();
}

/**
 * dccp_wait_for_ccid  -  Await CCID send permission
 * @sk:    socket to wait for
 * @delay: timeout in jiffies
 *
 * This is used by CCIDs which need to delay the send time in process context.
 */
static int dccp_wait_for_ccid(struct sock *sk, unsigned long delay)
{
	DEFINE_WAIT(wait);
	long remaining;

	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
	sk->sk_write_pending++;
	release_sock(sk);

	remaining = schedule_timeout(delay);

	lock_sock(sk);
	sk->sk_write_pending--;
	finish_wait(sk_sleep(sk), &wait);

	if (signal_pending(current) || sk->sk_err)
		return -1;
	return remaining;
}

/**
 * dccp_xmit_packet  -  Send data packet under control of CCID
 * @sk: socket to send data packet on
 *
 * Transmits next-queued payload and informs CCID to account for the packet.
 */
static void dccp_xmit_packet(struct sock *sk)
{
	int err, len;
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb = dccp_qpolicy_pop(sk);

	if (unlikely(skb == NULL))
		return;
	len = skb->len;

	if (sk->sk_state == DCCP_PARTOPEN) {
		const u32 cur_mps = dp->dccps_mss_cache - DCCP_FEATNEG_OVERHEAD;
		/*
		 * See 8.1.5 - Handshake Completion.
		 *
		 * For robustness we resend Confirm options until the client has
		 * entered OPEN. During the initial feature negotiation, the MPS
		 * is smaller than usual, reduced by the Change/Confirm options.
		 */
		if (!list_empty(&dp->dccps_featneg) && len > cur_mps) {
			DCCP_WARN("Payload too large (%d) for featneg.\n", len);
			dccp_send_ack(sk);
			dccp_feat_list_purge(&dp->dccps_featneg);
		}

		inet_csk_schedule_ack(sk);
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
					  inet_csk(sk)->icsk_rto,
					  DCCP_RTO_MAX);
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATAACK;
	} else if (dccp_ack_pending(sk)) {
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATAACK;
	} else {
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATA;
	}

	err = dccp_transmit_skb(sk, skb);
	if (err)
		dccp_pr_debug("transmit_skb() returned err=%d\n", err);
	/*
	 * Register this one as sent even if an error occurred. To the remote
	 * end a local packet drop is indistinguishable from network loss, i.e.
	 * any local drop will eventually be reported via receiver feedback.
	 */
	ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, len);

	/*
	 * If the CCID needs to transfer additional header options out-of-band
	 * (e.g. Ack Vectors or feature-negotiation options), it activates this
	 * flag to schedule a Sync. The Sync will automatically incorporate all
	 * currently pending header options, thus clearing the backlog.
	 */
	if (dp->dccps_sync_scheduled)
		dccp_send_sync(sk, dp->dccps_gsr, DCCP_PKT_SYNC);
}

/**
 * dccp_flush_write_queue  -  Drain queue at end of connection
 * @sk: socket to be drained
 * @time_budget: time allowed to drain the queue
 *
 * Since dccp_sendmsg queues packets without waiting for them to be sent, it may
 * happen that the TX queue is not empty at the end of a connection. We give the
 * HC-sender CCID a grace period of up to @time_budget jiffies. If this function
 * returns with a non-empty write queue, it will be purged later.
 */
void dccp_flush_write_queue(struct sock *sk, long *time_budget)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;
	long delay, rc;

	while (*time_budget > 0 && (skb = skb_peek(&sk->sk_write_queue))) {
		rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);

		switch (ccid_packet_dequeue_eval(rc)) {
		case CCID_PACKET_WILL_DEQUEUE_LATER:
			/*
			 * If the CCID determines when to send, the next sending
			 * time is unknown or the CCID may not even send again
			 * (e.g. remote host crashes or lost Ack packets).
			 */
			DCCP_WARN("CCID did not manage to send all packets\n");
			return;
		case CCID_PACKET_DELAY:
			delay = msecs_to_jiffies(rc);
			if (delay > *time_budget)
				return;
			rc = dccp_wait_for_ccid(sk, delay);
			if (rc < 0)
				return;
			*time_budget -= (delay - rc);
			/* check again if we can send now */
			break;
		case CCID_PACKET_SEND_AT_ONCE:
			dccp_xmit_packet(sk);
			break;
		case CCID_PACKET_ERR:
			skb_dequeue(&sk->sk_write_queue);
			kfree_skb(skb);
			dccp_pr_debug("packet discarded due to err=%ld\n", rc);
		}
	}
}

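/*
 * Service the TX queue: ask the CCID about each queued packet and either send
 * it at once, re-arm the xmit timer for a later attempt, or drop it on error.
 */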
void dccp_write_xmit(struct sock *sk)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;

	while ((skb = dccp_qpolicy_top(sk))) {
		int rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);

		switch (ccid_packet_dequeue_eval(rc)) {
		case CCID_PACKET_WILL_DEQUEUE_LATER:
			return;
		case CCID_PACKET_DELAY:
			sk_reset_timer(sk, &dp->dccps_xmit_timer,
				       jiffies + msecs_to_jiffies(rc));
			return;
		case CCID_PACKET_SEND_AT_ONCE:
			dccp_xmit_packet(sk);
			break;
		case CCID_PACKET_ERR:
			dccp_qpolicy_drop(sk, skb);
			dccp_pr_debug("packet discarded due to err=%d\n", rc);
		}
	}
}

/**
 * dccp_retransmit_skb  -  Retransmit Request, Close, or CloseReq packets
 * @sk: socket to perform retransmit on
 *
 * There are only four retransmittable packet types in DCCP:
 * - Request  in client-REQUEST  state (sec. 8.1.1),
 * - CloseReq in server-CLOSEREQ state (sec. 8.3),
 * - Close    in   node-CLOSING  state (sec. 8.3),
 * - Acks in client-PARTOPEN state (sec. 8.1.5, handled by dccp_delack_timer()).
 * This function expects sk->sk_send_head to contain the original skb.
 */
int dccp_retransmit_skb(struct sock *sk)
{
	WARN_ON(sk->sk_send_head == NULL);

	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk) != 0)
		return -EHOSTUNREACH; /* Routing failure or similar. */

	/* this count is used to distinguish original and retransmitted skb */
	inet_csk(sk)->icsk_retransmits++;

	return dccp_transmit_skb(sk, skb_clone(sk->sk_send_head, GFP_ATOMIC));
}

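/* Build a DCCP-Response packet answering the DCCP-Request held in @req. */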
struct sk_buff *dccp_make_response(const struct sock *sk, struct dst_entry *dst,
				   struct request_sock *req)
{
	struct dccp_hdr *dh;
	struct dccp_request_sock *dreq;
	const u32 dccp_header_size = sizeof(struct dccp_hdr) +
				     sizeof(struct dccp_hdr_ext) +
				     sizeof(struct dccp_hdr_response);
	struct sk_buff *skb;

	/* sk is marked const to clearly express we dont hold socket lock.
	 * sock_wmalloc() will atomically change sk->sk_wmem_alloc,
	 * it is safe to promote sk to non const.
	 */
	skb = sock_wmalloc((struct sock *)sk, MAX_DCCP_HEADER, 1,
			   GFP_ATOMIC);
	if (!skb)
		return NULL;

	skb_reserve(skb, MAX_DCCP_HEADER);

	skb_dst_set(skb, dst_clone(dst));

	dreq = dccp_rsk(req);
	if (inet_rsk(req)->acked)	/* increase GSS upon retransmission */
		dccp_inc_seqno(&dreq->dreq_gss);
	DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESPONSE;
	DCCP_SKB_CB(skb)->dccpd_seq  = dreq->dreq_gss;

	/* Resolve feature dependencies resulting from choice of CCID */
	if (dccp_feat_server_ccid_dependencies(dreq))
		goto response_failed;

	if (dccp_insert_options_rsk(dreq, skb))
		goto response_failed;

	/* Build and checksum header */
	dh = dccp_zeroed_hdr(skb, dccp_header_size);

	dh->dccph_sport	= htons(inet_rsk(req)->ir_num);
	dh->dccph_dport	= inet_rsk(req)->ir_rmt_port;
	dh->dccph_doff	= (dccp_header_size +
			   DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
	dh->dccph_type	= DCCP_PKT_RESPONSE;
	dh->dccph_x	= 1;
	dccp_hdr_set_seq(dh, dreq->dreq_gss);
	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dreq->dreq_gsr);
	dccp_hdr_response(skb)->dccph_resp_service = dreq->dreq_service;

	dccp_csum_outgoing(skb);

	/* We use `acked' to remember that a Response was already sent. */
	inet_rsk(req)->acked = 1;
	DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
	return skb;
response_failed:
	kfree_skb(skb);
	return NULL;
}

EXPORT_SYMBOL_GPL(dccp_make_response);

/* answer offending packet in @rcv_skb with Reset from control socket */
struct sk_buff *dccp_ctl_make_reset(struct sock *sk, struct sk_buff *rcv_skb)
{
	struct dccp_hdr *rxdh = dccp_hdr(rcv_skb), *dh;
	struct dccp_skb_cb *dcb = DCCP_SKB_CB(rcv_skb);
	const u32 dccp_hdr_reset_len = sizeof(struct dccp_hdr) +
				       sizeof(struct dccp_hdr_ext) +
				       sizeof(struct dccp_hdr_reset);
	struct dccp_hdr_reset *dhr;
	struct sk_buff *skb;

	skb = alloc_skb(sk->sk_prot->max_header, GFP_ATOMIC);
	if (skb == NULL)
		return NULL;

	skb_reserve(skb, sk->sk_prot->max_header);

	/* Swap the send and the receive. */
	dh = dccp_zeroed_hdr(skb, dccp_hdr_reset_len);
	dh->dccph_type	= DCCP_PKT_RESET;
	dh->dccph_sport	= rxdh->dccph_dport;
	dh->dccph_dport	= rxdh->dccph_sport;
	dh->dccph_doff	= dccp_hdr_reset_len / 4;
	dh->dccph_x	= 1;

	dhr = dccp_hdr_reset(skb);
	dhr->dccph_reset_code = dcb->dccpd_reset_code;

	switch (dcb->dccpd_reset_code) {
	case DCCP_RESET_CODE_PACKET_ERROR:
		dhr->dccph_reset_data[0] = rxdh->dccph_type;
		break;
	case DCCP_RESET_CODE_OPTION_ERROR:
	case DCCP_RESET_CODE_MANDATORY_ERROR:
		memcpy(dhr->dccph_reset_data, dcb->dccpd_reset_data, 3);
		break;
	}

	/*
	 * From RFC 4340, 8.3.1:
	 *   If P.ackno exists, set R.seqno := P.ackno + 1.
	 *   Else set R.seqno := 0 (or any other satisfactory value).
	 */
	if (dcb->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
		dccp_hdr_set_seq(dh, ADD48(dcb->dccpd_ack_seq, 1));
	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dcb->dccpd_seq);

	dccp_csum_outgoing(skb);
	return skb;
}

EXPORT_SYMBOL_GPL(dccp_ctl_make_reset);

/* send Reset on established socket, to close or abort the connection */
int dccp_send_reset(struct sock *sk, enum dccp_reset_codes code)
{
	struct sk_buff *skb;
	/*
	 * FIXME: what if rebuild_header fails?
	 * Should we be doing a rebuild_header here?
	 */
	int err = inet_csk(sk)->icsk_af_ops->rebuild_header(sk);

	if (err != 0)
		return err;

	skb = sock_wmalloc(sk, sk->sk_prot->max_header, 1, GFP_ATOMIC);
	if (skb == NULL)
		return -ENOBUFS;

	/* Reserve space for headers and prepare control bits. */
	skb_reserve(skb, sk->sk_prot->max_header);
	DCCP_SKB_CB(skb)->dccpd_type	   = DCCP_PKT_RESET;
	DCCP_SKB_CB(skb)->dccpd_reset_code = code;

	return dccp_transmit_skb(sk, skb);
}

/*
 * Do all connect socket setups that can be done AF independent.
 */
int dccp_connect(struct sock *sk)
{
	struct sk_buff *skb;
	struct dccp_sock *dp = dccp_sk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	sk->sk_err = 0;
	sock_reset_flag(sk, SOCK_DONE);

	dccp_sync_mss(sk, dst_mtu(dst));

	/* do not connect if feature negotiation setup fails */
	if (dccp_feat_finalise_settings(dccp_sk(sk)))
		return -EPROTO;

	/* Initialise GAR as per 8.5; AWL/AWH are set in dccp_transmit_skb() */
	dp->dccps_gar = dp->dccps_iss;

	skb = alloc_skb(sk->sk_prot->max_header, sk->sk_allocation);
	if (unlikely(skb == NULL))
		return -ENOBUFS;

	/* Reserve space for headers. */
	skb_reserve(skb, sk->sk_prot->max_header);

	DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_REQUEST;

	dccp_transmit_skb(sk, dccp_skb_entail(sk, skb));
	DCCP_INC_STATS(DCCP_MIB_ACTIVEOPENS);

	/* Timer for repeating the REQUEST until an answer. */
	icsk->icsk_retransmits = 0;
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
				  icsk->icsk_rto, DCCP_RTO_MAX);
	return 0;
}

EXPORT_SYMBOL_GPL(dccp_connect);

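/* Send a pure Ack now; if allocation fails, fall back to the delayed-Ack timer. */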
void dccp_send_ack(struct sock *sk)
{
	/* If the socket is up, we can send an immediate ack. */
	if (sk->sk_state != DCCP_CLOSED) {
		struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header,
						GFP_ATOMIC);

		if (skb == NULL) {
			inet_csk_schedule_ack(sk);
			inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  TCP_DELACK_MAX,
						  DCCP_RTO_MAX);
			return;
		}

		/* Reserve space for headers */
		skb_reserve(skb, sk->sk_prot->max_header);
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_ACK;
		dccp_transmit_skb(sk, skb);
	}
}

EXPORT_SYMBOL_GPL(dccp_send_ack);

#if 0
/* Delayed-Ack variant; compiled out since it is currently unused by DCCP. */
void dccp_send_delayed_ack(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	/* Upper bound for the delayed Ack: fire no later than 2 seconds out. */
	unsigned long timeout = jiffies + 2 * HZ;

	/* Use the new timeout only if no earlier one would expire sooner. */
	if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
		/* If the delack timer was blocked, send the Ack right away. */
		if (icsk->icsk_ack.blocked) {
			dccp_send_ack(sk);
			return;
		}

		if (!time_before(timeout, icsk->icsk_ack.timeout))
			timeout = icsk->icsk_ack.timeout;
	}
	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
	icsk->icsk_ack.timeout = timeout;
	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
}
#endif

void dccp_send_sync(struct sock *sk, const u64 ackno,
		    const enum dccp_pkt_type pkt_type)
{
	/*
	 * We are not putting this on the write queue, so
	 * dccp_transmit_skb() will set the ownership to this
	 * sock.
	 */
	struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header, GFP_ATOMIC);

	if (skb == NULL) {
		/* FIXME: how to make sure the sync is sent? */
		DCCP_CRIT("could not send %s", dccp_packet_name(pkt_type));
		return;
	}

	/* Reserve space for headers and prepare control bits. */
	skb_reserve(skb, sk->sk_prot->max_header);
	DCCP_SKB_CB(skb)->dccpd_type = pkt_type;
	DCCP_SKB_CB(skb)->dccpd_ack_seq = ackno;

	/*
	 * Clear the flag in case the Sync was scheduled for out-of-band data,
	 * such as carrying a long Ack Vector.
	 */
	dccp_sk(sk)->dccps_sync_scheduled = 0;

	dccp_transmit_skb(sk, skb);
}

EXPORT_SYMBOL_GPL(dccp_send_sync);

/*
 * Send a DCCP_PKT_CLOSE/CLOSEREQ. The caller holds the sock lock, so nothing
 * else can happen on this socket in the meantime.
 */
void dccp_send_close(struct sock *sk, const int active)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;
	const gfp_t prio = active ? GFP_KERNEL : GFP_ATOMIC;

	skb = alloc_skb(sk->sk_prot->max_header, prio);
	if (skb == NULL)
		return;

	/* Reserve space for headers and prepare control bits. */
	skb_reserve(skb, sk->sk_prot->max_header);
	if (dp->dccps_role == DCCP_ROLE_SERVER && !dp->dccps_server_timewait)
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_CLOSEREQ;
	else
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_CLOSE;

	if (active) {
		skb = dccp_skb_entail(sk, skb);
		/*
		 * Retransmission timer for active-close: RFC 4340, 8.3 requires
		 * to retransmit the Close/CloseReq until the CLOSING/CLOSEREQ
		 * state can be left. The initial timeout is 2 RTTs; since RTT
		 * measurement is done by the CCIDs, there is no easy way to get
		 * an RTT sample here, so a conservative fixed initial timeout
		 * is used instead.
		 */
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  DCCP_TIMEOUT_INIT, DCCP_RTO_MAX);
	}
	dccp_transmit_skb(sk, skb);
}