/*
 *  net/dccp/output.c
 *
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 */

#include <linux/dccp.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/slab.h>

#include <net/inet_sock.h>
#include <net/sock.h>

#include "ackvec.h"
#include "ccid.h"
#include "dccp.h"

/* an Ack has just been sent: the delayed-Ack timer no longer needs to fire */
static inline void dccp_event_ack_sent(struct sock *sk)
{
        inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}

/* enqueue @skb on sk_send_head for retransmission, return clone to send now */
static struct sk_buff *dccp_skb_entail(struct sock *sk, struct sk_buff *skb)
{
        skb_set_owner_w(skb, sk);
        WARN_ON(sk->sk_send_head);
        sk->sk_send_head = skb;
        return skb_clone(sk->sk_send_head, gfp_any());
}
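
/*
 * dccp_skb_entail() has only two callers in this file: dccp_connect(), for
 * the initial Request, and dccp_send_close(), for an active close. These are
 * exactly the packet types that dccp_retransmit_skb() may later have to
 * resend, which is why the original skb is parked on sk_send_head while only
 * a clone is handed down for transmission.
 */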

/*
 * All SKB's seen here are completely headerless. It is our
 * job to build the DCCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 */
static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
{
        if (likely(skb != NULL)) {
                struct inet_sock *inet = inet_sk(sk);
                const struct inet_connection_sock *icsk = inet_csk(sk);
                struct dccp_sock *dp = dccp_sk(sk);
                struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
                struct dccp_hdr *dh;
                /* XXX For now we're using only 48 bits sequence numbers */
                const u32 dccp_header_size = sizeof(*dh) +
                                             sizeof(struct dccp_hdr_ext) +
                                             dccp_packet_hdr_len(dcb->dccpd_type);
                int err, set_ack = 1;
                u64 ackno = dp->dccps_gsr;
                /*
                 * Increment GSS here already in case the option code needs it.
                 * Update GSS for real only if option processing below succeeds.
                 */
                dcb->dccpd_seq = ADD48(dp->dccps_gss, 1);

                switch (dcb->dccpd_type) {
                case DCCP_PKT_DATA:
                        set_ack = 0;
                        /* fall through */
                case DCCP_PKT_DATAACK:
                case DCCP_PKT_RESET:
                        break;

                case DCCP_PKT_REQUEST:
                        set_ack = 0;
                        /* Use ISS on the first (non-retransmitted) Request. */
                        if (icsk->icsk_retransmits == 0)
                                dcb->dccpd_seq = dp->dccps_iss;
                        /* fall through */

                case DCCP_PKT_SYNC:
                case DCCP_PKT_SYNCACK:
                        ackno = dcb->dccpd_ack_seq;
                        /* fall through */
                default:
                        /*
                         * Set owner/destructor: only Data, DataAck and Reset
                         * packets arrive here already owned (via
                         * dccp_skb_entail() or sock_wmalloc()); everything
                         * else was allocated with plain alloc_skb() and is
                         * claimed for this socket now.
                         */
                        WARN_ON(skb->sk);
                        skb_set_owner_w(skb, sk);
                        break;
                }

                if (dccp_insert_options(sk, skb)) {
                        kfree_skb(skb);
                        return -EPROTO;
                }

                /* Build DCCP header and checksum it. */
                dh = dccp_zeroed_hdr(skb, dccp_header_size);
                dh->dccph_type  = dcb->dccpd_type;
                dh->dccph_sport = inet->inet_sport;
                dh->dccph_dport = inet->inet_dport;
                dh->dccph_doff  = (dccp_header_size + dcb->dccpd_opt_len) / 4;
                dh->dccph_ccval = dcb->dccpd_ccval;
                dh->dccph_cscov = dp->dccps_pcslen;
                /* XXX For now we're using only 48 bits sequence numbers */
                dh->dccph_x     = 1;

                dccp_update_gss(sk, dcb->dccpd_seq);
                dccp_hdr_set_seq(dh, dp->dccps_gss);
                if (set_ack)
                        dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), ackno);

                switch (dcb->dccpd_type) {
                case DCCP_PKT_REQUEST:
                        dccp_hdr_request(skb)->dccph_req_service =
                                                        dp->dccps_service;
                        /*
                         * Limit Ack window to ISS <= P.ackno <= GSS, so that
                         * only Responses to Requests we sent are considered.
                         */
                        dp->dccps_awl = dp->dccps_iss;
                        break;
                case DCCP_PKT_RESET:
                        dccp_hdr_reset(skb)->dccph_reset_code =
                                                        dcb->dccpd_reset_code;
                        break;
                }

                icsk->icsk_af_ops->send_check(sk, skb);

                if (set_ack)
                        dccp_event_ack_sent(sk);

                DCCP_INC_STATS(DCCP_MIB_OUTSEGS);

                err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl);
                return net_xmit_eval(err);
        }
        return -ENOBUFS;
}
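
/*
 * Note on the sequence-number arithmetic above: GSS is kept modulo 2^48,
 * since ADD48() masks the sum to 48 bits. This matches the extended
 * (dccph_x == 1) 48-bit sequence numbers used on every packet sent here;
 * for example, adding 1 to the maximum 48-bit value wraps around to 0.
 */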

/**
 * dccp_determine_ccmps  -  Find out about CCID-specific packet-size limits
 * Only the HC-sender CCID is considered: the HC-receiver CCID is restricted
 * to feedback packets (Acks), which are small in comparison with the data
 * traffic. A value of 0 means that no current CCMPS limit is known.
 */
static u32 dccp_determine_ccmps(const struct dccp_sock *dp)
{
        const struct ccid *tx_ccid = dp->dccps_hc_tx_ccid;

        if (tx_ccid == NULL || tx_ccid->ccid_ops == NULL)
                return 0;
        return tx_ccid->ccid_ops->ccid_ccmps;
}

unsigned int dccp_sync_mss(struct sock *sk, u32 pmtu)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct dccp_sock *dp = dccp_sk(sk);
        u32 ccmps = dccp_determine_ccmps(dp);
        u32 cur_mps = ccmps ? min(pmtu, ccmps) : pmtu;

        /* Account for header lengths and IPv4/v6 option overhead */
        cur_mps -= (icsk->icsk_af_ops->net_header_len + icsk->icsk_ext_hdr_len +
                    sizeof(struct dccp_hdr) + sizeof(struct dccp_hdr_ext));

        /*
         * Leave enough headroom for common DCCP header options.
         * This only considers options which may appear on DCCP-Data packets
         * (Table 3 in RFC 4340, 5.8). When running out of space for other
         * options (e.g. an Ack Vector, which can take up to 255 bytes), it is
         * better to schedule a separate Ack. Thus headroom is left for:
         *  - 1 byte for Slow Receiver (11.6),
         *  - 6 bytes for Timestamp (13.1),
         *  - 10 bytes for Timestamp Echo (13.3),
         *  - 8 bytes for NDP count (7.7, when activated),
         *  - 6 bytes for Elapsed Time (13.2),
         *  - the minimum-length Ack Vector when a receive Ack Vector is in
         *    use, all rounded up to a multiple of 4 (options are padded to
         *    32-bit boundaries).
         */
        cur_mps -= roundup(1 + 6 + 10 + dp->dccps_send_ndp_count * 8 + 6 +
                           (dp->dccps_hc_rx_ackvec ? DCCPAV_MIN_OPTLEN : 0), 4);

        /* And store cached results */
        icsk->icsk_pmtu_cookie = pmtu;
        dp->dccps_mss_cache = cur_mps;

        return cur_mps;
}

EXPORT_SYMBOL_GPL(dccp_sync_mss);
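
/*
 * Worked example for the computation above (illustrative numbers): over IPv4
 * with pmtu = 1500, no IP options (icsk_ext_hdr_len == 0), no CCMPS limit,
 * NDP counts disabled and no Ack Vector in use:
 *   1500 - 20 (IP header) - 12 - 4 (generic + extended DCCP headers) = 1464,
 *   1464 - roundup(1 + 6 + 10 + 0 + 6, 4) = 1464 - 24 = 1440 bytes of MPS.
 */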

void dccp_write_space(struct sock *sk)
{
        struct socket_wq *wq;

        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);
        if (skwq_has_sleeper(wq))
                wake_up_interruptible(&wq->wait);
        /* Should agree with poll, otherwise some programs break */
        if (sock_writeable(sk))
                sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);

        rcu_read_unlock();
}

/**
 * dccp_wait_for_ccid  -  Await CCID send permission
 * @sk:    socket to wait for
 * @delay: timeout in jiffies
 *
 * This is used by CCIDs which need to delay the send time in process context.
 */
static int dccp_wait_for_ccid(struct sock *sk, unsigned long delay)
{
        DEFINE_WAIT(wait);
        long remaining;

        prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
        sk->sk_write_pending++;
        release_sock(sk);

        remaining = schedule_timeout(delay);

        lock_sock(sk);
        sk->sk_write_pending--;
        finish_wait(sk_sleep(sk), &wait);

        if (signal_pending(current) || sk->sk_err)
                return -1;
        return remaining;
}
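
/*
 * Note that the socket lock is dropped around schedule_timeout() above and
 * re-acquired before returning; bumping sk_write_pending while asleep lets
 * the rest of the stack see that a sender is still waiting on this socket.
 */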

/**
 * dccp_xmit_packet  -  Send data packet under control of CCID
 * Transmits the next queued payload and informs the CCID to account for it.
 */
static void dccp_xmit_packet(struct sock *sk)
{
        int err, len;
        struct dccp_sock *dp = dccp_sk(sk);
        struct sk_buff *skb = dccp_qpolicy_pop(sk);

        if (unlikely(skb == NULL))
                return;
        len = skb->len;

        if (sk->sk_state == DCCP_PARTOPEN) {
                const u32 cur_mps = dp->dccps_mss_cache - DCCP_FEATNEG_OVERHEAD;
                /*
                 * See 8.1.5 - Handshake Completion.
                 *
                 * For robustness we resend Confirm options until the client has
                 * entered OPEN. During the initial feature negotiation, the MPS
                 * is smaller than usual, reduced by the Change/Confirm options.
                 */
                if (!list_empty(&dp->dccps_featneg) && len > cur_mps) {
                        DCCP_WARN("Payload too large (%d) for featneg.\n", len);
                        dccp_send_ack(sk);
                        dccp_feat_list_purge(&dp->dccps_featneg);
                }

                inet_csk_schedule_ack(sk);
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
                                          inet_csk(sk)->icsk_rto,
                                          DCCP_RTO_MAX);
                DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATAACK;
        } else if (dccp_ack_pending(sk)) {
                DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATAACK;
        } else {
                DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATA;
        }

        err = dccp_transmit_skb(sk, skb);
        if (err)
                dccp_pr_debug("transmit_skb() returned err=%d\n", err);
        /*
         * Register this one as sent even if an error occurred. To the remote
         * end a local packet drop is indistinguishable from network loss, i.e.
         * any local drop will eventually be reported via receiver feedback.
         */
        ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, len);

        /*
         * If the CCID needs to transfer additional header options out-of-band
         * (e.g. Ack Vectors or feature-negotiation options), it activates this
         * flag to schedule a Sync. The Sync will automatically incorporate all
         * currently pending header options, thus clearing the backlog.
         */
        if (dp->dccps_sync_scheduled)
                dccp_send_sync(sk, dp->dccps_gsr, DCCP_PKT_SYNC);
}

/**
 * dccp_flush_write_queue  -  Drain queue at end of connection
 * Since dccp_sendmsg queues packets without waiting for them to be sent, it may
 * happen that the TX queue is not empty at the end of a connection. We give the
 * HC-sender CCID a grace period of up to @time_budget jiffies. If this function
 * returns with a non-empty write queue, it will be purged later.
 */
void dccp_flush_write_queue(struct sock *sk, long *time_budget)
{
        struct dccp_sock *dp = dccp_sk(sk);
        struct sk_buff *skb;
        long delay, rc;

        while (*time_budget > 0 && (skb = skb_peek(&sk->sk_write_queue))) {
                rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);

                switch (ccid_packet_dequeue_eval(rc)) {
                case CCID_PACKET_WILL_DEQUEUE_LATER:
                        /*
                         * If the CCID determines when to send, the next sending
                         * time is unknown or the CCID may not even send again
                         * (e.g. remote host crashes or lost Ack packets).
                         */
                        DCCP_WARN("CCID did not manage to send all packets\n");
                        return;
                case CCID_PACKET_DELAY:
                        delay = msecs_to_jiffies(rc);
                        if (delay > *time_budget)
                                return;
                        rc = dccp_wait_for_ccid(sk, delay);
                        if (rc < 0)
                                return;
                        *time_budget -= (delay - rc);
                        /* check again if we are allowed to send */
                        break;
                case CCID_PACKET_SEND_AT_ONCE:
                        dccp_xmit_packet(sk);
                        break;
                case CCID_PACKET_ERR:
                        skb_dequeue(&sk->sk_write_queue);
                        kfree_skb(skb);
                        dccp_pr_debug("packet discarded due to err=%ld\n", rc);
                }
        }
}
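
/*
 * Budget bookkeeping above, with hypothetical numbers: given a budget of
 * 100 jiffies and a CCID-requested delay of 30 jiffies, an early wakeup that
 * makes dccp_wait_for_ccid() return 10 charges 30 - 10 = 20 jiffies, leaving
 * 80 for draining the rest of the queue.
 */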

void dccp_write_xmit(struct sock *sk)
{
        struct dccp_sock *dp = dccp_sk(sk);
        struct sk_buff *skb;

        while ((skb = dccp_qpolicy_top(sk))) {
                int rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);

                switch (ccid_packet_dequeue_eval(rc)) {
                case CCID_PACKET_WILL_DEQUEUE_LATER:
                        return;
                case CCID_PACKET_DELAY:
                        sk_reset_timer(sk, &dp->dccps_xmit_timer,
                                       jiffies + msecs_to_jiffies(rc));
                        return;
                case CCID_PACKET_SEND_AT_ONCE:
                        dccp_xmit_packet(sk);
                        break;
                case CCID_PACKET_ERR:
                        dccp_qpolicy_drop(sk, skb);
                        dccp_pr_debug("packet discarded due to err=%d\n", rc);
                }
        }
}
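
/*
 * In contrast to dccp_flush_write_queue() above, this is the normal TX path:
 * rather than sleeping on a CCID-requested delay, it arms dccps_xmit_timer
 * and returns, so it is safe to call from contexts that must not block.
 */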

/**
 * dccp_retransmit_skb  -  Retransmit Request, Close, or CloseReq packets
 * There are only four retransmittable packet types in DCCP:
 * - Request  in client-REQUEST  state (sec. 8.1.1),
 * - CloseReq in server-CLOSEREQ state (sec. 8.3),
 * - Close    in   node-CLOSING  state (sec. 8.3),
 * - Acks in client-PARTOPEN state (sec. 8.1.5, handled by dccp_delack_timer).
 * This function expects sk->sk_send_head to contain the original skb.
 */
int dccp_retransmit_skb(struct sock *sk)
{
        WARN_ON(sk->sk_send_head == NULL);

        if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk) != 0)
                return -EHOSTUNREACH; /* Routing failure or similar. */

        /* this count is used to distinguish original and retransmitted skb */
        inet_csk(sk)->icsk_retransmits++;

        return dccp_transmit_skb(sk, skb_clone(sk->sk_send_head, GFP_ATOMIC));
}

struct sk_buff *dccp_make_response(const struct sock *sk, struct dst_entry *dst,
                                   struct request_sock *req)
{
        struct dccp_hdr *dh;
        struct dccp_request_sock *dreq;
        const u32 dccp_header_size = sizeof(struct dccp_hdr) +
                                     sizeof(struct dccp_hdr_ext) +
                                     sizeof(struct dccp_hdr_response);
        struct sk_buff *skb;

        /* sk is marked const to clearly express we don't hold the socket lock.
         * sock_wmalloc() will change sk->sk_wmem_alloc atomically,
         * so it is safe to promote sk to non const.
         */
        skb = sock_wmalloc((struct sock *)sk, MAX_DCCP_HEADER, 1,
                           GFP_ATOMIC);
        if (!skb)
                return NULL;

        /* Reserve space for headers. */
        skb_reserve(skb, MAX_DCCP_HEADER);

        skb_dst_set(skb, dst_clone(dst));

        dreq = dccp_rsk(req);
        if (inet_rsk(req)->acked)       /* increase GSS upon retransmission */
                dccp_inc_seqno(&dreq->dreq_gss);
        DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESPONSE;
        DCCP_SKB_CB(skb)->dccpd_seq  = dreq->dreq_gss;

        /* Resolve feature dependencies resulting from choice of CCID */
        if (dccp_feat_server_ccid_dependencies(dreq))
                goto response_failed;

        if (dccp_insert_options_rsk(dreq, skb))
                goto response_failed;

        /* Build and checksum header */
        dh = dccp_zeroed_hdr(skb, dccp_header_size);

        dh->dccph_sport = htons(inet_rsk(req)->ir_num);
        dh->dccph_dport = inet_rsk(req)->ir_rmt_port;
        dh->dccph_doff  = (dccp_header_size +
                           DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
        dh->dccph_type  = DCCP_PKT_RESPONSE;
        dh->dccph_x     = 1;
        dccp_hdr_set_seq(dh, dreq->dreq_gss);
        dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dreq->dreq_gsr);
        dccp_hdr_response(skb)->dccph_resp_service = dreq->dreq_service;

        dccp_csum_outgoing(skb);

        /* We use `acked' to remember that a Response was already sent. */
        inet_rsk(req)->acked = 1;
        DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
        return skb;
response_failed:
        kfree_skb(skb);
        return NULL;
}

EXPORT_SYMBOL_GPL(dccp_make_response);

/* answer an offending packet in @rcv_skb with a Reset from the control socket */
struct sk_buff *dccp_ctl_make_reset(struct sock *sk, struct sk_buff *rcv_skb)
{
        struct dccp_hdr *rxdh = dccp_hdr(rcv_skb), *dh;
        struct dccp_skb_cb *dcb = DCCP_SKB_CB(rcv_skb);
        const u32 dccp_hdr_reset_len = sizeof(struct dccp_hdr) +
                                       sizeof(struct dccp_hdr_ext) +
                                       sizeof(struct dccp_hdr_reset);
        struct dccp_hdr_reset *dhr;
        struct sk_buff *skb;

        skb = alloc_skb(sk->sk_prot->max_header, GFP_ATOMIC);
        if (skb == NULL)
                return NULL;

        skb_reserve(skb, sk->sk_prot->max_header);

        /* Swap the send and the receive. */
        dh = dccp_zeroed_hdr(skb, dccp_hdr_reset_len);
        dh->dccph_type  = DCCP_PKT_RESET;
        dh->dccph_sport = rxdh->dccph_dport;
        dh->dccph_dport = rxdh->dccph_sport;
        dh->dccph_doff  = dccp_hdr_reset_len / 4;
        dh->dccph_x     = 1;

        dhr = dccp_hdr_reset(skb);
        dhr->dccph_reset_code = dcb->dccpd_reset_code;

        switch (dcb->dccpd_reset_code) {
        case DCCP_RESET_CODE_PACKET_ERROR:
                dhr->dccph_reset_data[0] = rxdh->dccph_type;
                break;
        case DCCP_RESET_CODE_OPTION_ERROR:
        case DCCP_RESET_CODE_MANDATORY_ERROR:
                memcpy(dhr->dccph_reset_data, dcb->dccpd_reset_data, 3);
                break;
        }

        /*
         * From RFC 4340, 8.3.1:
         *   If P.ackno exists, set R.seqno := P.ackno + 1.
         *   Else set R.seqno := 0.
         */
        if (dcb->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
                dccp_hdr_set_seq(dh, ADD48(dcb->dccpd_ack_seq, 1));
        dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dcb->dccpd_seq);

        dccp_csum_outgoing(skb);
        return skb;
}

EXPORT_SYMBOL_GPL(dccp_ctl_make_reset);

/* send Reset on established socket, to close or abort the connection */
int dccp_send_reset(struct sock *sk, enum dccp_reset_codes code)
{
        struct sk_buff *skb;
        /*
         * FIXME: what if rebuild_header fails?
         * Should we be doing a rebuild_header here?
         */
        int err = inet_csk(sk)->icsk_af_ops->rebuild_header(sk);

        if (err != 0)
                return err;

        skb = sock_wmalloc(sk, sk->sk_prot->max_header, 1, GFP_ATOMIC);
        if (skb == NULL)
                return -ENOBUFS;

        /* Reserve space for headers and prepare control bits. */
        skb_reserve(skb, sk->sk_prot->max_header);
        DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESET;
        DCCP_SKB_CB(skb)->dccpd_reset_code = code;

        return dccp_transmit_skb(sk, skb);
}

/*
 * Do all connect socket setups that can be done AF independent.
 */
int dccp_connect(struct sock *sk)
{
        struct sk_buff *skb;
        struct dccp_sock *dp = dccp_sk(sk);
        struct dst_entry *dst = __sk_dst_get(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);

        sk->sk_err = 0;
        sock_reset_flag(sk, SOCK_DONE);

        dccp_sync_mss(sk, dst_mtu(dst));

        /* do not connect if feature negotiation setup fails */
        if (dccp_feat_finalise_settings(dccp_sk(sk)))
                return -EPROTO;

        /* Initialise GAR as per 8.5; AWL/AWH are set in dccp_transmit_skb() */
        dp->dccps_gar = dp->dccps_iss;

        skb = alloc_skb(sk->sk_prot->max_header, sk->sk_allocation);
        if (unlikely(skb == NULL))
                return -ENOBUFS;

        /* Reserve space for headers. */
        skb_reserve(skb, sk->sk_prot->max_header);

        DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_REQUEST;

        dccp_transmit_skb(sk, dccp_skb_entail(sk, skb));
        DCCP_INC_STATS(DCCP_MIB_ACTIVEOPENS);

        /* Timer for repeating the REQUEST until an answer arrives. */
        icsk->icsk_retransmits = 0;
        inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                  icsk->icsk_rto, DCCP_RTO_MAX);
        return 0;
}

EXPORT_SYMBOL_GPL(dccp_connect);

void dccp_send_ack(struct sock *sk)
{
        /* If we have been reset, we may not send again. */
        if (sk->sk_state != DCCP_CLOSED) {
                struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header,
                                                GFP_ATOMIC);

                if (skb == NULL) {
                        inet_csk_schedule_ack(sk);
                        inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
                        inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
                                                  TCP_DELACK_MAX,
                                                  DCCP_RTO_MAX);
                        return;
                }

                /* Reserve space for headers */
                skb_reserve(skb, sk->sk_prot->max_header);
                DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_ACK;
                dccp_transmit_skb(sk, skb);
        }
}

EXPORT_SYMBOL_GPL(dccp_send_ack);

#if 0
/* FIXME: Is this still necessary (11.3) - currently nowhere used by DCCP. */
void dccp_send_delayed_ack(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        /*
         * FIXME: tune this timer. Elapsed time fixes the skew, but it is no
         * good with 2s, and active senders also piggyback the ACK into a
         * DATAACK packet, so this is really for quiescence.
         */
        unsigned long timeout = jiffies + 2 * HZ;

        /* Use the new timeout only if there wasn't an older one earlier. */
        if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
                /*
                 * If the delack timer was blocked or is about to expire,
                 * send the ACK now.
                 */
                if (icsk->icsk_ack.blocked) {
                        dccp_send_ack(sk);
                        return;
                }

                if (!time_before(timeout, icsk->icsk_ack.timeout))
                        timeout = icsk->icsk_ack.timeout;
        }
        icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
        icsk->icsk_ack.timeout = timeout;
        sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
}
#endif

void dccp_send_sync(struct sock *sk, const u64 ackno,
                    const enum dccp_pkt_type pkt_type)
{
        /*
         * We are not putting this on the write queue, so
         * dccp_transmit_skb() will set the ownership to this
         * sock.
         */
        struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header, GFP_ATOMIC);

        if (skb == NULL) {
                /* FIXME: how to make sure the sync is sent? */
                DCCP_CRIT("could not send %s", dccp_packet_name(pkt_type));
                return;
        }

        /* Reserve space for headers and prepare control bits. */
        skb_reserve(skb, sk->sk_prot->max_header);
        DCCP_SKB_CB(skb)->dccpd_type = pkt_type;
        DCCP_SKB_CB(skb)->dccpd_ack_seq = ackno;

        /*
         * Clear the flag in case the Sync was scheduled for out-of-band data,
         * such as carrying a long Ack Vector.
         */
        dccp_sk(sk)->dccps_sync_scheduled = 0;

        dccp_transmit_skb(sk, skb);
}

EXPORT_SYMBOL_GPL(dccp_send_sync);

/*
 * Send a DCCP_PKT_CLOSE/CLOSEREQ. The caller locks the socket for us. This
 * cannot be allowed to fail queueing a DCCP_PKT_CLOSE/CLOSEREQ frame under
 * any circumstances.
 */
void dccp_send_close(struct sock *sk, const int active)
{
        struct dccp_sock *dp = dccp_sk(sk);
        struct sk_buff *skb;
        const gfp_t prio = active ? GFP_KERNEL : GFP_ATOMIC;

        skb = alloc_skb(sk->sk_prot->max_header, prio);
        if (skb == NULL)
                return;

        /* Reserve space for headers and prepare control bits. */
        skb_reserve(skb, sk->sk_prot->max_header);
        if (dp->dccps_role == DCCP_ROLE_SERVER && !dp->dccps_server_timewait)
                DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_CLOSEREQ;
        else
                DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_CLOSE;

        if (active) {
                skb = dccp_skb_entail(sk, skb);
                /*
                 * Retransmission timer for active-close: RFC 4340, 8.3
                 * requires retransmitting the Close/CloseReq until the
                 * CLOSING/CLOSEREQ state can be left. The suggested initial
                 * timeout is 2 RTTs, but since RTT measurement is done by
                 * the CCIDs there is no easy way to get an RTT sample here,
                 * hence the conservative DCCP_TIMEOUT_INIT is used instead.
                 */
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                          DCCP_TIMEOUT_INIT, DCCP_RTO_MAX);
        }
        dccp_transmit_skb(sk, skb);
}
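
/*
 * The CLOSEREQ/CLOSE choice above follows RFC 4340, 8.3: by sending CloseReq,
 * a server asks the client to send the terminating Close and thereby to hold
 * the TIMEWAIT state, unless dccps_server_timewait requests holding it locally.
 */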
704