/*
 * Implementation of the Transmission Control Protocol (TCP) output engine:
 * segment construction and transmission, option encoding, send and receive
 * window selection, MTU probing and the retransmission queue.
 */

#include <net/tcp.h>

#include <linux/compiler.h>
#include <linux/gfp.h>
#include <linux/module.h>

/* People can turn this off for buggy TCP's found in printers etc. */
int sysctl_tcp_retrans_collapse __read_mostly = 1;

/* People can turn this on to work with those rare, broken TCPs that
 * interpret the window field as a signed quantity.
 */
int sysctl_tcp_workaround_signed_windows __read_mostly = 0;

/* This limits the percentage of the congestion window which we
 * will allow a single TSO frame to consume.  Building TSO frames
 * which are too large can cause TCP streams to be bursty.
 */
int sysctl_tcp_tso_win_divisor __read_mostly = 3;

int sysctl_tcp_mtu_probing __read_mostly = 0;
int sysctl_tcp_base_mss __read_mostly = TCP_BASE_MSS;

/* By default, RFC2861 behavior. */
int sysctl_tcp_slow_start_after_idle __read_mostly = 1;

int sysctl_tcp_cookie_size __read_mostly = 0; /* TCP_COOKIE_MAX */
EXPORT_SYMBOL_GPL(sysctl_tcp_cookie_size);

/* Account for new data that has been sent to the network. */
static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int prior_packets = tp->packets_out;

	tcp_advance_send_head(sk, skb);
	tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;

	/* Don't override Nagle indefinitely with F-RTO */
	if (tp->frto_counter == 2)
		tp->frto_counter = 3;

	tp->packets_out += tcp_skb_pcount(skb);
	if (!prior_packets)
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
}

/* SND.NXT, if window was not shrunk.
 * If window has been shrunk, what should we make? It is not clear at all.
 * Using SND.UNA we will fail to open window, SND.NXT is out of window. :-(
 * Anything in between SND.UNA...SND.UNA+SND.WND also can be already
 * invalid. OK, let's make this for now:
 */
static inline __u32 tcp_acceptable_seq(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!before(tcp_wnd_end(tp), tp->snd_nxt))
		return tp->snd_nxt;
	else
		return tcp_wnd_end(tp);
}

/* Calculate mss to advertise in SYN segment.
 * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that:
 *
 * 1. It is independent of path mtu.
 * 2. Ideally, it is maximal possible segment size i.e. 65535-40.
 * 3. For IPv4 it is reasonable to calculate it from maximal MTU of
 *    attached devices, because some buggy hosts are confused by
 *    large MSS.
 * 4. We do not make 3, we advertise MSS, calculated from first
 *    hop device mtu, but allow to raise it to ip_rt_min_advmss.
 *    This may be overridden via information stored in routing table.
 * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible,
 *    probably even Jumbo".
 */
static __u16 tcp_advertise_mss(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	int mss = tp->advmss;

	if (dst) {
		unsigned int metric = dst_metric_advmss(dst);

		if (metric < mss) {
			mss = metric;
			tp->advmss = mss;
		}
	}

	return (__u16)mss;
}

/* RFC2861. Reset CWND after idle period longer than RTO to "restart window".
 * This is the first part of the cwnd validation mechanism.
 */
static void tcp_cwnd_restart(struct sock *sk, struct dst_entry *dst)
{
	struct tcp_sock *tp = tcp_sk(sk);
	s32 delta = tcp_time_stamp - tp->lsndtime;
	u32 restart_cwnd = tcp_init_cwnd(tp, dst);
	u32 cwnd = tp->snd_cwnd;

	tcp_ca_event(sk, CA_EVENT_CWND_RESTART);

	tp->snd_ssthresh = tcp_current_ssthresh(sk);
	restart_cwnd = min(restart_cwnd, cwnd);

	while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
		cwnd >>= 1;
	tp->snd_cwnd = max(cwnd, restart_cwnd);
	tp->snd_cwnd_stamp = tcp_time_stamp;
	tp->snd_cwnd_used = 0;
}

/* Congestion state accounting after a packet has been sent. */
static void tcp_event_data_sent(struct tcp_sock *tp,
				struct sk_buff *skb, struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const u32 now = tcp_time_stamp;

	if (sysctl_tcp_slow_start_after_idle &&
	    (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto))
		tcp_cwnd_restart(sk, __sk_dst_get(sk));

	tp->lsndtime = now;

	/* If it is a reply for ato after last received
	 * packet, enter pingpong mode.
	 */
	if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
		icsk->icsk_ack.pingpong = 1;
}

/* Account for an ACK we sent. */
static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
{
	tcp_dec_quickack_mode(sk, pkts);
	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}

/* Determine a window scaling and initial window to offer.
 * Based on the assumption that the given amount of space
 * will be offered. Store the results in the tp structure.
 * NOTE: for smooth operation initial space offering should
 * be a multiple of mss if possible. We assume here that mss >= 1.
 * This MUST be enforced by all callers.
 */
void tcp_select_initial_window(int __space, __u32 mss,
			       __u32 *rcv_wnd, __u32 *window_clamp,
			       int wscale_ok, __u8 *rcv_wscale,
			       __u32 init_rcv_wnd)
{
	unsigned int space = (__space < 0 ? 0 : __space);

	/* If no clamp set the clamp to the max possible scaled window */
	if (*window_clamp == 0)
		(*window_clamp) = (65535 << 14);
	space = min(*window_clamp, space);

	/* Quantize space offering to a multiple of mss if possible. */
	if (space > mss)
		space = (space / mss) * mss;

	/* NOTE: offering an initial window larger than 32767
	 * will break some buggy TCP stacks. If the admin tells us
	 * it is likely we could be speaking with such a buggy stack
	 * we will truncate our initial window offering to 32K-1
	 * unless the remote has sent us a window scaling option,
	 * which we interpret as a sign the remote TCP is not
	 * misinterpreting the window field as a signed quantity.
	 */
	if (sysctl_tcp_workaround_signed_windows)
		(*rcv_wnd) = min(space, MAX_TCP_WINDOW);
	else
		(*rcv_wnd) = space;

	(*rcv_wscale) = 0;
	if (wscale_ok) {
		/* Set window scaling on max possible window.
		 * See RFC1323 for an explanation of the limit to 14.
		 */
		space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max);
		space = min_t(u32, space, *window_clamp);
		while (space > 65535 && (*rcv_wscale) < 14) {
			space >>= 1;
			(*rcv_wscale)++;
		}
	}

	/* Set initial window to a value enough for senders starting with
	 * an initial congestion window of TCP_DEFAULT_INIT_RCVWND. Place
	 * a limit on the initial window when mss is larger than 1460.
	 */
	if (mss > (1 << *rcv_wscale)) {
		int init_cwnd = TCP_DEFAULT_INIT_RCVWND;
		if (mss > 1460)
			init_cwnd =
			max_t(u32, (1460 * TCP_DEFAULT_INIT_RCVWND) / mss, 2);
		/* when initializing use the value from init_rcv_wnd
		 * rather than the default from above
		 */
		if (init_rcv_wnd)
			*rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss);
		else
			*rcv_wnd = min(*rcv_wnd, init_cwnd * mss);
	}

	/* Set the clamp no higher than max representable value */
	(*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp);
}
EXPORT_SYMBOL(tcp_select_initial_window);
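
/*
 * Worked example (illustration only; all values are assumptions, not
 * taken from a real trace): with __space = 180224 bytes, mss = 1460,
 * wscale_ok set and sysctl_tcp_rmem[2] = 4 MB, the loop above halves
 * 4 MB until it fits in 16 bits, giving rcv_wscale = 7.  space is first
 * rounded down to a multiple of mss (180224 -> 179580), and with
 * init_rcv_wnd == 0 the advertised rcv_wnd is then clamped to
 * TCP_DEFAULT_INIT_RCVWND * mss (10 * 1460 = 14600 with the default of
 * 10 segments).  The SYN itself still carries an unscaled 16-bit value;
 * scaling only takes effect once the connection is established.
 */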

/* Chose a new window to advertise, update state in tcp_sock for the
 * socket, and return result with RFC1323 scaling applied.  The return
 * value can be stuffed directly into th->window for an outgoing
 * frame.
 */
static u16 tcp_select_window(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 cur_win = tcp_receive_window(tp);
	u32 new_win = __tcp_select_window(sk);

	/* Never shrink the offered window */
	if (new_win < cur_win) {
		/* Danger Will Robinson!
		 * Don't update rcv_wup/rcv_wnd here or else
		 * we will not be able to advertise a zero
		 * window in time.  --DaveM
		 *
		 * Relax Will Robinson.
		 */
		new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale);
	}
	tp->rcv_wnd = new_win;
	tp->rcv_wup = tp->rcv_nxt;

	/* Make sure we do not exceed the maximum possible
	 * scaled window.
	 */
	if (!tp->rx_opt.rcv_wscale && sysctl_tcp_workaround_signed_windows)
		new_win = min(new_win, MAX_TCP_WINDOW);
	else
		new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));

	/* RFC1323 scaling applied */
	new_win >>= tp->rx_opt.rcv_wscale;

	/* If we advertise zero window, disable fast path. */
	if (new_win == 0)
		tp->pred_flags = 0;

	return new_win;
}

/* Packet ECN state for a SYN-ACK */
static inline void TCP_ECN_send_synack(struct tcp_sock *tp, struct sk_buff *skb)
{
	TCP_SKB_CB(skb)->flags &= ~TCPHDR_CWR;
	if (!(tp->ecn_flags & TCP_ECN_OK))
		TCP_SKB_CB(skb)->flags &= ~TCPHDR_ECE;
}

/* Packet ECN state for a SYN. */
static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tp->ecn_flags = 0;
	if (sysctl_tcp_ecn == 1) {
		TCP_SKB_CB(skb)->flags |= TCPHDR_ECE | TCPHDR_CWR;
		tp->ecn_flags = TCP_ECN_OK;
	}
}

static __inline__ void
TCP_ECN_make_synack(struct request_sock *req, struct tcphdr *th)
{
	if (inet_rsk(req)->ecn_ok)
		th->ece = 1;
}

/* Set up ECN state for a packet on an ESTABLISHED socket that is about to
 * be sent.
 */
static inline void TCP_ECN_send(struct sock *sk, struct sk_buff *skb,
				int tcp_header_len)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tp->ecn_flags & TCP_ECN_OK) {
		/* Not-retransmitted data segment: set ECT and inject CWR. */
		if (skb->len != tcp_header_len &&
		    !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) {
			INET_ECN_xmit(sk);
			if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) {
				tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
				tcp_hdr(skb)->cwr = 1;
				skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
			}
		} else {
			/* ACK or retransmitted segment: clear ECT|CE */
			INET_ECN_dontxmit(sk);
		}
		if (tp->ecn_flags & TCP_ECN_DEMAND_CWR)
			tcp_hdr(skb)->ece = 1;
	}
}

/* Constructs common control bits of non-data skb. If SYN/FIN is present,
 * auto increment end seqno.
 */
static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
{
	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum = 0;

	TCP_SKB_CB(skb)->flags = flags;
	TCP_SKB_CB(skb)->sacked = 0;

	skb_shinfo(skb)->gso_segs = 1;
	skb_shinfo(skb)->gso_size = 0;
	skb_shinfo(skb)->gso_type = 0;

	TCP_SKB_CB(skb)->seq = seq;
	if (flags & (TCPHDR_SYN | TCPHDR_FIN))
		seq++;
	TCP_SKB_CB(skb)->end_seq = seq;
}

static inline int tcp_urg_mode(const struct tcp_sock *tp)
{
	return tp->snd_una != tp->snd_up;
}

#define OPTION_SACK_ADVERTISE	(1 << 0)
#define OPTION_TS		(1 << 1)
#define OPTION_MD5		(1 << 2)
#define OPTION_WSCALE		(1 << 3)
#define OPTION_COOKIE_EXTENSION	(1 << 4)

struct tcp_out_options {
	u8 options;		/* bit field of OPTION_* */
	u8 ws;			/* window scale, 0 to disable */
	u8 num_sack_blocks;	/* number of SACK blocks to include */
	u8 hash_size;		/* bytes in hash_location */
	u16 mss;		/* 0 to disable */
	__u32 tsval, tsecr;	/* need to include OPTION_TS */
	__u8 *hash_location;	/* temporary pointer, overloaded */
};

/* The sysctl int routines are generic, so check consistency here.
 */
static u8 tcp_cookie_size_check(u8 desired)
{
	int cookie_size;

	if (desired > 0)
		/* previously specified */
		return desired;

	cookie_size = ACCESS_ONCE(sysctl_tcp_cookie_size);
	if (cookie_size <= 0)
		/* no default specified */
		return 0;

	if (cookie_size <= TCP_COOKIE_MIN)
		/* value too small, specify minimum */
		return TCP_COOKIE_MIN;

	if (cookie_size >= TCP_COOKIE_MAX)
		/* value too large, specify maximum */
		return TCP_COOKIE_MAX;

	if (cookie_size & 1)
		/* 8-bit multiple, illegal, fix it */
		cookie_size++;

	return (u8)cookie_size;
}
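
/*
 * Illustration (assumed sysctl value): with desired == 0 and
 * sysctl_tcp_cookie_size set to 9, the checks above confirm the value
 * lies between TCP_COOKIE_MIN and TCP_COOKIE_MAX and then round the odd
 * value up to 10, since cookies must be an even number of bytes.  A
 * non-zero value passed in by the caller is returned unchanged.
 */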
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
437 struct tcp_out_options *opts)
438{
439 u8 options = opts->options;
440
441
442
443
444
445
446
447
448
449 if (unlikely(OPTION_MD5 & options)) {
450 if (unlikely(OPTION_COOKIE_EXTENSION & options)) {
451 *ptr++ = htonl((TCPOPT_COOKIE << 24) |
452 (TCPOLEN_COOKIE_BASE << 16) |
453 (TCPOPT_MD5SIG << 8) |
454 TCPOLEN_MD5SIG);
455 } else {
456 *ptr++ = htonl((TCPOPT_NOP << 24) |
457 (TCPOPT_NOP << 16) |
458 (TCPOPT_MD5SIG << 8) |
459 TCPOLEN_MD5SIG);
460 }
461 options &= ~OPTION_COOKIE_EXTENSION;
462
463 opts->hash_location = (__u8 *)ptr;
464 ptr += 4;
465 }
466
467 if (unlikely(opts->mss)) {
468 *ptr++ = htonl((TCPOPT_MSS << 24) |
469 (TCPOLEN_MSS << 16) |
470 opts->mss);
471 }
472
473 if (likely(OPTION_TS & options)) {
474 if (unlikely(OPTION_SACK_ADVERTISE & options)) {
475 *ptr++ = htonl((TCPOPT_SACK_PERM << 24) |
476 (TCPOLEN_SACK_PERM << 16) |
477 (TCPOPT_TIMESTAMP << 8) |
478 TCPOLEN_TIMESTAMP);
479 options &= ~OPTION_SACK_ADVERTISE;
480 } else {
481 *ptr++ = htonl((TCPOPT_NOP << 24) |
482 (TCPOPT_NOP << 16) |
483 (TCPOPT_TIMESTAMP << 8) |
484 TCPOLEN_TIMESTAMP);
485 }
486 *ptr++ = htonl(opts->tsval);
487 *ptr++ = htonl(opts->tsecr);
488 }
489
490
491
492
493
494
495
496 if (unlikely(OPTION_COOKIE_EXTENSION & options)) {
497 __u8 *cookie_copy = opts->hash_location;
498 u8 cookie_size = opts->hash_size;
499
500
501
502
503 if (0x2 & cookie_size) {
504 __u8 *p = (__u8 *)ptr;
505
506
507 *p++ = TCPOPT_COOKIE;
508 *p++ = TCPOLEN_COOKIE_BASE + cookie_size;
509 *p++ = *cookie_copy++;
510 *p++ = *cookie_copy++;
511 ptr++;
512 cookie_size -= 2;
513 } else {
514
515 *ptr++ = htonl(((TCPOPT_NOP << 24) |
516 (TCPOPT_NOP << 16) |
517 (TCPOPT_COOKIE << 8) |
518 TCPOLEN_COOKIE_BASE) +
519 cookie_size);
520 }
521
522 if (cookie_size > 0) {
523 memcpy(ptr, cookie_copy, cookie_size);
524 ptr += (cookie_size / 4);
525 }
526 }
527
528 if (unlikely(OPTION_SACK_ADVERTISE & options)) {
529 *ptr++ = htonl((TCPOPT_NOP << 24) |
530 (TCPOPT_NOP << 16) |
531 (TCPOPT_SACK_PERM << 8) |
532 TCPOLEN_SACK_PERM);
533 }
534
535 if (unlikely(OPTION_WSCALE & options)) {
536 *ptr++ = htonl((TCPOPT_NOP << 24) |
537 (TCPOPT_WINDOW << 16) |
538 (TCPOLEN_WINDOW << 8) |
539 opts->ws);
540 }
541
542 if (unlikely(opts->num_sack_blocks)) {
543 struct tcp_sack_block *sp = tp->rx_opt.dsack ?
544 tp->duplicate_sack : tp->selective_acks;
545 int this_sack;
546
547 *ptr++ = htonl((TCPOPT_NOP << 24) |
548 (TCPOPT_NOP << 16) |
549 (TCPOPT_SACK << 8) |
550 (TCPOLEN_SACK_BASE + (opts->num_sack_blocks *
551 TCPOLEN_SACK_PERBLOCK)));
552
553 for (this_sack = 0; this_sack < opts->num_sack_blocks;
554 ++this_sack) {
555 *ptr++ = htonl(sp[this_sack].start_seq);
556 *ptr++ = htonl(sp[this_sack].end_seq);
557 }
558
559 tp->rx_opt.dsack = 0;
560 }
561}
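
/*
 * Example on-the-wire layout produced by the writer above for a typical
 * established-state segment carrying only timestamps (illustration, not
 * tied to a specific capture):
 *
 *	01 01 08 0a  <tsval:4>  <tsecr:4>
 *	NOP NOP TIMESTAMP len=10
 *
 * i.e. 12 bytes total, the two NOPs padding the 10-byte option out to a
 * 32-bit boundary, which matches the TCPOLEN_TSTAMP_ALIGNED accounting
 * used when the option set is sized.
 */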
562
563
564
565
566static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb,
567 struct tcp_out_options *opts,
568 struct tcp_md5sig_key **md5) {
569 struct tcp_sock *tp = tcp_sk(sk);
570 struct tcp_cookie_values *cvp = tp->cookie_values;
571 unsigned remaining = MAX_TCP_OPTION_SPACE;
572 u8 cookie_size = (!tp->rx_opt.cookie_out_never && cvp != NULL) ?
573 tcp_cookie_size_check(cvp->cookie_desired) :
574 0;
575
576#ifdef CONFIG_TCP_MD5SIG
577 *md5 = tp->af_specific->md5_lookup(sk, sk);
578 if (*md5) {
579 opts->options |= OPTION_MD5;
580 remaining -= TCPOLEN_MD5SIG_ALIGNED;
581 }
582#else
583 *md5 = NULL;
584#endif
585
586
587
588
589
590
591
592
593
594
595 opts->mss = tcp_advertise_mss(sk);
596 remaining -= TCPOLEN_MSS_ALIGNED;
597
598 if (likely(sysctl_tcp_timestamps && *md5 == NULL)) {
599 opts->options |= OPTION_TS;
600 opts->tsval = TCP_SKB_CB(skb)->when;
601 opts->tsecr = tp->rx_opt.ts_recent;
602 remaining -= TCPOLEN_TSTAMP_ALIGNED;
603 }
604 if (likely(sysctl_tcp_window_scaling)) {
605 opts->ws = tp->rx_opt.rcv_wscale;
606 opts->options |= OPTION_WSCALE;
607 remaining -= TCPOLEN_WSCALE_ALIGNED;
608 }
609 if (likely(sysctl_tcp_sack)) {
610 opts->options |= OPTION_SACK_ADVERTISE;
611 if (unlikely(!(OPTION_TS & opts->options)))
612 remaining -= TCPOLEN_SACKPERM_ALIGNED;
613 }
614
615
616
617
618
619
620
621 if (*md5 == NULL &&
622 (OPTION_TS & opts->options) &&
623 cookie_size > 0) {
624 int need = TCPOLEN_COOKIE_BASE + cookie_size;
625
626 if (0x2 & need) {
627
628 need += 2;
629
630 if (need > remaining) {
631
632 cookie_size -= 2;
633 need -= 4;
634 }
635 }
636 while (need > remaining && TCP_COOKIE_MIN <= cookie_size) {
637 cookie_size -= 4;
638 need -= 4;
639 }
640 if (TCP_COOKIE_MIN <= cookie_size) {
641 opts->options |= OPTION_COOKIE_EXTENSION;
642 opts->hash_location = (__u8 *)&cvp->cookie_pair[0];
643 opts->hash_size = cookie_size;
644
645
646 cvp->cookie_desired = cookie_size;
647
648 if (cvp->cookie_desired != cvp->cookie_pair_size) {
649
650
651
652
653 get_random_bytes(&cvp->cookie_pair[0],
654 cookie_size);
655 cvp->cookie_pair_size = cookie_size;
656 }
657
658 remaining -= need;
659 }
660 }
661 return MAX_TCP_OPTION_SPACE - remaining;
662}
663
664
665static unsigned tcp_synack_options(struct sock *sk,
666 struct request_sock *req,
667 unsigned mss, struct sk_buff *skb,
668 struct tcp_out_options *opts,
669 struct tcp_md5sig_key **md5,
670 struct tcp_extend_values *xvp)
671{
672 struct inet_request_sock *ireq = inet_rsk(req);
673 unsigned remaining = MAX_TCP_OPTION_SPACE;
674 u8 cookie_plus = (xvp != NULL && !xvp->cookie_out_never) ?
675 xvp->cookie_plus :
676 0;
677
678#ifdef CONFIG_TCP_MD5SIG
679 *md5 = tcp_rsk(req)->af_specific->md5_lookup(sk, req);
680 if (*md5) {
681 opts->options |= OPTION_MD5;
682 remaining -= TCPOLEN_MD5SIG_ALIGNED;
683
684
685
686
687
688
689 ireq->tstamp_ok &= !ireq->sack_ok;
690 }
691#else
692 *md5 = NULL;
693#endif
694
695
696 opts->mss = mss;
697 remaining -= TCPOLEN_MSS_ALIGNED;
698
699 if (likely(ireq->wscale_ok)) {
700 opts->ws = ireq->rcv_wscale;
701 opts->options |= OPTION_WSCALE;
702 remaining -= TCPOLEN_WSCALE_ALIGNED;
703 }
704 if (likely(ireq->tstamp_ok)) {
705 opts->options |= OPTION_TS;
706 opts->tsval = TCP_SKB_CB(skb)->when;
707 opts->tsecr = req->ts_recent;
708 remaining -= TCPOLEN_TSTAMP_ALIGNED;
709 }
710 if (likely(ireq->sack_ok)) {
711 opts->options |= OPTION_SACK_ADVERTISE;
712 if (unlikely(!ireq->tstamp_ok))
713 remaining -= TCPOLEN_SACKPERM_ALIGNED;
714 }
715
716
717
718
719 if (*md5 == NULL &&
720 ireq->tstamp_ok &&
721 cookie_plus > TCPOLEN_COOKIE_BASE) {
722 int need = cookie_plus;
723
724 if (0x2 & need) {
725
726 need += 2;
727 }
728 if (need <= remaining) {
729 opts->options |= OPTION_COOKIE_EXTENSION;
730 opts->hash_size = cookie_plus - TCPOLEN_COOKIE_BASE;
731 remaining -= need;
732 } else {
733
734 xvp->cookie_out_never = 1;
735 opts->hash_size = 0;
736 }
737 }
738 return MAX_TCP_OPTION_SPACE - remaining;
739}
740
741
742
743
744static unsigned tcp_established_options(struct sock *sk, struct sk_buff *skb,
745 struct tcp_out_options *opts,
746 struct tcp_md5sig_key **md5) {
747 struct tcp_skb_cb *tcb = skb ? TCP_SKB_CB(skb) : NULL;
748 struct tcp_sock *tp = tcp_sk(sk);
749 unsigned size = 0;
750 unsigned int eff_sacks;
751
752#ifdef CONFIG_TCP_MD5SIG
753 *md5 = tp->af_specific->md5_lookup(sk, sk);
754 if (unlikely(*md5)) {
755 opts->options |= OPTION_MD5;
756 size += TCPOLEN_MD5SIG_ALIGNED;
757 }
758#else
759 *md5 = NULL;
760#endif
761
762 if (likely(tp->rx_opt.tstamp_ok)) {
763 opts->options |= OPTION_TS;
764 opts->tsval = tcb ? tcb->when : 0;
765 opts->tsecr = tp->rx_opt.ts_recent;
766 size += TCPOLEN_TSTAMP_ALIGNED;
767 }
768
769 eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
770 if (unlikely(eff_sacks)) {
771 const unsigned remaining = MAX_TCP_OPTION_SPACE - size;
772 opts->num_sack_blocks =
773 min_t(unsigned, eff_sacks,
774 (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
775 TCPOLEN_SACK_PERBLOCK);
776 size += TCPOLEN_SACK_BASE_ALIGNED +
777 opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK;
778 }
779
780 return size;
781}
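
/*
 * Option space accounting sketch (illustrative): MAX_TCP_OPTION_SPACE is
 * 40 bytes.  Timestamps consume 12 of them, leaving room for at most
 * (40 - 12 - 4) / 8 = 3 SACK blocks on a data segment.  Adding a 20-byte
 * MD5 signature leaves no room for SACK blocks at all, and on a SYN-ACK
 * the MSS and window-scale options fill what remains, which is why the
 * SYN-ACK path above drops timestamps when MD5 and SACK are both
 * requested.
 */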
782
783
784
785
786
787
788
789
790
791
792
793
794static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
795 gfp_t gfp_mask)
796{
797 const struct inet_connection_sock *icsk = inet_csk(sk);
798 struct inet_sock *inet;
799 struct tcp_sock *tp;
800 struct tcp_skb_cb *tcb;
801 struct tcp_out_options opts;
802 unsigned tcp_options_size, tcp_header_size;
803 struct tcp_md5sig_key *md5;
804 struct tcphdr *th;
805 int err;
806
807 BUG_ON(!skb || !tcp_skb_pcount(skb));
808
809
810
811
812 if (icsk->icsk_ca_ops->flags & TCP_CONG_RTT_STAMP)
813 __net_timestamp(skb);
814
815 if (likely(clone_it)) {
816 if (unlikely(skb_cloned(skb)))
817 skb = pskb_copy(skb, gfp_mask);
818 else
819 skb = skb_clone(skb, gfp_mask);
820 if (unlikely(!skb))
821 return -ENOBUFS;
822 }
823
824 inet = inet_sk(sk);
825 tp = tcp_sk(sk);
826 tcb = TCP_SKB_CB(skb);
827 memset(&opts, 0, sizeof(opts));
828
829 if (unlikely(tcb->flags & TCPHDR_SYN))
830 tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5);
831 else
832 tcp_options_size = tcp_established_options(sk, skb, &opts,
833 &md5);
834 tcp_header_size = tcp_options_size + sizeof(struct tcphdr);
835
836 if (tcp_packets_in_flight(tp) == 0) {
837 tcp_ca_event(sk, CA_EVENT_TX_START);
838 skb->ooo_okay = 1;
839 } else
840 skb->ooo_okay = 0;
841
842 skb_push(skb, tcp_header_size);
843 skb_reset_transport_header(skb);
844 skb_set_owner_w(skb, sk);
845
846
847 th = tcp_hdr(skb);
848 th->source = inet->inet_sport;
849 th->dest = inet->inet_dport;
850 th->seq = htonl(tcb->seq);
851 th->ack_seq = htonl(tp->rcv_nxt);
852 *(((__be16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) |
853 tcb->flags);
854
855 if (unlikely(tcb->flags & TCPHDR_SYN)) {
856
857
858
859 th->window = htons(min(tp->rcv_wnd, 65535U));
860 } else {
861 th->window = htons(tcp_select_window(sk));
862 }
863 th->check = 0;
864 th->urg_ptr = 0;
865
866
867 if (unlikely(tcp_urg_mode(tp) && before(tcb->seq, tp->snd_up))) {
868 if (before(tp->snd_up, tcb->seq + 0x10000)) {
869 th->urg_ptr = htons(tp->snd_up - tcb->seq);
870 th->urg = 1;
871 } else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) {
872 th->urg_ptr = htons(0xFFFF);
873 th->urg = 1;
874 }
875 }
876
877 tcp_options_write((__be32 *)(th + 1), tp, &opts);
878 if (likely((tcb->flags & TCPHDR_SYN) == 0))
879 TCP_ECN_send(sk, skb, tcp_header_size);
880
881#ifdef CONFIG_TCP_MD5SIG
882
883 if (md5) {
884 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
885 tp->af_specific->calc_md5_hash(opts.hash_location,
886 md5, sk, NULL, skb);
887 }
888#endif
889
890 icsk->icsk_af_ops->send_check(sk, skb);
891
892 if (likely(tcb->flags & TCPHDR_ACK))
893 tcp_event_ack_sent(sk, tcp_skb_pcount(skb));
894
895 if (skb->len != tcp_header_size)
896 tcp_event_data_sent(tp, skb, sk);
897
898 if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
899 TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS,
900 tcp_skb_pcount(skb));
901
902 err = icsk->icsk_af_ops->queue_xmit(skb, &inet->cork.fl);
903 if (likely(err <= 0))
904 return err;
905
906 tcp_enter_cwr(sk, 1);
907
908 return net_xmit_eval(err);
909}
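
/*
 * Header-length encoding note (illustrative numbers): for a segment
 * carrying only the timestamp option, tcp_header_size is 20 + 12 = 32
 * bytes, so the value packed into the data-offset field above is
 * 32 >> 2 = 8, stored in the top four bits of the 16-bit word that also
 * carries the header flags.
 */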
910
911
912
913
914
915
916static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
917{
918 struct tcp_sock *tp = tcp_sk(sk);
919
920
921 tp->write_seq = TCP_SKB_CB(skb)->end_seq;
922 skb_header_release(skb);
923 tcp_add_write_queue_tail(sk, skb);
924 sk->sk_wmem_queued += skb->truesize;
925 sk_mem_charge(sk, skb->truesize);
926}
927
928
929static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb,
930 unsigned int mss_now)
931{
932 if (skb->len <= mss_now || !sk_can_gso(sk) ||
933 skb->ip_summed == CHECKSUM_NONE) {
934
935
936
937 skb_shinfo(skb)->gso_segs = 1;
938 skb_shinfo(skb)->gso_size = 0;
939 skb_shinfo(skb)->gso_type = 0;
940 } else {
941 skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss_now);
942 skb_shinfo(skb)->gso_size = mss_now;
943 skb_shinfo(skb)->gso_type = sk->sk_gso_type;
944 }
945}
946
947
948
949
950static void tcp_adjust_fackets_out(struct sock *sk, struct sk_buff *skb,
951 int decr)
952{
953 struct tcp_sock *tp = tcp_sk(sk);
954
955 if (!tp->sacked_out || tcp_is_reno(tp))
956 return;
957
958 if (after(tcp_highest_sack_seq(tp), TCP_SKB_CB(skb)->seq))
959 tp->fackets_out -= decr;
960}
961
962
963
964
965static void tcp_adjust_pcount(struct sock *sk, struct sk_buff *skb, int decr)
966{
967 struct tcp_sock *tp = tcp_sk(sk);
968
969 tp->packets_out -= decr;
970
971 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
972 tp->sacked_out -= decr;
973 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
974 tp->retrans_out -= decr;
975 if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
976 tp->lost_out -= decr;
977
978
979 if (tcp_is_reno(tp) && decr > 0)
980 tp->sacked_out -= min_t(u32, tp->sacked_out, decr);
981
982 tcp_adjust_fackets_out(sk, skb, decr);
983
984 if (tp->lost_skb_hint &&
985 before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) &&
986 (tcp_is_fack(tp) || (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)))
987 tp->lost_cnt_hint -= decr;
988
989 tcp_verify_left_out(tp);
990}
991
992
993
994
995
996
997int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
998 unsigned int mss_now)
999{
1000 struct tcp_sock *tp = tcp_sk(sk);
1001 struct sk_buff *buff;
1002 int nsize, old_factor;
1003 int nlen;
1004 u8 flags;
1005
1006 if (WARN_ON(len > skb->len))
1007 return -EINVAL;
1008
1009 nsize = skb_headlen(skb) - len;
1010 if (nsize < 0)
1011 nsize = 0;
1012
1013 if (skb_cloned(skb) &&
1014 skb_is_nonlinear(skb) &&
1015 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
1016 return -ENOMEM;
1017
1018
1019 buff = sk_stream_alloc_skb(sk, nsize, GFP_ATOMIC);
1020 if (buff == NULL)
1021 return -ENOMEM;
1022
1023 sk->sk_wmem_queued += buff->truesize;
1024 sk_mem_charge(sk, buff->truesize);
1025 nlen = skb->len - len - nsize;
1026 buff->truesize += nlen;
1027 skb->truesize -= nlen;
1028
1029
1030 TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
1031 TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
1032 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
1033
1034
1035 flags = TCP_SKB_CB(skb)->flags;
1036 TCP_SKB_CB(skb)->flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
1037 TCP_SKB_CB(buff)->flags = flags;
1038 TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
1039
1040 if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_PARTIAL) {
1041
1042 buff->csum = csum_partial_copy_nocheck(skb->data + len,
1043 skb_put(buff, nsize),
1044 nsize, 0);
1045
1046 skb_trim(skb, len);
1047
1048 skb->csum = csum_block_sub(skb->csum, buff->csum, len);
1049 } else {
1050 skb->ip_summed = CHECKSUM_PARTIAL;
1051 skb_split(skb, buff, len);
1052 }
1053
1054 buff->ip_summed = skb->ip_summed;
1055
1056
1057
1058
1059 TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when;
1060 buff->tstamp = skb->tstamp;
1061
1062 old_factor = tcp_skb_pcount(skb);
1063
1064
1065 tcp_set_skb_tso_segs(sk, skb, mss_now);
1066 tcp_set_skb_tso_segs(sk, buff, mss_now);
1067
1068
1069
1070
1071 if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
1072 int diff = old_factor - tcp_skb_pcount(skb) -
1073 tcp_skb_pcount(buff);
1074
1075 if (diff)
1076 tcp_adjust_pcount(sk, skb, diff);
1077 }
1078
1079
1080 skb_header_release(buff);
1081 tcp_insert_write_queue_after(skb, buff, sk);
1082
1083 return 0;
1084}
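
/*
 * Sequence-space sketch (hypothetical numbers): fragmenting an skb that
 * covers [1000, 3000) at len = 1000 leaves the original skb as
 * [1000, 2000) and queues the new buff as [2000, 3000), with FIN/PSH
 * moving to the tail piece; pcount and fackets accounting is adjusted
 * only when the data being split has already been sent (snd_nxt at or
 * past the new buffer's end).
 */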
1085
1086
1087
1088
1089
1090static void __pskb_trim_head(struct sk_buff *skb, int len)
1091{
1092 int i, k, eat;
1093
1094 eat = len;
1095 k = 0;
1096 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1097 if (skb_shinfo(skb)->frags[i].size <= eat) {
1098 put_page(skb_shinfo(skb)->frags[i].page);
1099 eat -= skb_shinfo(skb)->frags[i].size;
1100 } else {
1101 skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
1102 if (eat) {
1103 skb_shinfo(skb)->frags[k].page_offset += eat;
1104 skb_shinfo(skb)->frags[k].size -= eat;
1105 eat = 0;
1106 }
1107 k++;
1108 }
1109 }
1110 skb_shinfo(skb)->nr_frags = k;
1111
1112 skb_reset_tail_pointer(skb);
1113 skb->data_len -= len;
1114 skb->len = skb->data_len;
1115}
1116
1117
1118int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
1119{
1120 if (skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
1121 return -ENOMEM;
1122
1123
1124 if (unlikely(len < skb_headlen(skb)))
1125 __skb_pull(skb, len);
1126 else
1127 __pskb_trim_head(skb, len - skb_headlen(skb));
1128
1129 TCP_SKB_CB(skb)->seq += len;
1130 skb->ip_summed = CHECKSUM_PARTIAL;
1131
1132 skb->truesize -= len;
1133 sk->sk_wmem_queued -= len;
1134 sk_mem_uncharge(sk, len);
1135 sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
1136
1137
1138
1139
1140 if (tcp_skb_pcount(skb) > 1)
1141 tcp_set_skb_tso_segs(sk, skb, tcp_current_mss(sk));
1142
1143 return 0;
1144}

/* Calculate MSS. Not accounting for SACKs here. */
int tcp_mtu_to_mss(struct sock *sk, int pmtu)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int mss_now;

	/* Calculate base mss without TCP options:
	   It is MMS_S - sizeof(tcphdr) of rfc1122
	 */
	mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr);

	/* Clamp it (mss_clamp does not include tcp options) */
	if (mss_now > tp->rx_opt.mss_clamp)
		mss_now = tp->rx_opt.mss_clamp;

	/* Now subtract optional transport overhead */
	mss_now -= icsk->icsk_ext_hdr_len;

	/* Then reserve room for full set of TCP options and 8 bytes of data */
	if (mss_now < 48)
		mss_now = 48;

	/* Now subtract TCP options size, not including SACKs */
	mss_now -= tp->tcp_header_len - sizeof(struct tcphdr);

	return mss_now;
}

/* Inverse of above */
int tcp_mss_to_mtu(struct sock *sk, int mss)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int mtu;

	mtu = mss +
	      tp->tcp_header_len +
	      icsk->icsk_ext_hdr_len +
	      icsk->icsk_af_ops->net_header_len;

	return mtu;
}
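
/*
 * Worked example (typical IPv4 values, for illustration only): with a
 * path MTU of 1500, no IP options and no extension headers, the base MSS
 * is 1500 - 20 - 20 = 1460.  If timestamps are in use, tcp_header_len is
 * 20 + 12, so the value returned above drops to 1448 bytes of payload
 * per segment.  tcp_mss_to_mtu() reverses the same arithmetic, which is
 * what the MTU-probing code relies on.
 */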

/* MTU probing init per socket */
void tcp_mtup_init(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_mtup.enabled = sysctl_tcp_mtu_probing > 1;
	icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
			       icsk->icsk_af_ops->net_header_len;
	icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, sysctl_tcp_base_mss);
	icsk->icsk_mtup.probe_size = 0;
}
EXPORT_SYMBOL(tcp_mtup_init);
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
1227{
1228 struct tcp_sock *tp = tcp_sk(sk);
1229 struct inet_connection_sock *icsk = inet_csk(sk);
1230 int mss_now;
1231
1232 if (icsk->icsk_mtup.search_high > pmtu)
1233 icsk->icsk_mtup.search_high = pmtu;
1234
1235 mss_now = tcp_mtu_to_mss(sk, pmtu);
1236 mss_now = tcp_bound_to_half_wnd(tp, mss_now);
1237
1238
1239 icsk->icsk_pmtu_cookie = pmtu;
1240 if (icsk->icsk_mtup.enabled)
1241 mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low));
1242 tp->mss_cache = mss_now;
1243
1244 return mss_now;
1245}
1246EXPORT_SYMBOL(tcp_sync_mss);
1247
1248
1249
1250
1251unsigned int tcp_current_mss(struct sock *sk)
1252{
1253 struct tcp_sock *tp = tcp_sk(sk);
1254 struct dst_entry *dst = __sk_dst_get(sk);
1255 u32 mss_now;
1256 unsigned header_len;
1257 struct tcp_out_options opts;
1258 struct tcp_md5sig_key *md5;
1259
1260 mss_now = tp->mss_cache;
1261
1262 if (dst) {
1263 u32 mtu = dst_mtu(dst);
1264 if (mtu != inet_csk(sk)->icsk_pmtu_cookie)
1265 mss_now = tcp_sync_mss(sk, mtu);
1266 }
1267
1268 header_len = tcp_established_options(sk, NULL, &opts, &md5) +
1269 sizeof(struct tcphdr);
1270
1271
1272
1273
1274 if (header_len != tp->tcp_header_len) {
1275 int delta = (int) header_len - tp->tcp_header_len;
1276 mss_now -= delta;
1277 }
1278
1279 return mss_now;
1280}
1281
1282
1283static void tcp_cwnd_validate(struct sock *sk)
1284{
1285 struct tcp_sock *tp = tcp_sk(sk);
1286
1287 if (tp->packets_out >= tp->snd_cwnd) {
1288
1289 tp->snd_cwnd_used = 0;
1290 tp->snd_cwnd_stamp = tcp_time_stamp;
1291 } else {
1292
1293 if (tp->packets_out > tp->snd_cwnd_used)
1294 tp->snd_cwnd_used = tp->packets_out;
1295
1296 if (sysctl_tcp_slow_start_after_idle &&
1297 (s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto)
1298 tcp_cwnd_application_limited(sk);
1299 }
1300}
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314static unsigned int tcp_mss_split_point(struct sock *sk, struct sk_buff *skb,
1315 unsigned int mss_now, unsigned int cwnd)
1316{
1317 struct tcp_sock *tp = tcp_sk(sk);
1318 u32 needed, window, cwnd_len;
1319
1320 window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
1321 cwnd_len = mss_now * cwnd;
1322
1323 if (likely(cwnd_len <= window && skb != tcp_write_queue_tail(sk)))
1324 return cwnd_len;
1325
1326 needed = min(skb->len, window);
1327
1328 if (cwnd_len <= needed)
1329 return cwnd_len;
1330
1331 return needed - needed % mss_now;
1332}
1333
1334
1335
1336
1337static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp,
1338 struct sk_buff *skb)
1339{
1340 u32 in_flight, cwnd;
1341
1342
1343 if ((TCP_SKB_CB(skb)->flags & TCPHDR_FIN) && tcp_skb_pcount(skb) == 1)
1344 return 1;
1345
1346 in_flight = tcp_packets_in_flight(tp);
1347 cwnd = tp->snd_cwnd;
1348 if (in_flight < cwnd)
1349 return (cwnd - in_flight);
1350
1351 return 0;
1352}
1353
1354
1355
1356
1357
1358static int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb,
1359 unsigned int mss_now)
1360{
1361 int tso_segs = tcp_skb_pcount(skb);
1362
1363 if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) {
1364 tcp_set_skb_tso_segs(sk, skb, mss_now);
1365 tso_segs = tcp_skb_pcount(skb);
1366 }
1367 return tso_segs;
1368}
1369
1370
1371static inline int tcp_minshall_check(const struct tcp_sock *tp)
1372{
1373 return after(tp->snd_sml, tp->snd_una) &&
1374 !after(tp->snd_sml, tp->snd_nxt);
1375}
1376
1377
1378
1379
1380
1381
1382
1383
1384static inline int tcp_nagle_check(const struct tcp_sock *tp,
1385 const struct sk_buff *skb,
1386 unsigned mss_now, int nonagle)
1387{
1388 return skb->len < mss_now &&
1389 ((nonagle & TCP_NAGLE_CORK) ||
1390 (!nonagle && tp->packets_out && tcp_minshall_check(tp)));
1391}
1392
1393
1394
1395
1396static inline int tcp_nagle_test(struct tcp_sock *tp, struct sk_buff *skb,
1397 unsigned int cur_mss, int nonagle)
1398{
1399
1400
1401
1402
1403
1404
1405 if (nonagle & TCP_NAGLE_PUSH)
1406 return 1;
1407
1408
1409
1410
1411 if (tcp_urg_mode(tp) || (tp->frto_counter == 2) ||
1412 (TCP_SKB_CB(skb)->flags & TCPHDR_FIN))
1413 return 1;
1414
1415 if (!tcp_nagle_check(tp, skb, cur_mss, nonagle))
1416 return 1;
1417
1418 return 0;
1419}
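
/*
 * Nagle/Minshall sketch (hypothetical): a 100-byte segment with
 * cur_mss = 1448 is held back when TCP_CORK is set, or when another
 * sub-MSS segment (snd_sml) is still unacknowledged and nonagle is 0;
 * it goes out immediately for TCP_NAGLE_PUSH, in urgent mode, during the
 * F-RTO probe (frto_counter == 2), or when it carries a FIN.
 */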
1420
1421
1422static inline int tcp_snd_wnd_test(struct tcp_sock *tp, struct sk_buff *skb,
1423 unsigned int cur_mss)
1424{
1425 u32 end_seq = TCP_SKB_CB(skb)->end_seq;
1426
1427 if (skb->len > cur_mss)
1428 end_seq = TCP_SKB_CB(skb)->seq + cur_mss;
1429
1430 return !after(end_seq, tcp_wnd_end(tp));
1431}
1432
1433
1434
1435
1436
1437static unsigned int tcp_snd_test(struct sock *sk, struct sk_buff *skb,
1438 unsigned int cur_mss, int nonagle)
1439{
1440 struct tcp_sock *tp = tcp_sk(sk);
1441 unsigned int cwnd_quota;
1442
1443 tcp_init_tso_segs(sk, skb, cur_mss);
1444
1445 if (!tcp_nagle_test(tp, skb, cur_mss, nonagle))
1446 return 0;
1447
1448 cwnd_quota = tcp_cwnd_test(tp, skb);
1449 if (cwnd_quota && !tcp_snd_wnd_test(tp, skb, cur_mss))
1450 cwnd_quota = 0;
1451
1452 return cwnd_quota;
1453}
1454
1455
1456int tcp_may_send_now(struct sock *sk)
1457{
1458 struct tcp_sock *tp = tcp_sk(sk);
1459 struct sk_buff *skb = tcp_send_head(sk);
1460
1461 return skb &&
1462 tcp_snd_test(sk, skb, tcp_current_mss(sk),
1463 (tcp_skb_is_last(sk, skb) ?
1464 tp->nonagle : TCP_NAGLE_PUSH));
1465}
1466
1467
1468
1469
1470
1471
1472
1473
1474static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
1475 unsigned int mss_now, gfp_t gfp)
1476{
1477 struct sk_buff *buff;
1478 int nlen = skb->len - len;
1479 u8 flags;
1480
1481
1482 if (skb->len != skb->data_len)
1483 return tcp_fragment(sk, skb, len, mss_now);
1484
1485 buff = sk_stream_alloc_skb(sk, 0, gfp);
1486 if (unlikely(buff == NULL))
1487 return -ENOMEM;
1488
1489 sk->sk_wmem_queued += buff->truesize;
1490 sk_mem_charge(sk, buff->truesize);
1491 buff->truesize += nlen;
1492 skb->truesize -= nlen;
1493
1494
1495 TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
1496 TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
1497 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
1498
1499
1500 flags = TCP_SKB_CB(skb)->flags;
1501 TCP_SKB_CB(skb)->flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
1502 TCP_SKB_CB(buff)->flags = flags;
1503
1504
1505 TCP_SKB_CB(buff)->sacked = 0;
1506
1507 buff->ip_summed = skb->ip_summed = CHECKSUM_PARTIAL;
1508 skb_split(skb, buff, len);
1509
1510
1511 tcp_set_skb_tso_segs(sk, skb, mss_now);
1512 tcp_set_skb_tso_segs(sk, buff, mss_now);
1513
1514
1515 skb_header_release(buff);
1516 tcp_insert_write_queue_after(skb, buff, sk);
1517
1518 return 0;
1519}
1520
1521
1522
1523
1524
1525
1526static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
1527{
1528 struct tcp_sock *tp = tcp_sk(sk);
1529 const struct inet_connection_sock *icsk = inet_csk(sk);
1530 u32 send_win, cong_win, limit, in_flight;
1531 int win_divisor;
1532
1533 if (TCP_SKB_CB(skb)->flags & TCPHDR_FIN)
1534 goto send_now;
1535
1536 if (icsk->icsk_ca_state != TCP_CA_Open)
1537 goto send_now;
1538
1539
1540 if (tp->tso_deferred &&
1541 (((u32)jiffies << 1) >> 1) - (tp->tso_deferred >> 1) > 1)
1542 goto send_now;
1543
1544 in_flight = tcp_packets_in_flight(tp);
1545
1546 BUG_ON(tcp_skb_pcount(skb) <= 1 || (tp->snd_cwnd <= in_flight));
1547
1548 send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
1549
1550
1551 cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache;
1552
1553 limit = min(send_win, cong_win);
1554
1555
1556 if (limit >= sk->sk_gso_max_size)
1557 goto send_now;
1558
1559
1560 if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len))
1561 goto send_now;
1562
1563 win_divisor = ACCESS_ONCE(sysctl_tcp_tso_win_divisor);
1564 if (win_divisor) {
1565 u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
1566
1567
1568
1569
1570 chunk /= win_divisor;
1571 if (limit >= chunk)
1572 goto send_now;
1573 } else {
1574
1575
1576
1577
1578
1579 if (limit > tcp_max_burst(tp) * tp->mss_cache)
1580 goto send_now;
1581 }
1582
1583
1584 tp->tso_deferred = 1 | (jiffies << 1);
1585
1586 return 1;
1587
1588send_now:
1589 tp->tso_deferred = 0;
1590 return 0;
1591}
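
/*
 * Deferral arithmetic sketch (assumed values): with snd_cwnd = 20,
 * 10 packets in flight and mss_cache = 1448, cong_win is
 * 10 * 1448 = 14480 bytes.  With the default tso_win_divisor of 3 and
 * snd_wnd = 65535, the goal chunk is min(65535, 20 * 1448) / 3 = 9653,
 * so, assuming the receive window is not the limiting factor, the
 * 14480-byte allowance is judged big enough and the segment is sent
 * rather than deferred to wait for more data or ACKs.
 */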
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602static int tcp_mtu_probe(struct sock *sk)
1603{
1604 struct tcp_sock *tp = tcp_sk(sk);
1605 struct inet_connection_sock *icsk = inet_csk(sk);
1606 struct sk_buff *skb, *nskb, *next;
1607 int len;
1608 int probe_size;
1609 int size_needed;
1610 int copy;
1611 int mss_now;
1612
1613
1614
1615
1616
1617 if (!icsk->icsk_mtup.enabled ||
1618 icsk->icsk_mtup.probe_size ||
1619 inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||
1620 tp->snd_cwnd < 11 ||
1621 tp->rx_opt.num_sacks || tp->rx_opt.dsack)
1622 return -1;
1623
1624
1625 mss_now = tcp_current_mss(sk);
1626 probe_size = 2 * tp->mss_cache;
1627 size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache;
1628 if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high)) {
1629
1630 return -1;
1631 }
1632
1633
1634 if (tp->write_seq - tp->snd_nxt < size_needed)
1635 return -1;
1636
1637 if (tp->snd_wnd < size_needed)
1638 return -1;
1639 if (after(tp->snd_nxt + size_needed, tcp_wnd_end(tp)))
1640 return 0;
1641
1642
1643 if (tcp_packets_in_flight(tp) + 2 > tp->snd_cwnd) {
1644 if (!tcp_packets_in_flight(tp))
1645 return -1;
1646 else
1647 return 0;
1648 }
1649
1650
1651 if ((nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC)) == NULL)
1652 return -1;
1653 sk->sk_wmem_queued += nskb->truesize;
1654 sk_mem_charge(sk, nskb->truesize);
1655
1656 skb = tcp_send_head(sk);
1657
1658 TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
1659 TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
1660 TCP_SKB_CB(nskb)->flags = TCPHDR_ACK;
1661 TCP_SKB_CB(nskb)->sacked = 0;
1662 nskb->csum = 0;
1663 nskb->ip_summed = skb->ip_summed;
1664
1665 tcp_insert_write_queue_before(nskb, skb, sk);
1666
1667 len = 0;
1668 tcp_for_write_queue_from_safe(skb, next, sk) {
1669 copy = min_t(int, skb->len, probe_size - len);
1670 if (nskb->ip_summed)
1671 skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
1672 else
1673 nskb->csum = skb_copy_and_csum_bits(skb, 0,
1674 skb_put(nskb, copy),
1675 copy, nskb->csum);
1676
1677 if (skb->len <= copy) {
1678
1679
1680 TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags;
1681 tcp_unlink_write_queue(skb, sk);
1682 sk_wmem_free_skb(sk, skb);
1683 } else {
1684 TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags &
1685 ~(TCPHDR_FIN|TCPHDR_PSH);
1686 if (!skb_shinfo(skb)->nr_frags) {
1687 skb_pull(skb, copy);
1688 if (skb->ip_summed != CHECKSUM_PARTIAL)
1689 skb->csum = csum_partial(skb->data,
1690 skb->len, 0);
1691 } else {
1692 __pskb_trim_head(skb, copy);
1693 tcp_set_skb_tso_segs(sk, skb, mss_now);
1694 }
1695 TCP_SKB_CB(skb)->seq += copy;
1696 }
1697
1698 len += copy;
1699
1700 if (len >= probe_size)
1701 break;
1702 }
1703 tcp_init_tso_segs(sk, nskb, nskb->len);
1704
1705
1706
1707 TCP_SKB_CB(nskb)->when = tcp_time_stamp;
1708 if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) {
1709
1710
1711 tp->snd_cwnd--;
1712 tcp_event_new_data_sent(sk, nskb);
1713
1714 icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len);
1715 tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq;
1716 tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq;
1717
1718 return 1;
1719 }
1720
1721 return -1;
1722}
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
1733
1734
1735static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
1736 int push_one, gfp_t gfp)
1737{
1738 struct tcp_sock *tp = tcp_sk(sk);
1739 struct sk_buff *skb;
1740 unsigned int tso_segs, sent_pkts;
1741 int cwnd_quota;
1742 int result;
1743
1744 sent_pkts = 0;
1745
1746 if (!push_one) {
1747
1748 result = tcp_mtu_probe(sk);
1749 if (!result) {
1750 return 0;
1751 } else if (result > 0) {
1752 sent_pkts = 1;
1753 }
1754 }
1755
1756 while ((skb = tcp_send_head(sk))) {
1757 unsigned int limit;
1758
1759 tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
1760 BUG_ON(!tso_segs);
1761
1762 cwnd_quota = tcp_cwnd_test(tp, skb);
1763 if (!cwnd_quota)
1764 break;
1765
1766 if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))
1767 break;
1768
1769 if (tso_segs == 1) {
1770 if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
1771 (tcp_skb_is_last(sk, skb) ?
1772 nonagle : TCP_NAGLE_PUSH))))
1773 break;
1774 } else {
1775 if (!push_one && tcp_tso_should_defer(sk, skb))
1776 break;
1777 }
1778
1779 limit = mss_now;
1780 if (tso_segs > 1 && !tcp_urg_mode(tp))
1781 limit = tcp_mss_split_point(sk, skb, mss_now,
1782 cwnd_quota);
1783
1784 if (skb->len > limit &&
1785 unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
1786 break;
1787
1788 TCP_SKB_CB(skb)->when = tcp_time_stamp;
1789
1790 if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp)))
1791 break;
1792
1793
1794
1795
1796 tcp_event_new_data_sent(sk, skb);
1797
1798 tcp_minshall_update(tp, mss_now, skb);
1799 sent_pkts++;
1800
1801 if (push_one)
1802 break;
1803 }
1804
1805 if (likely(sent_pkts)) {
1806 tcp_cwnd_validate(sk);
1807 return 0;
1808 }
1809 return !tp->packets_out && tcp_send_head(sk);
1810}
1811
1812
1813
1814
1815
1816void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
1817 int nonagle)
1818{
1819
1820
1821
1822
1823 if (unlikely(sk->sk_state == TCP_CLOSE))
1824 return;
1825
1826 if (tcp_write_xmit(sk, cur_mss, nonagle, 0, GFP_ATOMIC))
1827 tcp_check_probe_timer(sk);
1828}
1829
1830
1831
1832
1833void tcp_push_one(struct sock *sk, unsigned int mss_now)
1834{
1835 struct sk_buff *skb = tcp_send_head(sk);
1836
1837 BUG_ON(!skb || skb->len < mss_now);
1838
1839 tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation);
1840}
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893
1894u32 __tcp_select_window(struct sock *sk)
1895{
1896 struct inet_connection_sock *icsk = inet_csk(sk);
1897 struct tcp_sock *tp = tcp_sk(sk);
1898
1899
1900
1901
1902
1903
1904 int mss = icsk->icsk_ack.rcv_mss;
1905 int free_space = tcp_space(sk);
1906 int full_space = min_t(int, tp->window_clamp, tcp_full_space(sk));
1907 int window;
1908
1909 if (mss > full_space)
1910 mss = full_space;
1911
1912 if (free_space < (full_space >> 1)) {
1913 icsk->icsk_ack.quick = 0;
1914
1915 if (tcp_memory_pressure)
1916 tp->rcv_ssthresh = min(tp->rcv_ssthresh,
1917 4U * tp->advmss);
1918
1919 if (free_space < mss)
1920 return 0;
1921 }
1922
1923 if (free_space > tp->rcv_ssthresh)
1924 free_space = tp->rcv_ssthresh;
1925
1926
1927
1928
1929 window = tp->rcv_wnd;
1930 if (tp->rx_opt.rcv_wscale) {
1931 window = free_space;
1932
1933
1934
1935
1936
1937 if (((window >> tp->rx_opt.rcv_wscale) << tp->rx_opt.rcv_wscale) != window)
1938 window = (((window >> tp->rx_opt.rcv_wscale) + 1)
1939 << tp->rx_opt.rcv_wscale);
1940 } else {
1941
1942
1943
1944
1945
1946
1947
1948
1949 if (window <= free_space - mss || window > free_space)
1950 window = (free_space / mss) * mss;
1951 else if (mss == full_space &&
1952 free_space > window + (full_space >> 1))
1953 window = free_space;
1954 }
1955
1956 return window;
1957}
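
/*
 * Rounding sketch (illustrative): with rcv_wscale = 7 the advertised
 * window is only representable in steps of 1 << 7 = 128 bytes, so
 * free_space is rounded up to the next such multiple above; without
 * scaling it is instead rounded down to a multiple of the receiver's MSS
 * estimate, so the peer sees window changes in full-segment steps and
 * silly window syndrome is avoided.
 */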
1958
1959
1960static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
1961{
1962 struct tcp_sock *tp = tcp_sk(sk);
1963 struct sk_buff *next_skb = tcp_write_queue_next(sk, skb);
1964 int skb_size, next_skb_size;
1965
1966 skb_size = skb->len;
1967 next_skb_size = next_skb->len;
1968
1969 BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1);
1970
1971 tcp_highest_sack_combine(sk, next_skb, skb);
1972
1973 tcp_unlink_write_queue(next_skb, sk);
1974
1975 skb_copy_from_linear_data(next_skb, skb_put(skb, next_skb_size),
1976 next_skb_size);
1977
1978 if (next_skb->ip_summed == CHECKSUM_PARTIAL)
1979 skb->ip_summed = CHECKSUM_PARTIAL;
1980
1981 if (skb->ip_summed != CHECKSUM_PARTIAL)
1982 skb->csum = csum_block_add(skb->csum, next_skb->csum, skb_size);
1983
1984
1985 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;
1986
1987
1988 TCP_SKB_CB(skb)->flags |= TCP_SKB_CB(next_skb)->flags;
1989
1990
1991
1992
1993 TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS;
1994
1995
1996 tcp_clear_retrans_hints_partial(tp);
1997 if (next_skb == tp->retransmit_skb_hint)
1998 tp->retransmit_skb_hint = skb;
1999
2000 tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb));
2001
2002 sk_wmem_free_skb(sk, next_skb);
2003}
2004
2005
2006static int tcp_can_collapse(struct sock *sk, struct sk_buff *skb)
2007{
2008 if (tcp_skb_pcount(skb) > 1)
2009 return 0;
2010
2011 if (skb_shinfo(skb)->nr_frags != 0)
2012 return 0;
2013 if (skb_cloned(skb))
2014 return 0;
2015 if (skb == tcp_send_head(sk))
2016 return 0;
2017
2018 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
2019 return 0;
2020
2021 return 1;
2022}
2023
2024
2025
2026
2027static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
2028 int space)
2029{
2030 struct tcp_sock *tp = tcp_sk(sk);
2031 struct sk_buff *skb = to, *tmp;
2032 int first = 1;
2033
2034 if (!sysctl_tcp_retrans_collapse)
2035 return;
2036 if (TCP_SKB_CB(skb)->flags & TCPHDR_SYN)
2037 return;
2038
2039 tcp_for_write_queue_from_safe(skb, tmp, sk) {
2040 if (!tcp_can_collapse(sk, skb))
2041 break;
2042
2043 space -= skb->len;
2044
2045 if (first) {
2046 first = 0;
2047 continue;
2048 }
2049
2050 if (space < 0)
2051 break;
2052
2053
2054
2055 if (skb->len > skb_tailroom(to))
2056 break;
2057
2058 if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp)))
2059 break;
2060
2061 tcp_collapse_retrans(sk, to);
2062 }
2063}
2064
2065
2066
2067
2068
2069int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
2070{
2071 struct tcp_sock *tp = tcp_sk(sk);
2072 struct inet_connection_sock *icsk = inet_csk(sk);
2073 unsigned int cur_mss;
2074 int err;
2075
2076
2077 if (icsk->icsk_mtup.probe_size) {
2078 icsk->icsk_mtup.probe_size = 0;
2079 }
2080
2081
2082
2083
2084 if (atomic_read(&sk->sk_wmem_alloc) >
2085 min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf))
2086 return -EAGAIN;
2087
2088 if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
2089 if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
2090 BUG();
2091 if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
2092 return -ENOMEM;
2093 }
2094
2095 if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
2096 return -EHOSTUNREACH;
2097
2098 cur_mss = tcp_current_mss(sk);
2099
2100
2101
2102
2103
2104
2105 if (!before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp)) &&
2106 TCP_SKB_CB(skb)->seq != tp->snd_una)
2107 return -EAGAIN;
2108
2109 if (skb->len > cur_mss) {
2110 if (tcp_fragment(sk, skb, cur_mss, cur_mss))
2111 return -ENOMEM;
2112 } else {
2113 int oldpcount = tcp_skb_pcount(skb);
2114
2115 if (unlikely(oldpcount > 1)) {
2116 tcp_init_tso_segs(sk, skb, cur_mss);
2117 tcp_adjust_pcount(sk, skb, oldpcount - tcp_skb_pcount(skb));
2118 }
2119 }
2120
2121 tcp_retrans_try_collapse(sk, skb, cur_mss);
2122
2123
2124
2125
2126
2127 if (skb->len > 0 &&
2128 (TCP_SKB_CB(skb)->flags & TCPHDR_FIN) &&
2129 tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) {
2130 if (!pskb_trim(skb, 0)) {
2131
2132 tcp_init_nondata_skb(skb, TCP_SKB_CB(skb)->end_seq - 1,
2133 TCP_SKB_CB(skb)->flags);
2134 skb->ip_summed = CHECKSUM_NONE;
2135 }
2136 }
2137
2138
2139
2140
2141 TCP_SKB_CB(skb)->when = tcp_time_stamp;
2142
2143 err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
2144
2145 if (err == 0) {
2146
2147 TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
2148
2149 tp->total_retrans++;
2150
2151#if FASTRETRANS_DEBUG > 0
2152 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
2153 if (net_ratelimit())
2154 printk(KERN_DEBUG "retrans_out leaked.\n");
2155 }
2156#endif
2157 if (!tp->retrans_out)
2158 tp->lost_retrans_low = tp->snd_nxt;
2159 TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;
2160 tp->retrans_out += tcp_skb_pcount(skb);
2161
2162
2163 if (!tp->retrans_stamp)
2164 tp->retrans_stamp = TCP_SKB_CB(skb)->when;
2165
2166 tp->undo_retrans += tcp_skb_pcount(skb);
2167
2168
2169
2170
2171 TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt;
2172 }
2173 return err;
2174}
2175
2176
2177
2178
2179static int tcp_can_forward_retransmit(struct sock *sk)
2180{
2181 const struct inet_connection_sock *icsk = inet_csk(sk);
2182 struct tcp_sock *tp = tcp_sk(sk);
2183
2184
2185 if (icsk->icsk_ca_state != TCP_CA_Recovery)
2186 return 0;
2187
2188
2189 if (tcp_is_reno(tp))
2190 return 0;
2191
2192
2193
2194
2195
2196
2197
2198
2199
2200 if (tcp_may_send_now(sk))
2201 return 0;
2202
2203 return 1;
2204}
2205
2206
2207
2208
2209
2210
2211
2212
2213
2214void tcp_xmit_retransmit_queue(struct sock *sk)
2215{
2216 const struct inet_connection_sock *icsk = inet_csk(sk);
2217 struct tcp_sock *tp = tcp_sk(sk);
2218 struct sk_buff *skb;
2219 struct sk_buff *hole = NULL;
2220 u32 last_lost;
2221 int mib_idx;
2222 int fwd_rexmitting = 0;
2223
2224 if (!tp->packets_out)
2225 return;
2226
2227 if (!tp->lost_out)
2228 tp->retransmit_high = tp->snd_una;
2229
2230 if (tp->retransmit_skb_hint) {
2231 skb = tp->retransmit_skb_hint;
2232 last_lost = TCP_SKB_CB(skb)->end_seq;
2233 if (after(last_lost, tp->retransmit_high))
2234 last_lost = tp->retransmit_high;
2235 } else {
2236 skb = tcp_write_queue_head(sk);
2237 last_lost = tp->snd_una;
2238 }
2239
2240 tcp_for_write_queue_from(skb, sk) {
2241 __u8 sacked = TCP_SKB_CB(skb)->sacked;
2242
2243 if (skb == tcp_send_head(sk))
2244 break;
2245
2246 if (hole == NULL)
2247 tp->retransmit_skb_hint = skb;
2248
2249
2250
2251
2252
2253
2254
2255
2256 if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
2257 return;
2258
2259 if (fwd_rexmitting) {
2260begin_fwd:
2261 if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp)))
2262 break;
2263 mib_idx = LINUX_MIB_TCPFORWARDRETRANS;
2264
2265 } else if (!before(TCP_SKB_CB(skb)->seq, tp->retransmit_high)) {
2266 tp->retransmit_high = last_lost;
2267 if (!tcp_can_forward_retransmit(sk))
2268 break;
2269
2270 if (hole != NULL) {
2271 skb = hole;
2272 hole = NULL;
2273 }
2274 fwd_rexmitting = 1;
2275 goto begin_fwd;
2276
2277 } else if (!(sacked & TCPCB_LOST)) {
2278 if (hole == NULL && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED)))
2279 hole = skb;
2280 continue;
2281
2282 } else {
2283 last_lost = TCP_SKB_CB(skb)->end_seq;
2284 if (icsk->icsk_ca_state != TCP_CA_Loss)
2285 mib_idx = LINUX_MIB_TCPFASTRETRANS;
2286 else
2287 mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS;
2288 }
2289
2290 if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))
2291 continue;
2292
2293 if (tcp_retransmit_skb(sk, skb))
2294 return;
2295 NET_INC_STATS_BH(sock_net(sk), mib_idx);
2296
2297 if (skb == tcp_write_queue_head(sk))
2298 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
2299 inet_csk(sk)->icsk_rto,
2300 TCP_RTO_MAX);
2301 }
2302}
2303
2304
2305
2306
2307void tcp_send_fin(struct sock *sk)
2308{
2309 struct tcp_sock *tp = tcp_sk(sk);
2310 struct sk_buff *skb = tcp_write_queue_tail(sk);
2311 int mss_now;
2312
2313
2314
2315
2316
2317 mss_now = tcp_current_mss(sk);
2318
2319 if (tcp_send_head(sk) != NULL) {
2320 TCP_SKB_CB(skb)->flags |= TCPHDR_FIN;
2321 TCP_SKB_CB(skb)->end_seq++;
2322 tp->write_seq++;
2323 } else {
2324
2325 for (;;) {
2326 skb = alloc_skb_fclone(MAX_TCP_HEADER,
2327 sk->sk_allocation);
2328 if (skb)
2329 break;
2330 yield();
2331 }
2332
2333
2334 skb_reserve(skb, MAX_TCP_HEADER);
2335
2336 tcp_init_nondata_skb(skb, tp->write_seq,
2337 TCPHDR_ACK | TCPHDR_FIN);
2338 tcp_queue_skb(sk, skb);
2339 }
2340 __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF);
2341}
2342
2343
2344
2345
2346
2347
2348void tcp_send_active_reset(struct sock *sk, gfp_t priority)
2349{
2350 struct sk_buff *skb;
2351
2352
2353 skb = alloc_skb(MAX_TCP_HEADER, priority);
2354 if (!skb) {
2355 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
2356 return;
2357 }
2358
2359
2360 skb_reserve(skb, MAX_TCP_HEADER);
2361 tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),
2362 TCPHDR_ACK | TCPHDR_RST);
2363
2364 TCP_SKB_CB(skb)->when = tcp_time_stamp;
2365 if (tcp_transmit_skb(sk, skb, 0, priority))
2366 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
2367
2368 TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS);
2369}
2370
2371
2372
2373
2374
2375
2376
2377int tcp_send_synack(struct sock *sk)
2378{
2379 struct sk_buff *skb;
2380
2381 skb = tcp_write_queue_head(sk);
2382 if (skb == NULL || !(TCP_SKB_CB(skb)->flags & TCPHDR_SYN)) {
2383 printk(KERN_DEBUG "tcp_send_synack: wrong queue state\n");
2384 return -EFAULT;
2385 }
2386 if (!(TCP_SKB_CB(skb)->flags & TCPHDR_ACK)) {
2387 if (skb_cloned(skb)) {
2388 struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
2389 if (nskb == NULL)
2390 return -ENOMEM;
2391 tcp_unlink_write_queue(skb, sk);
2392 skb_header_release(nskb);
2393 __tcp_add_write_queue_head(sk, nskb);
2394 sk_wmem_free_skb(sk, skb);
2395 sk->sk_wmem_queued += nskb->truesize;
2396 sk_mem_charge(sk, nskb->truesize);
2397 skb = nskb;
2398 }
2399
2400 TCP_SKB_CB(skb)->flags |= TCPHDR_ACK;
2401 TCP_ECN_send_synack(tcp_sk(sk), skb);
2402 }
2403 TCP_SKB_CB(skb)->when = tcp_time_stamp;
2404 return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
2405}
2406
2407
2408struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2409 struct request_sock *req,
2410 struct request_values *rvp)
2411{
2412 struct tcp_out_options opts;
2413 struct tcp_extend_values *xvp = tcp_xv(rvp);
2414 struct inet_request_sock *ireq = inet_rsk(req);
2415 struct tcp_sock *tp = tcp_sk(sk);
2416 const struct tcp_cookie_values *cvp = tp->cookie_values;
2417 struct tcphdr *th;
2418 struct sk_buff *skb;
2419 struct tcp_md5sig_key *md5;
2420 int tcp_header_size;
2421 int mss;
2422 int s_data_desired = 0;
2423
2424 if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired)
2425 s_data_desired = cvp->s_data_desired;
2426 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15 + s_data_desired, 1, GFP_ATOMIC);
2427 if (skb == NULL)
2428 return NULL;
2429
2430
2431 skb_reserve(skb, MAX_TCP_HEADER);
2432
2433 skb_dst_set(skb, dst_clone(dst));
2434
2435 mss = dst_metric_advmss(dst);
2436 if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss)
2437 mss = tp->rx_opt.user_mss;
2438
2439 if (req->rcv_wnd == 0) {
2440 __u8 rcv_wscale;
2441
2442 req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW);
2443
2444
2445 if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
2446 (req->window_clamp > tcp_full_space(sk) || req->window_clamp == 0))
2447 req->window_clamp = tcp_full_space(sk);
2448
2449
2450 tcp_select_initial_window(tcp_full_space(sk),
2451 mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
2452 &req->rcv_wnd,
2453 &req->window_clamp,
2454 ireq->wscale_ok,
2455 &rcv_wscale,
2456 dst_metric(dst, RTAX_INITRWND));
2457 ireq->rcv_wscale = rcv_wscale;
2458 }
2459
2460 memset(&opts, 0, sizeof(opts));
2461#ifdef CONFIG_SYN_COOKIES
2462 if (unlikely(req->cookie_ts))
2463 TCP_SKB_CB(skb)->when = cookie_init_timestamp(req);
2464 else
2465#endif
2466 TCP_SKB_CB(skb)->when = tcp_time_stamp;
2467 tcp_header_size = tcp_synack_options(sk, req, mss,
2468 skb, &opts, &md5, xvp)
2469 + sizeof(*th);
2470
2471 skb_push(skb, tcp_header_size);
2472 skb_reset_transport_header(skb);
2473
2474 th = tcp_hdr(skb);
2475 memset(th, 0, sizeof(struct tcphdr));
2476 th->syn = 1;
2477 th->ack = 1;
2478 TCP_ECN_make_synack(req, th);
2479 th->source = ireq->loc_port;
2480 th->dest = ireq->rmt_port;
2481
2482
2483
2484 tcp_init_nondata_skb(skb, tcp_rsk(req)->snt_isn,
2485 TCPHDR_SYN | TCPHDR_ACK);
2486
2487 if (OPTION_COOKIE_EXTENSION & opts.options) {
2488 if (s_data_desired) {
2489 u8 *buf = skb_put(skb, s_data_desired);
2490
2491
2492 memcpy(buf, cvp->s_data_payload, s_data_desired);
2493 TCP_SKB_CB(skb)->end_seq += s_data_desired;
2494 }
2495
2496 if (opts.hash_size > 0) {
2497 __u32 workspace[SHA_WORKSPACE_WORDS];
2498 u32 *mess = &xvp->cookie_bakery[COOKIE_DIGEST_WORDS];
2499 u32 *tail = &mess[COOKIE_MESSAGE_WORDS-1];
2500
2501
2502
2503
2504
2505 *tail-- ^= opts.tsval;
2506 *tail-- ^= tcp_rsk(req)->rcv_isn + 1;
2507 *tail-- ^= TCP_SKB_CB(skb)->seq + 1;
2508
2509
2510 *tail-- ^= (((__force u32)th->dest << 16) | (__force u32)th->source);
2511 *tail-- ^= (u32)(unsigned long)cvp;
2512
2513 sha_transform((__u32 *)&xvp->cookie_bakery[0],
2514 (char *)mess,
2515 &workspace[0]);
2516 opts.hash_location =
2517 (__u8 *)&xvp->cookie_bakery[0];
2518 }
2519 }
2520
2521 th->seq = htonl(TCP_SKB_CB(skb)->seq);
2522 th->ack_seq = htonl(tcp_rsk(req)->rcv_isn + 1);
2523
2524
2525 th->window = htons(min(req->rcv_wnd, 65535U));
2526 tcp_options_write((__be32 *)(th + 1), tp, &opts);
2527 th->doff = (tcp_header_size >> 2);
2528 TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS, tcp_skb_pcount(skb));
2529
2530#ifdef CONFIG_TCP_MD5SIG
2531
2532 if (md5) {
2533 tcp_rsk(req)->af_specific->calc_md5_hash(opts.hash_location,
2534 md5, NULL, req, skb);
2535 }
2536#endif
2537
2538 return skb;
2539}
2540EXPORT_SYMBOL(tcp_make_synack);
2541
2542
2543static void tcp_connect_init(struct sock *sk)
2544{
2545 struct dst_entry *dst = __sk_dst_get(sk);
2546 struct tcp_sock *tp = tcp_sk(sk);
2547 __u8 rcv_wscale;
2548
2549
2550
2551
2552 tp->tcp_header_len = sizeof(struct tcphdr) +
2553 (sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0);
2554
2555#ifdef CONFIG_TCP_MD5SIG
2556 if (tp->af_specific->md5_lookup(sk, sk) != NULL)
2557 tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
2558#endif
2559
2560
2561 if (tp->rx_opt.user_mss)
2562 tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
2563 tp->max_window = 0;
2564 tcp_mtup_init(sk);
2565 tcp_sync_mss(sk, dst_mtu(dst));
2566
2567 if (!tp->window_clamp)
2568 tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
2569 tp->advmss = dst_metric_advmss(dst);
2570 if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->advmss)
2571 tp->advmss = tp->rx_opt.user_mss;
2572
2573 tcp_initialize_rcv_mss(sk);
2574
2575
2576 if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
2577 (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0))
2578 tp->window_clamp = tcp_full_space(sk);
2579
2580 tcp_select_initial_window(tcp_full_space(sk),
2581 tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
2582 &tp->rcv_wnd,
2583 &tp->window_clamp,
2584 sysctl_tcp_window_scaling,
2585 &rcv_wscale,
2586 dst_metric(dst, RTAX_INITRWND));
2587
2588 tp->rx_opt.rcv_wscale = rcv_wscale;
2589 tp->rcv_ssthresh = tp->rcv_wnd;
2590
2591 sk->sk_err = 0;
2592 sock_reset_flag(sk, SOCK_DONE);
2593 tp->snd_wnd = 0;
2594 tcp_init_wl(tp, 0);
2595 tp->snd_una = tp->write_seq;
2596 tp->snd_sml = tp->write_seq;
2597 tp->snd_up = tp->write_seq;
2598 tp->rcv_nxt = 0;
2599 tp->rcv_wup = 0;
2600 tp->copied_seq = 0;
2601
2602 inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
2603 inet_csk(sk)->icsk_retransmits = 0;
2604 tcp_clear_retrans(tp);
2605}
2606
2607
2608int tcp_connect(struct sock *sk)
2609{
2610 struct tcp_sock *tp = tcp_sk(sk);
2611 struct sk_buff *buff;
2612 int err;
2613
2614 tcp_connect_init(sk);
2615
2616 buff = alloc_skb_fclone(MAX_TCP_HEADER + 15, sk->sk_allocation);
2617 if (unlikely(buff == NULL))
2618 return -ENOBUFS;
2619
2620
2621 skb_reserve(buff, MAX_TCP_HEADER);
2622
2623 tp->snd_nxt = tp->write_seq;
2624 tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
2625 TCP_ECN_send_syn(sk, buff);
2626
2627
2628 TCP_SKB_CB(buff)->when = tcp_time_stamp;
2629 tp->retrans_stamp = TCP_SKB_CB(buff)->when;
2630 skb_header_release(buff);
2631 __tcp_add_write_queue_tail(sk, buff);
2632 sk->sk_wmem_queued += buff->truesize;
2633 sk_mem_charge(sk, buff->truesize);
2634 tp->packets_out += tcp_skb_pcount(buff);
2635 err = tcp_transmit_skb(sk, buff, 1, sk->sk_allocation);
2636 if (err == -ECONNREFUSED)
2637 return err;
2638
2639
2640
2641
2642 tp->snd_nxt = tp->write_seq;
2643 tp->pushed_seq = tp->write_seq;
2644 TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS);
2645
2646
2647 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
2648 inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
2649 return 0;
2650}
2651EXPORT_SYMBOL(tcp_connect);
2652
2653
2654
2655
2656
2657void tcp_send_delayed_ack(struct sock *sk)
2658{
2659 struct inet_connection_sock *icsk = inet_csk(sk);
2660 int ato = icsk->icsk_ack.ato;
2661 unsigned long timeout;
2662
2663 if (ato > TCP_DELACK_MIN) {
2664 const struct tcp_sock *tp = tcp_sk(sk);
2665 int max_ato = HZ / 2;
2666
2667 if (icsk->icsk_ack.pingpong ||
2668 (icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
2669 max_ato = TCP_DELACK_MAX;
2670
2671
2672
2673
2674
2675
2676
2677 if (tp->srtt) {
2678 int rtt = max(tp->srtt >> 3, TCP_DELACK_MIN);
2679
2680 if (rtt < max_ato)
2681 max_ato = rtt;
2682 }
2683
2684 ato = min(ato, max_ato);
2685 }
2686
2687
2688 timeout = jiffies + ato;
2689
2690
2691 if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
2692
2693
2694
2695 if (icsk->icsk_ack.blocked ||
2696 time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
2697 tcp_send_ack(sk);
2698 return;
2699 }
2700
2701 if (!time_before(timeout, icsk->icsk_ack.timeout))
2702 timeout = icsk->icsk_ack.timeout;
2703 }
2704 icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
2705 icsk->icsk_ack.timeout = timeout;
2706 sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
2707}
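
/*
 * Timeout sketch (assumed values): with icsk_ack.ato at 200 ms, pingpong
 * mode off and a smoothed RTT of about 40 ms, max_ato collapses to the
 * RTT, so the delayed ACK is armed roughly one RTT out; if a delayed-ACK
 * timer is already pending and due within ato/4, the ACK is simply sent
 * now instead of being rescheduled.
 */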
2708
2709
2710void tcp_send_ack(struct sock *sk)
2711{
2712 struct sk_buff *buff;
2713
2714
2715 if (sk->sk_state == TCP_CLOSE)
2716 return;
2717
2718
2719
2720
2721
2722 buff = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
2723 if (buff == NULL) {
2724 inet_csk_schedule_ack(sk);
2725 inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
2726 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
2727 TCP_DELACK_MAX, TCP_RTO_MAX);
2728 return;
2729 }
2730
2731
2732 skb_reserve(buff, MAX_TCP_HEADER);
2733 tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK);
2734
2735
2736 TCP_SKB_CB(buff)->when = tcp_time_stamp;
2737 tcp_transmit_skb(sk, buff, 0, GFP_ATOMIC);
2738}
2739
2740
2741
2742
2743
2744
2745
2746
2747
2748
2749
2750
2751static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
2752{
2753 struct tcp_sock *tp = tcp_sk(sk);
2754 struct sk_buff *skb;
2755
2756
2757 skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
2758 if (skb == NULL)
2759 return -1;
2760
2761
2762 skb_reserve(skb, MAX_TCP_HEADER);
2763
2764
2765
2766
2767 tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
2768 TCP_SKB_CB(skb)->when = tcp_time_stamp;
2769 return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
2770}
2771
2772
2773int tcp_write_wakeup(struct sock *sk)
2774{
2775 struct tcp_sock *tp = tcp_sk(sk);
2776 struct sk_buff *skb;
2777
2778 if (sk->sk_state == TCP_CLOSE)
2779 return -1;
2780
2781 if ((skb = tcp_send_head(sk)) != NULL &&
2782 before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) {
2783 int err;
2784 unsigned int mss = tcp_current_mss(sk);
2785 unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
2786
2787 if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq))
2788 tp->pushed_seq = TCP_SKB_CB(skb)->end_seq;
2789
2790
2791
2792
2793
2794 if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
2795 skb->len > mss) {
2796 seg_size = min(seg_size, mss);
2797 TCP_SKB_CB(skb)->flags |= TCPHDR_PSH;
2798 if (tcp_fragment(sk, skb, seg_size, mss))
2799 return -1;
2800 } else if (!tcp_skb_pcount(skb))
2801 tcp_set_skb_tso_segs(sk, skb, mss);
2802
2803 TCP_SKB_CB(skb)->flags |= TCPHDR_PSH;
2804 TCP_SKB_CB(skb)->when = tcp_time_stamp;
2805 err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
2806 if (!err)
2807 tcp_event_new_data_sent(sk, skb);
2808 return err;
2809 } else {
2810 if (between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF))
2811 tcp_xmit_probe_skb(sk, 1);
2812 return tcp_xmit_probe_skb(sk, 0);
2813 }
2814}
2815
2816
2817
2818
2819void tcp_send_probe0(struct sock *sk)
2820{
2821 struct inet_connection_sock *icsk = inet_csk(sk);
2822 struct tcp_sock *tp = tcp_sk(sk);
2823 int err;
2824
2825 err = tcp_write_wakeup(sk);
2826
2827 if (tp->packets_out || !tcp_send_head(sk)) {
2828
2829 icsk->icsk_probes_out = 0;
2830 icsk->icsk_backoff = 0;
2831 return;
2832 }
2833
2834 if (err <= 0) {
2835 if (icsk->icsk_backoff < sysctl_tcp_retries2)
2836 icsk->icsk_backoff++;
2837 icsk->icsk_probes_out++;
2838 inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
2839 min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
2840 TCP_RTO_MAX);
2841 } else {
2842
2843
2844
2845
2846
2847
2848 if (!icsk->icsk_probes_out)
2849 icsk->icsk_probes_out = 1;
2850 inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
2851 min(icsk->icsk_rto << icsk->icsk_backoff,
2852 TCP_RESOURCE_PROBE_INTERVAL),
2853 TCP_RTO_MAX);
2854 }
2855}
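
/*
 * Backoff sketch (illustrative): after a zero-window probe goes out, the
 * next probe timer is icsk_rto << icsk_backoff capped at TCP_RTO_MAX, so
 * starting from a 200 ms RTO the probes are spaced roughly 200 ms,
 * 400 ms, 800 ms, ... up to the ceiling; a probe that fails for purely
 * local reasons instead re-arms at the shorter
 * TCP_RESOURCE_PROBE_INTERVAL without bumping the backoff, so transient
 * memory pressure does not kill the connection.
 */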
2856