#include <linux/tcp.h>
#include <net/tcp.h>

/* Returns true if the packet (re)sent at time t1 with highest sequence seq1
 * was sent after the packet sent at t2 ending at seq2. Timestamps are
 * compared first; sequence numbers only break ties between packets that
 * carry the same transmit timestamp (e.g. segments of one TSO burst).
 */
static bool tcp_rack_sent_after(u64 t1, u64 t2, u32 seq1, u32 seq2)
{
	return t1 > t2 || (t1 == t2 && after(seq1, seq2));
}

static u32 tcp_rack_reo_wnd(const struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tp->reord_seen) {
		/* No reordering has been observed on this connection yet:
		 * be aggressive and skip the settling delay while already in
		 * recovery, or when the classic DUPACK threshold would start
		 * recovery anyway (unless that shortcut is disabled via the
		 * TCP_RACK_NO_DUPTHRESH sysctl bit).
		 */
		if (inet_csk(sk)->icsk_ca_state >= TCP_CA_Recovery)
			return 0;

		if (tp->sacked_out >= tp->reordering &&
		    !(sock_net(sk)->ipv4.sysctl_tcp_recovery & TCP_RACK_NO_DUPTHRESH))
			return 0;
	}

	/* Allow a reordering settling delay of min_rtt/4 per step, growing
	 * with reo_wnd_steps as DSACKs are observed, but never beyond the
	 * smoothed RTT (srtt_us is stored left-shifted by 3).
	 */
	return min((tcp_min_rtt(tp) >> 2) * tp->rack.reo_wnd_steps,
		   tp->srtt_us >> 3);
}
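
/* Illustrative numbers (assumed for the example, not from this file): with
 * tcp_min_rtt() == 40000 us, reo_wnd_steps == 1 and srtt_us == 400000
 * (a 50 ms smoothed RTT stored << 3), the settling delay is
 * min((40000 >> 2) * 1, 400000 >> 3) = min(10000, 50000) = 10000 us,
 * i.e. a quarter of the minimum RTT.
 */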

/* Remaining time, in microseconds, before @skb should be deemed lost:
 * positive while the packet is still within the RACK RTT plus the
 * reordering window, zero or negative once that budget has elapsed.
 */
s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb, u32 reo_wnd)
{
	return tp->rack.rtt_us + reo_wnd -
	       tcp_stamp_us_delta(tp->tcp_mstamp, tcp_skb_timestamp_us(skb));
}
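
/* Worked example with made-up values: rack.rtt_us = 10000, reo_wnd = 2500,
 * and a packet (re)sent 13000 us before tcp_mstamp gives
 * 10000 + 2500 - 13000 = -500, so that packet is declared lost; one sent
 * 9000 us ago gives +3500 and must wait at least that much longer.
 */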

/* RACK (Recent ACKnowledgment) loss detection:
 *
 * A packet is considered lost when some packet sent later has already been
 * (s)acked and the packet itself has still not been (s)acked after a recent
 * round-trip time plus a small reordering window. Unlike the classic DUPACK
 * threshold (packet counting) or FACK (sequence distance), RACK works in the
 * time domain, so it applies equally to original and retransmitted packets
 * and tolerates reordering by waiting out a "settling delay" (reo_wnd)
 * instead of tweaking dupthresh.
 *
 * tcp_rack_detect_loss() walks the time-sorted sent queue, marks expired
 * packets lost, and reports how long the caller should wait (reo_timeout)
 * before the remaining candidates can be declared lost.
 */
static void tcp_rack_detect_loss(struct sock *sk, u32 *reo_timeout)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb, *n;
	u32 reo_wnd;

	*reo_timeout = 0;
	reo_wnd = tcp_rack_reo_wnd(sk);
	list_for_each_entry_safe(skb, n, &tp->tsorted_sent_queue,
				 tcp_tsorted_anchor) {
		struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
		s32 remaining;

		/* Skip packets already marked lost but not yet retransmitted */
		if ((scb->sacked & TCPCB_LOST) &&
		    !(scb->sacked & TCPCB_SACKED_RETRANS))
			continue;

		if (!tcp_rack_sent_after(tp->rack.mstamp,
					 tcp_skb_timestamp_us(skb),
					 tp->rack.end_seq, scb->end_seq))
			break;

		/* A packet is lost if it has not been (s)acked within the
		 * recent RTT plus the reordering window.
		 */
		remaining = tcp_rack_skb_timeout(tp, skb, reo_wnd);
		if (remaining <= 0) {
			tcp_mark_skb_lost(sk, skb);
			list_del_init(&skb->tcp_tsorted_anchor);
		} else {
			/* Record the longest remaining wait time */
			*reo_timeout = max_t(u32, *reo_timeout, remaining);
		}
	}
}
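
/* Note on the early break above: tsorted_sent_queue is kept in (re)transmit
 * order, so once the walk reaches a packet that was not sent before the most
 * recently (s)acked one (rack.mstamp/rack.end_seq), nothing later in the list
 * can be older either and the scan can stop. Packets that have not yet
 * expired feed the reordering timer via the largest "remaining" value.
 */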

/* Detect losses via RACK and, if some packets have not yet expired, arm the
 * reordering timer for the remaining wait.
 */
void tcp_rack_mark_lost(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 timeout;

	if (!tp->rack.advanced)
		return;

	/* Reset the flag so the queue is not rescanned until RACK state
	 * advances again.
	 */
	tp->rack.advanced = 0;
	tcp_rack_detect_loss(sk, &timeout);
	if (timeout) {
		timeout = usecs_to_jiffies(timeout) + TCP_TIMEOUT_MIN;
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_REO_TIMEOUT,
					  timeout, inet_csk(sk)->icsk_rto);
	}
}
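
/* The ICSK_TIME_REO_TIMEOUT timer armed above is expected to be serviced by
 * the TCP write timer, which calls tcp_rack_reo_timeout() below when it
 * fires; TCP_TIMEOUT_MIN keeps a very short timeout from rounding down to
 * zero jiffies.
 */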

/* Record the most recently (re)sent time and RTT among the newly (s)acked
 * packets. This advances the RACK state that tcp_rack_detect_loss() compares
 * every outstanding packet against.
 */
void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
		      u64 xmit_time)
{
	u32 rtt_us;

	rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, xmit_time);
	if (rtt_us < tcp_min_rtt(tp) && (sacked & TCPCB_RETRANS)) {
		/* If the (s)acked packet was retransmitted and the measured
		 * RTT is below the minimum RTT ever observed, the ACK most
		 * likely covers the original transmission rather than the
		 * retransmission. The sample is ambiguous, so do not use it
		 * to advance RACK state.
		 */
		return;
	}
	tp->rack.advanced = 1;
	tp->rack.rtt_us = rtt_us;
	if (tcp_rack_sent_after(xmit_time, tp->rack.mstamp,
				end_seq, tp->rack.end_seq)) {
		tp->rack.mstamp = xmit_time;
		tp->rack.end_seq = end_seq;
	}
}
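
/* Example of the ambiguity check above, with assumed numbers: if
 * tcp_min_rtt() is 40000 us and a retransmitted segment appears to be acked
 * only 5000 us after the retransmission, the ACK almost certainly covers the
 * original transmission, so the 5000 us sample is discarded rather than
 * stored as rack.rtt_us.
 */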

/* The reordering settling timer has expired: mark the packets that are now
 * past their deadline lost and retransmit them.
 */
void tcp_rack_reo_timeout(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 timeout, prior_inflight;

	prior_inflight = tcp_packets_in_flight(tp);
	tcp_rack_detect_loss(sk, &timeout);
	if (prior_inflight != tcp_packets_in_flight(tp)) {
		if (inet_csk(sk)->icsk_ca_state != TCP_CA_Recovery) {
			tcp_enter_recovery(sk, false);
			if (!inet_csk(sk)->icsk_ca_ops->cong_control)
				tcp_cwnd_reduction(sk, 1, 0);
		}
		tcp_xmit_retransmit_queue(sk);
	}
	if (inet_csk(sk)->icsk_pending != ICSK_TIME_RETRANS)
		tcp_rearm_rto(sk);
}
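
/* Reading the checks above: marking packets lost shrinks
 * tcp_packets_in_flight(), so a changed value means this timer run found new
 * losses; recovery is entered (with cwnd reduction unless the congestion
 * control module supplies its own cong_control hook) and the retransmit
 * queue is flushed. The RTO is re-armed unless a retransmission timer is
 * already the pending event.
 */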

/* Adjust RACK's reordering window using DSACK feedback.
 *
 * A DSACK arriving around a recovery suggests the "lost" packet was merely
 * reordered for longer than the current reo_wnd, so the window is widened by
 * one step (min_rtt/4), at most once per round trip. The widened window
 * persists for TCP_RACK_RECOVERY_THRESH further recoveries and is then reset
 * to a single step. Tracking steps rather than an absolute value keeps the
 * window proportional to the current min_rtt.
 */
void tcp_rack_update_reo_wnd(struct sock *sk, struct rate_sample *rs)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (sock_net(sk)->ipv4.sysctl_tcp_recovery & TCP_RACK_STATIC_REO_WND ||
	    !rs->prior_delivered)
		return;

	/* Ignore a DSACK for data sent before reo_wnd was last adjusted:
	 * less than a round trip has passed since that adjustment.
	 */
	if (before(rs->prior_delivered, tp->rack.last_delivered))
		tp->rack.dsack_seen = 0;

	/* A fresh DSACK is pending: widen the window by one step and restart
	 * the persistence counter.
	 */
	if (tp->rack.dsack_seen) {
		tp->rack.reo_wnd_steps = min_t(u32, 0xFF,
					       tp->rack.reo_wnd_steps + 1);
		tp->rack.dsack_seen = 0;
		tp->rack.last_delivered = tp->delivered;
		tp->rack.reo_wnd_persist = TCP_RACK_RECOVERY_THRESH;
	} else if (!tp->rack.reo_wnd_persist) {
		tp->rack.reo_wnd_steps = 1;
	}
}
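
/* Illustrative sequence (values assumed, not taken from this file): a flow
 * with tcp_min_rtt() == 40000 us starts with reo_wnd_steps == 1, i.e. a
 * 10000 us window. One qualifying DSACK bumps reo_wnd_steps to 2 (20000 us);
 * after TCP_RACK_RECOVERY_THRESH recoveries without another DSACK,
 * reo_wnd_persist runs out and the window falls back to 10000 us.
 */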

/* RFC 6582 NewReno-style loss marking for connections without SACK: mark the
 * head of the retransmit queue lost when either
 * a) enough DUPACKs have arrived to start fast recovery, or
 * b) an ACK advances snd_una while already in fast recovery (partial ACK).
 */
void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced)
{
	const u8 state = inet_csk(sk)->icsk_ca_state;
	struct tcp_sock *tp = tcp_sk(sk);

	if ((state < TCP_CA_Recovery && tp->sacked_out >= tp->reordering) ||
	    (state == TCP_CA_Recovery && snd_una_advanced)) {
		struct sk_buff *skb = tcp_rtx_queue_head(sk);
		u32 mss;

		if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
			return;

		/* Only mark one MSS worth of data lost: split off the first
		 * segment if the head skb covers more than one.
		 */
		mss = tcp_skb_mss(skb);
		if (tcp_skb_pcount(skb) > 1 && skb->len > mss)
			tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb,
				     mss, mss, GFP_ATOMIC);

		tcp_mark_skb_lost(sk, skb);
	}
}
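
/* Note: for non-SACK flows the kernel counts duplicate ACKs in sacked_out, so
 * the "tp->sacked_out >= tp->reordering" test above is the classic
 * three-duplicate-ACK trigger when reordering is at its default of 3.
 */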