/*
 *  net/dccp/ackvec.c
 *
 *  An implementation of Ack Vectors for the DCCP protocol
 *  Copyright (c) 2007 University of Aberdeen, Scotland, UK
 *  Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
 *
 *	This program is free software; you can redistribute it and/or modify it
 *	under the terms of the GNU General Public License as published by the
 *	Free Software Foundation; version 2 of the License;
 */
#include "dccp.h"
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>

static struct kmem_cache *dccp_ackvec_slab;
static struct kmem_cache *dccp_ackvec_record_slab;

struct dccp_ackvec *dccp_ackvec_alloc(const gfp_t priority)
{
	struct dccp_ackvec *av = kmem_cache_zalloc(dccp_ackvec_slab, priority);

	if (av != NULL) {
		av->av_buf_head = av->av_buf_tail = DCCPAV_MAX_ACKVEC_LEN - 1;
		INIT_LIST_HEAD(&av->av_records);
	}
	return av;
}

static void dccp_ackvec_purge_records(struct dccp_ackvec *av)
{
	struct dccp_ackvec_record *cur, *next;

	list_for_each_entry_safe(cur, next, &av->av_records, avr_node)
		kmem_cache_free(dccp_ackvec_record_slab, cur);
	INIT_LIST_HEAD(&av->av_records);
}

void dccp_ackvec_free(struct dccp_ackvec *av)
{
	if (likely(av != NULL)) {
		dccp_ackvec_purge_records(av);
		kmem_cache_free(dccp_ackvec_slab, av);
	}
}
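
/*
 * A minimal lifecycle sketch, assuming the receiver-side Ack Vector is kept
 * in a dccp_sock field named dccps_hc_rx_ackvec (the field name and error
 * handling are illustrative assumptions, not taken from this file):
 *
 *	struct dccp_sock *dp = dccp_sk(sk);
 *
 *	dp->dccps_hc_rx_ackvec = dccp_ackvec_alloc(GFP_KERNEL);
 *	if (dp->dccps_hc_rx_ackvec == NULL)
 *		return -ENOMEM;
 *	...
 *	dccp_ackvec_free(dp->dccps_hc_rx_ackvec);
 *	dp->dccps_hc_rx_ackvec = NULL;
 *
 * dccp_ackvec_free() also purges any sent-Ack-Vector records still on the
 * av_records list, so no separate record cleanup is needed.
 */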

/**
 * dccp_ackvec_update_records  -  Record information about sent Ack Vectors
 * @av:		Ack Vector records to update
 * @seqno:	Sequence number of the packet carrying the Ack Vector just sent
 * @nonce_sum:	The sum of all buffer nonces contained in the Ack Vector
 */
int dccp_ackvec_update_records(struct dccp_ackvec *av, u64 seqno, u8 nonce_sum)
{
	struct dccp_ackvec_record *avr;

	avr = kmem_cache_alloc(dccp_ackvec_record_slab, GFP_ATOMIC);
	if (avr == NULL)
		return -ENOBUFS;

	avr->avr_ack_seqno  = seqno;
	avr->avr_ack_ptr    = av->av_buf_head;
	avr->avr_ack_ackno  = av->av_buf_ackno;
	avr->avr_ack_nonce  = nonce_sum;
	avr->avr_ack_runlen = dccp_ackvec_runlen(av->av_buf + av->av_buf_head);
	/*
	 * When the buffer overflows, we keep no more than one record. This is
	 * the simplest way of disambiguating sender-Acks dated from different
	 * Ack Vectors: the second may refer to an obsolete buffer entry.
	 */
	if (av->av_overflow)
		dccp_ackvec_purge_records(av);
	/*
	 * Since GSS is incremented for each packet, the list is automatically
	 * arranged in descending order of @ack_seqno.
	 */
	list_add(&avr->avr_node, &av->av_records);

	dccp_pr_debug("Added Vector, ack_seqno=%llu, ack_ackno=%llu (rl=%u)\n",
		      (unsigned long long)avr->avr_ack_seqno,
		      (unsigned long long)avr->avr_ack_ackno,
		      avr->avr_ack_runlen);
	return 0;
}

static struct dccp_ackvec_record *dccp_ackvec_lookup(struct list_head *av_list,
						     const u64 ackno)
{
	struct dccp_ackvec_record *avr;
	/*
	 * Exploit that records are inserted in descending order of sequence
	 * number, start with the oldest record first. If @ackno is `before'
	 * the earliest ack_ackno, the packet is too old to be considered.
	 */
	list_for_each_entry_reverse(avr, av_list, avr_node) {
		if (avr->avr_ack_seqno == ackno)
			return avr;
		if (before48(ackno, avr->avr_ack_seqno))
			break;
	}
	return NULL;
}

/*
 * Buffer index and length computation using modulo-buffersize arithmetic.
 * Note that, as pointers move from right to left, head is `before' tail.
 */
static inline u16 __ackvec_idx_add(const u16 a, const u16 b)
{
	return (a + b) % DCCPAV_MAX_ACKVEC_LEN;
}

static inline u16 __ackvec_idx_sub(const u16 a, const u16 b)
{
	return __ackvec_idx_add(a, DCCPAV_MAX_ACKVEC_LEN - b);
}

u16 dccp_ackvec_buflen(const struct dccp_ackvec *av)
{
	if (unlikely(av->av_overflow))
		return DCCPAV_MAX_ACKVEC_LEN;
	return __ackvec_idx_sub(av->av_buf_tail, av->av_buf_head);
}
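
/*
 * A worked example of the index arithmetic, assuming for illustration that
 * DCCPAV_MAX_ACKVEC_LEN is 253 (valid indices 0..252), av_buf_head == 250
 * and av_buf_tail == 2:
 *
 *	__ackvec_idx_add(250, 5)  ==  (250 + 5) % 253         ==  2
 *	__ackvec_idx_sub(2, 5)    ==  (2 + (253 - 5)) % 253   ==  250
 *	dccp_ackvec_buflen(av)    ==  __ackvec_idx_sub(2, 250) ==  5
 *
 * i.e. the five cells 250, 251, 252, 0, 1 are in use: av_buf_head marks the
 * newest cell, while av_buf_tail is the slot just past the oldest one.
 */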

/**
 * dccp_ackvec_update_old  -  Update previous state as per RFC 4340, 11.4.1
 * @av:		non-empty buffer to update
 * @distance:	negative or zero distance of @seqno from buf_ackno downward
 * @seqno:	the (old) sequence number whose record is to be updated
 * @state:	state in which packet carrying @seqno was received
 */
static void dccp_ackvec_update_old(struct dccp_ackvec *av, s64 distance,
				   u64 seqno, enum dccp_ackvec_states state)
{
	u16 ptr = av->av_buf_head;

	BUG_ON(distance > 0);
	if (unlikely(dccp_ackvec_is_empty(av)))
		return;

	do {
		u8 runlen = dccp_ackvec_runlen(av->av_buf + ptr);

		if (distance + runlen >= 0) {
			/*
			 * Only update the state if packet has not been received
			 * yet. This is OK as per the second table in RFC 4340,
			 * 11.4.1; i.e. here we are using the following table:
			 *                     RECEIVED
			 *                      0   1   3
			 *              /  0    0   1   3
			 *       STORED |  1    1   1   1
			 *              \  3    0   1   3
			 * The "Not Received" state was set by reserve_seats().
			 */
			if (av->av_buf[ptr] == DCCPAV_NOT_RECEIVED)
				av->av_buf[ptr] = state;
			else
				dccp_pr_debug("Not changing %llu state to %u\n",
					      (unsigned long long)seqno, state);
			break;
		}

		distance += runlen + 1;
		ptr	  = __ackvec_idx_add(ptr, 1);

	} while (ptr != av->av_buf_tail);
}

/* Mark @num entries after buf_head as "Not yet received". */
static void dccp_ackvec_reserve_seats(struct dccp_ackvec *av, u16 num)
{
	u16 start = __ackvec_idx_add(av->av_buf_head, 1),
	    len	  = DCCPAV_MAX_ACKVEC_LEN - start;

	/* check for buffer wrap-around */
	if (num > len) {
		memset(av->av_buf + start, DCCPAV_NOT_RECEIVED, len);
		start = 0;
		num  -= len;
	}
	if (num)
		memset(av->av_buf + start, DCCPAV_NOT_RECEIVED, num);
}

/**
 * dccp_ackvec_add_new  -  Record one or more new entries in Ack Vector buffer
 * @av:		 container of buffer to update (can be empty or non-empty)
 * @num_packets: number of packets to register (must be >= 1)
 * @seqno:	 sequence number of the first packet in @num_packets
 * @state:	 state in which packet carrying @seqno was received
 */
static void dccp_ackvec_add_new(struct dccp_ackvec *av, u32 num_packets,
				u64 seqno, enum dccp_ackvec_states state)
{
	u32 num_cells = num_packets;

	if (num_packets > DCCPAV_BURST_THRESH) {
		u32 lost_packets = num_packets - 1;

		DCCP_WARN("Warning: large burst loss (%u)\n", lost_packets);
		/*
		 * We received 1 packet and have a loss of size "num_packets-1"
		 * which we squeeze into num_cells-1 rather than reserving an
		 * entire byte for each lost packet.
		 * The reason is that the vector grows in O(burst_length); when
		 * it grows too large there will no room left for the payload.
		 * This is a trade-off: if a few packets out of the burst show
		 * up later, their state will not be changed; it is simply too
		 * costly to reshuffle/reallocate/copy the buffer each time.
		 * Should such problems arise, we could use struct list_head to
		 * support arbitrary-length sequences.
		 */
		for (num_packets = num_cells = 1; lost_packets; ++num_cells) {
			u8 len = min_t(u32, lost_packets, DCCPAV_MAX_RUNLEN);

			av->av_buf_head = __ackvec_idx_sub(av->av_buf_head, 1);
			av->av_buf[av->av_buf_head] = DCCPAV_NOT_RECEIVED | len;

			lost_packets -= len;
		}
	}

	if (num_cells + dccp_ackvec_buflen(av) >= DCCPAV_MAX_ACKVEC_LEN) {
		DCCP_CRIT("Ack Vector buffer overflow: dropping old entries");
		av->av_overflow = true;
	}

	av->av_buf_head = __ackvec_idx_sub(av->av_buf_head, num_packets);
	if (av->av_overflow)
		av->av_buf_tail = av->av_buf_head;

	av->av_buf[av->av_buf_head] = state;
	av->av_buf_ackno	    = seqno;

	if (num_packets > 1)
		dccp_ackvec_reserve_seats(av, num_packets - 1);
}

/**
 * dccp_ackvec_input  -  Register incoming packet in the buffer
 * @av:	 Ack Vector to register the packet in
 * @skb: packet whose sequence number is to be added
 */
void dccp_ackvec_input(struct dccp_ackvec *av, struct sk_buff *skb)
{
	u64 seqno = DCCP_SKB_CB(skb)->dccpd_seq;
	enum dccp_ackvec_states state = DCCPAV_RECEIVED;

	if (dccp_ackvec_is_empty(av)) {
		dccp_ackvec_add_new(av, 1, seqno, state);
		av->av_tail_ackno = seqno;

	} else {
		s64 num_packets = dccp_delta_seqno(av->av_buf_ackno, seqno);
		u8 *current_head = av->av_buf + av->av_buf_head;

		if (num_packets == 1 &&
		    dccp_ackvec_state(current_head) == state &&
		    dccp_ackvec_runlen(current_head) < DCCPAV_MAX_RUNLEN) {

			*current_head += 1;
			av->av_buf_ackno = seqno;

		} else if (num_packets > 0) {
			dccp_ackvec_add_new(av, num_packets, seqno, state);
		} else {
			dccp_ackvec_update_old(av, num_packets, seqno, state);
		}
	}
}
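
/*
 * A hypothetical receive-path helper showing how this might be driven per
 * packet. The function name, the call ordering, the dccps_hc_rx_ackvec field
 * and the DCCP_PKT_WITHOUT_ACK_SEQ check are illustrative assumptions, not
 * taken from this file:
 *
 *	static void rx_track_packet(struct sock *sk, struct sk_buff *skb)
 *	{
 *		struct dccp_ackvec *av = dccp_sk(sk)->dccps_hc_rx_ackvec;
 *		const u64 ackno = DCCP_SKB_CB(skb)->dccpd_ack_seq;
 *
 *		if (av == NULL)
 *			return;
 *		if (ackno != DCCP_PKT_WITHOUT_ACK_SEQ)
 *			dccp_ackvec_clear_state(av, ackno);
 *		dccp_ackvec_input(av, skb);
 *	}
 */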

/**
 * dccp_ackvec_clear_state  -  Perform house-keeping / garbage-collection
 * @av:    Ack Vector record to clean
 * @ackno: last Ack Vector which has been acknowledged
 *
 * This routine is called when the peer acknowledges the receipt of Ack Vectors
 * up to and including @ackno. While based on section A.3 of RFC 4340, here
 * are additional precautions to prevent corrupted buffer state. In particular,
 * we use tail_ackno to identify outdated records; it always marks the earliest
 * packet of group (2) in 11.4.2.
 */
void dccp_ackvec_clear_state(struct dccp_ackvec *av, const u64 ackno)
{
	struct dccp_ackvec_record *avr, *next;
	u8 runlen_now, eff_runlen;
	s64 delta;

	avr = dccp_ackvec_lookup(&av->av_records, ackno);
	if (avr == NULL)
		return;
	/*
	 * Deal with outdated acknowledgments: this arises when e.g. there are
	 * several old records and the acks from the peer come in slowly. In
	 * that case we may still have records that pre-date tail_ackno.
	 */
	delta = dccp_delta_seqno(av->av_tail_ackno, avr->avr_ack_ackno);
	if (delta < 0)
		goto free_records;
	/*
	 * Deal with overlapping Ack Vectors: don't subtract more than the
	 * number of packets between tail_ackno and ack_ackno.
	 */
	eff_runlen = delta < avr->avr_ack_runlen ? delta : avr->avr_ack_runlen;

	runlen_now = dccp_ackvec_runlen(av->av_buf + avr->avr_ack_ptr);
	/*
	 * The run length of Ack Vector cells does not decrease over time. If
	 * the run length is the same as at the time the Ack Vector was sent,
	 * we free the ack_ptr cell. That cell can however not be freed if the
	 * run length has increased: in this case we need to move the tail
	 * pointer backwards (towards higher indices), to its next-oldest
	 * neighbour.
	 */
	if (runlen_now > eff_runlen) {

		av->av_buf[avr->avr_ack_ptr] -= eff_runlen + 1;
		av->av_buf_tail = __ackvec_idx_add(avr->avr_ack_ptr, 1);

		/* This move may not have cleared the overflow flag. */
		if (av->av_overflow)
			av->av_overflow = (av->av_buf_head == av->av_buf_tail);
	} else {
		av->av_buf_tail	= avr->avr_ack_ptr;
		/*
		 * We have made sure that avr points to a valid cell within the
		 * buffer. This cell is either older than head, or equals head
		 * (empty buffer): in both cases we no longer have any overflow.
		 */
		av->av_overflow	= 0;
	}

	/*
	 * The cells just cleared covered sequence numbers up to and including
	 * avr_ack_ackno; the oldest entry still represented in the buffer is
	 * therefore its successor, so update tail_ackno accordingly.
	 */
	av->av_tail_ackno = ADD48(avr->avr_ack_ackno, 1);

free_records:
	list_for_each_entry_safe_from(avr, next, &av->av_records, avr_node) {
		list_del(&avr->avr_node);
		kmem_cache_free(dccp_ackvec_record_slab, avr);
	}
}

/*
 *	Routines to keep track of Ack Vectors received in an skb
 */
int dccp_ackvec_parsed_add(struct list_head *head, u8 *vec, u8 len, u8 nonce)
{
	struct dccp_ackvec_parsed *new = kmalloc(sizeof(*new), GFP_ATOMIC);

	if (new == NULL)
		return -ENOBUFS;
	new->vec   = vec;
	new->len   = len;
	new->nonce = nonce;

	list_add_tail(&new->node, head);
	return 0;
}
EXPORT_SYMBOL_GPL(dccp_ackvec_parsed_add);

void dccp_ackvec_parsed_cleanup(struct list_head *parsed_chunks)
{
	struct dccp_ackvec_parsed *cur, *next;

	list_for_each_entry_safe(cur, next, parsed_chunks, node)
		kfree(cur);
	INIT_LIST_HEAD(parsed_chunks);
}
EXPORT_SYMBOL_GPL(dccp_ackvec_parsed_cleanup);
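
/*
 * Sketch of how a caller might collect Ack Vector options from a packet and
 * hand them to a CCID. The option-walking variables and labels below are
 * illustrative only; dccp_ackvec_parsed_add()/_cleanup() are the helpers
 * defined above:
 *
 *	LIST_HEAD(av_chunks);
 *	u8 *opt_val;	// points at the value part of one Ack Vector option
 *	u8 opt_len;	// its length in bytes
 *	u8 nonce;	// Nonce Echo bit for this option
 *
 *	if (dccp_ackvec_parsed_add(&av_chunks, opt_val, opt_len, nonce))
 *		goto out_nomem;
 *	...
 *	// once the chunks have been processed:
 *	dccp_ackvec_parsed_cleanup(&av_chunks);
 */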

int __init dccp_ackvec_init(void)
{
	dccp_ackvec_slab = kmem_cache_create("dccp_ackvec",
					     sizeof(struct dccp_ackvec), 0,
					     SLAB_HWCACHE_ALIGN, NULL);
	if (dccp_ackvec_slab == NULL)
		goto out_err;

	dccp_ackvec_record_slab = kmem_cache_create("dccp_ackvec_record",
					     sizeof(struct dccp_ackvec_record),
					     0, SLAB_HWCACHE_ALIGN, NULL);
	if (dccp_ackvec_record_slab == NULL)
		goto out_destroy_slab;

	return 0;

out_destroy_slab:
	kmem_cache_destroy(dccp_ackvec_slab);
	dccp_ackvec_slab = NULL;
out_err:
	DCCP_CRIT("Unable to create Ack Vector slab cache");
	return -ENOBUFS;
}

void dccp_ackvec_exit(void)
{
	kmem_cache_destroy(dccp_ackvec_slab);
	dccp_ackvec_slab = NULL;
	kmem_cache_destroy(dccp_ackvec_record_slab);
	dccp_ackvec_record_slab = NULL;
}

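/*
 * Both slab caches must exist before any socket can allocate an Ack Vector;
 * a protocol init path would typically bracket its setup and teardown like
 * this (the surrounding function names are illustrative):
 *
 *	static int __init dccp_module_init(void)
 *	{
 *		int rc = dccp_ackvec_init();
 *
 *		if (rc)
 *			return rc;
 *		...
 *		return 0;
 *	}
 *
 *	static void __exit dccp_module_exit(void)
 *	{
 *		...
 *		dccp_ackvec_exit();
 *	}
 */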