1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18#include "fragmentation.h"
19#include "main.h"
20
21#include <linux/atomic.h>
22#include <linux/byteorder/generic.h>
23#include <linux/errno.h>
24#include <linux/etherdevice.h>
25#include <linux/fs.h>
26#include <linux/if_ether.h>
27#include <linux/jiffies.h>
28#include <linux/kernel.h>
29#include <linux/lockdep.h>
30#include <linux/netdevice.h>
31#include <linux/skbuff.h>
32#include <linux/slab.h>
33#include <linux/spinlock.h>
34#include <linux/string.h>
35
36#include "hard-interface.h"
37#include "originator.h"
38#include "packet.h"
39#include "routing.h"
40#include "send.h"
41#include "soft-interface.h"
42
43
44
45
46
47
48
49
50static void batadv_frag_clear_chain(struct hlist_head *head, bool dropped)
51{
52 struct batadv_frag_list_entry *entry;
53 struct hlist_node *node;
54
55 hlist_for_each_entry_safe(entry, node, head, list) {
56 hlist_del(&entry->list);
57
58 if (dropped)
59 kfree_skb(entry->skb);
60 else
61 consume_skb(entry->skb);
62
63 kfree(entry);
64 }
65}
66
67
68
69
70
71
72void batadv_frag_purge_orig(struct batadv_orig_node *orig_node,
73 bool (*check_cb)(struct batadv_frag_table_entry *))
74{
75 struct batadv_frag_table_entry *chain;
76 u8 i;
77
78 for (i = 0; i < BATADV_FRAG_BUFFER_COUNT; i++) {
79 chain = &orig_node->fragments[i];
80 spin_lock_bh(&chain->lock);
81
82 if (!check_cb || check_cb(chain)) {
83 batadv_frag_clear_chain(&chain->fragment_list, true);
84 chain->size = 0;
85 }
86
87 spin_unlock_bh(&chain->lock);
88 }
89}
90
91
92
93
94
95
96static int batadv_frag_size_limit(void)
97{
98 int limit = BATADV_FRAG_MAX_FRAG_SIZE;
99
100 limit -= sizeof(struct batadv_frag_packet);
101 limit *= BATADV_FRAG_MAX_FRAGMENTS;
102
103 return limit;
104}
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119static bool batadv_frag_init_chain(struct batadv_frag_table_entry *chain,
120 u16 seqno)
121{
122 lockdep_assert_held(&chain->lock);
123
124 if (chain->seqno == seqno)
125 return false;
126
127 if (!hlist_empty(&chain->fragment_list))
128 batadv_frag_clear_chain(&chain->fragment_list, true);
129
130 chain->size = 0;
131 chain->seqno = seqno;
132
133 return true;
134}
135
136
137
138
139
140
141
142
143
144
145
146
147
148
/**
 * batadv_frag_insert_packet - insert a fragment into a fragment chain
 * @orig_node: originator that the fragment was received from
 * @skb: skb to insert
 * @chain_out: list head to attach complete chains of fragments to
 *
 * Insert a new fragment into the reverse ordered chain in the right table
 * entry. The hash table entry is cleared if "old" fragments exist in it.
 *
 * Return: true if skb is buffered, false on error. If the chain has all the
 * fragments needed to merge the packet, the chain is moved to the passed head
 * to avoid locking the chain in the table.
 *
 * On false return the skb (and the new list entry) have been freed; the
 * caller must not touch them again.
 */
static bool batadv_frag_insert_packet(struct batadv_orig_node *orig_node,
				      struct sk_buff *skb,
				      struct hlist_head *chain_out)
{
	struct batadv_frag_table_entry *chain;
	struct batadv_frag_list_entry *frag_entry_new = NULL, *frag_entry_curr;
	struct batadv_frag_list_entry *frag_entry_last = NULL;
	struct batadv_frag_packet *frag_packet;
	u8 bucket;
	u16 seqno, hdr_size = sizeof(struct batadv_frag_packet);
	bool ret = false;

	/* Linearize packet to avoid linearizing 16 packets in a row when doing
	 * the later merge. Non-linear merge should be added to remove this
	 * linearization.
	 */
	if (skb_linearize(skb) < 0)
		goto err;

	frag_packet = (struct batadv_frag_packet *)skb->data;
	seqno = ntohs(frag_packet->seqno);
	bucket = seqno % BATADV_FRAG_BUFFER_COUNT;

	frag_entry_new = kmalloc(sizeof(*frag_entry_new), GFP_ATOMIC);
	if (!frag_entry_new)
		goto err;

	frag_entry_new->skb = skb;
	frag_entry_new->no = frag_packet->no;

	/* Select entry in the "chain table" and delete any prior fragments
	 * with another sequence number. batadv_frag_init_chain() returns true,
	 * if the list is empty at return.
	 */
	chain = &orig_node->fragments[bucket];
	spin_lock_bh(&chain->lock);
	if (batadv_frag_init_chain(chain, seqno)) {
		hlist_add_head(&frag_entry_new->list, &chain->fragment_list);
		/* chain->size accounts payload only, not the fragment header */
		chain->size = skb->len - hdr_size;
		chain->timestamp = jiffies;
		chain->total_size = ntohs(frag_packet->total_size);
		ret = true;
		goto out;
	}

	/* Find the position for the new fragment. */
	hlist_for_each_entry(frag_entry_curr, &chain->fragment_list, list) {
		/* Drop packet if fragment already exists. */
		if (frag_entry_curr->no == frag_entry_new->no)
			goto err_unlock;

		/* Order fragments from highest to lowest. */
		if (frag_entry_curr->no < frag_entry_new->no) {
			hlist_add_before(&frag_entry_new->list,
					 &frag_entry_curr->list);
			chain->size += skb->len - hdr_size;
			chain->timestamp = jiffies;
			ret = true;
			goto out;
		}

		/* store current entry because it could be the last in list */
		frag_entry_last = frag_entry_curr;
	}

	/* Reached the end of the list, so insert after 'frag_entry_last'. */
	if (likely(frag_entry_last)) {
		hlist_add_behind(&frag_entry_new->list, &frag_entry_last->list);
		chain->size += skb->len - hdr_size;
		chain->timestamp = jiffies;
		ret = true;
	}

out:
	if (chain->size > batadv_frag_size_limit() ||
	    chain->total_size != ntohs(frag_packet->total_size) ||
	    chain->total_size > batadv_frag_size_limit()) {
		/* Clear chain if total size of either the list or the packet
		 * exceeds the maximum size of one merged packet. Don't allow
		 * packets to have different total_size.
		 */
		batadv_frag_clear_chain(&chain->fragment_list, true);
		chain->size = 0;
	} else if (ntohs(frag_packet->total_size) == chain->size) {
		/* All fragments received. Hand over chain to caller. */
		hlist_move_list(&chain->fragment_list, chain_out);
		chain->size = 0;
	}

err_unlock:
	spin_unlock_bh(&chain->lock);

err:
	if (!ret) {
		kfree(frag_entry_new);
		kfree_skb(skb);
	}

	return ret;
}
249
250
251
252
253
254
255
256
257
258
/**
 * batadv_frag_merge_packets - merge a chain of fragments
 * @chain: head of chain with fragments
 *
 * Expand the first (i.e. last received) skb in the chain and copy the content
 * of the remaining skbs into the expanded one. After doing so, clear the
 * chain.
 *
 * Return: the merged skb or NULL on error.
 */
static struct sk_buff *
batadv_frag_merge_packets(struct hlist_head *chain)
{
	struct batadv_frag_packet *packet;
	struct batadv_frag_list_entry *entry;
	struct sk_buff *skb_out;
	int size, hdr_size = sizeof(struct batadv_frag_packet);
	bool dropped = false;

	/* Remove first entry, as this is the destination for the rest of the
	 * fragments.
	 */
	entry = hlist_entry(chain->first, struct batadv_frag_list_entry, list);
	hlist_del(&entry->list);
	skb_out = entry->skb;
	kfree(entry);

	packet = (struct batadv_frag_packet *)skb_out->data;
	size = ntohs(packet->total_size);

	/* Make room for the rest of the fragments. */
	if (pskb_expand_head(skb_out, 0, size - skb_out->len, GFP_ATOMIC) < 0) {
		kfree_skb(skb_out);
		skb_out = NULL;
		dropped = true;
		goto free;
	}

	/* Move the existing MAC header to just before the payload. (Override
	 * the fragment header.)
	 */
	skb_pull_rcsum(skb_out, hdr_size);
	memmove(skb_out->data - ETH_HLEN, skb_mac_header(skb_out), ETH_HLEN);
	skb_set_mac_header(skb_out, -ETH_HLEN);
	skb_reset_network_header(skb_out);
	skb_reset_transport_header(skb_out);

	/* Copy the payload of each remaining fragment into the first skb. */
	hlist_for_each_entry(entry, chain, list) {
		size = entry->skb->len - hdr_size;
		skb_put_data(skb_out, entry->skb->data + hdr_size, size);
	}

free:
	/* Locking is not needed, because 'chain' is not part of any orig. */
	batadv_frag_clear_chain(chain, dropped);
	return skb_out;
}
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322bool batadv_frag_skb_buffer(struct sk_buff **skb,
323 struct batadv_orig_node *orig_node_src)
324{
325 struct sk_buff *skb_out = NULL;
326 struct hlist_head head = HLIST_HEAD_INIT;
327 bool ret = false;
328
329
330 if (!batadv_frag_insert_packet(orig_node_src, *skb, &head))
331 goto out_err;
332
333
334 if (hlist_empty(&head))
335 goto out;
336
337 skb_out = batadv_frag_merge_packets(&head);
338 if (!skb_out)
339 goto out_err;
340
341out:
342 ret = true;
343out_err:
344 *skb = skb_out;
345 return ret;
346}
347
348
349
350
351
352
353
354
355
356
357
358
359
360bool batadv_frag_skb_fwd(struct sk_buff *skb,
361 struct batadv_hard_iface *recv_if,
362 struct batadv_orig_node *orig_node_src)
363{
364 struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
365 struct batadv_orig_node *orig_node_dst;
366 struct batadv_neigh_node *neigh_node = NULL;
367 struct batadv_frag_packet *packet;
368 u16 total_size;
369 bool ret = false;
370
371 packet = (struct batadv_frag_packet *)skb->data;
372 orig_node_dst = batadv_orig_hash_find(bat_priv, packet->dest);
373 if (!orig_node_dst)
374 goto out;
375
376 neigh_node = batadv_find_router(bat_priv, orig_node_dst, recv_if);
377 if (!neigh_node)
378 goto out;
379
380
381
382
383 total_size = ntohs(packet->total_size);
384 if (total_size > neigh_node->if_incoming->net_dev->mtu) {
385 batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_FWD);
386 batadv_add_counter(bat_priv, BATADV_CNT_FRAG_FWD_BYTES,
387 skb->len + ETH_HLEN);
388
389 packet->ttl--;
390 batadv_send_unicast_skb(skb, neigh_node);
391 ret = true;
392 }
393
394out:
395 if (orig_node_dst)
396 batadv_orig_node_put(orig_node_dst);
397 if (neigh_node)
398 batadv_neigh_node_put(neigh_node);
399 return ret;
400}
401
402
403
404
405
406
407
408
409
410
411
412
413
414static struct sk_buff *batadv_frag_create(struct sk_buff *skb,
415 struct batadv_frag_packet *frag_head,
416 unsigned int fragment_size)
417{
418 struct sk_buff *skb_fragment;
419 unsigned int header_size = sizeof(*frag_head);
420 unsigned int mtu = fragment_size + header_size;
421
422 skb_fragment = netdev_alloc_skb(NULL, mtu + ETH_HLEN);
423 if (!skb_fragment)
424 goto err;
425
426 skb_fragment->priority = skb->priority;
427
428
429 skb_reserve(skb_fragment, header_size + ETH_HLEN);
430 skb_split(skb, skb_fragment, skb->len - fragment_size);
431
432
433 skb_push(skb_fragment, header_size);
434 memcpy(skb_fragment->data, frag_head, header_size);
435
436err:
437 return skb_fragment;
438}
439
440
441
442
443
444
445
446
447
/**
 * batadv_frag_send_packet - create up to 16 fragments from the passed skb
 * @skb: skb to create fragments from; consumed on all paths
 * @orig_node: final destination of the created fragments
 * @neigh_node: next-hop of the created fragments
 *
 * Return: the netdev tx status or a negative errno code on a failure
 */
int batadv_frag_send_packet(struct sk_buff *skb,
			    struct batadv_orig_node *orig_node,
			    struct batadv_neigh_node *neigh_node)
{
	struct batadv_priv *bat_priv;
	struct batadv_hard_iface *primary_if = NULL;
	struct batadv_frag_packet frag_header;
	struct sk_buff *skb_fragment;
	unsigned int mtu = neigh_node->if_incoming->net_dev->mtu;
	unsigned int header_size = sizeof(frag_header);
	unsigned int max_fragment_size, num_fragments;
	int ret;

	/* To avoid merge and refragmentation at next-hops we never send
	 * fragments larger than BATADV_FRAG_MAX_FRAG_SIZE
	 */
	mtu = min_t(unsigned int, mtu, BATADV_FRAG_MAX_FRAG_SIZE);
	max_fragment_size = mtu - header_size;

	/* NOTE(review): early return without freeing skb - presumably the
	 * caller owns the skb until a non-early-return path is taken; confirm
	 * against callers before relying on this.
	 */
	if (skb->len == 0 || max_fragment_size == 0)
		return -EINVAL;

	/* Spread the payload evenly over the needed number of fragments. */
	num_fragments = (skb->len - 1) / max_fragment_size + 1;
	max_fragment_size = (skb->len - 1) / num_fragments + 1;

	/* Don't even try to fragment, if we need more than 16 fragments */
	if (num_fragments > BATADV_FRAG_MAX_FRAGMENTS) {
		ret = -EAGAIN;
		goto free_skb;
	}

	bat_priv = orig_node->bat_priv;
	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if) {
		ret = -EINVAL;
		goto free_skb;
	}

	/* Create one header to be copied to all fragments */
	frag_header.packet_type = BATADV_UNICAST_FRAG;
	frag_header.version = BATADV_COMPAT_VERSION;
	frag_header.ttl = BATADV_TTL;
	frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno));
	frag_header.reserved = 0;
	frag_header.no = 0;
	frag_header.total_size = htons(skb->len);

	/* skb->priority values from 256->263 are magic values to
	 * directly indicate a specific 802.1d priority. This is used
	 * to allow 802.1d priority to be passed directly in from VLAN
	 * tags, etc.
	 */
	if (skb->priority >= 256 && skb->priority <= 263)
		frag_header.priority = skb->priority - 256;
	else
		frag_header.priority = 0;

	ether_addr_copy(frag_header.orig, primary_if->net_dev->dev_addr);
	ether_addr_copy(frag_header.dest, orig_node->orig);

	/* Eat and send fragments from the tail of skb */
	while (skb->len > max_fragment_size) {
		/* The initial check in this function should cover this case */
		if (unlikely(frag_header.no == BATADV_FRAG_MAX_FRAGMENTS - 1)) {
			ret = -EINVAL;
			goto put_primary_if;
		}

		skb_fragment = batadv_frag_create(skb, &frag_header,
						  max_fragment_size);
		if (!skb_fragment) {
			ret = -ENOMEM;
			goto put_primary_if;
		}

		batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_TX);
		batadv_add_counter(bat_priv, BATADV_CNT_FRAG_TX_BYTES,
				   skb_fragment->len + ETH_HLEN);
		/* send consumes skb_fragment on every outcome */
		ret = batadv_send_unicast_skb(skb_fragment, neigh_node);
		if (ret != NET_XMIT_SUCCESS) {
			ret = NET_XMIT_DROP;
			goto put_primary_if;
		}

		frag_header.no++;
	}

	/* Make room for the fragment header. */
	if (batadv_skb_head_push(skb, header_size) < 0 ||
	    pskb_expand_head(skb, header_size + ETH_HLEN, 0, GFP_ATOMIC) < 0) {
		ret = -ENOMEM;
		goto put_primary_if;
	}

	memcpy(skb->data, &frag_header, header_size);

	/* Send the last fragment */
	batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_TX);
	batadv_add_counter(bat_priv, BATADV_CNT_FRAG_TX_BYTES,
			   skb->len + ETH_HLEN);
	ret = batadv_send_unicast_skb(skb, neigh_node);
	/* skb was consumed by batadv_send_unicast_skb(); prevent the
	 * kfree_skb() below from double-freeing it
	 */
	skb = NULL;

put_primary_if:
	batadv_hardif_put(primary_if);
free_skb:
	kfree_skb(skb);

	return ret;
}
559