#include "send.h"
#include "main.h"

#include <linux/atomic.h>
#include <linux/byteorder/generic.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/fs.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/workqueue.h>

#include "distributed-arp-table.h"
#include "fragmentation.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "log.h"
#include "network-coding.h"
#include "originator.h"
#include "routing.h"
#include "soft-interface.h"
#include "translation-table.h"

static void batadv_send_outstanding_bcast_packet(struct work_struct *work);
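/**
 * batadv_send_skb_packet - send an already prepared packet
 * @skb: the packet to send
 * @hard_iface: the interface to use to send the packet
 * @dst_addr: the payload destination
 *
 * Prepend the ethernet header, fill in source/destination addresses and the
 * batman-adv protocol id, then hand the skb to the interface's net device.
 * The skb is consumed in any case.
 *
 * Return: NET_XMIT_DROP in case of error or the result of dev_queue_xmit()
 * otherwise.
 */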
int batadv_send_skb_packet(struct sk_buff *skb,
			   struct batadv_hard_iface *hard_iface,
			   const u8 *dst_addr)
{
	struct batadv_priv *bat_priv;
	struct ethhdr *ethhdr;
	int ret;

	bat_priv = netdev_priv(hard_iface->soft_iface);

	if (hard_iface->if_status != BATADV_IF_ACTIVE)
		goto send_skb_err;

	if (unlikely(!hard_iface->net_dev))
		goto send_skb_err;

	if (!(hard_iface->net_dev->flags & IFF_UP)) {
		pr_warn("Interface %s is not up - can't send packet via that interface!\n",
			hard_iface->net_dev->name);
		goto send_skb_err;
	}

	/* push to the ethernet header */
	if (batadv_skb_head_push(skb, ETH_HLEN) < 0)
		goto send_skb_err;

	skb_reset_mac_header(skb);

	ethhdr = eth_hdr(skb);
	ether_addr_copy(ethhdr->h_source, hard_iface->net_dev->dev_addr);
	ether_addr_copy(ethhdr->h_dest, dst_addr);
	ethhdr->h_proto = htons(ETH_P_BATMAN);

	skb_set_network_header(skb, ETH_HLEN);
	skb->protocol = htons(ETH_P_BATMAN);

	skb->dev = hard_iface->net_dev;

	/* save a clone of the skb for later use when decoding coded packets */
	batadv_nc_skb_store_for_decoding(bat_priv, skb);

	/* dev_queue_xmit() returns a positive NET_XMIT_DROP on congestion or
	 * traffic shaping instead of a negative error code; treat any
	 * (unexpected) negative result as a drop as well
	 */
	ret = dev_queue_xmit(skb);
	if (ret < 0)
		ret = NET_XMIT_DROP;

	return ret;
send_skb_err:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

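/**
 * batadv_send_broadcast_skb - send a packet to the broadcast address
 * @skb: the packet to send
 * @hard_iface: the interface to use to send the packet
 *
 * Return: NET_XMIT_DROP in case of error or the result of dev_queue_xmit()
 * otherwise.
 */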
int batadv_send_broadcast_skb(struct sk_buff *skb,
			      struct batadv_hard_iface *hard_iface)
{
	return batadv_send_skb_packet(skb, hard_iface, batadv_broadcast_addr);
}

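/**
 * batadv_send_unicast_skb - send a packet to a specific neighbor
 * @skb: the packet to send
 * @neigh: the neighbor to send the packet to
 *
 * If the B.A.T.M.A.N. V protocol is compiled in, the timestamp of the last
 * unicast transmission to this neighbor is updated on success.
 *
 * Return: NET_XMIT_DROP in case of error or the result of dev_queue_xmit()
 * otherwise.
 */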
int batadv_send_unicast_skb(struct sk_buff *skb,
			    struct batadv_neigh_node *neigh)
{
#ifdef CONFIG_BATMAN_ADV_BATMAN_V
	struct batadv_hardif_neigh_node *hardif_neigh;
#endif
	int ret;

	ret = batadv_send_skb_packet(skb, neigh->if_incoming, neigh->addr);

#ifdef CONFIG_BATMAN_ADV_BATMAN_V
	hardif_neigh = batadv_hardif_neigh_get(neigh->if_incoming, neigh->addr);

	if (hardif_neigh && ret != NET_XMIT_DROP)
		hardif_neigh->bat_v.last_unicast_tx = jiffies;

	if (hardif_neigh)
		batadv_hardif_neigh_put(hardif_neigh);
#endif

	return ret;
}
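/**
 * batadv_send_skb_to_orig - Lookup next-hop and transmit skb.
 * @skb: Packet to be transmitted.
 * @orig_node: Final destination of the packet.
 * @recv_if: Interface used when receiving the packet (can be NULL).
 *
 * Looks up the best next-hop towards the passed originator and passes the
 * skb on for preparation of the MAC header. If the packet originated from
 * this host, NULL can be passed as recv_if and no interface alternating is
 * attempted.
 *
 * Return: -1 if no router towards orig_node could be found, the return value
 * of batadv_frag_send_packet() if the skb had to be fragmented, -EINPROGRESS
 * if the skb was buffered by the network coding subsystem, or the NET_XMIT
 * status of batadv_send_unicast_skb() otherwise.
 */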
int batadv_send_skb_to_orig(struct sk_buff *skb,
			    struct batadv_orig_node *orig_node,
			    struct batadv_hard_iface *recv_if)
{
	struct batadv_priv *bat_priv = orig_node->bat_priv;
	struct batadv_neigh_node *neigh_node;
	int ret = -1;

	/* batadv_find_router() increases neigh_node's refcount if found */
	neigh_node = batadv_find_router(bat_priv, orig_node, recv_if);
	if (!neigh_node)
		goto out;

	/* Check if the skb is too large to send in one piece and fragment
	 * it if needed.
	 */
	if (atomic_read(&bat_priv->fragmentation) &&
	    skb->len > neigh_node->if_incoming->net_dev->mtu) {
		/* Fragment and send packet. */
		ret = batadv_frag_send_packet(skb, orig_node, neigh_node);

		goto out;
	}

	/* try to network code the packet, if it is received on an interface
	 * (i.e. being forwarded). If the packet originates from this node or if
	 * network coding fails, then send the packet as usual.
	 */
	if (recv_if && batadv_nc_skb_forward(skb, neigh_node))
		ret = -EINPROGRESS;
	else
		ret = batadv_send_unicast_skb(skb, neigh_node);

out:
	if (neigh_node)
		batadv_neigh_node_put(neigh_node);

	return ret;
}
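/**
 * batadv_send_skb_push_fill_unicast - extend the buffer and initialize the
 *  common fields for unicast packets
 * @skb: the skb carrying the unicast header to initialize
 * @hdr_size: amount of bytes to push at the beginning of the skb
 * @orig_node: the destination node
 *
 * Return: false if the buffer extension was not possible or true otherwise.
 */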
static bool
batadv_send_skb_push_fill_unicast(struct sk_buff *skb, int hdr_size,
				  struct batadv_orig_node *orig_node)
{
	struct batadv_unicast_packet *unicast_packet;
	u8 ttvn = (u8)atomic_read(&orig_node->last_ttvn);

	if (batadv_skb_head_push(skb, hdr_size) < 0)
		return false;

	unicast_packet = (struct batadv_unicast_packet *)skb->data;
	unicast_packet->version = BATADV_COMPAT_VERSION;
	/* batman packet type: unicast */
	unicast_packet->packet_type = BATADV_UNICAST;
	/* set unicast ttl */
	unicast_packet->ttl = BATADV_TTL;
	/* copy the destination for faster routing */
	ether_addr_copy(unicast_packet->dest, orig_node->orig);
	/* set the destination tt version number */
	unicast_packet->ttvn = ttvn;

	return true;
}
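/**
 * batadv_send_skb_prepare_unicast - encapsulate an skb with a unicast header
 * @skb: the skb containing the payload to encapsulate
 * @orig_node: the destination node
 *
 * Return: false if the payload could not be encapsulated or true otherwise.
 */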
static bool batadv_send_skb_prepare_unicast(struct sk_buff *skb,
					    struct batadv_orig_node *orig_node)
{
	size_t uni_size = sizeof(struct batadv_unicast_packet);

	return batadv_send_skb_push_fill_unicast(skb, uni_size, orig_node);
}
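/**
 * batadv_send_skb_prepare_unicast_4addr - encapsulate an skb with a
 *  unicast 4addr header
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the skb containing the payload to encapsulate
 * @orig: the destination node
 * @packet_subtype: the unicast 4addr packet subtype to use
 *
 * Return: false if the payload could not be encapsulated or true otherwise.
 */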
bool batadv_send_skb_prepare_unicast_4addr(struct batadv_priv *bat_priv,
					   struct sk_buff *skb,
					   struct batadv_orig_node *orig,
					   int packet_subtype)
{
	struct batadv_hard_iface *primary_if;
	struct batadv_unicast_4addr_packet *uc_4addr_packet;
	bool ret = false;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* Pull the header space and fill the unicast_packet substructure.
	 * We can do that because the first member of the uc_4addr_packet
	 * is of type struct unicast_packet
	 */
	if (!batadv_send_skb_push_fill_unicast(skb, sizeof(*uc_4addr_packet),
					       orig))
		goto out;

	uc_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
	uc_4addr_packet->u.packet_type = BATADV_UNICAST_4ADDR;
	ether_addr_copy(uc_4addr_packet->src, primary_if->net_dev->dev_addr);
	uc_4addr_packet->subtype = packet_subtype;
	uc_4addr_packet->reserved = 0;

	ret = true;
out:
	if (primary_if)
		batadv_hardif_put(primary_if);
	return ret;
}
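/**
 * batadv_send_skb_unicast - encapsulate and send an skb via unicast
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @packet_type: the batman unicast packet type to use
 * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
 *  4addr packets)
 * @orig_node: the originator to send the packet to
 * @vid: the vid to be used to search the translation table
 *
 * Wrap the given skb into a batman-adv unicast or unicast-4addr header
 * depending on whether BATADV_UNICAST or BATADV_UNICAST_4ADDR was supplied
 * as packet_type. Then send this frame to the given orig_node.
 *
 * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */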
int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
			    struct sk_buff *skb, int packet_type,
			    int packet_subtype,
			    struct batadv_orig_node *orig_node,
			    unsigned short vid)
{
	struct batadv_unicast_packet *unicast_packet;
	struct ethhdr *ethhdr;
	int res, ret = NET_XMIT_DROP;

	if (!orig_node)
		goto out;

	switch (packet_type) {
	case BATADV_UNICAST:
		if (!batadv_send_skb_prepare_unicast(skb, orig_node))
			goto out;
		break;
	case BATADV_UNICAST_4ADDR:
		if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, skb,
							   orig_node,
							   packet_subtype))
			goto out;
		break;
	default:
		/* this function supports UNICAST and UNICAST_4ADDR only. It
		 * should never be invoked with another packet type
		 */
		goto out;
	}

	/* skb->data might have been reallocated by
	 * batadv_send_skb_prepare_unicast{,_4addr}()
	 */
	ethhdr = eth_hdr(skb);
	unicast_packet = (struct batadv_unicast_packet *)skb->data;

	/* inform the destination node that we are still missing a correct route
	 * for this client. The destination will receive this packet and will
	 * try to reroute it because the ttvn contained in the header is less
	 * than the current one
	 */
	if (batadv_tt_global_client_is_roaming(bat_priv, ethhdr->h_dest, vid))
		unicast_packet->ttvn = unicast_packet->ttvn - 1;

	res = batadv_send_skb_to_orig(skb, orig_node, NULL);
	if (res != -1)
		ret = NET_XMIT_SUCCESS;

out:
	if (ret == NET_XMIT_DROP)
		kfree_skb(skb);
	return ret;
}
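/**
 * batadv_send_skb_via_tt_generic - send an skb via TT lookup
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @packet_type: the batman unicast packet type to use
 * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
 *  4addr packets)
 * @dst_hint: can be used to override the destination contained in the skb
 * @vid: the vid to be used to search the translation table
 *
 * Look up the recipient node for the destination address in the ethernet
 * header via the translation table. Wrap the given skb into a batman-adv
 * unicast or unicast-4addr header depending on whether BATADV_UNICAST or
 * BATADV_UNICAST_4ADDR was supplied as packet_type. Then send this frame
 * to the according destination node.
 *
 * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */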
int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv,
				   struct sk_buff *skb, int packet_type,
				   int packet_subtype, u8 *dst_hint,
				   unsigned short vid)
{
	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
	struct batadv_orig_node *orig_node;
	u8 *src, *dst;
	int ret;

	src = ethhdr->h_source;
	dst = ethhdr->h_dest;

	/* if we got a hint, send the packet to this client (if any) */
	if (dst_hint) {
		src = NULL;
		dst = dst_hint;
	}
	orig_node = batadv_transtable_search(bat_priv, src, dst, vid);

	ret = batadv_send_skb_unicast(bat_priv, skb, packet_type,
				      packet_subtype, orig_node, vid);

	if (orig_node)
		batadv_orig_node_put(orig_node);

	return ret;
}
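/**
 * batadv_send_skb_via_gw - send an skb via gateway lookup
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @vid: the vid to be used to search the translation table
 *
 * Look up the currently selected gateway. Wrap the given skb into a batman-adv
 * unicast-4addr header and send this frame to this gateway node.
 *
 * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */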
int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
			   unsigned short vid)
{
	struct batadv_orig_node *orig_node;
	int ret;

	orig_node = batadv_gw_get_selected_orig(bat_priv);
	ret = batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST_4ADDR,
				      BATADV_P_DATA, orig_node, vid);

	if (orig_node)
		batadv_orig_node_put(orig_node);

	return ret;
}
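/**
 * batadv_forw_packet_free - free a forwarding packet
 * @forw_packet: The packet to free
 *
 * This frees a forwarding packet and releases any resources it might
 * have claimed.
 */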
void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet)
{
	kfree_skb(forw_packet->skb);
	if (forw_packet->if_incoming)
		batadv_hardif_put(forw_packet->if_incoming);
	if (forw_packet->if_outgoing)
		batadv_hardif_put(forw_packet->if_outgoing);
	if (forw_packet->queue_left)
		atomic_inc(forw_packet->queue_left);
	kfree(forw_packet);
}
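/**
 * batadv_forw_packet_alloc - allocate a forwarding packet
 * @if_incoming: The (optional) if_incoming to be grabbed
 * @if_outgoing: The (optional) if_outgoing to be grabbed
 * @queue_left: The (optional) queue counter to decrease
 * @bat_priv: The bat_priv for the mesh of this forw_packet
 *
 * Allocates a forwarding packet and tries to get a reference to the
 * (optional) if_incoming, if_outgoing and queue_left. If queue_left
 * is NULL then bat_priv is optional, too.
 *
 * Return: the forwarding packet on success, NULL otherwise.
 */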
struct batadv_forw_packet *
batadv_forw_packet_alloc(struct batadv_hard_iface *if_incoming,
			 struct batadv_hard_iface *if_outgoing,
			 atomic_t *queue_left,
			 struct batadv_priv *bat_priv)
{
	struct batadv_forw_packet *forw_packet;
	const char *qname;

	if (queue_left && !batadv_atomic_dec_not_zero(queue_left)) {
		qname = "unknown";

		if (queue_left == &bat_priv->bcast_queue_left)
			qname = "bcast";

		if (queue_left == &bat_priv->batman_queue_left)
			qname = "batman";

		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "%s queue is full\n", qname);

		return NULL;
	}

	forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);
	if (!forw_packet)
		goto err;

	if (if_incoming)
		kref_get(&if_incoming->refcount);

	if (if_outgoing)
		kref_get(&if_outgoing->refcount);

	forw_packet->skb = NULL;
	forw_packet->queue_left = queue_left;
	forw_packet->if_incoming = if_incoming;
	forw_packet->if_outgoing = if_outgoing;
	forw_packet->num_packets = 0;

	return forw_packet;

err:
	if (queue_left)
		atomic_inc(queue_left);

	return NULL;
}

static void
_batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
				 struct batadv_forw_packet *forw_packet,
				 unsigned long send_time)
{
	/* add new packet to packet list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* start timer for this packet */
	queue_delayed_work(batadv_event_workqueue, &forw_packet->delayed_work,
			   send_time);
}
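/**
 * batadv_add_bcast_packet_to_list - queue broadcast packet for multiple sends
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: broadcast packet to add
 * @delay: number of jiffies to wait before sending
 *
 * Add a broadcast packet to the queue and set up timers. Broadcast packets
 * are sent multiple times to increase probability for being received.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed.
 *
 * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY on errors.
 */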
int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
				    const struct sk_buff *skb,
				    unsigned long delay)
{
	struct batadv_hard_iface *primary_if = NULL;
	struct batadv_forw_packet *forw_packet;
	struct batadv_bcast_packet *bcast_packet;
	struct sk_buff *newskb;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto err;

	forw_packet = batadv_forw_packet_alloc(primary_if, NULL,
					       &bat_priv->bcast_queue_left,
					       bat_priv);
	batadv_hardif_put(primary_if);
	if (!forw_packet)
		goto err;

	newskb = skb_copy(skb, GFP_ATOMIC);
	if (!newskb)
		goto err_packet_free;

	/* as we have a copy now, it is safe to decrease the TTL */
	bcast_packet = (struct batadv_bcast_packet *)newskb->data;
	bcast_packet->ttl--;

	skb_reset_mac_header(newskb);

	forw_packet->skb = newskb;

	INIT_DELAYED_WORK(&forw_packet->delayed_work,
			  batadv_send_outstanding_bcast_packet);

	_batadv_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
	return NETDEV_TX_OK;

err_packet_free:
	batadv_forw_packet_free(forw_packet);
err:
	return NETDEV_TX_BUSY;
}

static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
{
	struct batadv_hard_iface *hard_iface;
	struct delayed_work *delayed_work;
	struct batadv_forw_packet *forw_packet;
	struct sk_buff *skb1;
	struct net_device *soft_iface;
	struct batadv_priv *bat_priv;

	delayed_work = to_delayed_work(work);
	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
				   delayed_work);
	soft_iface = forw_packet->if_incoming->soft_iface;
	bat_priv = netdev_priv(soft_iface);

	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
		goto out;

	if (batadv_dat_drop_broadcast_packet(bat_priv, forw_packet))
		goto out;

	/* rebroadcast packet */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->soft_iface != soft_iface)
			continue;

		if (forw_packet->num_packets >= hard_iface->num_bcasts)
			continue;

		if (!kref_get_unless_zero(&hard_iface->refcount))
			continue;

		/* send a copy of the saved skb */
		skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
		if (skb1)
			batadv_send_broadcast_skb(skb1, hard_iface);

		batadv_hardif_put(hard_iface);
	}
	rcu_read_unlock();

	forw_packet->num_packets++;

	/* if we still have some more bcasts to send */
	if (forw_packet->num_packets < BATADV_NUM_BCASTS_MAX) {
		_batadv_add_bcast_packet_to_list(bat_priv, forw_packet,
						 msecs_to_jiffies(5));
		return;
	}

out:
	batadv_forw_packet_free(forw_packet);
}

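/**
 * batadv_purge_outstanding_packets - cancel and free queued forward packets
 * @bat_priv: the bat priv with all the soft interface information
 * @hard_iface: the hard interface to purge packets for, or NULL to purge the
 *  broadcast and batman packet queues completely
 */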
void
batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
				 const struct batadv_hard_iface *hard_iface)
{
	struct batadv_forw_packet *forw_packet;
	struct hlist_node *safe_tmp_node;
	bool pending;

	if (hard_iface)
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "purge_outstanding_packets(): %s\n",
			   hard_iface->net_dev->name);
	else
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "purge_outstanding_packets()\n");

	/* free bcast list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
				  &bat_priv->forw_bcast_list, list) {
		/* if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if (hard_iface &&
		    forw_packet->if_incoming != hard_iface &&
		    forw_packet->if_outgoing != hard_iface)
			continue;

		spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

		/* batadv_send_outstanding_bcast_packet() will lock the list to
		 * delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bcast_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			batadv_forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* free batman packet list */
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
				  &bat_priv->forw_bat_list, list) {
		/* if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if (hard_iface &&
		    forw_packet->if_incoming != hard_iface &&
		    forw_packet->if_outgoing != hard_iface)
			continue;

		spin_unlock_bh(&bat_priv->forw_bat_list_lock);

		/* send_outstanding_bat_ogm_packet() will lock the list to
		 * delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bat_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			batadv_forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}