#include "send.h"
#include "main.h"

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/byteorder/generic.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/gfp.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/workqueue.h>

#include "distributed-arp-table.h"
#include "fragmentation.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "log.h"
#include "network-coding.h"
#include "originator.h"
#include "routing.h"
#include "soft-interface.h"
#include "translation-table.h"

static void batadv_send_outstanding_bcast_packet(struct work_struct *work);
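
/**
 * batadv_send_skb_packet() - send an already prepared packet
 * @skb: the packet to send
 * @hard_iface: the interface to send the packet on
 * @dst_addr: the payload destination
 *
 * Prepends an ethernet header with @dst_addr as destination, the MAC address
 * of @hard_iface as source and ETH_P_BATMAN as protocol, then hands the skb
 * to dev_queue_xmit(). The skb is consumed in all cases.
 *
 * Return: the transmit status as reported by net_xmit_eval(), or
 * NET_XMIT_DROP if the interface is not usable.
 */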
int batadv_send_skb_packet(struct sk_buff *skb,
			   struct batadv_hard_iface *hard_iface,
			   const u8 *dst_addr)
{
	struct batadv_priv *bat_priv;
	struct ethhdr *ethhdr;
	int ret;

	bat_priv = netdev_priv(hard_iface->soft_iface);

	if (hard_iface->if_status != BATADV_IF_ACTIVE)
		goto send_skb_err;

	if (unlikely(!hard_iface->net_dev))
		goto send_skb_err;

	if (!(hard_iface->net_dev->flags & IFF_UP)) {
		pr_warn("Interface %s is not up - can't send packet via that interface!\n",
			hard_iface->net_dev->name);
		goto send_skb_err;
	}

	/* push to the ethernet header */
	if (batadv_skb_head_push(skb, ETH_HLEN) < 0)
		goto send_skb_err;

	skb_reset_mac_header(skb);

	ethhdr = eth_hdr(skb);
	ether_addr_copy(ethhdr->h_source, hard_iface->net_dev->dev_addr);
	ether_addr_copy(ethhdr->h_dest, dst_addr);
	ethhdr->h_proto = htons(ETH_P_BATMAN);

	skb_set_network_header(skb, ETH_HLEN);
	skb->protocol = htons(ETH_P_BATMAN);

	skb->dev = hard_iface->net_dev;

	/* Save a clone of the skb to use when decoding coded packets */
	batadv_nc_skb_store_for_decoding(bat_priv, skb);

	/* dev_queue_xmit() returns a negative result on error. However on
	 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
	 * (which is > 0). This will not be treated as an error.
	 */
	ret = dev_queue_xmit(skb);
	return net_xmit_eval(ret);
send_skb_err:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}
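
/**
 * batadv_send_broadcast_skb() - send an skb to the broadcast address
 * @skb: the packet to send
 * @hard_iface: the interface to send the packet on
 *
 * Return: the transmit status, see batadv_send_skb_packet().
 */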
int batadv_send_broadcast_skb(struct sk_buff *skb,
			      struct batadv_hard_iface *hard_iface)
{
	return batadv_send_skb_packet(skb, hard_iface, batadv_broadcast_addr);
}
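
/**
 * batadv_send_unicast_skb() - send an skb to a neighbor as unicast
 * @skb: the packet to send
 * @neigh: the neighbor to send the packet to
 *
 * Sends the packet on the neighbor's incoming interface and, when
 * B.A.T.M.A.N. V is enabled and the transmit did not fail, updates the
 * neighbor's last unicast transmit timestamp.
 *
 * Return: the transmit status, see batadv_send_skb_packet().
 */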
int batadv_send_unicast_skb(struct sk_buff *skb,
			    struct batadv_neigh_node *neigh)
{
#ifdef CONFIG_BATMAN_ADV_BATMAN_V
	struct batadv_hardif_neigh_node *hardif_neigh;
#endif
	int ret;

	ret = batadv_send_skb_packet(skb, neigh->if_incoming, neigh->addr);

#ifdef CONFIG_BATMAN_ADV_BATMAN_V
	hardif_neigh = batadv_hardif_neigh_get(neigh->if_incoming, neigh->addr);

	if (hardif_neigh && ret != NET_XMIT_DROP)
		hardif_neigh->bat_v.last_unicast_tx = jiffies;

	if (hardif_neigh)
		batadv_hardif_neigh_put(hardif_neigh);
#endif

	return ret;
}
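
/**
 * batadv_send_skb_to_orig() - send an skb towards an originator
 * @skb: the packet to send
 * @orig_node: the final destination of the packet
 * @recv_if: the interface the packet was received on, or NULL if it
 *  originates locally
 *
 * Looks up the best next hop towards @orig_node, fragments the packet if it
 * exceeds the outgoing interface MTU and fragmentation is enabled, and
 * otherwise tries network coding before falling back to a plain unicast
 * transmission. The skb is consumed in all cases.
 *
 * Return: -EINVAL if no router was found, -EINPROGRESS if the skb was
 * buffered for later transmit by network coding, or the transmit status of
 * the lower layer otherwise.
 */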
int batadv_send_skb_to_orig(struct sk_buff *skb,
			    struct batadv_orig_node *orig_node,
			    struct batadv_hard_iface *recv_if)
{
	struct batadv_priv *bat_priv = orig_node->bat_priv;
	struct batadv_neigh_node *neigh_node;
	int ret;

	/* batadv_find_router() increases neigh_node's refcount if found. */
	neigh_node = batadv_find_router(bat_priv, orig_node, recv_if);
	if (!neigh_node) {
		ret = -EINVAL;
		goto free_skb;
	}

	/* Check if the skb is too large to send in one piece and fragment
	 * it if needed.
	 */
	if (atomic_read(&bat_priv->fragmentation) &&
	    skb->len > neigh_node->if_incoming->net_dev->mtu) {
		/* Fragment and send packet. */
		ret = batadv_frag_send_packet(skb, orig_node, neigh_node);
		/* skb was consumed */
		skb = NULL;

		goto put_neigh_node;
	}

	/* try to network code the packet, if it is received on an interface
	 * (i.e. being forwarded). If the packet originates from this node or
	 * if network coding fails, then send the packet as usual.
	 */
	if (recv_if && batadv_nc_skb_forward(skb, neigh_node))
		ret = -EINPROGRESS;
	else
		ret = batadv_send_unicast_skb(skb, neigh_node);

	/* skb was consumed */
	skb = NULL;

put_neigh_node:
	batadv_neigh_node_put(neigh_node);
free_skb:
	kfree_skb(skb);

	return ret;
}
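
/**
 * batadv_send_skb_push_fill_unicast() - extend the buffer and initialize the
 *  unicast header
 * @skb: the skb carrying the unicast header to initialize
 * @hdr_size: amount of bytes to push at the beginning of the skb
 * @orig_node: the destination node
 *
 * Return: false if the buffer extension was not possible or true otherwise.
 */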
static bool
batadv_send_skb_push_fill_unicast(struct sk_buff *skb, int hdr_size,
				  struct batadv_orig_node *orig_node)
{
	struct batadv_unicast_packet *unicast_packet;
	u8 ttvn = (u8)atomic_read(&orig_node->last_ttvn);

	if (batadv_skb_head_push(skb, hdr_size) < 0)
		return false;

	unicast_packet = (struct batadv_unicast_packet *)skb->data;
	unicast_packet->version = BATADV_COMPAT_VERSION;
	/* batman packet type: unicast */
	unicast_packet->packet_type = BATADV_UNICAST;
	/* set unicast ttl */
	unicast_packet->ttl = BATADV_TTL;
	/* copy the destination for faster routing */
	ether_addr_copy(unicast_packet->dest, orig_node->orig);
	/* set the destination tt version number */
	unicast_packet->ttvn = ttvn;

	return true;
}
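
/**
 * batadv_send_skb_prepare_unicast() - encapsulate an skb with a unicast header
 * @skb: the skb containing the payload to encapsulate
 * @orig_node: the destination node
 *
 * Return: false if the payload could not be encapsulated or true otherwise.
 */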
static bool batadv_send_skb_prepare_unicast(struct sk_buff *skb,
					    struct batadv_orig_node *orig_node)
{
	size_t uni_size = sizeof(struct batadv_unicast_packet);

	return batadv_send_skb_push_fill_unicast(skb, uni_size, orig_node);
}
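
/**
 * batadv_send_skb_prepare_unicast_4addr() - encapsulate an skb with a
 *  unicast 4addr header
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the skb containing the payload to encapsulate
 * @orig: the destination node
 * @packet_subtype: the unicast 4addr packet subtype to use
 *
 * Return: false if the payload could not be encapsulated or true otherwise.
 */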
bool batadv_send_skb_prepare_unicast_4addr(struct batadv_priv *bat_priv,
					   struct sk_buff *skb,
					   struct batadv_orig_node *orig,
					   int packet_subtype)
{
	struct batadv_hard_iface *primary_if;
	struct batadv_unicast_4addr_packet *uc_4addr_packet;
	bool ret = false;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* Pull the header space and fill the unicast_packet substructure.
	 * We can do that because the first member of the uc_4addr_packet
	 * is of type struct unicast_packet
	 */
	if (!batadv_send_skb_push_fill_unicast(skb, sizeof(*uc_4addr_packet),
					       orig))
		goto out;

	uc_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
	uc_4addr_packet->u.packet_type = BATADV_UNICAST_4ADDR;
	ether_addr_copy(uc_4addr_packet->src, primary_if->net_dev->dev_addr);
	uc_4addr_packet->subtype = packet_subtype;
	uc_4addr_packet->reserved = 0;

	ret = true;
out:
	if (primary_if)
		batadv_hardif_put(primary_if);
	return ret;
}
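
/**
 * batadv_send_skb_unicast() - encapsulate and send an skb via unicast
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @packet_type: the batman unicast packet type to use
 * @packet_subtype: the unicast 4addr packet subtype (only relevant for
 *  unicast 4addr packets)
 * @orig_node: the originator to send the packet to
 * @vid: the vid to be used to search the translation table
 *
 * Wraps the given skb into a batman-adv unicast or unicast-4addr header
 * depending on @packet_type and sends it towards @orig_node. The skb is
 * consumed.
 *
 * Return: NET_XMIT_DROP in case of error or the result of
 * batadv_send_skb_to_orig() otherwise.
 */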
int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
			    struct sk_buff *skb, int packet_type,
			    int packet_subtype,
			    struct batadv_orig_node *orig_node,
			    unsigned short vid)
{
	struct batadv_unicast_packet *unicast_packet;
	struct ethhdr *ethhdr;
	int ret = NET_XMIT_DROP;

	if (!orig_node)
		goto out;

	switch (packet_type) {
	case BATADV_UNICAST:
		if (!batadv_send_skb_prepare_unicast(skb, orig_node))
			goto out;
		break;
	case BATADV_UNICAST_4ADDR:
		if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, skb,
							   orig_node,
							   packet_subtype))
			goto out;
		break;
	default:
		/* this function supports UNICAST and UNICAST_4ADDR only. It
		 * should never be invoked with any other packet subtype
		 */
		goto out;
	}

	/* skb->data might have been reallocated by
	 * batadv_send_skb_prepare_unicast{,_4addr}()
	 */
	ethhdr = eth_hdr(skb);
	unicast_packet = (struct batadv_unicast_packet *)skb->data;

	/* inform the destination node that we are still missing a correct
	 * route for this client. The destination will receive this packet and
	 * will try to reroute it because the ttvn contained in the header is
	 * less than the current one
	 */
	if (batadv_tt_global_client_is_roaming(bat_priv, ethhdr->h_dest, vid))
		unicast_packet->ttvn = unicast_packet->ttvn - 1;

	ret = batadv_send_skb_to_orig(skb, orig_node, NULL);
	/* skb was consumed */
	skb = NULL;

out:
	kfree_skb(skb);
	return ret;
}
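
/**
 * batadv_send_skb_via_tt_generic() - send an skb via a node in the
 *  translation table
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @packet_type: the batman unicast packet type to use
 * @packet_subtype: the unicast 4addr packet subtype (only relevant for
 *  unicast 4addr packets)
 * @dst_hint: can be used to override the destination contained in the skb
 * @vid: the vid to be used to search the translation table
 *
 * Looks up the recipient node for the destination address in the ethernet
 * header via the translation table, then wraps and sends the skb as unicast
 * or unicast-4addr depending on @packet_type.
 *
 * Return: NET_XMIT_DROP in case of error or the result of
 * batadv_send_skb_unicast() otherwise.
 */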
int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv,
				   struct sk_buff *skb, int packet_type,
				   int packet_subtype, u8 *dst_hint,
				   unsigned short vid)
{
	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
	struct batadv_orig_node *orig_node;
	u8 *src, *dst;
	int ret;

	src = ethhdr->h_source;
	dst = ethhdr->h_dest;

	/* if we got a hint, send the packet to this client (if any) */
	if (dst_hint) {
		src = NULL;
		dst = dst_hint;
	}
	orig_node = batadv_transtable_search(bat_priv, src, dst, vid);

	ret = batadv_send_skb_unicast(bat_priv, skb, packet_type,
				      packet_subtype, orig_node, vid);

	if (orig_node)
		batadv_orig_node_put(orig_node);

	return ret;
}
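
/**
 * batadv_send_skb_via_gw() - send an skb via the currently selected gateway
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @vid: the vid to be used to search the translation table
 *
 * Wraps the given skb into a batman-adv unicast-4addr header and sends it to
 * the currently selected gateway. The skb is consumed.
 *
 * Return: NET_XMIT_DROP in case of error or the result of
 * batadv_send_skb_unicast() otherwise.
 */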
int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
			   unsigned short vid)
{
	struct batadv_orig_node *orig_node;
	int ret;

	orig_node = batadv_gw_get_selected_orig(bat_priv);
	ret = batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST_4ADDR,
				      BATADV_P_DATA, orig_node, vid);

	if (orig_node)
		batadv_orig_node_put(orig_node);

	return ret;
}
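
/**
 * batadv_forw_packet_free() - free a forwarding packet
 * @forw_packet: the packet to free
 * @dropped: whether the packet is freed because it was dropped
 *
 * Releases the skb (as dropped or consumed), puts the interface references
 * and returns the reserved queue slot before freeing the structure.
 */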
void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet,
			     bool dropped)
{
	if (dropped)
		kfree_skb(forw_packet->skb);
	else
		consume_skb(forw_packet->skb);

	if (forw_packet->if_incoming)
		batadv_hardif_put(forw_packet->if_incoming);
	if (forw_packet->if_outgoing)
		batadv_hardif_put(forw_packet->if_outgoing);
	if (forw_packet->queue_left)
		atomic_inc(forw_packet->queue_left);
	kfree(forw_packet);
}
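
/**
 * batadv_forw_packet_alloc() - allocate a forwarding packet
 * @if_incoming: the interface the packet was received on, or NULL
 * @if_outgoing: the interface the packet is to be sent on, or NULL
 * @queue_left: counter of remaining queue slots, or NULL
 * @bat_priv: the bat priv with all the soft interface information, or NULL
 * @skb: the skb to store in the forwarding packet, or NULL
 *
 * Reserves a slot from @queue_left (if given), acquires references to the
 * provided interfaces and allocates the forwarding packet.
 *
 * Return: the new forwarding packet, or NULL if the queue is full or the
 * allocation failed.
 */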
struct batadv_forw_packet *
batadv_forw_packet_alloc(struct batadv_hard_iface *if_incoming,
			 struct batadv_hard_iface *if_outgoing,
			 atomic_t *queue_left,
			 struct batadv_priv *bat_priv,
			 struct sk_buff *skb)
{
	struct batadv_forw_packet *forw_packet;
	const char *qname;

	if (queue_left && !batadv_atomic_dec_not_zero(queue_left)) {
		qname = "unknown";

		if (queue_left == &bat_priv->bcast_queue_left)
			qname = "bcast";

		if (queue_left == &bat_priv->batman_queue_left)
			qname = "batman";

		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "%s queue is full\n", qname);

		return NULL;
	}

	forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);
	if (!forw_packet)
		goto err;

	if (if_incoming)
		kref_get(&if_incoming->refcount);

	if (if_outgoing)
		kref_get(&if_outgoing->refcount);

	INIT_HLIST_NODE(&forw_packet->list);
	INIT_HLIST_NODE(&forw_packet->cleanup_list);
	forw_packet->skb = skb;
	forw_packet->queue_left = queue_left;
	forw_packet->if_incoming = if_incoming;
	forw_packet->if_outgoing = if_outgoing;
	forw_packet->num_packets = 0;

	return forw_packet;

err:
	if (queue_left)
		atomic_inc(queue_left);

	return NULL;
}
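
/**
 * batadv_forw_packet_was_stolen() - check whether a forwarding packet was
 *  stolen
 * @forw_packet: the forwarding packet to check
 *
 * Return: true if this packet was stolen from its original queue (its cleanup
 * list node is hashed or marked as fake), false otherwise.
 */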
static bool
batadv_forw_packet_was_stolen(struct batadv_forw_packet *forw_packet)
{
	return !hlist_unhashed(&forw_packet->cleanup_list);
}
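
/**
 * batadv_forw_packet_steal() - claim a forwarding packet for freeing
 * @forw_packet: the forwarding packet to steal
 * @lock: the lock protecting the queue the packet currently sits on
 *
 * Removes the packet from its queue so that the caller may free it and marks
 * its cleanup list node as fake to catch later requeue attempts.
 *
 * Return: true if the packet was stolen by the caller, false if somebody
 * else (typically the purging routine) stole it first.
 */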
bool batadv_forw_packet_steal(struct batadv_forw_packet *forw_packet,
			      spinlock_t *lock)
{
	/* did purging routine steal it earlier? */
	spin_lock_bh(lock);
	if (batadv_forw_packet_was_stolen(forw_packet)) {
		spin_unlock_bh(lock);
		return false;
	}

	hlist_del_init(&forw_packet->list);

	/* Just to spot misuse of this function */
	hlist_add_fake(&forw_packet->cleanup_list);

	spin_unlock_bh(lock);
	return true;
}
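
/**
 * batadv_forw_packet_list_steal() - claim a list of forwarding packets
 * @forw_list: the forwarding packet list to steal from
 * @cleanup_list: the list to move the stolen packets to
 * @hard_iface: the interface to steal forwarding packets for, or NULL to
 *  steal all packets on @forw_list
 *
 * The matching packets are moved to @cleanup_list so that they can be freed
 * later outside of the queue's spinlock.
 */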
static void
batadv_forw_packet_list_steal(struct hlist_head *forw_list,
			      struct hlist_head *cleanup_list,
			      const struct batadv_hard_iface *hard_iface)
{
	struct batadv_forw_packet *forw_packet;
	struct hlist_node *safe_tmp_node;

	hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
				  forw_list, list) {
		/* if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if (hard_iface &&
		    forw_packet->if_incoming != hard_iface &&
		    forw_packet->if_outgoing != hard_iface)
			continue;

		hlist_del(&forw_packet->list);
		hlist_add_head(&forw_packet->cleanup_list, cleanup_list);
	}
}
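
/**
 * batadv_forw_packet_list_free() - free a list of forwarding packets
 * @head: the list of (previously stolen) forwarding packets to free
 *
 * Cancels any pending delayed work of the packets before freeing them, so
 * this function must be allowed to sleep.
 */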
static void batadv_forw_packet_list_free(struct hlist_head *head)
{
	struct batadv_forw_packet *forw_packet;
	struct hlist_node *safe_tmp_node;

	hlist_for_each_entry_safe(forw_packet, safe_tmp_node, head,
				  cleanup_list) {
		cancel_delayed_work_sync(&forw_packet->delayed_work);

		hlist_del(&forw_packet->cleanup_list);
		batadv_forw_packet_free(forw_packet, true);
	}
}
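
/**
 * batadv_forw_packet_queue() - (re)queue a forwarding packet
 * @forw_packet: the forwarding packet to queue
 * @lock: the lock protecting @head
 * @head: the queue to add the packet to
 * @send_time: the (jiffies) time at which the packet shall be sent
 *
 * Adds the packet to @head and schedules its delayed work, unless the packet
 * was already stolen by the purging routine.
 */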
static void batadv_forw_packet_queue(struct batadv_forw_packet *forw_packet,
				     spinlock_t *lock, struct hlist_head *head,
				     unsigned long send_time)
{
	spin_lock_bh(lock);

	/* did purging routine steal it from us? */
	if (batadv_forw_packet_was_stolen(forw_packet)) {
		/* If you got it for free() without trouble, then
		 * don't get back into the queue after stealing...
		 */
		WARN_ONCE(hlist_fake(&forw_packet->cleanup_list),
			  "Requeuing after batadv_forw_packet_steal() not allowed!\n");

		spin_unlock_bh(lock);
		return;
	}

	hlist_del_init(&forw_packet->list);
	hlist_add_head(&forw_packet->list, head);

	queue_delayed_work(batadv_event_workqueue,
			   &forw_packet->delayed_work,
			   send_time - jiffies);
	spin_unlock_bh(lock);
}
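
/**
 * batadv_forw_packet_bcast_queue() - (re)queue a broadcast packet
 * @bat_priv: the bat priv with all the soft interface information
 * @forw_packet: the forwarding packet to queue
 * @send_time: the (jiffies) time at which the packet shall be sent
 */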
static void
batadv_forw_packet_bcast_queue(struct batadv_priv *bat_priv,
			       struct batadv_forw_packet *forw_packet,
			       unsigned long send_time)
{
	batadv_forw_packet_queue(forw_packet, &bat_priv->forw_bcast_list_lock,
				 &bat_priv->forw_bcast_list, send_time);
}
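
/**
 * batadv_forw_packet_ogmv1_queue() - (re)queue an OGM packet
 * @bat_priv: the bat priv with all the soft interface information
 * @forw_packet: the forwarding packet to queue
 * @send_time: the (jiffies) time at which the packet shall be sent
 */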
void batadv_forw_packet_ogmv1_queue(struct batadv_priv *bat_priv,
				    struct batadv_forw_packet *forw_packet,
				    unsigned long send_time)
{
	batadv_forw_packet_queue(forw_packet, &bat_priv->forw_bat_list_lock,
				 &bat_priv->forw_bat_list, send_time);
}
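
/**
 * batadv_add_bcast_packet_to_list() - queue a broadcast packet for
 *  (re)transmission
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: broadcast packet to add
 * @delay: number of jiffies to wait before sending
 * @own_packet: true if it is a self-generated broadcast packet
 *
 * Copies the skb, decreases the TTL of the copy and schedules it on the
 * broadcast queue. The caller keeps ownership of the original skb.
 *
 * Return: NETDEV_TX_OK on success or NETDEV_TX_BUSY if the packet could not
 * be queued.
 */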
int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
				    const struct sk_buff *skb,
				    unsigned long delay,
				    bool own_packet)
{
	struct batadv_hard_iface *primary_if;
	struct batadv_forw_packet *forw_packet;
	struct batadv_bcast_packet *bcast_packet;
	struct sk_buff *newskb;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto err;

	newskb = skb_copy(skb, GFP_ATOMIC);
	if (!newskb) {
		batadv_hardif_put(primary_if);
		goto err;
	}

	forw_packet = batadv_forw_packet_alloc(primary_if, NULL,
					       &bat_priv->bcast_queue_left,
					       bat_priv, newskb);
	batadv_hardif_put(primary_if);
	if (!forw_packet)
		goto err_packet_free;

	/* as we have a copy now, it is safe to decrease the TTL */
	bcast_packet = (struct batadv_bcast_packet *)newskb->data;
	bcast_packet->ttl--;

	forw_packet->own = own_packet;

	INIT_DELAYED_WORK(&forw_packet->delayed_work,
			  batadv_send_outstanding_bcast_packet);

	batadv_forw_packet_bcast_queue(bat_priv, forw_packet, jiffies + delay);
	return NETDEV_TX_OK;

err_packet_free:
	kfree_skb(newskb);
err:
	return NETDEV_TX_BUSY;
}
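
/**
 * batadv_forw_packet_bcasts_left() - check if a retransmission is necessary
 * @forw_packet: the forwarding packet to check
 * @hard_iface: the interface to check on, or NULL to check against the
 *  global maximum (BATADV_NUM_BCASTS_MAX)
 *
 * Return: true if the forwarding packet still needs to be (re)broadcast on
 * the given interface, false if it was already sent the maximum number of
 * times.
 */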
static bool
batadv_forw_packet_bcasts_left(struct batadv_forw_packet *forw_packet,
			       struct batadv_hard_iface *hard_iface)
{
	unsigned int max;

	if (hard_iface)
		max = hard_iface->num_bcasts;
	else
		max = BATADV_NUM_BCASTS_MAX;

	return BATADV_SKB_CB(forw_packet->skb)->num_bcasts < max;
}
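
/**
 * batadv_forw_packet_bcasts_inc() - increment the (re)broadcast counter
 * @forw_packet: the forwarding packet whose counter is incremented
 */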
static void
batadv_forw_packet_bcasts_inc(struct batadv_forw_packet *forw_packet)
{
	BATADV_SKB_CB(forw_packet->skb)->num_bcasts++;
}
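
/**
 * batadv_forw_packet_is_rebroadcast() - check packet for previous transmissions
 * @forw_packet: the forwarding packet to check
 *
 * Return: true if this packet was transmitted before, false otherwise.
 */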
bool batadv_forw_packet_is_rebroadcast(struct batadv_forw_packet *forw_packet)
{
	return BATADV_SKB_CB(forw_packet->skb)->num_bcasts > 0;
}
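
/**
 * batadv_send_outstanding_bcast_packet() - send a queued broadcast packet
 * @work: the work queue item of the forwarding packet
 *
 * Transmits the broadcast packet on all suitable interfaces (unless
 * suppressed by the broadcast avoidance checks) and requeues it if further
 * rebroadcasts are needed; otherwise the forwarding packet is freed.
 */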
static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
{
	struct batadv_hard_iface *hard_iface;
	struct batadv_hardif_neigh_node *neigh_node;
	struct delayed_work *delayed_work;
	struct batadv_forw_packet *forw_packet;
	struct batadv_bcast_packet *bcast_packet;
	struct sk_buff *skb1;
	struct net_device *soft_iface;
	struct batadv_priv *bat_priv;
	unsigned long send_time = jiffies + msecs_to_jiffies(5);
	bool dropped = false;
	u8 *neigh_addr;
	u8 *orig_neigh;
	int ret = 0;

	delayed_work = to_delayed_work(work);
	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
				   delayed_work);
	soft_iface = forw_packet->if_incoming->soft_iface;
	bat_priv = netdev_priv(soft_iface);

	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING) {
		dropped = true;
		goto out;
	}

	if (batadv_dat_drop_broadcast_packet(bat_priv, forw_packet)) {
		dropped = true;
		goto out;
	}

	bcast_packet = (struct batadv_bcast_packet *)forw_packet->skb->data;

	/* rebroadcast packet */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->soft_iface != soft_iface)
			continue;

		if (!batadv_forw_packet_bcasts_left(forw_packet, hard_iface))
			continue;

		if (forw_packet->own) {
			neigh_node = NULL;
		} else {
			neigh_addr = eth_hdr(forw_packet->skb)->h_source;
			neigh_node = batadv_hardif_neigh_get(hard_iface,
							     neigh_addr);
		}

		orig_neigh = neigh_node ? neigh_node->orig : NULL;

		ret = batadv_hardif_no_broadcast(hard_iface, bcast_packet->orig,
						 orig_neigh);

		if (ret) {
			char *type;

			switch (ret) {
			case BATADV_HARDIF_BCAST_NORECIPIENT:
				type = "no neighbor";
				break;
			case BATADV_HARDIF_BCAST_DUPFWD:
				type = "single neighbor is source";
				break;
			case BATADV_HARDIF_BCAST_DUPORIG:
				type = "single neighbor is originator";
				break;
			default:
				type = "unknown";
			}

			batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "BCAST packet from orig %pM on %s suppressed: %s\n",
				   bcast_packet->orig,
				   hard_iface->net_dev->name, type);

			if (neigh_node)
				batadv_hardif_neigh_put(neigh_node);

			continue;
		}

		if (neigh_node)
			batadv_hardif_neigh_put(neigh_node);

		if (!kref_get_unless_zero(&hard_iface->refcount))
			continue;

		/* send a copy of the saved skb */
		skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
		if (skb1)
			batadv_send_broadcast_skb(skb1, hard_iface);

		batadv_hardif_put(hard_iface);
	}
	rcu_read_unlock();

	batadv_forw_packet_bcasts_inc(forw_packet);

	/* if we still have some more bcasts to send */
	if (batadv_forw_packet_bcasts_left(forw_packet, NULL)) {
		batadv_forw_packet_bcast_queue(bat_priv, forw_packet,
					       send_time);
		return;
	}

out:
	/* do we get something for free()? */
	if (batadv_forw_packet_steal(forw_packet,
				     &bat_priv->forw_bcast_list_lock))
		batadv_forw_packet_free(forw_packet, dropped);
}
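
/**
 * batadv_purge_outstanding_packets() - stop and purge scheduled packets
 * @bat_priv: the bat priv with all the soft interface information
 * @hard_iface: the interface to purge packets for, or NULL to purge all
 *  queued broadcast and OGM packets
 *
 * Steals the matching packets from the broadcast and OGM queues, cancels
 * their pending work and frees them.
 */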
void
batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
				 const struct batadv_hard_iface *hard_iface)
{
	struct hlist_head head = HLIST_HEAD_INIT;

	if (hard_iface)
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "%s(): %s\n",
			   __func__, hard_iface->net_dev->name);
	else
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "%s()\n", __func__);

	/* claim bcast list for free() */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	batadv_forw_packet_list_steal(&bat_priv->forw_bcast_list, &head,
				      hard_iface);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* claim batman packet list for free() */
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	batadv_forw_packet_list_steal(&bat_priv->forw_bat_list, &head,
				      hard_iface);
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);

	/* then cancel or wait for packet workers to finish and free */
	batadv_forw_packet_list_free(&head);
}