1
2
3
4
5
6
7#include "routing.h"
8#include "main.h"
9
10#include <linux/atomic.h>
11#include <linux/byteorder/generic.h>
12#include <linux/compiler.h>
13#include <linux/errno.h>
14#include <linux/etherdevice.h>
15#include <linux/if_ether.h>
16#include <linux/jiffies.h>
17#include <linux/kref.h>
18#include <linux/netdevice.h>
19#include <linux/printk.h>
20#include <linux/rculist.h>
21#include <linux/rcupdate.h>
22#include <linux/skbuff.h>
23#include <linux/spinlock.h>
24#include <linux/stddef.h>
25#include <uapi/linux/batadv_packet.h>
26
27#include "bitarray.h"
28#include "bridge_loop_avoidance.h"
29#include "distributed-arp-table.h"
30#include "fragmentation.h"
31#include "hard-interface.h"
32#include "icmp_socket.h"
33#include "log.h"
34#include "network-coding.h"
35#include "originator.h"
36#include "send.h"
37#include "soft-interface.h"
38#include "tp_meter.h"
39#include "translation-table.h"
40#include "tvlv.h"
41
42static int batadv_route_unicast_packet(struct sk_buff *skb,
43 struct batadv_hard_iface *recv_if);
44
45
46
47
48
49
50
51
52
53
/**
 * _batadv_update_route() - set the router towards an originator
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: originator whose router is to be changed
 * @recv_if: the receive interface this route applies to
 * @neigh_node: neighbor which should become the new router (NULL removes
 *  the route)
 *
 * Swaps the router stored in the per-interface info of @orig_node under
 * the neigh_list_lock and drops the reference held on the replaced router.
 */
static void _batadv_update_route(struct batadv_priv *bat_priv,
				 struct batadv_orig_node *orig_node,
				 struct batadv_hard_iface *recv_if,
				 struct batadv_neigh_node *neigh_node)
{
	struct batadv_orig_ifinfo *orig_ifinfo;
	struct batadv_neigh_node *curr_router;

	orig_ifinfo = batadv_orig_ifinfo_get(orig_node, recv_if);
	if (!orig_ifinfo)
		return;

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* take an extra reference for the new router: the pointer stored in
	 * orig_ifinfo->router below owns one reference
	 */
	if (neigh_node)
		kref_get(&neigh_node->refcount);

	/* exchange the router pointer atomically w.r.t. RCU readers; the
	 * previous router is returned so its reference can be dropped
	 */
	curr_router = rcu_replace_pointer(orig_ifinfo->router, neigh_node,
					  true);
	spin_unlock_bh(&orig_node->neigh_list_lock);
	batadv_orig_ifinfo_put(orig_ifinfo);

	/* route deleted */
	if (curr_router && !neigh_node) {
		batadv_dbg(BATADV_DBG_ROUTES, bat_priv,
			   "Deleting route towards: %pM\n", orig_node->orig);
		batadv_tt_global_del_orig(bat_priv, orig_node, -1,
					  "Deleted route towards originator");

	/* route added */
	} else if (!curr_router && neigh_node) {
		batadv_dbg(BATADV_DBG_ROUTES, bat_priv,
			   "Adding route towards: %pM (via %pM)\n",
			   orig_node->orig, neigh_node->addr);

	/* route changed */
	} else if (neigh_node && curr_router) {
		batadv_dbg(BATADV_DBG_ROUTES, bat_priv,
			   "Changing route towards: %pM (now via %pM - was via %pM)\n",
			   orig_node->orig, neigh_node->addr,
			   curr_router->addr);
	}

	/* decrease refcount of previous best neighbor */
	if (curr_router)
		batadv_neigh_node_put(curr_router);
}
108
109
110
111
112
113
114
115
/**
 * batadv_update_route() - update the route towards an originator if changed
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: originator whose router may be changed
 * @recv_if: the receive interface this route applies to
 * @neigh_node: neighbor which should become the new router
 *
 * Only touches the stored route when @neigh_node differs from the router
 * currently selected for @recv_if.
 */
void batadv_update_route(struct batadv_priv *bat_priv,
			 struct batadv_orig_node *orig_node,
			 struct batadv_hard_iface *recv_if,
			 struct batadv_neigh_node *neigh_node)
{
	struct batadv_neigh_node *cur_router;

	if (!orig_node)
		return;

	cur_router = batadv_orig_router_get(orig_node, recv_if);
	if (cur_router != neigh_node)
		_batadv_update_route(bat_priv, orig_node, recv_if, neigh_node);

	if (cur_router)
		batadv_neigh_node_put(cur_router);
}
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152bool batadv_window_protected(struct batadv_priv *bat_priv, s32 seq_num_diff,
153 s32 seq_old_max_diff, unsigned long *last_reset,
154 bool *protection_started)
155{
156 if (seq_num_diff <= -seq_old_max_diff ||
157 seq_num_diff >= BATADV_EXPECTED_SEQNO_RANGE) {
158 if (!batadv_has_timed_out(*last_reset,
159 BATADV_RESET_PROTECTION_MS))
160 return true;
161
162 *last_reset = jiffies;
163 if (protection_started)
164 *protection_started = true;
165 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
166 "old packet received, start protection\n");
167 }
168
169 return false;
170}
171
172
173
174
175
176
177
178
179
180bool batadv_check_management_packet(struct sk_buff *skb,
181 struct batadv_hard_iface *hard_iface,
182 int header_len)
183{
184 struct ethhdr *ethhdr;
185
186
187 if (unlikely(!pskb_may_pull(skb, header_len)))
188 return false;
189
190 ethhdr = eth_hdr(skb);
191
192
193 if (!is_broadcast_ether_addr(ethhdr->h_dest))
194 return false;
195
196
197 if (!is_valid_ether_addr(ethhdr->h_source))
198 return false;
199
200
201 if (skb_cow(skb, 0) < 0)
202 return false;
203
204
205 if (skb_linearize(skb) < 0)
206 return false;
207
208 return true;
209}
210
211
212
213
214
215
216
217
218
/**
 * batadv_recv_my_icmp_packet() - handle an icmp packet addressed to this node
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: icmp packet to process
 *
 * Replies/errors are handed to the icmp socket, echo requests are answered,
 * throughput meter packets are passed to the tp meter.
 *
 * Return: NET_RX_SUCCESS if the packet was handled, NET_RX_DROP otherwise.
 * The skb is always consumed.
 */
static int batadv_recv_my_icmp_packet(struct batadv_priv *bat_priv,
				      struct sk_buff *skb)
{
	struct batadv_hard_iface *primary_if = NULL;
	struct batadv_orig_node *orig_node = NULL;
	struct batadv_icmp_header *icmph;
	int res, ret = NET_RX_DROP;

	icmph = (struct batadv_icmp_header *)skb->data;

	switch (icmph->msg_type) {
	case BATADV_ECHO_REPLY:
	case BATADV_DESTINATION_UNREACHABLE:
	case BATADV_TTL_EXCEEDED:
		/* answer to a ping/traceroute - hand it to the icmp socket */
		if (skb_linearize(skb) < 0)
			break;

		batadv_socket_receive_packet(icmph, skb->len);
		break;
	case BATADV_ECHO_REQUEST:
		/* answer echo request (ping) */
		primary_if = batadv_primary_if_get_selected(bat_priv);
		if (!primary_if)
			goto out;

		/* get routing information back towards the sender */
		orig_node = batadv_orig_hash_find(bat_priv, icmph->orig);
		if (!orig_node)
			goto out;

		/* unshare the buffer before modifying it */
		if (skb_cow(skb, ETH_HLEN) < 0)
			goto out;

		/* skb_cow() may have reallocated - refresh the pointer */
		icmph = (struct batadv_icmp_header *)skb->data;

		/* turn the request around and send the reply back */
		ether_addr_copy(icmph->dst, icmph->orig);
		ether_addr_copy(icmph->orig, primary_if->net_dev->dev_addr);
		icmph->msg_type = BATADV_ECHO_REPLY;
		icmph->ttl = BATADV_TTL;

		res = batadv_send_skb_to_orig(skb, orig_node, NULL);
		if (res == NET_XMIT_SUCCESS)
			ret = NET_RX_SUCCESS;

		/* skb was consumed by the send path */
		skb = NULL;
		break;
	case BATADV_TP:
		if (!pskb_may_pull(skb, sizeof(struct batadv_icmp_tp_packet)))
			goto out;

		batadv_tp_meter_recv(bat_priv, skb);
		ret = NET_RX_SUCCESS;
		/* skb was consumed by the tp meter */
		skb = NULL;
		goto out;
	default:
		/* drop unknown message type */
		goto out;
	}
out:
	if (primary_if)
		batadv_hardif_put(primary_if);
	if (orig_node)
		batadv_orig_node_put(orig_node);

	kfree_skb(skb);

	return ret;
}
291
292static int batadv_recv_icmp_ttl_exceeded(struct batadv_priv *bat_priv,
293 struct sk_buff *skb)
294{
295 struct batadv_hard_iface *primary_if = NULL;
296 struct batadv_orig_node *orig_node = NULL;
297 struct batadv_icmp_packet *icmp_packet;
298 int res, ret = NET_RX_DROP;
299
300 icmp_packet = (struct batadv_icmp_packet *)skb->data;
301
302
303 if (icmp_packet->msg_type != BATADV_ECHO_REQUEST) {
304 pr_debug("Warning - can't forward icmp packet from %pM to %pM: ttl exceeded\n",
305 icmp_packet->orig, icmp_packet->dst);
306 goto out;
307 }
308
309 primary_if = batadv_primary_if_get_selected(bat_priv);
310 if (!primary_if)
311 goto out;
312
313
314 orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->orig);
315 if (!orig_node)
316 goto out;
317
318
319 if (skb_cow(skb, ETH_HLEN) < 0)
320 goto out;
321
322 icmp_packet = (struct batadv_icmp_packet *)skb->data;
323
324 ether_addr_copy(icmp_packet->dst, icmp_packet->orig);
325 ether_addr_copy(icmp_packet->orig, primary_if->net_dev->dev_addr);
326 icmp_packet->msg_type = BATADV_TTL_EXCEEDED;
327 icmp_packet->ttl = BATADV_TTL;
328
329 res = batadv_send_skb_to_orig(skb, orig_node, NULL);
330 if (res == NET_RX_SUCCESS)
331 ret = NET_XMIT_SUCCESS;
332
333
334 skb = NULL;
335
336out:
337 if (primary_if)
338 batadv_hardif_put(primary_if);
339 if (orig_node)
340 batadv_orig_node_put(orig_node);
341
342 kfree_skb(skb);
343
344 return ret;
345}
346
347
348
349
350
351
352
353
/**
 * batadv_recv_icmp_packet() - process an incoming batman-adv icmp packet
 * @skb: the received skb
 * @recv_if: interface that the skb is received on
 *
 * Records the route (for echo request/reply with record-route space),
 * delivers packets addressed to this node, generates TTL exceeded errors,
 * and forwards everything else towards its destination.
 *
 * Return: NET_RX_SUCCESS if the packet was processed or forwarded,
 * NET_RX_DROP otherwise. The skb is always consumed.
 */
int batadv_recv_icmp_packet(struct sk_buff *skb,
			    struct batadv_hard_iface *recv_if)
{
	struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct batadv_icmp_header *icmph;
	struct batadv_icmp_packet_rr *icmp_packet_rr;
	struct ethhdr *ethhdr;
	struct batadv_orig_node *orig_node = NULL;
	int hdr_size = sizeof(struct batadv_icmp_header);
	int res, ret = NET_RX_DROP;

	/* drop packet if it does not provide the minimum header */
	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		goto free_skb;

	ethhdr = eth_hdr(skb);

	/* packet with unicast indication but non-unicast recipient */
	if (!is_valid_ether_addr(ethhdr->h_dest))
		goto free_skb;

	/* packet with broadcast/multicast sender address */
	if (is_multicast_ether_addr(ethhdr->h_source))
		goto free_skb;

	/* not addressed to this node on layer 2 */
	if (!batadv_is_my_mac(bat_priv, ethhdr->h_dest))
		goto free_skb;

	icmph = (struct batadv_icmp_header *)skb->data;

	/* add record route information if the packet has room for it */
	if ((icmph->msg_type == BATADV_ECHO_REPLY ||
	     icmph->msg_type == BATADV_ECHO_REQUEST) &&
	    skb->len >= sizeof(struct batadv_icmp_packet_rr)) {
		if (skb_linearize(skb) < 0)
			goto free_skb;

		/* unshare the buffer before modifying it */
		if (skb_cow(skb, ETH_HLEN) < 0)
			goto free_skb;

		/* skb_linearize()/skb_cow() may have reallocated - refresh */
		ethhdr = eth_hdr(skb);
		icmph = (struct batadv_icmp_header *)skb->data;
		icmp_packet_rr = (struct batadv_icmp_packet_rr *)icmph;
		if (icmp_packet_rr->rr_cur >= BATADV_RR_LEN)
			goto free_skb;

		/* append this node's address to the record route list */
		ether_addr_copy(icmp_packet_rr->rr[icmp_packet_rr->rr_cur],
				ethhdr->h_dest);
		icmp_packet_rr->rr_cur++;
	}

	/* packet for me - handler takes ownership of the skb */
	if (batadv_is_my_mac(bat_priv, icmph->dst))
		return batadv_recv_my_icmp_packet(bat_priv, skb);

	/* TTL exceeded - handler takes ownership of the skb */
	if (icmph->ttl < 2)
		return batadv_recv_icmp_ttl_exceeded(bat_priv, skb);

	/* get routing information towards the destination */
	orig_node = batadv_orig_hash_find(bat_priv, icmph->dst);
	if (!orig_node)
		goto free_skb;

	/* unshare the buffer before modifying it */
	if (skb_cow(skb, ETH_HLEN) < 0)
		goto put_orig_node;

	/* skb_cow() may have reallocated - refresh the pointer */
	icmph = (struct batadv_icmp_header *)skb->data;

	/* decrement ttl */
	icmph->ttl--;

	/* route it */
	res = batadv_send_skb_to_orig(skb, orig_node, recv_if);
	if (res == NET_XMIT_SUCCESS)
		ret = NET_RX_SUCCESS;

	/* skb was consumed by the send path */
	skb = NULL;

put_orig_node:
	if (orig_node)
		batadv_orig_node_put(orig_node);
free_skb:
	kfree_skb(skb);

	return ret;
}
445
446
447
448
449
450
451
452
453
454
455
456
457
458static int batadv_check_unicast_packet(struct batadv_priv *bat_priv,
459 struct sk_buff *skb, int hdr_size)
460{
461 struct ethhdr *ethhdr;
462
463
464 if (unlikely(!pskb_may_pull(skb, hdr_size)))
465 return -ENODATA;
466
467 ethhdr = eth_hdr(skb);
468
469
470 if (!is_valid_ether_addr(ethhdr->h_dest))
471 return -EBADR;
472
473
474 if (is_multicast_ether_addr(ethhdr->h_source))
475 return -EBADR;
476
477
478 if (!batadv_is_my_mac(bat_priv, ethhdr->h_dest))
479 return -EREMOTE;
480
481 return 0;
482}
483
484
485
486
487
488
489
490
491
492static struct batadv_orig_ifinfo *
493batadv_last_bonding_get(struct batadv_orig_node *orig_node)
494{
495 struct batadv_orig_ifinfo *last_bonding_candidate;
496
497 spin_lock_bh(&orig_node->neigh_list_lock);
498 last_bonding_candidate = orig_node->last_bonding_candidate;
499
500 if (last_bonding_candidate)
501 kref_get(&last_bonding_candidate->refcount);
502 spin_unlock_bh(&orig_node->neigh_list_lock);
503
504 return last_bonding_candidate;
505}
506
507
508
509
510
511
512static void
513batadv_last_bonding_replace(struct batadv_orig_node *orig_node,
514 struct batadv_orig_ifinfo *new_candidate)
515{
516 struct batadv_orig_ifinfo *old_candidate;
517
518 spin_lock_bh(&orig_node->neigh_list_lock);
519 old_candidate = orig_node->last_bonding_candidate;
520
521 if (new_candidate)
522 kref_get(&new_candidate->refcount);
523 orig_node->last_bonding_candidate = new_candidate;
524 spin_unlock_bh(&orig_node->neigh_list_lock);
525
526 if (old_candidate)
527 batadv_orig_ifinfo_put(old_candidate);
528}
529
530
531
532
533
534
535
536
537
538
/**
 * batadv_find_router() - find a suitable router towards an originator
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: the destination node
 * @recv_if: interface this packet was received on (BATADV_IF_DEFAULT for
 *  locally generated packets)
 *
 * When bonding is enabled and this is the first hop, the candidates from
 * all outgoing interfaces are cycled through in round-robin fashion,
 * starting after the previously used bonding candidate.
 *
 * Return: the router to use for @orig_node with a reference taken, or NULL
 * if none is available. The caller must release the reference.
 */
struct batadv_neigh_node *
batadv_find_router(struct batadv_priv *bat_priv,
		   struct batadv_orig_node *orig_node,
		   struct batadv_hard_iface *recv_if)
{
	struct batadv_algo_ops *bao = bat_priv->algo_ops;
	struct batadv_neigh_node *first_candidate_router = NULL;
	struct batadv_neigh_node *next_candidate_router = NULL;
	struct batadv_neigh_node *router, *cand_router = NULL;
	struct batadv_neigh_node *last_cand_router = NULL;
	struct batadv_orig_ifinfo *cand, *first_candidate = NULL;
	struct batadv_orig_ifinfo *next_candidate = NULL;
	struct batadv_orig_ifinfo *last_candidate;
	bool last_candidate_found = false;

	if (!orig_node)
		return NULL;

	router = batadv_orig_router_get(orig_node, recv_if);

	if (!router)
		return router;

	/* only consider bonding for the first hop (recv_if ==
	 * BATADV_IF_DEFAULT) and when bonding is enabled
	 */
	if (!(recv_if == BATADV_IF_DEFAULT && atomic_read(&bat_priv->bonding)))
		return router;

	/* bonding: walk the per-interface candidate list and pick the
	 * candidate that follows the previously used one; fall back to the
	 * first acceptable candidate, or to the default router when no
	 * candidate qualifies
	 */
	rcu_read_lock();
	last_candidate = batadv_last_bonding_get(orig_node);
	if (last_candidate)
		last_cand_router = rcu_dereference(last_candidate->router);

	hlist_for_each_entry_rcu(cand, &orig_node->ifinfo_list, list) {
		/* skip entries that are about to be freed */
		if (!kref_get_unless_zero(&cand->refcount))
			continue;

		cand_router = rcu_dereference(cand->router);
		if (!cand_router)
			goto next;

		if (!kref_get_unless_zero(&cand_router->refcount)) {
			cand_router = NULL;
			goto next;
		}

		/* the alternative candidate must be good enough compared to
		 * the currently selected router
		 */
		if (!bao->neigh.is_similar_or_better(cand_router,
						     cand->if_outgoing, router,
						     recv_if))
			goto next;

		/* don't use the same router twice in a row */
		if (last_cand_router == cand_router)
			goto next;

		/* remember the first acceptable candidate as fallback */
		if (!first_candidate) {
			kref_get(&cand_router->refcount);
			kref_get(&cand->refcount);
			first_candidate = cand;
			first_candidate_router = cand_router;
		}

		/* select the candidate AFTER the previously used one; until
		 * the previous candidate has been passed, keep scanning
		 */
		if (!last_candidate || last_candidate_found) {
			next_candidate = cand;
			next_candidate_router = cand_router;
			break;
		}

		if (last_candidate == cand)
			last_candidate_found = true;
next:
		/* references taken for this iteration are released here */
		if (cand_router) {
			batadv_neigh_node_put(cand_router);
			cand_router = NULL;
		}
		batadv_orig_ifinfo_put(cand);
	}
	rcu_read_unlock();

	/* prefer the next candidate, then the first candidate; otherwise
	 * keep the already selected router and clear the bonding state
	 */
	if (next_candidate) {
		batadv_neigh_node_put(router);

		kref_get(&next_candidate_router->refcount);
		router = next_candidate_router;
		batadv_last_bonding_replace(orig_node, next_candidate);
	} else if (first_candidate) {
		batadv_neigh_node_put(router);

		kref_get(&first_candidate_router->refcount);
		router = first_candidate_router;
		batadv_last_bonding_replace(orig_node, first_candidate);
	} else {
		batadv_last_bonding_replace(orig_node, NULL);
	}

	/* drop the candidate references taken during the scan */
	if (first_candidate) {
		batadv_neigh_node_put(first_candidate_router);
		batadv_orig_ifinfo_put(first_candidate);
	}

	if (next_candidate) {
		batadv_neigh_node_put(next_candidate_router);
		batadv_orig_ifinfo_put(next_candidate);
	}

	if (last_candidate)
		batadv_orig_ifinfo_put(last_candidate);

	return router;
}
674
/**
 * batadv_route_unicast_packet() - forward a unicast packet towards its
 *  destination
 * @skb: unicast packet to process
 * @recv_if: interface this packet was received on
 *
 * Return: NET_RX_SUCCESS if the packet was handed to the next hop,
 * NET_RX_DROP otherwise. The skb is always consumed.
 */
static int batadv_route_unicast_packet(struct sk_buff *skb,
				       struct batadv_hard_iface *recv_if)
{
	struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct batadv_orig_node *orig_node = NULL;
	struct batadv_unicast_packet *unicast_packet;
	struct ethhdr *ethhdr = eth_hdr(skb);
	int res, hdr_len, ret = NET_RX_DROP;
	unsigned int len;

	unicast_packet = (struct batadv_unicast_packet *)skb->data;

	/* TTL exhausted - cannot forward any further */
	if (unicast_packet->ttl < 2) {
		pr_debug("Warning - can't forward unicast packet from %pM to %pM: ttl exceeded\n",
			 ethhdr->h_source, unicast_packet->dest);
		goto free_skb;
	}

	/* get routing information towards the destination */
	orig_node = batadv_orig_hash_find(bat_priv, unicast_packet->dest);

	if (!orig_node)
		goto free_skb;

	/* unshare the buffer before modifying it */
	if (skb_cow(skb, ETH_HLEN) < 0)
		goto put_orig_node;

	/* skb_cow() may have reallocated - refresh, then decrement ttl */
	unicast_packet = (struct batadv_unicast_packet *)skb->data;
	unicast_packet->ttl--;

	/* determine the batman header length for priority extraction */
	switch (unicast_packet->packet_type) {
	case BATADV_UNICAST_4ADDR:
		hdr_len = sizeof(struct batadv_unicast_4addr_packet);
		break;
	case BATADV_UNICAST:
		hdr_len = sizeof(struct batadv_unicast_packet);
		break;
	default:
		/* unknown packet type - skip priority handling */
		hdr_len = -1;
		break;
	}

	if (hdr_len > 0)
		batadv_skb_set_priority(skb, hdr_len);

	/* remember the length before the send path consumes the skb */
	len = skb->len;
	res = batadv_send_skb_to_orig(skb, orig_node, recv_if);

	/* translate NET_XMIT_* of the send path into our NET_RX_* result */
	if (res == NET_XMIT_SUCCESS) {
		ret = NET_RX_SUCCESS;

		batadv_inc_counter(bat_priv, BATADV_CNT_FORWARD);
		batadv_add_counter(bat_priv, BATADV_CNT_FORWARD_BYTES,
				   len + ETH_HLEN);
	}

	/* skb was consumed by the send path */
	skb = NULL;

put_orig_node:
	batadv_orig_node_put(orig_node);
free_skb:
	kfree_skb(skb);

	return ret;
}
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761static bool
762batadv_reroute_unicast_packet(struct batadv_priv *bat_priv, struct sk_buff *skb,
763 struct batadv_unicast_packet *unicast_packet,
764 u8 *dst_addr, unsigned short vid)
765{
766 struct batadv_orig_node *orig_node = NULL;
767 struct batadv_hard_iface *primary_if = NULL;
768 bool ret = false;
769 u8 *orig_addr, orig_ttvn;
770
771 if (batadv_is_my_client(bat_priv, dst_addr, vid)) {
772 primary_if = batadv_primary_if_get_selected(bat_priv);
773 if (!primary_if)
774 goto out;
775 orig_addr = primary_if->net_dev->dev_addr;
776 orig_ttvn = (u8)atomic_read(&bat_priv->tt.vn);
777 } else {
778 orig_node = batadv_transtable_search(bat_priv, NULL, dst_addr,
779 vid);
780 if (!orig_node)
781 goto out;
782
783 if (batadv_compare_eth(orig_node->orig, unicast_packet->dest))
784 goto out;
785
786 orig_addr = orig_node->orig;
787 orig_ttvn = (u8)atomic_read(&orig_node->last_ttvn);
788 }
789
790
791 skb_postpull_rcsum(skb, unicast_packet, sizeof(*unicast_packet));
792 ether_addr_copy(unicast_packet->dest, orig_addr);
793 unicast_packet->ttvn = orig_ttvn;
794 skb_postpush_rcsum(skb, unicast_packet, sizeof(*unicast_packet));
795
796 ret = true;
797out:
798 if (primary_if)
799 batadv_hardif_put(primary_if);
800 if (orig_node)
801 batadv_orig_node_put(orig_node);
802
803 return ret;
804}
805
/**
 * batadv_check_unicast_ttvn() - check whether a unicast packet was sent with
 *  outdated translation table information and reroute it if possible
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the buffer containing the packet to check
 * @hdr_len: size of the batman header preceding the inner ethernet header
 *
 * Return: true if the packet may be processed further (unchanged or
 * rerouted), false if it must be dropped.
 */
static bool batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
				      struct sk_buff *skb, int hdr_len)
{
	struct batadv_unicast_packet *unicast_packet;
	struct batadv_hard_iface *primary_if;
	struct batadv_orig_node *orig_node;
	u8 curr_ttvn, old_ttvn;
	struct ethhdr *ethhdr;
	unsigned short vid;
	int is_old_ttvn;

	/* the inner ethernet header must be available */
	if (!pskb_may_pull(skb, hdr_len + ETH_HLEN))
		return false;

	/* unshare the buffer in case the header gets rewritten below */
	if (skb_cow(skb, sizeof(*unicast_packet)) < 0)
		return false;

	unicast_packet = (struct batadv_unicast_packet *)skb->data;
	vid = batadv_get_vid(skb, hdr_len);
	ethhdr = (struct ethhdr *)(skb->data + hdr_len);

	/* multicast inner destinations are never rerouted */
	if (is_multicast_ether_addr(ethhdr->h_dest))
		return true;

	/* the destination client was served here but is roaming: try to
	 * redirect the packet to the node now serving it
	 */
	if (batadv_tt_local_client_is_roaming(bat_priv, ethhdr->h_dest, vid)) {
		if (batadv_reroute_unicast_packet(bat_priv, skb, unicast_packet,
						  ethhdr->h_dest, vid))
			batadv_dbg_ratelimited(BATADV_DBG_TT,
					       bat_priv,
					       "Rerouting unicast packet to %pM (dst=%pM): Local Roaming\n",
					       unicast_packet->dest,
					       ethhdr->h_dest);
		/* if rerouting was not possible, the packet still continues
		 * towards its original destination
		 */
		return true;
	}

	/* determine the ttvn this node knows for the packet destination:
	 * ours if the packet is addressed to us, the originator's otherwise
	 */
	curr_ttvn = (u8)atomic_read(&bat_priv->tt.vn);
	if (!batadv_is_my_mac(bat_priv, unicast_packet->dest)) {
		orig_node = batadv_orig_hash_find(bat_priv,
						  unicast_packet->dest);
		/* without an orig_node the packet cannot be delivered */
		if (!orig_node)
			return false;

		curr_ttvn = (u8)atomic_read(&orig_node->last_ttvn);
		batadv_orig_node_put(orig_node);
	}

	/* nothing to fix when the packet carries a current ttvn */
	is_old_ttvn = batadv_seq_before(unicast_packet->ttvn, curr_ttvn);
	if (!is_old_ttvn)
		return true;

	old_ttvn = unicast_packet->ttvn;

	/* the packet was built from outdated information - try to redirect
	 * it towards the current location of the destination client
	 */
	if (batadv_reroute_unicast_packet(bat_priv, skb, unicast_packet,
					  ethhdr->h_dest, vid)) {
		batadv_dbg_ratelimited(BATADV_DBG_TT, bat_priv,
				       "Rerouting unicast packet to %pM (dst=%pM): TTVN mismatch old_ttvn=%u new_ttvn=%u\n",
				       unicast_packet->dest, ethhdr->h_dest,
				       old_ttvn, curr_ttvn);
		return true;
	}

	/* rerouting failed: only keep the packet if the destination client
	 * is served by this node
	 */
	if (!batadv_is_my_client(bat_priv, ethhdr->h_dest, vid))
		return false;

	/* readdress the packet to this node so it gets delivered locally */
	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		return false;

	/* keep the receive checksum valid across the header rewrite */
	skb_postpull_rcsum(skb, unicast_packet, sizeof(*unicast_packet));
	ether_addr_copy(unicast_packet->dest, primary_if->net_dev->dev_addr);
	unicast_packet->ttvn = curr_ttvn;
	skb_postpush_rcsum(skb, unicast_packet, sizeof(*unicast_packet));

	batadv_hardif_put(primary_if);

	return true;
}
918
919
920
921
922
923
924
925
926
927
928int batadv_recv_unhandled_unicast_packet(struct sk_buff *skb,
929 struct batadv_hard_iface *recv_if)
930{
931 struct batadv_unicast_packet *unicast_packet;
932 struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
933 int check, hdr_size = sizeof(*unicast_packet);
934
935 check = batadv_check_unicast_packet(bat_priv, skb, hdr_size);
936 if (check < 0)
937 goto free_skb;
938
939
940 unicast_packet = (struct batadv_unicast_packet *)skb->data;
941 if (batadv_is_my_mac(bat_priv, unicast_packet->dest))
942 goto free_skb;
943
944 return batadv_route_unicast_packet(skb, recv_if);
945
946free_skb:
947 kfree_skb(skb);
948 return NET_RX_DROP;
949}
950
951
952
953
954
955
956
957
/**
 * batadv_recv_unicast_packet() - process an incoming unicast packet
 * @skb: the received skb
 * @recv_if: interface that the skb is received on
 *
 * Packets addressed to this node are (after bridge loop avoidance and DAT
 * snooping) delivered to the soft interface; everything else is routed
 * towards its destination.
 *
 * Return: NET_RX_SUCCESS if the packet was processed or forwarded,
 * NET_RX_DROP otherwise.
 */
int batadv_recv_unicast_packet(struct sk_buff *skb,
			       struct batadv_hard_iface *recv_if)
{
	struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct batadv_unicast_packet *unicast_packet;
	struct batadv_unicast_4addr_packet *unicast_4addr_packet;
	u8 *orig_addr, *orig_addr_gw;
	struct batadv_orig_node *orig_node = NULL, *orig_node_gw = NULL;
	int check, hdr_size = sizeof(*unicast_packet);
	enum batadv_subtype subtype;
	int ret = NET_RX_DROP;
	bool is4addr, is_gw;

	unicast_packet = (struct batadv_unicast_packet *)skb->data;
	is4addr = unicast_packet->packet_type == BATADV_UNICAST_4ADDR;

	/* 4addr packets carry the larger unicast_4addr header */
	if (is4addr)
		hdr_size = sizeof(*unicast_4addr_packet);

	/* returns -EREMOTE for frames captured in promiscuous mode */
	check = batadv_check_unicast_packet(bat_priv, skb, hdr_size);

	/* even though the packet is not for us, it may be useful for
	 * decoding a later received coded packet
	 */
	if (check == -EREMOTE)
		batadv_nc_skb_store_sniffed_unicast(bat_priv, skb);

	if (check < 0)
		goto free_skb;
	if (!batadv_check_unicast_ttvn(bat_priv, skb, hdr_size))
		goto free_skb;

	/* batadv_check_unicast_ttvn() may have rewritten the header */
	unicast_packet = (struct batadv_unicast_packet *)skb->data;

	/* packet for me */
	if (batadv_is_my_mac(bat_priv, unicast_packet->dest)) {
		/* drop unicast packets coming from another backbone gateway
		 * on the same backbone (bridge loop avoidance)
		 */
		orig_addr_gw = eth_hdr(skb)->h_source;
		orig_node_gw = batadv_orig_hash_find(bat_priv, orig_addr_gw);
		if (orig_node_gw) {
			is_gw = batadv_bla_is_backbone_gw(skb, orig_node_gw,
							  hdr_size);
			batadv_orig_node_put(orig_node_gw);
			if (is_gw) {
				batadv_dbg(BATADV_DBG_BLA, bat_priv,
					   "%s(): Dropped unicast pkt received from another backbone gw %pM.\n",
					   __func__, orig_addr_gw);
				goto free_skb;
			}
		}

		if (is4addr) {
			unicast_4addr_packet =
				(struct batadv_unicast_4addr_packet *)skb->data;
			subtype = unicast_4addr_packet->subtype;
			batadv_dat_inc_counter(bat_priv, subtype);

			/* only BATADV_P_DATA packets resolve the sending
			 * originator here; other 4addr subtypes (e.g. DAT)
			 * do not originate from the client's node
			 */
			if (subtype == BATADV_P_DATA) {
				orig_addr = unicast_4addr_packet->src;
				orig_node = batadv_orig_hash_find(bat_priv,
								  orig_addr);
			}
		}

		/* ARP traffic answered by DAT is consumed there */
		if (batadv_dat_snoop_incoming_arp_request(bat_priv, skb,
							  hdr_size))
			goto rx_success;
		if (batadv_dat_snoop_incoming_arp_reply(bat_priv, skb,
							hdr_size))
			goto rx_success;

		batadv_dat_snoop_incoming_dhcp_ack(bat_priv, skb, hdr_size);

		/* hand the packet up to the soft interface */
		batadv_interface_rx(recv_if->soft_iface, skb, hdr_size,
				    orig_node);

rx_success:
		if (orig_node)
			batadv_orig_node_put(orig_node);

		return NET_RX_SUCCESS;
	}

	ret = batadv_route_unicast_packet(skb, recv_if);
	/* skb was consumed */
	skb = NULL;

free_skb:
	kfree_skb(skb);

	return ret;
}
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068int batadv_recv_unicast_tvlv(struct sk_buff *skb,
1069 struct batadv_hard_iface *recv_if)
1070{
1071 struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
1072 struct batadv_unicast_tvlv_packet *unicast_tvlv_packet;
1073 unsigned char *tvlv_buff;
1074 u16 tvlv_buff_len;
1075 int hdr_size = sizeof(*unicast_tvlv_packet);
1076 int ret = NET_RX_DROP;
1077
1078 if (batadv_check_unicast_packet(bat_priv, skb, hdr_size) < 0)
1079 goto free_skb;
1080
1081
1082 if (skb_cow(skb, hdr_size) < 0)
1083 goto free_skb;
1084
1085
1086 if (skb_linearize(skb) < 0)
1087 goto free_skb;
1088
1089 unicast_tvlv_packet = (struct batadv_unicast_tvlv_packet *)skb->data;
1090
1091 tvlv_buff = (unsigned char *)(skb->data + hdr_size);
1092 tvlv_buff_len = ntohs(unicast_tvlv_packet->tvlv_len);
1093
1094 if (tvlv_buff_len > skb->len - hdr_size)
1095 goto free_skb;
1096
1097 ret = batadv_tvlv_containers_process(bat_priv, false, NULL,
1098 unicast_tvlv_packet->src,
1099 unicast_tvlv_packet->dst,
1100 tvlv_buff, tvlv_buff_len);
1101
1102 if (ret != NET_RX_SUCCESS) {
1103 ret = batadv_route_unicast_packet(skb, recv_if);
1104
1105 skb = NULL;
1106 }
1107
1108free_skb:
1109 kfree_skb(skb);
1110
1111 return ret;
1112}
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
/**
 * batadv_recv_frag_packet() - process a received fragment
 * @skb: the received fragment
 * @recv_if: interface that the skb is received on
 *
 * Fragments addressed to other nodes are forwarded; fragments for this
 * node are buffered and, once the packet is complete, the merged skb is
 * fed back into the batman-adv receive path.
 *
 * Return: NET_RX_SUCCESS if the fragment was handled, NET_RX_DROP otherwise.
 */
int batadv_recv_frag_packet(struct sk_buff *skb,
			    struct batadv_hard_iface *recv_if)
{
	struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct batadv_orig_node *orig_node_src = NULL;
	struct batadv_frag_packet *frag_packet;
	int ret = NET_RX_DROP;

	if (batadv_check_unicast_packet(bat_priv, skb,
					sizeof(*frag_packet)) < 0)
		goto free_skb;

	frag_packet = (struct batadv_frag_packet *)skb->data;
	orig_node_src = batadv_orig_hash_find(bat_priv, frag_packet->orig);
	if (!orig_node_src)
		goto free_skb;

	/* restore the sender's priority; the +256 offset appears to mark the
	 * priority as explicitly set - confirm against
	 * batadv_skb_set_priority()
	 */
	skb->priority = frag_packet->priority + 256;

	/* not for us: try to forward the fragment towards its destination */
	if (!batadv_is_my_mac(bat_priv, frag_packet->dest) &&
	    batadv_frag_skb_fwd(skb, recv_if, orig_node_src)) {
		/* skb was consumed by the forward path */
		skb = NULL;
		ret = NET_RX_SUCCESS;
		goto put_orig_node;
	}

	batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_RX);
	batadv_add_counter(bat_priv, BATADV_CNT_FRAG_RX_BYTES, skb->len);

	/* add fragment to the reassembly buffer; on success, skb is either
	 * taken over (set to NULL) or replaced by the merged packet
	 */
	if (!batadv_frag_skb_buffer(&skb, orig_node_src))
		goto put_orig_node;

	/* a non-NULL skb at this point is the fully merged packet - feed it
	 * back into the receive path
	 */
	if (skb) {
		batadv_batman_skb_recv(skb, recv_if->net_dev,
				       &recv_if->batman_adv_ptype, NULL);
		/* skb was consumed */
		skb = NULL;
	}

	ret = NET_RX_SUCCESS;

put_orig_node:
	batadv_orig_node_put(orig_node_src);
free_skb:
	kfree_skb(skb);

	return ret;
}
1179
1180
1181
1182
1183
1184
1185
1186
/**
 * batadv_recv_bcast_packet() - process an incoming broadcast packet
 * @skb: the received skb
 * @recv_if: interface that the skb is received on
 *
 * Duplicate and out-of-window broadcasts are dropped; accepted packets are
 * scheduled for rebroadcast and handed up to the soft interface.
 *
 * Return: NET_RX_SUCCESS if the packet was processed, NET_RX_DROP otherwise.
 */
int batadv_recv_bcast_packet(struct sk_buff *skb,
			     struct batadv_hard_iface *recv_if)
{
	struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct batadv_orig_node *orig_node = NULL;
	struct batadv_bcast_packet *bcast_packet;
	struct ethhdr *ethhdr;
	int hdr_size = sizeof(*bcast_packet);
	int ret = NET_RX_DROP;
	s32 seq_diff;
	u32 seqno;

	/* drop packet if it does not provide the minimum header */
	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		goto free_skb;

	ethhdr = eth_hdr(skb);

	/* packet with broadcast indication but unicast recipient */
	if (!is_broadcast_ether_addr(ethhdr->h_dest))
		goto free_skb;

	/* packet with broadcast/multicast sender address */
	if (is_multicast_ether_addr(ethhdr->h_source))
		goto free_skb;

	/* ignore broadcasts sent by this node itself */
	if (batadv_is_my_mac(bat_priv, ethhdr->h_source))
		goto free_skb;

	bcast_packet = (struct batadv_bcast_packet *)skb->data;

	/* ignore broadcasts originated by this node */
	if (batadv_is_my_mac(bat_priv, bcast_packet->orig))
		goto free_skb;

	/* TTL exhausted */
	if (bcast_packet->ttl < 2)
		goto free_skb;

	orig_node = batadv_orig_hash_find(bat_priv, bcast_packet->orig);

	if (!orig_node)
		goto free_skb;

	/* the seqno window below is shared state - take the lock */
	spin_lock_bh(&orig_node->bcast_seqno_lock);

	seqno = ntohl(bcast_packet->seqno);
	/* drop exact duplicates already recorded in the window */
	if (batadv_test_bit(orig_node->bcast_bits, orig_node->last_bcast_seqno,
			    seqno))
		goto spin_unlock;

	seq_diff = seqno - orig_node->last_bcast_seqno;

	/* drop packets that are too old or trigger window protection */
	if (batadv_window_protected(bat_priv, seq_diff,
				    BATADV_BCAST_MAX_AGE,
				    &orig_node->bcast_seqno_reset, NULL))
		goto spin_unlock;

	/* mark the broadcast in the seqno window and move the window
	 * forward when needed
	 */
	if (batadv_bit_get_packet(bat_priv, orig_node->bcast_bits, seq_diff, 1))
		orig_node->last_bcast_seqno = seqno;

	spin_unlock_bh(&orig_node->bcast_seqno_lock);

	/* drop broadcasts already seen from another backbone gateway */
	if (batadv_bla_check_bcast_duplist(bat_priv, skb))
		goto free_skb;

	batadv_skb_set_priority(skb, sizeof(struct batadv_bcast_packet));

	/* schedule rebroadcast */
	batadv_add_bcast_packet_to_list(bat_priv, skb, 1, false);

	/* don't hand the broadcast up if it comes from the same backbone */
	if (batadv_bla_is_backbone_gw(skb, orig_node, hdr_size))
		goto free_skb;

	/* ARP traffic answered by DAT is consumed there */
	if (batadv_dat_snoop_incoming_arp_request(bat_priv, skb, hdr_size))
		goto rx_success;
	if (batadv_dat_snoop_incoming_arp_reply(bat_priv, skb, hdr_size))
		goto rx_success;

	batadv_dat_snoop_incoming_dhcp_ack(bat_priv, skb, hdr_size);

	/* broadcast for me - hand it up to the soft interface */
	batadv_interface_rx(recv_if->soft_iface, skb, hdr_size, orig_node);

rx_success:
	ret = NET_RX_SUCCESS;
	goto out;

spin_unlock:
	spin_unlock_bh(&orig_node->bcast_seqno_lock);
free_skb:
	kfree_skb(skb);
out:
	if (orig_node)
		batadv_orig_node_put(orig_node);
	return ret;
}
1293