1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18#include "routing.h"
19#include "main.h"
20
21#include <linux/atomic.h>
22#include <linux/byteorder/generic.h>
23#include <linux/compiler.h>
24#include <linux/errno.h>
25#include <linux/etherdevice.h>
26#include <linux/if_ether.h>
27#include <linux/jiffies.h>
28#include <linux/kref.h>
29#include <linux/netdevice.h>
30#include <linux/printk.h>
31#include <linux/rculist.h>
32#include <linux/rcupdate.h>
33#include <linux/skbuff.h>
34#include <linux/spinlock.h>
35#include <linux/stddef.h>
36
37#include "bitarray.h"
38#include "bridge_loop_avoidance.h"
39#include "distributed-arp-table.h"
40#include "fragmentation.h"
41#include "hard-interface.h"
42#include "icmp_socket.h"
43#include "log.h"
44#include "network-coding.h"
45#include "originator.h"
46#include "packet.h"
47#include "send.h"
48#include "soft-interface.h"
49#include "tp_meter.h"
50#include "translation-table.h"
51#include "tvlv.h"
52
53static int batadv_route_unicast_packet(struct sk_buff *skb,
54 struct batadv_hard_iface *recv_if);
55
56
57
58
59
60
61
62
63
64
/**
 * _batadv_update_route() - set the router towards an originator for one
 *  outgoing interface
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: originator whose router is replaced
 * @recv_if: interface for which this router entry applies
 * @neigh_node: new next hop router (NULL deletes the route)
 *
 * Swaps the router pointer stored in the orig_ifinfo of @orig_node for
 * @recv_if, adjusting refcounts, and logs whether a route was added,
 * changed or deleted.
 */
static void _batadv_update_route(struct batadv_priv *bat_priv,
				 struct batadv_orig_node *orig_node,
				 struct batadv_hard_iface *recv_if,
				 struct batadv_neigh_node *neigh_node)
{
	struct batadv_orig_ifinfo *orig_ifinfo;
	struct batadv_neigh_node *curr_router;

	orig_ifinfo = batadv_orig_ifinfo_get(orig_node, recv_if);
	if (!orig_ifinfo)
		return;

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* the router pointer is updated below while neigh_list_lock is held,
	 * so a plain protected dereference (no RCU read side) is sufficient
	 * here
	 */
	curr_router = rcu_dereference_protected(orig_ifinfo->router, true);

	/* take a reference for the new router before publishing it */
	if (neigh_node)
		kref_get(&neigh_node->refcount);

	rcu_assign_pointer(orig_ifinfo->router, neigh_node);
	spin_unlock_bh(&orig_node->neigh_list_lock);
	batadv_orig_ifinfo_put(orig_ifinfo);

	/* route deleted: also flush the global translation table entries
	 * announced by this originator
	 */
	if ((curr_router) && (!neigh_node)) {
		batadv_dbg(BATADV_DBG_ROUTES, bat_priv,
			   "Deleting route towards: %pM\n", orig_node->orig);
		batadv_tt_global_del_orig(bat_priv, orig_node, -1,
					  "Deleted route towards originator");

	/* route added */
	} else if ((!curr_router) && (neigh_node)) {
		batadv_dbg(BATADV_DBG_ROUTES, bat_priv,
			   "Adding route towards: %pM (via %pM)\n",
			   orig_node->orig, neigh_node->addr);

	/* route changed */
	} else if (neigh_node && curr_router) {
		batadv_dbg(BATADV_DBG_ROUTES, bat_priv,
			   "Changing route towards: %pM (now via %pM - was via %pM)\n",
			   orig_node->orig, neigh_node->addr,
			   curr_router->addr);
	}

	/* drop the reference that orig_ifinfo->router held on the old router */
	if (curr_router)
		batadv_neigh_node_put(curr_router);
}
119
120
121
122
123
124
125
126
/**
 * batadv_update_route() - update the router towards an originator if needed
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: originator to update (may be NULL, then nothing happens)
 * @recv_if: interface for which the router entry applies
 * @neigh_node: neighbor which should become the next hop (or NULL)
 *
 * Only calls into _batadv_update_route() when the currently stored router
 * actually differs from @neigh_node.
 */
void batadv_update_route(struct batadv_priv *bat_priv,
			 struct batadv_orig_node *orig_node,
			 struct batadv_hard_iface *recv_if,
			 struct batadv_neigh_node *neigh_node)
{
	struct batadv_neigh_node *curr_router;

	if (!orig_node)
		return;

	curr_router = batadv_orig_router_get(orig_node, recv_if);
	if (curr_router != neigh_node)
		_batadv_update_route(bat_priv, orig_node, recv_if, neigh_node);

	/* release the reference taken by batadv_orig_router_get() */
	if (curr_router)
		batadv_neigh_node_put(curr_router);
}
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163bool batadv_window_protected(struct batadv_priv *bat_priv, s32 seq_num_diff,
164 s32 seq_old_max_diff, unsigned long *last_reset,
165 bool *protection_started)
166{
167 if (seq_num_diff <= -seq_old_max_diff ||
168 seq_num_diff >= BATADV_EXPECTED_SEQNO_RANGE) {
169 if (!batadv_has_timed_out(*last_reset,
170 BATADV_RESET_PROTECTION_MS))
171 return true;
172
173 *last_reset = jiffies;
174 if (protection_started)
175 *protection_started = true;
176 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
177 "old packet received, start protection\n");
178 }
179
180 return false;
181}
182
183bool batadv_check_management_packet(struct sk_buff *skb,
184 struct batadv_hard_iface *hard_iface,
185 int header_len)
186{
187 struct ethhdr *ethhdr;
188
189
190 if (unlikely(!pskb_may_pull(skb, header_len)))
191 return false;
192
193 ethhdr = eth_hdr(skb);
194
195
196 if (!is_broadcast_ether_addr(ethhdr->h_dest))
197 return false;
198
199
200 if (!is_valid_ether_addr(ethhdr->h_source))
201 return false;
202
203
204 if (skb_cow(skb, 0) < 0)
205 return false;
206
207
208 if (skb_linearize(skb) < 0)
209 return false;
210
211 return true;
212}
213
214
215
216
217
218
219
220
221
/**
 * batadv_recv_my_icmp_packet() - process an icmp packet addressed to this node
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the received icmp packet
 *
 * Replies/errors are delivered to the local icmp socket, echo requests are
 * turned around and answered, and TP packets are handed to the throughput
 * meter. The skb is always consumed (either handed off or freed here).
 *
 * Return: NET_RX_SUCCESS on success, NET_RX_DROP otherwise.
 */
static int batadv_recv_my_icmp_packet(struct batadv_priv *bat_priv,
				      struct sk_buff *skb)
{
	struct batadv_hard_iface *primary_if = NULL;
	struct batadv_orig_node *orig_node = NULL;
	struct batadv_icmp_header *icmph;
	int res, ret = NET_RX_DROP;

	icmph = (struct batadv_icmp_header *)skb->data;

	switch (icmph->msg_type) {
	case BATADV_ECHO_REPLY:
	case BATADV_DESTINATION_UNREACHABLE:
	case BATADV_TTL_EXCEEDED:
		/* the socket layer reads the whole packet, so make it linear */
		if (skb_linearize(skb) < 0)
			break;

		batadv_socket_receive_packet(icmph, skb->len);
		break;
	case BATADV_ECHO_REQUEST:
		/* answer echo request (ping) */
		primary_if = batadv_primary_if_get_selected(bat_priv);
		if (!primary_if)
			goto out;

		/* get routing information towards the requester */
		orig_node = batadv_orig_hash_find(bat_priv, icmph->orig);
		if (!orig_node)
			goto out;

		/* create a copy of the skb, if needed, to modify it. */
		if (skb_cow(skb, ETH_HLEN) < 0)
			goto out;

		/* skb_cow() may have reallocated the buffer */
		icmph = (struct batadv_icmp_header *)skb->data;

		/* turn the request around: reply goes back to the sender */
		ether_addr_copy(icmph->dst, icmph->orig);
		ether_addr_copy(icmph->orig, primary_if->net_dev->dev_addr);
		icmph->msg_type = BATADV_ECHO_REPLY;
		icmph->ttl = BATADV_TTL;

		res = batadv_send_skb_to_orig(skb, orig_node, NULL);
		if (res == NET_XMIT_SUCCESS)
			ret = NET_RX_SUCCESS;

		/* skb was consumed by batadv_send_skb_to_orig() */
		skb = NULL;
		break;
	case BATADV_TP:
		if (!pskb_may_pull(skb, sizeof(struct batadv_icmp_tp_packet)))
			goto out;

		batadv_tp_meter_recv(bat_priv, skb);
		ret = NET_RX_SUCCESS;
		/* skb was consumed by the tp meter */
		skb = NULL;
		goto out;
	default:
		/* drop unknown icmp message type */
		goto out;
	}
out:
	if (primary_if)
		batadv_hardif_put(primary_if);
	if (orig_node)
		batadv_orig_node_put(orig_node);

	kfree_skb(skb);

	return ret;
}
294
295static int batadv_recv_icmp_ttl_exceeded(struct batadv_priv *bat_priv,
296 struct sk_buff *skb)
297{
298 struct batadv_hard_iface *primary_if = NULL;
299 struct batadv_orig_node *orig_node = NULL;
300 struct batadv_icmp_packet *icmp_packet;
301 int res, ret = NET_RX_DROP;
302
303 icmp_packet = (struct batadv_icmp_packet *)skb->data;
304
305
306 if (icmp_packet->msg_type != BATADV_ECHO_REQUEST) {
307 pr_debug("Warning - can't forward icmp packet from %pM to %pM: ttl exceeded\n",
308 icmp_packet->orig, icmp_packet->dst);
309 goto out;
310 }
311
312 primary_if = batadv_primary_if_get_selected(bat_priv);
313 if (!primary_if)
314 goto out;
315
316
317 orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->orig);
318 if (!orig_node)
319 goto out;
320
321
322 if (skb_cow(skb, ETH_HLEN) < 0)
323 goto out;
324
325 icmp_packet = (struct batadv_icmp_packet *)skb->data;
326
327 ether_addr_copy(icmp_packet->dst, icmp_packet->orig);
328 ether_addr_copy(icmp_packet->orig, primary_if->net_dev->dev_addr);
329 icmp_packet->msg_type = BATADV_TTL_EXCEEDED;
330 icmp_packet->ttl = BATADV_TTL;
331
332 res = batadv_send_skb_to_orig(skb, orig_node, NULL);
333 if (res == NET_RX_SUCCESS)
334 ret = NET_XMIT_SUCCESS;
335
336
337 skb = NULL;
338
339out:
340 if (primary_if)
341 batadv_hardif_put(primary_if);
342 if (orig_node)
343 batadv_orig_node_put(orig_node);
344
345 kfree_skb(skb);
346
347 return ret;
348}
349
/**
 * batadv_recv_icmp_packet() - handle a received batman-adv icmp packet
 * @skb: the received packet
 * @recv_if: interface the packet arrived on
 *
 * Validates the frame, optionally records this hop in the record-route array
 * of echo packets, and then either processes the packet locally, answers
 * with ttl-exceeded, or forwards it towards its destination. The skb is
 * always consumed.
 *
 * Return: NET_RX_SUCCESS on success, NET_RX_DROP otherwise.
 */
int batadv_recv_icmp_packet(struct sk_buff *skb,
			    struct batadv_hard_iface *recv_if)
{
	struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct batadv_icmp_header *icmph;
	struct batadv_icmp_packet_rr *icmp_packet_rr;
	struct ethhdr *ethhdr;
	struct batadv_orig_node *orig_node = NULL;
	int hdr_size = sizeof(struct batadv_icmp_header);
	int res, ret = NET_RX_DROP;

	/* drop packet if it has not the required minimum length */
	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		goto free_skb;

	ethhdr = eth_hdr(skb);

	/* packet with unicast indication but non-unicast recipient */
	if (!is_valid_ether_addr(ethhdr->h_dest))
		goto free_skb;

	/* packet with broadcast/multicast sender address */
	if (is_multicast_ether_addr(ethhdr->h_source))
		goto free_skb;

	/* not for me */
	if (!batadv_is_my_mac(bat_priv, ethhdr->h_dest))
		goto free_skb;

	icmph = (struct batadv_icmp_header *)skb->data;

	/* add record route information if packet is big enough to carry the
	 * record-route array (echo request/reply only)
	 */
	if ((icmph->msg_type == BATADV_ECHO_REPLY ||
	     icmph->msg_type == BATADV_ECHO_REQUEST) &&
	    (skb->len >= sizeof(struct batadv_icmp_packet_rr))) {
		if (skb_linearize(skb) < 0)
			goto free_skb;

		/* create a copy of the skb, if needed, to modify it. */
		if (skb_cow(skb, ETH_HLEN) < 0)
			goto free_skb;

		/* both header pointers may have moved after cow/linearize */
		ethhdr = eth_hdr(skb);
		icmph = (struct batadv_icmp_header *)skb->data;
		icmp_packet_rr = (struct batadv_icmp_packet_rr *)icmph;
		if (icmp_packet_rr->rr_cur >= BATADV_RR_LEN)
			goto free_skb;

		/* record this hop's address in the next free rr slot */
		ether_addr_copy(icmp_packet_rr->rr[icmp_packet_rr->rr_cur],
				ethhdr->h_dest);
		icmp_packet_rr->rr_cur++;
	}

	/* packet for me: hand over ownership of the skb */
	if (batadv_is_my_mac(bat_priv, icmph->dst))
		return batadv_recv_my_icmp_packet(bat_priv, skb);

	/* TTL exceeded: hand over ownership of the skb */
	if (icmph->ttl < 2)
		return batadv_recv_icmp_ttl_exceeded(bat_priv, skb);

	/* get routing information towards the destination */
	orig_node = batadv_orig_hash_find(bat_priv, icmph->dst);
	if (!orig_node)
		goto free_skb;

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, ETH_HLEN) < 0)
		goto put_orig_node;

	icmph = (struct batadv_icmp_header *)skb->data;

	/* decrement ttl before forwarding */
	icmph->ttl--;

	/* route it */
	res = batadv_send_skb_to_orig(skb, orig_node, recv_if);
	if (res == NET_XMIT_SUCCESS)
		ret = NET_RX_SUCCESS;

	/* skb was consumed by batadv_send_skb_to_orig() */
	skb = NULL;

put_orig_node:
	if (orig_node)
		batadv_orig_node_put(orig_node);
free_skb:
	kfree_skb(skb);

	return ret;
}
441
442
443
444
445
446
447
448
449
450
451
452
453
454static int batadv_check_unicast_packet(struct batadv_priv *bat_priv,
455 struct sk_buff *skb, int hdr_size)
456{
457 struct ethhdr *ethhdr;
458
459
460 if (unlikely(!pskb_may_pull(skb, hdr_size)))
461 return -ENODATA;
462
463 ethhdr = eth_hdr(skb);
464
465
466 if (!is_valid_ether_addr(ethhdr->h_dest))
467 return -EBADR;
468
469
470 if (is_multicast_ether_addr(ethhdr->h_source))
471 return -EBADR;
472
473
474 if (!batadv_is_my_mac(bat_priv, ethhdr->h_dest))
475 return -EREMOTE;
476
477 return 0;
478}
479
480
481
482
483
484
485
486
487
488static struct batadv_orig_ifinfo *
489batadv_last_bonding_get(struct batadv_orig_node *orig_node)
490{
491 struct batadv_orig_ifinfo *last_bonding_candidate;
492
493 spin_lock_bh(&orig_node->neigh_list_lock);
494 last_bonding_candidate = orig_node->last_bonding_candidate;
495
496 if (last_bonding_candidate)
497 kref_get(&last_bonding_candidate->refcount);
498 spin_unlock_bh(&orig_node->neigh_list_lock);
499
500 return last_bonding_candidate;
501}
502
503
504
505
506
507
508static void
509batadv_last_bonding_replace(struct batadv_orig_node *orig_node,
510 struct batadv_orig_ifinfo *new_candidate)
511{
512 struct batadv_orig_ifinfo *old_candidate;
513
514 spin_lock_bh(&orig_node->neigh_list_lock);
515 old_candidate = orig_node->last_bonding_candidate;
516
517 if (new_candidate)
518 kref_get(&new_candidate->refcount);
519 orig_node->last_bonding_candidate = new_candidate;
520 spin_unlock_bh(&orig_node->neigh_list_lock);
521
522 if (old_candidate)
523 batadv_orig_ifinfo_put(old_candidate);
524}
525
526
527
528
529
530
531
532
533
534
/**
 * batadv_find_router() - find the next hop router towards an originator
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: the destination node (may be NULL)
 * @recv_if: interface the packet to be routed was received on, or
 *  BATADV_IF_DEFAULT
 *
 * Without bonding this simply returns the best router for @recv_if. With
 * bonding enabled (and @recv_if == BATADV_IF_DEFAULT) it rotates through
 * the routers of the orig_node's ifinfo list that are "similar or better"
 * than the default router, picking the candidate after the one used last
 * time, so consecutive packets are spread across interfaces.
 *
 * Return: the chosen neigh_node with its refcount incremented (caller must
 * put it), or NULL if no router is known.
 */
struct batadv_neigh_node *
batadv_find_router(struct batadv_priv *bat_priv,
		   struct batadv_orig_node *orig_node,
		   struct batadv_hard_iface *recv_if)
{
	struct batadv_algo_ops *bao = bat_priv->algo_ops;
	struct batadv_neigh_node *first_candidate_router = NULL;
	struct batadv_neigh_node *next_candidate_router = NULL;
	struct batadv_neigh_node *router, *cand_router = NULL;
	struct batadv_neigh_node *last_cand_router = NULL;
	struct batadv_orig_ifinfo *cand, *first_candidate = NULL;
	struct batadv_orig_ifinfo *next_candidate = NULL;
	struct batadv_orig_ifinfo *last_candidate;
	bool last_candidate_found = false;

	if (!orig_node)
		return NULL;

	router = batadv_orig_router_get(orig_node, recv_if);

	if (!router)
		return router;

	/* only consider bonding for recv_if == BATADV_IF_DEFAULT (first
	 * hop) and if activated
	 */
	if (!(recv_if == BATADV_IF_DEFAULT && atomic_read(&bat_priv->bonding)))
		return router;

	/* bonding: iterate over the candidate routers and pick the one that
	 * comes after the router used for the last bonded transmission
	 * (last_candidate); if last_candidate is not found anymore, fall
	 * back to the first acceptable candidate
	 */
	rcu_read_lock();
	last_candidate = batadv_last_bonding_get(orig_node);
	if (last_candidate)
		last_cand_router = rcu_dereference(last_candidate->router);

	hlist_for_each_entry_rcu(cand, &orig_node->ifinfo_list, list) {
		/* acquire some structures and references ... */
		if (!kref_get_unless_zero(&cand->refcount))
			continue;

		cand_router = rcu_dereference(cand->router);
		if (!cand_router)
			goto next;

		if (!kref_get_unless_zero(&cand_router->refcount)) {
			cand_router = NULL;
			goto next;
		}

		/* alternative candidate should be good enough to be
		 * considered (i.e. similar or better than the default
		 * router according to the routing algorithm)
		 */
		if (!bao->neigh.is_similar_or_better(cand_router,
						     cand->if_outgoing, router,
						     recv_if))
			goto next;

		/* don't use the same router twice in a row */
		if (last_cand_router == cand_router)
			goto next;

		/* remember the first acceptable candidate as fallback */
		if (!first_candidate) {
			kref_get(&cand_router->refcount);
			kref_get(&cand->refcount);
			first_candidate = cand;
			first_candidate_router = cand_router;
		}

		/* the first candidate after the previously used one wins;
		 * with no previous candidate the very first acceptable
		 * entry is taken
		 */
		if (!last_candidate || last_candidate_found) {
			next_candidate = cand;
			next_candidate_router = cand_router;
			break;
		}

		if (last_candidate == cand)
			last_candidate_found = true;
next:
		/* free references taken during this candidate's evaluation */
		if (cand_router) {
			batadv_neigh_node_put(cand_router);
			cand_router = NULL;
		}
		batadv_orig_ifinfo_put(cand);
	}
	rcu_read_unlock();

	/* replace the default router (taken above) with the rotation
	 * result: next candidate if available, otherwise the first
	 * candidate; with neither, clear the stored bonding candidate
	 */
	if (next_candidate) {
		batadv_neigh_node_put(router);

		/* remove references to first candidate, we don't need it. */
		kref_get(&next_candidate_router->refcount);
		router = next_candidate_router;
		batadv_last_bonding_replace(orig_node, next_candidate);
	} else if (first_candidate) {
		batadv_neigh_node_put(router);

		kref_get(&first_candidate_router->refcount);
		router = first_candidate_router;
		batadv_last_bonding_replace(orig_node, first_candidate);
	} else {
		batadv_last_bonding_replace(orig_node, NULL);
	}

	/* drop the temporary references held on the candidates */
	if (first_candidate) {
		batadv_neigh_node_put(first_candidate_router);
		batadv_orig_ifinfo_put(first_candidate);
	}

	if (next_candidate) {
		batadv_neigh_node_put(next_candidate_router);
		batadv_orig_ifinfo_put(next_candidate);
	}

	if (last_candidate)
		batadv_orig_ifinfo_put(last_candidate);

	return router;
}
670
/**
 * batadv_route_unicast_packet() - forward a unicast packet towards its
 *  destination
 * @skb: the packet to route
 * @recv_if: interface the packet arrived on
 *
 * Decrements the ttl, updates the forward counters and hands the packet to
 * the transmit path. The skb is always consumed.
 *
 * Return: NET_RX_SUCCESS when the packet was handed to the transmit path,
 * NET_RX_DROP otherwise.
 */
static int batadv_route_unicast_packet(struct sk_buff *skb,
				       struct batadv_hard_iface *recv_if)
{
	struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct batadv_orig_node *orig_node = NULL;
	struct batadv_unicast_packet *unicast_packet;
	struct ethhdr *ethhdr = eth_hdr(skb);
	int res, hdr_len, ret = NET_RX_DROP;
	unsigned int len;

	unicast_packet = (struct batadv_unicast_packet *)skb->data;

	/* TTL exceeded */
	if (unicast_packet->ttl < 2) {
		pr_debug("Warning - can't forward unicast packet from %pM to %pM: ttl exceeded\n",
			 ethhdr->h_source, unicast_packet->dest);
		goto free_skb;
	}

	/* get routing information */
	orig_node = batadv_orig_hash_find(bat_priv, unicast_packet->dest);

	if (!orig_node)
		goto free_skb;

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, ETH_HLEN) < 0)
		goto put_orig_node;

	/* decrement ttl */
	unicast_packet = (struct batadv_unicast_packet *)skb->data;
	unicast_packet->ttl--;

	switch (unicast_packet->packet_type) {
	case BATADV_UNICAST_4ADDR:
		hdr_len = sizeof(struct batadv_unicast_4addr_packet);
		break;
	case BATADV_UNICAST:
		hdr_len = sizeof(struct batadv_unicast_packet);
		break;
	default:
		/* other packet types not supported - skip priority setting */
		hdr_len = -1;
		break;
	}

	if (hdr_len > 0)
		batadv_skb_set_priority(skb, hdr_len);

	/* length must be sampled before the skb is handed off below */
	len = skb->len;
	res = batadv_send_skb_to_orig(skb, orig_node, recv_if);

	/* translate transmit result into receive result */
	if (res == NET_XMIT_SUCCESS) {
		ret = NET_RX_SUCCESS;

		/* skb was transmitted and consumed */
		batadv_inc_counter(bat_priv, BATADV_CNT_FORWARD);
		batadv_add_counter(bat_priv, BATADV_CNT_FORWARD_BYTES,
				   len + ETH_HLEN);
	}

	/* skb was consumed by batadv_send_skb_to_orig() */
	skb = NULL;

put_orig_node:
	batadv_orig_node_put(orig_node);
free_skb:
	kfree_skb(skb);

	return ret;
}
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756static bool
757batadv_reroute_unicast_packet(struct batadv_priv *bat_priv,
758 struct batadv_unicast_packet *unicast_packet,
759 u8 *dst_addr, unsigned short vid)
760{
761 struct batadv_orig_node *orig_node = NULL;
762 struct batadv_hard_iface *primary_if = NULL;
763 bool ret = false;
764 u8 *orig_addr, orig_ttvn;
765
766 if (batadv_is_my_client(bat_priv, dst_addr, vid)) {
767 primary_if = batadv_primary_if_get_selected(bat_priv);
768 if (!primary_if)
769 goto out;
770 orig_addr = primary_if->net_dev->dev_addr;
771 orig_ttvn = (u8)atomic_read(&bat_priv->tt.vn);
772 } else {
773 orig_node = batadv_transtable_search(bat_priv, NULL, dst_addr,
774 vid);
775 if (!orig_node)
776 goto out;
777
778 if (batadv_compare_eth(orig_node->orig, unicast_packet->dest))
779 goto out;
780
781 orig_addr = orig_node->orig;
782 orig_ttvn = (u8)atomic_read(&orig_node->last_ttvn);
783 }
784
785
786 ether_addr_copy(unicast_packet->dest, orig_addr);
787 unicast_packet->ttvn = orig_ttvn;
788
789 ret = true;
790out:
791 if (primary_if)
792 batadv_hardif_put(primary_if);
793 if (orig_node)
794 batadv_orig_node_put(orig_node);
795
796 return ret;
797}
798
/**
 * batadv_check_unicast_ttvn() - check and possibly fix the destination of a
 *  unicast packet based on translation table versions
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the packet to inspect
 * @hdr_len: length of the batman-adv unicast header in @skb
 *
 * Handles locally-roamed clients and stale translation table version
 * numbers (ttvn) by rerouting the packet to the originator currently
 * serving the inner destination.
 *
 * Return: true when the packet may be processed further, false when it has
 * to be dropped by the caller.
 */
static bool batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
				      struct sk_buff *skb, int hdr_len)
{
	struct batadv_unicast_packet *unicast_packet;
	struct batadv_hard_iface *primary_if;
	struct batadv_orig_node *orig_node;
	u8 curr_ttvn, old_ttvn;
	struct ethhdr *ethhdr;
	unsigned short vid;
	int is_old_ttvn;

	/* check if there is enough data before accessing it */
	if (!pskb_may_pull(skb, hdr_len + ETH_HLEN))
		return false;

	/* create a copy of the skb (if needed) to modify it. */
	if (skb_cow(skb, sizeof(*unicast_packet)) < 0)
		return false;

	unicast_packet = (struct batadv_unicast_packet *)skb->data;
	vid = batadv_get_vid(skb, hdr_len);
	/* the inner ethernet header starts right after the batman header */
	ethhdr = (struct ethhdr *)(skb->data + hdr_len);

	/* check if the destination client was served by this node and it is
	 * now roaming. In this case, it means that the node has got a ROAM_ADV
	 * message and that it knows the new destination in the mesh to re-route
	 * the packet to
	 */
	if (batadv_tt_local_client_is_roaming(bat_priv, ethhdr->h_dest, vid)) {
		if (batadv_reroute_unicast_packet(bat_priv, unicast_packet,
						  ethhdr->h_dest, vid))
			batadv_dbg_ratelimited(BATADV_DBG_TT,
					       bat_priv,
					       "Rerouting unicast packet to %pM (dst=%pM): Local Roaming\n",
					       unicast_packet->dest,
					       ethhdr->h_dest);
		/* the packet is forwarded in any case: if the reroute failed
		 * the client is still roaming and the original destination
		 * may know a better route anyway
		 */
		return true;
	}

	/* retrieve the ttvn to compare against: our own if the packet is
	 * addressed to this node, otherwise the last ttvn announced by the
	 * destination originator
	 */
	curr_ttvn = (u8)atomic_read(&bat_priv->tt.vn);
	if (!batadv_is_my_mac(bat_priv, unicast_packet->dest)) {
		orig_node = batadv_orig_hash_find(bat_priv,
						  unicast_packet->dest);
		/* if it is not possible to find the orig_node representing the
		 * destination, the packet can immediately be dropped as it will
		 * not be possible to deliver it
		 */
		if (!orig_node)
			return false;

		curr_ttvn = (u8)atomic_read(&orig_node->last_ttvn);
		batadv_orig_node_put(orig_node);
	}

	/* check whether the packet carries a ttvn older than the current
	 * one; if not, it is up to date and can be processed as is
	 */
	is_old_ttvn = batadv_seq_before(unicast_packet->ttvn, curr_ttvn);
	if (!is_old_ttvn)
		return true;

	old_ttvn = unicast_packet->ttvn;

	/* the packet was sent with an outdated translation table: try to
	 * re-route it towards the node currently serving the destination
	 * client
	 */
	if (batadv_reroute_unicast_packet(bat_priv, unicast_packet,
					  ethhdr->h_dest, vid)) {
		batadv_dbg_ratelimited(BATADV_DBG_TT, bat_priv,
				       "Rerouting unicast packet to %pM (dst=%pM): TTVN mismatch old_ttvn=%u new_ttvn=%u\n",
				       unicast_packet->dest, ethhdr->h_dest,
				       old_ttvn, curr_ttvn);
		return true;
	}

	/* the packet has an old ttvn and no reroute was possible; accept it
	 * only if the destination client is served by this node
	 */
	if (!batadv_is_my_client(bat_priv, ethhdr->h_dest, vid))
		return false;

	/* update the header with our own address and current ttvn so the
	 * packet is processed locally
	 */
	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		return false;

	ether_addr_copy(unicast_packet->dest, primary_if->net_dev->dev_addr);

	batadv_hardif_put(primary_if);

	unicast_packet->ttvn = curr_ttvn;

	return true;
}
905
906
907
908
909
910
911
912
913
914
915int batadv_recv_unhandled_unicast_packet(struct sk_buff *skb,
916 struct batadv_hard_iface *recv_if)
917{
918 struct batadv_unicast_packet *unicast_packet;
919 struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
920 int check, hdr_size = sizeof(*unicast_packet);
921
922 check = batadv_check_unicast_packet(bat_priv, skb, hdr_size);
923 if (check < 0)
924 goto free_skb;
925
926
927 unicast_packet = (struct batadv_unicast_packet *)skb->data;
928 if (batadv_is_my_mac(bat_priv, unicast_packet->dest))
929 goto free_skb;
930
931 return batadv_route_unicast_packet(skb, recv_if);
932
933free_skb:
934 kfree_skb(skb);
935 return NET_RX_DROP;
936}
937
/**
 * batadv_recv_unicast_packet() - handle a received unicast packet
 * @skb: the received packet
 * @recv_if: interface the packet arrived on
 *
 * Validates the packet, fixes stale destinations via the translation table
 * and then either delivers the payload locally (after bridge loop avoidance
 * and DAT snooping) or forwards the packet towards its destination. The skb
 * is always consumed.
 *
 * Return: NET_RX_SUCCESS on success, NET_RX_DROP otherwise.
 */
int batadv_recv_unicast_packet(struct sk_buff *skb,
			       struct batadv_hard_iface *recv_if)
{
	struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct batadv_unicast_packet *unicast_packet;
	struct batadv_unicast_4addr_packet *unicast_4addr_packet;
	u8 *orig_addr, *orig_addr_gw;
	struct batadv_orig_node *orig_node = NULL, *orig_node_gw = NULL;
	int check, hdr_size = sizeof(*unicast_packet);
	enum batadv_subtype subtype;
	struct ethhdr *ethhdr;
	int ret = NET_RX_DROP;
	bool is4addr, is_gw;

	unicast_packet = (struct batadv_unicast_packet *)skb->data;
	unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
	ethhdr = eth_hdr(skb);

	is4addr = unicast_packet->packet_type == BATADV_UNICAST_4ADDR;
	/* the caller function should have already pulled 2 bytes */
	if (is4addr)
		hdr_size = sizeof(*unicast_4addr_packet);

	/* function returns -EREMOTE for promiscuous packets */
	check = batadv_check_unicast_packet(bat_priv, skb, hdr_size);

	/* Even though the packet is not for us, we might save it to use for
	 * decoding a later received coded packet
	 */
	if (check == -EREMOTE)
		batadv_nc_skb_store_sniffed_unicast(bat_priv, skb);

	if (check < 0)
		goto free_skb;
	if (!batadv_check_unicast_ttvn(bat_priv, skb, hdr_size))
		goto free_skb;

	/* packet for me */
	if (batadv_is_my_mac(bat_priv, unicast_packet->dest)) {
		/* If this is a unicast packet from another backgone gw,
		 * drop it.
		 */
		orig_addr_gw = ethhdr->h_source;
		orig_node_gw = batadv_orig_hash_find(bat_priv, orig_addr_gw);
		if (orig_node_gw) {
			is_gw = batadv_bla_is_backbone_gw(skb, orig_node_gw,
							  hdr_size);
			batadv_orig_node_put(orig_node_gw);
			if (is_gw) {
				batadv_dbg(BATADV_DBG_BLA, bat_priv,
					   "%s(): Dropped unicast pkt received from another backbone gw %pM.\n",
					   __func__, orig_addr_gw);
				goto free_skb;
			}
		}

		if (is4addr) {
			subtype = unicast_4addr_packet->subtype;
			batadv_dat_inc_counter(bat_priv, subtype);

			/* Only payload data should be considered for the
			 * speedy join: look up the originator of P_DATA
			 * packets so batadv_interface_rx() can refresh the
			 * translation table entry for the sender
			 */
			if (subtype == BATADV_P_DATA) {
				orig_addr = unicast_4addr_packet->src;
				orig_node = batadv_orig_hash_find(bat_priv,
								  orig_addr);
			}
		}

		/* DAT may answer/absorb ARP traffic; in that case the skb
		 * has been consumed
		 */
		if (batadv_dat_snoop_incoming_arp_request(bat_priv, skb,
							  hdr_size))
			goto rx_success;
		if (batadv_dat_snoop_incoming_arp_reply(bat_priv, skb,
							hdr_size))
			goto rx_success;

		/* deliver payload to the soft interface; consumes the skb */
		batadv_interface_rx(recv_if->soft_iface, skb, hdr_size,
				    orig_node);

rx_success:
		if (orig_node)
			batadv_orig_node_put(orig_node);

		return NET_RX_SUCCESS;
	}

	/* not for me: forward towards the destination */
	ret = batadv_route_unicast_packet(skb, recv_if);
	/* skb was consumed by batadv_route_unicast_packet() */
	skb = NULL;

free_skb:
	kfree_skb(skb);

	return ret;
}
1037
1038
1039
1040
1041
1042
1043
1044
1045
/**
 * batadv_recv_unicast_tvlv() - handle a received unicast tvlv packet
 * @skb: the received packet
 * @recv_if: interface the packet arrived on
 *
 * Parses the tvlv container and hands it to the registered tvlv handlers;
 * when no handler processed it the packet is forwarded instead. The skb is
 * always consumed.
 *
 * Return: NET_RX_SUCCESS on success, NET_RX_DROP otherwise.
 */
int batadv_recv_unicast_tvlv(struct sk_buff *skb,
			     struct batadv_hard_iface *recv_if)
{
	struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct batadv_unicast_tvlv_packet *unicast_tvlv_packet;
	unsigned char *tvlv_buff;
	u16 tvlv_buff_len;
	int hdr_size = sizeof(*unicast_tvlv_packet);
	int ret = NET_RX_DROP;

	if (batadv_check_unicast_packet(bat_priv, skb, hdr_size) < 0)
		goto free_skb;

	/* the header below may be modified, so the skb must be writable */
	if (skb_cow(skb, hdr_size) < 0)
		goto free_skb;

	/* the tvlv content is parsed from a flat buffer */
	if (skb_linearize(skb) < 0)
		goto free_skb;

	unicast_tvlv_packet = (struct batadv_unicast_tvlv_packet *)skb->data;

	tvlv_buff = (unsigned char *)(skb->data + hdr_size);
	tvlv_buff_len = ntohs(unicast_tvlv_packet->tvlv_len);

	/* the advertised tvlv length must fit into the actual payload */
	if (tvlv_buff_len > skb->len - hdr_size)
		goto free_skb;

	ret = batadv_tvlv_containers_process(bat_priv, false, NULL,
					     unicast_tvlv_packet->src,
					     unicast_tvlv_packet->dst,
					     tvlv_buff, tvlv_buff_len);

	/* no handler accepted the packet: try to route it onwards */
	if (ret != NET_RX_SUCCESS) {
		ret = batadv_route_unicast_packet(skb, recv_if);
		/* skb was consumed by batadv_route_unicast_packet() */
		skb = NULL;
	}

free_skb:
	kfree_skb(skb);

	return ret;
}
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
/**
 * batadv_recv_frag_packet() - process a received fragment
 * @skb: the received fragment
 * @recv_if: interface the fragment arrived on
 *
 * Fragments addressed to other nodes are forwarded; local fragments are
 * buffered until a full packet can be reassembled, which is then re-injected
 * into the receive path. The skb is always consumed.
 *
 * Return: NET_RX_SUCCESS on success, NET_RX_DROP otherwise.
 */
int batadv_recv_frag_packet(struct sk_buff *skb,
			    struct batadv_hard_iface *recv_if)
{
	struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct batadv_orig_node *orig_node_src = NULL;
	struct batadv_frag_packet *frag_packet;
	int ret = NET_RX_DROP;

	if (batadv_check_unicast_packet(bat_priv, skb,
					sizeof(*frag_packet)) < 0)
		goto free_skb;

	frag_packet = (struct batadv_frag_packet *)skb->data;
	orig_node_src = batadv_orig_hash_find(bat_priv, frag_packet->orig);
	if (!orig_node_src)
		goto free_skb;

	/* restore the priority carried in the fragment header */
	skb->priority = frag_packet->priority + 256;

	/* not for me: try to forward the fragment without reassembling it */
	if (!batadv_is_my_mac(bat_priv, frag_packet->dest) &&
	    batadv_frag_skb_fwd(skb, recv_if, orig_node_src)) {
		/* skb was consumed by batadv_frag_skb_fwd() */
		skb = NULL;
		ret = NET_RX_SUCCESS;
		goto put_orig_node;
	}

	batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_RX);
	batadv_add_counter(bat_priv, BATADV_CNT_FRAG_RX_BYTES, skb->len);

	/* add fragment to buffer; on failure the buffer code kept/freed the
	 * skb, nothing left to release here
	 */
	if (!batadv_frag_skb_buffer(&skb, orig_node_src))
		goto put_orig_node;

	/* batadv_frag_skb_buffer() will either NULL skb or return the
	 * reassembled packet in it; re-inject a completed packet into the
	 * receive path
	 */
	if (skb) {
		batadv_batman_skb_recv(skb, recv_if->net_dev,
				       &recv_if->batman_adv_ptype, NULL);
		/* skb was consumed by batadv_batman_skb_recv() */
		skb = NULL;
	}

	ret = NET_RX_SUCCESS;

put_orig_node:
	batadv_orig_node_put(orig_node_src);
free_skb:
	kfree_skb(skb);

	return ret;
}
1157
/**
 * batadv_recv_bcast_packet() - process a received broadcast packet
 * @skb: the received packet
 * @recv_if: interface the packet arrived on
 *
 * Performs duplicate suppression via the per-originator broadcast sequence
 * number window, schedules the packet for rebroadcast and delivers the
 * payload to the soft interface (after bridge loop avoidance and DAT
 * snooping). The skb is always consumed.
 *
 * Return: NET_RX_SUCCESS on success, NET_RX_DROP otherwise.
 */
int batadv_recv_bcast_packet(struct sk_buff *skb,
			     struct batadv_hard_iface *recv_if)
{
	struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct batadv_orig_node *orig_node = NULL;
	struct batadv_bcast_packet *bcast_packet;
	struct ethhdr *ethhdr;
	int hdr_size = sizeof(*bcast_packet);
	int ret = NET_RX_DROP;
	s32 seq_diff;
	u32 seqno;

	/* drop packet if it has not the required minimum length */
	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		goto free_skb;

	ethhdr = eth_hdr(skb);

	/* packet with broadcast indication but non-broadcast recipient */
	if (!is_broadcast_ether_addr(ethhdr->h_dest))
		goto free_skb;

	/* packet with broadcast/multicast sender address */
	if (is_multicast_ether_addr(ethhdr->h_source))
		goto free_skb;

	/* ignore broadcasts sent by this node itself */
	if (batadv_is_my_mac(bat_priv, ethhdr->h_source))
		goto free_skb;

	bcast_packet = (struct batadv_bcast_packet *)skb->data;

	/* ignore broadcasts originally started by this node */
	if (batadv_is_my_mac(bat_priv, bcast_packet->orig))
		goto free_skb;

	if (bcast_packet->ttl < 2)
		goto free_skb;

	orig_node = batadv_orig_hash_find(bat_priv, bcast_packet->orig);

	if (!orig_node)
		goto free_skb;

	spin_lock_bh(&orig_node->bcast_seqno_lock);

	seqno = ntohl(bcast_packet->seqno);
	/* check whether the packet is a duplicate */
	if (batadv_test_bit(orig_node->bcast_bits, orig_node->last_bcast_seqno,
			    seqno))
		goto spin_unlock;

	seq_diff = seqno - orig_node->last_bcast_seqno;

	/* check whether the packet is old and the host just restarted. */
	if (batadv_window_protected(bat_priv, seq_diff,
				    BATADV_BCAST_MAX_AGE,
				    &orig_node->bcast_seqno_reset, NULL))
		goto spin_unlock;

	/* mark broadcast in flood history, update window position
	 * if required.
	 */
	if (batadv_bit_get_packet(bat_priv, orig_node->bcast_bits, seq_diff, 1))
		orig_node->last_bcast_seqno = seqno;

	spin_unlock_bh(&orig_node->bcast_seqno_lock);

	/* check whether this has been sent by another originator before */
	if (batadv_bla_check_bcast_duplist(bat_priv, skb))
		goto free_skb;

	batadv_skb_set_priority(skb, sizeof(struct batadv_bcast_packet));

	/* rebroadcast packet (copies the skb for transmission) */
	batadv_add_bcast_packet_to_list(bat_priv, skb, 1, false);

	/* don't hand the broadcast up if it is from an originator
	 * from the same backbone.
	 */
	if (batadv_bla_is_backbone_gw(skb, orig_node, hdr_size))
		goto free_skb;

	/* DAT may answer/absorb ARP traffic; then the skb was consumed */
	if (batadv_dat_snoop_incoming_arp_request(bat_priv, skb, hdr_size))
		goto rx_success;
	if (batadv_dat_snoop_incoming_arp_reply(bat_priv, skb, hdr_size))
		goto rx_success;

	/* broadcast for me; consumes the skb */
	batadv_interface_rx(recv_if->soft_iface, skb, hdr_size, orig_node);

rx_success:
	ret = NET_RX_SUCCESS;
	goto out;

spin_unlock:
	spin_unlock_bh(&orig_node->bcast_seqno_lock);
free_skb:
	kfree_skb(skb);
out:
	if (orig_node)
		batadv_orig_node_put(orig_node);
	return ret;
}
1262