// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) B.A.T.M.A.N. contributors:
 *
 * Simon Wunderlich
 */

#include "bridge_loop_avoidance.h"
#include "main.h"

#include <linux/atomic.h>
#include <linux/byteorder/generic.h>
#include <linux/compiler.h>
#include <linux/crc16.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/gfp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/seq_file.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <net/arp.h>
#include <net/genetlink.h>
#include <net/netlink.h>
#include <net/sock.h>
#include <uapi/linux/batadv_packet.h>
#include <uapi/linux/batman_adv.h>

#include "hard-interface.h"
#include "hash.h"
#include "log.h"
#include "netlink.h"
#include "originator.h"
#include "soft-interface.h"
#include "sysfs.h"
#include "translation-table.h"

static const u8 batadv_announce_mac[4] = {0x43, 0x05, 0x43, 0x05};

static void batadv_bla_periodic_work(struct work_struct *work);
static void
batadv_bla_send_announce(struct batadv_priv *bat_priv,
			 struct batadv_bla_backbone_gw *backbone_gw);

/**
 * batadv_choose_claim() - calculate a hash over a claim
 * @data: data to hash
 * @size: size of the hash table
 *
 * Return: the hash index of the claim
 */
static inline u32 batadv_choose_claim(const void *data, u32 size)
{
	struct batadv_bla_claim *claim = (struct batadv_bla_claim *)data;
	u32 hash = 0;

	hash = jhash(&claim->addr, sizeof(claim->addr), hash);
	hash = jhash(&claim->vid, sizeof(claim->vid), hash);

	return hash % size;
}

/**
 * batadv_choose_backbone_gw() - calculate a hash over a backbone gateway
 * @data: data to hash
 * @size: size of the hash table
 *
 * Return: the hash index of the backbone gateway
 */
static inline u32 batadv_choose_backbone_gw(const void *data, u32 size)
{
	const struct batadv_bla_claim *claim = (struct batadv_bla_claim *)data;
	u32 hash = 0;

	hash = jhash(&claim->addr, sizeof(claim->addr), hash);
	hash = jhash(&claim->vid, sizeof(claim->vid), hash);

	return hash % size;
}

/**
 * batadv_compare_backbone_gw() - compare address and vid of two backbone gws
 * @node: list node of the first entry to compare
 * @data2: pointer to the second backbone gateway
 *
 * Return: true if the backbones have the same data, false otherwise
 */
static bool batadv_compare_backbone_gw(const struct hlist_node *node,
				       const void *data2)
{
	const void *data1 = container_of(node, struct batadv_bla_backbone_gw,
					 hash_entry);
	const struct batadv_bla_backbone_gw *gw1 = data1;
	const struct batadv_bla_backbone_gw *gw2 = data2;

	if (!batadv_compare_eth(gw1->orig, gw2->orig))
		return false;

	if (gw1->vid != gw2->vid)
		return false;

	return true;
}

/**
 * batadv_compare_claim() - compare address and vid of two claims
 * @node: list node of the first entry to compare
 * @data2: pointer to the second claim
 *
 * Return: true if the claims have the same data, false otherwise
 */
static bool batadv_compare_claim(const struct hlist_node *node,
				 const void *data2)
{
	const void *data1 = container_of(node, struct batadv_bla_claim,
					 hash_entry);
	const struct batadv_bla_claim *cl1 = data1;
	const struct batadv_bla_claim *cl2 = data2;

	if (!batadv_compare_eth(cl1->addr, cl2->addr))
		return false;

	if (cl1->vid != cl2->vid)
		return false;

	return true;
}

/**
 * batadv_backbone_gw_release() - release backbone gw from lists and queue for
 *  free after rcu grace period
 * @ref: kref pointer of the backbone gw
 */
static void batadv_backbone_gw_release(struct kref *ref)
{
	struct batadv_bla_backbone_gw *backbone_gw;

	backbone_gw = container_of(ref, struct batadv_bla_backbone_gw,
				   refcount);

	kfree_rcu(backbone_gw, rcu);
}

/**
 * batadv_backbone_gw_put() - decrement the backbone gw refcounter and possibly
 *  release it
 * @backbone_gw: backbone gateway to be free'd
 */
static void batadv_backbone_gw_put(struct batadv_bla_backbone_gw *backbone_gw)
{
	kref_put(&backbone_gw->refcount, batadv_backbone_gw_release);
}

/**
 * batadv_claim_release() - release claim from lists and queue for free after
 *  rcu grace period
 * @ref: kref pointer of the claim
 */
static void batadv_claim_release(struct kref *ref)
{
	struct batadv_bla_claim *claim;
	struct batadv_bla_backbone_gw *old_backbone_gw;

	claim = container_of(ref, struct batadv_bla_claim, refcount);

	spin_lock_bh(&claim->backbone_lock);
	old_backbone_gw = claim->backbone_gw;
	claim->backbone_gw = NULL;
	spin_unlock_bh(&claim->backbone_lock);

	spin_lock_bh(&old_backbone_gw->crc_lock);
	old_backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
	spin_unlock_bh(&old_backbone_gw->crc_lock);

	batadv_backbone_gw_put(old_backbone_gw);

	kfree_rcu(claim, rcu);
}

/**
 * batadv_claim_put() - decrement the claim refcounter and possibly release it
 * @claim: claim to be free'd
 */
static void batadv_claim_put(struct batadv_bla_claim *claim)
{
	kref_put(&claim->refcount, batadv_claim_release);
}

/**
 * batadv_claim_hash_find() - looks for a claim in the claim hash
 * @bat_priv: the bat priv with all the soft interface information
 * @data: search data (may be with/without refcount)
 *
 * Return: claim if found or NULL otherwise.
 */
static struct batadv_bla_claim *
batadv_claim_hash_find(struct batadv_priv *bat_priv,
		       struct batadv_bla_claim *data)
{
	struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
	struct hlist_head *head;
	struct batadv_bla_claim *claim;
	struct batadv_bla_claim *claim_tmp = NULL;
	int index;

	if (!hash)
		return NULL;

	index = batadv_choose_claim(data, hash->size);
	head = &hash->table[index];

	rcu_read_lock();
	hlist_for_each_entry_rcu(claim, head, hash_entry) {
		if (!batadv_compare_claim(&claim->hash_entry, data))
			continue;

		if (!kref_get_unless_zero(&claim->refcount))
			continue;

		claim_tmp = claim;
		break;
	}
	rcu_read_unlock();

	return claim_tmp;
}

/**
 * batadv_backbone_hash_find() - looks for a backbone gateway in the hash
 * @bat_priv: the bat priv with all the soft interface information
 * @addr: the address of the originator
 * @vid: the VLAN ID
 *
 * Return: backbone gateway if found or NULL otherwise
 */
static struct batadv_bla_backbone_gw *
batadv_backbone_hash_find(struct batadv_priv *bat_priv, u8 *addr,
			  unsigned short vid)
{
	struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
	struct hlist_head *head;
	struct batadv_bla_backbone_gw search_entry, *backbone_gw;
	struct batadv_bla_backbone_gw *backbone_gw_tmp = NULL;
	int index;

	if (!hash)
		return NULL;

	ether_addr_copy(search_entry.orig, addr);
	search_entry.vid = vid;

	index = batadv_choose_backbone_gw(&search_entry, hash->size);
	head = &hash->table[index];

	rcu_read_lock();
	hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
		if (!batadv_compare_backbone_gw(&backbone_gw->hash_entry,
						&search_entry))
			continue;

		if (!kref_get_unless_zero(&backbone_gw->refcount))
			continue;

		backbone_gw_tmp = backbone_gw;
		break;
	}
	rcu_read_unlock();

	return backbone_gw_tmp;
}

/**
 * batadv_bla_del_backbone_claims() - delete all claims for a backbone
 * @backbone_gw: backbone gateway where the claims should be removed
 */
static void
batadv_bla_del_backbone_claims(struct batadv_bla_backbone_gw *backbone_gw)
{
	struct batadv_hashtable *hash;
	struct hlist_node *node_tmp;
	struct hlist_head *head;
	struct batadv_bla_claim *claim;
	int i;
	spinlock_t *list_lock;	/* protects write access to the hash lists */

	hash = backbone_gw->bat_priv->bla.claim_hash;
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(claim, node_tmp,
					  head, hash_entry) {
			if (claim->backbone_gw != backbone_gw)
				continue;

			batadv_claim_put(claim);
			hlist_del_rcu(&claim->hash_entry);
		}
		spin_unlock_bh(list_lock);
	}

	/* all claims gone, initialize CRC */
	spin_lock_bh(&backbone_gw->crc_lock);
	backbone_gw->crc = BATADV_BLA_CRC_INIT;
	spin_unlock_bh(&backbone_gw->crc_lock);
}
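
/**
 * batadv_bla_send_claim() - sends a claim frame according to the provided info
 * @bat_priv: the bat priv with all the soft interface information
 * @mac: the mac address to be announced within the claim
 * @vid: the VLAN ID of the frame
 * @claimtype: the type of the claim (CLAIM, UNCLAIM, ANNOUNCE, ...)
 */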
static void batadv_bla_send_claim(struct batadv_priv *bat_priv, u8 *mac,
				  unsigned short vid, int claimtype)
{
	struct sk_buff *skb;
	struct ethhdr *ethhdr;
	struct batadv_hard_iface *primary_if;
	struct net_device *soft_iface;
	u8 *hw_src;
	struct batadv_bla_claim_dst local_claim_dest;
	__be32 zeroip = 0;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		return;

	memcpy(&local_claim_dest, &bat_priv->bla.claim_dest,
	       sizeof(local_claim_dest));
	local_claim_dest.type = claimtype;

	soft_iface = primary_if->soft_iface;

	skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
			 /* IP DST: 0.0.0.0 */
			 zeroip,
			 primary_if->soft_iface,
			 /* IP SRC: 0.0.0.0 */
			 zeroip,
			 /* Ethernet DST: Broadcast */
			 NULL,
			 /* Ethernet SRC/HW SRC: originator mac */
			 primary_if->net_dev->dev_addr,
			 /* HW DST: FF:43:05:XX:YY:YY:YY:YY
			  * with XX   = claim type
			  * and YY:YY:YY:YY:YY:YY = group id
			  */
			 (u8 *)&local_claim_dest);

	if (!skb)
		goto out;

	ethhdr = (struct ethhdr *)skb->data;
	hw_src = (u8 *)ethhdr + ETH_HLEN + sizeof(struct arphdr);

	/* now we pretend that the client would have sent this ... */
	switch (claimtype) {
	case BATADV_CLAIM_TYPE_CLAIM:
		/* normal claim frame
		 * set Ethernet SRC to the clients mac
		 */
		ether_addr_copy(ethhdr->h_source, mac);
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "%s(): CLAIM %pM on vid %d\n", __func__, mac,
			   batadv_print_vid(vid));
		break;
	case BATADV_CLAIM_TYPE_UNCLAIM:
		/* unclaim frame
		 * set HW SRC to the clients mac
		 */
		ether_addr_copy(hw_src, mac);
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "%s(): UNCLAIM %pM on vid %d\n", __func__, mac,
			   batadv_print_vid(vid));
		break;
	case BATADV_CLAIM_TYPE_ANNOUNCE:
		/* announcement frame
		 * set HW SRC to the special mac containing the crc
		 */
		ether_addr_copy(hw_src, mac);
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "%s(): ANNOUNCE of %pM on vid %d\n", __func__,
			   ethhdr->h_source, batadv_print_vid(vid));
		break;
	case BATADV_CLAIM_TYPE_REQUEST:
		/* request frame
		 * set HW SRC and header destination to the receiving backbone
		 * gws mac
		 */
		ether_addr_copy(hw_src, mac);
		ether_addr_copy(ethhdr->h_dest, mac);
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "%s(): REQUEST of %pM to %pM on vid %d\n", __func__,
			   ethhdr->h_source, ethhdr->h_dest,
			   batadv_print_vid(vid));
		break;
	case BATADV_CLAIM_TYPE_LOOPDETECT:
		ether_addr_copy(ethhdr->h_source, mac);
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "%s(): LOOPDETECT of %pM to %pM on vid %d\n",
			   __func__, ethhdr->h_source, ethhdr->h_dest,
			   batadv_print_vid(vid));
		break;
	}

	if (vid & BATADV_VLAN_HAS_TAG) {
		skb = vlan_insert_tag(skb, htons(ETH_P_8021Q),
				      vid & VLAN_VID_MASK);
		if (!skb)
			goto out;
	}

	skb_reset_mac_header(skb);
	skb->protocol = eth_type_trans(skb, soft_iface);
	batadv_inc_counter(bat_priv, BATADV_CNT_RX);
	batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
			   skb->len + ETH_HLEN);
	netif_rx(skb);
out:
	if (primary_if)
		batadv_hardif_put(primary_if);
}

/**
 * batadv_bla_loopdetect_report() - worker for reporting a detected loop
 * @work: work queue item
 *
 * Throws an uevent and writes a log message, as the loopdetect check
 * function runs in a context which may not sleep.
 */
static void batadv_bla_loopdetect_report(struct work_struct *work)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	struct batadv_priv *bat_priv;
	char vid_str[6] = { '\0' };

	backbone_gw = container_of(work, struct batadv_bla_backbone_gw,
				   report_work);
	bat_priv = backbone_gw->bat_priv;

	batadv_info(bat_priv->soft_iface,
		    "Possible loop on VLAN %d detected which can't be handled by BLA - please check your network setup!\n",
		    batadv_print_vid(backbone_gw->vid));
	snprintf(vid_str, sizeof(vid_str), "%d",
		 batadv_print_vid(backbone_gw->vid));
	vid_str[sizeof(vid_str) - 1] = 0;

	batadv_throw_uevent(bat_priv, BATADV_UEV_BLA, BATADV_UEV_LOOPDETECT,
			    vid_str);

	batadv_backbone_gw_put(backbone_gw);
}
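
/**
 * batadv_bla_get_backbone_gw() - finds or creates a backbone gateway
 * @bat_priv: the bat priv with all the soft interface information
 * @orig: the mac address of the originator
 * @vid: the VLAN ID of the frame
 * @own_backbone: set if the requested backbone is local
 *
 * Return: the (possibly created) backbone gateway or NULL on error
 */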
static struct batadv_bla_backbone_gw *
batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, u8 *orig,
			   unsigned short vid, bool own_backbone)
{
	struct batadv_bla_backbone_gw *entry;
	struct batadv_orig_node *orig_node;
	int hash_added;

	entry = batadv_backbone_hash_find(bat_priv, orig, vid);

	if (entry)
		return entry;

	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "%s(): not found (%pM, %d), creating new entry\n", __func__,
		   orig, batadv_print_vid(vid));

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return NULL;

	entry->vid = vid;
	entry->lasttime = jiffies;
	entry->crc = BATADV_BLA_CRC_INIT;
	entry->bat_priv = bat_priv;
	spin_lock_init(&entry->crc_lock);
	atomic_set(&entry->request_sent, 0);
	atomic_set(&entry->wait_periods, 0);
	ether_addr_copy(entry->orig, orig);
	INIT_WORK(&entry->report_work, batadv_bla_loopdetect_report);
	kref_init(&entry->refcount);

	kref_get(&entry->refcount);
	hash_added = batadv_hash_add(bat_priv->bla.backbone_hash,
				     batadv_compare_backbone_gw,
				     batadv_choose_backbone_gw, entry,
				     &entry->hash_entry);

	if (unlikely(hash_added != 0)) {
		/* hash failed, free the structure */
		kfree(entry);
		return NULL;
	}

	/* this is a gateway now, remove any TT entry on this VLAN */
	orig_node = batadv_orig_hash_find(bat_priv, orig);
	if (orig_node) {
		batadv_tt_global_del_orig(bat_priv, orig_node, vid,
					  "became a backbone gateway");
		batadv_orig_node_put(orig_node);
	}

	if (own_backbone) {
		batadv_bla_send_announce(bat_priv, entry);

		/* this will be decreased in the worker thread */
		atomic_inc(&entry->request_sent);
		atomic_set(&entry->wait_periods, BATADV_BLA_WAIT_PERIODS);
		atomic_inc(&bat_priv->bla.num_requests);
	}

	return entry;
}

/**
 * batadv_bla_update_own_backbone_gw() - updates the own backbone gw for a VLAN
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the selected primary interface
 * @vid: VLAN identifier
 *
 * update or add the own backbone gw to make sure we announce
 * where we receive other backbone gws
 */
static void
batadv_bla_update_own_backbone_gw(struct batadv_priv *bat_priv,
				  struct batadv_hard_iface *primary_if,
				  unsigned short vid)
{
	struct batadv_bla_backbone_gw *backbone_gw;

	backbone_gw = batadv_bla_get_backbone_gw(bat_priv,
						 primary_if->net_dev->dev_addr,
						 vid, true);
	if (unlikely(!backbone_gw))
		return;

	backbone_gw->lasttime = jiffies;
	batadv_backbone_gw_put(backbone_gw);
}
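
/**
 * batadv_bla_answer_request() - answer a bla request by sending own claims
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: interface where the request came on
 * @vid: the vid where the request came on
 *
 * Repeat all of our own claims, and finally send an ANNOUNCE frame
 * to allow the requester another check if the CRC is correct now.
 */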
static void batadv_bla_answer_request(struct batadv_priv *bat_priv,
				      struct batadv_hard_iface *primary_if,
				      unsigned short vid)
{
	struct hlist_head *head;
	struct batadv_hashtable *hash;
	struct batadv_bla_claim *claim;
	struct batadv_bla_backbone_gw *backbone_gw;
	int i;

	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "%s(): received a claim request, send all of our own claims again\n",
		   __func__);

	backbone_gw = batadv_backbone_hash_find(bat_priv,
						primary_if->net_dev->dev_addr,
						vid);
	if (!backbone_gw)
		return;

	hash = bat_priv->bla.claim_hash;
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(claim, head, hash_entry) {
			/* only own claims are interesting */
			if (claim->backbone_gw != backbone_gw)
				continue;

			batadv_bla_send_claim(bat_priv, claim->addr, claim->vid,
					      BATADV_CLAIM_TYPE_CLAIM);
		}
		rcu_read_unlock();
	}

	/* finally, send an announcement frame */
	batadv_bla_send_announce(bat_priv, backbone_gw);
	batadv_backbone_gw_put(backbone_gw);
}

/**
 * batadv_bla_send_request() - send a request to repeat claims
 * @backbone_gw: the backbone gateway from whom we are out of sync
 *
 * When the crc is wrong, ask the backbone gateway for a full table update.
 * After the request, it will repeat all of its own claims and finally
 * send an announcement claim with which we can check again.
 */
static void batadv_bla_send_request(struct batadv_bla_backbone_gw *backbone_gw)
{
	/* first, remove all old entries */
	batadv_bla_del_backbone_claims(backbone_gw);

	batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
		   "Sending REQUEST to %pM\n", backbone_gw->orig);

	/* send request */
	batadv_bla_send_claim(backbone_gw->bat_priv, backbone_gw->orig,
			      backbone_gw->vid, BATADV_CLAIM_TYPE_REQUEST);

	/* no local broadcasts should be sent or received, for now. */
	if (!atomic_read(&backbone_gw->request_sent)) {
		atomic_inc(&backbone_gw->bat_priv->bla.num_requests);
		atomic_set(&backbone_gw->request_sent, 1);
	}
}

/**
 * batadv_bla_send_announce() - Send an announcement frame
 * @bat_priv: the bat priv with all the soft interface information
 * @backbone_gw: our backbone gateway which should be announced
 */
static void batadv_bla_send_announce(struct batadv_priv *bat_priv,
				     struct batadv_bla_backbone_gw *backbone_gw)
{
	u8 mac[ETH_ALEN];
	__be16 crc;

	memcpy(mac, batadv_announce_mac, 4);
	spin_lock_bh(&backbone_gw->crc_lock);
	crc = htons(backbone_gw->crc);
	spin_unlock_bh(&backbone_gw->crc_lock);
	memcpy(&mac[4], &crc, 2);

	batadv_bla_send_claim(bat_priv, mac, backbone_gw->vid,
			      BATADV_CLAIM_TYPE_ANNOUNCE);
}
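
/**
 * batadv_bla_add_claim() - Adds a claim in the claim hash
 * @bat_priv: the bat priv with all the soft interface information
 * @mac: the mac address of the claim
 * @vid: the VLAN ID of the frame
 * @backbone_gw: the backbone gateway which claims it
 */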
static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
				 const u8 *mac, const unsigned short vid,
				 struct batadv_bla_backbone_gw *backbone_gw)
{
	struct batadv_bla_backbone_gw *old_backbone_gw;
	struct batadv_bla_claim *claim;
	struct batadv_bla_claim search_claim;
	bool remove_crc = false;
	int hash_added;

	ether_addr_copy(search_claim.addr, mac);
	search_claim.vid = vid;
	claim = batadv_claim_hash_find(bat_priv, &search_claim);

	/* create a new claim entry if it does not exist yet. */
	if (!claim) {
		claim = kzalloc(sizeof(*claim), GFP_ATOMIC);
		if (!claim)
			return;

		ether_addr_copy(claim->addr, mac);
		spin_lock_init(&claim->backbone_lock);
		claim->vid = vid;
		claim->lasttime = jiffies;
		kref_get(&backbone_gw->refcount);
		claim->backbone_gw = backbone_gw;
		kref_init(&claim->refcount);

		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "%s(): adding new entry %pM, vid %d to hash ...\n",
			   __func__, mac, batadv_print_vid(vid));

		kref_get(&claim->refcount);
		hash_added = batadv_hash_add(bat_priv->bla.claim_hash,
					     batadv_compare_claim,
					     batadv_choose_claim, claim,
					     &claim->hash_entry);

		if (unlikely(hash_added != 0)) {
			/* only local changes happened. */
			kfree(claim);
			return;
		}
	} else {
		claim->lasttime = jiffies;
		if (claim->backbone_gw == backbone_gw)
			/* no need to register a new backbone */
			goto claim_free_ref;

		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "%s(): changing ownership for %pM, vid %d to gw %pM\n",
			   __func__, mac, batadv_print_vid(vid),
			   backbone_gw->orig);

		remove_crc = true;
	}

	/* replace backbone_gw atomically and adjust reference counters */
	spin_lock_bh(&claim->backbone_lock);
	old_backbone_gw = claim->backbone_gw;
	kref_get(&backbone_gw->refcount);
	claim->backbone_gw = backbone_gw;
	spin_unlock_bh(&claim->backbone_lock);

	if (remove_crc) {
		/* remove claim address from old backbone_gw */
		spin_lock_bh(&old_backbone_gw->crc_lock);
		old_backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
		spin_unlock_bh(&old_backbone_gw->crc_lock);
	}

	batadv_backbone_gw_put(old_backbone_gw);

	/* add claim address to new backbone_gw */
	spin_lock_bh(&backbone_gw->crc_lock);
	backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
	spin_unlock_bh(&backbone_gw->crc_lock);
	backbone_gw->lasttime = jiffies;

claim_free_ref:
	batadv_claim_put(claim);
}

/**
 * batadv_bla_claim_get_backbone_gw() - Get valid reference for backbone_gw of
 *  claim
 * @claim: claim whose backbone_gw should be returned
 *
 * Return: valid reference to claim::backbone_gw
 */
static struct batadv_bla_backbone_gw *
batadv_bla_claim_get_backbone_gw(struct batadv_bla_claim *claim)
{
	struct batadv_bla_backbone_gw *backbone_gw;

	spin_lock_bh(&claim->backbone_lock);
	backbone_gw = claim->backbone_gw;
	kref_get(&backbone_gw->refcount);
	spin_unlock_bh(&claim->backbone_lock);

	return backbone_gw;
}

/**
 * batadv_bla_del_claim() - delete a claim from the claim hash
 * @bat_priv: the bat priv with all the soft interface information
 * @mac: mac address of the claim to be removed
 * @vid: VLAN id for the claim to be removed
 */
static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
				 const u8 *mac, const unsigned short vid)
{
	struct batadv_bla_claim search_claim, *claim;

	ether_addr_copy(search_claim.addr, mac);
	search_claim.vid = vid;
	claim = batadv_claim_hash_find(bat_priv, &search_claim);
	if (!claim)
		return;

	batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): %pM, vid %d\n", __func__,
		   mac, batadv_print_vid(vid));

	batadv_hash_remove(bat_priv->bla.claim_hash, batadv_compare_claim,
			   batadv_choose_claim, claim);
	batadv_claim_put(claim); /* reference from the hash is gone */

	/* don't need the reference from hash_find() anymore */
	batadv_claim_put(claim);
}
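
/**
 * batadv_handle_announce() - check for ANNOUNCE frame
 * @bat_priv: the bat priv with all the soft interface information
 * @an_addr: announcement mac address (ARP Sender HW address)
 * @backbone_addr: originator address of the sender (Ethernet source MAC)
 * @vid: the VLAN ID of the frame
 *
 * Return: true if handled
 */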
static bool batadv_handle_announce(struct batadv_priv *bat_priv, u8 *an_addr,
				   u8 *backbone_addr, unsigned short vid)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	u16 backbone_crc, crc;

	if (memcmp(an_addr, batadv_announce_mac, 4) != 0)
		return false;

	backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid,
						 false);

	if (unlikely(!backbone_gw))
		return true;

	/* handle as ANNOUNCE frame */
	backbone_gw->lasttime = jiffies;
	crc = ntohs(*((__be16 *)(&an_addr[4])));

	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "%s(): ANNOUNCE vid %d (sent by %pM)... CRC = %#.4x\n",
		   __func__, batadv_print_vid(vid), backbone_gw->orig, crc);

	spin_lock_bh(&backbone_gw->crc_lock);
	backbone_crc = backbone_gw->crc;
	spin_unlock_bh(&backbone_gw->crc_lock);

	if (backbone_crc != crc) {
		batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
			   "%s(): CRC FAILED for %pM/%d (my = %#.4x, sent = %#.4x)\n",
			   __func__, backbone_gw->orig,
			   batadv_print_vid(backbone_gw->vid),
			   backbone_crc, crc);

		batadv_bla_send_request(backbone_gw);
	} else {
		/* if we have sent a request and the crc was OK,
		 * we can allow traffic again.
		 */
		if (atomic_read(&backbone_gw->request_sent)) {
			atomic_dec(&backbone_gw->bat_priv->bla.num_requests);
			atomic_set(&backbone_gw->request_sent, 0);
		}
	}

	batadv_backbone_gw_put(backbone_gw);
	return true;
}

/**
 * batadv_handle_request() - check for REQUEST frame
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the primary hard interface of this batman soft interface
 * @backbone_addr: backbone address to be requested (ARP sender HW MAC)
 * @ethhdr: ethernet header of a packet
 * @vid: the VLAN ID of the frame
 *
 * Return: true if handled
 */
static bool batadv_handle_request(struct batadv_priv *bat_priv,
				  struct batadv_hard_iface *primary_if,
				  u8 *backbone_addr, struct ethhdr *ethhdr,
				  unsigned short vid)
{
	/* check for REQUEST frame */
	if (!batadv_compare_eth(backbone_addr, ethhdr->h_dest))
		return false;

	/* sanity check, this should not happen on a normal switch,
	 * we ignore it in this case.
	 */
	if (!batadv_compare_eth(ethhdr->h_dest, primary_if->net_dev->dev_addr))
		return true;

	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "%s(): REQUEST vid %d (sent by %pM)...\n",
		   __func__, batadv_print_vid(vid), ethhdr->h_source);

	batadv_bla_answer_request(bat_priv, primary_if, vid);
	return true;
}

/**
 * batadv_handle_unclaim() - check for UNCLAIM frame
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the primary hard interface of this batman soft interface
 * @backbone_addr: originator address of the backbone (Ethernet source)
 * @claim_addr: client mac address to be unclaimed (ARP sender HW MAC)
 * @vid: the VLAN ID of the frame
 *
 * Return: true if handled
 */
static bool batadv_handle_unclaim(struct batadv_priv *bat_priv,
				  struct batadv_hard_iface *primary_if,
				  u8 *backbone_addr, u8 *claim_addr,
				  unsigned short vid)
{
	struct batadv_bla_backbone_gw *backbone_gw;

	/* unclaim in any case if it is our own */
	if (primary_if && batadv_compare_eth(backbone_addr,
					     primary_if->net_dev->dev_addr))
		batadv_bla_send_claim(bat_priv, claim_addr, vid,
				      BATADV_CLAIM_TYPE_UNCLAIM);

	backbone_gw = batadv_backbone_hash_find(bat_priv, backbone_addr, vid);

	if (!backbone_gw)
		return true;

	/* this must be an UNCLAIM frame */
	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "%s(): UNCLAIM %pM on vid %d (sent by %pM)...\n", __func__,
		   claim_addr, batadv_print_vid(vid), backbone_gw->orig);

	batadv_bla_del_claim(bat_priv, claim_addr, vid);
	batadv_backbone_gw_put(backbone_gw);
	return true;
}

/**
 * batadv_handle_claim() - check for CLAIM frame
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the primary hard interface of this batman soft interface
 * @backbone_addr: originator address of the backbone (Ethernet Source)
 * @claim_addr: client mac address to be claimed (ARP sender HW MAC)
 * @vid: the VLAN ID of the frame
 *
 * Return: true if handled
 */
static bool batadv_handle_claim(struct batadv_priv *bat_priv,
				struct batadv_hard_iface *primary_if,
				u8 *backbone_addr, u8 *claim_addr,
				unsigned short vid)
{
	struct batadv_bla_backbone_gw *backbone_gw;

	/* register the gateway if not yet available, and add the claim. */
	backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid,
						 false);

	if (unlikely(!backbone_gw))
		return true;

	/* this must be a CLAIM frame */
	batadv_bla_add_claim(bat_priv, claim_addr, vid, backbone_gw);
	if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
		batadv_bla_send_claim(bat_priv, claim_addr, vid,
				      BATADV_CLAIM_TYPE_CLAIM);

	/* TODO: we could call something like tt_local_del() here. */

	batadv_backbone_gw_put(backbone_gw);
	return true;
}
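
/**
 * batadv_check_claim_group() - check for claim group membership
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the primary interface of this batman interface
 * @hw_src: the Hardware source in the ARP Header
 * @hw_dst: the Hardware destination in the ARP Header
 * @ethhdr: pointer to the Ethernet header of the claim frame
 *
 * checks if it is a claim packet and if it's on the same group.
 * This function also applies the group ID of the sender
 * if it is in the same mesh.
 *
 * Return:
 *	2  - if it is a claim packet and on the same group
 *	1  - if is a claim packet from another group
 *	0  - if it is not a claim packet
 */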
static int batadv_check_claim_group(struct batadv_priv *bat_priv,
				    struct batadv_hard_iface *primary_if,
				    u8 *hw_src, u8 *hw_dst,
				    struct ethhdr *ethhdr)
{
	u8 *backbone_addr;
	struct batadv_orig_node *orig_node;
	struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;

	bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
	bla_dst_own = &bat_priv->bla.claim_dest;

	/* if announcement frame, use the source,
	 * otherwise assume it is in the hw_src
	 */
	switch (bla_dst->type) {
	case BATADV_CLAIM_TYPE_CLAIM:
		backbone_addr = hw_src;
		break;
	case BATADV_CLAIM_TYPE_REQUEST:
	case BATADV_CLAIM_TYPE_ANNOUNCE:
	case BATADV_CLAIM_TYPE_UNCLAIM:
		backbone_addr = ethhdr->h_source;
		break;
	default:
		return 0;
	}

	/* don't accept claim frames from ourselves */
	if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
		return 0;

	/* if its already the same group, it is fine. */
	if (bla_dst->group == bla_dst_own->group)
		return 2;

	/* lets see if this originator is in our mesh */
	orig_node = batadv_orig_hash_find(bat_priv, backbone_addr);

	/* don't accept claims from gateways which are not in
	 * the same mesh or group.
	 */
	if (!orig_node)
		return 1;

	/* if our mesh friends mac is bigger, use it for ourselves. */
	if (ntohs(bla_dst->group) > ntohs(bla_dst_own->group)) {
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "taking other backbones claim group: %#.4x\n",
			   ntohs(bla_dst->group));
		bla_dst_own->group = bla_dst->group;
	}

	batadv_orig_node_put(orig_node);

	return 2;
}
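
/**
 * batadv_bla_process_claim() - Check if this is a claim frame, and process it
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the primary hard interface of this batman soft interface
 * @skb: the frame to be checked
 *
 * Return: true if it was a claim frame, otherwise return false to
 * tell the callee that it can use the frame on its own.
 */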
static bool batadv_bla_process_claim(struct batadv_priv *bat_priv,
				     struct batadv_hard_iface *primary_if,
				     struct sk_buff *skb)
{
	struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;
	u8 *hw_src, *hw_dst;
	struct vlan_hdr *vhdr, vhdr_buf;
	struct ethhdr *ethhdr;
	struct arphdr *arphdr;
	unsigned short vid;
	int vlan_depth = 0;
	__be16 proto;
	int headlen;
	int ret;

	vid = batadv_get_vid(skb, 0);
	ethhdr = eth_hdr(skb);

	proto = ethhdr->h_proto;
	headlen = ETH_HLEN;
	if (vid & BATADV_VLAN_HAS_TAG) {
		/* Traverse the VLAN/Ethertypes.
		 *
		 * At this point it is known that the first protocol is a VLAN
		 * header, so start checking at the encapsulated protocol.
		 *
		 * The depth of the VLAN headers is recorded to drop BLA claim
		 * frames encapsulated into multiple VLAN headers (QinQ).
		 */
		do {
			vhdr = skb_header_pointer(skb, headlen, VLAN_HLEN,
						  &vhdr_buf);
			if (!vhdr)
				return false;

			proto = vhdr->h_vlan_encapsulated_proto;
			headlen += VLAN_HLEN;
			vlan_depth++;
		} while (proto == htons(ETH_P_8021Q));
	}

	if (proto != htons(ETH_P_ARP))
		return false; /* not a claim frame */

	/* this must be an ARP frame. check if it is a claim. */
	if (unlikely(!pskb_may_pull(skb, headlen + arp_hdr_len(skb->dev))))
		return false;

	/* pskb_may_pull() may have modified the pointers, get ethhdr again */
	ethhdr = eth_hdr(skb);
	arphdr = (struct arphdr *)((u8 *)ethhdr + headlen);

	/* Check whether the ARP frame carries a valid
	 * IP information
	 */
	if (arphdr->ar_hrd != htons(ARPHRD_ETHER))
		return false;
	if (arphdr->ar_pro != htons(ETH_P_IP))
		return false;
	if (arphdr->ar_hln != ETH_ALEN)
		return false;
	if (arphdr->ar_pln != 4)
		return false;

	hw_src = (u8 *)arphdr + sizeof(struct arphdr);
	hw_dst = hw_src + ETH_ALEN + 4;
	bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
	bla_dst_own = &bat_priv->bla.claim_dest;

	/* check if it is a claim frame in general */
	if (memcmp(bla_dst->magic, bla_dst_own->magic,
		   sizeof(bla_dst->magic)) != 0)
		return false;

	/* check if there is a claim frame encapsulated deeper in (QinQ) and
	 * drop that, as this is not supported by BLA but should also not be
	 * sent via the mesh.
	 */
	if (vlan_depth > 1)
		return true;

	/* Let the loopdetect frames on the mesh in any case. */
	if (bla_dst->type == BATADV_CLAIM_TYPE_LOOPDETECT)
		return false;

	/* check if it is a claim frame. */
	ret = batadv_check_claim_group(bat_priv, primary_if, hw_src, hw_dst,
				       ethhdr);
	if (ret == 1)
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "%s(): received a claim frame from another group. From: %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
			   __func__, ethhdr->h_source, batadv_print_vid(vid),
			   hw_src, hw_dst);

	if (ret < 2)
		return !!ret;

	/* become a backbone gw ourselves on this vlan if not happened yet */
	batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);

	/* check for the different types of claim frames ... */
	switch (bla_dst->type) {
	case BATADV_CLAIM_TYPE_CLAIM:
		if (batadv_handle_claim(bat_priv, primary_if, hw_src,
					ethhdr->h_source, vid))
			return true;
		break;
	case BATADV_CLAIM_TYPE_UNCLAIM:
		if (batadv_handle_unclaim(bat_priv, primary_if,
					  ethhdr->h_source, hw_src, vid))
			return true;
		break;
	case BATADV_CLAIM_TYPE_ANNOUNCE:
		if (batadv_handle_announce(bat_priv, hw_src, ethhdr->h_source,
					   vid))
			return true;
		break;
	case BATADV_CLAIM_TYPE_REQUEST:
		if (batadv_handle_request(bat_priv, primary_if, hw_src, ethhdr,
					  vid))
			return true;
		break;
	}

	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "%s(): ERROR - this looks like a claim frame, but is useless. eth src %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
		   __func__, ethhdr->h_source, batadv_print_vid(vid), hw_src,
		   hw_dst);
	return true;
}
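
/**
 * batadv_bla_purge_backbone_gw() - Remove backbone gateways after a timeout or
 *  immediately
 * @bat_priv: the bat priv with all the soft interface information
 * @now: whether the whole hash shall be wiped now
 */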
static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	struct hlist_node *node_tmp;
	struct hlist_head *head;
	struct batadv_hashtable *hash;
	spinlock_t *list_lock;	/* protects write access to the hash lists */
	int i;

	hash = bat_priv->bla.backbone_hash;
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(backbone_gw, node_tmp,
					  head, hash_entry) {
			if (now)
				goto purge_now;
			if (!batadv_has_timed_out(backbone_gw->lasttime,
						  BATADV_BLA_BACKBONE_TIMEOUT))
				continue;

			batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
				   "%s(): backbone gw %pM timed out\n",
				   __func__, backbone_gw->orig);

purge_now:
			/* don't wait for the pending request anymore */
			if (atomic_read(&backbone_gw->request_sent))
				atomic_dec(&bat_priv->bla.num_requests);

			batadv_bla_del_backbone_claims(backbone_gw);

			hlist_del_rcu(&backbone_gw->hash_entry);
			batadv_backbone_gw_put(backbone_gw);
		}
		spin_unlock_bh(list_lock);
	}
}

/**
 * batadv_bla_purge_claims() - Remove claims after a timeout or immediately
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the selected primary interface, may be NULL if now is set
 * @now: whether the whole hash shall be wiped now
 *
 * Check when we heard last time from our own claims, and remove them in case
 * of a time out, or clean all claims if now is set.
 */
static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
				    struct batadv_hard_iface *primary_if,
				    int now)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	struct batadv_bla_claim *claim;
	struct hlist_head *head;
	struct batadv_hashtable *hash;
	int i;

	hash = bat_priv->bla.claim_hash;
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(claim, head, hash_entry) {
			backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
			if (now)
				goto purge_now;

			if (!batadv_compare_eth(backbone_gw->orig,
						primary_if->net_dev->dev_addr))
				goto skip;

			if (!batadv_has_timed_out(claim->lasttime,
						  BATADV_BLA_CLAIM_TIMEOUT))
				goto skip;

			batadv_dbg(BATADV_DBG_BLA, bat_priv,
				   "%s(): timed out.\n", __func__);

purge_now:
			batadv_dbg(BATADV_DBG_BLA, bat_priv,
				   "%s(): %pM, vid %d\n", __func__,
				   claim->addr, claim->vid);

			batadv_handle_unclaim(bat_priv, primary_if,
					      backbone_gw->orig,
					      claim->addr, claim->vid);
skip:
			batadv_backbone_gw_put(backbone_gw);
		}
		rcu_read_unlock();
	}
}
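
/**
 * batadv_bla_update_orig_address() - Update the backbone gateways when the own
 *  originator address changes
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the new selected primary_if
 * @oldif: the old primary interface, may be NULL
 */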
void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
				    struct batadv_hard_iface *primary_if,
				    struct batadv_hard_iface *oldif)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	struct hlist_head *head;
	struct batadv_hashtable *hash;
	__be16 group;
	int i;

	/* reset bridge loop avoidance group id */
	group = htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN));
	bat_priv->bla.claim_dest.group = group;

	/* purge everything when bridge loop avoidance is turned off */
	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		oldif = NULL;

	if (!oldif) {
		batadv_bla_purge_claims(bat_priv, NULL, 1);
		batadv_bla_purge_backbone_gw(bat_priv, 1);
		return;
	}

	hash = bat_priv->bla.backbone_hash;
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
			/* own orig still holds the old value. */
			if (!batadv_compare_eth(backbone_gw->orig,
						oldif->net_dev->dev_addr))
				continue;

			ether_addr_copy(backbone_gw->orig,
					primary_if->net_dev->dev_addr);
			/* send an announce frame so others will ask for our
			 * claims and update their tables.
			 */
			batadv_bla_send_announce(bat_priv, backbone_gw);
		}
		rcu_read_unlock();
	}
}

/**
 * batadv_bla_send_loopdetect() - send a loopdetect frame
 * @bat_priv: the bat priv with all the soft interface information
 * @backbone_gw: the backbone gateway for which a loop should be detected
 *
 * To detect loops that the bridge loop avoidance can't handle, send a loop
 * detection packet on the backbone. Unlike other BLA frames, this frame will
 * be allowed on the mesh by other nodes. If it is received on the mesh, this
 * indicates that there is a loop.
 */
static void
batadv_bla_send_loopdetect(struct batadv_priv *bat_priv,
			   struct batadv_bla_backbone_gw *backbone_gw)
{
	batadv_dbg(BATADV_DBG_BLA, bat_priv, "Send loopdetect frame for vid %d\n",
		   backbone_gw->vid);
	batadv_bla_send_claim(bat_priv, bat_priv->bla.loopdetect_addr,
			      backbone_gw->vid, BATADV_CLAIM_TYPE_LOOPDETECT);
}

/**
 * batadv_bla_status_update() - purge bla interfaces if necessary
 * @net_dev: the soft interface net device
 */
void batadv_bla_status_update(struct net_device *net_dev)
{
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hard_iface *primary_if;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		return;

	/* this function already purges everything when bla is disabled,
	 * so just call that one.
	 */
	batadv_bla_update_orig_address(bat_priv, primary_if, primary_if);
	batadv_hardif_put(primary_if);
}
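
/**
 * batadv_bla_periodic_work() - performs periodic bla work
 * @work: kernel work struct
 *
 * periodic work to do:
 *  * purge structures when they are too old
 *  * send announcements
 */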
static void batadv_bla_periodic_work(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_priv *bat_priv;
	struct batadv_priv_bla *priv_bla;
	struct hlist_head *head;
	struct batadv_bla_backbone_gw *backbone_gw;
	struct batadv_hashtable *hash;
	struct batadv_hard_iface *primary_if;
	bool send_loopdetect = false;
	int i;

	delayed_work = to_delayed_work(work);
	priv_bla = container_of(delayed_work, struct batadv_priv_bla, work);
	bat_priv = container_of(priv_bla, struct batadv_priv, bla);
	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	batadv_bla_purge_claims(bat_priv, primary_if, 0);
	batadv_bla_purge_backbone_gw(bat_priv, 0);

	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		goto out;

	if (atomic_dec_and_test(&bat_priv->bla.loopdetect_next)) {
		/* set a new random mac address for the next bridge loop
		 * detection frames. Set the locally administered bit to avoid
		 * collisions with users mac addresses.
		 */
		random_ether_addr(bat_priv->bla.loopdetect_addr);
		bat_priv->bla.loopdetect_addr[0] = 0xba;
		bat_priv->bla.loopdetect_addr[1] = 0xbe;
		bat_priv->bla.loopdetect_lasttime = jiffies;
		atomic_set(&bat_priv->bla.loopdetect_next,
			   BATADV_BLA_LOOPDETECT_PERIODS);

		/* mark for sending loop detect on all VLANs */
		send_loopdetect = true;
	}

	hash = bat_priv->bla.backbone_hash;
	if (!hash)
		goto out;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
			if (!batadv_compare_eth(backbone_gw->orig,
						primary_if->net_dev->dev_addr))
				continue;

			backbone_gw->lasttime = jiffies;

			batadv_bla_send_announce(bat_priv, backbone_gw);
			if (send_loopdetect)
				batadv_bla_send_loopdetect(bat_priv,
							   backbone_gw);

			/* request_sent is only set after creation to avoid
			 * problems when we are not yet known as backbone gw
			 * in the backbone.
			 *
			 * We can reset this now after we waited some periods
			 * to give bridge forward delays and bla group forming
			 * some grace time.
			 */
			if (atomic_read(&backbone_gw->request_sent) == 0)
				continue;

			if (!atomic_dec_and_test(&backbone_gw->wait_periods))
				continue;

			atomic_dec(&backbone_gw->bat_priv->bla.num_requests);
			atomic_set(&backbone_gw->request_sent, 0);
		}
		rcu_read_unlock();
	}
out:
	if (primary_if)
		batadv_hardif_put(primary_if);

	queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work,
			   msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
}

static struct lock_class_key batadv_claim_hash_lock_class_key;
static struct lock_class_key batadv_backbone_hash_lock_class_key;
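
/**
 * batadv_bla_init() - initialize all bla structures
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Return: 0 on success, < 0 on error.
 */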
int batadv_bla_init(struct batadv_priv *bat_priv)
{
	int i;
	u8 claim_dest[ETH_ALEN] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00};
	struct batadv_hard_iface *primary_if;
	u16 crc;
	unsigned long entrytime;

	spin_lock_init(&bat_priv->bla.bcast_duplist_lock);

	batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hash registering\n");

	/* setting claim destination address */
	memcpy(&bat_priv->bla.claim_dest.magic, claim_dest, 3);
	bat_priv->bla.claim_dest.type = 0;
	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (primary_if) {
		crc = crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN);
		bat_priv->bla.claim_dest.group = htons(crc);
		batadv_hardif_put(primary_if);
	} else {
		bat_priv->bla.claim_dest.group = 0; /* will be set later */
	}

	/* initialize the duplicate list */
	entrytime = jiffies - msecs_to_jiffies(BATADV_DUPLIST_TIMEOUT);
	for (i = 0; i < BATADV_DUPLIST_SIZE; i++)
		bat_priv->bla.bcast_duplist[i].entrytime = entrytime;
	bat_priv->bla.bcast_duplist_curr = 0;

	atomic_set(&bat_priv->bla.loopdetect_next,
		   BATADV_BLA_LOOPDETECT_PERIODS);

	if (bat_priv->bla.claim_hash)
		return 0;

	bat_priv->bla.claim_hash = batadv_hash_new(128);
	bat_priv->bla.backbone_hash = batadv_hash_new(32);

	if (!bat_priv->bla.claim_hash || !bat_priv->bla.backbone_hash)
		return -ENOMEM;

	batadv_hash_set_lock_class(bat_priv->bla.claim_hash,
				   &batadv_claim_hash_lock_class_key);
	batadv_hash_set_lock_class(bat_priv->bla.backbone_hash,
				   &batadv_backbone_hash_lock_class_key);

	batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hashes initialized\n");

	INIT_DELAYED_WORK(&bat_priv->bla.work, batadv_bla_periodic_work);

	queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work,
			   msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
	return 0;
}
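
/**
 * batadv_bla_check_bcast_duplist() - Check if a frame is in the broadcast dup.
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: contains the bcast_packet to be checked
 *
 * check if it is on our broadcast list. Another gateway might have sent the
 * same packet because it is connected to the same backbone, so we have to
 * remove this duplicate.
 *
 * This is performed by checking the CRC, which will tell us
 * with a good chance that it is the same packet. If it is furthermore
 * sent by another host, drop it. We allow equal packets from
 * the same host however as this might be intended.
 *
 * Return: true if a packet is in the duplicate list, false otherwise.
 */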
bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
				    struct sk_buff *skb)
{
	int i, curr;
	__be32 crc;
	struct batadv_bcast_packet *bcast_packet;
	struct batadv_bcast_duplist_entry *entry;
	bool ret = false;

	bcast_packet = (struct batadv_bcast_packet *)skb->data;

	/* calculate the crc ... */
	crc = batadv_skb_crc32(skb, (u8 *)(bcast_packet + 1));

	spin_lock_bh(&bat_priv->bla.bcast_duplist_lock);

	for (i = 0; i < BATADV_DUPLIST_SIZE; i++) {
		curr = (bat_priv->bla.bcast_duplist_curr + i);
		curr %= BATADV_DUPLIST_SIZE;
		entry = &bat_priv->bla.bcast_duplist[curr];

		/* we can stop searching if the entry is too old ;
		 * later entries will be even older
		 */
		if (batadv_has_timed_out(entry->entrytime,
					 BATADV_DUPLIST_TIMEOUT))
			break;

		if (entry->crc != crc)
			continue;

		if (batadv_compare_eth(entry->orig, bcast_packet->orig))
			continue;

		/* this entry seems to match: same crc, not too old,
		 * and from another gw. therefore return true to forbid it.
		 */
		ret = true;
		goto out;
	}
	/* not found, add a new entry (overwrite the oldest entry)
	 * and allow it, its the first occurrence.
	 */
	curr = (bat_priv->bla.bcast_duplist_curr + BATADV_DUPLIST_SIZE - 1);
	curr %= BATADV_DUPLIST_SIZE;
	entry = &bat_priv->bla.bcast_duplist[curr];
	entry->crc = crc;
	entry->entrytime = jiffies;
	ether_addr_copy(entry->orig, bcast_packet->orig);
	bat_priv->bla.bcast_duplist_curr = curr;

out:
	spin_unlock_bh(&bat_priv->bla.bcast_duplist_lock);

	return ret;
}

/**
 * batadv_bla_is_backbone_gw_orig() - Check if the originator is a gateway for
 *  the VLAN identified by vid.
 * @bat_priv: the bat priv with all the soft interface information
 * @orig: originator mac address
 * @vid: VLAN identifier
 *
 * Return: true if orig is a backbone for this vid, false otherwise.
 */
bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, u8 *orig,
				    unsigned short vid)
{
	struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
	struct hlist_head *head;
	struct batadv_bla_backbone_gw *backbone_gw;
	int i;

	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		return false;

	if (!hash)
		return false;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
			if (batadv_compare_eth(backbone_gw->orig, orig) &&
			    backbone_gw->vid == vid) {
				rcu_read_unlock();
				return true;
			}
		}
		rcu_read_unlock();
	}

	return false;
}

/**
 * batadv_bla_is_backbone_gw() - check if originator is a backbone gw for a
 *  mesh client
 * @skb: the frame to be checked
 * @orig_node: the orig_node of the frame
 * @hdr_size: maximum length of the frame
 *
 * Return: true if the orig_node is also a gateway on the soft interface,
 * otherwise it returns false.
 */
bool batadv_bla_is_backbone_gw(struct sk_buff *skb,
			       struct batadv_orig_node *orig_node, int hdr_size)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	unsigned short vid;

	if (!atomic_read(&orig_node->bat_priv->bridge_loop_avoidance))
		return false;

	/* first, find out the vid. */
	if (!pskb_may_pull(skb, hdr_size + ETH_HLEN))
		return false;

	vid = batadv_get_vid(skb, hdr_size);

	/* see if this originator is a backbone gw for this VLAN */
	backbone_gw = batadv_backbone_hash_find(orig_node->bat_priv,
						orig_node->orig, vid);
	if (!backbone_gw)
		return false;

	batadv_backbone_gw_put(backbone_gw);
	return true;
}

/**
 * batadv_bla_free() - free all bla structures
 * @bat_priv: the bat priv with all the soft interface information
 *
 * for softinterface free or module unload
 */
void batadv_bla_free(struct batadv_priv *bat_priv)
{
	struct batadv_hard_iface *primary_if;

	cancel_delayed_work_sync(&bat_priv->bla.work);
	primary_if = batadv_primary_if_get_selected(bat_priv);

	if (bat_priv->bla.claim_hash) {
		batadv_bla_purge_claims(bat_priv, primary_if, 1);
		batadv_hash_destroy(bat_priv->bla.claim_hash);
		bat_priv->bla.claim_hash = NULL;
	}
	if (bat_priv->bla.backbone_hash) {
		batadv_bla_purge_backbone_gw(bat_priv, 1);
		batadv_hash_destroy(bat_priv->bla.backbone_hash);
		bat_priv->bla.backbone_hash = NULL;
	}
	if (primary_if)
		batadv_hardif_put(primary_if);
}
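
/**
 * batadv_bla_loopdetect_check() - check and handle a detected loop
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the frame to be checked
 * @primary_if: interface where the request came on
 * @vid: the VLAN ID of the frame
 *
 * Checks if this packet is a loop detect frame which has been sent by us,
 * and schedules an uevent report if that is the case.
 *
 * Return: true if it is a loop detect frame which is to be dropped, false
 * otherwise.
 */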
static bool
batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
			    struct batadv_hard_iface *primary_if,
			    unsigned short vid)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	struct ethhdr *ethhdr;

	ethhdr = eth_hdr(skb);

	/* a returned loopdetect frame carries the loopdetect MAC we sent
	 * as its Ethernet source; anything else is not a loopdetect frame
	 */
	if (!batadv_compare_eth(ethhdr->h_source,
				bat_priv->bla.loopdetect_addr))
		return false;

	/* if the loopdetect frame did not return within the detection
	 * window, it is stale: drop it without reporting a loop
	 */
	if (batadv_has_timed_out(bat_priv->bla.loopdetect_lasttime,
				 BATADV_BLA_LOOPDETECT_TIMEOUT))
		return true;

	backbone_gw = batadv_bla_get_backbone_gw(bat_priv,
						 primary_if->net_dev->dev_addr,
						 vid, true);
	if (unlikely(!backbone_gw))
		return true;

	queue_work(batadv_event_workqueue, &backbone_gw->report_work);

	/* backbone_gw is unreferenced in the report work function */
	return true;
}
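
/**
 * batadv_bla_rx() - check packets coming from the mesh.
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the frame to be checked
 * @vid: the VLAN ID of the frame
 * @is_bcast: the packet came in a broadcast packet type.
 *
 * batadv_bla_rx avoidance checks if:
 *  * we have to race for a claim
 *  * if the frame is allowed on the LAN
 *
 * In these cases, the skb is further handled by this function
 *
 * Return: true if handled, otherwise it returns false and the caller shall
 * further process the skb.
 */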
bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
		   unsigned short vid, bool is_bcast)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	struct ethhdr *ethhdr;
	struct batadv_bla_claim search_claim, *claim = NULL;
	struct batadv_hard_iface *primary_if;
	bool own_claim;
	bool ret;

	ethhdr = eth_hdr(skb);

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto handled;

	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		goto allow;

	if (batadv_bla_loopdetect_check(bat_priv, skb, primary_if, vid))
		goto handled;

	if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
		/* don't allow broadcasts while requests are in flight */
		if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast)
			goto handled;

	ether_addr_copy(search_claim.addr, ethhdr->h_source);
	search_claim.vid = vid;
	claim = batadv_claim_hash_find(bat_priv, &search_claim);

	if (!claim) {
		/* No claim exists yet, claim it for us! */
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "%s(): Unclaimed MAC %pM found. Claim it. Local: %s\n",
			   __func__, ethhdr->h_source,
			   batadv_is_my_client(bat_priv,
					       ethhdr->h_source, vid) ?
			   "yes" : "no");
		batadv_handle_claim(bat_priv, primary_if,
				    primary_if->net_dev->dev_addr,
				    ethhdr->h_source, vid);
		goto allow;
	}

	/* if it is our own claim ... */
	backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
	own_claim = batadv_compare_eth(backbone_gw->orig,
				       primary_if->net_dev->dev_addr);
	batadv_backbone_gw_put(backbone_gw);

	if (own_claim) {
		/* ... allow it in any case */
		claim->lasttime = jiffies;
		goto allow;
	}

	/* if it is a broadcast ... */
	if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast) {
		/* ... drop it. the responsible gateway is in charge.
		 *
		 * We need to check is_bcast because with the gateway
		 * feature, broadcasts (like DHCP requests) may be sent
		 * using a unicast packet type.
		 */
		goto handled;
	} else {
		/* seems the client considers us as its best gateway.
		 * send a claim and update the claim table
		 * immediately.
		 */
		batadv_handle_claim(bat_priv, primary_if,
				    primary_if->net_dev->dev_addr,
				    ethhdr->h_source, vid);
		goto allow;
	}
allow:
	batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
	ret = false;
	goto out;

handled:
	kfree_skb(skb);
	ret = true;

out:
	if (primary_if)
		batadv_hardif_put(primary_if);
	if (claim)
		batadv_claim_put(claim);
	return ret;
}
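
/**
 * batadv_bla_tx() - check packets going into the mesh
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the frame to be checked
 * @vid: the VLAN ID of the frame
 *
 * batadv_bla_tx checks if:
 *  * a claim was received which has to be processed
 *  * the frame is allowed on the mesh
 *
 * in these cases, the skb is further handled by this function.
 *
 * This call might reallocate skb data.
 *
 * Return: true if handled, otherwise it returns false and the caller shall
 * further process the skb.
 */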
bool batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
		   unsigned short vid)
{
	struct ethhdr *ethhdr;
	struct batadv_bla_claim search_claim, *claim = NULL;
	struct batadv_bla_backbone_gw *backbone_gw;
	struct batadv_hard_iface *primary_if;
	bool client_roamed;
	bool ret = false;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		goto allow;

	if (batadv_bla_process_claim(bat_priv, primary_if, skb))
		goto handled;

	ethhdr = eth_hdr(skb);

	if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
		/* don't allow broadcasts while requests are in flight */
		if (is_multicast_ether_addr(ethhdr->h_dest))
			goto handled;

	ether_addr_copy(search_claim.addr, ethhdr->h_source);
	search_claim.vid = vid;

	claim = batadv_claim_hash_find(bat_priv, &search_claim);

	/* if no claim exists, allow it. */
	if (!claim)
		goto allow;

	/* check if we are responsible. */
	backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
	client_roamed = batadv_compare_eth(backbone_gw->orig,
					   primary_if->net_dev->dev_addr);
	batadv_backbone_gw_put(backbone_gw);

	if (client_roamed) {
		/* if yes, the client has roamed and we have
		 * to unclaim it.
		 */
		if (batadv_has_timed_out(claim->lasttime, 100)) {
			/* only unclaim if the last claim entry is
			 * older than 100 ms to make sure we really
			 * have a roaming client here.
			 */
			batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): Roaming client %pM detected. Unclaim it.\n",
				   __func__, ethhdr->h_source);
			batadv_handle_unclaim(bat_priv, primary_if,
					      primary_if->net_dev->dev_addr,
					      ethhdr->h_source, vid);
			goto allow;
		} else {
			batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): Race for claim %pM detected. Drop packet.\n",
				   __func__, ethhdr->h_source);
			goto handled;
		}
	}

	/* check if it is a multicast/broadcast frame */
	if (is_multicast_ether_addr(ethhdr->h_dest)) {
		/* drop it. the responsible gateway has forwarded it into
		 * the backbone network.
		 */
		goto handled;
	} else {
		/* allow it. the responsible gateway is in charge. */
		goto allow;
	}
allow:
	batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
	ret = false;
	goto out;
handled:
	ret = true;
out:
	if (primary_if)
		batadv_hardif_put(primary_if);
	if (claim)
		batadv_claim_put(claim);
	return ret;
}

#ifdef CONFIG_BATMAN_ADV_DEBUGFS
/**
 * batadv_bla_claim_table_seq_print_text() - print the claim table in a seq
 *  file
 * @seq: seq file to print on
 * @offset: not used
 *
 * Return: always 0
 */
int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
	struct batadv_bla_backbone_gw *backbone_gw;
	struct batadv_bla_claim *claim;
	struct batadv_hard_iface *primary_if;
	struct hlist_head *head;
	u16 backbone_crc;
	u32 i;
	bool is_own;
	u8 *primary_addr;

	primary_if = batadv_seq_print_text_primary_if_get(seq);
	if (!primary_if)
		goto out;

	primary_addr = primary_if->net_dev->dev_addr;
	seq_printf(seq,
		   "Claims announced for the mesh %s (orig %pM, group id %#.4x)\n",
		   net_dev->name, primary_addr,
		   ntohs(bat_priv->bla.claim_dest.group));
	seq_puts(seq,
		 "   Client               VID      Originator        [o] (CRC   )\n");
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(claim, head, hash_entry) {
			backbone_gw = batadv_bla_claim_get_backbone_gw(claim);

			is_own = batadv_compare_eth(backbone_gw->orig,
						    primary_addr);

			spin_lock_bh(&backbone_gw->crc_lock);
			backbone_crc = backbone_gw->crc;
			spin_unlock_bh(&backbone_gw->crc_lock);
			seq_printf(seq, " * %pM on %5d by %pM [%c] (%#.4x)\n",
				   claim->addr, batadv_print_vid(claim->vid),
				   backbone_gw->orig,
				   (is_own ? 'x' : ' '),
				   backbone_crc);

			batadv_backbone_gw_put(backbone_gw);
		}
		rcu_read_unlock();
	}
out:
	if (primary_if)
		batadv_hardif_put(primary_if);
	return 0;
}
#endif

/**
 * batadv_bla_claim_dump_entry() - dump one entry of the claim table to a
 *  netlink socket
 * @msg: buffer for the message
 * @portid: netlink port
 * @seq: Sequence number of netlink message
 * @primary_if: primary interface
 * @claim: entry to dump
 *
 * Return: 0 or error code.
 */
static int
batadv_bla_claim_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
			    struct batadv_hard_iface *primary_if,
			    struct batadv_bla_claim *claim)
{
	u8 *primary_addr = primary_if->net_dev->dev_addr;
	u16 backbone_crc;
	bool is_own;
	void *hdr;
	int ret = -EINVAL;

	hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family,
			  NLM_F_MULTI, BATADV_CMD_GET_BLA_CLAIM);
	if (!hdr) {
		ret = -ENOBUFS;
		goto out;
	}

	is_own = batadv_compare_eth(claim->backbone_gw->orig,
				    primary_addr);

	spin_lock_bh(&claim->backbone_gw->crc_lock);
	backbone_crc = claim->backbone_gw->crc;
	spin_unlock_bh(&claim->backbone_gw->crc_lock);

	if (is_own)
		if (nla_put_flag(msg, BATADV_ATTR_BLA_OWN)) {
			genlmsg_cancel(msg, hdr);
			goto out;
		}

	if (nla_put(msg, BATADV_ATTR_BLA_ADDRESS, ETH_ALEN, claim->addr) ||
	    nla_put_u16(msg, BATADV_ATTR_BLA_VID, claim->vid) ||
	    nla_put(msg, BATADV_ATTR_BLA_BACKBONE, ETH_ALEN,
		    claim->backbone_gw->orig) ||
	    nla_put_u16(msg, BATADV_ATTR_BLA_CRC,
			backbone_crc)) {
		genlmsg_cancel(msg, hdr);
		goto out;
	}

	genlmsg_end(msg, hdr);
	ret = 0;

out:
	return ret;
}

/**
 * batadv_bla_claim_dump_bucket() - dump one bucket of the claim table to a
 *  netlink socket
 * @msg: buffer for the message
 * @portid: netlink port
 * @seq: Sequence number of netlink message
 * @primary_if: primary interface
 * @head: bucket to dump
 * @idx_skip: How many entries to skip
 *
 * Return: 0 or error code.
 */
static int
batadv_bla_claim_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
			     struct batadv_hard_iface *primary_if,
			     struct hlist_head *head, int *idx_skip)
{
	struct batadv_bla_claim *claim;
	int idx = 0;
	int ret = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(claim, head, hash_entry) {
		if (idx++ < *idx_skip)
			continue;

		ret = batadv_bla_claim_dump_entry(msg, portid, seq,
						  primary_if, claim);
		if (ret) {
			*idx_skip = idx - 1;
			goto unlock;
		}
	}

	*idx_skip = 0;
unlock:
	rcu_read_unlock();
	return ret;
}

/**
 * batadv_bla_claim_dump() - dump claim table to a netlink socket
 * @msg: buffer for the message
 * @cb: callback structure containing arguments
 *
 * Return: message length.
 */
int batadv_bla_claim_dump(struct sk_buff *msg, struct netlink_callback *cb)
{
	struct batadv_hard_iface *primary_if = NULL;
	int portid = NETLINK_CB(cb->skb).portid;
	struct net *net = sock_net(cb->skb->sk);
	struct net_device *soft_iface;
	struct batadv_hashtable *hash;
	struct batadv_priv *bat_priv;
	int bucket = cb->args[0];
	struct hlist_head *head;
	int idx = cb->args[1];
	int ifindex;
	int ret = 0;

	ifindex = batadv_netlink_get_ifindex(cb->nlh,
					     BATADV_ATTR_MESH_IFINDEX);
	if (!ifindex)
		return -EINVAL;

	soft_iface = dev_get_by_index(net, ifindex);
	if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
		ret = -ENODEV;
		goto out;
	}

	bat_priv = netdev_priv(soft_iface);
	hash = bat_priv->bla.claim_hash;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) {
		ret = -ENOENT;
		goto out;
	}

	while (bucket < hash->size) {
		head = &hash->table[bucket];

		if (batadv_bla_claim_dump_bucket(msg, portid,
						 cb->nlh->nlmsg_seq,
						 primary_if, head, &idx))
			break;
		bucket++;
	}

	cb->args[0] = bucket;
	cb->args[1] = idx;

	ret = msg->len;

out:
	if (primary_if)
		batadv_hardif_put(primary_if);

	if (soft_iface)
		dev_put(soft_iface);

	return ret;
}

#ifdef CONFIG_BATMAN_ADV_DEBUGFS
/**
 * batadv_bla_backbone_table_seq_print_text() - print the backbone table in a
 *  seq file
 * @seq: seq file to print on
 * @offset: not used
 *
 * Return: always 0
 */
int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
	struct batadv_bla_backbone_gw *backbone_gw;
	struct batadv_hard_iface *primary_if;
	struct hlist_head *head;
	int secs, msecs;
	u16 backbone_crc;
	u32 i;
	bool is_own;
	u8 *primary_addr;

	primary_if = batadv_seq_print_text_primary_if_get(seq);
	if (!primary_if)
		goto out;

	primary_addr = primary_if->net_dev->dev_addr;
	seq_printf(seq,
		   "Backbones announced for the mesh %s (orig %pM, group id %#.4x)\n",
		   net_dev->name, primary_addr,
		   ntohs(bat_priv->bla.claim_dest.group));
	seq_puts(seq, "   Originator           VID   last seen (CRC   )\n");
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
			msecs = jiffies_to_msecs(jiffies -
						 backbone_gw->lasttime);
			secs = msecs / 1000;
			msecs = msecs % 1000;

			is_own = batadv_compare_eth(backbone_gw->orig,
						    primary_addr);
			if (is_own)
				continue;

			spin_lock_bh(&backbone_gw->crc_lock);
			backbone_crc = backbone_gw->crc;
			spin_unlock_bh(&backbone_gw->crc_lock);

			seq_printf(seq, " * %pM on %5d %4i.%03is (%#.4x)\n",
				   backbone_gw->orig,
				   batadv_print_vid(backbone_gw->vid), secs,
				   msecs, backbone_crc);
		}
		rcu_read_unlock();
	}
out:
	if (primary_if)
		batadv_hardif_put(primary_if);
	return 0;
}
#endif

/**
 * batadv_bla_backbone_dump_entry() - dump one entry of the backbone table to a
 *  netlink socket
 * @msg: buffer for the message
 * @portid: netlink port
 * @seq: Sequence number of netlink message
 * @primary_if: primary interface
 * @backbone_gw: entry to dump
 *
 * Return: 0 or error code.
 */
static int
batadv_bla_backbone_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
			       struct batadv_hard_iface *primary_if,
			       struct batadv_bla_backbone_gw *backbone_gw)
{
	u8 *primary_addr = primary_if->net_dev->dev_addr;
	u16 backbone_crc;
	bool is_own;
	int msecs;
	void *hdr;
	int ret = -EINVAL;

	hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family,
			  NLM_F_MULTI, BATADV_CMD_GET_BLA_BACKBONE);
	if (!hdr) {
		ret = -ENOBUFS;
		goto out;
	}

	is_own = batadv_compare_eth(backbone_gw->orig, primary_addr);

	spin_lock_bh(&backbone_gw->crc_lock);
	backbone_crc = backbone_gw->crc;
	spin_unlock_bh(&backbone_gw->crc_lock);

	msecs = jiffies_to_msecs(jiffies - backbone_gw->lasttime);

	if (is_own)
		if (nla_put_flag(msg, BATADV_ATTR_BLA_OWN)) {
			genlmsg_cancel(msg, hdr);
			goto out;
		}

	if (nla_put(msg, BATADV_ATTR_BLA_BACKBONE, ETH_ALEN,
		    backbone_gw->orig) ||
	    nla_put_u16(msg, BATADV_ATTR_BLA_VID, backbone_gw->vid) ||
	    nla_put_u16(msg, BATADV_ATTR_BLA_CRC,
			backbone_crc) ||
	    nla_put_u32(msg, BATADV_ATTR_LAST_SEEN_MSECS, msecs)) {
		genlmsg_cancel(msg, hdr);
		goto out;
	}

	genlmsg_end(msg, hdr);
	ret = 0;

out:
	return ret;
}

/**
 * batadv_bla_backbone_dump_bucket() - dump one bucket of the backbone table to
 *  a netlink socket
 * @msg: buffer for the message
 * @portid: netlink port
 * @seq: Sequence number of netlink message
 * @primary_if: primary interface
 * @head: bucket to dump
 * @idx_skip: How many entries to skip
 *
 * Return: 0 or error code.
 */
static int
batadv_bla_backbone_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
				struct batadv_hard_iface *primary_if,
				struct hlist_head *head, int *idx_skip)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	int idx = 0;
	int ret = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
		if (idx++ < *idx_skip)
			continue;

		ret = batadv_bla_backbone_dump_entry(msg, portid, seq,
						     primary_if, backbone_gw);
		if (ret) {
			*idx_skip = idx - 1;
			goto unlock;
		}
	}

	*idx_skip = 0;
unlock:
	rcu_read_unlock();
	return ret;
}

/**
 * batadv_bla_backbone_dump() - dump backbone table to a netlink socket
 * @msg: buffer for the message
 * @cb: callback structure containing arguments
 *
 * Return: message length.
 */
int batadv_bla_backbone_dump(struct sk_buff *msg, struct netlink_callback *cb)
{
	struct batadv_hard_iface *primary_if = NULL;
	int portid = NETLINK_CB(cb->skb).portid;
	struct net *net = sock_net(cb->skb->sk);
	struct net_device *soft_iface;
	struct batadv_hashtable *hash;
	struct batadv_priv *bat_priv;
	int bucket = cb->args[0];
	struct hlist_head *head;
	int idx = cb->args[1];
	int ifindex;
	int ret = 0;

	ifindex = batadv_netlink_get_ifindex(cb->nlh,
					     BATADV_ATTR_MESH_IFINDEX);
	if (!ifindex)
		return -EINVAL;

	soft_iface = dev_get_by_index(net, ifindex);
	if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
		ret = -ENODEV;
		goto out;
	}

	bat_priv = netdev_priv(soft_iface);
	hash = bat_priv->bla.backbone_hash;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) {
		ret = -ENOENT;
		goto out;
	}

	while (bucket < hash->size) {
		head = &hash->table[bucket];

		if (batadv_bla_backbone_dump_bucket(msg, portid,
						    cb->nlh->nlmsg_seq,
						    primary_if, head, &idx))
			break;
		bucket++;
	}

	cb->args[0] = bucket;
	cb->args[1] = idx;

	ret = msg->len;

out:
	if (primary_if)
		batadv_hardif_put(primary_if);

	if (soft_iface)
		dev_put(soft_iface);

	return ret;
}

#ifdef CONFIG_BATMAN_ADV_DAT
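/**
 * batadv_bla_check_claim() - check if address is claimed
 * @bat_priv: the bat priv with all the soft interface information
 * @addr: mac address of which the claim status is checked
 * @vid: the VLAN ID
 *
 * addr is checked if this address is claimed by the local device itself.
 *
 * Return: true if bla is disabled or the mac is claimed by the device,
 * false if the device addr is already claimed by another backbone gw
 */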
bool batadv_bla_check_claim(struct batadv_priv *bat_priv,
			    u8 *addr, unsigned short vid)
{
	struct batadv_bla_claim search_claim;
	struct batadv_bla_claim *claim = NULL;
	struct batadv_hard_iface *primary_if = NULL;
	bool ret = true;

	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		return ret;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		return ret;

	/* first look if the mac address is claimed */
	ether_addr_copy(search_claim.addr, addr);
	search_claim.vid = vid;

	claim = batadv_claim_hash_find(bat_priv, &search_claim);

	/* if there is a claim and we are not the owner of the claim,
	 * deny it -- another backbone gateway is responsible for it.
	 */
	if (claim) {
		if (!batadv_compare_eth(claim->backbone_gw->orig,
					primary_if->net_dev->dev_addr))
			ret = false;
		batadv_claim_put(claim);
	}

	batadv_hardif_put(primary_if);
	return ret;
}
#endif /* CONFIG_BATMAN_ADV_DAT */