// SPDX-License-Identifier: GPL-2.0
/* B.A.T.M.A.N. advanced bridge loop avoidance (BLA) */
#include "bridge_loop_avoidance.h"
#include "main.h"

#include <linux/atomic.h>
#include <linux/byteorder/generic.h>
#include <linux/compiler.h>
#include <linux/crc16.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/gfp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <net/arp.h>
#include <net/genetlink.h>
#include <net/netlink.h>
#include <net/sock.h>
#include <uapi/linux/batadv_packet.h>
#include <uapi/linux/batman_adv.h>

#include "hard-interface.h"
#include "hash.h"
#include "log.h"
#include "netlink.h"
#include "originator.h"
#include "soft-interface.h"
#include "translation-table.h"

static const u8 batadv_announce_mac[4] = {0x43, 0x05, 0x43, 0x05};

static void batadv_bla_periodic_work(struct work_struct *work);
static void
batadv_bla_send_announce(struct batadv_priv *bat_priv,
			 struct batadv_bla_backbone_gw *backbone_gw);
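/**
 * batadv_choose_claim() - choose the right bucket for a claim.
 * @data: data to hash
 * @size: size of the hash table
 *
 * Return: the hash index of the claim
 */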
static inline u32 batadv_choose_claim(const void *data, u32 size)
{
	struct batadv_bla_claim *claim = (struct batadv_bla_claim *)data;
	u32 hash = 0;

	hash = jhash(&claim->addr, sizeof(claim->addr), hash);
	hash = jhash(&claim->vid, sizeof(claim->vid), hash);

	return hash % size;
}
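/**
 * batadv_choose_backbone_gw() - choose the right bucket for a backbone
 *  gateway.
 * @data: data to hash
 * @size: size of the hash table
 *
 * Return: the hash index of the backbone gateway
 */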
static inline u32 batadv_choose_backbone_gw(const void *data, u32 size)
{
	const struct batadv_bla_backbone_gw *gw;
	u32 hash = 0;

	gw = (struct batadv_bla_backbone_gw *)data;
	hash = jhash(&gw->orig, sizeof(gw->orig), hash);
	hash = jhash(&gw->vid, sizeof(gw->vid), hash);

	return hash % size;
}
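/**
 * batadv_compare_backbone_gw() - compare address and vid of two backbone gws
 * @node: list node of the first entry to compare
 * @data2: pointer to the second backbone gateway
 *
 * Return: true if the backbones have the same data, false otherwise
 */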
static bool batadv_compare_backbone_gw(const struct hlist_node *node,
				       const void *data2)
{
	const void *data1 = container_of(node, struct batadv_bla_backbone_gw,
					 hash_entry);
	const struct batadv_bla_backbone_gw *gw1 = data1;
	const struct batadv_bla_backbone_gw *gw2 = data2;

	if (!batadv_compare_eth(gw1->orig, gw2->orig))
		return false;

	if (gw1->vid != gw2->vid)
		return false;

	return true;
}
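/**
 * batadv_compare_claim() - compare address and vid of two claims
 * @node: list node of the first entry to compare
 * @data2: pointer to the second claim
 *
 * Return: true if the claims have the same data, false otherwise
 */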
static bool batadv_compare_claim(const struct hlist_node *node,
				 const void *data2)
{
	const void *data1 = container_of(node, struct batadv_bla_claim,
					 hash_entry);
	const struct batadv_bla_claim *cl1 = data1;
	const struct batadv_bla_claim *cl2 = data2;

	if (!batadv_compare_eth(cl1->addr, cl2->addr))
		return false;

	if (cl1->vid != cl2->vid)
		return false;

	return true;
}
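/**
 * batadv_backbone_gw_release() - release backbone gw from lists and queue for
 *  free after rcu grace period
 * @ref: kref pointer of the backbone gw
 */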
static void batadv_backbone_gw_release(struct kref *ref)
{
	struct batadv_bla_backbone_gw *backbone_gw;

	backbone_gw = container_of(ref, struct batadv_bla_backbone_gw,
				   refcount);

	kfree_rcu(backbone_gw, rcu);
}
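/**
 * batadv_backbone_gw_put() - decrement the backbone gw refcounter and possibly
 *  release it
 * @backbone_gw: backbone gateway to be free'd
 */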
static void batadv_backbone_gw_put(struct batadv_bla_backbone_gw *backbone_gw)
{
	kref_put(&backbone_gw->refcount, batadv_backbone_gw_release);
}
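/**
 * batadv_claim_release() - release claim from lists and queue for free after
 *  rcu grace period
 * @ref: kref pointer of the claim
 */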
static void batadv_claim_release(struct kref *ref)
{
	struct batadv_bla_claim *claim;
	struct batadv_bla_backbone_gw *old_backbone_gw;

	claim = container_of(ref, struct batadv_bla_claim, refcount);

	spin_lock_bh(&claim->backbone_lock);
	old_backbone_gw = claim->backbone_gw;
	claim->backbone_gw = NULL;
	spin_unlock_bh(&claim->backbone_lock);

	spin_lock_bh(&old_backbone_gw->crc_lock);
	old_backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
	spin_unlock_bh(&old_backbone_gw->crc_lock);

	batadv_backbone_gw_put(old_backbone_gw);

	kfree_rcu(claim, rcu);
}
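/**
 * batadv_claim_put() - decrement the claim refcounter and possibly release it
 * @claim: claim to be free'd
 */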
static void batadv_claim_put(struct batadv_bla_claim *claim)
{
	kref_put(&claim->refcount, batadv_claim_release);
}
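/**
 * batadv_claim_hash_find() - looks for a claim in the claim hash
 * @bat_priv: the bat priv with all the soft interface information
 * @data: search data (may be with/without refcount)
 *
 * Return: claim if found or NULL otherwise.
 */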
static struct batadv_bla_claim *
batadv_claim_hash_find(struct batadv_priv *bat_priv,
		       struct batadv_bla_claim *data)
{
	struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
	struct hlist_head *head;
	struct batadv_bla_claim *claim;
	struct batadv_bla_claim *claim_tmp = NULL;
	int index;

	if (!hash)
		return NULL;

	index = batadv_choose_claim(data, hash->size);
	head = &hash->table[index];

	rcu_read_lock();
	hlist_for_each_entry_rcu(claim, head, hash_entry) {
		if (!batadv_compare_claim(&claim->hash_entry, data))
			continue;

		if (!kref_get_unless_zero(&claim->refcount))
			continue;

		claim_tmp = claim;
		break;
	}
	rcu_read_unlock();

	return claim_tmp;
}
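/**
 * batadv_backbone_hash_find() - looks for a backbone gateway in the hash
 * @bat_priv: the bat priv with all the soft interface information
 * @addr: the address of the originator
 * @vid: the VLAN ID
 *
 * Return: backbone gateway if found or NULL otherwise
 */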
static struct batadv_bla_backbone_gw *
batadv_backbone_hash_find(struct batadv_priv *bat_priv, u8 *addr,
			  unsigned short vid)
{
	struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
	struct hlist_head *head;
	struct batadv_bla_backbone_gw search_entry, *backbone_gw;
	struct batadv_bla_backbone_gw *backbone_gw_tmp = NULL;
	int index;

	if (!hash)
		return NULL;

	ether_addr_copy(search_entry.orig, addr);
	search_entry.vid = vid;

	index = batadv_choose_backbone_gw(&search_entry, hash->size);
	head = &hash->table[index];

	rcu_read_lock();
	hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
		if (!batadv_compare_backbone_gw(&backbone_gw->hash_entry,
						&search_entry))
			continue;

		if (!kref_get_unless_zero(&backbone_gw->refcount))
			continue;

		backbone_gw_tmp = backbone_gw;
		break;
	}
	rcu_read_unlock();

	return backbone_gw_tmp;
}
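/**
 * batadv_bla_del_backbone_claims() - delete all claims for a backbone
 * @backbone_gw: backbone gateway where the claims should be removed
 */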
static void
batadv_bla_del_backbone_claims(struct batadv_bla_backbone_gw *backbone_gw)
{
	struct batadv_hashtable *hash;
	struct hlist_node *node_tmp;
	struct hlist_head *head;
	struct batadv_bla_claim *claim;
	int i;
	spinlock_t *list_lock;	/* protects write access to the hash lists */

	hash = backbone_gw->bat_priv->bla.claim_hash;
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(claim, node_tmp,
					  head, hash_entry) {
			if (claim->backbone_gw != backbone_gw)
				continue;

			batadv_claim_put(claim);
			hlist_del_rcu(&claim->hash_entry);
		}
		spin_unlock_bh(list_lock);
	}

	/* all claims gone, initialize CRC */
	spin_lock_bh(&backbone_gw->crc_lock);
	backbone_gw->crc = BATADV_BLA_CRC_INIT;
	spin_unlock_bh(&backbone_gw->crc_lock);
}
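/**
 * batadv_bla_send_claim() - sends a claim frame according to the provided info
 * @bat_priv: the bat priv with all the soft interface information
 * @mac: the mac address to be announced within the claim
 * @vid: the VLAN ID
 * @claimtype: the type of the claim (CLAIM, UNCLAIM, ANNOUNCE, ...)
 */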
static void batadv_bla_send_claim(struct batadv_priv *bat_priv, u8 *mac,
				  unsigned short vid, int claimtype)
{
	struct sk_buff *skb;
	struct ethhdr *ethhdr;
	struct batadv_hard_iface *primary_if;
	struct net_device *soft_iface;
	u8 *hw_src;
	struct batadv_bla_claim_dst local_claim_dest;
	__be32 zeroip = 0;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		return;

	memcpy(&local_claim_dest, &bat_priv->bla.claim_dest,
	       sizeof(local_claim_dest));
	local_claim_dest.type = claimtype;

	soft_iface = primary_if->soft_iface;

	skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
			 /* IP DST: 0.0.0.0 */
			 zeroip,
			 primary_if->soft_iface,
			 /* IP SRC: 0.0.0.0 */
			 zeroip,
			 /* Ethernet DST: Broadcast */
			 NULL,
			 /* Ethernet SRC/HW SRC: originator mac */
			 primary_if->net_dev->dev_addr,
			 /* HW DST: FF:43:05:XX:YY:YY:YY:YY
			  * with XX   = claim type
			  * and YY:YY:YY:YY:YY:YY = group id
			  */
			 (u8 *)&local_claim_dest);

	if (!skb)
		goto out;

	ethhdr = (struct ethhdr *)skb->data;
	hw_src = (u8 *)ethhdr + ETH_HLEN + sizeof(struct arphdr);

	/* now we pretend that the client would have sent this ... */
	switch (claimtype) {
	case BATADV_CLAIM_TYPE_CLAIM:
		/* normal claim frame
		 * set Ethernet SRC to the clients mac
		 */
		ether_addr_copy(ethhdr->h_source, mac);
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "%s(): CLAIM %pM on vid %d\n", __func__, mac,
			   batadv_print_vid(vid));
		break;
	case BATADV_CLAIM_TYPE_UNCLAIM:
		/* unclaim frame
		 * set HW SRC to the clients mac
		 */
		ether_addr_copy(hw_src, mac);
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "%s(): UNCLAIM %pM on vid %d\n", __func__, mac,
			   batadv_print_vid(vid));
		break;
	case BATADV_CLAIM_TYPE_ANNOUNCE:
		/* announcement frame
		 * set HW SRC to the special mac containing the crc
		 */
		ether_addr_copy(hw_src, mac);
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "%s(): ANNOUNCE of %pM on vid %d\n", __func__,
			   ethhdr->h_source, batadv_print_vid(vid));
		break;
	case BATADV_CLAIM_TYPE_REQUEST:
		/* request frame
		 * set HW SRC and header destination to the receiving backbone
		 * gws mac
		 */
		ether_addr_copy(hw_src, mac);
		ether_addr_copy(ethhdr->h_dest, mac);
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "%s(): REQUEST of %pM to %pM on vid %d\n", __func__,
			   ethhdr->h_source, ethhdr->h_dest,
			   batadv_print_vid(vid));
		break;
	case BATADV_CLAIM_TYPE_LOOPDETECT:
		ether_addr_copy(ethhdr->h_source, mac);
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "%s(): LOOPDETECT of %pM to %pM on vid %d\n",
			   __func__, ethhdr->h_source, ethhdr->h_dest,
			   batadv_print_vid(vid));

		break;
	}

	if (vid & BATADV_VLAN_HAS_TAG) {
		skb = vlan_insert_tag(skb, htons(ETH_P_8021Q),
				      vid & VLAN_VID_MASK);
		if (!skb)
			goto out;
	}

	skb_reset_mac_header(skb);
	skb->protocol = eth_type_trans(skb, soft_iface);
	batadv_inc_counter(bat_priv, BATADV_CNT_RX);
	batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
			   skb->len + ETH_HLEN);

	netif_rx_any_context(skb);
out:
	if (primary_if)
		batadv_hardif_put(primary_if);
}
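/**
 * batadv_bla_loopdetect_report() - worker for reporting the loop
 * @work: work queue item
 *
 * Throws an uevent, as the loopdetect check function can't do that itself
 * since the kernel may sleep while throwing uevents.
 */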
static void batadv_bla_loopdetect_report(struct work_struct *work)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	struct batadv_priv *bat_priv;
	char vid_str[6] = { '\0' };

	backbone_gw = container_of(work, struct batadv_bla_backbone_gw,
				   report_work);
	bat_priv = backbone_gw->bat_priv;

	batadv_info(bat_priv->soft_iface,
		    "Possible loop on VLAN %d detected which can't be handled by BLA - please check your network setup!\n",
		    batadv_print_vid(backbone_gw->vid));
	snprintf(vid_str, sizeof(vid_str), "%d",
		 batadv_print_vid(backbone_gw->vid));
	vid_str[sizeof(vid_str) - 1] = 0;

	batadv_throw_uevent(bat_priv, BATADV_UEV_BLA, BATADV_UEV_LOOPDETECT,
			    vid_str);

	batadv_backbone_gw_put(backbone_gw);
}
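/**
 * batadv_bla_get_backbone_gw() - finds or creates a backbone gateway
 * @bat_priv: the bat priv with all the soft interface information
 * @orig: the mac address of the originator
 * @vid: the VLAN ID
 * @own_backbone: set if the requested backbone is local
 *
 * Return: the (possibly created) backbone gateway or NULL on error
 */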
static struct batadv_bla_backbone_gw *
batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, u8 *orig,
			   unsigned short vid, bool own_backbone)
{
	struct batadv_bla_backbone_gw *entry;
	struct batadv_orig_node *orig_node;
	int hash_added;

	entry = batadv_backbone_hash_find(bat_priv, orig, vid);

	if (entry)
		return entry;

	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "%s(): not found (%pM, %d), creating new entry\n", __func__,
		   orig, batadv_print_vid(vid));

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return NULL;

	entry->vid = vid;
	entry->lasttime = jiffies;
	entry->crc = BATADV_BLA_CRC_INIT;
	entry->bat_priv = bat_priv;
	spin_lock_init(&entry->crc_lock);
	atomic_set(&entry->request_sent, 0);
	atomic_set(&entry->wait_periods, 0);
	ether_addr_copy(entry->orig, orig);
	INIT_WORK(&entry->report_work, batadv_bla_loopdetect_report);
	kref_init(&entry->refcount);

	kref_get(&entry->refcount);
	hash_added = batadv_hash_add(bat_priv->bla.backbone_hash,
				     batadv_compare_backbone_gw,
				     batadv_choose_backbone_gw, entry,
				     &entry->hash_entry);

	if (unlikely(hash_added != 0)) {
		/* hash failed, free the structure */
		kfree(entry);
		return NULL;
	}

	/* this is a gateway now, remove any TT entry on this VLAN */
	orig_node = batadv_orig_hash_find(bat_priv, orig);
	if (orig_node) {
		batadv_tt_global_del_orig(bat_priv, orig_node, vid,
					  "became a backbone gateway");
		batadv_orig_node_put(orig_node);
	}

	if (own_backbone) {
		batadv_bla_send_announce(bat_priv, entry);

		/* this will be decreased in the worker thread */
		atomic_inc(&entry->request_sent);
		atomic_set(&entry->wait_periods, BATADV_BLA_WAIT_PERIODS);
		atomic_inc(&bat_priv->bla.num_requests);
	}

	return entry;
}
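/**
 * batadv_bla_update_own_backbone_gw() - updates the own backbone gw for a VLAN
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the selected primary interface
 * @vid: VLAN identifier
 *
 * update or add the own backbone gw to make sure we announce
 * where we receive other backbone gws
 */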
static void
batadv_bla_update_own_backbone_gw(struct batadv_priv *bat_priv,
				  struct batadv_hard_iface *primary_if,
				  unsigned short vid)
{
	struct batadv_bla_backbone_gw *backbone_gw;

	backbone_gw = batadv_bla_get_backbone_gw(bat_priv,
						 primary_if->net_dev->dev_addr,
						 vid, true);
	if (unlikely(!backbone_gw))
		return;

	backbone_gw->lasttime = jiffies;
	batadv_backbone_gw_put(backbone_gw);
}
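/**
 * batadv_bla_answer_request() - answer a bla request by sending own claims
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: interface where the request came on
 * @vid: the vid where the request came on
 *
 * Repeat all of our own claims, and finally send an ANNOUNCE frame
 * to allow the requester another check if the CRC is correct now.
 */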
static void batadv_bla_answer_request(struct batadv_priv *bat_priv,
				      struct batadv_hard_iface *primary_if,
				      unsigned short vid)
{
	struct hlist_head *head;
	struct batadv_hashtable *hash;
	struct batadv_bla_claim *claim;
	struct batadv_bla_backbone_gw *backbone_gw;
	int i;

	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "%s(): received a claim request, send all of our own claims again\n",
		   __func__);

	backbone_gw = batadv_backbone_hash_find(bat_priv,
						primary_if->net_dev->dev_addr,
						vid);
	if (!backbone_gw)
		return;

	hash = bat_priv->bla.claim_hash;
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(claim, head, hash_entry) {
			/* only own claims are interesting */
			if (claim->backbone_gw != backbone_gw)
				continue;

			batadv_bla_send_claim(bat_priv, claim->addr, claim->vid,
					      BATADV_CLAIM_TYPE_CLAIM);
		}
		rcu_read_unlock();
	}

	/* finally, send an announcement frame */
	batadv_bla_send_announce(bat_priv, backbone_gw);
	batadv_backbone_gw_put(backbone_gw);
}
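/**
 * batadv_bla_send_request() - send a request to repeat claims
 * @backbone_gw: the backbone gateway from whom we are out of sync
 *
 * When the crc is wrong, ask the backbone gateway for a full table update.
 * After the request, it will repeat all of his own claims and finally
 * send an announcement claim with which we can check again.
 */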
static void batadv_bla_send_request(struct batadv_bla_backbone_gw *backbone_gw)
{
	/* first, remove all old entries */
	batadv_bla_del_backbone_claims(backbone_gw);

	batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
		   "Sending REQUEST to %pM\n", backbone_gw->orig);

	/* send request */
	batadv_bla_send_claim(backbone_gw->bat_priv, backbone_gw->orig,
			      backbone_gw->vid, BATADV_CLAIM_TYPE_REQUEST);

	/* no local broadcasts should be sent or received, for now. */
	if (!atomic_read(&backbone_gw->request_sent)) {
		atomic_inc(&backbone_gw->bat_priv->bla.num_requests);
		atomic_set(&backbone_gw->request_sent, 1);
	}
}
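/**
 * batadv_bla_send_announce() - Send an announcement frame
 * @bat_priv: the bat priv with all the soft interface information
 * @backbone_gw: our backbone gateway which should be announced
 */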
static void batadv_bla_send_announce(struct batadv_priv *bat_priv,
				     struct batadv_bla_backbone_gw *backbone_gw)
{
	u8 mac[ETH_ALEN];
	__be16 crc;

	memcpy(mac, batadv_announce_mac, 4);
	spin_lock_bh(&backbone_gw->crc_lock);
	crc = htons(backbone_gw->crc);
	spin_unlock_bh(&backbone_gw->crc_lock);
	memcpy(&mac[4], &crc, 2);

	batadv_bla_send_claim(bat_priv, mac, backbone_gw->vid,
			      BATADV_CLAIM_TYPE_ANNOUNCE);
}
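/**
 * batadv_bla_add_claim() - Adds a claim in the claim hash
 * @bat_priv: the bat priv with all the soft interface information
 * @mac: the mac address of the claim
 * @vid: the VLAN ID of the frame
 * @backbone_gw: the backbone gateway which claims it
 */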
static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
				 const u8 *mac, const unsigned short vid,
				 struct batadv_bla_backbone_gw *backbone_gw)
{
	struct batadv_bla_backbone_gw *old_backbone_gw;
	struct batadv_bla_claim *claim;
	struct batadv_bla_claim search_claim;
	bool remove_crc = false;
	int hash_added;

	ether_addr_copy(search_claim.addr, mac);
	search_claim.vid = vid;
	claim = batadv_claim_hash_find(bat_priv, &search_claim);

	/* create a new claim entry if it does not exist yet. */
	if (!claim) {
		claim = kzalloc(sizeof(*claim), GFP_ATOMIC);
		if (!claim)
			return;

		ether_addr_copy(claim->addr, mac);
		spin_lock_init(&claim->backbone_lock);
		claim->vid = vid;
		claim->lasttime = jiffies;
		kref_get(&backbone_gw->refcount);
		claim->backbone_gw = backbone_gw;
		kref_init(&claim->refcount);

		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "%s(): adding new entry %pM, vid %d to hash ...\n",
			   __func__, mac, batadv_print_vid(vid));

		kref_get(&claim->refcount);
		hash_added = batadv_hash_add(bat_priv->bla.claim_hash,
					     batadv_compare_claim,
					     batadv_choose_claim, claim,
					     &claim->hash_entry);

		if (unlikely(hash_added != 0)) {
			/* only local changes happened. */
			kfree(claim);
			return;
		}
	} else {
		claim->lasttime = jiffies;
		if (claim->backbone_gw == backbone_gw)
			/* no need to register a new backbone */
			goto claim_free_ref;

		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "%s(): changing ownership for %pM, vid %d to gw %pM\n",
			   __func__, mac, batadv_print_vid(vid),
			   backbone_gw->orig);

		remove_crc = true;
	}

	/* replace backbone_gw atomically and adjust reference counters */
	spin_lock_bh(&claim->backbone_lock);
	old_backbone_gw = claim->backbone_gw;
	kref_get(&backbone_gw->refcount);
	claim->backbone_gw = backbone_gw;
	spin_unlock_bh(&claim->backbone_lock);

	if (remove_crc) {
		/* remove crc from old backbone */
		spin_lock_bh(&old_backbone_gw->crc_lock);
		old_backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
		spin_unlock_bh(&old_backbone_gw->crc_lock);
	}

	batadv_backbone_gw_put(old_backbone_gw);

	/* add crc to new backbone */
	spin_lock_bh(&backbone_gw->crc_lock);
	backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
	spin_unlock_bh(&backbone_gw->crc_lock);
	backbone_gw->lasttime = jiffies;

claim_free_ref:
	batadv_claim_put(claim);
}
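/**
 * batadv_bla_claim_get_backbone_gw() - Get valid reference for backbone_gw of
 *  claim
 * @claim: claim whose backbone_gw should be returned
 *
 * Return: valid reference to claim::backbone_gw
 */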
static struct batadv_bla_backbone_gw *
batadv_bla_claim_get_backbone_gw(struct batadv_bla_claim *claim)
{
	struct batadv_bla_backbone_gw *backbone_gw;

	spin_lock_bh(&claim->backbone_lock);
	backbone_gw = claim->backbone_gw;
	kref_get(&backbone_gw->refcount);
	spin_unlock_bh(&claim->backbone_lock);

	return backbone_gw;
}
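/**
 * batadv_bla_del_claim() - delete a claim from the claim hash
 * @bat_priv: the bat priv with all the soft interface information
 * @mac: mac address of the claim to be removed
 * @vid: VLAN id for the claim to be removed
 */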
static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
				 const u8 *mac, const unsigned short vid)
{
	struct batadv_bla_claim search_claim, *claim;
	struct batadv_bla_claim *claim_removed_entry;
	struct hlist_node *claim_removed_node;

	ether_addr_copy(search_claim.addr, mac);
	search_claim.vid = vid;
	claim = batadv_claim_hash_find(bat_priv, &search_claim);
	if (!claim)
		return;

	batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): %pM, vid %d\n", __func__,
		   mac, batadv_print_vid(vid));

	claim_removed_node = batadv_hash_remove(bat_priv->bla.claim_hash,
						batadv_compare_claim,
						batadv_choose_claim, claim);
	if (!claim_removed_node)
		goto free_claim;

	/* reference from the hash is gone */
	claim_removed_entry = hlist_entry(claim_removed_node,
					  struct batadv_bla_claim, hash_entry);
	batadv_claim_put(claim_removed_entry);

free_claim:
	/* don't need the reference from hash_find() anymore */
	batadv_claim_put(claim);
}
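/**
 * batadv_handle_announce() - check for ANNOUNCE frame
 * @bat_priv: the bat priv with all the soft interface information
 * @an_addr: announcement mac address (ARP Sender HW address)
 * @backbone_addr: originator address of the sender (Ethernet source MAC)
 * @vid: the VLAN ID of the frame
 *
 * Return: true if handled
 */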
static bool batadv_handle_announce(struct batadv_priv *bat_priv, u8 *an_addr,
				   u8 *backbone_addr, unsigned short vid)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	u16 backbone_crc, crc;

	if (memcmp(an_addr, batadv_announce_mac, 4) != 0)
		return false;

	backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid,
						 false);

	if (unlikely(!backbone_gw))
		return true;

	/* handle as ANNOUNCE frame */
	backbone_gw->lasttime = jiffies;
	crc = ntohs(*((__force __be16 *)(&an_addr[4])));

	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "%s(): ANNOUNCE vid %d (sent by %pM)... CRC = %#.4x\n",
		   __func__, batadv_print_vid(vid), backbone_gw->orig, crc);

	spin_lock_bh(&backbone_gw->crc_lock);
	backbone_crc = backbone_gw->crc;
	spin_unlock_bh(&backbone_gw->crc_lock);

	if (backbone_crc != crc) {
		batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
			   "%s(): CRC FAILED for %pM/%d (my = %#.4x, sent = %#.4x)\n",
			   __func__, backbone_gw->orig,
			   batadv_print_vid(backbone_gw->vid),
			   backbone_crc, crc);

		batadv_bla_send_request(backbone_gw);
	} else {
		/* if we have sent a request and the crc was OK,
		 * we can allow traffic again.
		 */
		if (atomic_read(&backbone_gw->request_sent)) {
			atomic_dec(&backbone_gw->bat_priv->bla.num_requests);
			atomic_set(&backbone_gw->request_sent, 0);
		}
	}

	batadv_backbone_gw_put(backbone_gw);
	return true;
}
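/**
 * batadv_handle_request() - check for REQUEST frame
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the primary hard interface of this batman soft interface
 * @backbone_addr: backbone address to be requested (ARP sender HW MAC)
 * @ethhdr: ethernet header of a packet
 * @vid: the VLAN ID of the frame
 *
 * Return: true if handled
 */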
static bool batadv_handle_request(struct batadv_priv *bat_priv,
				  struct batadv_hard_iface *primary_if,
				  u8 *backbone_addr, struct ethhdr *ethhdr,
				  unsigned short vid)
{
	/* check for REQUEST frame */
	if (!batadv_compare_eth(backbone_addr, ethhdr->h_dest))
		return false;

	/* sanity check, this should not happen on a normal switch,
	 * we ignore it in this case.
	 */
	if (!batadv_compare_eth(ethhdr->h_dest, primary_if->net_dev->dev_addr))
		return true;

	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "%s(): REQUEST vid %d (sent by %pM)...\n",
		   __func__, batadv_print_vid(vid), ethhdr->h_source);

	batadv_bla_answer_request(bat_priv, primary_if, vid);
	return true;
}
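/**
 * batadv_handle_unclaim() - check for UNCLAIM frame
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the primary hard interface of this batman soft interface
 * @backbone_addr: originator address of the backbone (Ethernet source)
 * @claim_addr: Client to be unclaimed (ARP sender HW MAC)
 * @vid: the VLAN ID of the frame
 *
 * Return: true if handled
 */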
static bool batadv_handle_unclaim(struct batadv_priv *bat_priv,
				  struct batadv_hard_iface *primary_if,
				  u8 *backbone_addr, u8 *claim_addr,
				  unsigned short vid)
{
	struct batadv_bla_backbone_gw *backbone_gw;

	/* unclaim in any case if it is our own */
	if (primary_if && batadv_compare_eth(backbone_addr,
					     primary_if->net_dev->dev_addr))
		batadv_bla_send_claim(bat_priv, claim_addr, vid,
				      BATADV_CLAIM_TYPE_UNCLAIM);

	backbone_gw = batadv_backbone_hash_find(bat_priv, backbone_addr, vid);

	if (!backbone_gw)
		return true;

	/* this must be an UNCLAIM frame */
	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "%s(): UNCLAIM %pM on vid %d (sent by %pM)...\n", __func__,
		   claim_addr, batadv_print_vid(vid), backbone_gw->orig);

	batadv_bla_del_claim(bat_priv, claim_addr, vid);
	batadv_backbone_gw_put(backbone_gw);
	return true;
}
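/**
 * batadv_handle_claim() - check for CLAIM frame
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the primary hard interface of this batman soft interface
 * @backbone_addr: originator address of the backbone (Ethernet Source)
 * @claim_addr: client mac address to be claimed (ARP sender HW MAC)
 * @vid: the VLAN ID of the frame
 *
 * Return: true if handled
 */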
static bool batadv_handle_claim(struct batadv_priv *bat_priv,
				struct batadv_hard_iface *primary_if,
				u8 *backbone_addr, u8 *claim_addr,
				unsigned short vid)
{
	struct batadv_bla_backbone_gw *backbone_gw;

	/* register the gateway if not yet available, and add the claim. */
	backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid,
						 false);

	if (unlikely(!backbone_gw))
		return true;

	/* this must be a CLAIM frame */
	batadv_bla_add_claim(bat_priv, claim_addr, vid, backbone_gw);
	if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
		batadv_bla_send_claim(bat_priv, claim_addr, vid,
				      BATADV_CLAIM_TYPE_CLAIM);

	/* TODO: we could call something like tt_local_del() here. */

	batadv_backbone_gw_put(backbone_gw);
	return true;
}
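/**
 * batadv_check_claim_group() - check for claim group membership
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the primary interface of this batman interface
 * @hw_src: the Hardware source in the ARP Header
 * @hw_dst: the Hardware destination in the ARP Header
 * @ethhdr: pointer to the Ethernet header of the claim frame
 *
 * checks if it is a claim packet and if it's on the same group.
 * This function also applies the group ID of the sender
 * if it is in the same mesh.
 *
 * Return:
 *	2  - if it is a claim packet and on the same group
 *	1  - if is a claim packet from another group
 *	0  - if it is not a claim packet
 */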
static int batadv_check_claim_group(struct batadv_priv *bat_priv,
				    struct batadv_hard_iface *primary_if,
				    u8 *hw_src, u8 *hw_dst,
				    struct ethhdr *ethhdr)
{
	u8 *backbone_addr;
	struct batadv_orig_node *orig_node;
	struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;

	bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
	bla_dst_own = &bat_priv->bla.claim_dest;

	/* if announcement packet, use the source,
	 * otherwise assume it is in the hw_src
	 */
	switch (bla_dst->type) {
	case BATADV_CLAIM_TYPE_CLAIM:
		backbone_addr = hw_src;
		break;
	case BATADV_CLAIM_TYPE_REQUEST:
	case BATADV_CLAIM_TYPE_ANNOUNCE:
	case BATADV_CLAIM_TYPE_UNCLAIM:
		backbone_addr = ethhdr->h_source;
		break;
	default:
		return 0;
	}

	/* don't accept claim frames from ourselves */
	if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
		return 0;

	/* if its already the same group, it is fine. */
	if (bla_dst->group == bla_dst_own->group)
		return 2;

	/* lets see if this originator is in our mesh */
	orig_node = batadv_orig_hash_find(bat_priv, backbone_addr);

	/* dont accept claims from gateways which are not in
	 * the same mesh or group.
	 */
	if (!orig_node)
		return 1;

	/* if our mesh friends mac is bigger, use it for ourselves. */
	if (ntohs(bla_dst->group) > ntohs(bla_dst_own->group)) {
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "taking other backbones claim group: %#.4x\n",
			   ntohs(bla_dst->group));
		bla_dst_own->group = bla_dst->group;
	}

	batadv_orig_node_put(orig_node);

	return 2;
}
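/**
 * batadv_bla_process_claim() - Check if this is a claim frame, and process it
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the primary hard interface of this batman soft interface
 * @skb: the frame to be checked
 *
 * Return: true if it was a claim frame, otherwise return false to
 * tell the callers that it wasn't a claim frame and that it should
 * be handled normally.
 */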
static bool batadv_bla_process_claim(struct batadv_priv *bat_priv,
				     struct batadv_hard_iface *primary_if,
				     struct sk_buff *skb)
{
	struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;
	u8 *hw_src, *hw_dst;
	struct vlan_hdr *vhdr, vhdr_buf;
	struct ethhdr *ethhdr;
	struct arphdr *arphdr;
	unsigned short vid;
	int vlan_depth = 0;
	__be16 proto;
	int headlen;
	int ret;

	vid = batadv_get_vid(skb, 0);
	ethhdr = eth_hdr(skb);

	proto = ethhdr->h_proto;
	headlen = ETH_HLEN;
	if (vid & BATADV_VLAN_HAS_TAG) {
		/* Traverse the VLAN/Ethertypes.
		 *
		 * At this point it is known that the first protocol is a VLAN
		 * header, so start checking at the encapsulated protocol.
		 *
		 * The depth of the VLAN headers is recorded to drop BLA claim
		 * frames encapsulated into multiple VLAN headers (QinQ).
		 */
		do {
			vhdr = skb_header_pointer(skb, headlen, VLAN_HLEN,
						  &vhdr_buf);
			if (!vhdr)
				return false;

			proto = vhdr->h_vlan_encapsulated_proto;
			headlen += VLAN_HLEN;
			vlan_depth++;
		} while (proto == htons(ETH_P_8021Q));
	}

	if (proto != htons(ETH_P_ARP))
		return false; /* not a claim frame */

	/* this must be a ARP frame. check if it is a claim. */
	if (unlikely(!pskb_may_pull(skb, headlen + arp_hdr_len(skb->dev))))
		return false;

	/* pskb_may_pull() may have modified the pointers, get ethhdr again */
	ethhdr = eth_hdr(skb);
	arphdr = (struct arphdr *)((u8 *)ethhdr + headlen);

	/* Check whether the ARP frame carries a valid
	 * IP information
	 */
	if (arphdr->ar_hrd != htons(ARPHRD_ETHER))
		return false;
	if (arphdr->ar_pro != htons(ETH_P_IP))
		return false;
	if (arphdr->ar_hln != ETH_ALEN)
		return false;
	if (arphdr->ar_pln != 4)
		return false;

	hw_src = (u8 *)arphdr + sizeof(struct arphdr);
	hw_dst = hw_src + ETH_ALEN + 4;
	bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
	bla_dst_own = &bat_priv->bla.claim_dest;

	/* check if it is a claim frame in general */
	if (memcmp(bla_dst->magic, bla_dst_own->magic,
		   sizeof(bla_dst->magic)) != 0)
		return false;

	/* check if there is a claim frame encapsulated deeper in (QinQ) and
	 * drop that, as this is not supported by BLA but should also not be
	 * sent via the mesh.
	 */
	if (vlan_depth > 1)
		return true;

	/* Let the loopdetect frames on the mesh in any case. */
	if (bla_dst->type == BATADV_CLAIM_TYPE_LOOPDETECT)
		return false;

	/* check if it is a claim frame. */
	ret = batadv_check_claim_group(bat_priv, primary_if, hw_src, hw_dst,
				       ethhdr);
	if (ret == 1)
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "%s(): received a claim frame from another group. From: %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
			   __func__, ethhdr->h_source, batadv_print_vid(vid),
			   hw_src, hw_dst);

	if (ret < 2)
		return !!ret;

	/* become a backbone gw ourselves on this vlan if not happened yet */
	batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);

	/* check for the different types of claim frames ... */
	switch (bla_dst->type) {
	case BATADV_CLAIM_TYPE_CLAIM:
		if (batadv_handle_claim(bat_priv, primary_if, hw_src,
					ethhdr->h_source, vid))
			return true;
		break;
	case BATADV_CLAIM_TYPE_UNCLAIM:
		if (batadv_handle_unclaim(bat_priv, primary_if,
					  ethhdr->h_source, hw_src, vid))
			return true;
		break;

	case BATADV_CLAIM_TYPE_ANNOUNCE:
		if (batadv_handle_announce(bat_priv, hw_src, ethhdr->h_source,
					   vid))
			return true;
		break;
	case BATADV_CLAIM_TYPE_REQUEST:
		if (batadv_handle_request(bat_priv, primary_if, hw_src, ethhdr,
					  vid))
			return true;
		break;
	}

	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "%s(): ERROR - this looks like a claim frame, but is useless. eth src %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
		   __func__, ethhdr->h_source, batadv_print_vid(vid), hw_src,
		   hw_dst);
	return true;
}
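/**
 * batadv_bla_purge_backbone_gw() - Remove backbone gateways after a timeout or
 *  immediately
 * @bat_priv: the bat priv with all the soft interface information
 * @now: whether immediate removal is requested (if true, don't check the
 *  timeout)
 */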
static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	struct hlist_node *node_tmp;
	struct hlist_head *head;
	struct batadv_hashtable *hash;
	spinlock_t *list_lock;	/* protects write access to the hash lists */
	int i;

	hash = bat_priv->bla.backbone_hash;
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(backbone_gw, node_tmp,
					  head, hash_entry) {
			if (now)
				goto purge_now;
			if (!batadv_has_timed_out(backbone_gw->lasttime,
						  BATADV_BLA_BACKBONE_TIMEOUT))
				continue;

			batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
				   "%s(): backbone gw %pM timed out\n",
				   __func__, backbone_gw->orig);

purge_now:
			/* don't wait for the pending request anymore */
			if (atomic_read(&backbone_gw->request_sent))
				atomic_dec(&bat_priv->bla.num_requests);

			batadv_bla_del_backbone_claims(backbone_gw);

			hlist_del_rcu(&backbone_gw->hash_entry);
			batadv_backbone_gw_put(backbone_gw);
		}
		spin_unlock_bh(list_lock);
	}
}
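/**
 * batadv_bla_purge_claims() - Remove claims after a timeout or immediately
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the selected primary interface, may be NULL if now is set
 * @now: whether immediate removal is requested (if true, don't check the
 *  timeout)
 */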
static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
				    struct batadv_hard_iface *primary_if,
				    int now)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	struct batadv_bla_claim *claim;
	struct hlist_head *head;
	struct batadv_hashtable *hash;
	int i;

	hash = bat_priv->bla.claim_hash;
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(claim, head, hash_entry) {
			backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
			if (now)
				goto purge_now;

			if (!batadv_compare_eth(backbone_gw->orig,
						primary_if->net_dev->dev_addr))
				goto skip;

			if (!batadv_has_timed_out(claim->lasttime,
						  BATADV_BLA_CLAIM_TIMEOUT))
				goto skip;

			batadv_dbg(BATADV_DBG_BLA, bat_priv,
				   "%s(): timed out.\n", __func__);

purge_now:
			batadv_dbg(BATADV_DBG_BLA, bat_priv,
				   "%s(): %pM, vid %d\n", __func__,
				   claim->addr, claim->vid);

			batadv_handle_unclaim(bat_priv, primary_if,
					      backbone_gw->orig,
					      claim->addr, claim->vid);
skip:
			batadv_backbone_gw_put(backbone_gw);
		}
		rcu_read_unlock();
	}
}
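/**
 * batadv_bla_update_orig_address() - Update the backbone gateways when the own
 *  originator address changes
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the new selected primary_if
 * @oldif: the old primary interface, may be NULL
 */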
void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
				    struct batadv_hard_iface *primary_if,
				    struct batadv_hard_iface *oldif)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	struct hlist_head *head;
	struct batadv_hashtable *hash;
	__be16 group;
	int i;

	/* reset bridge loop avoidance group id */
	group = htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN));
	bat_priv->bla.claim_dest.group = group;

	/* purge everything when bridge loop avoidance is turned off */
	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		oldif = NULL;

	if (!oldif) {
		batadv_bla_purge_claims(bat_priv, NULL, 1);
		batadv_bla_purge_backbone_gw(bat_priv, 1);
		return;
	}

	hash = bat_priv->bla.backbone_hash;
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
			/* own orig still holds the old value. */
			if (!batadv_compare_eth(backbone_gw->orig,
						oldif->net_dev->dev_addr))
				continue;

			ether_addr_copy(backbone_gw->orig,
					primary_if->net_dev->dev_addr);
			/* send an announce frame so others will ask for our
			 * claims and update their tables.
			 */
			batadv_bla_send_announce(bat_priv, backbone_gw);
		}
		rcu_read_unlock();
	}
}
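/**
 * batadv_bla_send_loopdetect() - send a loopdetect frame
 * @bat_priv: the bat priv with all the soft interface information
 * @backbone_gw: the backbone gateway for which a loop should be detected
 *
 * To detect loops that the bridge loop avoidance can't handle, send a loop
 * detection packet on the backbone. Unlike other BLA frames, this frame is
 * allowed on the mesh by other hosts.
 */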
static void
batadv_bla_send_loopdetect(struct batadv_priv *bat_priv,
			   struct batadv_bla_backbone_gw *backbone_gw)
{
	batadv_dbg(BATADV_DBG_BLA, bat_priv, "Send loopdetect frame for vid %d\n",
		   backbone_gw->vid);
	batadv_bla_send_claim(bat_priv, bat_priv->bla.loopdetect_addr,
			      backbone_gw->vid, BATADV_CLAIM_TYPE_LOOPDETECT);
}
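/**
 * batadv_bla_status_update() - purge bla interfaces if necessary
 * @net_dev: the soft interface net device
 */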
void batadv_bla_status_update(struct net_device *net_dev)
{
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hard_iface *primary_if;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		return;

	/* this function already purges everything when bla is disabled,
	 * so just call that one.
	 */
	batadv_bla_update_orig_address(bat_priv, primary_if, primary_if);
	batadv_hardif_put(primary_if);
}
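/**
 * batadv_bla_periodic_work() - performs periodic bla work
 * @work: kernel work struct
 *
 * periodic work to do:
 *  * purge structures when they are too old
 *  * send announcements
 */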
static void batadv_bla_periodic_work(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_priv *bat_priv;
	struct batadv_priv_bla *priv_bla;
	struct hlist_head *head;
	struct batadv_bla_backbone_gw *backbone_gw;
	struct batadv_hashtable *hash;
	struct batadv_hard_iface *primary_if;
	bool send_loopdetect = false;
	int i;

	delayed_work = to_delayed_work(work);
	priv_bla = container_of(delayed_work, struct batadv_priv_bla, work);
	bat_priv = container_of(priv_bla, struct batadv_priv, bla);
	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	batadv_bla_purge_claims(bat_priv, primary_if, 0);
	batadv_bla_purge_backbone_gw(bat_priv, 0);

	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		goto out;

	if (atomic_dec_and_test(&bat_priv->bla.loopdetect_next)) {
		/* set a new random mac address for the next bridge loop
		 * detection frames. Set the locally administered bit to avoid
		 * collisions with users mac addresses.
		 */
		eth_random_addr(bat_priv->bla.loopdetect_addr);
		bat_priv->bla.loopdetect_addr[0] = 0xba;
		bat_priv->bla.loopdetect_addr[1] = 0xbe;
		bat_priv->bla.loopdetect_lasttime = jiffies;
		atomic_set(&bat_priv->bla.loopdetect_next,
			   BATADV_BLA_LOOPDETECT_PERIODS);

		/* mark for sending loop detect on all VLANs */
		send_loopdetect = true;
	}

	hash = bat_priv->bla.backbone_hash;
	if (!hash)
		goto out;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
			if (!batadv_compare_eth(backbone_gw->orig,
						primary_if->net_dev->dev_addr))
				continue;

			backbone_gw->lasttime = jiffies;

			batadv_bla_send_announce(bat_priv, backbone_gw);
			if (send_loopdetect)
				batadv_bla_send_loopdetect(bat_priv,
							   backbone_gw);

			/* request_sent is only set after creation to avoid
			 * problems when we are not yet known as backbone gw
			 * in the backbone.
			 *
			 * We can reset this now after we waited some periods
			 * to give the backbone gw time to answer.
			 */
			if (atomic_read(&backbone_gw->request_sent) == 0)
				continue;

			if (!atomic_dec_and_test(&backbone_gw->wait_periods))
				continue;

			atomic_dec(&backbone_gw->bat_priv->bla.num_requests);
			atomic_set(&backbone_gw->request_sent, 0);
		}
		rcu_read_unlock();
	}
out:
	if (primary_if)
		batadv_hardif_put(primary_if);

	queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work,
			   msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
}
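/* The claim and backbone hashes receive the same lock class key by default
 * because they are both initialized by hash_new. Reinitializing them with
 * different keys allows nested locking without generating lockdep warnings.
 */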
static struct lock_class_key batadv_claim_hash_lock_class_key;
static struct lock_class_key batadv_backbone_hash_lock_class_key;
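/**
 * batadv_bla_init() - initialize all bla structures
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Return: 0 on success, < 0 on error.
 */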
int batadv_bla_init(struct batadv_priv *bat_priv)
{
	int i;
	u8 claim_dest[ETH_ALEN] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00};
	struct batadv_hard_iface *primary_if;
	u16 crc;
	unsigned long entrytime;

	spin_lock_init(&bat_priv->bla.bcast_duplist_lock);

	batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hash registering\n");

	/* setting claim destination address */
	memcpy(&bat_priv->bla.claim_dest.magic, claim_dest, 3);
	bat_priv->bla.claim_dest.type = 0;
	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (primary_if) {
		crc = crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN);
		bat_priv->bla.claim_dest.group = htons(crc);
		batadv_hardif_put(primary_if);
	} else {
		bat_priv->bla.claim_dest.group = 0; /* will be set later */
	}

	/* initialize the duplicate list */
	entrytime = jiffies - msecs_to_jiffies(BATADV_DUPLIST_TIMEOUT);
	for (i = 0; i < BATADV_DUPLIST_SIZE; i++)
		bat_priv->bla.bcast_duplist[i].entrytime = entrytime;
	bat_priv->bla.bcast_duplist_curr = 0;

	atomic_set(&bat_priv->bla.loopdetect_next,
		   BATADV_BLA_LOOPDETECT_PERIODS);

	if (bat_priv->bla.claim_hash)
		return 0;

	bat_priv->bla.claim_hash = batadv_hash_new(128);
	bat_priv->bla.backbone_hash = batadv_hash_new(32);

	if (!bat_priv->bla.claim_hash || !bat_priv->bla.backbone_hash)
		return -ENOMEM;

	batadv_hash_set_lock_class(bat_priv->bla.claim_hash,
				   &batadv_claim_hash_lock_class_key);
	batadv_hash_set_lock_class(bat_priv->bla.backbone_hash,
				   &batadv_backbone_hash_lock_class_key);

	batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hashes initialized\n");

	INIT_DELAYED_WORK(&bat_priv->bla.work, batadv_bla_periodic_work);

	queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work,
			   msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
	return 0;
}
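/**
 * batadv_bla_check_duplist() - Check if a frame is in the broadcast dup.
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: contains the multicast packet to be checked
 * @payload_ptr: pointer to position inside the head buffer of the skb
 *  marking the start of the data to be CRC'ed
 * @orig: originator mac address, NULL if unknown
 *
 * Check if it is on our broadcast list. Another gateway might have sent the
 * same packet because it is connected to the same backbone, so we have to
 * remove this duplicate.
 *
 * This is performed by checking the CRC, which will tell us
 * with a good chance that it is the same packet. If it is furthermore
 * sent by another host, drop it. We allow equal packets from
 * the same host however as this might be intended.
 *
 * Return: true if a packet is in the duplicate list, false otherwise.
 */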
static bool batadv_bla_check_duplist(struct batadv_priv *bat_priv,
				     struct sk_buff *skb, u8 *payload_ptr,
				     const u8 *orig)
{
	struct batadv_bcast_duplist_entry *entry;
	bool ret = false;
	int i, curr;
	__be32 crc;

	/* calculate the crc ... */
	crc = batadv_skb_crc32(skb, payload_ptr);

	spin_lock_bh(&bat_priv->bla.bcast_duplist_lock);

	for (i = 0; i < BATADV_DUPLIST_SIZE; i++) {
		curr = (bat_priv->bla.bcast_duplist_curr + i);
		curr %= BATADV_DUPLIST_SIZE;
		entry = &bat_priv->bla.bcast_duplist[curr];

		/* we can stop searching if the entry is too old;
		 * later entries will be even older
		 */
		if (batadv_has_timed_out(entry->entrytime,
					 BATADV_DUPLIST_TIMEOUT))
			break;

		if (entry->crc != crc)
			continue;

		/* are the originators both known and not anonymous? */
		if (orig && !is_zero_ether_addr(orig) &&
		    !is_zero_ether_addr(entry->orig)) {
			/* If known, check if the new frame came from
			 * the same originator:
			 * We are safe to take identical frames from the
			 * same orig, if known, as multiplications in
			 * the mesh are detected via the (orig, seqno) pair,
			 * so we can be a bit more liberal here and allow
			 * them.
			 */
			if (batadv_compare_eth(entry->orig, orig))
				continue;
		}

		/* this entry seems to be a duplicate */
		ret = true;
		goto out;
	}

	/* not found, add a new entry (overwrite the oldest entry)
	 * and allow it, its the first occurrence.
	 */
	curr = (bat_priv->bla.bcast_duplist_curr + BATADV_DUPLIST_SIZE - 1);
	curr %= BATADV_DUPLIST_SIZE;
	entry = &bat_priv->bla.bcast_duplist[curr];
	entry->crc = crc;
	entry->entrytime = jiffies;

	/* known originator */
	if (orig)
		ether_addr_copy(entry->orig, orig);
	/* anonymous originator */
	else
		eth_zero_addr(entry->orig);

	bat_priv->bla.bcast_duplist_curr = curr;

out:
	spin_unlock_bh(&bat_priv->bla.bcast_duplist_lock);

	return ret;
}
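/**
 * batadv_bla_check_ucast_duplist() - Check if a frame is in the broadcast dup.
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: contains the multicast packet to be checked, decapsulated from a
 *  unicast_packet
 *
 * Check if it is on our broadcast list. Another gateway might have sent the
 * same packet because it is connected to the same backbone, so we have to
 * remove this duplicate.
 *
 * Return: true if a packet is in the duplicate list, false otherwise.
 */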
static bool batadv_bla_check_ucast_duplist(struct batadv_priv *bat_priv,
					   struct sk_buff *skb)
{
	return batadv_bla_check_duplist(bat_priv, skb, (u8 *)skb->data, NULL);
}
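/**
 * batadv_bla_check_bcast_duplist() - Check if a frame is in the broadcast dup.
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: contains the bcast_packet to be checked
 *
 * Check if it is on our broadcast list. Another gateway might have sent the
 * same packet because it is connected to the same backbone, so we have to
 * remove this duplicate.
 *
 * Return: true if a packet is in the duplicate list, false otherwise.
 */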
bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
				    struct sk_buff *skb)
{
	struct batadv_bcast_packet *bcast_packet;
	u8 *payload_ptr;

	bcast_packet = (struct batadv_bcast_packet *)skb->data;
	payload_ptr = (u8 *)(bcast_packet + 1);

	return batadv_bla_check_duplist(bat_priv, skb, payload_ptr,
					bcast_packet->orig);
}
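/**
 * batadv_bla_is_backbone_gw_orig() - Check if the originator is a gateway for
 *  the VLAN identified by vid.
 * @bat_priv: the bat priv with all the soft interface information
 * @orig: originator mac address
 * @vid: VLAN identifier
 *
 * Return: true if orig is a backbone for this vid, false otherwise.
 */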
bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, u8 *orig,
				    unsigned short vid)
{
	struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
	struct hlist_head *head;
	struct batadv_bla_backbone_gw *backbone_gw;
	int i;

	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		return false;

	if (!hash)
		return false;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
			if (batadv_compare_eth(backbone_gw->orig, orig) &&
			    backbone_gw->vid == vid) {
				rcu_read_unlock();
				return true;
			}
		}
		rcu_read_unlock();
	}

	return false;
}
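/**
 * batadv_bla_is_backbone_gw() - check if originator is a backbone gw for a
 *  VLAN
 * @skb: the frame to be checked
 * @orig_node: the orig_node of the frame
 * @hdr_size: maximum length of the frame
 *
 * Return: true if the orig_node is also a gateway on the soft interface,
 * otherwise it returns false.
 */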
bool batadv_bla_is_backbone_gw(struct sk_buff *skb,
			       struct batadv_orig_node *orig_node, int hdr_size)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	unsigned short vid;

	if (!atomic_read(&orig_node->bat_priv->bridge_loop_avoidance))
		return false;

	/* first, find out the vid. */
	if (!pskb_may_pull(skb, hdr_size + ETH_HLEN))
		return false;

	vid = batadv_get_vid(skb, hdr_size);

	/* see if this originator is a backbone gw for this VLAN */
	backbone_gw = batadv_backbone_hash_find(orig_node->bat_priv,
						orig_node->orig, vid);
	if (!backbone_gw)
		return false;

	batadv_backbone_gw_put(backbone_gw);
	return true;
}
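/**
 * batadv_bla_free() - free all bla structures
 * @bat_priv: the bat priv with all the soft interface information
 *
 * for softinterface free or module unload
 */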
void batadv_bla_free(struct batadv_priv *bat_priv)
{
	struct batadv_hard_iface *primary_if;

	cancel_delayed_work_sync(&bat_priv->bla.work);
	primary_if = batadv_primary_if_get_selected(bat_priv);

	if (bat_priv->bla.claim_hash) {
		batadv_bla_purge_claims(bat_priv, primary_if, 1);
		batadv_hash_destroy(bat_priv->bla.claim_hash);
		bat_priv->bla.claim_hash = NULL;
	}
	if (bat_priv->bla.backbone_hash) {
		batadv_bla_purge_backbone_gw(bat_priv, 1);
		batadv_hash_destroy(bat_priv->bla.backbone_hash);
		bat_priv->bla.backbone_hash = NULL;
	}
	if (primary_if)
		batadv_hardif_put(primary_if);
}
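/**
 * batadv_bla_loopdetect_check() - check and handle a detected loop
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the packet to check
 * @primary_if: interface where the request came on
 * @vid: the VLAN ID of the frame
 *
 * Checks if this packet is a loop detect frame which has been sent by us,
 * and schedules an uevent report if that is the case.
 *
 * Return: true if it is a loop detect frame which is to be dropped, false
 * otherwise.
 */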
static bool
batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
			    struct batadv_hard_iface *primary_if,
			    unsigned short vid)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	struct ethhdr *ethhdr;
	bool ret;

	ethhdr = eth_hdr(skb);

	/* Compare:
	 * source mac address of loop detect frame with locally administered
	 * address.
	 */
	if (!batadv_compare_eth(ethhdr->h_source,
				bat_priv->bla.loopdetect_addr))
		return false;

	/* handle the loop detect frame only when it was sent recently;
	 * drop stale frames without reporting another loop.
	 */
	if (batadv_has_timed_out(bat_priv->bla.loopdetect_lasttime,
				 BATADV_BLA_LOOPDETECT_TIMEOUT))
		return true;

	backbone_gw = batadv_bla_get_backbone_gw(bat_priv,
						 primary_if->net_dev->dev_addr,
						 vid, true);
	if (unlikely(!backbone_gw))
		return true;

	ret = queue_work(batadv_event_workqueue, &backbone_gw->report_work);

	/* backbone_gw is unreferenced in the report work function
	 * if queue_work() call was successful
	 */
	if (!ret)
		batadv_backbone_gw_put(backbone_gw);

	return true;
}
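/**
 * batadv_bla_rx() - check packets coming from the mesh.
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the frame to be checked
 * @vid: the VLAN ID of the frame
 * @packet_type: the batman packet type this frame came in
 *
 * batadv_bla_rx avoidance checks if:
 *  * we have to race for a claim
 *  * if the frame is allowed on the LAN
 *
 * In these cases, the skb is further handled by this function
 *
 * Return: true if handled, otherwise it returns false and the caller shall
 * further process the skb.
 */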
bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
		   unsigned short vid, int packet_type)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	struct ethhdr *ethhdr;
	struct batadv_bla_claim search_claim, *claim = NULL;
	struct batadv_hard_iface *primary_if;
	bool own_claim;
	bool ret;

	ethhdr = eth_hdr(skb);

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto handled;

	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		goto allow;

	if (batadv_bla_loopdetect_check(bat_priv, skb, primary_if, vid))
		goto handled;

	if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
		/* don't allow multicast packets while requests are in flight */
		if (is_multicast_ether_addr(ethhdr->h_dest))
			/* Both broadcast flooding or multicast-via-unicasts
			 * delivery might send to multiple backbone gateways
			 * sharing the LAN and therefore need to coordinate
			 * which backbone gateway forwards into the LAN, to
			 * avoid duplicated packets. While requests are in
			 * flight this coordination is not guaranteed, so
			 * drop such frames for now.
			 */
			if (packet_type == BATADV_BCAST ||
			    packet_type == BATADV_UNICAST)
				goto handled;

	/* potential duplicates from foreign BLA backbone gateways via
	 * multicast-in-unicast packets
	 */
	if (is_multicast_ether_addr(ethhdr->h_dest) &&
	    packet_type == BATADV_UNICAST &&
	    batadv_bla_check_ucast_duplist(bat_priv, skb))
		goto handled;

	ether_addr_copy(search_claim.addr, ethhdr->h_source);
	search_claim.vid = vid;
	claim = batadv_claim_hash_find(bat_priv, &search_claim);

	if (!claim) {
		/* possible optimization: race for a claim */
		/* No claim exists yet, claim it for us!
		 */
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "%s(): Unclaimed MAC %pM found. Claim it. Local: %s\n",
			   __func__, ethhdr->h_source,
			   batadv_is_my_client(bat_priv,
					       ethhdr->h_source, vid) ?
			   "yes" : "no");
		batadv_handle_claim(bat_priv, primary_if,
				    primary_if->net_dev->dev_addr,
				    ethhdr->h_source, vid);
		goto allow;
	}

	/* if it is our own claim ... */
	backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
	own_claim = batadv_compare_eth(backbone_gw->orig,
				       primary_if->net_dev->dev_addr);
	batadv_backbone_gw_put(backbone_gw);

	if (own_claim) {
		/* ... allow it in any case */
		claim->lasttime = jiffies;
		goto allow;
	}

	/* if it is a multicast ... */
	if (is_multicast_ether_addr(ethhdr->h_dest) &&
	    (packet_type == BATADV_BCAST || packet_type == BATADV_UNICAST)) {
		/* drop it. the responsible gateway is in charge.
		 *
		 * We need to check packet type because with the gateway
		 * feature, broadcasts (like DHCP requests) may be sent
		 * wrapped in a unicast packet.
		 */
		goto handled;
	} else {
		/* seems the client considers us as its best gateway.
		 * send a claim and update the claim table
		 * immediately.
		 */
		batadv_handle_claim(bat_priv, primary_if,
				    primary_if->net_dev->dev_addr,
				    ethhdr->h_source, vid);
		goto allow;
	}
allow:
	batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
	ret = false;
	goto out;

handled:
	kfree_skb(skb);
	ret = true;

out:
	if (primary_if)
		batadv_hardif_put(primary_if);
	if (claim)
		batadv_claim_put(claim);
	return ret;
}
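/**
 * batadv_bla_tx() - check packets going into the mesh
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the frame to be checked
 * @vid: the VLAN ID of the frame
 *
 * batadv_bla_tx checks if:
 *  * a claim was received which has to be processed
 *  * the frame is allowed on the mesh
 *
 * in these cases, the skb is further handled by this function
 *
 * This call might reallocate skb data.
 *
 * Return: true if handled, otherwise it returns false and the caller shall
 * further process the skb.
 */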
bool batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
		   unsigned short vid)
{
	struct ethhdr *ethhdr;
	struct batadv_bla_claim search_claim, *claim = NULL;
	struct batadv_bla_backbone_gw *backbone_gw;
	struct batadv_hard_iface *primary_if;
	bool client_roamed;
	bool ret = false;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		goto allow;

	if (batadv_bla_process_claim(bat_priv, primary_if, skb))
		goto handled;

	ethhdr = eth_hdr(skb);

	if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
		/* don't allow broadcasts while requests are in flight */
		if (is_multicast_ether_addr(ethhdr->h_dest))
			goto handled;

	ether_addr_copy(search_claim.addr, ethhdr->h_source);
	search_claim.vid = vid;

	claim = batadv_claim_hash_find(bat_priv, &search_claim);

	/* if no claim exists, allow it. */
	if (!claim)
		goto allow;

	/* check if we are responsible. */
	backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
	client_roamed = batadv_compare_eth(backbone_gw->orig,
					   primary_if->net_dev->dev_addr);
	batadv_backbone_gw_put(backbone_gw);

	if (client_roamed) {
		/* if yes, the client has roamed and we have
		 * to unclaim it.
		 */
		if (batadv_has_timed_out(claim->lasttime, 100)) {
			/* only unclaim if the last claim entry is
			 * older than 100 ms to make sure we really
			 * have a roaming client here.
			 */
			batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): Roaming client %pM detected. Unclaim it.\n",
				   __func__, ethhdr->h_source);
			batadv_handle_unclaim(bat_priv, primary_if,
					      primary_if->net_dev->dev_addr,
					      ethhdr->h_source, vid);
			goto allow;
		} else {
			batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): Race for claim %pM detected. Drop packet.\n",
				   __func__, ethhdr->h_source);
			goto handled;
		}
	}

	/* check if it is a multicast/broadcast frame */
	if (is_multicast_ether_addr(ethhdr->h_dest)) {
		/* drop it. the responsible gateway has forwarded it into
		 * the backbone network.
		 */
		goto handled;
	} else {
		/* we must allow it. at least if we are
		 * responsible for the DESTINATION.
		 */
		goto allow;
	}
allow:
	batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
	ret = false;
	goto out;
handled:
	ret = true;
out:
	if (primary_if)
		batadv_hardif_put(primary_if);
	if (claim)
		batadv_claim_put(claim);
	return ret;
}
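/**
 * batadv_bla_claim_dump_entry() - dump one entry of the claim table
 *  to a netlink socket
 * @msg: buffer for the message
 * @portid: netlink port
 * @cb: Control block containing additional options
 * @primary_if: primary interface
 * @claim: entry to dump
 *
 * Return: 0 or error code.
 */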
static int
batadv_bla_claim_dump_entry(struct sk_buff *msg, u32 portid,
			    struct netlink_callback *cb,
			    struct batadv_hard_iface *primary_if,
			    struct batadv_bla_claim *claim)
{
	u8 *primary_addr = primary_if->net_dev->dev_addr;
	u16 backbone_crc;
	bool is_own;
	void *hdr;
	int ret = -EINVAL;

	hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq,
			  &batadv_netlink_family, NLM_F_MULTI,
			  BATADV_CMD_GET_BLA_CLAIM);
	if (!hdr) {
		ret = -ENOBUFS;
		goto out;
	}

	genl_dump_check_consistent(cb, hdr);

	is_own = batadv_compare_eth(claim->backbone_gw->orig,
				    primary_addr);

	spin_lock_bh(&claim->backbone_gw->crc_lock);
	backbone_crc = claim->backbone_gw->crc;
	spin_unlock_bh(&claim->backbone_gw->crc_lock);

	if (is_own)
		if (nla_put_flag(msg, BATADV_ATTR_BLA_OWN)) {
			genlmsg_cancel(msg, hdr);
			goto out;
		}

	if (nla_put(msg, BATADV_ATTR_BLA_ADDRESS, ETH_ALEN, claim->addr) ||
	    nla_put_u16(msg, BATADV_ATTR_BLA_VID, claim->vid) ||
	    nla_put(msg, BATADV_ATTR_BLA_BACKBONE, ETH_ALEN,
		    claim->backbone_gw->orig) ||
	    nla_put_u16(msg, BATADV_ATTR_BLA_CRC,
			backbone_crc)) {
		genlmsg_cancel(msg, hdr);
		goto out;
	}

	genlmsg_end(msg, hdr);
	ret = 0;

out:
	return ret;
}
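/**
 * batadv_bla_claim_dump_bucket() - dump one bucket of the claim table
 *  to a netlink socket
 * @msg: buffer for the message
 * @portid: netlink port
 * @cb: Control block containing additional options
 * @primary_if: primary interface
 * @hash: hash to dump
 * @bucket: bucket index to dump
 * @idx_skip: How many entries to skip
 *
 * Return: 0 or error code.
 */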
static int
batadv_bla_claim_dump_bucket(struct sk_buff *msg, u32 portid,
			     struct netlink_callback *cb,
			     struct batadv_hard_iface *primary_if,
			     struct batadv_hashtable *hash, unsigned int bucket,
			     int *idx_skip)
{
	struct batadv_bla_claim *claim;
	int idx = 0;
	int ret = 0;

	spin_lock_bh(&hash->list_locks[bucket]);
	cb->seq = atomic_read(&hash->generation) << 1 | 1;

	hlist_for_each_entry(claim, &hash->table[bucket], hash_entry) {
		if (idx++ < *idx_skip)
			continue;

		ret = batadv_bla_claim_dump_entry(msg, portid, cb,
						  primary_if, claim);
		if (ret) {
			*idx_skip = idx - 1;
			goto unlock;
		}
	}

	*idx_skip = 0;
unlock:
	spin_unlock_bh(&hash->list_locks[bucket]);
	return ret;
}
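/**
 * batadv_bla_claim_dump() - dump claim table to a netlink socket
 * @msg: buffer for the message
 * @cb: callback structure containing arguments
 *
 * Return: message length.
 */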
int batadv_bla_claim_dump(struct sk_buff *msg, struct netlink_callback *cb)
{
	struct batadv_hard_iface *primary_if = NULL;
	int portid = NETLINK_CB(cb->skb).portid;
	struct net *net = sock_net(cb->skb->sk);
	struct net_device *soft_iface;
	struct batadv_hashtable *hash;
	struct batadv_priv *bat_priv;
	int bucket = cb->args[0];
	int idx = cb->args[1];
	int ifindex;
	int ret = 0;

	ifindex = batadv_netlink_get_ifindex(cb->nlh,
					     BATADV_ATTR_MESH_IFINDEX);
	if (!ifindex)
		return -EINVAL;

	soft_iface = dev_get_by_index(net, ifindex);
	if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
		ret = -ENODEV;
		goto out;
	}

	bat_priv = netdev_priv(soft_iface);
	hash = bat_priv->bla.claim_hash;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) {
		ret = -ENOENT;
		goto out;
	}

	while (bucket < hash->size) {
		if (batadv_bla_claim_dump_bucket(msg, portid, cb, primary_if,
						 hash, bucket, &idx))
			break;
		bucket++;
	}

	cb->args[0] = bucket;
	cb->args[1] = idx;

	ret = msg->len;

out:
	if (primary_if)
		batadv_hardif_put(primary_if);

	if (soft_iface)
		dev_put(soft_iface);

	return ret;
}
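/**
 * batadv_bla_backbone_dump_entry() - dump one entry of the backbone table to a
 *  netlink socket
 * @msg: buffer for the message
 * @portid: netlink port
 * @cb: Control block containing additional options
 * @primary_if: primary interface
 * @backbone_gw: entry to dump
 *
 * Return: 0 or error code.
 */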
static int
batadv_bla_backbone_dump_entry(struct sk_buff *msg, u32 portid,
			       struct netlink_callback *cb,
			       struct batadv_hard_iface *primary_if,
			       struct batadv_bla_backbone_gw *backbone_gw)
{
	u8 *primary_addr = primary_if->net_dev->dev_addr;
	u16 backbone_crc;
	bool is_own;
	int msecs;
	void *hdr;
	int ret = -EINVAL;

	hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq,
			  &batadv_netlink_family, NLM_F_MULTI,
			  BATADV_CMD_GET_BLA_BACKBONE);
	if (!hdr) {
		ret = -ENOBUFS;
		goto out;
	}

	genl_dump_check_consistent(cb, hdr);

	is_own = batadv_compare_eth(backbone_gw->orig, primary_addr);

	spin_lock_bh(&backbone_gw->crc_lock);
	backbone_crc = backbone_gw->crc;
	spin_unlock_bh(&backbone_gw->crc_lock);

	msecs = jiffies_to_msecs(jiffies - backbone_gw->lasttime);

	if (is_own)
		if (nla_put_flag(msg, BATADV_ATTR_BLA_OWN)) {
			genlmsg_cancel(msg, hdr);
			goto out;
		}

	if (nla_put(msg, BATADV_ATTR_BLA_BACKBONE, ETH_ALEN,
		    backbone_gw->orig) ||
	    nla_put_u16(msg, BATADV_ATTR_BLA_VID, backbone_gw->vid) ||
	    nla_put_u16(msg, BATADV_ATTR_BLA_CRC,
			backbone_crc) ||
	    nla_put_u32(msg, BATADV_ATTR_LAST_SEEN_MSECS, msecs)) {
		genlmsg_cancel(msg, hdr);
		goto out;
	}

	genlmsg_end(msg, hdr);
	ret = 0;

out:
	return ret;
}
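/**
 * batadv_bla_backbone_dump_bucket() - dump one bucket of the backbone table to
 *  a netlink socket
 * @msg: buffer for the message
 * @portid: netlink port
 * @cb: Control block containing additional options
 * @primary_if: primary interface
 * @hash: hash to dump
 * @bucket: bucket index to dump
 * @idx_skip: How many entries to skip
 *
 * Return: 0 or error code.
 */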
static int
batadv_bla_backbone_dump_bucket(struct sk_buff *msg, u32 portid,
				struct netlink_callback *cb,
				struct batadv_hard_iface *primary_if,
				struct batadv_hashtable *hash,
				unsigned int bucket, int *idx_skip)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	int idx = 0;
	int ret = 0;

	spin_lock_bh(&hash->list_locks[bucket]);
	cb->seq = atomic_read(&hash->generation) << 1 | 1;

	hlist_for_each_entry(backbone_gw, &hash->table[bucket], hash_entry) {
		if (idx++ < *idx_skip)
			continue;

		ret = batadv_bla_backbone_dump_entry(msg, portid, cb,
						     primary_if, backbone_gw);
		if (ret) {
			*idx_skip = idx - 1;
			goto unlock;
		}
	}

	*idx_skip = 0;
unlock:
	spin_unlock_bh(&hash->list_locks[bucket]);
	return ret;
}
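/**
 * batadv_bla_backbone_dump() - dump backbone table to a netlink socket
 * @msg: buffer for the message
 * @cb: callback structure containing arguments
 *
 * Return: message length.
 */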
int batadv_bla_backbone_dump(struct sk_buff *msg, struct netlink_callback *cb)
{
	struct batadv_hard_iface *primary_if = NULL;
	int portid = NETLINK_CB(cb->skb).portid;
	struct net *net = sock_net(cb->skb->sk);
	struct net_device *soft_iface;
	struct batadv_hashtable *hash;
	struct batadv_priv *bat_priv;
	int bucket = cb->args[0];
	int idx = cb->args[1];
	int ifindex;
	int ret = 0;

	ifindex = batadv_netlink_get_ifindex(cb->nlh,
					     BATADV_ATTR_MESH_IFINDEX);
	if (!ifindex)
		return -EINVAL;

	soft_iface = dev_get_by_index(net, ifindex);
	if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
		ret = -ENODEV;
		goto out;
	}

	bat_priv = netdev_priv(soft_iface);
	hash = bat_priv->bla.backbone_hash;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) {
		ret = -ENOENT;
		goto out;
	}

	while (bucket < hash->size) {
		if (batadv_bla_backbone_dump_bucket(msg, portid, cb, primary_if,
						    hash, bucket, &idx))
			break;
		bucket++;
	}

	cb->args[0] = bucket;
	cb->args[1] = idx;

	ret = msg->len;

out:
	if (primary_if)
		batadv_hardif_put(primary_if);

	if (soft_iface)
		dev_put(soft_iface);

	return ret;
}

#ifdef CONFIG_BATMAN_ADV_DAT
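/**
 * batadv_bla_check_claim() - check if address is claimed
 * @bat_priv: the bat priv with all the soft interface information
 * @addr: mac address of which the claim status is checked
 * @vid: the VLAN ID
 *
 * addr is checked if this address is claimed by the local device itself.
 *
 * Return: true if bla is disabled or the mac is claimed by the device,
 * false if the device addr is already claimed by another gateway
 */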
bool batadv_bla_check_claim(struct batadv_priv *bat_priv,
			    u8 *addr, unsigned short vid)
{
	struct batadv_bla_claim search_claim;
	struct batadv_bla_claim *claim = NULL;
	struct batadv_hard_iface *primary_if = NULL;
	bool ret = true;

	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		return ret;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		return ret;

	/* first, check if the mac address is claimed */
	ether_addr_copy(search_claim.addr, addr);
	search_claim.vid = vid;

	claim = batadv_claim_hash_find(bat_priv, &search_claim);

	/* if there is a claim and we are not owner of the claim,
	 * return false.
	 */
	if (claim) {
		if (!batadv_compare_eth(claim->backbone_gw->orig,
					primary_if->net_dev->dev_addr))
			ret = false;
		batadv_claim_put(claim);
	}

	batadv_hardif_put(primary_if);
	return ret;
}
#endif