// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) B.A.T.M.A.N. contributors:
 *
 * Simon Wunderlich
 */

#include "bridge_loop_avoidance.h"
#include "main.h"

#include <linux/atomic.h>
#include <linux/byteorder/generic.h>
#include <linux/compiler.h>
#include <linux/crc16.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/gfp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <net/arp.h>
#include <net/genetlink.h>
#include <net/netlink.h>
#include <net/sock.h>
#include <uapi/linux/batadv_packet.h>
#include <uapi/linux/batman_adv.h>

#include "hard-interface.h"
#include "hash.h"
#include "log.h"
#include "netlink.h"
#include "originator.h"
#include "soft-interface.h"
#include "translation-table.h"

static const u8 batadv_announce_mac[4] = {0x43, 0x05, 0x43, 0x05};

static void batadv_bla_periodic_work(struct work_struct *work);
static void
batadv_bla_send_announce(struct batadv_priv *bat_priv,
                         struct batadv_bla_backbone_gw *backbone_gw);
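
/**
 * batadv_choose_claim() - choose the right bucket for a claim.
 * @data: data to hash
 * @size: size of the hash table
 *
 * Return: the hash index of the claim
 */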
static inline u32 batadv_choose_claim(const void *data, u32 size)
{
        struct batadv_bla_claim *claim = (struct batadv_bla_claim *)data;
        u32 hash = 0;

        hash = jhash(&claim->addr, sizeof(claim->addr), hash);
        hash = jhash(&claim->vid, sizeof(claim->vid), hash);

        return hash % size;
}
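
/**
 * batadv_choose_backbone_gw() - choose the right bucket for a backbone gateway.
 * @data: data to hash
 * @size: size of the hash table
 *
 * Return: the hash index of the backbone gateway
 */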
static inline u32 batadv_choose_backbone_gw(const void *data, u32 size)
{
        const struct batadv_bla_backbone_gw *gw;
        u32 hash = 0;

        gw = (struct batadv_bla_backbone_gw *)data;
        hash = jhash(&gw->orig, sizeof(gw->orig), hash);
        hash = jhash(&gw->vid, sizeof(gw->vid), hash);

        return hash % size;
}
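
/**
 * batadv_compare_backbone_gw() - compare address and vid of two backbone gws
 * @node: list node of the first entry to compare
 * @data2: pointer to the second backbone gateway
 *
 * Return: true if the backbones have the same data, false otherwise
 */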
static bool batadv_compare_backbone_gw(const struct hlist_node *node,
                                       const void *data2)
{
        const void *data1 = container_of(node, struct batadv_bla_backbone_gw,
                                         hash_entry);
        const struct batadv_bla_backbone_gw *gw1 = data1;
        const struct batadv_bla_backbone_gw *gw2 = data2;

        if (!batadv_compare_eth(gw1->orig, gw2->orig))
                return false;

        if (gw1->vid != gw2->vid)
                return false;

        return true;
}
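
/**
 * batadv_compare_claim() - compare address and vid of two claims
 * @node: list node of the first entry to compare
 * @data2: pointer to the second claim
 *
 * Return: true if the claims have the same data, false otherwise
 */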
static bool batadv_compare_claim(const struct hlist_node *node,
                                 const void *data2)
{
        const void *data1 = container_of(node, struct batadv_bla_claim,
                                         hash_entry);
        const struct batadv_bla_claim *cl1 = data1;
        const struct batadv_bla_claim *cl2 = data2;

        if (!batadv_compare_eth(cl1->addr, cl2->addr))
                return false;

        if (cl1->vid != cl2->vid)
                return false;

        return true;
}
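
/**
 * batadv_backbone_gw_release() - release backbone gw from lists and queue for
 *  free after rcu grace period
 * @ref: kref pointer of the backbone gw
 */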
static void batadv_backbone_gw_release(struct kref *ref)
{
        struct batadv_bla_backbone_gw *backbone_gw;

        backbone_gw = container_of(ref, struct batadv_bla_backbone_gw,
                                   refcount);

        kfree_rcu(backbone_gw, rcu);
}
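
/**
 * batadv_backbone_gw_put() - decrement the backbone gw refcounter and possibly
 *  release it
 * @backbone_gw: backbone gateway to be free'd
 */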
static void batadv_backbone_gw_put(struct batadv_bla_backbone_gw *backbone_gw)
{
        if (!backbone_gw)
                return;

        kref_put(&backbone_gw->refcount, batadv_backbone_gw_release);
}
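
/**
 * batadv_claim_release() - release claim from lists and queue for free after
 *  rcu grace period
 * @ref: kref pointer of the claim
 */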
static void batadv_claim_release(struct kref *ref)
{
        struct batadv_bla_claim *claim;
        struct batadv_bla_backbone_gw *old_backbone_gw;

        claim = container_of(ref, struct batadv_bla_claim, refcount);

        spin_lock_bh(&claim->backbone_lock);
        old_backbone_gw = claim->backbone_gw;
        claim->backbone_gw = NULL;
        spin_unlock_bh(&claim->backbone_lock);

        spin_lock_bh(&old_backbone_gw->crc_lock);
        old_backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
        spin_unlock_bh(&old_backbone_gw->crc_lock);

        batadv_backbone_gw_put(old_backbone_gw);

        kfree_rcu(claim, rcu);
}
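
/**
 * batadv_claim_put() - decrement the claim refcounter and possibly release it
 * @claim: claim to be free'd
 */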
static void batadv_claim_put(struct batadv_bla_claim *claim)
{
        if (!claim)
                return;

        kref_put(&claim->refcount, batadv_claim_release);
}
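
/**
 * batadv_claim_hash_find() - looks for a claim in the claim hash
 * @bat_priv: the bat priv with all the soft interface information
 * @data: search data (may be only partially filled in, e.g. without backbone)
 *
 * Return: claim if found or NULL otherwise.
 */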
static struct batadv_bla_claim *
batadv_claim_hash_find(struct batadv_priv *bat_priv,
                       struct batadv_bla_claim *data)
{
        struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
        struct hlist_head *head;
        struct batadv_bla_claim *claim;
        struct batadv_bla_claim *claim_tmp = NULL;
        int index;

        if (!hash)
                return NULL;

        index = batadv_choose_claim(data, hash->size);
        head = &hash->table[index];

        rcu_read_lock();
        hlist_for_each_entry_rcu(claim, head, hash_entry) {
                if (!batadv_compare_claim(&claim->hash_entry, data))
                        continue;

                if (!kref_get_unless_zero(&claim->refcount))
                        continue;

                claim_tmp = claim;
                break;
        }
        rcu_read_unlock();

        return claim_tmp;
}
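
/**
 * batadv_backbone_hash_find() - looks for a backbone gateway in the hash
 * @bat_priv: the bat priv with all the soft interface information
 * @addr: the address of the originator
 * @vid: the VLAN ID
 *
 * Return: backbone gateway if found or NULL otherwise
 */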
static struct batadv_bla_backbone_gw *
batadv_backbone_hash_find(struct batadv_priv *bat_priv, u8 *addr,
                          unsigned short vid)
{
        struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
        struct hlist_head *head;
        struct batadv_bla_backbone_gw search_entry, *backbone_gw;
        struct batadv_bla_backbone_gw *backbone_gw_tmp = NULL;
        int index;

        if (!hash)
                return NULL;

        ether_addr_copy(search_entry.orig, addr);
        search_entry.vid = vid;

        index = batadv_choose_backbone_gw(&search_entry, hash->size);
        head = &hash->table[index];

        rcu_read_lock();
        hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
                if (!batadv_compare_backbone_gw(&backbone_gw->hash_entry,
                                                &search_entry))
                        continue;

                if (!kref_get_unless_zero(&backbone_gw->refcount))
                        continue;

                backbone_gw_tmp = backbone_gw;
                break;
        }
        rcu_read_unlock();

        return backbone_gw_tmp;
}
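
/**
 * batadv_bla_del_backbone_claims() - delete all claims for a backbone
 * @backbone_gw: backbone gateway where the claims should be removed
 */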
static void
batadv_bla_del_backbone_claims(struct batadv_bla_backbone_gw *backbone_gw)
{
        struct batadv_hashtable *hash;
        struct hlist_node *node_tmp;
        struct hlist_head *head;
        struct batadv_bla_claim *claim;
        int i;
        spinlock_t *list_lock;  /* protects write access to the hash lists */

        hash = backbone_gw->bat_priv->bla.claim_hash;
        if (!hash)
                return;

        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
                list_lock = &hash->list_locks[i];

                spin_lock_bh(list_lock);
                hlist_for_each_entry_safe(claim, node_tmp,
                                          head, hash_entry) {
                        if (claim->backbone_gw != backbone_gw)
                                continue;

                        batadv_claim_put(claim);
                        hlist_del_rcu(&claim->hash_entry);
                }
                spin_unlock_bh(list_lock);
        }

        /* all claims gone, initialize CRC */
        spin_lock_bh(&backbone_gw->crc_lock);
        backbone_gw->crc = BATADV_BLA_CRC_INIT;
        spin_unlock_bh(&backbone_gw->crc_lock);
}
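
/**
 * batadv_bla_send_claim() - sends a claim frame according to the provided info
 * @bat_priv: the bat priv with all the soft interface information
 * @mac: the mac address to be announced within the claim
 * @vid: the VLAN ID
 * @claimtype: the type of the claim (CLAIM, UNCLAIM, ANNOUNCE, ...)
 */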
static void batadv_bla_send_claim(struct batadv_priv *bat_priv, u8 *mac,
                                  unsigned short vid, int claimtype)
{
        struct sk_buff *skb;
        struct ethhdr *ethhdr;
        struct batadv_hard_iface *primary_if;
        struct net_device *soft_iface;
        u8 *hw_src;
        struct batadv_bla_claim_dst local_claim_dest;
        __be32 zeroip = 0;

        primary_if = batadv_primary_if_get_selected(bat_priv);
        if (!primary_if)
                return;

        memcpy(&local_claim_dest, &bat_priv->bla.claim_dest,
               sizeof(local_claim_dest));
        local_claim_dest.type = claimtype;

        soft_iface = primary_if->soft_iface;

        skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
                         /* IP DST: 0.0.0.0 */
                         zeroip,
                         primary_if->soft_iface,
                         /* IP SRC: 0.0.0.0 */
                         zeroip,
                         /* Ethernet DST: Broadcast */
                         NULL,
                         /* Ethernet SRC/HW SRC: originator mac */
                         primary_if->net_dev->dev_addr,
                         /* HW DST: FF:43:05:XX:YY:YY
                          * with XX   = claim type
                          * and YY:YY = group id
                          */
                         (u8 *)&local_claim_dest);

        if (!skb)
                goto out;

        ethhdr = (struct ethhdr *)skb->data;
        hw_src = (u8 *)ethhdr + ETH_HLEN + sizeof(struct arphdr);

        /* now we pretend that the client would have sent this ... */
        switch (claimtype) {
        case BATADV_CLAIM_TYPE_CLAIM:
                /* normal claim frame
                 * set Ethernet SRC to the clients mac
                 */
                ether_addr_copy(ethhdr->h_source, mac);
                batadv_dbg(BATADV_DBG_BLA, bat_priv,
                           "%s(): CLAIM %pM on vid %d\n", __func__, mac,
                           batadv_print_vid(vid));
                break;
        case BATADV_CLAIM_TYPE_UNCLAIM:
                /* unclaim frame
                 * set HW SRC to the clients mac
                 */
                ether_addr_copy(hw_src, mac);
                batadv_dbg(BATADV_DBG_BLA, bat_priv,
                           "%s(): UNCLAIM %pM on vid %d\n", __func__, mac,
                           batadv_print_vid(vid));
                break;
        case BATADV_CLAIM_TYPE_ANNOUNCE:
                /* announcement frame
                 * set HW SRC to the special mac containing the crc
                 */
                ether_addr_copy(hw_src, mac);
                batadv_dbg(BATADV_DBG_BLA, bat_priv,
                           "%s(): ANNOUNCE of %pM on vid %d\n", __func__,
                           ethhdr->h_source, batadv_print_vid(vid));
                break;
        case BATADV_CLAIM_TYPE_REQUEST:
                /* request frame
                 * set HW SRC and header destination to the receiving backbone
                 * gws mac
                 */
                ether_addr_copy(hw_src, mac);
                ether_addr_copy(ethhdr->h_dest, mac);
                batadv_dbg(BATADV_DBG_BLA, bat_priv,
                           "%s(): REQUEST of %pM to %pM on vid %d\n", __func__,
                           ethhdr->h_source, ethhdr->h_dest,
                           batadv_print_vid(vid));
                break;
        case BATADV_CLAIM_TYPE_LOOPDETECT:
                ether_addr_copy(ethhdr->h_source, mac);
                batadv_dbg(BATADV_DBG_BLA, bat_priv,
                           "%s(): LOOPDETECT of %pM to %pM on vid %d\n",
                           __func__, ethhdr->h_source, ethhdr->h_dest,
                           batadv_print_vid(vid));

                break;
        }

        if (vid & BATADV_VLAN_HAS_TAG) {
                skb = vlan_insert_tag(skb, htons(ETH_P_8021Q),
                                      vid & VLAN_VID_MASK);
                if (!skb)
                        goto out;
        }

        skb_reset_mac_header(skb);
        skb->protocol = eth_type_trans(skb, soft_iface);
        batadv_inc_counter(bat_priv, BATADV_CNT_RX);
        batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
                           skb->len + ETH_HLEN);

        netif_rx_any_context(skb);
out:
        batadv_hardif_put(primary_if);
}
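
/**
 * batadv_bla_loopdetect_report() - worker for reporting the loop
 * @work: work queue item
 *
 * Throws an uevent, as the loopdetect check function can't do that itself
 * since the kernel may sleep while throwing uevents.
 */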
static void batadv_bla_loopdetect_report(struct work_struct *work)
{
        struct batadv_bla_backbone_gw *backbone_gw;
        struct batadv_priv *bat_priv;
        char vid_str[6] = { '\0' };

        backbone_gw = container_of(work, struct batadv_bla_backbone_gw,
                                   report_work);
        bat_priv = backbone_gw->bat_priv;

        batadv_info(bat_priv->soft_iface,
                    "Possible loop on VLAN %d detected which can't be handled by BLA - please check your network setup!\n",
                    batadv_print_vid(backbone_gw->vid));
        snprintf(vid_str, sizeof(vid_str), "%d",
                 batadv_print_vid(backbone_gw->vid));
        vid_str[sizeof(vid_str) - 1] = 0;

        batadv_throw_uevent(bat_priv, BATADV_UEV_BLA, BATADV_UEV_LOOPDETECT,
                            vid_str);

        batadv_backbone_gw_put(backbone_gw);
}
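
/**
 * batadv_bla_get_backbone_gw() - finds or creates a backbone gateway
 * @bat_priv: the bat priv with all the soft interface information
 * @orig: the mac address of the originator
 * @vid: the VLAN ID
 * @own_backbone: set if the requested backbone is local
 *
 * Return: the (possibly created) backbone gateway or NULL on error
 */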
static struct batadv_bla_backbone_gw *
batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, u8 *orig,
                           unsigned short vid, bool own_backbone)
{
        struct batadv_bla_backbone_gw *entry;
        struct batadv_orig_node *orig_node;
        int hash_added;

        entry = batadv_backbone_hash_find(bat_priv, orig, vid);

        if (entry)
                return entry;

        batadv_dbg(BATADV_DBG_BLA, bat_priv,
                   "%s(): not found (%pM, %d), creating new entry\n", __func__,
                   orig, batadv_print_vid(vid));

        entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
        if (!entry)
                return NULL;

        entry->vid = vid;
        entry->lasttime = jiffies;
        entry->crc = BATADV_BLA_CRC_INIT;
        entry->bat_priv = bat_priv;
        spin_lock_init(&entry->crc_lock);
        atomic_set(&entry->request_sent, 0);
        atomic_set(&entry->wait_periods, 0);
        ether_addr_copy(entry->orig, orig);
        INIT_WORK(&entry->report_work, batadv_bla_loopdetect_report);
        kref_init(&entry->refcount);

        kref_get(&entry->refcount);
        hash_added = batadv_hash_add(bat_priv->bla.backbone_hash,
                                     batadv_compare_backbone_gw,
                                     batadv_choose_backbone_gw, entry,
                                     &entry->hash_entry);

        if (unlikely(hash_added != 0)) {
                /* hash failed, free the structure */
                kfree(entry);
                return NULL;
        }

        /* this is a gateway now, remove any TT entry on this VLAN */
        orig_node = batadv_orig_hash_find(bat_priv, orig);
        if (orig_node) {
                batadv_tt_global_del_orig(bat_priv, orig_node, vid,
                                          "became a backbone gateway");
                batadv_orig_node_put(orig_node);
        }

        if (own_backbone) {
                batadv_bla_send_announce(bat_priv, entry);

                /* this will be decreased in the worker thread */
                atomic_inc(&entry->request_sent);
                atomic_set(&entry->wait_periods, BATADV_BLA_WAIT_PERIODS);
                atomic_inc(&bat_priv->bla.num_requests);
        }

        return entry;
}
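
/**
 * batadv_bla_update_own_backbone_gw() - updates the own backbone gw for a VLAN
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the selected primary interface
 * @vid: VLAN identifier
 *
 * update or add the own backbone gw to make sure we announce
 * where we receive other backbone gws
 */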
static void
batadv_bla_update_own_backbone_gw(struct batadv_priv *bat_priv,
                                  struct batadv_hard_iface *primary_if,
                                  unsigned short vid)
{
        struct batadv_bla_backbone_gw *backbone_gw;

        backbone_gw = batadv_bla_get_backbone_gw(bat_priv,
                                                 primary_if->net_dev->dev_addr,
                                                 vid, true);
        if (unlikely(!backbone_gw))
                return;

        backbone_gw->lasttime = jiffies;
        batadv_backbone_gw_put(backbone_gw);
}
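
/**
 * batadv_bla_answer_request() - answer a bla request by sending own claims
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: interface where the request came on
 * @vid: the vid where the request came on
 *
 * Repeat all of our own claims, and finally send an ANNOUNCE frame
 * to allow the requester another check if the CRC is correct now.
 */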
static void batadv_bla_answer_request(struct batadv_priv *bat_priv,
                                      struct batadv_hard_iface *primary_if,
                                      unsigned short vid)
{
        struct hlist_head *head;
        struct batadv_hashtable *hash;
        struct batadv_bla_claim *claim;
        struct batadv_bla_backbone_gw *backbone_gw;
        int i;

        batadv_dbg(BATADV_DBG_BLA, bat_priv,
                   "%s(): received a claim request, send all of our own claims again\n",
                   __func__);

        backbone_gw = batadv_backbone_hash_find(bat_priv,
                                                primary_if->net_dev->dev_addr,
                                                vid);
        if (!backbone_gw)
                return;

        hash = bat_priv->bla.claim_hash;
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];

                rcu_read_lock();
                hlist_for_each_entry_rcu(claim, head, hash_entry) {
                        /* only own claims are interesting */
                        if (claim->backbone_gw != backbone_gw)
                                continue;

                        batadv_bla_send_claim(bat_priv, claim->addr, claim->vid,
                                              BATADV_CLAIM_TYPE_CLAIM);
                }
                rcu_read_unlock();
        }

        /* finally, send an announcement frame */
        batadv_bla_send_announce(bat_priv, backbone_gw);
        batadv_backbone_gw_put(backbone_gw);
}
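
/**
 * batadv_bla_send_request() - send a request to repeat claims
 * @backbone_gw: the backbone gateway from whom we are out of sync
 *
 * When the crc is wrong, ask the backbone gateway for a full table update.
 * After the request, it will repeat all of his own claims and finally
 * send an announcement claim with which we can check again.
 */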
static void batadv_bla_send_request(struct batadv_bla_backbone_gw *backbone_gw)
{
        /* first, remove all old entries */
        batadv_bla_del_backbone_claims(backbone_gw);

        batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
                   "Sending REQUEST to %pM\n", backbone_gw->orig);

        /* send request */
        batadv_bla_send_claim(backbone_gw->bat_priv, backbone_gw->orig,
                              backbone_gw->vid, BATADV_CLAIM_TYPE_REQUEST);

        /* no local broadcasts should be sent or received, for now. */
        if (!atomic_read(&backbone_gw->request_sent)) {
                atomic_inc(&backbone_gw->bat_priv->bla.num_requests);
                atomic_set(&backbone_gw->request_sent, 1);
        }
}
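
/**
 * batadv_bla_send_announce() - Send an announcement frame
 * @bat_priv: the bat priv with all the soft interface information
 * @backbone_gw: our backbone gateway which should be announced
 */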
static void batadv_bla_send_announce(struct batadv_priv *bat_priv,
                                     struct batadv_bla_backbone_gw *backbone_gw)
{
        u8 mac[ETH_ALEN];
        __be16 crc;

        memcpy(mac, batadv_announce_mac, 4);
        spin_lock_bh(&backbone_gw->crc_lock);
        crc = htons(backbone_gw->crc);
        spin_unlock_bh(&backbone_gw->crc_lock);
        memcpy(&mac[4], &crc, 2);

        batadv_bla_send_claim(bat_priv, mac, backbone_gw->vid,
                              BATADV_CLAIM_TYPE_ANNOUNCE);
}
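
/**
 * batadv_bla_add_claim() - Adds a claim in the claim hash
 * @bat_priv: the bat priv with all the soft interface information
 * @mac: the mac address of the claim
 * @vid: the VLAN ID of the frame
 * @backbone_gw: the backbone gateway which claims it
 */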
static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
                                 const u8 *mac, const unsigned short vid,
                                 struct batadv_bla_backbone_gw *backbone_gw)
{
        struct batadv_bla_backbone_gw *old_backbone_gw;
        struct batadv_bla_claim *claim;
        struct batadv_bla_claim search_claim;
        bool remove_crc = false;
        int hash_added;

        ether_addr_copy(search_claim.addr, mac);
        search_claim.vid = vid;
        claim = batadv_claim_hash_find(bat_priv, &search_claim);

        /* create a new claim entry if it does not exist yet. */
        if (!claim) {
                claim = kzalloc(sizeof(*claim), GFP_ATOMIC);
                if (!claim)
                        return;

                ether_addr_copy(claim->addr, mac);
                spin_lock_init(&claim->backbone_lock);
                claim->vid = vid;
                claim->lasttime = jiffies;
                kref_get(&backbone_gw->refcount);
                claim->backbone_gw = backbone_gw;
                kref_init(&claim->refcount);

                batadv_dbg(BATADV_DBG_BLA, bat_priv,
                           "%s(): adding new entry %pM, vid %d to hash ...\n",
                           __func__, mac, batadv_print_vid(vid));

                kref_get(&claim->refcount);
                hash_added = batadv_hash_add(bat_priv->bla.claim_hash,
                                             batadv_compare_claim,
                                             batadv_choose_claim, claim,
                                             &claim->hash_entry);

                if (unlikely(hash_added != 0)) {
                        /* only local changes happened. */
                        kfree(claim);
                        return;
                }
        } else {
                claim->lasttime = jiffies;
                if (claim->backbone_gw == backbone_gw)
                        /* no need to register a new backbone */
                        goto claim_free_ref;

                batadv_dbg(BATADV_DBG_BLA, bat_priv,
                           "%s(): changing ownership for %pM, vid %d to gw %pM\n",
                           __func__, mac, batadv_print_vid(vid),
                           backbone_gw->orig);

                remove_crc = true;
        }

        /* replace backbone_gw atomically and adjust reference counters */
        spin_lock_bh(&claim->backbone_lock);
        old_backbone_gw = claim->backbone_gw;
        kref_get(&backbone_gw->refcount);
        claim->backbone_gw = backbone_gw;
        spin_unlock_bh(&claim->backbone_lock);

        if (remove_crc) {
                /* remove claim address from old backbone_gw */
                spin_lock_bh(&old_backbone_gw->crc_lock);
                old_backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
                spin_unlock_bh(&old_backbone_gw->crc_lock);
        }

        batadv_backbone_gw_put(old_backbone_gw);

        /* add claim address to new backbone_gw */
        spin_lock_bh(&backbone_gw->crc_lock);
        backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
        spin_unlock_bh(&backbone_gw->crc_lock);
        backbone_gw->lasttime = jiffies;

claim_free_ref:
        batadv_claim_put(claim);
}
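
/**
 * batadv_bla_claim_get_backbone_gw() - Get valid reference for backbone_gw of
 *  claim
 * @claim: claim whose backbone_gw should be returned
 *
 * Return: valid reference to claim::backbone_gw
 */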
static struct batadv_bla_backbone_gw *
batadv_bla_claim_get_backbone_gw(struct batadv_bla_claim *claim)
{
        struct batadv_bla_backbone_gw *backbone_gw;

        spin_lock_bh(&claim->backbone_lock);
        backbone_gw = claim->backbone_gw;
        kref_get(&backbone_gw->refcount);
        spin_unlock_bh(&claim->backbone_lock);

        return backbone_gw;
}
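
/**
 * batadv_bla_del_claim() - delete a claim from the claim hash
 * @bat_priv: the bat priv with all the soft interface information
 * @mac: mac address of the claim to be removed
 * @vid: VLAN id for the claim to be removed
 */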
static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
                                 const u8 *mac, const unsigned short vid)
{
        struct batadv_bla_claim search_claim, *claim;
        struct batadv_bla_claim *claim_removed_entry;
        struct hlist_node *claim_removed_node;

        ether_addr_copy(search_claim.addr, mac);
        search_claim.vid = vid;
        claim = batadv_claim_hash_find(bat_priv, &search_claim);
        if (!claim)
                return;

        batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): %pM, vid %d\n", __func__,
                   mac, batadv_print_vid(vid));

        claim_removed_node = batadv_hash_remove(bat_priv->bla.claim_hash,
                                                batadv_compare_claim,
                                                batadv_choose_claim, claim);
        if (!claim_removed_node)
                goto free_claim;

        /* reference from the hash is gone */
        claim_removed_entry = hlist_entry(claim_removed_node,
                                          struct batadv_bla_claim, hash_entry);
        batadv_claim_put(claim_removed_entry);

free_claim:
        /* don't need the reference from hash_find() anymore */
        batadv_claim_put(claim);
}
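
/**
 * batadv_handle_announce() - check for ANNOUNCE frame
 * @bat_priv: the bat priv with all the soft interface information
 * @an_addr: announcement mac address (ARP Sender HW address)
 * @backbone_addr: originator address of the sender (Ethernet source MAC)
 * @vid: the VLAN ID of the frame
 *
 * Return: true if handled
 */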
static bool batadv_handle_announce(struct batadv_priv *bat_priv, u8 *an_addr,
                                   u8 *backbone_addr, unsigned short vid)
{
        struct batadv_bla_backbone_gw *backbone_gw;
        u16 backbone_crc, crc;

        if (memcmp(an_addr, batadv_announce_mac, 4) != 0)
                return false;

        backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid,
                                                 false);

        if (unlikely(!backbone_gw))
                return true;

        /* handle as ANNOUNCE frame */
        backbone_gw->lasttime = jiffies;
        crc = ntohs(*((__force __be16 *)(&an_addr[4])));

        batadv_dbg(BATADV_DBG_BLA, bat_priv,
                   "%s(): ANNOUNCE vid %d (sent by %pM)... CRC = %#.4x\n",
                   __func__, batadv_print_vid(vid), backbone_gw->orig, crc);

        spin_lock_bh(&backbone_gw->crc_lock);
        backbone_crc = backbone_gw->crc;
        spin_unlock_bh(&backbone_gw->crc_lock);

        if (backbone_crc != crc) {
                batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
                           "%s(): CRC FAILED for %pM/%d (my = %#.4x, sent = %#.4x)\n",
                           __func__, backbone_gw->orig,
                           batadv_print_vid(backbone_gw->vid),
                           backbone_crc, crc);

                batadv_bla_send_request(backbone_gw);
        } else {
                /* if we have sent a request and the crc was OK,
                 * we can allow traffic again.
                 */
                if (atomic_read(&backbone_gw->request_sent)) {
                        atomic_dec(&backbone_gw->bat_priv->bla.num_requests);
                        atomic_set(&backbone_gw->request_sent, 0);
                }
        }

        batadv_backbone_gw_put(backbone_gw);
        return true;
}
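
/**
 * batadv_handle_request() - check for REQUEST frame
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the primary hard interface of this batman soft interface
 * @backbone_addr: backbone address to be requested (ARP sender HW MAC)
 * @ethhdr: ethernet header of a packet
 * @vid: the VLAN ID of the frame
 *
 * Return: true if handled
 */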
static bool batadv_handle_request(struct batadv_priv *bat_priv,
                                  struct batadv_hard_iface *primary_if,
                                  u8 *backbone_addr, struct ethhdr *ethhdr,
                                  unsigned short vid)
{
        /* check for REQUEST frame */
        if (!batadv_compare_eth(backbone_addr, ethhdr->h_dest))
                return false;

        /* sanity check, this should not happen on a normal switch,
         * we ignore it in this case.
         */
        if (!batadv_compare_eth(ethhdr->h_dest, primary_if->net_dev->dev_addr))
                return true;

        batadv_dbg(BATADV_DBG_BLA, bat_priv,
                   "%s(): REQUEST vid %d (sent by %pM)...\n",
                   __func__, batadv_print_vid(vid), ethhdr->h_source);

        batadv_bla_answer_request(bat_priv, primary_if, vid);
        return true;
}
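
/**
 * batadv_handle_unclaim() - check for UNCLAIM frame
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the primary hard interface of this batman soft interface
 * @backbone_addr: originator address of the backbone (Ethernet source)
 * @claim_addr: Client to be unclaimed (ARP sender HW MAC)
 * @vid: the VLAN ID of the frame
 *
 * Return: true if handled
 */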
static bool batadv_handle_unclaim(struct batadv_priv *bat_priv,
                                  struct batadv_hard_iface *primary_if,
                                  u8 *backbone_addr, u8 *claim_addr,
                                  unsigned short vid)
{
        struct batadv_bla_backbone_gw *backbone_gw;

        /* unclaim in any case if it is our own */
        if (primary_if && batadv_compare_eth(backbone_addr,
                                             primary_if->net_dev->dev_addr))
                batadv_bla_send_claim(bat_priv, claim_addr, vid,
                                      BATADV_CLAIM_TYPE_UNCLAIM);

        backbone_gw = batadv_backbone_hash_find(bat_priv, backbone_addr, vid);

        if (!backbone_gw)
                return true;

        /* this must be an UNCLAIM frame */
        batadv_dbg(BATADV_DBG_BLA, bat_priv,
                   "%s(): UNCLAIM %pM on vid %d (sent by %pM)...\n", __func__,
                   claim_addr, batadv_print_vid(vid), backbone_gw->orig);

        batadv_bla_del_claim(bat_priv, claim_addr, vid);
        batadv_backbone_gw_put(backbone_gw);
        return true;
}
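
/**
 * batadv_handle_claim() - check for CLAIM frame
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the primary hard interface of this batman soft interface
 * @backbone_addr: originator address of the backbone (Ethernet Source)
 * @claim_addr: client mac address to be claimed (ARP sender HW MAC)
 * @vid: the VLAN ID of the frame
 *
 * Return: true if handled
 */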
static bool batadv_handle_claim(struct batadv_priv *bat_priv,
                                struct batadv_hard_iface *primary_if,
                                u8 *backbone_addr, u8 *claim_addr,
                                unsigned short vid)
{
        struct batadv_bla_backbone_gw *backbone_gw;

        /* register the gateway if not yet available, and add the claim. */

        backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid,
                                                 false);

        if (unlikely(!backbone_gw))
                return true;

        /* this must be a CLAIM frame */
        batadv_bla_add_claim(bat_priv, claim_addr, vid, backbone_gw);
        if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
                batadv_bla_send_claim(bat_priv, claim_addr, vid,
                                      BATADV_CLAIM_TYPE_CLAIM);

        /* TODO: we could call something like tt_local_del() here. */

        batadv_backbone_gw_put(backbone_gw);
        return true;
}
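
/**
 * batadv_check_claim_group() - check for claim group membership
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the primary interface of this batman interface
 * @hw_src: the Hardware source in the ARP Header
 * @hw_dst: the Hardware destination in the ARP Header
 * @ethhdr: pointer to the Ethernet header of the claim frame
 *
 * checks if it is a claim packet and if it's on the same group.
 * This function also applies the group ID of the sender
 * if it is in the same mesh.
 *
 * Return:
 *  2  - if it is a claim packet and on the same group
 *  1  - if is a claim packet from another group
 *  0  - if it is not a claim packet
 */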
static int batadv_check_claim_group(struct batadv_priv *bat_priv,
                                    struct batadv_hard_iface *primary_if,
                                    u8 *hw_src, u8 *hw_dst,
                                    struct ethhdr *ethhdr)
{
        u8 *backbone_addr;
        struct batadv_orig_node *orig_node;
        struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;

        bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
        bla_dst_own = &bat_priv->bla.claim_dest;

        /* if announcement packet, use the source,
         * otherwise assume it is in the hw_src
         */
        switch (bla_dst->type) {
        case BATADV_CLAIM_TYPE_CLAIM:
                backbone_addr = hw_src;
                break;
        case BATADV_CLAIM_TYPE_REQUEST:
        case BATADV_CLAIM_TYPE_ANNOUNCE:
        case BATADV_CLAIM_TYPE_UNCLAIM:
                backbone_addr = ethhdr->h_source;
                break;
        default:
                return 0;
        }

        /* don't accept claim frames from ourselves */
        if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
                return 0;

        /* if its already the same group, it is fine. */
        if (bla_dst->group == bla_dst_own->group)
                return 2;

        /* lets see if this originator is in our mesh */
        orig_node = batadv_orig_hash_find(bat_priv, backbone_addr);

        /* dont accept claims from gateways which are not in
         * the same mesh or group.
         */
        if (!orig_node)
                return 1;

        /* if our mesh friends mac is bigger, use it for ourselves. */
        if (ntohs(bla_dst->group) > ntohs(bla_dst_own->group)) {
                batadv_dbg(BATADV_DBG_BLA, bat_priv,
                           "taking other backbones claim group: %#.4x\n",
                           ntohs(bla_dst->group));
                bla_dst_own->group = bla_dst->group;
        }

        batadv_orig_node_put(orig_node);

        return 2;
}
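
/**
 * batadv_bla_process_claim() - Check if this is a claim frame, and process it
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the primary hard interface of this batman soft interface
 * @skb: the frame to be checked
 *
 * Return: true if it was a claim frame, otherwise return false to
 * tell the callee that it can use the frame on its own.
 */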
static bool batadv_bla_process_claim(struct batadv_priv *bat_priv,
                                     struct batadv_hard_iface *primary_if,
                                     struct sk_buff *skb)
{
        struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;
        u8 *hw_src, *hw_dst;
        struct vlan_hdr *vhdr, vhdr_buf;
        struct ethhdr *ethhdr;
        struct arphdr *arphdr;
        unsigned short vid;
        int vlan_depth = 0;
        __be16 proto;
        int headlen;
        int ret;

        vid = batadv_get_vid(skb, 0);
        ethhdr = eth_hdr(skb);

        proto = ethhdr->h_proto;
        headlen = ETH_HLEN;
        if (vid & BATADV_VLAN_HAS_TAG) {
                /* Traverse the VLAN/Ethertypes.
                 *
                 * At this point it is known that the first protocol is a VLAN
                 * header, so start checking at the encapsulated protocol.
                 *
                 * The depth of the VLAN headers is recorded to drop BLA claim
                 * frames encapsulated into multiple VLAN headers (QinQ).
                 */
                do {
                        vhdr = skb_header_pointer(skb, headlen, VLAN_HLEN,
                                                  &vhdr_buf);
                        if (!vhdr)
                                return false;

                        proto = vhdr->h_vlan_encapsulated_proto;
                        headlen += VLAN_HLEN;
                        vlan_depth++;
                } while (proto == htons(ETH_P_8021Q));
        }

        if (proto != htons(ETH_P_ARP))
                return false; /* not a claim frame */

        /* this must be a ARP frame. check if it is a claim. */
        if (unlikely(!pskb_may_pull(skb, headlen + arp_hdr_len(skb->dev))))
                return false;

        /* pskb_may_pull() may have modified the pointers, get ethhdr again */
        ethhdr = eth_hdr(skb);
        arphdr = (struct arphdr *)((u8 *)ethhdr + headlen);

        /* Check whether the ARP frame carries a valid
         * IP information
         */
        if (arphdr->ar_hrd != htons(ARPHRD_ETHER))
                return false;
        if (arphdr->ar_pro != htons(ETH_P_IP))
                return false;
        if (arphdr->ar_hln != ETH_ALEN)
                return false;
        if (arphdr->ar_pln != 4)
                return false;

        hw_src = (u8 *)arphdr + sizeof(struct arphdr);
        hw_dst = hw_src + ETH_ALEN + 4;
        bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
        bla_dst_own = &bat_priv->bla.claim_dest;

        /* check if it is a claim frame in general */
        if (memcmp(bla_dst->magic, bla_dst_own->magic,
                   sizeof(bla_dst->magic)) != 0)
                return false;

        /* check if there is a claim frame encapsulated deeper in (QinQ) and
         * drop that, as this is not supported by BLA but should also not be
         * sent via the mesh.
         */
        if (vlan_depth > 1)
                return true;

        /* Let the loopdetect frames on the mesh in any case. */
        if (bla_dst->type == BATADV_CLAIM_TYPE_LOOPDETECT)
                return false;

        /* check if it is a claim frame. */
        ret = batadv_check_claim_group(bat_priv, primary_if, hw_src, hw_dst,
                                       ethhdr);
        if (ret == 1)
                batadv_dbg(BATADV_DBG_BLA, bat_priv,
                           "%s(): received a claim frame from another group. From: %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
                           __func__, ethhdr->h_source, batadv_print_vid(vid),
                           hw_src, hw_dst);

        if (ret < 2)
                return !!ret;

        /* become a backbone gw ourselves on this vlan if not happened yet */
        batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);

        /* check for the different types of claim frames ... */
        switch (bla_dst->type) {
        case BATADV_CLAIM_TYPE_CLAIM:
                if (batadv_handle_claim(bat_priv, primary_if, hw_src,
                                        ethhdr->h_source, vid))
                        return true;
                break;
        case BATADV_CLAIM_TYPE_UNCLAIM:
                if (batadv_handle_unclaim(bat_priv, primary_if,
                                          ethhdr->h_source, hw_src, vid))
                        return true;
                break;

        case BATADV_CLAIM_TYPE_ANNOUNCE:
                if (batadv_handle_announce(bat_priv, hw_src, ethhdr->h_source,
                                           vid))
                        return true;
                break;
        case BATADV_CLAIM_TYPE_REQUEST:
                if (batadv_handle_request(bat_priv, primary_if, hw_src, ethhdr,
                                          vid))
                        return true;
                break;
        }

        batadv_dbg(BATADV_DBG_BLA, bat_priv,
                   "%s(): ERROR - this looks like a claim frame, but is useless. eth src %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
                   __func__, ethhdr->h_source, batadv_print_vid(vid), hw_src,
                   hw_dst);
        return true;
}
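
/**
 * batadv_bla_purge_backbone_gw() - Remove backbone gateways after a timeout or
 *  immediately
 * @bat_priv: the bat priv with all the soft interface information
 * @now: whether the whole hash shall be wiped now
 */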
static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now)
{
        struct batadv_bla_backbone_gw *backbone_gw;
        struct hlist_node *node_tmp;
        struct hlist_head *head;
        struct batadv_hashtable *hash;
        spinlock_t *list_lock;  /* protects write access to the hash lists */
        int i;

        hash = bat_priv->bla.backbone_hash;
        if (!hash)
                return;

        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
                list_lock = &hash->list_locks[i];

                spin_lock_bh(list_lock);
                hlist_for_each_entry_safe(backbone_gw, node_tmp,
                                          head, hash_entry) {
                        if (now)
                                goto purge_now;
                        if (!batadv_has_timed_out(backbone_gw->lasttime,
                                                  BATADV_BLA_BACKBONE_TIMEOUT))
                                continue;

                        batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
                                   "%s(): backbone gw %pM timed out\n",
                                   __func__, backbone_gw->orig);

purge_now:
                        /* don't wait for the pending request anymore */
                        if (atomic_read(&backbone_gw->request_sent))
                                atomic_dec(&bat_priv->bla.num_requests);

                        batadv_bla_del_backbone_claims(backbone_gw);

                        hlist_del_rcu(&backbone_gw->hash_entry);
                        batadv_backbone_gw_put(backbone_gw);
                }
                spin_unlock_bh(list_lock);
        }
}
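
/**
 * batadv_bla_purge_claims() - Remove claims after a timeout or immediately
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the selected primary interface, may be NULL if now is set
 * @now: whether the whole hash shall be wiped now
 */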
static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
                                    struct batadv_hard_iface *primary_if,
                                    int now)
{
        struct batadv_bla_backbone_gw *backbone_gw;
        struct batadv_bla_claim *claim;
        struct hlist_head *head;
        struct batadv_hashtable *hash;
        int i;

        hash = bat_priv->bla.claim_hash;
        if (!hash)
                return;

        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];

                rcu_read_lock();
                hlist_for_each_entry_rcu(claim, head, hash_entry) {
                        backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
                        if (now)
                                goto purge_now;

                        if (!batadv_compare_eth(backbone_gw->orig,
                                                primary_if->net_dev->dev_addr))
                                goto skip;

                        if (!batadv_has_timed_out(claim->lasttime,
                                                  BATADV_BLA_CLAIM_TIMEOUT))
                                goto skip;

                        batadv_dbg(BATADV_DBG_BLA, bat_priv,
                                   "%s(): timed out.\n", __func__);

purge_now:
                        batadv_dbg(BATADV_DBG_BLA, bat_priv,
                                   "%s(): %pM, vid %d\n", __func__,
                                   claim->addr, claim->vid);

                        batadv_handle_unclaim(bat_priv, primary_if,
                                              backbone_gw->orig,
                                              claim->addr, claim->vid);
skip:
                        batadv_backbone_gw_put(backbone_gw);
                }
                rcu_read_unlock();
        }
}
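
/**
 * batadv_bla_update_orig_address() - Update the backbone gateways when the own
 *  originator address changes
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the new selected primary_if
 * @oldif: the old primary interface, may be NULL
 */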
void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
                                    struct batadv_hard_iface *primary_if,
                                    struct batadv_hard_iface *oldif)
{
        struct batadv_bla_backbone_gw *backbone_gw;
        struct hlist_head *head;
        struct batadv_hashtable *hash;
        __be16 group;
        int i;

        /* reset bridge loop avoidance group id */
        group = htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN));
        bat_priv->bla.claim_dest.group = group;

        /* purge everything when bridge loop avoidance is turned off */
        if (!atomic_read(&bat_priv->bridge_loop_avoidance))
                oldif = NULL;

        if (!oldif) {
                batadv_bla_purge_claims(bat_priv, NULL, 1);
                batadv_bla_purge_backbone_gw(bat_priv, 1);
                return;
        }

        hash = bat_priv->bla.backbone_hash;
        if (!hash)
                return;

        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];

                rcu_read_lock();
                hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
                        /* own orig still holds the old value. */
                        if (!batadv_compare_eth(backbone_gw->orig,
                                                oldif->net_dev->dev_addr))
                                continue;

                        ether_addr_copy(backbone_gw->orig,
                                        primary_if->net_dev->dev_addr);
                        /* send an announce frame so others will ask for our
                         * claims and update their tables.
                         */
                        batadv_bla_send_announce(bat_priv, backbone_gw);
                }
                rcu_read_unlock();
        }
}
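
/**
 * batadv_bla_send_loopdetect() - send a loopdetect frame
 * @bat_priv: the bat priv with all the soft interface information
 * @backbone_gw: the backbone gateway for which a loop should be detected
 *
 * To detect loops that the bridge loop avoidance can't handle, send a loop
 * detection packet on the backbone. Unlike other BLA frames, this frame will
 * be allowed on the mesh by other nodes. If it is received on the soft
 * interface again, a loop is present.
 */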
static void
batadv_bla_send_loopdetect(struct batadv_priv *bat_priv,
                           struct batadv_bla_backbone_gw *backbone_gw)
{
        batadv_dbg(BATADV_DBG_BLA, bat_priv, "Send loopdetect frame for vid %d\n",
                   backbone_gw->vid);
        batadv_bla_send_claim(bat_priv, bat_priv->bla.loopdetect_addr,
                              backbone_gw->vid, BATADV_CLAIM_TYPE_LOOPDETECT);
}
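
/**
 * batadv_bla_status_update() - purge bla interfaces if necessary
 * @net_dev: the soft interface net device
 */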
void batadv_bla_status_update(struct net_device *net_dev)
{
        struct batadv_priv *bat_priv = netdev_priv(net_dev);
        struct batadv_hard_iface *primary_if;

        primary_if = batadv_primary_if_get_selected(bat_priv);
        if (!primary_if)
                return;

        /* this function already purges everything when bla is disabled,
         * so just call that one.
         */
        batadv_bla_update_orig_address(bat_priv, primary_if, primary_if);
        batadv_hardif_put(primary_if);
}
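
/**
 * batadv_bla_periodic_work() - performs periodic bla work
 * @work: kernel work struct
 *
 * periodic work to do:
 *  * purge structures when they are too old
 *  * send announcements
 */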
static void batadv_bla_periodic_work(struct work_struct *work)
{
        struct delayed_work *delayed_work;
        struct batadv_priv *bat_priv;
        struct batadv_priv_bla *priv_bla;
        struct hlist_head *head;
        struct batadv_bla_backbone_gw *backbone_gw;
        struct batadv_hashtable *hash;
        struct batadv_hard_iface *primary_if;
        bool send_loopdetect = false;
        int i;

        delayed_work = to_delayed_work(work);
        priv_bla = container_of(delayed_work, struct batadv_priv_bla, work);
        bat_priv = container_of(priv_bla, struct batadv_priv, bla);
        primary_if = batadv_primary_if_get_selected(bat_priv);
        if (!primary_if)
                goto out;

        batadv_bla_purge_claims(bat_priv, primary_if, 0);
        batadv_bla_purge_backbone_gw(bat_priv, 0);

        if (!atomic_read(&bat_priv->bridge_loop_avoidance))
                goto out;

        if (atomic_dec_and_test(&bat_priv->bla.loopdetect_next)) {
                /* set a new random mac address for the next bridge loop
                 * detection frames. Set the locally administered bit to avoid
                 * collisions with users mac addresses.
                 */
                eth_random_addr(bat_priv->bla.loopdetect_addr);
                bat_priv->bla.loopdetect_addr[0] = 0xba;
                bat_priv->bla.loopdetect_addr[1] = 0xbe;
                bat_priv->bla.loopdetect_lasttime = jiffies;
                atomic_set(&bat_priv->bla.loopdetect_next,
                           BATADV_BLA_LOOPDETECT_PERIODS);

                /* mark for sending loop detect on all VLANs */
                send_loopdetect = true;
        }

        hash = bat_priv->bla.backbone_hash;
        if (!hash)
                goto out;

        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];

                rcu_read_lock();
                hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
                        if (!batadv_compare_eth(backbone_gw->orig,
                                                primary_if->net_dev->dev_addr))
                                continue;

                        backbone_gw->lasttime = jiffies;

                        batadv_bla_send_announce(bat_priv, backbone_gw);
                        if (send_loopdetect)
                                batadv_bla_send_loopdetect(bat_priv,
                                                           backbone_gw);

                        /* request_sent is only set after creation to avoid
                         * problems when we are not yet known as backbone gw
                         * in the backbone.
                         *
                         * We can reset this now after we waited some periods
                         * to give bridge forward delays and bla group forming
                         * some grace time.
                         */
                        if (atomic_read(&backbone_gw->request_sent) == 0)
                                continue;

                        if (!atomic_dec_and_test(&backbone_gw->wait_periods))
                                continue;

                        atomic_dec(&backbone_gw->bat_priv->bla.num_requests);
                        atomic_set(&backbone_gw->request_sent, 0);
                }
                rcu_read_unlock();
        }
out:
        batadv_hardif_put(primary_if);

        queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work,
                           msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
}

/* The hash for claim and backbone hash receive the same key because they
 * are getting initialized by hash_new with the same key. Reinitializing
 * them with to different keys to allow nested locking without generating
 * lockdep warnings
 */
static struct lock_class_key batadv_claim_hash_lock_class_key;
static struct lock_class_key batadv_backbone_hash_lock_class_key;
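
/**
 * batadv_bla_init() - initialize all bla structures
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Return: 0 on success, < 0 on error.
 */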
int batadv_bla_init(struct batadv_priv *bat_priv)
{
        int i;
        u8 claim_dest[ETH_ALEN] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00};
        struct batadv_hard_iface *primary_if;
        u16 crc;
        unsigned long entrytime;

        spin_lock_init(&bat_priv->bla.bcast_duplist_lock);

        batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hash registering\n");

        /* setting claim destination address */
        memcpy(&bat_priv->bla.claim_dest.magic, claim_dest, 3);
        bat_priv->bla.claim_dest.type = 0;
        primary_if = batadv_primary_if_get_selected(bat_priv);
        if (primary_if) {
                crc = crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN);
                bat_priv->bla.claim_dest.group = htons(crc);
                batadv_hardif_put(primary_if);
        } else {
                bat_priv->bla.claim_dest.group = 0;
        }

        /* initialize the duplicate list */
        entrytime = jiffies - msecs_to_jiffies(BATADV_DUPLIST_TIMEOUT);
        for (i = 0; i < BATADV_DUPLIST_SIZE; i++)
                bat_priv->bla.bcast_duplist[i].entrytime = entrytime;
        bat_priv->bla.bcast_duplist_curr = 0;

        atomic_set(&bat_priv->bla.loopdetect_next,
                   BATADV_BLA_LOOPDETECT_PERIODS);

        if (bat_priv->bla.claim_hash)
                return 0;

        bat_priv->bla.claim_hash = batadv_hash_new(128);
        if (!bat_priv->bla.claim_hash)
                return -ENOMEM;

        bat_priv->bla.backbone_hash = batadv_hash_new(32);
        if (!bat_priv->bla.backbone_hash) {
                batadv_hash_destroy(bat_priv->bla.claim_hash);
                return -ENOMEM;
        }

        batadv_hash_set_lock_class(bat_priv->bla.claim_hash,
                                   &batadv_claim_hash_lock_class_key);
        batadv_hash_set_lock_class(bat_priv->bla.backbone_hash,
                                   &batadv_backbone_hash_lock_class_key);

        batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hashes initialized\n");

        INIT_DELAYED_WORK(&bat_priv->bla.work, batadv_bla_periodic_work);

        queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work,
                           msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
        return 0;
}
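
/**
 * batadv_bla_check_duplist() - Check if a frame is in the broadcast dup.
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: contains the multicast packet to be checked
 * @payload_ptr: pointer to position inside the head buffer of the skb
 *  marking the start of the data to be CRC'ed
 * @orig: originator mac address, NULL if unknown
 *
 * Check if it is on our broadcast list. Another gateway might have sent the
 * same packet because it is connected to the same backbone, so we have to
 * remove this duplicate.
 *
 * This is performed by checking the CRC, which will tell us
 * with a good chance that it is the same packet. If it is furthermore
 * sent by another host, drop it. We allow equal packets from
 * the same host however as this might be intended.
 *
 * Return: true if a packet is in the duplicate list, false otherwise.
 */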
static bool batadv_bla_check_duplist(struct batadv_priv *bat_priv,
                                     struct sk_buff *skb, u8 *payload_ptr,
                                     const u8 *orig)
{
        struct batadv_bcast_duplist_entry *entry;
        bool ret = false;
        int i, curr;
        __be32 crc;

        /* calculate the crc ... */
        crc = batadv_skb_crc32(skb, payload_ptr);

        spin_lock_bh(&bat_priv->bla.bcast_duplist_lock);

        for (i = 0; i < BATADV_DUPLIST_SIZE; i++) {
                curr = (bat_priv->bla.bcast_duplist_curr + i);
                curr %= BATADV_DUPLIST_SIZE;
                entry = &bat_priv->bla.bcast_duplist[curr];

                /* we can stop searching if the entry is too old;
                 * later entries will be even older
                 */
                if (batadv_has_timed_out(entry->entrytime,
                                         BATADV_DUPLIST_TIMEOUT))
                        break;

                if (entry->crc != crc)
                        continue;

                /* are the originators both known and not anonymous? */
                if (orig && !is_zero_ether_addr(orig) &&
                    !is_zero_ether_addr(entry->orig)) {
                        /* If known, check if the new frame came from
                         * the same originator:
                         * We are safe to take identical frames from the
                         * same orig, if known, as multiplications in
                         * the mesh are detected via the (orig, seqno) pair.
                         * So we differentiate between identical-looking
                         * frames coming from the same orig, but from different
                         * senders, because these might be forwarded by
                         * different gateways to the same backbone.
                         */
                        if (batadv_compare_eth(entry->orig, orig))
                                continue;
                }

                /* this entry seems to be a duplicate, same crc, not too old,
                 * and from another gw. therefore return true to forbid it.
                 */
                ret = true;
                goto out;
        }

        /* not found, add a new entry (overwrite the oldest entry)
         * and allow it, its the first occurrence.
         */
        curr = (bat_priv->bla.bcast_duplist_curr + BATADV_DUPLIST_SIZE - 1);
        curr %= BATADV_DUPLIST_SIZE;
        entry = &bat_priv->bla.bcast_duplist[curr];
        entry->crc = crc;
        entry->entrytime = jiffies;

        /* known originator */
        if (orig)
                ether_addr_copy(entry->orig, orig);
        /* anonymous originator */
        else
                eth_zero_addr(entry->orig);

        bat_priv->bla.bcast_duplist_curr = curr;

out:
        spin_unlock_bh(&bat_priv->bla.bcast_duplist_lock);

        return ret;
}
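
/**
 * batadv_bla_check_ucast_duplist() - Check if a frame is in the broadcast dup.
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: contains the multicast packet to be checked, decapsulated from a
 *  unicast_packet
 *
 * Check if it is on our broadcast list. Another gateway might have sent the
 * same packet because it is connected to the same backbone, so we have to
 * remove this duplicate.
 *
 * Return: true if a packet is in the duplicate list, false otherwise.
 */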
static bool batadv_bla_check_ucast_duplist(struct batadv_priv *bat_priv,
                                           struct sk_buff *skb)
{
        return batadv_bla_check_duplist(bat_priv, skb, (u8 *)skb->data, NULL);
}
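
/**
 * batadv_bla_check_bcast_duplist() - Check if a frame is in the broadcast dup.
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: contains the bcast_packet to be checked
 *
 * Check if it is on our broadcast list. Another gateway might have sent the
 * same packet because it is connected to the same backbone, so we have to
 * remove this duplicate.
 *
 * Return: true if a packet is in the duplicate list, false otherwise.
 */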
bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
                                    struct sk_buff *skb)
{
        struct batadv_bcast_packet *bcast_packet;
        u8 *payload_ptr;

        bcast_packet = (struct batadv_bcast_packet *)skb->data;
        payload_ptr = (u8 *)(bcast_packet + 1);

        return batadv_bla_check_duplist(bat_priv, skb, payload_ptr,
                                        bcast_packet->orig);
}
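
/**
 * batadv_bla_is_backbone_gw_orig() - Check if the originator is a gateway for
 *  the VLAN identified by vid.
 * @bat_priv: the bat priv with all the soft interface information
 * @orig: originator mac address
 * @vid: VLAN identifier
 *
 * Return: true if orig is a backbone for this vid, false otherwise.
 */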
bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, u8 *orig,
                                    unsigned short vid)
{
        struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
        struct hlist_head *head;
        struct batadv_bla_backbone_gw *backbone_gw;
        int i;

        if (!atomic_read(&bat_priv->bridge_loop_avoidance))
                return false;

        if (!hash)
                return false;

        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];

                rcu_read_lock();
                hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
                        if (batadv_compare_eth(backbone_gw->orig, orig) &&
                            backbone_gw->vid == vid) {
                                rcu_read_unlock();
                                return true;
                        }
                }
                rcu_read_unlock();
        }

        return false;
}
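
/**
 * batadv_bla_is_backbone_gw() - check if originator is a backbone gw for a
 *  VLAN.
 * @skb: the frame to be checked
 * @orig_node: the orig_node of the frame
 * @hdr_size: maximum length of the frame
 *
 * Return: true if the orig_node is also a gateway on the soft interface,
 * otherwise it returns false.
 */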
bool batadv_bla_is_backbone_gw(struct sk_buff *skb,
                               struct batadv_orig_node *orig_node, int hdr_size)
{
        struct batadv_bla_backbone_gw *backbone_gw;
        unsigned short vid;

        if (!atomic_read(&orig_node->bat_priv->bridge_loop_avoidance))
                return false;

        /* first, find out the vid. */
        if (!pskb_may_pull(skb, hdr_size + ETH_HLEN))
                return false;

        vid = batadv_get_vid(skb, hdr_size);

        /* see if this originator is a backbone gw for this VLAN */
        backbone_gw = batadv_backbone_hash_find(orig_node->bat_priv,
                                                orig_node->orig, vid);
        if (!backbone_gw)
                return false;

        batadv_backbone_gw_put(backbone_gw);
        return true;
}
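
/**
 * batadv_bla_free() - free all bla structures
 * @bat_priv: the bat priv with all the soft interface information
 *
 * for softinterface free or module unload
 */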
void batadv_bla_free(struct batadv_priv *bat_priv)
{
        struct batadv_hard_iface *primary_if;

        cancel_delayed_work_sync(&bat_priv->bla.work);
        primary_if = batadv_primary_if_get_selected(bat_priv);

        if (bat_priv->bla.claim_hash) {
                batadv_bla_purge_claims(bat_priv, primary_if, 1);
                batadv_hash_destroy(bat_priv->bla.claim_hash);
                bat_priv->bla.claim_hash = NULL;
        }
        if (bat_priv->bla.backbone_hash) {
                batadv_bla_purge_backbone_gw(bat_priv, 1);
                batadv_hash_destroy(bat_priv->bla.backbone_hash);
                bat_priv->bla.backbone_hash = NULL;
        }
        batadv_hardif_put(primary_if);
}
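
/**
 * batadv_bla_loopdetect_check() - check and handle a detected loop
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the packet to check
 * @primary_if: interface where the request came on
 * @vid: the VLAN ID of the frame
 *
 * Checks if this packet is a loop detect frame which has been sent by us,
 * and schedules an uevent report if that is the case.
 *
 * Return: true if it is a loop detect frame which is to be dropped, false
 * otherwise.
 */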
static bool
batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
                            struct batadv_hard_iface *primary_if,
                            unsigned short vid)
{
        struct batadv_bla_backbone_gw *backbone_gw;
        struct ethhdr *ethhdr;
        bool ret;

        ethhdr = eth_hdr(skb);

        /* Only check for the MAC address and skip more checks here for
         * performance reasons - this function is on the hotpath, after all.
         */
        if (!batadv_compare_eth(ethhdr->h_source,
                                bat_priv->bla.loopdetect_addr))
                return false;

        /* If the packet came too late, don't forward it on the mesh
         * but don't consider that as loop. It might be a coincidence.
         */
        if (batadv_has_timed_out(bat_priv->bla.loopdetect_lasttime,
                                 BATADV_BLA_LOOPDETECT_TIMEOUT))
                return true;

        backbone_gw = batadv_bla_get_backbone_gw(bat_priv,
                                                 primary_if->net_dev->dev_addr,
                                                 vid, true);
        if (unlikely(!backbone_gw))
                return true;

        ret = queue_work(batadv_event_workqueue, &backbone_gw->report_work);

        /* backbone_gw is unreferenced in the report work function
         * if queue_work() call was successful
         */
        if (!ret)
                batadv_backbone_gw_put(backbone_gw);

        return true;
}
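
/**
 * batadv_bla_rx() - check packets coming from the mesh.
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the frame to be checked
 * @vid: the VLAN ID of the frame
 * @packet_type: the batman packet type this frame came in
 *
 * batadv_bla_rx avoidance checks if:
 *  * we have to race for a claim
 *  * if the frame is allowed on the LAN
 *
 * In these cases, the skb is further handled by this function
 *
 * Return: true if handled, otherwise it returns false and the caller shall
 * further process the skb.
 */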
bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
                   unsigned short vid, int packet_type)
{
        struct batadv_bla_backbone_gw *backbone_gw;
        struct ethhdr *ethhdr;
        struct batadv_bla_claim search_claim, *claim = NULL;
        struct batadv_hard_iface *primary_if;
        bool own_claim;
        bool ret;

        ethhdr = eth_hdr(skb);

        primary_if = batadv_primary_if_get_selected(bat_priv);
        if (!primary_if)
                goto handled;

        if (!atomic_read(&bat_priv->bridge_loop_avoidance))
                goto allow;

        if (batadv_bla_loopdetect_check(bat_priv, skb, primary_if, vid))
                goto handled;

        if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
                /* don't allow multicast packets while requests are in flight */
                if (is_multicast_ether_addr(ethhdr->h_dest))
                        /* this applies to both, flooded broadcasts and
                         * multicast packets delivered via unicasts
                         */
                        if (packet_type == BATADV_BCAST ||
                            packet_type == BATADV_UNICAST)
                                goto handled;

        /* potential duplicates from foreign BLA backbone gateways via
         * multicast-to-unicast conversion
         */
        if (is_multicast_ether_addr(ethhdr->h_dest) &&
            packet_type == BATADV_UNICAST &&
            batadv_bla_check_ucast_duplist(bat_priv, skb))
                goto handled;

        ether_addr_copy(search_claim.addr, ethhdr->h_source);
        search_claim.vid = vid;
        claim = batadv_claim_hash_find(bat_priv, &search_claim);

        if (!claim) {
                /* possible optimization: race for a claim */
                /* No claim exists yet, claim it for us!
                 */
                batadv_dbg(BATADV_DBG_BLA, bat_priv,
                           "%s(): Unclaimed MAC %pM found. Claim it. Local: %s\n",
                           __func__, ethhdr->h_source,
                           batadv_is_my_client(bat_priv,
                                               ethhdr->h_source, vid) ?
                           "yes" : "no");
                batadv_handle_claim(bat_priv, primary_if,
                                    primary_if->net_dev->dev_addr,
                                    ethhdr->h_source, vid);
                goto allow;
        }

        /* if it is our own claim ... */
        backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
        own_claim = batadv_compare_eth(backbone_gw->orig,
                                       primary_if->net_dev->dev_addr);
        batadv_backbone_gw_put(backbone_gw);

        if (own_claim) {
                /* ... allow it in any case */
                claim->lasttime = jiffies;
                goto allow;
        }

        /* if it is a broadcast ... */
        if (is_multicast_ether_addr(ethhdr->h_dest) &&
            (packet_type == BATADV_BCAST || packet_type == BATADV_UNICAST)) {
                /* ... drop it. the responsible gateway is in charge.
                 *
                 * We need to check packet type because with the gateway
                 * feature, broadcasts (like DHCP requests) may be sent
                 * using unicast packets.
                 */
                goto handled;
        } else {
                /* seems the client considers us as its best gateway.
                 * send a claim and update the claim table
                 * immediately.
                 */
                batadv_handle_claim(bat_priv, primary_if,
                                    primary_if->net_dev->dev_addr,
                                    ethhdr->h_source, vid);
                goto allow;
        }
allow:
        batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
        ret = false;
        goto out;

handled:
        kfree_skb(skb);
        ret = true;

out:
        batadv_hardif_put(primary_if);
        batadv_claim_put(claim);
        return ret;
}
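
/**
 * batadv_bla_tx() - check packets going into the mesh
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the frame to be checked
 * @vid: the VLAN ID of the frame
 *
 * batadv_bla_tx checks if:
 *  * a claim was received which has to be processed
 *  * the frame is allowed on the mesh
 *
 * in these cases, the skb is further handled by this function
 *
 * This call might reallocate skb data.
 *
 * Return: true if handled, otherwise it returns false and the caller shall
 * further process the skb.
 */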
bool batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
                   unsigned short vid)
{
        struct ethhdr *ethhdr;
        struct batadv_bla_claim search_claim, *claim = NULL;
        struct batadv_bla_backbone_gw *backbone_gw;
        struct batadv_hard_iface *primary_if;
        bool client_roamed;
        bool ret = false;

        primary_if = batadv_primary_if_get_selected(bat_priv);
        if (!primary_if)
                goto out;

        if (!atomic_read(&bat_priv->bridge_loop_avoidance))
                goto allow;

        if (batadv_bla_process_claim(bat_priv, primary_if, skb))
                goto handled;

        ethhdr = eth_hdr(skb);

        if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
                /* don't allow broadcasts while requests are in flight */
                if (is_multicast_ether_addr(ethhdr->h_dest))
                        goto handled;

        ether_addr_copy(search_claim.addr, ethhdr->h_source);
        search_claim.vid = vid;

        claim = batadv_claim_hash_find(bat_priv, &search_claim);

        /* if no claim exists, allow it. */
        if (!claim)
                goto allow;

        /* check if we are responsible. */
        backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
        client_roamed = batadv_compare_eth(backbone_gw->orig,
                                           primary_if->net_dev->dev_addr);
        batadv_backbone_gw_put(backbone_gw);

        if (client_roamed) {
                /* if yes, the client has roamed and we have
                 * to unclaim it.
                 */
                if (batadv_has_timed_out(claim->lasttime, 100)) {
                        /* only unclaim if the last claim entry is
                         * older than 100 ms to make sure we really
                         * have a roaming client here.
                         */
                        batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): Roaming client %pM detected. Unclaim it.\n",
                                   __func__, ethhdr->h_source);
                        batadv_handle_unclaim(bat_priv, primary_if,
                                              primary_if->net_dev->dev_addr,
                                              ethhdr->h_source, vid);
                        goto allow;
                } else {
                        batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): Race for claim %pM detected. Drop packet.\n",
                                   __func__, ethhdr->h_source);
                        goto handled;
                }
        }

        /* check if it is a multicast/broadcast frame */
        if (is_multicast_ether_addr(ethhdr->h_dest)) {
                /* drop it. the responsible gateway has forwarded it into
                 * the backbone network.
                 */
                goto handled;
        } else {
                /* we must allow it. at least if we are
                 * responsible for the DESTINATION.
                 */
                goto allow;
        }
allow:
        batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
        ret = false;
        goto out;
handled:
        ret = true;
out:
        batadv_hardif_put(primary_if);
        batadv_claim_put(claim);
        return ret;
}
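
/**
 * batadv_bla_claim_dump_entry() - dump one entry of the claim table
 * to a netlink socket
 * @msg: buffer for the message
 * @portid: netlink port
 * @cb: Control block containing additional options
 * @primary_if: primary interface
 * @claim: entry to dump
 *
 * Return: 0 or error code.
 */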
static int
batadv_bla_claim_dump_entry(struct sk_buff *msg, u32 portid,
                            struct netlink_callback *cb,
                            struct batadv_hard_iface *primary_if,
                            struct batadv_bla_claim *claim)
{
        u8 *primary_addr = primary_if->net_dev->dev_addr;
        u16 backbone_crc;
        bool is_own;
        void *hdr;
        int ret = -EINVAL;

        hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq,
                          &batadv_netlink_family, NLM_F_MULTI,
                          BATADV_CMD_GET_BLA_CLAIM);
        if (!hdr) {
                ret = -ENOBUFS;
                goto out;
        }

        genl_dump_check_consistent(cb, hdr);

        is_own = batadv_compare_eth(claim->backbone_gw->orig,
                                    primary_addr);

        spin_lock_bh(&claim->backbone_gw->crc_lock);
        backbone_crc = claim->backbone_gw->crc;
        spin_unlock_bh(&claim->backbone_gw->crc_lock);

        if (is_own)
                if (nla_put_flag(msg, BATADV_ATTR_BLA_OWN)) {
                        genlmsg_cancel(msg, hdr);
                        goto out;
                }

        if (nla_put(msg, BATADV_ATTR_BLA_ADDRESS, ETH_ALEN, claim->addr) ||
            nla_put_u16(msg, BATADV_ATTR_BLA_VID, claim->vid) ||
            nla_put(msg, BATADV_ATTR_BLA_BACKBONE, ETH_ALEN,
                    claim->backbone_gw->orig) ||
            nla_put_u16(msg, BATADV_ATTR_BLA_CRC,
                        backbone_crc)) {
                genlmsg_cancel(msg, hdr);
                goto out;
        }

        genlmsg_end(msg, hdr);
        ret = 0;

out:
        return ret;
}
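
/**
 * batadv_bla_claim_dump_bucket() - dump one bucket of the claim table
 * to a netlink socket
 * @msg: buffer for the message
 * @portid: netlink port
 * @cb: Control block containing additional options
 * @primary_if: primary interface
 * @hash: hash to dump
 * @bucket: bucket index to dump
 * @idx_skip: How many entries to skip
 *
 * Return: 0 or error code.
 */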
static int
batadv_bla_claim_dump_bucket(struct sk_buff *msg, u32 portid,
                             struct netlink_callback *cb,
                             struct batadv_hard_iface *primary_if,
                             struct batadv_hashtable *hash, unsigned int bucket,
                             int *idx_skip)
{
        struct batadv_bla_claim *claim;
        int idx = 0;
        int ret = 0;

        spin_lock_bh(&hash->list_locks[bucket]);
        cb->seq = atomic_read(&hash->generation) << 1 | 1;

        hlist_for_each_entry(claim, &hash->table[bucket], hash_entry) {
                if (idx++ < *idx_skip)
                        continue;

                ret = batadv_bla_claim_dump_entry(msg, portid, cb,
                                                  primary_if, claim);
                if (ret) {
                        *idx_skip = idx - 1;
                        goto unlock;
                }
        }

        *idx_skip = 0;
unlock:
        spin_unlock_bh(&hash->list_locks[bucket]);
        return ret;
}
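
/**
 * batadv_bla_claim_dump() - dump claim table to a netlink socket
 * @msg: buffer for the message
 * @cb: callback structure containing arguments
 *
 * Return: message length.
 */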
int batadv_bla_claim_dump(struct sk_buff *msg, struct netlink_callback *cb)
{
        struct batadv_hard_iface *primary_if = NULL;
        int portid = NETLINK_CB(cb->skb).portid;
        struct net *net = sock_net(cb->skb->sk);
        struct net_device *soft_iface;
        struct batadv_hashtable *hash;
        struct batadv_priv *bat_priv;
        int bucket = cb->args[0];
        int idx = cb->args[1];
        int ifindex;
        int ret = 0;

        ifindex = batadv_netlink_get_ifindex(cb->nlh,
                                             BATADV_ATTR_MESH_IFINDEX);
        if (!ifindex)
                return -EINVAL;

        soft_iface = dev_get_by_index(net, ifindex);
        if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
                ret = -ENODEV;
                goto out;
        }

        bat_priv = netdev_priv(soft_iface);
        hash = bat_priv->bla.claim_hash;

        primary_if = batadv_primary_if_get_selected(bat_priv);
        if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) {
                ret = -ENOENT;
                goto out;
        }

        while (bucket < hash->size) {
                if (batadv_bla_claim_dump_bucket(msg, portid, cb, primary_if,
                                                 hash, bucket, &idx))
                        break;
                bucket++;
        }

        cb->args[0] = bucket;
        cb->args[1] = idx;

        ret = msg->len;

out:
        batadv_hardif_put(primary_if);

        dev_put(soft_iface);

        return ret;
}
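
/**
 * batadv_bla_backbone_dump_entry() - dump one entry of the backbone table to a
 *  netlink socket
 * @msg: buffer for the message
 * @portid: netlink port
 * @cb: Control block containing additional options
 * @primary_if: primary interface
 * @backbone_gw: entry to dump
 *
 * Return: 0 or error code.
 */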
static int
batadv_bla_backbone_dump_entry(struct sk_buff *msg, u32 portid,
                               struct netlink_callback *cb,
                               struct batadv_hard_iface *primary_if,
                               struct batadv_bla_backbone_gw *backbone_gw)
{
        u8 *primary_addr = primary_if->net_dev->dev_addr;
        u16 backbone_crc;
        bool is_own;
        int msecs;
        void *hdr;
        int ret = -EINVAL;

        hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq,
                          &batadv_netlink_family, NLM_F_MULTI,
                          BATADV_CMD_GET_BLA_BACKBONE);
        if (!hdr) {
                ret = -ENOBUFS;
                goto out;
        }

        genl_dump_check_consistent(cb, hdr);

        is_own = batadv_compare_eth(backbone_gw->orig, primary_addr);

        spin_lock_bh(&backbone_gw->crc_lock);
        backbone_crc = backbone_gw->crc;
        spin_unlock_bh(&backbone_gw->crc_lock);

        msecs = jiffies_to_msecs(jiffies - backbone_gw->lasttime);

        if (is_own)
                if (nla_put_flag(msg, BATADV_ATTR_BLA_OWN)) {
                        genlmsg_cancel(msg, hdr);
                        goto out;
                }

        if (nla_put(msg, BATADV_ATTR_BLA_BACKBONE, ETH_ALEN,
                    backbone_gw->orig) ||
            nla_put_u16(msg, BATADV_ATTR_BLA_VID, backbone_gw->vid) ||
            nla_put_u16(msg, BATADV_ATTR_BLA_CRC,
                        backbone_crc) ||
            nla_put_u32(msg, BATADV_ATTR_LAST_SEEN_MSECS, msecs)) {
                genlmsg_cancel(msg, hdr);
                goto out;
        }

        genlmsg_end(msg, hdr);
        ret = 0;

out:
        return ret;
}
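
/**
 * batadv_bla_backbone_dump_bucket() - dump one bucket of the backbone table to
 *  a netlink socket
 * @msg: buffer for the message
 * @portid: netlink port
 * @cb: Control block containing additional options
 * @primary_if: primary interface
 * @hash: hash to dump
 * @bucket: bucket index to dump
 * @idx_skip: How many entries to skip
 *
 * Return: 0 or error code.
 */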
static int
batadv_bla_backbone_dump_bucket(struct sk_buff *msg, u32 portid,
                                struct netlink_callback *cb,
                                struct batadv_hard_iface *primary_if,
                                struct batadv_hashtable *hash,
                                unsigned int bucket, int *idx_skip)
{
        struct batadv_bla_backbone_gw *backbone_gw;
        int idx = 0;
        int ret = 0;

        spin_lock_bh(&hash->list_locks[bucket]);
        cb->seq = atomic_read(&hash->generation) << 1 | 1;

        hlist_for_each_entry(backbone_gw, &hash->table[bucket], hash_entry) {
                if (idx++ < *idx_skip)
                        continue;

                ret = batadv_bla_backbone_dump_entry(msg, portid, cb,
                                                     primary_if, backbone_gw);
                if (ret) {
                        *idx_skip = idx - 1;
                        goto unlock;
                }
        }

        *idx_skip = 0;
unlock:
        spin_unlock_bh(&hash->list_locks[bucket]);
        return ret;
}
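
/**
 * batadv_bla_backbone_dump() - dump backbone table to a netlink socket
 * @msg: buffer for the message
 * @cb: callback structure containing arguments
 *
 * Return: message length.
 */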
int batadv_bla_backbone_dump(struct sk_buff *msg, struct netlink_callback *cb)
{
        struct batadv_hard_iface *primary_if = NULL;
        int portid = NETLINK_CB(cb->skb).portid;
        struct net *net = sock_net(cb->skb->sk);
        struct net_device *soft_iface;
        struct batadv_hashtable *hash;
        struct batadv_priv *bat_priv;
        int bucket = cb->args[0];
        int idx = cb->args[1];
        int ifindex;
        int ret = 0;

        ifindex = batadv_netlink_get_ifindex(cb->nlh,
                                             BATADV_ATTR_MESH_IFINDEX);
        if (!ifindex)
                return -EINVAL;

        soft_iface = dev_get_by_index(net, ifindex);
        if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
                ret = -ENODEV;
                goto out;
        }

        bat_priv = netdev_priv(soft_iface);
        hash = bat_priv->bla.backbone_hash;

        primary_if = batadv_primary_if_get_selected(bat_priv);
        if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) {
                ret = -ENOENT;
                goto out;
        }

        while (bucket < hash->size) {
                if (batadv_bla_backbone_dump_bucket(msg, portid, cb, primary_if,
                                                    hash, bucket, &idx))
                        break;
                bucket++;
        }

        cb->args[0] = bucket;
        cb->args[1] = idx;

        ret = msg->len;

out:
        batadv_hardif_put(primary_if);

        dev_put(soft_iface);

        return ret;
}

#ifdef CONFIG_BATMAN_ADV_DAT
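/**
 * batadv_bla_check_claim() - check if address is claimed
 * @bat_priv: the bat priv with all the soft interface information
 * @addr: mac address of which the claim status is checked
 * @vid: the VLAN ID
 *
 * addr is checked if this address is claimed by the local device itself.
 *
 * Return: true if bla is disabled or the mac is claimed by the device,
 * false if the device addr is already claimed by another gateway
 */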
bool batadv_bla_check_claim(struct batadv_priv *bat_priv,
                            u8 *addr, unsigned short vid)
{
        struct batadv_bla_claim search_claim;
        struct batadv_bla_claim *claim = NULL;
        struct batadv_hard_iface *primary_if = NULL;
        bool ret = true;

        if (!atomic_read(&bat_priv->bridge_loop_avoidance))
                return ret;

        primary_if = batadv_primary_if_get_selected(bat_priv);
        if (!primary_if)
                return ret;

        /* First look if the mac address is claimed */
        ether_addr_copy(search_claim.addr, addr);
        search_claim.vid = vid;

        claim = batadv_claim_hash_find(bat_priv, &search_claim);

        /* If there is a claim and we are not owner of the claim,
         * return false.
         */
        if (claim) {
                if (!batadv_compare_eth(claim->backbone_gw->orig,
                                        primary_if->net_dev->dev_addr))
                        ret = false;
                batadv_claim_put(claim);
        }

        batadv_hardif_put(primary_if);
        return ret;
}
#endif