#include "bridge_loop_avoidance.h"
#include "main.h"

#include <linux/atomic.h>
#include <linux/byteorder/generic.h>
#include <linux/compiler.h>
#include <linux/crc16.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/fs.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/seq_file.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <net/arp.h>

#include "hard-interface.h"
#include "hash.h"
#include "originator.h"
#include "packet.h"
#include "translation-table.h"

static const u8 batadv_announce_mac[4] = {0x43, 0x05, 0x43, 0x05};

static void batadv_bla_periodic_work(struct work_struct *work);
static void
batadv_bla_send_announce(struct batadv_priv *bat_priv,
			 struct batadv_bla_backbone_gw *backbone_gw);

/**
 * batadv_choose_claim - choose the right bucket for a claim
 * @data: data to hash
 * @size: size of the hash table
 *
 * Return: the hash index of the claim
 */
static inline u32 batadv_choose_claim(const void *data, u32 size)
{
	struct batadv_bla_claim *claim = (struct batadv_bla_claim *)data;
	u32 hash = 0;

	hash = jhash(&claim->addr, sizeof(claim->addr), hash);
	hash = jhash(&claim->vid, sizeof(claim->vid), hash);

	return hash % size;
}

/**
 * batadv_choose_backbone_gw - choose the right bucket for a backbone gateway
 * @data: data to hash
 * @size: size of the hash table
 *
 * Return: the hash index of the backbone gateway
 */
static inline u32 batadv_choose_backbone_gw(const void *data, u32 size)
{
	const struct batadv_bla_backbone_gw *gw = data;
	u32 hash = 0;

	hash = jhash(&gw->orig, sizeof(gw->orig), hash);
	hash = jhash(&gw->vid, sizeof(gw->vid), hash);

	return hash % size;
}

/**
 * batadv_compare_backbone_gw - compare address and vid of two backbone gws
 * @node: list node of the first entry to compare
 * @data2: pointer to the second backbone gateway
 *
 * Return: 1 if the backbones have the same data, 0 otherwise
 */
static int batadv_compare_backbone_gw(const struct hlist_node *node,
				      const void *data2)
{
	const void *data1 = container_of(node, struct batadv_bla_backbone_gw,
					 hash_entry);
	const struct batadv_bla_backbone_gw *gw1 = data1;
	const struct batadv_bla_backbone_gw *gw2 = data2;

	if (!batadv_compare_eth(gw1->orig, gw2->orig))
		return 0;

	if (gw1->vid != gw2->vid)
		return 0;

	return 1;
}

/**
 * batadv_compare_claim - compare address and vid of two claims
 * @node: list node of the first entry to compare
 * @data2: pointer to the second claim
 *
 * Return: 1 if the claims have the same data, 0 otherwise
 */
static int batadv_compare_claim(const struct hlist_node *node,
				const void *data2)
{
	const void *data1 = container_of(node, struct batadv_bla_claim,
					 hash_entry);
	const struct batadv_bla_claim *cl1 = data1;
	const struct batadv_bla_claim *cl2 = data2;

	if (!batadv_compare_eth(cl1->addr, cl2->addr))
		return 0;

	if (cl1->vid != cl2->vid)
		return 0;

	return 1;
}

/**
 * batadv_backbone_gw_release - release the backbone gw and queue it for
 *  freeing after the rcu grace period
 * @ref: kref pointer of the backbone gw
 */
static void batadv_backbone_gw_release(struct kref *ref)
{
	struct batadv_bla_backbone_gw *backbone_gw;

	backbone_gw = container_of(ref, struct batadv_bla_backbone_gw,
				   refcount);

	kfree_rcu(backbone_gw, rcu);
}

/**
 * batadv_backbone_gw_put - decrement the backbone gw refcounter and possibly
 *  release it
 * @backbone_gw: backbone gateway to be free'd
 */
static void batadv_backbone_gw_put(struct batadv_bla_backbone_gw *backbone_gw)
{
	kref_put(&backbone_gw->refcount, batadv_backbone_gw_release);
}

/**
 * batadv_claim_release - release the claim, drop its backbone gw reference
 *  and queue it for freeing after the rcu grace period
 * @ref: kref pointer of the claim
 */
static void batadv_claim_release(struct kref *ref)
{
	struct batadv_bla_claim *claim;

	claim = container_of(ref, struct batadv_bla_claim, refcount);

	batadv_backbone_gw_put(claim->backbone_gw);
	kfree_rcu(claim, rcu);
}

/**
 * batadv_claim_put - decrement the claim refcounter and possibly release it
 * @claim: claim to be free'd
 */
static void batadv_claim_put(struct batadv_bla_claim *claim)
{
	kref_put(&claim->refcount, batadv_claim_release);
}

/**
 * batadv_claim_hash_find - looks for a claim in the claim hash
 * @bat_priv: the bat priv with all the soft interface information
 * @data: search key (address and vid of the claim)
 *
 * Return: claim if found or NULL otherwise.
 */
static struct batadv_bla_claim
*batadv_claim_hash_find(struct batadv_priv *bat_priv,
			struct batadv_bla_claim *data)
{
	struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
	struct hlist_head *head;
	struct batadv_bla_claim *claim;
	struct batadv_bla_claim *claim_tmp = NULL;
	int index;

	if (!hash)
		return NULL;

	index = batadv_choose_claim(data, hash->size);
	head = &hash->table[index];

	rcu_read_lock();
	hlist_for_each_entry_rcu(claim, head, hash_entry) {
		if (!batadv_compare_claim(&claim->hash_entry, data))
			continue;

		if (!kref_get_unless_zero(&claim->refcount))
			continue;

		claim_tmp = claim;
		break;
	}
	rcu_read_unlock();

	return claim_tmp;
}

/**
 * batadv_backbone_hash_find - looks for a backbone gateway in the hash
 * @bat_priv: the bat priv with all the soft interface information
 * @addr: the address of the originator
 * @vid: the VLAN ID
 *
 * Return: backbone gateway if found or NULL otherwise
 */
static struct batadv_bla_backbone_gw *
batadv_backbone_hash_find(struct batadv_priv *bat_priv, u8 *addr,
			  unsigned short vid)
{
	struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
	struct hlist_head *head;
	struct batadv_bla_backbone_gw search_entry, *backbone_gw;
	struct batadv_bla_backbone_gw *backbone_gw_tmp = NULL;
	int index;

	if (!hash)
		return NULL;

	ether_addr_copy(search_entry.orig, addr);
	search_entry.vid = vid;

	index = batadv_choose_backbone_gw(&search_entry, hash->size);
	head = &hash->table[index];

	rcu_read_lock();
	hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
		if (!batadv_compare_backbone_gw(&backbone_gw->hash_entry,
						&search_entry))
			continue;

		if (!kref_get_unless_zero(&backbone_gw->refcount))
			continue;

		backbone_gw_tmp = backbone_gw;
		break;
	}
	rcu_read_unlock();

	return backbone_gw_tmp;
}

/**
 * batadv_bla_del_backbone_claims - delete all claims for a backbone gateway
 * @backbone_gw: backbone gateway where the claims should be removed
 */
static void
batadv_bla_del_backbone_claims(struct batadv_bla_backbone_gw *backbone_gw)
{
	struct batadv_hashtable *hash;
	struct hlist_node *node_tmp;
	struct hlist_head *head;
	struct batadv_bla_claim *claim;
	int i;
	spinlock_t *list_lock;	/* protects write access to the hash lists */

	hash = backbone_gw->bat_priv->bla.claim_hash;
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(claim, node_tmp,
					  head, hash_entry) {
			if (claim->backbone_gw != backbone_gw)
				continue;

			batadv_claim_put(claim);
			hlist_del_rcu(&claim->hash_entry);
		}
		spin_unlock_bh(list_lock);
	}

	/* all claims gone, initialize CRC */
	spin_lock_bh(&backbone_gw->crc_lock);
	backbone_gw->crc = BATADV_BLA_CRC_INIT;
	spin_unlock_bh(&backbone_gw->crc_lock);
}

/**
 * batadv_bla_send_claim - sends a claim frame according to the provided info
 * @bat_priv: the bat priv with all the soft interface information
 * @mac: the mac address to be announced within the claim
 * @vid: the VLAN ID
 * @claimtype: the type of the claim (CLAIM, UNCLAIM, ANNOUNCE, REQUEST)
 */
static void batadv_bla_send_claim(struct batadv_priv *bat_priv, u8 *mac,
				  unsigned short vid, int claimtype)
{
	struct sk_buff *skb;
	struct ethhdr *ethhdr;
	struct batadv_hard_iface *primary_if;
	struct net_device *soft_iface;
	u8 *hw_src;
	struct batadv_bla_claim_dst local_claim_dest;
	__be32 zeroip = 0;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		return;

	memcpy(&local_claim_dest, &bat_priv->bla.claim_dest,
	       sizeof(local_claim_dest));
	local_claim_dest.type = claimtype;

	soft_iface = primary_if->soft_iface;

	skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
			 /* IP DST: 0.0.0.0 */
			 zeroip,
			 primary_if->soft_iface,
			 /* IP SRC: 0.0.0.0 */
			 zeroip,
			 /* Ethernet DST: broadcast */
			 NULL,
			 /* Ethernet SRC/HW SRC: originator mac */
			 primary_if->net_dev->dev_addr,
			 /* HW DST: FF:43:05:XX:YY:YY
			  * with XX   = claim type
			  * and YY:YY = group id
			  */
			 (u8 *)&local_claim_dest);

	if (!skb)
		goto out;

	ethhdr = (struct ethhdr *)skb->data;
	hw_src = (u8 *)ethhdr + ETH_HLEN + sizeof(struct arphdr);

	/* now we pretend that the client would have sent this ... */
	switch (claimtype) {
	case BATADV_CLAIM_TYPE_CLAIM:
		/* normal claim frame
		 * set Ethernet SRC to the clients mac
		 */
		ether_addr_copy(ethhdr->h_source, mac);
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "bla_send_claim(): CLAIM %pM on vid %d\n", mac,
			   BATADV_PRINT_VID(vid));
		break;
	case BATADV_CLAIM_TYPE_UNCLAIM:
		/* unclaim frame
		 * set HW SRC to the clients mac
		 */
		ether_addr_copy(hw_src, mac);
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "bla_send_claim(): UNCLAIM %pM on vid %d\n", mac,
			   BATADV_PRINT_VID(vid));
		break;
	case BATADV_CLAIM_TYPE_ANNOUNCE:
		/* announcement frame
		 * set HW SRC to the special mac containing the crc
		 */
		ether_addr_copy(hw_src, mac);
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "bla_send_claim(): ANNOUNCE of %pM on vid %d\n",
			   ethhdr->h_source, BATADV_PRINT_VID(vid));
		break;
	case BATADV_CLAIM_TYPE_REQUEST:
		/* request frame
		 * set HW SRC and header destination to the receiving backbone
		 * gws mac
		 */
		ether_addr_copy(hw_src, mac);
		ether_addr_copy(ethhdr->h_dest, mac);
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "bla_send_claim(): REQUEST of %pM to %pM on vid %d\n",
			   ethhdr->h_source, ethhdr->h_dest,
			   BATADV_PRINT_VID(vid));
		break;
	}

	if (vid & BATADV_VLAN_HAS_TAG) {
		skb = vlan_insert_tag(skb, htons(ETH_P_8021Q),
				      vid & VLAN_VID_MASK);
		if (!skb)
			goto out;
	}

	skb_reset_mac_header(skb);
	skb->protocol = eth_type_trans(skb, soft_iface);
	batadv_inc_counter(bat_priv, BATADV_CNT_RX);
	batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
			   skb->len + ETH_HLEN);
	soft_iface->last_rx = jiffies;

	netif_rx(skb);
out:
	if (primary_if)
		batadv_hardif_put(primary_if);
}
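/* Wire format of the claim frames built above: an ordinary ARP reply with
 * both IP addresses set to 0.0.0.0, where
 *
 *   Ethernet dst: ff:ff:ff:ff:ff:ff (broadcast)
 *   Ethernet src: primary interface MAC (the client MAC for CLAIM frames)
 *   ARP hw_src:   payload address (client MAC, CRC carrier or backbone MAC,
 *                 depending on the claim type)
 *   ARP hw_dst:   struct batadv_bla_claim_dst, i.e. the magic ff:43:05,
 *                 one type byte and the two byte group id
 *
 * Receivers detect claim frames by the magic in hw_dst, see
 * batadv_bla_process_claim().
 */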

/**
 * batadv_bla_get_backbone_gw - finds or creates a backbone gateway
 * @bat_priv: the bat priv with all the soft interface information
 * @orig: the mac address of the originator
 * @vid: the VLAN ID
 * @own_backbone: set if the requested backbone is local
 *
 * Searches for the backbone gw or creates a new one if it could not be found.
 *
 * Return: the (possibly created) backbone gateway or NULL on error
 */
static struct batadv_bla_backbone_gw *
batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, u8 *orig,
			   unsigned short vid, bool own_backbone)
{
	struct batadv_bla_backbone_gw *entry;
	struct batadv_orig_node *orig_node;
	int hash_added;

	entry = batadv_backbone_hash_find(bat_priv, orig, vid);

	if (entry)
		return entry;

	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "bla_get_backbone_gw(): not found (%pM, %d), creating new entry\n",
		   orig, BATADV_PRINT_VID(vid));

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return NULL;

	entry->vid = vid;
	entry->lasttime = jiffies;
	entry->crc = BATADV_BLA_CRC_INIT;
	entry->bat_priv = bat_priv;
	spin_lock_init(&entry->crc_lock);
	atomic_set(&entry->request_sent, 0);
	atomic_set(&entry->wait_periods, 0);
	ether_addr_copy(entry->orig, orig);

	/* one for the hash, one for returning */
	kref_init(&entry->refcount);
	kref_get(&entry->refcount);

	hash_added = batadv_hash_add(bat_priv->bla.backbone_hash,
				     batadv_compare_backbone_gw,
				     batadv_choose_backbone_gw, entry,
				     &entry->hash_entry);

	if (unlikely(hash_added != 0)) {
		/* hash failed, free the structure */
		kfree(entry);
		return NULL;
	}

	/* this is a gateway now, remove any TT entry on this VLAN */
	orig_node = batadv_orig_hash_find(bat_priv, orig);
	if (orig_node) {
		batadv_tt_global_del_orig(bat_priv, orig_node, vid,
					  "became a backbone gateway");
		batadv_orig_node_put(orig_node);
	}

	if (own_backbone) {
		batadv_bla_send_announce(bat_priv, entry);

		/* this will be decreased in the worker thread */
		atomic_inc(&entry->request_sent);
		atomic_set(&entry->wait_periods, BATADV_BLA_WAIT_PERIODS);
		atomic_inc(&bat_priv->bla.num_requests);
	}

	return entry;
}

/**
 * batadv_bla_update_own_backbone_gw - updates the own backbone gw for a VLAN
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the selected primary interface
 * @vid: VLAN identifier
 *
 * update or add the own backbone gw to make sure we announce
 * where we receive other backbone gws
 */
static void
batadv_bla_update_own_backbone_gw(struct batadv_priv *bat_priv,
				  struct batadv_hard_iface *primary_if,
				  unsigned short vid)
{
	struct batadv_bla_backbone_gw *backbone_gw;

	backbone_gw = batadv_bla_get_backbone_gw(bat_priv,
						 primary_if->net_dev->dev_addr,
						 vid, true);
	if (unlikely(!backbone_gw))
		return;

	backbone_gw->lasttime = jiffies;
	batadv_backbone_gw_put(backbone_gw);
}

/**
 * batadv_bla_answer_request - answer a bla request by sending own claims
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: interface where the request came on
 * @vid: the vid where the request came on
 *
 * Repeat all of our own claims, and finally send an ANNOUNCE frame
 * to allow the requester another check if the CRC is correct now.
 */
static void batadv_bla_answer_request(struct batadv_priv *bat_priv,
				      struct batadv_hard_iface *primary_if,
				      unsigned short vid)
{
	struct hlist_head *head;
	struct batadv_hashtable *hash;
	struct batadv_bla_claim *claim;
	struct batadv_bla_backbone_gw *backbone_gw;
	int i;

	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "bla_answer_request(): received a claim request, send all of our own claims again\n");

	backbone_gw = batadv_backbone_hash_find(bat_priv,
						primary_if->net_dev->dev_addr,
						vid);
	if (!backbone_gw)
		return;

	hash = bat_priv->bla.claim_hash;
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(claim, head, hash_entry) {
			/* only own claims are interesting */
			if (claim->backbone_gw != backbone_gw)
				continue;

			batadv_bla_send_claim(bat_priv, claim->addr, claim->vid,
					      BATADV_CLAIM_TYPE_CLAIM);
		}
		rcu_read_unlock();
	}

	/* finally, send an announcement frame */
	batadv_bla_send_announce(bat_priv, backbone_gw);
	batadv_backbone_gw_put(backbone_gw);
}

/**
 * batadv_bla_send_request - send a request to repeat claims
 * @backbone_gw: the backbone gateway from whom we are out of sync
 *
 * When the crc is wrong, ask the backbone gateway for a full table update.
 * After the request, it will repeat all of its own claims and finally
 * send an announcement claim with which we can check again.
 */
static void batadv_bla_send_request(struct batadv_bla_backbone_gw *backbone_gw)
{
	/* first, remove all old entries */
	batadv_bla_del_backbone_claims(backbone_gw);

	batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
		   "Sending REQUEST to %pM\n", backbone_gw->orig);

	/* send request */
	batadv_bla_send_claim(backbone_gw->bat_priv, backbone_gw->orig,
			      backbone_gw->vid, BATADV_CLAIM_TYPE_REQUEST);

	/* no local broadcasts should be sent or received, for now. */
	if (!atomic_read(&backbone_gw->request_sent)) {
		atomic_inc(&backbone_gw->bat_priv->bla.num_requests);
		atomic_set(&backbone_gw->request_sent, 1);
	}
}

/**
 * batadv_bla_send_announce - send an announcement frame
 * @bat_priv: the bat priv with all the soft interface information
 * @backbone_gw: our backbone gateway which should be announced
 */
static void batadv_bla_send_announce(struct batadv_priv *bat_priv,
				     struct batadv_bla_backbone_gw *backbone_gw)
{
	u8 mac[ETH_ALEN];
	__be16 crc;

	memcpy(mac, batadv_announce_mac, 4);
	spin_lock_bh(&backbone_gw->crc_lock);
	crc = htons(backbone_gw->crc);
	spin_unlock_bh(&backbone_gw->crc_lock);
	memcpy(&mac[4], &crc, 2);

	batadv_bla_send_claim(bat_priv, mac, backbone_gw->vid,
			      BATADV_CLAIM_TYPE_ANNOUNCE);
}
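/* The announced "client" address built above thus reads
 * 43:05:43:05:CC:CC, where CC:CC is the backbone gateway's current claim
 * CRC in network byte order; batadv_handle_announce() unpacks it the same
 * way to compare its local checksum against the announced one.
 */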

/**
 * batadv_bla_add_claim - Adds a claim in the claim hash
 * @bat_priv: the bat priv with all the soft interface information
 * @mac: the mac address of the claim
 * @vid: the VLAN ID of the frame
 * @backbone_gw: the backbone gateway which claims it
 */
static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
				 const u8 *mac, const unsigned short vid,
				 struct batadv_bla_backbone_gw *backbone_gw)
{
	struct batadv_bla_claim *claim;
	struct batadv_bla_claim search_claim;
	int hash_added;

	ether_addr_copy(search_claim.addr, mac);
	search_claim.vid = vid;
	claim = batadv_claim_hash_find(bat_priv, &search_claim);

	/* create a new claim entry if it does not exist yet. */
	if (!claim) {
		claim = kzalloc(sizeof(*claim), GFP_ATOMIC);
		if (!claim)
			return;

		ether_addr_copy(claim->addr, mac);
		claim->vid = vid;
		claim->lasttime = jiffies;
		claim->backbone_gw = backbone_gw;

		/* one for the hash, one for the local reference */
		kref_init(&claim->refcount);
		kref_get(&claim->refcount);
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "bla_add_claim(): adding new entry %pM, vid %d to hash ...\n",
			   mac, BATADV_PRINT_VID(vid));
		hash_added = batadv_hash_add(bat_priv->bla.claim_hash,
					     batadv_compare_claim,
					     batadv_choose_claim, claim,
					     &claim->hash_entry);

		if (unlikely(hash_added != 0)) {
			/* hash failed, free the structure */
			kfree(claim);
			return;
		}
	} else {
		claim->lasttime = jiffies;
		if (claim->backbone_gw == backbone_gw)
			/* no need to register a new backbone */
			goto claim_free_ref;

		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "bla_add_claim(): changing ownership for %pM, vid %d\n",
			   mac, BATADV_PRINT_VID(vid));

		/* remove the claim from the old backbone's checksum */
		spin_lock_bh(&claim->backbone_gw->crc_lock);
		claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
		spin_unlock_bh(&claim->backbone_gw->crc_lock);
		batadv_backbone_gw_put(claim->backbone_gw);
	}
	/* set (new) backbone gw */
	kref_get(&backbone_gw->refcount);
	claim->backbone_gw = backbone_gw;

	spin_lock_bh(&backbone_gw->crc_lock);
	backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
	spin_unlock_bh(&backbone_gw->crc_lock);
	backbone_gw->lasttime = jiffies;

claim_free_ref:
	batadv_claim_put(claim);
}
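/* The per-backbone checksum is maintained incrementally: the crc16 of a
 * client MAC is XORed into backbone_gw->crc when the claim is added and
 * XORed out again when it moves away or is deleted. Since XOR commutes,
 * two gateways holding the same set of claims compute identical CRCs no
 * matter in which order the claims were learned.
 */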

/**
 * batadv_bla_del_claim - delete a claim from the claim hash
 * @bat_priv: the bat priv with all the soft interface information
 * @mac: mac address of the claim to be removed
 * @vid: VLAN id for the claim to be removed
 */
static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
				 const u8 *mac, const unsigned short vid)
{
	struct batadv_bla_claim search_claim, *claim;

	ether_addr_copy(search_claim.addr, mac);
	search_claim.vid = vid;
	claim = batadv_claim_hash_find(bat_priv, &search_claim);
	if (!claim)
		return;

	batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_del_claim(): %pM, vid %d\n",
		   mac, BATADV_PRINT_VID(vid));

	batadv_hash_remove(bat_priv->bla.claim_hash, batadv_compare_claim,
			   batadv_choose_claim, claim);
	batadv_claim_put(claim); /* reference from the hash is gone */

	spin_lock_bh(&claim->backbone_gw->crc_lock);
	claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
	spin_unlock_bh(&claim->backbone_gw->crc_lock);

	/* don't need the reference from hash_find() anymore */
	batadv_claim_put(claim);
}

/**
 * batadv_handle_announce - check for ANNOUNCE frame
 * @bat_priv: the bat priv with all the soft interface information
 * @an_addr: announcement mac address (ARP Sender HW address)
 * @backbone_addr: originator address of the sender (Ethernet source MAC)
 * @vid: the VLAN ID of the frame
 *
 * Return: 1 if handled
 */
static int batadv_handle_announce(struct batadv_priv *bat_priv, u8 *an_addr,
				  u8 *backbone_addr, unsigned short vid)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	u16 backbone_crc, crc;

	if (memcmp(an_addr, batadv_announce_mac, 4) != 0)
		return 0;

	backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid,
						 false);

	if (unlikely(!backbone_gw))
		return 1;

	/* handle as ANNOUNCE frame */
	backbone_gw->lasttime = jiffies;
	crc = ntohs(*((__be16 *)(&an_addr[4])));

	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "handle_announce(): ANNOUNCE vid %d (sent by %pM)... CRC = %#.4x\n",
		   BATADV_PRINT_VID(vid), backbone_gw->orig, crc);

	spin_lock_bh(&backbone_gw->crc_lock);
	backbone_crc = backbone_gw->crc;
	spin_unlock_bh(&backbone_gw->crc_lock);

	if (backbone_crc != crc) {
		batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
			   "handle_announce(): CRC FAILED for %pM/%d (my = %#.4x, sent = %#.4x)\n",
			   backbone_gw->orig,
			   BATADV_PRINT_VID(backbone_gw->vid),
			   backbone_crc, crc);

		batadv_bla_send_request(backbone_gw);
	} else {
		/* if we have sent a request and the crc was OK,
		 * we can allow traffic again.
		 */
		if (atomic_read(&backbone_gw->request_sent)) {
			atomic_dec(&backbone_gw->bat_priv->bla.num_requests);
			atomic_set(&backbone_gw->request_sent, 0);
		}
	}

	batadv_backbone_gw_put(backbone_gw);
	return 1;
}

/**
 * batadv_handle_request - check for REQUEST frame
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the primary hard interface of this batman soft interface
 * @backbone_addr: backbone address to be requested (ARP sender HW MAC)
 * @ethhdr: ethernet header of a packet
 * @vid: the VLAN ID of the frame
 *
 * Return: 1 if handled
 */
static int batadv_handle_request(struct batadv_priv *bat_priv,
				 struct batadv_hard_iface *primary_if,
				 u8 *backbone_addr, struct ethhdr *ethhdr,
				 unsigned short vid)
{
	/* check for REQUEST frame */
	if (!batadv_compare_eth(backbone_addr, ethhdr->h_dest))
		return 0;

	/* sanity check, this should not happen on a normal switch,
	 * we ignore it in this case.
	 */
	if (!batadv_compare_eth(ethhdr->h_dest, primary_if->net_dev->dev_addr))
		return 1;

	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "handle_request(): REQUEST vid %d (sent by %pM)...\n",
		   BATADV_PRINT_VID(vid), ethhdr->h_source);

	batadv_bla_answer_request(bat_priv, primary_if, vid);
	return 1;
}

/**
 * batadv_handle_unclaim - check for UNCLAIM frame
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the primary hard interface of this batman soft interface
 * @backbone_addr: originator address of the backbone (Ethernet source)
 * @claim_addr: client to be unclaimed (ARP sender HW MAC)
 * @vid: the VLAN ID of the frame
 *
 * Return: 1 if handled
 */
static int batadv_handle_unclaim(struct batadv_priv *bat_priv,
				 struct batadv_hard_iface *primary_if,
				 u8 *backbone_addr, u8 *claim_addr,
				 unsigned short vid)
{
	struct batadv_bla_backbone_gw *backbone_gw;

	/* unclaim in any case if it is our own */
	if (primary_if && batadv_compare_eth(backbone_addr,
					     primary_if->net_dev->dev_addr))
		batadv_bla_send_claim(bat_priv, claim_addr, vid,
				      BATADV_CLAIM_TYPE_UNCLAIM);

	backbone_gw = batadv_backbone_hash_find(bat_priv, backbone_addr, vid);

	if (!backbone_gw)
		return 1;

	/* this must be an UNCLAIM frame */
	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "handle_unclaim(): UNCLAIM %pM on vid %d (sent by %pM)...\n",
		   claim_addr, BATADV_PRINT_VID(vid), backbone_gw->orig);

	batadv_bla_del_claim(bat_priv, claim_addr, vid);
	batadv_backbone_gw_put(backbone_gw);
	return 1;
}

/**
 * batadv_handle_claim - check for CLAIM frame
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the primary hard interface of this batman soft interface
 * @backbone_addr: originator address of the backbone (ARP sender HW MAC)
 * @claim_addr: client mac address to be claimed (Ethernet source)
 * @vid: the VLAN ID of the frame
 *
 * Return: 1 if handled
 */
static int batadv_handle_claim(struct batadv_priv *bat_priv,
			       struct batadv_hard_iface *primary_if,
			       u8 *backbone_addr, u8 *claim_addr,
			       unsigned short vid)
{
	struct batadv_bla_backbone_gw *backbone_gw;

	/* register the gateway if not yet available, and add the claim. */
	backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid,
						 false);

	if (unlikely(!backbone_gw))
		return 1;

	/* this must be a CLAIM frame */
	batadv_bla_add_claim(bat_priv, claim_addr, vid, backbone_gw);
	if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
		batadv_bla_send_claim(bat_priv, claim_addr, vid,
				      BATADV_CLAIM_TYPE_CLAIM);

	/* TODO: we could call something like tt_local_del() here. */

	batadv_backbone_gw_put(backbone_gw);
	return 1;
}

/**
 * batadv_check_claim_group - check for claim group membership
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the primary interface of this batman interface
 * @hw_src: the Hardware source in the ARP Header
 * @hw_dst: the Hardware destination in the ARP Header
 * @ethhdr: pointer to the Ethernet header of the claim frame
 *
 * checks if it is a claim packet and if it is on the same group.
 * This function also applies the group ID of the sender
 * if it is in the same mesh.
 *
 * Return:
 *	2  - if it is a claim packet and on the same group
 *	1  - if is a claim packet from another group
 *	0  - if it is not a claim packet
 */
static int batadv_check_claim_group(struct batadv_priv *bat_priv,
				    struct batadv_hard_iface *primary_if,
				    u8 *hw_src, u8 *hw_dst,
				    struct ethhdr *ethhdr)
{
	u8 *backbone_addr;
	struct batadv_orig_node *orig_node;
	struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;

	bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
	bla_dst_own = &bat_priv->bla.claim_dest;

	/* if announcement packet, use the source,
	 * otherwise assume it is in the hw_src
	 */
	switch (bla_dst->type) {
	case BATADV_CLAIM_TYPE_CLAIM:
		backbone_addr = hw_src;
		break;
	case BATADV_CLAIM_TYPE_REQUEST:
	case BATADV_CLAIM_TYPE_ANNOUNCE:
	case BATADV_CLAIM_TYPE_UNCLAIM:
		backbone_addr = ethhdr->h_source;
		break;
	default:
		return 0;
	}

	/* don't accept claim frames from ourselves */
	if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
		return 0;

	/* if its already the same group, it is fine. */
	if (bla_dst->group == bla_dst_own->group)
		return 2;

	/* lets see if this originator is in our mesh */
	orig_node = batadv_orig_hash_find(bat_priv, backbone_addr);

	/* don't accept claims from gateways which are not in
	 * the same mesh or group.
	 */
	if (!orig_node)
		return 1;

	/* if our mesh friends mac is bigger, use it for ourselves. */
	if (ntohs(bla_dst->group) > ntohs(bla_dst_own->group)) {
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "taking other backbones claim group: %#.4x\n",
			   ntohs(bla_dst->group));
		bla_dst_own->group = bla_dst->group;
	}

	batadv_orig_node_put(orig_node);

	return 2;
}
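/* Group ids converge without dedicated signalling: each backbone gateway
 * starts with the crc16 of its own primary MAC (see batadv_bla_init())
 * and adopts any numerically larger id observed from a backbone gateway
 * within the same mesh, so all members eventually agree on the largest
 * id among them.
 */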

/**
 * batadv_bla_process_claim - check if this is a claim frame, and process it
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the primary hard interface of this batman soft interface
 * @skb: the frame to be checked
 *
 * Return: 1 if it was a claim frame, otherwise return 0 to
 * tell the callee that it is not a claim and should be handled normally.
 */
static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
				    struct batadv_hard_iface *primary_if,
				    struct sk_buff *skb)
{
	struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;
	u8 *hw_src, *hw_dst;
	struct vlan_hdr *vhdr, vhdr_buf;
	struct ethhdr *ethhdr;
	struct arphdr *arphdr;
	unsigned short vid;
	int vlan_depth = 0;
	__be16 proto;
	int headlen;
	int ret;

	vid = batadv_get_vid(skb, 0);
	ethhdr = eth_hdr(skb);

	proto = ethhdr->h_proto;
	headlen = ETH_HLEN;
	if (vid & BATADV_VLAN_HAS_TAG) {
		/* Traverse the VLAN/Ethertypes.
		 *
		 * At this point it is known that the first protocol is a VLAN
		 * header, so start checking at the encapsulated protocol.
		 *
		 * The depth of the VLAN headers is recorded to drop BLA claim
		 * frames encapsulated into multiple VLAN headers (QinQ).
		 */
		do {
			vhdr = skb_header_pointer(skb, headlen, VLAN_HLEN,
						  &vhdr_buf);
			if (!vhdr)
				return 0;

			proto = vhdr->h_vlan_encapsulated_proto;
			headlen += VLAN_HLEN;
			vlan_depth++;
		} while (proto == htons(ETH_P_8021Q));
	}

	if (proto != htons(ETH_P_ARP))
		return 0; /* not a claim frame */

	/* this must be an ARP frame. check if it is a claim. */
	if (unlikely(!pskb_may_pull(skb, headlen + arp_hdr_len(skb->dev))))
		return 0;

	/* pskb_may_pull() may have modified the pointers, get ethhdr again */
	ethhdr = eth_hdr(skb);
	arphdr = (struct arphdr *)((u8 *)ethhdr + headlen);

	/* Check whether the ARP frame carries a valid
	 * IP information
	 */
	if (arphdr->ar_hrd != htons(ARPHRD_ETHER))
		return 0;
	if (arphdr->ar_pro != htons(ETH_P_IP))
		return 0;
	if (arphdr->ar_hln != ETH_ALEN)
		return 0;
	if (arphdr->ar_pln != 4)
		return 0;

	hw_src = (u8 *)arphdr + sizeof(struct arphdr);
	hw_dst = hw_src + ETH_ALEN + 4;
	bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
	bla_dst_own = &bat_priv->bla.claim_dest;

	/* check if it is a claim frame in general */
	if (memcmp(bla_dst->magic, bla_dst_own->magic,
		   sizeof(bla_dst->magic)) != 0)
		return 0;

	/* check if there is a claim frame encapsulated deeper in (QinQ) and
	 * drop that, as this is not supported by BLA but should also not be
	 * sent via the mesh.
	 */
	if (vlan_depth > 1)
		return 1;

	/* check if it is a claim frame. */
	ret = batadv_check_claim_group(bat_priv, primary_if, hw_src, hw_dst,
				       ethhdr);
	if (ret == 1)
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "bla_process_claim(): received a claim frame from another group. From: %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
			   ethhdr->h_source, BATADV_PRINT_VID(vid), hw_src,
			   hw_dst);

	if (ret < 2)
		return ret;

	/* become a backbone gw ourselves on this vlan if not happened yet */
	batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);

	/* check for the different types of claim frames ... */
	switch (bla_dst->type) {
	case BATADV_CLAIM_TYPE_CLAIM:
		if (batadv_handle_claim(bat_priv, primary_if, hw_src,
					ethhdr->h_source, vid))
			return 1;
		break;
	case BATADV_CLAIM_TYPE_UNCLAIM:
		if (batadv_handle_unclaim(bat_priv, primary_if,
					  ethhdr->h_source, hw_src, vid))
			return 1;
		break;
	case BATADV_CLAIM_TYPE_ANNOUNCE:
		if (batadv_handle_announce(bat_priv, hw_src, ethhdr->h_source,
					   vid))
			return 1;
		break;
	case BATADV_CLAIM_TYPE_REQUEST:
		if (batadv_handle_request(bat_priv, primary_if, hw_src, ethhdr,
					  vid))
			return 1;
		break;
	}

	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "bla_process_claim(): ERROR - this looks like a claim frame, but is useless. eth src %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
		   ethhdr->h_source, BATADV_PRINT_VID(vid), hw_src, hw_dst);
	return 1;
}
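/* Address usage of the claim types dispatched above:
 *   CLAIM:    Ethernet source is the client, hw_src names the backbone
 *   UNCLAIM:  Ethernet source is the backbone, hw_src names the client
 *   ANNOUNCE: hw_src carries the special 43:05:43:05:CRC:CRC address
 *   REQUEST:  hw_src and the Ethernet destination name the queried
 *             backbone gateway
 */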

/**
 * batadv_bla_purge_backbone_gw - removes backbone gateways after a timeout or
 *  immediately
 * @bat_priv: the bat priv with all the soft interface information
 * @now: whether the whole hash shall be wiped now
 *
 * Check when we last heard from other nodes, and remove them in case of
 * a time out, or clean all backbone gws if now is set.
 */
static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	struct hlist_node *node_tmp;
	struct hlist_head *head;
	struct batadv_hashtable *hash;
	spinlock_t *list_lock;	/* protects write access to the hash lists */
	int i;

	hash = bat_priv->bla.backbone_hash;
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(backbone_gw, node_tmp,
					  head, hash_entry) {
			if (now)
				goto purge_now;
			if (!batadv_has_timed_out(backbone_gw->lasttime,
						  BATADV_BLA_BACKBONE_TIMEOUT))
				continue;

			batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
				   "bla_purge_backbone_gw(): backbone gw %pM timed out\n",
				   backbone_gw->orig);

purge_now:
			/* don't wait for the pending request anymore */
			if (atomic_read(&backbone_gw->request_sent))
				atomic_dec(&bat_priv->bla.num_requests);

			batadv_bla_del_backbone_claims(backbone_gw);

			hlist_del_rcu(&backbone_gw->hash_entry);
			batadv_backbone_gw_put(backbone_gw);
		}
		spin_unlock_bh(list_lock);
	}
}

/**
 * batadv_bla_purge_claims - removes claims after a timeout or immediately
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the selected primary interface, may be NULL if now is set
 * @now: whether the whole hash shall be wiped now
 *
 * Check when we heard last time from our own claims, and remove them in case
 * of a time out, or clean all claims if now is set.
 */
static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
				    struct batadv_hard_iface *primary_if,
				    int now)
{
	struct batadv_bla_claim *claim;
	struct hlist_head *head;
	struct batadv_hashtable *hash;
	int i;

	hash = bat_priv->bla.claim_hash;
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(claim, head, hash_entry) {
			if (now)
				goto purge_now;
			if (!batadv_compare_eth(claim->backbone_gw->orig,
						primary_if->net_dev->dev_addr))
				continue;
			if (!batadv_has_timed_out(claim->lasttime,
						  BATADV_BLA_CLAIM_TIMEOUT))
				continue;

			batadv_dbg(BATADV_DBG_BLA, bat_priv,
				   "bla_purge_claims(): %pM, vid %d, time out\n",
				   claim->addr, claim->vid);

purge_now:
			batadv_handle_unclaim(bat_priv, primary_if,
					      claim->backbone_gw->orig,
					      claim->addr, claim->vid);
		}
		rcu_read_unlock();
	}
}

/**
 * batadv_bla_update_orig_address - updates the bla parameters after the own
 *  orig address changes
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the new selected primary_if
 * @oldif: the old primary interface, may be NULL
 *
 * Update the backbone gateways when the own orig address changes.
 */
void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
				    struct batadv_hard_iface *primary_if,
				    struct batadv_hard_iface *oldif)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	struct hlist_head *head;
	struct batadv_hashtable *hash;
	__be16 group;
	int i;

	/* reset bridge loop avoidance group id */
	group = htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN));
	bat_priv->bla.claim_dest.group = group;

	/* purge everything when bridge loop avoidance is turned off */
	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		oldif = NULL;

	if (!oldif) {
		batadv_bla_purge_claims(bat_priv, NULL, 1);
		batadv_bla_purge_backbone_gw(bat_priv, 1);
		return;
	}

	hash = bat_priv->bla.backbone_hash;
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
			/* own orig still holds the old value. */
			if (!batadv_compare_eth(backbone_gw->orig,
						oldif->net_dev->dev_addr))
				continue;

			ether_addr_copy(backbone_gw->orig,
					primary_if->net_dev->dev_addr);
			/* send an announce frame so others will ask for our
			 * claims and update their tables.
			 */
			batadv_bla_send_announce(bat_priv, backbone_gw);
		}
		rcu_read_unlock();
	}
}

/**
 * batadv_bla_status_update - purge bla interfaces if necessary
 * @net_dev: the soft interface net device
 */
void batadv_bla_status_update(struct net_device *net_dev)
{
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hard_iface *primary_if;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		return;

	/* this function already purges everything when bla is disabled,
	 * so just call that one.
	 */
	batadv_bla_update_orig_address(bat_priv, primary_if, primary_if);
	batadv_hardif_put(primary_if);
}

/**
 * batadv_bla_periodic_work - performs periodic bla work
 * @work: kernel work struct
 *
 * periodic work to do:
 *  * purge structures when they are too old
 *  * send announcements
 */
static void batadv_bla_periodic_work(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_priv *bat_priv;
	struct batadv_priv_bla *priv_bla;
	struct hlist_head *head;
	struct batadv_bla_backbone_gw *backbone_gw;
	struct batadv_hashtable *hash;
	struct batadv_hard_iface *primary_if;
	int i;

	delayed_work = container_of(work, struct delayed_work, work);
	priv_bla = container_of(delayed_work, struct batadv_priv_bla, work);
	bat_priv = container_of(priv_bla, struct batadv_priv, bla);
	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	batadv_bla_purge_claims(bat_priv, primary_if, 0);
	batadv_bla_purge_backbone_gw(bat_priv, 0);

	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		goto out;

	hash = bat_priv->bla.backbone_hash;
	if (!hash)
		goto out;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
			if (!batadv_compare_eth(backbone_gw->orig,
						primary_if->net_dev->dev_addr))
				continue;

			backbone_gw->lasttime = jiffies;

			batadv_bla_send_announce(bat_priv, backbone_gw);

			/* for own backbone gateways, request_sent is set at
			 * creation time. Hold back broadcast traffic until
			 * enough announcements went out: count down
			 * wait_periods once per work period and re-enable
			 * broadcasts when it reaches zero.
			 */
			if (atomic_read(&backbone_gw->request_sent) == 0)
				continue;

			if (!atomic_dec_and_test(&backbone_gw->wait_periods))
				continue;

			atomic_dec(&backbone_gw->bat_priv->bla.num_requests);
			atomic_set(&backbone_gw->request_sent, 0);
		}
		rcu_read_unlock();
	}
out:
	if (primary_if)
		batadv_hardif_put(primary_if);

	queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work,
			   msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
}
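/* Broadcast traffic stays blocked while bla.num_requests is non-zero
 * (checked in batadv_bla_rx() and batadv_bla_tx()). The counter is raised
 * by an outgoing REQUEST, cleared again by an ANNOUNCE with a matching
 * CRC, and by the creation of an own backbone gateway, cleared above once
 * wait_periods has counted down.
 */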

/* The claim hash and the backbone hash would receive the same lock class
 * key from batadv_hash_new(); assign distinct keys so that nested locking
 * of both hashes does not trigger false lockdep warnings.
 */
static struct lock_class_key batadv_claim_hash_lock_class_key;
static struct lock_class_key batadv_backbone_hash_lock_class_key;

/**
 * batadv_bla_init - initialize all bla structures
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Return: 0 on success, < 0 on error.
 */
int batadv_bla_init(struct batadv_priv *bat_priv)
{
	int i;
	u8 claim_dest[ETH_ALEN] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00};
	struct batadv_hard_iface *primary_if;
	u16 crc;
	unsigned long entrytime;

	spin_lock_init(&bat_priv->bla.bcast_duplist_lock);

	batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hash registering\n");

	/* setting claim destination address */
	memcpy(&bat_priv->bla.claim_dest.magic, claim_dest, 3);
	bat_priv->bla.claim_dest.type = 0;
	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (primary_if) {
		crc = crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN);
		bat_priv->bla.claim_dest.group = htons(crc);
		batadv_hardif_put(primary_if);
	} else {
		bat_priv->bla.claim_dest.group = 0;
	}

	/* initialize the duplicate list */
	entrytime = jiffies - msecs_to_jiffies(BATADV_DUPLIST_TIMEOUT);
	for (i = 0; i < BATADV_DUPLIST_SIZE; i++)
		bat_priv->bla.bcast_duplist[i].entrytime = entrytime;
	bat_priv->bla.bcast_duplist_curr = 0;

	if (bat_priv->bla.claim_hash)
		return 0;

	bat_priv->bla.claim_hash = batadv_hash_new(128);
	bat_priv->bla.backbone_hash = batadv_hash_new(32);

	if (!bat_priv->bla.claim_hash || !bat_priv->bla.backbone_hash)
		return -ENOMEM;

	batadv_hash_set_lock_class(bat_priv->bla.claim_hash,
				   &batadv_claim_hash_lock_class_key);
	batadv_hash_set_lock_class(bat_priv->bla.backbone_hash,
				   &batadv_backbone_hash_lock_class_key);

	batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hashes initialized\n");

	INIT_DELAYED_WORK(&bat_priv->bla.work, batadv_bla_periodic_work);

	queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work,
			   msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
	return 0;
}

/**
 * batadv_bla_check_bcast_duplist - check if a broadcast is a duplicate
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: contains the bcast_packet to be checked
 *
 * check if it is on our broadcast list. Another gateway might have sent the
 * same packet because it is connected to the same backbone, so we have to
 * remove this duplicate.
 *
 * This is performed by checking the CRC, which is then kept in a ringbuffer
 * with the recently seen packets of other originators.
 *
 * Return: 1 if the packet is in the duplicate list, 0 otherwise.
 */
int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
				   struct sk_buff *skb)
{
	int i, curr, ret = 0;
	__be32 crc;
	struct batadv_bcast_packet *bcast_packet;
	struct batadv_bcast_duplist_entry *entry;

	bcast_packet = (struct batadv_bcast_packet *)skb->data;

	/* calculate the crc over the broadcast payload */
	crc = batadv_skb_crc32(skb, (u8 *)(bcast_packet + 1));

	spin_lock_bh(&bat_priv->bla.bcast_duplist_lock);

	for (i = 0; i < BATADV_DUPLIST_SIZE; i++) {
		curr = (bat_priv->bla.bcast_duplist_curr + i);
		curr %= BATADV_DUPLIST_SIZE;
		entry = &bat_priv->bla.bcast_duplist[curr];

		/* we can stop searching if the entry is too old;
		 * later entries will be even older
		 */
		if (batadv_has_timed_out(entry->entrytime,
					 BATADV_DUPLIST_TIMEOUT))
			break;

		if (entry->crc != crc)
			continue;

		if (batadv_compare_eth(entry->orig, bcast_packet->orig))
			continue;

		/* this entry seems to match: same crc, not too old,
		 * and from another gw. therefore return 1 to forbid it.
		 */
		ret = 1;
		goto out;
	}
	/* not found, add a new entry (overwrite the oldest entry)
	 * and allow it, its the first occurrence.
	 */
	curr = (bat_priv->bla.bcast_duplist_curr + BATADV_DUPLIST_SIZE - 1);
	curr %= BATADV_DUPLIST_SIZE;
	entry = &bat_priv->bla.bcast_duplist[curr];
	entry->crc = crc;
	entry->entrytime = jiffies;
	ether_addr_copy(entry->orig, bcast_packet->orig);
	bat_priv->bla.bcast_duplist_curr = curr;

out:
	spin_unlock_bh(&bat_priv->bla.bcast_duplist_lock);

	return ret;
}
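/* bcast_duplist is a small ring ordered from newest to oldest entry:
 * bcast_duplist_curr points at the most recent slot and new entries are
 * written into the slot logically preceding it. The lookup above may
 * therefore stop at the first timed out entry, as every following slot
 * is older still.
 */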

/**
 * batadv_bla_is_backbone_gw_orig - check if the originator is a gateway for
 *  the VLAN identified by vid
 * @bat_priv: the bat priv with all the soft interface information
 * @orig: originator mac address
 * @vid: VLAN identifier
 *
 * Return: true if orig is a backbone for this vid, false otherwise.
 */
bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, u8 *orig,
				    unsigned short vid)
{
	struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
	struct hlist_head *head;
	struct batadv_bla_backbone_gw *backbone_gw;
	int i;

	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		return false;

	if (!hash)
		return false;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
			if (batadv_compare_eth(backbone_gw->orig, orig) &&
			    backbone_gw->vid == vid) {
				rcu_read_unlock();
				return true;
			}
		}
		rcu_read_unlock();
	}

	return false;
}

/**
 * batadv_bla_is_backbone_gw - check if originator is a backbone gw for a frame
 * @skb: the frame to be checked
 * @orig_node: the orig_node of the frame
 * @hdr_size: maximum length of the frame
 *
 * Return: 1 if the orig_node is also a gateway on the soft interface,
 * otherwise it returns 0.
 */
int batadv_bla_is_backbone_gw(struct sk_buff *skb,
			      struct batadv_orig_node *orig_node, int hdr_size)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	unsigned short vid;

	if (!atomic_read(&orig_node->bat_priv->bridge_loop_avoidance))
		return 0;

	/* first, find out the vid. */
	if (!pskb_may_pull(skb, hdr_size + ETH_HLEN))
		return 0;

	vid = batadv_get_vid(skb, hdr_size);

	/* see if this originator is a backbone gw for this VLAN */
	backbone_gw = batadv_backbone_hash_find(orig_node->bat_priv,
						orig_node->orig, vid);
	if (!backbone_gw)
		return 0;

	batadv_backbone_gw_put(backbone_gw);
	return 1;
}

/**
 * batadv_bla_free - free all bla structures
 * @bat_priv: the bat priv with all the soft interface information
 *
 * for softinterface free or module unload
 */
void batadv_bla_free(struct batadv_priv *bat_priv)
{
	struct batadv_hard_iface *primary_if;

	cancel_delayed_work_sync(&bat_priv->bla.work);
	primary_if = batadv_primary_if_get_selected(bat_priv);

	if (bat_priv->bla.claim_hash) {
		batadv_bla_purge_claims(bat_priv, primary_if, 1);
		batadv_hash_destroy(bat_priv->bla.claim_hash);
		bat_priv->bla.claim_hash = NULL;
	}
	if (bat_priv->bla.backbone_hash) {
		batadv_bla_purge_backbone_gw(bat_priv, 1);
		batadv_hash_destroy(bat_priv->bla.backbone_hash);
		bat_priv->bla.backbone_hash = NULL;
	}
	if (primary_if)
		batadv_hardif_put(primary_if);
}

/**
 * batadv_bla_rx - check packets coming from the mesh
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the frame to be checked
 * @vid: the VLAN ID of the frame
 * @is_bcast: the packet came in a broadcast packet type
 *
 * batadv_bla_rx avoidance checks if:
 *  * we have to race for a claim
 *  * if the frame is allowed on the LAN
 *
 * Return: 1 if the skb was further handled (and consumed) by this
 * function, otherwise 0 and the caller shall further process the skb.
 */
int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
		  unsigned short vid, bool is_bcast)
{
	struct ethhdr *ethhdr;
	struct batadv_bla_claim search_claim, *claim = NULL;
	struct batadv_hard_iface *primary_if;
	int ret;

	ethhdr = eth_hdr(skb);

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto handled;

	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		goto allow;

	if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
		/* don't allow broadcasts while requests are in flight */
		if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast)
			goto handled;

	ether_addr_copy(search_claim.addr, ethhdr->h_source);
	search_claim.vid = vid;
	claim = batadv_claim_hash_find(bat_priv, &search_claim);

	if (!claim) {
		/* possible optimization: race for a claim
		 * no claim exists yet, claim it for us!
		 */
		batadv_handle_claim(bat_priv, primary_if,
				    primary_if->net_dev->dev_addr,
				    ethhdr->h_source, vid);
		goto allow;
	}

	/* if it is our own claim ... */
	if (batadv_compare_eth(claim->backbone_gw->orig,
			       primary_if->net_dev->dev_addr)) {
		/* ... allow it in any case */
		claim->lasttime = jiffies;
		goto allow;
	}

	/* if it is a broadcast ... */
	if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast) {
		/* ... drop it. the responsible gateway is in charge.
		 *
		 * We need to check is_bcast because with the gateway
		 * feature, broadcasts (like DHCP requests) may be sent
		 * using a unicast packet type.
		 */
		goto handled;
	} else {
		/* seems the client considers us as its best gateway.
		 * send a claim and update the claim table
		 * immediately.
		 */
		batadv_handle_claim(bat_priv, primary_if,
				    primary_if->net_dev->dev_addr,
				    ethhdr->h_source, vid);
		goto allow;
	}
allow:
	batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
	ret = 0;
	goto out;

handled:
	kfree_skb(skb);
	ret = 1;

out:
	if (primary_if)
		batadv_hardif_put(primary_if);
	if (claim)
		batadv_claim_put(claim);
	return ret;
}
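/* Decision summary for frames received from the mesh:
 *  - no claim for the source yet: claim the client ourselves and allow
 *  - claimed by us: refresh the claim and allow
 *  - claimed by another gateway: drop broadcasts (the responsible
 *    gateway delivers them) and re-claim unicast sources, as the client
 *    apparently roamed to us
 */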

/**
 * batadv_bla_tx - check packets going into the mesh
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the frame to be checked
 * @vid: the VLAN ID of the frame
 *
 * batadv_bla_tx checks if:
 *  * a claim was received which has to be processed
 *  * the frame is allowed on the mesh
 *
 * Return: 1 if the skb was further handled by this function, otherwise 0
 * and the caller shall further process the skb.
 *
 * This call might reallocate skb data.
 */
int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
		  unsigned short vid)
{
	struct ethhdr *ethhdr;
	struct batadv_bla_claim search_claim, *claim = NULL;
	struct batadv_hard_iface *primary_if;
	int ret = 0;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		goto allow;

	if (batadv_bla_process_claim(bat_priv, primary_if, skb))
		goto handled;

	ethhdr = eth_hdr(skb);

	if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
		/* don't allow broadcasts while requests are in flight */
		if (is_multicast_ether_addr(ethhdr->h_dest))
			goto handled;

	ether_addr_copy(search_claim.addr, ethhdr->h_source);
	search_claim.vid = vid;

	claim = batadv_claim_hash_find(bat_priv, &search_claim);

	/* if no claim exists, allow it. */
	if (!claim)
		goto allow;

	/* check if we are responsible. */
	if (batadv_compare_eth(claim->backbone_gw->orig,
			       primary_if->net_dev->dev_addr)) {
		/* if yes, the client has roamed and we have
		 * to unclaim it.
		 */
		batadv_handle_unclaim(bat_priv, primary_if,
				      primary_if->net_dev->dev_addr,
				      ethhdr->h_source, vid);
		goto allow;
	}

	/* check if it is a multicast/broadcast frame */
	if (is_multicast_ether_addr(ethhdr->h_dest)) {
		/* drop it. the responsible gateway has forwarded it into
		 * the backbone network.
		 */
		goto handled;
	} else {
		/* we must allow it. at least if we are
		 * responsible for the DESTINATION.
		 */
		goto allow;
	}
allow:
	batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
	ret = 0;
	goto out;
handled:
	ret = 1;
out:
	if (primary_if)
		batadv_hardif_put(primary_if);
	if (claim)
		batadv_claim_put(claim);
	return ret;
}

/**
 * batadv_bla_claim_table_seq_print_text - print the claim table in a seq file
 * @seq: seq file to print on
 * @offset: not used
 *
 * Return: always 0
 */
int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
	struct batadv_bla_claim *claim;
	struct batadv_hard_iface *primary_if;
	struct hlist_head *head;
	u16 backbone_crc;
	u32 i;
	bool is_own;
	u8 *primary_addr;

	primary_if = batadv_seq_print_text_primary_if_get(seq);
	if (!primary_if)
		goto out;

	primary_addr = primary_if->net_dev->dev_addr;
	seq_printf(seq,
		   "Claims announced for the mesh %s (orig %pM, group id %#.4x)\n",
		   net_dev->name, primary_addr,
		   ntohs(bat_priv->bla.claim_dest.group));
	seq_printf(seq, "   %-17s    %-5s    %-17s [o] (%-6s)\n",
		   "Client", "VID", "Originator", "CRC");
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(claim, head, hash_entry) {
			is_own = batadv_compare_eth(claim->backbone_gw->orig,
						    primary_addr);

			spin_lock_bh(&claim->backbone_gw->crc_lock);
			backbone_crc = claim->backbone_gw->crc;
			spin_unlock_bh(&claim->backbone_gw->crc_lock);
			seq_printf(seq, " * %pM on %5d by %pM [%c] (%#.4x)\n",
				   claim->addr, BATADV_PRINT_VID(claim->vid),
				   claim->backbone_gw->orig,
				   (is_own ? 'x' : ' '),
				   backbone_crc);
		}
		rcu_read_unlock();
	}
out:
	if (primary_if)
		batadv_hardif_put(primary_if);
	return 0;
}

/**
 * batadv_bla_backbone_table_seq_print_text - print the backbone table in a
 *  seq file
 * @seq: seq file to print on
 * @offset: not used
 *
 * Return: always 0
 */
int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq,
					     void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
	struct batadv_bla_backbone_gw *backbone_gw;
	struct batadv_hard_iface *primary_if;
	struct hlist_head *head;
	int secs, msecs;
	u16 backbone_crc;
	u32 i;
	bool is_own;
	u8 *primary_addr;

	primary_if = batadv_seq_print_text_primary_if_get(seq);
	if (!primary_if)
		goto out;

	primary_addr = primary_if->net_dev->dev_addr;
	seq_printf(seq,
		   "Backbones announced for the mesh %s (orig %pM, group id %#.4x)\n",
		   net_dev->name, primary_addr,
		   ntohs(bat_priv->bla.claim_dest.group));
	seq_printf(seq, "   %-17s    %-5s %-9s (%-6s)\n",
		   "Originator", "VID", "last seen", "CRC");
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
			msecs = jiffies_to_msecs(jiffies -
						 backbone_gw->lasttime);
			secs = msecs / 1000;
			msecs = msecs % 1000;

			is_own = batadv_compare_eth(backbone_gw->orig,
						    primary_addr);
			if (is_own)
				continue;

			spin_lock_bh(&backbone_gw->crc_lock);
			backbone_crc = backbone_gw->crc;
			spin_unlock_bh(&backbone_gw->crc_lock);

			seq_printf(seq, " * %pM on %5d %4i.%03is (%#.4x)\n",
				   backbone_gw->orig,
				   BATADV_PRINT_VID(backbone_gw->vid), secs,
				   msecs, backbone_crc);
		}
		rcu_read_unlock();
	}
out:
	if (primary_if)
		batadv_hardif_put(primary_if);
	return 0;
}