1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18#include "main.h"
19#include "hash.h"
20#include "hard-interface.h"
21#include "originator.h"
22#include "bridge_loop_avoidance.h"
23#include "translation-table.h"
24#include "send.h"
25
26#include <linux/etherdevice.h>
27#include <linux/crc16.h>
28#include <linux/if_arp.h>
29#include <net/arp.h>
30#include <linux/if_vlan.h>
31
/* magic prefix of the special announcement mac address; the final two bytes
 * are filled with the crc16 of the backbone gateway's claims (see
 * batadv_bla_send_announce())
 */
static const uint8_t batadv_announce_mac[4] = {0x43, 0x05, 0x43, 0x05};

/* forward declarations needed by the functions below */
static void batadv_bla_periodic_work(struct work_struct *work);
static void
batadv_bla_send_announce(struct batadv_priv *bat_priv,
			 struct batadv_bla_backbone_gw *backbone_gw);
38
39
40static inline uint32_t batadv_choose_claim(const void *data, uint32_t size)
41{
42 struct batadv_bla_claim *claim = (struct batadv_bla_claim *)data;
43 uint32_t hash = 0;
44
45 hash = batadv_hash_bytes(hash, &claim->addr, sizeof(claim->addr));
46 hash = batadv_hash_bytes(hash, &claim->vid, sizeof(claim->vid));
47
48 hash += (hash << 3);
49 hash ^= (hash >> 11);
50 hash += (hash << 15);
51
52 return hash % size;
53}
54
55
/**
 * batadv_choose_backbone_gw - choose the right bucket for a backbone gateway.
 * @data: data to hash
 * @size: size of the hash table
 *
 * Returns the hash index of the backbone gateway.
 *
 * NOTE(review): callers pass a struct batadv_bla_backbone_gw here (see
 * batadv_backbone_hash_find()), but the data is hashed through the claim
 * layout. This assumes the hashed members (addr/orig and vid) sit at the
 * same offsets in both structs - confirm against the struct definitions.
 */
static inline uint32_t batadv_choose_backbone_gw(const void *data,
						 uint32_t size)
{
	const struct batadv_bla_claim *claim = (struct batadv_bla_claim *)data;
	uint32_t hash = 0;

	hash = batadv_hash_bytes(hash, &claim->addr, sizeof(claim->addr));
	hash = batadv_hash_bytes(hash, &claim->vid, sizeof(claim->vid));

	/* final avalanche mixing of the accumulated hash */
	hash += (hash << 3);
	hash ^= (hash >> 11);
	hash += (hash << 15);

	return hash % size;
}
71
72
73static int batadv_compare_backbone_gw(const struct hlist_node *node,
74 const void *data2)
75{
76 const void *data1 = container_of(node, struct batadv_bla_backbone_gw,
77 hash_entry);
78 const struct batadv_bla_backbone_gw *gw1 = data1, *gw2 = data2;
79
80 if (!batadv_compare_eth(gw1->orig, gw2->orig))
81 return 0;
82
83 if (gw1->vid != gw2->vid)
84 return 0;
85
86 return 1;
87}
88
89
90static int batadv_compare_claim(const struct hlist_node *node,
91 const void *data2)
92{
93 const void *data1 = container_of(node, struct batadv_bla_claim,
94 hash_entry);
95 const struct batadv_bla_claim *cl1 = data1, *cl2 = data2;
96
97 if (!batadv_compare_eth(cl1->addr, cl2->addr))
98 return 0;
99
100 if (cl1->vid != cl2->vid)
101 return 0;
102
103 return 1;
104}
105
106
107static void
108batadv_backbone_gw_free_ref(struct batadv_bla_backbone_gw *backbone_gw)
109{
110 if (atomic_dec_and_test(&backbone_gw->refcount))
111 kfree_rcu(backbone_gw, rcu);
112}
113
114
115static void batadv_claim_free_rcu(struct rcu_head *rcu)
116{
117 struct batadv_bla_claim *claim;
118
119 claim = container_of(rcu, struct batadv_bla_claim, rcu);
120
121 batadv_backbone_gw_free_ref(claim->backbone_gw);
122 kfree(claim);
123}
124
125
126static void batadv_claim_free_ref(struct batadv_bla_claim *claim)
127{
128 if (atomic_dec_and_test(&claim->refcount))
129 call_rcu(&claim->rcu, batadv_claim_free_rcu);
130}
131
132
133
134
135
136
137
138
139
140static struct batadv_bla_claim
141*batadv_claim_hash_find(struct batadv_priv *bat_priv,
142 struct batadv_bla_claim *data)
143{
144 struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
145 struct hlist_head *head;
146 struct batadv_bla_claim *claim;
147 struct batadv_bla_claim *claim_tmp = NULL;
148 int index;
149
150 if (!hash)
151 return NULL;
152
153 index = batadv_choose_claim(data, hash->size);
154 head = &hash->table[index];
155
156 rcu_read_lock();
157 hlist_for_each_entry_rcu(claim, head, hash_entry) {
158 if (!batadv_compare_claim(&claim->hash_entry, data))
159 continue;
160
161 if (!atomic_inc_not_zero(&claim->refcount))
162 continue;
163
164 claim_tmp = claim;
165 break;
166 }
167 rcu_read_unlock();
168
169 return claim_tmp;
170}
171
172
173
174
175
176
177
178
179
180static struct batadv_bla_backbone_gw *
181batadv_backbone_hash_find(struct batadv_priv *bat_priv,
182 uint8_t *addr, unsigned short vid)
183{
184 struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
185 struct hlist_head *head;
186 struct batadv_bla_backbone_gw search_entry, *backbone_gw;
187 struct batadv_bla_backbone_gw *backbone_gw_tmp = NULL;
188 int index;
189
190 if (!hash)
191 return NULL;
192
193 ether_addr_copy(search_entry.orig, addr);
194 search_entry.vid = vid;
195
196 index = batadv_choose_backbone_gw(&search_entry, hash->size);
197 head = &hash->table[index];
198
199 rcu_read_lock();
200 hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
201 if (!batadv_compare_backbone_gw(&backbone_gw->hash_entry,
202 &search_entry))
203 continue;
204
205 if (!atomic_inc_not_zero(&backbone_gw->refcount))
206 continue;
207
208 backbone_gw_tmp = backbone_gw;
209 break;
210 }
211 rcu_read_unlock();
212
213 return backbone_gw_tmp;
214}
215
216
/**
 * batadv_bla_del_backbone_claims - delete all claims for a backbone gateway
 * @backbone_gw: backbone gateway whose claims should be removed
 *
 * Walks the whole claim hash under the per-bucket spinlocks, removes every
 * claim owned by @backbone_gw and finally resets the gateway's claim CRC.
 */
static void
batadv_bla_del_backbone_claims(struct batadv_bla_backbone_gw *backbone_gw)
{
	struct batadv_hashtable *hash;
	struct hlist_node *node_tmp;
	struct hlist_head *head;
	struct batadv_bla_claim *claim;
	int i;
	spinlock_t *list_lock;	/* protects write access to the hash lists */

	hash = backbone_gw->bat_priv->bla.claim_hash;
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(claim, node_tmp,
					  head, hash_entry) {
			if (claim->backbone_gw != backbone_gw)
				continue;

			/* drop the hash table's reference before unlinking;
			 * NOTE(review): this relies on claims being created
			 * with refcount 2 and freed only after an RCU grace
			 * period, so @claim stays valid here - confirm
			 */
			batadv_claim_free_ref(claim);
			hlist_del_rcu(&claim->hash_entry);
		}
		spin_unlock_bh(list_lock);
	}

	/* all claims gone, reset the CRC over this gateway's claims */
	backbone_gw->crc = BATADV_BLA_CRC_INIT;
}
250
251
252
253
254
255
256
257
258static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
259 unsigned short vid, int claimtype)
260{
261 struct sk_buff *skb;
262 struct ethhdr *ethhdr;
263 struct batadv_hard_iface *primary_if;
264 struct net_device *soft_iface;
265 uint8_t *hw_src;
266 struct batadv_bla_claim_dst local_claim_dest;
267 __be32 zeroip = 0;
268
269 primary_if = batadv_primary_if_get_selected(bat_priv);
270 if (!primary_if)
271 return;
272
273 memcpy(&local_claim_dest, &bat_priv->bla.claim_dest,
274 sizeof(local_claim_dest));
275 local_claim_dest.type = claimtype;
276
277 soft_iface = primary_if->soft_iface;
278
279 skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
280
281 zeroip,
282 primary_if->soft_iface,
283
284 zeroip,
285
286 NULL,
287
288 primary_if->net_dev->dev_addr,
289
290
291
292
293 (uint8_t *)&local_claim_dest);
294
295 if (!skb)
296 goto out;
297
298 ethhdr = (struct ethhdr *)skb->data;
299 hw_src = (uint8_t *)ethhdr + ETH_HLEN + sizeof(struct arphdr);
300
301
302 switch (claimtype) {
303 case BATADV_CLAIM_TYPE_CLAIM:
304
305
306
307 ether_addr_copy(ethhdr->h_source, mac);
308 batadv_dbg(BATADV_DBG_BLA, bat_priv,
309 "bla_send_claim(): CLAIM %pM on vid %d\n", mac,
310 BATADV_PRINT_VID(vid));
311 break;
312 case BATADV_CLAIM_TYPE_UNCLAIM:
313
314
315
316 ether_addr_copy(hw_src, mac);
317 batadv_dbg(BATADV_DBG_BLA, bat_priv,
318 "bla_send_claim(): UNCLAIM %pM on vid %d\n", mac,
319 BATADV_PRINT_VID(vid));
320 break;
321 case BATADV_CLAIM_TYPE_ANNOUNCE:
322
323
324
325 ether_addr_copy(hw_src, mac);
326 batadv_dbg(BATADV_DBG_BLA, bat_priv,
327 "bla_send_claim(): ANNOUNCE of %pM on vid %d\n",
328 ethhdr->h_source, BATADV_PRINT_VID(vid));
329 break;
330 case BATADV_CLAIM_TYPE_REQUEST:
331
332
333
334
335 ether_addr_copy(hw_src, mac);
336 ether_addr_copy(ethhdr->h_dest, mac);
337 batadv_dbg(BATADV_DBG_BLA, bat_priv,
338 "bla_send_claim(): REQUEST of %pM to %pM on vid %d\n",
339 ethhdr->h_source, ethhdr->h_dest,
340 BATADV_PRINT_VID(vid));
341 break;
342 }
343
344 if (vid & BATADV_VLAN_HAS_TAG)
345 skb = vlan_insert_tag(skb, htons(ETH_P_8021Q),
346 vid & VLAN_VID_MASK);
347
348 skb_reset_mac_header(skb);
349 skb->protocol = eth_type_trans(skb, soft_iface);
350 batadv_inc_counter(bat_priv, BATADV_CNT_RX);
351 batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
352 skb->len + ETH_HLEN);
353 soft_iface->last_rx = jiffies;
354
355 netif_rx(skb);
356out:
357 if (primary_if)
358 batadv_hardif_free_ref(primary_if);
359}
360
361
362
363
364
365
366
367
368
369
370
/**
 * batadv_bla_get_backbone_gw - find or create a backbone gateway
 * @bat_priv: the bat priv with all the soft interface information
 * @orig: the mac address of the originator
 * @vid: the VLAN ID
 * @own_backbone: set if the requested backbone is our own
 *
 * Searches for the backbone gw, creating a new entry if none exists yet.
 * Returns the backbone gw with a reference held (release with
 * batadv_backbone_gw_free_ref()), or NULL on allocation/insertion failure.
 */
static struct batadv_bla_backbone_gw *
batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, uint8_t *orig,
			   unsigned short vid, bool own_backbone)
{
	struct batadv_bla_backbone_gw *entry;
	struct batadv_orig_node *orig_node;
	int hash_added;

	entry = batadv_backbone_hash_find(bat_priv, orig, vid);

	if (entry)
		return entry;

	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "bla_get_backbone_gw(): not found (%pM, %d), creating new entry\n",
		   orig, BATADV_PRINT_VID(vid));

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return NULL;

	entry->vid = vid;
	entry->lasttime = jiffies;
	entry->crc = BATADV_BLA_CRC_INIT;
	entry->bat_priv = bat_priv;
	atomic_set(&entry->request_sent, 0);
	atomic_set(&entry->wait_periods, 0);
	ether_addr_copy(entry->orig, orig);

	/* one reference for the hash table, one for the caller */
	atomic_set(&entry->refcount, 2);

	hash_added = batadv_hash_add(bat_priv->bla.backbone_hash,
				     batadv_compare_backbone_gw,
				     batadv_choose_backbone_gw, entry,
				     &entry->hash_entry);

	if (unlikely(hash_added != 0)) {
		/* hash insertion failed, free the structure again */
		kfree(entry);
		return NULL;
	}

	/* this originator is a gateway now - remove its global TT entries */
	orig_node = batadv_orig_hash_find(bat_priv, orig);
	if (orig_node) {
		batadv_tt_global_del_orig(bat_priv, orig_node, vid,
					  "became a backbone gateway");
		batadv_orig_node_free_ref(orig_node);
	}

	if (own_backbone) {
		batadv_bla_send_announce(bat_priv, entry);

		/* request_sent/num_requests are decreased again by the
		 * periodic worker once the wait periods have elapsed
		 */
		atomic_inc(&entry->request_sent);
		atomic_set(&entry->wait_periods, BATADV_BLA_WAIT_PERIODS);
		atomic_inc(&bat_priv->bla.num_requests);
	}

	return entry;
}
433
434
435
436
437static void
438batadv_bla_update_own_backbone_gw(struct batadv_priv *bat_priv,
439 struct batadv_hard_iface *primary_if,
440 unsigned short vid)
441{
442 struct batadv_bla_backbone_gw *backbone_gw;
443
444 backbone_gw = batadv_bla_get_backbone_gw(bat_priv,
445 primary_if->net_dev->dev_addr,
446 vid, true);
447 if (unlikely(!backbone_gw))
448 return;
449
450 backbone_gw->lasttime = jiffies;
451 batadv_backbone_gw_free_ref(backbone_gw);
452}
453
454
455
456
457
458
459
460
461
462
/**
 * batadv_bla_answer_request - answer a bla request by repeating own claims
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: interface where the request came in
 * @vid: the vid where the request came in
 *
 * Resends a CLAIM frame for every claim owned by our own backbone gateway
 * on this VLAN, then sends an ANNOUNCE so the requester can re-check the
 * CRC.
 */
static void batadv_bla_answer_request(struct batadv_priv *bat_priv,
				      struct batadv_hard_iface *primary_if,
				      unsigned short vid)
{
	struct hlist_head *head;
	struct batadv_hashtable *hash;
	struct batadv_bla_claim *claim;
	struct batadv_bla_backbone_gw *backbone_gw;
	int i;

	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "bla_answer_request(): received a claim request, send all of our own claims again\n");

	backbone_gw = batadv_backbone_hash_find(bat_priv,
						primary_if->net_dev->dev_addr,
						vid);
	if (!backbone_gw)
		return;

	/* walk the whole claim hash and repeat every claim of our own gw */
	hash = bat_priv->bla.claim_hash;
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(claim, head, hash_entry) {
			/* only own claims are interesting */
			if (claim->backbone_gw != backbone_gw)
				continue;

			batadv_bla_send_claim(bat_priv, claim->addr, claim->vid,
					      BATADV_CLAIM_TYPE_CLAIM);
		}
		rcu_read_unlock();
	}

	/* finally, send an announcement frame */
	batadv_bla_send_announce(bat_priv, backbone_gw);
	batadv_backbone_gw_free_ref(backbone_gw);
}
502
503
504
505
506
507
508
509
510
/**
 * batadv_bla_send_request - ask a backbone gateway to repeat its claims
 * @backbone_gw: the backbone gateway we are out of sync with
 *
 * Drops all locally stored claims of @backbone_gw and sends a REQUEST
 * frame; the gateway is expected to resend its claims and a final
 * announcement (see batadv_bla_answer_request()).
 */
static void batadv_bla_send_request(struct batadv_bla_backbone_gw *backbone_gw)
{
	/* first, remove all old entries */
	batadv_bla_del_backbone_claims(backbone_gw);

	batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
		   "Sending REQUEST to %pM\n", backbone_gw->orig);

	/* send request */
	batadv_bla_send_claim(backbone_gw->bat_priv, backbone_gw->orig,
			      backbone_gw->vid, BATADV_CLAIM_TYPE_REQUEST);

	/* mark the request as pending; bump the global counter only once */
	if (!atomic_read(&backbone_gw->request_sent)) {
		atomic_inc(&backbone_gw->bat_priv->bla.num_requests);
		atomic_set(&backbone_gw->request_sent, 1);
	}
}
529
530
531
532
533
534
535
536
537
538static void batadv_bla_send_announce(struct batadv_priv *bat_priv,
539 struct batadv_bla_backbone_gw *backbone_gw)
540{
541 uint8_t mac[ETH_ALEN];
542 __be16 crc;
543
544 memcpy(mac, batadv_announce_mac, 4);
545 crc = htons(backbone_gw->crc);
546 memcpy(&mac[4], &crc, 2);
547
548 batadv_bla_send_claim(bat_priv, mac, backbone_gw->vid,
549 BATADV_CLAIM_TYPE_ANNOUNCE);
550}
551
552
553
554
555
556
557
558
/**
 * batadv_bla_add_claim - add a new claim entry to the hash table
 * @bat_priv: the bat priv with all the soft interface information
 * @mac: the mac address of the claimed client
 * @vid: the VLAN ID of the frame
 * @backbone_gw: the backbone gateway which claims it
 *
 * Creates the claim if it does not exist yet; otherwise transfers
 * ownership to @backbone_gw, updating the claim CRCs of both the old
 * and the new gateway.
 */
static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
				 const uint8_t *mac, const unsigned short vid,
				 struct batadv_bla_backbone_gw *backbone_gw)
{
	struct batadv_bla_claim *claim;
	struct batadv_bla_claim search_claim;
	int hash_added;

	ether_addr_copy(search_claim.addr, mac);
	search_claim.vid = vid;
	claim = batadv_claim_hash_find(bat_priv, &search_claim);

	/* create a new claim entry if it does not exist yet. */
	if (!claim) {
		claim = kzalloc(sizeof(*claim), GFP_ATOMIC);
		if (!claim)
			return;

		ether_addr_copy(claim->addr, mac);
		claim->vid = vid;
		claim->lasttime = jiffies;
		claim->backbone_gw = backbone_gw;

		/* one reference for the hash table, one for this function */
		atomic_set(&claim->refcount, 2);
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "bla_add_claim(): adding new entry %pM, vid %d to hash ...\n",
			   mac, BATADV_PRINT_VID(vid));
		hash_added = batadv_hash_add(bat_priv->bla.claim_hash,
					     batadv_compare_claim,
					     batadv_choose_claim, claim,
					     &claim->hash_entry);

		if (unlikely(hash_added != 0)) {
			/* insertion failed, only local changes happened */
			kfree(claim);
			return;
		}
	} else {
		claim->lasttime = jiffies;
		if (claim->backbone_gw == backbone_gw)
			/* no need to register a new backbone */
			goto claim_free_ref;

		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "bla_add_claim(): changing ownership for %pM, vid %d\n",
			   mac, BATADV_PRINT_VID(vid));

		/* remove the claim from the old gateway's crc and drop the
		 * reference the claim held on it
		 */
		claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
		batadv_backbone_gw_free_ref(claim->backbone_gw);
	}

	/* set the (new) backbone gw, taking a reference for the claim */
	atomic_inc(&backbone_gw->refcount);
	claim->backbone_gw = backbone_gw;

	/* ... and fold the claim into the new gateway's crc */
	backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
	backbone_gw->lasttime = jiffies;

claim_free_ref:
	batadv_claim_free_ref(claim);
}
619
620
621
622
/**
 * batadv_bla_del_claim - delete a claim from the claim hash
 * @bat_priv: the bat priv with all the soft interface information
 * @mac: mac address of the claim to be removed
 * @vid: VLAN id of the claim to be removed
 */
static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
				 const uint8_t *mac, const unsigned short vid)
{
	struct batadv_bla_claim search_claim, *claim;

	ether_addr_copy(search_claim.addr, mac);
	search_claim.vid = vid;
	claim = batadv_claim_hash_find(bat_priv, &search_claim);
	if (!claim)
		return;

	batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_del_claim(): %pM, vid %d\n",
		   mac, BATADV_PRINT_VID(vid));

	batadv_hash_remove(bat_priv->bla.claim_hash, batadv_compare_claim,
			   batadv_choose_claim, claim);
	batadv_claim_free_ref(claim); /* reference from the hash is gone */

	/* NOTE(review): @claim is still kept alive here by the reference
	 * taken in batadv_claim_hash_find() above - confirm it is >= 1
	 */
	claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);

	/* don't need the reference from hash_find() anymore */
	batadv_claim_free_ref(claim);
}
646
647
648static int batadv_handle_announce(struct batadv_priv *bat_priv,
649 uint8_t *an_addr, uint8_t *backbone_addr,
650 unsigned short vid)
651{
652 struct batadv_bla_backbone_gw *backbone_gw;
653 uint16_t crc;
654
655 if (memcmp(an_addr, batadv_announce_mac, 4) != 0)
656 return 0;
657
658 backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid,
659 false);
660
661 if (unlikely(!backbone_gw))
662 return 1;
663
664
665 backbone_gw->lasttime = jiffies;
666 crc = ntohs(*((__be16 *)(&an_addr[4])));
667
668 batadv_dbg(BATADV_DBG_BLA, bat_priv,
669 "handle_announce(): ANNOUNCE vid %d (sent by %pM)... CRC = %#.4x\n",
670 BATADV_PRINT_VID(vid), backbone_gw->orig, crc);
671
672 if (backbone_gw->crc != crc) {
673 batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
674 "handle_announce(): CRC FAILED for %pM/%d (my = %#.4x, sent = %#.4x)\n",
675 backbone_gw->orig,
676 BATADV_PRINT_VID(backbone_gw->vid),
677 backbone_gw->crc, crc);
678
679 batadv_bla_send_request(backbone_gw);
680 } else {
681
682
683
684 if (atomic_read(&backbone_gw->request_sent)) {
685 atomic_dec(&backbone_gw->bat_priv->bla.num_requests);
686 atomic_set(&backbone_gw->request_sent, 0);
687 }
688 }
689
690 batadv_backbone_gw_free_ref(backbone_gw);
691 return 1;
692}
693
694
/**
 * batadv_handle_request - check for REQUEST frame
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the primary hard interface of this batman soft interface
 * @backbone_addr: the requested backbone address (taken from hw_src)
 * @ethhdr: ethernet header of the packet
 * @vid: the VLAN ID of the frame
 *
 * Returns 1 if the frame was handled as a request, 0 otherwise.
 */
static int batadv_handle_request(struct batadv_priv *bat_priv,
				 struct batadv_hard_iface *primary_if,
				 uint8_t *backbone_addr,
				 struct ethhdr *ethhdr, unsigned short vid)
{
	/* only a REQUEST if the backbone address equals the destination */
	if (!batadv_compare_eth(backbone_addr, ethhdr->h_dest))
		return 0;

	/* requests not addressed to us are still claim frames and must not
	 * be processed further - swallow them
	 */
	if (!batadv_compare_eth(ethhdr->h_dest, primary_if->net_dev->dev_addr))
		return 1;

	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "handle_request(): REQUEST vid %d (sent by %pM)...\n",
		   BATADV_PRINT_VID(vid), ethhdr->h_source);

	batadv_bla_answer_request(bat_priv, primary_if, vid);
	return 1;
}
717
718
719static int batadv_handle_unclaim(struct batadv_priv *bat_priv,
720 struct batadv_hard_iface *primary_if,
721 uint8_t *backbone_addr,
722 uint8_t *claim_addr, unsigned short vid)
723{
724 struct batadv_bla_backbone_gw *backbone_gw;
725
726
727 if (primary_if && batadv_compare_eth(backbone_addr,
728 primary_if->net_dev->dev_addr))
729 batadv_bla_send_claim(bat_priv, claim_addr, vid,
730 BATADV_CLAIM_TYPE_UNCLAIM);
731
732 backbone_gw = batadv_backbone_hash_find(bat_priv, backbone_addr, vid);
733
734 if (!backbone_gw)
735 return 1;
736
737
738 batadv_dbg(BATADV_DBG_BLA, bat_priv,
739 "handle_unclaim(): UNCLAIM %pM on vid %d (sent by %pM)...\n",
740 claim_addr, BATADV_PRINT_VID(vid), backbone_gw->orig);
741
742 batadv_bla_del_claim(bat_priv, claim_addr, vid);
743 batadv_backbone_gw_free_ref(backbone_gw);
744 return 1;
745}
746
747
748static int batadv_handle_claim(struct batadv_priv *bat_priv,
749 struct batadv_hard_iface *primary_if,
750 uint8_t *backbone_addr, uint8_t *claim_addr,
751 unsigned short vid)
752{
753 struct batadv_bla_backbone_gw *backbone_gw;
754
755
756
757 backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid,
758 false);
759
760 if (unlikely(!backbone_gw))
761 return 1;
762
763
764 batadv_bla_add_claim(bat_priv, claim_addr, vid, backbone_gw);
765 if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
766 batadv_bla_send_claim(bat_priv, claim_addr, vid,
767 BATADV_CLAIM_TYPE_CLAIM);
768
769
770
771 batadv_backbone_gw_free_ref(backbone_gw);
772 return 1;
773}
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
/**
 * batadv_check_claim_group - check for claim group membership
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the primary interface of this batman interface
 * @hw_src: the hardware source address in the ARP header
 * @hw_dst: the hardware destination address in the ARP header
 * @ethhdr: pointer to the ethernet header of the claim frame
 *
 * Checks if the frame is a claim packet and whether it belongs to our
 * group; adopts the sender's group id if the sender is in our mesh and
 * has a higher id.
 *
 * Returns:
 *	2 - claim packet and in the same group
 *	1 - claim packet from another group
 *	0 - not a claim packet
 */
static int batadv_check_claim_group(struct batadv_priv *bat_priv,
				    struct batadv_hard_iface *primary_if,
				    uint8_t *hw_src, uint8_t *hw_dst,
				    struct ethhdr *ethhdr)
{
	uint8_t *backbone_addr;
	struct batadv_orig_node *orig_node;
	struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;

	bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
	bla_dst_own = &bat_priv->bla.claim_dest;

	/* the backbone address sits in hw_src for CLAIM frames, and in
	 * the ethernet source for the other claim types
	 */
	switch (bla_dst->type) {
	case BATADV_CLAIM_TYPE_CLAIM:
		backbone_addr = hw_src;
		break;
	case BATADV_CLAIM_TYPE_REQUEST:
	case BATADV_CLAIM_TYPE_ANNOUNCE:
	case BATADV_CLAIM_TYPE_UNCLAIM:
		backbone_addr = ethhdr->h_source;
		break;
	default:
		return 0;
	}

	/* don't accept claim frames from ourselves */
	if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
		return 0;

	/* if it is already the same group, it is fine. */
	if (bla_dst->group == bla_dst_own->group)
		return 2;

	/* let's see if this originator is in our mesh */
	orig_node = batadv_orig_hash_find(bat_priv, backbone_addr);

	/* don't accept claims from gateways which are not in our mesh */
	if (!orig_node)
		return 1;

	/* if it is in our mesh, adopt the higher group id */
	if (ntohs(bla_dst->group) > ntohs(bla_dst_own->group)) {
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "taking other backbones claim group: %#.4x\n",
			   ntohs(bla_dst->group));
		bla_dst_own->group = bla_dst->group;
	}

	batadv_orig_node_free_ref(orig_node);

	return 2;
}
849
850
851
852
853
854
855
856
857
858
859
860
/**
 * batadv_bla_process_claim - check if this is a claim frame, and process it
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the primary hard interface of this batman soft interface
 * @skb: the frame to be checked
 *
 * Returns 1 if it was a claim frame (and should not be forwarded),
 * otherwise 0 to tell the caller that it can use the frame on its own.
 */
static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
				    struct batadv_hard_iface *primary_if,
				    struct sk_buff *skb)
{
	struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;
	uint8_t *hw_src, *hw_dst;
	struct vlan_hdr *vhdr, vhdr_buf;
	struct ethhdr *ethhdr;
	struct arphdr *arphdr;
	unsigned short vid;
	int vlan_depth = 0;
	__be16 proto;
	int headlen;
	int ret;

	vid = batadv_get_vid(skb, 0);
	ethhdr = eth_hdr(skb);

	proto = ethhdr->h_proto;
	headlen = ETH_HLEN;
	if (vid & BATADV_VLAN_HAS_TAG) {
		/* Traverse the VLAN/Ethertypes.
		 *
		 * At this point it is known that the first protocol is a
		 * VLAN header, so start checking at the encapsulated
		 * protocol.
		 *
		 * The depth of the VLAN headers is recorded to drop BLA
		 * claim frames encapsulated into multiple VLAN headers
		 * (QinQ) later on.
		 */
		do {
			vhdr = skb_header_pointer(skb, headlen, VLAN_HLEN,
						  &vhdr_buf);
			if (!vhdr)
				return 0;

			proto = vhdr->h_vlan_encapsulated_proto;
			headlen += VLAN_HLEN;
			vlan_depth++;
		} while (proto == htons(ETH_P_8021Q));
	}

	if (proto != htons(ETH_P_ARP))
		return 0; /* not a claim frame */

	/* this must be an ARP frame - make sure it is fully pulled in */
	if (unlikely(!pskb_may_pull(skb, headlen + arp_hdr_len(skb->dev))))
		return 0;

	/* pskb_may_pull() may have modified the pointers, get ethhdr again */
	ethhdr = eth_hdr(skb);
	arphdr = (struct arphdr *)((uint8_t *)ethhdr + headlen);

	/* check whether the ARP frame carries valid ethernet/IP
	 * information
	 */
	if (arphdr->ar_hrd != htons(ARPHRD_ETHER))
		return 0;
	if (arphdr->ar_pro != htons(ETH_P_IP))
		return 0;
	if (arphdr->ar_hln != ETH_ALEN)
		return 0;
	if (arphdr->ar_pln != 4)
		return 0;

	/* hw_dst follows sender hw (ETH_ALEN) + sender IP (4 bytes) */
	hw_src = (uint8_t *)arphdr + sizeof(struct arphdr);
	hw_dst = hw_src + ETH_ALEN + 4;
	bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
	bla_dst_own = &bat_priv->bla.claim_dest;

	/* check if it is a claim frame in general */
	if (memcmp(bla_dst->magic, bla_dst_own->magic,
		   sizeof(bla_dst->magic)) != 0)
		return 0;

	/* check if there is a claim frame encapsulated deeper in (QinQ) and
	 * drop that, as this is not supported by BLA but should also not be
	 * sent via the mesh.
	 */
	if (vlan_depth > 1)
		return 1;

	/* check if it is a claim frame of our group */
	ret = batadv_check_claim_group(bat_priv, primary_if, hw_src, hw_dst,
				       ethhdr);
	if (ret == 1)
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "bla_process_claim(): received a claim frame from another group. From: %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
			   ethhdr->h_source, BATADV_PRINT_VID(vid), hw_src,
			   hw_dst);

	if (ret < 2)
		return ret;

	/* become a backbone gw ourselves on this vlan if not happened yet */
	batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);

	/* dispatch on the different types of claim frames ... */
	switch (bla_dst->type) {
	case BATADV_CLAIM_TYPE_CLAIM:
		if (batadv_handle_claim(bat_priv, primary_if, hw_src,
					ethhdr->h_source, vid))
			return 1;
		break;
	case BATADV_CLAIM_TYPE_UNCLAIM:
		if (batadv_handle_unclaim(bat_priv, primary_if,
					  ethhdr->h_source, hw_src, vid))
			return 1;
		break;

	case BATADV_CLAIM_TYPE_ANNOUNCE:
		if (batadv_handle_announce(bat_priv, hw_src, ethhdr->h_source,
					   vid))
			return 1;
		break;
	case BATADV_CLAIM_TYPE_REQUEST:
		if (batadv_handle_request(bat_priv, primary_if, hw_src, ethhdr,
					  vid))
			return 1;
		break;
	}

	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "bla_process_claim(): ERROR - this looks like a claim frame, but is useless. eth src %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
		   ethhdr->h_source, BATADV_PRINT_VID(vid), hw_src, hw_dst);
	return 1;
}
988
989
990
991
/**
 * batadv_bla_purge_backbone_gw - remove backbone gateways after a timeout
 *  or immediately
 * @bat_priv: the bat priv with all the soft interface information
 * @now: when != 0, purge all entries regardless of their timeout
 */
static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	struct hlist_node *node_tmp;
	struct hlist_head *head;
	struct batadv_hashtable *hash;
	spinlock_t *list_lock;	/* protects write access to the hash lists */
	int i;

	hash = bat_priv->bla.backbone_hash;
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(backbone_gw, node_tmp,
					  head, hash_entry) {
			if (now)
				goto purge_now;
			if (!batadv_has_timed_out(backbone_gw->lasttime,
						  BATADV_BLA_BACKBONE_TIMEOUT))
				continue;

			batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
				   "bla_purge_backbone_gw(): backbone gw %pM timed out\n",
				   backbone_gw->orig);

purge_now:
			/* don't wait for the pending request anymore */
			if (atomic_read(&backbone_gw->request_sent))
				atomic_dec(&bat_priv->bla.num_requests);

			batadv_bla_del_backbone_claims(backbone_gw);

			/* unlink and drop the hash table's reference */
			hlist_del_rcu(&backbone_gw->hash_entry);
			batadv_backbone_gw_free_ref(backbone_gw);
		}
		spin_unlock_bh(list_lock);
	}
}
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
/**
 * batadv_bla_purge_claims - remove claims after a timeout or immediately
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the selected primary interface; may only be NULL if @now
 *  is set (the timeout path dereferences it)
 * @now: when != 0, purge all claims regardless of their timeout
 */
static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
				    struct batadv_hard_iface *primary_if,
				    int now)
{
	struct batadv_bla_claim *claim;
	struct hlist_head *head;
	struct batadv_hashtable *hash;
	int i;

	hash = bat_priv->bla.claim_hash;
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(claim, head, hash_entry) {
			if (now)
				goto purge_now;
			/* only time out claims of our own backbone gw */
			if (!batadv_compare_eth(claim->backbone_gw->orig,
						primary_if->net_dev->dev_addr))
				continue;
			if (!batadv_has_timed_out(claim->lasttime,
						  BATADV_BLA_CLAIM_TIMEOUT))
				continue;

			batadv_dbg(BATADV_DBG_BLA, bat_priv,
				   "bla_purge_claims(): %pM, vid %d, time out\n",
				   claim->addr, claim->vid);

purge_now:
			/* remove the claim by handling it as an unclaim */
			batadv_handle_unclaim(bat_priv, primary_if,
					      claim->backbone_gw->orig,
					      claim->addr, claim->vid);
		}
		rcu_read_unlock();
	}
}
1084
1085
1086
1087
1088
1089
1090
1091
1092
/**
 * batadv_bla_update_orig_address - update backbone gateways when our own
 *  originator address changes
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the newly selected primary interface
 * @oldif: the old primary interface; may be NULL to force a full purge
 */
void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
				    struct batadv_hard_iface *primary_if,
				    struct batadv_hard_iface *oldif)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	struct hlist_head *head;
	struct batadv_hashtable *hash;
	__be16 group;
	int i;

	/* reset the bridge loop avoidance group id to the crc16 of the
	 * new primary mac
	 */
	group = htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN));
	bat_priv->bla.claim_dest.group = group;

	/* purge everything when bridge loop avoidance is turned off */
	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		oldif = NULL;

	if (!oldif) {
		batadv_bla_purge_claims(bat_priv, NULL, 1);
		batadv_bla_purge_backbone_gw(bat_priv, 1);
		return;
	}

	hash = bat_priv->bla.backbone_hash;
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
			/* own orig still holds the old value. */
			if (!batadv_compare_eth(backbone_gw->orig,
						oldif->net_dev->dev_addr))
				continue;

			ether_addr_copy(backbone_gw->orig,
					primary_if->net_dev->dev_addr);

			/* send an announce frame so others will ask for our
			 * claims and update their tables.
			 */
			batadv_bla_send_announce(bat_priv, backbone_gw);
		}
		rcu_read_unlock();
	}
}
1141
1142
1143
1144
1145
/**
 * batadv_bla_periodic_work - periodic bla housekeeping
 * @work: kernel work struct
 *
 * Purges timed-out claims and backbone gateways, re-announces our own
 * backbone gateways and counts down pending request wait periods; finally
 * reschedules itself.
 */
static void batadv_bla_periodic_work(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_priv *bat_priv;
	struct batadv_priv_bla *priv_bla;
	struct hlist_head *head;
	struct batadv_bla_backbone_gw *backbone_gw;
	struct batadv_hashtable *hash;
	struct batadv_hard_iface *primary_if;
	int i;

	delayed_work = container_of(work, struct delayed_work, work);
	priv_bla = container_of(delayed_work, struct batadv_priv_bla, work);
	bat_priv = container_of(priv_bla, struct batadv_priv, bla);
	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	batadv_bla_purge_claims(bat_priv, primary_if, 0);
	batadv_bla_purge_backbone_gw(bat_priv, 0);

	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		goto out;

	hash = bat_priv->bla.backbone_hash;
	if (!hash)
		goto out;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
			/* only announce gateways owned by ourselves */
			if (!batadv_compare_eth(backbone_gw->orig,
						primary_if->net_dev->dev_addr))
				continue;

			backbone_gw->lasttime = jiffies;

			batadv_bla_send_announce(bat_priv, backbone_gw);

			/* if a request is pending, count down the remaining
			 * wait periods; once they have elapsed, clear
			 * request_sent and decrement the global
			 * num_requests counter
			 */
			if (atomic_read(&backbone_gw->request_sent) == 0)
				continue;

			if (!atomic_dec_and_test(&backbone_gw->wait_periods))
				continue;

			atomic_dec(&backbone_gw->bat_priv->bla.num_requests);
			atomic_set(&backbone_gw->request_sent, 0);
		}
		rcu_read_unlock();
	}
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);

	/* reschedule ourselves for the next period */
	queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work,
			   msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
}
1214
1215
1216
1217
1218
1219
/* separate lockdep classes for the two hash tables' bucket locks (assigned
 * via batadv_hash_set_lock_class() in batadv_bla_init())
 */
static struct lock_class_key batadv_claim_hash_lock_class_key;
static struct lock_class_key batadv_backbone_hash_lock_class_key;
1222
1223
/**
 * batadv_bla_init - initialize all bla structures
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Returns 0 on success, -ENOMEM if a hash table could not be allocated.
 */
int batadv_bla_init(struct batadv_priv *bat_priv)
{
	int i;
	uint8_t claim_dest[ETH_ALEN] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00};
	struct batadv_hard_iface *primary_if;
	uint16_t crc;
	unsigned long entrytime;

	spin_lock_init(&bat_priv->bla.bcast_duplist_lock);

	batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hash registering\n");

	/* setting claim destination address (magic prefix + group id) */
	memcpy(&bat_priv->bla.claim_dest.magic, claim_dest, 3);
	bat_priv->bla.claim_dest.type = 0;
	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (primary_if) {
		crc = crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN);
		bat_priv->bla.claim_dest.group = htons(crc);
		batadv_hardif_free_ref(primary_if);
	} else {
		/* no primary interface yet; set later in
		 * batadv_bla_update_orig_address()
		 */
		bat_priv->bla.claim_dest.group = 0;
	}

	/* initialize the duplicate list with already-expired timestamps */
	entrytime = jiffies - msecs_to_jiffies(BATADV_DUPLIST_TIMEOUT);
	for (i = 0; i < BATADV_DUPLIST_SIZE; i++)
		bat_priv->bla.bcast_duplist[i].entrytime = entrytime;
	bat_priv->bla.bcast_duplist_curr = 0;

	/* hashes already allocated on a previous call - nothing to do */
	if (bat_priv->bla.claim_hash)
		return 0;

	bat_priv->bla.claim_hash = batadv_hash_new(128);
	bat_priv->bla.backbone_hash = batadv_hash_new(32);

	if (!bat_priv->bla.claim_hash || !bat_priv->bla.backbone_hash)
		return -ENOMEM;

	batadv_hash_set_lock_class(bat_priv->bla.claim_hash,
				   &batadv_claim_hash_lock_class_key);
	batadv_hash_set_lock_class(bat_priv->bla.backbone_hash,
				   &batadv_backbone_hash_lock_class_key);

	batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hashes initialized\n");

	INIT_DELAYED_WORK(&bat_priv->bla.work, batadv_bla_periodic_work);

	queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work,
			   msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
	return 0;
}
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
/**
 * batadv_bla_check_bcast_duplist - check for duplicate broadcasts
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: contains the bcast_packet to be checked
 *
 * Checks whether an equal broadcast (same payload CRC, not too old) was
 * recently received from a *different* originator - another backbone
 * gateway on the same LAN may have re-sent it. Equal packets from the
 * same originator are allowed, as that may be intended.
 *
 * Returns 1 if the packet is a duplicate and should be dropped, 0
 * otherwise.
 */
int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
				   struct sk_buff *skb)
{
	int i, curr, ret = 0;
	__be32 crc;
	struct batadv_bcast_packet *bcast_packet;
	struct batadv_bcast_duplist_entry *entry;

	bcast_packet = (struct batadv_bcast_packet *)skb->data;

	/* calculate the crc over the payload following the bcast header */
	crc = batadv_skb_crc32(skb, (u8 *)(bcast_packet + 1));

	spin_lock_bh(&bat_priv->bla.bcast_duplist_lock);

	for (i = 0; i < BATADV_DUPLIST_SIZE; i++) {
		curr = (bat_priv->bla.bcast_duplist_curr + i);
		curr %= BATADV_DUPLIST_SIZE;
		entry = &bat_priv->bla.bcast_duplist[curr];

		/* we can stop searching if the entry is too old; entries
		 * behind it in the ring are even older
		 */
		if (batadv_has_timed_out(entry->entrytime,
					 BATADV_DUPLIST_TIMEOUT))
			break;

		if (entry->crc != crc)
			continue;

		/* same originator re-sending is allowed */
		if (batadv_compare_eth(entry->orig, bcast_packet->orig))
			continue;

		/* this entry seems to match: same crc, not too old,
		 * and from another gw. therefore return 1 to forbid it.
		 */
		ret = 1;
		goto out;
	}

	/* not found: overwrite the oldest ring slot and allow the packet,
	 * it is the first occurrence
	 */
	curr = (bat_priv->bla.bcast_duplist_curr + BATADV_DUPLIST_SIZE - 1);
	curr %= BATADV_DUPLIST_SIZE;
	entry = &bat_priv->bla.bcast_duplist[curr];
	entry->crc = crc;
	entry->entrytime = jiffies;
	ether_addr_copy(entry->orig, bcast_packet->orig);
	bat_priv->bla.bcast_duplist_curr = curr;

out:
	spin_unlock_bh(&bat_priv->bla.bcast_duplist_lock);

	return ret;
}
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig,
1358 unsigned short vid)
1359{
1360 struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
1361 struct hlist_head *head;
1362 struct batadv_bla_backbone_gw *backbone_gw;
1363 int i;
1364
1365 if (!atomic_read(&bat_priv->bridge_loop_avoidance))
1366 return false;
1367
1368 if (!hash)
1369 return false;
1370
1371 for (i = 0; i < hash->size; i++) {
1372 head = &hash->table[i];
1373
1374 rcu_read_lock();
1375 hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
1376 if (batadv_compare_eth(backbone_gw->orig, orig) &&
1377 backbone_gw->vid == vid) {
1378 rcu_read_unlock();
1379 return true;
1380 }
1381 }
1382 rcu_read_unlock();
1383 }
1384
1385 return false;
1386}
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398int batadv_bla_is_backbone_gw(struct sk_buff *skb,
1399 struct batadv_orig_node *orig_node, int hdr_size)
1400{
1401 struct batadv_bla_backbone_gw *backbone_gw;
1402 unsigned short vid;
1403
1404 if (!atomic_read(&orig_node->bat_priv->bridge_loop_avoidance))
1405 return 0;
1406
1407
1408 if (!pskb_may_pull(skb, hdr_size + ETH_HLEN))
1409 return 0;
1410
1411 vid = batadv_get_vid(skb, hdr_size);
1412
1413
1414 backbone_gw = batadv_backbone_hash_find(orig_node->bat_priv,
1415 orig_node->orig, vid);
1416 if (!backbone_gw)
1417 return 0;
1418
1419 batadv_backbone_gw_free_ref(backbone_gw);
1420 return 1;
1421}
1422
1423
/**
 * batadv_bla_free - free all bla structures
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Tears down the claim and backbone hashes; intended for interface
 * shutdown / module unload (presumably — no caller visible here).
 */
void batadv_bla_free(struct batadv_priv *bat_priv)
{
	struct batadv_hard_iface *primary_if;

	/* stop the periodic worker first so it cannot race with the
	 * hash teardown below
	 */
	cancel_delayed_work_sync(&bat_priv->bla.work);
	primary_if = batadv_primary_if_get_selected(bat_priv);

	if (bat_priv->bla.claim_hash) {
		/* last argument appears to force an unconditional purge
		 * regardless of entry age — TODO confirm against
		 * batadv_bla_purge_claims()
		 */
		batadv_bla_purge_claims(bat_priv, primary_if, 1);
		batadv_hash_destroy(bat_priv->bla.claim_hash);
		bat_priv->bla.claim_hash = NULL;
	}
	if (bat_priv->bla.backbone_hash) {
		batadv_bla_purge_backbone_gw(bat_priv, 1);
		batadv_hash_destroy(bat_priv->bla.backbone_hash);
		bat_priv->bla.backbone_hash = NULL;
	}
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
}
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
/**
 * batadv_bla_rx - check packets coming in from the mesh
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the frame to be checked
 * @vid: the VLAN ID of the frame
 * @is_bcast: the packet came in via a broadcast packet type
 *
 * Returns 1 if the frame was consumed here (the skb is freed on the
 * "handled" path) and the caller must not touch it any further; returns
 * 0 if the caller shall continue processing the frame.
 */
int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
		  unsigned short vid, bool is_bcast)
{
	struct ethhdr *ethhdr;
	struct batadv_bla_claim search_claim, *claim = NULL;
	struct batadv_hard_iface *primary_if;
	int ret;

	ethhdr = eth_hdr(skb);

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto handled;

	/* bridge loop avoidance disabled: pass everything through */
	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		goto allow;

	if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
		/* don't allow broadcasts while claim requests are in flight */
		if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast)
			goto handled;

	/* look up a claim for the frame's source client on this VLAN */
	ether_addr_copy(search_claim.addr, ethhdr->h_source);
	search_claim.vid = vid;
	claim = batadv_claim_hash_find(bat_priv, &search_claim);

	if (!claim) {
		/* no claim exists yet for this client: claim it for
		 * ourselves and let the frame pass
		 */
		batadv_handle_claim(bat_priv, primary_if,
				    primary_if->net_dev->dev_addr,
				    ethhdr->h_source, vid);
		goto allow;
	}

	/* if the claim is our own, refresh it and allow the frame */
	if (batadv_compare_eth(claim->backbone_gw->orig,
			       primary_if->net_dev->dev_addr)) {
		/* refresh the claim's timestamp */
		claim->lasttime = jiffies;
		goto allow;
	}

	/* the client is claimed by another backbone gateway */
	if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast) {
		/* broadcast from a foreign-claimed client: drop it; the
		 * responsible gateway delivers it to the LAN.
		 * NOTE(review): is_bcast presumably distinguishes true mesh
		 * broadcasts from broadcasts rerouted via unicast — confirm
		 * against the callers.
		 */
		goto handled;
	} else {
		/* unicast from a foreign-claimed client: it apparently
		 * reaches the LAN through us now, so take over the claim
		 * and allow the frame
		 */
		batadv_handle_claim(bat_priv, primary_if,
				    primary_if->net_dev->dev_addr,
				    ethhdr->h_source, vid);
		goto allow;
	}
allow:
	batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
	ret = 0;
	goto out;

handled:
	/* the frame ends here; free it and tell the caller so */
	kfree_skb(skb);
	ret = 1;

out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	if (claim)
		batadv_claim_free_ref(claim);
	return ret;
}
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
/**
 * batadv_bla_tx - check packets going into the mesh
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the frame to be checked
 * @vid: the VLAN ID of the frame
 *
 * Returns 1 if the frame must not be sent into the mesh (note that, unlike
 * batadv_bla_rx(), the skb is NOT freed on the "handled" path — the caller
 * keeps ownership); returns 0 if the caller may transmit it.
 */
int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
		  unsigned short vid)
{
	struct ethhdr *ethhdr;
	struct batadv_bla_claim search_claim, *claim = NULL;
	struct batadv_hard_iface *primary_if;
	int ret = 0;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* bridge loop avoidance disabled: nothing to do */
	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		goto allow;

	/* claim/announce frames are consumed by the BLA machinery itself */
	if (batadv_bla_process_claim(bat_priv, primary_if, skb))
		goto handled;

	ethhdr = eth_hdr(skb);

	if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
		/* don't allow broadcasts while claim requests are in flight */
		if (is_multicast_ether_addr(ethhdr->h_dest))
			goto handled;

	/* look up a claim for the frame's source client on this VLAN */
	ether_addr_copy(search_claim.addr, ethhdr->h_source);
	search_claim.vid = vid;

	claim = batadv_claim_hash_find(bat_priv, &search_claim);

	/* if no claim exists, allow the frame */
	if (!claim)
		goto allow;

	/* check whether the claim is our own */
	if (batadv_compare_eth(claim->backbone_gw->orig,
			       primary_if->net_dev->dev_addr)) {
		/* the client we claimed now sends towards the mesh via us,
		 * so release our claim and allow the frame
		 */
		batadv_handle_unclaim(bat_priv, primary_if,
				      primary_if->net_dev->dev_addr,
				      ethhdr->h_source, vid);
		goto allow;
	}

	/* the client is claimed by another backbone gateway */
	if (is_multicast_ether_addr(ethhdr->h_dest)) {
		/* multicast/broadcast: drop it — presumably the responsible
		 * gateway forwards it; confirm against BLA protocol docs
		 */
		goto handled;
	} else {
		/* unicast: allow it (delivery decision belongs to the
		 * destination side)
		 */
		goto allow;
	}
allow:
	batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
	ret = 0;
	goto out;
handled:
	ret = 1;
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	if (claim)
		batadv_claim_free_ref(claim);
	return ret;
}
1627
1628int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
1629{
1630 struct net_device *net_dev = (struct net_device *)seq->private;
1631 struct batadv_priv *bat_priv = netdev_priv(net_dev);
1632 struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
1633 struct batadv_bla_claim *claim;
1634 struct batadv_hard_iface *primary_if;
1635 struct hlist_head *head;
1636 uint32_t i;
1637 bool is_own;
1638 uint8_t *primary_addr;
1639
1640 primary_if = batadv_seq_print_text_primary_if_get(seq);
1641 if (!primary_if)
1642 goto out;
1643
1644 primary_addr = primary_if->net_dev->dev_addr;
1645 seq_printf(seq,
1646 "Claims announced for the mesh %s (orig %pM, group id %#.4x)\n",
1647 net_dev->name, primary_addr,
1648 ntohs(bat_priv->bla.claim_dest.group));
1649 seq_printf(seq, " %-17s %-5s %-17s [o] (%-6s)\n",
1650 "Client", "VID", "Originator", "CRC");
1651 for (i = 0; i < hash->size; i++) {
1652 head = &hash->table[i];
1653
1654 rcu_read_lock();
1655 hlist_for_each_entry_rcu(claim, head, hash_entry) {
1656 is_own = batadv_compare_eth(claim->backbone_gw->orig,
1657 primary_addr);
1658 seq_printf(seq, " * %pM on %5d by %pM [%c] (%#.4x)\n",
1659 claim->addr, BATADV_PRINT_VID(claim->vid),
1660 claim->backbone_gw->orig,
1661 (is_own ? 'x' : ' '),
1662 claim->backbone_gw->crc);
1663 }
1664 rcu_read_unlock();
1665 }
1666out:
1667 if (primary_if)
1668 batadv_hardif_free_ref(primary_if);
1669 return 0;
1670}
1671
1672int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset)
1673{
1674 struct net_device *net_dev = (struct net_device *)seq->private;
1675 struct batadv_priv *bat_priv = netdev_priv(net_dev);
1676 struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
1677 struct batadv_bla_backbone_gw *backbone_gw;
1678 struct batadv_hard_iface *primary_if;
1679 struct hlist_head *head;
1680 int secs, msecs;
1681 uint32_t i;
1682 bool is_own;
1683 uint8_t *primary_addr;
1684
1685 primary_if = batadv_seq_print_text_primary_if_get(seq);
1686 if (!primary_if)
1687 goto out;
1688
1689 primary_addr = primary_if->net_dev->dev_addr;
1690 seq_printf(seq,
1691 "Backbones announced for the mesh %s (orig %pM, group id %#.4x)\n",
1692 net_dev->name, primary_addr,
1693 ntohs(bat_priv->bla.claim_dest.group));
1694 seq_printf(seq, " %-17s %-5s %-9s (%-6s)\n",
1695 "Originator", "VID", "last seen", "CRC");
1696 for (i = 0; i < hash->size; i++) {
1697 head = &hash->table[i];
1698
1699 rcu_read_lock();
1700 hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
1701 msecs = jiffies_to_msecs(jiffies -
1702 backbone_gw->lasttime);
1703 secs = msecs / 1000;
1704 msecs = msecs % 1000;
1705
1706 is_own = batadv_compare_eth(backbone_gw->orig,
1707 primary_addr);
1708 if (is_own)
1709 continue;
1710
1711 seq_printf(seq, " * %pM on %5d %4i.%03is (%#.4x)\n",
1712 backbone_gw->orig,
1713 BATADV_PRINT_VID(backbone_gw->vid), secs,
1714 msecs, backbone_gw->crc);
1715 }
1716 rcu_read_unlock();
1717 }
1718out:
1719 if (primary_if)
1720 batadv_hardif_free_ref(primary_if);
1721 return 0;
1722}
1723