1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18#include "main.h"
19#include "hash.h"
20#include "hard-interface.h"
21#include "originator.h"
22#include "bridge_loop_avoidance.h"
23#include "translation-table.h"
24#include "send.h"
25
26#include <linux/etherdevice.h>
27#include <linux/crc16.h>
28#include <linux/if_arp.h>
29#include <net/arp.h>
30#include <linux/if_vlan.h>
31
32static const uint8_t batadv_announce_mac[4] = {0x43, 0x05, 0x43, 0x05};
33
34static void batadv_bla_periodic_work(struct work_struct *work);
35static void
36batadv_bla_send_announce(struct batadv_priv *bat_priv,
37 struct batadv_bla_backbone_gw *backbone_gw);
38
39
40static inline uint32_t batadv_choose_claim(const void *data, uint32_t size)
41{
42 struct batadv_bla_claim *claim = (struct batadv_bla_claim *)data;
43 uint32_t hash = 0;
44
45 hash = batadv_hash_bytes(hash, &claim->addr, sizeof(claim->addr));
46 hash = batadv_hash_bytes(hash, &claim->vid, sizeof(claim->vid));
47
48 hash += (hash << 3);
49 hash ^= (hash >> 11);
50 hash += (hash << 15);
51
52 return hash % size;
53}
54
55
56static inline uint32_t batadv_choose_backbone_gw(const void *data,
57 uint32_t size)
58{
59 const struct batadv_bla_claim *claim = (struct batadv_bla_claim *)data;
60 uint32_t hash = 0;
61
62 hash = batadv_hash_bytes(hash, &claim->addr, sizeof(claim->addr));
63 hash = batadv_hash_bytes(hash, &claim->vid, sizeof(claim->vid));
64
65 hash += (hash << 3);
66 hash ^= (hash >> 11);
67 hash += (hash << 15);
68
69 return hash % size;
70}
71
72
73
74static int batadv_compare_backbone_gw(const struct hlist_node *node,
75 const void *data2)
76{
77 const void *data1 = container_of(node, struct batadv_bla_backbone_gw,
78 hash_entry);
79 const struct batadv_bla_backbone_gw *gw1 = data1, *gw2 = data2;
80
81 if (!batadv_compare_eth(gw1->orig, gw2->orig))
82 return 0;
83
84 if (gw1->vid != gw2->vid)
85 return 0;
86
87 return 1;
88}
89
90
91static int batadv_compare_claim(const struct hlist_node *node,
92 const void *data2)
93{
94 const void *data1 = container_of(node, struct batadv_bla_claim,
95 hash_entry);
96 const struct batadv_bla_claim *cl1 = data1, *cl2 = data2;
97
98 if (!batadv_compare_eth(cl1->addr, cl2->addr))
99 return 0;
100
101 if (cl1->vid != cl2->vid)
102 return 0;
103
104 return 1;
105}
106
107
/**
 * batadv_backbone_gw_free_ref - drop a backbone gateway reference
 * @backbone_gw: backbone gateway whose reference is released
 *
 * Frees the entry after an RCU grace period once the last reference is gone.
 */
static void
batadv_backbone_gw_free_ref(struct batadv_bla_backbone_gw *backbone_gw)
{
	if (atomic_dec_and_test(&backbone_gw->refcount))
		kfree_rcu(backbone_gw, rcu);
}
114
115
/**
 * batadv_claim_free_rcu - RCU callback freeing a claim
 * @rcu: rcu head embedded in the claim to free
 *
 * Releases the claim's reference on its backbone gateway before freeing the
 * claim itself.
 */
static void batadv_claim_free_rcu(struct rcu_head *rcu)
{
	struct batadv_bla_claim *claim;

	claim = container_of(rcu, struct batadv_bla_claim, rcu);

	batadv_backbone_gw_free_ref(claim->backbone_gw);
	kfree(claim);
}
125
126
/**
 * batadv_claim_free_ref - drop a claim reference
 * @claim: claim whose reference is released
 *
 * Schedules the claim for freeing via RCU once the last reference is gone.
 */
static void batadv_claim_free_ref(struct batadv_bla_claim *claim)
{
	if (atomic_dec_and_test(&claim->refcount))
		call_rcu(&claim->rcu, batadv_claim_free_rcu);
}
132
133
134
135
136
137
138
139
140
/**
 * batadv_claim_hash_find - look up a claim in the claim hash
 * @bat_priv: the bat priv with all the soft interface information
 * @data: search key; only the addr and vid fields are used for matching
 *
 * Returns the matching claim with its refcount increased (the caller must
 * release it with batadv_claim_free_ref()), or NULL if none was found.
 */
static struct batadv_bla_claim
*batadv_claim_hash_find(struct batadv_priv *bat_priv,
			struct batadv_bla_claim *data)
{
	struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
	struct hlist_head *head;
	struct batadv_bla_claim *claim;
	struct batadv_bla_claim *claim_tmp = NULL;
	int index;

	if (!hash)
		return NULL;

	index = batadv_choose_claim(data, hash->size);
	head = &hash->table[index];

	rcu_read_lock();
	hlist_for_each_entry_rcu(claim, head, hash_entry) {
		if (!batadv_compare_claim(&claim->hash_entry, data))
			continue;

		/* skip entries whose refcount already dropped to zero -
		 * they are about to be freed
		 */
		if (!atomic_inc_not_zero(&claim->refcount))
			continue;

		claim_tmp = claim;
		break;
	}
	rcu_read_unlock();

	return claim_tmp;
}
172
173
174
175
176
177
178
179
180
/**
 * batadv_backbone_hash_find - look up a backbone gateway in the hash
 * @bat_priv: the bat priv with all the soft interface information
 * @addr: originator address of the backbone gateway
 * @vid: VLAN id of the backbone gateway
 *
 * Returns the matching backbone gateway with its refcount increased (the
 * caller must release it with batadv_backbone_gw_free_ref()), or NULL if
 * none was found.
 */
static struct batadv_bla_backbone_gw *
batadv_backbone_hash_find(struct batadv_priv *bat_priv,
			  uint8_t *addr, unsigned short vid)
{
	struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
	struct hlist_head *head;
	struct batadv_bla_backbone_gw search_entry, *backbone_gw;
	struct batadv_bla_backbone_gw *backbone_gw_tmp = NULL;
	int index;

	if (!hash)
		return NULL;

	/* build a stack-local key with the two fields the hash and compare
	 * functions look at
	 */
	ether_addr_copy(search_entry.orig, addr);
	search_entry.vid = vid;

	index = batadv_choose_backbone_gw(&search_entry, hash->size);
	head = &hash->table[index];

	rcu_read_lock();
	hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
		if (!batadv_compare_backbone_gw(&backbone_gw->hash_entry,
						&search_entry))
			continue;

		/* skip entries whose refcount already dropped to zero */
		if (!atomic_inc_not_zero(&backbone_gw->refcount))
			continue;

		backbone_gw_tmp = backbone_gw;
		break;
	}
	rcu_read_unlock();

	return backbone_gw_tmp;
}
216
217
/**
 * batadv_bla_del_backbone_claims - delete all claims for a backbone gateway
 * @backbone_gw: backbone gateway whose claims shall be removed
 *
 * Walks the whole claim hash under the per-bucket locks, unhashes every
 * claim owned by @backbone_gw and drops the hash's reference on it, then
 * resets the gateway's claim CRC to its initial value.
 */
static void
batadv_bla_del_backbone_claims(struct batadv_bla_backbone_gw *backbone_gw)
{
	struct batadv_hashtable *hash;
	struct hlist_node *node_tmp;
	struct hlist_head *head;
	struct batadv_bla_claim *claim;
	int i;
	spinlock_t *list_lock;	/* protects write access to the hash lists */

	hash = backbone_gw->bat_priv->bla.claim_hash;
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(claim, node_tmp,
					  head, hash_entry) {
			if (claim->backbone_gw != backbone_gw)
				continue;

			/* drop the hash's reference; actual freeing is
			 * deferred through RCU by batadv_claim_free_ref()
			 */
			batadv_claim_free_ref(claim);
			hlist_del_rcu(&claim->hash_entry);
		}
		spin_unlock_bh(list_lock);
	}

	/* all claims gone, reset the CRC over the claim set */
	backbone_gw->crc = BATADV_BLA_CRC_INIT;
}
251
252
253
254
255
256
257
258
/**
 * batadv_bla_send_claim - send a claim frame
 * @bat_priv: the bat priv with all the soft interface information
 * @mac: the MAC address to be announced within the claim
 * @vid: the VLAN id of the frame
 * @claimtype: the type of the claim (CLAIM, UNCLAIM, ANNOUNCE, REQUEST)
 *
 * The claim is encoded as a special ARP reply: the bla claim destination
 * (magic + type + group id) is carried in the ARP target hardware address,
 * and the ethernet/ARP source addresses are patched depending on the claim
 * type. The finished frame is fed back into the soft interface with
 * netif_rx() so it gets transmitted like locally received traffic.
 */
static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
				  unsigned short vid, int claimtype)
{
	struct sk_buff *skb;
	struct ethhdr *ethhdr;
	struct batadv_hard_iface *primary_if;
	struct net_device *soft_iface;
	uint8_t *hw_src;
	struct batadv_bla_claim_dst local_claim_dest;
	__be32 zeroip = 0;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		return;

	memcpy(&local_claim_dest, &bat_priv->bla.claim_dest,
	       sizeof(local_claim_dest));
	local_claim_dest.type = claimtype;

	soft_iface = primary_if->soft_iface;

	skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
			 /* IP DST: 0.0.0.0 */
			 zeroip,
			 primary_if->soft_iface,
			 /* IP SRC: 0.0.0.0 */
			 zeroip,
			 /* Ethernet DST: broadcast */
			 NULL,
			 /* Ethernet SRC/HW SRC: originator mac */
			 primary_if->net_dev->dev_addr,
			 /* HW DST: the claim destination structure
			  * (magic bytes, claim type and group id)
			  */
			 (uint8_t *)&local_claim_dest);

	if (!skb)
		goto out;

	ethhdr = (struct ethhdr *)skb->data;
	/* ARP sender hardware address, right behind the ARP header */
	hw_src = (uint8_t *)ethhdr + ETH_HLEN + sizeof(struct arphdr);

	/* now patch the addresses according to the claim type */
	switch (claimtype) {
	case BATADV_CLAIM_TYPE_CLAIM:
		/* the claimed client appears as the ethernet source */
		ether_addr_copy(ethhdr->h_source, mac);
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "bla_send_claim(): CLAIM %pM on vid %d\n", mac,
			   BATADV_PRINT_VID(vid));
		break;
	case BATADV_CLAIM_TYPE_UNCLAIM:
		/* the unclaimed client goes into the ARP HW source field */
		ether_addr_copy(hw_src, mac);
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "bla_send_claim(): UNCLAIM %pM on vid %d\n", mac,
			   BATADV_PRINT_VID(vid));
		break;
	case BATADV_CLAIM_TYPE_ANNOUNCE:
		/* the announce mac (with embedded CRC) goes into the ARP HW
		 * source field
		 */
		ether_addr_copy(hw_src, mac);
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "bla_send_claim(): ANNOUNCE of %pM on vid %d\n",
			   ethhdr->h_source, BATADV_PRINT_VID(vid));
		break;
	case BATADV_CLAIM_TYPE_REQUEST:
		/* the requested backbone gateway is both ARP HW source and
		 * ethernet destination
		 */
		ether_addr_copy(hw_src, mac);
		ether_addr_copy(ethhdr->h_dest, mac);
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "bla_send_claim(): REQUEST of %pM to %pM on vid %d\n",
			   ethhdr->h_source, ethhdr->h_dest,
			   BATADV_PRINT_VID(vid));
		break;
	}

	if (vid & BATADV_VLAN_HAS_TAG)
		skb = vlan_insert_tag(skb, htons(ETH_P_8021Q),
				      vid & VLAN_VID_MASK);

	skb_reset_mac_header(skb);
	skb->protocol = eth_type_trans(skb, soft_iface);
	batadv_inc_counter(bat_priv, BATADV_CNT_RX);
	batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
			   skb->len + ETH_HLEN);
	soft_iface->last_rx = jiffies;

	netif_rx(skb);
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
}
361
362
363
364
365
366
367
368
369
370
/**
 * batadv_bla_get_backbone_gw - find or create a backbone gateway entry
 * @bat_priv: the bat priv with all the soft interface information
 * @orig: the originator address of the backbone gateway
 * @vid: the VLAN id
 * @own_backbone: set to true if this is our own backbone gateway; an
 *  announcement is then sent immediately and a request period is started
 *
 * Returns the (possibly newly created) backbone gateway with an increased
 * refcount (release with batadv_backbone_gw_free_ref()), or NULL on
 * allocation/insertion failure.
 */
static struct batadv_bla_backbone_gw *
batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, uint8_t *orig,
			   unsigned short vid, bool own_backbone)
{
	struct batadv_bla_backbone_gw *entry;
	struct batadv_orig_node *orig_node;
	int hash_added;

	entry = batadv_backbone_hash_find(bat_priv, orig, vid);

	if (entry)
		return entry;

	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "bla_get_backbone_gw(): not found (%pM, %d), creating new entry\n",
		   orig, BATADV_PRINT_VID(vid));

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return NULL;

	entry->vid = vid;
	entry->lasttime = jiffies;
	entry->crc = BATADV_BLA_CRC_INIT;
	entry->bat_priv = bat_priv;
	atomic_set(&entry->request_sent, 0);
	atomic_set(&entry->wait_periods, 0);
	ether_addr_copy(entry->orig, orig);

	/* one reference for the hash, one for the caller */
	atomic_set(&entry->refcount, 2);

	hash_added = batadv_hash_add(bat_priv->bla.backbone_hash,
				     batadv_compare_backbone_gw,
				     batadv_choose_backbone_gw, entry,
				     &entry->hash_entry);

	if (unlikely(hash_added != 0)) {
		/* hash insertion failed, entry was never visible - plain
		 * kfree is fine
		 */
		kfree(entry);
		return NULL;
	}

	/* this originator is now a backbone gateway - drop any global
	 * translation table entries it previously announced on this vlan
	 */
	orig_node = batadv_orig_hash_find(bat_priv, orig);
	if (orig_node) {
		batadv_tt_global_del_orig(bat_priv, orig_node, vid,
					  "became a backbone gateway");
		batadv_orig_node_free_ref(orig_node);
	}

	if (own_backbone) {
		batadv_bla_send_announce(bat_priv, entry);

		/* start a pending-request period for our own gateway */
		atomic_inc(&entry->request_sent);
		atomic_set(&entry->wait_periods, BATADV_BLA_WAIT_PERIODS);
		atomic_inc(&bat_priv->bla.num_requests);
	}

	return entry;
}
433
434
435
436
/**
 * batadv_bla_update_own_backbone_gw - refresh our own backbone gateway entry
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the selected primary interface
 * @vid: VLAN id to update the backbone gateway for
 *
 * Looks up (or creates) the backbone gateway for our primary interface
 * address on @vid and refreshes its last-seen timestamp.
 */
static void
batadv_bla_update_own_backbone_gw(struct batadv_priv *bat_priv,
				  struct batadv_hard_iface *primary_if,
				  unsigned short vid)
{
	struct batadv_bla_backbone_gw *backbone_gw;

	backbone_gw = batadv_bla_get_backbone_gw(bat_priv,
						 primary_if->net_dev->dev_addr,
						 vid, true);
	if (unlikely(!backbone_gw))
		return;

	backbone_gw->lasttime = jiffies;
	batadv_backbone_gw_free_ref(backbone_gw);
}
453
454
455
456
457
458
459
460
461
/**
 * batadv_bla_answer_request - answer a claim request
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the selected primary interface
 * @vid: the VLAN id of the request
 *
 * Re-sends a CLAIM frame for every claim owned by our own backbone gateway
 * on @vid, followed by a fresh ANNOUNCE, so the requester can rebuild its
 * claim table.
 */
static void batadv_bla_answer_request(struct batadv_priv *bat_priv,
				      struct batadv_hard_iface *primary_if,
				      unsigned short vid)
{
	struct hlist_head *head;
	struct batadv_hashtable *hash;
	struct batadv_bla_claim *claim;
	struct batadv_bla_backbone_gw *backbone_gw;
	int i;

	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "bla_answer_request(): received a claim request, send all of our own claims again\n");

	backbone_gw = batadv_backbone_hash_find(bat_priv,
						primary_if->net_dev->dev_addr,
						vid);
	if (!backbone_gw)
		return;

	/* walk the whole claim hash and repeat our own claims */
	hash = bat_priv->bla.claim_hash;
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(claim, head, hash_entry) {
			/* only own claims are interesting */
			if (claim->backbone_gw != backbone_gw)
				continue;

			batadv_bla_send_claim(bat_priv, claim->addr, claim->vid,
					      BATADV_CLAIM_TYPE_CLAIM);
		}
		rcu_read_unlock();
	}

	/* finally, send an announcement frame */
	batadv_bla_send_announce(bat_priv, backbone_gw);
	batadv_backbone_gw_free_ref(backbone_gw);
}
501
502
503
504
505
506
507
508
509
/**
 * batadv_bla_send_request - send a request to repeat claims
 * @backbone_gw: the backbone gateway from whom we are out of sync
 *
 * When the CRC is wrong, ask the backbone gateway to resend its claims:
 * first drop all claims we currently attribute to it, then send a REQUEST
 * frame and account the pending request (only once).
 */
static void batadv_bla_send_request(struct batadv_bla_backbone_gw *backbone_gw)
{
	/* first, remove all old entries */
	batadv_bla_del_backbone_claims(backbone_gw);

	batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
		   "Sending REQUEST to %pM\n", backbone_gw->orig);

	/* send request */
	batadv_bla_send_claim(backbone_gw->bat_priv, backbone_gw->orig,
			      backbone_gw->vid, BATADV_CLAIM_TYPE_REQUEST);

	/* count the request only once, even if sent repeatedly */
	if (!atomic_read(&backbone_gw->request_sent)) {
		atomic_inc(&backbone_gw->bat_priv->bla.num_requests);
		atomic_set(&backbone_gw->request_sent, 1);
	}
}
528
529
530
531
532
533
534
535
536
/**
 * batadv_bla_send_announce - send an announcement frame
 * @bat_priv: the bat priv with all the soft interface information
 * @backbone_gw: our backbone gateway which should be announced
 *
 * Builds the special announce MAC: the 4-byte announce prefix followed by
 * the gateway's claim CRC in network byte order, and sends it as an
 * ANNOUNCE claim frame.
 */
static void batadv_bla_send_announce(struct batadv_priv *bat_priv,
				     struct batadv_bla_backbone_gw *backbone_gw)
{
	uint8_t mac[ETH_ALEN];
	__be16 crc;

	memcpy(mac, batadv_announce_mac, 4);
	crc = htons(backbone_gw->crc);
	memcpy(&mac[4], &crc, 2);

	batadv_bla_send_claim(bat_priv, mac, backbone_gw->vid,
			      BATADV_CLAIM_TYPE_ANNOUNCE);
}
550
551
552
553
554
555
556
557
/**
 * batadv_bla_add_claim - add a new claim entry, or update an existing one
 * @bat_priv: the bat priv with all the soft interface information
 * @mac: the MAC address of the claimed client
 * @vid: the VLAN id of the frame
 * @backbone_gw: the backbone gateway which claims the client
 *
 * Creates a new claim (with two references: hash + local) or, if the claim
 * already exists under a different backbone gateway, transfers ownership
 * and keeps both gateways' claim CRCs in sync.
 */
static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
				 const uint8_t *mac, const unsigned short vid,
				 struct batadv_bla_backbone_gw *backbone_gw)
{
	struct batadv_bla_claim *claim;
	struct batadv_bla_claim search_claim;
	int hash_added;

	ether_addr_copy(search_claim.addr, mac);
	search_claim.vid = vid;
	claim = batadv_claim_hash_find(bat_priv, &search_claim);

	/* create a new claim entry if it does not exist yet. */
	if (!claim) {
		claim = kzalloc(sizeof(*claim), GFP_ATOMIC);
		if (!claim)
			return;

		ether_addr_copy(claim->addr, mac);
		claim->vid = vid;
		claim->lasttime = jiffies;
		claim->backbone_gw = backbone_gw;

		/* one reference for the hash, one for this function */
		atomic_set(&claim->refcount, 2);
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "bla_add_claim(): adding new entry %pM, vid %d to hash ...\n",
			   mac, BATADV_PRINT_VID(vid));
		hash_added = batadv_hash_add(bat_priv->bla.claim_hash,
					     batadv_compare_claim,
					     batadv_choose_claim, claim,
					     &claim->hash_entry);

		if (unlikely(hash_added != 0)) {
			/* only local changes happened - undo with kfree */
			kfree(claim);
			return;
		}
	} else {
		claim->lasttime = jiffies;
		if (claim->backbone_gw == backbone_gw)
			/* no need to register a new backbone */
			goto claim_free_ref;

		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "bla_add_claim(): changing ownership for %pM, vid %d\n",
			   mac, BATADV_PRINT_VID(vid));

		/* remove the claim from the old owner's CRC and drop the
		 * claim's reference on it
		 */
		claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
		batadv_backbone_gw_free_ref(claim->backbone_gw);
	}
	/* set (new) backbone gw */
	atomic_inc(&backbone_gw->refcount);
	claim->backbone_gw = backbone_gw;

	/* fold the claim into the new owner's CRC */
	backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
	backbone_gw->lasttime = jiffies;

claim_free_ref:
	batadv_claim_free_ref(claim);
}
618
619
620
621
/**
 * batadv_bla_del_claim - delete a claim from the claim hash
 * @bat_priv: the bat priv with all the soft interface information
 * @mac: the MAC address of the claimed client
 * @vid: the VLAN id of the claim
 *
 * Removes the claim (if present) from the hash, updates the owning backbone
 * gateway's claim CRC and drops both the hash's and the lookup's reference.
 */
static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
				 const uint8_t *mac, const unsigned short vid)
{
	struct batadv_bla_claim search_claim, *claim;

	ether_addr_copy(search_claim.addr, mac);
	search_claim.vid = vid;
	claim = batadv_claim_hash_find(bat_priv, &search_claim);
	if (!claim)
		return;

	batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_del_claim(): %pM, vid %d\n",
		   mac, BATADV_PRINT_VID(vid));

	batadv_hash_remove(bat_priv->bla.claim_hash, batadv_compare_claim,
			   batadv_choose_claim, claim);
	/* reference from the hash is gone */
	batadv_claim_free_ref(claim);

	claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);

	/* don't need the reference from hash_find() anymore */
	batadv_claim_free_ref(claim);
}
645
646
/**
 * batadv_handle_announce - check for ANNOUNCE frame
 * @bat_priv: the bat priv with all the soft interface information
 * @an_addr: announcement MAC (announce prefix + CRC in bytes 4-5)
 * @backbone_addr: originator address of the announcing backbone gateway
 * @vid: the VLAN id of the frame
 *
 * Returns 1 if the frame was an announcement and was handled, 0 otherwise.
 * On CRC mismatch a claim REQUEST is sent; on match a pending request is
 * considered answered.
 */
static int batadv_handle_announce(struct batadv_priv *bat_priv,
				  uint8_t *an_addr, uint8_t *backbone_addr,
				  unsigned short vid)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	uint16_t crc;

	/* not an announcement unless the 4-byte prefix matches */
	if (memcmp(an_addr, batadv_announce_mac, 4) != 0)
		return 0;

	backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid,
						 false);

	if (unlikely(!backbone_gw))
		return 1;

	/* handle as ANNOUNCE frame */
	backbone_gw->lasttime = jiffies;
	/* the CRC is transported in bytes 4-5 of the announce mac */
	crc = ntohs(*((__be16 *)(&an_addr[4])));

	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "handle_announce(): ANNOUNCE vid %d (sent by %pM)... CRC = %#.4x\n",
		   BATADV_PRINT_VID(vid), backbone_gw->orig, crc);

	if (backbone_gw->crc != crc) {
		batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
			   "handle_announce(): CRC FAILED for %pM/%d (my = %#.4x, sent = %#.4x)\n",
			   backbone_gw->orig,
			   BATADV_PRINT_VID(backbone_gw->vid),
			   backbone_gw->crc, crc);

		batadv_bla_send_request(backbone_gw);
	} else {
		/* CRC matches - if we had sent a request, it is answered
		 * now and no longer pending
		 */
		if (atomic_read(&backbone_gw->request_sent)) {
			atomic_dec(&backbone_gw->bat_priv->bla.num_requests);
			atomic_set(&backbone_gw->request_sent, 0);
		}
	}

	batadv_backbone_gw_free_ref(backbone_gw);
	return 1;
}
693
694
/**
 * batadv_handle_request - check for REQUEST frame
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the selected primary interface
 * @backbone_addr: backbone address to be requested (ARP HW source)
 * @ethhdr: ethernet header of a packet
 * @vid: the VLAN id of the frame
 *
 * Returns 1 if the frame was a request (handled or ignored), 0 otherwise.
 */
static int batadv_handle_request(struct batadv_priv *bat_priv,
				 struct batadv_hard_iface *primary_if,
				 uint8_t *backbone_addr,
				 struct ethhdr *ethhdr, unsigned short vid)
{
	/* check for REQUEST frame */
	if (!batadv_compare_eth(backbone_addr, ethhdr->h_dest))
		return 0;

	/* sanity check, this should not happen on a normal switch,
	 * we ignore it in this case.
	 */
	if (!batadv_compare_eth(ethhdr->h_dest, primary_if->net_dev->dev_addr))
		return 1;

	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "handle_request(): REQUEST vid %d (sent by %pM)...\n",
		   BATADV_PRINT_VID(vid), ethhdr->h_source);

	batadv_bla_answer_request(bat_priv, primary_if, vid);
	return 1;
}
717
718
/**
 * batadv_handle_unclaim - check for UNCLAIM frame
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the selected primary interface
 * @backbone_addr: originator address of the backbone (ethernet source)
 * @claim_addr: client MAC address to be unclaimed (ARP HW source)
 * @vid: the VLAN id of the frame
 *
 * Returns 1 - unclaim frames are always considered handled. If the unclaim
 * originates from ourselves, it is relayed first.
 */
static int batadv_handle_unclaim(struct batadv_priv *bat_priv,
				 struct batadv_hard_iface *primary_if,
				 uint8_t *backbone_addr,
				 uint8_t *claim_addr, unsigned short vid)
{
	struct batadv_bla_backbone_gw *backbone_gw;

	/* relay unclaim if the claim was relayed by us before */
	if (primary_if && batadv_compare_eth(backbone_addr,
					     primary_if->net_dev->dev_addr))
		batadv_bla_send_claim(bat_priv, claim_addr, vid,
				      BATADV_CLAIM_TYPE_UNCLAIM);

	backbone_gw = batadv_backbone_hash_find(bat_priv, backbone_addr, vid);

	if (!backbone_gw)
		return 1;

	/* this must be an UNCLAIM frame */
	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "handle_unclaim(): UNCLAIM %pM on vid %d (sent by %pM)...\n",
		   claim_addr, BATADV_PRINT_VID(vid), backbone_gw->orig);

	batadv_bla_del_claim(bat_priv, claim_addr, vid);
	batadv_backbone_gw_free_ref(backbone_gw);
	return 1;
}
746
747
/**
 * batadv_handle_claim - check for CLAIM frame
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the selected primary interface
 * @backbone_addr: originator address of the backbone (ARP HW source)
 * @claim_addr: client MAC address to be claimed (ethernet source)
 * @vid: the VLAN id of the frame
 *
 * Returns 1 - claim frames are always considered handled. Registers the
 * claim and, if we are the claiming backbone, relays the claim frame.
 */
static int batadv_handle_claim(struct batadv_priv *bat_priv,
			       struct batadv_hard_iface *primary_if,
			       uint8_t *backbone_addr, uint8_t *claim_addr,
			       unsigned short vid)
{
	struct batadv_bla_backbone_gw *backbone_gw;

	/* register the gateway the claim came from (creating it if
	 * necessary)
	 */
	backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid,
						 false);

	if (unlikely(!backbone_gw))
		return 1;

	/* this must be a CLAIM frame */
	batadv_bla_add_claim(bat_priv, claim_addr, vid, backbone_gw);
	if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
		batadv_bla_send_claim(bat_priv, claim_addr, vid,
				      BATADV_CLAIM_TYPE_CLAIM);

	/* TODO: we could call something like tt_local_del() here. */

	batadv_backbone_gw_free_ref(backbone_gw);
	return 1;
}
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
/**
 * batadv_check_claim_group - check the claim group of a claim frame
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the primary interface of this batman interface
 * @hw_src: the Hardware source in the ARP Header
 * @hw_dst: the Hardware destination in the ARP Header (claim destination)
 * @ethhdr: pointer to the Ethernet header of the claim frame
 *
 * Checks if it is a claim packet and if its on the same group.
 * This function also applies the group ID of the sender
 * if it is in the same mesh.
 *
 * Returns:
 *	2  - if it is a claim packet and on the same group
 *	1  - if is a claim packet from another group
 *	0  - if it is not a claim packet
 */
static int batadv_check_claim_group(struct batadv_priv *bat_priv,
				    struct batadv_hard_iface *primary_if,
				    uint8_t *hw_src, uint8_t *hw_dst,
				    struct ethhdr *ethhdr)
{
	uint8_t *backbone_addr;
	struct batadv_orig_node *orig_node;
	struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;

	bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
	bla_dst_own = &bat_priv->bla.claim_dest;

	/* the backbone address depends on the claim type: for CLAIM frames
	 * it is the ARP HW source, for the others the ethernet source
	 */
	switch (bla_dst->type) {
	case BATADV_CLAIM_TYPE_CLAIM:
		backbone_addr = hw_src;
		break;
	case BATADV_CLAIM_TYPE_REQUEST:
	case BATADV_CLAIM_TYPE_ANNOUNCE:
	case BATADV_CLAIM_TYPE_UNCLAIM:
		backbone_addr = ethhdr->h_source;
		break;
	default:
		return 0;
	}

	/* don't accept claim frames from ourselves */
	if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
		return 0;

	/* if its already the same group, it is fine. */
	if (bla_dst->group == bla_dst_own->group)
		return 2;

	/* lets see if this originator is in our mesh */
	orig_node = batadv_orig_hash_find(bat_priv, backbone_addr);

	/* unknown originator - probably not in our mesh, ignore the
	 * claim frame for now
	 */
	if (!orig_node)
		return 1;

	/* it is in our mesh - adopt the larger group id */
	if (ntohs(bla_dst->group) > ntohs(bla_dst_own->group)) {
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "taking other backbones claim group: %#.4x\n",
			   ntohs(bla_dst->group));
		bla_dst_own->group = bla_dst->group;
	}

	batadv_orig_node_free_ref(orig_node);

	return 2;
}
848
849
850
851
852
853
854
855
856
857
858
859
/**
 * batadv_bla_process_claim - check if this is a claim frame, and process it
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the primary hard interface of this batman soft interface
 * @skb: the frame to be checked
 *
 * Returns 1 if it was a claim frame, otherwise return 0 to
 * tell the callers that it was not a claim frame.
 */
static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
				    struct batadv_hard_iface *primary_if,
				    struct sk_buff *skb)
{
	struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;
	uint8_t *hw_src, *hw_dst;
	struct vlan_hdr *vhdr, vhdr_buf;
	struct ethhdr *ethhdr;
	struct arphdr *arphdr;
	unsigned short vid;
	int vlan_depth = 0;
	__be16 proto;
	int headlen;
	int ret;

	vid = batadv_get_vid(skb, 0);
	ethhdr = eth_hdr(skb);

	proto = ethhdr->h_proto;
	headlen = ETH_HLEN;
	if (vid & BATADV_VLAN_HAS_TAG) {
		/* traverse the stacked vlan headers until a non-8021Q
		 * encapsulated protocol is found, counting the depth;
		 * skb_header_pointer() copies into vhdr_buf if the header
		 * is not linear
		 */
		do {
			vhdr = skb_header_pointer(skb, headlen, VLAN_HLEN,
						  &vhdr_buf);
			if (!vhdr)
				return 0;

			proto = vhdr->h_vlan_encapsulated_proto;
			headlen += VLAN_HLEN;
			vlan_depth++;
		} while (proto == htons(ETH_P_8021Q));
	}

	if (proto != htons(ETH_P_ARP))
		return 0; /* not a claim frame */

	/* this must be a ARP frame. check if it is a claim. */
	if (unlikely(!pskb_may_pull(skb, headlen + arp_hdr_len(skb->dev))))
		return 0;

	/* pskb_may_pull() may have modified the pointers, get ethhdr again */
	ethhdr = eth_hdr(skb);
	arphdr = (struct arphdr *)((uint8_t *)ethhdr + headlen);

	/* Check whether the ARP frame carries a valid IP/Ethernet
	 * information
	 */
	if (arphdr->ar_hrd != htons(ARPHRD_ETHER))
		return 0;
	if (arphdr->ar_pro != htons(ETH_P_IP))
		return 0;
	if (arphdr->ar_hln != ETH_ALEN)
		return 0;
	if (arphdr->ar_pln != 4)
		return 0;

	hw_src = (uint8_t *)arphdr + sizeof(struct arphdr);
	hw_dst = hw_src + ETH_ALEN + 4;
	bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
	bla_dst_own = &bat_priv->bla.claim_dest;

	/* check if it is a claim frame by comparing the magic bytes */
	if (memcmp(bla_dst->magic, bla_dst_own->magic,
		   sizeof(bla_dst->magic)) != 0)
		return 0;

	/* claim frames stacked deeper than one vlan are treated as claim
	 * frames but not processed any further
	 */
	if (vlan_depth > 1)
		return 1;

	/* check if it is a claim frame. */
	ret = batadv_check_claim_group(bat_priv, primary_if, hw_src, hw_dst,
				       ethhdr);
	if (ret == 1)
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "bla_process_claim(): received a claim frame from another group. From: %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
			   ethhdr->h_source, BATADV_PRINT_VID(vid), hw_src,
			   hw_dst);

	if (ret < 2)
		return ret;

	/* become a backbone gw ourselves on this vlan if not happened yet */
	batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);

	/* check for the different types of claim frames ... */
	switch (bla_dst->type) {
	case BATADV_CLAIM_TYPE_CLAIM:
		if (batadv_handle_claim(bat_priv, primary_if, hw_src,
					ethhdr->h_source, vid))
			return 1;
		break;
	case BATADV_CLAIM_TYPE_UNCLAIM:
		if (batadv_handle_unclaim(bat_priv, primary_if,
					  ethhdr->h_source, hw_src, vid))
			return 1;
		break;

	case BATADV_CLAIM_TYPE_ANNOUNCE:
		if (batadv_handle_announce(bat_priv, hw_src, ethhdr->h_source,
					   vid))
			return 1;
		break;
	case BATADV_CLAIM_TYPE_REQUEST:
		if (batadv_handle_request(bat_priv, primary_if, hw_src, ethhdr,
					  vid))
			return 1;
		break;
	}

	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "bla_process_claim(): ERROR - this looks like a claim frame, but is useless. eth src %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
		   ethhdr->h_source, BATADV_PRINT_VID(vid), hw_src, hw_dst);
	return 1;
}
987
988
989
990
/**
 * batadv_bla_purge_backbone_gw - remove backbone gateways after a timeout or
 *  immediately
 * @bat_priv: the bat priv with all the soft interface information
 * @now: whether to purge all entries right away regardless of timeout
 */
static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	struct hlist_node *node_tmp;
	struct hlist_head *head;
	struct batadv_hashtable *hash;
	spinlock_t *list_lock;	/* protects write access to the hash lists */
	int i;

	hash = bat_priv->bla.backbone_hash;
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(backbone_gw, node_tmp,
					  head, hash_entry) {
			if (now)
				goto purge_now;
			if (!batadv_has_timed_out(backbone_gw->lasttime,
						  BATADV_BLA_BACKBONE_TIMEOUT))
				continue;

			batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
				   "bla_purge_backbone_gw(): backbone gw %pM timed out\n",
				   backbone_gw->orig);

purge_now:
			/* don't wait for the pending request anymore */
			if (atomic_read(&backbone_gw->request_sent))
				atomic_dec(&bat_priv->bla.num_requests);

			batadv_bla_del_backbone_claims(backbone_gw);

			hlist_del_rcu(&backbone_gw->hash_entry);
			batadv_backbone_gw_free_ref(backbone_gw);
		}
		spin_unlock_bh(list_lock);
	}
}
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
/**
 * batadv_bla_purge_claims - remove claims after a timeout or immediately
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the selected primary interface; timed-out purging only
 *  considers claims owned by our own backbone gateway
 * @now: whether to purge all claims right away regardless of timeout
 *
 * Purging is performed by issuing an unclaim for each affected claim.
 */
static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
				    struct batadv_hard_iface *primary_if,
				    int now)
{
	struct batadv_bla_claim *claim;
	struct hlist_head *head;
	struct batadv_hashtable *hash;
	int i;

	hash = bat_priv->bla.claim_hash;
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(claim, head, hash_entry) {
			if (now)
				goto purge_now;
			/* only our own claims are subject to the timeout */
			if (!batadv_compare_eth(claim->backbone_gw->orig,
						primary_if->net_dev->dev_addr))
				continue;
			if (!batadv_has_timed_out(claim->lasttime,
						  BATADV_BLA_CLAIM_TIMEOUT))
				continue;

			batadv_dbg(BATADV_DBG_BLA, bat_priv,
				   "bla_purge_claims(): %pM, vid %d, time out\n",
				   claim->addr, claim->vid);

purge_now:
			batadv_handle_unclaim(bat_priv, primary_if,
					      claim->backbone_gw->orig,
					      claim->addr, claim->vid);
		}
		rcu_read_unlock();
	}
}
1083
1084
1085
1086
1087
1088
1089
1090
1091
/**
 * batadv_bla_update_orig_address - update the backbone gateways when the own
 *  originator address changes
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the new selected primary_if
 * @oldif: the old primary interface, may be NULL
 *
 * Recomputes the claim group id from the new primary MAC. With no old
 * interface (or bla disabled), all claims and backbone gateways are purged;
 * otherwise our own backbone gateway entries are rewritten to the new
 * address and re-announced.
 */
void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
				    struct batadv_hard_iface *primary_if,
				    struct batadv_hard_iface *oldif)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	struct hlist_head *head;
	struct batadv_hashtable *hash;
	__be16 group;
	int i;

	/* reset bridge loop avoidance group id */
	group = htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN));
	bat_priv->bla.claim_dest.group = group;

	/* purge everything when bridge loop avoidance is turned off */
	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		oldif = NULL;

	if (!oldif) {
		batadv_bla_purge_claims(bat_priv, NULL, 1);
		batadv_bla_purge_backbone_gw(bat_priv, 1);
		return;
	}

	hash = bat_priv->bla.backbone_hash;
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
			/* own backbone gateway entries only */
			if (!batadv_compare_eth(backbone_gw->orig,
						oldif->net_dev->dev_addr))
				continue;

			ether_addr_copy(backbone_gw->orig,
					primary_if->net_dev->dev_addr);
			/* send an announce frame so others will ask for our
			 * claims and update their tables
			 */
			batadv_bla_send_announce(bat_priv, backbone_gw);
		}
		rcu_read_unlock();
	}
}
1140
1141
1142
1143
1144
/**
 * batadv_bla_periodic_work - periodic bla housekeeping
 * @work: kernel work struct embedded in bat_priv->bla.work
 *
 * Purges timed-out claims and backbone gateways, re-announces our own
 * backbone gateways and counts down pending request wait periods. Always
 * re-queues itself for the next BATADV_BLA_PERIOD_LENGTH interval.
 */
static void batadv_bla_periodic_work(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_priv *bat_priv;
	struct batadv_priv_bla *priv_bla;
	struct hlist_head *head;
	struct batadv_bla_backbone_gw *backbone_gw;
	struct batadv_hashtable *hash;
	struct batadv_hard_iface *primary_if;
	int i;

	delayed_work = container_of(work, struct delayed_work, work);
	priv_bla = container_of(delayed_work, struct batadv_priv_bla, work);
	bat_priv = container_of(priv_bla, struct batadv_priv, bla);
	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	batadv_bla_purge_claims(bat_priv, primary_if, 0);
	batadv_bla_purge_backbone_gw(bat_priv, 0);

	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		goto out;

	hash = bat_priv->bla.backbone_hash;
	if (!hash)
		goto out;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
			/* only our own backbone gateways are refreshed */
			if (!batadv_compare_eth(backbone_gw->orig,
						primary_if->net_dev->dev_addr))
				continue;

			backbone_gw->lasttime = jiffies;

			batadv_bla_send_announce(bat_priv, backbone_gw);

			/* request_sent is only set when we sent a claim
			 * request and are still waiting; count the wait
			 * periods down and, when they are exhausted, stop
			 * waiting for the answer
			 */
			if (atomic_read(&backbone_gw->request_sent) == 0)
				continue;

			if (!atomic_dec_and_test(&backbone_gw->wait_periods))
				continue;

			atomic_dec(&backbone_gw->bat_priv->bla.num_requests);
			atomic_set(&backbone_gw->request_sent, 0);
		}
		rcu_read_unlock();
	}
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);

	queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work,
			   msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
}
1213
1214
1215
1216
1217
1218
1219static struct lock_class_key batadv_claim_hash_lock_class_key;
1220static struct lock_class_key batadv_backbone_hash_lock_class_key;
1221
1222
/**
 * batadv_bla_init - initialize all bla structures
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Sets up the claim destination (magic bytes + group id derived from the
 * primary MAC), the broadcast duplicate list, the claim and backbone hashes
 * and the periodic worker. Returns 0 on success, -ENOMEM otherwise.
 */
int batadv_bla_init(struct batadv_priv *bat_priv)
{
	int i;
	uint8_t claim_dest[ETH_ALEN] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00};
	struct batadv_hard_iface *primary_if;
	uint16_t crc;
	unsigned long entrytime;

	spin_lock_init(&bat_priv->bla.bcast_duplist_lock);

	batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hash registering\n");

	/* setting claim destination address: only the first 3 magic bytes
	 * of claim_dest are used
	 */
	memcpy(&bat_priv->bla.claim_dest.magic, claim_dest, 3);
	bat_priv->bla.claim_dest.type = 0;
	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (primary_if) {
		/* group id is the crc16 of the primary MAC address */
		crc = crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN);
		bat_priv->bla.claim_dest.group = htons(crc);
		batadv_hardif_free_ref(primary_if);
	} else {
		bat_priv->bla.claim_dest.group = 0;
	}

	/* initialize the duplicate list with already-expired timestamps */
	entrytime = jiffies - msecs_to_jiffies(BATADV_DUPLIST_TIMEOUT);
	for (i = 0; i < BATADV_DUPLIST_SIZE; i++)
		bat_priv->bla.bcast_duplist[i].entrytime = entrytime;
	bat_priv->bla.bcast_duplist_curr = 0;

	/* hashes already initialized on a previous call */
	if (bat_priv->bla.claim_hash)
		return 0;

	bat_priv->bla.claim_hash = batadv_hash_new(128);
	bat_priv->bla.backbone_hash = batadv_hash_new(32);

	if (!bat_priv->bla.claim_hash || !bat_priv->bla.backbone_hash)
		return -ENOMEM;

	batadv_hash_set_lock_class(bat_priv->bla.claim_hash,
				   &batadv_claim_hash_lock_class_key);
	batadv_hash_set_lock_class(bat_priv->bla.backbone_hash,
				   &batadv_backbone_hash_lock_class_key);

	batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hashes initialized\n");

	INIT_DELAYED_WORK(&bat_priv->bla.work, batadv_bla_periodic_work);

	queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work,
			   msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
	return 0;
}
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
/**
 * batadv_bla_check_bcast_duplist - check for duplicate broadcasts
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: contains the broadcast packet to be checked
 *
 * Compares the payload CRC of @skb against the recently-seen broadcast
 * ring buffer. Returns 1 if the same payload was recently received from a
 * different originator (a probable loop duplicate), 0 otherwise. New
 * payloads are recorded in the ring buffer.
 */
int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
				   struct sk_buff *skb)
{
	int i, curr, ret = 0;
	__be32 crc;
	struct batadv_bcast_packet *bcast_packet;
	struct batadv_bcast_duplist_entry *entry;

	bcast_packet = (struct batadv_bcast_packet *)skb->data;

	/* calculate the crc over the payload behind the bcast header ... */
	crc = batadv_skb_crc32(skb, (u8 *)(bcast_packet + 1));

	spin_lock_bh(&bat_priv->bla.bcast_duplist_lock);

	for (i = 0; i < BATADV_DUPLIST_SIZE; i++) {
		/* walk the ring buffer starting at the newest entry */
		curr = (bat_priv->bla.bcast_duplist_curr + i);
		curr %= BATADV_DUPLIST_SIZE;
		entry = &bat_priv->bla.bcast_duplist[curr];

		/* entries are ordered by time - once one has timed out,
		 * the rest are older and can be skipped
		 */
		if (batadv_has_timed_out(entry->entrytime,
					 BATADV_DUPLIST_TIMEOUT))
			break;

		if (entry->crc != crc)
			continue;

		/* same payload from the same originator is not a loop */
		if (batadv_compare_eth(entry->orig, bcast_packet->orig))
			continue;

		/* this entry seems to match: same crc, not too old,
		 * and from another gw. therefore return 1 to forbid it.
		 */
		ret = 1;
		goto out;
	}

	/* not found - replace the oldest entry (the one right before the
	 * current ring position) with this payload
	 */
	curr = (bat_priv->bla.bcast_duplist_curr + BATADV_DUPLIST_SIZE - 1);
	curr %= BATADV_DUPLIST_SIZE;
	entry = &bat_priv->bla.bcast_duplist[curr];
	entry->crc = crc;
	entry->entrytime = jiffies;
	ether_addr_copy(entry->orig, bcast_packet->orig);
	bat_priv->bla.bcast_duplist_curr = curr;

out:
	spin_unlock_bh(&bat_priv->bla.bcast_duplist_lock);

	return ret;
}
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
/**
 * batadv_bla_is_backbone_gw_orig - check if an originator is a backbone gw
 * @bat_priv: the bat priv with all the soft interface information
 * @orig: originator mac address
 * @vid: VLAN identifier
 *
 * Returns true if orig is a backbone for this mesh on @vid, false otherwise
 * (also when bridge loop avoidance is disabled).
 */
bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig,
				    unsigned short vid)
{
	struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
	struct hlist_head *head;
	struct batadv_bla_backbone_gw *backbone_gw;
	int i;

	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		return false;

	if (!hash)
		return false;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
			if (batadv_compare_eth(backbone_gw->orig, orig) &&
			    backbone_gw->vid == vid) {
				rcu_read_unlock();
				return true;
			}
		}
		rcu_read_unlock();
	}

	return false;
}
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400int batadv_bla_is_backbone_gw(struct sk_buff *skb,
1401 struct batadv_orig_node *orig_node, int hdr_size)
1402{
1403 struct batadv_bla_backbone_gw *backbone_gw;
1404 unsigned short vid;
1405
1406 if (!atomic_read(&orig_node->bat_priv->bridge_loop_avoidance))
1407 return 0;
1408
1409
1410 if (!pskb_may_pull(skb, hdr_size + ETH_HLEN))
1411 return 0;
1412
1413 vid = batadv_get_vid(skb, hdr_size);
1414
1415
1416 backbone_gw = batadv_backbone_hash_find(orig_node->bat_priv,
1417 orig_node->orig, vid);
1418 if (!backbone_gw)
1419 return 0;
1420
1421 batadv_backbone_gw_free_ref(backbone_gw);
1422 return 1;
1423}
1424
1425
1426void batadv_bla_free(struct batadv_priv *bat_priv)
1427{
1428 struct batadv_hard_iface *primary_if;
1429
1430 cancel_delayed_work_sync(&bat_priv->bla.work);
1431 primary_if = batadv_primary_if_get_selected(bat_priv);
1432
1433 if (bat_priv->bla.claim_hash) {
1434 batadv_bla_purge_claims(bat_priv, primary_if, 1);
1435 batadv_hash_destroy(bat_priv->bla.claim_hash);
1436 bat_priv->bla.claim_hash = NULL;
1437 }
1438 if (bat_priv->bla.backbone_hash) {
1439 batadv_bla_purge_backbone_gw(bat_priv, 1);
1440 batadv_hash_destroy(bat_priv->bla.backbone_hash);
1441 bat_priv->bla.backbone_hash = NULL;
1442 }
1443 if (primary_if)
1444 batadv_hardif_free_ref(primary_if);
1445}
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
/**
 * batadv_bla_rx - check packets coming from the mesh
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the frame to be checked
 * @vid: the VLAN ID of the frame
 * @is_bcast: the packet came in a broadcast packet type
 *
 * Checks if the client behind this frame has to be claimed for us, and
 * whether the frame is allowed on the LAN.
 *
 * If the frame is handled here (dropped or consumed), the skb is freed and
 * 1 is returned; the caller must not touch the skb again. Otherwise 0 is
 * returned and the caller shall continue processing the skb.
 */
int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
		  unsigned short vid, bool is_bcast)
{
	struct ethhdr *ethhdr;
	struct batadv_bla_claim search_claim, *claim = NULL;
	struct batadv_hard_iface *primary_if;
	int ret;

	ethhdr = eth_hdr(skb);

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto handled;

	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		goto allow;

	/* while claim requests are pending, drop broadcast traffic: the
	 * claim table is not complete yet. (no braces on purpose - the
	 * inner if is the sole statement of the outer one)
	 */
	if (unlikely(atomic_read(&bat_priv->bla.num_requests)))

		if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast)
			goto handled;

	ether_addr_copy(search_claim.addr, ethhdr->h_source);
	search_claim.vid = vid;
	claim = batadv_claim_hash_find(bat_priv, &search_claim);

	if (!claim) {
		/* no claim exists yet for this client - claim it for us
		 * and let the frame through
		 */
		batadv_handle_claim(bat_priv, primary_if,
				    primary_if->net_dev->dev_addr,
				    ethhdr->h_source, vid);
		goto allow;
	}

	/* if it is our own claim, allow it in any case and refresh it */
	if (batadv_compare_eth(claim->backbone_gw->orig,
			       primary_if->net_dev->dev_addr)) {

		claim->lasttime = jiffies;
		goto allow;
	}

	/* the client is claimed by another backbone gateway */
	if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast) {
		/* broadcast from a foreign-claimed client: drop it, the
		 * responsible gateway is in charge. is_bcast must be
		 * checked too, since broadcast payloads may also arrive
		 * in unicast packet types.
		 */
		goto handled;
	} else {
		/* unicast: the client apparently roamed to us - send a
		 * claim and update the claim table immediately, then
		 * allow the frame
		 */
		batadv_handle_claim(bat_priv, primary_if,
				    primary_if->net_dev->dev_addr,
				    ethhdr->h_source, vid);
		goto allow;
	}
allow:
	batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
	ret = 0;
	goto out;

handled:
	kfree_skb(skb);
	ret = 1;

out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	if (claim)
		batadv_claim_free_ref(claim);
	return ret;
}
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
/**
 * batadv_bla_tx - check packets going into the mesh
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the frame to be checked
 * @vid: the VLAN ID of the frame
 *
 * Checks if the frame may be sent into the mesh: claim frames are consumed
 * here, and frames from clients claimed by another backbone gateway are
 * filtered.
 *
 * Returns 1 if the frame was handled (caller must not send it), 0 if the
 * caller shall continue processing the skb. Note that unlike batadv_bla_rx,
 * this function does NOT free the skb on the handled path.
 */
int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
		  unsigned short vid)
{
	struct ethhdr *ethhdr;
	struct batadv_bla_claim search_claim, *claim = NULL;
	struct batadv_hard_iface *primary_if;
	int ret = 0;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		goto allow;

	/* claim management frames are processed internally, never sent on */
	if (batadv_bla_process_claim(bat_priv, primary_if, skb))
		goto handled;

	ethhdr = eth_hdr(skb);

	/* while claim requests are pending, drop multicast traffic: the
	 * claim table is not complete yet. (no braces on purpose - the
	 * inner if is the sole statement of the outer one)
	 */
	if (unlikely(atomic_read(&bat_priv->bla.num_requests)))

		if (is_multicast_ether_addr(ethhdr->h_dest))
			goto handled;

	ether_addr_copy(search_claim.addr, ethhdr->h_source);
	search_claim.vid = vid;

	claim = batadv_claim_hash_find(bat_priv, &search_claim);

	/* no claim for this client - let the frame through */
	if (!claim)
		goto allow;

	/* check if we are responsible for this claim */
	if (batadv_compare_eth(claim->backbone_gw->orig,
			       primary_if->net_dev->dev_addr)) {
		/* the client sends into the mesh through us although we
		 * claimed it: it has roamed away, so unclaim it
		 */
		batadv_handle_unclaim(bat_priv, primary_if,
				      primary_if->net_dev->dev_addr,
				      ethhdr->h_source, vid);
		goto allow;
	}

	/* the client is claimed by another backbone gateway */
	if (is_multicast_ether_addr(ethhdr->h_dest)) {
		/* multicast: drop it, the responsible gateway has already
		 * forwarded it into the backbone network
		 */
		goto handled;
	} else {
		/* unicast: allow it, we may still be responsible for the
		 * destination
		 */
		goto allow;
	}
allow:
	batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
	ret = 0;
	goto out;
handled:
	ret = 1;
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	if (claim)
		batadv_claim_free_ref(claim);
	return ret;
}
1630
1631int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
1632{
1633 struct net_device *net_dev = (struct net_device *)seq->private;
1634 struct batadv_priv *bat_priv = netdev_priv(net_dev);
1635 struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
1636 struct batadv_bla_claim *claim;
1637 struct batadv_hard_iface *primary_if;
1638 struct hlist_head *head;
1639 uint32_t i;
1640 bool is_own;
1641 uint8_t *primary_addr;
1642
1643 primary_if = batadv_seq_print_text_primary_if_get(seq);
1644 if (!primary_if)
1645 goto out;
1646
1647 primary_addr = primary_if->net_dev->dev_addr;
1648 seq_printf(seq,
1649 "Claims announced for the mesh %s (orig %pM, group id %#.4x)\n",
1650 net_dev->name, primary_addr,
1651 ntohs(bat_priv->bla.claim_dest.group));
1652 seq_printf(seq, " %-17s %-5s %-17s [o] (%-6s)\n",
1653 "Client", "VID", "Originator", "CRC");
1654 for (i = 0; i < hash->size; i++) {
1655 head = &hash->table[i];
1656
1657 rcu_read_lock();
1658 hlist_for_each_entry_rcu(claim, head, hash_entry) {
1659 is_own = batadv_compare_eth(claim->backbone_gw->orig,
1660 primary_addr);
1661 seq_printf(seq, " * %pM on %5d by %pM [%c] (%#.4x)\n",
1662 claim->addr, BATADV_PRINT_VID(claim->vid),
1663 claim->backbone_gw->orig,
1664 (is_own ? 'x' : ' '),
1665 claim->backbone_gw->crc);
1666 }
1667 rcu_read_unlock();
1668 }
1669out:
1670 if (primary_if)
1671 batadv_hardif_free_ref(primary_if);
1672 return 0;
1673}
1674
1675int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset)
1676{
1677 struct net_device *net_dev = (struct net_device *)seq->private;
1678 struct batadv_priv *bat_priv = netdev_priv(net_dev);
1679 struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
1680 struct batadv_bla_backbone_gw *backbone_gw;
1681 struct batadv_hard_iface *primary_if;
1682 struct hlist_head *head;
1683 int secs, msecs;
1684 uint32_t i;
1685 bool is_own;
1686 uint8_t *primary_addr;
1687
1688 primary_if = batadv_seq_print_text_primary_if_get(seq);
1689 if (!primary_if)
1690 goto out;
1691
1692 primary_addr = primary_if->net_dev->dev_addr;
1693 seq_printf(seq,
1694 "Backbones announced for the mesh %s (orig %pM, group id %#.4x)\n",
1695 net_dev->name, primary_addr,
1696 ntohs(bat_priv->bla.claim_dest.group));
1697 seq_printf(seq, " %-17s %-5s %-9s (%-6s)\n",
1698 "Originator", "VID", "last seen", "CRC");
1699 for (i = 0; i < hash->size; i++) {
1700 head = &hash->table[i];
1701
1702 rcu_read_lock();
1703 hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
1704 msecs = jiffies_to_msecs(jiffies -
1705 backbone_gw->lasttime);
1706 secs = msecs / 1000;
1707 msecs = msecs % 1000;
1708
1709 is_own = batadv_compare_eth(backbone_gw->orig,
1710 primary_addr);
1711 if (is_own)
1712 continue;
1713
1714 seq_printf(seq, " * %pM on %5d %4i.%03is (%#.4x)\n",
1715 backbone_gw->orig,
1716 BATADV_PRINT_VID(backbone_gw->vid), secs,
1717 msecs, backbone_gw->crc);
1718 }
1719 rcu_read_unlock();
1720 }
1721out:
1722 if (primary_if)
1723 batadv_hardif_free_ref(primary_if);
1724 return 0;
1725}
1726