1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20#include "main.h"
21#include "hash.h"
22#include "hard-interface.h"
23#include "originator.h"
24#include "bridge_loop_avoidance.h"
25#include "translation-table.h"
26#include "send.h"
27
28#include <linux/etherdevice.h>
29#include <linux/crc16.h>
30#include <linux/if_arp.h>
31#include <net/arp.h>
32#include <linux/if_vlan.h>
33
/* mac address prefix used for all claim/announce frames; an ANNOUNCE appends
 * the 2-byte claim CRC to these 4 bytes (see batadv_bla_send_announce())
 */
static const uint8_t batadv_announce_mac[4] = {0x43, 0x05, 0x43, 0x05};
35
36static void batadv_bla_periodic_work(struct work_struct *work);
37static void
38batadv_bla_send_announce(struct batadv_priv *bat_priv,
39 struct batadv_bla_backbone_gw *backbone_gw);
40
41
42static inline uint32_t batadv_choose_claim(const void *data, uint32_t size)
43{
44 struct batadv_bla_claim *claim = (struct batadv_bla_claim *)data;
45 uint32_t hash = 0;
46
47 hash = batadv_hash_bytes(hash, &claim->addr, sizeof(claim->addr));
48 hash = batadv_hash_bytes(hash, &claim->vid, sizeof(claim->vid));
49
50 hash += (hash << 3);
51 hash ^= (hash >> 11);
52 hash += (hash << 15);
53
54 return hash % size;
55}
56
57
58static inline uint32_t batadv_choose_backbone_gw(const void *data,
59 uint32_t size)
60{
61 const struct batadv_bla_claim *claim = (struct batadv_bla_claim *)data;
62 uint32_t hash = 0;
63
64 hash = batadv_hash_bytes(hash, &claim->addr, sizeof(claim->addr));
65 hash = batadv_hash_bytes(hash, &claim->vid, sizeof(claim->vid));
66
67 hash += (hash << 3);
68 hash ^= (hash >> 11);
69 hash += (hash << 15);
70
71 return hash % size;
72}
73
74
75
76static int batadv_compare_backbone_gw(const struct hlist_node *node,
77 const void *data2)
78{
79 const void *data1 = container_of(node, struct batadv_bla_backbone_gw,
80 hash_entry);
81 const struct batadv_bla_backbone_gw *gw1 = data1, *gw2 = data2;
82
83 if (!batadv_compare_eth(gw1->orig, gw2->orig))
84 return 0;
85
86 if (gw1->vid != gw2->vid)
87 return 0;
88
89 return 1;
90}
91
92
93static int batadv_compare_claim(const struct hlist_node *node,
94 const void *data2)
95{
96 const void *data1 = container_of(node, struct batadv_bla_claim,
97 hash_entry);
98 const struct batadv_bla_claim *cl1 = data1, *cl2 = data2;
99
100 if (!batadv_compare_eth(cl1->addr, cl2->addr))
101 return 0;
102
103 if (cl1->vid != cl2->vid)
104 return 0;
105
106 return 1;
107}
108
109
/* drop a backbone gw reference; frees the structure (RCU-deferred) once the
 * last reference is gone
 */
static void
batadv_backbone_gw_free_ref(struct batadv_bla_backbone_gw *backbone_gw)
{
	if (atomic_dec_and_test(&backbone_gw->refcount))
		kfree_rcu(backbone_gw, rcu);
}
116
117
/* finally deallocate a claim; runs after the RCU grace period has passed */
static void batadv_claim_free_rcu(struct rcu_head *rcu)
{
	struct batadv_bla_claim *claim;

	claim = container_of(rcu, struct batadv_bla_claim, rcu);

	/* the claim held a reference on its backbone gw - release it now */
	batadv_backbone_gw_free_ref(claim->backbone_gw);
	kfree(claim);
}
127
128
/* drop a claim reference; the actual free is deferred via RCU because
 * readers may still be traversing the claim hash
 */
static void batadv_claim_free_ref(struct batadv_bla_claim *claim)
{
	if (atomic_dec_and_test(&claim->refcount))
		call_rcu(&claim->rcu, batadv_claim_free_rcu);
}
134
135
136
137
138
139
140
141static struct batadv_bla_claim
142*batadv_claim_hash_find(struct batadv_priv *bat_priv,
143 struct batadv_bla_claim *data)
144{
145 struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
146 struct hlist_head *head;
147 struct batadv_bla_claim *claim;
148 struct batadv_bla_claim *claim_tmp = NULL;
149 int index;
150
151 if (!hash)
152 return NULL;
153
154 index = batadv_choose_claim(data, hash->size);
155 head = &hash->table[index];
156
157 rcu_read_lock();
158 hlist_for_each_entry_rcu(claim, head, hash_entry) {
159 if (!batadv_compare_claim(&claim->hash_entry, data))
160 continue;
161
162 if (!atomic_inc_not_zero(&claim->refcount))
163 continue;
164
165 claim_tmp = claim;
166 break;
167 }
168 rcu_read_unlock();
169
170 return claim_tmp;
171}
172
173
174
175
176
177
178
179
180
181static struct batadv_bla_backbone_gw *
182batadv_backbone_hash_find(struct batadv_priv *bat_priv,
183 uint8_t *addr, short vid)
184{
185 struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
186 struct hlist_head *head;
187 struct batadv_bla_backbone_gw search_entry, *backbone_gw;
188 struct batadv_bla_backbone_gw *backbone_gw_tmp = NULL;
189 int index;
190
191 if (!hash)
192 return NULL;
193
194 memcpy(search_entry.orig, addr, ETH_ALEN);
195 search_entry.vid = vid;
196
197 index = batadv_choose_backbone_gw(&search_entry, hash->size);
198 head = &hash->table[index];
199
200 rcu_read_lock();
201 hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
202 if (!batadv_compare_backbone_gw(&backbone_gw->hash_entry,
203 &search_entry))
204 continue;
205
206 if (!atomic_inc_not_zero(&backbone_gw->refcount))
207 continue;
208
209 backbone_gw_tmp = backbone_gw;
210 break;
211 }
212 rcu_read_unlock();
213
214 return backbone_gw_tmp;
215}
216
217
/* delete all claims belonging to the given backbone gateway and reset its
 * claim CRC
 */
static void
batadv_bla_del_backbone_claims(struct batadv_bla_backbone_gw *backbone_gw)
{
	struct batadv_hashtable *hash;
	struct hlist_node *node_tmp;
	struct hlist_head *head;
	struct batadv_bla_claim *claim;
	int i;
	spinlock_t *list_lock;	/* protects write access to the hash lists */

	hash = backbone_gw->bat_priv->bla.claim_hash;
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(claim, node_tmp,
					  head, hash_entry) {
			if (claim->backbone_gw != backbone_gw)
				continue;

			/* drop the hash's reference; the memory is only
			 * freed after an RCU grace period, so the
			 * hlist_del_rcu() below is still safe
			 */
			batadv_claim_free_ref(claim);
			hlist_del_rcu(&claim->hash_entry);
		}
		spin_unlock_bh(list_lock);
	}

	/* all claims gone, initialize CRC */
	backbone_gw->crc = BATADV_BLA_CRC_INIT;
}
251
252
253
254
255
256
257
258
259static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
260 short vid, int claimtype)
261{
262 struct sk_buff *skb;
263 struct ethhdr *ethhdr;
264 struct batadv_hard_iface *primary_if;
265 struct net_device *soft_iface;
266 uint8_t *hw_src;
267 struct batadv_bla_claim_dst local_claim_dest;
268 __be32 zeroip = 0;
269
270 primary_if = batadv_primary_if_get_selected(bat_priv);
271 if (!primary_if)
272 return;
273
274 memcpy(&local_claim_dest, &bat_priv->bla.claim_dest,
275 sizeof(local_claim_dest));
276 local_claim_dest.type = claimtype;
277
278 soft_iface = primary_if->soft_iface;
279
280 skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
281
282 zeroip,
283 primary_if->soft_iface,
284
285 zeroip,
286
287 NULL,
288
289 primary_if->net_dev->dev_addr,
290
291
292
293
294 (uint8_t *)&local_claim_dest);
295
296 if (!skb)
297 goto out;
298
299 ethhdr = (struct ethhdr *)skb->data;
300 hw_src = (uint8_t *)ethhdr + ETH_HLEN + sizeof(struct arphdr);
301
302
303 switch (claimtype) {
304 case BATADV_CLAIM_TYPE_CLAIM:
305
306
307
308 memcpy(ethhdr->h_source, mac, ETH_ALEN);
309 batadv_dbg(BATADV_DBG_BLA, bat_priv,
310 "bla_send_claim(): CLAIM %pM on vid %d\n", mac, vid);
311 break;
312 case BATADV_CLAIM_TYPE_UNCLAIM:
313
314
315
316 memcpy(hw_src, mac, ETH_ALEN);
317 batadv_dbg(BATADV_DBG_BLA, bat_priv,
318 "bla_send_claim(): UNCLAIM %pM on vid %d\n", mac,
319 vid);
320 break;
321 case BATADV_CLAIM_TYPE_ANNOUNCE:
322
323
324
325 memcpy(hw_src, mac, ETH_ALEN);
326 batadv_dbg(BATADV_DBG_BLA, bat_priv,
327 "bla_send_claim(): ANNOUNCE of %pM on vid %d\n",
328 ethhdr->h_source, vid);
329 break;
330 case BATADV_CLAIM_TYPE_REQUEST:
331
332
333
334
335 memcpy(hw_src, mac, ETH_ALEN);
336 memcpy(ethhdr->h_dest, mac, ETH_ALEN);
337 batadv_dbg(BATADV_DBG_BLA, bat_priv,
338 "bla_send_claim(): REQUEST of %pM to %pMon vid %d\n",
339 ethhdr->h_source, ethhdr->h_dest, vid);
340 break;
341 }
342
343 if (vid != -1)
344 skb = vlan_insert_tag(skb, htons(ETH_P_8021Q), vid);
345
346 skb_reset_mac_header(skb);
347 skb->protocol = eth_type_trans(skb, soft_iface);
348 batadv_inc_counter(bat_priv, BATADV_CNT_RX);
349 batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
350 skb->len + ETH_HLEN);
351 soft_iface->last_rx = jiffies;
352
353 netif_rx(skb);
354out:
355 if (primary_if)
356 batadv_hardif_free_ref(primary_if);
357}
358
359
360
361
362
363
364
365
366
367
/**
 * batadv_bla_get_backbone_gw - find or create a backbone gateway
 * @bat_priv: the bat priv with all the soft interface information
 * @orig: the mac address of the originator
 * @vid: the VLAN id
 * @own_backbone: set to true if the new gateway is ourselves
 *
 * Searches for the backbone gateway or creates a new one if it could not be
 * found. Returns the entry with its refcount incremented (release with
 * batadv_backbone_gw_free_ref()), or NULL on allocation/insertion failure.
 */
static struct batadv_bla_backbone_gw *
batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, uint8_t *orig,
			   short vid, bool own_backbone)
{
	struct batadv_bla_backbone_gw *entry;
	struct batadv_orig_node *orig_node;
	int hash_added;

	entry = batadv_backbone_hash_find(bat_priv, orig, vid);

	if (entry)
		return entry;

	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "bla_get_backbone_gw(): not found (%pM, %d), creating new entry\n",
		   orig, vid);

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return NULL;

	entry->vid = vid;
	entry->lasttime = jiffies;
	entry->crc = BATADV_BLA_CRC_INIT;
	entry->bat_priv = bat_priv;
	atomic_set(&entry->request_sent, 0);
	atomic_set(&entry->wait_periods, 0);
	memcpy(entry->orig, orig, ETH_ALEN);

	/* one for the hash, one for returning */
	atomic_set(&entry->refcount, 2);

	hash_added = batadv_hash_add(bat_priv->bla.backbone_hash,
				     batadv_compare_backbone_gw,
				     batadv_choose_backbone_gw, entry,
				     &entry->hash_entry);

	if (unlikely(hash_added != 0)) {
		/* hash failed, free the structure */
		kfree(entry);
		return NULL;
	}

	/* this originator is a backbone gateway now - remove its global
	 * translation table entries
	 */
	orig_node = batadv_orig_hash_find(bat_priv, orig);
	if (orig_node) {
		batadv_tt_global_del_orig(bat_priv, orig_node,
					  "became a backbone gateway");
		batadv_orig_node_free_ref(orig_node);
	}

	if (own_backbone) {
		batadv_bla_send_announce(bat_priv, entry);

		/* request_sent/num_requests will be decreased again in the
		 * periodic worker thread
		 */
		atomic_inc(&entry->request_sent);
		atomic_set(&entry->wait_periods, BATADV_BLA_WAIT_PERIODS);
		atomic_inc(&bat_priv->bla.num_requests);
	}

	return entry;
}
430
431
432
433
/* update or add our own backbone gw entry for the given vid, so we announce
 * ourselves where we receive other backbone gws
 */
static void
batadv_bla_update_own_backbone_gw(struct batadv_priv *bat_priv,
				  struct batadv_hard_iface *primary_if,
				  short vid)
{
	struct batadv_bla_backbone_gw *backbone_gw;

	backbone_gw = batadv_bla_get_backbone_gw(bat_priv,
						 primary_if->net_dev->dev_addr,
						 vid, true);
	if (unlikely(!backbone_gw))
		return;

	/* refresh the timeout and drop the lookup reference */
	backbone_gw->lasttime = jiffies;
	batadv_backbone_gw_free_ref(backbone_gw);
}
450
451
452
453
454
455
456
/**
 * batadv_bla_answer_request - answer a bla request by sending own claims
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: interface where the request came on
 * @vid: the vid where the request came on
 *
 * Repeat all of our own claims, and finally send an ANNOUNCE frame
 * to allow the requester another check if the CRC is correct now.
 */
static void batadv_bla_answer_request(struct batadv_priv *bat_priv,
				      struct batadv_hard_iface *primary_if,
				      short vid)
{
	struct hlist_head *head;
	struct batadv_hashtable *hash;
	struct batadv_bla_claim *claim;
	struct batadv_bla_backbone_gw *backbone_gw;
	int i;

	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "bla_answer_request(): received a claim request, send all of our own claims again\n");

	backbone_gw = batadv_backbone_hash_find(bat_priv,
						primary_if->net_dev->dev_addr,
						vid);
	if (!backbone_gw)
		return;

	hash = bat_priv->bla.claim_hash;
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(claim, head, hash_entry) {
			/* only our own claims are interesting here */
			if (claim->backbone_gw != backbone_gw)
				continue;

			batadv_bla_send_claim(bat_priv, claim->addr, claim->vid,
					      BATADV_CLAIM_TYPE_CLAIM);
		}
		rcu_read_unlock();
	}

	/* finally, send an announcement frame */
	batadv_bla_send_announce(bat_priv, backbone_gw);
	batadv_backbone_gw_free_ref(backbone_gw);
}
496
497
498
499
500
501
502
/**
 * batadv_bla_send_request - send a request to repeat claims
 * @backbone_gw: the backbone gateway from whom we are out of sync
 *
 * When the crc is wrong, ask the backbone gateway for a full table update.
 * The gateway will then repeat all of its claims and finally send another
 * announcement with which we can check the CRC again.
 */
static void batadv_bla_send_request(struct batadv_bla_backbone_gw *backbone_gw)
{
	/* first, remove all old entries */
	batadv_bla_del_backbone_claims(backbone_gw);

	batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
		   "Sending REQUEST to %pM\n", backbone_gw->orig);

	/* send request */
	batadv_bla_send_claim(backbone_gw->bat_priv, backbone_gw->orig,
			      backbone_gw->vid, BATADV_CLAIM_TYPE_REQUEST);

	/* count the pending request (cleared again when the answer arrives
	 * or the gateway times out)
	 */
	if (!atomic_read(&backbone_gw->request_sent)) {
		atomic_inc(&backbone_gw->bat_priv->bla.num_requests);
		atomic_set(&backbone_gw->request_sent, 1);
	}
}
521
522
523
524
525
526
527
/**
 * batadv_bla_send_announce - broadcast an announcement frame
 * @bat_priv: the bat priv with all the soft interface information
 * @backbone_gw: our backbone gateway which should be announced
 *
 * The announced mac consists of the 4-byte batadv_announce_mac prefix
 * followed by the gateway's 2-byte claim CRC (network byte order).
 */
static void batadv_bla_send_announce(struct batadv_priv *bat_priv,
				     struct batadv_bla_backbone_gw *backbone_gw)
{
	uint8_t mac[ETH_ALEN];
	__be16 crc;

	memcpy(mac, batadv_announce_mac, 4);
	crc = htons(backbone_gw->crc);
	memcpy(&mac[4], &crc, 2);

	batadv_bla_send_claim(bat_priv, mac, backbone_gw->vid,
			      BATADV_CLAIM_TYPE_ANNOUNCE);
}
541
542
543
544
545
546
547
548
/**
 * batadv_bla_add_claim - add (or update) a claim entry in the claim hash
 * @bat_priv: the bat priv with all the soft interface information
 * @mac: the mac address of the claimed client
 * @vid: the VLAN id of the frame
 * @backbone_gw: the backbone gateway which claims it
 */
static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
				 const uint8_t *mac, const short vid,
				 struct batadv_bla_backbone_gw *backbone_gw)
{
	struct batadv_bla_claim *claim;
	struct batadv_bla_claim search_claim;
	int hash_added;

	memcpy(search_claim.addr, mac, ETH_ALEN);
	search_claim.vid = vid;
	claim = batadv_claim_hash_find(bat_priv, &search_claim);

	/* create a new claim entry if it does not exist yet. */
	if (!claim) {
		claim = kzalloc(sizeof(*claim), GFP_ATOMIC);
		if (!claim)
			return;

		memcpy(claim->addr, mac, ETH_ALEN);
		claim->vid = vid;
		claim->lasttime = jiffies;
		claim->backbone_gw = backbone_gw;

		/* one for the hash, one for us (released at the end) */
		atomic_set(&claim->refcount, 2);
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "bla_add_claim(): adding new entry %pM, vid %d to hash ...\n",
			   mac, vid);
		hash_added = batadv_hash_add(bat_priv->bla.claim_hash,
					     batadv_compare_claim,
					     batadv_choose_claim, claim,
					     &claim->hash_entry);

		if (unlikely(hash_added != 0)) {
			/* insertion failed, only local changes happened */
			kfree(claim);
			return;
		}
	} else {
		claim->lasttime = jiffies;
		if (claim->backbone_gw == backbone_gw)
			/* no need to register a new backbone */
			goto claim_free_ref;

		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "bla_add_claim(): changing ownership for %pM, vid %d\n",
			   mac, vid);

		/* remove the claim address from the old backbone's CRC and
		 * drop the claim's reference on the old backbone gw
		 */
		claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
		batadv_backbone_gw_free_ref(claim->backbone_gw);
	}
	/* set (new) backbone gw - the claim holds a reference on it */
	atomic_inc(&backbone_gw->refcount);
	claim->backbone_gw = backbone_gw;

	/* add the claim address to the new backbone's CRC */
	backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
	backbone_gw->lasttime = jiffies;

claim_free_ref:
	batadv_claim_free_ref(claim);
}
609
610
611
612
/* delete the claim with the given mac address and vid from the claim hash */
static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
				 const uint8_t *mac, const short vid)
{
	struct batadv_bla_claim search_claim, *claim;

	memcpy(search_claim.addr, mac, ETH_ALEN);
	search_claim.vid = vid;
	claim = batadv_claim_hash_find(bat_priv, &search_claim);
	if (!claim)
		return;

	batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_del_claim(): %pM, vid %d\n",
		   mac, vid);

	batadv_hash_remove(bat_priv->bla.claim_hash, batadv_compare_claim,
			   batadv_choose_claim, claim);
	batadv_claim_free_ref(claim); /* reference from the hash is gone */

	/* still safe: we hold the reference taken by hash_find() above */
	claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);

	/* don't need the reference from hash_find() anymore */
	batadv_claim_free_ref(claim);
}
636
637
/* check for ANNOUNCE frame, return 1 if handled (i.e. it was an announce) */
static int batadv_handle_announce(struct batadv_priv *bat_priv,
				  uint8_t *an_addr, uint8_t *backbone_addr,
				  short vid)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	uint16_t crc;

	/* announced macs start with the 4-byte announce prefix */
	if (memcmp(an_addr, batadv_announce_mac, 4) != 0)
		return 0;

	backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid,
						 false);

	if (unlikely(!backbone_gw))
		return 1;

	/* handle as ANNOUNCE frame */
	backbone_gw->lasttime = jiffies;
	/* the sender's claim CRC is carried in the last two mac bytes */
	crc = ntohs(*((__be16 *)(&an_addr[4])));

	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "handle_announce(): ANNOUNCE vid %d (sent by %pM)... CRC = %#.4x\n",
		   vid, backbone_gw->orig, crc);

	if (backbone_gw->crc != crc) {
		batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
			   "handle_announce(): CRC FAILED for %pM/%d (my = %#.4x, sent = %#.4x)\n",
			   backbone_gw->orig, backbone_gw->vid,
			   backbone_gw->crc, crc);

		/* out of sync - ask the gateway to repeat its claims */
		batadv_bla_send_request(backbone_gw);
	} else {
		/* if we have sent a request and the crc was OK,
		 * we can allow traffic again.
		 */
		if (atomic_read(&backbone_gw->request_sent)) {
			atomic_dec(&backbone_gw->bat_priv->bla.num_requests);
			atomic_set(&backbone_gw->request_sent, 0);
		}
	}

	batadv_backbone_gw_free_ref(backbone_gw);
	return 1;
}
683
684
/* check for REQUEST frame, return 1 if handled */
static int batadv_handle_request(struct batadv_priv *bat_priv,
				 struct batadv_hard_iface *primary_if,
				 uint8_t *backbone_addr,
				 struct ethhdr *ethhdr, short vid)
{
	/* a request is directed to the backbone address in h_dest */
	if (!batadv_compare_eth(backbone_addr, ethhdr->h_dest))
		return 0;

	/* sanity check: requests addressed to someone else should not
	 * reach us; ignore them in this case
	 */
	if (!batadv_compare_eth(ethhdr->h_dest, primary_if->net_dev->dev_addr))
		return 1;

	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "handle_request(): REQUEST vid %d (sent by %pM)...\n",
		   vid, ethhdr->h_source);

	batadv_bla_answer_request(bat_priv, primary_if, vid);
	return 1;
}
707
708
/* check for UNCLAIM frame, return 1 if handled */
static int batadv_handle_unclaim(struct batadv_priv *bat_priv,
				 struct batadv_hard_iface *primary_if,
				 uint8_t *backbone_addr,
				 uint8_t *claim_addr, short vid)
{
	struct batadv_bla_backbone_gw *backbone_gw;

	/* if it is our own unclaim, repeat it on the wire so the other
	 * backbone gateways see it too (primary_if may be NULL when called
	 * from the purge path)
	 */
	if (primary_if && batadv_compare_eth(backbone_addr,
					     primary_if->net_dev->dev_addr))
		batadv_bla_send_claim(bat_priv, claim_addr, vid,
				      BATADV_CLAIM_TYPE_UNCLAIM);

	backbone_gw = batadv_backbone_hash_find(bat_priv, backbone_addr, vid);

	if (!backbone_gw)
		return 1;

	/* this must be an UNCLAIM frame */
	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "handle_unclaim(): UNCLAIM %pM on vid %d (sent by %pM)...\n",
		   claim_addr, vid, backbone_gw->orig);

	batadv_bla_del_claim(bat_priv, claim_addr, vid);
	batadv_backbone_gw_free_ref(backbone_gw);
	return 1;
}
736
737
/* check for CLAIM frame, return 1 if handled */
static int batadv_handle_claim(struct batadv_priv *bat_priv,
			       struct batadv_hard_iface *primary_if,
			       uint8_t *backbone_addr, uint8_t *claim_addr,
			       short vid)
{
	struct batadv_bla_backbone_gw *backbone_gw;

	/* register the gateway if not yet available, and add the claim. */

	backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid,
						 false);

	if (unlikely(!backbone_gw))
		return 1;

	/* this must be a CLAIM frame */
	batadv_bla_add_claim(bat_priv, claim_addr, vid, backbone_gw);
	/* repeat our own claims on the wire for the other gateways */
	if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
		batadv_bla_send_claim(bat_priv, claim_addr, vid,
				      BATADV_CLAIM_TYPE_CLAIM);

	/* release the reference taken by get_backbone_gw() */
	batadv_backbone_gw_free_ref(backbone_gw);
	return 1;
}
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
/**
 * batadv_check_claim_group - check for claim group membership
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the primary interface of this batman interface
 * @hw_src: the hardware source address in the ARP header
 * @hw_dst: the hardware destination address in the ARP header
 * @ethhdr: pointer to the Ethernet header of the claim frame
 *
 * checks if it is a claim packet and if it's on the same group.
 * This function also applies the group ID of the sender
 * if it is in the same mesh.
 *
 * returns:
 *	2 - if it is a claim packet and on the same group
 *	1 - if it is a claim packet from another group
 *	0 - if it is not a claim packet
 */
static int batadv_check_claim_group(struct batadv_priv *bat_priv,
				    struct batadv_hard_iface *primary_if,
				    uint8_t *hw_src, uint8_t *hw_dst,
				    struct ethhdr *ethhdr)
{
	uint8_t *backbone_addr;
	struct batadv_orig_node *orig_node;
	struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;

	bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
	bla_dst_own = &bat_priv->bla.claim_dest;

	/* check if it is a claim packet in general */
	if (memcmp(bla_dst->magic, bla_dst_own->magic,
		   sizeof(bla_dst->magic)) != 0)
		return 0;

	/* determine the backbone address: for CLAIM frames it is the
	 * hw_src, for all other types the Ethernet source
	 */
	switch (bla_dst->type) {
	case BATADV_CLAIM_TYPE_CLAIM:
		backbone_addr = hw_src;
		break;
	case BATADV_CLAIM_TYPE_REQUEST:
	case BATADV_CLAIM_TYPE_ANNOUNCE:
	case BATADV_CLAIM_TYPE_UNCLAIM:
		backbone_addr = ethhdr->h_source;
		break;
	default:
		return 0;
	}

	/* don't accept claim frames from ourselves */
	if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
		return 0;

	/* if it's already the same group, it is fine. */
	if (bla_dst->group == bla_dst_own->group)
		return 2;

	/* let's see if this originator is in our mesh */
	orig_node = batadv_orig_hash_find(bat_priv, backbone_addr);

	/* don't accept claims from gateways which are not in
	 * the same mesh or group.
	 */
	if (!orig_node)
		return 1;

	/* if our mesh friend's group id is bigger, use it for ourselves. */
	if (ntohs(bla_dst->group) > ntohs(bla_dst_own->group)) {
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "taking other backbones claim group: %#.4x\n",
			   ntohs(bla_dst->group));
		bla_dst_own->group = bla_dst->group;
	}

	batadv_orig_node_free_ref(orig_node);

	return 2;
}
843
844
845
846
847
848
849
850
851
852
/**
 * batadv_bla_process_claim - check if this is a claim frame, and process it
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the primary hard interface of this batman soft interface
 * @skb: the frame to be checked
 *
 * returns 1 if it was a claim frame, otherwise returns 0 to
 * tell the caller that it can use the frame on its own.
 */
static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
				    struct batadv_hard_iface *primary_if,
				    struct sk_buff *skb)
{
	struct ethhdr *ethhdr;
	struct vlan_ethhdr *vhdr;
	struct arphdr *arphdr;
	uint8_t *hw_src, *hw_dst;
	struct batadv_bla_claim_dst *bla_dst;
	uint16_t proto;
	int headlen;
	short vid = -1;
	int ret;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* unpack an optional 802.1Q header to find the encapsulated proto */
	if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) {
		vhdr = (struct vlan_ethhdr *)ethhdr;
		vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
		proto = ntohs(vhdr->h_vlan_encapsulated_proto);
		headlen = sizeof(*vhdr);
	} else {
		proto = ntohs(ethhdr->h_proto);
		headlen = ETH_HLEN;
	}

	if (proto != ETH_P_ARP)
		return 0; /* not a claim frame */

	/* this must be an ARP frame. check if it is a claim. */

	if (unlikely(!pskb_may_pull(skb, headlen + arp_hdr_len(skb->dev))))
		return 0;

	/* pskb_may_pull() may have modified the pointers, get ethhdr again */
	ethhdr = (struct ethhdr *)skb_mac_header(skb);
	arphdr = (struct arphdr *)((uint8_t *)ethhdr + headlen);

	/* check whether the ARP frame carries valid
	 * Ethernet + IPv4 information
	 */
	if (arphdr->ar_hrd != htons(ARPHRD_ETHER))
		return 0;
	if (arphdr->ar_pro != htons(ETH_P_IP))
		return 0;
	if (arphdr->ar_hln != ETH_ALEN)
		return 0;
	if (arphdr->ar_pln != 4)
		return 0;

	hw_src = (uint8_t *)arphdr + sizeof(struct arphdr);
	hw_dst = hw_src + ETH_ALEN + 4;
	bla_dst = (struct batadv_bla_claim_dst *)hw_dst;

	/* check if it is a claim frame. */
	ret = batadv_check_claim_group(bat_priv, primary_if, hw_src, hw_dst,
				       ethhdr);
	if (ret == 1)
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "bla_process_claim(): received a claim frame from another group. From: %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
			   ethhdr->h_source, vid, hw_src, hw_dst);

	if (ret < 2)
		return ret;

	/* become a backbone gw ourselves on this vlan if not yet done */
	batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);

	/* check for the different types of claim frames ... */
	switch (bla_dst->type) {
	case BATADV_CLAIM_TYPE_CLAIM:
		if (batadv_handle_claim(bat_priv, primary_if, hw_src,
					ethhdr->h_source, vid))
			return 1;
		break;
	case BATADV_CLAIM_TYPE_UNCLAIM:
		if (batadv_handle_unclaim(bat_priv, primary_if,
					  ethhdr->h_source, hw_src, vid))
			return 1;
		break;

	case BATADV_CLAIM_TYPE_ANNOUNCE:
		if (batadv_handle_announce(bat_priv, hw_src, ethhdr->h_source,
					   vid))
			return 1;
		break;
	case BATADV_CLAIM_TYPE_REQUEST:
		if (batadv_handle_request(bat_priv, primary_if, hw_src, ethhdr,
					  vid))
			return 1;
		break;
	}

	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "bla_process_claim(): ERROR - this looks like a claim frame, but is useless. eth src %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
		   ethhdr->h_source, vid, hw_src, hw_dst);
	return 1;
}
951
952
953
954
/* check when we last heard from other backbone gateways and remove them on
 * timeout, or remove all of them if now is set
 */
static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	struct hlist_node *node_tmp;
	struct hlist_head *head;
	struct batadv_hashtable *hash;
	spinlock_t *list_lock;	/* protects write access to the hash lists */
	int i;

	hash = bat_priv->bla.backbone_hash;
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(backbone_gw, node_tmp,
					  head, hash_entry) {
			if (now)
				goto purge_now;
			if (!batadv_has_timed_out(backbone_gw->lasttime,
						  BATADV_BLA_BACKBONE_TIMEOUT))
				continue;

			batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
				   "bla_purge_backbone_gw(): backbone gw %pM timed out\n",
				   backbone_gw->orig);

purge_now:
			/* don't wait for the pending request anymore */
			if (atomic_read(&backbone_gw->request_sent))
				atomic_dec(&bat_priv->bla.num_requests);

			batadv_bla_del_backbone_claims(backbone_gw);

			/* remove from the hash and drop its reference */
			hlist_del_rcu(&backbone_gw->hash_entry);
			batadv_backbone_gw_free_ref(backbone_gw);
		}
		spin_unlock_bh(list_lock);
	}
}
998
999
1000
1001
1002
1003
1004
1005
1006
1007
/**
 * batadv_bla_purge_claims - remove stale claims
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the selected primary interface; only read when now is not set
 * @now: if set, remove all claims regardless of age
 *
 * Check when we heard the last time from our own claims, and remove them
 * in case of a time out, or clean all claims if now is set.
 */
static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
				    struct batadv_hard_iface *primary_if,
				    int now)
{
	struct batadv_bla_claim *claim;
	struct hlist_head *head;
	struct batadv_hashtable *hash;
	int i;

	hash = bat_priv->bla.claim_hash;
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(claim, head, hash_entry) {
			if (now)
				goto purge_now;
			/* only time out our own (locally claimed) entries */
			if (!batadv_compare_eth(claim->backbone_gw->orig,
						primary_if->net_dev->dev_addr))
				continue;
			if (!batadv_has_timed_out(claim->lasttime,
						  BATADV_BLA_CLAIM_TIMEOUT))
				continue;

			batadv_dbg(BATADV_DBG_BLA, bat_priv,
				   "bla_purge_claims(): %pM, vid %d, time out\n",
				   claim->addr, claim->vid);

purge_now:
			batadv_handle_unclaim(bat_priv, primary_if,
					      claim->backbone_gw->orig,
					      claim->addr, claim->vid);
		}
		rcu_read_unlock();
	}
}
1047
1048
1049
1050
1051
1052
1053
1054
1055
/**
 * batadv_bla_update_orig_address - update backbone gateways after the own
 *  originator address changed
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the newly selected primary interface
 * @oldif: the old primary interface, may be NULL
 */
void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
				    struct batadv_hard_iface *primary_if,
				    struct batadv_hard_iface *oldif)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	struct hlist_head *head;
	struct batadv_hashtable *hash;
	__be16 group;
	int i;

	/* reset bridge loop avoidance group id */
	group = htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN));
	bat_priv->bla.claim_dest.group = group;

	/* purge everything when bridge loop avoidance is turned off */
	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		oldif = NULL;

	if (!oldif) {
		batadv_bla_purge_claims(bat_priv, NULL, 1);
		batadv_bla_purge_backbone_gw(bat_priv, 1);
		return;
	}

	hash = bat_priv->bla.backbone_hash;
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
			/* our own entries still hold the old address */
			if (!batadv_compare_eth(backbone_gw->orig,
						oldif->net_dev->dev_addr))
				continue;

			memcpy(backbone_gw->orig,
			       primary_if->net_dev->dev_addr, ETH_ALEN);
			/* send an announce frame so others will ask for our
			 * claims and update their tables
			 */
			batadv_bla_send_announce(bat_priv, backbone_gw);
		}
		rcu_read_unlock();
	}
}
1104
1105
1106
1107
1108
/* periodic work to do:
 *  - purge structures when they are too old
 *  - send announcements for our own backbone gateways
 */
static void batadv_bla_periodic_work(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_priv *bat_priv;
	struct batadv_priv_bla *priv_bla;
	struct hlist_head *head;
	struct batadv_bla_backbone_gw *backbone_gw;
	struct batadv_hashtable *hash;
	struct batadv_hard_iface *primary_if;
	int i;

	delayed_work = container_of(work, struct delayed_work, work);
	priv_bla = container_of(delayed_work, struct batadv_priv_bla, work);
	bat_priv = container_of(priv_bla, struct batadv_priv, bla);
	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	batadv_bla_purge_claims(bat_priv, primary_if, 0);
	batadv_bla_purge_backbone_gw(bat_priv, 0);

	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		goto out;

	hash = bat_priv->bla.backbone_hash;
	if (!hash)
		goto out;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
			/* only announce our own backbone gateways */
			if (!batadv_compare_eth(backbone_gw->orig,
						primary_if->net_dev->dev_addr))
				continue;

			backbone_gw->lasttime = jiffies;

			batadv_bla_send_announce(bat_priv, backbone_gw);

			/* request_sent is set right after creation to avoid
			 * problems while we are not yet known as backbone gw
			 * in the backbone.
			 *
			 * We can reset it now after we waited some periods
			 * to give the other nodes time to answer.
			 */
			if (atomic_read(&backbone_gw->request_sent) == 0)
				continue;

			if (!atomic_dec_and_test(&backbone_gw->wait_periods))
				continue;

			atomic_dec(&backbone_gw->bat_priv->bla.num_requests);
			atomic_set(&backbone_gw->request_sent, 0);
		}
		rcu_read_unlock();
	}
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);

	/* re-arm the periodic worker */
	queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work,
			   msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
}
1177
1178
1179
1180
1181
1182
/* lockdep class keys assigned to the claim and backbone hash table locks
 * in batadv_bla_init()
 */
static struct lock_class_key batadv_claim_hash_lock_class_key;
static struct lock_class_key batadv_backbone_hash_lock_class_key;
1185
1186
/* initialize all bla structures; returns 0 on success, -ENOMEM otherwise */
int batadv_bla_init(struct batadv_priv *bat_priv)
{
	int i;
	uint8_t claim_dest[ETH_ALEN] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00};
	struct batadv_hard_iface *primary_if;
	uint16_t crc;
	unsigned long entrytime;

	spin_lock_init(&bat_priv->bla.bcast_duplist_lock);

	batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hash registering\n");

	/* setting claim destination address (3-byte magic) */
	memcpy(&bat_priv->bla.claim_dest.magic, claim_dest, 3);
	bat_priv->bla.claim_dest.type = 0;
	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (primary_if) {
		/* group id is the crc16 of the primary mac */
		crc = crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN);
		bat_priv->bla.claim_dest.group = htons(crc);
		batadv_hardif_free_ref(primary_if);
	} else {
		bat_priv->bla.claim_dest.group = 0;
	}

	/* initialize the duplicate list with already-expired timestamps */
	entrytime = jiffies - msecs_to_jiffies(BATADV_DUPLIST_TIMEOUT);
	for (i = 0; i < BATADV_DUPLIST_SIZE; i++)
		bat_priv->bla.bcast_duplist[i].entrytime = entrytime;
	bat_priv->bla.bcast_duplist_curr = 0;

	/* hashes already exist (e.g. re-init) - nothing more to do */
	if (bat_priv->bla.claim_hash)
		return 0;

	bat_priv->bla.claim_hash = batadv_hash_new(128);
	bat_priv->bla.backbone_hash = batadv_hash_new(32);

	if (!bat_priv->bla.claim_hash || !bat_priv->bla.backbone_hash)
		return -ENOMEM;

	batadv_hash_set_lock_class(bat_priv->bla.claim_hash,
				   &batadv_claim_hash_lock_class_key);
	batadv_hash_set_lock_class(bat_priv->bla.backbone_hash,
				   &batadv_backbone_hash_lock_class_key);

	batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hashes initialized\n");

	INIT_DELAYED_WORK(&bat_priv->bla.work, batadv_bla_periodic_work);

	queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work,
			   msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
	return 0;
}
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
/**
 * batadv_bla_check_bcast_duplist - check for duplicate broadcasts
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: contains the bcast_packet to be checked
 *
 * Check if the packet is on our broadcast list. Another gateway might have
 * sent the same packet because it is connected to the same backbone, so we
 * have to drop this duplicate.
 *
 * This is detected by checking the CRC, which tells us with good probability
 * that it is the same packet. If it was furthermore sent by another host,
 * drop it (return 1). Equal packets from the same host are allowed as this
 * might be intended.
 */
int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
				   struct sk_buff *skb)
{
	int i, curr, ret = 0;
	__be32 crc;
	struct batadv_bcast_packet *bcast_packet;
	struct batadv_bcast_duplist_entry *entry;

	bcast_packet = (struct batadv_bcast_packet *)skb->data;

	/* calculate the crc over the payload following the bcast header */
	crc = batadv_skb_crc32(skb, (u8 *)(bcast_packet + 1));

	spin_lock_bh(&bat_priv->bla.bcast_duplist_lock);

	for (i = 0; i < BATADV_DUPLIST_SIZE; i++) {
		curr = (bat_priv->bla.bcast_duplist_curr + i);
		curr %= BATADV_DUPLIST_SIZE;
		entry = &bat_priv->bla.bcast_duplist[curr];

		/* we can stop searching if the entry is too old;
		 * later entries will be even older
		 */
		if (batadv_has_timed_out(entry->entrytime,
					 BATADV_DUPLIST_TIMEOUT))
			break;

		if (entry->crc != crc)
			continue;

		/* same originator - not a duplicate, keep looking */
		if (batadv_compare_eth(entry->orig, bcast_packet->orig))
			continue;

		/* this entry seems to match: same crc, not too old,
		 * and from another gw - forbid it
		 */
		ret = 1;
		goto out;
	}
	/* not found - overwrite the oldest entry and allow the packet,
	 * it's the first occurrence
	 */
	curr = (bat_priv->bla.bcast_duplist_curr + BATADV_DUPLIST_SIZE - 1);
	curr %= BATADV_DUPLIST_SIZE;
	entry = &bat_priv->bla.bcast_duplist[curr];
	entry->crc = crc;
	entry->entrytime = jiffies;
	memcpy(entry->orig, bcast_packet->orig, ETH_ALEN);
	bat_priv->bla.bcast_duplist_curr = curr;

out:
	spin_unlock_bh(&bat_priv->bla.bcast_duplist_lock);

	return ret;
}
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig)
1320{
1321 struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
1322 struct hlist_head *head;
1323 struct batadv_bla_backbone_gw *backbone_gw;
1324 int i;
1325
1326 if (!atomic_read(&bat_priv->bridge_loop_avoidance))
1327 return 0;
1328
1329 if (!hash)
1330 return 0;
1331
1332 for (i = 0; i < hash->size; i++) {
1333 head = &hash->table[i];
1334
1335 rcu_read_lock();
1336 hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
1337 if (batadv_compare_eth(backbone_gw->orig, orig)) {
1338 rcu_read_unlock();
1339 return 1;
1340 }
1341 }
1342 rcu_read_unlock();
1343 }
1344
1345 return 0;
1346}
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
/**
 * batadv_bla_is_backbone_gw - check if the originator of a frame is a
 *  backbone gateway on the frame's VLAN
 * @skb: the frame to be checked
 * @orig_node: the orig_node of the frame
 * @hdr_size: size of the batman header preceding the Ethernet frame
 *
 * Returns 1 if the orig_node is also a backbone gateway on the soft
 * interface, otherwise returns 0.
 */
int batadv_bla_is_backbone_gw(struct sk_buff *skb,
			      struct batadv_orig_node *orig_node, int hdr_size)
{
	struct ethhdr *ethhdr;
	struct vlan_ethhdr *vhdr;
	struct batadv_bla_backbone_gw *backbone_gw;
	short vid = -1;

	if (!atomic_read(&orig_node->bat_priv->bridge_loop_avoidance))
		return 0;

	/* first, find out the vid. */
	if (!pskb_may_pull(skb, hdr_size + ETH_HLEN))
		return 0;

	ethhdr = (struct ethhdr *)(((uint8_t *)skb->data) + hdr_size);

	if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) {
		if (!pskb_may_pull(skb, hdr_size + sizeof(struct vlan_ethhdr)))
			return 0;

		vhdr = (struct vlan_ethhdr *)(skb->data + hdr_size);
		vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
	}

	/* see if this originator is a backbone gw for this VLAN */
	backbone_gw = batadv_backbone_hash_find(orig_node->bat_priv,
						orig_node->orig, vid);
	if (!backbone_gw)
		return 0;

	batadv_backbone_gw_free_ref(backbone_gw);
	return 1;
}
1393
1394
1395void batadv_bla_free(struct batadv_priv *bat_priv)
1396{
1397 struct batadv_hard_iface *primary_if;
1398
1399 cancel_delayed_work_sync(&bat_priv->bla.work);
1400 primary_if = batadv_primary_if_get_selected(bat_priv);
1401
1402 if (bat_priv->bla.claim_hash) {
1403 batadv_bla_purge_claims(bat_priv, primary_if, 1);
1404 batadv_hash_destroy(bat_priv->bla.claim_hash);
1405 bat_priv->bla.claim_hash = NULL;
1406 }
1407 if (bat_priv->bla.backbone_hash) {
1408 batadv_bla_purge_backbone_gw(bat_priv, 1);
1409 batadv_hash_destroy(bat_priv->bla.backbone_hash);
1410 bat_priv->bla.backbone_hash = NULL;
1411 }
1412 if (primary_if)
1413 batadv_hardif_free_ref(primary_if);
1414}
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
/**
 * batadv_bla_rx - check a received frame against the claim table
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the frame to be checked
 * @vid: the VLAN ID of the frame
 * @is_bcast: the packet came in via a broadcast packet type
 *
 * Decides whether a frame received from the mesh may be handed to the
 * local LAN. If this function consumes the skb (drops it), it returns 1;
 * if it returns 0 the caller shall further process the skb.
 */
int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid,
		  bool is_bcast)
{
	struct ethhdr *ethhdr;
	struct batadv_bla_claim search_claim, *claim = NULL;
	struct batadv_hard_iface *primary_if;
	int ret;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto handled;

	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		goto allow;

	if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
		/* don't allow broadcasts while claim requests are in flight */
		if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast)
			goto handled;

	/* look up an existing claim for the frame's source mac + vid */
	memcpy(search_claim.addr, ethhdr->h_source, ETH_ALEN);
	search_claim.vid = vid;
	claim = batadv_claim_hash_find(bat_priv, &search_claim);

	if (!claim) {
		/* no claim exists yet for this client - claim it for us
		 * (sends a claim frame with our own address)
		 */
		batadv_handle_claim(bat_priv, primary_if,
				    primary_if->net_dev->dev_addr,
				    ethhdr->h_source, vid);
		goto allow;
	}

	/* if it is our own claim, just refresh its timestamp and allow */
	if (batadv_compare_eth(claim->backbone_gw->orig,
			       primary_if->net_dev->dev_addr)) {
		/* refresh the claim */
		claim->lasttime = jiffies;
		goto allow;
	}

	/* the client is claimed by another backbone gateway */
	if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast) {
		/* drop the broadcast; NOTE(review): presumably the
		 * responsible backbone gateway delivers it to the LAN
		 * instead - confirm against the BLA protocol description
		 */
		goto handled;
	} else {
		/* a unicast frame from a client claimed by another gateway
		 * arrived here over the mesh: take over the claim with our
		 * own address (client appears to have roamed to us)
		 */
		batadv_handle_claim(bat_priv, primary_if,
				    primary_if->net_dev->dev_addr,
				    ethhdr->h_source, vid);
		goto allow;
	}
allow:
	batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
	ret = 0;
	goto out;

handled:
	/* the skb is consumed here - caller must not touch it again */
	kfree_skb(skb);
	ret = 1;

out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	if (claim)
		batadv_claim_free_ref(claim);
	return ret;
}
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
/**
 * batadv_bla_tx - check a frame about to be sent into the mesh
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the frame to be checked
 * @vid: the VLAN ID of the frame
 *
 * Decides whether a locally originated frame may enter the mesh.
 * Returns 1 if the frame must not be sent (caller drops it), 0 if the
 * caller shall continue transmission. Unlike batadv_bla_rx(), this
 * function does not free the skb.
 */
int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid)
{
	struct ethhdr *ethhdr;
	struct batadv_bla_claim search_claim, *claim = NULL;
	struct batadv_hard_iface *primary_if;
	int ret = 0;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		goto allow;

	/* in the VLAN case, the mac header might not be set yet */
	skb_reset_mac_header(skb);

	/* BLA control frames (claims etc.) are handled internally */
	if (batadv_bla_process_claim(bat_priv, primary_if, skb))
		goto handled;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
		/* don't allow broadcasts while claim requests are in flight */
		if (is_multicast_ether_addr(ethhdr->h_dest))
			goto handled;

	/* look up an existing claim for the frame's source mac + vid */
	memcpy(search_claim.addr, ethhdr->h_source, ETH_ALEN);
	search_claim.vid = vid;

	claim = batadv_claim_hash_find(bat_priv, &search_claim);

	/* if no claim exists, allow it */
	if (!claim)
		goto allow;

	/* the client is claimed by us but now sends locally - it has
	 * roamed back to us, so drop the claim and allow the frame
	 */
	if (batadv_compare_eth(claim->backbone_gw->orig,
			       primary_if->net_dev->dev_addr)) {
		batadv_handle_unclaim(bat_priv, primary_if,
				      primary_if->net_dev->dev_addr,
				      ethhdr->h_source, vid);
		goto allow;
	}

	/* client is claimed by another backbone gateway */
	if (is_multicast_ether_addr(ethhdr->h_dest)) {
		/* drop multicast/broadcast; NOTE(review): presumably the
		 * responsible backbone gateway forwards these into the
		 * mesh instead - confirm against the BLA design notes
		 */
		goto handled;
	} else {
		/* unicast may pass even though the source is claimed
		 * elsewhere
		 */
		goto allow;
	}
allow:
	batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
	ret = 0;
	goto out;
handled:
	ret = 1;
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	if (claim)
		batadv_claim_free_ref(claim);
	return ret;
}
1599
1600int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
1601{
1602 struct net_device *net_dev = (struct net_device *)seq->private;
1603 struct batadv_priv *bat_priv = netdev_priv(net_dev);
1604 struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
1605 struct batadv_bla_claim *claim;
1606 struct batadv_hard_iface *primary_if;
1607 struct hlist_head *head;
1608 uint32_t i;
1609 bool is_own;
1610 uint8_t *primary_addr;
1611
1612 primary_if = batadv_seq_print_text_primary_if_get(seq);
1613 if (!primary_if)
1614 goto out;
1615
1616 primary_addr = primary_if->net_dev->dev_addr;
1617 seq_printf(seq,
1618 "Claims announced for the mesh %s (orig %pM, group id %#.4x)\n",
1619 net_dev->name, primary_addr,
1620 ntohs(bat_priv->bla.claim_dest.group));
1621 seq_printf(seq, " %-17s %-5s %-17s [o] (%-6s)\n",
1622 "Client", "VID", "Originator", "CRC");
1623 for (i = 0; i < hash->size; i++) {
1624 head = &hash->table[i];
1625
1626 rcu_read_lock();
1627 hlist_for_each_entry_rcu(claim, head, hash_entry) {
1628 is_own = batadv_compare_eth(claim->backbone_gw->orig,
1629 primary_addr);
1630 seq_printf(seq, " * %pM on % 5d by %pM [%c] (%#.4x)\n",
1631 claim->addr, claim->vid,
1632 claim->backbone_gw->orig,
1633 (is_own ? 'x' : ' '),
1634 claim->backbone_gw->crc);
1635 }
1636 rcu_read_unlock();
1637 }
1638out:
1639 if (primary_if)
1640 batadv_hardif_free_ref(primary_if);
1641 return 0;
1642}
1643
1644int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset)
1645{
1646 struct net_device *net_dev = (struct net_device *)seq->private;
1647 struct batadv_priv *bat_priv = netdev_priv(net_dev);
1648 struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
1649 struct batadv_bla_backbone_gw *backbone_gw;
1650 struct batadv_hard_iface *primary_if;
1651 struct hlist_head *head;
1652 int secs, msecs;
1653 uint32_t i;
1654 bool is_own;
1655 uint8_t *primary_addr;
1656
1657 primary_if = batadv_seq_print_text_primary_if_get(seq);
1658 if (!primary_if)
1659 goto out;
1660
1661 primary_addr = primary_if->net_dev->dev_addr;
1662 seq_printf(seq,
1663 "Backbones announced for the mesh %s (orig %pM, group id %#.4x)\n",
1664 net_dev->name, primary_addr,
1665 ntohs(bat_priv->bla.claim_dest.group));
1666 seq_printf(seq, " %-17s %-5s %-9s (%-6s)\n",
1667 "Originator", "VID", "last seen", "CRC");
1668 for (i = 0; i < hash->size; i++) {
1669 head = &hash->table[i];
1670
1671 rcu_read_lock();
1672 hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
1673 msecs = jiffies_to_msecs(jiffies -
1674 backbone_gw->lasttime);
1675 secs = msecs / 1000;
1676 msecs = msecs % 1000;
1677
1678 is_own = batadv_compare_eth(backbone_gw->orig,
1679 primary_addr);
1680 if (is_own)
1681 continue;
1682
1683 seq_printf(seq,
1684 " * %pM on % 5d % 4i.%03is (%#.4x)\n",
1685 backbone_gw->orig, backbone_gw->vid,
1686 secs, msecs, backbone_gw->crc);
1687 }
1688 rcu_read_unlock();
1689 }
1690out:
1691 if (primary_if)
1692 batadv_hardif_free_ref(primary_if);
1693 return 0;
1694}
1695