#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "wme.h"
#include "ieee80211_i.h"
#include "mesh.h"

static void mesh_path_free_rcu(struct mesh_table *tbl, struct mesh_path *mpath);

static u32 mesh_table_hash(const void *addr, u32 len, u32 seed)
{
	/* Use the last four bytes of the hw addr as the hash index */
	return jhash_1word(__get_unaligned_cpu32((u8 *)addr + 2), seed);
}

static const struct rhashtable_params mesh_rht_params = {
	.nelem_hint = 2,
	.automatic_shrinking = true,
	.key_len = ETH_ALEN,
	.key_offset = offsetof(struct mesh_path, dst),
	.head_offset = offsetof(struct mesh_path, rhash),
	.hashfn = mesh_table_hash,
};

static inline bool mpath_expired(struct mesh_path *mpath)
{
	return (mpath->flags & MESH_PATH_ACTIVE) &&
	       time_after(jiffies, mpath->exp_time) &&
	       !(mpath->flags & MESH_PATH_FIXED);
}

static void mesh_path_rht_free(void *ptr, void *tblptr)
{
	struct mesh_path *mpath = ptr;
	struct mesh_table *tbl = tblptr;

	mesh_path_free_rcu(tbl, mpath);
}

static struct mesh_table *mesh_table_alloc(void)
{
	struct mesh_table *newtbl;

	newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
	if (!newtbl)
		return NULL;

	INIT_HLIST_HEAD(&newtbl->known_gates);
	INIT_HLIST_HEAD(&newtbl->walk_head);
	atomic_set(&newtbl->entries, 0);
	spin_lock_init(&newtbl->gates_lock);
	spin_lock_init(&newtbl->walk_lock);

	return newtbl;
}

static void mesh_table_free(struct mesh_table *tbl)
{
	rhashtable_free_and_destroy(&tbl->rhead,
				    mesh_path_rht_free, tbl);
	kfree(tbl);
}

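/**
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Locking: mpath->state_lock must be held when calling this function
 */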
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	unsigned long flags;

	rcu_assign_pointer(mpath->next_hop, sta);

	spin_lock_irqsave(&mpath->frame_queue.lock, flags);
	skb_queue_walk(&mpath->frame_queue, skb) {
		hdr = (struct ieee80211_hdr *) skb->data;
		memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
		memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN);
		ieee80211_mps_set_frame_flags(sta->sdata, sta, hdr);
	}

	spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}

static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
			     struct mesh_path *gate_mpath)
{
	struct ieee80211_hdr *hdr;
	struct ieee80211s_hdr *mshdr;
	int mesh_hdrlen, hdrlen;
	char *next_hop;

	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);

	if (!(mshdr->flags & MESH_FLAGS_AE)) {
		/* size of the fixed part of the mesh header */
		mesh_hdrlen = 6;

		/* make room for the two extended addresses */
		skb_push(skb, 2 * ETH_ALEN);
		memmove(skb->data, hdr, hdrlen + mesh_hdrlen);

		hdr = (struct ieee80211_hdr *) skb->data;

		/* we preserve the previous mesh header and only add
		 * the new addresses
		 */
		mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
		mshdr->flags = MESH_FLAGS_AE_A5_A6;
		memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN);
		memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN);
	}

	/* update next hop */
	hdr = (struct ieee80211_hdr *) skb->data;
	rcu_read_lock();
	next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr;
	memcpy(hdr->addr1, next_hop, ETH_ALEN);
	rcu_read_unlock();
	memcpy(hdr->addr2, gate_mpath->sdata->vif.addr, ETH_ALEN);
	memcpy(hdr->addr3, dst_addr, ETH_ALEN);
}

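/**
 * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another
 *
 * This function is used to transfer or copy frames from an unresolved mpath to
 * a gate mpath.  The function also adds the Address Extension field and
 * updates the next hop.
 *
 * If a frame already has an Address Extension field, only the next hop and
 * destination addresses are updated.
 *
 * The gate mpath must be an active mpath with a valid mpath->next_hop.
 *
 * @gate_mpath: An active mpath the frames will be sent to (i.e. the gate)
 * @from_mpath: The failed mpath
 * @copy: When true, copy all the frames to the new mpath queue.  When false,
 * move them.
 */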
static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
				    struct mesh_path *from_mpath,
				    bool copy)
{
	struct sk_buff *skb, *fskb, *tmp;
	struct sk_buff_head failq;
	unsigned long flags;

	if (WARN_ON(gate_mpath == from_mpath))
		return;
	if (WARN_ON(!gate_mpath->next_hop))
		return;

	__skb_queue_head_init(&failq);

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice_init(&from_mpath->frame_queue, &failq);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);

	skb_queue_walk_safe(&failq, fskb, tmp) {
		if (skb_queue_len(&gate_mpath->frame_queue) >=
		    MESH_FRAME_QUEUE_LEN) {
			mpath_dbg(gate_mpath->sdata, "mpath queue full!\n");
			break;
		}

		skb = skb_copy(fskb, GFP_ATOMIC);
		if (WARN_ON(!skb))
			break;

		prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
		skb_queue_tail(&gate_mpath->frame_queue, skb);

		if (copy)
			continue;

		__skb_unlink(fskb, &failq);
		kfree_skb(fskb);
	}

	mpath_dbg(gate_mpath->sdata, "Mpath queue for gate %pM has %d frames\n",
		  gate_mpath->dst, skb_queue_len(&gate_mpath->frame_queue));

	if (!copy)
		return;

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice(&failq, &from_mpath->frame_queue);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
}

static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst,
				      struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;

	mpath = rhashtable_lookup(&tbl->rhead, dst, mesh_rht_params);

	if (mpath && mpath_expired(mpath)) {
		spin_lock_bh(&mpath->state_lock);
		mpath->flags &= ~MESH_PATH_ACTIVE;
		spin_unlock_bh(&mpath->state_lock);
	}
	return mpath;
}

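/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @sdata: local subif
 * @dst: hardware address (ETH_ALEN length) of destination
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within a read rcu section.
 */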
struct mesh_path *
mesh_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
	return mpath_lookup(sdata->u.mesh.mesh_paths, dst, sdata);
}

struct mesh_path *
mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
	return mpath_lookup(sdata->u.mesh.mpp_paths, dst, sdata);
}

static struct mesh_path *
__mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx)
{
	int i = 0;
	struct mesh_path *mpath;

	hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) {
		if (i++ == idx)
			break;
	}

	if (!mpath)
		return NULL;

	if (mpath_expired(mpath)) {
		spin_lock_bh(&mpath->state_lock);
		mpath->flags &= ~MESH_PATH_ACTIVE;
		spin_unlock_bh(&mpath->state_lock);
	}
	return mpath;
}

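/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @sdata: local subif
 * @idx: index
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */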
struct mesh_path *
mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{
	return __mesh_path_lookup_by_idx(sdata->u.mesh.mesh_paths, idx);
}

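/**
 * mpp_path_lookup_by_idx - look up a path in the proxy path table by its index
 * @sdata: local subif
 * @idx: index
 *
 * Returns: pointer to the proxy path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */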
struct mesh_path *
mpp_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{
	return __mesh_path_lookup_by_idx(sdata->u.mesh.mpp_paths, idx);
}

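/**
 * mesh_path_add_gate - record that the given mesh path leads to a mesh gate
 * @mpath: gate path to add to the gate list
 *
 * Returns: 0 on success, -EEXIST if the path is already flagged as a gate
 */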
int mesh_path_add_gate(struct mesh_path *mpath)
{
	struct mesh_table *tbl;
	int err;

	rcu_read_lock();
	tbl = mpath->sdata->u.mesh.mesh_paths;

	spin_lock_bh(&mpath->state_lock);
	if (mpath->is_gate) {
		err = -EEXIST;
		spin_unlock_bh(&mpath->state_lock);
		goto err_rcu;
	}
	mpath->is_gate = true;
	mpath->sdata->u.mesh.num_gates++;

	spin_lock(&tbl->gates_lock);
	hlist_add_head_rcu(&mpath->gate_list, &tbl->known_gates);
	spin_unlock(&tbl->gates_lock);

	spin_unlock_bh(&mpath->state_lock);

	mpath_dbg(mpath->sdata,
		  "Mesh path: Recorded new gate: %pM. %d known gates\n",
		  mpath->dst, mpath->sdata->u.mesh.num_gates);
	err = 0;
err_rcu:
	rcu_read_unlock();
	return err;
}

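/**
 * mesh_gate_del - remove a mesh gate from the list of known gates
 * @tbl: table which holds our list of known gates
 * @mpath: gate mpath
 */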
static void mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
	lockdep_assert_held(&mpath->state_lock);
	if (!mpath->is_gate)
		return;

	mpath->is_gate = false;
	spin_lock_bh(&tbl->gates_lock);
	hlist_del_rcu(&mpath->gate_list);
	mpath->sdata->u.mesh.num_gates--;
	spin_unlock_bh(&tbl->gates_lock);

	mpath_dbg(mpath->sdata,
		  "Mesh path: Deleted gate: %pM. %d known gates\n",
		  mpath->dst, mpath->sdata->u.mesh.num_gates);
}

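/**
 * mesh_gate_num - number of gates known to this interface
 * @sdata: subif data
 */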
int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
{
	return sdata->u.mesh.num_gates;
}

static
struct mesh_path *mesh_path_new(struct ieee80211_sub_if_data *sdata,
				const u8 *dst, gfp_t gfp_flags)
{
	struct mesh_path *new_mpath;

	new_mpath = kzalloc(sizeof(struct mesh_path), gfp_flags);
	if (!new_mpath)
		return NULL;

	memcpy(new_mpath->dst, dst, ETH_ALEN);
	eth_broadcast_addr(new_mpath->rann_snd_addr);
	new_mpath->is_root = false;
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);
	timer_setup(&new_mpath->timer, mesh_path_timer, 0);

	return new_mpath;
}

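/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @sdata: local subif
 * @dst: destination address of the path (ETH_ALEN length)
 *
 * Returns: the new (or already existing) mesh path on success, or an
 * ERR_PTR() on failure.
 */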
struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
				const u8 *dst)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;

	if (ether_addr_equal(dst, sdata->vif.addr))
		/* never add ourselves as neighbours */
		return ERR_PTR(-ENOTSUPP);

	if (is_multicast_ether_addr(dst))
		return ERR_PTR(-ENOTSUPP);

	if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
		return ERR_PTR(-ENOSPC);

	new_mpath = mesh_path_new(sdata, dst, GFP_ATOMIC);
	if (!new_mpath)
		return ERR_PTR(-ENOMEM);

	tbl = sdata->u.mesh.mesh_paths;
	spin_lock_bh(&tbl->walk_lock);
	mpath = rhashtable_lookup_get_insert_fast(&tbl->rhead,
						  &new_mpath->rhash,
						  mesh_rht_params);
	if (!mpath)
		hlist_add_head(&new_mpath->walk_list, &tbl->walk_head);
	spin_unlock_bh(&tbl->walk_lock);

	if (mpath) {
		/* an entry already existed or the insert failed; drop ours */
		kfree(new_mpath);

		if (IS_ERR(mpath))
			return mpath;

		new_mpath = mpath;
	}

	sdata->u.mesh.mesh_paths_generation++;
	return new_mpath;
}

int mpp_path_add(struct ieee80211_sub_if_data *sdata,
		 const u8 *dst, const u8 *mpp)
{
	struct mesh_table *tbl;
	struct mesh_path *new_mpath;
	int ret;

	if (ether_addr_equal(dst, sdata->vif.addr))
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	new_mpath = mesh_path_new(sdata, dst, GFP_ATOMIC);
	if (!new_mpath)
		return -ENOMEM;

	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
	tbl = sdata->u.mesh.mpp_paths;

	spin_lock_bh(&tbl->walk_lock);
	ret = rhashtable_lookup_insert_fast(&tbl->rhead,
					    &new_mpath->rhash,
					    mesh_rht_params);
	if (!ret)
		hlist_add_head_rcu(&new_mpath->walk_list, &tbl->walk_head);
	spin_unlock_bh(&tbl->walk_lock);

	if (ret)
		kfree(new_mpath);

	sdata->u.mesh.mpp_paths_generation++;
	return ret;
}

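/**
 * mesh_plink_broken - deactivates paths and sends perr when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */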
void mesh_plink_broken(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
	static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct mesh_path *mpath;

	rcu_read_lock();
	hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) {
		if (rcu_access_pointer(mpath->next_hop) == sta &&
		    mpath->flags & MESH_PATH_ACTIVE &&
		    !(mpath->flags & MESH_PATH_FIXED)) {
			spin_lock_bh(&mpath->state_lock);
			mpath->flags &= ~MESH_PATH_ACTIVE;
			++mpath->sn;
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_error_tx(sdata,
				sdata->u.mesh.mshcfg.element_ttl,
				mpath->dst, mpath->sn,
				WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast);
		}
	}
	rcu_read_unlock();
}

static void mesh_path_free_rcu(struct mesh_table *tbl,
			       struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;

	spin_lock_bh(&mpath->state_lock);
	mpath->flags |= MESH_PATH_RESOLVING | MESH_PATH_DELETED;
	mesh_gate_del(tbl, mpath);
	spin_unlock_bh(&mpath->state_lock);
	del_timer_sync(&mpath->timer);
	atomic_dec(&sdata->u.mesh.mpaths);
	atomic_dec(&tbl->entries);
	/* drop any frames still queued on this path so they are not leaked */
	mesh_path_flush_pending(mpath);
	kfree_rcu(mpath, rcu);
}

static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
	hlist_del_rcu(&mpath->walk_list);
	rhashtable_remove_fast(&tbl->rhead, &mpath->rhash, mesh_rht_params);
	mesh_path_free_rcu(tbl, mpath);
}

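/**
 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
 *
 * @sta: mesh peer to match
 *
 * RCU notes: this function is called when a mesh station is removed due to
 * either wanting to leave, or being evicted for over-subscription of our
 * limited mesh memory. RCU doesn't come into play here since sta_info that
 * owns the destination may be referenced by other readers.
 */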
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
	struct mesh_path *mpath;
	struct hlist_node *n;

	spin_lock_bh(&tbl->walk_lock);
	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
		if (rcu_access_pointer(mpath->next_hop) == sta)
			__mesh_path_del(tbl, mpath);
	}
	spin_unlock_bh(&tbl->walk_lock);
}

static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
			       const u8 *proxy)
{
	struct mesh_table *tbl = sdata->u.mesh.mpp_paths;
	struct mesh_path *mpath;
	struct hlist_node *n;

	spin_lock_bh(&tbl->walk_lock);
	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
		if (ether_addr_equal(mpath->mpp, proxy))
			__mesh_path_del(tbl, mpath);
	}
	spin_unlock_bh(&tbl->walk_lock);
}

static void table_flush_by_iface(struct mesh_table *tbl)
{
	struct mesh_path *mpath;
	struct hlist_node *n;

	spin_lock_bh(&tbl->walk_lock);
	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
		__mesh_path_del(tbl, mpath);
	}
	spin_unlock_bh(&tbl->walk_lock);
}

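/**
 * mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface
 *
 * @sdata: interface data to match
 *
 * This function deletes both mesh paths and mesh portal (proxy) paths.
 */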
void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
{
	table_flush_by_iface(sdata->u.mesh.mesh_paths);
	table_flush_by_iface(sdata->u.mesh.mpp_paths);
}

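/**
 * table_path_del - delete a path from the mesh or mpp table
 *
 * @tbl: mesh or mpp path table
 * @sdata: local subif
 * @addr: dst address (ETH_ALEN length)
 *
 * Returns: 0 if successful
 */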
static int table_path_del(struct mesh_table *tbl,
			  struct ieee80211_sub_if_data *sdata,
			  const u8 *addr)
{
	struct mesh_path *mpath;

	spin_lock_bh(&tbl->walk_lock);
	mpath = rhashtable_lookup_fast(&tbl->rhead, addr, mesh_rht_params);
	if (!mpath) {
		spin_unlock_bh(&tbl->walk_lock);
		return -ENXIO;
	}

	__mesh_path_del(tbl, mpath);
	spin_unlock_bh(&tbl->walk_lock);
	return 0;
}

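/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @sdata: local subif
 * @addr: dst address (ETH_ALEN length)
 *
 * Returns: 0 if successful
 */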
int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
{
	int err;

	/* flush relevant mpp entries first */
	mpp_flush_by_proxy(sdata, addr);

	err = table_path_del(sdata->u.mesh.mesh_paths, sdata, addr);
	sdata->u.mesh.mesh_paths_generation++;
	return err;
}

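/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 *
 * @mpath: mesh path to activate
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function.
 */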
void mesh_path_tx_pending(struct mesh_path *mpath)
{
	if (mpath->flags & MESH_PATH_ACTIVE)
		ieee80211_add_pending_skbs(mpath->sdata->local,
					   &mpath->frame_queue);
}

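/**
 * mesh_path_send_to_gates - sends pending frames to all known mesh gates
 *
 * @mpath: mesh path whose queue will be emptied
 *
 * If there is only one gate, the frames are transferred from the failed mpath
 * queue to that gate's queue.  If there are more than one gates, the frames
 * are copied from each gate to the next.  After frames are copied, the
 * mpath queues are emptied onto the transmission queue.
 *
 * Returns: 0 on success, -EHOSTUNREACH if no active gate was found
 */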
int mesh_path_send_to_gates(struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;
	struct mesh_table *tbl;
	struct mesh_path *from_mpath = mpath;
	struct mesh_path *gate;
	bool copy = false;

	tbl = sdata->u.mesh.mesh_paths;

	rcu_read_lock();
	hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) {
		if (gate->flags & MESH_PATH_ACTIVE) {
			mpath_dbg(sdata, "Forwarding to %pM\n", gate->dst);
			mesh_path_move_to_queue(gate, from_mpath, copy);
			from_mpath = gate;
			copy = true;
		} else {
			mpath_dbg(sdata,
				  "Not forwarding to %pM (flags %#x)\n",
				  gate->dst, gate->flags);
		}
	}

	hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) {
		mpath_dbg(sdata, "Sending to %pM\n", gate->dst);
		mesh_path_tx_pending(gate);
	}
	rcu_read_unlock();

	return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
}

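/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @sdata: network subif the frame was to be sent through
 * @skb: frame to discard
 */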
void mesh_path_discard_frame(struct ieee80211_sub_if_data *sdata,
			     struct sk_buff *skb)
{
	kfree_skb(skb);
	sdata->u.mesh.mshstats.dropped_frames_no_route++;
}

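/**
 * mesh_path_flush_pending - free the pending queue of a mesh path
 *
 * @mpath: mesh path whose queue has to be freed
 */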
void mesh_path_flush_pending(struct mesh_path *mpath)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
		mesh_path_discard_frame(mpath->sdata, skb);
}

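/**
 * mesh_path_fix_nexthop - force a specific next hop for a given mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 */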
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
	spin_lock_bh(&mpath->state_lock);
	mesh_path_assign_nexthop(mpath, next_hop);
	mpath->sn = 0xffff;
	mpath->metric = 0;
	mpath->hop_count = 0;
	mpath->exp_time = 0;
	mpath->flags = MESH_PATH_FIXED | MESH_PATH_SN_VALID;
	mesh_path_activate(mpath);
	spin_unlock_bh(&mpath->state_lock);
	ewma_mesh_fail_avg_init(&next_hop->mesh->fail_avg);
	/* init the average at a low value - a 0 start is tricky */
	ewma_mesh_fail_avg_add(&next_hop->mesh->fail_avg, 1);
	mesh_path_tx_pending(mpath);
}

int mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl_path, *tbl_mpp;
	int ret;

	tbl_path = mesh_table_alloc();
	if (!tbl_path)
		return -ENOMEM;

	tbl_mpp = mesh_table_alloc();
	if (!tbl_mpp) {
		ret = -ENOMEM;
		goto free_path;
	}

	rhashtable_init(&tbl_path->rhead, &mesh_rht_params);
	rhashtable_init(&tbl_mpp->rhead, &mesh_rht_params);

	sdata->u.mesh.mesh_paths = tbl_path;
	sdata->u.mesh.mpp_paths = tbl_mpp;

	return 0;

free_path:
	/* tbl_path's rhashtable has not been initialized yet and the table
	 * holds no entries, so a plain kfree is sufficient (and safe) here
	 */
	kfree(tbl_path);
	return ret;
}

static
void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata,
			  struct mesh_table *tbl)
{
	struct mesh_path *mpath;
	struct hlist_node *n;

	spin_lock_bh(&tbl->walk_lock);
	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
		if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
		    (!(mpath->flags & MESH_PATH_FIXED)) &&
		    time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
			__mesh_path_del(tbl, mpath);
	}
	spin_unlock_bh(&tbl->walk_lock);
}

void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
	mesh_path_tbl_expire(sdata, sdata->u.mesh.mesh_paths);
	mesh_path_tbl_expire(sdata, sdata->u.mesh.mpp_paths);
}

void mesh_pathtbl_unregister(struct ieee80211_sub_if_data *sdata)
{
	mesh_table_free(sdata->u.mesh.mesh_paths);
	mesh_table_free(sdata->u.mesh.mpp_paths);
}