/*
 * mac80211 mesh path table
 *
 * RCU-protected hash tables mapping mesh destinations to next hops
 * (mesh_paths) and proxied destinations to their mesh proxy points
 * (mpp_paths).
 */

#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "mesh.h"

/* There will be 2^INIT_PATHS_SIZE_ORDER hash buckets initially */
#define INIT_PATHS_SIZE_ORDER	2

/* Keep the mean chain length of each hash bucket below this constant */
#define MEAN_CHAIN_LEN		2

#define MPATH_EXPIRED(mpath) ((mpath->flags & MESH_PATH_ACTIVE) && \
				time_after(jiffies, mpath->exp_time) && \
				!(mpath->flags & MESH_PATH_FIXED))
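/*
 * Worked example (illustrative, derived from the constants above):
 * mesh_table_alloc(INIT_PATHS_SIZE_ORDER) creates 2^2 == 4 hash buckets.
 * With MEAN_CHAIN_LEN == 2, a grow is requested once the entry count
 * reaches 2 * 4 == 8, at which point mesh_table_grow() doubles the table
 * to 8 buckets (16 entries before the next grow, and so on).
 */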

struct mpath_node {
	struct hlist_node list;
	struct rcu_head rcu;
	/*
	 * This indirection allows two different tables to point to the same
	 * mesh_path structure, which is useful while the tables are resized.
	 */
	struct mesh_path *mpath;
};

static struct mesh_table *mesh_paths;
static struct mesh_table *mpp_paths; /* paths to proxied destinations (MPP) */

/* updated on every mesh path add/del to flag table changes */
int mesh_paths_generation;
static void __mesh_table_free(struct mesh_table *tbl)
{
	kfree(tbl->hash_buckets);
	kfree(tbl->hashwlock);
	kfree(tbl);
}

void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
{
	struct hlist_head *mesh_hash;
	struct hlist_node *p, *q;
	int i;

	mesh_hash = tbl->hash_buckets;
	for (i = 0; i <= tbl->hash_mask; i++) {
		spin_lock(&tbl->hashwlock[i]);
		hlist_for_each_safe(p, q, &mesh_hash[i]) {
			tbl->free_node(p, free_leafs);
			atomic_dec(&tbl->entries);
		}
		spin_unlock(&tbl->hashwlock[i]);
	}
	__mesh_table_free(tbl);
}

static struct mesh_table *mesh_table_grow(struct mesh_table *tbl)
{
	struct mesh_table *newtbl;
	struct hlist_head *oldhash;
	struct hlist_node *p, *q;
	int i;

	if (atomic_read(&tbl->entries)
			< tbl->mean_chain_len * (tbl->hash_mask + 1))
		goto endgrow;

	newtbl = mesh_table_alloc(tbl->size_order + 1);
	if (!newtbl)
		goto endgrow;

	newtbl->free_node = tbl->free_node;
	newtbl->mean_chain_len = tbl->mean_chain_len;
	newtbl->copy_node = tbl->copy_node;
	atomic_set(&newtbl->entries, atomic_read(&tbl->entries));

	oldhash = tbl->hash_buckets;
	for (i = 0; i <= tbl->hash_mask; i++)
		hlist_for_each(p, &oldhash[i])
			if (tbl->copy_node(p, newtbl) < 0)
				goto errcopy;

	return newtbl;

errcopy:
	for (i = 0; i <= newtbl->hash_mask; i++) {
		hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
			tbl->free_node(p, 0);
	}
	__mesh_table_free(newtbl);
endgrow:
	return NULL;
}

/*
 * This lock has the table-grow functions as writers and the node add and
 * delete paths as readers.  Plain lookups take neither side; they are
 * protected by RCU alone.
 */
static DEFINE_RWLOCK(pathtbl_resize_lock);

/**
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	struct sk_buff_head tmpq;
	unsigned long flags;

	rcu_assign_pointer(mpath->next_hop, sta);

	__skb_queue_head_init(&tmpq);

	spin_lock_irqsave(&mpath->frame_queue.lock, flags);

	/* re-address the frames queued while the path was unresolved */
	while ((skb = __skb_dequeue(&mpath->frame_queue)) != NULL) {
		hdr = (struct ieee80211_hdr *) skb->data;
		memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
		__skb_queue_tail(&tmpq, skb);
	}

	skb_queue_splice(&tmpq, &mpath->frame_queue);
	spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}
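/*
 * Illustrative sketch (not part of this file): a path-resolution handler,
 * such as the HWMP code elsewhere in mac80211, would typically mark the
 * path active under state_lock, assign the next hop, and then release the
 * frames that were queued while the path was unresolved:
 *
 *	spin_lock_bh(&mpath->state_lock);
 *	mpath->flags |= MESH_PATH_ACTIVE;
 *	mesh_path_assign_nexthop(mpath, sta);
 *	spin_unlock_bh(&mpath->state_lock);
 *	mesh_path_tx_pending(mpath);
 */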

/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @dst: hardware address (ETH_ALEN length) of destination
 * @sdata: local subif
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct hlist_node *n;
	struct hlist_head *bucket;
	struct mesh_table *tbl;
	struct mpath_node *node;

	tbl = rcu_dereference(mesh_paths);

	bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
	hlist_for_each_entry_rcu(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
				memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
			if (MPATH_EXPIRED(mpath)) {
				spin_lock_bh(&mpath->state_lock);
				if (MPATH_EXPIRED(mpath))
					mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&mpath->state_lock);
			}
			return mpath;
		}
	}
	return NULL;
}
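/*
 * Illustrative sketch (not part of this file): a lookup and every use of
 * the returned pointer must happen inside one RCU read-side section:
 *
 *	rcu_read_lock();
 *	mpath = mesh_path_lookup(dst, sdata);
 *	if (mpath && (mpath->flags & MESH_PATH_ACTIVE))
 *		sta = rcu_dereference(mpath->next_hop);
 *	rcu_read_unlock();
 *
 * Neither mpath nor sta may be dereferenced past rcu_read_unlock().
 */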

/*
 * mpp_path_lookup - as mesh_path_lookup(), but against the MPP (proxy)
 * table.  Must likewise be called within an rcu read section.
 */
struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct hlist_node *n;
	struct hlist_head *bucket;
	struct mesh_table *tbl;
	struct mpath_node *node;

	tbl = rcu_dereference(mpp_paths);

	bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
	hlist_for_each_entry_rcu(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
				memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
			if (MPATH_EXPIRED(mpath)) {
				spin_lock_bh(&mpath->state_lock);
				if (MPATH_EXPIRED(mpath))
					mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&mpath->state_lock);
			}
			return mpath;
		}
	}
	return NULL;
}

/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @idx: index
 * @sdata: local subif, or NULL for all entries
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data *sdata)
{
	struct mpath_node *node;
	struct hlist_node *p;
	int i;
	int j = 0;

	for_each_mesh_entry(mesh_paths, p, node, i) {
		if (sdata && node->mpath->sdata != sdata)
			continue;
		if (j++ == idx) {
			if (MPATH_EXPIRED(node->mpath)) {
				spin_lock_bh(&node->mpath->state_lock);
				if (MPATH_EXPIRED(node->mpath))
					node->mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&node->mpath->state_lock);
			}
			return node->mpath;
		}
	}

	return NULL;
}
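/*
 * Illustrative sketch (not part of this file): a dump handler, e.g. for
 * debugfs or nl80211, can walk the table by index until NULL is returned:
 *
 *	rcu_read_lock();
 *	for (i = 0; (mpath = mesh_path_lookup_by_idx(i, sdata)); i++)
 *		printk(KERN_DEBUG "path %d: dst %pM\n", i, mpath->dst);
 *	rcu_read_unlock();
 */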

/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @dst: destination address of the path (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 on success
 *
 * State: the initial state of the new path is set to 0
 */
int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (memcmp(dst, sdata->dev->dev_addr, ETH_ALEN) == 0)
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
		return -ENOSPC;

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	read_lock(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	new_mpath->timer.data = (unsigned long) new_mpath;
	new_mpath->timer.function = mesh_path_timer;
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);
	init_timer(&new_mpath->timer);

	hash_idx = mesh_table_hash(dst, sdata, mesh_paths);
	bucket = &mesh_paths->hash_buckets[hash_idx];

	spin_lock(&mesh_paths->hashwlock[hash_idx]);

	err = -EEXIST;
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&mesh_paths->entries) >=
		mesh_paths->mean_chain_len * (mesh_paths->hash_mask + 1))
		grow = 1;

	mesh_paths_generation++;

	spin_unlock(&mesh_paths->hashwlock[hash_idx]);
	read_unlock(&pathtbl_resize_lock);
	if (grow) {
		set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &ifmsh->work);
	}
	return 0;

err_exists:
	spin_unlock(&mesh_paths->hashwlock[hash_idx]);
	read_unlock(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	atomic_dec(&sdata->u.mesh.mpaths);
	return err;
}
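/*
 * Illustrative sketch (not part of this file): the transmit path creates
 * entries on demand, roughly like this, queueing frames on
 * mpath->frame_queue until path resolution marks the path active:
 *
 *	rcu_read_lock();
 *	mpath = mesh_path_lookup(dst, sdata);
 *	if (!mpath) {
 *		err = mesh_path_add(dst, sdata);
 *		if (!err)
 *			mpath = mesh_path_lookup(dst, sdata);
 *	}
 *	...
 *	rcu_read_unlock();
 */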

void mesh_mpath_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	write_lock(&pathtbl_resize_lock);
	oldtbl = mesh_paths;
	newtbl = mesh_table_grow(mesh_paths);
	if (!newtbl) {
		write_unlock(&pathtbl_resize_lock);
		return;
	}
	rcu_assign_pointer(mesh_paths, newtbl);
	write_unlock(&pathtbl_resize_lock);

	synchronize_rcu();
	mesh_table_free(oldtbl, false);
}

void mesh_mpp_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	write_lock(&pathtbl_resize_lock);
	oldtbl = mpp_paths;
	newtbl = mesh_table_grow(mpp_paths);
	if (!newtbl) {
		write_unlock(&pathtbl_resize_lock);
		return;
	}
	rcu_assign_pointer(mpp_paths, newtbl);
	write_unlock(&pathtbl_resize_lock);

	synchronize_rcu();
	mesh_table_free(oldtbl, false);
}
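/*
 * Illustrative sketch (not part of this file): the grow requests queued by
 * mesh_path_add()/mpp_path_add() are serviced from the mesh interface work
 * handler, roughly as follows:
 *
 *	if (test_and_clear_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags))
 *		mesh_mpath_table_grow();
 *	if (test_and_clear_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags))
 *		mesh_mpp_table_grow();
 */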

int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (memcmp(dst, sdata->dev->dev_addr, ETH_ALEN) == 0)
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	read_lock(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);

	hash_idx = mesh_table_hash(dst, sdata, mpp_paths);
	bucket = &mpp_paths->hash_buckets[hash_idx];

	spin_lock(&mpp_paths->hashwlock[hash_idx]);

	err = -EEXIST;
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&mpp_paths->entries) >=
		mpp_paths->mean_chain_len * (mpp_paths->hash_mask + 1))
		grow = 1;

	spin_unlock(&mpp_paths->hashwlock[hash_idx]);
	read_unlock(&pathtbl_resize_lock);
	if (grow) {
		set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &ifmsh->work);
	}
	return 0;

err_exists:
	spin_unlock(&mpp_paths->hashwlock[hash_idx]);
	read_unlock(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	return err;
}

/**
 * mesh_plink_broken - deactivates paths and sends perr when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	int i;

	rcu_read_lock();
	for_each_mesh_entry(mesh_paths, p, node, i) {
		mpath = node->mpath;
		spin_lock_bh(&mpath->state_lock);
		if (mpath->next_hop == sta &&
		    mpath->flags & MESH_PATH_ACTIVE &&
		    !(mpath->flags & MESH_PATH_FIXED)) {
			mpath->flags &= ~MESH_PATH_ACTIVE;
			++mpath->dsn;
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_error_tx(mpath->dst,
					cpu_to_le32(mpath->dsn),
					sdata->dev->broadcast, sdata);
		} else
			spin_unlock_bh(&mpath->state_lock);
	}
	rcu_read_unlock();
}

/**
 * mesh_path_flush_by_nexthop - deletes mesh paths if their next hop matches
 *
 * @sta: mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink leaves the
 * established state, and established is the only state that allows path
 * creation.  This happens before the sta can be freed (station teardown
 * calls it), so readers inside an rcu read block remain protected against
 * the sta disappearing.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	for_each_mesh_entry(mesh_paths, p, node, i) {
		mpath = node->mpath;
		if (mpath->next_hop == sta)
			mesh_path_del(mpath->dst, mpath->sdata);
	}
}

void mesh_path_flush(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	for_each_mesh_entry(mesh_paths, p, node, i) {
		mpath = node->mpath;
		if (mpath->sdata == sdata)
			mesh_path_del(mpath->dst, mpath->sdata);
	}
}

static void mesh_path_node_reclaim(struct rcu_head *rp)
{
	struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
	struct ieee80211_sub_if_data *sdata = node->mpath->sdata;

	del_timer_sync(&node->mpath->timer);
	atomic_dec(&sdata->u.mesh.mpaths);
	kfree(node->mpath);
	kfree(node);
}

/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @addr: dst address (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 if successful
 */
int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int hash_idx;
	int err = 0;

	read_lock(&pathtbl_resize_lock);
	hash_idx = mesh_table_hash(addr, sdata, mesh_paths);
	bucket = &mesh_paths->hash_buckets[hash_idx];

	spin_lock(&mesh_paths->hashwlock[hash_idx]);
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
				memcmp(addr, mpath->dst, ETH_ALEN) == 0) {
			spin_lock_bh(&mpath->state_lock);
			mpath->flags |= MESH_PATH_RESOLVING;
			hlist_del_rcu(&node->list);
			call_rcu(&node->rcu, mesh_path_node_reclaim);
			atomic_dec(&mesh_paths->entries);
			spin_unlock_bh(&mpath->state_lock);
			goto enddel;
		}
	}

	err = -ENXIO;
enddel:
	mesh_paths_generation++;
	spin_unlock(&mesh_paths->hashwlock[hash_idx]);
	read_unlock(&pathtbl_resize_lock);
	return err;
}

/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 *
 * @mpath: mesh path whose pending frames should be sent
 *
 * Locking: the state_lock of the mpath structure must NOT be held when
 * calling this function.
 */
void mesh_path_tx_pending(struct mesh_path *mpath)
{
	if (mpath->flags & MESH_PATH_ACTIVE)
		ieee80211_add_pending_skbs(mpath->sdata->local,
				&mpath->frame_queue);
}

/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @skb: frame to discard
 * @sdata: network subif the frame was to be sent through
 *
 * If the frame was being forwarded from another MP, a PERR frame will be
 * sent to the precursor.  The precursor's address (i.e. the previous hop)
 * was saved in addr1 of the frame-to-be-forwarded, and is still valid
 * because it is only overwritten once the destination's next hop is known.
 *
 * Locking: the function must be called within an rcu_read_lock region.
 */
void mesh_path_discard_frame(struct sk_buff *skb,
			     struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct mesh_path *mpath;
	u32 dsn = 0;

	if (memcmp(hdr->addr4, sdata->dev->dev_addr, ETH_ALEN) != 0) {
		u8 *ra, *da;

		da = hdr->addr3;
		ra = hdr->addr1;
		mpath = mesh_path_lookup(da, sdata);
		if (mpath)
			dsn = ++mpath->dsn;
		/* send a PERR for the unreachable destination to the precursor */
		mesh_path_error_tx(da, cpu_to_le32(dsn), ra, sdata);
	}

	kfree_skb(skb);
	sdata->u.mesh.mshstats.dropped_frames_no_route++;
}

/**
 * mesh_path_flush_pending - free the pending queue of a mesh path
 *
 * @mpath: mesh path whose queue has to be freed
 *
 * Locking: the function must be called within an rcu_read_lock region.
 */
void mesh_path_flush_pending(struct mesh_path *mpath)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&mpath->frame_queue)) &&
			(mpath->flags & MESH_PATH_ACTIVE))
		mesh_path_discard_frame(skb, mpath->sdata);
}

/**
 * mesh_path_fix_nexthop - force a specific next hop for a given mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 *
 * Locking: this function takes mpath->state_lock itself; the caller must
 * not already hold it.
 */
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
	spin_lock_bh(&mpath->state_lock);
	mesh_path_assign_nexthop(mpath, next_hop);
	mpath->dsn = 0xffff;
	mpath->metric = 0;
	mpath->hop_count = 0;
	mpath->exp_time = 0;
	mpath->flags |= MESH_PATH_FIXED;
	mesh_path_activate(mpath);
	spin_unlock_bh(&mpath->state_lock);
	mesh_path_tx_pending(mpath);
}
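/*
 * Illustrative sketch (not part of this file): cfg80211 path management
 * pins a user-configured next hop roughly like this (sta_info_get() is
 * the mac80211 station lookup; the exact call sites live in cfg.c):
 *
 *	rcu_read_lock();
 *	mpath = mesh_path_lookup(dst, sdata);
 *	sta = sta_info_get(sdata->local, next_hop);
 *	if (mpath && sta)
 *		mesh_path_fix_nexthop(mpath, sta);
 *	rcu_read_unlock();
 */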

static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
{
	struct mesh_path *mpath;
	struct mpath_node *node = hlist_entry(p, struct mpath_node, list);

	mpath = node->mpath;
	hlist_del_rcu(p);
	if (free_leafs)
		kfree(mpath);
	kfree(node);
}

static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
{
	struct mesh_path *mpath;
	struct mpath_node *node, *new_node;
	u32 hash_idx;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (new_node == NULL)
		return -ENOMEM;

	node = hlist_entry(p, struct mpath_node, list);
	mpath = node->mpath;
	new_node->mpath = mpath;
	hash_idx = mesh_table_hash(mpath->dst, mpath->sdata, newtbl);
	hlist_add_head(&new_node->list,
			&newtbl->hash_buckets[hash_idx]);
	return 0;
}

int mesh_pathtbl_init(void)
{
	mesh_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!mesh_paths)
		return -ENOMEM;
	mesh_paths->free_node = &mesh_path_node_free;
	mesh_paths->copy_node = &mesh_path_node_copy;
	mesh_paths->mean_chain_len = MEAN_CHAIN_LEN;

	mpp_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!mpp_paths) {
		mesh_table_free(mesh_paths, true);
		return -ENOMEM;
	}
	mpp_paths->free_node = &mesh_path_node_free;
	mpp_paths->copy_node = &mesh_path_node_copy;
	mpp_paths->mean_chain_len = MEAN_CHAIN_LEN;

	return 0;
}

void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	read_lock(&pathtbl_resize_lock);
	for_each_mesh_entry(mesh_paths, p, node, i) {
		if (node->mpath->sdata != sdata)
			continue;
		mpath = node->mpath;
		spin_lock_bh(&mpath->state_lock);
		if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
		    (!(mpath->flags & MESH_PATH_FIXED)) &&
		    time_after(jiffies,
			       mpath->exp_time + MESH_PATH_EXPIRE)) {
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_del(mpath->dst, mpath->sdata);
		} else
			spin_unlock_bh(&mpath->state_lock);
	}
	read_unlock(&pathtbl_resize_lock);
}

void mesh_pathtbl_unregister(void)
{
	mesh_table_free(mesh_paths, true);
	mesh_table_free(mpp_paths, true);
}