/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>
#include <linux/export.h>

#define HASH_DEFAULT_SIZE       64UL
#define HASH_MIN_SIZE           4U
#define BUCKET_LOCKS_PER_CPU    32UL

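/*
 * Each level of a nested bucket table is a page-sized array of these
 * unions: a slot is either a pointer to the next nested level or the
 * bucket head itself, depending on the depth at which it sits.
 */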
union nested_table {
        union nested_table __rcu *table;
        struct rhash_head __rcu *bucket;
};

static u32 head_hashfn(struct rhashtable *ht,
                       const struct bucket_table *tbl,
                       const struct rhash_head *he)
{
        return rht_head_hashfn(ht, tbl, he, ht->p);
}

#ifdef CONFIG_PROVE_LOCKING
#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
        return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
        spinlock_t *lock = rht_bucket_lock(tbl, hash);

        return (debug_locks) ? lockdep_is_held(lock) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#endif
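/*
 * Size the array of bucket spinlocks from the number of possible CPUs
 * (capped, and reduced under lockdep so the lock classes stay manageable)
 * and the table's locks_mul parameter.
 */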
static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
                              gfp_t gfp)
{
        unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
        unsigned int nr_pcpus = 2;
#else
        unsigned int nr_pcpus = num_possible_cpus();
#endif

        nr_pcpus = min_t(unsigned int, nr_pcpus, 64UL);
        size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);

        /* Never allocate more than 0.5 locks per bucket */
        size = min_t(unsigned int, size, tbl->size >> 1);

        if (tbl->nest)
                size = min(size, 1U << tbl->nest);

        /* spinlock_t may be zero-sized on uniprocessor non-debug builds */
        if (sizeof(spinlock_t) != 0) {
                if (gfpflags_allow_blocking(gfp))
                        tbl->locks = kvmalloc(size * sizeof(spinlock_t), gfp);
                else
                        tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
                                                   gfp);
                if (!tbl->locks)
                        return -ENOMEM;
                for (i = 0; i < size; i++)
                        spin_lock_init(&tbl->locks[i]);
        }
        tbl->locks_mask = size - 1;

        return 0;
}

static void nested_table_free(union nested_table *ntbl, unsigned int size)
{
        const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
        const unsigned int len = 1 << shift;
        unsigned int i;

        ntbl = rcu_dereference_raw(ntbl->table);
        if (!ntbl)
                return;

        if (size > len) {
                size >>= shift;
                for (i = 0; i < len; i++)
                        nested_table_free(ntbl + i, size);
        }

        kfree(ntbl);
}

static void nested_bucket_table_free(const struct bucket_table *tbl)
{
        unsigned int size = tbl->size >> tbl->nest;
        unsigned int len = 1 << tbl->nest;
        union nested_table *ntbl;
        unsigned int i;

        ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);

        for (i = 0; i < len; i++)
                nested_table_free(ntbl + i, size);

        kfree(ntbl);
}

static void bucket_table_free(const struct bucket_table *tbl)
{
        if (tbl->nest)
                nested_bucket_table_free(tbl);

        kvfree(tbl->locks);
        kvfree(tbl);
}

static void bucket_table_free_rcu(struct rcu_head *head)
{
        bucket_table_free(container_of(head, struct bucket_table, rcu));
}

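/*
 * Allocate one level of a nested bucket table.  This uses GFP_ATOMIC so
 * that it can run under a bucket lock during insertion; when @shifted is
 * non-zero the level holds actual buckets and gets NULLS-terminated heads.
 */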
static union nested_table *nested_table_alloc(struct rhashtable *ht,
                                              union nested_table __rcu **prev,
                                              unsigned int shifted,
                                              unsigned int nhash)
{
        union nested_table *ntbl;
        int i;

        ntbl = rcu_dereference(*prev);
        if (ntbl)
                return ntbl;

        ntbl = kzalloc(PAGE_SIZE, GFP_ATOMIC);

        if (ntbl && shifted) {
                for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0].bucket); i++)
                        INIT_RHT_NULLS_HEAD(ntbl[i].bucket, ht,
                                            (i << shifted) | nhash);
        }

        rcu_assign_pointer(*prev, ntbl);

        return ntbl;
}

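/*
 * A nested table is the fallback when the flat bucket array cannot be
 * allocated (typically from atomic context): only the first level is
 * allocated up front and tbl->nest records its shift, with further
 * levels filled in lazily on insertion.
 */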
static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht,
                                                      size_t nbuckets,
                                                      gfp_t gfp)
{
        const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
        struct bucket_table *tbl;
        size_t size;

        if (nbuckets < (1 << (shift + 1)))
                return NULL;

        size = sizeof(*tbl) + sizeof(tbl->buckets[0]);

        tbl = kzalloc(size, gfp);
        if (!tbl)
                return NULL;

        if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets,
                                0, 0)) {
                kfree(tbl);
                return NULL;
        }

        tbl->nest = (ilog2(nbuckets) - 1) % shift + 1;

        return tbl;
}

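/*
 * Allocate a bucket table.  GFP_KERNEL callers may fall back to vmalloc;
 * atomic callers that fail the flat allocation fall back to a nested
 * table instead.
 */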
static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
                                               size_t nbuckets,
                                               gfp_t gfp)
{
        struct bucket_table *tbl = NULL;
        size_t size;
        int i;

        size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
        if (gfp != GFP_KERNEL)
                tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY);
        else
                tbl = kvzalloc(size, gfp);

        size = nbuckets;

        if (tbl == NULL && gfp != GFP_KERNEL) {
                tbl = nested_bucket_table_alloc(ht, nbuckets, gfp);
                nbuckets = 0;
        }
        if (tbl == NULL)
                return NULL;

        tbl->size = size;

        if (alloc_bucket_locks(ht, tbl, gfp) < 0) {
                bucket_table_free(tbl);
                return NULL;
        }

        INIT_LIST_HEAD(&tbl->walkers);

        tbl->hash_rnd = get_random_u32();

        for (i = 0; i < nbuckets; i++)
                INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);

        return tbl;
}

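/* Walk the future_tbl chain to its end and return the newest table. */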
static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
                                                  struct bucket_table *tbl)
{
        struct bucket_table *new_tbl;

        do {
                new_tbl = tbl;
                tbl = rht_dereference_rcu(tbl->future_tbl, ht);
        } while (tbl);

        return new_tbl;
}

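/*
 * Unlink the tail entry of old bucket @old_hash and push it onto the head
 * of the matching bucket in the newest table.  Returns -ENOENT once the
 * old bucket is empty, or -EAGAIN if the target table is nested and the
 * rehash must instead be done by the worker in process context.
 */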
static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
{
        struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
        struct bucket_table *new_tbl = rhashtable_last_table(ht,
                rht_dereference_rcu(old_tbl->future_tbl, ht));
        struct rhash_head __rcu **pprev = rht_bucket_var(old_tbl, old_hash);
        int err = -EAGAIN;
        struct rhash_head *head, *next, *entry;
        spinlock_t *new_bucket_lock;
        unsigned int new_hash;

        if (new_tbl->nest)
                goto out;

        err = -ENOENT;

        rht_for_each(entry, old_tbl, old_hash) {
                err = 0;
                next = rht_dereference_bucket(entry->next, old_tbl, old_hash);

                if (rht_is_a_nulls(next))
                        break;

                pprev = &entry->next;
        }

        if (err)
                goto out;

        new_hash = head_hashfn(ht, new_tbl, entry);

        new_bucket_lock = rht_bucket_lock(new_tbl, new_hash);

        spin_lock_nested(new_bucket_lock, SINGLE_DEPTH_NESTING);
        head = rht_dereference_bucket(new_tbl->buckets[new_hash],
                                      new_tbl, new_hash);

        RCU_INIT_POINTER(entry->next, head);

        rcu_assign_pointer(new_tbl->buckets[new_hash], entry);
        spin_unlock(new_bucket_lock);

        rcu_assign_pointer(*pprev, next);

out:
        return err;
}

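/* Rehash one old bucket completely, then mark it done in tbl->rehash. */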
static int rhashtable_rehash_chain(struct rhashtable *ht,
                                   unsigned int old_hash)
{
        struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
        spinlock_t *old_bucket_lock;
        int err;

        old_bucket_lock = rht_bucket_lock(old_tbl, old_hash);

        spin_lock_bh(old_bucket_lock);
        while (!(err = rhashtable_rehash_one(ht, old_hash)))
                ;

        if (err == -ENOENT) {
                old_tbl->rehash++;
                err = 0;
        }
        spin_unlock_bh(old_bucket_lock);

        return err;
}

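/*
 * Publish @new_tbl as the successor of @old_tbl.  Fails with -EEXIST if
 * another rehash has already been attached.
 */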
static int rhashtable_rehash_attach(struct rhashtable *ht,
                                    struct bucket_table *old_tbl,
                                    struct bucket_table *new_tbl)
{
        /* Protect future_tbl using the first bucket lock. */
        spin_lock_bh(old_tbl->locks);

        /* Did somebody beat us to it? */
        if (rcu_access_pointer(old_tbl->future_tbl)) {
                spin_unlock_bh(old_tbl->locks);
                return -EEXIST;
        }

        /* Make insertions go into the new, empty table right away. Deletions
         * and lookups will be attempted in both tables until we synchronize.
         */
        rcu_assign_pointer(old_tbl->future_tbl, new_tbl);

        spin_unlock_bh(old_tbl->locks);

        return 0;
}

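/*
 * Move every bucket of the current table into its future table, then
 * publish the new table and free the old one after an RCU grace period.
 * Returns -EAGAIN if yet another future table is already queued behind
 * the one just installed.
 */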
static int rhashtable_rehash_table(struct rhashtable *ht)
{
        struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
        struct bucket_table *new_tbl;
        struct rhashtable_walker *walker;
        unsigned int old_hash;
        int err;

        new_tbl = rht_dereference(old_tbl->future_tbl, ht);
        if (!new_tbl)
                return 0;

        for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
                err = rhashtable_rehash_chain(ht, old_hash);
                if (err)
                        return err;
        }

        /* Publish the new table pointer. */
        rcu_assign_pointer(ht->tbl, new_tbl);

        spin_lock(&ht->lock);
        list_for_each_entry(walker, &old_tbl->walkers, list)
                walker->tbl = NULL;
        spin_unlock(&ht->lock);

        /* Wait for outstanding readers. All new readers will see the new
         * table, and thus no references to the old table will
         * remain.
         */
        call_rcu(&old_tbl->rcu, bucket_table_free_rcu);

        return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
}

static int rhashtable_rehash_alloc(struct rhashtable *ht,
                                   struct bucket_table *old_tbl,
                                   unsigned int size)
{
        struct bucket_table *new_tbl;
        int err;

        ASSERT_RHT_MUTEX(ht);

        new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
        if (new_tbl == NULL)
                return -ENOMEM;

        err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
        if (err)
                bucket_table_free(new_tbl);

        return err;
}

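/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:		the hash table to shrink
 *
 * This function shrinks the hash table to fit, i.e., the smallest
 * size would not cause it to expand right away automatically.
 *
 * The caller must hold the hash table's mutex.
 *
 * The old table may still be used by readers in parallel to shrinking:
 * readers that started before the shrink keep using the old table until
 * the rehash completes, while updated readers see the new table.
 */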
static int rhashtable_shrink(struct rhashtable *ht)
{
        struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
        unsigned int nelems = atomic_read(&ht->nelems);
        unsigned int size = 0;

        if (nelems)
                size = roundup_pow_of_two(nelems * 3 / 2);
        if (size < ht->p.min_size)
                size = ht->p.min_size;

        if (old_tbl->size <= size)
                return 0;

        if (rht_dereference(old_tbl->future_tbl, ht))
                return -EEXIST;

        return rhashtable_rehash_alloc(ht, old_tbl, size);
}

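/* Deferred worker: grows, shrinks or rehashes the table as needed. */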
static void rht_deferred_worker(struct work_struct *work)
{
        struct rhashtable *ht;
        struct bucket_table *tbl;
        int err = 0;

        ht = container_of(work, struct rhashtable, run_work);
        mutex_lock(&ht->mutex);

        tbl = rht_dereference(ht->tbl, ht);
        tbl = rhashtable_last_table(ht, tbl);

        if (rht_grow_above_75(ht, tbl))
                err = rhashtable_rehash_alloc(ht, tbl, tbl->size * 2);
        else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
                err = rhashtable_shrink(ht);
        else if (tbl->nest)
                err = rhashtable_rehash_alloc(ht, tbl, tbl->size);

        if (!err)
                err = rhashtable_rehash_table(ht);

        mutex_unlock(&ht->mutex);

        if (err)
                schedule_work(&ht->run_work);
}

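/*
 * Allocate and attach a new table from atomic context when an insertion
 * runs into an overlong chain or a full table.  The actual entry
 * migration is left to the deferred worker.
 */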
static int rhashtable_insert_rehash(struct rhashtable *ht,
                                    struct bucket_table *tbl)
{
        struct bucket_table *old_tbl;
        struct bucket_table *new_tbl;
        unsigned int size;
        int err;

        old_tbl = rht_dereference_rcu(ht->tbl, ht);

        size = tbl->size;

        err = -EBUSY;

        if (rht_grow_above_75(ht, tbl))
                size *= 2;
        /* Do not schedule more than one rehash */
        else if (old_tbl != tbl)
                goto fail;

        err = -ENOMEM;

        new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC);
        if (new_tbl == NULL)
                goto fail;

        err = rhashtable_rehash_attach(ht, tbl, new_tbl);
        if (err) {
                bucket_table_free(new_tbl);
                if (err == -EEXIST)
                        err = 0;
        } else
                schedule_work(&ht->run_work);

        return err;

fail:
        /* Do not fail the insert if someone else did a rehash. */
        if (likely(rcu_dereference_raw(tbl->future_tbl)))
                return 0;

        /* Schedule async rehash to retry allocation in process context. */
        if (err == -ENOMEM)
                schedule_work(&ht->run_work);

        return err;
}

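/*
 * Look up @key in bucket @hash of @tbl while holding the bucket lock.
 * Returns the matching object, NULL when an rhlist duplicate was chained
 * in place, or ERR_PTR(-ENOENT) when nothing matched; ERR_PTR(-EAGAIN)
 * signals that the chain exceeded the elasticity limit.
 */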
static void *rhashtable_lookup_one(struct rhashtable *ht,
                                   struct bucket_table *tbl, unsigned int hash,
                                   const void *key, struct rhash_head *obj)
{
        struct rhashtable_compare_arg arg = {
                .ht = ht,
                .key = key,
        };
        struct rhash_head __rcu **pprev;
        struct rhash_head *head;
        int elasticity;

        elasticity = RHT_ELASTICITY;
        pprev = rht_bucket_var(tbl, hash);
        rht_for_each_continue(head, *pprev, tbl, hash) {
                struct rhlist_head *list;
                struct rhlist_head *plist;

                elasticity--;
                if (!key ||
                    (ht->p.obj_cmpfn ?
                     ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) :
                     rhashtable_compare(&arg, rht_obj(ht, head))))
                        continue;

                if (!ht->rhlist)
                        return rht_obj(ht, head);

                list = container_of(obj, struct rhlist_head, rhead);
                plist = container_of(head, struct rhlist_head, rhead);

                RCU_INIT_POINTER(list->next, plist);
                head = rht_dereference_bucket(head->next, tbl, hash);
                RCU_INIT_POINTER(list->rhead.next, head);
                rcu_assign_pointer(*pprev, obj);

                return NULL;
        }

        if (elasticity <= 0)
                return ERR_PTR(-EAGAIN);

        return ERR_PTR(-ENOENT);
}

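/*
 * Insert @obj into bucket @hash of @tbl based on the lookup result in
 * @data.  Returns NULL on success, the future table when the insert must
 * be retried there, or an ERR_PTR() (-EEXIST when @data found a duplicate
 * in a non-rhlist table).
 */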
static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
                                                  struct bucket_table *tbl,
                                                  unsigned int hash,
                                                  struct rhash_head *obj,
                                                  void *data)
{
        struct rhash_head __rcu **pprev;
        struct bucket_table *new_tbl;
        struct rhash_head *head;

        if (!IS_ERR_OR_NULL(data))
                return ERR_PTR(-EEXIST);

        if (PTR_ERR(data) != -EAGAIN && PTR_ERR(data) != -ENOENT)
                return ERR_CAST(data);

        new_tbl = rcu_dereference(tbl->future_tbl);
        if (new_tbl)
                return new_tbl;

        if (PTR_ERR(data) != -ENOENT)
                return ERR_CAST(data);

        if (unlikely(rht_grow_above_max(ht, tbl)))
                return ERR_PTR(-E2BIG);

        if (unlikely(rht_grow_above_100(ht, tbl)))
                return ERR_PTR(-EAGAIN);

        pprev = rht_bucket_insert(ht, tbl, hash);
        if (!pprev)
                return ERR_PTR(-ENOMEM);

        head = rht_dereference_bucket(*pprev, tbl, hash);

        RCU_INIT_POINTER(obj->next, head);
        if (ht->rhlist) {
                struct rhlist_head *list;

                list = container_of(obj, struct rhlist_head, rhead);
                RCU_INIT_POINTER(list->next, NULL);
        }

        rcu_assign_pointer(*pprev, obj);

        atomic_inc(&ht->nelems);
        if (rht_grow_above_75(ht, tbl))
                schedule_work(&ht->run_work);

        return NULL;
}

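/*
 * One insertion attempt under RCU: find the oldest table whose bucket has
 * not been rehashed yet, lock it, and then chase future tables until the
 * insert lands or fails.
 */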
static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
                                   struct rhash_head *obj)
{
        struct bucket_table *new_tbl;
        struct bucket_table *tbl;
        unsigned int hash;
        spinlock_t *lock;
        void *data;

        tbl = rcu_dereference(ht->tbl);

        /* All insertions must grab the oldest table containing
         * the hashed bucket that is yet to be rehashed.
         */
        for (;;) {
                hash = rht_head_hashfn(ht, tbl, obj, ht->p);
                lock = rht_bucket_lock(tbl, hash);
                spin_lock_bh(lock);

                if (tbl->rehash <= hash)
                        break;

                spin_unlock_bh(lock);
                tbl = rcu_dereference(tbl->future_tbl);
        }

        data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
        new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data);
        if (PTR_ERR(new_tbl) != -EEXIST)
                data = ERR_CAST(new_tbl);

        while (!IS_ERR_OR_NULL(new_tbl)) {
                tbl = new_tbl;
                hash = rht_head_hashfn(ht, tbl, obj, ht->p);
                spin_lock_nested(rht_bucket_lock(tbl, hash),
                                 SINGLE_DEPTH_NESTING);

                data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
                new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data);
                if (PTR_ERR(new_tbl) != -EEXIST)
                        data = ERR_CAST(new_tbl);

                spin_unlock(rht_bucket_lock(tbl, hash));
        }

        spin_unlock_bh(lock);

        if (PTR_ERR(data) == -EAGAIN)
                data = ERR_PTR(rhashtable_insert_rehash(ht, tbl) ?:
                               -EAGAIN);

        return data;
}

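/**
 * rhashtable_insert_slow - slow path for hash table insertion
 * @ht:		hash table
 * @key:	key of the object to insert
 * @obj:	rhash_head inside the object
 *
 * Retries the insertion under RCU until it neither needs a rehash nor
 * races with one.  Returns NULL on success, the already-present object
 * for duplicates in non-rhlist tables, or an ERR_PTR() on failure.
 */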
void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
                             struct rhash_head *obj)
{
        void *data;

        do {
                rcu_read_lock();
                data = rhashtable_try_insert(ht, key, obj);
                rcu_read_unlock();
        } while (PTR_ERR(data) == -EAGAIN);

        return data;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_slow);

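/**
 * rhashtable_walk_enter - Initialise an iterator
 * @ht:		Table to walk over
 * @iter:	Hash table Iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice.  Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may sleep so you must not call it from interrupt
 * context or with spin locks held.
 *
 * A minimal usage sketch, with error handling elided ("struct test_obj"
 * is a hypothetical type embedding a struct rhash_head; a walk_next()
 * result of ERR_PTR(-EAGAIN) means the table was resized and the walk
 * has rewound):
 *
 *	struct rhashtable_iter iter;
 *	struct test_obj *obj;
 *
 *	rhashtable_walk_enter(&ht, &iter);
 *	rhashtable_walk_start(&iter);
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj))
 *			continue;
 *		...
 *	}
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 */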
void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter)
{
        iter->ht = ht;
        iter->p = NULL;
        iter->slot = 0;
        iter->skip = 0;

        spin_lock(&ht->lock);
        iter->walker.tbl =
                rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock));
        list_add(&iter->walker.list, &iter->walker.tbl->walkers);
        spin_unlock(&ht->lock);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_enter);

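/**
 * rhashtable_walk_exit - Free an iterator
 * @iter:	Hash table Iterator
 *
 * This function frees resources allocated by rhashtable_walk_enter.
 */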
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
        spin_lock(&iter->ht->lock);
        if (iter->walker.tbl)
                list_del(&iter->walker.list);
        spin_unlock(&iter->ht->lock);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);

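/**
 * rhashtable_walk_start - Start a hash table walk
 * @iter:	Hash table iterator
 *
 * Start a hash table walk at the current iterator position.  Note that we
 * take the RCU lock in all cases including when we return an error.  So you
 * must always call rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 */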
int rhashtable_walk_start(struct rhashtable_iter *iter)
        __acquires(RCU)
{
        struct rhashtable *ht = iter->ht;

        rcu_read_lock();

        spin_lock(&ht->lock);
        if (iter->walker.tbl)
                list_del(&iter->walker.list);
        spin_unlock(&ht->lock);

        if (!iter->walker.tbl) {
                iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
                return -EAGAIN;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start);

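/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter:	Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */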
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
        struct bucket_table *tbl = iter->walker.tbl;
        struct rhlist_head *list = iter->list;
        struct rhashtable *ht = iter->ht;
        struct rhash_head *p = iter->p;
        bool rhlist = ht->rhlist;

        if (p) {
                if (!rhlist || !(list = rcu_dereference(list->next))) {
                        p = rcu_dereference(p->next);
                        list = container_of(p, struct rhlist_head, rhead);
                }
                goto next;
        }

        for (; iter->slot < tbl->size; iter->slot++) {
                int skip = iter->skip;

                rht_for_each_rcu(p, tbl, iter->slot) {
                        if (rhlist) {
                                list = container_of(p, struct rhlist_head,
                                                    rhead);
                                do {
                                        if (!skip)
                                                goto next;
                                        skip--;
                                        list = rcu_dereference(list->next);
                                } while (list);

                                continue;
                        }
                        if (!skip)
                                break;
                        skip--;
                }

next:
                if (!rht_is_a_nulls(p)) {
                        iter->skip++;
                        iter->p = p;
                        iter->list = list;
                        return rht_obj(ht, rhlist ? &list->rhead : p);
                }

                iter->skip = 0;
        }

        iter->p = NULL;

        /* Ensure we see any new tables. */
        smp_rmb();

        iter->walker.tbl = rht_dereference_rcu(tbl->future_tbl, ht);
        if (iter->walker.tbl) {
                iter->slot = 0;
                iter->skip = 0;
                return ERR_PTR(-EAGAIN);
        }

        return NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);

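/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter:	Hash table iterator
 *
 * Finish a hash table walk.  Does not reset the iterator to the start of
 * the hash table.
 */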
void rhashtable_walk_stop(struct rhashtable_iter *iter)
        __releases(RCU)
{
        struct rhashtable *ht;
        struct bucket_table *tbl = iter->walker.tbl;

        if (!tbl)
                goto out;

        ht = iter->ht;

        spin_lock(&ht->lock);
        if (tbl->rehash < tbl->size)
                list_add(&iter->walker.list, &tbl->walkers);
        else
                iter->walker.tbl = NULL;
        spin_unlock(&ht->lock);

        iter->p = NULL;

out:
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);

static size_t rounded_hashtable_size(const struct rhashtable_params *params)
{
        return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
                   (unsigned long)params->min_size);
}

static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
{
        return jhash2(key, length, seed);
}

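/**
 * rhashtable_init - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key. The sketches below use a made-up "struct test_obj"
 * purely for illustration:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 *	.nulls_base = (1U << RHT_BASE_SHIFT),
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 len, u32 seed)
 * {
 *	const struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */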
int rhashtable_init(struct rhashtable *ht,
                    const struct rhashtable_params *params)
{
        struct bucket_table *tbl;
        size_t size;

        size = HASH_DEFAULT_SIZE;

        if ((!params->key_len && !params->obj_hashfn) ||
            (params->obj_hashfn && !params->obj_cmpfn))
                return -EINVAL;

        if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
                return -EINVAL;

        memset(ht, 0, sizeof(*ht));
        mutex_init(&ht->mutex);
        spin_lock_init(&ht->lock);
        memcpy(&ht->p, params, sizeof(*params));

        if (params->min_size)
                ht->p.min_size = roundup_pow_of_two(params->min_size);

        /* Cap total entries at 2^31 to avoid nelems overflow. */
        ht->max_elems = 1u << 31;

        if (params->max_size) {
                ht->p.max_size = rounddown_pow_of_two(params->max_size);
                if (ht->p.max_size < ht->max_elems / 2)
                        ht->max_elems = ht->p.max_size * 2;
        }

        ht->p.min_size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);

        if (params->nelem_hint)
                size = rounded_hashtable_size(&ht->p);

        if (params->locks_mul)
                ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
        else
                ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

        ht->key_len = ht->p.key_len;
        if (!params->hashfn) {
                ht->p.hashfn = jhash;

                if (!(ht->key_len & (sizeof(u32) - 1))) {
                        ht->key_len /= sizeof(u32);
                        ht->p.hashfn = rhashtable_jhash2;
                }
        }

        tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
        if (tbl == NULL)
                return -ENOMEM;

        atomic_set(&ht->nelems, 0);

        RCU_INIT_POINTER(ht->tbl, tbl);

        INIT_WORK(&ht->run_work, rht_deferred_worker);

        return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);
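
/**
 * rhltable_init - initialize a new hash list table
 * @hlt:	hash list table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash list table.
 *
 * See documentation for rhashtable_init.
 */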
int rhltable_init(struct rhltable *hlt, const struct rhashtable_params *params)
{
        int err;

        /* No rhlist NULLs marking for now. */
        if (params->nulls_base)
                return -EINVAL;

        err = rhashtable_init(&hlt->ht, params);
        hlt->ht.rhlist = true;
        return err;
}
EXPORT_SYMBOL_GPL(rhltable_init);

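/* Free one object, walking the whole rhlist chain for list tables. */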
static void rhashtable_free_one(struct rhashtable *ht, struct rhash_head *obj,
                                void (*free_fn)(void *ptr, void *arg),
                                void *arg)
{
        struct rhlist_head *list;

        if (!ht->rhlist) {
                free_fn(rht_obj(ht, obj), arg);
                return;
        }

        list = container_of(obj, struct rhlist_head, rhead);
        do {
                obj = &list->rhead;
                list = rht_dereference(list->next, ht);
                free_fn(rht_obj(ht, obj), arg);
        } while (list);
}

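/**
 * rhashtable_free_and_destroy - free elements and destroy hash table
 * @ht:		the hash table to destroy
 * @free_fn:	callback to release resources of element
 * @arg:	pointer passed to free_fn
 *
 * Stops an eventual async resize. If defined, invokes free_fn for each
 * element to release resources. Please note that RCU protected
 * readers may still be accessing the elements. Releasing of resources
 * must occur in a compatible manner. Then frees the bucket array.
 *
 * This function will eventually sleep to wait for an async resize
 * to complete. The caller is responsible that no further write
 * operations occur in parallel.
 */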
void rhashtable_free_and_destroy(struct rhashtable *ht,
                                 void (*free_fn)(void *ptr, void *arg),
                                 void *arg)
{
        struct bucket_table *tbl;
        unsigned int i;

        cancel_work_sync(&ht->run_work);

        mutex_lock(&ht->mutex);
        tbl = rht_dereference(ht->tbl, ht);
        if (free_fn) {
                for (i = 0; i < tbl->size; i++) {
                        struct rhash_head *pos, *next;

                        for (pos = rht_dereference(*rht_bucket(tbl, i), ht),
                             next = !rht_is_a_nulls(pos) ?
                                        rht_dereference(pos->next, ht) : NULL;
                             !rht_is_a_nulls(pos);
                             pos = next,
                             next = !rht_is_a_nulls(pos) ?
                                        rht_dereference(pos->next, ht) : NULL)
                                rhashtable_free_one(ht, pos, free_fn, arg);
                }
        }

        bucket_table_free(tbl);
        mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);

void rhashtable_destroy(struct rhashtable *ht)
{
        return rhashtable_free_and_destroy(ht, NULL, NULL);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);

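/*
 * Resolve @hash to its bucket pointer in a nested table, descending one
 * page-sized level per iteration.  Returns a static NULLS bucket when an
 * intermediate level was never allocated.
 */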
struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
                                            unsigned int hash)
{
        const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
        static struct rhash_head __rcu *rhnull =
                (struct rhash_head __rcu *)NULLS_MARKER(0);
        unsigned int index = hash & ((1 << tbl->nest) - 1);
        unsigned int size = tbl->size >> tbl->nest;
        unsigned int subhash = hash;
        union nested_table *ntbl;

        ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
        ntbl = rht_dereference_bucket_rcu(ntbl[index].table, tbl, hash);
        subhash >>= tbl->nest;

        while (ntbl && size > (1 << shift)) {
                index = subhash & ((1 << shift) - 1);
                ntbl = rht_dereference_bucket_rcu(ntbl[index].table,
                                                  tbl, hash);
                size >>= shift;
                subhash >>= shift;
        }

        if (!ntbl)
                return &rhnull;

        return &ntbl[subhash].bucket;
}
EXPORT_SYMBOL_GPL(rht_bucket_nested);

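/*
 * Like rht_bucket_nested, but allocates missing levels on the way down.
 * Returns NULL if an atomic allocation fails.
 */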
struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
                                                   struct bucket_table *tbl,
                                                   unsigned int hash)
{
        const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
        unsigned int index = hash & ((1 << tbl->nest) - 1);
        unsigned int size = tbl->size >> tbl->nest;
        union nested_table *ntbl;
        unsigned int shifted;
        unsigned int nhash;

        ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
        hash >>= tbl->nest;
        nhash = index;
        shifted = tbl->nest;
        ntbl = nested_table_alloc(ht, &ntbl[index].table,
                                  size <= (1 << shift) ? shifted : 0, nhash);

        while (ntbl && size > (1 << shift)) {
                index = hash & ((1 << shift) - 1);
                size >>= shift;
                hash >>= shift;
                nhash |= index << shifted;
                shifted += shift;
                ntbl = nested_table_alloc(ht, &ntbl[index].table,
                                          size <= (1 << shift) ? shifted : 0,
                                          nhash);
        }

        if (!ntbl)
                return NULL;

        return &ntbl[hash].bucket;
}
EXPORT_SYMBOL_GPL(rht_bucket_nested_insert);