/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>
#include <linux/export.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4U
#define BUCKET_LOCKS_PER_CPU	32UL

static u32 head_hashfn(struct rhashtable *ht,
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
{
	return rht_head_hashfn(ht, tbl, he, ht->p);
}

#ifdef CONFIG_PROVE_LOCKING
#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	spinlock_t *lock = rht_bucket_lock(tbl, hash);

	return (debug_locks) ? lockdep_is_held(lock) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#endif

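/* Allocate and initialise the array of per-bucket spinlocks for @tbl,
 * sized from the number of possible CPUs and ht->p.locks_mul.
 */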
static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
			      gfp_t gfp)
{
	unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
	unsigned int nr_pcpus = 2;
#else
	unsigned int nr_pcpus = num_possible_cpus();
#endif

	nr_pcpus = min_t(unsigned int, nr_pcpus, 64UL);
	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);

	/* Never allocate more than 0.5 locks per bucket */
	size = min_t(unsigned int, size, tbl->size >> 1);

	if (sizeof(spinlock_t) != 0) {
		tbl->locks = NULL;
#ifdef CONFIG_NUMA
		if (size * sizeof(spinlock_t) > PAGE_SIZE &&
		    gfp == GFP_KERNEL)
			tbl->locks = vmalloc(size * sizeof(spinlock_t));
#endif
		if (gfp != GFP_KERNEL)
			gfp |= __GFP_NOWARN | __GFP_NORETRY;

		if (!tbl->locks)
			tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
						   gfp);
		if (!tbl->locks)
			return -ENOMEM;
		for (i = 0; i < size; i++)
			spin_lock_init(&tbl->locks[i]);
	}
	tbl->locks_mask = size - 1;

	return 0;
}

static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl)
		kvfree(tbl->locks);

	kvfree(tbl);
}

static void bucket_table_free_rcu(struct rcu_head *head)
{
	bucket_table_free(container_of(head, struct bucket_table, rcu));
}

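/* Allocate a table of @nbuckets hash chains together with its bucket
 * locks, seed its hash salt and point every bucket at the nulls marker
 * that terminates its chain.
 */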
static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets,
					       gfp_t gfp)
{
	struct bucket_table *tbl = NULL;
	size_t size;
	int i;

	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER) ||
	    gfp != GFP_KERNEL)
		tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY);
	if (tbl == NULL && gfp == GFP_KERNEL)
		tbl = vzalloc(size);
	if (tbl == NULL)
		return NULL;

	tbl->size = nbuckets;

	if (alloc_bucket_locks(ht, tbl, gfp) < 0) {
		bucket_table_free(tbl);
		return NULL;
	}

	INIT_LIST_HEAD(&tbl->walkers);

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);

	return tbl;
}

static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
						  struct bucket_table *tbl)
{
	struct bucket_table *new_tbl;

	do {
		new_tbl = tbl;
		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	} while (tbl);

	return new_tbl;
}

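/* Move the last entry of old bucket @old_hash to its new bucket in the
 * table being rehashed into.  Returns -ENOENT once the old chain is
 * empty.  Called with the old bucket lock held.
 */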
static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl = rhashtable_last_table(ht,
		rht_dereference_rcu(old_tbl->future_tbl, ht));
	struct rhash_head __rcu **pprev = &old_tbl->buckets[old_hash];
	int err = -ENOENT;
	struct rhash_head *head, *next, *entry;
	spinlock_t *new_bucket_lock;
	unsigned int new_hash;

	rht_for_each(entry, old_tbl, old_hash) {
		err = 0;
		next = rht_dereference_bucket(entry->next, old_tbl, old_hash);

		if (rht_is_a_nulls(next))
			break;

		pprev = &entry->next;
	}

	if (err)
		goto out;

	new_hash = head_hashfn(ht, new_tbl, entry);

	new_bucket_lock = rht_bucket_lock(new_tbl, new_hash);

	spin_lock_nested(new_bucket_lock, SINGLE_DEPTH_NESTING);
	head = rht_dereference_bucket(new_tbl->buckets[new_hash],
				      new_tbl, new_hash);

	RCU_INIT_POINTER(entry->next, head);

	rcu_assign_pointer(new_tbl->buckets[new_hash], entry);
	spin_unlock(new_bucket_lock);

	rcu_assign_pointer(*pprev, next);

out:
	return err;
}

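/* Drain old bucket @old_hash completely by repeatedly relinking its
 * last entry into the new table, then mark the bucket as rehashed.
 */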
static void rhashtable_rehash_chain(struct rhashtable *ht,
				    unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	spinlock_t *old_bucket_lock;

	old_bucket_lock = rht_bucket_lock(old_tbl, old_hash);

	spin_lock_bh(old_bucket_lock);
	while (!rhashtable_rehash_one(ht, old_hash))
		;
	old_tbl->rehash++;
	spin_unlock_bh(old_bucket_lock);
}

static int rhashtable_rehash_attach(struct rhashtable *ht,
				    struct bucket_table *old_tbl,
				    struct bucket_table *new_tbl)
{
	/* Protect future_tbl using the first bucket lock. */
	spin_lock_bh(old_tbl->locks);

	/* Did somebody beat us to it? */
	if (rcu_access_pointer(old_tbl->future_tbl)) {
		spin_unlock_bh(old_tbl->locks);
		return -EEXIST;
	}

	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 */
	rcu_assign_pointer(old_tbl->future_tbl, new_tbl);

	spin_unlock_bh(old_tbl->locks);

	return 0;
}

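/* Rehash every chain of the current table into its future table and
 * publish the latter as ht->tbl.  Returns -EAGAIN if yet another
 * future table is already queued behind it.
 */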
static int rhashtable_rehash_table(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl;
	struct rhashtable_walker *walker;
	unsigned int old_hash;

	new_tbl = rht_dereference(old_tbl->future_tbl, ht);
	if (!new_tbl)
		return 0;

	for (old_hash = 0; old_hash < old_tbl->size; old_hash++)
		rhashtable_rehash_chain(ht, old_hash);

	/* Publish the new table pointer. */
	rcu_assign_pointer(ht->tbl, new_tbl);

	spin_lock(&ht->lock);
	list_for_each_entry(walker, &old_tbl->walkers, list)
		walker->tbl = NULL;
	spin_unlock(&ht->lock);

	/* Wait for outstanding RCU readers before freeing the old
	 * table, and thus anything still attached to it such as the
	 * per-bucket locks.
	 */
	call_rcu(&old_tbl->rcu, bucket_table_free_rcu);

	return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
}

/**
 * rhashtable_expand - Expand hash table while allowing concurrent lookups
 * @ht:		the hash table to expand
 *
 * A secondary bucket array is allocated and the hash entries are migrated.
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_expand(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	int err;

	ASSERT_RHT_MUTEX(ht);

	old_tbl = rhashtable_last_table(ht, old_tbl);

	new_tbl = bucket_table_alloc(ht, old_tbl->size * 2, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}

/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:		the hash table to shrink
 *
 * This function shrinks the hash table to fit, i.e., the smallest
 * size that would not cause it to become more than 75% full.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * The function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	unsigned int nelems = atomic_read(&ht->nelems);
	unsigned int size = 0;
	int err;

	ASSERT_RHT_MUTEX(ht);

	if (nelems)
		size = roundup_pow_of_two(nelems * 3 / 2);
	if (size < ht->p.min_size)
		size = ht->p.min_size;

	if (old_tbl->size <= size)
		return 0;

	if (rht_dereference(old_tbl->future_tbl, ht))
		return -EEXIST;

	new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}

static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;
	int err = 0;

	ht = container_of(work, struct rhashtable, run_work);
	mutex_lock(&ht->mutex);

	tbl = rht_dereference(ht->tbl, ht);
	tbl = rhashtable_last_table(ht, tbl);

	if (rht_grow_above_75(ht, tbl))
		rhashtable_expand(ht);
	else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
		rhashtable_shrink(ht);

	err = rhashtable_rehash_table(ht);

	mutex_unlock(&ht->mutex);

	if (err)
		schedule_work(&ht->run_work);
}

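/* Allocate and attach a new table from atomic (insertion) context when
 * an insert runs into a full or overloaded table.  The actual rehash is
 * deferred to the worker.
 */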
static int rhashtable_insert_rehash(struct rhashtable *ht,
				    struct bucket_table *tbl)
{
	struct bucket_table *old_tbl;
	struct bucket_table *new_tbl;
	unsigned int size;
	int err;

	old_tbl = rht_dereference_rcu(ht->tbl, ht);

	size = tbl->size;

	err = -EBUSY;

	if (rht_grow_above_75(ht, tbl))
		size *= 2;
	/* Do not schedule more than one rehash */
	else if (old_tbl != tbl)
		goto fail;

	err = -ENOMEM;

	new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC);
	if (new_tbl == NULL)
		goto fail;

	err = rhashtable_rehash_attach(ht, tbl, new_tbl);
	if (err) {
		bucket_table_free(new_tbl);
		if (err == -EEXIST)
			err = 0;
	} else
		schedule_work(&ht->run_work);

	return err;

fail:
	/* Do not fail the insert if someone else did a rehash. */
	if (likely(rcu_dereference_raw(tbl->future_tbl)))
		return 0;

	/* Schedule async rehash to retry allocation in process context. */
	if (err == -ENOMEM)
		schedule_work(&ht->run_work);

	return err;
}

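/* Look up @key in bucket @hash of @tbl.  Returns the matching object,
 * ERR_PTR(-ENOENT) if none is found, or ERR_PTR(-EAGAIN) if the chain
 * exceeded the elasticity limit.  For rhlist tables a match causes @obj
 * to be linked into the existing list and NULL to be returned.
 */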
static void *rhashtable_lookup_one(struct rhashtable *ht,
				   struct bucket_table *tbl, unsigned int hash,
				   const void *key, struct rhash_head *obj)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	struct rhash_head __rcu **pprev;
	struct rhash_head *head;
	int elasticity;

	elasticity = ht->elasticity;
	pprev = &tbl->buckets[hash];
	rht_for_each(head, tbl, hash) {
		struct rhlist_head *list;
		struct rhlist_head *plist;

		elasticity--;
		if (!key ||
		    (ht->p.obj_cmpfn ?
		     ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) :
		     rhashtable_compare(&arg, rht_obj(ht, head))))
			continue;

		if (!ht->rhlist)
			return rht_obj(ht, head);

		list = container_of(obj, struct rhlist_head, rhead);
		plist = container_of(head, struct rhlist_head, rhead);

		RCU_INIT_POINTER(list->next, plist);
		head = rht_dereference_bucket(head->next, tbl, hash);
		RCU_INIT_POINTER(list->rhead.next, head);
		rcu_assign_pointer(*pprev, obj);

		return NULL;
	}

	if (elasticity <= 0)
		return ERR_PTR(-EAGAIN);

	return ERR_PTR(-ENOENT);
}

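/* Insert @obj at the head of bucket @hash, using @data as the result of
 * the preceding lookup.  Returns NULL on success, the future table if
 * the insert must be retried there, or an ERR_PTR() on failure.
 */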
static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
						  struct bucket_table *tbl,
						  unsigned int hash,
						  struct rhash_head *obj,
						  void *data)
{
	struct bucket_table *new_tbl;
	struct rhash_head *head;

	if (!IS_ERR_OR_NULL(data))
		return ERR_PTR(-EEXIST);

	if (PTR_ERR(data) != -EAGAIN && PTR_ERR(data) != -ENOENT)
		return ERR_CAST(data);

	new_tbl = rcu_dereference(tbl->future_tbl);
	if (new_tbl)
		return new_tbl;

	if (PTR_ERR(data) != -ENOENT)
		return ERR_CAST(data);

	if (unlikely(rht_grow_above_max(ht, tbl)))
		return ERR_PTR(-E2BIG);

	if (unlikely(rht_grow_above_100(ht, tbl)))
		return ERR_PTR(-EAGAIN);

	head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);

	RCU_INIT_POINTER(obj->next, head);
	if (ht->rhlist) {
		struct rhlist_head *list;

		list = container_of(obj, struct rhlist_head, rhead);
		RCU_INIT_POINTER(list->next, NULL);
	}

	rcu_assign_pointer(tbl->buckets[hash], obj);

	atomic_inc(&ht->nelems);
	if (rht_grow_above_75(ht, tbl))
		schedule_work(&ht->run_work);

	return NULL;
}

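/* One insertion attempt under RCU: walk to the right table generation,
 * take the bucket lock(s) and perform the lookup + insert, triggering
 * an immediate rehash if the table is overloaded.
 */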
static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
				   struct rhash_head *obj)
{
	struct bucket_table *new_tbl;
	struct bucket_table *tbl;
	unsigned int hash;
	spinlock_t *lock;
	void *data;

	tbl = rcu_dereference(ht->tbl);

	/* All insertions must grab the oldest table containing
	 * the hashed bucket that is yet to be rehashed.
	 */
	for (;;) {
		hash = rht_head_hashfn(ht, tbl, obj, ht->p);
		lock = rht_bucket_lock(tbl, hash);
		spin_lock_bh(lock);

		if (tbl->rehash <= hash)
			break;

		spin_unlock_bh(lock);
		tbl = rcu_dereference(tbl->future_tbl);
	}

	data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
	new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data);
	if (PTR_ERR(new_tbl) != -EEXIST)
		data = ERR_CAST(new_tbl);

	while (!IS_ERR_OR_NULL(new_tbl)) {
		tbl = new_tbl;
		hash = rht_head_hashfn(ht, tbl, obj, ht->p);
		spin_lock_nested(rht_bucket_lock(tbl, hash),
				 SINGLE_DEPTH_NESTING);

		data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
		new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data);
		if (PTR_ERR(new_tbl) != -EEXIST)
			data = ERR_CAST(new_tbl);

		spin_unlock(rht_bucket_lock(tbl, hash));
	}

	spin_unlock_bh(lock);

	if (PTR_ERR(data) == -EAGAIN)
		data = ERR_PTR(rhashtable_insert_rehash(ht, tbl) ?:
			       -EAGAIN);

	return data;
}

void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
			     struct rhash_head *obj)
{
	void *data;

	do {
		rcu_read_lock();
		data = rhashtable_try_insert(ht, key, obj);
		rcu_read_unlock();
	} while (PTR_ERR(data) == -EAGAIN);

	return data;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_slow);
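
/* Example (illustrative sketch, not part of this file): callers embed a
 * struct rhash_head in their objects and normally use the inline fast
 * paths from <linux/rhashtable.h>, which fall back to
 * rhashtable_insert_slow() above on resize or overlong chains.
 * "struct test_obj" is a hypothetical caller type:
 *
 *	struct test_obj {
 *		int			key;
 *		struct rhash_head	node;
 *	};
 *
 *	static const struct rhashtable_params test_params = {
 *		.key_len	= sizeof(int),
 *		.key_offset	= offsetof(struct test_obj, key),
 *		.head_offset	= offsetof(struct test_obj, node),
 *	};
 *
 *	err = rhashtable_insert_fast(&ht, &obj->node, test_params);
 */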

/**
 * rhashtable_walk_enter - Initialise an iterator
 * @ht:		Table to walk over
 * @iter:	Hash table Iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice.  Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * You must call rhashtable_walk_exit after this function returns.
 */
void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter)
{
	iter->ht = ht;
	iter->p = NULL;
	iter->slot = 0;
	iter->skip = 0;

	spin_lock(&ht->lock);
	iter->walker.tbl =
		rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock));
	list_add(&iter->walker.list, &iter->walker.tbl->walkers);
	spin_unlock(&ht->lock);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_enter);

/**
 * rhashtable_walk_exit - Free an iterator
 * @iter:	Hash table Iterator
 *
 * This function frees resources allocated by rhashtable_walk_enter.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
	spin_lock(&iter->ht->lock);
	if (iter->walker.tbl)
		list_del(&iter->walker.list);
	spin_unlock(&iter->ht->lock);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);

/**
 * rhashtable_walk_start - Start a hash table walk
 * @iter:	Hash table iterator
 *
 * Start a hash table walk.  Note that this function takes the RCU read
 * lock, which is dropped again by rhashtable_walk_stop.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 */
int rhashtable_walk_start(struct rhashtable_iter *iter)
	__acquires(RCU)
{
	struct rhashtable *ht = iter->ht;

	rcu_read_lock();

	spin_lock(&ht->lock);
	if (iter->walker.tbl)
		list_del(&iter->walker.list);
	spin_unlock(&ht->lock);

	if (!iter->walker.tbl) {
		iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
		return -EAGAIN;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start);

/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter:	Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
	struct bucket_table *tbl = iter->walker.tbl;
	struct rhlist_head *list = iter->list;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	bool rhlist = ht->rhlist;

	if (p) {
		if (!rhlist || !(list = rcu_dereference(list->next))) {
			p = rcu_dereference(p->next);
			list = container_of(p, struct rhlist_head, rhead);
		}
		goto next;
	}

	for (; iter->slot < tbl->size; iter->slot++) {
		int skip = iter->skip;

		rht_for_each_rcu(p, tbl, iter->slot) {
			if (rhlist) {
				list = container_of(p, struct rhlist_head,
						    rhead);
				do {
					if (!skip)
						goto next;
					skip--;
					list = rcu_dereference(list->next);
				} while (list);

				continue;
			}
			if (!skip)
				break;
			skip--;
		}

next:
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			iter->list = list;
			return rht_obj(ht, rhlist ? &list->rhead : p);
		}

		iter->skip = 0;
	}

	iter->p = NULL;

	/* Ensure we see any new tables. */
	smp_rmb();

	iter->walker.tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (iter->walker.tbl) {
		iter->slot = 0;
		iter->skip = 0;
		return ERR_PTR(-EAGAIN);
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);

/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter:	Hash table iterator
 *
 * Finish a hash table walk and drop the RCU read lock.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
	__releases(RCU)
{
	struct rhashtable *ht;
	struct bucket_table *tbl = iter->walker.tbl;

	if (!tbl)
		goto out;

	ht = iter->ht;

	spin_lock(&ht->lock);
	if (tbl->rehash < tbl->size)
		list_add(&iter->walker.list, &tbl->walkers);
	else
		iter->walker.tbl = NULL;
	spin_unlock(&ht->lock);

	iter->p = NULL;

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);

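/* Example (illustrative sketch): a complete walk over all objects.
 * rhashtable_walk_next() returning ERR_PTR(-EAGAIN) means a resize
 * occurred and the iteration restarts from the beginning; error
 * handling of rhashtable_walk_start() is elided for brevity:
 *
 *	struct rhashtable_iter iter;
 *	struct test_obj *obj;
 *
 *	rhashtable_walk_enter(&ht, &iter);
 *	rhashtable_walk_start(&iter);
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj)) {
 *			if (PTR_ERR(obj) == -EAGAIN)
 *				continue;
 *			break;
 *		}
 *		...
 *	}
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 */

/* Size the initial table so that the hinted number of elements keeps it
 * below roughly 75% utilisation, clamped to the configured minimum.
 */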
static size_t rounded_hashtable_size(const struct rhashtable_params *params)
{
	return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
		   (unsigned long)params->min_size);
}

static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
{
	return jhash2(key, length, seed);
}

/**
 * rhashtable_init - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 *	.nulls_base = (1U << RHT_BASE_SHIFT),
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 len, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht,
		    const struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	size = HASH_DEFAULT_SIZE;

	if ((!params->key_len && !params->obj_hashfn) ||
	    (params->obj_hashfn && !params->obj_cmpfn))
		return -EINVAL;

	if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
		return -EINVAL;

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	spin_lock_init(&ht->lock);
	memcpy(&ht->p, params, sizeof(*params));

	if (params->min_size)
		ht->p.min_size = roundup_pow_of_two(params->min_size);

	if (params->max_size)
		ht->p.max_size = rounddown_pow_of_two(params->max_size);

	if (params->insecure_max_entries)
		ht->p.insecure_max_entries =
			rounddown_pow_of_two(params->insecure_max_entries);
	else
		ht->p.insecure_max_entries = ht->p.max_size * 2;

	ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE);

	if (params->nelem_hint)
		size = rounded_hashtable_size(&ht->p);

	/* The maximum (not average) chain length grows with the
	 * size of the hash table, at a rate of (log N)/(log log N).
	 * The value of 16 is selected so that even if the hash
	 * table grew to 2^32 you would not expect the maximum
	 * chain length to exceed it unless we are under attack
	 * (or extremely unlucky).
	 *
	 * As this limit is only to detect attacks, we don't need
	 * to set it to a lower value as you'd need the chain
	 * length to vastly exceed 16 to have any real effect
	 * on the system.
	 */
	if (!params->insecure_elasticity)
		ht->elasticity = 16;

	if (params->locks_mul)
		ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
	else
		ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

	ht->key_len = ht->p.key_len;
	if (!params->hashfn) {
		ht->p.hashfn = jhash;

		if (!(ht->key_len & (sizeof(u32) - 1))) {
			ht->key_len /= sizeof(u32);
			ht->p.hashfn = rhashtable_jhash2;
		}
	}

	tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (tbl == NULL)
		return -ENOMEM;

	atomic_set(&ht->nelems, 0);

	RCU_INIT_POINTER(ht->tbl, tbl);

	INIT_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);

/**
 * rhltable_init - initialize a new hash list table
 * @hlt:	hash list table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash list table.
 *
 * See documentation for rhashtable_init.
 */
int rhltable_init(struct rhltable *hlt, const struct rhashtable_params *params)
{
	int err;

	/* No rhlist NULLs marking for now. */
	if (params->nulls_base)
		return -EINVAL;

	err = rhashtable_init(&hlt->ht, params);
	hlt->ht.rhlist = true;
	return err;
}
EXPORT_SYMBOL_GPL(rhltable_init);

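/* Release one table entry via @free_fn.  For rhlist tables this walks
 * and frees the whole list hanging off the entry.
 */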
static void rhashtable_free_one(struct rhashtable *ht, struct rhash_head *obj,
				void (*free_fn)(void *ptr, void *arg),
				void *arg)
{
	struct rhlist_head *list;

	if (!ht->rhlist) {
		free_fn(rht_obj(ht, obj), arg);
		return;
	}

	list = container_of(obj, struct rhlist_head, rhead);
	do {
		obj = &list->rhead;
		list = rht_dereference(list->next, ht);
		free_fn(rht_obj(ht, obj), arg);
	} while (list);
}

/**
 * rhashtable_free_and_destroy - free elements and destroy hash table
 * @ht:		the hash table to destroy
 * @free_fn:	callback to release resources of element
 * @arg:	pointer passed to free_fn
 *
 * Stops an eventual async resize. If defined, invokes free_fn for each
 * element to release its resources. Please note that RCU protected
 * lookups may still be performed during the release of the elements,
 * i.e. you still need to be aware of RCU while destroying and freeing
 * the elements. Then frees the bucket array.
 *
 * This function will eventually sleep to wait for an async resize
 * to complete. The caller is responsible for ensuring that no further
 * write operations occur in parallel.
 */
void rhashtable_free_and_destroy(struct rhashtable *ht,
				 void (*free_fn)(void *ptr, void *arg),
				 void *arg)
{
	const struct bucket_table *tbl;
	unsigned int i;

	cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	tbl = rht_dereference(ht->tbl, ht);
	if (free_fn) {
		for (i = 0; i < tbl->size; i++) {
			struct rhash_head *pos, *next;

			for (pos = rht_dereference(tbl->buckets[i], ht),
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL;
			     !rht_is_a_nulls(pos);
			     pos = next,
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL)
				rhashtable_free_one(ht, pos, free_fn, arg);
		}
	}

	bucket_table_free(tbl);
	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);
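
/* Example (illustrative sketch): tearing down a table whose objects
 * were allocated with kmalloc(); free_obj() is a hypothetical callback:
 *
 *	static void free_obj(void *ptr, void *arg)
 *	{
 *		kfree(ptr);
 *	}
 *
 *	rhashtable_free_and_destroy(&ht, free_obj, NULL);
 */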

void rhashtable_destroy(struct rhashtable *ht)
{
	return rhashtable_free_and_destroy(ht, NULL, NULL);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);