/*
 * Resizable, Scalable, Concurrent Hash Table
 */

#ifndef _LINUX_RHASHTABLE_H
#define _LINUX_RHASHTABLE_H

#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/jhash.h>
#include <linux/list_nulls.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/rculist.h>

/*
 * The end of a hash chain is marked with a special nulls marker instead
 * of a NULL pointer.  From high to low bits the marker encodes:
 *
 *   Base (RHT_BASE_BITS = 4 bits): distinguishes between multiple tables,
 *                                  taken from rhashtable_params.nulls_base.
 *   Hash (RHT_HASH_BITS = 27 bits): hash of the first element added to
 *                                   the bucket.
 *   1    (1 bit): nulls marker, always set, so the "pointer" is odd.
 */
#define RHT_BASE_BITS		4
#define RHT_HASH_BITS		27
#define RHT_BASE_SHIFT		RHT_HASH_BITS

/* Base bits plus 1 bit for nulls marker */
#define RHT_HASH_RESERVED_SPACE	(RHT_BASE_BITS + 1)

/*
 * Maximum chain length before an insert falls back to the slow path.
 *
 * The maximum (not average) chain length grows roughly with
 * (log N)/(log log N), so a healthy table essentially never reaches a
 * chain of 16 entries; exceeding it indicates an attack or a broken
 * hash function and forces a rehash via rhashtable_insert_slow().
 */
#define RHT_ELASTICITY	16u

/* Hash chain link embedded in each hashed object. */
struct rhash_head {
	struct rhash_head __rcu		*next;
};

/* List head for objects that share a key in an rhltable. */
struct rhlist_head {
	struct rhash_head		rhead;
	struct rhlist_head __rcu	*next;
};

/**
 * struct bucket_table - Table of hash buckets
 * @size: Number of hash buckets
 * @nest: Number of bits of first-level nested table
 * @rehash: Current bucket being rehashed
 * @hash_rnd: Random seed folded into the hash
 * @locks_mask: Mask to apply before accessing @locks
 * @locks: Array of spinlocks protecting individual buckets
 * @walkers: List of active walkers
 * @rcu: RCU structure for freeing the table
 * @future_tbl: Table under construction during rehashing
 * @buckets: Array of @size bucket heads
 */
struct bucket_table {
	unsigned int		size;
	unsigned int		nest;
	unsigned int		rehash;
	u32			hash_rnd;
	unsigned int		locks_mask;
	spinlock_t		*locks;
	struct list_head	walkers;
	struct rcu_head		rcu;

	struct bucket_table __rcu *future_tbl;

	struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp;
};

/**
 * struct rhashtable_compare_arg - Key for the function rhashtable_compare
 * @ht: Hash table
 * @key: Key to compare against
 */
struct rhashtable_compare_arg {
	struct rhashtable *ht;
	const void *key;
};

typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed);
typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 len, u32 seed);
typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *arg,
			       const void *obj);

struct rhashtable;

/**
 * struct rhashtable_params - Hash table construction parameters
 * @nelem_hint: Hint on number of elements, should be 75% of desired size
 * @key_len: Length of key
 * @key_offset: Offset of key in struct to be hashed
 * @head_offset: Offset of rhash_head in struct to be hashed
 * @max_size: Maximum size while expanding
 * @min_size: Minimum size while shrinking
 * @automatic_shrinking: Enable automatic shrinking of tables
 * @locks_mul: Number of bucket locks to allocate per cpu
 * @nulls_base: Base value to generate nulls marker
 * @hashfn: Hash function (default: jhash2 if !(key_len % 4), or jhash)
 * @obj_hashfn: Function to hash object
 * @obj_cmpfn: Function to compare key with object
 */
struct rhashtable_params {
	u16			nelem_hint;
	u16			key_len;
	u16			key_offset;
	u16			head_offset;
	unsigned int		max_size;
	u16			min_size;
	bool			automatic_shrinking;
	u8			locks_mul;
	u32			nulls_base;
	rht_hashfn_t		hashfn;
	rht_obj_hashfn_t	obj_hashfn;
	rht_obj_cmpfn_t		obj_cmpfn;
};
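
/*
 * Example (illustrative sketch, not part of the original API): compile-time
 * parameters for a table of hypothetical "struct test_obj" entries keyed by
 * a u32 @id field.  A constant initializer lets the inline fast paths
 * specialize on key_len at compile time.
 *
 *	struct test_obj {
 *		u32			id;
 *		struct rhash_head	node;
 *	};
 *
 *	static const struct rhashtable_params test_params = {
 *		.key_len	= sizeof(u32),
 *		.key_offset	= offsetof(struct test_obj, id),
 *		.head_offset	= offsetof(struct test_obj, node),
 *		.automatic_shrinking = true,
 *	};
 *
 *	struct rhashtable test_ht;
 *
 *	int err = rhashtable_init(&test_ht, &test_params);
 */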

/**
 * struct rhashtable - Hash table handle
 * @tbl: Bucket table
 * @key_len: Key length for hashfn
 * @max_elems: Maximum number of elements in table
 * @p: Configuration parameters
 * @rhlist: True if this is an rhltable
 * @run_work: Deferred worker to expand/shrink asynchronously
 * @mutex: Mutex to protect current/future table swapping
 * @lock: Spin lock to protect walker list
 * @nelems: Number of elements in table
 */
struct rhashtable {
	struct bucket_table __rcu	*tbl;
	unsigned int			key_len;
	unsigned int			max_elems;
	struct rhashtable_params	p;
	bool				rhlist;
	struct work_struct		run_work;
	struct mutex			mutex;
	spinlock_t			lock;
	atomic_t			nelems;
};

/**
 * struct rhltable - Hash table with duplicate objects in a list
 * @ht: Underlying rhtable
 */
struct rhltable {
	struct rhashtable ht;
};

/**
 * struct rhashtable_walker - Hash table walker
 * @list: List entry on list of walkers
 * @tbl: The table that we were walking over
 */
struct rhashtable_walker {
	struct list_head list;
	struct bucket_table *tbl;
};

/**
 * struct rhashtable_iter - Hash table iterator
 * @ht: Table to iterate through
 * @p: Current pointer
 * @list: Current hash list pointer
 * @walker: Associated rhashtable walker
 * @slot: Current slot
 * @skip: Number of entries to skip in slot
 * @end_of_table: Set when the walk has reached the end of the table
 */
struct rhashtable_iter {
	struct rhashtable *ht;
	struct rhash_head *p;
	struct rhlist_head *list;
	struct rhashtable_walker walker;
	unsigned int slot;
	unsigned int skip;
	bool end_of_table;
};

static inline unsigned long rht_marker(const struct rhashtable *ht, u32 hash)
{
	return NULLS_MARKER(ht->p.nulls_base + hash);
}

#define INIT_RHT_NULLS_HEAD(ptr, ht, hash) \
	((ptr) = (typeof(ptr)) rht_marker(ht, hash))

static inline bool rht_is_a_nulls(const struct rhash_head *ptr)
{
	return ((unsigned long) ptr & 1);
}

static inline unsigned long rht_get_nulls_value(const struct rhash_head *ptr)
{
	return ((unsigned long) ptr) >> 1;
}

static inline void *rht_obj(const struct rhashtable *ht,
			    const struct rhash_head *he)
{
	return (char *)he - ht->p.head_offset;
}

static inline unsigned int rht_bucket_index(const struct bucket_table *tbl,
					    unsigned int hash)
{
	return (hash >> RHT_HASH_RESERVED_SPACE) & (tbl->size - 1);
}

static inline unsigned int rht_key_get_hash(struct rhashtable *ht,
	const void *key, const struct rhashtable_params params,
	unsigned int hash_rnd)
{
	unsigned int hash;

	/* params must be equal to ht->p if it isn't constant. */
	if (!__builtin_constant_p(params.key_len))
		hash = ht->p.hashfn(key, ht->key_len, hash_rnd);
	else if (params.key_len) {
		unsigned int key_len = params.key_len;

		if (params.hashfn)
			hash = params.hashfn(key, key_len, hash_rnd);
		else if (key_len & (sizeof(u32) - 1))
			hash = jhash(key, key_len, hash_rnd);
		else
			hash = jhash2(key, key_len / sizeof(u32), hash_rnd);
	} else {
		unsigned int key_len = ht->p.key_len;

		if (params.hashfn)
			hash = params.hashfn(key, key_len, hash_rnd);
		else
			hash = jhash(key, key_len, hash_rnd);
	}

	return hash;
}

static inline unsigned int rht_key_hashfn(
	struct rhashtable *ht, const struct bucket_table *tbl,
	const void *key, const struct rhashtable_params params)
{
	unsigned int hash = rht_key_get_hash(ht, key, params, tbl->hash_rnd);

	return rht_bucket_index(tbl, hash);
}

static inline unsigned int rht_head_hashfn(
	struct rhashtable *ht, const struct bucket_table *tbl,
	const struct rhash_head *he, const struct rhashtable_params params)
{
	const char *ptr = rht_obj(ht, he);

	return likely(params.obj_hashfn) ?
	       rht_bucket_index(tbl, params.obj_hashfn(ptr, params.key_len ?:
							    ht->p.key_len,
						       tbl->hash_rnd)) :
	       rht_key_hashfn(ht, tbl, ptr + params.key_offset, params);
}

/**
 * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
 * @ht:		hash table
 * @tbl:	current table
 */
static inline bool rht_grow_above_75(const struct rhashtable *ht,
				     const struct bucket_table *tbl)
{
	/* Expand table when exceeding 75% load */
	return atomic_read(&ht->nelems) > (tbl->size / 4 * 3) &&
	       (!ht->p.max_size || tbl->size < ht->p.max_size);
}

/**
 * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
 * @ht:		hash table
 * @tbl:	current table
 */
static inline bool rht_shrink_below_30(const struct rhashtable *ht,
				       const struct bucket_table *tbl)
{
	/* Shrink table beneath 30% load */
	return atomic_read(&ht->nelems) < (tbl->size * 3 / 10) &&
	       tbl->size > ht->p.min_size;
}

/**
 * rht_grow_above_100 - returns true if nelems > table-size
 * @ht:		hash table
 * @tbl:	current table
 */
static inline bool rht_grow_above_100(const struct rhashtable *ht,
				      const struct bucket_table *tbl)
{
	return atomic_read(&ht->nelems) > tbl->size &&
	       (!ht->p.max_size || tbl->size < ht->p.max_size);
}

/**
 * rht_grow_above_max - returns true if table is above maximum
 * @ht:		hash table
 * @tbl:	current table
 */
static inline bool rht_grow_above_max(const struct rhashtable *ht,
				      const struct bucket_table *tbl)
{
	return atomic_read(&ht->nelems) >= ht->max_elems;
}

/* The bucket lock is selected based on the hash and protects mutations
 * on a group of hash buckets.
 *
 * A maximum of tbl->size/2 bucket locks is allocated.  This ensures that
 * a single lock always covers both buckets which may both contain
 * entries which link to the same bucket of the old table during resizing.
 * This simplifies the locking, as taking the bucket lock in both tables
 * during a resize always guarantees protection.
 *
 * IMPORTANT: When holding the bucket lock of both the old and new table
 * during expansions and shrinking, the old bucket lock must always be
 * acquired first.
 */
static inline spinlock_t *rht_bucket_lock(const struct bucket_table *tbl,
					  unsigned int hash)
{
	return &tbl->locks[hash & tbl->locks_mask];
}

#ifdef CONFIG_PROVE_LOCKING
int lockdep_rht_mutex_is_held(struct rhashtable *ht);
int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash);
#else
static inline int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return 1;
}

static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
					     u32 hash)
{
	return 1;
}
#endif

int rhashtable_init(struct rhashtable *ht,
		    const struct rhashtable_params *params);
int rhltable_init(struct rhltable *hlt,
		  const struct rhashtable_params *params);

void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
			     struct rhash_head *obj);

void rhashtable_walk_enter(struct rhashtable *ht,
			   struct rhashtable_iter *iter);
void rhashtable_walk_exit(struct rhashtable_iter *iter);
int rhashtable_walk_start_check(struct rhashtable_iter *iter) __acquires(RCU);

static inline void rhashtable_walk_start(struct rhashtable_iter *iter)
{
	(void)rhashtable_walk_start_check(iter);
}

void *rhashtable_walk_next(struct rhashtable_iter *iter);
void *rhashtable_walk_peek(struct rhashtable_iter *iter);
void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU);
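
/*
 * Example (illustrative sketch, not part of the original header): walking
 * every object of a hypothetical "struct test_obj" table.  walk_next()
 * may return ERR_PTR(-EAGAIN) if a resize happened; the walk then simply
 * continues, although objects may be seen twice or missed.
 *
 *	struct rhashtable_iter iter;
 *	struct test_obj *obj;
 *
 *	rhashtable_walk_enter(&test_ht, &iter);
 *	rhashtable_walk_start(&iter);
 *
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj)) {
 *			if (PTR_ERR(obj) == -EAGAIN)
 *				continue;
 *			break;
 *		}
 *		pr_info("id=%u\n", obj->id);
 *	}
 *
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 */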

void rhashtable_free_and_destroy(struct rhashtable *ht,
				 void (*free_fn)(void *ptr, void *arg),
				 void *arg);
void rhashtable_destroy(struct rhashtable *ht);
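
/*
 * Example (illustrative sketch, not part of the original header): tearing
 * down a table and freeing the remaining hypothetical "struct test_obj"
 * entries in one call.  No lookups or insertions may run concurrently.
 *
 *	static void test_obj_free(void *ptr, void *arg)
 *	{
 *		kfree(ptr);
 *	}
 *
 *	rhashtable_free_and_destroy(&test_ht, test_obj_free, NULL);
 */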

struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
					    unsigned int hash);
struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
						   struct bucket_table *tbl,
						   unsigned int hash);

#define rht_dereference(p, ht) \
	rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))

#define rht_dereference_rcu(p, ht) \
	rcu_dereference_check(p, lockdep_rht_mutex_is_held(ht))

#define rht_dereference_bucket(p, tbl, hash) \
	rcu_dereference_protected(p, lockdep_rht_bucket_is_held(tbl, hash))

#define rht_dereference_bucket_rcu(p, tbl, hash) \
	rcu_dereference_check(p, lockdep_rht_bucket_is_held(tbl, hash))

#define rht_entry(tpos, pos, member) \
	({ tpos = container_of(pos, typeof(*tpos), member); 1; })

static inline struct rhash_head __rcu *const *rht_bucket(
	const struct bucket_table *tbl, unsigned int hash)
{
	return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) :
				     &tbl->buckets[hash];
}

static inline struct rhash_head __rcu **rht_bucket_var(
	struct bucket_table *tbl, unsigned int hash)
{
	return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) :
				     &tbl->buckets[hash];
}

static inline struct rhash_head __rcu **rht_bucket_insert(
	struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash)
{
	return unlikely(tbl->nest) ? rht_bucket_nested_insert(ht, tbl, hash) :
				     &tbl->buckets[hash];
}

/**
 * rht_for_each_continue - continue iterating over hash chain
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the previous &struct rhash_head to continue from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 */
#define rht_for_each_continue(pos, head, tbl, hash) \
	for (pos = rht_dereference_bucket(head, tbl, hash); \
	     !rht_is_a_nulls(pos); \
	     pos = rht_dereference_bucket((pos)->next, tbl, hash))

/**
 * rht_for_each - iterate over hash chain
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 */
#define rht_for_each(pos, tbl, hash) \
	rht_for_each_continue(pos, *rht_bucket(tbl, hash), tbl, hash)

/**
 * rht_for_each_entry_continue - continue iterating over hash chain
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the previous &struct rhash_head to continue from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 */
#define rht_for_each_entry_continue(tpos, pos, head, tbl, hash, member) \
	for (pos = rht_dereference_bucket(head, tbl, hash); \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
	     pos = rht_dereference_bucket((pos)->next, tbl, hash))

/**
 * rht_for_each_entry - iterate over hash chain of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 */
#define rht_for_each_entry(tpos, pos, tbl, hash, member) \
	rht_for_each_entry_continue(tpos, pos, *rht_bucket(tbl, hash), \
				    tbl, hash, member)

/**
 * rht_for_each_entry_safe - safely iterate over hash chain of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @next:	the &struct rhash_head to use as next in loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive allows for the looped code to
 * remove the loop cursor from the list.
 */
#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member) \
	for (pos = rht_dereference_bucket(*rht_bucket(tbl, hash), tbl, hash), \
	     next = !rht_is_a_nulls(pos) ? \
		       rht_dereference_bucket(pos->next, tbl, hash) : NULL; \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
	     pos = next, \
	     next = !rht_is_a_nulls(pos) ? \
		       rht_dereference_bucket(pos->next, tbl, hash) : NULL)

/**
 * rht_for_each_rcu_continue - continue iterating over rcu hash chain
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the previous &struct rhash_head to continue from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_rcu_continue(pos, head, tbl, hash) \
	for (({barrier(); }), \
	     pos = rht_dereference_bucket_rcu(head, tbl, hash); \
	     !rht_is_a_nulls(pos); \
	     pos = rcu_dereference_raw(pos->next))

/**
 * rht_for_each_rcu - iterate over rcu hash chain
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_rcu(pos, tbl, hash) \
	rht_for_each_rcu_continue(pos, *rht_bucket(tbl, hash), tbl, hash)

/**
 * rht_for_each_entry_rcu_continue - continue iterating over rcu hash chain
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the previous &struct rhash_head to continue from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_entry_rcu_continue(tpos, pos, head, tbl, hash, member) \
	for (({barrier(); }), \
	     pos = rht_dereference_bucket_rcu(head, tbl, hash); \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
	     pos = rht_dereference_bucket_rcu(pos->next, tbl, hash))

/**
 * rht_for_each_entry_rcu - iterate over rcu hash chain of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member) \
	rht_for_each_entry_rcu_continue(tpos, pos, *rht_bucket(tbl, hash), \
					tbl, hash, member)

/**
 * rhl_for_each_rcu - iterate over rcu hash table list
 * @pos:	the &struct rhlist_head to use as a loop cursor.
 * @list:	the head of the list
 *
 * This hash chain list-traversal primitive should be used on the
 * list returned by rhltable_lookup.
 */
#define rhl_for_each_rcu(pos, list) \
	for (pos = list; pos; pos = rcu_dereference_raw(pos->next))

/**
 * rhl_for_each_entry_rcu - iterate over rcu hash table list of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhlist_head to use as a loop cursor.
 * @list:	the head of the list
 * @member:	name of the &struct rhlist_head within the hashable struct.
 *
 * This hash chain list-traversal primitive should be used on the
 * list returned by rhltable_lookup.
 */
#define rhl_for_each_entry_rcu(tpos, pos, list, member) \
	for (pos = list; pos && rht_entry(tpos, pos, member); \
	     pos = rcu_dereference_raw(pos->next))

static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
				     const void *obj)
{
	struct rhashtable *ht = arg->ht;
	const char *ptr = obj;

	return memcmp(ptr + ht->p.key_offset, arg->key, ht->p.key_len);
}
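
/*
 * Example (illustrative sketch, not part of the original header): custom
 * compare and object-hash callbacks for a hypothetical table keyed by a
 * NUL-terminated name embedded in the object.  obj_cmpfn must return 0
 * on a match, mirroring rhashtable_compare() above.
 *
 *	struct named_obj {
 *		char			name[32];
 *		struct rhash_head	node;
 *	};
 *
 *	static u32 named_obj_hash(const void *data, u32 len, u32 seed)
 *	{
 *		const struct named_obj *obj = data;
 *
 *		return jhash(obj->name, strlen(obj->name), seed);
 *	}
 *
 *	static u32 named_key_hash(const void *data, u32 len, u32 seed)
 *	{
 *		return jhash(data, strlen(data), seed);
 *	}
 *
 *	static int named_obj_cmp(struct rhashtable_compare_arg *arg,
 *				 const void *obj)
 *	{
 *		const struct named_obj *no = obj;
 *
 *		return strcmp(no->name, arg->key);
 *	}
 *
 *	static const struct rhashtable_params named_params = {
 *		.head_offset	= offsetof(struct named_obj, node),
 *		.hashfn		= named_key_hash,
 *		.obj_hashfn	= named_obj_hash,
 *		.obj_cmpfn	= named_obj_cmp,
 *	};
 */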

/* Internal function, do not use. */
static inline struct rhash_head *__rhashtable_lookup(
	struct rhashtable *ht, const void *key,
	const struct rhashtable_params params)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	struct bucket_table *tbl;
	struct rhash_head *he;
	unsigned int hash;

	tbl = rht_dereference_rcu(ht->tbl, ht);
restart:
	hash = rht_key_hashfn(ht, tbl, key, params);
	rht_for_each_rcu(he, tbl, hash) {
		if (params.obj_cmpfn ?
		    params.obj_cmpfn(&arg, rht_obj(ht, he)) :
		    rhashtable_compare(&arg, rht_obj(ht, he)))
			continue;
		return he;
	}

	/* Ensure we see any new tables. */
	smp_rmb();

	tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (unlikely(tbl))
		goto restart;

	return NULL;
}

/**
 * rhashtable_lookup - search hash table
 * @ht:		hash table
 * @key:	the pointer to the key
 * @params:	hash table parameters
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key.  The first matching entry is returned.
 *
 * This must only be called under the RCU read lock.
 *
 * Returns the first entry on which the compare function returned true.
 */
static inline void *rhashtable_lookup(
	struct rhashtable *ht, const void *key,
	const struct rhashtable_params params)
{
	struct rhash_head *he = __rhashtable_lookup(ht, key, params);

	return he ? rht_obj(ht, he) : NULL;
}

/**
 * rhashtable_lookup_fast - search hash table, without RCU read lock
 * @ht:		hash table
 * @key:	the pointer to the key
 * @params:	hash table parameters
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key.  The first matching entry is returned.
 *
 * Only use this function when you have other mechanisms guaranteeing
 * that the object won't go away after the RCU read lock is released.
 *
 * Returns the first entry on which the compare function returned true.
 */
static inline void *rhashtable_lookup_fast(
	struct rhashtable *ht, const void *key,
	const struct rhashtable_params params)
{
	void *obj;

	rcu_read_lock();
	obj = rhashtable_lookup(ht, key, params);
	rcu_read_unlock();

	return obj;
}
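
/*
 * Example (illustrative sketch, not part of the original header): looking up
 * a hypothetical "struct test_obj" by id.  Holding the RCU read lock across
 * the lookup and the use of the object keeps it from being freed under us,
 * assuming the owner frees objects via an RCU grace period.
 *
 *	u32 id = 42;
 *	struct test_obj *obj;
 *
 *	rcu_read_lock();
 *	obj = rhashtable_lookup(&test_ht, &id, test_params);
 *	if (obj)
 *		pr_info("found id=%u\n", obj->id);
 *	rcu_read_unlock();
 */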

/**
 * rhltable_lookup - search hash list table
 * @hlt:	hash table
 * @key:	the pointer to the key
 * @params:	hash table parameters
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key.  All matching entries are returned
 * in a list.
 *
 * This must only be called under the RCU read lock.
 *
 * Returns the list of entries that match the given key.
 */
static inline struct rhlist_head *rhltable_lookup(
	struct rhltable *hlt, const void *key,
	const struct rhashtable_params params)
{
	struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params);

	return he ? container_of(he, struct rhlist_head, rhead) : NULL;
}
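
/*
 * Example (illustrative sketch, not part of the original header): iterating
 * over all entries of a hypothetical "struct test_obj" rhltable that share
 * the same id, using rhl_for_each_entry_rcu() on the returned list.  The
 * object embeds a struct rhlist_head named "list" instead of a plain
 * struct rhash_head.
 *
 *	u32 id = 42;
 *	struct rhlist_head *head, *pos;
 *	struct test_obj *obj;
 *
 *	rcu_read_lock();
 *	head = rhltable_lookup(&test_hlt, &id, test_params);
 *	rhl_for_each_entry_rcu(obj, pos, head, list)
 *		pr_info("duplicate with id=%u\n", obj->id);
 *	rcu_read_unlock();
 */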

/* Internal function, please use rhashtable_insert_fast() instead.  This
 * function returns the existing element already in the hash table, if any,
 * otherwise it returns an error via ERR_PTR().
 */
static inline void *__rhashtable_insert_fast(
	struct rhashtable *ht, const void *key, struct rhash_head *obj,
	const struct rhashtable_params params, bool rhlist)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	struct rhash_head __rcu **pprev;
	struct bucket_table *tbl;
	struct rhash_head *head;
	spinlock_t *lock;
	unsigned int hash;
	int elasticity;
	void *data;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);
	hash = rht_head_hashfn(ht, tbl, obj, params);
	lock = rht_bucket_lock(tbl, hash);
	spin_lock_bh(lock);

	if (unlikely(rht_dereference_bucket(tbl->future_tbl, tbl, hash))) {
slow_path:
		spin_unlock_bh(lock);
		rcu_read_unlock();
		return rhashtable_insert_slow(ht, key, obj);
	}

	elasticity = RHT_ELASTICITY;
	pprev = rht_bucket_insert(ht, tbl, hash);
	data = ERR_PTR(-ENOMEM);
	if (!pprev)
		goto out;

	rht_for_each_continue(head, *pprev, tbl, hash) {
		struct rhlist_head *plist;
		struct rhlist_head *list;

		elasticity--;
		if (!key ||
		    (params.obj_cmpfn ?
		     params.obj_cmpfn(&arg, rht_obj(ht, head)) :
		     rhashtable_compare(&arg, rht_obj(ht, head)))) {
			pprev = &head->next;
			continue;
		}

		data = rht_obj(ht, head);

		if (!rhlist)
			goto out;

		list = container_of(obj, struct rhlist_head, rhead);
		plist = container_of(head, struct rhlist_head, rhead);

		RCU_INIT_POINTER(list->next, plist);
		head = rht_dereference_bucket(head->next, tbl, hash);
		RCU_INIT_POINTER(list->rhead.next, head);
		rcu_assign_pointer(*pprev, obj);

		goto good;
	}

	if (elasticity <= 0)
		goto slow_path;

	data = ERR_PTR(-E2BIG);
	if (unlikely(rht_grow_above_max(ht, tbl)))
		goto out;

	if (unlikely(rht_grow_above_100(ht, tbl)))
		goto slow_path;

	head = rht_dereference_bucket(*pprev, tbl, hash);

	RCU_INIT_POINTER(obj->next, head);
	if (rhlist) {
		struct rhlist_head *list;

		list = container_of(obj, struct rhlist_head, rhead);
		RCU_INIT_POINTER(list->next, NULL);
	}

	rcu_assign_pointer(*pprev, obj);

	atomic_inc(&ht->nelems);
	if (rht_grow_above_75(ht, tbl))
		schedule_work(&ht->run_work);

good:
	data = NULL;

out:
	spin_unlock_bh(lock);
	rcu_read_unlock();

	return data;
}

/**
 * rhashtable_insert_fast - insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 *
 * Will take a per bucket spinlock to protect against mutual mutations
 * on the same bucket.  Multiple insertions may occur in parallel unless
 * they map to the same bucket lock.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the table load
 * grows beyond the 75% watermark.
 */
static inline int rhashtable_insert_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	void *ret;

	ret = __rhashtable_insert_fast(ht, NULL, obj, params, false);
	if (IS_ERR(ret))
		return PTR_ERR(ret);

	return ret == NULL ? 0 : -EEXIST;
}
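
/*
 * Example (illustrative sketch, not part of the original header): inserting a
 * freshly allocated hypothetical "struct test_obj".  Note that this variant
 * does not check for duplicate keys; use rhashtable_lookup_insert_fast() for
 * that.  Failures (e.g. -E2BIG or -ENOMEM) come back as negative errnos.
 *
 *	struct test_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *	int err;
 *
 *	if (!obj)
 *		return -ENOMEM;
 *	obj->id = 42;
 *
 *	err = rhashtable_insert_fast(&test_ht, &obj->node, test_params);
 *	if (err)
 *		kfree(obj);
 *	return err;
 */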

/**
 * rhltable_insert_key - insert object into hash list table
 * @hlt:	hash list table
 * @key:	the pointer to the key
 * @list:	pointer to hash list head inside object
 * @params:	hash table parameters
 *
 * Will take a per bucket spinlock to protect against mutual mutations
 * on the same bucket.  Multiple insertions may occur in parallel unless
 * they map to the same bucket lock.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the table load
 * grows beyond the 75% watermark.
 */
static inline int rhltable_insert_key(
	struct rhltable *hlt, const void *key, struct rhlist_head *list,
	const struct rhashtable_params params)
{
	return PTR_ERR(__rhashtable_insert_fast(&hlt->ht, key, &list->rhead,
						params, true));
}

/**
 * rhltable_insert - insert object into hash list table
 * @hlt:	hash list table
 * @list:	pointer to hash list head inside object
 * @params:	hash table parameters
 *
 * Will take a per bucket spinlock to protect against mutual mutations
 * on the same bucket.  Multiple insertions may occur in parallel unless
 * they map to the same bucket lock.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the table load
 * grows beyond the 75% watermark.
 */
static inline int rhltable_insert(
	struct rhltable *hlt, struct rhlist_head *list,
	const struct rhashtable_params params)
{
	const char *key = rht_obj(&hlt->ht, &list->rhead);

	key += params.key_offset;

	return rhltable_insert_key(hlt, key, list, params);
}
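
/*
 * Example (illustrative sketch, not part of the original header): adding and
 * later removing an entry from an rhltable, where duplicates of the same key
 * are allowed.  The hypothetical object embeds a struct rhlist_head "list"
 * and a struct rcu_head "rcu" so it can be freed after a grace period.
 *
 *	struct test_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *	int err;
 *
 *	if (!obj)
 *		return -ENOMEM;
 *	obj->id = 42;
 *
 *	err = rhltable_insert(&test_hlt, &obj->list, test_params);
 *	...
 *	err = rhltable_remove(&test_hlt, &obj->list, test_params);
 *	if (!err)
 *		kfree_rcu(obj, rcu);
 */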

/**
 * rhashtable_lookup_insert_fast - lookup and insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion.  Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * This lookup function may only be used for fixed key hash table (key_len
 * parameter set).  It will BUG() if used inappropriately.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the table load
 * grows beyond the 75% watermark.
 */
static inline int rhashtable_lookup_insert_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	const char *key = rht_obj(ht, obj);
	void *ret;

	BUG_ON(ht->p.obj_hashfn);

	ret = __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params,
				       false);
	if (IS_ERR(ret))
		return PTR_ERR(ret);

	return ret == NULL ? 0 : -EEXIST;
}

/**
 * rhashtable_lookup_get_insert_fast - lookup and insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 *
 * Just like rhashtable_lookup_insert_fast(), but this function returns the
 * object if it exists, NULL if it did not and the insertion was successful,
 * and an ERR_PTR otherwise.
 */
static inline void *rhashtable_lookup_get_insert_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	const char *key = rht_obj(ht, obj);

	BUG_ON(ht->p.obj_hashfn);

	return __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params,
					false);
}
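
/*
 * Example (illustrative sketch, not part of the original header): atomic
 * "insert or return the existing entry" using the get_insert variant with a
 * hypothetical "struct test_obj".
 *
 *	struct test_obj *old;
 *
 *	old = rhashtable_lookup_get_insert_fast(&test_ht, &obj->node,
 *						test_params);
 *	if (IS_ERR(old))
 *		return PTR_ERR(old);	// insertion failed
 *	if (old) {
 *		kfree(obj);		// lost the race, reuse @old
 *		obj = old;
 *	}
 */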

/**
 * rhashtable_lookup_insert_key - search and insert object to hash table
 *				  with explicit key
 * @ht:		hash table
 * @key:	key
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion.  Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 *
 * Will trigger an automatic deferred table resizing if the table load
 * grows beyond the 75% watermark.
 *
 * Returns zero on success.
 */
static inline int rhashtable_lookup_insert_key(
	struct rhashtable *ht, const void *key, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	void *ret;

	BUG_ON(!ht->p.obj_hashfn || !key);

	ret = __rhashtable_insert_fast(ht, key, obj, params, false);
	if (IS_ERR(ret))
		return PTR_ERR(ret);

	return ret == NULL ? 0 : -EEXIST;
}

/**
 * rhashtable_lookup_get_insert_key - lookup and insert object into hash table
 * @ht:		hash table
 * @key:	key
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 *
 * Just like rhashtable_lookup_insert_key(), but this function returns the
 * object if it exists, NULL if it does not and the insertion was successful,
 * and an ERR_PTR otherwise.
 */
static inline void *rhashtable_lookup_get_insert_key(
	struct rhashtable *ht, const void *key, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	BUG_ON(!ht->p.obj_hashfn || !key);

	return __rhashtable_insert_fast(ht, key, obj, params, false);
}

/* Internal function, please use rhashtable_remove_fast() instead */
static inline int __rhashtable_remove_fast_one(
	struct rhashtable *ht, struct bucket_table *tbl,
	struct rhash_head *obj, const struct rhashtable_params params,
	bool rhlist)
{
	struct rhash_head __rcu **pprev;
	struct rhash_head *he;
	spinlock_t *lock;
	unsigned int hash;
	int err = -ENOENT;

	hash = rht_head_hashfn(ht, tbl, obj, params);
	lock = rht_bucket_lock(tbl, hash);

	spin_lock_bh(lock);

	pprev = rht_bucket_var(tbl, hash);
	rht_for_each_continue(he, *pprev, tbl, hash) {
		struct rhlist_head *list;

		list = container_of(he, struct rhlist_head, rhead);

		if (he != obj) {
			struct rhlist_head __rcu **lpprev;

			pprev = &he->next;

			if (!rhlist)
				continue;

			do {
				lpprev = &list->next;
				list = rht_dereference_bucket(list->next,
							      tbl, hash);
			} while (list && obj != &list->rhead);

			if (!list)
				continue;

			list = rht_dereference_bucket(list->next, tbl, hash);
			RCU_INIT_POINTER(*lpprev, list);
			err = 0;
			break;
		}

		obj = rht_dereference_bucket(obj->next, tbl, hash);
		err = 1;

		if (rhlist) {
			list = rht_dereference_bucket(list->next, tbl, hash);
			if (list) {
				RCU_INIT_POINTER(list->rhead.next, obj);
				obj = &list->rhead;
				err = 0;
			}
		}

		rcu_assign_pointer(*pprev, obj);
		break;
	}

	spin_unlock_bh(lock);

	if (err > 0) {
		atomic_dec(&ht->nelems);
		if (unlikely(ht->p.automatic_shrinking &&
			     rht_shrink_below_30(ht, tbl)))
			schedule_work(&ht->run_work);
		err = 0;
	}

	return err;
}

/* Internal function, please use rhashtable_remove_fast() instead */
static inline int __rhashtable_remove_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params, bool rhlist)
{
	struct bucket_table *tbl;
	int err;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);

	/* Because we have already taken (and released) the bucket lock
	 * in old_tbl, if we find that future_tbl is not yet
	 * visible then that guarantees the entry to still be in
	 * the old tbl if it exists.
	 */
	while ((err = __rhashtable_remove_fast_one(ht, tbl, obj, params,
						   rhlist)) &&
	       (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
		;

	rcu_read_unlock();

	return err;
}

/**
 * rhashtable_remove_fast - remove object from hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 *
 * Since the hash chain is single linked, the removal operation needs to
 * walk the bucket chain upon removal.  The removal operation is thus
 * considerably slower if the hash table is not correctly sized.
 *
 * Will automatically shrink the table if permitted when residency drops
 * below 30%.
 *
 * Returns zero on success, -ENOENT if the entry could not be found.
 */
static inline int rhashtable_remove_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	return __rhashtable_remove_fast(ht, obj, params, false);
}
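
/*
 * Example (illustrative sketch, not part of the original header): removing an
 * object and freeing it only after an RCU grace period, since concurrent
 * RCU readers returned by lookup may still hold a reference.  Assumes the
 * hypothetical "struct test_obj" also embeds a struct rcu_head @rcu.
 *
 *	if (rhashtable_remove_fast(&test_ht, &obj->node, test_params) == 0)
 *		kfree_rcu(obj, rcu);
 */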

/**
 * rhltable_remove - remove object from hash list table
 * @hlt:	hash list table
 * @list:	pointer to hash list head inside object
 * @params:	hash table parameters
 *
 * Since the hash chain is single linked, the removal operation needs to
 * walk the bucket chain upon removal.  The removal operation is thus
 * considerably slower if the hash table is not correctly sized.
 *
 * Will automatically shrink the table if permitted when residency drops
 * below 30%.
 *
 * Returns zero on success, -ENOENT if the entry could not be found.
 */
static inline int rhltable_remove(
	struct rhltable *hlt, struct rhlist_head *list,
	const struct rhashtable_params params)
{
	return __rhashtable_remove_fast(&hlt->ht, &list->rhead, params, true);
}

/* Internal function, please use rhashtable_replace_fast() instead */
static inline int __rhashtable_replace_fast(
	struct rhashtable *ht, struct bucket_table *tbl,
	struct rhash_head *obj_old, struct rhash_head *obj_new,
	const struct rhashtable_params params)
{
	struct rhash_head __rcu **pprev;
	struct rhash_head *he;
	spinlock_t *lock;
	unsigned int hash;
	int err = -ENOENT;

	/* Minimally, the new and old objects must have the same hash
	 * (which should mean identifiers are the same).
	 */
	hash = rht_head_hashfn(ht, tbl, obj_old, params);
	if (hash != rht_head_hashfn(ht, tbl, obj_new, params))
		return -EINVAL;

	lock = rht_bucket_lock(tbl, hash);

	spin_lock_bh(lock);

	pprev = rht_bucket_var(tbl, hash);
	rht_for_each_continue(he, *pprev, tbl, hash) {
		if (he != obj_old) {
			pprev = &he->next;
			continue;
		}

		rcu_assign_pointer(obj_new->next, obj_old->next);
		rcu_assign_pointer(*pprev, obj_new);
		err = 0;
		break;
	}

	spin_unlock_bh(lock);

	return err;
}

/**
 * rhashtable_replace_fast - replace an object in hash table
 * @ht:		hash table
 * @obj_old:	pointer to hash head inside object being replaced
 * @obj_new:	pointer to hash head inside object which is new
 * @params:	hash table parameters
 *
 * Replacing an object doesn't affect the number of elements in the hash table
 * or bucket, so we don't need to worry about shrinking or expanding the
 * table here.
 *
 * Returns zero on success, -ENOENT if the entry could not be found,
 * -EINVAL if hash is not the same for the old and new objects.
 */
static inline int rhashtable_replace_fast(
	struct rhashtable *ht, struct rhash_head *obj_old,
	struct rhash_head *obj_new,
	const struct rhashtable_params params)
{
	struct bucket_table *tbl;
	int err;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);

	/* Because we have already taken (and released) the bucket lock
	 * in old_tbl, if we find that future_tbl is not yet
	 * visible then that guarantees the entry to still be in
	 * the old tbl if it exists.
	 */
	while ((err = __rhashtable_replace_fast(ht, tbl, obj_old,
						obj_new, params)) &&
	       (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
		;

	rcu_read_unlock();

	return err;
}

/* Obsolete function, do not use in new code. */
static inline int rhashtable_walk_init(struct rhashtable *ht,
				       struct rhashtable_iter *iter, gfp_t gfp)
{
	rhashtable_walk_enter(ht, iter);
	return 0;
}

/**
 * rhltable_walk_enter - Initialise an iterator
 * @hlt:	Table to walk over
 * @iter:	Hash table Iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice.  Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may sleep so you must not call it from interrupt
 * context or with spin locks held.
 *
 * You must call rhashtable_walk_exit after this function returns.
 */
static inline void rhltable_walk_enter(struct rhltable *hlt,
				       struct rhashtable_iter *iter)
{
	return rhashtable_walk_enter(&hlt->ht, iter);
}

/**
 * rhltable_free_and_destroy - free elements and destroy hash list table
 * @hlt:	the hash list table to destroy
 * @free_fn:	callback to release resources of element
 * @arg:	pointer passed to free_fn
 *
 * See documentation for rhashtable_free_and_destroy.
 */
static inline void rhltable_free_and_destroy(struct rhltable *hlt,
					     void (*free_fn)(void *ptr,
							     void *arg),
					     void *arg)
{
	return rhashtable_free_and_destroy(&hlt->ht, free_fn, arg);
}

static inline void rhltable_destroy(struct rhltable *hlt)
{
	return rhltable_free_and_destroy(hlt, NULL, NULL);
}

#endif /* _LINUX_RHASHTABLE_H */