1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68#include "qemu/osdep.h"
69#include "qemu/qht.h"
70#include "qemu/atomic.h"
71#include "qemu/rcu.h"
72#include "qemu/memalign.h"
73
74
75
76
77
78
79
80
81
82
83
/* Each bucket is aligned/padded to a cache line to avoid false sharing. */
#define QHT_BUCKET_ALIGN 64

/*
 * Entries per bucket: chosen so that a struct qht_bucket fits in one
 * QHT_BUCKET_ALIGN-sized line (pointers are half as wide on 32-bit hosts);
 * see the QEMU_BUILD_BUG_ON below the struct definition.
 */
#if HOST_LONG_BITS == 32
#define QHT_BUCKET_ENTRIES 6
#else
#define QHT_BUCKET_ENTRIES 4
#endif
92
/* Dispatch tag for struct qht_iter: plain visit vs. conditional removal. */
enum qht_iter_type {
    QHT_ITER_VOID,    /* call a void-returning visitor on each entry */
    QHT_ITER_RM,      /* visitor returns true to remove the entry */
};

/* An iteration callback together with the tag that selects the union member. */
struct qht_iter {
    union {
        qht_iter_func_t retvoid;      /* valid when type == QHT_ITER_VOID */
        qht_iter_bool_func_t retbool; /* valid when type == QHT_ITER_RM */
    } f;
    enum qht_iter_type type;
};
105
106
107
108
109
110static inline void qht_lock(struct qht *ht)
111{
112 if (ht->mode & QHT_MODE_RAW_MUTEXES) {
113 qemu_mutex_lock__raw(&ht->lock);
114 } else {
115 qemu_mutex_lock(&ht->lock);
116 }
117}
118
119static inline int qht_trylock(struct qht *ht)
120{
121 if (ht->mode & QHT_MODE_RAW_MUTEXES) {
122 return qemu_mutex_trylock__raw(&(ht)->lock);
123 }
124 return qemu_mutex_trylock(&(ht)->lock);
125}
126
127
128static inline void qht_unlock(struct qht *ht)
129{
130 qemu_mutex_unlock(&ht->lock);
131}
132
133
134
135
136
137
138
139
140
141
142
143
/*
 * A bucket holds up to QHT_BUCKET_ENTRIES (hash, pointer) pairs plus a link
 * to an overflow bucket. Writers take @lock on the chain's head bucket;
 * lockless readers validate against @sequence (also on the head bucket).
 * A NULL pointer marks an empty slot; used slots always precede empty ones.
 */
struct qht_bucket {
    QemuSpin lock;                       /* writer lock, head bucket only */
    QemuSeqLock sequence;                /* read validation, head bucket only */
    uint32_t hashes[QHT_BUCKET_ENTRIES]; /* full hash of each entry */
    void *pointers[QHT_BUCKET_ENTRIES];  /* user pointers; NULL == empty */
    struct qht_bucket *next;             /* overflow bucket, or NULL */
} QEMU_ALIGNED(QHT_BUCKET_ALIGN);

/* The layout above must fit in a single QHT_BUCKET_ALIGN-sized line. */
QEMU_BUILD_BUG_ON(sizeof(struct qht_bucket) > QHT_BUCKET_ALIGN);
153
154
155
156
157
158
159
160
161
162
163
164
165
/*
 * A map is one complete array of bucket chains. The table publishes the
 * current map through ht->map with RCU; resizing builds a fresh map and
 * swaps it in, reclaiming the old one via call_rcu().
 */
struct qht_map {
    struct rcu_head rcu;              /* for deferred destruction */
    struct qht_bucket *buckets;       /* array of n_buckets head buckets */
    size_t n_buckets;                 /* always a power of two */
    size_t n_added_buckets;           /* overflow buckets allocated so far */
    size_t n_added_buckets_threshold; /* crossing it requests auto-resize */
};

/* Request a resize once overflow buckets exceed 1/8 of the head buckets. */
#define QHT_NR_ADDED_BUCKETS_THRESHOLD_DIV 8

static void qht_do_resize_reset(struct qht *ht, struct qht_map *new,
                                bool reset);
static void qht_grow_maybe(struct qht *ht);
180
#ifdef QHT_DEBUG

#define qht_debug_assert(X) do { assert(X); } while (0)

/*
 * Check one chain's packing invariant: all used slots must precede all
 * empty ones, i.e. once a NULL pointer is seen no non-NULL entry may follow.
 * Called with the chain's head lock held.
 */
static void qht_bucket_debug__locked(struct qht_bucket *b)
{
    bool seen_empty = false;
    bool corrupt = false;
    int i;

    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->pointers[i] == NULL) {
                seen_empty = true;
                continue;
            }
            if (seen_empty) {
                /* non-NULL entry after a hole: report it and flag corruption */
                fprintf(stderr, "%s: b: %p, pos: %i, hash: 0x%x, p: %p\n",
                        __func__, b, i, b->hashes[i], b->pointers[i]);
                corrupt = true;
            }
        }
        b = b->next;
    } while (b);
    qht_debug_assert(!corrupt);
}

/* Check every chain of @map; all bucket locks must be held. */
static void qht_map_debug__all_locked(struct qht_map *map)
{
    int i;

    for (i = 0; i < map->n_buckets; i++) {
        qht_bucket_debug__locked(&map->buckets[i]);
    }
}
#else

/* Debug disabled: evaluate X only for its side effects, keep builds quiet. */
#define qht_debug_assert(X) do { (void)(X); } while (0)

static inline void qht_bucket_debug__locked(struct qht_bucket *b)
{ }

static inline void qht_map_debug__all_locked(struct qht_map *map)
{ }
#endif
226
227static inline size_t qht_elems_to_buckets(size_t n_elems)
228{
229 return pow2ceil(n_elems / QHT_BUCKET_ENTRIES);
230}
231
232static inline void qht_head_init(struct qht_bucket *b)
233{
234 memset(b, 0, sizeof(*b));
235 qemu_spin_init(&b->lock);
236 seqlock_init(&b->sequence);
237}
238
239static inline
240struct qht_bucket *qht_map_to_bucket(const struct qht_map *map, uint32_t hash)
241{
242 return &map->buckets[hash & (map->n_buckets - 1)];
243}
244
245
246static void qht_map_lock_buckets(struct qht_map *map)
247{
248 size_t i;
249
250 for (i = 0; i < map->n_buckets; i++) {
251 struct qht_bucket *b = &map->buckets[i];
252
253 qemu_spin_lock(&b->lock);
254 }
255}
256
257static void qht_map_unlock_buckets(struct qht_map *map)
258{
259 size_t i;
260
261 for (i = 0; i < map->n_buckets; i++) {
262 struct qht_bucket *b = &map->buckets[i];
263
264 qemu_spin_unlock(&b->lock);
265 }
266}
267
268
269
270
271
/*
 * With the relevant bucket lock(s) held, @map is stale iff a resize has
 * since published a different map in ht->map.
 */
static inline bool qht_map_is_stale__locked(const struct qht *ht,
                                            const struct qht_map *map)
{
    return map != ht->map;
}
277
278
279
280
281
282
283
284
/*
 * Lock all bucket locks of the current map, guaranteeing that on return
 * *pmap is not stale. Fast path: lock the map we read via RCU and check
 * it is still current. Slow path: serialize against resizes via ht->lock.
 * Dropping ht->lock while still holding the bucket locks is safe: a
 * concurrent resize must itself take every bucket lock before publishing
 * a new map, so the map cannot go stale until we unlock the buckets.
 * Caller must eventually call qht_map_unlock_buckets(*pmap).
 */
static inline
void qht_map_lock_buckets__no_stale(struct qht *ht, struct qht_map **pmap)
{
    struct qht_map *map;

    map = qatomic_rcu_read(&ht->map);
    qht_map_lock_buckets(map);
    if (likely(!qht_map_is_stale__locked(ht, map))) {
        *pmap = map;
        return;
    }
    qht_map_unlock_buckets(map);

    /* we raced with a resize: acquire ht->lock to see the up-to-date map */
    qht_lock(ht);
    map = ht->map;
    qht_map_lock_buckets(map);
    qht_unlock(ht);
    *pmap = map;
    return;
}
306
307
308
309
310
311
312
313
314
/*
 * Lock the head bucket for @hash in the current map, guaranteeing that on
 * return neither the bucket nor *pmap is stale. Same protocol as
 * qht_map_lock_buckets__no_stale, but for a single bucket: optimistically
 * lock the bucket of the RCU-read map, and fall back to taking ht->lock
 * (which excludes resizes) if the map changed under us.
 * Caller must eventually release b->lock.
 */
static inline
struct qht_bucket *qht_bucket_lock__no_stale(struct qht *ht, uint32_t hash,
                                             struct qht_map **pmap)
{
    struct qht_bucket *b;
    struct qht_map *map;

    map = qatomic_rcu_read(&ht->map);
    b = qht_map_to_bucket(map, hash);

    qemu_spin_lock(&b->lock);
    if (likely(!qht_map_is_stale__locked(ht, map))) {
        *pmap = map;
        return b;
    }
    qemu_spin_unlock(&b->lock);

    /* we raced with a resize: acquire ht->lock to see the up-to-date map */
    qht_lock(ht);
    map = ht->map;
    b = qht_map_to_bucket(map, hash);
    qemu_spin_lock(&b->lock);
    qht_unlock(ht);
    *pmap = map;
    return b;
}
341
342static inline bool qht_map_needs_resize(const struct qht_map *map)
343{
344 return qatomic_read(&map->n_added_buckets) >
345 map->n_added_buckets_threshold;
346}
347
348static inline void qht_chain_destroy(const struct qht_bucket *head)
349{
350 struct qht_bucket *curr = head->next;
351 struct qht_bucket *prev;
352
353 qemu_spin_destroy(&head->lock);
354 while (curr) {
355 prev = curr;
356 curr = curr->next;
357 qemu_vfree(prev);
358 }
359}
360
361
362static void qht_map_destroy(struct qht_map *map)
363{
364 size_t i;
365
366 for (i = 0; i < map->n_buckets; i++) {
367 qht_chain_destroy(&map->buckets[i]);
368 }
369 qemu_vfree(map->buckets);
370 g_free(map);
371}
372
373static struct qht_map *qht_map_create(size_t n_buckets)
374{
375 struct qht_map *map;
376 size_t i;
377
378 map = g_malloc(sizeof(*map));
379 map->n_buckets = n_buckets;
380
381 map->n_added_buckets = 0;
382 map->n_added_buckets_threshold = n_buckets /
383 QHT_NR_ADDED_BUCKETS_THRESHOLD_DIV;
384
385
386 if (unlikely(map->n_added_buckets_threshold == 0)) {
387 map->n_added_buckets_threshold = 1;
388 }
389
390 map->buckets = qemu_memalign(QHT_BUCKET_ALIGN,
391 sizeof(*map->buckets) * n_buckets);
392 for (i = 0; i < n_buckets; i++) {
393 qht_head_init(&map->buckets[i]);
394 }
395 return map;
396}
397
398void qht_init(struct qht *ht, qht_cmp_func_t cmp, size_t n_elems,
399 unsigned int mode)
400{
401 struct qht_map *map;
402 size_t n_buckets = qht_elems_to_buckets(n_elems);
403
404 g_assert(cmp);
405 ht->cmp = cmp;
406 ht->mode = mode;
407 qemu_mutex_init(&ht->lock);
408 map = qht_map_create(n_buckets);
409 qatomic_rcu_set(&ht->map, map);
410}
411
412
413void qht_destroy(struct qht *ht)
414{
415 qht_map_destroy(ht->map);
416 memset(ht, 0, sizeof(*ht));
417}
418
/*
 * Empty one bucket chain. Called with the chain's head lock held; takes
 * the head seqlock so lockless readers retry rather than observe a
 * half-cleared chain. Thanks to the packing invariant (used slots precede
 * empty ones), the first NULL pointer means the rest is already empty.
 */
static void qht_bucket_reset__locked(struct qht_bucket *head)
{
    struct qht_bucket *b = head;
    int i;

    seqlock_write_begin(&head->sequence);
    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->pointers[i] == NULL) {
                goto done;
            }
            qatomic_set(&b->hashes[i], 0);
            qatomic_set(&b->pointers[i], NULL);
        }
        b = b->next;
    } while (b);
 done:
    seqlock_write_end(&head->sequence);
}
438
439
/* Reset every chain of @map; all bucket locks must already be held. */
static void qht_map_reset__all_locked(struct qht_map *map)
{
    size_t i;

    for (i = 0; i < map->n_buckets; i++) {
        qht_bucket_reset__locked(&map->buckets[i]);
    }
    qht_map_debug__all_locked(map);
}

/* Drop all entries from @ht; the table itself stays usable. */
void qht_reset(struct qht *ht)
{
    struct qht_map *map;

    qht_map_lock_buckets__no_stale(ht, &map);
    qht_map_reset__all_locked(map);
    qht_map_unlock_buckets(map);
}
458
/* Swap in @new as the current map; @new must not be NULL here. */
static inline void qht_do_resize(struct qht *ht, struct qht_map *new)
{
    qht_do_resize_reset(ht, new, false);
}

/* Reset the current map and, if @new is non-NULL, also swap it in. */
static inline void qht_do_resize_and_reset(struct qht *ht, struct qht_map *new)
{
    qht_do_resize_reset(ht, new, true);
}
468
469bool qht_reset_size(struct qht *ht, size_t n_elems)
470{
471 struct qht_map *new = NULL;
472 struct qht_map *map;
473 size_t n_buckets;
474
475 n_buckets = qht_elems_to_buckets(n_elems);
476
477 qht_lock(ht);
478 map = ht->map;
479 if (n_buckets != map->n_buckets) {
480 new = qht_map_create(n_buckets);
481 }
482 qht_do_resize_and_reset(ht, new);
483 qht_unlock(ht);
484
485 return !!new;
486}
487
/*
 * Lockless lookup over one bucket chain. Runs inside a seqlock read
 * section: the caller validates the sequence afterwards and retries if a
 * writer interfered, so torn or stale reads here are harmless.
 */
static inline
void *qht_do_lookup(const struct qht_bucket *head, qht_lookup_func_t func,
                    const void *userp, uint32_t hash)
{
    const struct qht_bucket *b = head;
    int i;

    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (qatomic_read(&b->hashes[i]) == hash) {
                /*
                 * The pointer is dereferenced (by func) before the caller
                 * runs seqlock_read_retry, so qatomic_rcu_read is needed
                 * here to order the pointer load before the accesses to
                 * the pointed-to object.
                 */
                void *p = qatomic_rcu_read(&b->pointers[i]);

                if (likely(p) && likely(func(p, userp))) {
                    return p;
                }
            }
        }
        b = qatomic_rcu_read(&b->next);
    } while (b);

    return NULL;
}

/*
 * Out-of-line retry loop, kept noinline so the no-retry fast path in
 * qht_lookup_custom stays small.
 */
static __attribute__((noinline))
void *qht_lookup__slowpath(const struct qht_bucket *b, qht_lookup_func_t func,
                           const void *userp, uint32_t hash)
{
    unsigned int version;
    void *ret;

    do {
        version = seqlock_read_begin(&b->sequence);
        ret = qht_do_lookup(b, func, userp, hash);
    } while (seqlock_read_retry(&b->sequence, version));
    return ret;
}
528
/*
 * Look up an entry with a caller-supplied comparison function.
 * Lockless; intended to be called within an RCU read critical section so
 * the returned pointer stays valid. Falls back to an out-of-line retry
 * loop only when a writer races with us.
 */
void *qht_lookup_custom(const struct qht *ht, const void *userp, uint32_t hash,
                        qht_lookup_func_t func)
{
    const struct qht_bucket *b;
    const struct qht_map *map;
    unsigned int version;
    void *ret;

    map = qatomic_rcu_read(&ht->map);
    b = qht_map_to_bucket(map, hash);

    version = seqlock_read_begin(&b->sequence);
    ret = qht_do_lookup(b, func, userp, hash);
    if (likely(!seqlock_read_retry(&b->sequence, version))) {
        return ret;
    }
    /*
     * A writer changed the bucket while we read it: retry out of line to
     * keep this fast path compact.
     */
    return qht_lookup__slowpath(b, func, userp, hash);
}

/* Look up @userp using the table's default comparison function. */
void *qht_lookup(const struct qht *ht, const void *userp, uint32_t hash)
{
    return qht_lookup_custom(ht, userp, hash, ht->cmp);
}
556
557
558
559
560
/*
 * Insert @p into the chain at @head. Called with the head bucket lock held.
 * Returns NULL on success, or the existing entry if an equivalent one
 * (same hash and ht->cmp() true) is already present. May append an
 * overflow bucket, setting *needs_resize (if non-NULL) when the map has
 * crossed its growth threshold.
 */
static void *qht_insert__locked(const struct qht *ht, struct qht_map *map,
                                struct qht_bucket *head, void *p, uint32_t hash,
                                bool *needs_resize)
{
    struct qht_bucket *b = head;
    struct qht_bucket *prev = NULL;
    struct qht_bucket *new = NULL;
    int i;

    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->pointers[i]) {
                if (unlikely(b->hashes[i] == hash &&
                             ht->cmp(b->pointers[i], p))) {
                    return b->pointers[i];
                }
            } else {
                /* slots are packed: the first empty one is where we insert */
                goto found;
            }
        }
        prev = b;
        b = b->next;
    } while (b);

    /* chain is full: append a new overflow bucket */
    b = qemu_memalign(QHT_BUCKET_ALIGN, sizeof(*b));
    memset(b, 0, sizeof(*b));
    new = b;
    i = 0;
    qatomic_inc(&map->n_added_buckets);
    if (unlikely(qht_map_needs_resize(map)) && needs_resize) {
        *needs_resize = true;
    }

 found:
    /* found an empty slot: enter the seqlock write section so readers retry */
    seqlock_write_begin(&head->sequence);
    if (new) {
        /* publish the new bucket only after it has been zero-initialized */
        qatomic_rcu_set(&prev->next, b);
    }
    qatomic_set(&b->hashes[i], hash);
    qatomic_set(&b->pointers[i], p);
    seqlock_write_end(&head->sequence);
    return NULL;
}
606
/*
 * Double the table if it is over its overflow-bucket threshold.
 * Best-effort: uses trylock so that if another thread already holds
 * ht->lock (e.g. it is resizing), we simply skip the attempt.
 */
static __attribute__((noinline)) void qht_grow_maybe(struct qht *ht)
{
    struct qht_map *map;

    /* nonzero return from trylock means the mutex is busy */
    if (qht_trylock(ht)) {
        return;
    }
    map = ht->map;
    /* recheck under the lock: another thread may have already resized */
    if (qht_map_needs_resize(map)) {
        struct qht_map *new = qht_map_create(map->n_buckets * 2);

        qht_do_resize(ht, new);
    }
    qht_unlock(ht);
}
627
628bool qht_insert(struct qht *ht, void *p, uint32_t hash, void **existing)
629{
630 struct qht_bucket *b;
631 struct qht_map *map;
632 bool needs_resize = false;
633 void *prev;
634
635
636 qht_debug_assert(p);
637
638 b = qht_bucket_lock__no_stale(ht, hash, &map);
639 prev = qht_insert__locked(ht, map, b, p, hash, &needs_resize);
640 qht_bucket_debug__locked(b);
641 qemu_spin_unlock(&b->lock);
642
643 if (unlikely(needs_resize) && ht->mode & QHT_MODE_AUTO_RESIZE) {
644 qht_grow_maybe(ht);
645 }
646 if (likely(prev == NULL)) {
647 return true;
648 }
649 if (existing) {
650 *existing = prev;
651 }
652 return false;
653}
654
655static inline bool qht_entry_is_last(const struct qht_bucket *b, int pos)
656{
657 if (pos == QHT_BUCKET_ENTRIES - 1) {
658 if (b->next == NULL) {
659 return true;
660 }
661 return b->next->pointers[0] == NULL;
662 }
663 return b->pointers[pos + 1] == NULL;
664}
665
/*
 * Move the entry at @from[j] into @to[i], emptying @from[j].
 * Called by writers with the chain locked and inside the head's seqlock
 * write section, so plain qatomic_set stores suffice for readers.
 */
static void
qht_entry_move(struct qht_bucket *to, int i, struct qht_bucket *from, int j)
{
    qht_debug_assert(!(to == from && i == j));
    /* the destination must hold the entry being replaced */
    qht_debug_assert(to->pointers[i]);
    qht_debug_assert(from->pointers[j]);

    qatomic_set(&to->hashes[i], from->hashes[j]);
    qatomic_set(&to->pointers[i], from->pointers[j]);

    qatomic_set(&from->hashes[j], 0);
    qatomic_set(&from->pointers[j], NULL);
}
679
680
681
682
683
/*
 * Remove the entry at @orig[pos], keeping the chain packed by moving the
 * chain's last used entry into the vacated slot.
 * Called with the chain locked and inside the head's seqlock write section.
 */
static inline void qht_bucket_remove_entry(struct qht_bucket *orig, int pos)
{
    struct qht_bucket *b = orig;
    struct qht_bucket *prev = NULL;
    int i;

    if (qht_entry_is_last(orig, pos)) {
        /* removing the chain's last entry: just clear it in place */
        orig->hashes[pos] = 0;
        qatomic_set(&orig->pointers[pos], NULL);
        return;
    }
    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->pointers[i]) {
                continue;
            }
            /* first empty slot found: the last used entry precedes it */
            if (i > 0) {
                return qht_entry_move(orig, pos, b, i - 1);
            }
            /* empty slot at position 0: last entry ends the previous bucket */
            qht_debug_assert(prev);
            return qht_entry_move(orig, pos, prev, QHT_BUCKET_ENTRIES - 1);
        }
        prev = b;
        b = b->next;
    } while (b);

    /* no empty slot: the chain is full, so its last entry is at the tail */
    qht_entry_move(orig, pos, prev, QHT_BUCKET_ENTRIES - 1);
}
712
713
/*
 * Remove @p from the chain at @head, if present. Called with the head
 * bucket lock held; returns true iff the entry was removed.
 * Entries are matched by pointer identity, not via ht->cmp.
 */
static inline
bool qht_remove__locked(struct qht_bucket *head, const void *p, uint32_t hash)
{
    struct qht_bucket *b = head;
    int i;

    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            void *q = b->pointers[i];

            if (unlikely(q == NULL)) {
                /* packing invariant: a hole means the rest is empty */
                return false;
            }
            if (q == p) {
                qht_debug_assert(b->hashes[i] == hash);
                /* take the seqlock only when actually modifying the chain */
                seqlock_write_begin(&head->sequence);
                qht_bucket_remove_entry(b, i);
                seqlock_write_end(&head->sequence);
                return true;
            }
        }
        b = b->next;
    } while (b);
    return false;
}
739
740bool qht_remove(struct qht *ht, const void *p, uint32_t hash)
741{
742 struct qht_bucket *b;
743 struct qht_map *map;
744 bool ret;
745
746
747 qht_debug_assert(p);
748
749 b = qht_bucket_lock__no_stale(ht, hash, &map);
750 ret = qht_remove__locked(b, p, hash);
751 qht_bucket_debug__locked(b);
752 qemu_spin_unlock(&b->lock);
753 return ret;
754}
755
/*
 * Visit every entry of one chain. For QHT_ITER_RM, entries for which the
 * callback returns true are removed on the spot; removal repacks the
 * chain, moving another entry into the current slot, which must then be
 * revisited. Called with the chain's head lock held.
 */
static inline void qht_bucket_iter(struct qht_bucket *head,
                                   const struct qht_iter *iter, void *userp)
{
    struct qht_bucket *b = head;
    int i;

    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->pointers[i] == NULL) {
                /* packing invariant: a hole means the rest is empty */
                return;
            }
            switch (iter->type) {
            case QHT_ITER_VOID:
                iter->f.retvoid(b->pointers[i], b->hashes[i], userp);
                break;
            case QHT_ITER_RM:
                if (iter->f.retbool(b->pointers[i], b->hashes[i], userp)) {
                    /* removal repacks the chain under the head's seqlock */
                    seqlock_write_begin(&head->sequence);
                    qht_bucket_remove_entry(b, i);
                    seqlock_write_end(&head->sequence);
                    qht_bucket_debug__locked(b);
                    /* re-examine slot i, which now holds a different entry */
                    i--;
                    continue;
                }
                break;
            default:
                g_assert_not_reached();
            }
        }
        b = b->next;
    } while (b);
}
790
791
792static inline void qht_map_iter__all_locked(struct qht_map *map,
793 const struct qht_iter *iter,
794 void *userp)
795{
796 size_t i;
797
798 for (i = 0; i < map->n_buckets; i++) {
799 qht_bucket_iter(&map->buckets[i], iter, userp);
800 }
801}
802
803static inline void
804do_qht_iter(struct qht *ht, const struct qht_iter *iter, void *userp)
805{
806 struct qht_map *map;
807
808 map = qatomic_rcu_read(&ht->map);
809 qht_map_lock_buckets(map);
810 qht_map_iter__all_locked(map, iter, userp);
811 qht_map_unlock_buckets(map);
812}
813
814void qht_iter(struct qht *ht, qht_iter_func_t func, void *userp)
815{
816 const struct qht_iter iter = {
817 .f.retvoid = func,
818 .type = QHT_ITER_VOID,
819 };
820
821 do_qht_iter(ht, &iter, userp);
822}
823
824void qht_iter_remove(struct qht *ht, qht_iter_bool_func_t func, void *userp)
825{
826 const struct qht_iter iter = {
827 .f.retbool = func,
828 .type = QHT_ITER_RM,
829 };
830
831 do_qht_iter(ht, &iter, userp);
832}
833
/* Context passed to qht_map_copy() while populating a new map. */
struct qht_map_copy_data {
    struct qht *ht;
    struct qht_map *new;
};

/*
 * Re-insert one entry into data->new; used as a QHT_ITER_VOID callback
 * when copying a map during resize. needs_resize is NULL: the new map's
 * size was chosen by the caller.
 */
static void qht_map_copy(void *p, uint32_t hash, void *userp)
{
    struct qht_map_copy_data *data = userp;
    struct qht *ht = data->ht;
    struct qht_map *new = data->new;
    struct qht_bucket *b = qht_map_to_bucket(new, hash);

    /* no need to take b->lock: no other thread has seen this map yet */
    qht_insert__locked(ht, new, b, p, hash, NULL);
}
849
850
851
852
853
/*
 * Resize and/or reset the table.
 * @new may be NULL, in which case only the reset (if requested) happens.
 * Called with ht->lock held. Taking every bucket lock of the old map
 * excludes all writers while lockless readers keep going; the old map is
 * reclaimed via call_rcu() once current readers are done with it.
 */
static void qht_do_resize_reset(struct qht *ht, struct qht_map *new, bool reset)
{
    struct qht_map *old;
    const struct qht_iter iter = {
        .f.retvoid = qht_map_copy,
        .type = QHT_ITER_VOID,
    };
    struct qht_map_copy_data data;

    old = ht->map;
    qht_map_lock_buckets(old);

    if (reset) {
        qht_map_reset__all_locked(old);
    }

    if (new == NULL) {
        qht_map_unlock_buckets(old);
        return;
    }

    g_assert(new->n_buckets != old->n_buckets);
    data.ht = ht;
    data.new = new;
    /* copy every entry into the new map before publishing it */
    qht_map_iter__all_locked(old, &iter, &data);
    qht_map_debug__all_locked(new);

    /* publish the new map while writers are still excluded */
    qatomic_rcu_set(&ht->map, new);
    qht_map_unlock_buckets(old);
    call_rcu(old, qht_map_destroy, rcu);
}
885
886bool qht_resize(struct qht *ht, size_t n_elems)
887{
888 size_t n_buckets = qht_elems_to_buckets(n_elems);
889 size_t ret = false;
890
891 qht_lock(ht);
892 if (n_buckets != ht->map->n_buckets) {
893 struct qht_map *new;
894
895 new = qht_map_create(n_buckets);
896 qht_do_resize(ht, new);
897 ret = true;
898 }
899 qht_unlock(ht);
900
901 return ret;
902}
903
904
905void qht_statistics_init(const struct qht *ht, struct qht_stats *stats)
906{
907 const struct qht_map *map;
908 int i;
909
910 map = qatomic_rcu_read(&ht->map);
911
912 stats->used_head_buckets = 0;
913 stats->entries = 0;
914 qdist_init(&stats->chain);
915 qdist_init(&stats->occupancy);
916
917 if (unlikely(map == NULL)) {
918 stats->head_buckets = 0;
919 return;
920 }
921 stats->head_buckets = map->n_buckets;
922
923 for (i = 0; i < map->n_buckets; i++) {
924 const struct qht_bucket *head = &map->buckets[i];
925 const struct qht_bucket *b;
926 unsigned int version;
927 size_t buckets;
928 size_t entries;
929 int j;
930
931 do {
932 version = seqlock_read_begin(&head->sequence);
933 buckets = 0;
934 entries = 0;
935 b = head;
936 do {
937 for (j = 0; j < QHT_BUCKET_ENTRIES; j++) {
938 if (qatomic_read(&b->pointers[j]) == NULL) {
939 break;
940 }
941 entries++;
942 }
943 buckets++;
944 b = qatomic_rcu_read(&b->next);
945 } while (b);
946 } while (seqlock_read_retry(&head->sequence, version));
947
948 if (entries) {
949 qdist_inc(&stats->chain, buckets);
950 qdist_inc(&stats->occupancy,
951 (double)entries / QHT_BUCKET_ENTRIES / buckets);
952 stats->used_head_buckets++;
953 stats->entries += entries;
954 } else {
955 qdist_inc(&stats->occupancy, 0);
956 }
957 }
958}
959
960void qht_statistics_destroy(struct qht_stats *stats)
961{
962 qdist_destroy(&stats->occupancy);
963 qdist_destroy(&stats->chain);
964}
965