/*
 * qht.c - QEMU Hash Table, designed to scale for read-mostly workloads.
 *
 * Design overview (derived from the code below):
 * - The table is an array of cache-line-sized buckets, each holding a few
 *   hash/pointer pairs plus an overflow chain.
 * - Lookups are lock-free: readers use RCU to dereference the current map
 *   and a per-bucket seqlock to detect concurrent writes, retrying when a
 *   write races with the read.
 * - Writers (insert/remove) serialize per head bucket with a spinlock, so
 *   writes to different buckets proceed concurrently.
 * - Resizing publishes a new bucket array via RCU; the old one is freed
 *   after a grace period. With QHT_MODE_AUTO_RESIZE, the table grows
 *   automatically once enough overflow buckets have been chained.
 *
 * Constraints:
 * - NULL is not a valid pointer value: it marks empty entries.
 * - Removed elements must not be freed until an RCU grace period has
 *   elapsed, since concurrent lookups may still hold references to them.
 */
#include "qemu/osdep.h"
#include "qemu/qht.h"
#include "qemu/atomic.h"
#include "qemu/rcu.h"

/* bucket heads are cache-line aligned */
#define QHT_BUCKET_ALIGN 64

/* entries per bucket: chosen so that a bucket fills one cache line */
#if HOST_LONG_BITS == 32
#define QHT_BUCKET_ENTRIES 6
#else /* 64-bit */
#define QHT_BUCKET_ENTRIES 4
#endif

/*
 * A bucket is a cache-line-sized chunk holding up to QHT_BUCKET_ENTRIES
 * hash/pointer pairs plus a pointer to the next (overflow) bucket.
 * Head buckets also carry the per-bucket spinlock (for writers) and
 * seqlock (for lock-free readers) that protect the whole chain.
 *
 * An entry is valid iff its pointer is non-NULL; valid entries are kept
 * packed at the front of each chain, which lets lookups stop at the
 * first NULL pointer they see.
 */
struct qht_bucket {
    QemuSpin lock;
    QemuSeqLock sequence;
    uint32_t hashes[QHT_BUCKET_ENTRIES];
    void *pointers[QHT_BUCKET_ENTRIES];
    struct qht_bucket *next;
} QEMU_ALIGNED(QHT_BUCKET_ALIGN);

QEMU_BUILD_BUG_ON(sizeof(struct qht_bucket) > QHT_BUCKET_ALIGN);
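
/*
 * Sanity-check arithmetic for the assertion above, assuming 4-byte QemuSpin
 * and QemuSeqLock (an assumption, not guaranteed by this file):
 *
 *   64-bit host: 4 + 4 + 4*4 (hashes) + 4*8 (pointers) + 8 (next) = 64
 *   32-bit host: 4 + 4 + 6*4 (hashes) + 6*4 (pointers) + 4 (next) = 60
 *
 * Either way a bucket fits within one 64-byte cache line.
 */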

/*
 * Map: the current array of head buckets, published to readers via RCU.
 * Resizing swaps in a new map; the old one is reclaimed after an RCU
 * grace period.
 */
struct qht_map {
    struct rcu_head rcu;
    struct qht_bucket *buckets;
    size_t n_buckets;
    size_t n_added_buckets;
    size_t n_added_buckets_threshold;
};

/* trigger a resize when n_added_buckets > n_buckets / div */
#define QHT_NR_ADDED_BUCKETS_THRESHOLD_DIV 8

static void qht_do_resize(struct qht *ht, struct qht_map *new);
static void qht_grow_maybe(struct qht *ht);

#ifdef QHT_DEBUG

#define qht_debug_assert(X) do { assert(X); } while (0)

static void qht_bucket_debug__locked(struct qht_bucket *b)
{
    bool seen_empty = false;
    bool corrupt = false;
    int i;

    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->pointers[i] == NULL) {
                seen_empty = true;
                continue;
            }
            /* valid entries must precede empty ones within a chain */
            if (seen_empty) {
                fprintf(stderr, "%s: b: %p, pos: %i, hash: 0x%x, p: %p\n",
                        __func__, b, i, b->hashes[i], b->pointers[i]);
                corrupt = true;
            }
        }
        b = b->next;
    } while (b);
    qht_debug_assert(!corrupt);
}

static void qht_map_debug__all_locked(struct qht_map *map)
{
    size_t i;

    for (i = 0; i < map->n_buckets; i++) {
        qht_bucket_debug__locked(&map->buckets[i]);
    }
}
#else

#define qht_debug_assert(X) do { (void)(X); } while (0)

static inline void qht_bucket_debug__locked(struct qht_bucket *b)
{ }

static inline void qht_map_debug__all_locked(struct qht_map *map)
{ }
#endif

static inline size_t qht_elems_to_buckets(size_t n_elems)
{
    return pow2ceil(n_elems / QHT_BUCKET_ENTRIES);
}
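
/*
 * Worked example: with QHT_BUCKET_ENTRIES == 4, sizing for 1000 elements
 * yields 1000 / 4 = 250, rounded up by pow2ceil() to 256 head buckets.
 */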

static inline void qht_head_init(struct qht_bucket *b)
{
    memset(b, 0, sizeof(*b));
    qemu_spin_init(&b->lock);
    seqlock_init(&b->sequence);
}

static inline
struct qht_bucket *qht_map_to_bucket(struct qht_map *map, uint32_t hash)
{
    /* n_buckets is always a power of two, so masking picks the head bucket */
    return &map->buckets[hash & (map->n_buckets - 1)];
}

/* acquire all bucket locks of a map, in index order */
static void qht_map_lock_buckets(struct qht_map *map)
{
    size_t i;

    for (i = 0; i < map->n_buckets; i++) {
        struct qht_bucket *b = &map->buckets[i];

        qemu_spin_lock(&b->lock);
    }
}

static void qht_map_unlock_buckets(struct qht_map *map)
{
    size_t i;

    for (i = 0; i < map->n_buckets; i++) {
        struct qht_bucket *b = &map->buckets[i];

        qemu_spin_unlock(&b->lock);
    }
}

/*
 * Call with at least a bucket lock held.
 * @map should be the value read before acquiring the lock (or locks).
 */
static inline bool qht_map_is_stale__locked(struct qht *ht, struct qht_map *map)
{
    return map != ht->map;
}

/*
 * Grab all bucket locks, and set @pmap after making sure the map isn't stale.
 *
 * Pairs with qht_map_unlock_buckets(), hence the pass-by-reference.
 *
 * Note: callers cannot have ht->lock held.
 */
static inline
void qht_map_lock_buckets__no_stale(struct qht *ht, struct qht_map **pmap)
{
    struct qht_map *map;

    map = atomic_rcu_read(&ht->map);
    qht_map_lock_buckets(map);
    if (likely(!qht_map_is_stale__locked(ht, map))) {
        *pmap = map;
        return;
    }
    qht_map_unlock_buckets(map);

    /* we raced with a resize; acquire ht->lock to see up-to-date ht->map */
    qemu_mutex_lock(&ht->lock);
    map = ht->map;
    qht_map_lock_buckets(map);
    qemu_mutex_unlock(&ht->lock);
    *pmap = map;
    return;
}

/*
 * Get a head bucket and lock it, making sure its parent map is not stale.
 * @pmap is filled with a pointer to the bucket's parent map.
 *
 * Unlock with qemu_spin_unlock(&b->lock).
 *
 * Note: callers cannot have ht->lock held.
 */
static inline
struct qht_bucket *qht_bucket_lock__no_stale(struct qht *ht, uint32_t hash,
                                             struct qht_map **pmap)
{
    struct qht_bucket *b;
    struct qht_map *map;

    map = atomic_rcu_read(&ht->map);
    b = qht_map_to_bucket(map, hash);

    qemu_spin_lock(&b->lock);
    if (likely(!qht_map_is_stale__locked(ht, map))) {
        *pmap = map;
        return b;
    }
    qemu_spin_unlock(&b->lock);

    /* we raced with a resize; acquire ht->lock to see up-to-date ht->map */
    qemu_mutex_lock(&ht->lock);
    map = ht->map;
    b = qht_map_to_bucket(map, hash);
    qemu_spin_lock(&b->lock);
    qemu_mutex_unlock(&ht->lock);
    *pmap = map;
    return b;
}

static inline bool qht_map_needs_resize(struct qht_map *map)
{
    return atomic_read(&map->n_added_buckets) > map->n_added_buckets_threshold;
}

static inline void qht_chain_destroy(struct qht_bucket *head)
{
    struct qht_bucket *curr = head->next;
    struct qht_bucket *prev;

    while (curr) {
        prev = curr;
        curr = curr->next;
        qemu_vfree(prev);
    }
}

/* pass only an orphan map, i.e. one that no thread can reach anymore */
static void qht_map_destroy(struct qht_map *map)
{
    size_t i;

    for (i = 0; i < map->n_buckets; i++) {
        qht_chain_destroy(&map->buckets[i]);
    }
    qemu_vfree(map->buckets);
    g_free(map);
}

static struct qht_map *qht_map_create(size_t n_buckets)
{
    struct qht_map *map;
    size_t i;

    map = g_malloc(sizeof(*map));
    map->n_buckets = n_buckets;

    map->n_added_buckets = 0;
    map->n_added_buckets_threshold = n_buckets /
        QHT_NR_ADDED_BUCKETS_THRESHOLD_DIV;

    /* let tiny hash tables add at least one non-head bucket */
    if (unlikely(map->n_added_buckets_threshold == 0)) {
        map->n_added_buckets_threshold = 1;
    }

    map->buckets = qemu_memalign(QHT_BUCKET_ALIGN,
                                 sizeof(*map->buckets) * n_buckets);
    for (i = 0; i < n_buckets; i++) {
        qht_head_init(&map->buckets[i]);
    }
    return map;
}

void qht_init(struct qht *ht, size_t n_elems, unsigned int mode)
{
    struct qht_map *map;
    size_t n_buckets = qht_elems_to_buckets(n_elems);

    ht->mode = mode;
    qemu_mutex_init(&ht->lock);
    map = qht_map_create(n_buckets);
    atomic_rcu_set(&ht->map, map);
}
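
/*
 * Example (sketch): create a table sized for ~64K elements that resizes
 * itself under load:
 *
 *     struct qht ht;
 *
 *     qht_init(&ht, 1 << 16, QHT_MODE_AUTO_RESIZE);
 */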

/* call only when there are no readers/writers left */
void qht_destroy(struct qht *ht)
{
    qht_map_destroy(ht->map);
    memset(ht, 0, sizeof(*ht));
}

static void qht_bucket_reset__locked(struct qht_bucket *head)
{
    struct qht_bucket *b = head;
    int i;

    seqlock_write_begin(&head->sequence);
    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->pointers[i] == NULL) {
                goto done;
            }
            b->hashes[i] = 0;
            atomic_set(&b->pointers[i], NULL);
        }
        b = b->next;
    } while (b);
 done:
    seqlock_write_end(&head->sequence);
}

/* call with all bucket locks held */
static void qht_map_reset__all_locked(struct qht_map *map)
{
    size_t i;

    for (i = 0; i < map->n_buckets; i++) {
        qht_bucket_reset__locked(&map->buckets[i]);
    }
    qht_map_debug__all_locked(map);
}

void qht_reset(struct qht *ht)
{
    struct qht_map *map;

    qht_map_lock_buckets__no_stale(ht, &map);
    qht_map_reset__all_locked(map);
    qht_map_unlock_buckets(map);
}

bool qht_reset_size(struct qht *ht, size_t n_elems)
{
    struct qht_map *new;
    struct qht_map *map;
    size_t n_buckets;
    bool resize = false;

    n_buckets = qht_elems_to_buckets(n_elems);

    qemu_mutex_lock(&ht->lock);
    map = ht->map;
    if (n_buckets != map->n_buckets) {
        new = qht_map_create(n_buckets);
        resize = true;
    }

    qht_map_lock_buckets(map);
    qht_map_reset__all_locked(map);
    if (resize) {
        qht_do_resize(ht, new);
    }
    qht_map_unlock_buckets(map);
    qemu_mutex_unlock(&ht->lock);

    return resize;
}

static inline
void *qht_do_lookup(struct qht_bucket *head, qht_lookup_func_t func,
                    const void *userp, uint32_t hash)
{
    struct qht_bucket *b = head;
    int i;

    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->hashes[i] == hash) {
                /*
                 * The pointer is dereferenced before seqlock_read_retry,
                 * so (unlike qht_insert__locked) we need to use
                 * atomic_rcu_read here.
                 */
                void *p = atomic_rcu_read(&b->pointers[i]);

                if (likely(p) && likely(func(p, userp))) {
                    return p;
                }
            }
        }
        b = atomic_rcu_read(&b->next);
    } while (b);

    return NULL;
}

static __attribute__((noinline))
void *qht_lookup__slowpath(struct qht_bucket *b, qht_lookup_func_t func,
                           const void *userp, uint32_t hash)
{
    unsigned int version;
    void *ret;

    do {
        version = seqlock_read_begin(&b->sequence);
        ret = qht_do_lookup(b, func, userp, hash);
    } while (seqlock_read_retry(&b->sequence, version));
    return ret;
}

void *qht_lookup(struct qht *ht, qht_lookup_func_t func, const void *userp,
                 uint32_t hash)
{
    struct qht_bucket *b;
    struct qht_map *map;
    unsigned int version;
    void *ret;

    map = atomic_rcu_read(&ht->map);
    b = qht_map_to_bucket(map, hash);

    version = seqlock_read_begin(&b->sequence);
    ret = qht_do_lookup(b, func, userp, hash);
    if (likely(!seqlock_read_retry(&b->sequence, version))) {
        return ret;
    }
    /*
     * The seqlock detected a concurrent write: fall back to the out-of-line
     * slow path, which loops until it reads a consistent snapshot.
     */
    return qht_lookup__slowpath(b, func, userp, hash);
}
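
/*
 * Example (sketch): look up an element by key. The comparison function is
 * called on each candidate whose hash matches; "struct entry", "entry_cmp"
 * and "int_hash" below are hypothetical names, not part of the qht API.
 *
 *     struct entry {
 *         int key;
 *     };
 *
 *     static bool entry_cmp(const void *obj, const void *userp)
 *     {
 *         const struct entry *e = obj;
 *         const int *key = userp;
 *
 *         return e->key == *key;
 *     }
 *
 *     struct entry *entry_find(struct qht *ht, int key)
 *     {
 *         return qht_lookup(ht, entry_cmp, &key, int_hash(key));
 *     }
 *
 * Callers should be in an RCU read-side critical section, since a returned
 * pointer could otherwise be freed by a concurrent removal.
 */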

/* call with head->lock held */
static bool qht_insert__locked(struct qht *ht, struct qht_map *map,
                               struct qht_bucket *head, void *p, uint32_t hash,
                               bool *needs_resize)
{
    struct qht_bucket *b = head;
    struct qht_bucket *prev = NULL;
    struct qht_bucket *new = NULL;
    int i;

    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->pointers[i]) {
                if (unlikely(b->pointers[i] == p)) {
                    return false;
                }
            } else {
                goto found;
            }
        }
        prev = b;
        b = b->next;
    } while (b);

    /* the chain is full: allocate a new overflow bucket */
    b = qemu_memalign(QHT_BUCKET_ALIGN, sizeof(*b));
    memset(b, 0, sizeof(*b));
    new = b;
    i = 0;
    atomic_inc(&map->n_added_buckets);
    if (unlikely(qht_map_needs_resize(map)) && needs_resize) {
        *needs_resize = true;
    }

 found:
    /* found an empty slot: acquire the seqlock and write */
    seqlock_write_begin(&head->sequence);
    if (new) {
        atomic_rcu_set(&prev->next, b);
    }
    b->hashes[i] = hash;
    /* smp_wmb() implicit in seqlock_write_begin */
    atomic_set(&b->pointers[i], p);
    seqlock_write_end(&head->sequence);
    return true;
}

static __attribute__((noinline)) void qht_grow_maybe(struct qht *ht)
{
    struct qht_map *map;

    /*
     * If the lock is taken it probably means there's an ongoing resize,
     * so bail out.
     */
    if (qemu_mutex_trylock(&ht->lock)) {
        return;
    }
    map = ht->map;
    /* another thread might have just performed the resize we were after */
    if (qht_map_needs_resize(map)) {
        struct qht_map *new = qht_map_create(map->n_buckets * 2);

        qht_map_lock_buckets(map);
        qht_do_resize(ht, new);
        qht_map_unlock_buckets(map);
    }
    qemu_mutex_unlock(&ht->lock);
}

bool qht_insert(struct qht *ht, void *p, uint32_t hash)
{
    struct qht_bucket *b;
    struct qht_map *map;
    bool needs_resize = false;
    bool ret;

    /* NULL pointers are not supported */
    qht_debug_assert(p);

    b = qht_bucket_lock__no_stale(ht, hash, &map);
    ret = qht_insert__locked(ht, map, b, p, hash, &needs_resize);
    qht_bucket_debug__locked(b);
    qemu_spin_unlock(&b->lock);

    if (unlikely(needs_resize) && ht->mode & QHT_MODE_AUTO_RESIZE) {
        qht_grow_maybe(ht);
    }
    return ret;
}
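
/*
 * Example (sketch, continuing the hypothetical "struct entry" above):
 *
 *     bool entry_add(struct qht *ht, struct entry *e)
 *     {
 *         return qht_insert(ht, e, int_hash(e->key));
 *     }
 *
 * A false return means this exact pointer was already present; the same
 * hash value must be passed again when removing @e.
 */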

static inline bool qht_entry_is_last(struct qht_bucket *b, int pos)
{
    if (pos == QHT_BUCKET_ENTRIES - 1) {
        if (b->next == NULL) {
            return true;
        }
        return b->next->pointers[0] == NULL;
    }
    return b->pointers[pos + 1] == NULL;
}

static void
qht_entry_move(struct qht_bucket *to, int i, struct qht_bucket *from, int j)
{
    qht_debug_assert(!(to == from && i == j));
    qht_debug_assert(to->pointers[i]);
    qht_debug_assert(from->pointers[j]);

    to->hashes[i] = from->hashes[j];
    atomic_set(&to->pointers[i], from->pointers[j]);

    from->hashes[j] = 0;
    atomic_set(&from->pointers[j], NULL);
}

/*
 * Find the last valid entry in @orig's chain, and swap it with @orig[pos],
 * which has just been invalidated. This keeps valid entries packed at the
 * front of the chain.
 */
static inline void qht_bucket_remove_entry(struct qht_bucket *orig, int pos)
{
    struct qht_bucket *b = orig;
    struct qht_bucket *prev = NULL;
    int i;

    if (qht_entry_is_last(orig, pos)) {
        orig->hashes[pos] = 0;
        atomic_set(&orig->pointers[pos], NULL);
        return;
    }
    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->pointers[i]) {
                continue;
            }
            if (i > 0) {
                return qht_entry_move(orig, pos, b, i - 1);
            }
            qht_debug_assert(prev);
            return qht_entry_move(orig, pos, prev, QHT_BUCKET_ENTRIES - 1);
        }
        prev = b;
        b = b->next;
    } while (b);

    /* no free entries other than orig[pos], so swap it with the last one */
    qht_entry_move(orig, pos, prev, QHT_BUCKET_ENTRIES - 1);
}

/* call with head->lock held */
static inline
bool qht_remove__locked(struct qht_map *map, struct qht_bucket *head,
                        const void *p, uint32_t hash)
{
    struct qht_bucket *b = head;
    int i;

    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            void *q = b->pointers[i];

            if (unlikely(q == NULL)) {
                return false;
            }
            if (q == p) {
                qht_debug_assert(b->hashes[i] == hash);
                seqlock_write_begin(&head->sequence);
                qht_bucket_remove_entry(b, i);
                seqlock_write_end(&head->sequence);
                return true;
            }
        }
        b = b->next;
    } while (b);
    return false;
}

bool qht_remove(struct qht *ht, const void *p, uint32_t hash)
{
    struct qht_bucket *b;
    struct qht_map *map;
    bool ret;

    /* NULL pointers are not supported */
    qht_debug_assert(p);

    b = qht_bucket_lock__no_stale(ht, hash, &map);
    ret = qht_remove__locked(map, b, p, hash);
    qht_bucket_debug__locked(b);
    qemu_spin_unlock(&b->lock);
    return ret;
}
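
/*
 * Example (sketch): remove and reclaim. Concurrent lookups may still hold
 * the pointer, so defer freeing until after an RCU grace period; QEMU's
 * g_free_rcu() is one way to do that, assuming @e embeds a struct rcu_head
 * named "rcu":
 *
 *     void entry_del(struct qht *ht, struct entry *e)
 *     {
 *         if (qht_remove(ht, e, int_hash(e->key))) {
 *             g_free_rcu(e, rcu);
 *         }
 *     }
 */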

static inline void qht_bucket_iter(struct qht *ht, struct qht_bucket *b,
                                   qht_iter_func_t func, void *userp)
{
    int i;

    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->pointers[i] == NULL) {
                return;
            }
            func(ht, b->pointers[i], b->hashes[i], userp);
        }
        b = b->next;
    } while (b);
}

/* call with all of the map's locks held */
static inline void qht_map_iter__all_locked(struct qht *ht, struct qht_map *map,
                                            qht_iter_func_t func, void *userp)
{
    size_t i;

    for (i = 0; i < map->n_buckets; i++) {
        qht_bucket_iter(ht, &map->buckets[i], func, userp);
    }
}

void qht_iter(struct qht *ht, qht_iter_func_t func, void *userp)
{
    struct qht_map *map;

    map = atomic_rcu_read(&ht->map);
    qht_map_lock_buckets(map);
    /* @ht is passed through to @func; ht->map is not accessed from here on */
    qht_map_iter__all_locked(ht, map, func, userp);
    qht_map_unlock_buckets(map);
}
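
/*
 * Example (sketch): count all entries with an iterator. The callback runs
 * with every bucket lock held, so it must not call back into the table.
 * "count_entry" and "entry_count" are hypothetical names.
 *
 *     static void count_entry(struct qht *ht, void *p, uint32_t hash,
 *                             void *userp)
 *     {
 *         size_t *count = userp;
 *
 *         (*count)++;
 *     }
 *
 *     size_t entry_count(struct qht *ht)
 *     {
 *         size_t count = 0;
 *
 *         qht_iter(ht, count_entry, &count);
 *         return count;
 *     }
 */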

static void qht_map_copy(struct qht *ht, void *p, uint32_t hash, void *userp)
{
    struct qht_map *new = userp;
    struct qht_bucket *b = qht_map_to_bucket(new, hash);

    /* no need to acquire b->lock because no thread has seen this map yet */
    qht_insert__locked(ht, new, b, p, hash, NULL);
}

/*
 * Call with ht->lock and all bucket locks held.
 *
 * Creating the @new map here would add unnecessary delay while all the locks
 * are held; holding up the bucket locks is particularly bad, since no writes
 * can occur while they are held. Thus, we let callers create the new map,
 * hopefully without the bucket locks held.
 */
static void qht_do_resize(struct qht *ht, struct qht_map *new)
{
    struct qht_map *old;

    old = ht->map;
    g_assert_cmpuint(new->n_buckets, !=, old->n_buckets);

    qht_map_iter__all_locked(ht, old, qht_map_copy, new);
    qht_map_debug__all_locked(new);

    atomic_rcu_set(&ht->map, new);
    call_rcu(old, qht_map_destroy, rcu);
}

bool qht_resize(struct qht *ht, size_t n_elems)
{
    size_t n_buckets = qht_elems_to_buckets(n_elems);
    bool ret = false;

    qemu_mutex_lock(&ht->lock);
    if (n_buckets != ht->map->n_buckets) {
        struct qht_map *new;
        struct qht_map *old = ht->map;

        new = qht_map_create(n_buckets);
        qht_map_lock_buckets(old);
        qht_do_resize(ht, new);
        qht_map_unlock_buckets(old);
        ret = true;
    }
    qemu_mutex_unlock(&ht->lock);

    return ret;
}
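
/*
 * Example (sketch): pre-size the table before a bulk load so insertions do
 * not trigger incremental growth:
 *
 *     qht_resize(&ht, expected_elems);   (a no-op if already this size)
 */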

/* pass @stats to qht_statistics_destroy() when done */
void qht_statistics_init(struct qht *ht, struct qht_stats *stats)
{
    struct qht_map *map;
    size_t i;

    map = atomic_rcu_read(&ht->map);

    stats->used_head_buckets = 0;
    stats->entries = 0;
    qdist_init(&stats->chain);
    qdist_init(&stats->occupancy);

    if (unlikely(map == NULL)) {
        stats->head_buckets = 0;
        return;
    }
    stats->head_buckets = map->n_buckets;

    for (i = 0; i < map->n_buckets; i++) {
        struct qht_bucket *head = &map->buckets[i];
        struct qht_bucket *b;
        unsigned int version;
        size_t buckets;
        size_t entries;
        int j;

        do {
            version = seqlock_read_begin(&head->sequence);
            buckets = 0;
            entries = 0;
            b = head;
            do {
                for (j = 0; j < QHT_BUCKET_ENTRIES; j++) {
                    if (atomic_read(&b->pointers[j]) == NULL) {
                        break;
                    }
                    entries++;
                }
                buckets++;
                b = atomic_rcu_read(&b->next);
            } while (b);
        } while (seqlock_read_retry(&head->sequence, version));

        if (entries) {
            qdist_inc(&stats->chain, buckets);
            qdist_inc(&stats->occupancy,
                      (double)entries / QHT_BUCKET_ENTRIES / buckets);
            stats->used_head_buckets++;
            stats->entries += entries;
        } else {
            qdist_inc(&stats->occupancy, 0);
        }
    }
}

void qht_statistics_destroy(struct qht_stats *stats)
{
    qdist_destroy(&stats->occupancy);
    qdist_destroy(&stats->chain);
}
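
/*
 * Example (sketch): print basic occupancy information; this assumes
 * qdist_avg() from QEMU's qdist API, which struct qht_stats builds on:
 *
 *     struct qht_stats stats;
 *
 *     qht_statistics_init(&ht, &stats);
 *     printf("entries: %zu, head buckets used: %zu/%zu, avg chain: %f\n",
 *            stats.entries, stats.used_head_buckets, stats.head_buckets,
 *            qdist_avg(&stats.chain));
 *     qht_statistics_destroy(&stats);
 */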