qemu/util/hbitmap.c
/*
 * Hierarchical Bitmap Data Type
 *
 * Copyright Red Hat, Inc., 2012
 *
 * Author: Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later.  See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/hbitmap.h"
#include "qemu/host-utils.h"
#include "trace.h"
#include "crypto/hash.h"

/* HBitmaps provide an array of bits.  The bits are stored as usual in an
 * array of unsigned longs, but HBitmap is also optimized to provide fast
 * iteration over set bits; going from one bit to the next is O(logB n)
 * worst case, with B = sizeof(long) * CHAR_BIT: the result is low enough
 * that the number of levels is in fact fixed.
 *
 * In order to do this, it stacks multiple bitmaps with progressively coarser
 * granularity; in all levels except the last, bit N is set iff the N-th
 * unsigned long is nonzero in the immediately next level.  When iteration
 * completes on the last level it can examine the 2nd-last level to quickly
 * skip entire words, and even do so recursively to skip blocks of 64 words or
 * powers thereof (32 on 32-bit machines).
 *
 * Given an index in the bitmap, it can be split into groups of bits like
 * this (for the 64-bit case):
 *
 *   bits 0-57 => word in the last bitmap     | bits 58-63 => bit in the word
 *   bits 0-51 => word in the 2nd-last bitmap | bits 52-57 => bit in the word
 *   bits 0-45 => word in the 3rd-last bitmap | bits 46-51 => bit in the word
 *
 * So it is easy to move up simply by shifting the index right by
 * log2(BITS_PER_LONG) bits.  To move down, you shift the index left
 * similarly, and add the word index within the group.  Iteration uses
 * ffs (find first set bit) to find the next word to examine; this
 * operation can be done in constant time in most current architectures.
 *
 * When setting or clearing a range of m bits on all levels, the work to
 * perform is O(m + m/W + m/W^2 + ...), which is O(m) like on a regular
 * bitmap.
 *
 * When iterating on a bitmap, each bit (on any level) is only visited
 * once.  Hence, the total cost of visiting a bitmap with m bits in it is
 * the number of bits that are set in all bitmaps.  Unless the bitmap is
 * extremely sparse, this is also O(m + m/W + m/W^2 + ...), so the amortized
 * cost of advancing from one bit to the next is usually constant (worst case
 * O(logB n) as in the non-amortized complexity).
 */
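
/* As an illustrative sketch (not part of the QEMU API), moving between
 * adjacent levels is just a matter of shifting by BITS_PER_LEVEL, which
 * is log2(BITS_PER_LONG):
 *
 *     uint64_t up(uint64_t pos)   { return pos >> BITS_PER_LEVEL; }
 *     uint64_t down(uint64_t pos, int bit)
 *     {
 *         return (pos << BITS_PER_LEVEL) + bit;
 *     }
 *
 * For example, with 64-bit longs, bit 1000 of the last level lives in
 * word 1000 >> 6 == 15, at bit offset 1000 & 63 == 40; word 15 is in
 * turn tracked by bit 15 of word 0 in the 2nd-last level.
 */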

struct HBitmap {
    /* Number of total bits in the bottom level.  */
    uint64_t size;

    /* Number of set bits in the bottom level.  */
    uint64_t count;

    /* A scaling factor.  Given a granularity of G, each bit in the bitmap
     * will actually represent a group of 2^G elements.  Each operation on a
     * range of bits first rounds the bits to determine which group they land
     * in, and then affects the entire group; iteration will only visit the
     * first bit of each group.  Here is an example of operations in a
     * size-16, granularity-1 HBitmap (see also the sketch after this
     * struct):
     *
     *    initial state            00000000
     *    set(start=0, count=9)    11111000 (iter: 0, 2, 4, 6, 8)
     *    reset(start=1, count=3)  00111000 (iter: 4, 6, 8)
     *    set(start=9, count=2)    00111100 (iter: 4, 6, 8, 10)
     *    reset(start=5, count=5)  00000000
     *
     * From an implementation point of view, when setting or resetting bits,
     * the bitmap will scale bit numbers right by this amount of bits.  When
     * iterating, the bitmap will scale bit numbers left by this amount of
     * bits.
     */
    int granularity;

    /* A meta dirty bitmap to track the dirtiness of bits in this HBitmap. */
    HBitmap *meta;

    /* A number of progressively less coarse bitmaps (i.e. level 0 is the
     * coarsest).  Each bit in level N represents a word in level N+1 that
     * has a set bit, except the last level where each bit represents the
     * actual bitmap.
     *
     * Note that all bitmaps have the same number of levels.  Even a 1-bit
     * bitmap will still allocate HBITMAP_LEVELS arrays.
     */
    unsigned long *levels[HBITMAP_LEVELS];

    /* The length of each levels[] array. */
    uint64_t sizes[HBITMAP_LEVELS];
};
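
/* A minimal usage sketch of the granularity scaling above (illustrative
 * only; it assumes the public API from "qemu/hbitmap.h" and mirrors the
 * size-16, granularity-1 table in the comment):
 *
 *     HBitmap *map = hbitmap_alloc(16, 1);   // 16 elements, groups of 2
 *     hbitmap_set(map, 0, 9);      // touches groups 0..4, elements 0..9
 *     assert(hbitmap_get(map, 9)); // element 9 shares group 4 with element 8
 *     assert(hbitmap_count(map) == 10);  // count is scaled back to elements
 *     hbitmap_free(map);
 */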

/* Advance hbi to the next nonzero word and return it.  hbi->pos
 * is updated.  Returns zero if we reach the end of the bitmap.
 */
unsigned long hbitmap_iter_skip_words(HBitmapIter *hbi)
{
    size_t pos = hbi->pos;
    const HBitmap *hb = hbi->hb;
    unsigned i = HBITMAP_LEVELS - 1;

    unsigned long cur;
    do {
        i--;
        pos >>= BITS_PER_LEVEL;
        cur = hbi->cur[i] & hb->levels[i][pos];
    } while (cur == 0);

    /* Check for end of iteration.  We always use fewer than BITS_PER_LONG
     * bits in the level 0 bitmap; thus we can repurpose the most significant
     * bit as a sentinel.  The sentinel is set in hbitmap_alloc and ensures
     * that the above loop ends even without an explicit check on i.
     */

    if (i == 0 && cur == (1UL << (BITS_PER_LONG - 1))) {
        return 0;
    }
    for (; i < HBITMAP_LEVELS - 1; i++) {
        /* Shift back pos to the left, matching the right shifts above.
         * The index of this word's least significant set bit provides
         * the low-order bits.
         */
        assert(cur);
        pos = (pos << BITS_PER_LEVEL) + ctzl(cur);
        hbi->cur[i] = cur & (cur - 1);

        /* Set up next level for iteration.  */
        cur = hb->levels[i + 1][pos];
    }

    hbi->pos = pos;
    trace_hbitmap_iter_skip_words(hbi->hb, hbi, pos, cur);

    assert(cur);
    return cur;
}

int64_t hbitmap_iter_next(HBitmapIter *hbi)
{
    unsigned long cur = hbi->cur[HBITMAP_LEVELS - 1] &
            hbi->hb->levels[HBITMAP_LEVELS - 1][hbi->pos];
    int64_t item;

    if (cur == 0) {
        cur = hbitmap_iter_skip_words(hbi);
        if (cur == 0) {
            return -1;
        }
    }

    /* The next call will resume work from the next bit.  */
    hbi->cur[HBITMAP_LEVELS - 1] = cur & (cur - 1);
    item = ((uint64_t)hbi->pos << BITS_PER_LEVEL) + ctzl(cur);

    return item << hbi->granularity;
}

void hbitmap_iter_init(HBitmapIter *hbi, const HBitmap *hb, uint64_t first)
{
    unsigned i, bit;
    uint64_t pos;

    hbi->hb = hb;
    pos = first >> hb->granularity;
    assert(pos < hb->size);
    hbi->pos = pos >> BITS_PER_LEVEL;
    hbi->granularity = hb->granularity;

    for (i = HBITMAP_LEVELS; i-- > 0; ) {
        bit = pos & (BITS_PER_LONG - 1);
        pos >>= BITS_PER_LEVEL;

        /* Drop bits representing items before first.  */
        hbi->cur[i] = hb->levels[i][pos] & ~((1UL << bit) - 1);

        /* We have already added level i+1, so the lowest set bit has
         * been processed.  Clear it.
         */
        if (i != HBITMAP_LEVELS - 1) {
            hbi->cur[i] &= ~(1UL << bit);
        }
    }
}
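
/* The canonical iteration pattern (an illustrative sketch, assuming only
 * the declarations in "qemu/hbitmap.h"): initialize the iterator at some
 * starting element, then call hbitmap_iter_next() until it returns -1.
 * Returned values are element numbers, already scaled by the granularity:
 *
 *     HBitmapIter hbi;
 *     int64_t item;
 *
 *     hbitmap_iter_init(&hbi, map, 0);
 *     while ((item = hbitmap_iter_next(&hbi)) != -1) {
 *         printf("dirty at %" PRId64 "\n", item);
 *     }
 */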

int64_t hbitmap_next_zero(const HBitmap *hb, uint64_t start)
{
    size_t pos = (start >> hb->granularity) >> BITS_PER_LEVEL;
    unsigned long *last_lev = hb->levels[HBITMAP_LEVELS - 1];
    uint64_t sz = hb->sizes[HBITMAP_LEVELS - 1];
    unsigned long cur = last_lev[pos];
    unsigned start_bit_offset =
            (start >> hb->granularity) & (BITS_PER_LONG - 1);
    int64_t res;

    cur |= (1UL << start_bit_offset) - 1;
    assert((start >> hb->granularity) < hb->size);

    if (cur == (unsigned long)-1) {
        do {
            pos++;
        } while (pos < sz && last_lev[pos] == (unsigned long)-1);

        if (pos >= sz) {
            return -1;
        }

        cur = last_lev[pos];
    }

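    /* ctol() counts trailing ones (see "qemu/host-utils.h"), so this is
     * the position of the first zero bit in cur.  */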
    res = (pos << BITS_PER_LEVEL) + ctol(cur);
    if (res >= hb->size) {
        return -1;
    }

    res = res << hb->granularity;
    if (res < start) {
        assert(((start - res) >> hb->granularity) == 0);
        return start;
    }

    return res;
}
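
/* An illustrative sketch of scanning for the first clear element at or
 * after a given point (assuming only the declarations in
 * "qemu/hbitmap.h"); -1 means no clear element was found in the bitmap:
 *
 *     int64_t next = hbitmap_next_zero(map, start);
 *     if (next < 0) {
 *         // every element from start onward is set
 *     }
 */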

bool hbitmap_empty(const HBitmap *hb)
{
    return hb->count == 0;
}

int hbitmap_granularity(const HBitmap *hb)
{
    return hb->granularity;
}

uint64_t hbitmap_count(const HBitmap *hb)
{
    return hb->count << hb->granularity;
}

/* Count the number of set bits between start and last (inclusive), not
 * accounting for the granularity.  Also an example of how to use
 * hbitmap_iter_next_word.
 */
static uint64_t hb_count_between(HBitmap *hb, uint64_t start, uint64_t last)
{
    HBitmapIter hbi;
    uint64_t count = 0;
    uint64_t end = last + 1;
    unsigned long cur;
    size_t pos;

    hbitmap_iter_init(&hbi, hb, start << hb->granularity);
    for (;;) {
        pos = hbitmap_iter_next_word(&hbi, &cur);
        if (pos >= (end >> BITS_PER_LEVEL)) {
            break;
        }
        count += ctpopl(cur);
    }

    if (pos == (end >> BITS_PER_LEVEL)) {
        /* Drop bits representing the END-th and subsequent items.  */
        int bit = end & (BITS_PER_LONG - 1);
        cur &= (1UL << bit) - 1;
        count += ctpopl(cur);
    }

    return count;
}

/* Setting starts at the last layer and propagates up if an element
 * changes.
 */
static inline bool hb_set_elem(unsigned long *elem, uint64_t start,
                               uint64_t last)
{
    unsigned long mask;
    unsigned long old;

    assert((last >> BITS_PER_LEVEL) == (start >> BITS_PER_LEVEL));
    assert(start <= last);

    mask = 2UL << (last & (BITS_PER_LONG - 1));
    mask -= 1UL << (start & (BITS_PER_LONG - 1));
    old = *elem;
    *elem |= mask;
    return old != *elem;
}
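
/* To make the mask arithmetic above concrete (a worked example, not extra
 * API): for start = 2 and last = 5 within one word,
 *
 *     2UL << 5   = 0b01000000
 *     1UL << 2   = 0b00000100
 *     mask       = 0b01000000 - 0b00000100 = 0b00111100
 *
 * i.e. bits 2..5 inclusive.  Writing the mask as (2UL << last) minus
 * (1UL << start) avoids undefined behaviour when last == BITS_PER_LONG - 1,
 * since the shift count never reaches BITS_PER_LONG; the unsigned wraparound
 * in that case still yields the correct mask.
 */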

/* The recursive workhorse (the depth is limited to HBITMAP_LEVELS)...
 * Returns true if at least one bit is changed. */
static bool hb_set_between(HBitmap *hb, int level, uint64_t start,
                           uint64_t last)
{
    size_t pos = start >> BITS_PER_LEVEL;
    size_t lastpos = last >> BITS_PER_LEVEL;
    bool changed = false;
    size_t i;

    i = pos;
    if (i < lastpos) {
        uint64_t next = (start | (BITS_PER_LONG - 1)) + 1;
        changed |= hb_set_elem(&hb->levels[level][i], start, next - 1);
        for (;;) {
            start = next;
            next += BITS_PER_LONG;
            if (++i == lastpos) {
                break;
            }
            changed |= (hb->levels[level][i] == 0);
            hb->levels[level][i] = ~0UL;
        }
    }
    changed |= hb_set_elem(&hb->levels[level][i], start, last);

    /* If there was any change in this layer, we may have to update
     * the one above.
     */
    if (level > 0 && changed) {
        hb_set_between(hb, level - 1, pos, lastpos);
    }
    return changed;
}

void hbitmap_set(HBitmap *hb, uint64_t start, uint64_t count)
{
    /* Compute range in the last layer.  */
    uint64_t first, n;
    uint64_t last = start + count - 1;

    trace_hbitmap_set(hb, start, count,
                      start >> hb->granularity, last >> hb->granularity);

    first = start >> hb->granularity;
    last >>= hb->granularity;
    assert(last < hb->size);
    n = last - first + 1;

    hb->count += n - hb_count_between(hb, first, last);
    if (hb_set_between(hb, HBITMAP_LEVELS - 1, first, last) &&
        hb->meta) {
        hbitmap_set(hb->meta, start, count);
    }
}

/* Resetting works the other way round: propagate up if the new
 * value is zero.
 */
static inline bool hb_reset_elem(unsigned long *elem, uint64_t start,
                                 uint64_t last)
{
    unsigned long mask;
    bool blanked;

    assert((last >> BITS_PER_LEVEL) == (start >> BITS_PER_LEVEL));
    assert(start <= last);

    mask = 2UL << (last & (BITS_PER_LONG - 1));
    mask -= 1UL << (start & (BITS_PER_LONG - 1));
    blanked = *elem != 0 && ((*elem & ~mask) == 0);
    *elem &= ~mask;
    return blanked;
}

/* The recursive workhorse (the depth is limited to HBITMAP_LEVELS)...
 * Returns true if at least one bit is changed. */
static bool hb_reset_between(HBitmap *hb, int level, uint64_t start,
                             uint64_t last)
{
    size_t pos = start >> BITS_PER_LEVEL;
    size_t lastpos = last >> BITS_PER_LEVEL;
    bool changed = false;
    size_t i;

    i = pos;
    if (i < lastpos) {
        uint64_t next = (start | (BITS_PER_LONG - 1)) + 1;

        /* Here we need a more complex test than when setting bits.  Even if
         * something was changed, we must not blank bits in the upper level
         * unless the lower-level word became entirely zero.  So, remove pos
         * from the upper-level range if bits remain set.
         */
        if (hb_reset_elem(&hb->levels[level][i], start, next - 1)) {
            changed = true;
        } else {
            pos++;
        }

        for (;;) {
            start = next;
            next += BITS_PER_LONG;
            if (++i == lastpos) {
                break;
            }
            changed |= (hb->levels[level][i] != 0);
            hb->levels[level][i] = 0UL;
        }
    }

    /* Same as above, this time for lastpos.  */
    if (hb_reset_elem(&hb->levels[level][i], start, last)) {
        changed = true;
    } else {
        lastpos--;
    }

    if (level > 0 && changed) {
        hb_reset_between(hb, level - 1, pos, lastpos);
    }

    return changed;
}

void hbitmap_reset(HBitmap *hb, uint64_t start, uint64_t count)
{
    /* Compute range in the last layer.  */
    uint64_t first;
    uint64_t last = start + count - 1;

    trace_hbitmap_reset(hb, start, count,
                        start >> hb->granularity, last >> hb->granularity);

    first = start >> hb->granularity;
    last >>= hb->granularity;
    assert(last < hb->size);

    hb->count -= hb_count_between(hb, first, last);
    if (hb_reset_between(hb, HBITMAP_LEVELS - 1, first, last) &&
        hb->meta) {
        hbitmap_set(hb->meta, start, count);
    }
}

void hbitmap_reset_all(HBitmap *hb)
{
    unsigned int i;

    /* Same as hbitmap_alloc() except for memset() instead of malloc() */
    for (i = HBITMAP_LEVELS; --i >= 1; ) {
        memset(hb->levels[i], 0, hb->sizes[i] * sizeof(unsigned long));
    }

    hb->levels[0][0] = 1UL << (BITS_PER_LONG - 1);
    hb->count = 0;
}

bool hbitmap_is_serializable(const HBitmap *hb)
{
    /* Every serialized chunk must be aligned to 64 bits so that endianness
     * requirements can be fulfilled on both 64 bit and 32 bit hosts.
     * We have hbitmap_serialization_align() which converts this
     * alignment requirement from bitmap bits to items covered (e.g. sectors).
     * That value is:
     *    64 << hb->granularity
     * Since this value must not exceed UINT64_MAX, hb->granularity must be
     * less than 58 (== 64 - 6, where 6 is ld(64), i.e. 1 << 6 == 64).
     *
     * In order for hbitmap_serialization_align() to always return a
     * meaningful value, bitmaps that are to be serialized must have a
     * granularity of less than 58. */

    return hb->granularity < 58;
}

bool hbitmap_get(const HBitmap *hb, uint64_t item)
{
    /* Compute position and bit in the last layer.  */
    uint64_t pos = item >> hb->granularity;
    unsigned long bit = 1UL << (pos & (BITS_PER_LONG - 1));
    assert(pos < hb->size);

    return (hb->levels[HBITMAP_LEVELS - 1][pos >> BITS_PER_LEVEL] & bit) != 0;
}

uint64_t hbitmap_serialization_align(const HBitmap *hb)
{
    assert(hbitmap_is_serializable(hb));

    /* Require at least 64 bit granularity to be safe on both 64 bit and 32 bit
     * hosts. */
    return UINT64_C(64) << hb->granularity;
}

/* Start should be aligned to serialization granularity, chunk size should be
 * aligned to serialization granularity too, except for last chunk.
 */
static void serialization_chunk(const HBitmap *hb,
                                uint64_t start, uint64_t count,
                                unsigned long **first_el, uint64_t *el_count)
{
    uint64_t last = start + count - 1;
    uint64_t gran = hbitmap_serialization_align(hb);

    assert((start & (gran - 1)) == 0);
    assert((last >> hb->granularity) < hb->size);
    if ((last >> hb->granularity) != hb->size - 1) {
        assert((count & (gran - 1)) == 0);
    }

    start = (start >> hb->granularity) >> BITS_PER_LEVEL;
    last = (last >> hb->granularity) >> BITS_PER_LEVEL;

    *first_el = &hb->levels[HBITMAP_LEVELS - 1][start];
    *el_count = last - start + 1;
}

uint64_t hbitmap_serialization_size(const HBitmap *hb,
                                    uint64_t start, uint64_t count)
{
    uint64_t el_count;
    unsigned long *cur;

    if (!count) {
        return 0;
    }
    serialization_chunk(hb, start, count, &cur, &el_count);

    return el_count * sizeof(unsigned long);
}

void hbitmap_serialize_part(const HBitmap *hb, uint8_t *buf,
                            uint64_t start, uint64_t count)
{
    uint64_t el_count;
    unsigned long *cur, *end;

    if (!count) {
        return;
    }
    serialization_chunk(hb, start, count, &cur, &el_count);
    end = cur + el_count;

    while (cur != end) {
        unsigned long el =
            (BITS_PER_LONG == 32 ? cpu_to_le32(*cur) : cpu_to_le64(*cur));

        memcpy(buf, &el, sizeof(el));
        buf += sizeof(el);
        cur++;
    }
}

void hbitmap_deserialize_part(HBitmap *hb, uint8_t *buf,
                              uint64_t start, uint64_t count,
                              bool finish)
{
    uint64_t el_count;
    unsigned long *cur, *end;

    if (!count) {
        return;
    }
    serialization_chunk(hb, start, count, &cur, &el_count);
    end = cur + el_count;

    while (cur != end) {
        memcpy(cur, buf, sizeof(*cur));

        if (BITS_PER_LONG == 32) {
            le32_to_cpus((uint32_t *)cur);
        } else {
            le64_to_cpus((uint64_t *)cur);
        }

        buf += sizeof(unsigned long);
        cur++;
    }
    if (finish) {
        hbitmap_deserialize_finish(hb);
    }
}

void hbitmap_deserialize_zeroes(HBitmap *hb, uint64_t start, uint64_t count,
                                bool finish)
{
    uint64_t el_count;
    unsigned long *first;

    if (!count) {
        return;
    }
    serialization_chunk(hb, start, count, &first, &el_count);

    memset(first, 0, el_count * sizeof(unsigned long));
    if (finish) {
        hbitmap_deserialize_finish(hb);
    }
}

void hbitmap_deserialize_ones(HBitmap *hb, uint64_t start, uint64_t count,
                              bool finish)
{
    uint64_t el_count;
    unsigned long *first;

    if (!count) {
        return;
    }
    serialization_chunk(hb, start, count, &first, &el_count);

    memset(first, 0xff, el_count * sizeof(unsigned long));
    if (finish) {
        hbitmap_deserialize_finish(hb);
    }
}

void hbitmap_deserialize_finish(HBitmap *bitmap)
{
    int64_t i, size, prev_size;
    int lev;

    /* restore levels starting from penultimate to zero level, assuming
     * that the last level is ok */
    size = MAX((bitmap->size + BITS_PER_LONG - 1) >> BITS_PER_LEVEL, 1);
    for (lev = HBITMAP_LEVELS - 1; lev-- > 0; ) {
        prev_size = size;
        size = MAX((size + BITS_PER_LONG - 1) >> BITS_PER_LEVEL, 1);
        memset(bitmap->levels[lev], 0, size * sizeof(unsigned long));

        for (i = 0; i < prev_size; ++i) {
            if (bitmap->levels[lev + 1][i]) {
                bitmap->levels[lev][i >> BITS_PER_LEVEL] |=
                    1UL << (i & (BITS_PER_LONG - 1));
            }
        }
    }

    bitmap->levels[0][0] |= 1UL << (BITS_PER_LONG - 1);
    bitmap->count = hb_count_between(bitmap, 0, bitmap->size - 1);
}
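
/* A round-trip sketch of the serialization API above (illustrative only,
 * assuming the declarations in "qemu/hbitmap.h"; "src", "dst" and "out"
 * are hypothetical names).  Chunks must be aligned to
 * hbitmap_serialization_align(), and the receiver rebuilds the upper
 * levels once at the end via hbitmap_deserialize_finish():
 *
 *     uint64_t count = ...;   // a multiple of the serialization alignment
 *     uint64_t size = hbitmap_serialization_size(src, 0, count);
 *     uint8_t *out = g_malloc(size);
 *
 *     hbitmap_serialize_part(src, out, 0, count);
 *     hbitmap_deserialize_part(dst, out, 0, count, true);  // finish == true
 *     g_free(out);
 */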

void hbitmap_free(HBitmap *hb)
{
    unsigned i;
    assert(!hb->meta);
    for (i = HBITMAP_LEVELS; i-- > 0; ) {
        g_free(hb->levels[i]);
    }
    g_free(hb);
}

HBitmap *hbitmap_alloc(uint64_t size, int granularity)
{
    HBitmap *hb = g_new0(struct HBitmap, 1);
    unsigned i;

    assert(granularity >= 0 && granularity < 64);
    size = (size + (1ULL << granularity) - 1) >> granularity;
    assert(size <= ((uint64_t)1 << HBITMAP_LOG_MAX_SIZE));

    hb->size = size;
    hb->granularity = granularity;
    for (i = HBITMAP_LEVELS; i-- > 0; ) {
        size = MAX((size + BITS_PER_LONG - 1) >> BITS_PER_LEVEL, 1);
        hb->sizes[i] = size;
        hb->levels[i] = g_new0(unsigned long, size);
    }

    /* We necessarily have free bits in level 0 due to the definition
     * of HBITMAP_LEVELS, so use one for a sentinel.  This speeds up
     * hbitmap_iter_skip_words.
     */
    assert(size == 1);
    hb->levels[0][0] |= 1UL << (BITS_PER_LONG - 1);
    return hb;
}

void hbitmap_truncate(HBitmap *hb, uint64_t size)
{
    bool shrink;
    unsigned i;
    uint64_t num_elements = size;
    uint64_t old;

    /* Size comes in as logical elements, adjust for granularity. */
    size = (size + (1ULL << hb->granularity) - 1) >> hb->granularity;
    assert(size <= ((uint64_t)1 << HBITMAP_LOG_MAX_SIZE));
    shrink = size < hb->size;

    /* Bit sizes are identical; nothing to do. */
    if (size == hb->size) {
        return;
    }

    /* If we're losing bits, let's clear those bits before we invalidate all of
     * our invariants.  This helps keep the bit count consistent, and will
     * prevent us from carrying around garbage bits beyond the end of the map.
     */
    if (shrink) {
        /* Don't clear partial granularity groups;
         * start at the first full one. */
        uint64_t start = ROUND_UP(num_elements, UINT64_C(1) << hb->granularity);
        uint64_t fix_count = (hb->size << hb->granularity) - start;

        assert(fix_count);
        hbitmap_reset(hb, start, fix_count);
    }

    hb->size = size;
    for (i = HBITMAP_LEVELS; i-- > 0; ) {
        size = MAX(BITS_TO_LONGS(size), 1);
        if (hb->sizes[i] == size) {
            break;
        }
        old = hb->sizes[i];
        hb->sizes[i] = size;
        hb->levels[i] = g_realloc(hb->levels[i], size * sizeof(unsigned long));
        if (!shrink) {
            memset(&hb->levels[i][old], 0x00,
                   (size - old) * sizeof(*hb->levels[i]));
        }
    }
    if (hb->meta) {
        hbitmap_truncate(hb->meta, hb->size << hb->granularity);
    }
}

/**
 * Given HBitmaps A and B, let A := A (BITOR) B.
 * Bitmap B will not be modified.
 *
 * @return true if the merge was successful,
 *         false if it was not attempted.
 */
bool hbitmap_merge(HBitmap *a, const HBitmap *b)
{
    int i;
    uint64_t j;

    if ((a->size != b->size) || (a->granularity != b->granularity)) {
        return false;
    }

    if (hbitmap_count(b) == 0) {
        return true;
    }

    /* This merge is O(size), as BITS_PER_LONG and HBITMAP_LEVELS are constant.
     * It may be possible to improve running times for sparsely populated maps
     * by using hbitmap_iter_next, but this is suboptimal for dense maps.
     */
    for (i = HBITMAP_LEVELS - 1; i >= 0; i--) {
        for (j = 0; j < a->sizes[i]; j++) {
            a->levels[i][j] |= b->levels[i][j];
        }
    }

    return true;
}
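
/* Illustrative use of the merge API (a sketch; both bitmaps must have been
 * created with the same size and granularity, or the call is refused):
 *
 *     if (!hbitmap_merge(a, b)) {
 *         // size/granularity mismatch: nothing was merged
 *     }
 */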

HBitmap *hbitmap_create_meta(HBitmap *hb, int chunk_size)
{
    assert(!(chunk_size & (chunk_size - 1)));
    assert(!hb->meta);
    hb->meta = hbitmap_alloc(hb->size << hb->granularity,
                             hb->granularity + ctz32(chunk_size));
    return hb->meta;
}
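
/* A sketch of the meta (dirty-of-dirty) bitmap above, assuming only this
 * file's API ("size" is a hypothetical element count): each meta bit covers
 * chunk_size bits of hb, and is set whenever hbitmap_set()/hbitmap_reset()
 * actually changes one of them:
 *
 *     HBitmap *map = hbitmap_alloc(size, 0);
 *     HBitmap *meta = hbitmap_create_meta(map, 64);  // power-of-2 chunk
 *
 *     hbitmap_set(map, 0, 1);          // also dirties the covering meta bit
 *     assert(hbitmap_get(meta, 0));
 *     hbitmap_free_meta(map);          // must precede hbitmap_free(map)
 *     hbitmap_free(map);
 */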

void hbitmap_free_meta(HBitmap *hb)
{
    assert(hb->meta);
    hbitmap_free(hb->meta);
    hb->meta = NULL;
}

char *hbitmap_sha256(const HBitmap *bitmap, Error **errp)
{
    size_t size = bitmap->sizes[HBITMAP_LEVELS - 1] * sizeof(unsigned long);
    char *data = (char *)bitmap->levels[HBITMAP_LEVELS - 1];
    char *hash = NULL;
    qcrypto_hash_digest(QCRYPTO_HASH_ALG_SHA256, data, size, &hash, errp);

    return hash;
}