/*
 * Hierarchical Bitmap Data Type
 *
 * Copyright Red Hat, Inc., 2012
 *
 * Author: Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later. See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/hbitmap.h"
#include "qemu/host-utils.h"
#include "trace.h"
#include "crypto/hash.h"

/* HBitmaps provide an array of bits. The bits are stored as usual in an
 * array of unsigned longs, but HBitmap is also optimized to provide fast
 * iteration over set bits; going from one bit to the next is O(logB n)
 * worst case, with B = sizeof(long) * CHAR_BIT: the result is low enough
 * that the number of levels is in fact fixed.
 *
 * In order to do this, it stacks multiple bitmaps with progressively coarser
 * granularity; in all levels except the last, bit N is set iff the N-th
 * unsigned long is nonzero in the immediately next level. When iteration
 * completes on the last level it can examine the 2nd-last level to quickly
 * skip entire words, and even do so recursively to skip blocks of 64 words or
 * powers thereof (32 on 32-bit machines).
 *
 * Given an index in the bitmap, it can be split into groups of bits like
 * this (for the 64-bit case):
 *
 * bits 0-57 => word in the last bitmap | bits 58-63 => bit in the word
 * bits 0-51 => word in the 2nd-last bitmap | bits 52-57 => bit in the word
 * bits 0-45 => word in the 3rd-last bitmap | bits 46-51 => bit in the word
 *
 * So it is easy to move up simply by shifting the index right by
 * log2(BITS_PER_LONG) bits. To move down, you shift the index left
 * similarly, and add the word index within the group. Iteration uses
 * ffs (find first set bit) to find the next word to examine; this
 * operation can be done in constant time in most current architectures.
 *
 * When setting or clearing a range of m bits, the work to perform on all
 * levels is O(m + m/W + m/W^2 + ...), which is O(m) just like on a regular
 * bitmap.
 *
 * When iterating on a bitmap, each bit (on any level) is only visited
 * once. Hence, the total cost of visiting a bitmap with m bits in it is
 * the number of bits that are set in all bitmaps. Unless the bitmap is
 * extremely sparse, this is also O(m + m/W + m/W^2 + ...), so the amortized
 * cost of advancing from one bit to the next is usually constant (worst case
 * O(logB n) as in the non-amortized complexity).
 */
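
/*
 * Illustrative sketch, not part of the original file: the hypothetical
 * helper below merely re-derives the index arithmetic described above,
 * assuming a 64-bit host (BITS_PER_LEVEL == 6).
 */
static void G_GNUC_UNUSED hbitmap_example_split(uint64_t index)
{
    uint64_t word = index >> BITS_PER_LEVEL;      /* word within this level */
    unsigned bit = index & (BITS_PER_LONG - 1);   /* bit within that word */

    /* The same 'word' value indexes the tracking bit one level up. */
    printf("index %" PRIu64 " -> word %" PRIu64 ", bit %u\n",
           index, word, bit);
}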

struct HBitmap {
    /*
     * Size of the bitmap, as requested in hbitmap_alloc or in hbitmap_truncate.
     */
    uint64_t orig_size;

    /* Number of total bits in the bottom level. */
    uint64_t size;

    /* Number of set bits in the bottom level. */
    uint64_t count;

    /* A scaling factor. Given a granularity of G, each bit in the bitmap
     * actually represents a group of 2^G elements. Each operation on a
     * range of bits first rounds the bits to determine which group they land
     * in, and then affects the entire group; iteration will only visit the
     * first bit of each group. Here is an example of operations in a
     * size-16, granularity-1 HBitmap:
     *
     *    initial state            00000000
     *    set(start=0, count=9)    11111000 (iter: 0, 2, 4, 6, 8)
     *    reset(start=1, count=3)  00111000 (iter: 4, 6, 8)
     *    set(start=9, count=2)    00111100 (iter: 4, 6, 8, 10)
     *    reset(start=5, count=5)  00000000
     *
     * From an implementation point of view, when setting or resetting bits,
     * the bitmap will scale bit numbers right by this amount of bits. When
     * iterating, the bitmap will scale bit numbers left by this amount of
     * bits.
     */
    int granularity;

    /* A meta dirty bitmap to track the dirtiness of bits in this HBitmap. */
    HBitmap *meta;

    /* A number of progressively less coarse bitmaps (i.e. level 0 is the
     * coarsest). Each bit in level N represents a word in level N+1 that
     * has a set bit, except the last level where each bit represents the
     * actual bitmap.
     *
     * Note that all bitmaps have the same number of levels. Even a 1-bit
     * bitmap will still allocate HBITMAP_LEVELS arrays.
     */
    unsigned long *levels[HBITMAP_LEVELS];

    /* The length of each levels[] array. */
    uint64_t sizes[HBITMAP_LEVELS];
};

/* Advance hbi to the next nonzero word and return it. hbi->pos
 * is updated. Returns zero if we reach the end of the bitmap.
 */
static unsigned long hbitmap_iter_skip_words(HBitmapIter *hbi)
{
    size_t pos = hbi->pos;
    const HBitmap *hb = hbi->hb;
    unsigned i = HBITMAP_LEVELS - 1;

    unsigned long cur;
    do {
        i--;
        pos >>= BITS_PER_LEVEL;
        cur = hbi->cur[i] & hb->levels[i][pos];
    } while (cur == 0);

    /* Check for end of iteration. We always use fewer than BITS_PER_LONG
     * bits in the level 0 bitmap; thus we can repurpose the most significant
     * bit as a sentinel. The sentinel is set in hbitmap_alloc and ensures
     * that the above loop ends even without an explicit check on i.
     */

    if (i == 0 && cur == (1UL << (BITS_PER_LONG - 1))) {
        return 0;
    }
    for (; i < HBITMAP_LEVELS - 1; i++) {
        /* Shift back pos to the left, matching the right shifts above.
         * The index of this word's least significant set bit provides
         * the low-order bits.
         */
        assert(cur);
        pos = (pos << BITS_PER_LEVEL) + ctzl(cur);
        hbi->cur[i] = cur & (cur - 1);

        /* Set up next level for iteration. */
        cur = hb->levels[i + 1][pos];
    }

    hbi->pos = pos;
    trace_hbitmap_iter_skip_words(hbi->hb, hbi, pos, cur);

    assert(cur);
    return cur;
}

int64_t hbitmap_iter_next(HBitmapIter *hbi)
{
    unsigned long cur = hbi->cur[HBITMAP_LEVELS - 1] &
            hbi->hb->levels[HBITMAP_LEVELS - 1][hbi->pos];
    int64_t item;

    if (cur == 0) {
        cur = hbitmap_iter_skip_words(hbi);
        if (cur == 0) {
            return -1;
        }
    }

    /* The next call will resume work from the next bit. */
    hbi->cur[HBITMAP_LEVELS - 1] = cur & (cur - 1);
    item = ((uint64_t)hbi->pos << BITS_PER_LEVEL) + ctzl(cur);

    return item << hbi->granularity;
}
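
/*
 * Illustrative sketch (the helper name is hypothetical): the typical
 * iteration loop over all set bits, using the two functions above.
 * hbitmap_iter_next() returns positions already scaled by the granularity,
 * and -1 once the bitmap is exhausted.
 */
static void G_GNUC_UNUSED hbitmap_example_iterate(const HBitmap *hb)
{
    HBitmapIter hbi;
    int64_t item;

    hbitmap_iter_init(&hbi, hb, 0);
    while ((item = hbitmap_iter_next(&hbi)) >= 0) {
        printf("dirty item at %" PRId64 "\n", item);
    }
}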

void hbitmap_iter_init(HBitmapIter *hbi, const HBitmap *hb, uint64_t first)
{
    unsigned i, bit;
    uint64_t pos;

    hbi->hb = hb;
    pos = first >> hb->granularity;
    assert(pos < hb->size);
    hbi->pos = pos >> BITS_PER_LEVEL;
    hbi->granularity = hb->granularity;

    for (i = HBITMAP_LEVELS; i-- > 0; ) {
        bit = pos & (BITS_PER_LONG - 1);
        pos >>= BITS_PER_LEVEL;

        /* Drop bits representing items before first. */
        hbi->cur[i] = hb->levels[i][pos] & ~((1UL << bit) - 1);

        /* We have already added level i+1, so the lowest set bit has
         * been processed. Clear it.
         */
        if (i != HBITMAP_LEVELS - 1) {
            hbi->cur[i] &= ~(1UL << bit);
        }
    }
}

int64_t hbitmap_next_dirty(const HBitmap *hb, int64_t start, int64_t count)
{
    HBitmapIter hbi;
    int64_t first_dirty_off;
    uint64_t end;

    assert(start >= 0 && count >= 0);

    if (start >= hb->orig_size || count == 0) {
        return -1;
    }

    end = count > hb->orig_size - start ? hb->orig_size : start + count;

    hbitmap_iter_init(&hbi, hb, start);
    first_dirty_off = hbitmap_iter_next(&hbi);

    if (first_dirty_off < 0 || first_dirty_off >= end) {
        return -1;
    }

    return MAX(start, first_dirty_off);
}

int64_t hbitmap_next_zero(const HBitmap *hb, int64_t start, int64_t count)
{
    size_t pos = (start >> hb->granularity) >> BITS_PER_LEVEL;
    unsigned long *last_lev = hb->levels[HBITMAP_LEVELS - 1];
    unsigned long cur = last_lev[pos];
    unsigned start_bit_offset;
    uint64_t end_bit, sz;
    int64_t res;

    assert(start >= 0 && count >= 0);

    if (start >= hb->orig_size || count == 0) {
        return -1;
    }

    end_bit = count > hb->orig_size - start ?
                hb->size :
                ((start + count - 1) >> hb->granularity) + 1;
    sz = (end_bit + BITS_PER_LONG - 1) >> BITS_PER_LEVEL;

    /* There may be some zero bits in @cur before @start. We are not
     * interested in them, so let's set them.
     */
    start_bit_offset = (start >> hb->granularity) & (BITS_PER_LONG - 1);
    cur |= (1UL << start_bit_offset) - 1;
    assert((start >> hb->granularity) < hb->size);

    if (cur == (unsigned long)-1) {
        do {
            pos++;
        } while (pos < sz && last_lev[pos] == (unsigned long)-1);

        if (pos >= sz) {
            return -1;
        }

        cur = last_lev[pos];
    }

    res = (pos << BITS_PER_LEVEL) + ctzl(cur);
    if (res >= end_bit) {
        return -1;
    }

    res = res << hb->granularity;
    if (res < start) {
        assert(((start - res) >> hb->granularity) == 0);
        return start;
    }

    return res;
}

bool hbitmap_next_dirty_area(const HBitmap *hb, int64_t start, int64_t end,
                             int64_t max_dirty_count,
                             int64_t *dirty_start, int64_t *dirty_count)
{
    int64_t next_zero;

    assert(start >= 0 && end >= 0 && max_dirty_count > 0);

    end = MIN(end, hb->orig_size);
    if (start >= end) {
        return false;
    }

    start = hbitmap_next_dirty(hb, start, end - start);
    if (start < 0) {
        return false;
    }

    end = start + MIN(end - start, max_dirty_count);

    next_zero = hbitmap_next_zero(hb, start, end - start);
    if (next_zero >= 0) {
        end = next_zero;
    }

    *dirty_start = start;
    *dirty_count = end - start;

    return true;
}
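
/*
 * Illustrative sketch (the helper name is hypothetical): walking a bitmap
 * as a sequence of dirty extents with hbitmap_next_dirty_area(), the same
 * pattern hbitmap_sparse_merge() uses further down.
 */
static void G_GNUC_UNUSED hbitmap_example_walk_areas(const HBitmap *hb)
{
    int64_t offset = 0;
    int64_t count;

    while (hbitmap_next_dirty_area(hb, offset, hb->orig_size, INT64_MAX,
                                   &offset, &count)) {
        printf("dirty area: offset %" PRId64 ", count %" PRId64 "\n",
               offset, count);
        offset += count;
    }
}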

bool hbitmap_status(const HBitmap *hb, int64_t start, int64_t count,
                    int64_t *pnum)
{
    int64_t next_dirty, next_zero;

    assert(start >= 0);
    assert(count > 0);
    assert(start + count <= hb->orig_size);

    next_dirty = hbitmap_next_dirty(hb, start, count);
    if (next_dirty == -1) {
        *pnum = count;
        return false;
    }

    if (next_dirty > start) {
        *pnum = next_dirty - start;
        return false;
    }

    assert(next_dirty == start);

    next_zero = hbitmap_next_zero(hb, start, count);
    if (next_zero == -1) {
        *pnum = count;
        return true;
    }

    assert(next_zero > start);
    *pnum = next_zero - start;
    return true;
}

bool hbitmap_empty(const HBitmap *hb)
{
    return hb->count == 0;
}

int hbitmap_granularity(const HBitmap *hb)
{
    return hb->granularity;
}

uint64_t hbitmap_count(const HBitmap *hb)
{
    return hb->count << hb->granularity;
}

/**
 * hbitmap_iter_next_word:
 * @hbi: HBitmapIter to operate on.
 * @p_cur: Location where to store the next non-zero word.
 *
 * Return the index of the next nonzero word that is set in @hbi's
 * associated HBitmap, and set *p_cur to the content of that word
 * (bits before the index that was passed to hbitmap_iter_init are
 * trimmed on the first call). Return -1, and set *p_cur to zero,
 * if all remaining words are zero.
 */
static size_t hbitmap_iter_next_word(HBitmapIter *hbi, unsigned long *p_cur)
{
    unsigned long cur = hbi->cur[HBITMAP_LEVELS - 1];

    if (cur == 0) {
        cur = hbitmap_iter_skip_words(hbi);
        if (cur == 0) {
            *p_cur = 0;
            return -1;
        }
    }

    /* The next call will resume work from the next word. */
    hbi->cur[HBITMAP_LEVELS - 1] = 0;
    *p_cur = cur;
    return hbi->pos;
}

/* Count the number of set bits between start and end, not accounting for
 * the granularity. Also an example of how to use hbitmap_iter_next_word.
 */
static uint64_t hb_count_between(HBitmap *hb, uint64_t start, uint64_t last)
{
    HBitmapIter hbi;
    uint64_t count = 0;
    uint64_t end = last + 1;
    unsigned long cur;
    size_t pos;

    hbitmap_iter_init(&hbi, hb, start << hb->granularity);
    for (;;) {
        pos = hbitmap_iter_next_word(&hbi, &cur);
        if (pos >= (end >> BITS_PER_LEVEL)) {
            break;
        }
        count += ctpopl(cur);
    }

    if (pos == (end >> BITS_PER_LEVEL)) {
        /* Drop bits representing the END-th and subsequent items. */
        int bit = end & (BITS_PER_LONG - 1);
        cur &= (1UL << bit) - 1;
        count += ctpopl(cur);
    }

    return count;
}

/* Setting starts at the last layer and propagates up if an element
 * changes.
 */
static inline bool hb_set_elem(unsigned long *elem, uint64_t start, uint64_t last)
{
    unsigned long mask;
    unsigned long old;

    assert((last >> BITS_PER_LEVEL) == (start >> BITS_PER_LEVEL));
    assert(start <= last);

    mask = 2UL << (last & (BITS_PER_LONG - 1));
    mask -= 1UL << (start & (BITS_PER_LONG - 1));
    old = *elem;
    *elem |= mask;
    return old != *elem;
}
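
/*
 * Worked example of the mask construction above, assuming a 64-bit host.
 * For start = 3 and last = 6 within one word:
 *
 *     2UL << 6   = 0b10000000   (bit one past 'last')
 *     1UL << 3   = 0b00001000   (bit at 'start')
 *     difference = 0b01111000   (bits 3..6, inclusive)
 *
 * The hypothetical helper below just re-derives that mask.
 */
static unsigned long G_GNUC_UNUSED hb_example_mask(uint64_t start, uint64_t last)
{
    return (2UL << (last & (BITS_PER_LONG - 1))) -
           (1UL << (start & (BITS_PER_LONG - 1)));
}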

/* The recursive workhorse (the depth is limited to HBITMAP_LEVELS)...
 * Returns true if at least one bit is changed. */
static bool hb_set_between(HBitmap *hb, int level, uint64_t start,
                           uint64_t last)
{
    size_t pos = start >> BITS_PER_LEVEL;
    size_t lastpos = last >> BITS_PER_LEVEL;
    bool changed = false;
    size_t i;

    i = pos;
    if (i < lastpos) {
        uint64_t next = (start | (BITS_PER_LONG - 1)) + 1;
        changed |= hb_set_elem(&hb->levels[level][i], start, next - 1);
        for (;;) {
            start = next;
            next += BITS_PER_LONG;
            if (++i == lastpos) {
                break;
            }
            changed |= (hb->levels[level][i] == 0);
            hb->levels[level][i] = ~0UL;
        }
    }
    changed |= hb_set_elem(&hb->levels[level][i], start, last);

    /* If there was any change in this layer, we may have to update
     * the one above.
     */
    if (level > 0 && changed) {
        hb_set_between(hb, level - 1, pos, lastpos);
    }
    return changed;
}

void hbitmap_set(HBitmap *hb, uint64_t start, uint64_t count)
{
    /* Compute range in the last layer. */
    uint64_t first, n;
    uint64_t last = start + count - 1;

    if (count == 0) {
        return;
    }

    trace_hbitmap_set(hb, start, count,
                      start >> hb->granularity, last >> hb->granularity);

    first = start >> hb->granularity;
    last >>= hb->granularity;
    assert(last < hb->size);
    n = last - first + 1;

    hb->count += n - hb_count_between(hb, first, last);
    if (hb_set_between(hb, HBITMAP_LEVELS - 1, first, last) &&
        hb->meta) {
        hbitmap_set(hb->meta, start, count);
    }
}

/* Resetting works the other way round: propagate up if the new
 * value is zero.
 */
static inline bool hb_reset_elem(unsigned long *elem, uint64_t start, uint64_t last)
{
    unsigned long mask;
    bool blanked;

    assert((last >> BITS_PER_LEVEL) == (start >> BITS_PER_LEVEL));
    assert(start <= last);

    mask = 2UL << (last & (BITS_PER_LONG - 1));
    mask -= 1UL << (start & (BITS_PER_LONG - 1));
    blanked = *elem != 0 && ((*elem & ~mask) == 0);
    *elem &= ~mask;
    return blanked;
}

/* The recursive workhorse (the depth is limited to HBITMAP_LEVELS)...
 * Returns true if at least one bit is changed. */
static bool hb_reset_between(HBitmap *hb, int level, uint64_t start,
                             uint64_t last)
{
    size_t pos = start >> BITS_PER_LEVEL;
    size_t lastpos = last >> BITS_PER_LEVEL;
    bool changed = false;
    size_t i;

    i = pos;
    if (i < lastpos) {
        uint64_t next = (start | (BITS_PER_LONG - 1)) + 1;

        /* Here we need a more complex test than when setting bits. Even if
         * something was changed, we must not blank bits in the upper level
         * unless the lower-level word became entirely zero. So, remove pos
         * from the upper-level range if bits remain set.
         */
        if (hb_reset_elem(&hb->levels[level][i], start, next - 1)) {
            changed = true;
        } else {
            pos++;
        }

        for (;;) {
            start = next;
            next += BITS_PER_LONG;
            if (++i == lastpos) {
                break;
            }
            changed |= (hb->levels[level][i] != 0);
            hb->levels[level][i] = 0UL;
        }
    }

    /* Same as above, this time for lastpos. */
    if (hb_reset_elem(&hb->levels[level][i], start, last)) {
        changed = true;
    } else {
        lastpos--;
    }

    if (level > 0 && changed) {
        hb_reset_between(hb, level - 1, pos, lastpos);
    }

    return changed;
}

void hbitmap_reset(HBitmap *hb, uint64_t start, uint64_t count)
{
    /* Compute range in the last layer. */
    uint64_t first;
    uint64_t last = start + count - 1;
    uint64_t gran = 1ULL << hb->granularity;

    if (count == 0) {
        return;
    }

    assert(QEMU_IS_ALIGNED(start, gran));
    assert(QEMU_IS_ALIGNED(count, gran) || (start + count == hb->orig_size));

    trace_hbitmap_reset(hb, start, count,
                        start >> hb->granularity, last >> hb->granularity);

    first = start >> hb->granularity;
    last >>= hb->granularity;
    assert(last < hb->size);

    hb->count -= hb_count_between(hb, first, last);
    if (hb_reset_between(hb, HBITMAP_LEVELS - 1, first, last) &&
        hb->meta) {
        hbitmap_set(hb->meta, start, count);
    }
}

void hbitmap_reset_all(HBitmap *hb)
{
    unsigned int i;

    /* Same as hbitmap_alloc() except for memset() instead of malloc() */
    for (i = HBITMAP_LEVELS; --i >= 1; ) {
        memset(hb->levels[i], 0, hb->sizes[i] * sizeof(unsigned long));
    }

    hb->levels[0][0] = 1UL << (BITS_PER_LONG - 1);
    hb->count = 0;
}

bool hbitmap_is_serializable(const HBitmap *hb)
{
    /* Every serialized chunk must be aligned to 64 bits so that endianness
     * requirements can be fulfilled on both 64 bit and 32 bit hosts.
     * We have hbitmap_serialization_align() which converts this
     * alignment requirement from bitmap bits to items covered (e.g. sectors).
     * That value is:
     *    64 << hb->granularity
     * Since this value must not exceed UINT64_MAX, hb->granularity must be
     * less than 58 (== 64 - 6, where 6 is ld(64), i.e. 1 << 6 == 64).
     *
     * In order for hbitmap_serialization_align() to always return a
     * meaningful value, bitmaps that are to be serialized must have a
     * granularity of less than 58. */

    return hb->granularity < 58;
}

bool hbitmap_get(const HBitmap *hb, uint64_t item)
{
    /* Compute position and bit in the last layer. */
    uint64_t pos = item >> hb->granularity;
    unsigned long bit = 1UL << (pos & (BITS_PER_LONG - 1));
    assert(pos < hb->size);

    return (hb->levels[HBITMAP_LEVELS - 1][pos >> BITS_PER_LEVEL] & bit) != 0;
}

uint64_t hbitmap_serialization_align(const HBitmap *hb)
{
    assert(hbitmap_is_serializable(hb));

    /* Require at least 64 bit granularity to be safe on both 64 bit and 32 bit
     * hosts. */
    return UINT64_C(64) << hb->granularity;
}

/* Start should be aligned to serialization granularity, chunk size should be
 * aligned to serialization granularity too, except for last chunk.
 */
static void serialization_chunk(const HBitmap *hb,
                                uint64_t start, uint64_t count,
                                unsigned long **first_el, uint64_t *el_count)
{
    uint64_t last = start + count - 1;
    uint64_t gran = hbitmap_serialization_align(hb);

    assert((start & (gran - 1)) == 0);
    assert((last >> hb->granularity) < hb->size);
    if ((last >> hb->granularity) != hb->size - 1) {
        assert((count & (gran - 1)) == 0);
    }

    start = (start >> hb->granularity) >> BITS_PER_LEVEL;
    last = (last >> hb->granularity) >> BITS_PER_LEVEL;

    *first_el = &hb->levels[HBITMAP_LEVELS - 1][start];
    *el_count = last - start + 1;
}

uint64_t hbitmap_serialization_size(const HBitmap *hb,
                                    uint64_t start, uint64_t count)
{
    uint64_t el_count;
    unsigned long *cur;

    if (!count) {
        return 0;
    }
    serialization_chunk(hb, start, count, &cur, &el_count);

    return el_count * sizeof(unsigned long);
}

void hbitmap_serialize_part(const HBitmap *hb, uint8_t *buf,
                            uint64_t start, uint64_t count)
{
    uint64_t el_count;
    unsigned long *cur, *end;

    if (!count) {
        return;
    }
    serialization_chunk(hb, start, count, &cur, &el_count);
    end = cur + el_count;

    while (cur != end) {
        unsigned long el =
            (BITS_PER_LONG == 32 ? cpu_to_le32(*cur) : cpu_to_le64(*cur));

        memcpy(buf, &el, sizeof(el));
        buf += sizeof(el);
        cur++;
    }
}

void hbitmap_deserialize_part(HBitmap *hb, uint8_t *buf,
                              uint64_t start, uint64_t count,
                              bool finish)
{
    uint64_t el_count;
    unsigned long *cur, *end;

    if (!count) {
        return;
    }
    serialization_chunk(hb, start, count, &cur, &el_count);
    end = cur + el_count;

    while (cur != end) {
        memcpy(cur, buf, sizeof(*cur));

        if (BITS_PER_LONG == 32) {
            le32_to_cpus((uint32_t *)cur);
        } else {
            le64_to_cpus((uint64_t *)cur);
        }

        buf += sizeof(unsigned long);
        cur++;
    }
    if (finish) {
        hbitmap_deserialize_finish(hb);
    }
}

void hbitmap_deserialize_zeroes(HBitmap *hb, uint64_t start, uint64_t count,
                                bool finish)
{
    uint64_t el_count;
    unsigned long *first;

    if (!count) {
        return;
    }
    serialization_chunk(hb, start, count, &first, &el_count);

    memset(first, 0, el_count * sizeof(unsigned long));
    if (finish) {
        hbitmap_deserialize_finish(hb);
    }
}

void hbitmap_deserialize_ones(HBitmap *hb, uint64_t start, uint64_t count,
                              bool finish)
{
    uint64_t el_count;
    unsigned long *first;

    if (!count) {
        return;
    }
    serialization_chunk(hb, start, count, &first, &el_count);

    memset(first, 0xff, el_count * sizeof(unsigned long));
    if (finish) {
        hbitmap_deserialize_finish(hb);
    }
}

void hbitmap_deserialize_finish(HBitmap *bitmap)
{
    int64_t i, size, prev_size;
    int lev;

    /* restore levels starting from penultimate to zero level, assuming
     * that the last level is ok */
    size = MAX((bitmap->size + BITS_PER_LONG - 1) >> BITS_PER_LEVEL, 1);
    for (lev = HBITMAP_LEVELS - 1; lev-- > 0; ) {
        prev_size = size;
        size = MAX((size + BITS_PER_LONG - 1) >> BITS_PER_LEVEL, 1);
        memset(bitmap->levels[lev], 0, size * sizeof(unsigned long));

        for (i = 0; i < prev_size; ++i) {
            if (bitmap->levels[lev + 1][i]) {
                bitmap->levels[lev][i >> BITS_PER_LEVEL] |=
                    1UL << (i & (BITS_PER_LONG - 1));
            }
        }
    }

    bitmap->levels[0][0] |= 1UL << (BITS_PER_LONG - 1);
    bitmap->count = hb_count_between(bitmap, 0, bitmap->size - 1);
}
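
/*
 * Illustrative sketch (the helper name is hypothetical): a whole-bitmap
 * serialize/deserialize round trip. Assumes 'src' and 'dst' were allocated
 * with the same size and granularity, that both satisfy
 * hbitmap_is_serializable(), and that 'size' is the item count passed to
 * hbitmap_alloc(). Serializing the entire bitmap in one chunk trivially
 * satisfies the alignment rules checked by serialization_chunk().
 */
static void G_GNUC_UNUSED hbitmap_example_roundtrip(HBitmap *src, HBitmap *dst,
                                                    uint64_t size)
{
    uint64_t buf_size = hbitmap_serialization_size(src, 0, size);
    uint8_t *buf = g_malloc(buf_size);

    hbitmap_serialize_part(src, buf, 0, size);
    /* finish=true rebuilds the upper levels once the data is in place. */
    hbitmap_deserialize_part(dst, buf, 0, size, true);
    g_free(buf);
}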

void hbitmap_free(HBitmap *hb)
{
    unsigned i;
    assert(!hb->meta);
    for (i = HBITMAP_LEVELS; i-- > 0; ) {
        g_free(hb->levels[i]);
    }
    g_free(hb);
}

HBitmap *hbitmap_alloc(uint64_t size, int granularity)
{
    HBitmap *hb = g_new0(struct HBitmap, 1);
    unsigned i;

    assert(size <= INT64_MAX);
    hb->orig_size = size;

    assert(granularity >= 0 && granularity < 64);
    size = (size + (1ULL << granularity) - 1) >> granularity;
    assert(size <= ((uint64_t)1 << HBITMAP_LOG_MAX_SIZE));

    hb->size = size;
    hb->granularity = granularity;
    for (i = HBITMAP_LEVELS; i-- > 0; ) {
        size = MAX((size + BITS_PER_LONG - 1) >> BITS_PER_LEVEL, 1);
        hb->sizes[i] = size;
        hb->levels[i] = g_new0(unsigned long, size);
    }

    /* We necessarily have free bits in level 0 due to the definition
     * of HBITMAP_LEVELS, so use one for a sentinel. This speeds up
     * hbitmap_iter_skip_words.
     */
    assert(size == 1);
    hb->levels[0][0] |= 1UL << (BITS_PER_LONG - 1);
    return hb;
}
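
/*
 * Illustrative sketch (the helper name is hypothetical): basic lifecycle.
 * With granularity 2 one bit covers a group of 4 items, so setting item 5
 * also reports its neighbors 4..7 as set.
 */
static void G_GNUC_UNUSED hbitmap_example_basic(void)
{
    HBitmap *hb = hbitmap_alloc(64, 2);

    hbitmap_set(hb, 5, 1);
    assert(hbitmap_get(hb, 4));        /* same granularity-4 group as 5 */
    assert(hbitmap_count(hb) == 4);    /* one group of 2^2 items */

    hbitmap_reset(hb, 4, 4);           /* reset must be group-aligned */
    assert(hbitmap_empty(hb));
    hbitmap_free(hb);
}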

void hbitmap_truncate(HBitmap *hb, uint64_t size)
{
    bool shrink;
    unsigned i;
    uint64_t num_elements = size;
    uint64_t old;

    assert(size <= INT64_MAX);
    hb->orig_size = size;

    /* Size comes in as logical elements, adjust for granularity. */
    size = (size + (1ULL << hb->granularity) - 1) >> hb->granularity;
    assert(size <= ((uint64_t)1 << HBITMAP_LOG_MAX_SIZE));
    shrink = size < hb->size;

    /* bit sizes are identical; nothing to do. */
    if (size == hb->size) {
        return;
    }

    /* If we're losing bits, let's clear those bits before we invalidate all of
     * our invariants. This helps keep the bitcount consistent, and will prevent
     * us from carrying around garbage bits beyond the end of the map.
     */
    if (shrink) {
        /* Don't clear partial granularity groups;
         * start at the first full one. */
        uint64_t start = ROUND_UP(num_elements, UINT64_C(1) << hb->granularity);
        uint64_t fix_count = (hb->size << hb->granularity) - start;

        assert(fix_count);
        hbitmap_reset(hb, start, fix_count);
    }

    hb->size = size;
    for (i = HBITMAP_LEVELS; i-- > 0; ) {
        size = MAX(BITS_TO_LONGS(size), 1);
        if (hb->sizes[i] == size) {
            break;
        }
        old = hb->sizes[i];
        hb->sizes[i] = size;
        hb->levels[i] = g_renew(unsigned long, hb->levels[i], size);
        if (!shrink) {
            memset(&hb->levels[i][old], 0x00,
                   (size - old) * sizeof(*hb->levels[i]));
        }
    }
    if (hb->meta) {
        hbitmap_truncate(hb->meta, hb->size << hb->granularity);
    }
}
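
/*
 * Illustrative sketch (the helper name is hypothetical): shrinking clears
 * the bits beyond the new size so the count stays consistent, while growing
 * leaves the new tail clear.
 */
static void G_GNUC_UNUSED hbitmap_example_truncate(void)
{
    HBitmap *hb = hbitmap_alloc(128, 0);

    hbitmap_set(hb, 0, 128);
    hbitmap_truncate(hb, 64);          /* shrink: bits 64..127 are cleared */
    assert(hbitmap_count(hb) == 64);

    hbitmap_truncate(hb, 256);         /* grow: new bits start out clear */
    assert(hbitmap_count(hb) == 64);
    hbitmap_free(hb);
}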

/**
 * hbitmap_sparse_merge: performs dst = dst | src
 * works with differing granularities.
 * best used when src is sparsely populated.
 */
static void hbitmap_sparse_merge(HBitmap *dst, const HBitmap *src)
{
    int64_t offset;
    int64_t count;

    for (offset = 0;
         hbitmap_next_dirty_area(src, offset, src->orig_size, INT64_MAX,
                                 &offset, &count);
         offset += count)
    {
        hbitmap_set(dst, offset, count);
    }
}

/**
 * Given HBitmaps A and B, let R := A (BITOR) B.
 * Bitmaps A and B will not be modified,
 * except when bitmap R is an alias of A or B.
 * Bitmaps must have the same size.
 */
void hbitmap_merge(const HBitmap *a, const HBitmap *b, HBitmap *result)
{
    int i;
    uint64_t j;

    assert(a->orig_size == result->orig_size);
    assert(b->orig_size == result->orig_size);

    if ((!hbitmap_count(a) && result == b) ||
        (!hbitmap_count(b) && result == a)) {
        return;
    }

    if (!hbitmap_count(a) && !hbitmap_count(b)) {
        hbitmap_reset_all(result);
        return;
    }

    if (a->granularity != b->granularity) {
        if ((a != result) && (b != result)) {
            hbitmap_reset_all(result);
        }
        if (a != result) {
            hbitmap_sparse_merge(result, a);
        }
        if (b != result) {
            hbitmap_sparse_merge(result, b);
        }
        return;
    }

    /* This merge is O(size), as BITS_PER_LONG and HBITMAP_LEVELS are constant.
     * It may be possible to improve running times for sparsely populated maps
     * by using hbitmap_iter_next, but this is suboptimal for dense maps.
     */
    assert(a->size == b->size);
    for (i = HBITMAP_LEVELS - 1; i >= 0; i--) {
        for (j = 0; j < a->sizes[i]; j++) {
            result->levels[i][j] = a->levels[i][j] | b->levels[i][j];
        }
    }

    /* Recompute the dirty count */
    result->count = hb_count_between(result, 0, result->size - 1);
}
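
/*
 * Illustrative sketch (the helper name is hypothetical): an in-place merge,
 * b |= a. Aliasing the result with an input is explicitly allowed; when the
 * granularities differ, the sparse path above is taken.
 */
static void G_GNUC_UNUSED hbitmap_example_merge_into(const HBitmap *a,
                                                     HBitmap *b)
{
    hbitmap_merge(a, b, b);
}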

char *hbitmap_sha256(const HBitmap *bitmap, Error **errp)
{
    size_t size = bitmap->sizes[HBITMAP_LEVELS - 1] * sizeof(unsigned long);
    char *data = (char *)bitmap->levels[HBITMAP_LEVELS - 1];
    char *hash = NULL;
    qcrypto_hash_digest(QCRYPTO_HASH_ALG_SHA256, data, size, &hash, errp);

    return hash;
}