// SPDX-License-Identifier: GPL-2.0

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include "ctree.h"
#include "volumes.h"
#include "extent_map.h"
#include "compression.h"

static struct kmem_cache *extent_map_cache;

int __init extent_map_init(void)
{
	extent_map_cache = kmem_cache_create("btrfs_extent_map",
			sizeof(struct extent_map), 0,
			SLAB_MEM_SPREAD, NULL);
	if (!extent_map_cache)
		return -ENOMEM;
	return 0;
}

void __cold extent_map_exit(void)
{
	kmem_cache_destroy(extent_map_cache);
}
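
/**
 * extent_map_tree_init - initialize extent map tree
 * @tree:	tree to initialize
 *
 * Initialize the extent tree @tree.  Should be called for each new inode
 * or other user of the extent_map interface.
 */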
void extent_map_tree_init(struct extent_map_tree *tree)
{
	tree->map = RB_ROOT_CACHED;
	INIT_LIST_HEAD(&tree->modified_extents);
	rwlock_init(&tree->lock);
}
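
/**
 * alloc_extent_map - allocate new extent map structure
 *
 * Allocate a new extent_map structure.  The new structure is
 * returned with a reference count of one and needs to be freed
 * using free_extent_map()
 */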
struct extent_map *alloc_extent_map(void)
{
	struct extent_map *em;
	em = kmem_cache_zalloc(extent_map_cache, GFP_NOFS);
	if (!em)
		return NULL;
	RB_CLEAR_NODE(&em->rb_node);
	em->flags = 0;
	em->compress_type = BTRFS_COMPRESS_NONE;
	em->generation = 0;
	refcount_set(&em->refs, 1);
	INIT_LIST_HEAD(&em->list);
	return em;
}
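
/**
 * free_extent_map - drop reference count of an extent_map
 * @em:		extent map being released
 *
 * Drops the reference out on @em by one and free the structure
 * if the reference count hits zero.
 */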
void free_extent_map(struct extent_map *em)
{
	if (!em)
		return;
	WARN_ON(refcount_read(&em->refs) == 0);
	if (refcount_dec_and_test(&em->refs)) {
		WARN_ON(extent_map_in_tree(em));
		WARN_ON(!list_empty(&em->list));
		if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags))
			kfree(em->map_lookup);
		kmem_cache_free(extent_map_cache, em);
	}
}
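
/* simple helper to do math around the end of an extent, handling wrap */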
static u64 range_end(u64 start, u64 len)
{
	if (start + len < start)
		return (u64)-1;
	return start + len;
}
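
/*
 * Insert @em into @root, or return -EEXIST if any extent already in the
 * tree overlaps the range [em->start, em->start + em->len).
 */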
static int tree_insert(struct rb_root_cached *root, struct extent_map *em)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct extent_map *entry = NULL;
	struct rb_node *orig_parent = NULL;
	u64 end = range_end(em->start, em->len);
	bool leftmost = true;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct extent_map, rb_node);

		if (em->start < entry->start) {
			p = &(*p)->rb_left;
		} else if (em->start >= extent_map_end(entry)) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			return -EEXIST;
		}
	}

	orig_parent = parent;
	while (parent && em->start >= extent_map_end(entry)) {
		parent = rb_next(parent);
		entry = rb_entry(parent, struct extent_map, rb_node);
	}
	if (parent)
		if (end > entry->start && em->start < extent_map_end(entry))
			return -EEXIST;

	parent = orig_parent;
	entry = rb_entry(parent, struct extent_map, rb_node);
	while (parent && em->start < entry->start) {
		parent = rb_prev(parent);
		entry = rb_entry(parent, struct extent_map, rb_node);
	}
	if (parent)
		if (end > entry->start && em->start < extent_map_end(entry))
			return -EEXIST;

	rb_link_node(&em->rb_node, orig_parent, p);
	rb_insert_color_cached(&em->rb_node, root, leftmost);
	return 0;
}
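
/*
 * search through the tree for an extent_map with a given offset.  If
 * it can't be found, try to find some neighboring extents
 */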
static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
				     struct rb_node **prev_ret,
				     struct rb_node **next_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct extent_map *entry;
	struct extent_map *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct extent_map, rb_node);
		prev = n;
		prev_entry = entry;

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset >= extent_map_end(entry))
			n = n->rb_right;
		else
			return n;
	}

	if (prev_ret) {
		orig_prev = prev;
		while (prev && offset >= extent_map_end(prev_entry)) {
			prev = rb_next(prev);
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
		}
		*prev_ret = prev;
		prev = orig_prev;
	}

	if (next_ret) {
		prev_entry = rb_entry(prev, struct extent_map, rb_node);
		while (prev && offset < prev_entry->start) {
			prev = rb_prev(prev);
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
		}
		*next_ret = prev;
	}
	return NULL;
}

static int mergable_maps(struct extent_map *prev, struct extent_map *next)
{
	if (test_bit(EXTENT_FLAG_PINNED, &prev->flags))
		return 0;
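
	/*
	 * don't merge compressed extents, we need to know their
	 * actual size
	 */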
	if (test_bit(EXTENT_FLAG_COMPRESSED, &prev->flags))
		return 0;

	if (test_bit(EXTENT_FLAG_LOGGING, &prev->flags) ||
	    test_bit(EXTENT_FLAG_LOGGING, &next->flags))
		return 0;
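
	/*
	 * We don't want to merge stuff that hasn't been written to the log yet
	 * since it may not reflect exactly what is on disk, and that would be
	 * bad.
	 */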
	if (!list_empty(&prev->list) || !list_empty(&next->list))
		return 0;

	ASSERT(next->block_start != EXTENT_MAP_DELALLOC &&
	       prev->block_start != EXTENT_MAP_DELALLOC);

	if (extent_map_end(prev) == next->start &&
	    prev->flags == next->flags &&
	    prev->bdev == next->bdev &&
	    ((next->block_start == EXTENT_MAP_HOLE &&
	      prev->block_start == EXTENT_MAP_HOLE) ||
	     (next->block_start == EXTENT_MAP_INLINE &&
	      prev->block_start == EXTENT_MAP_INLINE) ||
	     (next->block_start < EXTENT_MAP_LAST_BYTE - 1 &&
	      next->block_start == extent_map_block_end(prev)))) {
		return 1;
	}
	return 0;
}

static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
{
	struct extent_map *merge = NULL;
	struct rb_node *rb;

	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		if (rb)
			merge = rb_entry(rb, struct extent_map, rb_node);
		if (rb && mergable_maps(merge, em)) {
			em->start = merge->start;
			em->orig_start = merge->orig_start;
			em->len += merge->len;
			em->block_len += merge->block_len;
			em->block_start = merge->block_start;
			em->mod_len = (em->mod_len + em->mod_start) - merge->mod_start;
			em->mod_start = merge->mod_start;
			em->generation = max(em->generation, merge->generation);

			rb_erase_cached(&merge->rb_node, &tree->map);
			RB_CLEAR_NODE(&merge->rb_node);
			free_extent_map(merge);
		}
	}

	rb = rb_next(&em->rb_node);
	if (rb)
		merge = rb_entry(rb, struct extent_map, rb_node);
	if (rb && mergable_maps(em, merge)) {
		em->len += merge->len;
		em->block_len += merge->block_len;
		rb_erase_cached(&merge->rb_node, &tree->map);
		RB_CLEAR_NODE(&merge->rb_node);
		em->mod_len = (merge->mod_start + merge->mod_len) - em->mod_start;
		em->generation = max(em->generation, merge->generation);
		free_extent_map(merge);
	}
}
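
/**
 * unpin_extent_cache - unpin an extent from the cache
 * @tree:	tree to unpin the extent in
 * @start:	logical offset in the file
 * @len:	length of the extent
 * @gen:	generation that this extent has been modified in
 *
 * Called after an extent has been written to disk properly.  Set the generation
 * to the generation that actually added the file item to the inode so we know
 * we need to sync this extent when we call fsync().
 */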
int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len,
		       u64 gen)
{
	int ret = 0;
	struct extent_map *em;
	bool prealloc = false;

	write_lock(&tree->lock);
	em = lookup_extent_mapping(tree, start, len);

	WARN_ON(!em || em->start != start);

	if (!em)
		goto out;

	em->generation = gen;
	clear_bit(EXTENT_FLAG_PINNED, &em->flags);
	em->mod_start = em->start;
	em->mod_len = em->len;

	if (test_bit(EXTENT_FLAG_FILLING, &em->flags)) {
		prealloc = true;
		clear_bit(EXTENT_FLAG_FILLING, &em->flags);
	}

	try_merge_map(tree, em);

	if (prealloc) {
		em->mod_start = em->start;
		em->mod_len = em->len;
	}

	free_extent_map(em);
out:
	write_unlock(&tree->lock);
	return ret;
}

void clear_em_logging(struct extent_map_tree *tree, struct extent_map *em)
{
	clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
	if (extent_map_in_tree(em))
		try_merge_map(tree, em);
}

static inline void setup_extent_mapping(struct extent_map_tree *tree,
					struct extent_map *em,
					int modified)
{
	refcount_inc(&em->refs);
	em->mod_start = em->start;
	em->mod_len = em->len;

	if (modified)
		list_move(&em->list, &tree->modified_extents);
	else
		try_merge_map(tree, em);
}

static void extent_map_device_set_bits(struct extent_map *em, unsigned bits)
{
	struct map_lookup *map = em->map_lookup;
	u64 stripe_size = em->orig_block_len;
	int i;

	for (i = 0; i < map->num_stripes; i++) {
		struct btrfs_bio_stripe *stripe = &map->stripes[i];
		struct btrfs_device *device = stripe->dev;

		set_extent_bits_nowait(&device->alloc_state, stripe->physical,
				       stripe->physical + stripe_size - 1, bits);
	}
}

static void extent_map_device_clear_bits(struct extent_map *em, unsigned bits)
{
	struct map_lookup *map = em->map_lookup;
	u64 stripe_size = em->orig_block_len;
	int i;

	for (i = 0; i < map->num_stripes; i++) {
		struct btrfs_bio_stripe *stripe = &map->stripes[i];
		struct btrfs_device *device = stripe->dev;

		__clear_extent_bit(&device->alloc_state, stripe->physical,
				   stripe->physical + stripe_size - 1, bits,
				   0, 0, NULL, GFP_NOWAIT, NULL);
	}
}
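
/**
 * add_extent_mapping - add new extent map to the extent tree
 * @tree:	tree to insert new map in
 * @em:		map to insert
 * @modified:	indicate whether the given @em should be added to the
 *		modified list, which indicates the extent needs to be logged
 *
 * Insert @em into @tree or perform a simple forward/backward merge with
 * existing mappings.  The extent_map struct passed in will be inserted
 * into the tree directly, with an additional reference taken, or a
 * merged extent_map will be returned with the ref count increased by 1,
 * which is freed when the extent_map ends up in the -EEXIST case.
 */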
int add_extent_mapping(struct extent_map_tree *tree,
		       struct extent_map *em, int modified)
{
	int ret = 0;

	lockdep_assert_held_write(&tree->lock);

	ret = tree_insert(&tree->map, em);
	if (ret)
		goto out;

	setup_extent_mapping(tree, em, modified);
	if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags)) {
		extent_map_device_set_bits(em, CHUNK_ALLOCATED);
		extent_map_device_clear_bits(em, CHUNK_TRIMMED);
	}
out:
	return ret;
}

static struct extent_map *
__lookup_extent_mapping(struct extent_map_tree *tree,
			u64 start, u64 len, int strict)
{
	struct extent_map *em;
	struct rb_node *rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *next = NULL;
	u64 end = range_end(start, len);

	rb_node = __tree_search(&tree->map.rb_root, start, &prev, &next);
	if (!rb_node) {
		if (prev)
			rb_node = prev;
		else if (next)
			rb_node = next;
		else
			return NULL;
	}

	em = rb_entry(rb_node, struct extent_map, rb_node);

	if (strict && !(end > em->start && start < extent_map_end(em)))
		return NULL;

	refcount_inc(&em->refs);
	return em;
}
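
/**
 * lookup_extent_mapping - lookup extent_map
 * @tree:	tree to lookup in
 * @start:	byte offset to start the search
 * @len:	length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, len] range.  There may be additional objects in the tree that
 * intersect, so check the object returned carefully to make sure that no
 * additional lookups are needed.
 */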
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	return __lookup_extent_mapping(tree, start, len, 1);
}
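
/**
 * search_extent_mapping - find a nearby extent map
 * @tree:	tree to lookup in
 * @start:	byte offset to start the search
 * @len:	length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, len] range.
 *
 * If one can't be found, any nearby extent may be returned
 */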
struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	return __lookup_extent_mapping(tree, start, len, 0);
}
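
/**
 * remove_extent_mapping - removes an extent_map from the extent tree
 * @tree:	extent tree to remove from
 * @em:		extent map being removed
 *
 * Removes @em from @tree.  No reference counts are dropped, and no checks
 * are done to see if the range is in use
 */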
void remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
	WARN_ON(test_bit(EXTENT_FLAG_PINNED, &em->flags));
	rb_erase_cached(&em->rb_node, &tree->map);
	if (!test_bit(EXTENT_FLAG_LOGGING, &em->flags))
		list_del_init(&em->list);
	if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags))
		extent_map_device_clear_bits(em, CHUNK_ALLOCATED);
	RB_CLEAR_NODE(&em->rb_node);
}

void replace_extent_mapping(struct extent_map_tree *tree,
			    struct extent_map *cur,
			    struct extent_map *new,
			    int modified)
{
	WARN_ON(test_bit(EXTENT_FLAG_PINNED, &cur->flags));
	ASSERT(extent_map_in_tree(cur));
	if (!test_bit(EXTENT_FLAG_LOGGING, &cur->flags))
		list_del_init(&cur->list);
	rb_replace_node_cached(&cur->rb_node, &new->rb_node, &tree->map);
	RB_CLEAR_NODE(&cur->rb_node);

	setup_extent_mapping(tree, new, modified);
}

static struct extent_map *next_extent_map(struct extent_map *em)
{
	struct rb_node *next;

	next = rb_next(&em->rb_node);
	if (!next)
		return NULL;
	return container_of(next, struct extent_map, rb_node);
}

static struct extent_map *prev_extent_map(struct extent_map *em)
{
	struct rb_node *prev;

	prev = rb_prev(&em->rb_node);
	if (!prev)
		return NULL;
	return container_of(prev, struct extent_map, rb_node);
}
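
/*
 * Helper for btrfs_get_extent.  Given an existing extent in the tree,
 * the existing extent is the nearest extent to map_start,
 * and an extent that you want to insert, deal with overlap and insert
 * the best fitted new extent into the tree.
 */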
static noinline int merge_extent_mapping(struct extent_map_tree *em_tree,
					 struct extent_map *existing,
					 struct extent_map *em,
					 u64 map_start)
{
	struct extent_map *prev;
	struct extent_map *next;
	u64 start;
	u64 end;
	u64 start_diff;

	BUG_ON(map_start < em->start || map_start >= extent_map_end(em));

	if (existing->start > map_start) {
		next = existing;
		prev = prev_extent_map(next);
	} else {
		prev = existing;
		next = next_extent_map(prev);
	}

	start = prev ? extent_map_end(prev) : em->start;
	start = max_t(u64, start, em->start);
	end = next ? next->start : extent_map_end(em);
	end = min_t(u64, end, extent_map_end(em));
	start_diff = start - em->start;
	em->start = start;
	em->len = end - start;
	if (em->block_start < EXTENT_MAP_LAST_BYTE &&
	    !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
		em->block_start += start_diff;
		em->block_len = em->len;
	}
	return add_extent_mapping(em_tree, em, 0);
}
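
/**
 * btrfs_add_extent_mapping - add extent mapping into em_tree
 * @fs_info:	used for tracepoint
 * @em_tree:	the extent tree into which we want to insert the extent mapping
 * @em_in:	extent we are inserting
 * @start:	start of the logical range btrfs_get_extent() is requesting
 * @len:	length of the logical range btrfs_get_extent() is requesting
 *
 * Note that @em_in's range may be different from [start, start+len),
 * but they must be overlapped.
 *
 * Insert @em_in into @em_tree.  In case there is an overlapping range, handle
 * the -EEXIST by either:
 * a) Returning the existing extent in @em_in if @start is within the
 *    existing em.
 * b) Merge the existing extent with @em_in passed in.
 *
 * Return 0 on success, otherwise -EEXIST.
 */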
int btrfs_add_extent_mapping(struct btrfs_fs_info *fs_info,
			     struct extent_map_tree *em_tree,
			     struct extent_map **em_in, u64 start, u64 len)
{
	int ret;
	struct extent_map *em = *em_in;

	ret = add_extent_mapping(em_tree, em, 0);
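	/* it is possible that someone inserted the extent into the tree
	 * while we had the lock dropped.  It is also possible that
	 * an overlapping map exists in the tree
	 */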
	if (ret == -EEXIST) {
		struct extent_map *existing;

		ret = 0;

		existing = search_extent_mapping(em_tree, start, len);

		trace_btrfs_handle_em_exist(fs_info, existing, em, start, len);
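
		/*
		 * existing will always be non-NULL, since there must be
		 * extent causing the -EEXIST.
		 */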
		if (start >= existing->start &&
		    start < extent_map_end(existing)) {
			free_extent_map(em);
			*em_in = existing;
			ret = 0;
		} else {
			u64 orig_start = em->start;
			u64 orig_len = em->len;
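
			/*
			 * The existing extent map is the one nearest to
			 * the [start, start + len) range which overlaps
			 */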
			ret = merge_extent_mapping(em_tree, existing,
						   em, start);
			if (ret) {
				free_extent_map(em);
				*em_in = NULL;
				WARN_ONCE(ret,
"unexpected error %d: merge existing(start %llu len %llu) with em(start %llu len %llu)\n",
					  ret, existing->start, existing->len,
					  orig_start, orig_len);
			}
			free_extent_map(existing);
		}
	}

	ASSERT(ret == 0 || ret == -EEXIST);
	return ret;
}