// SPDX-License-Identifier: GPL-2.0

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include "ctree.h"
#include "volumes.h"
#include "extent_map.h"
#include "compression.h"

static struct kmem_cache *extent_map_cache;

int __init extent_map_init(void)
{
	extent_map_cache = kmem_cache_create("btrfs_extent_map",
			sizeof(struct extent_map), 0,
			SLAB_MEM_SPREAD, NULL);
	if (!extent_map_cache)
		return -ENOMEM;
	return 0;
}

void __cold extent_map_exit(void)
{
	kmem_cache_destroy(extent_map_cache);
}

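/*
 * Initialize an extent map tree: empty cached rbtree, empty list of
 * modified extents and the rwlock that protects both.
 */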
void extent_map_tree_init(struct extent_map_tree *tree)
{
	tree->map = RB_ROOT_CACHED;
	INIT_LIST_HEAD(&tree->modified_extents);
	rwlock_init(&tree->lock);
}

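/*
 * Allocate a new, empty extent_map structure (GFP_NOFS).  The map is not
 * linked into any tree and starts with a single reference held by the
 * caller.  Returns NULL if the allocation fails.
 */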
struct extent_map *alloc_extent_map(void)
{
	struct extent_map *em;
	em = kmem_cache_zalloc(extent_map_cache, GFP_NOFS);
	if (!em)
		return NULL;
	RB_CLEAR_NODE(&em->rb_node);
	em->flags = 0;
	em->compress_type = BTRFS_COMPRESS_NONE;
	em->generation = 0;
	refcount_set(&em->refs, 1);
	INIT_LIST_HEAD(&em->list);
	return em;
}

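/*
 * Drop a reference on @em and free it when the reference count hits zero.
 * At that point the map must no longer be in a tree or on a modified
 * extents list.  Passing NULL is a no-op.
 */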
void free_extent_map(struct extent_map *em)
{
	if (!em)
		return;
	WARN_ON(refcount_read(&em->refs) == 0);
	if (refcount_dec_and_test(&em->refs)) {
		WARN_ON(extent_map_in_tree(em));
		WARN_ON(!list_empty(&em->list));
		if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags))
			kfree(em->map_lookup);
		kmem_cache_free(extent_map_cache, em);
	}
}

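/* Exclusive end of the [start, start + len) range, saturated at U64_MAX. */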
static u64 range_end(u64 start, u64 len)
{
	if (start + len < start)
		return (u64)-1;
	return start + len;
}

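/*
 * Insert @em into the cached rbtree @root, keyed by em->start.  Returns
 * -EEXIST without inserting if the new map overlaps any map already in
 * the tree.
 */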
static int tree_insert(struct rb_root_cached *root, struct extent_map *em)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct extent_map *entry = NULL;
	struct rb_node *orig_parent = NULL;
	u64 end = range_end(em->start, em->len);
	bool leftmost = true;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct extent_map, rb_node);

		if (em->start < entry->start) {
			p = &(*p)->rb_left;
		} else if (em->start >= extent_map_end(entry)) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			return -EEXIST;
		}
	}

	orig_parent = parent;
	while (parent && em->start >= extent_map_end(entry)) {
		parent = rb_next(parent);
		entry = rb_entry(parent, struct extent_map, rb_node);
	}
	if (parent)
		if (end > entry->start && em->start < extent_map_end(entry))
			return -EEXIST;

	parent = orig_parent;
	entry = rb_entry(parent, struct extent_map, rb_node);
	while (parent && em->start < entry->start) {
		parent = rb_prev(parent);
		entry = rb_entry(parent, struct extent_map, rb_node);
	}
	if (parent)
		if (end > entry->start && em->start < extent_map_end(entry))
			return -EEXIST;

	rb_link_node(&em->rb_node, orig_parent, p);
	rb_insert_color_cached(&em->rb_node, root, leftmost);
	return 0;
}

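/*
 * Search the tree for an extent map containing @offset.  Returns the
 * matching rb_node, or NULL if there is none, in which case *prev_ret and
 * *next_ret (when requested) are set to the nearest neighbouring nodes
 * around @offset.
 */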
static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
				     struct rb_node **prev_ret,
				     struct rb_node **next_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct extent_map *entry;
	struct extent_map *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct extent_map, rb_node);
		prev = n;
		prev_entry = entry;

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset >= extent_map_end(entry))
			n = n->rb_right;
		else
			return n;
	}

	if (prev_ret) {
		orig_prev = prev;
		while (prev && offset >= extent_map_end(prev_entry)) {
			prev = rb_next(prev);
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
		}
		*prev_ret = prev;
		prev = orig_prev;
	}

	if (next_ret) {
		prev_entry = rb_entry(prev, struct extent_map, rb_node);
		while (prev && offset < prev_entry->start) {
			prev = rb_prev(prev);
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
		}
		*next_ret = prev;
	}
	return NULL;
}

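/*
 * Return 1 if @prev and @next are adjacent in the file and refer to
 * contiguous (or equally special) block ranges, so that they can safely be
 * merged into a single extent map.
 */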
static int mergable_maps(struct extent_map *prev, struct extent_map *next)
{
	if (test_bit(EXTENT_FLAG_PINNED, &prev->flags))
		return 0;

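	/*
	 * Never merge compressed extents; their exact (compressed) size must
	 * be preserved.
	 */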
	if (test_bit(EXTENT_FLAG_COMPRESSED, &prev->flags))
		return 0;

	if (test_bit(EXTENT_FLAG_LOGGING, &prev->flags) ||
	    test_bit(EXTENT_FLAG_LOGGING, &next->flags))
		return 0;

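	/*
	 * Don't merge maps that sit on the modified extents list: they still
	 * have to be logged, and merging would change what ends up in the log.
	 */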
	if (!list_empty(&prev->list) || !list_empty(&next->list))
		return 0;

	ASSERT(next->block_start != EXTENT_MAP_DELALLOC &&
	       prev->block_start != EXTENT_MAP_DELALLOC);

	if (prev->map_lookup || next->map_lookup)
		ASSERT(test_bit(EXTENT_FLAG_FS_MAPPING, &prev->flags) &&
		       test_bit(EXTENT_FLAG_FS_MAPPING, &next->flags));

	if (extent_map_end(prev) == next->start &&
	    prev->flags == next->flags &&
	    prev->map_lookup == next->map_lookup &&
	    ((next->block_start == EXTENT_MAP_HOLE &&
	      prev->block_start == EXTENT_MAP_HOLE) ||
	     (next->block_start == EXTENT_MAP_INLINE &&
	      prev->block_start == EXTENT_MAP_INLINE) ||
	     (next->block_start < EXTENT_MAP_LAST_BYTE - 1 &&
	      next->block_start == extent_map_block_end(prev)))) {
		return 1;
	}
	return 0;
}

static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
{
	struct extent_map *merge = NULL;
	struct rb_node *rb;

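	/*
	 * If anyone other than the tree and our caller holds a reference,
	 * merging would rewrite fields of @em under them, so refuse to merge
	 * in that case.
	 */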
	if (refcount_read(&em->refs) > 2)
		return;

	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		if (rb)
			merge = rb_entry(rb, struct extent_map, rb_node);
		if (rb && mergable_maps(merge, em)) {
			em->start = merge->start;
			em->orig_start = merge->orig_start;
			em->len += merge->len;
			em->block_len += merge->block_len;
			em->block_start = merge->block_start;
			em->mod_len = (em->mod_len + em->mod_start) - merge->mod_start;
			em->mod_start = merge->mod_start;
			em->generation = max(em->generation, merge->generation);

			rb_erase_cached(&merge->rb_node, &tree->map);
			RB_CLEAR_NODE(&merge->rb_node);
			free_extent_map(merge);
		}
	}

	rb = rb_next(&em->rb_node);
	if (rb)
		merge = rb_entry(rb, struct extent_map, rb_node);
	if (rb && mergable_maps(em, merge)) {
		em->len += merge->len;
		em->block_len += merge->block_len;
		rb_erase_cached(&merge->rb_node, &tree->map);
		RB_CLEAR_NODE(&merge->rb_node);
		em->mod_len = (merge->mod_start + merge->mod_len) - em->mod_start;
		em->generation = max(em->generation, merge->generation);
		free_extent_map(merge);
	}
}

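/*
 * Unpin the extent map covering [start, start + len) after the extent has
 * been written out: record @gen as its generation, clear the pinned (and,
 * for prealloc, the filling) flag, and try to merge it with its neighbours.
 */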
int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len,
		       u64 gen)
{
	int ret = 0;
	struct extent_map *em;
	bool prealloc = false;

	write_lock(&tree->lock);
	em = lookup_extent_mapping(tree, start, len);

	WARN_ON(!em || em->start != start);

	if (!em)
		goto out;

	em->generation = gen;
	clear_bit(EXTENT_FLAG_PINNED, &em->flags);
	em->mod_start = em->start;
	em->mod_len = em->len;

	if (test_bit(EXTENT_FLAG_FILLING, &em->flags)) {
		prealloc = true;
		clear_bit(EXTENT_FLAG_FILLING, &em->flags);
	}

	try_merge_map(tree, em);

	if (prealloc) {
		em->mod_start = em->start;
		em->mod_len = em->len;
	}

	free_extent_map(em);
out:
	write_unlock(&tree->lock);
	return ret;
}

void clear_em_logging(struct extent_map_tree *tree, struct extent_map *em)
{
	clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
	if (extent_map_in_tree(em))
		try_merge_map(tree, em);
}

static inline void setup_extent_mapping(struct extent_map_tree *tree,
					struct extent_map *em,
					int modified)
{
	refcount_inc(&em->refs);
	em->mod_start = em->start;
	em->mod_len = em->len;

	if (modified)
		list_move(&em->list, &tree->modified_extents);
	else
		try_merge_map(tree, em);
}

static void extent_map_device_set_bits(struct extent_map *em, unsigned bits)
{
	struct map_lookup *map = em->map_lookup;
	u64 stripe_size = em->orig_block_len;
	int i;

	for (i = 0; i < map->num_stripes; i++) {
		struct btrfs_bio_stripe *stripe = &map->stripes[i];
		struct btrfs_device *device = stripe->dev;

		set_extent_bits_nowait(&device->alloc_state, stripe->physical,
				       stripe->physical + stripe_size - 1, bits);
	}
}

static void extent_map_device_clear_bits(struct extent_map *em, unsigned bits)
{
	struct map_lookup *map = em->map_lookup;
	u64 stripe_size = em->orig_block_len;
	int i;

	for (i = 0; i < map->num_stripes; i++) {
		struct btrfs_bio_stripe *stripe = &map->stripes[i];
		struct btrfs_device *device = stripe->dev;

		__clear_extent_bit(&device->alloc_state, stripe->physical,
				   stripe->physical + stripe_size - 1, bits,
				   0, 0, NULL, GFP_NOWAIT, NULL);
	}
}

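/*
 * Insert @em into @tree while holding the tree lock for writing.  An extra
 * reference is taken on @em; unless @modified asks for it to stay on the
 * modified extents list, a merge with neighbouring maps is attempted.
 * Returns -EEXIST if the range overlaps an existing mapping.
 */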
int add_extent_mapping(struct extent_map_tree *tree,
		       struct extent_map *em, int modified)
{
	int ret = 0;

	lockdep_assert_held_write(&tree->lock);

	ret = tree_insert(&tree->map, em);
	if (ret)
		goto out;

	setup_extent_mapping(tree, em, modified);
	if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags)) {
		extent_map_device_set_bits(em, CHUNK_ALLOCATED);
		extent_map_device_clear_bits(em, CHUNK_TRIMMED);
	}
out:
	return ret;
}

static struct extent_map *
__lookup_extent_mapping(struct extent_map_tree *tree,
			u64 start, u64 len, int strict)
{
	struct extent_map *em;
	struct rb_node *rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *next = NULL;
	u64 end = range_end(start, len);

	rb_node = __tree_search(&tree->map.rb_root, start, &prev, &next);
	if (!rb_node) {
		if (prev)
			rb_node = prev;
		else if (next)
			rb_node = next;
		else
			return NULL;
	}

	em = rb_entry(rb_node, struct extent_map, rb_node);

	if (strict && !(end > em->start && start < extent_map_end(em)))
		return NULL;

	refcount_inc(&em->refs);
	return em;
}

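/*
 * Look up the first extent map in @tree that intersects [start, start + len).
 * Returns NULL if there is no such map; otherwise a reference is taken on the
 * returned map and the caller must drop it with free_extent_map().
 */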
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	return __lookup_extent_mapping(tree, start, len, 1);
}

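/*
 * Like lookup_extent_mapping(), but if no extent map intersects the range,
 * a nearby map may be returned instead of NULL.  A reference is taken on
 * the returned map.
 */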
struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len)
{
	return __lookup_extent_mapping(tree, start, len, 0);
}

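/*
 * Remove @em from @tree.  No reference is dropped and the caller is
 * responsible for making sure the range is no longer in use.
 */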
void remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
	WARN_ON(test_bit(EXTENT_FLAG_PINNED, &em->flags));
	rb_erase_cached(&em->rb_node, &tree->map);
	if (!test_bit(EXTENT_FLAG_LOGGING, &em->flags))
		list_del_init(&em->list);
	if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags))
		extent_map_device_clear_bits(em, CHUNK_ALLOCATED);
	RB_CLEAR_NODE(&em->rb_node);
}

void replace_extent_mapping(struct extent_map_tree *tree,
			    struct extent_map *cur,
			    struct extent_map *new,
			    int modified)
{
	WARN_ON(test_bit(EXTENT_FLAG_PINNED, &cur->flags));
	ASSERT(extent_map_in_tree(cur));
	if (!test_bit(EXTENT_FLAG_LOGGING, &cur->flags))
		list_del_init(&cur->list);
	rb_replace_node_cached(&cur->rb_node, &new->rb_node, &tree->map);
	RB_CLEAR_NODE(&cur->rb_node);

	setup_extent_mapping(tree, new, modified);
}

static struct extent_map *next_extent_map(struct extent_map *em)
{
	struct rb_node *next;

	next = rb_next(&em->rb_node);
	if (!next)
		return NULL;
	return container_of(next, struct extent_map, rb_node);
}

static struct extent_map *prev_extent_map(struct extent_map *em)
{
	struct rb_node *prev;

	prev = rb_prev(&em->rb_node);
	if (!prev)
		return NULL;
	return container_of(prev, struct extent_map, rb_node);
}

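/*
 * Helper for btrfs_get_extent(): given @existing, the map in the tree
 * nearest to @map_start, trim @em to the free gap between its neighbours
 * and insert the trimmed map into @em_tree.
 */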
static noinline int merge_extent_mapping(struct extent_map_tree *em_tree,
					 struct extent_map *existing,
					 struct extent_map *em,
					 u64 map_start)
{
	struct extent_map *prev;
	struct extent_map *next;
	u64 start;
	u64 end;
	u64 start_diff;

	BUG_ON(map_start < em->start || map_start >= extent_map_end(em));

	if (existing->start > map_start) {
		next = existing;
		prev = prev_extent_map(next);
	} else {
		prev = existing;
		next = next_extent_map(prev);
	}

	start = prev ? extent_map_end(prev) : em->start;
	start = max_t(u64, start, em->start);
	end = next ? next->start : extent_map_end(em);
	end = min_t(u64, end, extent_map_end(em));
	start_diff = start - em->start;
	em->start = start;
	em->len = end - start;
	if (em->block_start < EXTENT_MAP_LAST_BYTE &&
	    !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
		em->block_start += start_diff;
		em->block_len = em->len;
	}
	return add_extent_mapping(em_tree, em, 0);
}

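/*
 * Insert *em_in into @em_tree.  If -EEXIST is hit because an existing map
 * overlaps [start, start + len), either hand back the existing map through
 * @em_in (when it already covers @start) or trim the new map to the free
 * space next to the existing one and insert that instead.
 */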
int btrfs_add_extent_mapping(struct btrfs_fs_info *fs_info,
			     struct extent_map_tree *em_tree,
			     struct extent_map **em_in, u64 start, u64 len)
{
	int ret;
	struct extent_map *em = *em_in;

	ret = add_extent_mapping(em_tree, em, 0);
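	/*
	 * -EEXIST means some extent map overlapping the range is already in
	 * the tree (for example one inserted while the tree lock was
	 * dropped), so fall back to working with what is already there.
	 */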
	if (ret == -EEXIST) {
		struct extent_map *existing;

		ret = 0;

		existing = search_extent_mapping(em_tree, start, len);

		trace_btrfs_handle_em_exist(fs_info, existing, em, start, len);

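		/*
		 * The lookup cannot fail: some map caused the -EEXIST.  If it
		 * already covers @start, hand it back to the caller as is.
		 */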
		if (start >= existing->start &&
		    start < extent_map_end(existing)) {
			free_extent_map(em);
			*em_in = existing;
			ret = 0;
		} else {
			u64 orig_start = em->start;
			u64 orig_len = em->len;

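			/*
			 * The map found by the search does not cover @start;
			 * trim the new map so it fits next to what is already
			 * in the tree and insert the trimmed map.
			 */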
			ret = merge_extent_mapping(em_tree, existing,
						   em, start);
			if (ret) {
				free_extent_map(em);
				*em_in = NULL;
				WARN_ONCE(ret,
"unexpected error %d: merge existing(start %llu len %llu) with em(start %llu len %llu)\n",
					  ret, existing->start, existing->len,
					  orig_start, orig_len);
			}
			free_extent_map(existing);
		}
	}

	ASSERT(ret == 0 || ret == -EEXIST);
	return ret;
}