// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/kthread.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/log2.h>
#include <crypto/hash.h>
#include "misc.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "ordered-data.h"
#include "compression.h"
#include "extent_io.h"
#include "extent_map.h"
#include "subpage.h"
#include "zoned.h"

static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };

const char* btrfs_compress_type2str(enum btrfs_compression_type type)
{
	switch (type) {
	case BTRFS_COMPRESS_ZLIB:
	case BTRFS_COMPRESS_LZO:
	case BTRFS_COMPRESS_ZSTD:
	case BTRFS_COMPRESS_NONE:
		return btrfs_compress_types[type];
	default:
		break;
	}

	return NULL;
}

bool btrfs_compress_is_valid_type(const char *str, size_t len)
{
	int i;

	for (i = 1; i < ARRAY_SIZE(btrfs_compress_types); i++) {
		size_t comp_len = strlen(btrfs_compress_types[i]);

		if (len < comp_len)
			continue;

		if (!strncmp(btrfs_compress_types[i], str, comp_len))
			return true;
	}
	return false;
}
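/*
 * Illustrative sketch (not in the original file): a mount option like
 * "zstd:3" passes btrfs_compress_is_valid_type("zstd:3", 6) because only
 * strlen("zstd") bytes are compared, so the ":3" level suffix is ignored
 * here and handled later by btrfs_compress_str2level().  Going the other
 * way, btrfs_compress_type2str(BTRFS_COMPRESS_ZSTD) returns "zstd" and
 * BTRFS_COMPRESS_NONE maps to the empty string.
 */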

static int compression_compress_pages(int type, struct list_head *ws,
				      struct address_space *mapping, u64 start,
				      struct page **pages,
				      unsigned long *out_pages,
				      unsigned long *total_in,
				      unsigned long *total_out)
{
	switch (type) {
	case BTRFS_COMPRESS_ZLIB:
		return zlib_compress_pages(ws, mapping, start, pages,
				out_pages, total_in, total_out);
	case BTRFS_COMPRESS_LZO:
		return lzo_compress_pages(ws, mapping, start, pages,
				out_pages, total_in, total_out);
	case BTRFS_COMPRESS_ZSTD:
		return zstd_compress_pages(ws, mapping, start, pages,
				out_pages, total_in, total_out);
	case BTRFS_COMPRESS_NONE:
	default:
		/*
		 * This can happen when compression races with a remount that
		 * sets the compression type to 'no compress', so the caller
		 * does not hold a valid type anymore.
		 *
		 * In that case just skip the compression and pretend the
		 * range is not compressible by returning -E2BIG, the data
		 * will then be written out uncompressed.
		 */
		*out_pages = 0;
		return -E2BIG;
	}
}

static int compression_decompress_bio(int type, struct list_head *ws,
				      struct compressed_bio *cb)
{
	switch (type) {
	case BTRFS_COMPRESS_ZLIB: return zlib_decompress_bio(ws, cb);
	case BTRFS_COMPRESS_LZO:  return lzo_decompress_bio(ws, cb);
	case BTRFS_COMPRESS_ZSTD: return zstd_decompress_bio(ws, cb);
	case BTRFS_COMPRESS_NONE:
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}

static int compression_decompress(int type, struct list_head *ws,
				  unsigned char *data_in, struct page *dest_page,
				  unsigned long start_byte, size_t srclen,
				  size_t destlen)
{
	switch (type) {
	case BTRFS_COMPRESS_ZLIB: return zlib_decompress(ws, data_in, dest_page,
						start_byte, srclen, destlen);
	case BTRFS_COMPRESS_LZO:  return lzo_decompress(ws, data_in, dest_page,
						start_byte, srclen, destlen);
	case BTRFS_COMPRESS_ZSTD: return zstd_decompress(ws, data_in, dest_page,
						start_byte, srclen, destlen);
	case BTRFS_COMPRESS_NONE:
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}

static int btrfs_decompress_bio(struct compressed_bio *cb);

static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
				      unsigned long disk_size)
{
	return sizeof(struct compressed_bio) +
		(DIV_ROUND_UP(disk_size, fs_info->sectorsize)) * fs_info->csum_size;
}
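/*
 * Worked example (illustrative, assuming 4K sectors and 4-byte crc32c
 * checksums): a 128K compressed extent needs DIV_ROUND_UP(131072, 4096) = 32
 * sectors, so compressed_bio_size() returns sizeof(struct compressed_bio) +
 * 32 * 4 bytes.  The extra bytes are the cb->sums area that
 * check_compressed_csum() walks, one checksum per compressed sector.
 */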

static int check_compressed_csum(struct btrfs_inode *inode, struct bio *bio,
				 u64 disk_start)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
	const u32 csum_size = fs_info->csum_size;
	const u32 sectorsize = fs_info->sectorsize;
	struct page *page;
	unsigned int i;
	char *kaddr;
	u8 csum[BTRFS_CSUM_SIZE];
	struct compressed_bio *cb = bio->bi_private;
	u8 *cb_sum = cb->sums;

	if (!fs_info->csum_root || (inode->flags & BTRFS_INODE_NODATASUM))
		return 0;

	shash->tfm = fs_info->csum_shash;

	for (i = 0; i < cb->nr_pages; i++) {
		u32 pg_offset;
		u32 bytes_left = PAGE_SIZE;
		page = cb->compressed_pages[i];

		/* Determine the remaining bytes inside the page first */
		if (i == cb->nr_pages - 1)
			bytes_left = cb->compressed_len - i * PAGE_SIZE;

		/* Hash through the page sector by sector */
		for (pg_offset = 0; pg_offset < bytes_left;
		     pg_offset += sectorsize) {
			kaddr = kmap_atomic(page);
			crypto_shash_digest(shash, kaddr + pg_offset,
					    sectorsize, csum);
			kunmap_atomic(kaddr);

			if (memcmp(&csum, cb_sum, csum_size) != 0) {
				btrfs_print_data_csum_error(inode, disk_start,
						csum, cb_sum, cb->mirror_num);
				if (btrfs_bio(bio)->device)
					btrfs_dev_stat_inc_and_print(
						btrfs_bio(bio)->device,
						BTRFS_DEV_STAT_CORRUPTION_ERRS);
				return -EIO;
			}
			cb_sum += csum_size;
			disk_start += sectorsize;
		}
	}
	return 0;
}

/*
 * Reduce bio and io accounting for a compressed_bio with its corresponding
 * bio.
 *
 * @cb and @bio belong to the same compressed_bio; return true if this was
 * the last pending bio/io for the compressed_bio, false otherwise.
 */
static bool dec_and_test_compressed_bio(struct compressed_bio *cb, struct bio *bio)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(cb->inode->i_sb);
	unsigned int bi_size = 0;
	bool last_io = false;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	/*
	 * At endio time, bi_iter.bi_size doesn't represent the real bio size.
	 * Thus here we have to iterate through all segments to grab the
	 * correct bio size.
	 */
	bio_for_each_segment_all(bvec, bio, iter_all)
		bi_size += bvec->bv_len;

	if (bio->bi_status)
		cb->errors = 1;

	ASSERT(bi_size && bi_size <= cb->compressed_len);
	last_io = refcount_sub_and_test(bi_size >> fs_info->sectorsize_bits,
					&cb->pending_sectors);
	/*
	 * Wake up the possible error handler after all other operations on
	 * @cb have finished, otherwise we can race with the
	 * wait_var_event(cb, ...) calls in the submission error paths.
	 */
	wake_up_var(cb);

	return last_io;
}
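/*
 * Worked example (illustrative): with 4K sectors, a cb covering 12K of
 * compressed data starts with pending_sectors == 3.  The endio for an 8K bio
 * subtracts 8K >> sectorsize_bits == 2 and returns false; the endio for the
 * remaining 4K bio subtracts 1, hits zero and returns true, so only the last
 * completion runs the finish path.
 */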

static void finish_compressed_bio_read(struct compressed_bio *cb, struct bio *bio)
{
	unsigned int index;
	struct page *page;

	/* Release the compressed pages */
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		put_page(page);
	}

	/* Do io completion on the original bio */
	if (cb->errors) {
		bio_io_error(cb->orig_bio);
	} else {
		struct bio_vec *bvec;
		struct bvec_iter_all iter_all;

		ASSERT(bio);
		ASSERT(!bio->bi_status);
		/*
		 * We have verified the checksum already, set page checked so
		 * the end_io handlers know about it
		 */
		ASSERT(!bio_flagged(bio, BIO_CLONED));
		bio_for_each_segment_all(bvec, cb->orig_bio, iter_all) {
			u64 bvec_start = page_offset(bvec->bv_page) +
					 bvec->bv_offset;

			btrfs_page_set_checked(btrfs_sb(cb->inode->i_sb),
					       bvec->bv_page, bvec_start,
					       bvec->bv_len);
		}

		bio_endio(cb->orig_bio);
	}

	/* Finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
}

/*
 * When we finish reading compressed pages from the disk, we
 * decompress them and then run the bio end_io routines on the
 * decompressed pages (in the inode address space).
 *
 * This allows the checksumming and other IO error handling routines
 * to work normally.
 *
 * The compressed pages are freed here, and it must be run
 * in process context.
 */
static void end_compressed_bio_read(struct bio *bio)
{
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	unsigned int mirror = btrfs_bio(bio)->mirror_num;
	int ret = 0;

	if (!dec_and_test_compressed_bio(cb, bio))
		goto out;

	/*
	 * Record the correct mirror_num in cb->orig_bio so that
	 * read-repair can work properly.
	 */
	btrfs_bio(cb->orig_bio)->mirror_num = mirror;
	cb->mirror_num = mirror;

	/*
	 * Some IO in this cb have failed, just skip checksum as there
	 * is no way it could be correct.
	 */
	if (cb->errors == 1)
		goto csum_failed;

	inode = cb->inode;
	ret = check_compressed_csum(BTRFS_I(inode), bio,
				    bio->bi_iter.bi_sector << 9);
	if (ret)
		goto csum_failed;

	/*
	 * Ok, we're the last bio for this extent, let's start the
	 * decompression.
	 */
	ret = btrfs_decompress_bio(cb);

csum_failed:
	if (ret)
		cb->errors = 1;
	finish_compressed_bio_read(cb, bio);
out:
	bio_put(bio);
}

/*
 * Clear the writeback bits on all of the file
 * pages for a compressed write
 */
static noinline void end_compressed_writeback(struct inode *inode,
					      const struct compressed_bio *cb)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	unsigned long index = cb->start >> PAGE_SHIFT;
	unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
	struct page *pages[16];
	unsigned long nr_pages = end_index - index + 1;
	int i;
	int ret;

	if (cb->errors)
		mapping_set_error(inode->i_mapping, -EIO);

	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
				     min_t(unsigned long,
				     nr_pages, ARRAY_SIZE(pages)), pages);
		if (ret == 0) {
			nr_pages -= 1;
			index += 1;
			continue;
		}
		for (i = 0; i < ret; i++) {
			if (cb->errors)
				SetPageError(pages[i]);
			btrfs_page_clamp_clear_writeback(fs_info, pages[i],
							 cb->start, cb->len);
			put_page(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
	}
	/* The inode may be gone now */
}

static void finish_compressed_bio_write(struct compressed_bio *cb)
{
	struct inode *inode = cb->inode;
	unsigned int index;

	/*
	 * Ok, we're the last bio for this extent, step one is to call back
	 * into the FS and do all the end_io operations.
	 */
	btrfs_writepage_endio_finish_ordered(BTRFS_I(inode), NULL,
			cb->start, cb->start + cb->len - 1,
			!cb->errors);

	end_compressed_writeback(inode, cb);
	/* Note, our inode could be gone now */

	/*
	 * Release the compressed pages, these came from alloc_page and
	 * are not attached to the inode at all.
	 */
	for (index = 0; index < cb->nr_pages; index++) {
		struct page *page = cb->compressed_pages[index];

		page->mapping = NULL;
		put_page(page);
	}

	/* Finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
}

/*
 * Do the cleanup once all the compressed pages hit the disk.  This will clear
 * writeback on the file pages and free the compressed pages.
 *
 * This also calls the writeback end hooks for the file pages so that metadata
 * and checksums can be updated in the file.
 */
static void end_compressed_bio_write(struct bio *bio)
{
	struct compressed_bio *cb = bio->bi_private;

	if (!dec_and_test_compressed_bio(cb, bio))
		goto out;

	btrfs_record_physical_zoned(cb->inode, cb->start, bio);

	finish_compressed_bio_write(cb);
out:
	bio_put(bio);
}

static blk_status_t submit_compressed_bio(struct btrfs_fs_info *fs_info,
					  struct compressed_bio *cb,
					  struct bio *bio, int mirror_num)
{
	blk_status_t ret;

	ASSERT(bio->bi_iter.bi_size);
	ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
	if (ret)
		return ret;
	ret = btrfs_map_bio(fs_info, bio, mirror_num);
	return ret;
}

/*
 * Allocate a compressed_bio, which will be used to read/write on-disk
 * (aka, compressed) data.
 *
 * @cb:                 The compressed_bio structure, which records all the needed
 *                      information to bind the compressed data to the uncompressed
 *                      page cache.
 * @disk_bytenr:        The logical bytenr where the compressed data will be read
 *                      from or written to.
 * @endio_func:         The endio function to call after the IO for compressed data
 *                      is finished.
 * @next_stripe_start:  Return value of logical bytenr of where next stripe starts.
 *                      Let the caller know to only fill the bio up to the stripe
 *                      boundary.
 */
static struct bio *alloc_compressed_bio(struct compressed_bio *cb, u64 disk_bytenr,
					unsigned int opf, bio_end_io_t endio_func,
					u64 *next_stripe_start)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(cb->inode->i_sb);
	struct btrfs_io_geometry geom;
	struct extent_map *em;
	struct bio *bio;
	int ret;

	bio = btrfs_bio_alloc(BIO_MAX_VECS);

	bio->bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
	bio->bi_opf = opf;
	bio->bi_private = cb;
	bio->bi_end_io = endio_func;

	em = btrfs_get_chunk_map(fs_info, disk_bytenr, fs_info->sectorsize);
	if (IS_ERR(em)) {
		bio_put(bio);
		return ERR_CAST(em);
	}

	if (bio_op(bio) == REQ_OP_ZONE_APPEND)
		bio_set_dev(bio, em->map_lookup->stripes[0].dev->bdev);

	ret = btrfs_get_io_geometry(fs_info, em, btrfs_op(bio), disk_bytenr, &geom);
	free_extent_map(em);
	if (ret < 0) {
		bio_put(bio);
		return ERR_PTR(ret);
	}
	*next_stripe_start = disk_bytenr + geom.len;

	return bio;
}

/*
 * Worker function to build and submit bios for previously compressed pages.
 * The corresponding pages in the inode should be marked for writeback
 * and the compressed pages should have a reference on them for dropping
 * when the IO is complete.
 *
 * This also checksums the file bytes and gets things ready for
 * the end io hooks.
 */
blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
				 unsigned int len, u64 disk_start,
				 unsigned int compressed_len,
				 struct page **compressed_pages,
				 unsigned int nr_pages,
				 unsigned int write_flags,
				 struct cgroup_subsys_state *blkcg_css)
{
510 struct btrfs_fs_info *fs_info = inode->root->fs_info;
511 struct bio *bio = NULL;
512 struct compressed_bio *cb;
513 u64 cur_disk_bytenr = disk_start;
514 u64 next_stripe_start;
515 blk_status_t ret;
516 int skip_sum = inode->flags & BTRFS_INODE_NODATASUM;
517 const bool use_append = btrfs_use_zone_append(inode, disk_start);
518 const unsigned int bio_op = use_append ? REQ_OP_ZONE_APPEND : REQ_OP_WRITE;
519
520 ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
521 IS_ALIGNED(len, fs_info->sectorsize));
522 cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
523 if (!cb)
524 return BLK_STS_RESOURCE;
525 refcount_set(&cb->pending_sectors, compressed_len >> fs_info->sectorsize_bits);
526 cb->errors = 0;
527 cb->inode = &inode->vfs_inode;
528 cb->start = start;
529 cb->len = len;
530 cb->mirror_num = 0;
531 cb->compressed_pages = compressed_pages;
532 cb->compressed_len = compressed_len;
533 cb->orig_bio = NULL;
534 cb->nr_pages = nr_pages;
535
536 while (cur_disk_bytenr < disk_start + compressed_len) {
537 u64 offset = cur_disk_bytenr - disk_start;
538 unsigned int index = offset >> PAGE_SHIFT;
539 unsigned int real_size;
540 unsigned int added;
541 struct page *page = compressed_pages[index];
		bool submit = false;

		/* Allocate a new bio if submitted or not yet allocated */
		if (!bio) {
			bio = alloc_compressed_bio(cb, cur_disk_bytenr,
				bio_op | write_flags, end_compressed_bio_write,
				&next_stripe_start);
			if (IS_ERR(bio)) {
				ret = errno_to_blk_status(PTR_ERR(bio));
				bio = NULL;
				goto finish_cb;
			}
		}

		/*
		 * We should never reach next_stripe_start, as we submit the
		 * bio immediately when we hit the stripe boundary.
		 */
		ASSERT(cur_disk_bytenr != next_stripe_start);

		/*
		 * We have various limits on the real write size:
		 * - stripe boundary
		 * - page boundary
		 * - compressed length boundary
		 */
		real_size = min_t(u64, U32_MAX, next_stripe_start - cur_disk_bytenr);
		real_size = min_t(u64, real_size, PAGE_SIZE - offset_in_page(offset));
		real_size = min_t(u64, real_size, compressed_len - offset);
		ASSERT(IS_ALIGNED(real_size, fs_info->sectorsize));
571
572 if (use_append)
573 added = bio_add_zone_append_page(bio, page, real_size,
574 offset_in_page(offset));
575 else
576 added = bio_add_page(bio, page, real_size,
577 offset_in_page(offset));
578
579 if (added == 0)
580 submit = true;
581
582 cur_disk_bytenr += added;

		/* Reached stripe boundary */
		if (cur_disk_bytenr == next_stripe_start)
			submit = true;

		/* Finished the range */
		if (cur_disk_bytenr == disk_start + compressed_len)
			submit = true;
590
591 if (submit) {
592 if (!skip_sum) {
593 ret = btrfs_csum_one_bio(inode, bio, start, 1);
594 if (ret)
595 goto finish_cb;
596 }
597
598 ret = submit_compressed_bio(fs_info, cb, bio, 0);
599 if (ret)
600 goto finish_cb;
601 bio = NULL;
602 }
603 cond_resched();
604 }
605 if (blkcg_css)
606 kthread_associate_blkcg(NULL);
607
608 return 0;

finish_cb:
	if (bio) {
		bio->bi_status = ret;
		bio_endio(bio);
	}
	/* Last byte of @cb is submitted, endio will free @cb */
	if (cur_disk_bytenr == disk_start + compressed_len)
		return ret;

	wait_var_event(cb, refcount_read(&cb->pending_sectors) ==
			   (disk_start + compressed_len - cur_disk_bytenr) >>
			   fs_info->sectorsize_bits);
	/*
	 * Even with the previous bio ended, we should still have io not yet
	 * submitted, thus need to finish manually.
	 */
	ASSERT(refcount_read(&cb->pending_sectors));
	/* Now we are the only one referring @cb, can finish it safely. */
	finish_compressed_bio_write(cb);
	return ret;
}
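/*
 * Worked example for the bio building above (illustrative numbers): say the
 * next chunk stripe starts 20K past cur_disk_bytenr, the current page has 4K
 * left after offset_in_page(offset), and 8K of compressed data remains.  The
 * three min_t() clamps give real_size = min(20K, 4K, 8K) = 4K, so at most one
 * page worth is added per iteration and the bio is submitted exactly when it
 * reaches the stripe boundary or the end of the compressed extent.
 */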

static u64 bio_end_offset(struct bio *bio)
{
	struct bio_vec *last = bio_last_bvec_all(bio);

	return page_offset(last->bv_page) + last->bv_len + last->bv_offset;
}

/*
 * Add extra pages in the same compressed file extent so that we don't need
 * to re-read the same extent again and again.
 *
 * NOTE: this won't work well for subpage, as for subpage read, we lock the
 * full page then submit bio for each compressed/regular extents.
 *
 * This means, if we have several sectors in the same page pointing to the
 * same on-disk compressed data, we will re-read the same extent many times
 * and this function can only help for the next page.
 */
static noinline int add_ra_bio_pages(struct inode *inode,
				     u64 compressed_end,
				     struct compressed_bio *cb)
{
654 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
655 unsigned long end_index;
656 u64 cur = bio_end_offset(cb->orig_bio);
657 u64 isize = i_size_read(inode);
658 int ret;
659 struct page *page;
660 struct extent_map *em;
661 struct address_space *mapping = inode->i_mapping;
662 struct extent_map_tree *em_tree;
663 struct extent_io_tree *tree;
664 int sectors_missed = 0;
665
666 em_tree = &BTRFS_I(inode)->extent_tree;
667 tree = &BTRFS_I(inode)->io_tree;
668
669 if (isize == 0)
670 return 0;

	/*
	 * For current subpage support, we only support 64K page size,
	 * which means the maximum compressed extent size (128K) is just 2x
	 * the page size.
	 * This makes readahead less effective, so here disable readahead for
	 * subpage for now, until full compressed write is supported.
	 */
	if (btrfs_sb(inode->i_sb)->sectorsize < PAGE_SIZE)
		return 0;
681
682 end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;
683
684 while (cur < compressed_end) {
685 u64 page_end;
686 u64 pg_index = cur >> PAGE_SHIFT;
687 u32 add_size;
688
689 if (pg_index > end_index)
690 break;

		page = xa_load(&mapping->i_pages, pg_index);
		if (page && !xa_is_value(page)) {
			sectors_missed += (PAGE_SIZE - offset_in_page(cur)) >>
					  fs_info->sectorsize_bits;

			/* Beyond threshold, no need to continue */
			if (sectors_missed > 4)
				break;

			/*
			 * Jump to next page start as we already have a page
			 * for the current offset.
			 */
			cur = (pg_index << PAGE_SHIFT) + PAGE_SIZE;
			continue;
		}
708
709 page = __page_cache_alloc(mapping_gfp_constraint(mapping,
710 ~__GFP_FS));
711 if (!page)
712 break;
713
714 if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
715 put_page(page);
716
717 cur = (pg_index << PAGE_SHIFT) + PAGE_SIZE;
718 continue;
719 }
720
721 ret = set_page_extent_mapped(page);
722 if (ret < 0) {
723 unlock_page(page);
724 put_page(page);
725 break;
726 }

		page_end = (pg_index << PAGE_SHIFT) + PAGE_SIZE - 1;
		lock_extent(tree, cur, page_end);
		read_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, cur, page_end + 1 - cur);
		read_unlock(&em_tree->lock);

		/*
		 * At this point, we have a locked page in the page cache for
		 * these bytes in the file.  But, we have to make sure they map
		 * to this compressed extent on disk.
		 */
		if (!em || cur < em->start ||
		    (cur + fs_info->sectorsize > extent_map_end(em)) ||
		    (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
			free_extent_map(em);
			unlock_extent(tree, cur, page_end);
			unlock_page(page);
			put_page(page);
			break;
		}
		free_extent_map(em);
749
750 if (page->index == end_index) {
751 size_t zero_offset = offset_in_page(isize);
752
753 if (zero_offset) {
754 int zeros;
755 zeros = PAGE_SIZE - zero_offset;
756 memzero_page(page, zero_offset, zeros);
757 flush_dcache_page(page);
758 }
759 }
760
761 add_size = min(em->start + em->len, page_end + 1) - cur;
762 ret = bio_add_page(cb->orig_bio, page, add_size, offset_in_page(cur));
763 if (ret != add_size) {
764 unlock_extent(tree, cur, page_end);
765 unlock_page(page);
766 put_page(page);
767 break;
768 }

		/*
		 * For subpage we also need to increase the subpage::readers
		 * count, as at endio time the reader count is decreased and
		 * the page is unlocked when it reaches zero.
		 */
		if (fs_info->sectorsize < PAGE_SIZE)
			btrfs_subpage_start_reader(fs_info, page, cur, add_size);
		put_page(page);
		cur += add_size;
778 }
779 return 0;
780}

/*
 * For a compressed read, the bio we get passed has all the inode pages
 * in it.  We don't actually do IO on those pages but allocate new ones
 * to hold the compressed pages on disk.
 *
 * bio->bi_iter.bi_sector points to the compressed extent on disk
 * bio->bi_io_vec points to all of the inode pages
 *
 * After the compressed pages are read, we copy the bytes into the
 * bio we were passed and then call the bio end_io calls.
 */
blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
				  int mirror_num, unsigned long bio_flags)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_map_tree *em_tree;
	struct compressed_bio *cb;
	unsigned int compressed_len;
	unsigned int nr_pages;
	unsigned int pg_index;
	struct bio *comp_bio = NULL;
	const u64 disk_bytenr = bio->bi_iter.bi_sector << SECTOR_SHIFT;
	u64 cur_disk_byte = disk_bytenr;
	u64 next_stripe_start;
	u64 file_offset;
	u64 em_len;
	u64 em_start;
	struct extent_map *em;
	blk_status_t ret = BLK_STS_RESOURCE;
	int faili = 0;
	u8 *sums;

	em_tree = &BTRFS_I(inode)->extent_tree;

	file_offset = bio_first_bvec_all(bio)->bv_offset +
		      page_offset(bio_first_page_all(bio));

	/* We need the actual starting offset of this extent in the file */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, file_offset, fs_info->sectorsize);
	read_unlock(&em_tree->lock);
823 if (!em)
824 return BLK_STS_IOERR;
825
826 ASSERT(em->compress_type != BTRFS_COMPRESS_NONE);
827 compressed_len = em->block_len;
828 cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
829 if (!cb)
830 goto out;
831
832 refcount_set(&cb->pending_sectors, compressed_len >> fs_info->sectorsize_bits);
833 cb->errors = 0;
834 cb->inode = inode;
835 cb->mirror_num = mirror_num;
836 sums = cb->sums;
837
838 cb->start = em->orig_start;
839 em_len = em->len;
840 em_start = em->start;
841
842 free_extent_map(em);
843 em = NULL;
844
845 cb->len = bio->bi_iter.bi_size;
846 cb->compressed_len = compressed_len;
847 cb->compress_type = extent_compress_type(bio_flags);
848 cb->orig_bio = bio;
849
850 nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
851 cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *),
852 GFP_NOFS);
853 if (!cb->compressed_pages)
854 goto fail1;
855
856 for (pg_index = 0; pg_index < nr_pages; pg_index++) {
857 cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS);
858 if (!cb->compressed_pages[pg_index]) {
859 faili = pg_index - 1;
860 ret = BLK_STS_RESOURCE;
861 goto fail2;
862 }
863 }
864 faili = nr_pages - 1;
865 cb->nr_pages = nr_pages;

	add_ra_bio_pages(inode, em_start + em_len, cb);

	/* Include any pages we added in add_ra_bio_pages */
	cb->len = bio->bi_iter.bi_size;
871
872 while (cur_disk_byte < disk_bytenr + compressed_len) {
873 u64 offset = cur_disk_byte - disk_bytenr;
874 unsigned int index = offset >> PAGE_SHIFT;
875 unsigned int real_size;
876 unsigned int added;
877 struct page *page = cb->compressed_pages[index];
878 bool submit = false;
879
880
881 if (!comp_bio) {
882 comp_bio = alloc_compressed_bio(cb, cur_disk_byte,
883 REQ_OP_READ, end_compressed_bio_read,
884 &next_stripe_start);
885 if (IS_ERR(comp_bio)) {
886 ret = errno_to_blk_status(PTR_ERR(comp_bio));
887 comp_bio = NULL;
888 goto finish_cb;
889 }

		/*
		 * We should never reach next_stripe_start, as we submit the
		 * bio immediately when we hit the stripe boundary.
		 */
		ASSERT(cur_disk_byte != next_stripe_start);
		/*
		 * We have various limits on the real read size:
		 * - stripe boundary
		 * - page boundary
		 * - compressed length boundary
		 */
		real_size = min_t(u64, U32_MAX, next_stripe_start - cur_disk_byte);
		real_size = min_t(u64, real_size, PAGE_SIZE - offset_in_page(offset));
		real_size = min_t(u64, real_size, compressed_len - offset);
		ASSERT(IS_ALIGNED(real_size, fs_info->sectorsize));

		added = bio_add_page(comp_bio, page, real_size, offset_in_page(offset));
		/*
		 * The maximum compressed extent is smaller than the bio size
		 * limit, thus bio_add_page() should always succeed.
		 */
		ASSERT(added == real_size);
		cur_disk_byte += added;

		/* Reached stripe boundary, need to submit */
		if (cur_disk_byte == next_stripe_start)
			submit = true;

		/* Has finished the range, need to submit */
		if (cur_disk_byte == disk_bytenr + compressed_len)
			submit = true;
922
923 if (submit) {
924 unsigned int nr_sectors;
925
926 ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
927 if (ret)
928 goto finish_cb;
929
930 nr_sectors = DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
931 fs_info->sectorsize);
932 sums += fs_info->csum_size * nr_sectors;
933
934 ret = submit_compressed_bio(fs_info, cb, comp_bio, mirror_num);
935 if (ret)
936 goto finish_cb;
937 comp_bio = NULL;
938 }
939 }
940 return 0;
941
942fail2:
943 while (faili >= 0) {
944 __free_page(cb->compressed_pages[faili]);
945 faili--;
946 }
947
948 kfree(cb->compressed_pages);
949fail1:
950 kfree(cb);
951out:
952 free_extent_map(em);
953 return ret;
finish_cb:
	if (comp_bio) {
		comp_bio->bi_status = ret;
		bio_endio(comp_bio);
	}
	/* Last byte of @cb is submitted, endio will free @cb */
	if (cur_disk_byte == disk_bytenr + compressed_len)
		return ret;

	wait_var_event(cb, refcount_read(&cb->pending_sectors) ==
			   (disk_bytenr + compressed_len - cur_disk_byte) >>
			   fs_info->sectorsize_bits);
	/*
	 * Even with the previous bio ended, we should still have io not yet
	 * submitted, thus need to finish @cb manually.
	 */
	ASSERT(refcount_read(&cb->pending_sectors));
	/* Now we are the only one referring @cb, can finish it safely. */
	finish_compressed_bio_read(cb, NULL);
	return ret;
}

/*
 * Heuristic uses systematic sampling to collect data from the input data
 * range, the logic can be tuned by the following constants:
 *
 * @SAMPLING_READ_SIZE - How many bytes will be copied from for each sample
 * @SAMPLING_INTERVAL  - Range from which the sample will be picked
 */
#define SAMPLING_READ_SIZE	(16)
#define SAMPLING_INTERVAL	(256)

/*
 * For statistical analysis of the input data we consider bytes that form a
 * Galois Field of 256 objects. Each object has an attribute count, ie. how
 * many times the object appeared in the sample.
 */
#define BUCKET_SIZE		(256)

/*
 * The size of the sample is based on a statistical sampling rule of thumb.
 * The common way is to perform sampling tests as long as the number of
 * elements in each cell is at least 5.
 *
 * Instead of 5, we choose 32 to obtain more accurate results.
 * If the data contain the maximum number of symbols, which is 256, we obtain
 * a sample size bound by 8192.
 *
 * For a sample of at most 8KB of data per data range: 16 consecutive bytes
 * from up to 512 locations.
 */
#define MAX_SAMPLE_SIZE		(BTRFS_MAX_UNCOMPRESSED * \
				 SAMPLING_READ_SIZE / SAMPLING_INTERVAL)

struct bucket_item {
	u32 count;
};

struct heuristic_ws {
	/* Partial copy of input data */
	u8 *sample;
	u32 sample_size;
	/* Buckets store counters for each byte value */
	struct bucket_item *bucket;
	/* Sorting buffer */
	struct bucket_item *bucket_b;
	struct list_head list;
};
1022
1023static struct workspace_manager heuristic_wsm;
1024
1025static void free_heuristic_ws(struct list_head *ws)
1026{
1027 struct heuristic_ws *workspace;
1028
1029 workspace = list_entry(ws, struct heuristic_ws, list);
1030
1031 kvfree(workspace->sample);
1032 kfree(workspace->bucket);
1033 kfree(workspace->bucket_b);
1034 kfree(workspace);
1035}
1036
1037static struct list_head *alloc_heuristic_ws(unsigned int level)
1038{
1039 struct heuristic_ws *ws;
1040
1041 ws = kzalloc(sizeof(*ws), GFP_KERNEL);
1042 if (!ws)
1043 return ERR_PTR(-ENOMEM);
1044
1045 ws->sample = kvmalloc(MAX_SAMPLE_SIZE, GFP_KERNEL);
1046 if (!ws->sample)
1047 goto fail;
1048
1049 ws->bucket = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket), GFP_KERNEL);
1050 if (!ws->bucket)
1051 goto fail;
1052
1053 ws->bucket_b = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket_b), GFP_KERNEL);
1054 if (!ws->bucket_b)
1055 goto fail;
1056
1057 INIT_LIST_HEAD(&ws->list);
1058 return &ws->list;
1059fail:
1060 free_heuristic_ws(&ws->list);
1061 return ERR_PTR(-ENOMEM);
1062}

const struct btrfs_compress_op btrfs_heuristic_compress = {
	.workspace_manager = &heuristic_wsm,
};

static const struct btrfs_compress_op * const btrfs_compress_op[] = {
	/* The heuristic is represented as compression type 0 */
	&btrfs_heuristic_compress,
	&btrfs_zlib_compress,
	&btrfs_lzo_compress,
	&btrfs_zstd_compress,
};

static struct list_head *alloc_workspace(int type, unsigned int level)
{
	switch (type) {
	case BTRFS_COMPRESS_NONE: return alloc_heuristic_ws(level);
	case BTRFS_COMPRESS_ZLIB: return zlib_alloc_workspace(level);
	case BTRFS_COMPRESS_LZO:  return lzo_alloc_workspace(level);
	case BTRFS_COMPRESS_ZSTD: return zstd_alloc_workspace(level);
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}

static void free_workspace(int type, struct list_head *ws)
{
	switch (type) {
	case BTRFS_COMPRESS_NONE: return free_heuristic_ws(ws);
	case BTRFS_COMPRESS_ZLIB: return zlib_free_workspace(ws);
	case BTRFS_COMPRESS_LZO:  return lzo_free_workspace(ws);
	case BTRFS_COMPRESS_ZSTD: return zstd_free_workspace(ws);
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}

static void btrfs_init_workspace_manager(int type)
{
	struct workspace_manager *wsm;
	struct list_head *workspace;

	wsm = btrfs_compress_op[type]->workspace_manager;
	INIT_LIST_HEAD(&wsm->idle_ws);
	spin_lock_init(&wsm->ws_lock);
	atomic_set(&wsm->total_ws, 0);
	init_waitqueue_head(&wsm->ws_wait);

	/*
	 * Preallocate one workspace for each compression type so we can
	 * guarantee forward progress in the worst case
	 */
	workspace = alloc_workspace(type, 0);
	if (IS_ERR(workspace)) {
		pr_warn(
	"BTRFS: cannot preallocate compression workspace, will try later\n");
	} else {
		atomic_set(&wsm->total_ws, 1);
		wsm->free_ws = 1;
		list_add(workspace, &wsm->idle_ws);
	}
}
1133
1134static void btrfs_cleanup_workspace_manager(int type)
1135{
1136 struct workspace_manager *wsman;
1137 struct list_head *ws;
1138
1139 wsman = btrfs_compress_op[type]->workspace_manager;
1140 while (!list_empty(&wsman->idle_ws)) {
1141 ws = wsman->idle_ws.next;
1142 list_del(ws);
1143 free_workspace(type, ws);
1144 atomic_dec(&wsman->total_ws);
1145 }
1146}

/*
 * This finds an available workspace or allocates a new one.
 * If it's not possible to allocate a new one, waits until there's one.
 * Preallocation guarantees forward progress, so we do not return errors.
 */
struct list_head *btrfs_get_workspace(int type, unsigned int level)
1155{
1156 struct workspace_manager *wsm;
1157 struct list_head *workspace;
1158 int cpus = num_online_cpus();
1159 unsigned nofs_flag;
1160 struct list_head *idle_ws;
1161 spinlock_t *ws_lock;
1162 atomic_t *total_ws;
1163 wait_queue_head_t *ws_wait;
1164 int *free_ws;
1165
1166 wsm = btrfs_compress_op[type]->workspace_manager;
1167 idle_ws = &wsm->idle_ws;
1168 ws_lock = &wsm->ws_lock;
1169 total_ws = &wsm->total_ws;
1170 ws_wait = &wsm->ws_wait;
1171 free_ws = &wsm->free_ws;
1172
1173again:
1174 spin_lock(ws_lock);
1175 if (!list_empty(idle_ws)) {
1176 workspace = idle_ws->next;
1177 list_del(workspace);
1178 (*free_ws)--;
1179 spin_unlock(ws_lock);
1180 return workspace;
1181
1182 }
1183 if (atomic_read(total_ws) > cpus) {
1184 DEFINE_WAIT(wait);
1185
1186 spin_unlock(ws_lock);
1187 prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
1188 if (atomic_read(total_ws) > cpus && !*free_ws)
1189 schedule();
1190 finish_wait(ws_wait, &wait);
1191 goto again;
1192 }
1193 atomic_inc(total_ws);
1194 spin_unlock(ws_lock);

	/*
	 * Allocation helpers call vmalloc that can't use GFP_NOFS, so we have
	 * to turn it off here because we might get called from the restricted
	 * context of btrfs_compress_bio/btrfs_compress_pages
	 */
	nofs_flag = memalloc_nofs_save();
	workspace = alloc_workspace(type, level);
	memalloc_nofs_restore(nofs_flag);

	if (IS_ERR(workspace)) {
		atomic_dec(total_ws);
		wake_up(ws_wait);

		/*
		 * Do not return the error but go back to waiting. There's a
		 * workspace preallocated for each type and the compression
		 * code guarantees it will not fail after that, so there is at
		 * least one. Keep waiting if we tried too hard to allocate
		 * one.
		 *
		 * However if there are no workspaces at all (eg. the
		 * preallocation failed), warn about the low memory condition,
		 * rate limited as this can trigger repeatedly.
		 */
		if (atomic_read(total_ws) == 0) {
			static DEFINE_RATELIMIT_STATE(_rs,
					/* once per minute */ 60 * HZ,
					/* no burst */ 1);

			if (__ratelimit(&_rs)) {
				pr_warn("BTRFS: no compression workspaces, low memory, retrying\n");
			}
		}
		goto again;
	}
	return workspace;
}

static struct list_head *get_workspace(int type, int level)
{
	switch (type) {
	case BTRFS_COMPRESS_NONE: return btrfs_get_workspace(type, level);
	case BTRFS_COMPRESS_ZLIB: return zlib_get_workspace(level);
	case BTRFS_COMPRESS_LZO:  return btrfs_get_workspace(type, level);
	case BTRFS_COMPRESS_ZSTD: return zstd_get_workspace(level);
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}

/*
 * Put a workspace struct back on the list or free it if we have enough
 * idle ones sitting around.
 */
void btrfs_put_workspace(int type, struct list_head *ws)
1254{
1255 struct workspace_manager *wsm;
1256 struct list_head *idle_ws;
1257 spinlock_t *ws_lock;
1258 atomic_t *total_ws;
1259 wait_queue_head_t *ws_wait;
1260 int *free_ws;
1261
1262 wsm = btrfs_compress_op[type]->workspace_manager;
1263 idle_ws = &wsm->idle_ws;
1264 ws_lock = &wsm->ws_lock;
1265 total_ws = &wsm->total_ws;
1266 ws_wait = &wsm->ws_wait;
1267 free_ws = &wsm->free_ws;
1268
1269 spin_lock(ws_lock);
1270 if (*free_ws <= num_online_cpus()) {
1271 list_add(ws, idle_ws);
1272 (*free_ws)++;
1273 spin_unlock(ws_lock);
1274 goto wake;
1275 }
1276 spin_unlock(ws_lock);
1277
1278 free_workspace(type, ws);
1279 atomic_dec(total_ws);
1280wake:
1281 cond_wake_up(ws_wait);
1282}

static void put_workspace(int type, struct list_head *ws)
{
	switch (type) {
	case BTRFS_COMPRESS_NONE: return btrfs_put_workspace(type, ws);
	case BTRFS_COMPRESS_ZLIB: return btrfs_put_workspace(type, ws);
	case BTRFS_COMPRESS_LZO:  return btrfs_put_workspace(type, ws);
	case BTRFS_COMPRESS_ZSTD: return zstd_put_workspace(ws);
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}

/*
 * Adjust @level according to the limits of the compression algorithm or
 * fallback to default.
 */
static unsigned int btrfs_compress_set_level(int type, unsigned level)
{
	const struct btrfs_compress_op *ops = btrfs_compress_op[type];

	if (level == 0)
		level = ops->default_level;
	else
		level = min(level, ops->max_level);

	return level;
}
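/*
 * Illustrative examples (actual values come from the per-algorithm ops
 * tables): btrfs_compress_set_level(BTRFS_COMPRESS_ZSTD, 0) returns the zstd
 * default_level, while a request above max_level is clamped down to
 * max_level, so the result is always a level the algorithm can use.
 */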

/*
 * Given an address space and start and length, compress the bytes into @pages
 * that are allocated on demand.
 *
 * @type_level is encoded algorithm and level, where level 0 means whatever
 * default the algorithm chooses and is opaque here;
 * - the compression algo is in the lower bits
 * - the level is in bits 4-7
 *
 * @out_pages is an in/out parameter, holds the maximum number of pages to
 * allocate and returns the number of actually allocated pages
 *
 * @total_in is used to return the number of bytes actually read.  It
 * may be smaller than the input length if we had to exit early because we
 * ran out of room in the pages array or because we crossed the
 * max_out threshold.
 *
 * @total_out is an in/out parameter, must be set to the input length and will
 * be also used to return the total number of compressed bytes
 */
int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
			 u64 start, struct page **pages,
			 unsigned long *out_pages,
			 unsigned long *total_in,
			 unsigned long *total_out)
{
	int type = btrfs_compress_type(type_level);
	int level = btrfs_compress_level(type_level);
	struct list_head *workspace;
	int ret;

	level = btrfs_compress_set_level(type, level);
	workspace = get_workspace(type, level);
	ret = compression_compress_pages(type, workspace, mapping, start, pages,
					 out_pages, total_in, total_out);
	put_workspace(type, workspace);
	return ret;
}
1354
1355static int btrfs_decompress_bio(struct compressed_bio *cb)
1356{
1357 struct list_head *workspace;
1358 int ret;
1359 int type = cb->compress_type;
1360
1361 workspace = get_workspace(type, 0);
1362 ret = compression_decompress_bio(type, workspace, cb);
1363 put_workspace(type, workspace);
1364
1365 return ret;
1366}

/*
 * A less complex decompression routine.  Our compressed data fits in a
 * single page, and we want to read a single page out of it.
 * start_byte tells us the offset into the compressed data we're interested in.
 */
int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
		     unsigned long start_byte, size_t srclen, size_t destlen)
{
	struct list_head *workspace;
	int ret;

	workspace = get_workspace(type, 0);
	ret = compression_decompress(type, workspace, data_in, dest_page,
				     start_byte, srclen, destlen);
	put_workspace(type, workspace);

	return ret;
}
1386
1387void __init btrfs_init_compress(void)
1388{
1389 btrfs_init_workspace_manager(BTRFS_COMPRESS_NONE);
1390 btrfs_init_workspace_manager(BTRFS_COMPRESS_ZLIB);
1391 btrfs_init_workspace_manager(BTRFS_COMPRESS_LZO);
1392 zstd_init_workspace_manager();
1393}
1394
1395void __cold btrfs_exit_compress(void)
1396{
1397 btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_NONE);
1398 btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_ZLIB);
1399 btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_LZO);
1400 zstd_cleanup_workspace_manager();
1401}

/*
 * Copy decompressed data from the working buffer into the pages of the
 * original bio.
 *
 * @buf:		The decompressed data buffer
 * @buf_len:		The decompressed data length
 * @cb:			The compressed extent descriptor
 * @decompressed:	The amount of decompressed data already copied to the
 *			caller, ie. the offset of @buf inside the full
 *			decompressed extent
 *
 *	|<-------      full decompressed extent      ------->|
 *	|<--- @decompressed --->|<------ @buf_len ------>|
 *				|   orig_bio bvecs ...   |
 *
 * Return 0 once the original bio has been completely filled, 1 if more
 * decompressed data is needed.
 */
int btrfs_decompress_buf2page(const char *buf, u32 buf_len,
			      struct compressed_bio *cb, u32 decompressed)
{
	struct bio *orig_bio = cb->orig_bio;
	/* Offset inside the full decompressed extent */
	u32 cur_offset;

	cur_offset = decompressed;
	/* The main loop to do the copy */
	while (cur_offset < decompressed + buf_len) {
		struct bio_vec bvec;
		size_t copy_len;
		u32 copy_start;
		/* Offset inside the full decompressed extent */
		u32 bvec_offset;

		bvec = bio_iter_iovec(orig_bio, orig_bio->bi_iter);
		/*
		 * File offset of the current bvec, relative to the start of
		 * the decompressed extent (cb->start).
		 */
		bvec_offset = page_offset(bvec.bv_page) + bvec.bv_offset - cb->start;

		/* Haven't reached the bvec range, exit */
		if (decompressed + buf_len <= bvec_offset)
			return 1;

		copy_start = max(cur_offset, bvec_offset);
		copy_len = min(bvec_offset + bvec.bv_len,
			       decompressed + buf_len) - copy_start;
		ASSERT(copy_len);

		/*
		 * Extra range check to ensure we didn't go beyond
		 * @buf + @buf_len.
		 */
		ASSERT(copy_start - decompressed < buf_len);
		memcpy_to_page(bvec.bv_page, bvec.bv_offset,
			       buf + copy_start - decompressed, copy_len);
		flush_dcache_page(bvec.bv_page);
		cur_offset += copy_len;

		bio_advance(orig_bio, copy_len);
		/* Finished the original bio, no need to continue */
		if (!orig_bio->bi_iter.bi_size)
			return 0;
	}
	return 1;
}
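/*
 * Worked example (illustrative offsets): assume cb->start is file offset 0,
 * the current bvec covers file range [8K, 12K) and the working buffer holds
 * bytes [4K, 68K) of the extent (decompressed == 4K, buf_len == 64K).  Then
 * bvec_offset == 8K, copy_start == max(cur_offset, 8K) == 8K and
 * copy_len == min(12K, 68K) - 8K == 4K, so exactly the 4K that the original
 * bio asked for is copied and the bio is advanced by that amount.
 */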

/*
 * Shannon Entropy calculation
 *
 * Pure byte distribution analysis fails to determine compressibility of data.
 * Try calculating entropy to estimate the average minimum number of bits
 * needed to encode the sampled data.
 *
 * For convenience, return the percentage of needed bits, instead of amount of
 * bits directly.
 *
 * @ENTROPY_LVL_ACEPTABLE - below that threshold, sample has low byte entropy
 *			    and can be compressible with high probability
 *
 * @ENTROPY_LVL_HIGH - data are not compressible with high probability
 *
 * Use of ilog2() decreases precision, we lower the LVL to 5 to compensate.
 */
#define ENTROPY_LVL_ACEPTABLE		(65)
#define ENTROPY_LVL_HIGH		(80)

/*
 * For increased precision in shannon_entropy calculation,
 * let's do pow(n, M) to save more digits after comma:
 *
 * - maximum int bit length is 64
 * - ilog2(MAX_SAMPLE_SIZE)	-> 13
 * - 13 * 4 = 52 < 64		-> M = 4
 *
 * So use pow(n, 4).
 */
static inline u32 ilog2_w(u64 n)
{
	return ilog2(n * n * n * n);
}

static u32 shannon_entropy(struct heuristic_ws *ws)
{
	const u32 entropy_max = 8 * ilog2_w(2);
	u32 entropy_sum = 0;
	u32 p, p_base, sz_base;
	u32 i;

	sz_base = ilog2_w(ws->sample_size);
	for (i = 0; i < BUCKET_SIZE && ws->bucket[i].count > 0; i++) {
		p = ws->bucket[i].count;
		p_base = ilog2_w(p);
		entropy_sum += p * (sz_base - p_base);
	}

	entropy_sum /= ws->sample_size;
	return entropy_sum * 100 / entropy_max;
}
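/*
 * Worked example (illustrative): for a uniformly random 8K sample every one
 * of the 256 buckets holds ~32 counts, so sz_base - p_base ==
 * ilog2_w(8192) - ilog2_w(32) == 52 - 20 == 32, entropy_sum / sample_size
 * == 32 and the function reports 32 * 100 / 32 == 100%.  A sample using only
 * a handful of byte values ends up well below ENTROPY_LVL_ACEPTABLE.
 */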

#define RADIX_BASE		4U
#define COUNTERS_SIZE		(1U << RADIX_BASE)

static u8 get4bits(u64 num, int shift)
{
	u8 low4bits;

	num >>= shift;
	/* Reverse order so the radix sort below produces descending output */
	low4bits = (COUNTERS_SIZE - 1) - (num % COUNTERS_SIZE);
	return low4bits;
}
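/*
 * Illustrative example: with RADIX_BASE == 4, get4bits(0x2a5, 4) shifts the
 * value to 0x2a and returns (16 - 1) - (0x2a % 16) == 15 - 10 == 5.  This
 * "15 - nibble" inversion is what makes radix_sort() sort descending, which
 * is the order byte_core_set_size() expects.
 */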

/*
 * Use 4 bits as radix base
 * Use 16 u32 counters for calculating new position in buf array
 *
 * @array     - array that will be sorted
 * @array_buf - buffer array to store sorting results
 *              must be equal in size to @array
 * @num       - array size
 */
static void radix_sort(struct bucket_item *array, struct bucket_item *array_buf,
		       int num)
{
1560 u64 max_num;
1561 u64 buf_num;
1562 u32 counters[COUNTERS_SIZE];
1563 u32 new_addr;
1564 u32 addr;
1565 int bitlen;
1566 int shift;
1567 int i;

	/*
	 * Try to avoid useless loop iterations for small numbers stored in
	 * big counters.  Example: 48 33 4 ... in a 64bit array
	 */
	max_num = array[0].count;
1574 for (i = 1; i < num; i++) {
1575 buf_num = array[i].count;
1576 if (buf_num > max_num)
1577 max_num = buf_num;
1578 }
1579
1580 buf_num = ilog2(max_num);
1581 bitlen = ALIGN(buf_num, RADIX_BASE * 2);
1582
1583 shift = 0;
1584 while (shift < bitlen) {
1585 memset(counters, 0, sizeof(counters));
1586
1587 for (i = 0; i < num; i++) {
1588 buf_num = array[i].count;
1589 addr = get4bits(buf_num, shift);
1590 counters[addr]++;
1591 }
1592
1593 for (i = 1; i < COUNTERS_SIZE; i++)
1594 counters[i] += counters[i - 1];
1595
1596 for (i = num - 1; i >= 0; i--) {
1597 buf_num = array[i].count;
1598 addr = get4bits(buf_num, shift);
1599 counters[addr]--;
1600 new_addr = counters[addr];
1601 array_buf[new_addr] = array[i];
1602 }
1603
1604 shift += RADIX_BASE;

		/*
		 * Normal radix expects to move data from a temporary array to
		 * the main one.  But that requires some CPU time. Avoid that
		 * by doing another sort iteration to the original array
		 * instead of a memcpy().
		 */
		memset(counters, 0, sizeof(counters));
1613
1614 for (i = 0; i < num; i ++) {
1615 buf_num = array_buf[i].count;
1616 addr = get4bits(buf_num, shift);
1617 counters[addr]++;
1618 }
1619
1620 for (i = 1; i < COUNTERS_SIZE; i++)
1621 counters[i] += counters[i - 1];
1622
1623 for (i = num - 1; i >= 0; i--) {
1624 buf_num = array_buf[i].count;
1625 addr = get4bits(buf_num, shift);
1626 counters[addr]--;
1627 new_addr = counters[addr];
1628 array[new_addr] = array_buf[i];
1629 }
1630
1631 shift += RADIX_BASE;
1632 }
1633}

/*
 * Size of the core byte set - how many bytes cover 90% of the sample.
 *
 * There are several types of structured binary data that use nearly all byte
 * values. The distribution can be uniform and counts in all buckets will be
 * nearly the same (eg. encrypted data). Unlikely to be compressible.
 *
 * Other possibility is normal (Gaussian) distribution, where the data could
 * be potentially compressible, but we have to take a few more steps to
 * decide how much.
 *
 * @BYTE_CORE_SET_LOW  - main part of byte values repeated frequently,
 *                       compression algo can easily fix that
 * @BYTE_CORE_SET_HIGH - data have uniform distribution and with high
 *                       probability are not compressible
 */
#define BYTE_CORE_SET_LOW		(64)
#define BYTE_CORE_SET_HIGH		(200)

static int byte_core_set_size(struct heuristic_ws *ws)
{
	u32 i;
	u32 coreset_sum = 0;
	const u32 core_set_threshold = ws->sample_size * 90 / 100;
	struct bucket_item *bucket = ws->bucket;

	/* Sort in reverse order */
	radix_sort(ws->bucket, ws->bucket_b, BUCKET_SIZE);
1663
1664 for (i = 0; i < BYTE_CORE_SET_LOW; i++)
1665 coreset_sum += bucket[i].count;
1666
1667 if (coreset_sum > core_set_threshold)
1668 return i;
1669
1670 for (; i < BYTE_CORE_SET_HIGH && bucket[i].count > 0; i++) {
1671 coreset_sum += bucket[i].count;
1672 if (coreset_sum > core_set_threshold)
1673 break;
1674 }
1675
1676 return i;
1677}

/*
 * Count byte values in buckets.
 * This heuristic can detect textual data (configs, xml, json, html, etc).
 * Because in most text-like data the byte set is restricted to a limited
 * number of possible characters, and that restriction in most cases makes
 * the data easy to compress.
 *
 * @BYTE_SET_THRESHOLD - consider all data within this byte set size:
 *	less - compressible
 *	more - need additional analysis
 */
#define BYTE_SET_THRESHOLD		(64)
1691
1692static u32 byte_set_size(const struct heuristic_ws *ws)
1693{
1694 u32 i;
1695 u32 byte_set_size = 0;
1696
1697 for (i = 0; i < BYTE_SET_THRESHOLD; i++) {
1698 if (ws->bucket[i].count > 0)
1699 byte_set_size++;
1700 }

	/*
	 * Continue collecting count of byte values in buckets.  If the byte
	 * set size is bigger than the threshold, it's pointless to continue,
	 * the detection technique would fail for this type of data.
	 */
	for (; i < BUCKET_SIZE; i++) {
1708 if (ws->bucket[i].count > 0) {
1709 byte_set_size++;
1710 if (byte_set_size > BYTE_SET_THRESHOLD)
1711 return byte_set_size;
1712 }
1713 }
1714
1715 return byte_set_size;
1716}
1717
1718static bool sample_repeated_patterns(struct heuristic_ws *ws)
1719{
1720 const u32 half_of_sample = ws->sample_size / 2;
1721 const u8 *data = ws->sample;
1722
1723 return memcmp(&data[0], &data[half_of_sample], half_of_sample) == 0;
1724}

static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
				     struct heuristic_ws *ws)
{
	struct page *page;
	u64 index, index_end;
	u32 i, curr_sample_pos;
	u8 *in_data;

	/*
	 * Compression handles the input data by chunks of 128KiB
	 * (defined by BTRFS_MAX_UNCOMPRESSED).
	 *
	 * We do the same for the heuristic and loop over the whole range.
	 *
	 * MAX_SAMPLE_SIZE - calculated under the assumption that the
	 * heuristic will process no more than BTRFS_MAX_UNCOMPRESSED at a
	 * time.
	 */
	if (end - start > BTRFS_MAX_UNCOMPRESSED)
		end = start + BTRFS_MAX_UNCOMPRESSED;

	index = start >> PAGE_SHIFT;
	index_end = end >> PAGE_SHIFT;

	/* Don't miss unaligned end */
	if (!IS_ALIGNED(end, PAGE_SIZE))
		index_end++;

	curr_sample_pos = 0;
	while (index < index_end) {
		page = find_get_page(inode->i_mapping, index);
		in_data = kmap_local_page(page);
		/* Handle a start offset that is not page aligned */
		i = start % PAGE_SIZE;
		while (i < PAGE_SIZE - SAMPLING_READ_SIZE) {
			/* Don't sample any garbage from the last page */
			if (start > end - SAMPLING_READ_SIZE)
				break;
			memcpy(&ws->sample[curr_sample_pos], &in_data[i],
			       SAMPLING_READ_SIZE);
			i += SAMPLING_INTERVAL;
			start += SAMPLING_INTERVAL;
			curr_sample_pos += SAMPLING_READ_SIZE;
		}
		kunmap_local(in_data);
		put_page(page);

		index++;
	}

	ws->sample_size = curr_sample_pos;
}

/*
 * Compression heuristic.
 *
 * The heuristic aims to quickly detect (much cheaper than an actual
 * compression attempt) whether a data range is worth compressing, to avoid
 * wasting CPU time on incompressible data.
 *
 * The following types of analysis are performed:
 * - detect mostly zero or repeated data
 * - detect data with a low "byte set" size (text, etc)
 * - detect data with a low/high "core byte" set
 * - estimate the Shannon entropy of the sample
 *
 * Return non-zero if compression should be attempted, 0 otherwise.
 */
int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
1794{
1795 struct list_head *ws_list = get_workspace(0, 0);
1796 struct heuristic_ws *ws;
1797 u32 i;
1798 u8 byte;
1799 int ret = 0;
1800
1801 ws = list_entry(ws_list, struct heuristic_ws, list);
1802
1803 heuristic_collect_sample(inode, start, end, ws);
1804
1805 if (sample_repeated_patterns(ws)) {
1806 ret = 1;
1807 goto out;
1808 }
1809
1810 memset(ws->bucket, 0, sizeof(*ws->bucket)*BUCKET_SIZE);
1811
1812 for (i = 0; i < ws->sample_size; i++) {
1813 byte = ws->sample[i];
1814 ws->bucket[byte].count++;
1815 }
1816
1817 i = byte_set_size(ws);
1818 if (i < BYTE_SET_THRESHOLD) {
1819 ret = 2;
1820 goto out;
1821 }
1822
1823 i = byte_core_set_size(ws);
1824 if (i <= BYTE_CORE_SET_LOW) {
1825 ret = 3;
1826 goto out;
1827 }
1828
1829 if (i >= BYTE_CORE_SET_HIGH) {
1830 ret = 0;
1831 goto out;
1832 }
1833
1834 i = shannon_entropy(ws);
1835 if (i <= ENTROPY_LVL_ACEPTABLE) {
1836 ret = 4;
1837 goto out;
1838 }

	/*
	 * Entropy between ENTROPY_LVL_ACEPTABLE and ENTROPY_LVL_HIGH is a
	 * gray zone: a deeper analysis (eg. of byte pairs) would be needed to
	 * decide reliably, which is too costly here, so give compression the
	 * benefit of the doubt in that range.
	 *
	 * At or above ENTROPY_LVL_HIGH the sample needs nearly the full 8
	 * bits per byte to encode, so the data is almost certainly not
	 * compressible and we skip it.
	 */
	if (i < ENTROPY_LVL_HIGH) {
		ret = 5;
		goto out;
	} else {
		ret = 0;
		goto out;
	}

out:
	put_workspace(0, ws_list);
	return ret;
}
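/*
 * Illustrative summary of the return values above (the numbers are only a
 * debugging aid, any non-zero value means "worth compressing"):
 *   1 - repeated pattern, 2 - small byte set, 3 - small core byte set,
 *   4 - acceptable entropy, 5 - medium entropy, 0 - likely not compressible.
 */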

/*
 * Convert the compression suffix (eg. after "zlib" starting with ":") to
 * level, an unrecognized string will set the default level.
 */
unsigned int btrfs_compress_str2level(unsigned int type, const char *str)
{
	unsigned int level = 0;
	int ret;

	if (!type)
		return 0;

	if (str[0] == ':') {
		ret = kstrtouint(str + 1, 10, &level);
		if (ret)
			level = 0;
	}

	level = btrfs_compress_set_level(type, level);

	return level;
}
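/*
 * Illustrative examples: for the mount option "compress=zstd:3",
 * btrfs_compress_str2level() is called with the ":3" suffix and returns 3;
 * "zstd" alone (no suffix) or a malformed suffix falls back to the
 * algorithm's default level, and an oversized value is clamped to the
 * algorithm's max_level by btrfs_compress_set_level().
 */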