linux/fs/btrfs/compression.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/log2.h>
#include <crypto/hash.h>
#include "misc.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "ordered-data.h"
#include "compression.h"
#include "extent_io.h"
#include "extent_map.h"
#include "zoned.h"

static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };

const char* btrfs_compress_type2str(enum btrfs_compression_type type)
{
        switch (type) {
        case BTRFS_COMPRESS_ZLIB:
        case BTRFS_COMPRESS_LZO:
        case BTRFS_COMPRESS_ZSTD:
        case BTRFS_COMPRESS_NONE:
                return btrfs_compress_types[type];
        default:
                break;
        }

        return NULL;
}

bool btrfs_compress_is_valid_type(const char *str, size_t len)
{
        int i;

        for (i = 1; i < ARRAY_SIZE(btrfs_compress_types); i++) {
                size_t comp_len = strlen(btrfs_compress_types[i]);

                if (len < comp_len)
                        continue;

                if (!strncmp(btrfs_compress_types[i], str, comp_len))
                        return true;
        }
        return false;
}
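
/*
 * For illustration, a minimal sketch of what the prefix match above
 * accepts (standalone calls, the option strings are hypothetical examples;
 * level suffixes such as ":3" are parsed separately):
 *
 *      btrfs_compress_is_valid_type("zlib", 4);        -> true
 *      btrfs_compress_is_valid_type("zstd:3", 6);      -> true, prefix match
 *      btrfs_compress_is_valid_type("lz", 2);          -> false, too short
 */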

static int compression_compress_pages(int type, struct list_head *ws,
               struct address_space *mapping, u64 start, struct page **pages,
               unsigned long *out_pages, unsigned long *total_in,
               unsigned long *total_out)
{
        switch (type) {
        case BTRFS_COMPRESS_ZLIB:
                return zlib_compress_pages(ws, mapping, start, pages,
                                out_pages, total_in, total_out);
        case BTRFS_COMPRESS_LZO:
                return lzo_compress_pages(ws, mapping, start, pages,
                                out_pages, total_in, total_out);
        case BTRFS_COMPRESS_ZSTD:
                return zstd_compress_pages(ws, mapping, start, pages,
                                out_pages, total_in, total_out);
        case BTRFS_COMPRESS_NONE:
        default:
                /*
                 * This can happen when compression races with remount setting
                 * it to 'no compress', while caller doesn't call
                 * inode_need_compress() to check if we really need to
                 * compress.
                 *
                 * Not a big deal, just need to inform caller that we
                 * haven't allocated any pages yet.
                 */
                *out_pages = 0;
                return -E2BIG;
        }
}
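
/*
 * A sketch of how a caller might react to that -E2BIG, assuming it follows
 * the same convention the compressors use for "output would not shrink"
 * (illustrative pseudo-caller; write_uncompressed() is hypothetical):
 *
 *      ret = compression_compress_pages(type, ws, mapping, start, pages,
 *                                       &nr_pages, &total_in, &total_out);
 *      if (ret == -E2BIG)
 *              write_uncompressed(mapping, start);     // nothing allocated
 */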

static int compression_decompress_bio(int type, struct list_head *ws,
                struct compressed_bio *cb)
{
        switch (type) {
        case BTRFS_COMPRESS_ZLIB: return zlib_decompress_bio(ws, cb);
        case BTRFS_COMPRESS_LZO:  return lzo_decompress_bio(ws, cb);
        case BTRFS_COMPRESS_ZSTD: return zstd_decompress_bio(ws, cb);
        case BTRFS_COMPRESS_NONE:
        default:
                /*
                 * This can't happen, the type is validated several times
                 * before we get here.
                 */
                BUG();
        }
}

static int compression_decompress(int type, struct list_head *ws,
               unsigned char *data_in, struct page *dest_page,
               unsigned long start_byte, size_t srclen, size_t destlen)
{
        switch (type) {
        case BTRFS_COMPRESS_ZLIB: return zlib_decompress(ws, data_in, dest_page,
                                                start_byte, srclen, destlen);
        case BTRFS_COMPRESS_LZO:  return lzo_decompress(ws, data_in, dest_page,
                                                start_byte, srclen, destlen);
        case BTRFS_COMPRESS_ZSTD: return zstd_decompress(ws, data_in, dest_page,
                                                start_byte, srclen, destlen);
        case BTRFS_COMPRESS_NONE:
        default:
                /*
                 * This can't happen, the type is validated several times
                 * before we get here.
                 */
                BUG();
        }
}

static int btrfs_decompress_bio(struct compressed_bio *cb);

static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
                                      unsigned long disk_size)
{
        return sizeof(struct compressed_bio) +
                (DIV_ROUND_UP(disk_size, fs_info->sectorsize)) * fs_info->csum_size;
}
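
/*
 * Worked example of the size computed above, assuming a 4K sectorsize and
 * crc32c checksums (csum_size == 4): a 128K compressed extent spans
 * 128K / 4K = 32 sectors, so we allocate
 * sizeof(struct compressed_bio) + 32 * 4 bytes, i.e. room for one checksum
 * per compressed sector directly behind the struct.
 */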

static int check_compressed_csum(struct btrfs_inode *inode, struct bio *bio,
                                 u64 disk_start)
{
        struct btrfs_fs_info *fs_info = inode->root->fs_info;
        SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
        const u32 csum_size = fs_info->csum_size;
        const u32 sectorsize = fs_info->sectorsize;
        struct page *page;
        unsigned int i;
        char *kaddr;
        u8 csum[BTRFS_CSUM_SIZE];
        struct compressed_bio *cb = bio->bi_private;
        u8 *cb_sum = cb->sums;

        if (!fs_info->csum_root || (inode->flags & BTRFS_INODE_NODATASUM))
                return 0;

        shash->tfm = fs_info->csum_shash;

        for (i = 0; i < cb->nr_pages; i++) {
                u32 pg_offset;
                u32 bytes_left = PAGE_SIZE;
                page = cb->compressed_pages[i];

                /* Determine the remaining bytes inside the page first */
                if (i == cb->nr_pages - 1)
                        bytes_left = cb->compressed_len - i * PAGE_SIZE;

                /* Hash through the page sector by sector */
                for (pg_offset = 0; pg_offset < bytes_left;
                     pg_offset += sectorsize) {
                        kaddr = kmap_atomic(page);
                        crypto_shash_digest(shash, kaddr + pg_offset,
                                            sectorsize, csum);
                        kunmap_atomic(kaddr);

                        if (memcmp(&csum, cb_sum, csum_size) != 0) {
                                btrfs_print_data_csum_error(inode, disk_start,
                                                csum, cb_sum, cb->mirror_num);
                                if (btrfs_io_bio(bio)->device)
                                        btrfs_dev_stat_inc_and_print(
                                                btrfs_io_bio(bio)->device,
                                                BTRFS_DEV_STAT_CORRUPTION_ERRS);
                                return -EIO;
                        }
                        cb_sum += csum_size;
                        disk_start += sectorsize;
                }
        }
        return 0;
}

/*
 * When we finish reading compressed pages from the disk, we
 * decompress them and then run the bio end_io routines on the
 * decompressed pages (in the inode address space).
 *
 * This allows the checksumming and other IO error handling routines
 * to work normally.
 *
 * The compressed pages are freed here, and it must be run
 * in process context.
 */
static void end_compressed_bio_read(struct bio *bio)
{
        struct compressed_bio *cb = bio->bi_private;
        struct inode *inode;
        struct page *page;
        unsigned int index;
        unsigned int mirror = btrfs_io_bio(bio)->mirror_num;
        int ret = 0;

        if (bio->bi_status)
                cb->errors = 1;

        /* if there are more bios still pending for this compressed
         * extent, just exit
         */
        if (!refcount_dec_and_test(&cb->pending_bios))
                goto out;

        /*
         * Record the correct mirror_num in cb->orig_bio so that
         * read-repair can work properly.
         */
        btrfs_io_bio(cb->orig_bio)->mirror_num = mirror;
        cb->mirror_num = mirror;

        /*
         * Some IO in this cb has failed, just skip the checksum as there
         * is no way it could be correct.
         */
        if (cb->errors)
                goto csum_failed;

        inode = cb->inode;
        ret = check_compressed_csum(BTRFS_I(inode), bio,
                                    bio->bi_iter.bi_sector << 9);
        if (ret)
                goto csum_failed;

        /* ok, we're the last bio for this extent, let's start
         * the decompression.
         */
        ret = btrfs_decompress_bio(cb);

csum_failed:
        if (ret)
                cb->errors = 1;

        /* release the compressed pages */
        for (index = 0; index < cb->nr_pages; index++) {
                page = cb->compressed_pages[index];
                page->mapping = NULL;
                put_page(page);
        }

        /* do io completion on the original bio */
        if (cb->errors) {
                bio_io_error(cb->orig_bio);
        } else {
                struct bio_vec *bvec;
                struct bvec_iter_all iter_all;

                /*
                 * we have verified the checksum already, set page
                 * checked so the end_io handlers know about it
                 */
                ASSERT(!bio_flagged(bio, BIO_CLONED));
                bio_for_each_segment_all(bvec, cb->orig_bio, iter_all)
                        SetPageChecked(bvec->bv_page);

                bio_endio(cb->orig_bio);
        }

        /* finally free the cb struct */
        kfree(cb->compressed_pages);
        kfree(cb);
out:
        bio_put(bio);
}

/*
 * Clear the writeback bits on all of the file
 * pages for a compressed write
 */
static noinline void end_compressed_writeback(struct inode *inode,
                                              const struct compressed_bio *cb)
{
        unsigned long index = cb->start >> PAGE_SHIFT;
        unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
        struct page *pages[16];
        unsigned long nr_pages = end_index - index + 1;
        int i;
        int ret;

        if (cb->errors)
                mapping_set_error(inode->i_mapping, -EIO);

        while (nr_pages > 0) {
                ret = find_get_pages_contig(inode->i_mapping, index,
                                     min_t(unsigned long,
                                     nr_pages, ARRAY_SIZE(pages)), pages);
                if (ret == 0) {
                        nr_pages -= 1;
                        index += 1;
                        continue;
                }
                for (i = 0; i < ret; i++) {
                        if (cb->errors)
                                SetPageError(pages[i]);
                        end_page_writeback(pages[i]);
                        put_page(pages[i]);
                }
                nr_pages -= ret;
                index += ret;
        }
        /* the inode may be gone now */
}

/*
 * do the cleanup once all the compressed pages hit the disk.
 * This will clear writeback on the file pages and free the compressed
 * pages.
 *
 * This also calls the writeback end hooks for the file pages so that
 * metadata and checksums can be updated in the file.
 */
static void end_compressed_bio_write(struct bio *bio)
{
        struct compressed_bio *cb = bio->bi_private;
        struct inode *inode;
        struct page *page;
        unsigned int index;

        if (bio->bi_status)
                cb->errors = 1;

        /* if there are more bios still pending for this compressed
         * extent, just exit
         */
        if (!refcount_dec_and_test(&cb->pending_bios))
                goto out;

        /* ok, we're the last bio for this extent, step one is to
         * call back into the FS and do all the end_io operations
         */
        inode = cb->inode;
        btrfs_record_physical_zoned(inode, cb->start, bio);
        btrfs_writepage_endio_finish_ordered(BTRFS_I(inode), NULL,
                        cb->start, cb->start + cb->len - 1,
                        !cb->errors);

        end_compressed_writeback(inode, cb);
        /* note, our inode could be gone now */

        /*
         * release the compressed pages, these came from alloc_page and
         * are not attached to the inode at all
         */
        for (index = 0; index < cb->nr_pages; index++) {
                page = cb->compressed_pages[index];
                page->mapping = NULL;
                put_page(page);
        }

        /* finally free the cb struct */
        kfree(cb->compressed_pages);
        kfree(cb);
out:
        bio_put(bio);
}

/*
 * worker function to build and submit bios for previously compressed pages.
 * The corresponding pages in the inode should be marked for writeback
 * and the compressed pages should have a reference on them for dropping
 * when the IO is complete.
 *
 * This also checksums the file bytes and gets things ready for
 * the end io hooks.
 */
blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
                                 unsigned int len, u64 disk_start,
                                 unsigned int compressed_len,
                                 struct page **compressed_pages,
                                 unsigned int nr_pages,
                                 unsigned int write_flags,
                                 struct cgroup_subsys_state *blkcg_css)
{
        struct btrfs_fs_info *fs_info = inode->root->fs_info;
        struct bio *bio = NULL;
        struct compressed_bio *cb;
        unsigned long bytes_left;
        int pg_index = 0;
        struct page *page;
        u64 first_byte = disk_start;
        blk_status_t ret;
        int skip_sum = inode->flags & BTRFS_INODE_NODATASUM;
        const bool use_append = btrfs_use_zone_append(inode, disk_start);
        const unsigned int bio_op = use_append ? REQ_OP_ZONE_APPEND : REQ_OP_WRITE;

        WARN_ON(!PAGE_ALIGNED(start));
        cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
        if (!cb)
                return BLK_STS_RESOURCE;
        refcount_set(&cb->pending_bios, 0);
        cb->errors = 0;
        cb->inode = &inode->vfs_inode;
        cb->start = start;
        cb->len = len;
        cb->mirror_num = 0;
        cb->compressed_pages = compressed_pages;
        cb->compressed_len = compressed_len;
        cb->orig_bio = NULL;
        cb->nr_pages = nr_pages;

        bio = btrfs_bio_alloc(first_byte);
        bio->bi_opf = bio_op | write_flags;
        bio->bi_private = cb;
        bio->bi_end_io = end_compressed_bio_write;

        if (use_append) {
                struct btrfs_device *device;

                device = btrfs_zoned_get_device(fs_info, disk_start, PAGE_SIZE);
                if (IS_ERR(device)) {
                        kfree(cb);
                        bio_put(bio);
                        return BLK_STS_NOTSUPP;
                }

                bio_set_dev(bio, device->bdev);
        }

        if (blkcg_css) {
                bio->bi_opf |= REQ_CGROUP_PUNT;
                kthread_associate_blkcg(blkcg_css);
        }
        refcount_set(&cb->pending_bios, 1);

        /* create and submit bios for the compressed pages */
        bytes_left = compressed_len;
        for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
                int submit = 0;
                int len = 0;

                page = compressed_pages[pg_index];
                page->mapping = inode->vfs_inode.i_mapping;
                if (bio->bi_iter.bi_size)
                        submit = btrfs_bio_fits_in_stripe(page, PAGE_SIZE, bio,
                                                          0);

                /*
                 * Page can only be added to bio if the current bio fits in
                 * stripe.
                 */
                if (!submit) {
                        if (pg_index == 0 && use_append)
                                len = bio_add_zone_append_page(bio, page,
                                                               PAGE_SIZE, 0);
                        else
                                len = bio_add_page(bio, page, PAGE_SIZE, 0);
                }

                page->mapping = NULL;
                if (submit || len < PAGE_SIZE) {
                        /*
                         * inc the count before we submit the bio so
                         * we know the end IO handler won't happen before
                         * we inc the count.  Otherwise, the cb might get
                         * freed before we're done setting it up
                         */
                        refcount_inc(&cb->pending_bios);
                        ret = btrfs_bio_wq_end_io(fs_info, bio,
                                                  BTRFS_WQ_ENDIO_DATA);
                        BUG_ON(ret); /* -ENOMEM */

                        if (!skip_sum) {
                                ret = btrfs_csum_one_bio(inode, bio, start, 1);
                                BUG_ON(ret); /* -ENOMEM */
                        }

                        ret = btrfs_map_bio(fs_info, bio, 0);
                        if (ret) {
                                bio->bi_status = ret;
                                bio_endio(bio);
                        }

                        bio = btrfs_bio_alloc(first_byte);
                        bio->bi_opf = bio_op | write_flags;
                        bio->bi_private = cb;
                        bio->bi_end_io = end_compressed_bio_write;
                        if (blkcg_css)
                                bio->bi_opf |= REQ_CGROUP_PUNT;
                        /*
                         * Use bio_add_page() to ensure the bio has at least one
                         * page.
                         */
                        bio_add_page(bio, page, PAGE_SIZE, 0);
                }
                if (bytes_left < PAGE_SIZE) {
                        btrfs_info(fs_info,
                                        "bytes left %lu compress len %u nr %u",
                               bytes_left, cb->compressed_len, cb->nr_pages);
                }
                bytes_left -= PAGE_SIZE;
                first_byte += PAGE_SIZE;
                cond_resched();
        }

        ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
        BUG_ON(ret); /* -ENOMEM */

        if (!skip_sum) {
                ret = btrfs_csum_one_bio(inode, bio, start, 1);
                BUG_ON(ret); /* -ENOMEM */
        }

        ret = btrfs_map_bio(fs_info, bio, 0);
        if (ret) {
                bio->bi_status = ret;
                bio_endio(bio);
        }

        if (blkcg_css)
                kthread_associate_blkcg(NULL);

        return 0;
}
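
/*
 * The pending_bios handling above follows a common end-io pattern; a
 * minimal sketch of it (simplified pseudo-code, not kernel API):
 *
 *      refcount_set(&cb->pending_bios, 1);     // covers the bio being built
 *      while (more pages) {
 *              if (current bio is full) {
 *                      refcount_inc(&cb->pending_bios);  // before submit,
 *                      submit(bio);    // so an instant end-io can't free cb
 *                      bio = alloc_new_bio();
 *              }
 *              add page to bio;
 *      }
 *      submit(bio);                            // consumes the initial ref
 *
 * Every end_compressed_bio_write() call does refcount_dec_and_test() and
 * only the completion that brings the count to zero tears down the cb.
 */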

static u64 bio_end_offset(struct bio *bio)
{
        struct bio_vec *last = bio_last_bvec_all(bio);

        return page_offset(last->bv_page) + last->bv_len + last->bv_offset;
}

static noinline int add_ra_bio_pages(struct inode *inode,
                                     u64 compressed_end,
                                     struct compressed_bio *cb)
{
        unsigned long end_index;
        unsigned long pg_index;
        u64 last_offset;
        u64 isize = i_size_read(inode);
        int ret;
        struct page *page;
        unsigned long nr_pages = 0;
        struct extent_map *em;
        struct address_space *mapping = inode->i_mapping;
        struct extent_map_tree *em_tree;
        struct extent_io_tree *tree;
        u64 end;
        int misses = 0;

        last_offset = bio_end_offset(cb->orig_bio);
        em_tree = &BTRFS_I(inode)->extent_tree;
        tree = &BTRFS_I(inode)->io_tree;

        if (isize == 0)
                return 0;

        end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;

        while (last_offset < compressed_end) {
                pg_index = last_offset >> PAGE_SHIFT;

                if (pg_index > end_index)
                        break;

                page = xa_load(&mapping->i_pages, pg_index);
                if (page && !xa_is_value(page)) {
                        misses++;
                        if (misses > 4)
                                break;
                        goto next;
                }

                page = __page_cache_alloc(mapping_gfp_constraint(mapping,
                                                                 ~__GFP_FS));
                if (!page)
                        break;

                if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
                        put_page(page);
                        goto next;
                }

                /*
                 * at this point, we have a locked page in the page cache
                 * for these bytes in the file.  But, we have to make
                 * sure they map to this compressed extent on disk.
                 */
                ret = set_page_extent_mapped(page);
                if (ret < 0) {
                        unlock_page(page);
                        put_page(page);
                        break;
                }

                end = last_offset + PAGE_SIZE - 1;
                lock_extent(tree, last_offset, end);
                read_lock(&em_tree->lock);
                em = lookup_extent_mapping(em_tree, last_offset,
                                           PAGE_SIZE);
                read_unlock(&em_tree->lock);

                if (!em || last_offset < em->start ||
                    (last_offset + PAGE_SIZE > extent_map_end(em)) ||
                    (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
                        free_extent_map(em);
                        unlock_extent(tree, last_offset, end);
                        unlock_page(page);
                        put_page(page);
                        break;
                }
                free_extent_map(em);

                if (page->index == end_index) {
                        size_t zero_offset = offset_in_page(isize);

                        if (zero_offset) {
                                int zeros;
                                zeros = PAGE_SIZE - zero_offset;
                                memzero_page(page, zero_offset, zeros);
                                flush_dcache_page(page);
                        }
                }

                ret = bio_add_page(cb->orig_bio, page,
                                   PAGE_SIZE, 0);

                if (ret == PAGE_SIZE) {
                        nr_pages++;
                        put_page(page);
                } else {
                        unlock_extent(tree, last_offset, end);
                        unlock_page(page);
                        put_page(page);
                        break;
                }
next:
                last_offset += PAGE_SIZE;
        }
        return 0;
}

/*
 * for a compressed read, the bio we get passed has all the inode pages
 * in it.  We don't actually do IO on those pages but allocate new ones
 * to hold the compressed pages on disk.
 *
 * bio->bi_iter.bi_sector points to the compressed extent on disk
 * bio->bi_io_vec points to all of the inode pages
 *
 * After the compressed pages are read, we copy the bytes into the
 * bio we were passed and then call the bio end_io calls
 */
blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
                                 int mirror_num, unsigned long bio_flags)
{
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct extent_map_tree *em_tree;
        struct compressed_bio *cb;
        unsigned int compressed_len;
        unsigned int nr_pages;
        unsigned int pg_index;
        struct page *page;
        struct bio *comp_bio;
        u64 cur_disk_byte = bio->bi_iter.bi_sector << 9;
        u64 em_len;
        u64 em_start;
        struct extent_map *em;
        blk_status_t ret = BLK_STS_RESOURCE;
        int faili = 0;
        u8 *sums;

        em_tree = &BTRFS_I(inode)->extent_tree;

        /* we need the actual starting offset of this extent in the file */
        read_lock(&em_tree->lock);
        em = lookup_extent_mapping(em_tree,
                                   page_offset(bio_first_page_all(bio)),
                                   fs_info->sectorsize);
        read_unlock(&em_tree->lock);
        if (!em)
                return BLK_STS_IOERR;

        compressed_len = em->block_len;
        cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
        if (!cb)
                goto out;

        refcount_set(&cb->pending_bios, 0);
        cb->errors = 0;
        cb->inode = inode;
        cb->mirror_num = mirror_num;
        sums = cb->sums;

        cb->start = em->orig_start;
        em_len = em->len;
        em_start = em->start;

        free_extent_map(em);
        em = NULL;

        cb->len = bio->bi_iter.bi_size;
        cb->compressed_len = compressed_len;
        cb->compress_type = extent_compress_type(bio_flags);
        cb->orig_bio = bio;

        nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
        cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *),
                                       GFP_NOFS);
        if (!cb->compressed_pages)
                goto fail1;

        for (pg_index = 0; pg_index < nr_pages; pg_index++) {
                cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS |
                                                              __GFP_HIGHMEM);
                if (!cb->compressed_pages[pg_index]) {
                        faili = pg_index - 1;
                        ret = BLK_STS_RESOURCE;
                        goto fail2;
                }
        }
        faili = nr_pages - 1;
        cb->nr_pages = nr_pages;

        add_ra_bio_pages(inode, em_start + em_len, cb);

        /* include any pages we added in add_ra_bio_pages */
        cb->len = bio->bi_iter.bi_size;

        comp_bio = btrfs_bio_alloc(cur_disk_byte);
        comp_bio->bi_opf = REQ_OP_READ;
        comp_bio->bi_private = cb;
        comp_bio->bi_end_io = end_compressed_bio_read;
        refcount_set(&cb->pending_bios, 1);

        for (pg_index = 0; pg_index < nr_pages; pg_index++) {
                u32 pg_len = PAGE_SIZE;
                int submit = 0;

                /*
                 * To handle subpage case, we need to make sure the bio only
                 * covers the range we need.
                 *
                 * If we're at the last page, truncate the length to only cover
                 * the remaining part.
                 */
                if (pg_index == nr_pages - 1)
                        pg_len = min_t(u32, PAGE_SIZE,
                                        compressed_len - pg_index * PAGE_SIZE);

                page = cb->compressed_pages[pg_index];
                page->mapping = inode->i_mapping;
                page->index = em_start >> PAGE_SHIFT;

                if (comp_bio->bi_iter.bi_size)
                        submit = btrfs_bio_fits_in_stripe(page, pg_len,
                                                          comp_bio, 0);

                page->mapping = NULL;
                if (submit || bio_add_page(comp_bio, page, pg_len, 0) < pg_len) {
                        unsigned int nr_sectors;

                        ret = btrfs_bio_wq_end_io(fs_info, comp_bio,
                                                  BTRFS_WQ_ENDIO_DATA);
                        BUG_ON(ret); /* -ENOMEM */

                        /*
                         * inc the count before we submit the bio so
                         * we know the end IO handler won't happen before
                         * we inc the count.  Otherwise, the cb might get
                         * freed before we're done setting it up
                         */
                        refcount_inc(&cb->pending_bios);

                        ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
                        BUG_ON(ret); /* -ENOMEM */

                        nr_sectors = DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
                                                  fs_info->sectorsize);
                        sums += fs_info->csum_size * nr_sectors;

                        ret = btrfs_map_bio(fs_info, comp_bio, mirror_num);
                        if (ret) {
                                comp_bio->bi_status = ret;
                                bio_endio(comp_bio);
                        }

                        comp_bio = btrfs_bio_alloc(cur_disk_byte);
                        comp_bio->bi_opf = REQ_OP_READ;
                        comp_bio->bi_private = cb;
                        comp_bio->bi_end_io = end_compressed_bio_read;

                        bio_add_page(comp_bio, page, pg_len, 0);
                }
                cur_disk_byte += pg_len;
        }

        ret = btrfs_bio_wq_end_io(fs_info, comp_bio, BTRFS_WQ_ENDIO_DATA);
        BUG_ON(ret); /* -ENOMEM */

        ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
        BUG_ON(ret); /* -ENOMEM */

        ret = btrfs_map_bio(fs_info, comp_bio, mirror_num);
        if (ret) {
                comp_bio->bi_status = ret;
                bio_endio(comp_bio);
        }

        return 0;

fail2:
        while (faili >= 0) {
                __free_page(cb->compressed_pages[faili]);
                faili--;
        }

        kfree(cb->compressed_pages);
fail1:
        kfree(cb);
out:
        free_extent_map(em);
        return ret;
}

/*
 * Heuristic uses systematic sampling to collect data from the input data
 * range, the logic can be tuned by the following constants:
 *
 * @SAMPLING_READ_SIZE - how many bytes will be copied for each sample
 * @SAMPLING_INTERVAL  - the distance between the starts of consecutive
 *                       samples
 */
#define SAMPLING_READ_SIZE      (16)
#define SAMPLING_INTERVAL       (256)

/*
 * For statistical analysis of the input data we consider bytes that form a
 * Galois Field of 256 objects. Each object has an attribute count, i.e. how
 * many times the object appeared in the sample.
 */
#define BUCKET_SIZE             (256)

/*
 * The size of the sample is based on a statistical sampling rule of thumb.
 * The common way is to perform sampling tests as long as the number of
 * elements in each cell is at least 5.
 *
 * Instead of 5, we choose 32 to obtain more accurate results.
 * If the data contain the maximum number of symbols, which is 256, we obtain a
 * sample size bound by 8192.
 *
 * For a sample of at most 8KB of data per data range: 16 consecutive bytes
 * from up to 512 locations.
 */
#define MAX_SAMPLE_SIZE         (BTRFS_MAX_UNCOMPRESSED *               \
                                 SAMPLING_READ_SIZE / SAMPLING_INTERVAL)
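
/*
 * Worked example of the bound above, assuming BTRFS_MAX_UNCOMPRESSED is
 * 128K (the compressed extent size limit): 128K / 256 = 512 sampling
 * locations, and 512 * 16 = 8192 bytes, which is the 8KB figure quoted
 * in the comment.
 */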

struct bucket_item {
        u32 count;
};

struct heuristic_ws {
        /* Partial copy of input data */
        u8 *sample;
        u32 sample_size;
        /* Buckets store counters for each byte value */
        struct bucket_item *bucket;
        /* Sorting buffer */
        struct bucket_item *bucket_b;
        struct list_head list;
};

static struct workspace_manager heuristic_wsm;

static void free_heuristic_ws(struct list_head *ws)
{
        struct heuristic_ws *workspace;

        workspace = list_entry(ws, struct heuristic_ws, list);

        kvfree(workspace->sample);
        kfree(workspace->bucket);
        kfree(workspace->bucket_b);
        kfree(workspace);
}

static struct list_head *alloc_heuristic_ws(unsigned int level)
{
        struct heuristic_ws *ws;

        ws = kzalloc(sizeof(*ws), GFP_KERNEL);
        if (!ws)
                return ERR_PTR(-ENOMEM);

        ws->sample = kvmalloc(MAX_SAMPLE_SIZE, GFP_KERNEL);
        if (!ws->sample)
                goto fail;

        ws->bucket = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket), GFP_KERNEL);
        if (!ws->bucket)
                goto fail;

        ws->bucket_b = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket_b), GFP_KERNEL);
        if (!ws->bucket_b)
                goto fail;

        INIT_LIST_HEAD(&ws->list);
        return &ws->list;
fail:
        free_heuristic_ws(&ws->list);
        return ERR_PTR(-ENOMEM);
}

const struct btrfs_compress_op btrfs_heuristic_compress = {
        .workspace_manager = &heuristic_wsm,
};

static const struct btrfs_compress_op * const btrfs_compress_op[] = {
        /* The heuristic is represented as compression type 0 */
        &btrfs_heuristic_compress,
        &btrfs_zlib_compress,
        &btrfs_lzo_compress,
        &btrfs_zstd_compress,
};

static struct list_head *alloc_workspace(int type, unsigned int level)
{
        switch (type) {
        case BTRFS_COMPRESS_NONE: return alloc_heuristic_ws(level);
        case BTRFS_COMPRESS_ZLIB: return zlib_alloc_workspace(level);
        case BTRFS_COMPRESS_LZO:  return lzo_alloc_workspace(level);
        case BTRFS_COMPRESS_ZSTD: return zstd_alloc_workspace(level);
        default:
                /*
                 * This can't happen, the type is validated several times
                 * before we get here.
                 */
                BUG();
        }
}

static void free_workspace(int type, struct list_head *ws)
{
        switch (type) {
        case BTRFS_COMPRESS_NONE: return free_heuristic_ws(ws);
        case BTRFS_COMPRESS_ZLIB: return zlib_free_workspace(ws);
        case BTRFS_COMPRESS_LZO:  return lzo_free_workspace(ws);
        case BTRFS_COMPRESS_ZSTD: return zstd_free_workspace(ws);
        default:
                /*
                 * This can't happen, the type is validated several times
                 * before we get here.
                 */
                BUG();
        }
}

static void btrfs_init_workspace_manager(int type)
{
        struct workspace_manager *wsm;
        struct list_head *workspace;

        wsm = btrfs_compress_op[type]->workspace_manager;
        INIT_LIST_HEAD(&wsm->idle_ws);
        spin_lock_init(&wsm->ws_lock);
        atomic_set(&wsm->total_ws, 0);
        init_waitqueue_head(&wsm->ws_wait);

        /*
         * Preallocate one workspace for each compression type so we can
         * guarantee forward progress in the worst case
         */
        workspace = alloc_workspace(type, 0);
        if (IS_ERR(workspace)) {
                pr_warn(
        "BTRFS: cannot preallocate compression workspace, will try later\n");
        } else {
                atomic_set(&wsm->total_ws, 1);
                wsm->free_ws = 1;
                list_add(workspace, &wsm->idle_ws);
        }
}

static void btrfs_cleanup_workspace_manager(int type)
{
        struct workspace_manager *wsman;
        struct list_head *ws;

        wsman = btrfs_compress_op[type]->workspace_manager;
        while (!list_empty(&wsman->idle_ws)) {
                ws = wsman->idle_ws.next;
                list_del(ws);
                free_workspace(type, ws);
                atomic_dec(&wsman->total_ws);
        }
}

/*
 * This finds an available workspace or allocates a new one.
 * If it's not possible to allocate a new one, it waits until one becomes
 * available.
 * Preallocation provides a forward progress guarantee and we do not return
 * errors.
 */
struct list_head *btrfs_get_workspace(int type, unsigned int level)
{
        struct workspace_manager *wsm;
        struct list_head *workspace;
        int cpus = num_online_cpus();
        unsigned nofs_flag;
        struct list_head *idle_ws;
        spinlock_t *ws_lock;
        atomic_t *total_ws;
        wait_queue_head_t *ws_wait;
        int *free_ws;

        wsm = btrfs_compress_op[type]->workspace_manager;
        idle_ws  = &wsm->idle_ws;
        ws_lock  = &wsm->ws_lock;
        total_ws = &wsm->total_ws;
        ws_wait  = &wsm->ws_wait;
        free_ws  = &wsm->free_ws;

again:
        spin_lock(ws_lock);
        if (!list_empty(idle_ws)) {
                workspace = idle_ws->next;
                list_del(workspace);
                (*free_ws)--;
                spin_unlock(ws_lock);
                return workspace;
        }
        if (atomic_read(total_ws) > cpus) {
                DEFINE_WAIT(wait);

                spin_unlock(ws_lock);
                prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
                if (atomic_read(total_ws) > cpus && !*free_ws)
                        schedule();
                finish_wait(ws_wait, &wait);
                goto again;
        }
        atomic_inc(total_ws);
        spin_unlock(ws_lock);

        /*
         * Allocation helpers call vmalloc that can't use GFP_NOFS, so we have
         * to turn it off here because we might get called from the restricted
         * context of btrfs_compress_bio/btrfs_compress_pages
         */
        nofs_flag = memalloc_nofs_save();
        workspace = alloc_workspace(type, level);
        memalloc_nofs_restore(nofs_flag);

        if (IS_ERR(workspace)) {
                atomic_dec(total_ws);
                wake_up(ws_wait);

                /*
                 * Do not return the error but go back to waiting. There's a
                 * workspace preallocated for each type and the compression
                 * time is bounded so we get to a workspace eventually. This
                 * makes our caller's life easier.
                 *
                 * To prevent silent and low-probability deadlocks (when the
                 * initial preallocation fails), check if there are any
                 * workspaces at all.
                 */
                if (atomic_read(total_ws) == 0) {
                        static DEFINE_RATELIMIT_STATE(_rs,
                                        /* once per minute */ 60 * HZ,
                                        /* no burst */ 1);

                        if (__ratelimit(&_rs)) {
                                pr_warn("BTRFS: no compression workspaces, low memory, retrying\n");
                        }
                }
                goto again;
        }
        return workspace;
}

static struct list_head *get_workspace(int type, int level)
{
        switch (type) {
        case BTRFS_COMPRESS_NONE: return btrfs_get_workspace(type, level);
        case BTRFS_COMPRESS_ZLIB: return zlib_get_workspace(level);
        case BTRFS_COMPRESS_LZO:  return btrfs_get_workspace(type, level);
        case BTRFS_COMPRESS_ZSTD: return zstd_get_workspace(level);
        default:
                /*
                 * This can't happen, the type is validated several times
                 * before we get here.
                 */
                BUG();
        }
}

/*
 * put a workspace struct back on the list or free it if we have enough
 * idle ones sitting around
 */
void btrfs_put_workspace(int type, struct list_head *ws)
{
        struct workspace_manager *wsm;
        struct list_head *idle_ws;
        spinlock_t *ws_lock;
        atomic_t *total_ws;
        wait_queue_head_t *ws_wait;
        int *free_ws;

        wsm = btrfs_compress_op[type]->workspace_manager;
        idle_ws  = &wsm->idle_ws;
        ws_lock  = &wsm->ws_lock;
        total_ws = &wsm->total_ws;
        ws_wait  = &wsm->ws_wait;
        free_ws  = &wsm->free_ws;

        spin_lock(ws_lock);
        if (*free_ws <= num_online_cpus()) {
                list_add(ws, idle_ws);
                (*free_ws)++;
                spin_unlock(ws_lock);
                goto wake;
        }
        spin_unlock(ws_lock);

        free_workspace(type, ws);
        atomic_dec(total_ws);
wake:
        cond_wake_up(ws_wait);
}

static void put_workspace(int type, struct list_head *ws)
{
        switch (type) {
        case BTRFS_COMPRESS_NONE: return btrfs_put_workspace(type, ws);
        case BTRFS_COMPRESS_ZLIB: return btrfs_put_workspace(type, ws);
        case BTRFS_COMPRESS_LZO:  return btrfs_put_workspace(type, ws);
        case BTRFS_COMPRESS_ZSTD: return zstd_put_workspace(ws);
        default:
                /*
                 * This can't happen, the type is validated several times
                 * before we get here.
                 */
                BUG();
        }
}

/*
 * Adjust @level according to the limits of the compression algorithm or
 * fall back to the default.
 */
static unsigned int btrfs_compress_set_level(int type, unsigned level)
{
        const struct btrfs_compress_op *ops = btrfs_compress_op[type];

        if (level == 0)
                level = ops->default_level;
        else
                level = min(level, ops->max_level);

        return level;
}
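
/*
 * For illustration, assuming the per-algorithm limits defined with the
 * respective workspaces (at the time of writing: zlib default 3, max 9;
 * zstd max 15):
 *
 *      btrfs_compress_set_level(BTRFS_COMPRESS_ZLIB, 0);       -> 3
 *      btrfs_compress_set_level(BTRFS_COMPRESS_ZLIB, 12);      -> 9 (clamped)
 *      btrfs_compress_set_level(BTRFS_COMPRESS_ZSTD, 15);      -> 15
 */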

/*
 * Given an address space and start and length, compress the bytes into @pages
 * that are allocated on demand.
 *
 * @type_level is encoded algorithm and level, where level 0 means whatever
 * default the algorithm chooses and is opaque here;
 * - the compression algorithm is stored in bits 0-3
 * - the level is stored in bits 4-7
 *
 * @out_pages is an in/out parameter, holds maximum number of pages to allocate
 * and returns number of actually allocated pages
 *
 * @total_in is used to return the number of bytes actually read.  It
 * may be smaller than the input length if we had to exit early because we
 * ran out of room in the pages array or because we crossed the
 * max_out threshold.
 *
 * @total_out is an in/out parameter, must be set to the input length and will
 * be also used to return the total number of compressed bytes
 */
int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
                         u64 start, struct page **pages,
                         unsigned long *out_pages,
                         unsigned long *total_in,
                         unsigned long *total_out)
{
        int type = btrfs_compress_type(type_level);
        int level = btrfs_compress_level(type_level);
        struct list_head *workspace;
        int ret;

        level = btrfs_compress_set_level(type, level);
        workspace = get_workspace(type, level);
        ret = compression_compress_pages(type, workspace, mapping, start, pages,
                                         out_pages, total_in, total_out);
        put_workspace(type, workspace);
        return ret;
}
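
/*
 * A minimal sketch of the @type_level split used above, matching the bit
 * layout described in the comment (type in bits 0-3, level in bits 4-7,
 * as decoded by btrfs_compress_type()/btrfs_compress_level()):
 *
 *      unsigned int type_level = BTRFS_COMPRESS_ZSTD | (3 << 4);
 *      int type  = type_level & 0xF;           // BTRFS_COMPRESS_ZSTD
 *      int level = (type_level & 0xF0) >> 4;   // 3
 */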

static int btrfs_decompress_bio(struct compressed_bio *cb)
{
        struct list_head *workspace;
        int ret;
        int type = cb->compress_type;

        workspace = get_workspace(type, 0);
        ret = compression_decompress_bio(type, workspace, cb);
        put_workspace(type, workspace);

        return ret;
}

/*
 * a less complex decompression routine.  Our compressed data fits in a
 * single page, and we want to read a single page out of it.
 * start_byte tells us the offset into the compressed data we're interested in
 */
int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
                     unsigned long start_byte, size_t srclen, size_t destlen)
{
        struct list_head *workspace;
        int ret;

        workspace = get_workspace(type, 0);
        ret = compression_decompress(type, workspace, data_in, dest_page,
                                     start_byte, srclen, destlen);
        put_workspace(type, workspace);

        return ret;
}

void __init btrfs_init_compress(void)
{
        btrfs_init_workspace_manager(BTRFS_COMPRESS_NONE);
        btrfs_init_workspace_manager(BTRFS_COMPRESS_ZLIB);
        btrfs_init_workspace_manager(BTRFS_COMPRESS_LZO);
        zstd_init_workspace_manager();
}

void __cold btrfs_exit_compress(void)
{
        btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_NONE);
        btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_ZLIB);
        btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_LZO);
        zstd_cleanup_workspace_manager();
}

/*
 * Copy uncompressed data from working buffer to pages.
 *
 * buf_start is the byte offset of the working buffer relative to the start
 * of the uncompressed data.
 *
 * total_out is the last byte of the buffer
 */
int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start,
                              unsigned long total_out, u64 disk_start,
                              struct bio *bio)
{
        unsigned long buf_offset;
        unsigned long current_buf_start;
        unsigned long start_byte;
        unsigned long prev_start_byte;
        unsigned long working_bytes = total_out - buf_start;
        unsigned long bytes;
        struct bio_vec bvec = bio_iter_iovec(bio, bio->bi_iter);

        /*
         * start byte is the first byte of the page we're currently
         * copying into relative to the start of the compressed data.
         */
        start_byte = page_offset(bvec.bv_page) - disk_start;

        /* we haven't yet hit data corresponding to this page */
        if (total_out <= start_byte)
                return 1;

        /*
         * the start of the data we care about is offset into
         * the middle of our working buffer
         */
        if (total_out > start_byte && buf_start < start_byte) {
                buf_offset = start_byte - buf_start;
                working_bytes -= buf_offset;
        } else {
                buf_offset = 0;
        }
        current_buf_start = buf_start;

        /* copy bytes from the working buffer into the pages */
        while (working_bytes > 0) {
                bytes = min_t(unsigned long, bvec.bv_len,
                                PAGE_SIZE - (buf_offset % PAGE_SIZE));
                bytes = min(bytes, working_bytes);

                memcpy_to_page(bvec.bv_page, bvec.bv_offset, buf + buf_offset,
                               bytes);
                flush_dcache_page(bvec.bv_page);

                buf_offset += bytes;
                working_bytes -= bytes;
                current_buf_start += bytes;

                /* check if we need to pick another page */
                bio_advance(bio, bytes);
                if (!bio->bi_iter.bi_size)
                        return 0;
                bvec = bio_iter_iovec(bio, bio->bi_iter);
                prev_start_byte = start_byte;
                start_byte = page_offset(bvec.bv_page) - disk_start;

                /*
                 * We need to make sure we're only adjusting
                 * our offset into compression working buffer when
                 * we're switching pages.  Otherwise we can incorrectly
                 * keep copying when we were actually done.
                 */
                if (start_byte != prev_start_byte) {
                        /*
                         * make sure our new page is covered by this
                         * working buffer
                         */
                        if (total_out <= start_byte)
                                return 1;

                        /*
                         * the next page in the biovec might not be adjacent
                         * to the last page, but it might still be found
                         * inside this working buffer. bump our offset pointer
                         */
                        if (total_out > start_byte &&
                            current_buf_start < start_byte) {
                                buf_offset = start_byte - buf_start;
                                working_bytes = total_out - start_byte;
                                current_buf_start = buf_start + buf_offset;
                        }
                }
        }

        return 1;
}

/*
 * Shannon Entropy calculation
 *
 * Pure byte distribution analysis fails to determine compressibility of data.
 * Try calculating entropy to estimate the average minimum number of bits
 * needed to encode the sampled data.
 *
 * For convenience, return the percentage of needed bits, instead of amount of
 * bits directly.
 *
 * @ENTROPY_LVL_ACEPTABLE - below that threshold, the sample has low byte
 *                          entropy and is likely compressible
 *
 * @ENTROPY_LVL_HIGH - data are not compressible with high probability
 *
 * Use of ilog2() decreases precision, we lower the LVL to 5 to compensate.
 */
#define ENTROPY_LVL_ACEPTABLE           (65)
#define ENTROPY_LVL_HIGH                (80)

/*
 * For increased precision in shannon_entropy calculation,
 * let's do pow(n, M) to preserve more digits after the decimal point:
 *
 * - maximum int bit length is 64
 * - ilog2(MAX_SAMPLE_SIZE)     -> 13
 * - 13 * 4 = 52 < 64           -> M = 4
 *
 * So use pow(n, 4).
 */
static inline u32 ilog2_w(u64 n)
{
        return ilog2(n * n * n * n);
}

static u32 shannon_entropy(struct heuristic_ws *ws)
{
        const u32 entropy_max = 8 * ilog2_w(2);
        u32 entropy_sum = 0;
        u32 p, p_base, sz_base;
        u32 i;

        sz_base = ilog2_w(ws->sample_size);
        for (i = 0; i < BUCKET_SIZE && ws->bucket[i].count > 0; i++) {
                p = ws->bucket[i].count;
                p_base = ilog2_w(p);
                entropy_sum += p * (sz_base - p_base);
        }

        entropy_sum /= ws->sample_size;
        return entropy_sum * 100 / entropy_max;
}
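
/*
 * The loop above is a fixed-point form of Shannon entropy.  As a worked
 * equation, with c_i the bucket counts and S the sample size:
 *
 *      H = -sum(c_i/S * log2(c_i/S)) = sum(c_i * (log2(S) - log2(c_i))) / S
 *
 * ilog2_w(n) == ilog2(n^4) ~= 4 * log2(n), so entropy_sum accumulates the
 * sum scaled by 4, and entropy_max = 8 * ilog2_w(2) = 32 is the matching
 * scaled 8-bits-per-byte maximum, which yields a 0-100 percentage.
 */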

#define RADIX_BASE              4U
#define COUNTERS_SIZE           (1U << RADIX_BASE)

static u8 get4bits(u64 num, int shift)
{
        u8 low4bits;

        num >>= shift;
        /* Reverse order */
        low4bits = (COUNTERS_SIZE - 1) - (num % COUNTERS_SIZE);
        return low4bits;
}
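
/*
 * Worked example of the digit reversal above: for num = 0x2A and shift = 0
 * the low nibble is 0xA (10), so get4bits() returns 15 - 10 = 5.  Reversing
 * every digit makes the LSD radix sort below produce a descending order
 * without needing a separate reversal pass.
 */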
1421
1422/*
1423 * Use 4 bits as radix base
1424 * Use 16 u32 counters for calculating new position in buf array
1425 *
1426 * @array     - array that will be sorted
1427 * @array_buf - buffer array to store sorting results
1428 *              must be equal in size to @array
1429 * @num       - array size
1430 */
1431static void radix_sort(struct bucket_item *array, struct bucket_item *array_buf,
1432                       int num)
1433{
1434        u64 max_num;
1435        u64 buf_num;
1436        u32 counters[COUNTERS_SIZE];
1437        u32 new_addr;
1438        u32 addr;
1439        int bitlen;
1440        int shift;
1441        int i;
1442
1443        /*
1444         * Try avoid useless loop iterations for small numbers stored in big
1445         * counters.  Example: 48 33 4 ... in 64bit array
1446         */
1447        max_num = array[0].count;
1448        for (i = 1; i < num; i++) {
1449                buf_num = array[i].count;
1450                if (buf_num > max_num)
1451                        max_num = buf_num;
1452        }
1453
1454        buf_num = ilog2(max_num);
1455        bitlen = ALIGN(buf_num, RADIX_BASE * 2);
1456
1457        shift = 0;
1458        while (shift < bitlen) {
1459                memset(counters, 0, sizeof(counters));
1460
1461                for (i = 0; i < num; i++) {
1462                        buf_num = array[i].count;
1463                        addr = get4bits(buf_num, shift);
1464                        counters[addr]++;
1465                }
1466
1467                for (i = 1; i < COUNTERS_SIZE; i++)
1468                        counters[i] += counters[i - 1];
1469
1470                for (i = num - 1; i >= 0; i--) {
1471                        buf_num = array[i].count;
1472                        addr = get4bits(buf_num, shift);
1473                        counters[addr]--;
1474                        new_addr = counters[addr];
1475                        array_buf[new_addr] = array[i];
1476                }
1477
1478                shift += RADIX_BASE;
1479
1480                /*
1481                 * A normal radix sort moves the data from the temporary array
1482                 * back to the main one, but that costs CPU time.  Avoid the
1483                 * memcpy() by doing another sort iteration into the original
1484                 * array instead.
1485                 */
1486                memset(counters, 0, sizeof(counters));
1487
1488                for (i = 0; i < num; i++) {
1489                        buf_num = array_buf[i].count;
1490                        addr = get4bits(buf_num, shift);
1491                        counters[addr]++;
1492                }
1493
1494                for (i = 1; i < COUNTERS_SIZE; i++)
1495                        counters[i] += counters[i - 1];
1496
1497                for (i = num - 1; i >= 0; i--) {
1498                        buf_num = array_buf[i].count;
1499                        addr = get4bits(buf_num, shift);
1500                        counters[addr]--;
1501                        new_addr = counters[addr];
1502                        array[new_addr] = array_buf[i];
1503                }
1504
1505                shift += RADIX_BASE;
1506        }
1507}
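
/*
 * Worked example: for counts { 3, 12, 7 }, max_num = 12, so
 * bitlen = ALIGN(ilog2(12), 8) = 8 and the while loop runs once, doing
 * two 4-bit passes (shift 0 and 4).  As each iteration sorts twice, the
 * result lands back in @array, here as { 12, 7, 3 }.
 */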
1508
1509/*
1510 * Size of the core byte set - how many bytes cover 90% of the sample
1511 *
1512 * There are several types of structured binary data that use nearly all byte
1513 * values. The distribution can be uniform and counts in all buckets will be
1514 * nearly the same (eg. encrypted data). Unlikely to be compressible.
1515 *
1516 * Another possibility is a normal (Gaussian) distribution, where the data
1517 * could be potentially compressible, but we have to take a few more steps
1518 * to decide how much.
1519 *
1520 * @BYTE_CORE_SET_LOW  - most byte values repeat frequently, a compression
1521 *                       algorithm can handle that easily
1522 * @BYTE_CORE_SET_HIGH - data have a uniform distribution and with high
1523 *                       probability are not compressible
1524 */
1525#define BYTE_CORE_SET_LOW               (64)
1526#define BYTE_CORE_SET_HIGH              (200)
1527
1528static int byte_core_set_size(struct heuristic_ws *ws)
1529{
1530        u32 i;
1531        u32 coreset_sum = 0;
1532        const u32 core_set_threshold = ws->sample_size * 90 / 100;
1533        struct bucket_item *bucket = ws->bucket;
1534
1535        /* Sort the buckets by count in descending order */
1536        radix_sort(ws->bucket, ws->bucket_b, BUCKET_SIZE);
1537
1538        for (i = 0; i < BYTE_CORE_SET_LOW; i++)
1539                coreset_sum += bucket[i].count;
1540
1541        if (coreset_sum > core_set_threshold)
1542                return i;
1543
1544        for (; i < BYTE_CORE_SET_HIGH && bucket[i].count > 0; i++) {
1545                coreset_sum += bucket[i].count;
1546                if (coreset_sum > core_set_threshold)
1547                        break;
1548        }
1549
1550        return i;
1551}
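
/*
 * Example: for a sample of 8KiB (the maximum collected by
 * heuristic_collect_sample() below) the threshold is 7372 bytes.  If
 * the 64 most frequent byte values already cover that much, the data is
 * reported as compressible; if even the 200 most frequent values do
 * not, the distribution is close to uniform and compression is unlikely
 * to pay off.
 */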
1552
1553/*
1554 * Count byte values in buckets.
1555 * This heuristic can detect textual data (configs, xml, json, html, etc).
1556 * In most text-like data the byte set is restricted to a limited number of
1557 * possible characters, and that restriction in most cases makes the data
1558 * easy to compress.
1559 *
1560 * @BYTE_SET_THRESHOLD - consider all data within this byte set size:
1561 *      less - compressible
1562 *      more - need additional analysis
1563 */
1564#define BYTE_SET_THRESHOLD              (64)
1565
1566static u32 byte_set_size(const struct heuristic_ws *ws)
1567{
1568        u32 i;
1569        u32 byte_set_size = 0;
1570
1571        for (i = 0; i < BYTE_SET_THRESHOLD; i++) {
1572                if (ws->bucket[i].count > 0)
1573                        byte_set_size++;
1574        }
1575
1576        /*
1577         * Continue collecting the count of byte values in buckets.  If the
1578         * byte set size is bigger than the threshold, it's pointless to
1579         * continue, the detection technique would fail for this type of data.
1580         */
1581        for (; i < BUCKET_SIZE; i++) {
1582                if (ws->bucket[i].count > 0) {
1583                        byte_set_size++;
1584                        if (byte_set_size > BYTE_SET_THRESHOLD)
1585                                return byte_set_size;
1586                }
1587        }
1588
1589        return byte_set_size;
1590}
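
/*
 * Example: plain ASCII text or configs dominated by lowercase letters,
 * digits and punctuation often stay within 64 distinct byte values, so
 * such data is reported as compressible here without any further
 * analysis.
 */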
1591
1592static bool sample_repeated_patterns(struct heuristic_ws *ws)
1593{
1594        const u32 half_of_sample = ws->sample_size / 2;
1595        const u8 *data = ws->sample;
1596
1597        return memcmp(&data[0], &data[half_of_sample], half_of_sample) == 0;
1598}
1599
1600static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
1601                                     struct heuristic_ws *ws)
1602{
1603        struct page *page;
1604        u64 index, index_end;
1605        u32 i, curr_sample_pos;
1606        u8 *in_data;
1607
1608        /*
1609         * Compression handles the input data in chunks of 128KiB
1610         * (defined by BTRFS_MAX_UNCOMPRESSED).
1611         *
1612         * We do the same for the heuristic and loop over the whole range.
1613         *
1614         * MAX_SAMPLE_SIZE - calculated under the assumption that the
1615         * heuristic will process no more than BTRFS_MAX_UNCOMPRESSED at a time.
1616         */
1617        if (end - start > BTRFS_MAX_UNCOMPRESSED)
1618                end = start + BTRFS_MAX_UNCOMPRESSED;
1619
1620        index = start >> PAGE_SHIFT;
1621        index_end = end >> PAGE_SHIFT;
1622
1623        /* Don't miss unaligned end */
1624        if (!IS_ALIGNED(end, PAGE_SIZE))
1625                index_end++;
1626
1627        curr_sample_pos = 0;
1628        while (index < index_end) {
1629                page = find_get_page(inode->i_mapping, index);
1630                in_data = kmap_local_page(page);
1631                /* Handle case where the start is not aligned to PAGE_SIZE */
1632                i = start % PAGE_SIZE;
1633                while (i < PAGE_SIZE - SAMPLING_READ_SIZE) {
1634                        /* Don't sample any garbage from the last page */
1635                        if (start > end - SAMPLING_READ_SIZE)
1636                                break;
1637                        memcpy(&ws->sample[curr_sample_pos], &in_data[i],
1638                                        SAMPLING_READ_SIZE);
1639                        i += SAMPLING_INTERVAL;
1640                        start += SAMPLING_INTERVAL;
1641                        curr_sample_pos += SAMPLING_READ_SIZE;
1642                }
1643                kunmap_local(in_data);
1644                put_page(page);
1645
1646                index++;
1647        }
1648
1649        ws->sample_size = curr_sample_pos;
1650}
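
/*
 * With the sampling parameters defined earlier in this file (reads of
 * SAMPLING_READ_SIZE bytes every SAMPLING_INTERVAL bytes), a full
 * 128KiB chunk yields at most MAX_SAMPLE_SIZE bytes of sample, e.g.
 * 131072 / 256 * 16 = 8192 for 16-byte reads every 256 bytes.
 */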
1651
1652/*
1653 * Compression heuristic.
1654 *
1655 * For now it's a naive and optimistic 'return true', we'll extend the logic to
1656 * quickly (compared to direct compression) detect data characteristics
1657 * (compressible/uncompressible) to avoid wasting CPU time on uncompressible
1658 * data.
1659 *
1660 * The following types of analysis can be performed:
1661 * - detect mostly zero data
1662 * - detect data with low "byte set" size (text, etc)
1663 * - detect data with low/high "core byte" set
1664 *
1665 * Return non-zero if the compression should be done, 0 otherwise.
1666 */
1667int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
1668{
1669        struct list_head *ws_list = get_workspace(0, 0);
1670        struct heuristic_ws *ws;
1671        u32 i;
1672        u8 byte;
1673        int ret = 0;
1674
1675        ws = list_entry(ws_list, struct heuristic_ws, list);
1676
1677        heuristic_collect_sample(inode, start, end, ws);
1678
1679        if (sample_repeated_patterns(ws)) {
1680                ret = 1;
1681                goto out;
1682        }
1683
1684        memset(ws->bucket, 0, sizeof(*ws->bucket) * BUCKET_SIZE);
1685
1686        for (i = 0; i < ws->sample_size; i++) {
1687                byte = ws->sample[i];
1688                ws->bucket[byte].count++;
1689        }
1690
1691        i = byte_set_size(ws);
1692        if (i < BYTE_SET_THRESHOLD) {
1693                ret = 2;
1694                goto out;
1695        }
1696
1697        i = byte_core_set_size(ws);
1698        if (i <= BYTE_CORE_SET_LOW) {
1699                ret = 3;
1700                goto out;
1701        }
1702
1703        if (i >= BYTE_CORE_SET_HIGH) {
1704                ret = 0;
1705                goto out;
1706        }
1707
1708        i = shannon_entropy(ws);
1709        if (i <= ENTROPY_LVL_ACEPTABLE) {
1710                ret = 4;
1711                goto out;
1712        }
1713
1714        /*
1715         * For the levels below ENTROPY_LVL_HIGH, additional analysis would be
1716         * needed to give a green light to compression.
1717         *
1718         * For now just assume that compression at that level is not worth the
1719         * resources because:
1720         *
1721         * 1. it is possible to defrag the data later
1722         *
1723         * 2. the data could turn out to be hardly compressible, eg. 150 byte
1724         * values with every bucket count at level ~54.  The heuristic would
1725         * be confused.  This can happen when data have some internal repeated
1726         * patterns like "abbacbbc...".  This can be detected by analyzing
1727         * pairs of bytes, which is too costly.
1728         */
1729        if (i < ENTROPY_LVL_HIGH)
1730                ret = 5;
1731        else
1732                ret = 0;
1736
1737out:
1738        put_workspace(0, ws_list);
1739        return ret;
1740}
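
/*
 * The distinct non-zero return values above only record which detector
 * made the decision (1 repeated pattern, 2 small byte set, 3 small core
 * set, 4 acceptable entropy, 5 medium entropy); callers are expected to
 * treat any non-zero value simply as "worth compressing".
 */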
1741
1742/*
1743 * Convert the compression suffix (eg. after "zlib", starting with ":") to a
1744 * level; an unrecognized string sets the default level.
1745 */
1746unsigned int btrfs_compress_str2level(unsigned int type, const char *str)
1747{
1748        unsigned int level = 0;
1749        int ret;
1750
1751        if (!type)
1752                return 0;
1753
1754        if (str[0] == ':') {
1755                ret = kstrtouint(str + 1, 10, &level);
1756                if (ret)
1757                        level = 0;
1758        }
1759
1760        level = btrfs_compress_set_level(type, level);
1761
1762        return level;
1763}
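
/*
 * Example (illustrative): for the mount option "compress=zlib:9" the
 * caller passes str = ":9" and gets level 9 back; for a missing or
 * malformed suffix such as ":foo", kstrtouint() fails, level stays 0
 * and btrfs_compress_set_level() falls back to the algorithm's default
 * level, clamped to its supported range.
 */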
1764