linux/fs/btrfs/lzo.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/bio.h>
#include <linux/lzo.h>
#include <linux/refcount.h>
#include "compression.h"

#define LZO_LEN 4

/*
 * Btrfs LZO compression format
 *
 * Regular and inlined LZO compressed data extents consist of:
 *
 * 1.  Header
 *     Fixed size. LZO_LEN (4) bytes long, LE32.
 *     Records the total size (including the header) of compressed data.
 *
 * 2.  Segment(s)
 *     Variable size. Each segment includes one segment header, followed by
 *     the data payload.
 *     One regular LZO compressed extent can have one or more segments.
 *     For an inlined LZO compressed extent, only one segment is allowed.
 *     One segment represents at most one page of uncompressed data.
 *
 * 2.1 Segment header
 *     Fixed size. LZO_LEN (4) bytes long, LE32.
 *     Records the total size of the segment (not including the header).
 *     A segment header never crosses a page boundary, so a page may end with
 *     at most 3 padding zeros.
 *
 * 2.2 Data Payload
 *     Variable size. The upper size limit is lzo1x_worst_compress(PAGE_SIZE),
 *     which is 4419 for a 4KiB page.
 *
 * Example:
 * Page 1:
 *          0     0x2   0x4   0x6   0x8   0xa   0xc   0xe     0x10
 * 0x0000   |  Header   | SegHdr 01 | Data payload 01 ...     |
 * ...
 * 0x0ff0   | SegHdr  N | Data payload  N     ...          |00|
 *                                                          ^^ padding zeros
 * Page 2:
 * 0x1000   | SegHdr N+1| Data payload N+1 ...                |
 */
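
/*
 * For illustration (the byte counts below are arbitrary): a single page that
 * compresses down to 100 bytes and is stored as an inlined extent is laid
 * out as
 *
 *   Header  = 108 (LE32: 4 byte header + 4 byte segment header + 100)
 *   SegHdr  = 100 (LE32)
 *   Payload = 100 bytes of LZO1X compressed data
 *
 * so the value recorded in the header equals the total on-disk size.
 */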

struct workspace {
        void *mem;      /* scratch memory for the LZO1X compressor */
        void *buf;      /* where decompressed data goes */
        void *cbuf;     /* where compressed data goes */
        struct list_head list;
};

static struct workspace_manager wsm;

static void lzo_init_workspace_manager(void)
{
        btrfs_init_workspace_manager(&wsm, &btrfs_lzo_compress);
}

static void lzo_cleanup_workspace_manager(void)
{
        btrfs_cleanup_workspace_manager(&wsm);
}

static struct list_head *lzo_get_workspace(unsigned int level)
{
        return btrfs_get_workspace(&wsm, level);
}

static void lzo_put_workspace(struct list_head *ws)
{
        btrfs_put_workspace(&wsm, ws);
}

static void lzo_free_workspace(struct list_head *ws)
{
        struct workspace *workspace = list_entry(ws, struct workspace, list);

        kvfree(workspace->buf);
        kvfree(workspace->cbuf);
        kvfree(workspace->mem);
        kfree(workspace);
}

static struct list_head *lzo_alloc_workspace(unsigned int level)
{
        struct workspace *workspace;

        workspace = kzalloc(sizeof(*workspace), GFP_KERNEL);
        if (!workspace)
                return ERR_PTR(-ENOMEM);

        workspace->mem = kvmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
        workspace->buf = kvmalloc(lzo1x_worst_compress(PAGE_SIZE), GFP_KERNEL);
        workspace->cbuf = kvmalloc(lzo1x_worst_compress(PAGE_SIZE), GFP_KERNEL);
        if (!workspace->mem || !workspace->buf || !workspace->cbuf)
                goto fail;

        INIT_LIST_HEAD(&workspace->list);

        return &workspace->list;
fail:
        lzo_free_workspace(&workspace->list);
        return ERR_PTR(-ENOMEM);
}

static inline void write_compress_length(char *buf, size_t len)
{
        __le32 dlen;

        dlen = cpu_to_le32(len);
        memcpy(buf, &dlen, LZO_LEN);
}

static inline size_t read_compress_length(const char *buf)
{
        __le32 dlen;

        memcpy(&dlen, buf, LZO_LEN);
        return le32_to_cpu(dlen);
}
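
/*
 * A minimal sketch, not part of the original file, showing how the two
 * helpers above round-trip a segment length through its little-endian
 * on-disk form; the function name is hypothetical and exists only for
 * illustration.
 */
static inline bool __maybe_unused lzo_len_roundtrip_example(void)
{
        char hdr[LZO_LEN];

        /* Store 100 as an LE32 length, then read it back. */
        write_compress_length(hdr, 100);
        return read_compress_length(hdr) == 100;
}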

static int lzo_compress_pages(struct list_head *ws,
                              struct address_space *mapping,
                              u64 start,
                              struct page **pages,
                              unsigned long *out_pages,
                              unsigned long *total_in,
                              unsigned long *total_out)
{
        struct workspace *workspace = list_entry(ws, struct workspace, list);
        int ret = 0;
        char *data_in;
        char *cpage_out;
        int nr_pages = 0;
        struct page *in_page = NULL;
        struct page *out_page = NULL;
        unsigned long bytes_left;
        unsigned long len = *total_out;
        unsigned long nr_dest_pages = *out_pages;
        const unsigned long max_out = nr_dest_pages * PAGE_SIZE;
        size_t in_len;
        size_t out_len;
        char *buf;
        unsigned long tot_in = 0;
        unsigned long tot_out = 0;
        unsigned long pg_bytes_left;
        unsigned long out_offset;
        unsigned long bytes;

        *out_pages = 0;
        *total_out = 0;
        *total_in = 0;

        in_page = find_get_page(mapping, start >> PAGE_SHIFT);
        data_in = kmap(in_page);

        /*
         * store the size of all chunks of compressed data in
         * the first 4 bytes
         */
        out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
        if (out_page == NULL) {
                ret = -ENOMEM;
                goto out;
        }
        cpage_out = kmap(out_page);
        out_offset = LZO_LEN;
        tot_out = LZO_LEN;
        pages[0] = out_page;
        nr_pages = 1;
        pg_bytes_left = PAGE_SIZE - LZO_LEN;

        /* compress at most one page of data each time */
        in_len = min(len, PAGE_SIZE);
        while (tot_in < len) {
                ret = lzo1x_1_compress(data_in, in_len, workspace->cbuf,
                                       &out_len, workspace->mem);
                if (ret != LZO_E_OK) {
                        pr_debug("BTRFS: lzo in loop returned %d\n", ret);
                        ret = -EIO;
                        goto out;
                }

                /* store the size of this chunk of compressed data */
                write_compress_length(cpage_out + out_offset, out_len);
                tot_out += LZO_LEN;
                out_offset += LZO_LEN;
                pg_bytes_left -= LZO_LEN;

                tot_in += in_len;
                tot_out += out_len;

                /* copy bytes from the working buffer into the pages */
                buf = workspace->cbuf;
                while (out_len) {
                        bytes = min_t(unsigned long, pg_bytes_left, out_len);

                        memcpy(cpage_out + out_offset, buf, bytes);

                        out_len -= bytes;
                        pg_bytes_left -= bytes;
                        buf += bytes;
                        out_offset += bytes;

                        /*
                         * We need another page for writing out.
                         *
                         * Note that if fewer than LZO_LEN (4) bytes are left,
                         * we skip to a new page so the next segment header
                         * never crosses a page boundary.
                         */
                        if ((out_len == 0 && pg_bytes_left < LZO_LEN) ||
                            pg_bytes_left == 0) {
                                if (pg_bytes_left) {
                                        memset(cpage_out + out_offset, 0,
                                               pg_bytes_left);
                                        tot_out += pg_bytes_left;
                                }

                                /* we're done, don't allocate new page */
                                if (out_len == 0 && tot_in >= len)
                                        break;

                                kunmap(out_page);
                                if (nr_pages == nr_dest_pages) {
                                        out_page = NULL;
                                        ret = -E2BIG;
                                        goto out;
                                }

                                out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
                                if (out_page == NULL) {
                                        ret = -ENOMEM;
                                        goto out;
                                }
                                cpage_out = kmap(out_page);
                                pages[nr_pages++] = out_page;

                                pg_bytes_left = PAGE_SIZE;
                                out_offset = 0;
                        }
                }

                /* we're making it bigger, give up */
                if (tot_in > 8192 && tot_in < tot_out) {
                        ret = -E2BIG;
                        goto out;
                }

                /* we're all done */
                if (tot_in >= len)
                        break;

                if (tot_out > max_out)
                        break;

                bytes_left = len - tot_in;
                kunmap(in_page);
                put_page(in_page);

                start += PAGE_SIZE;
                in_page = find_get_page(mapping, start >> PAGE_SHIFT);
                data_in = kmap(in_page);
                in_len = min(bytes_left, PAGE_SIZE);
        }

        if (tot_out >= tot_in) {
                ret = -E2BIG;
                goto out;
        }

        /* store the size of all chunks of compressed data */
        cpage_out = kmap(pages[0]);
        write_compress_length(cpage_out, tot_out);

        kunmap(pages[0]);

        ret = 0;
        *total_out = tot_out;
        *total_in = tot_in;
out:
        *out_pages = nr_pages;
        if (out_page)
                kunmap(out_page);

        if (in_page) {
                kunmap(in_page);
                put_page(in_page);
        }

        return ret;
}

static int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
{
        struct workspace *workspace = list_entry(ws, struct workspace, list);
        int ret = 0, ret2;
        char *data_in;
        unsigned long page_in_index = 0;
        size_t srclen = cb->compressed_len;
        unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
        unsigned long buf_start;
        unsigned long buf_offset = 0;
        unsigned long bytes;
        unsigned long working_bytes;
        size_t in_len;
        size_t out_len;
        const size_t max_segment_len = lzo1x_worst_compress(PAGE_SIZE);
        unsigned long in_offset;
        unsigned long in_page_bytes_left;
        unsigned long tot_in;
        unsigned long tot_out;
        unsigned long tot_len;
        char *buf;
        bool may_late_unmap, need_unmap;
        struct page **pages_in = cb->compressed_pages;
        u64 disk_start = cb->start;
        struct bio *orig_bio = cb->orig_bio;

        data_in = kmap(pages_in[0]);
        tot_len = read_compress_length(data_in);
        /*
         * Compressed data header check.
         *
         * The real compressed size can't exceed the maximum extent length, and
         * all pages should be used (whole unused page with just the segment
         * header is not possible).  If this happens it means the compressed
         * extent is corrupted.
         */
        if (tot_len > min_t(size_t, BTRFS_MAX_COMPRESSED, srclen) ||
            tot_len < srclen - PAGE_SIZE) {
                ret = -EUCLEAN;
                goto done;
        }

        tot_in = LZO_LEN;
        in_offset = LZO_LEN;
        in_page_bytes_left = PAGE_SIZE - LZO_LEN;

        tot_out = 0;

        while (tot_in < tot_len) {
                in_len = read_compress_length(data_in + in_offset);
                in_page_bytes_left -= LZO_LEN;
                in_offset += LZO_LEN;
                tot_in += LZO_LEN;

                /*
                 * Segment header check.
                 *
                 * The segment length must not exceed the maximum LZO
                 * compression size, nor the total compressed size.
                 */
                if (in_len > max_segment_len || tot_in + in_len > tot_len) {
                        ret = -EUCLEAN;
                        goto done;
                }

                tot_in += in_len;
                working_bytes = in_len;
                may_late_unmap = need_unmap = false;

                /* fast path: avoid using the working buffer */
                if (in_page_bytes_left >= in_len) {
                        buf = data_in + in_offset;
                        bytes = in_len;
                        may_late_unmap = true;
                        goto cont;
                }

                /* copy bytes from the pages into the working buffer */
                buf = workspace->cbuf;
                buf_offset = 0;
                while (working_bytes) {
                        bytes = min(working_bytes, in_page_bytes_left);

                        memcpy(buf + buf_offset, data_in + in_offset, bytes);
                        buf_offset += bytes;
cont:
                        working_bytes -= bytes;
                        in_page_bytes_left -= bytes;
                        in_offset += bytes;

                        /* check if we need to pick another page */
                        if ((working_bytes == 0 && in_page_bytes_left < LZO_LEN)
                            || in_page_bytes_left == 0) {
                                tot_in += in_page_bytes_left;

                                if (working_bytes == 0 && tot_in >= tot_len)
                                        break;

                                if (page_in_index + 1 >= total_pages_in) {
                                        ret = -EIO;
                                        goto done;
                                }

                                if (may_late_unmap)
                                        need_unmap = true;
                                else
                                        kunmap(pages_in[page_in_index]);

                                data_in = kmap(pages_in[++page_in_index]);

                                in_page_bytes_left = PAGE_SIZE;
                                in_offset = 0;
                        }
                }

                out_len = max_segment_len;
                ret = lzo1x_decompress_safe(buf, in_len, workspace->buf,
                                            &out_len);
                if (need_unmap)
                        kunmap(pages_in[page_in_index - 1]);
                if (ret != LZO_E_OK) {
                        pr_warn("BTRFS: decompress failed\n");
                        ret = -EIO;
                        break;
                }

                buf_start = tot_out;
                tot_out += out_len;

                ret2 = btrfs_decompress_buf2page(workspace->buf, buf_start,
                                                 tot_out, disk_start, orig_bio);
                if (ret2 == 0)
                        break;
        }
done:
        kunmap(pages_in[page_in_index]);
        if (!ret)
                zero_fill_bio(orig_bio);
        return ret;
}

static int lzo_decompress(struct list_head *ws, unsigned char *data_in,
                          struct page *dest_page,
                          unsigned long start_byte,
                          size_t srclen, size_t destlen)
{
        struct workspace *workspace = list_entry(ws, struct workspace, list);
        size_t in_len;
        size_t out_len;
        size_t max_segment_len = lzo1x_worst_compress(PAGE_SIZE);
        int ret = 0;
        char *kaddr;
        unsigned long bytes;

        if (srclen < LZO_LEN || srclen > max_segment_len + LZO_LEN * 2)
                return -EUCLEAN;

        in_len = read_compress_length(data_in);
        if (in_len != srclen)
                return -EUCLEAN;
        data_in += LZO_LEN;

        in_len = read_compress_length(data_in);
        if (in_len != srclen - LZO_LEN * 2) {
                ret = -EUCLEAN;
                goto out;
        }
        data_in += LZO_LEN;

        out_len = PAGE_SIZE;
        ret = lzo1x_decompress_safe(data_in, in_len, workspace->buf, &out_len);
        if (ret != LZO_E_OK) {
                pr_warn("BTRFS: decompress failed!\n");
                ret = -EIO;
                goto out;
        }

        if (out_len < start_byte) {
                ret = -EIO;
                goto out;
        }

        /*
         * The caller is already checking against PAGE_SIZE, but let's
         * move this check closer to the memcpy/memset.
         */
        destlen = min_t(unsigned long, destlen, PAGE_SIZE);
        bytes = min_t(unsigned long, destlen, out_len - start_byte);

        kaddr = kmap_atomic(dest_page);
        memcpy(kaddr, workspace->buf + start_byte, bytes);

        /*
         * btrfs_getblock is doing a zero on the tail of the page too,
         * but this will cover anything missing from the decompressed
         * data.
         */
        if (bytes < destlen)
                memset(kaddr + bytes, 0, destlen - bytes);
        kunmap_atomic(kaddr);
out:
        return ret;
}

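/*
 * LZO1X offers no tunable compression levels, so any requested level is
 * mapped to the single default setting.
 */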
static unsigned int lzo_set_level(unsigned int level)
{
        return 0;
}

const struct btrfs_compress_op btrfs_lzo_compress = {
        .init_workspace_manager = lzo_init_workspace_manager,
        .cleanup_workspace_manager = lzo_cleanup_workspace_manager,
        .get_workspace          = lzo_get_workspace,
        .put_workspace          = lzo_put_workspace,
        .alloc_workspace        = lzo_alloc_workspace,
        .free_workspace         = lzo_free_workspace,
        .compress_pages         = lzo_compress_pages,
        .decompress_bio         = lzo_decompress_bio,
        .decompress             = lzo_decompress,
        .set_level              = lzo_set_level,
};