linux/fs/btrfs/lzo.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/bio.h>
#include <linux/lzo.h>
#include <linux/refcount.h>
#include "compression.h"
#include "ctree.h"

#define LZO_LEN 4

/*
 * Btrfs LZO compression format
 *
 * Regular and inlined LZO compressed data extents consist of:
 *
 * 1.  Header
 *     Fixed size. LZO_LEN (4) bytes long, LE32.
 *     Records the total size (including the header) of compressed data.
 *
 * 2.  Segment(s)
 *     Variable size. Each segment includes one segment header, followed by
 *     the data payload.
 *     One regular LZO compressed extent can have one or more segments.
 *     For an inlined LZO compressed extent, only one segment is allowed.
 *     One segment represents at most one page of uncompressed data.
 *
 * 2.1 Segment header
 *     Fixed size. LZO_LEN (4) bytes long, LE32.
 *     Records the total size of the segment (not including the header).
 *     A segment header never crosses a page boundary, thus it's possible to
 *     have at most 3 padding zeros at the end of the page.
 *
 * 2.2 Data Payload
 *     Variable size. The upper size limit is lzo1x_worst_compress(PAGE_SIZE),
 *     which is 4419 for a 4KiB page.
 *
 * Example:
 * Page 1:
 *          0     0x2   0x4   0x6   0x8   0xa   0xc   0xe     0x10
 * 0x0000   |  Header   | SegHdr 01 | Data payload 01 ...     |
 * ...
 * 0x0ff0   | SegHdr  N | Data payload  N     ...          |00|
 *                                                          ^^ padding zeros
 * Page 2:
 * 0x1000   | SegHdr N+1| Data payload N+1 ...                |
 */

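/*
 * Per-context scratch space: @mem holds the working memory required by
 * lzo1x_1_compress(), while @buf and @cbuf are each sized for the worst-case
 * compressed length of a single page (see lzo_alloc_workspace() below).
 */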
struct workspace {
	void *mem;
	void *buf;	/* where decompressed data goes */
	void *cbuf;	/* where compressed data goes */
	struct list_head list;
};

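/* Workspace manager registered through btrfs_lzo_compress at the end of this file. */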
static struct workspace_manager wsm;

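/* Free the scratch buffers of a workspace, then the workspace itself. */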
void lzo_free_workspace(struct list_head *ws)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);

	kvfree(workspace->buf);
	kvfree(workspace->cbuf);
	kvfree(workspace->mem);
	kfree(workspace);
}

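/*
 * Allocate a workspace with kvmalloc'ed buffers: the LZO compression working
 * memory plus one worst-case sized buffer each for compressed and
 * decompressed data.  The @level argument is ignored as LZO has no levels.
 */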
struct list_head *lzo_alloc_workspace(unsigned int level)
{
	struct workspace *workspace;

	workspace = kzalloc(sizeof(*workspace), GFP_KERNEL);
	if (!workspace)
		return ERR_PTR(-ENOMEM);

	workspace->mem = kvmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
	workspace->buf = kvmalloc(lzo1x_worst_compress(PAGE_SIZE), GFP_KERNEL);
	workspace->cbuf = kvmalloc(lzo1x_worst_compress(PAGE_SIZE), GFP_KERNEL);
	if (!workspace->mem || !workspace->buf || !workspace->cbuf)
		goto fail;

	INIT_LIST_HEAD(&workspace->list);

	return &workspace->list;
fail:
	lzo_free_workspace(&workspace->list);
	return ERR_PTR(-ENOMEM);
}

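/*
 * Helpers for the LE32 length fields described in the format comment above.
 * Both the extent header and each segment header are stored as potentially
 * unaligned little-endian 32bit values, hence the memcpy() instead of a
 * direct load/store.
 */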
static inline void write_compress_length(char *buf, size_t len)
{
	__le32 dlen;

	dlen = cpu_to_le32(len);
	memcpy(buf, &dlen, LZO_LEN);
}

static inline size_t read_compress_length(const char *buf)
{
	__le32 dlen;

	memcpy(&dlen, buf, LZO_LEN);
	return le32_to_cpu(dlen);
}

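/*
 * Compress data from @mapping starting at file offset @start into the
 * preallocated @pages array, using the format described at the top of this
 * file.  On success 0 is returned and *out_pages, *total_in and *total_out
 * are updated; -E2BIG means the data did not compress well enough.
 */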
int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
		u64 start, struct page **pages, unsigned long *out_pages,
		unsigned long *total_in, unsigned long *total_out)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	int ret = 0;
	char *data_in;
	char *cpage_out, *sizes_ptr;
	int nr_pages = 0;
	struct page *in_page = NULL;
	struct page *out_page = NULL;
	unsigned long bytes_left;
	unsigned long len = *total_out;
	unsigned long nr_dest_pages = *out_pages;
	const unsigned long max_out = nr_dest_pages * PAGE_SIZE;
	size_t in_len;
	size_t out_len;
	char *buf;
	unsigned long tot_in = 0;
	unsigned long tot_out = 0;
	unsigned long pg_bytes_left;
	unsigned long out_offset;
	unsigned long bytes;

	*out_pages = 0;
	*total_out = 0;
	*total_in = 0;

	in_page = find_get_page(mapping, start >> PAGE_SHIFT);
	data_in = kmap(in_page);

	/*
	 * Reserve the first LZO_LEN bytes of the first page for the total
	 * size of the compressed data; it is filled in once all chunks have
	 * been written.
	 */
	out_page = alloc_page(GFP_NOFS);
	if (out_page == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	cpage_out = kmap(out_page);
	out_offset = LZO_LEN;
	tot_out = LZO_LEN;
	pages[0] = out_page;
	nr_pages = 1;
	pg_bytes_left = PAGE_SIZE - LZO_LEN;

	/* compress at most one page of data each time */
	in_len = min(len, PAGE_SIZE);
	while (tot_in < len) {
		ret = lzo1x_1_compress(data_in, in_len, workspace->cbuf,
				       &out_len, workspace->mem);
		if (ret != LZO_E_OK) {
			pr_debug("BTRFS: lzo in loop returned %d\n", ret);
			ret = -EIO;
			goto out;
		}

		/* store the size of this chunk of compressed data */
		write_compress_length(cpage_out + out_offset, out_len);
		tot_out += LZO_LEN;
		out_offset += LZO_LEN;
		pg_bytes_left -= LZO_LEN;

		tot_in += in_len;
		tot_out += out_len;

		/* copy bytes from the working buffer into the pages */
		buf = workspace->cbuf;
		while (out_len) {
			bytes = min_t(unsigned long, pg_bytes_left, out_len);

			memcpy(cpage_out + out_offset, buf, bytes);

			out_len -= bytes;
			pg_bytes_left -= bytes;
			buf += bytes;
			out_offset += bytes;

			/*
			 * We need another page for writing out.
			 *
			 * Note that if there are fewer than LZO_LEN (4) bytes
			 * left in the current page, we just pad it with zeros
			 * and skip to a new page, as a segment header must not
			 * cross a page boundary.
			 */
			if ((out_len == 0 && pg_bytes_left < LZO_LEN) ||
			    pg_bytes_left == 0) {
				if (pg_bytes_left) {
					memset(cpage_out + out_offset, 0,
					       pg_bytes_left);
					tot_out += pg_bytes_left;
				}

				/* we're done, don't allocate a new page */
				if (out_len == 0 && tot_in >= len)
					break;

				kunmap(out_page);
				if (nr_pages == nr_dest_pages) {
					out_page = NULL;
					ret = -E2BIG;
					goto out;
				}

				out_page = alloc_page(GFP_NOFS);
				if (out_page == NULL) {
					ret = -ENOMEM;
					goto out;
				}
				cpage_out = kmap(out_page);
				pages[nr_pages++] = out_page;

				pg_bytes_left = PAGE_SIZE;
				out_offset = 0;
			}
		}

		/* The data is getting bigger after 8KiB of input, give up. */
		if (tot_in > 8192 && tot_in < tot_out) {
			ret = -E2BIG;
			goto out;
		}

		/* we're all done */
		if (tot_in >= len)
			break;

		if (tot_out > max_out)
			break;

		bytes_left = len - tot_in;
		kunmap(in_page);
		put_page(in_page);

		start += PAGE_SIZE;
		in_page = find_get_page(mapping, start >> PAGE_SHIFT);
		data_in = kmap(in_page);
		in_len = min(bytes_left, PAGE_SIZE);
	}

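	/*
	 * Reject the result unless compression produced an overall net gain;
	 * the caller then keeps the data uncompressed.
	 */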
	if (tot_out >= tot_in) {
		ret = -E2BIG;
		goto out;
	}

	/* store the size of all chunks of compressed data */
	sizes_ptr = kmap_local_page(pages[0]);
	write_compress_length(sizes_ptr, tot_out);
	kunmap_local(sizes_ptr);

	ret = 0;
	*total_out = tot_out;
	*total_in = tot_in;
out:
	*out_pages = nr_pages;
	if (out_page)
		kunmap(out_page);

	if (in_page) {
		kunmap(in_page);
		put_page(in_page);
	}

	return ret;
}

/*
 * Copy the compressed segment payload into @dest.
 *
 * The payload itself has no padding between pages, so we only need to handle
 * the page switching.
 */
static void copy_compressed_segment(struct compressed_bio *cb,
				    char *dest, u32 len, u32 *cur_in)
{
	u32 orig_in = *cur_in;

	while (*cur_in < orig_in + len) {
		char *kaddr;
		struct page *cur_page;
		u32 copy_len = min_t(u32, PAGE_SIZE - offset_in_page(*cur_in),
					  orig_in + len - *cur_in);

		ASSERT(copy_len);
		cur_page = cb->compressed_pages[*cur_in / PAGE_SIZE];

		kaddr = kmap(cur_page);
		memcpy(dest + *cur_in - orig_in,
			kaddr + offset_in_page(*cur_in),
			copy_len);
		kunmap(cur_page);

		*cur_in += copy_len;
	}
}

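/*
 * Decompress a whole compressed bio: parse the extent header, then walk the
 * on-disk segments as laid out in the format comment at the top of this
 * file, decompressing each one into the pages of cb->orig_bio.
 */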
int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	const struct btrfs_fs_info *fs_info = btrfs_sb(cb->inode->i_sb);
	const u32 sectorsize = fs_info->sectorsize;
	char *kaddr;
	int ret;
	/* Compressed data length, can be unaligned */
	u32 len_in;
	/* Offset inside the compressed data */
	u32 cur_in = 0;
	/* Bytes decompressed so far */
	u32 cur_out = 0;

	kaddr = kmap(cb->compressed_pages[0]);
	len_in = read_compress_length(kaddr);
	kunmap(cb->compressed_pages[0]);
	cur_in += LZO_LEN;

	/*
	 * LZO header length check
	 *
	 * The total length must not exceed the maximum extent length, and all
	 * sectors of the compressed extent must be used.  If either condition
	 * is violated, the compressed extent is corrupted.
	 */
	if (len_in > min_t(size_t, BTRFS_MAX_COMPRESSED, cb->compressed_len) ||
	    round_up(len_in, sectorsize) < cb->compressed_len) {
		btrfs_err(fs_info,
			"invalid lzo header, lzo len %u compressed len %u",
			len_in, cb->compressed_len);
		return -EUCLEAN;
	}

	/* Go through each lzo segment */
	while (cur_in < len_in) {
		struct page *cur_page;
		/* Length of the compressed segment */
		u32 seg_len;
		u32 sector_bytes_left;
		size_t out_len = lzo1x_worst_compress(sectorsize);

		/*
		 * We should always have enough space for one segment header
		 * inside the current sector.
		 */
		ASSERT(cur_in / sectorsize ==
		       (cur_in + LZO_LEN - 1) / sectorsize);
		cur_page = cb->compressed_pages[cur_in / PAGE_SIZE];
		ASSERT(cur_page);
		kaddr = kmap(cur_page);
		seg_len = read_compress_length(kaddr + offset_in_page(cur_in));
		kunmap(cur_page);
		cur_in += LZO_LEN;

		/*
		 * seg_len comes from disk and must not be larger than the
		 * buffer allocated for workspace->cbuf.
		 */
		if (seg_len > lzo1x_worst_compress(PAGE_SIZE)) {
			btrfs_err(fs_info, "unexpectedly large lzo segment len %u",
				  seg_len);
			ret = -EIO;
			goto out;
		}

		/* Copy the compressed segment payload into workspace */
		copy_compressed_segment(cb, workspace->cbuf, seg_len, &cur_in);

		/* Decompress the data */
		ret = lzo1x_decompress_safe(workspace->cbuf, seg_len,
					    workspace->buf, &out_len);
		if (ret != LZO_E_OK) {
			btrfs_err(fs_info, "failed to decompress");
			ret = -EIO;
			goto out;
		}

		/* Copy the data into inode pages */
		ret = btrfs_decompress_buf2page(workspace->buf, out_len, cb, cur_out);
		cur_out += out_len;

		/* All data read, exit */
		if (ret == 0)
			goto out;
		ret = 0;

		/* Check if the sector has enough space for a segment header */
		sector_bytes_left = sectorsize - (cur_in % sectorsize);
		if (sector_bytes_left >= LZO_LEN)
			continue;

		/* Skip the padding zeros */
		cur_in += sector_bytes_left;
	}
out:
	if (!ret)
		zero_fill_bio(cb->orig_bio);
	return ret;
}

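/*
 * Decompress a single-segment extent (e.g. an inline extent, which is only
 * allowed one segment) directly into @dest_page.  @start_byte is the offset
 * into the decompressed data at which to start copying.
 */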
int lzo_decompress(struct list_head *ws, unsigned char *data_in,
		struct page *dest_page, unsigned long start_byte, size_t srclen,
		size_t destlen)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	size_t in_len;
	size_t out_len;
	size_t max_segment_len = lzo1x_worst_compress(PAGE_SIZE);
	int ret = 0;
	char *kaddr;
	unsigned long bytes;

	if (srclen < LZO_LEN || srclen > max_segment_len + LZO_LEN * 2)
		return -EUCLEAN;

	in_len = read_compress_length(data_in);
	if (in_len != srclen)
		return -EUCLEAN;
	data_in += LZO_LEN;

	in_len = read_compress_length(data_in);
	if (in_len != srclen - LZO_LEN * 2) {
		ret = -EUCLEAN;
		goto out;
	}
	data_in += LZO_LEN;

	out_len = PAGE_SIZE;
	ret = lzo1x_decompress_safe(data_in, in_len, workspace->buf, &out_len);
	if (ret != LZO_E_OK) {
		pr_warn("BTRFS: decompress failed!\n");
		ret = -EIO;
		goto out;
	}

	if (out_len < start_byte) {
		ret = -EIO;
		goto out;
	}

	/*
	 * The caller is already checking against PAGE_SIZE, but let's move
	 * this check closer to the memcpy/memset.
	 */
	destlen = min_t(unsigned long, destlen, PAGE_SIZE);
	bytes = min_t(unsigned long, destlen, out_len - start_byte);

	kaddr = kmap_local_page(dest_page);
	memcpy(kaddr, workspace->buf + start_byte, bytes);

	/*
	 * btrfs_getblock is doing a zero on the tail of the page too, but this
	 * will cover anything missing from the decompressed data.
	 */
	if (bytes < destlen)
		memset(kaddr + bytes, 0, destlen - bytes);
	kunmap_local(kaddr);
out:
	return ret;
}

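/* LZO has a single compression level, so max and default are both 1. */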
const struct btrfs_compress_op btrfs_lzo_compress = {
	.workspace_manager	= &wsm,
	.max_level		= 1,
	.default_level		= 1,
};