linux/fs/btrfs/lzo.c
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/bio.h>
#include <linux/lzo.h>
#include "compression.h"

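/*
 * Layout of the compressed data produced by lzo_compress_pages() and
 * consumed by the decompress paths below:
 *
 *   Page 0: | total compressed length (4 bytes, le32) |
 *           | segment 1 length (4 bytes, le32) | segment 1 data ... |
 *           | segment 2 length | segment 2 data ...                 |
 *
 * Each segment is the LZO1X output of at most one page of input data.
 * A 4-byte length header never straddles a page boundary: if fewer than
 * 4 bytes are left in the current output page, the remainder is
 * zero-padded and the next segment starts on the following page.
 * Segment data itself may span output pages.
 */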
#define LZO_LEN 4

struct workspace {
        void *mem;
        void *buf;      /* where decompressed data goes */
        void *cbuf;     /* where compressed data goes */
        struct list_head list;
};

static void lzo_free_workspace(struct list_head *ws)
{
        struct workspace *workspace = list_entry(ws, struct workspace, list);

        vfree(workspace->buf);
        vfree(workspace->cbuf);
        vfree(workspace->mem);
        kfree(workspace);
}

static struct list_head *lzo_alloc_workspace(void)
{
        struct workspace *workspace;

        workspace = kzalloc(sizeof(*workspace), GFP_NOFS);
        if (!workspace)
                return ERR_PTR(-ENOMEM);

        workspace->mem = vmalloc(LZO1X_MEM_COMPRESS);
        workspace->buf = vmalloc(lzo1x_worst_compress(PAGE_CACHE_SIZE));
        workspace->cbuf = vmalloc(lzo1x_worst_compress(PAGE_CACHE_SIZE));
        if (!workspace->mem || !workspace->buf || !workspace->cbuf)
                goto fail;

        INIT_LIST_HEAD(&workspace->list);

        return &workspace->list;
fail:
        lzo_free_workspace(&workspace->list);
        return ERR_PTR(-ENOMEM);
}

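/*
 * Length headers are stored as little-endian u32 values regardless of
 * host endianness.  memcpy() is used because a header may start at any
 * byte offset within a page, so the access can be unaligned.
 */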
static inline void write_compress_length(char *buf, size_t len)
{
        __le32 dlen;

        dlen = cpu_to_le32(len);
        memcpy(buf, &dlen, LZO_LEN);
}

static inline size_t read_compress_length(char *buf)
{
        __le32 dlen;

        memcpy(&dlen, buf, LZO_LEN);
        return le32_to_cpu(dlen);
}

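/*
 * Compress up to @len bytes from @mapping, starting at byte offset
 * @start, into @pages, one input page at a time.  Each compressed chunk
 * is prefixed with its length; the grand total is written into the
 * first 4 bytes of page 0 once the loop finishes.  The function gives
 * up early if the data is not actually shrinking or if more than
 * @nr_dest_pages output pages would be needed.
 */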
static int lzo_compress_pages(struct list_head *ws,
                              struct address_space *mapping,
                              u64 start, unsigned long len,
                              struct page **pages,
                              unsigned long nr_dest_pages,
                              unsigned long *out_pages,
                              unsigned long *total_in,
                              unsigned long *total_out,
                              unsigned long max_out)
{
        struct workspace *workspace = list_entry(ws, struct workspace, list);
        int ret = 0;
        char *data_in;
        char *cpage_out;
        int nr_pages = 0;
        struct page *in_page = NULL;
        struct page *out_page = NULL;
        unsigned long bytes_left;

        size_t in_len;
        size_t out_len;
        char *buf;
        unsigned long tot_in = 0;
        unsigned long tot_out = 0;
        unsigned long pg_bytes_left;
        unsigned long out_offset;
        unsigned long bytes;

        *out_pages = 0;
        *total_out = 0;
        *total_in = 0;

        in_page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
        data_in = kmap(in_page);

        /*
         * store the size of all chunks of compressed data in
         * the first 4 bytes
         */
        out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
        if (out_page == NULL) {
                ret = -ENOMEM;
                goto out;
        }
        cpage_out = kmap(out_page);
        out_offset = LZO_LEN;
        tot_out = LZO_LEN;
        pages[0] = out_page;
        nr_pages = 1;
        pg_bytes_left = PAGE_CACHE_SIZE - LZO_LEN;

        /* compress at most one page of data each time */
        in_len = min(len, PAGE_CACHE_SIZE);
        while (tot_in < len) {
                ret = lzo1x_1_compress(data_in, in_len, workspace->cbuf,
                                       &out_len, workspace->mem);
                if (ret != LZO_E_OK) {
                        printk(KERN_DEBUG "btrfs lzo compress in loop returned %d\n",
                               ret);
                        ret = -1;
                        goto out;
                }

                /* store the size of this chunk of compressed data */
                write_compress_length(cpage_out + out_offset, out_len);
                tot_out += LZO_LEN;
                out_offset += LZO_LEN;
                pg_bytes_left -= LZO_LEN;

                tot_in += in_len;
                tot_out += out_len;

                /* copy bytes from the working buffer into the pages */
                buf = workspace->cbuf;
                while (out_len) {
                        bytes = min_t(unsigned long, pg_bytes_left, out_len);

                        memcpy(cpage_out + out_offset, buf, bytes);

                        out_len -= bytes;
                        pg_bytes_left -= bytes;
                        buf += bytes;
                        out_offset += bytes;

                        /*
                         * we need another page for writing out.
                         *
                         * Note if there's less than 4 bytes left, we just
                         * skip to a new page.
                         */
                        if ((out_len == 0 && pg_bytes_left < LZO_LEN) ||
                            pg_bytes_left == 0) {
                                if (pg_bytes_left) {
                                        memset(cpage_out + out_offset, 0,
                                               pg_bytes_left);
                                        tot_out += pg_bytes_left;
                                }

                                /* we're done, don't allocate new page */
                                if (out_len == 0 && tot_in >= len)
                                        break;

                                kunmap(out_page);
                                if (nr_pages == nr_dest_pages) {
                                        out_page = NULL;
                                        ret = -1;
                                        goto out;
                                }

                                out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
                                if (out_page == NULL) {
                                        ret = -ENOMEM;
                                        goto out;
                                }
                                cpage_out = kmap(out_page);
                                pages[nr_pages++] = out_page;

                                pg_bytes_left = PAGE_CACHE_SIZE;
                                out_offset = 0;
                        }
                }

                /* we're making it bigger, give up */
                if (tot_in > 8192 && tot_in < tot_out) {
                        ret = -1;
                        goto out;
                }

                /* we're all done */
                if (tot_in >= len)
                        break;

                if (tot_out > max_out)
                        break;

                bytes_left = len - tot_in;
                kunmap(in_page);
                page_cache_release(in_page);

                start += PAGE_CACHE_SIZE;
                in_page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
                data_in = kmap(in_page);
                in_len = min(bytes_left, PAGE_CACHE_SIZE);
        }

        if (tot_out > tot_in)
                goto out;

        /* store the size of all chunks of compressed data */
        cpage_out = kmap(pages[0]);
        write_compress_length(cpage_out, tot_out);

        kunmap(pages[0]);

        ret = 0;
        *total_out = tot_out;
        *total_in = tot_in;
out:
        *out_pages = nr_pages;
        if (out_page)
                kunmap(out_page);

        if (in_page) {
                kunmap(in_page);
                page_cache_release(in_page);
        }

        return ret;
}

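/*
 * Decompress a full compressed extent: walk the length-prefixed
 * segments in @pages_in, decompress each one into the workspace buffer
 * and hand the result to btrfs_decompress_buf2page(), which copies it
 * into the bio's pages.  When a segment spans input pages it is first
 * assembled in workspace->cbuf; otherwise it is decompressed straight
 * out of the mapped input page.
 */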
static int lzo_decompress_biovec(struct list_head *ws,
                                 struct page **pages_in,
                                 u64 disk_start,
                                 struct bio_vec *bvec,
                                 int vcnt,
                                 size_t srclen)
{
        struct workspace *workspace = list_entry(ws, struct workspace, list);
        int ret = 0, ret2;
        char *data_in;
        unsigned long page_in_index = 0;
        unsigned long page_out_index = 0;
        unsigned long total_pages_in = (srclen + PAGE_CACHE_SIZE - 1) /
                                        PAGE_CACHE_SIZE;
        unsigned long buf_start;
        unsigned long buf_offset = 0;
        unsigned long bytes;
        unsigned long working_bytes;
        unsigned long pg_offset;

        size_t in_len;
        size_t out_len;
        unsigned long in_offset;
        unsigned long in_page_bytes_left;
        unsigned long tot_in;
        unsigned long tot_out;
        unsigned long tot_len;
        char *buf;
        bool may_late_unmap, need_unmap;

        data_in = kmap(pages_in[0]);
        tot_len = read_compress_length(data_in);

        tot_in = LZO_LEN;
        in_offset = LZO_LEN;
        tot_len = min_t(size_t, srclen, tot_len);
        in_page_bytes_left = PAGE_CACHE_SIZE - LZO_LEN;

        tot_out = 0;
        pg_offset = 0;

        while (tot_in < tot_len) {
                in_len = read_compress_length(data_in + in_offset);
                in_page_bytes_left -= LZO_LEN;
                in_offset += LZO_LEN;
                tot_in += LZO_LEN;

                tot_in += in_len;
                working_bytes = in_len;
                may_late_unmap = need_unmap = false;

                /* fast path: avoid using the working buffer */
                if (in_page_bytes_left >= in_len) {
                        buf = data_in + in_offset;
                        bytes = in_len;
                        may_late_unmap = true;
                        goto cont;
                }

                /* copy bytes from the pages into the working buffer */
                buf = workspace->cbuf;
                buf_offset = 0;
                while (working_bytes) {
                        bytes = min(working_bytes, in_page_bytes_left);

                        memcpy(buf + buf_offset, data_in + in_offset, bytes);
                        buf_offset += bytes;
cont:
                        working_bytes -= bytes;
                        in_page_bytes_left -= bytes;
                        in_offset += bytes;

                        /* check if we need to pick another page */
                        if ((working_bytes == 0 && in_page_bytes_left < LZO_LEN)
                            || in_page_bytes_left == 0) {
                                tot_in += in_page_bytes_left;

                                if (working_bytes == 0 && tot_in >= tot_len)
                                        break;

                                if (page_in_index + 1 >= total_pages_in) {
                                        ret = -1;
                                        goto done;
                                }

                                if (may_late_unmap)
                                        need_unmap = true;
                                else
                                        kunmap(pages_in[page_in_index]);

                                data_in = kmap(pages_in[++page_in_index]);

                                in_page_bytes_left = PAGE_CACHE_SIZE;
                                in_offset = 0;
                        }
                }

                out_len = lzo1x_worst_compress(PAGE_CACHE_SIZE);
                ret = lzo1x_decompress_safe(buf, in_len, workspace->buf,
                                            &out_len);
                if (need_unmap)
                        kunmap(pages_in[page_in_index - 1]);
                if (ret != LZO_E_OK) {
                        printk(KERN_WARNING "btrfs decompress failed\n");
                        ret = -1;
                        break;
                }

                buf_start = tot_out;
                tot_out += out_len;

                ret2 = btrfs_decompress_buf2page(workspace->buf, buf_start,
                                                 tot_out, disk_start,
                                                 bvec, vcnt,
                                                 &page_out_index, &pg_offset);
                if (ret2 == 0)
                        break;
        }
done:
        kunmap(pages_in[page_in_index]);
        return ret;
}

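/*
 * Decompress only the first length-prefixed segment of @data_in into
 * the workspace buffer, then copy up to @destlen bytes starting at
 * offset @start_byte of the decompressed data into @dest_page.
 */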
static int lzo_decompress(struct list_head *ws, unsigned char *data_in,
                          struct page *dest_page,
                          unsigned long start_byte,
                          size_t srclen, size_t destlen)
{
        struct workspace *workspace = list_entry(ws, struct workspace, list);
        size_t in_len;
        size_t out_len;
        size_t tot_len;
        int ret = 0;
        char *kaddr;
        unsigned long bytes;

        BUG_ON(srclen < LZO_LEN);

        tot_len = read_compress_length(data_in);
        data_in += LZO_LEN;

        in_len = read_compress_length(data_in);
        data_in += LZO_LEN;

        out_len = PAGE_CACHE_SIZE;
        ret = lzo1x_decompress_safe(data_in, in_len, workspace->buf, &out_len);
        if (ret != LZO_E_OK) {
                printk(KERN_WARNING "btrfs decompress failed!\n");
                ret = -1;
                goto out;
        }

        if (out_len < start_byte) {
                ret = -1;
                goto out;
        }

        bytes = min_t(unsigned long, destlen, out_len - start_byte);

        kaddr = kmap_atomic(dest_page);
        memcpy(kaddr, workspace->buf + start_byte, bytes);
        kunmap_atomic(kaddr);
out:
        return ret;
}

struct btrfs_compress_op btrfs_lzo_compress = {
        .alloc_workspace        = lzo_alloc_workspace,
        .free_workspace         = lzo_free_workspace,
        .compress_pages         = lzo_compress_pages,
        .decompress_biovec      = lzo_decompress_biovec,
        .decompress             = lzo_decompress,
};