linux/fs/btrfs/lzo.c
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/bio.h>
#include <linux/lzo.h>
#include "compression.h"

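/*
 * On-disk layout of an lzo compressed extent, as produced by
 * lzo_compress_pages() and consumed by the decompress paths below:
 *
 *   1. A 4-byte little-endian header holding the total length of the
 *      compressed data, all headers included.
 *   2. One or more segments, each prefixed by its own 4-byte little-endian
 *      length and followed by that many bytes of lzo1x-compressed data.
 *
 * A segment length header never crosses a page boundary: when fewer than
 * 4 bytes remain in the current output page they are zero-padded and the
 * next header starts on a fresh page.
 */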
#define LZO_LEN 4

struct workspace {
        void *mem;
        void *buf;      /* where decompressed data goes */
        void *cbuf;     /* where compressed data goes */
        struct list_head list;
};

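/*
 * Free the lzo scratch memory and both bounce buffers of a workspace,
 * then the workspace itself.
 */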
static void lzo_free_workspace(struct list_head *ws)
{
        struct workspace *workspace = list_entry(ws, struct workspace, list);

        vfree(workspace->buf);
        vfree(workspace->cbuf);
        vfree(workspace->mem);
        kfree(workspace);
}

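/*
 * Allocate a workspace: lzo1x scratch memory for the compressor plus two
 * buffers sized for the worst-case expansion of a single page, so one page
 * of input can never overflow them.
 */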
static struct list_head *lzo_alloc_workspace(void)
{
        struct workspace *workspace;

        workspace = kzalloc(sizeof(*workspace), GFP_NOFS);
        if (!workspace)
                return ERR_PTR(-ENOMEM);

        workspace->mem = vmalloc(LZO1X_MEM_COMPRESS);
        workspace->buf = vmalloc(lzo1x_worst_compress(PAGE_CACHE_SIZE));
        workspace->cbuf = vmalloc(lzo1x_worst_compress(PAGE_CACHE_SIZE));
        if (!workspace->mem || !workspace->buf || !workspace->cbuf)
                goto fail;

        INIT_LIST_HEAD(&workspace->list);

        return &workspace->list;
fail:
        lzo_free_workspace(&workspace->list);
        return ERR_PTR(-ENOMEM);
}

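/*
 * Segment and extent lengths are stored as little-endian 32-bit values and
 * copied bytewise, so a header may start at any offset within a page.
 */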
static inline void write_compress_length(char *buf, size_t len)
{
        __le32 dlen;

        dlen = cpu_to_le32(len);
        memcpy(buf, &dlen, LZO_LEN);
}

static inline size_t read_compress_length(char *buf)
{
        __le32 dlen;

        memcpy(&dlen, buf, LZO_LEN);
        return le32_to_cpu(dlen);
}

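/*
 * Compress up to len bytes from mapping, starting at byte offset start,
 * one page of input at a time, into freshly allocated destination pages.
 * Returns 0 on success with the pages used in *out_pages and the byte
 * counts in *total_in/*total_out; returns nonzero if the data grows, the
 * nr_dest_pages limit is hit, or allocation/compression fails, so the
 * caller can fall back to writing the range uncompressed.
 */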
static int lzo_compress_pages(struct list_head *ws,
                              struct address_space *mapping,
                              u64 start, unsigned long len,
                              struct page **pages,
                              unsigned long nr_dest_pages,
                              unsigned long *out_pages,
                              unsigned long *total_in,
                              unsigned long *total_out,
                              unsigned long max_out)
{
        struct workspace *workspace = list_entry(ws, struct workspace, list);
        int ret = 0;
        char *data_in;
        char *cpage_out;
        int nr_pages = 0;
        struct page *in_page = NULL;
        struct page *out_page = NULL;
        unsigned long bytes_left;

        size_t in_len;
        size_t out_len;
        char *buf;
        unsigned long tot_in = 0;
        unsigned long tot_out = 0;
        unsigned long pg_bytes_left;
        unsigned long out_offset;
        unsigned long bytes;

        *out_pages = 0;
        *total_out = 0;
        *total_in = 0;

        in_page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
        data_in = kmap(in_page);

        /*
         * the first 4 bytes of the first page are reserved for the total
         * size of the compressed data; the value is filled in once
         * compression has finished
         */
        out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
        if (out_page == NULL) {
                ret = -ENOMEM;
                goto out;
        }
        cpage_out = kmap(out_page);
        out_offset = LZO_LEN;
        tot_out = LZO_LEN;
        pages[0] = out_page;
        nr_pages = 1;
        pg_bytes_left = PAGE_CACHE_SIZE - LZO_LEN;

        /* compress at most one page of data each time */
        in_len = min(len, PAGE_CACHE_SIZE);
        while (tot_in < len) {
                ret = lzo1x_1_compress(data_in, in_len, workspace->cbuf,
                                       &out_len, workspace->mem);
                if (ret != LZO_E_OK) {
                        printk(KERN_DEBUG "btrfs lzo compress in loop returned %d\n",
                               ret);
                        ret = -1;
                        goto out;
                }

                /* store the size of this chunk of compressed data */
                write_compress_length(cpage_out + out_offset, out_len);
                tot_out += LZO_LEN;
                out_offset += LZO_LEN;
                pg_bytes_left -= LZO_LEN;

                tot_in += in_len;
                tot_out += out_len;

                /* copy bytes from the working buffer into the pages */
                buf = workspace->cbuf;
                while (out_len) {
                        bytes = min_t(unsigned long, pg_bytes_left, out_len);

                        memcpy(cpage_out + out_offset, buf, bytes);

                        out_len -= bytes;
                        pg_bytes_left -= bytes;
                        buf += bytes;
                        out_offset += bytes;

                        /*
                         * we need another page for writing out.
                         *
                         * Note if there's less than 4 bytes left, we just
                         * skip to a new page.
                         */
                        if ((out_len == 0 && pg_bytes_left < LZO_LEN) ||
                            pg_bytes_left == 0) {
                                if (pg_bytes_left) {
                                        memset(cpage_out + out_offset, 0,
                                               pg_bytes_left);
                                        tot_out += pg_bytes_left;
                                }

                                /* we're done, don't allocate new page */
                                if (out_len == 0 && tot_in >= len)
                                        break;

                                kunmap(out_page);
                                if (nr_pages == nr_dest_pages) {
                                        out_page = NULL;
                                        ret = -1;
                                        goto out;
                                }

                                out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
                                if (out_page == NULL) {
                                        ret = -ENOMEM;
                                        goto out;
                                }
                                cpage_out = kmap(out_page);
                                pages[nr_pages++] = out_page;

                                pg_bytes_left = PAGE_CACHE_SIZE;
                                out_offset = 0;
                        }
                }

                /* we're making it bigger, give up */
                if (tot_in > 8192 && tot_in < tot_out)
                        goto out;

                /* we're all done */
                if (tot_in >= len)
                        break;

                if (tot_out > max_out)
                        break;

                bytes_left = len - tot_in;
                kunmap(in_page);
                page_cache_release(in_page);

                start += PAGE_CACHE_SIZE;
                in_page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
                data_in = kmap(in_page);
                in_len = min(bytes_left, PAGE_CACHE_SIZE);
        }

        if (tot_out > tot_in)
                goto out;

        /* store the size of all chunks of compressed data */
        cpage_out = kmap(pages[0]);
        write_compress_length(cpage_out, tot_out);

        kunmap(pages[0]);

        ret = 0;
        *total_out = tot_out;
        *total_in = tot_in;
out:
        *out_pages = nr_pages;
        if (out_page)
                kunmap(out_page);

        if (in_page) {
                kunmap(in_page);
                page_cache_release(in_page);
        }

        return ret;
}

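/*
 * Decompress a whole compressed extent: walk the segment headers in
 * pages_in, inflate each segment into workspace->buf and copy the result
 * into the bio's pages via btrfs_decompress_buf2page().  A segment that is
 * contiguous within one input page is fed to the decompressor directly
 * instead of being gathered into workspace->cbuf first.
 */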
static int lzo_decompress_biovec(struct list_head *ws,
                                 struct page **pages_in,
                                 u64 disk_start,
                                 struct bio_vec *bvec,
                                 int vcnt,
                                 size_t srclen)
{
        struct workspace *workspace = list_entry(ws, struct workspace, list);
        int ret = 0, ret2;
        char *data_in;
        unsigned long page_in_index = 0;
        unsigned long page_out_index = 0;
        unsigned long total_pages_in = (srclen + PAGE_CACHE_SIZE - 1) /
                                        PAGE_CACHE_SIZE;
        unsigned long buf_start;
        unsigned long buf_offset = 0;
        unsigned long bytes;
        unsigned long working_bytes;
        unsigned long pg_offset;

        size_t in_len;
        size_t out_len;
        unsigned long in_offset;
        unsigned long in_page_bytes_left;
        unsigned long tot_in;
        unsigned long tot_out;
        unsigned long tot_len;
        char *buf;
        bool may_late_unmap, need_unmap;

        data_in = kmap(pages_in[0]);
        tot_len = read_compress_length(data_in);

        tot_in = LZO_LEN;
        in_offset = LZO_LEN;
        tot_len = min_t(size_t, srclen, tot_len);
        in_page_bytes_left = PAGE_CACHE_SIZE - LZO_LEN;

        tot_out = 0;
        pg_offset = 0;

        while (tot_in < tot_len) {
                in_len = read_compress_length(data_in + in_offset);
                in_page_bytes_left -= LZO_LEN;
                in_offset += LZO_LEN;
                tot_in += LZO_LEN;

                tot_in += in_len;
                working_bytes = in_len;
                may_late_unmap = need_unmap = false;

                /* fast path: avoid using the working buffer */
                if (in_page_bytes_left >= in_len) {
                        buf = data_in + in_offset;
                        bytes = in_len;
                        may_late_unmap = true;
                        goto cont;
                }

                /* copy bytes from the pages into the working buffer */
                buf = workspace->cbuf;
                buf_offset = 0;
                while (working_bytes) {
                        bytes = min(working_bytes, in_page_bytes_left);

                        memcpy(buf + buf_offset, data_in + in_offset, bytes);
                        buf_offset += bytes;
cont:
                        working_bytes -= bytes;
                        in_page_bytes_left -= bytes;
                        in_offset += bytes;

                        /* check if we need to pick another page */
                        if ((working_bytes == 0 && in_page_bytes_left < LZO_LEN)
                            || in_page_bytes_left == 0) {
                                tot_in += in_page_bytes_left;

                                if (working_bytes == 0 && tot_in >= tot_len)
                                        break;

                                if (page_in_index + 1 >= total_pages_in) {
                                        ret = -1;
                                        goto done;
                                }

                                if (may_late_unmap)
                                        need_unmap = true;
                                else
                                        kunmap(pages_in[page_in_index]);

                                data_in = kmap(pages_in[++page_in_index]);

                                in_page_bytes_left = PAGE_CACHE_SIZE;
                                in_offset = 0;
                        }
                }

                out_len = lzo1x_worst_compress(PAGE_CACHE_SIZE);
                ret = lzo1x_decompress_safe(buf, in_len, workspace->buf,
                                            &out_len);
                if (need_unmap)
                        kunmap(pages_in[page_in_index - 1]);
                if (ret != LZO_E_OK) {
                        printk(KERN_WARNING "btrfs decompress failed\n");
                        ret = -1;
                        break;
                }

                buf_start = tot_out;
                tot_out += out_len;

                ret2 = btrfs_decompress_buf2page(workspace->buf, buf_start,
                                                 tot_out, disk_start,
                                                 bvec, vcnt,
                                                 &page_out_index, &pg_offset);
                if (ret2 == 0)
                        break;
        }
done:
        kunmap(pages_in[page_in_index]);
        return ret;
}

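/*
 * Decompress a single segment for a one-page read: data_in points at the
 * start of the compressed extent, so skip the total-length header, inflate
 * the first segment into workspace->buf and copy at most destlen bytes,
 * starting at start_byte, into dest_page.
 */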
static int lzo_decompress(struct list_head *ws, unsigned char *data_in,
                          struct page *dest_page,
                          unsigned long start_byte,
                          size_t srclen, size_t destlen)
{
        struct workspace *workspace = list_entry(ws, struct workspace, list);
        size_t in_len;
        size_t out_len;
        size_t tot_len;
        int ret = 0;
        char *kaddr;
        unsigned long bytes;

        BUG_ON(srclen < LZO_LEN);

        tot_len = read_compress_length(data_in);
        data_in += LZO_LEN;

        in_len = read_compress_length(data_in);
        data_in += LZO_LEN;

        out_len = PAGE_CACHE_SIZE;
        ret = lzo1x_decompress_safe(data_in, in_len, workspace->buf, &out_len);
        if (ret != LZO_E_OK) {
                printk(KERN_WARNING "btrfs decompress failed!\n");
                ret = -1;
                goto out;
        }

        if (out_len < start_byte) {
                ret = -1;
                goto out;
        }

        bytes = min_t(unsigned long, destlen, out_len - start_byte);

        kaddr = kmap_atomic(dest_page, KM_USER0);
        memcpy(kaddr, workspace->buf + start_byte, bytes);
        kunmap_atomic(kaddr, KM_USER0);
out:
        return ret;
}

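/*
 * Operation table hooked up by the generic compression code; see
 * struct btrfs_compress_op in compression.h.
 */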
struct btrfs_compress_op btrfs_lzo_compress = {
        .alloc_workspace        = lzo_alloc_workspace,
        .free_workspace         = lzo_free_workspace,
        .compress_pages         = lzo_compress_pages,
        .decompress_biovec      = lzo_decompress_biovec,
        .decompress             = lzo_decompress,
};