linux/kernel/power/swap.c
   1/*
   2 * linux/kernel/power/swap.c
   3 *
   4 * This file provides functions for reading the suspend image from
   5 * and writing it to a swap partition.
   6 *
   7 * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz>
   8 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
   9 * Copyright (C) 2010-2012 Bojan Smojver <bojan@rexursive.com>
  10 *
  11 * This file is released under the GPLv2.
  12 *
  13 */
  14
  15#include <linux/module.h>
  16#include <linux/file.h>
  17#include <linux/delay.h>
  18#include <linux/bitops.h>
  19#include <linux/genhd.h>
  20#include <linux/device.h>
  21#include <linux/bio.h>
  22#include <linux/blkdev.h>
  23#include <linux/swap.h>
  24#include <linux/swapops.h>
  25#include <linux/pm.h>
  26#include <linux/slab.h>
  27#include <linux/lzo.h>
  28#include <linux/vmalloc.h>
  29#include <linux/cpumask.h>
  30#include <linux/atomic.h>
  31#include <linux/kthread.h>
  32#include <linux/crc32.h>
  33#include <linux/ktime.h>
  34
  35#include "power.h"
  36
  37#define HIBERNATE_SIG   "S1SUSPEND"
  38
  39/*
   40 * When reading an {un,}compressed image, we may restore pages in place, in
   41 * which case some architectures need those pages cleaned before they can be
   42 * executed. We don't know which pages these may be, so clean the lot.
  43 */
  44static bool clean_pages_on_read;
  45static bool clean_pages_on_decompress;
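/*
 * These flags take effect in hib_end_io() (clean_pages_on_read) and in
 * lzo_decompress_threadfn() (clean_pages_on_decompress), where
 * flush_icache_range() is called on the pages just read or decompressed.
 */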
  46
  47/*
  48 *      The swap map is a data structure used for keeping track of each page
  49 *      written to a swap partition.  It consists of many swap_map_page
   50 *      structures, each of which contains an array of MAP_PAGE_ENTRIES swap entries.
  51 *      These structures are stored on the swap and linked together with the
  52 *      help of the .next_swap member.
  53 *
  54 *      The swap map is created during suspend.  The swap map pages are
  55 *      allocated and populated one at a time, so we only need one memory
  56 *      page to set up the entire structure.
  57 *
   58 *      During resume we read all swap_map_page structures into a list.
  59 */
  60
  61#define MAP_PAGE_ENTRIES        (PAGE_SIZE / sizeof(sector_t) - 1)
  62
  63/*
   64 * Number of free pages that are not in high memory.
  65 */
  66static inline unsigned long low_free_pages(void)
  67{
  68        return nr_free_pages() - nr_free_highpages();
  69}
  70
  71/*
  72 * Number of pages required to be kept free while writing the image. Always
  73 * half of all available low pages before the writing starts.
  74 */
  75static inline unsigned long reqd_free_pages(void)
  76{
  77        return low_free_pages() / 2;
  78}
  79
  80struct swap_map_page {
  81        sector_t entries[MAP_PAGE_ENTRIES];
  82        sector_t next_swap;
  83};
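
/*
 * Size sketch (assuming 4 KiB pages and an 8-byte sector_t, neither of which
 * is guaranteed): MAP_PAGE_ENTRIES is 4096 / 8 - 1 = 511, so a swap_map_page
 * fills exactly one page on swap:
 *
 *	[ entries[0] ... entries[510] | next_swap ]  ->  next swap_map_page
 *
 * A zero next_swap terminates the chain.  Both the map pages and the data
 * pages they point at are allocated from the same device via
 * alloc_swapdev_block().
 */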
  84
  85struct swap_map_page_list {
  86        struct swap_map_page *map;
  87        struct swap_map_page_list *next;
  88};
  89
  90/**
  91 *      The swap_map_handle structure is used for handling swap in
   92 *      a file-like way.
  93 */
  94
  95struct swap_map_handle {
  96        struct swap_map_page *cur;
  97        struct swap_map_page_list *maps;
  98        sector_t cur_swap;
  99        sector_t first_sector;
 100        unsigned int k;
 101        unsigned long reqd_free_pages;
 102        u32 crc32;
 103};
 104
 105struct swsusp_header {
 106        char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int) -
 107                      sizeof(u32)];
 108        u32     crc32;
 109        sector_t image;
 110        unsigned int flags;     /* Flags to pass to the "boot" kernel */
 111        char    orig_sig[10];
 112        char    sig[10];
 113} __packed;
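
/*
 * Layout sketch: reserved[] pads the structure to exactly one page, so sig[]
 * occupies the last 10 bytes of the first page of the swap area, which is
 * where the normal swap signature ("SWAP-SPACE"/"SWAPSPACE2") lives.  That is
 * what lets mark_swapfiles() stash the original signature in orig_sig[] and
 * overwrite sig[] with HIBERNATE_SIG, and lets swsusp_check()/swsusp_unmark()
 * restore it later.
 */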
 114
 115static struct swsusp_header *swsusp_header;
 116
 117/**
  118 *      The following functions are used for tracking the allocated
 119 *      swap pages, so that they can be freed in case of an error.
 120 */
 121
 122struct swsusp_extent {
 123        struct rb_node node;
 124        unsigned long start;
 125        unsigned long end;
 126};
 127
 128static struct rb_root swsusp_extents = RB_ROOT;
 129
 130static int swsusp_extents_insert(unsigned long swap_offset)
 131{
 132        struct rb_node **new = &(swsusp_extents.rb_node);
 133        struct rb_node *parent = NULL;
 134        struct swsusp_extent *ext;
 135
 136        /* Figure out where to put the new node */
 137        while (*new) {
 138                ext = rb_entry(*new, struct swsusp_extent, node);
 139                parent = *new;
 140                if (swap_offset < ext->start) {
 141                        /* Try to merge */
 142                        if (swap_offset == ext->start - 1) {
 143                                ext->start--;
 144                                return 0;
 145                        }
 146                        new = &((*new)->rb_left);
 147                } else if (swap_offset > ext->end) {
 148                        /* Try to merge */
 149                        if (swap_offset == ext->end + 1) {
 150                                ext->end++;
 151                                return 0;
 152                        }
 153                        new = &((*new)->rb_right);
 154                } else {
 155                        /* It already is in the tree */
 156                        return -EINVAL;
 157                }
 158        }
 159        /* Add the new node and rebalance the tree. */
 160        ext = kzalloc(sizeof(struct swsusp_extent), GFP_KERNEL);
 161        if (!ext)
 162                return -ENOMEM;
 163
 164        ext->start = swap_offset;
 165        ext->end = swap_offset;
 166        rb_link_node(&ext->node, parent, new);
 167        rb_insert_color(&ext->node, &swsusp_extents);
 168        return 0;
 169}
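
/*
 * Example of the merging above: inserting offsets 10 and 12 creates the
 * extents [10..10] and [12..12]; inserting 11 next simply extends the first
 * extent to [10..11], since it is adjacent to its end.  Adjacent extents are
 * never coalesced with each other, which is fine because
 * free_all_swap_pages() just walks every extent.  Inserting an offset that
 * is already covered returns -EINVAL.
 */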
 170
 171/**
 172 *      alloc_swapdev_block - allocate a swap page and register that it has
 173 *      been allocated, so that it can be freed in case of an error.
 174 */
 175
 176sector_t alloc_swapdev_block(int swap)
 177{
 178        unsigned long offset;
 179
 180        offset = swp_offset(get_swap_page_of_type(swap));
 181        if (offset) {
 182                if (swsusp_extents_insert(offset))
 183                        swap_free(swp_entry(swap, offset));
 184                else
 185                        return swapdev_block(swap, offset);
 186        }
 187        return 0;
 188}
 189
 190/**
 191 *      free_all_swap_pages - free swap pages allocated for saving image data.
 192 *      It also frees the extents used to register which swap entries had been
 193 *      allocated.
 194 */
 195
 196void free_all_swap_pages(int swap)
 197{
 198        struct rb_node *node;
 199
 200        while ((node = swsusp_extents.rb_node)) {
 201                struct swsusp_extent *ext;
 202                unsigned long offset;
 203
 204                ext = rb_entry(node, struct swsusp_extent, node);
 205                rb_erase(node, &swsusp_extents);
 206                for (offset = ext->start; offset <= ext->end; offset++)
 207                        swap_free(swp_entry(swap, offset));
 208
 209                kfree(ext);
 210        }
 211}
 212
 213int swsusp_swap_in_use(void)
 214{
 215        return (swsusp_extents.rb_node != NULL);
 216}
 217
 218/*
 219 * General things
 220 */
 221
 222static unsigned short root_swap = 0xffff;
 223static struct block_device *hib_resume_bdev;
 224
 225struct hib_bio_batch {
 226        atomic_t                count;
 227        wait_queue_head_t       wait;
 228        blk_status_t            error;
 229};
 230
 231static void hib_init_batch(struct hib_bio_batch *hb)
 232{
 233        atomic_set(&hb->count, 0);
 234        init_waitqueue_head(&hb->wait);
 235        hb->error = BLK_STS_OK;
 236}
 237
 238static void hib_end_io(struct bio *bio)
 239{
 240        struct hib_bio_batch *hb = bio->bi_private;
 241        struct page *page = bio->bi_io_vec[0].bv_page;
 242
 243        if (bio->bi_status) {
 244                printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n",
 245                                MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
 246                                (unsigned long long)bio->bi_iter.bi_sector);
 247        }
 248
 249        if (bio_data_dir(bio) == WRITE)
 250                put_page(page);
 251        else if (clean_pages_on_read)
 252                flush_icache_range((unsigned long)page_address(page),
 253                                   (unsigned long)page_address(page) + PAGE_SIZE);
 254
 255        if (bio->bi_status && !hb->error)
 256                hb->error = bio->bi_status;
 257        if (atomic_dec_and_test(&hb->count))
 258                wake_up(&hb->wait);
 259
 260        bio_put(bio);
 261}
 262
 263static int hib_submit_io(int op, int op_flags, pgoff_t page_off, void *addr,
 264                struct hib_bio_batch *hb)
 265{
 266        struct page *page = virt_to_page(addr);
 267        struct bio *bio;
 268        int error = 0;
 269
 270        bio = bio_alloc(__GFP_RECLAIM | __GFP_HIGH, 1);
 271        bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9);
 272        bio_set_dev(bio, hib_resume_bdev);
 273        bio_set_op_attrs(bio, op, op_flags);
 274
 275        if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
 276                printk(KERN_ERR "PM: Adding page to bio failed at %llu\n",
 277                        (unsigned long long)bio->bi_iter.bi_sector);
 278                bio_put(bio);
 279                return -EFAULT;
 280        }
 281
 282        if (hb) {
 283                bio->bi_end_io = hib_end_io;
 284                bio->bi_private = hb;
 285                atomic_inc(&hb->count);
 286                submit_bio(bio);
 287        } else {
 288                error = submit_bio_wait(bio);
 289                bio_put(bio);
 290        }
 291
 292        return error;
 293}
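
/*
 * The bi_sector arithmetic above converts a page-sized swap offset into a
 * 512-byte sector number: with 4 KiB pages (an assumption, not a guarantee)
 * PAGE_SIZE >> 9 is 8, so page offset 3 maps to sector 24.
 */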
 294
 295static blk_status_t hib_wait_io(struct hib_bio_batch *hb)
 296{
 297        wait_event(hb->wait, atomic_read(&hb->count) == 0);
 298        return blk_status_to_errno(hb->error);
 299}
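
/*
 * Typical asynchronous use of a batch, as a rough sketch rather than a quote
 * of any particular caller: initialise it, submit a series of bios against
 * it, then wait for the count to drop back to zero and pick up the first
 * error seen:
 *
 *	struct hib_bio_batch hb;
 *	int error;
 *
 *	hib_init_batch(&hb);
 *	while (more_pages_to_write())	/- hypothetical helper -/
 *		hib_submit_io(REQ_OP_WRITE, REQ_SYNC, offset, buf, &hb);
 *	error = hib_wait_io(&hb);
 *
 * Passing a NULL batch makes hib_submit_io() synchronous via
 * submit_bio_wait() instead.
 */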
 300
 301/*
 302 * Saving part
 303 */
 304
 305static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags)
 306{
 307        int error;
 308
 309        hib_submit_io(REQ_OP_READ, 0, swsusp_resume_block,
 310                      swsusp_header, NULL);
 311        if (!memcmp("SWAP-SPACE",swsusp_header->sig, 10) ||
 312            !memcmp("SWAPSPACE2",swsusp_header->sig, 10)) {
 313                memcpy(swsusp_header->orig_sig,swsusp_header->sig, 10);
 314                memcpy(swsusp_header->sig, HIBERNATE_SIG, 10);
 315                swsusp_header->image = handle->first_sector;
 316                swsusp_header->flags = flags;
 317                if (flags & SF_CRC32_MODE)
 318                        swsusp_header->crc32 = handle->crc32;
 319                error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC,
 320                                      swsusp_resume_block, swsusp_header, NULL);
 321        } else {
 322                printk(KERN_ERR "PM: Swap header not found!\n");
 323                error = -ENODEV;
 324        }
 325        return error;
 326}
 327
 328/**
 329 *      swsusp_swap_check - check if the resume device is a swap device
 330 *      and get its index (if so)
 331 *
  332 *      This is called before saving the image.
 333 */
 334static int swsusp_swap_check(void)
 335{
 336        int res;
 337
 338        res = swap_type_of(swsusp_resume_device, swsusp_resume_block,
 339                        &hib_resume_bdev);
 340        if (res < 0)
 341                return res;
 342
 343        root_swap = res;
 344        res = blkdev_get(hib_resume_bdev, FMODE_WRITE, NULL);
 345        if (res)
 346                return res;
 347
 348        res = set_blocksize(hib_resume_bdev, PAGE_SIZE);
 349        if (res < 0)
 350                blkdev_put(hib_resume_bdev, FMODE_WRITE);
 351
 352        /*
 353         * Update the resume device to the one actually used,
 354         * so the test_resume mode can use it in case it is
 355         * invoked from hibernate() to test the snapshot.
 356         */
 357        swsusp_resume_device = hib_resume_bdev->bd_dev;
 358        return res;
 359}
 360
 361/**
 362 *      write_page - Write one page to given swap location.
 363 *      @buf:           Address we're writing.
 364 *      @offset:        Offset of the swap page we're writing to.
 365 *      @hb:            bio completion batch
 366 */
 367
 368static int write_page(void *buf, sector_t offset, struct hib_bio_batch *hb)
 369{
 370        void *src;
 371        int ret;
 372
 373        if (!offset)
 374                return -ENOSPC;
 375
 376        if (hb) {
 377                src = (void *)__get_free_page(__GFP_RECLAIM | __GFP_NOWARN |
 378                                              __GFP_NORETRY);
 379                if (src) {
 380                        copy_page(src, buf);
 381                } else {
 382                        ret = hib_wait_io(hb); /* Free pages */
 383                        if (ret)
 384                                return ret;
 385                        src = (void *)__get_free_page(__GFP_RECLAIM |
 386                                                      __GFP_NOWARN |
 387                                                      __GFP_NORETRY);
 388                        if (src) {
 389                                copy_page(src, buf);
 390                        } else {
 391                                WARN_ON_ONCE(1);
 392                                hb = NULL;      /* Go synchronous */
 393                                src = buf;
 394                        }
 395                }
 396        } else {
 397                src = buf;
 398        }
 399        return hib_submit_io(REQ_OP_WRITE, REQ_SYNC, offset, src, hb);
 400}
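
/*
 * The copying above matters for asynchronous writes: the caller's buffer may
 * be reused before the bio completes, so the data is first copied into a
 * throwaway page that hib_end_io() releases with put_page() once the write
 * finishes.  If no spare page can be had even after draining the in-flight
 * I/O, the write falls back to the synchronous path using the caller's
 * buffer directly.
 */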
 401
 402static void release_swap_writer(struct swap_map_handle *handle)
 403{
 404        if (handle->cur)
 405                free_page((unsigned long)handle->cur);
 406        handle->cur = NULL;
 407}
 408
 409static int get_swap_writer(struct swap_map_handle *handle)
 410{
 411        int ret;
 412
 413        ret = swsusp_swap_check();
 414        if (ret) {
 415                if (ret != -ENOSPC)
 416                        printk(KERN_ERR "PM: Cannot find swap device, try "
 417                                        "swapon -a.\n");
 418                return ret;
 419        }
 420        handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_KERNEL);
 421        if (!handle->cur) {
 422                ret = -ENOMEM;
 423                goto err_close;
 424        }
 425        handle->cur_swap = alloc_swapdev_block(root_swap);
 426        if (!handle->cur_swap) {
 427                ret = -ENOSPC;
 428                goto err_rel;
 429        }
 430        handle->k = 0;
 431        handle->reqd_free_pages = reqd_free_pages();
 432        handle->first_sector = handle->cur_swap;
 433        return 0;
 434err_rel:
 435        release_swap_writer(handle);
 436err_close:
 437        swsusp_close(FMODE_WRITE);
 438        return ret;
 439}
 440
 441static int swap_write_page(struct swap_map_handle *handle, void *buf,
 442                struct hib_bio_batch *hb)
 443{
 444        int error = 0;
 445        sector_t offset;
 446
 447        if (!handle->cur)
 448                return -EINVAL;
 449        offset = alloc_swapdev_block(root_swap);
 450        error = write_page(buf, offset, hb);
 451        if (error)
 452                return error;
 453        handle->cur->entries[handle->k++] = offset;
 454        if (handle->k >= MAP_PAGE_ENTRIES) {
 455                offset = alloc_swapdev_block(root_swap);
 456                if (!offset)
 457                        return -ENOSPC;
 458                handle->cur->next_swap = offset;
 459                error = write_page(handle->cur, handle->cur_swap, hb);
 460                if (error)
 461                        goto out;
 462                clear_page(handle->cur);
 463                handle->cur_swap = offset;
 464                handle->k = 0;
 465
 466                if (hb && low_free_pages() <= handle->reqd_free_pages) {
 467                        error = hib_wait_io(hb);
 468                        if (error)
 469                                goto out;
 470                        /*
 471                         * Recalculate the number of required free pages, to
 472                         * make sure we never take more than half.
 473                         */
 474                        handle->reqd_free_pages = reqd_free_pages();
 475                }
 476        }
 477 out:
 478        return error;
 479}
 480
 481static int flush_swap_writer(struct swap_map_handle *handle)
 482{
 483        if (handle->cur && handle->cur_swap)
 484                return write_page(handle->cur, handle->cur_swap, NULL);
 485        else
 486                return -EINVAL;
 487}
 488
 489static int swap_writer_finish(struct swap_map_handle *handle,
 490                unsigned int flags, int error)
 491{
 492        if (!error) {
 493                flush_swap_writer(handle);
 494                printk(KERN_INFO "PM: S");
 495                error = mark_swapfiles(handle, flags);
 496                printk("|\n");
 497        }
 498
 499        if (error)
 500                free_all_swap_pages(root_swap);
 501        release_swap_writer(handle);
 502        swsusp_close(FMODE_WRITE);
 503
 504        return error;
 505}
 506
 507/* We need to remember how much compressed data we need to read. */
 508#define LZO_HEADER      sizeof(size_t)
 509
 510/* Number of pages/bytes we'll compress at one time. */
 511#define LZO_UNC_PAGES   32
 512#define LZO_UNC_SIZE    (LZO_UNC_PAGES * PAGE_SIZE)
 513
 514/* Number of pages/bytes we need for compressed data (worst case). */
 515#define LZO_CMP_PAGES   DIV_ROUND_UP(lzo1x_worst_compress(LZO_UNC_SIZE) + \
 516                                     LZO_HEADER, PAGE_SIZE)
 517#define LZO_CMP_SIZE    (LZO_CMP_PAGES * PAGE_SIZE)
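
/*
 * Worked example, assuming 4 KiB pages and an 8-byte size_t: LZO_UNC_SIZE is
 * 32 * 4096 = 131072 bytes, lzo1x_worst_compress() of that is 131072 +
 * 131072 / 16 + 64 + 3 = 139331 bytes, and adding the 8-byte length header
 * gives 139339 bytes, i.e. LZO_CMP_PAGES = 35.  Each compressed chunk is
 * stored on swap as [size_t cmp_len][cmp_len bytes of LZO data], padded out
 * to whole pages; the stored length is what lets the reader skip the padding.
 */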
 518
 519/* Maximum number of threads for compression/decompression. */
 520#define LZO_THREADS     3
 521
 522/* Minimum/maximum number of pages for read buffering. */
 523#define LZO_MIN_RD_PAGES        1024
 524#define LZO_MAX_RD_PAGES        8192
 525
 526
 527/**
 528 *      save_image - save the suspend image data
 529 */
 530
 531static int save_image(struct swap_map_handle *handle,
 532                      struct snapshot_handle *snapshot,
 533                      unsigned int nr_to_write)
 534{
 535        unsigned int m;
 536        int ret;
 537        int nr_pages;
 538        int err2;
 539        struct hib_bio_batch hb;
 540        ktime_t start;
 541        ktime_t stop;
 542
 543        hib_init_batch(&hb);
 544
 545        printk(KERN_INFO "PM: Saving image data pages (%u pages)...\n",
 546                nr_to_write);
 547        m = nr_to_write / 10;
 548        if (!m)
 549                m = 1;
 550        nr_pages = 0;
 551        start = ktime_get();
 552        while (1) {
 553                ret = snapshot_read_next(snapshot);
 554                if (ret <= 0)
 555                        break;
 556                ret = swap_write_page(handle, data_of(*snapshot), &hb);
 557                if (ret)
 558                        break;
 559                if (!(nr_pages % m))
 560                        printk(KERN_INFO "PM: Image saving progress: %3d%%\n",
 561                               nr_pages / m * 10);
 562                nr_pages++;
 563        }
 564        err2 = hib_wait_io(&hb);
 565        stop = ktime_get();
 566        if (!ret)
 567                ret = err2;
 568        if (!ret)
 569                printk(KERN_INFO "PM: Image saving done.\n");
 570        swsusp_show_speed(start, stop, nr_to_write, "Wrote");
 571        return ret;
 572}
 573
 574/**
 575 * Structure used for CRC32.
 576 */
 577struct crc_data {
 578        struct task_struct *thr;                  /* thread */
 579        atomic_t ready;                           /* ready to start flag */
 580        atomic_t stop;                            /* ready to stop flag */
 581        unsigned run_threads;                     /* nr current threads */
 582        wait_queue_head_t go;                     /* start crc update */
 583        wait_queue_head_t done;                   /* crc update done */
 584        u32 *crc32;                               /* points to handle's crc32 */
 585        size_t *unc_len[LZO_THREADS];             /* uncompressed lengths */
 586        unsigned char *unc[LZO_THREADS];          /* uncompressed data */
 587};
 588
 589/**
 590 * CRC32 update function that runs in its own thread.
 591 */
 592static int crc32_threadfn(void *data)
 593{
 594        struct crc_data *d = data;
 595        unsigned i;
 596
 597        while (1) {
 598                wait_event(d->go, atomic_read(&d->ready) ||
 599                                  kthread_should_stop());
 600                if (kthread_should_stop()) {
 601                        d->thr = NULL;
 602                        atomic_set(&d->stop, 1);
 603                        wake_up(&d->done);
 604                        break;
 605                }
 606                atomic_set(&d->ready, 0);
 607
 608                for (i = 0; i < d->run_threads; i++)
 609                        *d->crc32 = crc32_le(*d->crc32,
 610                                             d->unc[i], *d->unc_len[i]);
 611                atomic_set(&d->stop, 1);
 612                wake_up(&d->done);
 613        }
 614        return 0;
 615}
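
/*
 * Handshake used by the CRC and (de)compression threads in this file: the
 * feeding side fills the thread's buffers, sets ->ready and wakes ->go; the
 * thread clears ->ready, does its work, sets ->stop and wakes ->done; the
 * feeding side then waits on ->done and clears ->stop before touching the
 * buffers again.  kthread_stop() is the only other way out of the loop.
 */
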
 616/**
 617 * Structure used for LZO data compression.
 618 */
 619struct cmp_data {
 620        struct task_struct *thr;                  /* thread */
 621        atomic_t ready;                           /* ready to start flag */
 622        atomic_t stop;                            /* ready to stop flag */
 623        int ret;                                  /* return code */
 624        wait_queue_head_t go;                     /* start compression */
 625        wait_queue_head_t done;                   /* compression done */
 626        size_t unc_len;                           /* uncompressed length */
 627        size_t cmp_len;                           /* compressed length */
 628        unsigned char unc[LZO_UNC_SIZE];          /* uncompressed buffer */
 629        unsigned char cmp[LZO_CMP_SIZE];          /* compressed buffer */
 630        unsigned char wrk[LZO1X_1_MEM_COMPRESS];  /* compression workspace */
 631};
 632
 633/**
 634 * Compression function that runs in its own thread.
 635 */
 636static int lzo_compress_threadfn(void *data)
 637{
 638        struct cmp_data *d = data;
 639
 640        while (1) {
 641                wait_event(d->go, atomic_read(&d->ready) ||
 642                                  kthread_should_stop());
 643                if (kthread_should_stop()) {
 644                        d->thr = NULL;
 645                        d->ret = -1;
 646                        atomic_set(&d->stop, 1);
 647                        wake_up(&d->done);
 648                        break;
 649                }
 650                atomic_set(&d->ready, 0);
 651
 652                d->ret = lzo1x_1_compress(d->unc, d->unc_len,
 653                                          d->cmp + LZO_HEADER, &d->cmp_len,
 654                                          d->wrk);
 655                atomic_set(&d->stop, 1);
 656                wake_up(&d->done);
 657        }
 658        return 0;
 659}
 660
 661/**
 662 * save_image_lzo - Save the suspend image data compressed with LZO.
 663 * @handle: Swap map handle to use for saving the image.
 664 * @snapshot: Image to read data from.
 665 * @nr_to_write: Number of pages to save.
 666 */
 667static int save_image_lzo(struct swap_map_handle *handle,
 668                          struct snapshot_handle *snapshot,
 669                          unsigned int nr_to_write)
 670{
 671        unsigned int m;
 672        int ret = 0;
 673        int nr_pages;
 674        int err2;
 675        struct hib_bio_batch hb;
 676        ktime_t start;
 677        ktime_t stop;
 678        size_t off;
 679        unsigned thr, run_threads, nr_threads;
 680        unsigned char *page = NULL;
 681        struct cmp_data *data = NULL;
 682        struct crc_data *crc = NULL;
 683
 684        hib_init_batch(&hb);
 685
 686        /*
  687          * We limit the number of compression threads to bound the memory
  688          * footprint.
 689         */
 690        nr_threads = num_online_cpus() - 1;
 691        nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);
 692
 693        page = (void *)__get_free_page(__GFP_RECLAIM | __GFP_HIGH);
 694        if (!page) {
 695                printk(KERN_ERR "PM: Failed to allocate LZO page\n");
 696                ret = -ENOMEM;
 697                goto out_clean;
 698        }
 699
 700        data = vmalloc(sizeof(*data) * nr_threads);
 701        if (!data) {
 702                printk(KERN_ERR "PM: Failed to allocate LZO data\n");
 703                ret = -ENOMEM;
 704                goto out_clean;
 705        }
 706        for (thr = 0; thr < nr_threads; thr++)
 707                memset(&data[thr], 0, offsetof(struct cmp_data, go));
 708
 709        crc = kmalloc(sizeof(*crc), GFP_KERNEL);
 710        if (!crc) {
 711                printk(KERN_ERR "PM: Failed to allocate crc\n");
 712                ret = -ENOMEM;
 713                goto out_clean;
 714        }
 715        memset(crc, 0, offsetof(struct crc_data, go));
 716
 717        /*
 718         * Start the compression threads.
 719         */
 720        for (thr = 0; thr < nr_threads; thr++) {
 721                init_waitqueue_head(&data[thr].go);
 722                init_waitqueue_head(&data[thr].done);
 723
 724                data[thr].thr = kthread_run(lzo_compress_threadfn,
 725                                            &data[thr],
 726                                            "image_compress/%u", thr);
 727                if (IS_ERR(data[thr].thr)) {
 728                        data[thr].thr = NULL;
 729                        printk(KERN_ERR
 730                               "PM: Cannot start compression threads\n");
 731                        ret = -ENOMEM;
 732                        goto out_clean;
 733                }
 734        }
 735
 736        /*
 737         * Start the CRC32 thread.
 738         */
 739        init_waitqueue_head(&crc->go);
 740        init_waitqueue_head(&crc->done);
 741
 742        handle->crc32 = 0;
 743        crc->crc32 = &handle->crc32;
 744        for (thr = 0; thr < nr_threads; thr++) {
 745                crc->unc[thr] = data[thr].unc;
 746                crc->unc_len[thr] = &data[thr].unc_len;
 747        }
 748
 749        crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
 750        if (IS_ERR(crc->thr)) {
 751                crc->thr = NULL;
 752                printk(KERN_ERR "PM: Cannot start CRC32 thread\n");
 753                ret = -ENOMEM;
 754                goto out_clean;
 755        }
 756
 757        /*
 758         * Adjust the number of required free pages after all allocations have
 759         * been done. We don't want to run out of pages when writing.
 760         */
 761        handle->reqd_free_pages = reqd_free_pages();
 762
 763        printk(KERN_INFO
 764                "PM: Using %u thread(s) for compression.\n"
 765                "PM: Compressing and saving image data (%u pages)...\n",
 766                nr_threads, nr_to_write);
 767        m = nr_to_write / 10;
 768        if (!m)
 769                m = 1;
 770        nr_pages = 0;
 771        start = ktime_get();
 772        for (;;) {
 773                for (thr = 0; thr < nr_threads; thr++) {
 774                        for (off = 0; off < LZO_UNC_SIZE; off += PAGE_SIZE) {
 775                                ret = snapshot_read_next(snapshot);
 776                                if (ret < 0)
 777                                        goto out_finish;
 778
 779                                if (!ret)
 780                                        break;
 781
 782                                memcpy(data[thr].unc + off,
 783                                       data_of(*snapshot), PAGE_SIZE);
 784
 785                                if (!(nr_pages % m))
 786                                        printk(KERN_INFO
 787                                               "PM: Image saving progress: "
 788                                               "%3d%%\n",
 789                                               nr_pages / m * 10);
 790                                nr_pages++;
 791                        }
 792                        if (!off)
 793                                break;
 794
 795                        data[thr].unc_len = off;
 796
 797                        atomic_set(&data[thr].ready, 1);
 798                        wake_up(&data[thr].go);
 799                }
 800
 801                if (!thr)
 802                        break;
 803
 804                crc->run_threads = thr;
 805                atomic_set(&crc->ready, 1);
 806                wake_up(&crc->go);
 807
 808                for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
 809                        wait_event(data[thr].done,
 810                                   atomic_read(&data[thr].stop));
 811                        atomic_set(&data[thr].stop, 0);
 812
 813                        ret = data[thr].ret;
 814
 815                        if (ret < 0) {
 816                                printk(KERN_ERR "PM: LZO compression failed\n");
 817                                goto out_finish;
 818                        }
 819
 820                        if (unlikely(!data[thr].cmp_len ||
 821                                     data[thr].cmp_len >
 822                                     lzo1x_worst_compress(data[thr].unc_len))) {
 823                                printk(KERN_ERR
 824                                       "PM: Invalid LZO compressed length\n");
 825                                ret = -1;
 826                                goto out_finish;
 827                        }
 828
 829                        *(size_t *)data[thr].cmp = data[thr].cmp_len;
 830
 831                        /*
 832                         * Given we are writing one page at a time to disk, we
 833                         * copy that much from the buffer, although the last
  834          * bit will likely be smaller than a full page. This is
 835                         * OK - we saved the length of the compressed data, so
 836                         * any garbage at the end will be discarded when we
 837                         * read it.
 838                         */
 839                        for (off = 0;
 840                             off < LZO_HEADER + data[thr].cmp_len;
 841                             off += PAGE_SIZE) {
 842                                memcpy(page, data[thr].cmp + off, PAGE_SIZE);
 843
 844                                ret = swap_write_page(handle, page, &hb);
 845                                if (ret)
 846                                        goto out_finish;
 847                        }
 848                }
 849
 850                wait_event(crc->done, atomic_read(&crc->stop));
 851                atomic_set(&crc->stop, 0);
 852        }
 853
 854out_finish:
 855        err2 = hib_wait_io(&hb);
 856        stop = ktime_get();
 857        if (!ret)
 858                ret = err2;
 859        if (!ret)
 860                printk(KERN_INFO "PM: Image saving done.\n");
 861        swsusp_show_speed(start, stop, nr_to_write, "Wrote");
 862out_clean:
 863        if (crc) {
 864                if (crc->thr)
 865                        kthread_stop(crc->thr);
 866                kfree(crc);
 867        }
 868        if (data) {
 869                for (thr = 0; thr < nr_threads; thr++)
 870                        if (data[thr].thr)
 871                                kthread_stop(data[thr].thr);
 872                vfree(data);
 873        }
 874        if (page) free_page((unsigned long)page);
 875
 876        return ret;
 877}
 878
 879/**
 880 *      enough_swap - Make sure we have enough swap to save the image.
 881 *
 882 *      Returns TRUE or FALSE after checking the total amount of swap
  883 *      space available on the resume partition.
 884 */
 885
 886static int enough_swap(unsigned int nr_pages, unsigned int flags)
 887{
 888        unsigned int free_swap = count_swap_pages(root_swap, 1);
 889        unsigned int required;
 890
 891        pr_debug("PM: Free swap pages: %u\n", free_swap);
 892
 893        required = PAGES_FOR_IO + nr_pages;
 894        return free_swap > required;
 895}
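
/*
 * Note that swsusp_write() below only applies this check for uncompressed
 * images (SF_NOCOMPRESS_MODE).  For LZO images the compressed size is not
 * known in advance, so writing simply proceeds until it finishes or until
 * alloc_swapdev_block() runs dry and write_page() returns -ENOSPC.
 */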
 896
 897/**
 898 *      swsusp_write - Write entire image and metadata.
 899 *      @flags: flags to pass to the "boot" kernel in the image header
 900 *
  901 *      It is important _NOT_ to unmount filesystems at this point. We want
  902 *      them synced (in case something goes wrong) but we do _not_ want to mark
  903 *      the filesystems clean: they are not. (And it does not matter; if we
  904 *      resume correctly, we'll mark the system clean anyway.)
 905 */
 906
 907int swsusp_write(unsigned int flags)
 908{
 909        struct swap_map_handle handle;
 910        struct snapshot_handle snapshot;
 911        struct swsusp_info *header;
 912        unsigned long pages;
 913        int error;
 914
 915        pages = snapshot_get_image_size();
 916        error = get_swap_writer(&handle);
 917        if (error) {
 918                printk(KERN_ERR "PM: Cannot get swap writer\n");
 919                return error;
 920        }
 921        if (flags & SF_NOCOMPRESS_MODE) {
 922                if (!enough_swap(pages, flags)) {
 923                        printk(KERN_ERR "PM: Not enough free swap\n");
 924                        error = -ENOSPC;
 925                        goto out_finish;
 926                }
 927        }
 928        memset(&snapshot, 0, sizeof(struct snapshot_handle));
 929        error = snapshot_read_next(&snapshot);
 930        if (error < PAGE_SIZE) {
 931                if (error >= 0)
 932                        error = -EFAULT;
 933
 934                goto out_finish;
 935        }
 936        header = (struct swsusp_info *)data_of(snapshot);
 937        error = swap_write_page(&handle, header, NULL);
 938        if (!error) {
 939                error = (flags & SF_NOCOMPRESS_MODE) ?
 940                        save_image(&handle, &snapshot, pages - 1) :
 941                        save_image_lzo(&handle, &snapshot, pages - 1);
 942        }
 943out_finish:
 944        error = swap_writer_finish(&handle, flags, error);
 945        return error;
 946}
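
/*
 * Resulting on-swap layout: the first data page written is the swsusp_info
 * header (synchronously, with a NULL batch), followed either by the raw
 * snapshot pages (SF_NOCOMPRESS_MODE) or by the length-prefixed LZO chunks
 * produced by save_image_lzo().  The swap-map pages that record the sector
 * of every data page are interleaved with the data as they fill up.
 */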
 947
 948/**
 949 *      The following functions allow us to read data using a swap map
  950 *      in a file-like way.
 951 */
 952
 953static void release_swap_reader(struct swap_map_handle *handle)
 954{
 955        struct swap_map_page_list *tmp;
 956
 957        while (handle->maps) {
 958                if (handle->maps->map)
 959                        free_page((unsigned long)handle->maps->map);
 960                tmp = handle->maps;
 961                handle->maps = handle->maps->next;
 962                kfree(tmp);
 963        }
 964        handle->cur = NULL;
 965}
 966
 967static int get_swap_reader(struct swap_map_handle *handle,
 968                unsigned int *flags_p)
 969{
 970        int error;
 971        struct swap_map_page_list *tmp, *last;
 972        sector_t offset;
 973
 974        *flags_p = swsusp_header->flags;
 975
 976        if (!swsusp_header->image) /* how can this happen? */
 977                return -EINVAL;
 978
 979        handle->cur = NULL;
 980        last = handle->maps = NULL;
 981        offset = swsusp_header->image;
 982        while (offset) {
 983                tmp = kmalloc(sizeof(*handle->maps), GFP_KERNEL);
 984                if (!tmp) {
 985                        release_swap_reader(handle);
 986                        return -ENOMEM;
 987                }
 988                memset(tmp, 0, sizeof(*tmp));
 989                if (!handle->maps)
 990                        handle->maps = tmp;
 991                if (last)
 992                        last->next = tmp;
 993                last = tmp;
 994
 995                tmp->map = (struct swap_map_page *)
 996                           __get_free_page(__GFP_RECLAIM | __GFP_HIGH);
 997                if (!tmp->map) {
 998                        release_swap_reader(handle);
 999                        return -ENOMEM;
1000                }
1001
1002                error = hib_submit_io(REQ_OP_READ, 0, offset, tmp->map, NULL);
1003                if (error) {
1004                        release_swap_reader(handle);
1005                        return error;
1006                }
1007                offset = tmp->map->next_swap;
1008        }
1009        handle->k = 0;
1010        handle->cur = handle->maps->map;
1011        return 0;
1012}
1013
1014static int swap_read_page(struct swap_map_handle *handle, void *buf,
1015                struct hib_bio_batch *hb)
1016{
1017        sector_t offset;
1018        int error;
1019        struct swap_map_page_list *tmp;
1020
1021        if (!handle->cur)
1022                return -EINVAL;
1023        offset = handle->cur->entries[handle->k];
1024        if (!offset)
1025                return -EFAULT;
1026        error = hib_submit_io(REQ_OP_READ, 0, offset, buf, hb);
1027        if (error)
1028                return error;
1029        if (++handle->k >= MAP_PAGE_ENTRIES) {
1030                handle->k = 0;
1031                free_page((unsigned long)handle->maps->map);
1032                tmp = handle->maps;
1033                handle->maps = handle->maps->next;
1034                kfree(tmp);
1035                if (!handle->maps)
1036                        release_swap_reader(handle);
1037                else
1038                        handle->cur = handle->maps->map;
1039        }
1040        return error;
1041}
1042
1043static int swap_reader_finish(struct swap_map_handle *handle)
1044{
1045        release_swap_reader(handle);
1046
1047        return 0;
1048}
1049
1050/**
1051 *      load_image - load the image using the swap map handle
1052 *      @handle and the snapshot handle @snapshot
 1053 *      (assume there are @nr_to_read pages to load)
1054 */
1055
1056static int load_image(struct swap_map_handle *handle,
1057                      struct snapshot_handle *snapshot,
1058                      unsigned int nr_to_read)
1059{
1060        unsigned int m;
1061        int ret = 0;
1062        ktime_t start;
1063        ktime_t stop;
1064        struct hib_bio_batch hb;
1065        int err2;
1066        unsigned nr_pages;
1067
1068        hib_init_batch(&hb);
1069
1070        clean_pages_on_read = true;
1071        printk(KERN_INFO "PM: Loading image data pages (%u pages)...\n",
1072                nr_to_read);
1073        m = nr_to_read / 10;
1074        if (!m)
1075                m = 1;
1076        nr_pages = 0;
1077        start = ktime_get();
1078        for ( ; ; ) {
1079                ret = snapshot_write_next(snapshot);
1080                if (ret <= 0)
1081                        break;
1082                ret = swap_read_page(handle, data_of(*snapshot), &hb);
1083                if (ret)
1084                        break;
1085                if (snapshot->sync_read)
1086                        ret = hib_wait_io(&hb);
1087                if (ret)
1088                        break;
1089                if (!(nr_pages % m))
1090                        printk(KERN_INFO "PM: Image loading progress: %3d%%\n",
1091                               nr_pages / m * 10);
1092                nr_pages++;
1093        }
1094        err2 = hib_wait_io(&hb);
1095        stop = ktime_get();
1096        if (!ret)
1097                ret = err2;
1098        if (!ret) {
1099                printk(KERN_INFO "PM: Image loading done.\n");
1100                snapshot_write_finalize(snapshot);
1101                if (!snapshot_image_loaded(snapshot))
1102                        ret = -ENODATA;
1103        }
1104        swsusp_show_speed(start, stop, nr_to_read, "Read");
1105        return ret;
1106}
1107
1108/**
1109 * Structure used for LZO data decompression.
1110 */
1111struct dec_data {
1112        struct task_struct *thr;                  /* thread */
1113        atomic_t ready;                           /* ready to start flag */
1114        atomic_t stop;                            /* ready to stop flag */
1115        int ret;                                  /* return code */
1116        wait_queue_head_t go;                     /* start decompression */
1117        wait_queue_head_t done;                   /* decompression done */
1118        size_t unc_len;                           /* uncompressed length */
1119        size_t cmp_len;                           /* compressed length */
1120        unsigned char unc[LZO_UNC_SIZE];          /* uncompressed buffer */
1121        unsigned char cmp[LZO_CMP_SIZE];          /* compressed buffer */
1122};
1123
1124/**
 1125 * Decompression function that runs in its own thread.
1126 */
1127static int lzo_decompress_threadfn(void *data)
1128{
1129        struct dec_data *d = data;
1130
1131        while (1) {
1132                wait_event(d->go, atomic_read(&d->ready) ||
1133                                  kthread_should_stop());
1134                if (kthread_should_stop()) {
1135                        d->thr = NULL;
1136                        d->ret = -1;
1137                        atomic_set(&d->stop, 1);
1138                        wake_up(&d->done);
1139                        break;
1140                }
1141                atomic_set(&d->ready, 0);
1142
1143                d->unc_len = LZO_UNC_SIZE;
1144                d->ret = lzo1x_decompress_safe(d->cmp + LZO_HEADER, d->cmp_len,
1145                                               d->unc, &d->unc_len);
1146                if (clean_pages_on_decompress)
1147                        flush_icache_range((unsigned long)d->unc,
1148                                           (unsigned long)d->unc + d->unc_len);
1149
1150                atomic_set(&d->stop, 1);
1151                wake_up(&d->done);
1152        }
1153        return 0;
1154}
1155
1156/**
 1157 * load_image_lzo - Load compressed image data and decompress it with LZO.
1158 * @handle: Swap map handle to use for loading data.
1159 * @snapshot: Image to copy uncompressed data into.
1160 * @nr_to_read: Number of pages to load.
1161 */
1162static int load_image_lzo(struct swap_map_handle *handle,
1163                          struct snapshot_handle *snapshot,
1164                          unsigned int nr_to_read)
1165{
1166        unsigned int m;
1167        int ret = 0;
1168        int eof = 0;
1169        struct hib_bio_batch hb;
1170        ktime_t start;
1171        ktime_t stop;
1172        unsigned nr_pages;
1173        size_t off;
1174        unsigned i, thr, run_threads, nr_threads;
1175        unsigned ring = 0, pg = 0, ring_size = 0,
1176                 have = 0, want, need, asked = 0;
1177        unsigned long read_pages = 0;
1178        unsigned char **page = NULL;
1179        struct dec_data *data = NULL;
1180        struct crc_data *crc = NULL;
1181
1182        hib_init_batch(&hb);
1183
1184        /*
 1185         * We limit the number of decompression threads to bound the memory
 1186         * footprint.
1187         */
1188        nr_threads = num_online_cpus() - 1;
1189        nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);
1190
1191        page = vmalloc(sizeof(*page) * LZO_MAX_RD_PAGES);
1192        if (!page) {
1193                printk(KERN_ERR "PM: Failed to allocate LZO page\n");
1194                ret = -ENOMEM;
1195                goto out_clean;
1196        }
1197
1198        data = vmalloc(sizeof(*data) * nr_threads);
1199        if (!data) {
1200                printk(KERN_ERR "PM: Failed to allocate LZO data\n");
1201                ret = -ENOMEM;
1202                goto out_clean;
1203        }
1204        for (thr = 0; thr < nr_threads; thr++)
1205                memset(&data[thr], 0, offsetof(struct dec_data, go));
1206
1207        crc = kmalloc(sizeof(*crc), GFP_KERNEL);
1208        if (!crc) {
1209                printk(KERN_ERR "PM: Failed to allocate crc\n");
1210                ret = -ENOMEM;
1211                goto out_clean;
1212        }
1213        memset(crc, 0, offsetof(struct crc_data, go));
1214
1215        clean_pages_on_decompress = true;
1216
1217        /*
1218         * Start the decompression threads.
1219         */
1220        for (thr = 0; thr < nr_threads; thr++) {
1221                init_waitqueue_head(&data[thr].go);
1222                init_waitqueue_head(&data[thr].done);
1223
1224                data[thr].thr = kthread_run(lzo_decompress_threadfn,
1225                                            &data[thr],
1226                                            "image_decompress/%u", thr);
1227                if (IS_ERR(data[thr].thr)) {
1228                        data[thr].thr = NULL;
1229                        printk(KERN_ERR
1230                               "PM: Cannot start decompression threads\n");
1231                        ret = -ENOMEM;
1232                        goto out_clean;
1233                }
1234        }
1235
1236        /*
1237         * Start the CRC32 thread.
1238         */
1239        init_waitqueue_head(&crc->go);
1240        init_waitqueue_head(&crc->done);
1241
1242        handle->crc32 = 0;
1243        crc->crc32 = &handle->crc32;
1244        for (thr = 0; thr < nr_threads; thr++) {
1245                crc->unc[thr] = data[thr].unc;
1246                crc->unc_len[thr] = &data[thr].unc_len;
1247        }
1248
1249        crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
1250        if (IS_ERR(crc->thr)) {
1251                crc->thr = NULL;
1252                printk(KERN_ERR "PM: Cannot start CRC32 thread\n");
1253                ret = -ENOMEM;
1254                goto out_clean;
1255        }
1256
1257        /*
1258         * Set the number of pages for read buffering.
1259         * This is complete guesswork, because we'll only know the real
1260         * picture once prepare_image() is called, which is much later on
1261         * during the image load phase. We'll assume the worst case and
1262         * say that none of the image pages are from high memory.
1263         */
1264        if (low_free_pages() > snapshot_get_image_size())
1265                read_pages = (low_free_pages() - snapshot_get_image_size()) / 2;
1266        read_pages = clamp_val(read_pages, LZO_MIN_RD_PAGES, LZO_MAX_RD_PAGES);
1267
1268        for (i = 0; i < read_pages; i++) {
1269                page[i] = (void *)__get_free_page(i < LZO_CMP_PAGES ?
1270                                                  __GFP_RECLAIM | __GFP_HIGH :
1271                                                  __GFP_RECLAIM | __GFP_NOWARN |
1272                                                  __GFP_NORETRY);
1273
1274                if (!page[i]) {
1275                        if (i < LZO_CMP_PAGES) {
1276                                ring_size = i;
1277                                printk(KERN_ERR
1278                                       "PM: Failed to allocate LZO pages\n");
1279                                ret = -ENOMEM;
1280                                goto out_clean;
1281                        } else {
1282                                break;
1283                        }
1284                }
1285        }
1286        want = ring_size = i;
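
	/*
	 * Ring accounting used below: page[ring] is where the next read lands
	 * and page[pg] is where the next chunk is consumed from; "asked"
	 * counts reads submitted but not yet waited for, "have" counts pages
	 * whose reads have completed, and "want" counts free slots still
	 * available, so have + asked + want == ring_size between iterations.
	 */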
1287
1288        printk(KERN_INFO
1289                "PM: Using %u thread(s) for decompression.\n"
1290                "PM: Loading and decompressing image data (%u pages)...\n",
1291                nr_threads, nr_to_read);
1292        m = nr_to_read / 10;
1293        if (!m)
1294                m = 1;
1295        nr_pages = 0;
1296        start = ktime_get();
1297
1298        ret = snapshot_write_next(snapshot);
1299        if (ret <= 0)
1300                goto out_finish;
1301
1302        for(;;) {
1303                for (i = 0; !eof && i < want; i++) {
1304                        ret = swap_read_page(handle, page[ring], &hb);
1305                        if (ret) {
1306                                /*
1307                                 * On real read error, finish. On end of data,
1308                                 * set EOF flag and just exit the read loop.
1309                                 */
1310                                if (handle->cur &&
1311                                    handle->cur->entries[handle->k]) {
1312                                        goto out_finish;
1313                                } else {
1314                                        eof = 1;
1315                                        break;
1316                                }
1317                        }
1318                        if (++ring >= ring_size)
1319                                ring = 0;
1320                }
1321                asked += i;
1322                want -= i;
1323
1324                /*
1325                 * We are out of data, wait for some more.
1326                 */
1327                if (!have) {
1328                        if (!asked)
1329                                break;
1330
1331                        ret = hib_wait_io(&hb);
1332                        if (ret)
1333                                goto out_finish;
1334                        have += asked;
1335                        asked = 0;
1336                        if (eof)
1337                                eof = 2;
1338                }
1339
1340                if (crc->run_threads) {
1341                        wait_event(crc->done, atomic_read(&crc->stop));
1342                        atomic_set(&crc->stop, 0);
1343                        crc->run_threads = 0;
1344                }
1345
1346                for (thr = 0; have && thr < nr_threads; thr++) {
1347                        data[thr].cmp_len = *(size_t *)page[pg];
1348                        if (unlikely(!data[thr].cmp_len ||
1349                                     data[thr].cmp_len >
1350                                     lzo1x_worst_compress(LZO_UNC_SIZE))) {
1351                                printk(KERN_ERR
1352                                       "PM: Invalid LZO compressed length\n");
1353                                ret = -1;
1354                                goto out_finish;
1355                        }
1356
1357                        need = DIV_ROUND_UP(data[thr].cmp_len + LZO_HEADER,
1358                                            PAGE_SIZE);
1359                        if (need > have) {
1360                                if (eof > 1) {
1361                                        ret = -1;
1362                                        goto out_finish;
1363                                }
1364                                break;
1365                        }
1366
1367                        for (off = 0;
1368                             off < LZO_HEADER + data[thr].cmp_len;
1369                             off += PAGE_SIZE) {
1370                                memcpy(data[thr].cmp + off,
1371                                       page[pg], PAGE_SIZE);
1372                                have--;
1373                                want++;
1374                                if (++pg >= ring_size)
1375                                        pg = 0;
1376                        }
1377
1378                        atomic_set(&data[thr].ready, 1);
1379                        wake_up(&data[thr].go);
1380                }
1381
1382                /*
1383                 * Wait for more data while we are decompressing.
1384                 */
1385                if (have < LZO_CMP_PAGES && asked) {
1386                        ret = hib_wait_io(&hb);
1387                        if (ret)
1388                                goto out_finish;
1389                        have += asked;
1390                        asked = 0;
1391                        if (eof)
1392                                eof = 2;
1393                }
1394
1395                for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
1396                        wait_event(data[thr].done,
1397                                   atomic_read(&data[thr].stop));
1398                        atomic_set(&data[thr].stop, 0);
1399
1400                        ret = data[thr].ret;
1401
1402                        if (ret < 0) {
1403                                printk(KERN_ERR
1404                                       "PM: LZO decompression failed\n");
1405                                goto out_finish;
1406                        }
1407
1408                        if (unlikely(!data[thr].unc_len ||
1409                                     data[thr].unc_len > LZO_UNC_SIZE ||
1410                                     data[thr].unc_len & (PAGE_SIZE - 1))) {
1411                                printk(KERN_ERR
1412                                       "PM: Invalid LZO uncompressed length\n");
1413                                ret = -1;
1414                                goto out_finish;
1415                        }
1416
1417                        for (off = 0;
1418                             off < data[thr].unc_len; off += PAGE_SIZE) {
1419                                memcpy(data_of(*snapshot),
1420                                       data[thr].unc + off, PAGE_SIZE);
1421
1422                                if (!(nr_pages % m))
1423                                        printk(KERN_INFO
1424                                               "PM: Image loading progress: "
1425                                               "%3d%%\n",
1426                                               nr_pages / m * 10);
1427                                nr_pages++;
1428
1429                                ret = snapshot_write_next(snapshot);
1430                                if (ret <= 0) {
1431                                        crc->run_threads = thr + 1;
1432                                        atomic_set(&crc->ready, 1);
1433                                        wake_up(&crc->go);
1434                                        goto out_finish;
1435                                }
1436                        }
1437                }
1438
1439                crc->run_threads = thr;
1440                atomic_set(&crc->ready, 1);
1441                wake_up(&crc->go);
1442        }
1443
1444out_finish:
1445        if (crc->run_threads) {
1446                wait_event(crc->done, atomic_read(&crc->stop));
1447                atomic_set(&crc->stop, 0);
1448        }
1449        stop = ktime_get();
1450        if (!ret) {
1451                printk(KERN_INFO "PM: Image loading done.\n");
1452                snapshot_write_finalize(snapshot);
1453                if (!snapshot_image_loaded(snapshot))
1454                        ret = -ENODATA;
1455                if (!ret) {
1456                        if (swsusp_header->flags & SF_CRC32_MODE) {
 1457                                if (handle->crc32 != swsusp_header->crc32) {
1458                                        printk(KERN_ERR
1459                                               "PM: Invalid image CRC32!\n");
1460                                        ret = -ENODATA;
1461                                }
1462                        }
1463                }
1464        }
1465        swsusp_show_speed(start, stop, nr_to_read, "Read");
1466out_clean:
1467        for (i = 0; i < ring_size; i++)
1468                free_page((unsigned long)page[i]);
1469        if (crc) {
1470                if (crc->thr)
1471                        kthread_stop(crc->thr);
1472                kfree(crc);
1473        }
1474        if (data) {
1475                for (thr = 0; thr < nr_threads; thr++)
1476                        if (data[thr].thr)
1477                                kthread_stop(data[thr].thr);
1478                vfree(data);
1479        }
1480        vfree(page);
1481
1482        return ret;
1483}
1484
1485/**
1486 *      swsusp_read - read the hibernation image.
 1487 *      @flags_p: location where the flags passed by the "frozen" kernel in the
 1488 *                image header are to be stored
1489 */
1490
1491int swsusp_read(unsigned int *flags_p)
1492{
1493        int error;
1494        struct swap_map_handle handle;
1495        struct snapshot_handle snapshot;
1496        struct swsusp_info *header;
1497
1498        memset(&snapshot, 0, sizeof(struct snapshot_handle));
1499        error = snapshot_write_next(&snapshot);
1500        if (error < PAGE_SIZE)
1501                return error < 0 ? error : -EFAULT;
1502        header = (struct swsusp_info *)data_of(snapshot);
1503        error = get_swap_reader(&handle, flags_p);
1504        if (error)
1505                goto end;
1506        if (!error)
1507                error = swap_read_page(&handle, header, NULL);
1508        if (!error) {
1509                error = (*flags_p & SF_NOCOMPRESS_MODE) ?
1510                        load_image(&handle, &snapshot, header->pages - 1) :
1511                        load_image_lzo(&handle, &snapshot, header->pages - 1);
1512        }
1513        swap_reader_finish(&handle);
1514end:
1515        if (!error)
1516                pr_debug("PM: Image successfully loaded\n");
1517        else
1518                pr_debug("PM: Error %d resuming\n", error);
1519        return error;
1520}
1521
1522/**
1523 *      swsusp_check - Check for swsusp signature in the resume device
1524 */
1525
1526int swsusp_check(void)
1527{
1528        int error;
1529
1530        hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device,
1531                                            FMODE_READ, NULL);
1532        if (!IS_ERR(hib_resume_bdev)) {
1533                set_blocksize(hib_resume_bdev, PAGE_SIZE);
1534                clear_page(swsusp_header);
1535                error = hib_submit_io(REQ_OP_READ, 0,
1536                                        swsusp_resume_block,
1537                                        swsusp_header, NULL);
1538                if (error)
1539                        goto put;
1540
1541                if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
1542                        memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
1543                        /* Reset swap signature now */
1544                        error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC,
1545                                                swsusp_resume_block,
1546                                                swsusp_header, NULL);
1547                } else {
1548                        error = -EINVAL;
1549                }
1550
1551put:
1552                if (error)
1553                        blkdev_put(hib_resume_bdev, FMODE_READ);
1554                else
1555                        pr_debug("PM: Image signature found, resuming\n");
1556        } else {
1557                error = PTR_ERR(hib_resume_bdev);
1558        }
1559
1560        if (error)
1561                pr_debug("PM: Image not found (code %d)\n", error);
1562
1563        return error;
1564}
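
/*
 * Note that swsusp_check() puts the original swap signature back as soon as
 * the image signature is recognised, so the saved image can only be resumed
 * once; after this point the device reads as ordinary swap again.
 */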
1565
1566/**
1567 *      swsusp_close - close swap device.
1568 */
1569
1570void swsusp_close(fmode_t mode)
1571{
1572        if (IS_ERR(hib_resume_bdev)) {
1573                pr_debug("PM: Image device not initialised\n");
1574                return;
1575        }
1576
1577        blkdev_put(hib_resume_bdev, mode);
1578}
1579
1580/**
1581 *      swsusp_unmark - Unmark swsusp signature in the resume device
1582 */
1583
1584#ifdef CONFIG_SUSPEND
1585int swsusp_unmark(void)
1586{
1587        int error;
1588
1589        hib_submit_io(REQ_OP_READ, 0, swsusp_resume_block,
1590                      swsusp_header, NULL);
1591        if (!memcmp(HIBERNATE_SIG,swsusp_header->sig, 10)) {
1592                memcpy(swsusp_header->sig,swsusp_header->orig_sig, 10);
1593                error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC,
1594                                        swsusp_resume_block,
1595                                        swsusp_header, NULL);
1596        } else {
1597                printk(KERN_ERR "PM: Cannot find swsusp signature!\n");
1598                error = -ENODEV;
1599        }
1600
1601        /*
1602         * We just returned from suspend, we don't need the image any more.
1603         */
1604        free_all_swap_pages(root_swap);
1605
1606        return error;
1607}
1608#endif
1609
1610static int swsusp_header_init(void)
1611{
1612        swsusp_header = (struct swsusp_header*) __get_free_page(GFP_KERNEL);
1613        if (!swsusp_header)
1614                panic("Could not allocate memory for swsusp_header\n");
1615        return 0;
1616}
1617
1618core_initcall(swsusp_header_init);
1619