linux/kernel/power/swap.c
   1/*
   2 * linux/kernel/power/swap.c
   3 *
   4 * This file provides functions for reading the suspend image from
   5 * and writing it to a swap partition.
   6 *
   7 * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz>
   8 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
   9 * Copyright (C) 2010-2012 Bojan Smojver <bojan@rexursive.com>
  10 *
  11 * This file is released under the GPLv2.
  12 *
  13 */
  14
  15#include <linux/module.h>
  16#include <linux/file.h>
  17#include <linux/delay.h>
  18#include <linux/bitops.h>
  19#include <linux/genhd.h>
  20#include <linux/device.h>
  21#include <linux/bio.h>
  22#include <linux/blkdev.h>
  23#include <linux/swap.h>
  24#include <linux/swapops.h>
  25#include <linux/pm.h>
  26#include <linux/slab.h>
  27#include <linux/lzo.h>
  28#include <linux/vmalloc.h>
  29#include <linux/cpumask.h>
  30#include <linux/atomic.h>
  31#include <linux/kthread.h>
  32#include <linux/crc32.h>
  33#include <linux/ktime.h>
  34
  35#include "power.h"
  36
  37#define HIBERNATE_SIG   "S1SUSPEND"
  38
  39/*
  40 *      The swap map is a data structure used for keeping track of each page
  41 *      written to a swap partition.  It consists of many swap_map_page
   42 *      structures, each of which contains an array of MAP_PAGE_ENTRIES swap entries.
  43 *      These structures are stored on the swap and linked together with the
  44 *      help of the .next_swap member.
  45 *
  46 *      The swap map is created during suspend.  The swap map pages are
  47 *      allocated and populated one at a time, so we only need one memory
  48 *      page to set up the entire structure.
  49 *
   50 *      During resume we read all swap_map_page structures into a list.
  51 */
  52
  53#define MAP_PAGE_ENTRIES        (PAGE_SIZE / sizeof(sector_t) - 1)
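/*
 * With a typical 4 KiB page and an 8-byte sector_t this works out to
 * 4096 / 8 - 1 = 511, i.e. each swap map page records the sectors of
 * 511 data pages and keeps the remaining slot as a link to the next
 * map page.
 */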
  54
  55/*
   56 * Number of free pages that are not in high memory.
  57 */
  58static inline unsigned long low_free_pages(void)
  59{
  60        return nr_free_pages() - nr_free_highpages();
  61}
  62
  63/*
  64 * Number of pages required to be kept free while writing the image. Always
  65 * half of all available low pages before the writing starts.
  66 */
  67static inline unsigned long reqd_free_pages(void)
  68{
  69        return low_free_pages() / 2;
  70}
  71
  72struct swap_map_page {
  73        sector_t entries[MAP_PAGE_ENTRIES];
  74        sector_t next_swap;
  75};
  76
  77struct swap_map_page_list {
  78        struct swap_map_page *map;
  79        struct swap_map_page_list *next;
  80};
  81
  82/**
  83 *      The swap_map_handle structure is used for handling swap in
   84 *      a file-like way
  85 */
  86
  87struct swap_map_handle {
  88        struct swap_map_page *cur;
  89        struct swap_map_page_list *maps;
  90        sector_t cur_swap;
  91        sector_t first_sector;
  92        unsigned int k;
  93        unsigned long reqd_free_pages;
  94        u32 crc32;
  95};
  96
  97struct swsusp_header {
  98        char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int) -
  99                      sizeof(u32)];
 100        u32     crc32;
 101        sector_t image;
 102        unsigned int flags;     /* Flags to pass to the "boot" kernel */
 103        char    orig_sig[10];
 104        char    sig[10];
 105} __packed;
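/*
 * The reserved[] field pads swsusp_header to exactly PAGE_SIZE, so
 * sig[] occupies the last 10 bytes of the swap header page - the same
 * location as the "SWAP-SPACE"/"SWAPSPACE2" magic of the swap
 * signature.  mark_swapfiles() saves the original magic in orig_sig[]
 * and overwrites sig[] with HIBERNATE_SIG; swsusp_check() and
 * swsusp_unmark() restore it on resume.
 */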
 106
 107static struct swsusp_header *swsusp_header;
 108
 109/**
  110 *      The following functions are used for tracking the allocated
 111 *      swap pages, so that they can be freed in case of an error.
 112 */
 113
 114struct swsusp_extent {
 115        struct rb_node node;
 116        unsigned long start;
 117        unsigned long end;
 118};
 119
 120static struct rb_root swsusp_extents = RB_ROOT;
 121
 122static int swsusp_extents_insert(unsigned long swap_offset)
 123{
 124        struct rb_node **new = &(swsusp_extents.rb_node);
 125        struct rb_node *parent = NULL;
 126        struct swsusp_extent *ext;
 127
 128        /* Figure out where to put the new node */
 129        while (*new) {
 130                ext = rb_entry(*new, struct swsusp_extent, node);
 131                parent = *new;
 132                if (swap_offset < ext->start) {
 133                        /* Try to merge */
 134                        if (swap_offset == ext->start - 1) {
 135                                ext->start--;
 136                                return 0;
 137                        }
 138                        new = &((*new)->rb_left);
 139                } else if (swap_offset > ext->end) {
 140                        /* Try to merge */
 141                        if (swap_offset == ext->end + 1) {
 142                                ext->end++;
 143                                return 0;
 144                        }
 145                        new = &((*new)->rb_right);
 146                } else {
 147                        /* It already is in the tree */
 148                        return -EINVAL;
 149                }
 150        }
 151        /* Add the new node and rebalance the tree. */
 152        ext = kzalloc(sizeof(struct swsusp_extent), GFP_KERNEL);
 153        if (!ext)
 154                return -ENOMEM;
 155
 156        ext->start = swap_offset;
 157        ext->end = swap_offset;
 158        rb_link_node(&ext->node, parent, new);
 159        rb_insert_color(&ext->node, &swsusp_extents);
 160        return 0;
 161}
 162
 163/**
 164 *      alloc_swapdev_block - allocate a swap page and register that it has
 165 *      been allocated, so that it can be freed in case of an error.
 166 */
 167
 168sector_t alloc_swapdev_block(int swap)
 169{
 170        unsigned long offset;
 171
 172        offset = swp_offset(get_swap_page_of_type(swap));
 173        if (offset) {
 174                if (swsusp_extents_insert(offset))
 175                        swap_free(swp_entry(swap, offset));
 176                else
 177                        return swapdev_block(swap, offset);
 178        }
 179        return 0;
 180}
 181
 182/**
 183 *      free_all_swap_pages - free swap pages allocated for saving image data.
 184 *      It also frees the extents used to register which swap entries had been
 185 *      allocated.
 186 */
 187
 188void free_all_swap_pages(int swap)
 189{
 190        struct rb_node *node;
 191
 192        while ((node = swsusp_extents.rb_node)) {
 193                struct swsusp_extent *ext;
 194                unsigned long offset;
 195
 196                ext = container_of(node, struct swsusp_extent, node);
 197                rb_erase(node, &swsusp_extents);
 198                for (offset = ext->start; offset <= ext->end; offset++)
 199                        swap_free(swp_entry(swap, offset));
 200
 201                kfree(ext);
 202        }
 203}
 204
 205int swsusp_swap_in_use(void)
 206{
 207        return (swsusp_extents.rb_node != NULL);
 208}
 209
 210/*
 211 * General things
 212 */
 213
 214static unsigned short root_swap = 0xffff;
 215static struct block_device *hib_resume_bdev;
 216
 217struct hib_bio_batch {
 218        atomic_t                count;
 219        wait_queue_head_t       wait;
 220        int                     error;
 221};
 222
 223static void hib_init_batch(struct hib_bio_batch *hb)
 224{
 225        atomic_set(&hb->count, 0);
 226        init_waitqueue_head(&hb->wait);
 227        hb->error = 0;
 228}
 229
 230static void hib_end_io(struct bio *bio)
 231{
 232        struct hib_bio_batch *hb = bio->bi_private;
 233        struct page *page = bio->bi_io_vec[0].bv_page;
 234
 235        if (bio->bi_error) {
 236                printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n",
 237                                imajor(bio->bi_bdev->bd_inode),
 238                                iminor(bio->bi_bdev->bd_inode),
 239                                (unsigned long long)bio->bi_iter.bi_sector);
 240        }
 241
 242        if (bio_data_dir(bio) == WRITE)
 243                put_page(page);
 244
 245        if (bio->bi_error && !hb->error)
 246                hb->error = bio->bi_error;
 247        if (atomic_dec_and_test(&hb->count))
 248                wake_up(&hb->wait);
 249
 250        bio_put(bio);
 251}
 252
 253static int hib_submit_io(int rw, pgoff_t page_off, void *addr,
 254                struct hib_bio_batch *hb)
 255{
 256        struct page *page = virt_to_page(addr);
 257        struct bio *bio;
 258        int error = 0;
 259
 260        bio = bio_alloc(__GFP_RECLAIM | __GFP_HIGH, 1);
 261        bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9);
 262        bio->bi_bdev = hib_resume_bdev;
 263
 264        if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
 265                printk(KERN_ERR "PM: Adding page to bio failed at %llu\n",
 266                        (unsigned long long)bio->bi_iter.bi_sector);
 267                bio_put(bio);
 268                return -EFAULT;
 269        }
 270
 271        if (hb) {
 272                bio->bi_end_io = hib_end_io;
 273                bio->bi_private = hb;
 274                atomic_inc(&hb->count);
 275                submit_bio(rw, bio);
 276        } else {
 277                error = submit_bio_wait(rw, bio);
 278                bio_put(bio);
 279        }
 280
 281        return error;
 282}
 283
 284static int hib_wait_io(struct hib_bio_batch *hb)
 285{
 286        wait_event(hb->wait, atomic_read(&hb->count) == 0);
 287        return hb->error;
 288}
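/*
 * A hib_bio_batch is a simple completion counter for asynchronous page
 * I/O.  An illustrative sketch of how the save and load paths below
 * drive it:
 *
 *        struct hib_bio_batch hb;
 *
 *        hib_init_batch(&hb);
 *        hib_submit_io(WRITE_SYNC, page_off, addr, &hb);   (asynchronous)
 *        ...
 *        error = hib_wait_io(&hb);      (wait for all bios, collect error)
 *
 * Passing a NULL batch to hib_submit_io() makes the request synchronous
 * via submit_bio_wait() instead.
 */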
 289
 290/*
 291 * Saving part
 292 */
 293
 294static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags)
 295{
 296        int error;
 297
 298        hib_submit_io(READ_SYNC, swsusp_resume_block, swsusp_header, NULL);
 299        if (!memcmp("SWAP-SPACE",swsusp_header->sig, 10) ||
 300            !memcmp("SWAPSPACE2",swsusp_header->sig, 10)) {
 301                memcpy(swsusp_header->orig_sig,swsusp_header->sig, 10);
 302                memcpy(swsusp_header->sig, HIBERNATE_SIG, 10);
 303                swsusp_header->image = handle->first_sector;
 304                swsusp_header->flags = flags;
 305                if (flags & SF_CRC32_MODE)
 306                        swsusp_header->crc32 = handle->crc32;
 307                error = hib_submit_io(WRITE_SYNC, swsusp_resume_block,
 308                                        swsusp_header, NULL);
 309        } else {
 310                printk(KERN_ERR "PM: Swap header not found!\n");
 311                error = -ENODEV;
 312        }
 313        return error;
 314}
 315
 316/**
 317 *      swsusp_swap_check - check if the resume device is a swap device
 318 *      and get its index (if so)
 319 *
  320 *      This is called before saving the image
 321 */
 322static int swsusp_swap_check(void)
 323{
 324        int res;
 325
 326        res = swap_type_of(swsusp_resume_device, swsusp_resume_block,
 327                        &hib_resume_bdev);
 328        if (res < 0)
 329                return res;
 330
 331        root_swap = res;
 332        res = blkdev_get(hib_resume_bdev, FMODE_WRITE, NULL);
 333        if (res)
 334                return res;
 335
 336        res = set_blocksize(hib_resume_bdev, PAGE_SIZE);
 337        if (res < 0)
 338                blkdev_put(hib_resume_bdev, FMODE_WRITE);
 339
 340        return res;
 341}
 342
 343/**
 344 *      write_page - Write one page to given swap location.
 345 *      @buf:           Address we're writing.
 346 *      @offset:        Offset of the swap page we're writing to.
 347 *      @hb:            bio completion batch
 348 */
 349
 350static int write_page(void *buf, sector_t offset, struct hib_bio_batch *hb)
 351{
 352        void *src;
 353        int ret;
 354
 355        if (!offset)
 356                return -ENOSPC;
 357
 358        if (hb) {
 359                src = (void *)__get_free_page(__GFP_RECLAIM | __GFP_NOWARN |
 360                                              __GFP_NORETRY);
 361                if (src) {
 362                        copy_page(src, buf);
 363                } else {
 364                        ret = hib_wait_io(hb); /* Free pages */
 365                        if (ret)
 366                                return ret;
 367                        src = (void *)__get_free_page(__GFP_RECLAIM |
 368                                                      __GFP_NOWARN |
 369                                                      __GFP_NORETRY);
 370                        if (src) {
 371                                copy_page(src, buf);
 372                        } else {
 373                                WARN_ON_ONCE(1);
 374                                hb = NULL;      /* Go synchronous */
 375                                src = buf;
 376                        }
 377                }
 378        } else {
 379                src = buf;
 380        }
 381        return hib_submit_io(WRITE_SYNC, offset, src, hb);
 382}
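/*
 * When writing asynchronously, write_page() copies @buf into a freshly
 * allocated bounce page so the caller can reuse the source page right
 * away; the bounce page is released by hib_end_io() (put_page() on the
 * WRITE path) once the bio completes.  If no page can be allocated even
 * after draining the batch, the function falls back, with a one-time
 * warning, to submitting @buf synchronously.
 */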
 383
 384static void release_swap_writer(struct swap_map_handle *handle)
 385{
 386        if (handle->cur)
 387                free_page((unsigned long)handle->cur);
 388        handle->cur = NULL;
 389}
 390
 391static int get_swap_writer(struct swap_map_handle *handle)
 392{
 393        int ret;
 394
 395        ret = swsusp_swap_check();
 396        if (ret) {
 397                if (ret != -ENOSPC)
 398                        printk(KERN_ERR "PM: Cannot find swap device, try "
 399                                        "swapon -a.\n");
 400                return ret;
 401        }
 402        handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_KERNEL);
 403        if (!handle->cur) {
 404                ret = -ENOMEM;
 405                goto err_close;
 406        }
 407        handle->cur_swap = alloc_swapdev_block(root_swap);
 408        if (!handle->cur_swap) {
 409                ret = -ENOSPC;
 410                goto err_rel;
 411        }
 412        handle->k = 0;
 413        handle->reqd_free_pages = reqd_free_pages();
 414        handle->first_sector = handle->cur_swap;
 415        return 0;
 416err_rel:
 417        release_swap_writer(handle);
 418err_close:
 419        swsusp_close(FMODE_WRITE);
 420        return ret;
 421}
 422
 423static int swap_write_page(struct swap_map_handle *handle, void *buf,
 424                struct hib_bio_batch *hb)
 425{
 426        int error = 0;
 427        sector_t offset;
 428
 429        if (!handle->cur)
 430                return -EINVAL;
 431        offset = alloc_swapdev_block(root_swap);
 432        error = write_page(buf, offset, hb);
 433        if (error)
 434                return error;
 435        handle->cur->entries[handle->k++] = offset;
 436        if (handle->k >= MAP_PAGE_ENTRIES) {
 437                offset = alloc_swapdev_block(root_swap);
 438                if (!offset)
 439                        return -ENOSPC;
 440                handle->cur->next_swap = offset;
 441                error = write_page(handle->cur, handle->cur_swap, hb);
 442                if (error)
 443                        goto out;
 444                clear_page(handle->cur);
 445                handle->cur_swap = offset;
 446                handle->k = 0;
 447
 448                if (hb && low_free_pages() <= handle->reqd_free_pages) {
 449                        error = hib_wait_io(hb);
 450                        if (error)
 451                                goto out;
 452                        /*
 453                         * Recalculate the number of required free pages, to
 454                         * make sure we never take more than half.
 455                         */
 456                        handle->reqd_free_pages = reqd_free_pages();
 457                }
 458        }
 459 out:
 460        return error;
 461}
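/*
 * swap_write_page() builds the on-disk swap map as it goes: each data
 * page is written to a freshly allocated swap slot and its sector is
 * recorded in handle->cur->entries[].  Once all MAP_PAGE_ENTRIES slots
 * are used, a further slot is allocated for the next map page, its
 * sector is stored in ->next_swap, the current map page is written out
 * at handle->cur_swap, and the in-memory page is cleared and reused.
 */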
 462
 463static int flush_swap_writer(struct swap_map_handle *handle)
 464{
 465        if (handle->cur && handle->cur_swap)
 466                return write_page(handle->cur, handle->cur_swap, NULL);
 467        else
 468                return -EINVAL;
 469}
 470
 471static int swap_writer_finish(struct swap_map_handle *handle,
 472                unsigned int flags, int error)
 473{
 474        if (!error) {
 475                flush_swap_writer(handle);
 476                printk(KERN_INFO "PM: S");
 477                error = mark_swapfiles(handle, flags);
 478                printk("|\n");
 479        }
 480
 481        if (error)
 482                free_all_swap_pages(root_swap);
 483        release_swap_writer(handle);
 484        swsusp_close(FMODE_WRITE);
 485
 486        return error;
 487}
 488
 489/* We need to remember how much compressed data we need to read. */
 490#define LZO_HEADER      sizeof(size_t)
 491
 492/* Number of pages/bytes we'll compress at one time. */
 493#define LZO_UNC_PAGES   32
 494#define LZO_UNC_SIZE    (LZO_UNC_PAGES * PAGE_SIZE)
 495
 496/* Number of pages/bytes we need for compressed data (worst case). */
 497#define LZO_CMP_PAGES   DIV_ROUND_UP(lzo1x_worst_compress(LZO_UNC_SIZE) + \
 498                                     LZO_HEADER, PAGE_SIZE)
 499#define LZO_CMP_SIZE    (LZO_CMP_PAGES * PAGE_SIZE)
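/*
 * For example, with 4 KiB pages and a 64-bit size_t, LZO_UNC_SIZE is
 * 128 KiB; lzo1x_worst_compress(x) = x + x/16 + 64 + 3 gives 139331
 * bytes, plus the 8-byte header makes 139339, so LZO_CMP_PAGES
 * evaluates to 35 and LZO_CMP_SIZE to 140 KiB.
 */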
 500
 501/* Maximum number of threads for compression/decompression. */
 502#define LZO_THREADS     3
 503
 504/* Minimum/maximum number of pages for read buffering. */
 505#define LZO_MIN_RD_PAGES        1024
 506#define LZO_MAX_RD_PAGES        8192
 507
 508
 509/**
 510 *      save_image - save the suspend image data
 511 */
 512
 513static int save_image(struct swap_map_handle *handle,
 514                      struct snapshot_handle *snapshot,
 515                      unsigned int nr_to_write)
 516{
 517        unsigned int m;
 518        int ret;
 519        int nr_pages;
 520        int err2;
 521        struct hib_bio_batch hb;
 522        ktime_t start;
 523        ktime_t stop;
 524
 525        hib_init_batch(&hb);
 526
 527        printk(KERN_INFO "PM: Saving image data pages (%u pages)...\n",
 528                nr_to_write);
 529        m = nr_to_write / 10;
 530        if (!m)
 531                m = 1;
 532        nr_pages = 0;
 533        start = ktime_get();
 534        while (1) {
 535                ret = snapshot_read_next(snapshot);
 536                if (ret <= 0)
 537                        break;
 538                ret = swap_write_page(handle, data_of(*snapshot), &hb);
 539                if (ret)
 540                        break;
 541                if (!(nr_pages % m))
 542                        printk(KERN_INFO "PM: Image saving progress: %3d%%\n",
 543                               nr_pages / m * 10);
 544                nr_pages++;
 545        }
 546        err2 = hib_wait_io(&hb);
 547        stop = ktime_get();
 548        if (!ret)
 549                ret = err2;
 550        if (!ret)
 551                printk(KERN_INFO "PM: Image saving done.\n");
 552        swsusp_show_speed(start, stop, nr_to_write, "Wrote");
 553        return ret;
 554}
 555
 556/**
 557 * Structure used for CRC32.
 558 */
 559struct crc_data {
 560        struct task_struct *thr;                  /* thread */
 561        atomic_t ready;                           /* ready to start flag */
 562        atomic_t stop;                            /* ready to stop flag */
 563        unsigned run_threads;                     /* nr current threads */
 564        wait_queue_head_t go;                     /* start crc update */
 565        wait_queue_head_t done;                   /* crc update done */
 566        u32 *crc32;                               /* points to handle's crc32 */
 567        size_t *unc_len[LZO_THREADS];             /* uncompressed lengths */
 568        unsigned char *unc[LZO_THREADS];          /* uncompressed data */
 569};
 570
 571/**
 572 * CRC32 update function that runs in its own thread.
 573 */
 574static int crc32_threadfn(void *data)
 575{
 576        struct crc_data *d = data;
 577        unsigned i;
 578
 579        while (1) {
 580                wait_event(d->go, atomic_read(&d->ready) ||
 581                                  kthread_should_stop());
 582                if (kthread_should_stop()) {
 583                        d->thr = NULL;
 584                        atomic_set(&d->stop, 1);
 585                        wake_up(&d->done);
 586                        break;
 587                }
 588                atomic_set(&d->ready, 0);
 589
 590                for (i = 0; i < d->run_threads; i++)
 591                        *d->crc32 = crc32_le(*d->crc32,
 592                                             d->unc[i], *d->unc_len[i]);
 593                atomic_set(&d->stop, 1);
 594                wake_up(&d->done);
 595        }
 596        return 0;
 597}
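/*
 * The CRC and (de)compression workers all use the same handshake: the
 * main thread fills in the input, sets ->ready and wakes ->go; the
 * worker clears ->ready, does its work, sets ->stop and wakes ->done;
 * the main thread then waits on ->done for ->stop and clears it before
 * the next round.  kthread_stop() breaks a worker out of its wait so
 * the threads can be torn down on errors.
 */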
 598/**
 599 * Structure used for LZO data compression.
 600 */
 601struct cmp_data {
 602        struct task_struct *thr;                  /* thread */
 603        atomic_t ready;                           /* ready to start flag */
 604        atomic_t stop;                            /* ready to stop flag */
 605        int ret;                                  /* return code */
 606        wait_queue_head_t go;                     /* start compression */
 607        wait_queue_head_t done;                   /* compression done */
 608        size_t unc_len;                           /* uncompressed length */
 609        size_t cmp_len;                           /* compressed length */
 610        unsigned char unc[LZO_UNC_SIZE];          /* uncompressed buffer */
 611        unsigned char cmp[LZO_CMP_SIZE];          /* compressed buffer */
 612        unsigned char wrk[LZO1X_1_MEM_COMPRESS];  /* compression workspace */
 613};
 614
 615/**
 616 * Compression function that runs in its own thread.
 617 */
 618static int lzo_compress_threadfn(void *data)
 619{
 620        struct cmp_data *d = data;
 621
 622        while (1) {
 623                wait_event(d->go, atomic_read(&d->ready) ||
 624                                  kthread_should_stop());
 625                if (kthread_should_stop()) {
 626                        d->thr = NULL;
 627                        d->ret = -1;
 628                        atomic_set(&d->stop, 1);
 629                        wake_up(&d->done);
 630                        break;
 631                }
 632                atomic_set(&d->ready, 0);
 633
 634                d->ret = lzo1x_1_compress(d->unc, d->unc_len,
 635                                          d->cmp + LZO_HEADER, &d->cmp_len,
 636                                          d->wrk);
 637                atomic_set(&d->stop, 1);
 638                wake_up(&d->done);
 639        }
 640        return 0;
 641}
 642
 643/**
 644 * save_image_lzo - Save the suspend image data compressed with LZO.
 645 * @handle: Swap map handle to use for saving the image.
 646 * @snapshot: Image to read data from.
 647 * @nr_to_write: Number of pages to save.
 648 */
 649static int save_image_lzo(struct swap_map_handle *handle,
 650                          struct snapshot_handle *snapshot,
 651                          unsigned int nr_to_write)
 652{
 653        unsigned int m;
 654        int ret = 0;
 655        int nr_pages;
 656        int err2;
 657        struct hib_bio_batch hb;
 658        ktime_t start;
 659        ktime_t stop;
 660        size_t off;
 661        unsigned thr, run_threads, nr_threads;
 662        unsigned char *page = NULL;
 663        struct cmp_data *data = NULL;
 664        struct crc_data *crc = NULL;
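        /*
         * Overview of the pipeline below: the snapshot is consumed in
         * LZO_UNC_SIZE chunks, one chunk per compression thread.  The
         * CRC32 thread checksums the uncompressed chunks in parallel,
         * and the main thread then writes each thread's output page by
         * page, with the compressed length stored in the first
         * LZO_HEADER bytes of the cmp[] buffer.
         */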
 665
 666        hib_init_batch(&hb);
 667
 668        /*
  669         * We'll limit the number of compression threads to keep the memory
  670         * footprint down.
 671         */
 672        nr_threads = num_online_cpus() - 1;
 673        nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);
 674
 675        page = (void *)__get_free_page(__GFP_RECLAIM | __GFP_HIGH);
 676        if (!page) {
 677                printk(KERN_ERR "PM: Failed to allocate LZO page\n");
 678                ret = -ENOMEM;
 679                goto out_clean;
 680        }
 681
 682        data = vmalloc(sizeof(*data) * nr_threads);
 683        if (!data) {
 684                printk(KERN_ERR "PM: Failed to allocate LZO data\n");
 685                ret = -ENOMEM;
 686                goto out_clean;
 687        }
 688        for (thr = 0; thr < nr_threads; thr++)
 689                memset(&data[thr], 0, offsetof(struct cmp_data, go));
 690
 691        crc = kmalloc(sizeof(*crc), GFP_KERNEL);
 692        if (!crc) {
 693                printk(KERN_ERR "PM: Failed to allocate crc\n");
 694                ret = -ENOMEM;
 695                goto out_clean;
 696        }
 697        memset(crc, 0, offsetof(struct crc_data, go));
 698
 699        /*
 700         * Start the compression threads.
 701         */
 702        for (thr = 0; thr < nr_threads; thr++) {
 703                init_waitqueue_head(&data[thr].go);
 704                init_waitqueue_head(&data[thr].done);
 705
 706                data[thr].thr = kthread_run(lzo_compress_threadfn,
 707                                            &data[thr],
 708                                            "image_compress/%u", thr);
 709                if (IS_ERR(data[thr].thr)) {
 710                        data[thr].thr = NULL;
 711                        printk(KERN_ERR
 712                               "PM: Cannot start compression threads\n");
 713                        ret = -ENOMEM;
 714                        goto out_clean;
 715                }
 716        }
 717
 718        /*
 719         * Start the CRC32 thread.
 720         */
 721        init_waitqueue_head(&crc->go);
 722        init_waitqueue_head(&crc->done);
 723
 724        handle->crc32 = 0;
 725        crc->crc32 = &handle->crc32;
 726        for (thr = 0; thr < nr_threads; thr++) {
 727                crc->unc[thr] = data[thr].unc;
 728                crc->unc_len[thr] = &data[thr].unc_len;
 729        }
 730
 731        crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
 732        if (IS_ERR(crc->thr)) {
 733                crc->thr = NULL;
 734                printk(KERN_ERR "PM: Cannot start CRC32 thread\n");
 735                ret = -ENOMEM;
 736                goto out_clean;
 737        }
 738
 739        /*
 740         * Adjust the number of required free pages after all allocations have
 741         * been done. We don't want to run out of pages when writing.
 742         */
 743        handle->reqd_free_pages = reqd_free_pages();
 744
 745        printk(KERN_INFO
 746                "PM: Using %u thread(s) for compression.\n"
 747                "PM: Compressing and saving image data (%u pages)...\n",
 748                nr_threads, nr_to_write);
 749        m = nr_to_write / 10;
 750        if (!m)
 751                m = 1;
 752        nr_pages = 0;
 753        start = ktime_get();
 754        for (;;) {
 755                for (thr = 0; thr < nr_threads; thr++) {
 756                        for (off = 0; off < LZO_UNC_SIZE; off += PAGE_SIZE) {
 757                                ret = snapshot_read_next(snapshot);
 758                                if (ret < 0)
 759                                        goto out_finish;
 760
 761                                if (!ret)
 762                                        break;
 763
 764                                memcpy(data[thr].unc + off,
 765                                       data_of(*snapshot), PAGE_SIZE);
 766
 767                                if (!(nr_pages % m))
 768                                        printk(KERN_INFO
 769                                               "PM: Image saving progress: "
 770                                               "%3d%%\n",
 771                                               nr_pages / m * 10);
 772                                nr_pages++;
 773                        }
 774                        if (!off)
 775                                break;
 776
 777                        data[thr].unc_len = off;
 778
 779                        atomic_set(&data[thr].ready, 1);
 780                        wake_up(&data[thr].go);
 781                }
 782
 783                if (!thr)
 784                        break;
 785
 786                crc->run_threads = thr;
 787                atomic_set(&crc->ready, 1);
 788                wake_up(&crc->go);
 789
 790                for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
 791                        wait_event(data[thr].done,
 792                                   atomic_read(&data[thr].stop));
 793                        atomic_set(&data[thr].stop, 0);
 794
 795                        ret = data[thr].ret;
 796
 797                        if (ret < 0) {
 798                                printk(KERN_ERR "PM: LZO compression failed\n");
 799                                goto out_finish;
 800                        }
 801
 802                        if (unlikely(!data[thr].cmp_len ||
 803                                     data[thr].cmp_len >
 804                                     lzo1x_worst_compress(data[thr].unc_len))) {
 805                                printk(KERN_ERR
 806                                       "PM: Invalid LZO compressed length\n");
 807                                ret = -1;
 808                                goto out_finish;
 809                        }
 810
 811                        *(size_t *)data[thr].cmp = data[thr].cmp_len;
 812
 813                        /*
 814                         * Given we are writing one page at a time to disk, we
 815                         * copy that much from the buffer, although the last
 816                         * bit will likely be smaller than full page. This is
 817                         * OK - we saved the length of the compressed data, so
 818                         * any garbage at the end will be discarded when we
 819                         * read it.
 820                         */
 821                        for (off = 0;
 822                             off < LZO_HEADER + data[thr].cmp_len;
 823                             off += PAGE_SIZE) {
 824                                memcpy(page, data[thr].cmp + off, PAGE_SIZE);
 825
 826                                ret = swap_write_page(handle, page, &hb);
 827                                if (ret)
 828                                        goto out_finish;
 829                        }
 830                }
 831
 832                wait_event(crc->done, atomic_read(&crc->stop));
 833                atomic_set(&crc->stop, 0);
 834        }
 835
 836out_finish:
 837        err2 = hib_wait_io(&hb);
 838        stop = ktime_get();
 839        if (!ret)
 840                ret = err2;
 841        if (!ret)
 842                printk(KERN_INFO "PM: Image saving done.\n");
 843        swsusp_show_speed(start, stop, nr_to_write, "Wrote");
 844out_clean:
 845        if (crc) {
 846                if (crc->thr)
 847                        kthread_stop(crc->thr);
 848                kfree(crc);
 849        }
 850        if (data) {
 851                for (thr = 0; thr < nr_threads; thr++)
 852                        if (data[thr].thr)
 853                                kthread_stop(data[thr].thr);
 854                vfree(data);
 855        }
 856        if (page) free_page((unsigned long)page);
 857
 858        return ret;
 859}
 860
 861/**
 862 *      enough_swap - Make sure we have enough swap to save the image.
 863 *
 864 *      Returns TRUE or FALSE after checking the total amount of swap
  865 *      space available from the resume partition.
 866 */
 867
 868static int enough_swap(unsigned int nr_pages, unsigned int flags)
 869{
 870        unsigned int free_swap = count_swap_pages(root_swap, 1);
 871        unsigned int required;
 872
 873        pr_debug("PM: Free swap pages: %u\n", free_swap);
 874
 875        required = PAGES_FOR_IO + nr_pages;
 876        return free_swap > required;
 877}
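/*
 * Note that in this version the check is only made for uncompressed
 * images (see the SF_NOCOMPRESS_MODE test in swsusp_write()),
 * presumably because the final size of an LZO-compressed image cannot
 * be known up front; the flags argument is currently unused.
 */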
 878
 879/**
 880 *      swsusp_write - Write entire image and metadata.
 881 *      @flags: flags to pass to the "boot" kernel in the image header
 882 *
 883 *      It is important _NOT_ to umount filesystems at this point. We want
  884 *      them synced (in case something goes wrong), but we do NOT want to mark
  885 *      the filesystems clean: they are not. (And it does not matter; if we
  886 *      resume correctly, we'll mark the system clean anyway.)
 887 */
 888
 889int swsusp_write(unsigned int flags)
 890{
 891        struct swap_map_handle handle;
 892        struct snapshot_handle snapshot;
 893        struct swsusp_info *header;
 894        unsigned long pages;
 895        int error;
 896
 897        pages = snapshot_get_image_size();
 898        error = get_swap_writer(&handle);
 899        if (error) {
 900                printk(KERN_ERR "PM: Cannot get swap writer\n");
 901                return error;
 902        }
 903        if (flags & SF_NOCOMPRESS_MODE) {
 904                if (!enough_swap(pages, flags)) {
 905                        printk(KERN_ERR "PM: Not enough free swap\n");
 906                        error = -ENOSPC;
 907                        goto out_finish;
 908                }
 909        }
 910        memset(&snapshot, 0, sizeof(struct snapshot_handle));
 911        error = snapshot_read_next(&snapshot);
 912        if (error < PAGE_SIZE) {
 913                if (error >= 0)
 914                        error = -EFAULT;
 915
 916                goto out_finish;
 917        }
 918        header = (struct swsusp_info *)data_of(snapshot);
 919        error = swap_write_page(&handle, header, NULL);
 920        if (!error) {
 921                error = (flags & SF_NOCOMPRESS_MODE) ?
 922                        save_image(&handle, &snapshot, pages - 1) :
 923                        save_image_lzo(&handle, &snapshot, pages - 1);
 924        }
 925out_finish:
 926        error = swap_writer_finish(&handle, flags, error);
 927        return error;
 928}
 929
 930/**
 931 *      The following functions allow us to read data using a swap map
  932 *      in a file-like way
 933 */
 934
 935static void release_swap_reader(struct swap_map_handle *handle)
 936{
 937        struct swap_map_page_list *tmp;
 938
 939        while (handle->maps) {
 940                if (handle->maps->map)
 941                        free_page((unsigned long)handle->maps->map);
 942                tmp = handle->maps;
 943                handle->maps = handle->maps->next;
 944                kfree(tmp);
 945        }
 946        handle->cur = NULL;
 947}
 948
 949static int get_swap_reader(struct swap_map_handle *handle,
 950                unsigned int *flags_p)
 951{
 952        int error;
 953        struct swap_map_page_list *tmp, *last;
 954        sector_t offset;
 955
 956        *flags_p = swsusp_header->flags;
 957
 958        if (!swsusp_header->image) /* how can this happen? */
 959                return -EINVAL;
 960
 961        handle->cur = NULL;
 962        last = handle->maps = NULL;
 963        offset = swsusp_header->image;
 964        while (offset) {
 965                tmp = kmalloc(sizeof(*handle->maps), GFP_KERNEL);
 966                if (!tmp) {
 967                        release_swap_reader(handle);
 968                        return -ENOMEM;
 969                }
 970                memset(tmp, 0, sizeof(*tmp));
 971                if (!handle->maps)
 972                        handle->maps = tmp;
 973                if (last)
 974                        last->next = tmp;
 975                last = tmp;
 976
 977                tmp->map = (struct swap_map_page *)
 978                           __get_free_page(__GFP_RECLAIM | __GFP_HIGH);
 979                if (!tmp->map) {
 980                        release_swap_reader(handle);
 981                        return -ENOMEM;
 982                }
 983
 984                error = hib_submit_io(READ_SYNC, offset, tmp->map, NULL);
 985                if (error) {
 986                        release_swap_reader(handle);
 987                        return error;
 988                }
 989                offset = tmp->map->next_swap;
 990        }
 991        handle->k = 0;
 992        handle->cur = handle->maps->map;
 993        return 0;
 994}
 995
 996static int swap_read_page(struct swap_map_handle *handle, void *buf,
 997                struct hib_bio_batch *hb)
 998{
 999        sector_t offset;
1000        int error;
1001        struct swap_map_page_list *tmp;
1002
1003        if (!handle->cur)
1004                return -EINVAL;
1005        offset = handle->cur->entries[handle->k];
1006        if (!offset)
1007                return -EFAULT;
1008        error = hib_submit_io(READ_SYNC, offset, buf, hb);
1009        if (error)
1010                return error;
1011        if (++handle->k >= MAP_PAGE_ENTRIES) {
1012                handle->k = 0;
1013                free_page((unsigned long)handle->maps->map);
1014                tmp = handle->maps;
1015                handle->maps = handle->maps->next;
1016                kfree(tmp);
1017                if (!handle->maps)
1018                        release_swap_reader(handle);
1019                else
1020                        handle->cur = handle->maps->map;
1021        }
1022        return error;
1023}
1024
1025static int swap_reader_finish(struct swap_map_handle *handle)
1026{
1027        release_swap_reader(handle);
1028
1029        return 0;
1030}
1031
1032/**
1033 *      load_image - load the image using the swap map handle
1034 *      @handle and the snapshot handle @snapshot
 1035 *      (assume there are @nr_to_read pages to load)
1036 */
1037
1038static int load_image(struct swap_map_handle *handle,
1039                      struct snapshot_handle *snapshot,
1040                      unsigned int nr_to_read)
1041{
1042        unsigned int m;
1043        int ret = 0;
1044        ktime_t start;
1045        ktime_t stop;
1046        struct hib_bio_batch hb;
1047        int err2;
1048        unsigned nr_pages;
1049
1050        hib_init_batch(&hb);
1051
1052        printk(KERN_INFO "PM: Loading image data pages (%u pages)...\n",
1053                nr_to_read);
1054        m = nr_to_read / 10;
1055        if (!m)
1056                m = 1;
1057        nr_pages = 0;
1058        start = ktime_get();
1059        for ( ; ; ) {
1060                ret = snapshot_write_next(snapshot);
1061                if (ret <= 0)
1062                        break;
1063                ret = swap_read_page(handle, data_of(*snapshot), &hb);
1064                if (ret)
1065                        break;
1066                if (snapshot->sync_read)
1067                        ret = hib_wait_io(&hb);
1068                if (ret)
1069                        break;
1070                if (!(nr_pages % m))
1071                        printk(KERN_INFO "PM: Image loading progress: %3d%%\n",
1072                               nr_pages / m * 10);
1073                nr_pages++;
1074        }
1075        err2 = hib_wait_io(&hb);
1076        stop = ktime_get();
1077        if (!ret)
1078                ret = err2;
1079        if (!ret) {
1080                printk(KERN_INFO "PM: Image loading done.\n");
1081                snapshot_write_finalize(snapshot);
1082                if (!snapshot_image_loaded(snapshot))
1083                        ret = -ENODATA;
1084        }
1085        swsusp_show_speed(start, stop, nr_to_read, "Read");
1086        return ret;
1087}
1088
1089/**
1090 * Structure used for LZO data decompression.
1091 */
1092struct dec_data {
1093        struct task_struct *thr;                  /* thread */
1094        atomic_t ready;                           /* ready to start flag */
1095        atomic_t stop;                            /* ready to stop flag */
1096        int ret;                                  /* return code */
1097        wait_queue_head_t go;                     /* start decompression */
1098        wait_queue_head_t done;                   /* decompression done */
1099        size_t unc_len;                           /* uncompressed length */
1100        size_t cmp_len;                           /* compressed length */
1101        unsigned char unc[LZO_UNC_SIZE];          /* uncompressed buffer */
1102        unsigned char cmp[LZO_CMP_SIZE];          /* compressed buffer */
1103};
1104
1105/**
 1106 * Decompression function that runs in its own thread.
1107 */
1108static int lzo_decompress_threadfn(void *data)
1109{
1110        struct dec_data *d = data;
1111
1112        while (1) {
1113                wait_event(d->go, atomic_read(&d->ready) ||
1114                                  kthread_should_stop());
1115                if (kthread_should_stop()) {
1116                        d->thr = NULL;
1117                        d->ret = -1;
1118                        atomic_set(&d->stop, 1);
1119                        wake_up(&d->done);
1120                        break;
1121                }
1122                atomic_set(&d->ready, 0);
1123
1124                d->unc_len = LZO_UNC_SIZE;
1125                d->ret = lzo1x_decompress_safe(d->cmp + LZO_HEADER, d->cmp_len,
1126                                               d->unc, &d->unc_len);
1127                atomic_set(&d->stop, 1);
1128                wake_up(&d->done);
1129        }
1130        return 0;
1131}
1132
1133/**
 1134 * load_image_lzo - Load compressed image data and decompress it with LZO.
1135 * @handle: Swap map handle to use for loading data.
1136 * @snapshot: Image to copy uncompressed data into.
1137 * @nr_to_read: Number of pages to load.
1138 */
1139static int load_image_lzo(struct swap_map_handle *handle,
1140                          struct snapshot_handle *snapshot,
1141                          unsigned int nr_to_read)
1142{
1143        unsigned int m;
1144        int ret = 0;
1145        int eof = 0;
1146        struct hib_bio_batch hb;
1147        ktime_t start;
1148        ktime_t stop;
1149        unsigned nr_pages;
1150        size_t off;
1151        unsigned i, thr, run_threads, nr_threads;
1152        unsigned ring = 0, pg = 0, ring_size = 0,
1153                 have = 0, want, need, asked = 0;
1154        unsigned long read_pages = 0;
1155        unsigned char **page = NULL;
1156        struct dec_data *data = NULL;
1157        struct crc_data *crc = NULL;
1158
1159        hib_init_batch(&hb);
1160
1161        /*
 1162         * We'll limit the number of decompression threads to keep the memory
 1163         * footprint down.
1164         */
1165        nr_threads = num_online_cpus() - 1;
1166        nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);
1167
1168        page = vmalloc(sizeof(*page) * LZO_MAX_RD_PAGES);
1169        if (!page) {
1170                printk(KERN_ERR "PM: Failed to allocate LZO page\n");
1171                ret = -ENOMEM;
1172                goto out_clean;
1173        }
1174
1175        data = vmalloc(sizeof(*data) * nr_threads);
1176        if (!data) {
1177                printk(KERN_ERR "PM: Failed to allocate LZO data\n");
1178                ret = -ENOMEM;
1179                goto out_clean;
1180        }
1181        for (thr = 0; thr < nr_threads; thr++)
1182                memset(&data[thr], 0, offsetof(struct dec_data, go));
1183
1184        crc = kmalloc(sizeof(*crc), GFP_KERNEL);
1185        if (!crc) {
1186                printk(KERN_ERR "PM: Failed to allocate crc\n");
1187                ret = -ENOMEM;
1188                goto out_clean;
1189        }
1190        memset(crc, 0, offsetof(struct crc_data, go));
1191
1192        /*
1193         * Start the decompression threads.
1194         */
1195        for (thr = 0; thr < nr_threads; thr++) {
1196                init_waitqueue_head(&data[thr].go);
1197                init_waitqueue_head(&data[thr].done);
1198
1199                data[thr].thr = kthread_run(lzo_decompress_threadfn,
1200                                            &data[thr],
1201                                            "image_decompress/%u", thr);
1202                if (IS_ERR(data[thr].thr)) {
1203                        data[thr].thr = NULL;
1204                        printk(KERN_ERR
1205                               "PM: Cannot start decompression threads\n");
1206                        ret = -ENOMEM;
1207                        goto out_clean;
1208                }
1209        }
1210
1211        /*
1212         * Start the CRC32 thread.
1213         */
1214        init_waitqueue_head(&crc->go);
1215        init_waitqueue_head(&crc->done);
1216
1217        handle->crc32 = 0;
1218        crc->crc32 = &handle->crc32;
1219        for (thr = 0; thr < nr_threads; thr++) {
1220                crc->unc[thr] = data[thr].unc;
1221                crc->unc_len[thr] = &data[thr].unc_len;
1222        }
1223
1224        crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
1225        if (IS_ERR(crc->thr)) {
1226                crc->thr = NULL;
1227                printk(KERN_ERR "PM: Cannot start CRC32 thread\n");
1228                ret = -ENOMEM;
1229                goto out_clean;
1230        }
1231
1232        /*
1233         * Set the number of pages for read buffering.
1234         * This is complete guesswork, because we'll only know the real
1235         * picture once prepare_image() is called, which is much later on
1236         * during the image load phase. We'll assume the worst case and
1237         * say that none of the image pages are from high memory.
1238         */
1239        if (low_free_pages() > snapshot_get_image_size())
1240                read_pages = (low_free_pages() - snapshot_get_image_size()) / 2;
1241        read_pages = clamp_val(read_pages, LZO_MIN_RD_PAGES, LZO_MAX_RD_PAGES);
1242
1243        for (i = 0; i < read_pages; i++) {
1244                page[i] = (void *)__get_free_page(i < LZO_CMP_PAGES ?
1245                                                  __GFP_RECLAIM | __GFP_HIGH :
1246                                                  __GFP_RECLAIM | __GFP_NOWARN |
1247                                                  __GFP_NORETRY);
1248
1249                if (!page[i]) {
1250                        if (i < LZO_CMP_PAGES) {
1251                                ring_size = i;
1252                                printk(KERN_ERR
1253                                       "PM: Failed to allocate LZO pages\n");
1254                                ret = -ENOMEM;
1255                                goto out_clean;
1256                        } else {
1257                                break;
1258                        }
1259                }
1260        }
1261        want = ring_size = i;
1262
1263        printk(KERN_INFO
1264                "PM: Using %u thread(s) for decompression.\n"
1265                "PM: Loading and decompressing image data (%u pages)...\n",
1266                nr_threads, nr_to_read);
1267        m = nr_to_read / 10;
1268        if (!m)
1269                m = 1;
1270        nr_pages = 0;
1271        start = ktime_get();
1272
1273        ret = snapshot_write_next(snapshot);
1274        if (ret <= 0)
1275                goto out_finish;
1276
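        /*
         * Read-ahead bookkeeping for the loop below: page[] is a ring
         * of up to 'ring_size' buffer pages.  'ring' indexes the slot
         * the next read lands in and 'pg' the next slot to consume;
         * 'asked' counts reads submitted but not yet waited for, 'have'
         * counts pages whose reads have completed, and 'want' counts
         * free slots still to be filled.
         */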
 1277        for (;;) {
1278                for (i = 0; !eof && i < want; i++) {
1279                        ret = swap_read_page(handle, page[ring], &hb);
1280                        if (ret) {
1281                                /*
1282                                 * On real read error, finish. On end of data,
1283                                 * set EOF flag and just exit the read loop.
1284                                 */
1285                                if (handle->cur &&
1286                                    handle->cur->entries[handle->k]) {
1287                                        goto out_finish;
1288                                } else {
1289                                        eof = 1;
1290                                        break;
1291                                }
1292                        }
1293                        if (++ring >= ring_size)
1294                                ring = 0;
1295                }
1296                asked += i;
1297                want -= i;
1298
1299                /*
1300                 * We are out of data, wait for some more.
1301                 */
1302                if (!have) {
1303                        if (!asked)
1304                                break;
1305
1306                        ret = hib_wait_io(&hb);
1307                        if (ret)
1308                                goto out_finish;
1309                        have += asked;
1310                        asked = 0;
1311                        if (eof)
1312                                eof = 2;
1313                }
1314
1315                if (crc->run_threads) {
1316                        wait_event(crc->done, atomic_read(&crc->stop));
1317                        atomic_set(&crc->stop, 0);
1318                        crc->run_threads = 0;
1319                }
1320
1321                for (thr = 0; have && thr < nr_threads; thr++) {
1322                        data[thr].cmp_len = *(size_t *)page[pg];
1323                        if (unlikely(!data[thr].cmp_len ||
1324                                     data[thr].cmp_len >
1325                                     lzo1x_worst_compress(LZO_UNC_SIZE))) {
1326                                printk(KERN_ERR
1327                                       "PM: Invalid LZO compressed length\n");
1328                                ret = -1;
1329                                goto out_finish;
1330                        }
1331
1332                        need = DIV_ROUND_UP(data[thr].cmp_len + LZO_HEADER,
1333                                            PAGE_SIZE);
1334                        if (need > have) {
1335                                if (eof > 1) {
1336                                        ret = -1;
1337                                        goto out_finish;
1338                                }
1339                                break;
1340                        }
1341
1342                        for (off = 0;
1343                             off < LZO_HEADER + data[thr].cmp_len;
1344                             off += PAGE_SIZE) {
1345                                memcpy(data[thr].cmp + off,
1346                                       page[pg], PAGE_SIZE);
1347                                have--;
1348                                want++;
1349                                if (++pg >= ring_size)
1350                                        pg = 0;
1351                        }
1352
1353                        atomic_set(&data[thr].ready, 1);
1354                        wake_up(&data[thr].go);
1355                }
1356
1357                /*
1358                 * Wait for more data while we are decompressing.
1359                 */
1360                if (have < LZO_CMP_PAGES && asked) {
1361                        ret = hib_wait_io(&hb);
1362                        if (ret)
1363                                goto out_finish;
1364                        have += asked;
1365                        asked = 0;
1366                        if (eof)
1367                                eof = 2;
1368                }
1369
1370                for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
1371                        wait_event(data[thr].done,
1372                                   atomic_read(&data[thr].stop));
1373                        atomic_set(&data[thr].stop, 0);
1374
1375                        ret = data[thr].ret;
1376
1377                        if (ret < 0) {
1378                                printk(KERN_ERR
1379                                       "PM: LZO decompression failed\n");
1380                                goto out_finish;
1381                        }
1382
1383                        if (unlikely(!data[thr].unc_len ||
1384                                     data[thr].unc_len > LZO_UNC_SIZE ||
1385                                     data[thr].unc_len & (PAGE_SIZE - 1))) {
1386                                printk(KERN_ERR
1387                                       "PM: Invalid LZO uncompressed length\n");
1388                                ret = -1;
1389                                goto out_finish;
1390                        }
1391
1392                        for (off = 0;
1393                             off < data[thr].unc_len; off += PAGE_SIZE) {
1394                                memcpy(data_of(*snapshot),
1395                                       data[thr].unc + off, PAGE_SIZE);
1396
1397                                if (!(nr_pages % m))
1398                                        printk(KERN_INFO
1399                                               "PM: Image loading progress: "
1400                                               "%3d%%\n",
1401                                               nr_pages / m * 10);
1402                                nr_pages++;
1403
1404                                ret = snapshot_write_next(snapshot);
1405                                if (ret <= 0) {
1406                                        crc->run_threads = thr + 1;
1407                                        atomic_set(&crc->ready, 1);
1408                                        wake_up(&crc->go);
1409                                        goto out_finish;
1410                                }
1411                        }
1412                }
1413
1414                crc->run_threads = thr;
1415                atomic_set(&crc->ready, 1);
1416                wake_up(&crc->go);
1417        }
1418
1419out_finish:
1420        if (crc->run_threads) {
1421                wait_event(crc->done, atomic_read(&crc->stop));
1422                atomic_set(&crc->stop, 0);
1423        }
1424        stop = ktime_get();
1425        if (!ret) {
1426                printk(KERN_INFO "PM: Image loading done.\n");
1427                snapshot_write_finalize(snapshot);
1428                if (!snapshot_image_loaded(snapshot))
1429                        ret = -ENODATA;
1430                if (!ret) {
1431                        if (swsusp_header->flags & SF_CRC32_MODE) {
 1432                                if (handle->crc32 != swsusp_header->crc32) {
1433                                        printk(KERN_ERR
1434                                               "PM: Invalid image CRC32!\n");
1435                                        ret = -ENODATA;
1436                                }
1437                        }
1438                }
1439        }
1440        swsusp_show_speed(start, stop, nr_to_read, "Read");
1441out_clean:
1442        for (i = 0; i < ring_size; i++)
1443                free_page((unsigned long)page[i]);
1444        if (crc) {
1445                if (crc->thr)
1446                        kthread_stop(crc->thr);
1447                kfree(crc);
1448        }
1449        if (data) {
1450                for (thr = 0; thr < nr_threads; thr++)
1451                        if (data[thr].thr)
1452                                kthread_stop(data[thr].thr);
1453                vfree(data);
1454        }
1455        vfree(page);
1456
1457        return ret;
1458}
1459
1460/**
1461 *      swsusp_read - read the hibernation image.
 1462 *      @flags_p: the flags passed by the "frozen" kernel in the image header
 1463 *                are written into this memory location
1464 */
1465
1466int swsusp_read(unsigned int *flags_p)
1467{
1468        int error;
1469        struct swap_map_handle handle;
1470        struct snapshot_handle snapshot;
1471        struct swsusp_info *header;
1472
1473        memset(&snapshot, 0, sizeof(struct snapshot_handle));
1474        error = snapshot_write_next(&snapshot);
1475        if (error < PAGE_SIZE)
1476                return error < 0 ? error : -EFAULT;
1477        header = (struct swsusp_info *)data_of(snapshot);
1478        error = get_swap_reader(&handle, flags_p);
1479        if (error)
1480                goto end;
1481        if (!error)
1482                error = swap_read_page(&handle, header, NULL);
1483        if (!error) {
1484                error = (*flags_p & SF_NOCOMPRESS_MODE) ?
1485                        load_image(&handle, &snapshot, header->pages - 1) :
1486                        load_image_lzo(&handle, &snapshot, header->pages - 1);
1487        }
1488        swap_reader_finish(&handle);
1489end:
1490        if (!error)
1491                pr_debug("PM: Image successfully loaded\n");
1492        else
1493                pr_debug("PM: Error %d resuming\n", error);
1494        return error;
1495}
1496
1497/**
1498 *      swsusp_check - Check for swsusp signature in the resume device
1499 */
1500
1501int swsusp_check(void)
1502{
1503        int error;
1504
1505        hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device,
1506                                            FMODE_READ, NULL);
1507        if (!IS_ERR(hib_resume_bdev)) {
1508                set_blocksize(hib_resume_bdev, PAGE_SIZE);
1509                clear_page(swsusp_header);
1510                error = hib_submit_io(READ_SYNC, swsusp_resume_block,
1511                                        swsusp_header, NULL);
1512                if (error)
1513                        goto put;
1514
1515                if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
1516                        memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
1517                        /* Reset swap signature now */
1518                        error = hib_submit_io(WRITE_SYNC, swsusp_resume_block,
1519                                                swsusp_header, NULL);
1520                } else {
1521                        error = -EINVAL;
1522                }
1523
1524put:
1525                if (error)
1526                        blkdev_put(hib_resume_bdev, FMODE_READ);
1527                else
1528                        pr_debug("PM: Image signature found, resuming\n");
1529        } else {
1530                error = PTR_ERR(hib_resume_bdev);
1531        }
1532
1533        if (error)
1534                pr_debug("PM: Image not found (code %d)\n", error);
1535
1536        return error;
1537}
1538
1539/**
1540 *      swsusp_close - close swap device.
1541 */
1542
1543void swsusp_close(fmode_t mode)
1544{
1545        if (IS_ERR(hib_resume_bdev)) {
1546                pr_debug("PM: Image device not initialised\n");
1547                return;
1548        }
1549
1550        blkdev_put(hib_resume_bdev, mode);
1551}
1552
1553/**
1554 *      swsusp_unmark - Unmark swsusp signature in the resume device
1555 */
1556
1557#ifdef CONFIG_SUSPEND
1558int swsusp_unmark(void)
1559{
1560        int error;
1561
1562        hib_submit_io(READ_SYNC, swsusp_resume_block, swsusp_header, NULL);
1563        if (!memcmp(HIBERNATE_SIG,swsusp_header->sig, 10)) {
1564                memcpy(swsusp_header->sig,swsusp_header->orig_sig, 10);
1565                error = hib_submit_io(WRITE_SYNC, swsusp_resume_block,
1566                                        swsusp_header, NULL);
1567        } else {
1568                printk(KERN_ERR "PM: Cannot find swsusp signature!\n");
1569                error = -ENODEV;
1570        }
1571
1572        /*
1573         * We just returned from suspend, we don't need the image any more.
1574         */
1575        free_all_swap_pages(root_swap);
1576
1577        return error;
1578}
1579#endif
1580
1581static int swsusp_header_init(void)
1582{
1583        swsusp_header = (struct swsusp_header*) __get_free_page(GFP_KERNEL);
1584        if (!swsusp_header)
1585                panic("Could not allocate memory for swsusp_header\n");
1586        return 0;
1587}
1588
1589core_initcall(swsusp_header_init);
1590