/*
 * linux/kernel/power/swap.c
 *
 * This file provides functions for reading the suspend image from
 * and writing it to a swap partition.
 *
 * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (C) 2010-2012 Bojan Smojver <bojan@rexursive.com>
 *
 * This file is released under the GPLv2.
 *
 */

#define pr_fmt(fmt) "PM: " fmt

#include <linux/module.h>
#include <linux/file.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/genhd.h>
#include <linux/device.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/vmalloc.h>
#include <linux/cpumask.h>
#include <linux/atomic.h>
#include <linux/kthread.h>
#include <linux/crc32.h>
#include <linux/ktime.h>

#include "power.h"

#define HIBERNATE_SIG   "S1SUSPEND"

/*
 * When reading an {un,}compressed image, we may restore pages in place,
 * in which case some architectures need these pages to be cleaned before
 * they can be executed. We don't know which pages these may be, so clean
 * the lot.
 */
static bool clean_pages_on_read;
static bool clean_pages_on_decompress;

/*
 *      The swap map is a data structure used for keeping track of each page
 *      written to a swap partition.  It consists of many swap_map_page
 *      structures, each of which contains an array of MAP_PAGE_ENTRIES swap
 *      entries.  These structures are stored on the swap and linked together
 *      with the help of the .next_swap member.
 *
 *      The swap map is created during suspend.  The swap map pages are
 *      allocated and populated one at a time, so we only need one memory
 *      page to set up the entire structure.
 *
 *      During resume, all swap_map_page structures are read into a list.
 */

#define MAP_PAGE_ENTRIES        (PAGE_SIZE / sizeof(sector_t) - 1)
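
/*
 * For example, with 4 KiB pages and a 64-bit sector_t, each swap_map_page
 * holds 4096 / 8 - 1 = 511 entries; the one slot left over is the
 * .next_swap link to the following map page.
 */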

/*
 * Number of free pages that are not high.
 */
static inline unsigned long low_free_pages(void)
{
        return nr_free_pages() - nr_free_highpages();
}

/*
 * Number of pages required to be kept free while writing the image. Always
 * half of all available low pages before the writing starts.
 */
static inline unsigned long reqd_free_pages(void)
{
        return low_free_pages() / 2;
}

struct swap_map_page {
        sector_t entries[MAP_PAGE_ENTRIES];
        sector_t next_swap;
};
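
/*
 * On-swap layout (illustrative):
 *
 *   swsusp_header.image --> +----------------+     +----------------+
 *                           | entries[0..N]  | ... | entries[0..N]  |
 *                           | next_swap -----+---> | next_swap = 0  |
 *                           +----------------+     +----------------+
 *
 * Each entries[] slot holds the sector of one saved data page; a zero
 * next_swap terminates the chain.
 */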

struct swap_map_page_list {
        struct swap_map_page *map;
        struct swap_map_page_list *next;
};

/**
 *      The swap_map_handle structure is used for handling swap in
 *      a file-like way.
 */

struct swap_map_handle {
        struct swap_map_page *cur;
        struct swap_map_page_list *maps;
        sector_t cur_swap;
        sector_t first_sector;
        unsigned int k;
        unsigned long reqd_free_pages;
        u32 crc32;
};

struct swsusp_header {
        char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int) -
                      sizeof(u32)];
        u32     crc32;
        sector_t image;
        unsigned int flags;     /* Flags to pass to the "boot" kernel */
        char    orig_sig[10];
        char    sig[10];
} __packed;
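
/*
 * This structure mirrors the first page of the swap device: the reserved[]
 * padding sizes it to exactly PAGE_SIZE, so .sig lands in the last 10 bytes,
 * where the swap signature ("SWAP-SPACE" or "SWAPSPACE2") normally lives.
 * mark_swapfiles() saves that signature in .orig_sig and replaces it with
 * HIBERNATE_SIG; swsusp_check() restores it on resume.
 */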

static struct swsusp_header *swsusp_header;

/**
 *      The following functions are used for tracking the allocated
 *      swap pages, so that they can be freed in case of an error.
 */

struct swsusp_extent {
        struct rb_node node;
        unsigned long start;
        unsigned long end;
};

static struct rb_root swsusp_extents = RB_ROOT;

static int swsusp_extents_insert(unsigned long swap_offset)
{
        struct rb_node **new = &(swsusp_extents.rb_node);
        struct rb_node *parent = NULL;
        struct swsusp_extent *ext;

        /* Figure out where to put the new node */
        while (*new) {
                ext = rb_entry(*new, struct swsusp_extent, node);
                parent = *new;
                if (swap_offset < ext->start) {
                        /* Try to merge */
                        if (swap_offset == ext->start - 1) {
                                ext->start--;
                                return 0;
                        }
                        new = &((*new)->rb_left);
                } else if (swap_offset > ext->end) {
                        /* Try to merge */
                        if (swap_offset == ext->end + 1) {
                                ext->end++;
                                return 0;
                        }
                        new = &((*new)->rb_right);
                } else {
                        /* It already is in the tree */
                        return -EINVAL;
                }
        }
        /* Add the new node and rebalance the tree. */
        ext = kzalloc(sizeof(struct swsusp_extent), GFP_KERNEL);
        if (!ext)
                return -ENOMEM;

        ext->start = swap_offset;
        ext->end = swap_offset;
        rb_link_node(&ext->node, parent, new);
        rb_insert_color(&ext->node, &swsusp_extents);
        return 0;
}
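
/*
 * For illustration: inserting offsets 10, 11 and 12 in any order leaves a
 * single [10, 12] extent in the tree, so a long run of contiguous swap
 * pages costs one node rather than one node per page. Note that merging
 * only grows an existing extent; two adjacent extents are not coalesced
 * with each other, which is fine for this use case.
 */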

/**
 *      alloc_swapdev_block - allocate a swap page and register that it has
 *      been allocated, so that it can be freed in case of an error.
 */

sector_t alloc_swapdev_block(int swap)
{
        unsigned long offset;

        offset = swp_offset(get_swap_page_of_type(swap));
        if (offset) {
                if (swsusp_extents_insert(offset))
                        swap_free(swp_entry(swap, offset));
                else
                        return swapdev_block(swap, offset);
        }
        return 0;
}

/**
 *      free_all_swap_pages - free swap pages allocated for saving image data.
 *      It also frees the extents used to register which swap entries had been
 *      allocated.
 */

void free_all_swap_pages(int swap)
{
        struct rb_node *node;

        while ((node = swsusp_extents.rb_node)) {
                struct swsusp_extent *ext;
                unsigned long offset;

                ext = rb_entry(node, struct swsusp_extent, node);
                rb_erase(node, &swsusp_extents);
                for (offset = ext->start; offset <= ext->end; offset++)
                        swap_free(swp_entry(swap, offset));

                kfree(ext);
        }
}

int swsusp_swap_in_use(void)
{
        return (swsusp_extents.rb_node != NULL);
}

/*
 * General things
 */

static unsigned short root_swap = 0xffff;
static struct block_device *hib_resume_bdev;

struct hib_bio_batch {
        atomic_t                count;
        wait_queue_head_t       wait;
        blk_status_t            error;
};

static void hib_init_batch(struct hib_bio_batch *hb)
{
        atomic_set(&hb->count, 0);
        init_waitqueue_head(&hb->wait);
        hb->error = BLK_STS_OK;
}

static void hib_end_io(struct bio *bio)
{
        struct hib_bio_batch *hb = bio->bi_private;
        struct page *page = bio_first_page_all(bio);

        if (bio->bi_status) {
                pr_alert("I/O error on swap device (%u:%u:%Lu)\n",
                         MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
                         (unsigned long long)bio->bi_iter.bi_sector);
        }

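        /*
         * Batched writes always come from throwaway pages allocated in
         * write_page(); dropping the reference below is what frees them
         * once the I/O has completed.
         */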
        if (bio_data_dir(bio) == WRITE)
                put_page(page);
        else if (clean_pages_on_read)
                flush_icache_range((unsigned long)page_address(page),
                                   (unsigned long)page_address(page) + PAGE_SIZE);

        if (bio->bi_status && !hb->error)
                hb->error = bio->bi_status;
        if (atomic_dec_and_test(&hb->count))
                wake_up(&hb->wait);

        bio_put(bio);
}

static int hib_submit_io(int op, int op_flags, pgoff_t page_off, void *addr,
                struct hib_bio_batch *hb)
{
        struct page *page = virt_to_page(addr);
        struct bio *bio;
        int error = 0;

        bio = bio_alloc(GFP_NOIO | __GFP_HIGH, 1);
        bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9);
        bio_set_dev(bio, hib_resume_bdev);
        bio_set_op_attrs(bio, op, op_flags);

        if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
                pr_err("Adding page to bio failed at %llu\n",
                       (unsigned long long)bio->bi_iter.bi_sector);
                bio_put(bio);
                return -EFAULT;
        }

        if (hb) {
                bio->bi_end_io = hib_end_io;
                bio->bi_private = hb;
                atomic_inc(&hb->count);
                submit_bio(bio);
        } else {
                error = submit_bio_wait(bio);
                bio_put(bio);
        }

        return error;
}

static int hib_wait_io(struct hib_bio_batch *hb)
{
        wait_event(hb->wait, atomic_read(&hb->count) == 0);
        return blk_status_to_errno(hb->error);
}
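
/*
 * Typical batched-I/O pattern used in this file (illustrative sketch;
 * "more_pages" stands in for the caller's loop condition):
 *
 *      struct hib_bio_batch hb;
 *
 *      hib_init_batch(&hb);
 *      while (more_pages)
 *              hib_submit_io(REQ_OP_WRITE, REQ_SYNC, offset, addr, &hb);
 *      error = hib_wait_io(&hb);
 *
 * hib_wait_io() sleeps until every submitted bio has completed and returns
 * the first error seen, if any. Passing a NULL batch makes hib_submit_io()
 * synchronous (it uses submit_bio_wait() internally).
 */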

/*
 * Saving part
 */

static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags)
{
        int error;

        hib_submit_io(REQ_OP_READ, 0, swsusp_resume_block,
                      swsusp_header, NULL);
        if (!memcmp("SWAP-SPACE", swsusp_header->sig, 10) ||
            !memcmp("SWAPSPACE2", swsusp_header->sig, 10)) {
                memcpy(swsusp_header->orig_sig, swsusp_header->sig, 10);
                memcpy(swsusp_header->sig, HIBERNATE_SIG, 10);
                swsusp_header->image = handle->first_sector;
                swsusp_header->flags = flags;
                if (flags & SF_CRC32_MODE)
                        swsusp_header->crc32 = handle->crc32;
                error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC,
                                      swsusp_resume_block, swsusp_header, NULL);
        } else {
                pr_err("Swap header not found!\n");
                error = -ENODEV;
        }
        return error;
}

/**
 *      swsusp_swap_check - check if the resume device is a swap device
 *      and get its index (if so)
 *
 *      This is called before saving the image.
 */
static int swsusp_swap_check(void)
{
        int res;

        res = swap_type_of(swsusp_resume_device, swsusp_resume_block,
                        &hib_resume_bdev);
        if (res < 0)
                return res;

        root_swap = res;
        res = blkdev_get(hib_resume_bdev, FMODE_WRITE, NULL);
        if (res)
                return res;

        res = set_blocksize(hib_resume_bdev, PAGE_SIZE);
        if (res < 0)
                blkdev_put(hib_resume_bdev, FMODE_WRITE);

        /*
         * Update the resume device to the one actually used,
         * so the test_resume mode can use it in case it is
         * invoked from hibernate() to test the snapshot.
         */
        swsusp_resume_device = hib_resume_bdev->bd_dev;
        return res;
}

/**
 *      write_page - Write one page to given swap location.
 *      @buf:           Address we're writing.
 *      @offset:        Offset of the swap page we're writing to.
 *      @hb:            bio completion batch
 */

static int write_page(void *buf, sector_t offset, struct hib_bio_batch *hb)
{
        void *src;
        int ret;

        if (!offset)
                return -ENOSPC;

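        /*
         * For a batched (asynchronous) write, copy the data into a throwaway
         * page first: the caller may reuse @buf as soon as we return, and
         * hib_end_io() drops the page reference once the write completes.
         */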
        if (hb) {
                src = (void *)__get_free_page(GFP_NOIO | __GFP_NOWARN |
                                              __GFP_NORETRY);
                if (src) {
                        copy_page(src, buf);
                } else {
                        ret = hib_wait_io(hb); /* Free pages */
                        if (ret)
                                return ret;
                        src = (void *)__get_free_page(GFP_NOIO |
                                                      __GFP_NOWARN |
                                                      __GFP_NORETRY);
                        if (src) {
                                copy_page(src, buf);
                        } else {
                                WARN_ON_ONCE(1);
                                hb = NULL;      /* Go synchronous */
                                src = buf;
                        }
                }
        } else {
                src = buf;
        }
        return hib_submit_io(REQ_OP_WRITE, REQ_SYNC, offset, src, hb);
}

static void release_swap_writer(struct swap_map_handle *handle)
{
        if (handle->cur)
                free_page((unsigned long)handle->cur);
        handle->cur = NULL;
}

static int get_swap_writer(struct swap_map_handle *handle)
{
        int ret;

        ret = swsusp_swap_check();
        if (ret) {
                if (ret != -ENOSPC)
                        pr_err("Cannot find swap device, try swapon -a\n");
                return ret;
        }
        handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_KERNEL);
        if (!handle->cur) {
                ret = -ENOMEM;
                goto err_close;
        }
        handle->cur_swap = alloc_swapdev_block(root_swap);
        if (!handle->cur_swap) {
                ret = -ENOSPC;
                goto err_rel;
        }
        handle->k = 0;
        handle->reqd_free_pages = reqd_free_pages();
        handle->first_sector = handle->cur_swap;
        return 0;
err_rel:
        release_swap_writer(handle);
err_close:
        swsusp_close(FMODE_WRITE);
        return ret;
}

static int swap_write_page(struct swap_map_handle *handle, void *buf,
                struct hib_bio_batch *hb)
{
        int error = 0;
        sector_t offset;

        if (!handle->cur)
                return -EINVAL;
        offset = alloc_swapdev_block(root_swap);
        error = write_page(buf, offset, hb);
        if (error)
                return error;
        handle->cur->entries[handle->k++] = offset;
        if (handle->k >= MAP_PAGE_ENTRIES) {
                offset = alloc_swapdev_block(root_swap);
                if (!offset)
                        return -ENOSPC;
                handle->cur->next_swap = offset;
                error = write_page(handle->cur, handle->cur_swap, hb);
                if (error)
                        goto out;
                clear_page(handle->cur);
                handle->cur_swap = offset;
                handle->k = 0;

                if (hb && low_free_pages() <= handle->reqd_free_pages) {
                        error = hib_wait_io(hb);
                        if (error)
                                goto out;
                        /*
                         * Recalculate the number of required free pages, to
                         * make sure we never take more than half.
                         */
                        handle->reqd_free_pages = reqd_free_pages();
                }
        }
 out:
        return error;
}

static int flush_swap_writer(struct swap_map_handle *handle)
{
        if (handle->cur && handle->cur_swap)
                return write_page(handle->cur, handle->cur_swap, NULL);
        else
                return -EINVAL;
}

static int swap_writer_finish(struct swap_map_handle *handle,
                unsigned int flags, int error)
{
        if (!error) {
                flush_swap_writer(handle);
                pr_info("S");
                error = mark_swapfiles(handle, flags);
                pr_cont("|\n");
        }

        if (error)
                free_all_swap_pages(root_swap);
        release_swap_writer(handle);
        swsusp_close(FMODE_WRITE);

        return error;
}

/* We need to remember how much compressed data we need to read. */
#define LZO_HEADER      sizeof(size_t)

/* Number of pages/bytes we'll compress at one time. */
#define LZO_UNC_PAGES   32
#define LZO_UNC_SIZE    (LZO_UNC_PAGES * PAGE_SIZE)

/* Number of pages/bytes we need for compressed data (worst case). */
#define LZO_CMP_PAGES   DIV_ROUND_UP(lzo1x_worst_compress(LZO_UNC_SIZE) + \
                                     LZO_HEADER, PAGE_SIZE)
#define LZO_CMP_SIZE    (LZO_CMP_PAGES * PAGE_SIZE)
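
/*
 * Worked example (assuming 4 KiB pages and a 64-bit size_t): LZO_UNC_SIZE is
 * 32 * 4096 = 131072 bytes, lzo1x_worst_compress() bounds the output at
 * x + x/16 + 64 + 3 = 139331 bytes, and adding the 8-byte length header
 * gives DIV_ROUND_UP(139339, 4096) = 35 pages for LZO_CMP_PAGES.
 */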

/* Maximum number of threads for compression/decompression. */
#define LZO_THREADS     3

/* Minimum/maximum number of pages for read buffering. */
#define LZO_MIN_RD_PAGES        1024
#define LZO_MAX_RD_PAGES        8192


/**
 *      save_image - save the suspend image data
 */

static int save_image(struct swap_map_handle *handle,
                      struct snapshot_handle *snapshot,
                      unsigned int nr_to_write)
{
        unsigned int m;
        int ret;
        int nr_pages;
        int err2;
        struct hib_bio_batch hb;
        ktime_t start;
        ktime_t stop;

        hib_init_batch(&hb);

        pr_info("Saving image data pages (%u pages)...\n",
                nr_to_write);
        m = nr_to_write / 10;
        if (!m)
                m = 1;
        nr_pages = 0;
        start = ktime_get();
        while (1) {
                ret = snapshot_read_next(snapshot);
                if (ret <= 0)
                        break;
                ret = swap_write_page(handle, data_of(*snapshot), &hb);
                if (ret)
                        break;
                if (!(nr_pages % m))
                        pr_info("Image saving progress: %3d%%\n",
                                nr_pages / m * 10);
                nr_pages++;
        }
        err2 = hib_wait_io(&hb);
        stop = ktime_get();
        if (!ret)
                ret = err2;
        if (!ret)
                pr_info("Image saving done\n");
        swsusp_show_speed(start, stop, nr_to_write, "Wrote");
        return ret;
}

/**
 * Structure used for CRC32.
 */
struct crc_data {
        struct task_struct *thr;                  /* thread */
        atomic_t ready;                           /* ready to start flag */
        atomic_t stop;                            /* ready to stop flag */
        unsigned run_threads;                     /* nr current threads */
        wait_queue_head_t go;                     /* start crc update */
        wait_queue_head_t done;                   /* crc update done */
        u32 *crc32;                               /* points to handle's crc32 */
        size_t *unc_len[LZO_THREADS];             /* uncompressed lengths */
        unsigned char *unc[LZO_THREADS];          /* uncompressed data */
};
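
/*
 * All worker threads in this file use the same handshake: the producer
 * fills the thread's input fields, sets ->ready and wakes ->go; the thread
 * clears ->ready, does its work, sets ->stop and wakes ->done; the consumer
 * waits on ->done and clears ->stop before reusing the slot. Fields above
 * ->go are zeroed wholesale via offsetof() when a structure is set up.
 */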

/**
 * CRC32 update function that runs in its own thread.
 */
static int crc32_threadfn(void *data)
{
        struct crc_data *d = data;
        unsigned i;

        while (1) {
                wait_event(d->go, atomic_read(&d->ready) ||
                                  kthread_should_stop());
                if (kthread_should_stop()) {
                        d->thr = NULL;
                        atomic_set(&d->stop, 1);
                        wake_up(&d->done);
                        break;
                }
                atomic_set(&d->ready, 0);

                for (i = 0; i < d->run_threads; i++)
                        *d->crc32 = crc32_le(*d->crc32,
                                             d->unc[i], *d->unc_len[i]);
                atomic_set(&d->stop, 1);
                wake_up(&d->done);
        }
        return 0;
}

/**
 * Structure used for LZO data compression.
 */
struct cmp_data {
        struct task_struct *thr;                  /* thread */
        atomic_t ready;                           /* ready to start flag */
        atomic_t stop;                            /* ready to stop flag */
        int ret;                                  /* return code */
        wait_queue_head_t go;                     /* start compression */
        wait_queue_head_t done;                   /* compression done */
        size_t unc_len;                           /* uncompressed length */
        size_t cmp_len;                           /* compressed length */
        unsigned char unc[LZO_UNC_SIZE];          /* uncompressed buffer */
        unsigned char cmp[LZO_CMP_SIZE];          /* compressed buffer */
        unsigned char wrk[LZO1X_1_MEM_COMPRESS];  /* compression workspace */
};

/**
 * Compression function that runs in its own thread.
 */
static int lzo_compress_threadfn(void *data)
{
        struct cmp_data *d = data;

        while (1) {
                wait_event(d->go, atomic_read(&d->ready) ||
                                  kthread_should_stop());
                if (kthread_should_stop()) {
                        d->thr = NULL;
                        d->ret = -1;
                        atomic_set(&d->stop, 1);
                        wake_up(&d->done);
                        break;
                }
                atomic_set(&d->ready, 0);

                d->ret = lzo1x_1_compress(d->unc, d->unc_len,
                                          d->cmp + LZO_HEADER, &d->cmp_len,
                                          d->wrk);
                atomic_set(&d->stop, 1);
                wake_up(&d->done);
        }
        return 0;
}

/**
 * save_image_lzo - Save the suspend image data compressed with LZO.
 * @handle: Swap map handle to use for saving the image.
 * @snapshot: Image to read data from.
 * @nr_to_write: Number of pages to save.
 */
static int save_image_lzo(struct swap_map_handle *handle,
                          struct snapshot_handle *snapshot,
                          unsigned int nr_to_write)
{
        unsigned int m;
        int ret = 0;
        int nr_pages;
        int err2;
        struct hib_bio_batch hb;
        ktime_t start;
        ktime_t stop;
        size_t off;
        unsigned thr, run_threads, nr_threads;
        unsigned char *page = NULL;
        struct cmp_data *data = NULL;
        struct crc_data *crc = NULL;

        hib_init_batch(&hb);

        /*
         * We'll limit the number of threads for compression to cap the
         * memory footprint.
         */
        nr_threads = num_online_cpus() - 1;
        nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);

        page = (void *)__get_free_page(GFP_NOIO | __GFP_HIGH);
        if (!page) {
                pr_err("Failed to allocate LZO page\n");
                ret = -ENOMEM;
                goto out_clean;
        }

        data = vmalloc(array_size(nr_threads, sizeof(*data)));
        if (!data) {
                pr_err("Failed to allocate LZO data\n");
                ret = -ENOMEM;
                goto out_clean;
        }
        for (thr = 0; thr < nr_threads; thr++)
                memset(&data[thr], 0, offsetof(struct cmp_data, go));

        crc = kmalloc(sizeof(*crc), GFP_KERNEL);
        if (!crc) {
                pr_err("Failed to allocate crc\n");
                ret = -ENOMEM;
                goto out_clean;
        }
        memset(crc, 0, offsetof(struct crc_data, go));

        /*
         * Start the compression threads.
         */
        for (thr = 0; thr < nr_threads; thr++) {
                init_waitqueue_head(&data[thr].go);
                init_waitqueue_head(&data[thr].done);

                data[thr].thr = kthread_run(lzo_compress_threadfn,
                                            &data[thr],
                                            "image_compress/%u", thr);
                if (IS_ERR(data[thr].thr)) {
                        data[thr].thr = NULL;
                        pr_err("Cannot start compression threads\n");
                        ret = -ENOMEM;
                        goto out_clean;
                }
        }

        /*
         * Start the CRC32 thread.
         */
        init_waitqueue_head(&crc->go);
        init_waitqueue_head(&crc->done);

        handle->crc32 = 0;
        crc->crc32 = &handle->crc32;
        for (thr = 0; thr < nr_threads; thr++) {
                crc->unc[thr] = data[thr].unc;
                crc->unc_len[thr] = &data[thr].unc_len;
        }

        crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
        if (IS_ERR(crc->thr)) {
                crc->thr = NULL;
                pr_err("Cannot start CRC32 thread\n");
                ret = -ENOMEM;
                goto out_clean;
        }

        /*
         * Adjust the number of required free pages after all allocations have
         * been done. We don't want to run out of pages when writing.
         */
        handle->reqd_free_pages = reqd_free_pages();

        pr_info("Using %u thread(s) for compression\n", nr_threads);
        pr_info("Compressing and saving image data (%u pages)...\n",
                nr_to_write);
        m = nr_to_write / 10;
        if (!m)
                m = 1;
        nr_pages = 0;
        start = ktime_get();
        for (;;) {
                for (thr = 0; thr < nr_threads; thr++) {
                        for (off = 0; off < LZO_UNC_SIZE; off += PAGE_SIZE) {
                                ret = snapshot_read_next(snapshot);
                                if (ret < 0)
                                        goto out_finish;

                                if (!ret)
                                        break;

                                memcpy(data[thr].unc + off,
                                       data_of(*snapshot), PAGE_SIZE);

                                if (!(nr_pages % m))
                                        pr_info("Image saving progress: %3d%%\n",
                                                nr_pages / m * 10);
                                nr_pages++;
                        }
                        if (!off)
                                break;

                        data[thr].unc_len = off;

                        atomic_set(&data[thr].ready, 1);
                        wake_up(&data[thr].go);
                }

                if (!thr)
                        break;

                crc->run_threads = thr;
                atomic_set(&crc->ready, 1);
                wake_up(&crc->go);

                for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
                        wait_event(data[thr].done,
                                   atomic_read(&data[thr].stop));
                        atomic_set(&data[thr].stop, 0);

                        ret = data[thr].ret;

                        if (ret < 0) {
                                pr_err("LZO compression failed\n");
                                goto out_finish;
                        }

                        if (unlikely(!data[thr].cmp_len ||
                                     data[thr].cmp_len >
                                     lzo1x_worst_compress(data[thr].unc_len))) {
                                pr_err("Invalid LZO compressed length\n");
                                ret = -1;
                                goto out_finish;
                        }

                        *(size_t *)data[thr].cmp = data[thr].cmp_len;

                        /*
                         * Given we are writing one page at a time to disk, we
                         * copy that much from the buffer, although the last
                         * bit will likely be smaller than full page. This is
                         * OK - we saved the length of the compressed data, so
                         * any garbage at the end will be discarded when we
                         * read it.
                         */
                        for (off = 0;
                             off < LZO_HEADER + data[thr].cmp_len;
                             off += PAGE_SIZE) {
                                memcpy(page, data[thr].cmp + off, PAGE_SIZE);

                                ret = swap_write_page(handle, page, &hb);
                                if (ret)
                                        goto out_finish;
                        }
                }

                wait_event(crc->done, atomic_read(&crc->stop));
                atomic_set(&crc->stop, 0);
        }

out_finish:
        err2 = hib_wait_io(&hb);
        stop = ktime_get();
        if (!ret)
                ret = err2;
        if (!ret)
                pr_info("Image saving done\n");
        swsusp_show_speed(start, stop, nr_to_write, "Wrote");
out_clean:
        if (crc) {
                if (crc->thr)
                        kthread_stop(crc->thr);
                kfree(crc);
        }
        if (data) {
                for (thr = 0; thr < nr_threads; thr++)
                        if (data[thr].thr)
                                kthread_stop(data[thr].thr);
                vfree(data);
        }
        if (page)
                free_page((unsigned long)page);

        return ret;
}

/**
 *      enough_swap - Make sure we have enough swap to save the image.
 *
 *      Returns TRUE or FALSE after checking the total amount of swap
 *      space available from the resume partition.
 */

static int enough_swap(unsigned int nr_pages)
{
        unsigned int free_swap = count_swap_pages(root_swap, 1);
        unsigned int required;

        pr_debug("Free swap pages: %u\n", free_swap);

        required = PAGES_FOR_IO + nr_pages;
        return free_swap > required;
}

/**
 *      swsusp_write - Write entire image and metadata.
 *      @flags: flags to pass to the "boot" kernel in the image header
 *
 *      It is important _NOT_ to unmount filesystems at this point. We want
 *      them synced (in case something goes wrong) but we DO NOT want to mark
 *      the filesystem clean: it is not. (And it does not matter; if we resume
 *      correctly, we'll mark the system clean anyway.)
 */

int swsusp_write(unsigned int flags)
{
        struct swap_map_handle handle;
        struct snapshot_handle snapshot;
        struct swsusp_info *header;
        unsigned long pages;
        int error;

        pages = snapshot_get_image_size();
        error = get_swap_writer(&handle);
        if (error) {
                pr_err("Cannot get swap writer\n");
                return error;
        }
        if (flags & SF_NOCOMPRESS_MODE) {
                if (!enough_swap(pages)) {
                        pr_err("Not enough free swap\n");
                        error = -ENOSPC;
                        goto out_finish;
                }
        }
        memset(&snapshot, 0, sizeof(struct snapshot_handle));
        error = snapshot_read_next(&snapshot);
        if (error < PAGE_SIZE) {
                if (error >= 0)
                        error = -EFAULT;

                goto out_finish;
        }
        header = (struct swsusp_info *)data_of(snapshot);
        error = swap_write_page(&handle, header, NULL);
        if (!error) {
                error = (flags & SF_NOCOMPRESS_MODE) ?
                        save_image(&handle, &snapshot, pages - 1) :
                        save_image_lzo(&handle, &snapshot, pages - 1);
        }
out_finish:
        error = swap_writer_finish(&handle, flags, error);
        return error;
}

/**
 *      The following functions allow us to read data using a swap map
 *      in a file-like way.
 */

static void release_swap_reader(struct swap_map_handle *handle)
{
        struct swap_map_page_list *tmp;

        while (handle->maps) {
                if (handle->maps->map)
                        free_page((unsigned long)handle->maps->map);
                tmp = handle->maps;
                handle->maps = handle->maps->next;
                kfree(tmp);
        }
        handle->cur = NULL;
}

static int get_swap_reader(struct swap_map_handle *handle,
                unsigned int *flags_p)
{
        int error;
        struct swap_map_page_list *tmp, *last;
        sector_t offset;

        *flags_p = swsusp_header->flags;

        if (!swsusp_header->image) /* how can this happen? */
                return -EINVAL;

        handle->cur = NULL;
        last = handle->maps = NULL;
        offset = swsusp_header->image;
        while (offset) {
                tmp = kzalloc(sizeof(*handle->maps), GFP_KERNEL);
                if (!tmp) {
                        release_swap_reader(handle);
                        return -ENOMEM;
                }
                if (!handle->maps)
                        handle->maps = tmp;
                if (last)
                        last->next = tmp;
                last = tmp;

                tmp->map = (struct swap_map_page *)
                           __get_free_page(GFP_NOIO | __GFP_HIGH);
                if (!tmp->map) {
                        release_swap_reader(handle);
                        return -ENOMEM;
                }

                error = hib_submit_io(REQ_OP_READ, 0, offset, tmp->map, NULL);
                if (error) {
                        release_swap_reader(handle);
                        return error;
                }
                offset = tmp->map->next_swap;
        }
        handle->k = 0;
        handle->cur = handle->maps->map;
        return 0;
}

static int swap_read_page(struct swap_map_handle *handle, void *buf,
                struct hib_bio_batch *hb)
{
        sector_t offset;
        int error;
        struct swap_map_page_list *tmp;

        if (!handle->cur)
                return -EINVAL;
        offset = handle->cur->entries[handle->k];
        if (!offset)
                return -EFAULT;
        error = hib_submit_io(REQ_OP_READ, 0, offset, buf, hb);
        if (error)
                return error;
        if (++handle->k >= MAP_PAGE_ENTRIES) {
                handle->k = 0;
                free_page((unsigned long)handle->maps->map);
                tmp = handle->maps;
                handle->maps = handle->maps->next;
                kfree(tmp);
                if (!handle->maps)
                        release_swap_reader(handle);
                else
                        handle->cur = handle->maps->map;
        }
        return error;
}

static int swap_reader_finish(struct swap_map_handle *handle)
{
        release_swap_reader(handle);

        return 0;
}

/**
 *      load_image - load the image using the swap map handle
 *      @handle and the snapshot handle @snapshot
 *      (assume there are @nr_to_read pages to load)
 */

static int load_image(struct swap_map_handle *handle,
                      struct snapshot_handle *snapshot,
                      unsigned int nr_to_read)
{
        unsigned int m;
        int ret = 0;
        ktime_t start;
        ktime_t stop;
        struct hib_bio_batch hb;
        int err2;
        unsigned nr_pages;

        hib_init_batch(&hb);

        clean_pages_on_read = true;
        pr_info("Loading image data pages (%u pages)...\n", nr_to_read);
        m = nr_to_read / 10;
        if (!m)
                m = 1;
        nr_pages = 0;
        start = ktime_get();
        for ( ; ; ) {
                ret = snapshot_write_next(snapshot);
                if (ret <= 0)
                        break;
                ret = swap_read_page(handle, data_of(*snapshot), &hb);
                if (ret)
                        break;
                if (snapshot->sync_read)
                        ret = hib_wait_io(&hb);
                if (ret)
                        break;
                if (!(nr_pages % m))
                        pr_info("Image loading progress: %3d%%\n",
                                nr_pages / m * 10);
                nr_pages++;
        }
        err2 = hib_wait_io(&hb);
        stop = ktime_get();
        if (!ret)
                ret = err2;
        if (!ret) {
                pr_info("Image loading done\n");
                snapshot_write_finalize(snapshot);
                if (!snapshot_image_loaded(snapshot))
                        ret = -ENODATA;
        }
        swsusp_show_speed(start, stop, nr_to_read, "Read");
        return ret;
}

/**
 * Structure used for LZO data decompression.
 */
struct dec_data {
        struct task_struct *thr;                  /* thread */
        atomic_t ready;                           /* ready to start flag */
        atomic_t stop;                            /* ready to stop flag */
        int ret;                                  /* return code */
        wait_queue_head_t go;                     /* start decompression */
        wait_queue_head_t done;                   /* decompression done */
        size_t unc_len;                           /* uncompressed length */
        size_t cmp_len;                           /* compressed length */
        unsigned char unc[LZO_UNC_SIZE];          /* uncompressed buffer */
        unsigned char cmp[LZO_CMP_SIZE];          /* compressed buffer */
};

/**
 * Decompression function that runs in its own thread.
 */
static int lzo_decompress_threadfn(void *data)
{
        struct dec_data *d = data;

        while (1) {
                wait_event(d->go, atomic_read(&d->ready) ||
                                  kthread_should_stop());
                if (kthread_should_stop()) {
                        d->thr = NULL;
                        d->ret = -1;
                        atomic_set(&d->stop, 1);
                        wake_up(&d->done);
                        break;
                }
                atomic_set(&d->ready, 0);

                d->unc_len = LZO_UNC_SIZE;
                d->ret = lzo1x_decompress_safe(d->cmp + LZO_HEADER, d->cmp_len,
                                               d->unc, &d->unc_len);
                if (clean_pages_on_decompress)
                        flush_icache_range((unsigned long)d->unc,
                                           (unsigned long)d->unc + d->unc_len);

                atomic_set(&d->stop, 1);
                wake_up(&d->done);
        }
        return 0;
}

/**
 * load_image_lzo - Load compressed image data and decompress it with LZO.
 * @handle: Swap map handle to use for loading data.
 * @snapshot: Image to copy uncompressed data into.
 * @nr_to_read: Number of pages to load.
 */
static int load_image_lzo(struct swap_map_handle *handle,
                          struct snapshot_handle *snapshot,
                          unsigned int nr_to_read)
{
        unsigned int m;
        int ret = 0;
        int eof = 0;
        struct hib_bio_batch hb;
        ktime_t start;
        ktime_t stop;
        unsigned nr_pages;
        size_t off;
        unsigned i, thr, run_threads, nr_threads;
        unsigned ring = 0, pg = 0, ring_size = 0,
                 have = 0, want, need, asked = 0;
        unsigned long read_pages = 0;
        unsigned char **page = NULL;
        struct dec_data *data = NULL;
        struct crc_data *crc = NULL;
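
        /*
         * page[] is used as a ring buffer of read-ahead pages: "ring" is the
         * slot the next read lands in, "pg" the next slot to be consumed.
         * "want" counts free slots to submit reads for, "asked" reads that
         * are in flight, and "have" completed reads not yet consumed. "eof"
         * is 1 once the image data ends and 2 once the last reads finished.
         */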

        hib_init_batch(&hb);

        /*
         * We'll limit the number of threads for decompression to cap the
         * memory footprint.
         */
        nr_threads = num_online_cpus() - 1;
        nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);

        page = vmalloc(array_size(LZO_MAX_RD_PAGES, sizeof(*page)));
        if (!page) {
                pr_err("Failed to allocate LZO page\n");
                ret = -ENOMEM;
                goto out_clean;
        }

        data = vmalloc(array_size(nr_threads, sizeof(*data)));
        if (!data) {
                pr_err("Failed to allocate LZO data\n");
                ret = -ENOMEM;
                goto out_clean;
        }
        for (thr = 0; thr < nr_threads; thr++)
                memset(&data[thr], 0, offsetof(struct dec_data, go));

        crc = kmalloc(sizeof(*crc), GFP_KERNEL);
        if (!crc) {
                pr_err("Failed to allocate crc\n");
                ret = -ENOMEM;
                goto out_clean;
        }
        memset(crc, 0, offsetof(struct crc_data, go));

        clean_pages_on_decompress = true;

        /*
         * Start the decompression threads.
         */
        for (thr = 0; thr < nr_threads; thr++) {
                init_waitqueue_head(&data[thr].go);
                init_waitqueue_head(&data[thr].done);

                data[thr].thr = kthread_run(lzo_decompress_threadfn,
                                            &data[thr],
                                            "image_decompress/%u", thr);
                if (IS_ERR(data[thr].thr)) {
                        data[thr].thr = NULL;
                        pr_err("Cannot start decompression threads\n");
                        ret = -ENOMEM;
                        goto out_clean;
                }
        }

        /*
         * Start the CRC32 thread.
         */
        init_waitqueue_head(&crc->go);
        init_waitqueue_head(&crc->done);

        handle->crc32 = 0;
        crc->crc32 = &handle->crc32;
        for (thr = 0; thr < nr_threads; thr++) {
                crc->unc[thr] = data[thr].unc;
                crc->unc_len[thr] = &data[thr].unc_len;
        }

        crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
        if (IS_ERR(crc->thr)) {
                crc->thr = NULL;
                pr_err("Cannot start CRC32 thread\n");
                ret = -ENOMEM;
                goto out_clean;
        }

        /*
         * Set the number of pages for read buffering.
         * This is complete guesswork, because we'll only know the real
         * picture once prepare_image() is called, which is much later on
         * during the image load phase. We'll assume the worst case and
         * say that none of the image pages are from high memory.
         */
        if (low_free_pages() > snapshot_get_image_size())
                read_pages = (low_free_pages() - snapshot_get_image_size()) / 2;
        read_pages = clamp_val(read_pages, LZO_MIN_RD_PAGES, LZO_MAX_RD_PAGES);

        for (i = 0; i < read_pages; i++) {
                page[i] = (void *)__get_free_page(i < LZO_CMP_PAGES ?
                                                  GFP_NOIO | __GFP_HIGH :
                                                  GFP_NOIO | __GFP_NOWARN |
                                                  __GFP_NORETRY);

                if (!page[i]) {
                        if (i < LZO_CMP_PAGES) {
                                ring_size = i;
                                pr_err("Failed to allocate LZO pages\n");
                                ret = -ENOMEM;
                                goto out_clean;
                        } else {
                                break;
                        }
                }
        }
        want = ring_size = i;

        pr_info("Using %u thread(s) for decompression\n", nr_threads);
        pr_info("Loading and decompressing image data (%u pages)...\n",
                nr_to_read);
        m = nr_to_read / 10;
        if (!m)
                m = 1;
        nr_pages = 0;
        start = ktime_get();

        ret = snapshot_write_next(snapshot);
        if (ret <= 0)
                goto out_finish;

        for (;;) {
                for (i = 0; !eof && i < want; i++) {
                        ret = swap_read_page(handle, page[ring], &hb);
                        if (ret) {
                                /*
                                 * On real read error, finish. On end of data,
                                 * set EOF flag and just exit the read loop.
                                 */
                                if (handle->cur &&
                                    handle->cur->entries[handle->k]) {
                                        goto out_finish;
                                } else {
                                        eof = 1;
                                        break;
                                }
                        }
                        if (++ring >= ring_size)
                                ring = 0;
                }
                asked += i;
                want -= i;

                /*
                 * We are out of data, wait for some more.
                 */
                if (!have) {
                        if (!asked)
                                break;

                        ret = hib_wait_io(&hb);
                        if (ret)
                                goto out_finish;
                        have += asked;
                        asked = 0;
                        if (eof)
                                eof = 2;
                }

                if (crc->run_threads) {
                        wait_event(crc->done, atomic_read(&crc->stop));
                        atomic_set(&crc->stop, 0);
                        crc->run_threads = 0;
                }

                for (thr = 0; have && thr < nr_threads; thr++) {
                        data[thr].cmp_len = *(size_t *)page[pg];
                        if (unlikely(!data[thr].cmp_len ||
                                     data[thr].cmp_len >
                                     lzo1x_worst_compress(LZO_UNC_SIZE))) {
                                pr_err("Invalid LZO compressed length\n");
                                ret = -1;
                                goto out_finish;
                        }

                        need = DIV_ROUND_UP(data[thr].cmp_len + LZO_HEADER,
                                            PAGE_SIZE);
                        if (need > have) {
                                if (eof > 1) {
                                        ret = -1;
                                        goto out_finish;
                                }
                                break;
                        }

                        for (off = 0;
                             off < LZO_HEADER + data[thr].cmp_len;
                             off += PAGE_SIZE) {
                                memcpy(data[thr].cmp + off,
                                       page[pg], PAGE_SIZE);
                                have--;
                                want++;
                                if (++pg >= ring_size)
                                        pg = 0;
                        }

                        atomic_set(&data[thr].ready, 1);
                        wake_up(&data[thr].go);
                }

                /*
                 * Wait for more data while we are decompressing.
                 */
                if (have < LZO_CMP_PAGES && asked) {
                        ret = hib_wait_io(&hb);
                        if (ret)
                                goto out_finish;
                        have += asked;
                        asked = 0;
                        if (eof)
                                eof = 2;
                }

                for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
                        wait_event(data[thr].done,
                                   atomic_read(&data[thr].stop));
                        atomic_set(&data[thr].stop, 0);

                        ret = data[thr].ret;

                        if (ret < 0) {
                                pr_err("LZO decompression failed\n");
                                goto out_finish;
                        }

                        if (unlikely(!data[thr].unc_len ||
                                     data[thr].unc_len > LZO_UNC_SIZE ||
                                     data[thr].unc_len & (PAGE_SIZE - 1))) {
                                pr_err("Invalid LZO uncompressed length\n");
                                ret = -1;
                                goto out_finish;
                        }

                        for (off = 0;
                             off < data[thr].unc_len; off += PAGE_SIZE) {
                                memcpy(data_of(*snapshot),
                                       data[thr].unc + off, PAGE_SIZE);

                                if (!(nr_pages % m))
                                        pr_info("Image loading progress: %3d%%\n",
                                                nr_pages / m * 10);
                                nr_pages++;

                                ret = snapshot_write_next(snapshot);
                                if (ret <= 0) {
                                        crc->run_threads = thr + 1;
                                        atomic_set(&crc->ready, 1);
                                        wake_up(&crc->go);
                                        goto out_finish;
                                }
                        }
                }

                crc->run_threads = thr;
                atomic_set(&crc->ready, 1);
                wake_up(&crc->go);
        }

out_finish:
        if (crc->run_threads) {
                wait_event(crc->done, atomic_read(&crc->stop));
                atomic_set(&crc->stop, 0);
        }
        stop = ktime_get();
        if (!ret) {
                pr_info("Image loading done\n");
                snapshot_write_finalize(snapshot);
                if (!snapshot_image_loaded(snapshot))
                        ret = -ENODATA;
                if (!ret) {
                        if (swsusp_header->flags & SF_CRC32_MODE) {
                                if (handle->crc32 != swsusp_header->crc32) {
                                        pr_err("Invalid image CRC32!\n");
                                        ret = -ENODATA;
                                }
                        }
                }
        }
        swsusp_show_speed(start, stop, nr_to_read, "Read");
out_clean:
        for (i = 0; i < ring_size; i++)
                free_page((unsigned long)page[i]);
        if (crc) {
                if (crc->thr)
                        kthread_stop(crc->thr);
                kfree(crc);
        }
        if (data) {
                for (thr = 0; thr < nr_threads; thr++)
                        if (data[thr].thr)
                                kthread_stop(data[thr].thr);
                vfree(data);
        }
        vfree(page);

        return ret;
}

/**
 *      swsusp_read - read the hibernation image.
 *      @flags_p: the location to store the flags that the "frozen" kernel
 *                passed in the image header
 */

int swsusp_read(unsigned int *flags_p)
{
        int error;
        struct swap_map_handle handle;
        struct snapshot_handle snapshot;
        struct swsusp_info *header;

        memset(&snapshot, 0, sizeof(struct snapshot_handle));
        error = snapshot_write_next(&snapshot);
        if (error < PAGE_SIZE)
                return error < 0 ? error : -EFAULT;
        header = (struct swsusp_info *)data_of(snapshot);
        error = get_swap_reader(&handle, flags_p);
        if (error)
                goto end;
        error = swap_read_page(&handle, header, NULL);
        if (!error) {
                error = (*flags_p & SF_NOCOMPRESS_MODE) ?
                        load_image(&handle, &snapshot, header->pages - 1) :
                        load_image_lzo(&handle, &snapshot, header->pages - 1);
        }
        swap_reader_finish(&handle);
end:
        if (!error)
                pr_debug("Image successfully loaded\n");
        else
                pr_debug("Error %d resuming\n", error);
        return error;
}

/**
 *      swsusp_check - Check for swsusp signature in the resume device
 */

int swsusp_check(void)
{
        int error;

        hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device,
                                            FMODE_READ, NULL);
        if (!IS_ERR(hib_resume_bdev)) {
                set_blocksize(hib_resume_bdev, PAGE_SIZE);
                clear_page(swsusp_header);
                error = hib_submit_io(REQ_OP_READ, 0,
                                        swsusp_resume_block,
                                        swsusp_header, NULL);
                if (error)
                        goto put;

                if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
                        memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
                        /* Reset swap signature now */
                        error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC,
                                                swsusp_resume_block,
                                                swsusp_header, NULL);
                } else {
                        error = -EINVAL;
                }

put:
                if (error)
                        blkdev_put(hib_resume_bdev, FMODE_READ);
                else
                        pr_debug("Image signature found, resuming\n");
        } else {
                error = PTR_ERR(hib_resume_bdev);
        }

        if (error)
                pr_debug("Image not found (code %d)\n", error);

        return error;
}

/**
 *      swsusp_close - close swap device.
 */

void swsusp_close(fmode_t mode)
{
        if (IS_ERR(hib_resume_bdev)) {
                pr_debug("Image device not initialised\n");
                return;
        }

        blkdev_put(hib_resume_bdev, mode);
}

/**
 *      swsusp_unmark - Unmark swsusp signature in the resume device
 */

#ifdef CONFIG_SUSPEND
int swsusp_unmark(void)
{
        int error;

        hib_submit_io(REQ_OP_READ, 0, swsusp_resume_block,
                      swsusp_header, NULL);
        if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
                memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
                error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC,
                                        swsusp_resume_block,
                                        swsusp_header, NULL);
        } else {
                pr_err("Cannot find swsusp signature!\n");
                error = -ENODEV;
        }

        /*
         * We just returned from suspend, we don't need the image any more.
         */
        free_all_swap_pages(root_swap);

        return error;
}
#endif

static int swsusp_header_init(void)
{
        swsusp_header = (struct swsusp_header *)__get_free_page(GFP_KERNEL);
        if (!swsusp_header)
                panic("Could not allocate memory for swsusp_header\n");
        return 0;
}

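/*
 * Allocate the header page early in boot (core_initcall runs well before
 * any resume attempt), so swsusp_check() can rely on swsusp_header being
 * available.
 */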
core_initcall(swsusp_header_init);