   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * linux/kernel/power/swap.c
   4 *
   5 * This file provides functions for reading the suspend image from
   6 * and writing it to a swap partition.
   7 *
   8 * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz>
   9 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
  10 * Copyright (C) 2010-2012 Bojan Smojver <bojan@rexursive.com>
  11 */
  12
  13#define pr_fmt(fmt) "PM: " fmt
  14
  15#include <linux/module.h>
  16#include <linux/file.h>
  17#include <linux/delay.h>
  18#include <linux/bitops.h>
  19#include <linux/genhd.h>
  20#include <linux/device.h>
  21#include <linux/bio.h>
  22#include <linux/blkdev.h>
  23#include <linux/swap.h>
  24#include <linux/swapops.h>
  25#include <linux/pm.h>
  26#include <linux/slab.h>
  27#include <linux/lzo.h>
  28#include <linux/vmalloc.h>
  29#include <linux/cpumask.h>
  30#include <linux/atomic.h>
  31#include <linux/kthread.h>
  32#include <linux/crc32.h>
  33#include <linux/ktime.h>
  34
  35#include "power.h"
  36
  37#define HIBERNATE_SIG   "S1SUSPEND"
  38
  39/*
  40 * When reading an {un,}compressed image, we may restore pages in place,
  41 * in which case some architectures need these pages to be cleaned before
  42 * they can be executed. We don't know which pages these may be, so clean them all.
  43 */
  44static bool clean_pages_on_read;
  45static bool clean_pages_on_decompress;
  46
  47/*
  48 *      The swap map is a data structure used for keeping track of each page
  49 *      written to a swap partition.  It consists of many swap_map_page
  50 *      structures, each of which contains an array of MAP_PAGE_ENTRIES swap
  51 *      entries.  These structures are stored in swap and linked with the
  52 *      help of the .next_swap member.
  53 *
  54 *      The swap map is created during suspend.  The swap map pages are
  55 *      allocated and populated one at a time, so we only need one memory
  56 *      page to set up the entire structure.
  57 *
  58 *      During resume, all swap_map_page structures are read into a list.
  59 */
  60
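    /*
     * Rough sketch of the resulting on-disk layout (for illustration only;
     * see swap_write_page() and get_swap_reader() below):
     *
     *   swsusp_header->image
     *         |
     *         v
     *   [ entries[0] ... entries[MAP_PAGE_ENTRIES-1] | next_swap ]
     *                                                      |
     *                                                      v
     *   [ entries[0] ... entries[MAP_PAGE_ENTRIES-1] | next_swap == 0 ]
     *
     * Each entries[] slot holds the sector of one image data page, in the
     * order the pages were written; next_swap holds the sector of the
     * following swap_map_page, or 0 at the end of the chain.
     */
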
  61#define MAP_PAGE_ENTRIES        (PAGE_SIZE / sizeof(sector_t) - 1)
  62
  63/*
  64 * Number of free pages that are not in high memory.
  65 */
  66static inline unsigned long low_free_pages(void)
  67{
  68        return nr_free_pages() - nr_free_highpages();
  69}
  70
  71/*
  72 * Number of pages required to be kept free while writing the image. Always
  73 * half of all available low pages before the writing starts.
  74 */
  75static inline unsigned long reqd_free_pages(void)
  76{
  77        return low_free_pages() / 2;
  78}
  79
  80struct swap_map_page {
  81        sector_t entries[MAP_PAGE_ENTRIES];
  82        sector_t next_swap;
  83};
  84
  85struct swap_map_page_list {
  86        struct swap_map_page *map;
  87        struct swap_map_page_list *next;
  88};
  89
  90/**
  91 *      The swap_map_handle structure is used for handling swap in
  92 *      a file-like way.
  93 */
  94
  95struct swap_map_handle {
  96        struct swap_map_page *cur;
  97        struct swap_map_page_list *maps;
  98        sector_t cur_swap;
  99        sector_t first_sector;
 100        unsigned int k;
 101        unsigned long reqd_free_pages;
 102        u32 crc32;
 103};
 104
 105struct swsusp_header {
 106        char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int) -
 107                      sizeof(u32)];
 108        u32     crc32;
 109        sector_t image;
 110        unsigned int flags;     /* Flags to pass to the "boot" kernel */
 111        char    orig_sig[10];
 112        char    sig[10];
 113} __packed;
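
    /*
     * Note: struct swsusp_header occupies exactly one page; the reserved[]
     * padding pushes the two 10-byte signatures to the very end of that page,
     * so that ->sig overlays the on-disk swap signature ("SWAP-SPACE" or
     * "SWAPSPACE2") of the resume device.  mark_swapfiles() saves the
     * original signature in ->orig_sig and replaces it with HIBERNATE_SIG;
     * swsusp_check() and swsusp_unmark() restore it.
     */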
 114
 115static struct swsusp_header *swsusp_header;
 116
 117/**
 118 *      The following functions are used for tracking the allocated
 119 *      swap pages, so that they can be freed in case of an error.
 120 */
 121
 122struct swsusp_extent {
 123        struct rb_node node;
 124        unsigned long start;
 125        unsigned long end;
 126};
 127
 128static struct rb_root swsusp_extents = RB_ROOT;
 129
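    /*
     * swsusp_extents_insert() keeps the allocated swap offsets as disjoint
     * [start, end] ranges in the rbtree above.  An offset adjacent to an
     * existing extent simply grows that extent; only non-adjacent offsets
     * allocate a new node.  For example (hypothetical offsets), inserting
     * 10, 11 and then 20 yields two extents: [10..11] and [20..20].  Extents
     * that later become adjacent are not coalesced with each other, which is
     * fine for free_all_swap_pages() below.
     */
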
 130static int swsusp_extents_insert(unsigned long swap_offset)
 131{
 132        struct rb_node **new = &(swsusp_extents.rb_node);
 133        struct rb_node *parent = NULL;
 134        struct swsusp_extent *ext;
 135
 136        /* Figure out where to put the new node */
 137        while (*new) {
 138                ext = rb_entry(*new, struct swsusp_extent, node);
 139                parent = *new;
 140                if (swap_offset < ext->start) {
 141                        /* Try to merge */
 142                        if (swap_offset == ext->start - 1) {
 143                                ext->start--;
 144                                return 0;
 145                        }
 146                        new = &((*new)->rb_left);
 147                } else if (swap_offset > ext->end) {
 148                        /* Try to merge */
 149                        if (swap_offset == ext->end + 1) {
 150                                ext->end++;
 151                                return 0;
 152                        }
 153                        new = &((*new)->rb_right);
 154                } else {
 155                        /* It already is in the tree */
 156                        return -EINVAL;
 157                }
 158        }
 159        /* Add the new node and rebalance the tree. */
 160        ext = kzalloc(sizeof(struct swsusp_extent), GFP_KERNEL);
 161        if (!ext)
 162                return -ENOMEM;
 163
 164        ext->start = swap_offset;
 165        ext->end = swap_offset;
 166        rb_link_node(&ext->node, parent, new);
 167        rb_insert_color(&ext->node, &swsusp_extents);
 168        return 0;
 169}
 170
 171/**
 172 *      alloc_swapdev_block - allocate a swap page and register that it has
 173 *      been allocated, so that it can be freed in case of an error.
 174 */
 175
 176sector_t alloc_swapdev_block(int swap)
 177{
 178        unsigned long offset;
 179
 180        offset = swp_offset(get_swap_page_of_type(swap));
 181        if (offset) {
 182                if (swsusp_extents_insert(offset))
 183                        swap_free(swp_entry(swap, offset));
 184                else
 185                        return swapdev_block(swap, offset);
 186        }
 187        return 0;
 188}
 189
 190/**
 191 *      free_all_swap_pages - free swap pages allocated for saving image data.
 192 *      It also frees the extents used to register which swap entries had been
 193 *      allocated.
 194 */
 195
 196void free_all_swap_pages(int swap)
 197{
 198        struct rb_node *node;
 199
 200        while ((node = swsusp_extents.rb_node)) {
 201                struct swsusp_extent *ext;
 202                unsigned long offset;
 203
 204                ext = rb_entry(node, struct swsusp_extent, node);
 205                rb_erase(node, &swsusp_extents);
 206                for (offset = ext->start; offset <= ext->end; offset++)
 207                        swap_free(swp_entry(swap, offset));
 208
 209                kfree(ext);
 210        }
 211}
 212
 213int swsusp_swap_in_use(void)
 214{
 215        return (swsusp_extents.rb_node != NULL);
 216}
 217
 218/*
 219 * General things
 220 */
 221
 222static unsigned short root_swap = 0xffff;
 223static struct block_device *hib_resume_bdev;
 224
 225struct hib_bio_batch {
 226        atomic_t                count;
 227        wait_queue_head_t       wait;
 228        blk_status_t            error;
 229        struct blk_plug         plug;
 230};
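
    /*
     * A hib_bio_batch tracks a set of asynchronously submitted bios: ->count
     * is the number still in flight, ->error records the first failure and
     * the plug lets the block layer merge the requests.  Typical use in this
     * file (sketch only):
     *
     *	struct hib_bio_batch hb;
     *
     *	hib_init_batch(&hb);
     *	while (<more pages to write>)
     *		hib_submit_io(REQ_OP_WRITE, REQ_SYNC, offset, addr, &hb);
     *	error = hib_wait_io(&hb);	(waits for ->count to reach zero)
     *	hib_finish_batch(&hb);
     */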
 231
 232static void hib_init_batch(struct hib_bio_batch *hb)
 233{
 234        atomic_set(&hb->count, 0);
 235        init_waitqueue_head(&hb->wait);
 236        hb->error = BLK_STS_OK;
 237        blk_start_plug(&hb->plug);
 238}
 239
 240static void hib_finish_batch(struct hib_bio_batch *hb)
 241{
 242        blk_finish_plug(&hb->plug);
 243}
 244
 245static void hib_end_io(struct bio *bio)
 246{
 247        struct hib_bio_batch *hb = bio->bi_private;
 248        struct page *page = bio_first_page_all(bio);
 249
 250        if (bio->bi_status) {
 251                pr_alert("Read-error on swap-device (%u:%u:%Lu)\n",
 252                         MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
 253                         (unsigned long long)bio->bi_iter.bi_sector);
 254        }
 255
 256        if (bio_data_dir(bio) == WRITE)
 257                put_page(page);
 258        else if (clean_pages_on_read)
 259                flush_icache_range((unsigned long)page_address(page),
 260                                   (unsigned long)page_address(page) + PAGE_SIZE);
 261
 262        if (bio->bi_status && !hb->error)
 263                hb->error = bio->bi_status;
 264        if (atomic_dec_and_test(&hb->count))
 265                wake_up(&hb->wait);
 266
 267        bio_put(bio);
 268}
 269
 270static int hib_submit_io(int op, int op_flags, pgoff_t page_off, void *addr,
 271                struct hib_bio_batch *hb)
 272{
 273        struct page *page = virt_to_page(addr);
 274        struct bio *bio;
 275        int error = 0;
 276
 277        bio = bio_alloc(GFP_NOIO | __GFP_HIGH, 1);
 278        bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9);
 279        bio_set_dev(bio, hib_resume_bdev);
 280        bio_set_op_attrs(bio, op, op_flags);
 281
 282        if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
 283                pr_err("Adding page to bio failed at %llu\n",
 284                       (unsigned long long)bio->bi_iter.bi_sector);
 285                bio_put(bio);
 286                return -EFAULT;
 287        }
 288
 289        if (hb) {
 290                bio->bi_end_io = hib_end_io;
 291                bio->bi_private = hb;
 292                atomic_inc(&hb->count);
 293                submit_bio(bio);
 294        } else {
 295                error = submit_bio_wait(bio);
 296                bio_put(bio);
 297        }
 298
 299        return error;
 300}
 301
 302static blk_status_t hib_wait_io(struct hib_bio_batch *hb)
 303{
 304        /*
 305         * We are relying on the behavior of blk_plug that a thread with
 306         * a plug will flush the plug list before sleeping.
 307         */
 308        wait_event(hb->wait, atomic_read(&hb->count) == 0);
 309        return blk_status_to_errno(hb->error);
 310}
 311
 312/*
 313 * Saving part
 314 */
 315
 316static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags)
 317{
 318        int error;
 319
 320        hib_submit_io(REQ_OP_READ, 0, swsusp_resume_block,
 321                      swsusp_header, NULL);
 322        if (!memcmp("SWAP-SPACE", swsusp_header->sig, 10) ||
 323            !memcmp("SWAPSPACE2", swsusp_header->sig, 10)) {
 324                memcpy(swsusp_header->orig_sig, swsusp_header->sig, 10);
 325                memcpy(swsusp_header->sig, HIBERNATE_SIG, 10);
 326                swsusp_header->image = handle->first_sector;
 327                swsusp_header->flags = flags;
 328                if (flags & SF_CRC32_MODE)
 329                        swsusp_header->crc32 = handle->crc32;
 330                error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC,
 331                                      swsusp_resume_block, swsusp_header, NULL);
 332        } else {
 333                pr_err("Swap header not found!\n");
 334                error = -ENODEV;
 335        }
 336        return error;
 337}
 338
 339/**
 340 *      swsusp_swap_check - check if the resume device is a swap device
 341 *      and get its index (if so)
 342 *
 343 *      This is called before saving the image.
 344 */
 345static int swsusp_swap_check(void)
 346{
 347        int res;
 348
 349        if (swsusp_resume_device)
 350                res = swap_type_of(swsusp_resume_device, swsusp_resume_block);
 351        else
 352                res = find_first_swap(&swsusp_resume_device);
 353        if (res < 0)
 354                return res;
 355        root_swap = res;
 356
 357        hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device, FMODE_WRITE,
 358                        NULL);
 359        if (IS_ERR(hib_resume_bdev))
 360                return PTR_ERR(hib_resume_bdev);
 361
 362        res = set_blocksize(hib_resume_bdev, PAGE_SIZE);
 363        if (res < 0)
 364                blkdev_put(hib_resume_bdev, FMODE_WRITE);
 365
 366        return res;
 367}
 368
 369/**
 370 *      write_page - Write one page to given swap location.
 371 *      @buf:           Address we're writing.
 372 *      @offset:        Offset of the swap page we're writing to.
 373 *      @hb:            bio completion batch
 374 */
 375
 376static int write_page(void *buf, sector_t offset, struct hib_bio_batch *hb)
 377{
 378        void *src;
 379        int ret;
 380
 381        if (!offset)
 382                return -ENOSPC;
 383
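            /*
             * With a batch, @buf may be reused by the caller before the bio
             * completes, so write out a private copy.  If no page can be
             * allocated, wait for the writes already in flight (their copies
             * are freed on completion) and retry; as a last resort fall back
             * to a synchronous write of @buf itself.
             */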
 384        if (hb) {
 385                src = (void *)__get_free_page(GFP_NOIO | __GFP_NOWARN |
 386                                              __GFP_NORETRY);
 387                if (src) {
 388                        copy_page(src, buf);
 389                } else {
 390                        ret = hib_wait_io(hb); /* Wait for writes to free pages */
 391                        if (ret)
 392                                return ret;
 393                        src = (void *)__get_free_page(GFP_NOIO |
 394                                                      __GFP_NOWARN |
 395                                                      __GFP_NORETRY);
 396                        if (src) {
 397                                copy_page(src, buf);
 398                        } else {
 399                                WARN_ON_ONCE(1);
 400                                hb = NULL;      /* Go synchronous */
 401                                src = buf;
 402                        }
 403                }
 404        } else {
 405                src = buf;
 406        }
 407        return hib_submit_io(REQ_OP_WRITE, REQ_SYNC, offset, src, hb);
 408}
 409
 410static void release_swap_writer(struct swap_map_handle *handle)
 411{
 412        if (handle->cur)
 413                free_page((unsigned long)handle->cur);
 414        handle->cur = NULL;
 415}
 416
 417static int get_swap_writer(struct swap_map_handle *handle)
 418{
 419        int ret;
 420
 421        ret = swsusp_swap_check();
 422        if (ret) {
 423                if (ret != -ENOSPC)
 424                        pr_err("Cannot find swap device, try swapon -a\n");
 425                return ret;
 426        }
 427        handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_KERNEL);
 428        if (!handle->cur) {
 429                ret = -ENOMEM;
 430                goto err_close;
 431        }
 432        handle->cur_swap = alloc_swapdev_block(root_swap);
 433        if (!handle->cur_swap) {
 434                ret = -ENOSPC;
 435                goto err_rel;
 436        }
 437        handle->k = 0;
 438        handle->reqd_free_pages = reqd_free_pages();
 439        handle->first_sector = handle->cur_swap;
 440        return 0;
 441err_rel:
 442        release_swap_writer(handle);
 443err_close:
 444        swsusp_close(FMODE_WRITE);
 445        return ret;
 446}
 447
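    /*
     * swap_write_page() writes one page of image data to a freshly allocated
     * swap slot and records that slot's sector in the current swap_map_page.
     * When the map page fills up, it is itself written out to
     * handle->cur_swap, with ->next_swap already pointing at the slot
     * reserved for the next map page, and the in-memory map page is then
     * reused for the next batch of entries.
     */
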
 448static int swap_write_page(struct swap_map_handle *handle, void *buf,
 449                struct hib_bio_batch *hb)
 450{
 451        int error = 0;
 452        sector_t offset;
 453
 454        if (!handle->cur)
 455                return -EINVAL;
 456        offset = alloc_swapdev_block(root_swap);
 457        error = write_page(buf, offset, hb);
 458        if (error)
 459                return error;
 460        handle->cur->entries[handle->k++] = offset;
 461        if (handle->k >= MAP_PAGE_ENTRIES) {
 462                offset = alloc_swapdev_block(root_swap);
 463                if (!offset)
 464                        return -ENOSPC;
 465                handle->cur->next_swap = offset;
 466                error = write_page(handle->cur, handle->cur_swap, hb);
 467                if (error)
 468                        goto out;
 469                clear_page(handle->cur);
 470                handle->cur_swap = offset;
 471                handle->k = 0;
 472
 473                if (hb && low_free_pages() <= handle->reqd_free_pages) {
 474                        error = hib_wait_io(hb);
 475                        if (error)
 476                                goto out;
 477                        /*
 478                         * Recalculate the number of required free pages, to
 479                         * make sure we never take more than half.
 480                         */
 481                        handle->reqd_free_pages = reqd_free_pages();
 482                }
 483        }
 484 out:
 485        return error;
 486}
 487
 488static int flush_swap_writer(struct swap_map_handle *handle)
 489{
 490        if (handle->cur && handle->cur_swap)
 491                return write_page(handle->cur, handle->cur_swap, NULL);
 492        else
 493                return -EINVAL;
 494}
 495
 496static int swap_writer_finish(struct swap_map_handle *handle,
 497                unsigned int flags, int error)
 498{
 499        if (!error) {
 500                pr_info("S");
 501                error = mark_swapfiles(handle, flags);
 502                pr_cont("|\n");
 503                flush_swap_writer(handle);
 504        }
 505
 506        if (error)
 507                free_all_swap_pages(root_swap);
 508        release_swap_writer(handle);
 509        swsusp_close(FMODE_WRITE);
 510
 511        return error;
 512}
 513
 514/* We need to remember how much compressed data we need to read. */
 515#define LZO_HEADER      sizeof(size_t)
 516
 517/* Number of pages/bytes we'll compress at one time. */
 518#define LZO_UNC_PAGES   32
 519#define LZO_UNC_SIZE    (LZO_UNC_PAGES * PAGE_SIZE)
 520
 521/* Number of pages/bytes we need for compressed data (worst case). */
 522#define LZO_CMP_PAGES   DIV_ROUND_UP(lzo1x_worst_compress(LZO_UNC_SIZE) + \
 523                                     LZO_HEADER, PAGE_SIZE)
 524#define LZO_CMP_SIZE    (LZO_CMP_PAGES * PAGE_SIZE)
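
    /*
     * On-disk format of one compressed block, as produced by save_image_lzo()
     * and consumed by load_image_lzo(): a size_t length header followed by
     * the LZO1X-compressed data of up to LZO_UNC_PAGES pages, rounded up to
     * whole PAGE_SIZE pages.  Padding written past the end of the compressed
     * data is ignored on load, because the length header says how much of
     * the block is valid.
     */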
 525
 526/* Maximum number of threads for compression/decompression. */
 527#define LZO_THREADS     3
 528
 529/* Minimum/maximum number of pages for read buffering. */
 530#define LZO_MIN_RD_PAGES        1024
 531#define LZO_MAX_RD_PAGES        8192
 532
 533
 534/**
 535 *      save_image - save the suspend image data
 536 */
 537
 538static int save_image(struct swap_map_handle *handle,
 539                      struct snapshot_handle *snapshot,
 540                      unsigned int nr_to_write)
 541{
 542        unsigned int m;
 543        int ret;
 544        int nr_pages;
 545        int err2;
 546        struct hib_bio_batch hb;
 547        ktime_t start;
 548        ktime_t stop;
 549
 550        hib_init_batch(&hb);
 551
 552        pr_info("Saving image data pages (%u pages)...\n",
 553                nr_to_write);
 554        m = nr_to_write / 10;
 555        if (!m)
 556                m = 1;
 557        nr_pages = 0;
 558        start = ktime_get();
 559        while (1) {
 560                ret = snapshot_read_next(snapshot);
 561                if (ret <= 0)
 562                        break;
 563                ret = swap_write_page(handle, data_of(*snapshot), &hb);
 564                if (ret)
 565                        break;
 566                if (!(nr_pages % m))
 567                        pr_info("Image saving progress: %3d%%\n",
 568                                nr_pages / m * 10);
 569                nr_pages++;
 570        }
 571        err2 = hib_wait_io(&hb);
 572        hib_finish_batch(&hb);
 573        stop = ktime_get();
 574        if (!ret)
 575                ret = err2;
 576        if (!ret)
 577                pr_info("Image saving done\n");
 578        swsusp_show_speed(start, stop, nr_to_write, "Wrote");
 579        return ret;
 580}
 581
 582/**
 583 * Structure used for CRC32.
 584 */
 585struct crc_data {
 586        struct task_struct *thr;                  /* thread */
 587        atomic_t ready;                           /* ready to start flag */
 588        atomic_t stop;                            /* ready to stop flag */
 589        unsigned run_threads;                     /* nr current threads */
 590        wait_queue_head_t go;                     /* start crc update */
 591        wait_queue_head_t done;                   /* crc update done */
 592        u32 *crc32;                               /* points to handle's crc32 */
 593        size_t *unc_len[LZO_THREADS];             /* uncompressed lengths */
 594        unsigned char *unc[LZO_THREADS];          /* uncompressed data */
 595};
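
    /*
     * The CRC32 thread below checksums the *uncompressed* data of all
     * worker threads, in submission order, into the handle's crc32.  On the
     * save side the result is stored in swsusp_header->crc32 when
     * SF_CRC32_MODE is set; on the load side it is recomputed and compared
     * against that value at the end of load_image_lzo().
     */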
 596
 597/**
 598 * CRC32 update function that runs in its own thread.
 599 */
 600static int crc32_threadfn(void *data)
 601{
 602        struct crc_data *d = data;
 603        unsigned i;
 604
 605        while (1) {
 606                wait_event(d->go, atomic_read(&d->ready) ||
 607                                  kthread_should_stop());
 608                if (kthread_should_stop()) {
 609                        d->thr = NULL;
 610                        atomic_set(&d->stop, 1);
 611                        wake_up(&d->done);
 612                        break;
 613                }
 614                atomic_set(&d->ready, 0);
 615
 616                for (i = 0; i < d->run_threads; i++)
 617                        *d->crc32 = crc32_le(*d->crc32,
 618                                             d->unc[i], *d->unc_len[i]);
 619                atomic_set(&d->stop, 1);
 620                wake_up(&d->done);
 621        }
 622        return 0;
 623}
 624/**
 625 * Structure used for LZO data compression.
 626 */
 627struct cmp_data {
 628        struct task_struct *thr;                  /* thread */
 629        atomic_t ready;                           /* ready to start flag */
 630        atomic_t stop;                            /* ready to stop flag */
 631        int ret;                                  /* return code */
 632        wait_queue_head_t go;                     /* start compression */
 633        wait_queue_head_t done;                   /* compression done */
 634        size_t unc_len;                           /* uncompressed length */
 635        size_t cmp_len;                           /* compressed length */
 636        unsigned char unc[LZO_UNC_SIZE];          /* uncompressed buffer */
 637        unsigned char cmp[LZO_CMP_SIZE];          /* compressed buffer */
 638        unsigned char wrk[LZO1X_1_MEM_COMPRESS];  /* compression workspace */
 639};
 640
 641/**
 642 * Compression function that runs in its own thread.
 643 */
 644static int lzo_compress_threadfn(void *data)
 645{
 646        struct cmp_data *d = data;
 647
 648        while (1) {
 649                wait_event(d->go, atomic_read(&d->ready) ||
 650                                  kthread_should_stop());
 651                if (kthread_should_stop()) {
 652                        d->thr = NULL;
 653                        d->ret = -1;
 654                        atomic_set(&d->stop, 1);
 655                        wake_up(&d->done);
 656                        break;
 657                }
 658                atomic_set(&d->ready, 0);
 659
 660                d->ret = lzo1x_1_compress(d->unc, d->unc_len,
 661                                          d->cmp + LZO_HEADER, &d->cmp_len,
 662                                          d->wrk);
 663                atomic_set(&d->stop, 1);
 664                wake_up(&d->done);
 665        }
 666        return 0;
 667}
 668
 669/**
 670 * save_image_lzo - Save the suspend image data compressed with LZO.
 671 * @handle: Swap map handle to use for saving the image.
 672 * @snapshot: Image to read data from.
 673 * @nr_to_write: Number of pages to save.
 674 */
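    /*
     * Outline of the save pipeline implemented below (simplified): the main
     * thread reads snapshot pages into the per-thread ->unc buffers, then
     * kicks the compression threads and the CRC32 thread, which work on
     * those buffers in parallel.  As each compression thread finishes, the
     * main thread writes its length-prefixed ->cmp buffer to swap one page
     * at a time via swap_write_page(), using the bio batch for asynchronous
     * I/O.
     */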
 675static int save_image_lzo(struct swap_map_handle *handle,
 676                          struct snapshot_handle *snapshot,
 677                          unsigned int nr_to_write)
 678{
 679        unsigned int m;
 680        int ret = 0;
 681        int nr_pages;
 682        int err2;
 683        struct hib_bio_batch hb;
 684        ktime_t start;
 685        ktime_t stop;
 686        size_t off;
 687        unsigned thr, run_threads, nr_threads;
 688        unsigned char *page = NULL;
 689        struct cmp_data *data = NULL;
 690        struct crc_data *crc = NULL;
 691
 692        hib_init_batch(&hb);
 693
 694        /*
 695         * We'll limit the number of compression threads to keep the memory
 696         * footprint down.
 697         */
 698        nr_threads = num_online_cpus() - 1;
 699        nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);
 700
 701        page = (void *)__get_free_page(GFP_NOIO | __GFP_HIGH);
 702        if (!page) {
 703                pr_err("Failed to allocate LZO page\n");
 704                ret = -ENOMEM;
 705                goto out_clean;
 706        }
 707
 708        data = vmalloc(array_size(nr_threads, sizeof(*data)));
 709        if (!data) {
 710                pr_err("Failed to allocate LZO data\n");
 711                ret = -ENOMEM;
 712                goto out_clean;
 713        }
 714        for (thr = 0; thr < nr_threads; thr++)
 715                memset(&data[thr], 0, offsetof(struct cmp_data, go));
 716
 717        crc = kmalloc(sizeof(*crc), GFP_KERNEL);
 718        if (!crc) {
 719                pr_err("Failed to allocate crc\n");
 720                ret = -ENOMEM;
 721                goto out_clean;
 722        }
 723        memset(crc, 0, offsetof(struct crc_data, go));
 724
 725        /*
 726         * Start the compression threads.
 727         */
 728        for (thr = 0; thr < nr_threads; thr++) {
 729                init_waitqueue_head(&data[thr].go);
 730                init_waitqueue_head(&data[thr].done);
 731
 732                data[thr].thr = kthread_run(lzo_compress_threadfn,
 733                                            &data[thr],
 734                                            "image_compress/%u", thr);
 735                if (IS_ERR(data[thr].thr)) {
 736                        data[thr].thr = NULL;
 737                        pr_err("Cannot start compression threads\n");
 738                        ret = -ENOMEM;
 739                        goto out_clean;
 740                }
 741        }
 742
 743        /*
 744         * Start the CRC32 thread.
 745         */
 746        init_waitqueue_head(&crc->go);
 747        init_waitqueue_head(&crc->done);
 748
 749        handle->crc32 = 0;
 750        crc->crc32 = &handle->crc32;
 751        for (thr = 0; thr < nr_threads; thr++) {
 752                crc->unc[thr] = data[thr].unc;
 753                crc->unc_len[thr] = &data[thr].unc_len;
 754        }
 755
 756        crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
 757        if (IS_ERR(crc->thr)) {
 758                crc->thr = NULL;
 759                pr_err("Cannot start CRC32 thread\n");
 760                ret = -ENOMEM;
 761                goto out_clean;
 762        }
 763
 764        /*
 765         * Adjust the number of required free pages after all allocations have
 766         * been done. We don't want to run out of pages when writing.
 767         */
 768        handle->reqd_free_pages = reqd_free_pages();
 769
 770        pr_info("Using %u thread(s) for compression\n", nr_threads);
 771        pr_info("Compressing and saving image data (%u pages)...\n",
 772                nr_to_write);
 773        m = nr_to_write / 10;
 774        if (!m)
 775                m = 1;
 776        nr_pages = 0;
 777        start = ktime_get();
 778        for (;;) {
 779                for (thr = 0; thr < nr_threads; thr++) {
 780                        for (off = 0; off < LZO_UNC_SIZE; off += PAGE_SIZE) {
 781                                ret = snapshot_read_next(snapshot);
 782                                if (ret < 0)
 783                                        goto out_finish;
 784
 785                                if (!ret)
 786                                        break;
 787
 788                                memcpy(data[thr].unc + off,
 789                                       data_of(*snapshot), PAGE_SIZE);
 790
 791                                if (!(nr_pages % m))
 792                                        pr_info("Image saving progress: %3d%%\n",
 793                                                nr_pages / m * 10);
 794                                nr_pages++;
 795                        }
 796                        if (!off)
 797                                break;
 798
 799                        data[thr].unc_len = off;
 800
 801                        atomic_set(&data[thr].ready, 1);
 802                        wake_up(&data[thr].go);
 803                }
 804
 805                if (!thr)
 806                        break;
 807
 808                crc->run_threads = thr;
 809                atomic_set(&crc->ready, 1);
 810                wake_up(&crc->go);
 811
 812                for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
 813                        wait_event(data[thr].done,
 814                                   atomic_read(&data[thr].stop));
 815                        atomic_set(&data[thr].stop, 0);
 816
 817                        ret = data[thr].ret;
 818
 819                        if (ret < 0) {
 820                                pr_err("LZO compression failed\n");
 821                                goto out_finish;
 822                        }
 823
 824                        if (unlikely(!data[thr].cmp_len ||
 825                                     data[thr].cmp_len >
 826                                     lzo1x_worst_compress(data[thr].unc_len))) {
 827                                pr_err("Invalid LZO compressed length\n");
 828                                ret = -1;
 829                                goto out_finish;
 830                        }
 831
 832                        *(size_t *)data[thr].cmp = data[thr].cmp_len;
 833
 834                        /*
 835                         * Given we are writing one page at a time to disk, we
 836                         * copy that much from the buffer, although the last
 837                         * bit will likely be smaller than a full page. This is
 838                         * OK - we saved the length of the compressed data, so
 839                         * any garbage at the end will be discarded when we
 840                         * read it.
 841                         */
 842                        for (off = 0;
 843                             off < LZO_HEADER + data[thr].cmp_len;
 844                             off += PAGE_SIZE) {
 845                                memcpy(page, data[thr].cmp + off, PAGE_SIZE);
 846
 847                                ret = swap_write_page(handle, page, &hb);
 848                                if (ret)
 849                                        goto out_finish;
 850                        }
 851                }
 852
 853                wait_event(crc->done, atomic_read(&crc->stop));
 854                atomic_set(&crc->stop, 0);
 855        }
 856
 857out_finish:
 858        err2 = hib_wait_io(&hb);
 859        stop = ktime_get();
 860        if (!ret)
 861                ret = err2;
 862        if (!ret)
 863                pr_info("Image saving done\n");
 864        swsusp_show_speed(start, stop, nr_to_write, "Wrote");
 865out_clean:
 866        hib_finish_batch(&hb);
 867        if (crc) {
 868                if (crc->thr)
 869                        kthread_stop(crc->thr);
 870                kfree(crc);
 871        }
 872        if (data) {
 873                for (thr = 0; thr < nr_threads; thr++)
 874                        if (data[thr].thr)
 875                                kthread_stop(data[thr].thr);
 876                vfree(data);
 877        }
 878        if (page)
                    free_page((unsigned long)page);
 879
 880        return ret;
 881}
 882
 883/**
 884 *      enough_swap - Make sure we have enough swap to save the image.
 885 *
 886 *      Returns TRUE or FALSE after checking the total amount of swap
 887 *      space available from the resume partition.
 888 */
 889
 890static int enough_swap(unsigned int nr_pages)
 891{
 892        unsigned int free_swap = count_swap_pages(root_swap, 1);
 893        unsigned int required;
 894
 895        pr_debug("Free swap pages: %u\n", free_swap);
 896
 897        required = PAGES_FOR_IO + nr_pages;
 898        return free_swap > required;
 899}
 900
 901/**
 902 *      swsusp_write - Write entire image and metadata.
 903 *      @flags: flags to pass to the "boot" kernel in the image header
 904 *
 905 *      It is important _NOT_ to unmount filesystems at this point. We want
 906 *      them synced (in case something goes wrong) but we DO NOT want to mark
 907 *      the filesystems clean: they are not. (And it does not matter; if we
 908 *      resume correctly, we'll mark the system clean anyway.)
 909 */
 910
 911int swsusp_write(unsigned int flags)
 912{
 913        struct swap_map_handle handle;
 914        struct snapshot_handle snapshot;
 915        struct swsusp_info *header;
 916        unsigned long pages;
 917        int error;
 918
 919        pages = snapshot_get_image_size();
 920        error = get_swap_writer(&handle);
 921        if (error) {
 922                pr_err("Cannot get swap writer\n");
 923                return error;
 924        }
 925        if (flags & SF_NOCOMPRESS_MODE) {
 926                if (!enough_swap(pages)) {
 927                        pr_err("Not enough free swap\n");
 928                        error = -ENOSPC;
 929                        goto out_finish;
 930                }
 931        }
 932        memset(&snapshot, 0, sizeof(struct snapshot_handle));
 933        error = snapshot_read_next(&snapshot);
 934        if (error < (int)PAGE_SIZE) {
 935                if (error >= 0)
 936                        error = -EFAULT;
 937
 938                goto out_finish;
 939        }
 940        header = (struct swsusp_info *)data_of(snapshot);
 941        error = swap_write_page(&handle, header, NULL);
 942        if (!error) {
 943                error = (flags & SF_NOCOMPRESS_MODE) ?
 944                        save_image(&handle, &snapshot, pages - 1) :
 945                        save_image_lzo(&handle, &snapshot, pages - 1);
 946        }
 947out_finish:
 948        error = swap_writer_finish(&handle, flags, error);
 949        return error;
 950}
 951
 952/**
 953 *      The following functions allow us to read data using a swap map
 954 *      in a file-like way.
 955 */
 956
 957static void release_swap_reader(struct swap_map_handle *handle)
 958{
 959        struct swap_map_page_list *tmp;
 960
 961        while (handle->maps) {
 962                if (handle->maps->map)
 963                        free_page((unsigned long)handle->maps->map);
 964                tmp = handle->maps;
 965                handle->maps = handle->maps->next;
 966                kfree(tmp);
 967        }
 968        handle->cur = NULL;
 969}
 970
 971static int get_swap_reader(struct swap_map_handle *handle,
 972                unsigned int *flags_p)
 973{
 974        int error;
 975        struct swap_map_page_list *tmp, *last;
 976        sector_t offset;
 977
 978        *flags_p = swsusp_header->flags;
 979
 980        if (!swsusp_header->image) /* how can this happen? */
 981                return -EINVAL;
 982
 983        handle->cur = NULL;
 984        last = handle->maps = NULL;
 985        offset = swsusp_header->image;
 986        while (offset) {
 987                tmp = kzalloc(sizeof(*handle->maps), GFP_KERNEL);
 988                if (!tmp) {
 989                        release_swap_reader(handle);
 990                        return -ENOMEM;
 991                }
 992                if (!handle->maps)
 993                        handle->maps = tmp;
 994                if (last)
 995                        last->next = tmp;
 996                last = tmp;
 997
 998                tmp->map = (struct swap_map_page *)
 999                           __get_free_page(GFP_NOIO | __GFP_HIGH);
1000                if (!tmp->map) {
1001                        release_swap_reader(handle);
1002                        return -ENOMEM;
1003                }
1004
1005                error = hib_submit_io(REQ_OP_READ, 0, offset, tmp->map, NULL);
1006                if (error) {
1007                        release_swap_reader(handle);
1008                        return error;
1009                }
1010                offset = tmp->map->next_swap;
1011        }
1012        handle->k = 0;
1013        handle->cur = handle->maps->map;
1014        return 0;
1015}
1016
1017static int swap_read_page(struct swap_map_handle *handle, void *buf,
1018                struct hib_bio_batch *hb)
1019{
1020        sector_t offset;
1021        int error;
1022        struct swap_map_page_list *tmp;
1023
1024        if (!handle->cur)
1025                return -EINVAL;
1026        offset = handle->cur->entries[handle->k];
1027        if (!offset)
1028                return -EFAULT;
1029        error = hib_submit_io(REQ_OP_READ, 0, offset, buf, hb);
1030        if (error)
1031                return error;
1032        if (++handle->k >= MAP_PAGE_ENTRIES) {
1033                handle->k = 0;
1034                free_page((unsigned long)handle->maps->map);
1035                tmp = handle->maps;
1036                handle->maps = handle->maps->next;
1037                kfree(tmp);
1038                if (!handle->maps)
1039                        release_swap_reader(handle);
1040                else
1041                        handle->cur = handle->maps->map;
1042        }
1043        return error;
1044}
1045
1046static int swap_reader_finish(struct swap_map_handle *handle)
1047{
1048        release_swap_reader(handle);
1049
1050        return 0;
1051}
1052
1053/**
1054 *      load_image - load the image using the swap map handle
1055 *      @handle and the snapshot handle @snapshot
1056 *      (assume there are @nr_to_read pages to load)
1057 */
1058
1059static int load_image(struct swap_map_handle *handle,
1060                      struct snapshot_handle *snapshot,
1061                      unsigned int nr_to_read)
1062{
1063        unsigned int m;
1064        int ret = 0;
1065        ktime_t start;
1066        ktime_t stop;
1067        struct hib_bio_batch hb;
1068        int err2;
1069        unsigned nr_pages;
1070
1071        hib_init_batch(&hb);
1072
1073        clean_pages_on_read = true;
1074        pr_info("Loading image data pages (%u pages)...\n", nr_to_read);
1075        m = nr_to_read / 10;
1076        if (!m)
1077                m = 1;
1078        nr_pages = 0;
1079        start = ktime_get();
1080        for ( ; ; ) {
1081                ret = snapshot_write_next(snapshot);
1082                if (ret <= 0)
1083                        break;
1084                ret = swap_read_page(handle, data_of(*snapshot), &hb);
1085                if (ret)
1086                        break;
1087                if (snapshot->sync_read)
1088                        ret = hib_wait_io(&hb);
1089                if (ret)
1090                        break;
1091                if (!(nr_pages % m))
1092                        pr_info("Image loading progress: %3d%%\n",
1093                                nr_pages / m * 10);
1094                nr_pages++;
1095        }
1096        err2 = hib_wait_io(&hb);
1097        hib_finish_batch(&hb);
1098        stop = ktime_get();
1099        if (!ret)
1100                ret = err2;
1101        if (!ret) {
1102                pr_info("Image loading done\n");
1103                snapshot_write_finalize(snapshot);
1104                if (!snapshot_image_loaded(snapshot))
1105                        ret = -ENODATA;
1106        }
1107        swsusp_show_speed(start, stop, nr_to_read, "Read");
1108        return ret;
1109}
1110
1111/**
1112 * Structure used for LZO data decompression.
1113 */
1114struct dec_data {
1115        struct task_struct *thr;                  /* thread */
1116        atomic_t ready;                           /* ready to start flag */
1117        atomic_t stop;                            /* ready to stop flag */
1118        int ret;                                  /* return code */
1119        wait_queue_head_t go;                     /* start decompression */
1120        wait_queue_head_t done;                   /* decompression done */
1121        size_t unc_len;                           /* uncompressed length */
1122        size_t cmp_len;                           /* compressed length */
1123        unsigned char unc[LZO_UNC_SIZE];          /* uncompressed buffer */
1124        unsigned char cmp[LZO_CMP_SIZE];          /* compressed buffer */
1125};
1126
1127/**
1128 * Decompression function that runs in its own thread.
1129 */
1130static int lzo_decompress_threadfn(void *data)
1131{
1132        struct dec_data *d = data;
1133
1134        while (1) {
1135                wait_event(d->go, atomic_read(&d->ready) ||
1136                                  kthread_should_stop());
1137                if (kthread_should_stop()) {
1138                        d->thr = NULL;
1139                        d->ret = -1;
1140                        atomic_set(&d->stop, 1);
1141                        wake_up(&d->done);
1142                        break;
1143                }
1144                atomic_set(&d->ready, 0);
1145
1146                d->unc_len = LZO_UNC_SIZE;
1147                d->ret = lzo1x_decompress_safe(d->cmp + LZO_HEADER, d->cmp_len,
1148                                               d->unc, &d->unc_len);
1149                if (clean_pages_on_decompress)
1150                        flush_icache_range((unsigned long)d->unc,
1151                                           (unsigned long)d->unc + d->unc_len);
1152
1153                atomic_set(&d->stop, 1);
1154                wake_up(&d->done);
1155        }
1156        return 0;
1157}
1158
1159/**
1160 * load_image_lzo - Load compressed image data and decompress it with LZO.
1161 * @handle: Swap map handle to use for loading data.
1162 * @snapshot: Image to copy uncompressed data into.
1163 * @nr_to_read: Number of pages to load.
1164 */
1165static int load_image_lzo(struct swap_map_handle *handle,
1166                          struct snapshot_handle *snapshot,
1167                          unsigned int nr_to_read)
1168{
1169        unsigned int m;
1170        int ret = 0;
1171        int eof = 0;
1172        struct hib_bio_batch hb;
1173        ktime_t start;
1174        ktime_t stop;
1175        unsigned nr_pages;
1176        size_t off;
1177        unsigned i, thr, run_threads, nr_threads;
1178        unsigned ring = 0, pg = 0, ring_size = 0,
1179                 have = 0, want, need, asked = 0;
1180        unsigned long read_pages = 0;
1181        unsigned char **page = NULL;
1182        struct dec_data *data = NULL;
1183        struct crc_data *crc = NULL;
1184
1185        hib_init_batch(&hb);
1186
1187        /*
1188         * We'll limit the number of decompression threads to keep the memory
1189         * footprint down.
1190         */
1191        nr_threads = num_online_cpus() - 1;
1192        nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);
1193
1194        page = vmalloc(array_size(LZO_MAX_RD_PAGES, sizeof(*page)));
1195        if (!page) {
1196                pr_err("Failed to allocate LZO page\n");
1197                ret = -ENOMEM;
1198                goto out_clean;
1199        }
1200
1201        data = vmalloc(array_size(nr_threads, sizeof(*data)));
1202        if (!data) {
1203                pr_err("Failed to allocate LZO data\n");
1204                ret = -ENOMEM;
1205                goto out_clean;
1206        }
1207        for (thr = 0; thr < nr_threads; thr++)
1208                memset(&data[thr], 0, offsetof(struct dec_data, go));
1209
1210        crc = kmalloc(sizeof(*crc), GFP_KERNEL);
1211        if (!crc) {
1212                pr_err("Failed to allocate crc\n");
1213                ret = -ENOMEM;
1214                goto out_clean;
1215        }
1216        memset(crc, 0, offsetof(struct crc_data, go));
1217
1218        clean_pages_on_decompress = true;
1219
1220        /*
1221         * Start the decompression threads.
1222         */
1223        for (thr = 0; thr < nr_threads; thr++) {
1224                init_waitqueue_head(&data[thr].go);
1225                init_waitqueue_head(&data[thr].done);
1226
1227                data[thr].thr = kthread_run(lzo_decompress_threadfn,
1228                                            &data[thr],
1229                                            "image_decompress/%u", thr);
1230                if (IS_ERR(data[thr].thr)) {
1231                        data[thr].thr = NULL;
1232                        pr_err("Cannot start decompression threads\n");
1233                        ret = -ENOMEM;
1234                        goto out_clean;
1235                }
1236        }
1237
1238        /*
1239         * Start the CRC32 thread.
1240         */
1241        init_waitqueue_head(&crc->go);
1242        init_waitqueue_head(&crc->done);
1243
1244        handle->crc32 = 0;
1245        crc->crc32 = &handle->crc32;
1246        for (thr = 0; thr < nr_threads; thr++) {
1247                crc->unc[thr] = data[thr].unc;
1248                crc->unc_len[thr] = &data[thr].unc_len;
1249        }
1250
1251        crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
1252        if (IS_ERR(crc->thr)) {
1253                crc->thr = NULL;
1254                pr_err("Cannot start CRC32 thread\n");
1255                ret = -ENOMEM;
1256                goto out_clean;
1257        }
1258
1259        /*
1260         * Set the number of pages for read buffering.
1261         * This is complete guesswork, because we'll only know the real
1262         * picture once prepare_image() is called, which is much later on
1263         * during the image load phase. We'll assume the worst case and
1264         * say that none of the image pages are from high memory.
1265         */
1266        if (low_free_pages() > snapshot_get_image_size())
1267                read_pages = (low_free_pages() - snapshot_get_image_size()) / 2;
1268        read_pages = clamp_val(read_pages, LZO_MIN_RD_PAGES, LZO_MAX_RD_PAGES);
1269
1270        for (i = 0; i < read_pages; i++) {
1271                page[i] = (void *)__get_free_page(i < LZO_CMP_PAGES ?
1272                                                  GFP_NOIO | __GFP_HIGH :
1273                                                  GFP_NOIO | __GFP_NOWARN |
1274                                                  __GFP_NORETRY);
1275
1276                if (!page[i]) {
1277                        if (i < LZO_CMP_PAGES) {
1278                                ring_size = i;
1279                                pr_err("Failed to allocate LZO pages\n");
1280                                ret = -ENOMEM;
1281                                goto out_clean;
1282                        } else {
1283                                break;
1284                        }
1285                }
1286        }
1287        want = ring_size = i;
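
            /*
             * page[] now acts as a read-ahead ring buffer: 'ring' is the
             * producer index (next slot to read into), 'pg' the consumer
             * index (next slot handed to a decompressor), 'asked' counts
             * reads submitted but not yet waited for, 'have' counts pages
             * whose reads have completed and 'want' counts free slots.
             */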
1288
1289        pr_info("Using %u thread(s) for decompression\n", nr_threads);
1290        pr_info("Loading and decompressing image data (%u pages)...\n",
1291                nr_to_read);
1292        m = nr_to_read / 10;
1293        if (!m)
1294                m = 1;
1295        nr_pages = 0;
1296        start = ktime_get();
1297
1298        ret = snapshot_write_next(snapshot);
1299        if (ret <= 0)
1300                goto out_finish;
1301
1302        for (;;) {
1303                for (i = 0; !eof && i < want; i++) {
1304                        ret = swap_read_page(handle, page[ring], &hb);
1305                        if (ret) {
1306                                /*
1307                                 * On real read error, finish. On end of data,
1308                                 * set EOF flag and just exit the read loop.
1309                                 */
1310                                if (handle->cur &&
1311                                    handle->cur->entries[handle->k]) {
1312                                        goto out_finish;
1313                                } else {
1314                                        eof = 1;
1315                                        break;
1316                                }
1317                        }
1318                        if (++ring >= ring_size)
1319                                ring = 0;
1320                }
1321                asked += i;
1322                want -= i;
1323
1324                /*
1325                 * We are out of data, wait for some more.
1326                 */
1327                if (!have) {
1328                        if (!asked)
1329                                break;
1330
1331                        ret = hib_wait_io(&hb);
1332                        if (ret)
1333                                goto out_finish;
1334                        have += asked;
1335                        asked = 0;
1336                        if (eof)
1337                                eof = 2;
1338                }
1339
1340                if (crc->run_threads) {
1341                        wait_event(crc->done, atomic_read(&crc->stop));
1342                        atomic_set(&crc->stop, 0);
1343                        crc->run_threads = 0;
1344                }
1345
1346                for (thr = 0; have && thr < nr_threads; thr++) {
1347                        data[thr].cmp_len = *(size_t *)page[pg];
1348                        if (unlikely(!data[thr].cmp_len ||
1349                                     data[thr].cmp_len >
1350                                     lzo1x_worst_compress(LZO_UNC_SIZE))) {
1351                                pr_err("Invalid LZO compressed length\n");
1352                                ret = -1;
1353                                goto out_finish;
1354                        }
1355
1356                        need = DIV_ROUND_UP(data[thr].cmp_len + LZO_HEADER,
1357                                            PAGE_SIZE);
1358                        if (need > have) {
1359                                if (eof > 1) {
1360                                        ret = -1;
1361                                        goto out_finish;
1362                                }
1363                                break;
1364                        }
1365
1366                        for (off = 0;
1367                             off < LZO_HEADER + data[thr].cmp_len;
1368                             off += PAGE_SIZE) {
1369                                memcpy(data[thr].cmp + off,
1370                                       page[pg], PAGE_SIZE);
1371                                have--;
1372                                want++;
1373                                if (++pg >= ring_size)
1374                                        pg = 0;
1375                        }
1376
1377                        atomic_set(&data[thr].ready, 1);
1378                        wake_up(&data[thr].go);
1379                }
1380
1381                /*
1382                 * Wait for more data while we are decompressing.
1383                 */
1384                if (have < LZO_CMP_PAGES && asked) {
1385                        ret = hib_wait_io(&hb);
1386                        if (ret)
1387                                goto out_finish;
1388                        have += asked;
1389                        asked = 0;
1390                        if (eof)
1391                                eof = 2;
1392                }
1393
1394                for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
1395                        wait_event(data[thr].done,
1396                                   atomic_read(&data[thr].stop));
1397                        atomic_set(&data[thr].stop, 0);
1398
1399                        ret = data[thr].ret;
1400
1401                        if (ret < 0) {
1402                                pr_err("LZO decompression failed\n");
1403                                goto out_finish;
1404                        }
1405
1406                        if (unlikely(!data[thr].unc_len ||
1407                                     data[thr].unc_len > LZO_UNC_SIZE ||
1408                                     data[thr].unc_len & (PAGE_SIZE - 1))) {
1409                                pr_err("Invalid LZO uncompressed length\n");
1410                                ret = -1;
1411                                goto out_finish;
1412                        }
1413
1414                        for (off = 0;
1415                             off < data[thr].unc_len; off += PAGE_SIZE) {
1416                                memcpy(data_of(*snapshot),
1417                                       data[thr].unc + off, PAGE_SIZE);
1418
1419                                if (!(nr_pages % m))
1420                                        pr_info("Image loading progress: %3d%%\n",
1421                                                nr_pages / m * 10);
1422                                nr_pages++;
1423
1424                                ret = snapshot_write_next(snapshot);
1425                                if (ret <= 0) {
1426                                        crc->run_threads = thr + 1;
1427                                        atomic_set(&crc->ready, 1);
1428                                        wake_up(&crc->go);
1429                                        goto out_finish;
1430                                }
1431                        }
1432                }
1433
1434                crc->run_threads = thr;
1435                atomic_set(&crc->ready, 1);
1436                wake_up(&crc->go);
1437        }
1438
1439out_finish:
1440        if (crc->run_threads) {
1441                wait_event(crc->done, atomic_read(&crc->stop));
1442                atomic_set(&crc->stop, 0);
1443        }
1444        stop = ktime_get();
1445        if (!ret) {
1446                pr_info("Image loading done\n");
1447                snapshot_write_finalize(snapshot);
1448                if (!snapshot_image_loaded(snapshot))
1449                        ret = -ENODATA;
1450                if (!ret) {
1451                        if (swsusp_header->flags & SF_CRC32_MODE) {
1452                                if (handle->crc32 != swsusp_header->crc32) {
1453                                        pr_err("Invalid image CRC32!\n");
1454                                        ret = -ENODATA;
1455                                }
1456                        }
1457                }
1458        }
1459        swsusp_show_speed(start, stop, nr_to_read, "Read");
1460out_clean:
1461        hib_finish_batch(&hb);
1462        for (i = 0; i < ring_size; i++)
1463                free_page((unsigned long)page[i]);
1464        if (crc) {
1465                if (crc->thr)
1466                        kthread_stop(crc->thr);
1467                kfree(crc);
1468        }
1469        if (data) {
1470                for (thr = 0; thr < nr_threads; thr++)
1471                        if (data[thr].thr)
1472                                kthread_stop(data[thr].thr);
1473                vfree(data);
1474        }
1475        vfree(page);
1476
1477        return ret;
1478}
1479
1480/**
1481 *      swsusp_read - read the hibernation image.
1482 *      @flags_p: location to store the flags passed by the "frozen" kernel
1483 *                in the image header
1484 */
1485
1486int swsusp_read(unsigned int *flags_p)
1487{
1488        int error;
1489        struct swap_map_handle handle;
1490        struct snapshot_handle snapshot;
1491        struct swsusp_info *header;
1492
1493        memset(&snapshot, 0, sizeof(struct snapshot_handle));
1494        error = snapshot_write_next(&snapshot);
1495        if (error < (int)PAGE_SIZE)
1496                return error < 0 ? error : -EFAULT;
1497        header = (struct swsusp_info *)data_of(snapshot);
1498        error = get_swap_reader(&handle, flags_p);
1499        if (error)
1500                goto end;
1501        if (!error)
1502                error = swap_read_page(&handle, header, NULL);
1503        if (!error) {
1504                error = (*flags_p & SF_NOCOMPRESS_MODE) ?
1505                        load_image(&handle, &snapshot, header->pages - 1) :
1506                        load_image_lzo(&handle, &snapshot, header->pages - 1);
1507        }
1508        swap_reader_finish(&handle);
1509end:
1510        if (!error)
1511                pr_debug("Image successfully loaded\n");
1512        else
1513                pr_debug("Error %d resuming\n", error);
1514        return error;
1515}
1516
1517/**
1518 *      swsusp_check - Check for swsusp signature in the resume device
1519 */
1520
1521int swsusp_check(void)
1522{
1523        int error;
1524
1525        hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device,
1526                                            FMODE_READ, NULL);
1527        if (!IS_ERR(hib_resume_bdev)) {
1528                set_blocksize(hib_resume_bdev, PAGE_SIZE);
1529                clear_page(swsusp_header);
1530                error = hib_submit_io(REQ_OP_READ, 0,
1531                                        swsusp_resume_block,
1532                                        swsusp_header, NULL);
1533                if (error)
1534                        goto put;
1535
1536                if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
1537                        memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
1538                        /* Reset swap signature now */
1539                        error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC,
1540                                                swsusp_resume_block,
1541                                                swsusp_header, NULL);
1542                } else {
1543                        error = -EINVAL;
1544                }
1545
1546put:
1547                if (error)
1548                        blkdev_put(hib_resume_bdev, FMODE_READ);
1549                else
1550                        pr_debug("Image signature found, resuming\n");
1551        } else {
1552                error = PTR_ERR(hib_resume_bdev);
1553        }
1554
1555        if (error)
1556                pr_debug("Image not found (code %d)\n", error);
1557
1558        return error;
1559}
1560
1561/**
1562 *      swsusp_close - close swap device.
1563 */
1564
1565void swsusp_close(fmode_t mode)
1566{
1567        if (IS_ERR(hib_resume_bdev)) {
1568                pr_debug("Image device not initialised\n");
1569                return;
1570        }
1571
1572        blkdev_put(hib_resume_bdev, mode);
1573}
1574
1575/**
1576 *      swsusp_unmark - Unmark swsusp signature in the resume device
1577 */
1578
1579#ifdef CONFIG_SUSPEND
1580int swsusp_unmark(void)
1581{
1582        int error;
1583
1584        hib_submit_io(REQ_OP_READ, 0, swsusp_resume_block,
1585                      swsusp_header, NULL);
1586        if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
1587                memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
1588                error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC,
1589                                        swsusp_resume_block,
1590                                        swsusp_header, NULL);
1591        } else {
1592                pr_err("Cannot find swsusp signature!\n");
1593                error = -ENODEV;
1594        }
1595
1596        /*
1597         * We just returned from suspend; we don't need the image any more.
1598         */
1599        free_all_swap_pages(root_swap);
1600
1601        return error;
1602}
1603#endif
1604
1605static int __init swsusp_header_init(void)
1606{
1607        swsusp_header = (struct swsusp_header *)__get_free_page(GFP_KERNEL);
1608        if (!swsusp_header)
1609                panic("Could not allocate memory for swsusp_header\n");
1610        return 0;
1611}
1612
1613core_initcall(swsusp_header_init);
1614