linux/kernel/power/swap.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * linux/kernel/power/swap.c
   4 *
   5 * This file provides functions for reading the suspend image from
   6 * and writing it to a swap partition.
   7 *
   8 * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz>
   9 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
  10 * Copyright (C) 2010-2012 Bojan Smojver <bojan@rexursive.com>
  11 */
  12
  13#define pr_fmt(fmt) "PM: " fmt
  14
  15#include <linux/module.h>
  16#include <linux/file.h>
  17#include <linux/delay.h>
  18#include <linux/bitops.h>
  19#include <linux/genhd.h>
  20#include <linux/device.h>
  21#include <linux/bio.h>
  22#include <linux/blkdev.h>
  23#include <linux/swap.h>
  24#include <linux/swapops.h>
  25#include <linux/pm.h>
  26#include <linux/slab.h>
  27#include <linux/lzo.h>
  28#include <linux/vmalloc.h>
  29#include <linux/cpumask.h>
  30#include <linux/atomic.h>
  31#include <linux/kthread.h>
  32#include <linux/crc32.h>
  33#include <linux/ktime.h>
  34
  35#include "power.h"
  36
  37#define HIBERNATE_SIG   "S1SUSPEND"
  38
  39/*
  40 * When reading an {un,}compressed image, we may restore pages in place,
  41 * in which case some architectures need these pages to be cleaned before they
  42 * can be executed. We don't know which pages these may be, so clean the lot.
  43 */
  44static bool clean_pages_on_read;
  45static bool clean_pages_on_decompress;
  46
  47/*
  48 *      The swap map is a data structure used for keeping track of each page
  49 *      written to a swap partition.  It consists of many swap_map_page
  50 *      structures that each contain an array of MAP_PAGE_ENTRIES swap entries.
  51 *      These structures are stored on the swap and linked together with the
  52 *      help of the .next_swap member.
  53 *
  54 *      The swap map is created during suspend.  The swap map pages are
  55 *      allocated and populated one at a time, so we only need one memory
  56 *      page to set up the entire structure.
  57 *
  58 *      During resume we read all swap_map_page structures into a list.
  59 */
  60
  61#define MAP_PAGE_ENTRIES        (PAGE_SIZE / sizeof(sector_t) - 1)
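
/*
 * For a sense of scale (assuming 4 KiB pages and an 8-byte sector_t, the
 * common configuration): MAP_PAGE_ENTRIES is 4096 / 8 - 1 = 511, so a
 * 1 GiB image (262144 data pages) needs 514 swap_map_page structures on
 * top of the data pages, i.e. roughly 0.2% of metadata overhead.
 */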
  62
  63/*
  64 * Number of free pages that are not in high memory.
  65 */
  66static inline unsigned long low_free_pages(void)
  67{
  68        return nr_free_pages() - nr_free_highpages();
  69}
  70
  71/*
  72 * Number of pages required to be kept free while writing the image. Always
  73 * half of all available low pages before the writing starts.
  74 */
  75static inline unsigned long reqd_free_pages(void)
  76{
  77        return low_free_pages() / 2;
  78}
  79
  80struct swap_map_page {
  81        sector_t entries[MAP_PAGE_ENTRIES];
  82        sector_t next_swap;
  83};
  84
  85struct swap_map_page_list {
  86        struct swap_map_page *map;
  87        struct swap_map_page_list *next;
  88};
  89
  90/**
  91 *      The swap_map_handle structure is used for handling swap in
  92 *      a file-like way
  93 */
  94
  95struct swap_map_handle {
  96        struct swap_map_page *cur;
  97        struct swap_map_page_list *maps;
  98        sector_t cur_swap;
  99        sector_t first_sector;
 100        unsigned int k;
 101        unsigned long reqd_free_pages;
 102        u32 crc32;
 103};
 104
 105struct swsusp_header {
 106        char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int) -
 107                      sizeof(u32)];
 108        u32     crc32;
 109        sector_t image;
 110        unsigned int flags;     /* Flags to pass to the "boot" kernel */
 111        char    orig_sig[10];
 112        char    sig[10];
 113} __packed;
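
/*
 * Note on the layout: the sizes are chosen so that the structure fills
 * exactly one page and 'sig' occupies its last 10 bytes. With 4 KiB pages
 * and an 8-byte sector_t (an assumption for illustration), 'reserved' is
 * 4060 bytes, which places 'sig' at the offset where a swap area normally
 * keeps its "SWAPSPACE2" signature. mark_swapfiles() can therefore
 * overwrite that signature in place while preserving the original in
 * 'orig_sig' for restoration by swsusp_check() or swsusp_unmark().
 */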
 114
 115static struct swsusp_header *swsusp_header;
 116
 117/**
 118 *      The following functions are used for tracking the allocated
 119 *      swap pages, so that they can be freed in case of an error.
 120 */
 121
 122struct swsusp_extent {
 123        struct rb_node node;
 124        unsigned long start;
 125        unsigned long end;
 126};
 127
 128static struct rb_root swsusp_extents = RB_ROOT;
 129
 130static int swsusp_extents_insert(unsigned long swap_offset)
 131{
 132        struct rb_node **new = &(swsusp_extents.rb_node);
 133        struct rb_node *parent = NULL;
 134        struct swsusp_extent *ext;
 135
 136        /* Figure out where to put the new node */
 137        while (*new) {
 138                ext = rb_entry(*new, struct swsusp_extent, node);
 139                parent = *new;
 140                if (swap_offset < ext->start) {
 141                        /* Try to merge */
 142                        if (swap_offset == ext->start - 1) {
 143                                ext->start--;
 144                                return 0;
 145                        }
 146                        new = &((*new)->rb_left);
 147                } else if (swap_offset > ext->end) {
 148                        /* Try to merge */
 149                        if (swap_offset == ext->end + 1) {
 150                                ext->end++;
 151                                return 0;
 152                        }
 153                        new = &((*new)->rb_right);
 154                } else {
 155                        /* It already is in the tree */
 156                        return -EINVAL;
 157                }
 158        }
 159        /* Add the new node and rebalance the tree. */
 160        ext = kzalloc(sizeof(struct swsusp_extent), GFP_KERNEL);
 161        if (!ext)
 162                return -ENOMEM;
 163
 164        ext->start = swap_offset;
 165        ext->end = swap_offset;
 166        rb_link_node(&ext->node, parent, new);
 167        rb_insert_color(&ext->node, &swsusp_extents);
 168        return 0;
 169}
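
/*
 * For example, inserting consecutive swap offsets 10, 11 and 12 in that
 * order grows a single extent covering 10..12; inserting an offset that is
 * already covered returns -EINVAL, and an offset that is not adjacent to
 * any existing extent gets a node of its own.
 */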
 170
 171/**
 172 *      alloc_swapdev_block - allocate a swap page and register that it has
 173 *      been allocated, so that it can be freed in case of an error.
 174 */
 175
 176sector_t alloc_swapdev_block(int swap)
 177{
 178        unsigned long offset;
 179
 180        offset = swp_offset(get_swap_page_of_type(swap));
 181        if (offset) {
 182                if (swsusp_extents_insert(offset))
 183                        swap_free(swp_entry(swap, offset));
 184                else
 185                        return swapdev_block(swap, offset);
 186        }
 187        return 0;
 188}
 189
 190/**
 191 *      free_all_swap_pages - free swap pages allocated for saving image data.
 192 *      It also frees the extents used to register which swap entries had been
 193 *      allocated.
 194 */
 195
 196void free_all_swap_pages(int swap)
 197{
 198        struct rb_node *node;
 199
 200        while ((node = swsusp_extents.rb_node)) {
 201                struct swsusp_extent *ext;
 202                unsigned long offset;
 203
 204                ext = rb_entry(node, struct swsusp_extent, node);
 205                rb_erase(node, &swsusp_extents);
 206                for (offset = ext->start; offset <= ext->end; offset++)
 207                        swap_free(swp_entry(swap, offset));
 208
 209                kfree(ext);
 210        }
 211}
 212
 213int swsusp_swap_in_use(void)
 214{
 215        return (swsusp_extents.rb_node != NULL);
 216}
 217
 218/*
 219 * General things
 220 */
 221
 222static unsigned short root_swap = 0xffff;
 223static struct block_device *hib_resume_bdev;
 224
 225struct hib_bio_batch {
 226        atomic_t                count;
 227        wait_queue_head_t       wait;
 228        blk_status_t            error;
 229};
 230
 231static void hib_init_batch(struct hib_bio_batch *hb)
 232{
 233        atomic_set(&hb->count, 0);
 234        init_waitqueue_head(&hb->wait);
 235        hb->error = BLK_STS_OK;
 236}
 237
 238static void hib_end_io(struct bio *bio)
 239{
 240        struct hib_bio_batch *hb = bio->bi_private;
 241        struct page *page = bio_first_page_all(bio);
 242
 243        if (bio->bi_status) {
 244                pr_alert("Read-error on swap-device (%u:%u:%Lu)\n",
 245                         MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
 246                         (unsigned long long)bio->bi_iter.bi_sector);
 247        }
 248
 249        if (bio_data_dir(bio) == WRITE)
 250                put_page(page);
 251        else if (clean_pages_on_read)
 252                flush_icache_range((unsigned long)page_address(page),
 253                                   (unsigned long)page_address(page) + PAGE_SIZE);
 254
 255        if (bio->bi_status && !hb->error)
 256                hb->error = bio->bi_status;
 257        if (atomic_dec_and_test(&hb->count))
 258                wake_up(&hb->wait);
 259
 260        bio_put(bio);
 261}
 262
 263static int hib_submit_io(int op, int op_flags, pgoff_t page_off, void *addr,
 264                struct hib_bio_batch *hb)
 265{
 266        struct page *page = virt_to_page(addr);
 267        struct bio *bio;
 268        int error = 0;
 269
 270        bio = bio_alloc(GFP_NOIO | __GFP_HIGH, 1);
 271        bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9);
 272        bio_set_dev(bio, hib_resume_bdev);
 273        bio_set_op_attrs(bio, op, op_flags);
 274
 275        if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
 276                pr_err("Adding page to bio failed at %llu\n",
 277                       (unsigned long long)bio->bi_iter.bi_sector);
 278                bio_put(bio);
 279                return -EFAULT;
 280        }
 281
 282        if (hb) {
 283                bio->bi_end_io = hib_end_io;
 284                bio->bi_private = hb;
 285                atomic_inc(&hb->count);
 286                submit_bio(bio);
 287        } else {
 288                error = submit_bio_wait(bio);
 289                bio_put(bio);
 290        }
 291
 292        return error;
 293}
 294
 295static blk_status_t hib_wait_io(struct hib_bio_batch *hb)
 296{
 297        wait_event(hb->wait, atomic_read(&hb->count) == 0);
 298        return blk_status_to_errno(hb->error);
 299}
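
/*
 * Typical usage of the batch interface, as a sketch of what the callers
 * below do (pseudo-code, error handling omitted):
 *
 *	struct hib_bio_batch hb;
 *
 *	hib_init_batch(&hb);
 *	while (more_pages_to_write)
 *		hib_submit_io(REQ_OP_WRITE, REQ_SYNC, offset, buf, &hb);
 *	error = hib_wait_io(&hb);
 *
 * hib_wait_io() returns 0 on success or the first error seen by any of the
 * completions. Passing a NULL batch to hib_submit_io() makes the request
 * synchronous instead.
 */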
 300
 301/*
 302 * Saving part
 303 */
 304
 305static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags)
 306{
 307        int error;
 308
 309        hib_submit_io(REQ_OP_READ, 0, swsusp_resume_block,
 310                      swsusp_header, NULL);
 311        if (!memcmp("SWAP-SPACE",swsusp_header->sig, 10) ||
 312            !memcmp("SWAPSPACE2",swsusp_header->sig, 10)) {
 313                memcpy(swsusp_header->orig_sig,swsusp_header->sig, 10);
 314                memcpy(swsusp_header->sig, HIBERNATE_SIG, 10);
 315                swsusp_header->image = handle->first_sector;
 316                swsusp_header->flags = flags;
 317                if (flags & SF_CRC32_MODE)
 318                        swsusp_header->crc32 = handle->crc32;
 319                error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC,
 320                                      swsusp_resume_block, swsusp_header, NULL);
 321        } else {
 322                pr_err("Swap header not found!\n");
 323                error = -ENODEV;
 324        }
 325        return error;
 326}
 327
 328/**
 329 *      swsusp_swap_check - check if the resume device is a swap device
 330 *      and get its index (if so)
 331 *
 332 *      This is called before saving image
 333 *      This is called before saving the image
 334static int swsusp_swap_check(void)
 335{
 336        int res;
 337
 338        res = swap_type_of(swsusp_resume_device, swsusp_resume_block,
 339                        &hib_resume_bdev);
 340        if (res < 0)
 341                return res;
 342
 343        root_swap = res;
 344        res = blkdev_get(hib_resume_bdev, FMODE_WRITE, NULL);
 345        if (res)
 346                return res;
 347
 348        res = set_blocksize(hib_resume_bdev, PAGE_SIZE);
 349        if (res < 0)
 350                blkdev_put(hib_resume_bdev, FMODE_WRITE);
 351
 352        /*
 353         * Update the resume device to the one actually used,
 354         * so the test_resume mode can use it in case it is
 355         * invoked from hibernate() to test the snapshot.
 356         */
 357        swsusp_resume_device = hib_resume_bdev->bd_dev;
 358        return res;
 359}
 360
 361/**
 362 *      write_page - Write one page to given swap location.
 363 *      @buf:           Address we're writing.
 364 *      @offset:        Offset of the swap page we're writing to.
 365 *      @hb:            bio completion batch
 366 */
 367
 368static int write_page(void *buf, sector_t offset, struct hib_bio_batch *hb)
 369{
 370        void *src;
 371        int ret;
 372
 373        if (!offset)
 374                return -ENOSPC;
 375
 376        if (hb) {
 377                src = (void *)__get_free_page(GFP_NOIO | __GFP_NOWARN |
 378                                              __GFP_NORETRY);
 379                if (src) {
 380                        copy_page(src, buf);
 381                } else {
 382                        ret = hib_wait_io(hb); /* Free pages */
 383                        if (ret)
 384                                return ret;
 385                        src = (void *)__get_free_page(GFP_NOIO |
 386                                                      __GFP_NOWARN |
 387                                                      __GFP_NORETRY);
 388                        if (src) {
 389                                copy_page(src, buf);
 390                        } else {
 391                                WARN_ON_ONCE(1);
 392                                hb = NULL;      /* Go synchronous */
 393                                src = buf;
 394                        }
 395                }
 396        } else {
 397                src = buf;
 398        }
 399        return hib_submit_io(REQ_OP_WRITE, REQ_SYNC, offset, src, hb);
 400}
 401
 402static void release_swap_writer(struct swap_map_handle *handle)
 403{
 404        if (handle->cur)
 405                free_page((unsigned long)handle->cur);
 406        handle->cur = NULL;
 407}
 408
 409static int get_swap_writer(struct swap_map_handle *handle)
 410{
 411        int ret;
 412
 413        ret = swsusp_swap_check();
 414        if (ret) {
 415                if (ret != -ENOSPC)
 416                        pr_err("Cannot find swap device, try swapon -a\n");
 417                return ret;
 418        }
 419        handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_KERNEL);
 420        if (!handle->cur) {
 421                ret = -ENOMEM;
 422                goto err_close;
 423        }
 424        handle->cur_swap = alloc_swapdev_block(root_swap);
 425        if (!handle->cur_swap) {
 426                ret = -ENOSPC;
 427                goto err_rel;
 428        }
 429        handle->k = 0;
 430        handle->reqd_free_pages = reqd_free_pages();
 431        handle->first_sector = handle->cur_swap;
 432        return 0;
 433err_rel:
 434        release_swap_writer(handle);
 435err_close:
 436        swsusp_close(FMODE_WRITE);
 437        return ret;
 438}
 439
 440static int swap_write_page(struct swap_map_handle *handle, void *buf,
 441                struct hib_bio_batch *hb)
 442{
 443        int error = 0;
 444        sector_t offset;
 445
 446        if (!handle->cur)
 447                return -EINVAL;
 448        offset = alloc_swapdev_block(root_swap);
 449        error = write_page(buf, offset, hb);
 450        if (error)
 451                return error;
 452        handle->cur->entries[handle->k++] = offset;
 453        if (handle->k >= MAP_PAGE_ENTRIES) {
 454                offset = alloc_swapdev_block(root_swap);
 455                if (!offset)
 456                        return -ENOSPC;
 457                handle->cur->next_swap = offset;
 458                error = write_page(handle->cur, handle->cur_swap, hb);
 459                if (error)
 460                        goto out;
 461                clear_page(handle->cur);
 462                handle->cur_swap = offset;
 463                handle->k = 0;
 464
 465                if (hb && low_free_pages() <= handle->reqd_free_pages) {
 466                        error = hib_wait_io(hb);
 467                        if (error)
 468                                goto out;
 469                        /*
 470                         * Recalculate the number of required free pages, to
 471                         * make sure we never take more than half.
 472                         */
 473                        handle->reqd_free_pages = reqd_free_pages();
 474                }
 475        }
 476 out:
 477        return error;
 478}
 479
 480static int flush_swap_writer(struct swap_map_handle *handle)
 481{
 482        if (handle->cur && handle->cur_swap)
 483                return write_page(handle->cur, handle->cur_swap, NULL);
 484        else
 485                return -EINVAL;
 486}
 487
 488static int swap_writer_finish(struct swap_map_handle *handle,
 489                unsigned int flags, int error)
 490{
 491        if (!error) {
 492                flush_swap_writer(handle);
 493                pr_info("S");
 494                error = mark_swapfiles(handle, flags);
 495                pr_cont("|\n");
 496        }
 497
 498        if (error)
 499                free_all_swap_pages(root_swap);
 500        release_swap_writer(handle);
 501        swsusp_close(FMODE_WRITE);
 502
 503        return error;
 504}
 505
 506/* We need to remember how much compressed data we need to read. */
 507#define LZO_HEADER      sizeof(size_t)
 508
 509/* Number of pages/bytes we'll compress at one time. */
 510#define LZO_UNC_PAGES   32
 511#define LZO_UNC_SIZE    (LZO_UNC_PAGES * PAGE_SIZE)
 512
 513/* Number of pages/bytes we need for compressed data (worst case). */
 514#define LZO_CMP_PAGES   DIV_ROUND_UP(lzo1x_worst_compress(LZO_UNC_SIZE) + \
 515                                     LZO_HEADER, PAGE_SIZE)
 516#define LZO_CMP_SIZE    (LZO_CMP_PAGES * PAGE_SIZE)
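
/*
 * Illustrative sizes (assuming 4 KiB pages and a 64-bit size_t): each unit
 * of work is LZO_UNC_SIZE = 128 KiB of uncompressed data, and since
 * lzo1x_worst_compress() adds roughly 1/16th plus a small constant to the
 * input size, LZO_CMP_PAGES works out to 35 pages (~140 KiB) of buffer,
 * enough even for completely incompressible data.
 */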
 517
 518/* Maximum number of threads for compression/decompression. */
 519#define LZO_THREADS     3
 520
 521/* Minimum/maximum number of pages for read buffering. */
 522#define LZO_MIN_RD_PAGES        1024
 523#define LZO_MAX_RD_PAGES        8192
 524
 525
 526/**
 527 *      save_image - save the suspend image data
 528 */
 529
 530static int save_image(struct swap_map_handle *handle,
 531                      struct snapshot_handle *snapshot,
 532                      unsigned int nr_to_write)
 533{
 534        unsigned int m;
 535        int ret;
 536        int nr_pages;
 537        int err2;
 538        struct hib_bio_batch hb;
 539        ktime_t start;
 540        ktime_t stop;
 541
 542        hib_init_batch(&hb);
 543
 544        pr_info("Saving image data pages (%u pages)...\n",
 545                nr_to_write);
 546        m = nr_to_write / 10;
 547        if (!m)
 548                m = 1;
 549        nr_pages = 0;
 550        start = ktime_get();
 551        while (1) {
 552                ret = snapshot_read_next(snapshot);
 553                if (ret <= 0)
 554                        break;
 555                ret = swap_write_page(handle, data_of(*snapshot), &hb);
 556                if (ret)
 557                        break;
 558                if (!(nr_pages % m))
 559                        pr_info("Image saving progress: %3d%%\n",
 560                                nr_pages / m * 10);
 561                nr_pages++;
 562        }
 563        err2 = hib_wait_io(&hb);
 564        stop = ktime_get();
 565        if (!ret)
 566                ret = err2;
 567        if (!ret)
 568                pr_info("Image saving done\n");
 569        swsusp_show_speed(start, stop, nr_to_write, "Wrote");
 570        return ret;
 571}
 572
 573/**
 574 * Structure used for CRC32.
 575 */
 576struct crc_data {
 577        struct task_struct *thr;                  /* thread */
 578        atomic_t ready;                           /* ready to start flag */
 579        atomic_t stop;                            /* ready to stop flag */
 580        unsigned run_threads;                     /* nr current threads */
 581        wait_queue_head_t go;                     /* start crc update */
 582        wait_queue_head_t done;                   /* crc update done */
 583        u32 *crc32;                               /* points to handle's crc32 */
 584        size_t *unc_len[LZO_THREADS];             /* uncompressed lengths */
 585        unsigned char *unc[LZO_THREADS];          /* uncompressed data */
 586};
 587
 588/**
 589 * CRC32 update function that runs in its own thread.
 590 */
 591static int crc32_threadfn(void *data)
 592{
 593        struct crc_data *d = data;
 594        unsigned i;
 595
 596        while (1) {
 597                wait_event(d->go, atomic_read(&d->ready) ||
 598                                  kthread_should_stop());
 599                if (kthread_should_stop()) {
 600                        d->thr = NULL;
 601                        atomic_set(&d->stop, 1);
 602                        wake_up(&d->done);
 603                        break;
 604                }
 605                atomic_set(&d->ready, 0);
 606
 607                for (i = 0; i < d->run_threads; i++)
 608                        *d->crc32 = crc32_le(*d->crc32,
 609                                             d->unc[i], *d->unc_len[i]);
 610                atomic_set(&d->stop, 1);
 611                wake_up(&d->done);
 612        }
 613        return 0;
 614}
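
/*
 * The ready/go and stop/done pairs above implement a simple handshake that
 * is shared with the compression and decompression threads below: the
 * producer fills the buffers, sets 'ready' and wakes 'go'; the worker
 * clears 'ready', does its job, sets 'stop' and wakes 'done'; the producer
 * then waits on 'done' and clears 'stop' before reusing the buffers.
 */
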
 615/**
 616 * Structure used for LZO data compression.
 617 */
 618struct cmp_data {
 619        struct task_struct *thr;                  /* thread */
 620        atomic_t ready;                           /* ready to start flag */
 621        atomic_t stop;                            /* ready to stop flag */
 622        int ret;                                  /* return code */
 623        wait_queue_head_t go;                     /* start compression */
 624        wait_queue_head_t done;                   /* compression done */
 625        size_t unc_len;                           /* uncompressed length */
 626        size_t cmp_len;                           /* compressed length */
 627        unsigned char unc[LZO_UNC_SIZE];          /* uncompressed buffer */
 628        unsigned char cmp[LZO_CMP_SIZE];          /* compressed buffer */
 629        unsigned char wrk[LZO1X_1_MEM_COMPRESS];  /* compression workspace */
 630};
 631
 632/**
 633 * Compression function that runs in its own thread.
 634 */
 635static int lzo_compress_threadfn(void *data)
 636{
 637        struct cmp_data *d = data;
 638
 639        while (1) {
 640                wait_event(d->go, atomic_read(&d->ready) ||
 641                                  kthread_should_stop());
 642                if (kthread_should_stop()) {
 643                        d->thr = NULL;
 644                        d->ret = -1;
 645                        atomic_set(&d->stop, 1);
 646                        wake_up(&d->done);
 647                        break;
 648                }
 649                atomic_set(&d->ready, 0);
 650
 651                d->ret = lzo1x_1_compress(d->unc, d->unc_len,
 652                                          d->cmp + LZO_HEADER, &d->cmp_len,
 653                                          d->wrk);
 654                atomic_set(&d->stop, 1);
 655                wake_up(&d->done);
 656        }
 657        return 0;
 658}
 659
 660/**
 661 * save_image_lzo - Save the suspend image data compressed with LZO.
 662 * @handle: Swap map handle to use for saving the image.
 663 * @snapshot: Image to read data from.
 664 * @nr_to_write: Number of pages to save.
 665 */
 666static int save_image_lzo(struct swap_map_handle *handle,
 667                          struct snapshot_handle *snapshot,
 668                          unsigned int nr_to_write)
 669{
 670        unsigned int m;
 671        int ret = 0;
 672        int nr_pages;
 673        int err2;
 674        struct hib_bio_batch hb;
 675        ktime_t start;
 676        ktime_t stop;
 677        size_t off;
 678        unsigned thr, run_threads, nr_threads;
 679        unsigned char *page = NULL;
 680        struct cmp_data *data = NULL;
 681        struct crc_data *crc = NULL;
 682
 683        hib_init_batch(&hb);
 684
 685        /*
 686         * We'll limit the number of threads for compression to limit memory
 687         * footprint.
 688         */
 689        nr_threads = num_online_cpus() - 1;
 690        nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);
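        /*
         * For example, with 8 online CPUs this clamps 7 down to LZO_THREADS
         * (3); on a single-CPU machine the initial 0 is raised to 1.
         */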
 691
 692        page = (void *)__get_free_page(GFP_NOIO | __GFP_HIGH);
 693        if (!page) {
 694                pr_err("Failed to allocate LZO page\n");
 695                ret = -ENOMEM;
 696                goto out_clean;
 697        }
 698
 699        data = vmalloc(array_size(nr_threads, sizeof(*data)));
 700        if (!data) {
 701                pr_err("Failed to allocate LZO data\n");
 702                ret = -ENOMEM;
 703                goto out_clean;
 704        }
 705        for (thr = 0; thr < nr_threads; thr++)
 706                memset(&data[thr], 0, offsetof(struct cmp_data, go));
 707
 708        crc = kmalloc(sizeof(*crc), GFP_KERNEL);
 709        if (!crc) {
 710                pr_err("Failed to allocate crc\n");
 711                ret = -ENOMEM;
 712                goto out_clean;
 713        }
 714        memset(crc, 0, offsetof(struct crc_data, go));
 715
 716        /*
 717         * Start the compression threads.
 718         */
 719        for (thr = 0; thr < nr_threads; thr++) {
 720                init_waitqueue_head(&data[thr].go);
 721                init_waitqueue_head(&data[thr].done);
 722
 723                data[thr].thr = kthread_run(lzo_compress_threadfn,
 724                                            &data[thr],
 725                                            "image_compress/%u", thr);
 726                if (IS_ERR(data[thr].thr)) {
 727                        data[thr].thr = NULL;
 728                        pr_err("Cannot start compression threads\n");
 729                        ret = -ENOMEM;
 730                        goto out_clean;
 731                }
 732        }
 733
 734        /*
 735         * Start the CRC32 thread.
 736         */
 737        init_waitqueue_head(&crc->go);
 738        init_waitqueue_head(&crc->done);
 739
 740        handle->crc32 = 0;
 741        crc->crc32 = &handle->crc32;
 742        for (thr = 0; thr < nr_threads; thr++) {
 743                crc->unc[thr] = data[thr].unc;
 744                crc->unc_len[thr] = &data[thr].unc_len;
 745        }
 746
 747        crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
 748        if (IS_ERR(crc->thr)) {
 749                crc->thr = NULL;
 750                pr_err("Cannot start CRC32 thread\n");
 751                ret = -ENOMEM;
 752                goto out_clean;
 753        }
 754
 755        /*
 756         * Adjust the number of required free pages after all allocations have
 757         * been done. We don't want to run out of pages when writing.
 758         */
 759        handle->reqd_free_pages = reqd_free_pages();
 760
 761        pr_info("Using %u thread(s) for compression\n", nr_threads);
 762        pr_info("Compressing and saving image data (%u pages)...\n",
 763                nr_to_write);
 764        m = nr_to_write / 10;
 765        if (!m)
 766                m = 1;
 767        nr_pages = 0;
 768        start = ktime_get();
 769        for (;;) {
 770                for (thr = 0; thr < nr_threads; thr++) {
 771                        for (off = 0; off < LZO_UNC_SIZE; off += PAGE_SIZE) {
 772                                ret = snapshot_read_next(snapshot);
 773                                if (ret < 0)
 774                                        goto out_finish;
 775
 776                                if (!ret)
 777                                        break;
 778
 779                                memcpy(data[thr].unc + off,
 780                                       data_of(*snapshot), PAGE_SIZE);
 781
 782                                if (!(nr_pages % m))
 783                                        pr_info("Image saving progress: %3d%%\n",
 784                                                nr_pages / m * 10);
 785                                nr_pages++;
 786                        }
 787                        if (!off)
 788                                break;
 789
 790                        data[thr].unc_len = off;
 791
 792                        atomic_set(&data[thr].ready, 1);
 793                        wake_up(&data[thr].go);
 794                }
 795
 796                if (!thr)
 797                        break;
 798
 799                crc->run_threads = thr;
 800                atomic_set(&crc->ready, 1);
 801                wake_up(&crc->go);
 802
 803                for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
 804                        wait_event(data[thr].done,
 805                                   atomic_read(&data[thr].stop));
 806                        atomic_set(&data[thr].stop, 0);
 807
 808                        ret = data[thr].ret;
 809
 810                        if (ret < 0) {
 811                                pr_err("LZO compression failed\n");
 812                                goto out_finish;
 813                        }
 814
 815                        if (unlikely(!data[thr].cmp_len ||
 816                                     data[thr].cmp_len >
 817                                     lzo1x_worst_compress(data[thr].unc_len))) {
 818                                pr_err("Invalid LZO compressed length\n");
 819                                ret = -1;
 820                                goto out_finish;
 821                        }
 822
 823                        *(size_t *)data[thr].cmp = data[thr].cmp_len;
 824
 825                        /*
 826                         * Given we are writing one page at a time to disk, we
 827                         * copy that much from the buffer, although the last
 828                         * bit will likely be smaller than full page. This is
 829                         * OK - we saved the length of the compressed data, so
 830                         * any garbage at the end will be discarded when we
 831                         * read it.
 832                         */
 833                        for (off = 0;
 834                             off < LZO_HEADER + data[thr].cmp_len;
 835                             off += PAGE_SIZE) {
 836                                memcpy(page, data[thr].cmp + off, PAGE_SIZE);
 837
 838                                ret = swap_write_page(handle, page, &hb);
 839                                if (ret)
 840                                        goto out_finish;
 841                        }
 842                }
 843
 844                wait_event(crc->done, atomic_read(&crc->stop));
 845                atomic_set(&crc->stop, 0);
 846        }
 847
 848out_finish:
 849        err2 = hib_wait_io(&hb);
 850        stop = ktime_get();
 851        if (!ret)
 852                ret = err2;
 853        if (!ret)
 854                pr_info("Image saving done\n");
 855        swsusp_show_speed(start, stop, nr_to_write, "Wrote");
 856out_clean:
 857        if (crc) {
 858                if (crc->thr)
 859                        kthread_stop(crc->thr);
 860                kfree(crc);
 861        }
 862        if (data) {
 863                for (thr = 0; thr < nr_threads; thr++)
 864                        if (data[thr].thr)
 865                                kthread_stop(data[thr].thr);
 866                vfree(data);
 867        }
 868        if (page) free_page((unsigned long)page);
 869
 870        return ret;
 871}
 872
 873/**
 874 *      enough_swap - Make sure we have enough swap to save the image.
 875 *
 876 *      Returns TRUE or FALSE after checking the total amount of swap
 877 *      space available from the resume partition.
 878 */
 879
 880static int enough_swap(unsigned int nr_pages)
 881{
 882        unsigned int free_swap = count_swap_pages(root_swap, 1);
 883        unsigned int required;
 884
 885        pr_debug("Free swap pages: %u\n", free_swap);
 886
 887        required = PAGES_FOR_IO + nr_pages;
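        /*
         * Illustrative numbers: for an uncompressed 1 GiB image with 4 KiB
         * pages (262144 data pages), the check below requires strictly more
         * than 262144 + PAGES_FOR_IO free swap pages on the resume device
         * (PAGES_FOR_IO being an extra reserve on top of the image itself).
         */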
 888        return free_swap > required;
 889}
 890
 891/**
 892 *      swsusp_write - Write entire image and metadata.
 893 *      @flags: flags to pass to the "boot" kernel in the image header
 894 *
 895 *      It is important _NOT_ to unmount filesystems at this point. We want
 896 *      them synced (in case something goes wrong) but we DO NOT want to mark
 897 *      the filesystems clean: they are not. (And it does not matter if we
 898 *      resume correctly; we'll mark the system clean anyway.)
 899 */
 900
 901int swsusp_write(unsigned int flags)
 902{
 903        struct swap_map_handle handle;
 904        struct snapshot_handle snapshot;
 905        struct swsusp_info *header;
 906        unsigned long pages;
 907        int error;
 908
 909        pages = snapshot_get_image_size();
 910        error = get_swap_writer(&handle);
 911        if (error) {
 912                pr_err("Cannot get swap writer\n");
 913                return error;
 914        }
 915        if (flags & SF_NOCOMPRESS_MODE) {
 916                if (!enough_swap(pages)) {
 917                        pr_err("Not enough free swap\n");
 918                        error = -ENOSPC;
 919                        goto out_finish;
 920                }
 921        }
 922        memset(&snapshot, 0, sizeof(struct snapshot_handle));
 923        error = snapshot_read_next(&snapshot);
 924        if (error < (int)PAGE_SIZE) {
 925                if (error >= 0)
 926                        error = -EFAULT;
 927
 928                goto out_finish;
 929        }
 930        header = (struct swsusp_info *)data_of(snapshot);
 931        error = swap_write_page(&handle, header, NULL);
 932        if (!error) {
 933                error = (flags & SF_NOCOMPRESS_MODE) ?
 934                        save_image(&handle, &snapshot, pages - 1) :
 935                        save_image_lzo(&handle, &snapshot, pages - 1);
 936        }
 937out_finish:
 938        error = swap_writer_finish(&handle, flags, error);
 939        return error;
 940}
 941
 942/**
 943 *      The following functions allow us to read data using a swap map
 944 *      in a file-alike way
 945 */
 946
 947static void release_swap_reader(struct swap_map_handle *handle)
 948{
 949        struct swap_map_page_list *tmp;
 950
 951        while (handle->maps) {
 952                if (handle->maps->map)
 953                        free_page((unsigned long)handle->maps->map);
 954                tmp = handle->maps;
 955                handle->maps = handle->maps->next;
 956                kfree(tmp);
 957        }
 958        handle->cur = NULL;
 959}
 960
 961static int get_swap_reader(struct swap_map_handle *handle,
 962                unsigned int *flags_p)
 963{
 964        int error;
 965        struct swap_map_page_list *tmp, *last;
 966        sector_t offset;
 967
 968        *flags_p = swsusp_header->flags;
 969
 970        if (!swsusp_header->image) /* how can this happen? */
 971                return -EINVAL;
 972
 973        handle->cur = NULL;
 974        last = handle->maps = NULL;
 975        offset = swsusp_header->image;
 976        while (offset) {
 977                tmp = kzalloc(sizeof(*handle->maps), GFP_KERNEL);
 978                if (!tmp) {
 979                        release_swap_reader(handle);
 980                        return -ENOMEM;
 981                }
 982                if (!handle->maps)
 983                        handle->maps = tmp;
 984                if (last)
 985                        last->next = tmp;
 986                last = tmp;
 987
 988                tmp->map = (struct swap_map_page *)
 989                           __get_free_page(GFP_NOIO | __GFP_HIGH);
 990                if (!tmp->map) {
 991                        release_swap_reader(handle);
 992                        return -ENOMEM;
 993                }
 994
 995                error = hib_submit_io(REQ_OP_READ, 0, offset, tmp->map, NULL);
 996                if (error) {
 997                        release_swap_reader(handle);
 998                        return error;
 999                }
1000                offset = tmp->map->next_swap;
1001        }
1002        handle->k = 0;
1003        handle->cur = handle->maps->map;
1004        return 0;
1005}
1006
1007static int swap_read_page(struct swap_map_handle *handle, void *buf,
1008                struct hib_bio_batch *hb)
1009{
1010        sector_t offset;
1011        int error;
1012        struct swap_map_page_list *tmp;
1013
1014        if (!handle->cur)
1015                return -EINVAL;
1016        offset = handle->cur->entries[handle->k];
1017        if (!offset)
1018                return -EFAULT;
1019        error = hib_submit_io(REQ_OP_READ, 0, offset, buf, hb);
1020        if (error)
1021                return error;
1022        if (++handle->k >= MAP_PAGE_ENTRIES) {
1023                handle->k = 0;
1024                free_page((unsigned long)handle->maps->map);
1025                tmp = handle->maps;
1026                handle->maps = handle->maps->next;
1027                kfree(tmp);
1028                if (!handle->maps)
1029                        release_swap_reader(handle);
1030                else
1031                        handle->cur = handle->maps->map;
1032        }
1033        return error;
1034}
1035
1036static int swap_reader_finish(struct swap_map_handle *handle)
1037{
1038        release_swap_reader(handle);
1039
1040        return 0;
1041}
1042
1043/**
1044 *      load_image - load the image using the swap map handle
1045 *      @handle and the snapshot handle @snapshot
1046 *      (assume there are @nr_to_read pages to load)
1047 */
1048
1049static int load_image(struct swap_map_handle *handle,
1050                      struct snapshot_handle *snapshot,
1051                      unsigned int nr_to_read)
1052{
1053        unsigned int m;
1054        int ret = 0;
1055        ktime_t start;
1056        ktime_t stop;
1057        struct hib_bio_batch hb;
1058        int err2;
1059        unsigned nr_pages;
1060
1061        hib_init_batch(&hb);
1062
1063        clean_pages_on_read = true;
1064        pr_info("Loading image data pages (%u pages)...\n", nr_to_read);
1065        m = nr_to_read / 10;
1066        if (!m)
1067                m = 1;
1068        nr_pages = 0;
1069        start = ktime_get();
1070        for ( ; ; ) {
1071                ret = snapshot_write_next(snapshot);
1072                if (ret <= 0)
1073                        break;
1074                ret = swap_read_page(handle, data_of(*snapshot), &hb);
1075                if (ret)
1076                        break;
1077                if (snapshot->sync_read)
1078                        ret = hib_wait_io(&hb);
1079                if (ret)
1080                        break;
1081                if (!(nr_pages % m))
1082                        pr_info("Image loading progress: %3d%%\n",
1083                                nr_pages / m * 10);
1084                nr_pages++;
1085        }
1086        err2 = hib_wait_io(&hb);
1087        stop = ktime_get();
1088        if (!ret)
1089                ret = err2;
1090        if (!ret) {
1091                pr_info("Image loading done\n");
1092                snapshot_write_finalize(snapshot);
1093                if (!snapshot_image_loaded(snapshot))
1094                        ret = -ENODATA;
1095        }
1096        swsusp_show_speed(start, stop, nr_to_read, "Read");
1097        return ret;
1098}
1099
1100/**
1101 * Structure used for LZO data decompression.
1102 */
1103struct dec_data {
1104        struct task_struct *thr;                  /* thread */
1105        atomic_t ready;                           /* ready to start flag */
1106        atomic_t stop;                            /* ready to stop flag */
1107        int ret;                                  /* return code */
1108        wait_queue_head_t go;                     /* start decompression */
1109        wait_queue_head_t done;                   /* decompression done */
1110        size_t unc_len;                           /* uncompressed length */
1111        size_t cmp_len;                           /* compressed length */
1112        unsigned char unc[LZO_UNC_SIZE];          /* uncompressed buffer */
1113        unsigned char cmp[LZO_CMP_SIZE];          /* compressed buffer */
1114};
1115
1116/**
1117 * Decompression function that runs in its own thread.
1118 */
1119static int lzo_decompress_threadfn(void *data)
1120{
1121        struct dec_data *d = data;
1122
1123        while (1) {
1124                wait_event(d->go, atomic_read(&d->ready) ||
1125                                  kthread_should_stop());
1126                if (kthread_should_stop()) {
1127                        d->thr = NULL;
1128                        d->ret = -1;
1129                        atomic_set(&d->stop, 1);
1130                        wake_up(&d->done);
1131                        break;
1132                }
1133                atomic_set(&d->ready, 0);
1134
1135                d->unc_len = LZO_UNC_SIZE;
1136                d->ret = lzo1x_decompress_safe(d->cmp + LZO_HEADER, d->cmp_len,
1137                                               d->unc, &d->unc_len);
1138                if (clean_pages_on_decompress)
1139                        flush_icache_range((unsigned long)d->unc,
1140                                           (unsigned long)d->unc + d->unc_len);
1141
1142                atomic_set(&d->stop, 1);
1143                wake_up(&d->done);
1144        }
1145        return 0;
1146}
1147
1148/**
1149 * load_image_lzo - Load compressed image data and decompress it with LZO.
1150 * @handle: Swap map handle to use for loading data.
1151 * @snapshot: Image to copy uncompressed data into.
1152 * @nr_to_read: Number of pages to load.
1153 */
1154static int load_image_lzo(struct swap_map_handle *handle,
1155                          struct snapshot_handle *snapshot,
1156                          unsigned int nr_to_read)
1157{
1158        unsigned int m;
1159        int ret = 0;
1160        int eof = 0;
1161        struct hib_bio_batch hb;
1162        ktime_t start;
1163        ktime_t stop;
1164        unsigned nr_pages;
1165        size_t off;
1166        unsigned i, thr, run_threads, nr_threads;
1167        unsigned ring = 0, pg = 0, ring_size = 0,
1168                 have = 0, want, need, asked = 0;
1169        unsigned long read_pages = 0;
1170        unsigned char **page = NULL;
1171        struct dec_data *data = NULL;
1172        struct crc_data *crc = NULL;
1173
1174        hib_init_batch(&hb);
1175
1176        /*
1177         * We'll limit the number of threads for decompression to limit memory
1178         * footprint.
1179         */
1180        nr_threads = num_online_cpus() - 1;
1181        nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);
1182
1183        page = vmalloc(array_size(LZO_MAX_RD_PAGES, sizeof(*page)));
1184        if (!page) {
1185                pr_err("Failed to allocate LZO page\n");
1186                ret = -ENOMEM;
1187                goto out_clean;
1188        }
1189
1190        data = vmalloc(array_size(nr_threads, sizeof(*data)));
1191        if (!data) {
1192                pr_err("Failed to allocate LZO data\n");
1193                ret = -ENOMEM;
1194                goto out_clean;
1195        }
1196        for (thr = 0; thr < nr_threads; thr++)
1197                memset(&data[thr], 0, offsetof(struct dec_data, go));
1198
1199        crc = kmalloc(sizeof(*crc), GFP_KERNEL);
1200        if (!crc) {
1201                pr_err("Failed to allocate crc\n");
1202                ret = -ENOMEM;
1203                goto out_clean;
1204        }
1205        memset(crc, 0, offsetof(struct crc_data, go));
1206
1207        clean_pages_on_decompress = true;
1208
1209        /*
1210         * Start the decompression threads.
1211         */
1212        for (thr = 0; thr < nr_threads; thr++) {
1213                init_waitqueue_head(&data[thr].go);
1214                init_waitqueue_head(&data[thr].done);
1215
1216                data[thr].thr = kthread_run(lzo_decompress_threadfn,
1217                                            &data[thr],
1218                                            "image_decompress/%u", thr);
1219                if (IS_ERR(data[thr].thr)) {
1220                        data[thr].thr = NULL;
1221                        pr_err("Cannot start decompression threads\n");
1222                        ret = -ENOMEM;
1223                        goto out_clean;
1224                }
1225        }
1226
1227        /*
1228         * Start the CRC32 thread.
1229         */
1230        init_waitqueue_head(&crc->go);
1231        init_waitqueue_head(&crc->done);
1232
1233        handle->crc32 = 0;
1234        crc->crc32 = &handle->crc32;
1235        for (thr = 0; thr < nr_threads; thr++) {
1236                crc->unc[thr] = data[thr].unc;
1237                crc->unc_len[thr] = &data[thr].unc_len;
1238        }
1239
1240        crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
1241        if (IS_ERR(crc->thr)) {
1242                crc->thr = NULL;
1243                pr_err("Cannot start CRC32 thread\n");
1244                ret = -ENOMEM;
1245                goto out_clean;
1246        }
1247
1248        /*
1249         * Set the number of pages for read buffering.
1250         * This is complete guesswork, because we'll only know the real
1251         * picture once prepare_image() is called, which is much later on
1252         * during the image load phase. We'll assume the worst case and
1253         * say that none of the image pages are from high memory.
1254         */
1255        if (low_free_pages() > snapshot_get_image_size())
1256                read_pages = (low_free_pages() - snapshot_get_image_size()) / 2;
1257        read_pages = clamp_val(read_pages, LZO_MIN_RD_PAGES, LZO_MAX_RD_PAGES);
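        /*
         * Rough example (assuming 4 KiB pages): with 600000 free low pages
         * and a 500000-page image, the formula above yields 50000, which is
         * clamped down to LZO_MAX_RD_PAGES (8192 pages, i.e. 32 MiB of read
         * buffer); under memory pressure it bottoms out at LZO_MIN_RD_PAGES
         * (1024 pages).
         */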
1258
1259        for (i = 0; i < read_pages; i++) {
1260                page[i] = (void *)__get_free_page(i < LZO_CMP_PAGES ?
1261                                                  GFP_NOIO | __GFP_HIGH :
1262                                                  GFP_NOIO | __GFP_NOWARN |
1263                                                  __GFP_NORETRY);
1264
1265                if (!page[i]) {
1266                        if (i < LZO_CMP_PAGES) {
1267                                ring_size = i;
1268                                pr_err("Failed to allocate LZO pages\n");
1269                                ret = -ENOMEM;
1270                                goto out_clean;
1271                        } else {
1272                                break;
1273                        }
1274                }
1275        }
1276        want = ring_size = i;
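        /*
         * From here on page[] is used as a ring buffer: 'ring' is where the
         * next asynchronous read lands, 'pg' is the next page to be copied
         * into a decompression buffer, 'asked' counts reads submitted but
         * not yet waited for, 'have' counts pages whose reads have completed
         * and 'want' counts free slots we still intend to fill.
         */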
1277
1278        pr_info("Using %u thread(s) for decompression\n", nr_threads);
1279        pr_info("Loading and decompressing image data (%u pages)...\n",
1280                nr_to_read);
1281        m = nr_to_read / 10;
1282        if (!m)
1283                m = 1;
1284        nr_pages = 0;
1285        start = ktime_get();
1286
1287        ret = snapshot_write_next(snapshot);
1288        if (ret <= 0)
1289                goto out_finish;
1290
1291        for(;;) {
1292                for (i = 0; !eof && i < want; i++) {
1293                        ret = swap_read_page(handle, page[ring], &hb);
1294                        if (ret) {
1295                                /*
1296                                 * On real read error, finish. On end of data,
1297                                 * set EOF flag and just exit the read loop.
1298                                 */
1299                                if (handle->cur &&
1300                                    handle->cur->entries[handle->k]) {
1301                                        goto out_finish;
1302                                } else {
1303                                        eof = 1;
1304                                        break;
1305                                }
1306                        }
1307                        if (++ring >= ring_size)
1308                                ring = 0;
1309                }
1310                asked += i;
1311                want -= i;
1312
1313                /*
1314                 * We are out of data, wait for some more.
1315                 */
1316                if (!have) {
1317                        if (!asked)
1318                                break;
1319
1320                        ret = hib_wait_io(&hb);
1321                        if (ret)
1322                                goto out_finish;
1323                        have += asked;
1324                        asked = 0;
1325                        if (eof)
1326                                eof = 2;
1327                }
1328
1329                if (crc->run_threads) {
1330                        wait_event(crc->done, atomic_read(&crc->stop));
1331                        atomic_set(&crc->stop, 0);
1332                        crc->run_threads = 0;
1333                }
1334
1335                for (thr = 0; have && thr < nr_threads; thr++) {
1336                        data[thr].cmp_len = *(size_t *)page[pg];
1337                        if (unlikely(!data[thr].cmp_len ||
1338                                     data[thr].cmp_len >
1339                                     lzo1x_worst_compress(LZO_UNC_SIZE))) {
1340                                pr_err("Invalid LZO compressed length\n");
1341                                ret = -1;
1342                                goto out_finish;
1343                        }
1344
1345                        need = DIV_ROUND_UP(data[thr].cmp_len + LZO_HEADER,
1346                                            PAGE_SIZE);
1347                        if (need > have) {
1348                                if (eof > 1) {
1349                                        ret = -1;
1350                                        goto out_finish;
1351                                }
1352                                break;
1353                        }
1354
1355                        for (off = 0;
1356                             off < LZO_HEADER + data[thr].cmp_len;
1357                             off += PAGE_SIZE) {
1358                                memcpy(data[thr].cmp + off,
1359                                       page[pg], PAGE_SIZE);
1360                                have--;
1361                                want++;
1362                                if (++pg >= ring_size)
1363                                        pg = 0;
1364                        }
1365
1366                        atomic_set(&data[thr].ready, 1);
1367                        wake_up(&data[thr].go);
1368                }
1369
1370                /*
1371                 * Wait for more data while we are decompressing.
1372                 */
1373                if (have < LZO_CMP_PAGES && asked) {
1374                        ret = hib_wait_io(&hb);
1375                        if (ret)
1376                                goto out_finish;
1377                        have += asked;
1378                        asked = 0;
1379                        if (eof)
1380                                eof = 2;
1381                }
1382
1383                for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
1384                        wait_event(data[thr].done,
1385                                   atomic_read(&data[thr].stop));
1386                        atomic_set(&data[thr].stop, 0);
1387
1388                        ret = data[thr].ret;
1389
1390                        if (ret < 0) {
1391                                pr_err("LZO decompression failed\n");
1392                                goto out_finish;
1393                        }
1394
1395                        if (unlikely(!data[thr].unc_len ||
1396                                     data[thr].unc_len > LZO_UNC_SIZE ||
1397                                     data[thr].unc_len & (PAGE_SIZE - 1))) {
1398                                pr_err("Invalid LZO uncompressed length\n");
1399                                ret = -1;
1400                                goto out_finish;
1401                        }
1402
1403                        for (off = 0;
1404                             off < data[thr].unc_len; off += PAGE_SIZE) {
1405                                memcpy(data_of(*snapshot),
1406                                       data[thr].unc + off, PAGE_SIZE);
1407
1408                                if (!(nr_pages % m))
1409                                        pr_info("Image loading progress: %3d%%\n",
1410                                                nr_pages / m * 10);
1411                                nr_pages++;
1412
1413                                ret = snapshot_write_next(snapshot);
1414                                if (ret <= 0) {
1415                                        crc->run_threads = thr + 1;
1416                                        atomic_set(&crc->ready, 1);
1417                                        wake_up(&crc->go);
1418                                        goto out_finish;
1419                                }
1420                        }
1421                }
1422
1423                crc->run_threads = thr;
1424                atomic_set(&crc->ready, 1);
1425                wake_up(&crc->go);
1426        }
1427
1428out_finish:
1429        if (crc->run_threads) {
1430                wait_event(crc->done, atomic_read(&crc->stop));
1431                atomic_set(&crc->stop, 0);
1432        }
1433        stop = ktime_get();
1434        if (!ret) {
1435                pr_info("Image loading done\n");
1436                snapshot_write_finalize(snapshot);
1437                if (!snapshot_image_loaded(snapshot))
1438                        ret = -ENODATA;
1439                if (!ret) {
1440                        if (swsusp_header->flags & SF_CRC32_MODE) {
1441                                if(handle->crc32 != swsusp_header->crc32) {
1442                                        pr_err("Invalid image CRC32!\n");
1443                                        ret = -ENODATA;
1444                                }
1445                        }
1446                }
1447        }
1448        swsusp_show_speed(start, stop, nr_to_read, "Read");
1449out_clean:
1450        for (i = 0; i < ring_size; i++)
1451                free_page((unsigned long)page[i]);
1452        if (crc) {
1453                if (crc->thr)
1454                        kthread_stop(crc->thr);
1455                kfree(crc);
1456        }
1457        if (data) {
1458                for (thr = 0; thr < nr_threads; thr++)
1459                        if (data[thr].thr)
1460                                kthread_stop(data[thr].thr);
1461                vfree(data);
1462        }
1463        vfree(page);
1464
1465        return ret;
1466}
1467
1468/**
1469 *      swsusp_read - read the hibernation image.
1470 *      @flags_p: location to store the flags passed by the "frozen" kernel
1471 *                in the image header
1472 */
1473
1474int swsusp_read(unsigned int *flags_p)
1475{
1476        int error;
1477        struct swap_map_handle handle;
1478        struct snapshot_handle snapshot;
1479        struct swsusp_info *header;
1480
1481        memset(&snapshot, 0, sizeof(struct snapshot_handle));
1482        error = snapshot_write_next(&snapshot);
1483        if (error < (int)PAGE_SIZE)
1484                return error < 0 ? error : -EFAULT;
1485        header = (struct swsusp_info *)data_of(snapshot);
1486        error = get_swap_reader(&handle, flags_p);
1487        if (error)
1488                goto end;
1489        if (!error)
1490                error = swap_read_page(&handle, header, NULL);
1491        if (!error) {
1492                error = (*flags_p & SF_NOCOMPRESS_MODE) ?
1493                        load_image(&handle, &snapshot, header->pages - 1) :
1494                        load_image_lzo(&handle, &snapshot, header->pages - 1);
1495        }
1496        swap_reader_finish(&handle);
1497end:
1498        if (!error)
1499                pr_debug("Image successfully loaded\n");
1500        else
1501                pr_debug("Error %d resuming\n", error);
1502        return error;
1503}
1504
1505/**
1506 *      swsusp_check - Check for swsusp signature in the resume device
1507 */
1508
1509int swsusp_check(void)
1510{
1511        int error;
1512
1513        hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device,
1514                                            FMODE_READ, NULL);
1515        if (!IS_ERR(hib_resume_bdev)) {
1516                set_blocksize(hib_resume_bdev, PAGE_SIZE);
1517                clear_page(swsusp_header);
1518                error = hib_submit_io(REQ_OP_READ, 0,
1519                                        swsusp_resume_block,
1520                                        swsusp_header, NULL);
1521                if (error)
1522                        goto put;
1523
1524                if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
1525                        memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
1526                        /* Reset swap signature now */
1527                        error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC,
1528                                                swsusp_resume_block,
1529                                                swsusp_header, NULL);
1530                } else {
1531                        error = -EINVAL;
1532                }
1533
1534put:
1535                if (error)
1536                        blkdev_put(hib_resume_bdev, FMODE_READ);
1537                else
1538                        pr_debug("Image signature found, resuming\n");
1539        } else {
1540                error = PTR_ERR(hib_resume_bdev);
1541        }
1542
1543        if (error)
1544                pr_debug("Image not found (code %d)\n", error);
1545
1546        return error;
1547}
1548
1549/**
1550 *      swsusp_close - close swap device.
1551 */
1552
1553void swsusp_close(fmode_t mode)
1554{
1555        if (IS_ERR(hib_resume_bdev)) {
1556                pr_debug("Image device not initialised\n");
1557                return;
1558        }
1559
1560        blkdev_put(hib_resume_bdev, mode);
1561}
1562
1563/**
1564 *      swsusp_unmark - Unmark swsusp signature in the resume device
1565 */
1566
1567#ifdef CONFIG_SUSPEND
1568int swsusp_unmark(void)
1569{
1570        int error;
1571
1572        hib_submit_io(REQ_OP_READ, 0, swsusp_resume_block,
1573                      swsusp_header, NULL);
1574        if (!memcmp(HIBERNATE_SIG,swsusp_header->sig, 10)) {
1575                memcpy(swsusp_header->sig,swsusp_header->orig_sig, 10);
1576                error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC,
1577                                        swsusp_resume_block,
1578                                        swsusp_header, NULL);
1579        } else {
1580                pr_err("Cannot find swsusp signature!\n");
1581                error = -ENODEV;
1582        }
1583
1584        /*
1585         * We just returned from suspend, we don't need the image any more.
1586         */
1587        free_all_swap_pages(root_swap);
1588
1589        return error;
1590}
1591#endif
1592
1593static int swsusp_header_init(void)
1594{
1595        swsusp_header = (struct swsusp_header*) __get_free_page(GFP_KERNEL);
1596        if (!swsusp_header)
1597                panic("Could not allocate memory for swsusp_header\n");
1598        return 0;
1599}
1600
1601core_initcall(swsusp_header_init);
1602