/*
 * linux/kernel/power/snapshot.c
 *
 * This file provides system snapshot/restore functionality for swsusp.
 *
 * Copyright (C) 1998-2005 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 *
 * This file is released under the GPLv2.
 *
 */

#define pr_fmt(fmt) "PM: " fmt

#include <linux/version.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/nmi.h>
#include <linux/syscalls.h>
#include <linux/console.h>
#include <linux/highmem.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/compiler.h>
#include <linux/ktime.h>
#include <linux/set_memory.h>

#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#include "power.h"

#if defined(CONFIG_STRICT_KERNEL_RWX) && defined(CONFIG_ARCH_HAS_SET_MEMORY)
static bool hibernate_restore_protection;
static bool hibernate_restore_protection_active;

void enable_restore_image_protection(void)
{
        hibernate_restore_protection = true;
}

static inline void hibernate_restore_protection_begin(void)
{
        hibernate_restore_protection_active = hibernate_restore_protection;
}

static inline void hibernate_restore_protection_end(void)
{
        hibernate_restore_protection_active = false;
}

static inline void hibernate_restore_protect_page(void *page_address)
{
        if (hibernate_restore_protection_active)
                set_memory_ro((unsigned long)page_address, 1);
}

static inline void hibernate_restore_unprotect_page(void *page_address)
{
        if (hibernate_restore_protection_active)
                set_memory_rw((unsigned long)page_address, 1);
}
#else
static inline void hibernate_restore_protection_begin(void) {}
static inline void hibernate_restore_protection_end(void) {}
static inline void hibernate_restore_protect_page(void *page_address) {}
static inline void hibernate_restore_unprotect_page(void *page_address) {}
#endif /* CONFIG_STRICT_KERNEL_RWX && CONFIG_ARCH_HAS_SET_MEMORY */

static int swsusp_page_is_free(struct page *);
static void swsusp_set_page_forbidden(struct page *);
static void swsusp_unset_page_forbidden(struct page *);

/*
 * Number of bytes to reserve for memory allocations made by device drivers
 * from their ->freeze() and ->freeze_noirq() callbacks so that they don't
 * cause image creation to fail (tunable via /sys/power/reserved_size).
 */
unsigned long reserved_size;

void __init hibernate_reserved_size_init(void)
{
        reserved_size = SPARE_PAGES * PAGE_SIZE;
}

/*
 * Preferred image size in bytes (tunable via /sys/power/image_size).
 * When it is set to N, swsusp will do its best to ensure the image
 * size will not exceed N bytes, but if that is impossible, it will
 * try to create the smallest image possible.
 */
unsigned long image_size;

void __init hibernate_image_size_init(void)
{
        image_size = ((totalram_pages * 2) / 5) * PAGE_SIZE;
}
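
/*
 * Worked example (illustrative, not part of the original file): with 4 KiB
 * pages on a machine with 8 GiB of RAM, totalram_pages is 2097152, so the
 * default image_size is ((2097152 * 2) / 5) * 4096 bytes, i.e. roughly
 * 3.2 GiB (two fifths of total RAM).
 */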

/*
 * List of PBEs needed for restoring the pages that were allocated before
 * the suspend and included in the suspend image, but have also been
 * allocated by the "resume" kernel, so their contents cannot be written
 * directly to their "original" page frames.
 */
struct pbe *restore_pblist;

/* struct linked_page is used to build chains of pages */

#define LINKED_PAGE_DATA_SIZE   (PAGE_SIZE - sizeof(void *))

struct linked_page {
        struct linked_page *next;
        char data[LINKED_PAGE_DATA_SIZE];
} __packed;

/*
 * List of "safe" pages (i.e. pages that were not used by the image kernel
 * before hibernation) that may be used as temporary storage for image kernel
 * memory contents.
 */
static struct linked_page *safe_pages_list;

/* Pointer to an auxiliary buffer (1 page) */
static void *buffer;

#define PG_ANY          0
#define PG_SAFE         1
#define PG_UNSAFE_CLEAR 1
#define PG_UNSAFE_KEEP  0

static unsigned int allocated_unsafe_pages;

/**
 * get_image_page - Allocate a page for a hibernation image.
 * @gfp_mask: GFP mask for the allocation.
 * @safe_needed: Get pages that were not used before hibernation (restore only)
 *
 * During image restoration, for storing the PBE list and the image data, we can
 * only use memory pages that do not conflict with the pages used before
 * hibernation.  The "unsafe" pages have PageNosaveFree set and we count them
 * using allocated_unsafe_pages.
 *
 * Each allocated image page is marked as PageNosave and PageNosaveFree so that
 * swsusp_free() can release it.
 */
static void *get_image_page(gfp_t gfp_mask, int safe_needed)
{
        void *res;

        res = (void *)get_zeroed_page(gfp_mask);
        if (safe_needed)
                while (res && swsusp_page_is_free(virt_to_page(res))) {
                        /* The page is unsafe, mark it for swsusp_free() */
                        swsusp_set_page_forbidden(virt_to_page(res));
                        allocated_unsafe_pages++;
                        res = (void *)get_zeroed_page(gfp_mask);
                }
        if (res) {
                swsusp_set_page_forbidden(virt_to_page(res));
                swsusp_set_page_free(virt_to_page(res));
        }
        return res;
}

static void *__get_safe_page(gfp_t gfp_mask)
{
        if (safe_pages_list) {
                void *ret = safe_pages_list;

                safe_pages_list = safe_pages_list->next;
                memset(ret, 0, PAGE_SIZE);
                return ret;
        }
        return get_image_page(gfp_mask, PG_SAFE);
}

unsigned long get_safe_page(gfp_t gfp_mask)
{
        return (unsigned long)__get_safe_page(gfp_mask);
}
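
/*
 * Illustrative usage sketch (an assumption, not taken from this file):
 * architecture restore code that needs scratch pages guaranteed not to
 * collide with image data can do
 *
 *        void *pg = (void *)get_safe_page(GFP_ATOMIC);
 *
 *        if (!pg)
 *                return -ENOMEM;
 *
 * The page comes back zeroed, either taken from safe_pages_list or
 * freshly allocated via get_image_page(..., PG_SAFE).
 */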

static struct page *alloc_image_page(gfp_t gfp_mask)
{
        struct page *page;

        page = alloc_page(gfp_mask);
        if (page) {
                swsusp_set_page_forbidden(page);
                swsusp_set_page_free(page);
        }
        return page;
}

static void recycle_safe_page(void *page_address)
{
        struct linked_page *lp = page_address;

        lp->next = safe_pages_list;
        safe_pages_list = lp;
}

/**
 * free_image_page - Free a page allocated for hibernation image.
 * @addr: Address of the page to free.
 * @clear_nosave_free: If set, clear the PageNosaveFree bit for the page.
 *
 * The page to free should have been allocated by get_image_page() (page flags
 * set by it are affected).
 */
static inline void free_image_page(void *addr, int clear_nosave_free)
{
        struct page *page;

        BUG_ON(!virt_addr_valid(addr));

        page = virt_to_page(addr);

        swsusp_unset_page_forbidden(page);
        if (clear_nosave_free)
                swsusp_unset_page_free(page);

        __free_page(page);
}

static inline void free_list_of_pages(struct linked_page *list,
                                      int clear_page_nosave)
{
        while (list) {
                struct linked_page *lp = list->next;

                free_image_page(list, clear_page_nosave);
                list = lp;
        }
}

/*
 * struct chain_allocator is used for allocating small objects out of
 * a linked list of pages called 'the chain'.
 *
 * The chain grows each time there is no room for a new object in
 * the current page.  The allocated objects cannot be freed individually.
 * It is only possible to free them all at once, by freeing the entire
 * chain.
 *
 * NOTE: The chain allocator may be inefficient if the allocated objects
 * are not much smaller than PAGE_SIZE.
 */
struct chain_allocator {
        struct linked_page *chain;      /* the chain */
        unsigned int used_space;        /* total size of objects allocated out
                                           of the current page */
        gfp_t gfp_mask;         /* mask for allocating pages */
        int safe_needed;        /* if set, only "safe" pages are allocated */
};

static void chain_init(struct chain_allocator *ca, gfp_t gfp_mask,
                       int safe_needed)
{
        ca->chain = NULL;
        ca->used_space = LINKED_PAGE_DATA_SIZE;
        ca->gfp_mask = gfp_mask;
        ca->safe_needed = safe_needed;
}

static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
{
        void *ret;

        if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
                struct linked_page *lp;

                lp = ca->safe_needed ? __get_safe_page(ca->gfp_mask) :
                                        get_image_page(ca->gfp_mask, PG_ANY);
                if (!lp)
                        return NULL;

                lp->next = ca->chain;
                ca->chain = lp;
                ca->used_space = 0;
        }
        ret = ca->chain->data + ca->used_space;
        ca->used_space += size;
        return ret;
}
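
/*
 * Illustrative usage sketch (an assumption, not taken from this file):
 * objects are carved out of the chain one by one and released all at
 * once by freeing the whole chain, e.g.:
 *
 *        struct chain_allocator ca;
 *        struct rtree_node *node;
 *
 *        chain_init(&ca, GFP_KERNEL, PG_ANY);
 *        node = chain_alloc(&ca, sizeof(struct rtree_node));
 *        if (!node)
 *                return -ENOMEM;
 *        ...
 *        free_list_of_pages(ca.chain, PG_UNSAFE_CLEAR);
 */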

/**
 * Data types related to memory bitmaps.
 *
 * A memory bitmap is a structure consisting of many linked lists of
 * objects.  The main list's elements are of type struct zone_bitmap
 * and each of them corresponds to one zone.  For each zone bitmap
 * object there is a list of objects of type struct bm_block that
 * represent each block of the bitmap in which information is stored.
 *
 * struct memory_bitmap contains a pointer to the main list of zone
 * bitmap objects, a struct bm_position used for browsing the bitmap,
 * and a pointer to the list of pages used for allocating all of the
 * zone bitmap objects and bitmap block objects.
 *
 * NOTE: It has to be possible to lay out the bitmap in memory
 * using only allocations of order 0.  Additionally, the bitmap is
 * designed to work with an arbitrary number of zones (this is over the
 * top for now, but let's avoid making unnecessary assumptions ;-).
 *
 * struct zone_bitmap contains a pointer to a list of bitmap block
 * objects and a pointer to the bitmap block object that has been
 * most recently used for setting bits.  Additionally, it contains the
 * PFNs that correspond to the start and end of the represented zone.
 *
 * struct bm_block contains a pointer to the memory page in which
 * information is stored (in the form of a block of the bitmap).
 * It also contains the pfns that correspond to the start and end of
 * the represented memory area.
 *
 * The memory bitmap is organized as a radix tree to guarantee fast random
 * access to the bits. There is one radix tree for each zone (as returned
 * from create_mem_extents).
 *
 * One radix tree is represented by one struct mem_zone_bm_rtree. There are
 * two linked lists for the nodes of the tree, one for the inner nodes and
 * one for the leaf nodes. The linked leaf nodes are used for fast linear
 * access of the memory bitmap.
 *
 * The struct rtree_node represents one node of the radix tree.
 */

#define BM_END_OF_MAP   (~0UL)

#define BM_BITS_PER_BLOCK       (PAGE_SIZE * BITS_PER_BYTE)
#define BM_BLOCK_SHIFT          (PAGE_SHIFT + 3)
#define BM_BLOCK_MASK           ((1UL << BM_BLOCK_SHIFT) - 1)
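
/*
 * Worked example (illustrative): with 4 KiB pages, BM_BITS_PER_BLOCK is
 * 32768 and BM_BLOCK_SHIFT is 15.  In a zone starting at PFN 0x1000, the
 * bit for PFN 0x9123 lives in leaf block (0x9123 - 0x1000) >> 15 == 1,
 * at bit position (0x9123 - 0x1000) & BM_BLOCK_MASK == 0x123.
 */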

/*
 * struct rtree_node is a wrapper struct to link the nodes
 * of the rtree together for easy linear iteration over
 * bits and easy freeing
 */
struct rtree_node {
        struct list_head list;
        unsigned long *data;
};

/*
 * struct mem_zone_bm_rtree represents a bitmap used for one
 * populated memory zone.
 */
struct mem_zone_bm_rtree {
        struct list_head list;          /* Link Zones together         */
        struct list_head nodes;         /* Radix Tree inner nodes      */
        struct list_head leaves;        /* Radix Tree leaves           */
        unsigned long start_pfn;        /* Zone start page frame       */
        unsigned long end_pfn;          /* Zone end page frame + 1     */
        struct rtree_node *rtree;       /* Radix Tree Root             */
        int levels;                     /* Number of Radix Tree Levels */
        unsigned int blocks;            /* Number of Bitmap Blocks     */
};

/* struct bm_position is used for browsing memory bitmaps */

struct bm_position {
        struct mem_zone_bm_rtree *zone;
        struct rtree_node *node;
        unsigned long node_pfn;
        int node_bit;
};

struct memory_bitmap {
        struct list_head zones;
        struct linked_page *p_list;     /* list of pages used to store zone
                                           bitmap objects and bitmap block
                                           objects */
        struct bm_position cur; /* most recently used bit position */
};

/* Functions that operate on memory bitmaps */

#define BM_ENTRIES_PER_LEVEL    (PAGE_SIZE / sizeof(unsigned long))
#if BITS_PER_LONG == 32
#define BM_RTREE_LEVEL_SHIFT    (PAGE_SHIFT - 2)
#else
#define BM_RTREE_LEVEL_SHIFT    (PAGE_SHIFT - 3)
#endif
#define BM_RTREE_LEVEL_MASK     ((1UL << BM_RTREE_LEVEL_SHIFT) - 1)
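
/*
 * Worked example (illustrative): on a 64-bit machine with 4 KiB pages,
 * BM_RTREE_LEVEL_SHIFT is 9, so every rtree node holds 512 entries.  One
 * leaf block covers 32768 pages (128 MiB), so a single level of the tree
 * spans up to 512 * 128 MiB = 64 GiB of a zone before a second level
 * becomes necessary.
 */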

/**
 * alloc_rtree_node - Allocate a new node and add it to the radix tree.
 *
 * This function is used to allocate inner nodes as well as the
 * leaf nodes of the radix tree. It also adds the node to the
 * corresponding linked list passed in by the *list parameter.
 */
static struct rtree_node *alloc_rtree_node(gfp_t gfp_mask, int safe_needed,
                                           struct chain_allocator *ca,
                                           struct list_head *list)
{
        struct rtree_node *node;

        node = chain_alloc(ca, sizeof(struct rtree_node));
        if (!node)
                return NULL;

        node->data = get_image_page(gfp_mask, safe_needed);
        if (!node->data)
                return NULL;

        list_add_tail(&node->list, list);

        return node;
}

/**
 * add_rtree_block - Add a new leaf node to the radix tree.
 *
 * The leaf nodes need to be allocated in order to keep the leaves
 * linked list in order. This is guaranteed by the zone->blocks
 * counter.
 */
static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask,
                           int safe_needed, struct chain_allocator *ca)
{
        struct rtree_node *node, *block, **dst;
        unsigned int levels_needed, block_nr;
        int i;

        block_nr = zone->blocks;
        levels_needed = 0;

        /* How many levels do we need for this block nr? */
        while (block_nr) {
                levels_needed += 1;
                block_nr >>= BM_RTREE_LEVEL_SHIFT;
        }

        /* Make sure the rtree has enough levels */
        for (i = zone->levels; i < levels_needed; i++) {
                node = alloc_rtree_node(gfp_mask, safe_needed, ca,
                                        &zone->nodes);
                if (!node)
                        return -ENOMEM;

                node->data[0] = (unsigned long)zone->rtree;
                zone->rtree = node;
                zone->levels += 1;
        }

        /* Allocate new block */
        block = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->leaves);
        if (!block)
                return -ENOMEM;

        /* Now walk the rtree to insert the block */
        node = zone->rtree;
        dst = &zone->rtree;
        block_nr = zone->blocks;
        for (i = zone->levels; i > 0; i--) {
                int index;

                if (!node) {
                        node = alloc_rtree_node(gfp_mask, safe_needed, ca,
                                                &zone->nodes);
                        if (!node)
                                return -ENOMEM;
                        *dst = node;
                }

                index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
                index &= BM_RTREE_LEVEL_MASK;
                dst = (struct rtree_node **)&((*dst)->data[index]);
                node = *dst;
        }

        zone->blocks += 1;
        *dst = block;

        return 0;
}

static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
                               int clear_nosave_free);

/**
 * create_zone_bm_rtree - Create a radix tree for one zone.
 *
 * Allocates the mem_zone_bm_rtree structure and initializes it.
 * This function also allocates and builds the radix tree for the
 * zone.
 */
static struct mem_zone_bm_rtree *create_zone_bm_rtree(gfp_t gfp_mask,
                                                      int safe_needed,
                                                      struct chain_allocator *ca,
                                                      unsigned long start,
                                                      unsigned long end)
{
        struct mem_zone_bm_rtree *zone;
        unsigned int i, nr_blocks;
        unsigned long pages;

        pages = end - start;
        zone  = chain_alloc(ca, sizeof(struct mem_zone_bm_rtree));
        if (!zone)
                return NULL;

        INIT_LIST_HEAD(&zone->nodes);
        INIT_LIST_HEAD(&zone->leaves);
        zone->start_pfn = start;
        zone->end_pfn = end;
        nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);

        for (i = 0; i < nr_blocks; i++) {
                if (add_rtree_block(zone, gfp_mask, safe_needed, ca)) {
                        free_zone_bm_rtree(zone, PG_UNSAFE_CLEAR);
                        return NULL;
                }
        }

        return zone;
}

/**
 * free_zone_bm_rtree - Free the memory of the radix tree.
 *
 * Free all node pages of the radix tree. The mem_zone_bm_rtree
 * structure itself is not freed here nor are the rtree_node
 * structs.
 */
static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
                               int clear_nosave_free)
{
        struct rtree_node *node;

        list_for_each_entry(node, &zone->nodes, list)
                free_image_page(node->data, clear_nosave_free);

        list_for_each_entry(node, &zone->leaves, list)
                free_image_page(node->data, clear_nosave_free);
}

static void memory_bm_position_reset(struct memory_bitmap *bm)
{
        bm->cur.zone = list_entry(bm->zones.next, struct mem_zone_bm_rtree,
                                  list);
        bm->cur.node = list_entry(bm->cur.zone->leaves.next,
                                  struct rtree_node, list);
        bm->cur.node_pfn = 0;
        bm->cur.node_bit = 0;
}

static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);

struct mem_extent {
        struct list_head hook;
        unsigned long start;
        unsigned long end;
};

/**
 * free_mem_extents - Free a list of memory extents.
 * @list: List of extents to free.
 */
static void free_mem_extents(struct list_head *list)
{
        struct mem_extent *ext, *aux;

        list_for_each_entry_safe(ext, aux, list, hook) {
                list_del(&ext->hook);
                kfree(ext);
        }
}

/**
 * create_mem_extents - Create a list of memory extents.
 * @list: List to put the extents into.
 * @gfp_mask: Mask to use for memory allocations.
 *
 * The extents represent contiguous ranges of PFNs.
 */
static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
{
        struct zone *zone;

        INIT_LIST_HEAD(list);

        for_each_populated_zone(zone) {
                unsigned long zone_start, zone_end;
                struct mem_extent *ext, *cur, *aux;

                zone_start = zone->zone_start_pfn;
                zone_end = zone_end_pfn(zone);

                list_for_each_entry(ext, list, hook)
                        if (zone_start <= ext->end)
                                break;

                if (&ext->hook == list || zone_end < ext->start) {
                        /* New extent is necessary */
                        struct mem_extent *new_ext;

                        new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask);
                        if (!new_ext) {
                                free_mem_extents(list);
                                return -ENOMEM;
                        }
                        new_ext->start = zone_start;
                        new_ext->end = zone_end;
                        list_add_tail(&new_ext->hook, &ext->hook);
                        continue;
                }

                /* Merge this zone's range of PFNs with the existing one */
                if (zone_start < ext->start)
                        ext->start = zone_start;
                if (zone_end > ext->end)
                        ext->end = zone_end;

                /* More merging may be possible */
                cur = ext;
                list_for_each_entry_safe_continue(cur, aux, list, hook) {
                        if (zone_end < cur->start)
                                break;
                        if (zone_end < cur->end)
                                ext->end = cur->end;
                        list_del(&cur->hook);
                        kfree(cur);
                }
        }

        return 0;
}

/**
 * memory_bm_create - Allocate memory for a memory bitmap.
 */
static int memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask,
                            int safe_needed)
{
        struct chain_allocator ca;
        struct list_head mem_extents;
        struct mem_extent *ext;
        int error;

        chain_init(&ca, gfp_mask, safe_needed);
        INIT_LIST_HEAD(&bm->zones);

        error = create_mem_extents(&mem_extents, gfp_mask);
        if (error)
                return error;

        list_for_each_entry(ext, &mem_extents, hook) {
                struct mem_zone_bm_rtree *zone;

                zone = create_zone_bm_rtree(gfp_mask, safe_needed, &ca,
                                            ext->start, ext->end);
                if (!zone) {
                        error = -ENOMEM;
                        goto Error;
                }
                list_add_tail(&zone->list, &bm->zones);
        }

        bm->p_list = ca.chain;
        memory_bm_position_reset(bm);
 Exit:
        free_mem_extents(&mem_extents);
        return error;

 Error:
        bm->p_list = ca.chain;
        memory_bm_free(bm, PG_UNSAFE_CLEAR);
        goto Exit;
}
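
/*
 * Illustrative lifecycle sketch (an assumption, not taken from this file):
 *
 *        struct memory_bitmap bm;
 *
 *        if (memory_bm_create(&bm, GFP_KERNEL, PG_ANY))
 *                return -ENOMEM;
 *        memory_bm_set_bit(&bm, page_to_pfn(page));
 *        ...
 *        memory_bm_free(&bm, PG_UNSAFE_CLEAR);
 */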

/**
 * memory_bm_free - Free memory occupied by the memory bitmap.
 * @bm: Memory bitmap.
 */
static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
{
        struct mem_zone_bm_rtree *zone;

        list_for_each_entry(zone, &bm->zones, list)
                free_zone_bm_rtree(zone, clear_nosave_free);

        free_list_of_pages(bm->p_list, clear_nosave_free);

        INIT_LIST_HEAD(&bm->zones);
}

/**
 * memory_bm_find_bit - Find the bit for a given PFN in a memory bitmap.
 *
 * Find the bit in memory bitmap @bm that corresponds to the given PFN.
 * The cur.zone, cur.node and cur.node_pfn members of @bm are updated.
 *
 * Walk the radix tree to find the page containing the bit that represents @pfn
 * and return the position of the bit in @addr and @bit_nr.
 */
static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
                              void **addr, unsigned int *bit_nr)
{
        struct mem_zone_bm_rtree *curr, *zone;
        struct rtree_node *node;
        int i, block_nr;

        zone = bm->cur.zone;

        if (pfn >= zone->start_pfn && pfn < zone->end_pfn)
                goto zone_found;

        zone = NULL;

        /* Find the right zone */
        list_for_each_entry(curr, &bm->zones, list) {
                if (pfn >= curr->start_pfn && pfn < curr->end_pfn) {
                        zone = curr;
                        break;
                }
        }

        if (!zone)
                return -EFAULT;

zone_found:
        /*
         * We have found the zone. Now walk the radix tree to find the leaf node
         * for our PFN.
         */
        node = bm->cur.node;
        if (((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
                goto node_found;

        node      = zone->rtree;
        block_nr  = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT;

        for (i = zone->levels; i > 0; i--) {
                int index;

                index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
                index &= BM_RTREE_LEVEL_MASK;
                BUG_ON(node->data[index] == 0);
                node = (struct rtree_node *)node->data[index];
        }

node_found:
        /* Update last position */
        bm->cur.zone = zone;
        bm->cur.node = node;
        bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK;

        /* Set return values */
        *addr = node->data;
        *bit_nr = (pfn - zone->start_pfn) & BM_BLOCK_MASK;

        return 0;
}

static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
{
        void *addr;
        unsigned int bit;
        int error;

        error = memory_bm_find_bit(bm, pfn, &addr, &bit);
        BUG_ON(error);
        set_bit(bit, addr);
}

static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn)
{
        void *addr;
        unsigned int bit;
        int error;

        error = memory_bm_find_bit(bm, pfn, &addr, &bit);
        if (!error)
                set_bit(bit, addr);

        return error;
}

static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
{
        void *addr;
        unsigned int bit;
        int error;

        error = memory_bm_find_bit(bm, pfn, &addr, &bit);
        BUG_ON(error);
        clear_bit(bit, addr);
}

static void memory_bm_clear_current(struct memory_bitmap *bm)
{
        int bit;

        bit = max(bm->cur.node_bit - 1, 0);
        clear_bit(bit, bm->cur.node->data);
}

static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
{
        void *addr;
        unsigned int bit;
        int error;

        error = memory_bm_find_bit(bm, pfn, &addr, &bit);
        BUG_ON(error);
        return test_bit(bit, addr);
}

static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
{
        void *addr;
        unsigned int bit;

        return !memory_bm_find_bit(bm, pfn, &addr, &bit);
}

/*
 * rtree_next_node - Jump to the next leaf node.
 *
 * Set the position to the beginning of the next node in the
 * memory bitmap. This is either the next node in the current
 * zone's radix tree or the first node in the radix tree of the
 * next zone.
 *
 * Return true if there is a next node, false otherwise.
 */
static bool rtree_next_node(struct memory_bitmap *bm)
{
        if (!list_is_last(&bm->cur.node->list, &bm->cur.zone->leaves)) {
                bm->cur.node = list_entry(bm->cur.node->list.next,
                                          struct rtree_node, list);
                bm->cur.node_pfn += BM_BITS_PER_BLOCK;
                bm->cur.node_bit  = 0;
                touch_softlockup_watchdog();
                return true;
        }

        /* No more nodes, go to the next zone */
        if (!list_is_last(&bm->cur.zone->list, &bm->zones)) {
                bm->cur.zone = list_entry(bm->cur.zone->list.next,
                                  struct mem_zone_bm_rtree, list);
                bm->cur.node = list_entry(bm->cur.zone->leaves.next,
                                          struct rtree_node, list);
                bm->cur.node_pfn = 0;
                bm->cur.node_bit = 0;
                return true;
        }

        /* No more zones */
        return false;
}

/**
 * memory_bm_next_pfn - Find the next set bit in a memory bitmap.
 * @bm: Memory bitmap.
 *
 * Starting from the last returned position this function searches for the next
 * set bit in @bm and returns the PFN represented by it.  If no more bits are
 * set, BM_END_OF_MAP is returned.
 *
 * It is required to run memory_bm_position_reset() before the first call to
 * this function for the given memory bitmap.
 */
static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
{
        unsigned long bits, pfn, pages;
        int bit;

        do {
                pages     = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn;
                bits      = min(pages - bm->cur.node_pfn, BM_BITS_PER_BLOCK);
                bit       = find_next_bit(bm->cur.node->data, bits,
                                          bm->cur.node_bit);
                if (bit < bits) {
                        pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit;
                        bm->cur.node_bit = bit + 1;
                        return pfn;
                }
        } while (rtree_next_node(bm));

        return BM_END_OF_MAP;
}
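
/*
 * Typical iteration over every set bit (illustrative sketch, modeled on
 * the callers in this file; handle() stands for the per-PFN work):
 *
 *        memory_bm_position_reset(bm);
 *        for (pfn = memory_bm_next_pfn(bm); pfn != BM_END_OF_MAP;
 *             pfn = memory_bm_next_pfn(bm))
 *                handle(pfn);
 */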

/*
 * This structure represents a range of page frames the contents of which
 * should not be saved during hibernation.
 */
struct nosave_region {
        struct list_head list;
        unsigned long start_pfn;
        unsigned long end_pfn;
};

static LIST_HEAD(nosave_regions);

static void recycle_zone_bm_rtree(struct mem_zone_bm_rtree *zone)
{
        struct rtree_node *node;

        list_for_each_entry(node, &zone->nodes, list)
                recycle_safe_page(node->data);

        list_for_each_entry(node, &zone->leaves, list)
                recycle_safe_page(node->data);
}

static void memory_bm_recycle(struct memory_bitmap *bm)
{
        struct mem_zone_bm_rtree *zone;
        struct linked_page *p_list;

        list_for_each_entry(zone, &bm->zones, list)
                recycle_zone_bm_rtree(zone);

        p_list = bm->p_list;
        while (p_list) {
                struct linked_page *lp = p_list;

                p_list = lp->next;
                recycle_safe_page(lp);
        }
}

/**
 * register_nosave_region - Register a region of unsaveable memory.
 *
 * Register a range of page frames the contents of which should not be saved
 * during hibernation (to be used in the early initialization code).
 */
void __init __register_nosave_region(unsigned long start_pfn,
                                     unsigned long end_pfn, int use_kmalloc)
{
        struct nosave_region *region;

        if (start_pfn >= end_pfn)
                return;

        if (!list_empty(&nosave_regions)) {
                /* Try to extend the previous region (they should be sorted) */
                region = list_entry(nosave_regions.prev,
                                        struct nosave_region, list);
                if (region->end_pfn == start_pfn) {
                        region->end_pfn = end_pfn;
                        goto Report;
                }
        }
        if (use_kmalloc) {
                /* During init, this shouldn't fail */
                region = kmalloc(sizeof(struct nosave_region), GFP_KERNEL);
                BUG_ON(!region);
        } else {
                /* This allocation cannot fail */
                region = memblock_virt_alloc(sizeof(struct nosave_region), 0);
        }
        region->start_pfn = start_pfn;
        region->end_pfn = end_pfn;
        list_add_tail(&region->list, &nosave_regions);
 Report:
        pr_info("Registered nosave memory: [mem %#010llx-%#010llx]\n",
                (unsigned long long) start_pfn << PAGE_SHIFT,
                ((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
}
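
/*
 * Illustrative caller sketch (an assumption, not taken from this file):
 * early architecture setup code would register a non-saveable physical
 * range through the register_nosave_region() wrapper, e.g.
 *
 *        register_nosave_region(PFN_DOWN(start_phys), PFN_UP(end_phys));
 */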

/*
 * Set bits in this map correspond to the page frames the contents of which
 * should not be saved during the suspend.
 */
static struct memory_bitmap *forbidden_pages_map;

/* Set bits in this map correspond to free page frames. */
static struct memory_bitmap *free_pages_map;

/*
 * Each page frame allocated for creating the image is marked by setting the
 * corresponding bits in forbidden_pages_map and free_pages_map simultaneously.
 */

void swsusp_set_page_free(struct page *page)
{
        if (free_pages_map)
                memory_bm_set_bit(free_pages_map, page_to_pfn(page));
}

static int swsusp_page_is_free(struct page *page)
{
        return free_pages_map ?
                memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0;
}

void swsusp_unset_page_free(struct page *page)
{
        if (free_pages_map)
                memory_bm_clear_bit(free_pages_map, page_to_pfn(page));
}

static void swsusp_set_page_forbidden(struct page *page)
{
        if (forbidden_pages_map)
                memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page));
}

int swsusp_page_is_forbidden(struct page *page)
{
        return forbidden_pages_map ?
                memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0;
}

static void swsusp_unset_page_forbidden(struct page *page)
{
        if (forbidden_pages_map)
                memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
}

/**
 * mark_nosave_pages - Mark pages that should not be saved.
 * @bm: Memory bitmap.
 *
 * Set the bits in @bm that correspond to the page frames the contents of which
 * should not be saved.
 */
static void mark_nosave_pages(struct memory_bitmap *bm)
{
        struct nosave_region *region;

        if (list_empty(&nosave_regions))
                return;

        list_for_each_entry(region, &nosave_regions, list) {
                unsigned long pfn;

                pr_debug("Marking nosave pages: [mem %#010llx-%#010llx]\n",
                         (unsigned long long) region->start_pfn << PAGE_SHIFT,
                         ((unsigned long long) region->end_pfn << PAGE_SHIFT)
                                - 1);

                for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
                        if (pfn_valid(pfn)) {
                                /*
                                 * It is safe to ignore the result of
                                 * mem_bm_set_bit_check() here, since we won't
                                 * touch the PFNs for which the error is
                                 * returned anyway.
                                 */
                                mem_bm_set_bit_check(bm, pfn);
                        }
        }
}

/**
 * create_basic_memory_bitmaps - Create bitmaps to hold basic page information.
 *
 * Create bitmaps needed for marking page frames that should not be saved and
 * free page frames.  The forbidden_pages_map and free_pages_map pointers are
 * only modified if everything goes well, because we don't want the bits to be
 * touched before both bitmaps are set up.
 */
int create_basic_memory_bitmaps(void)
{
        struct memory_bitmap *bm1, *bm2;
        int error = 0;

        if (forbidden_pages_map && free_pages_map)
                return 0;
        else
                BUG_ON(forbidden_pages_map || free_pages_map);

        bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
        if (!bm1)
                return -ENOMEM;

        error = memory_bm_create(bm1, GFP_KERNEL, PG_ANY);
        if (error)
                goto Free_first_object;

        bm2 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
        if (!bm2)
                goto Free_first_bitmap;

        error = memory_bm_create(bm2, GFP_KERNEL, PG_ANY);
        if (error)
                goto Free_second_object;

        forbidden_pages_map = bm1;
        free_pages_map = bm2;
        mark_nosave_pages(forbidden_pages_map);

        pr_debug("Basic memory bitmaps created\n");

        return 0;

 Free_second_object:
        kfree(bm2);
 Free_first_bitmap:
        memory_bm_free(bm1, PG_UNSAFE_CLEAR);
 Free_first_object:
        kfree(bm1);
        return -ENOMEM;
}

/**
 * free_basic_memory_bitmaps - Free memory bitmaps holding basic information.
 *
 * Free memory bitmaps allocated by create_basic_memory_bitmaps().  The
 * auxiliary pointers are necessary so that the bitmaps themselves are not
 * referred to while they are being freed.
 */
void free_basic_memory_bitmaps(void)
{
        struct memory_bitmap *bm1, *bm2;

        if (WARN_ON(!(forbidden_pages_map && free_pages_map)))
                return;

        bm1 = forbidden_pages_map;
        bm2 = free_pages_map;
        forbidden_pages_map = NULL;
        free_pages_map = NULL;
        memory_bm_free(bm1, PG_UNSAFE_CLEAR);
        kfree(bm1);
        memory_bm_free(bm2, PG_UNSAFE_CLEAR);
        kfree(bm2);

        pr_debug("Basic memory bitmaps freed\n");
}

void clear_free_pages(void)
{
#ifdef CONFIG_PAGE_POISONING_ZERO
        struct memory_bitmap *bm = free_pages_map;
        unsigned long pfn;

        if (WARN_ON(!free_pages_map))
                return;

        memory_bm_position_reset(bm);
        pfn = memory_bm_next_pfn(bm);
        while (pfn != BM_END_OF_MAP) {
                if (pfn_valid(pfn))
                        clear_highpage(pfn_to_page(pfn));

                pfn = memory_bm_next_pfn(bm);
        }
        memory_bm_position_reset(bm);
        pr_info("free pages cleared after restore\n");
#endif /* PAGE_POISONING_ZERO */
}

/**
 * snapshot_additional_pages - Estimate the number of extra pages needed.
 * @zone: Memory zone to carry out the computation for.
 *
 * Estimate the number of additional pages needed for setting up the
 * hibernation image data structures for @zone (usually, the returned value is
 * greater than the exact number).
 */
unsigned int snapshot_additional_pages(struct zone *zone)
{
        unsigned int rtree, nodes;

        rtree = nodes = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
        rtree += DIV_ROUND_UP(rtree * sizeof(struct rtree_node),
                              LINKED_PAGE_DATA_SIZE);
        while (nodes > 1) {
                nodes = DIV_ROUND_UP(nodes, BM_ENTRIES_PER_LEVEL);
                rtree += nodes;
        }

        return 2 * rtree;
}
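
/*
 * Worked example (illustrative, 64-bit, 4 KiB pages): a zone spanning
 * 1048576 pages (4 GiB) needs DIV_ROUND_UP(1048576, 32768) == 32 leaf
 * blocks, one linked page for the 32 struct rtree_node wrappers and one
 * inner node, giving rtree == 34; the function returns 68, the factor of
 * two covering the two bitmaps set up for the zone.
 */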

#ifdef CONFIG_HIGHMEM
/**
 * count_free_highmem_pages - Compute the total number of free highmem pages.
 *
 * The returned number is system-wide.
 */
static unsigned int count_free_highmem_pages(void)
{
        struct zone *zone;
        unsigned int cnt = 0;

        for_each_populated_zone(zone)
                if (is_highmem(zone))
                        cnt += zone_page_state(zone, NR_FREE_PAGES);

        return cnt;
}

/**
 * saveable_highmem_page - Check if a highmem page is saveable.
 *
 * Determine whether a highmem page should be included in a hibernation image.
 *
 * We should save the page if it isn't Nosave, NosaveFree, or Reserved,
 * and it isn't part of a free chunk of pages.
 */
static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
{
        struct page *page;

        if (!pfn_valid(pfn))
                return NULL;

        page = pfn_to_page(pfn);
        if (page_zone(page) != zone)
                return NULL;

        BUG_ON(!PageHighMem(page));

        if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page) ||
            PageReserved(page))
                return NULL;

        if (page_is_guard(page))
                return NULL;

        return page;
}

/**
 * count_highmem_pages - Compute the total number of saveable highmem pages.
 */
static unsigned int count_highmem_pages(void)
{
        struct zone *zone;
        unsigned int n = 0;

        for_each_populated_zone(zone) {
                unsigned long pfn, max_zone_pfn;

                if (!is_highmem(zone))
                        continue;

                mark_free_pages(zone);
                max_zone_pfn = zone_end_pfn(zone);
                for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
                        if (saveable_highmem_page(zone, pfn))
                                n++;
        }
        return n;
}
#else
static inline void *saveable_highmem_page(struct zone *z, unsigned long p)
{
        return NULL;
}
#endif /* CONFIG_HIGHMEM */

/**
 * saveable_page - Check if the given page is saveable.
 *
 * Determine whether a non-highmem page should be included in a hibernation
 * image.
 *
 * We should save the page if it isn't Nosave, and is not in the range
 * of pages statically defined as 'unsaveable', and it isn't part of
 * a free chunk of pages.
 */
static struct page *saveable_page(struct zone *zone, unsigned long pfn)
{
        struct page *page;

        if (!pfn_valid(pfn))
                return NULL;

        page = pfn_to_page(pfn);
        if (page_zone(page) != zone)
                return NULL;

        BUG_ON(PageHighMem(page));

        if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
                return NULL;

        if (PageReserved(page)
            && (!kernel_page_present(page) || pfn_is_nosave(pfn)))
                return NULL;

        if (page_is_guard(page))
                return NULL;

        return page;
}

/**
 * count_data_pages - Compute the total number of saveable non-highmem pages.
 */
static unsigned int count_data_pages(void)
{
        struct zone *zone;
        unsigned long pfn, max_zone_pfn;
        unsigned int n = 0;

        for_each_populated_zone(zone) {
                if (is_highmem(zone))
                        continue;

                mark_free_pages(zone);
                max_zone_pfn = zone_end_pfn(zone);
                for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
                        if (saveable_page(zone, pfn))
                                n++;
        }
        return n;
}

/*
 * This is needed, because copy_page and memcpy are not usable for copying
 * task structs.
 */
static inline void do_copy_page(long *dst, long *src)
{
        int n;

        for (n = PAGE_SIZE / sizeof(long); n; n--)
                *dst++ = *src++;
}

/**
 * safe_copy_page - Copy a page in a safe way.
 *
 * Check if the page we are going to copy is marked as present in the kernel
 * page tables (this always is the case if CONFIG_DEBUG_PAGEALLOC is not set
 * and in that case kernel_page_present() always returns 'true').
 */
static void safe_copy_page(void *dst, struct page *s_page)
{
        if (kernel_page_present(s_page)) {
                do_copy_page(dst, page_address(s_page));
        } else {
                kernel_map_pages(s_page, 1, 1);
                do_copy_page(dst, page_address(s_page));
                kernel_map_pages(s_page, 1, 0);
        }
}

#ifdef CONFIG_HIGHMEM
static inline struct page *page_is_saveable(struct zone *zone, unsigned long pfn)
{
        return is_highmem(zone) ?
                saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
}

static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
        struct page *s_page, *d_page;
        void *src, *dst;

        s_page = pfn_to_page(src_pfn);
        d_page = pfn_to_page(dst_pfn);
        if (PageHighMem(s_page)) {
                src = kmap_atomic(s_page);
                dst = kmap_atomic(d_page);
                do_copy_page(dst, src);
                kunmap_atomic(dst);
                kunmap_atomic(src);
        } else {
                if (PageHighMem(d_page)) {
                        /*
                         * The page pointed to by src may contain some kernel
                         * data modified by kmap_atomic()
                         */
                        safe_copy_page(buffer, s_page);
                        dst = kmap_atomic(d_page);
                        copy_page(dst, buffer);
                        kunmap_atomic(dst);
                } else {
                        safe_copy_page(page_address(d_page), s_page);
                }
        }
}
#else
#define page_is_saveable(zone, pfn)     saveable_page(zone, pfn)

static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
        safe_copy_page(page_address(pfn_to_page(dst_pfn)),
                                pfn_to_page(src_pfn));
}
#endif /* CONFIG_HIGHMEM */

static void copy_data_pages(struct memory_bitmap *copy_bm,
                            struct memory_bitmap *orig_bm)
{
        struct zone *zone;
        unsigned long pfn;

        for_each_populated_zone(zone) {
                unsigned long max_zone_pfn;

                mark_free_pages(zone);
                max_zone_pfn = zone_end_pfn(zone);
                for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
                        if (page_is_saveable(zone, pfn))
                                memory_bm_set_bit(orig_bm, pfn);
        }
        memory_bm_position_reset(orig_bm);
        memory_bm_position_reset(copy_bm);
        for (;;) {
                pfn = memory_bm_next_pfn(orig_bm);
                if (unlikely(pfn == BM_END_OF_MAP))
                        break;
                copy_data_page(memory_bm_next_pfn(copy_bm), pfn);
        }
}

/* Total number of image pages */
static unsigned int nr_copy_pages;
/* Number of pages needed for saving the original pfns of the image pages */
static unsigned int nr_meta_pages;
/*
 * Numbers of normal and highmem page frames allocated for hibernation image
 * before suspending devices.
 */
static unsigned int alloc_normal, alloc_highmem;
/*
 * Memory bitmap used for marking saveable pages (during hibernation) or
 * hibernation image pages (during restore)
 */
static struct memory_bitmap orig_bm;
/*
 * Memory bitmap used during hibernation for marking allocated page frames that
 * will contain copies of saveable pages.  During restore it is initially used
 * for marking hibernation image pages, but then the set bits from it are
 * duplicated in @orig_bm and it is released.  On highmem systems it is next
 * used for marking "safe" highmem pages, but it has to be reinitialized for
 * this purpose.
 */
static struct memory_bitmap copy_bm;

/**
 * swsusp_free - Free pages allocated for hibernation image.
 *
 * Image pages are allocated before snapshot creation, so they need to be
 * released after resume.
 */
void swsusp_free(void)
{
        unsigned long fb_pfn, fr_pfn;

        if (!forbidden_pages_map || !free_pages_map)
                goto out;

        memory_bm_position_reset(forbidden_pages_map);
        memory_bm_position_reset(free_pages_map);

loop:
        fr_pfn = memory_bm_next_pfn(free_pages_map);
        fb_pfn = memory_bm_next_pfn(forbidden_pages_map);

        /*
         * Find the next bit set in both bitmaps. This is guaranteed to
         * terminate when fb_pfn == fr_pfn == BM_END_OF_MAP.
         */
        do {
                if (fb_pfn < fr_pfn)
                        fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
                if (fr_pfn < fb_pfn)
                        fr_pfn = memory_bm_next_pfn(free_pages_map);
        } while (fb_pfn != fr_pfn);

        if (fr_pfn != BM_END_OF_MAP && pfn_valid(fr_pfn)) {
                struct page *page = pfn_to_page(fr_pfn);

                memory_bm_clear_current(forbidden_pages_map);
                memory_bm_clear_current(free_pages_map);
                hibernate_restore_unprotect_page(page_address(page));
                __free_page(page);
                goto loop;
        }

out:
        nr_copy_pages = 0;
        nr_meta_pages = 0;
        restore_pblist = NULL;
        buffer = NULL;
        alloc_normal = 0;
        alloc_highmem = 0;
        hibernate_restore_protection_end();
}

/* Helper functions used for the shrinking of memory. */

#define GFP_IMAGE       (GFP_KERNEL | __GFP_NOWARN)

/**
 * preallocate_image_pages - Allocate a number of pages for hibernation image.
 * @nr_pages: Number of page frames to allocate.
 * @mask: GFP flags to use for the allocation.
 *
 * Return value: Number of page frames actually allocated
 */
static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
{
        unsigned long nr_alloc = 0;

        while (nr_pages > 0) {
                struct page *page;

                page = alloc_image_page(mask);
                if (!page)
                        break;
                memory_bm_set_bit(&copy_bm, page_to_pfn(page));
                if (PageHighMem(page))
                        alloc_highmem++;
                else
                        alloc_normal++;
                nr_pages--;
                nr_alloc++;
        }

        return nr_alloc;
}

static unsigned long preallocate_image_memory(unsigned long nr_pages,
                                              unsigned long avail_normal)
{
        unsigned long alloc;

        if (avail_normal <= alloc_normal)
                return 0;

        alloc = avail_normal - alloc_normal;
        if (nr_pages < alloc)
                alloc = nr_pages;

        return preallocate_image_pages(alloc, GFP_IMAGE);
}

#ifdef CONFIG_HIGHMEM
static unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
        return preallocate_image_pages(nr_pages, GFP_IMAGE | __GFP_HIGHMEM);
}

/**
 * __fraction - Compute (an approximation of) x * (multiplier / base).
 */
static unsigned long __fraction(u64 x, u64 multiplier, u64 base)
{
        x *= multiplier;
        do_div(x, base);
        return (unsigned long)x;
}
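
/*
 * Worked example (illustrative): __fraction(1000, 3, 8) computes
 * 1000 * 3 / 8 in 64-bit arithmetic and returns 375.  The caller below
 * uses it to size the highmem share of an allocation in proportion to
 * the highmem/total ratio.
 */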
1558
1559static unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
1560                                                  unsigned long highmem,
1561                                                  unsigned long total)
1562{
1563        unsigned long alloc = __fraction(nr_pages, highmem, total);
1564
1565        return preallocate_image_pages(alloc, GFP_IMAGE | __GFP_HIGHMEM);
1566}
1567#else /* CONFIG_HIGHMEM */
1568static inline unsigned long preallocate_image_highmem(unsigned long nr_pages)
1569{
1570        return 0;
1571}
1572
1573static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
1574                                                         unsigned long highmem,
1575                                                         unsigned long total)
1576{
1577        return 0;
1578}
1579#endif /* CONFIG_HIGHMEM */
1580
1581/**
1582 * free_unnecessary_pages - Release preallocated pages not needed for the image.
1583 */
1584static unsigned long free_unnecessary_pages(void)
1585{
1586        unsigned long save, to_free_normal, to_free_highmem, free;
1587
1588        save = count_data_pages();
1589        if (alloc_normal >= save) {
1590                to_free_normal = alloc_normal - save;
1591                save = 0;
1592        } else {
1593                to_free_normal = 0;
1594                save -= alloc_normal;
1595        }
1596        save += count_highmem_pages();
1597        if (alloc_highmem >= save) {
1598                to_free_highmem = alloc_highmem - save;
1599        } else {
1600                to_free_highmem = 0;
1601                save -= alloc_highmem;
1602                if (to_free_normal > save)
1603                        to_free_normal -= save;
1604                else
1605                        to_free_normal = 0;
1606        }
1607        free = to_free_normal + to_free_highmem;
1608
1609        memory_bm_position_reset(&copy_bm);
1610
1611        while (to_free_normal > 0 || to_free_highmem > 0) {
1612                unsigned long pfn = memory_bm_next_pfn(&copy_bm);
1613                struct page *page = pfn_to_page(pfn);
1614
1615                if (PageHighMem(page)) {
1616                        if (!to_free_highmem)
1617                                continue;
1618                        to_free_highmem--;
1619                        alloc_highmem--;
1620                } else {
1621                        if (!to_free_normal)
1622                                continue;
1623                        to_free_normal--;
1624                        alloc_normal--;
1625                }
1626                memory_bm_clear_bit(&copy_bm, pfn);
1627                swsusp_unset_page_forbidden(page);
1628                swsusp_unset_page_free(page);
1629                __free_page(page);
1630        }
1631
1632        return free;
1633}
1634
1635/**
1636 * minimum_image_size - Estimate the minimum acceptable size of an image.
1637 * @saveable: Number of saveable pages in the system.
1638 *
1639 * We want to avoid trying too hard to free too much memory, so estimate the
1640 * minimum acceptable size of a hibernation image to use as the lower limit for
1641 * preallocating memory.
1642 *
1643 * We assume that the minimum image size should be proportional to
1644 *
1645 * [number of saveable pages] - [number of pages that can be freed in theory]
1646 *
1647 * where the second term is the sum of (1) reclaimable slab pages, (2) active
1648 * and (3) inactive anonymous pages, (4) active and (5) inactive file pages.
1649 */
1650static unsigned long minimum_image_size(unsigned long saveable)
1651{
1652        unsigned long size;
1653
1654        size = global_node_page_state(NR_SLAB_RECLAIMABLE)
1655                + global_node_page_state(NR_ACTIVE_ANON)
1656                + global_node_page_state(NR_INACTIVE_ANON)
1657                + global_node_page_state(NR_ACTIVE_FILE)
1658                + global_node_page_state(NR_INACTIVE_FILE);
1659
1660        return saveable <= size ? 0 : saveable - size;
1661}
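
/*
 * Example with illustrative numbers: with 300000 saveable pages and
 * 180000 pages on the reclaimable slab and anonymous/file LRU lists,
 * the estimated minimum image size is 300000 - 180000 = 120000 pages.
 */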
1662
1663/**
1664 * hibernate_preallocate_memory - Preallocate memory for hibernation image.
1665 *
1666 * To create a hibernation image it is necessary to make a copy of every page
1667 * frame in use.  We also need a number of page frames to be free during
1668 * hibernation for allocations made while saving the image and for device
1669 * drivers, in case they need to allocate memory from their hibernation
1670 * callbacks (these two numbers are given by PAGES_FOR_IO (which is a rough
1671 * estimate) and reserved_size divided by PAGE_SIZE (which is tunable through
1672 * /sys/power/reserved_size), respectively).  To make this happen, we compute the
1673 * total number of available page frames and allocate at least
1674 *
1675 * ([page frames total] + PAGES_FOR_IO + [metadata pages]) / 2
1676 *  + 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
1677 *
1678 * of them, which corresponds to the maximum size of a hibernation image.
1679 *
1680 * If image_size is set below the number following from the above formula,
1681 * the preallocation of memory is continued until the total number of saveable
1682 * pages in the system is below the requested image size or the minimum
1683 * acceptable image size returned by minimum_image_size(), whichever is greater.
1684 */
1685int hibernate_preallocate_memory(void)
1686{
1687        struct zone *zone;
1688        unsigned long saveable, size, max_size, count, highmem, pages = 0;
1689        unsigned long alloc, save_highmem, pages_highmem, avail_normal;
1690        ktime_t start, stop;
1691        int error;
1692
1693        pr_info("Preallocating image memory... ");
1694        start = ktime_get();
1695
1696        error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY);
1697        if (error)
1698                goto err_out;
1699
1700        error = memory_bm_create(&copy_bm, GFP_IMAGE, PG_ANY);
1701        if (error)
1702                goto err_out;
1703
1704        alloc_normal = 0;
1705        alloc_highmem = 0;
1706
1707        /* Count the number of saveable data pages. */
1708        save_highmem = count_highmem_pages();
1709        saveable = count_data_pages();
1710
1711        /*
1712         * Compute the total number of page frames we can use (count) and the
1713         * number of pages needed for image metadata (size).
1714         */
1715        count = saveable;
1716        saveable += save_highmem;
1717        highmem = save_highmem;
1718        size = 0;
1719        for_each_populated_zone(zone) {
1720                size += snapshot_additional_pages(zone);
1721                if (is_highmem(zone))
1722                        highmem += zone_page_state(zone, NR_FREE_PAGES);
1723                else
1724                        count += zone_page_state(zone, NR_FREE_PAGES);
1725        }
1726        avail_normal = count;
1727        count += highmem;
1728        count -= totalreserve_pages;
1729
1730        /* Add number of pages required for page keys (s390 only). */
1731        size += page_key_additional_pages(saveable);
1732
1733        /* Compute the maximum number of saveable pages to leave in memory. */
1734        max_size = (count - (size + PAGES_FOR_IO)) / 2
1735                        - 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE);
1736        /* Compute the desired number of image pages specified by image_size. */
1737        size = DIV_ROUND_UP(image_size, PAGE_SIZE);
1738        if (size > max_size)
1739                size = max_size;
1740        /*
1741         * If the desired number of image pages is at least as large as the
1742         * current number of saveable pages in memory, allocate page frames for
1743         * the image and we're done.
1744         */
1745        if (size >= saveable) {
1746                pages = preallocate_image_highmem(save_highmem);
1747                pages += preallocate_image_memory(saveable - pages, avail_normal);
1748                goto out;
1749        }
1750
1751        /* Estimate the minimum size of the image. */
1752        pages = minimum_image_size(saveable);
1753        /*
1754         * To avoid excessive pressure on the normal zone, leave room in it to
1755         * accommodate an image of the minimum size (unless it's already too
1756         * small, in which case don't preallocate pages from it at all).
1757         */
1758        if (avail_normal > pages)
1759                avail_normal -= pages;
1760        else
1761                avail_normal = 0;
1762        if (size < pages)
1763                size = min_t(unsigned long, pages, max_size);
1764
1765        /*
1766         * Let the memory management subsystem know that we're going to need a
1767         * large number of page frames to allocate and make it free some memory.
1768         * NOTE: If this is not done, performance will be hurt badly in some
1769         * test cases.
1770         */
1771        shrink_all_memory(saveable - size);
1772
1773        /*
1774         * The number of saveable pages in memory was too high, so apply some
1775         * pressure to decrease it.  First, make room for the largest possible
1776         * image and fail if that doesn't work.  Next, try to decrease the size
1777         * of the image as much as indicated by 'size' using allocations from
1778         * highmem and non-highmem zones separately.
1779         */
1780        pages_highmem = preallocate_image_highmem(highmem / 2);
1781        alloc = count - max_size;
1782        if (alloc > pages_highmem)
1783                alloc -= pages_highmem;
1784        else
1785                alloc = 0;
1786        pages = preallocate_image_memory(alloc, avail_normal);
1787        if (pages < alloc) {
1788                /* We have exhausted non-highmem pages, try highmem. */
1789                alloc -= pages;
1790                pages += pages_highmem;
1791                pages_highmem = preallocate_image_highmem(alloc);
1792                if (pages_highmem < alloc)
1793                        goto err_out;
1794                pages += pages_highmem;
1795                /*
1796                 * size is the desired number of saveable pages to leave in
1797                 * memory, so try to preallocate (all memory - size) pages.
1798                 */
1799                alloc = (count - pages) - size;
1800                pages += preallocate_image_highmem(alloc);
1801        } else {
1802                /*
1803                 * There are approximately max_size saveable pages at this point
1804                 * and we want to reduce this number to size.
1805                 */
1806                alloc = max_size - size;
1807                size = preallocate_highmem_fraction(alloc, highmem, count);
1808                pages_highmem += size;
1809                alloc -= size;
1810                size = preallocate_image_memory(alloc, avail_normal);
1811                pages_highmem += preallocate_image_highmem(alloc - size);
1812                pages += pages_highmem + size;
1813        }
1814
1815        /*
1816         * We only need as many page frames for the image as there are saveable
1817         * pages in memory, but we have allocated more.  Release the excess
1818         * pages now.
1819         */
1820        pages -= free_unnecessary_pages();
1821
1822 out:
1823        stop = ktime_get();
1824        pr_cont("done (allocated %lu pages)\n", pages);
1825        swsusp_show_speed(start, stop, pages, "Allocated");
1826
1827        return 0;
1828
1829 err_out:
1830        pr_cont("\n");
1831        swsusp_free();
1832        return -ENOMEM;
1833}
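
/*
 * Worked example for the formula above (illustrative numbers): with
 * count = 1000000 usable page frames, 2000 metadata pages,
 * PAGES_FOR_IO = 1024 and reserved_size = 1 MB (256 pages of 4 KB),
 * max_size = (1000000 - (2000 + 1024)) / 2 - 2 * 256 = 497976 pages,
 * i.e. the image may hold roughly half of the usable page frames.
 */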
1834
1835#ifdef CONFIG_HIGHMEM
1836/**
1837 * count_pages_for_highmem - Count non-highmem pages needed for copying highmem.
1838 *
1839 * Compute the number of non-highmem pages that will be necessary for creating
1840 * copies of highmem pages.
1841 */
1842static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
1843{
1844        unsigned int free_highmem = count_free_highmem_pages() + alloc_highmem;
1845
1846        if (free_highmem >= nr_highmem)
1847                nr_highmem = 0;
1848        else
1849                nr_highmem -= free_highmem;
1850
1851        return nr_highmem;
1852}
1853#else
1854static unsigned int count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
1855#endif /* CONFIG_HIGHMEM */
1856
1857/**
1858 * enough_free_mem - Check if there is enough free memory for the image.
1859 */
1860static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
1861{
1862        struct zone *zone;
1863        unsigned int free = alloc_normal;
1864
1865        for_each_populated_zone(zone)
1866                if (!is_highmem(zone))
1867                        free += zone_page_state(zone, NR_FREE_PAGES);
1868
1869        nr_pages += count_pages_for_highmem(nr_highmem);
1870        pr_debug("Normal pages needed: %u + %u, available pages: %u\n",
1871                 nr_pages, PAGES_FOR_IO, free);
1872
1873        return free > nr_pages + PAGES_FOR_IO;
1874}
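
/*
 * Example with illustrative numbers: copying 100000 data pages with
 * PAGES_FOR_IO = 1024 requires strictly more than 101024 free
 * non-highmem pages, where pages already preallocated for the image
 * (alloc_normal) count as free.
 */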
1875
1876#ifdef CONFIG_HIGHMEM
1877/**
1878 * get_highmem_buffer - Allocate a buffer for highmem pages.
1879 *
1880 * If there are some highmem pages in the hibernation image, we may need a
1881 * buffer to copy them and/or load their data.
1882 */
1883static inline int get_highmem_buffer(int safe_needed)
1884{
1885        buffer = get_image_page(GFP_ATOMIC, safe_needed);
1886        return buffer ? 0 : -ENOMEM;
1887}
1888
1889/**
1890 * alloc_highmem_pages - Allocate some highmem pages for the image.
1891 *
1892 * Try to allocate as many pages as needed, but if the number of free highmem
1893 * pages is less than that, allocate them all.
1894 */
1895static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
1896                                               unsigned int nr_highmem)
1897{
1898        unsigned int to_alloc = count_free_highmem_pages();
1899
1900        if (to_alloc > nr_highmem)
1901                to_alloc = nr_highmem;
1902
1903        nr_highmem -= to_alloc;
1904        while (to_alloc-- > 0) {
1905                struct page *page;
1906
1907                page = alloc_image_page(__GFP_HIGHMEM|__GFP_KSWAPD_RECLAIM);
1908                memory_bm_set_bit(bm, page_to_pfn(page));
1909        }
1910        return nr_highmem;
1911}
1912#else
1913static inline int get_highmem_buffer(int safe_needed) { return 0; }
1914
1915static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
1916                                               unsigned int n) { return 0; }
1917#endif /* CONFIG_HIGHMEM */
1918
1919/**
1920 * swsusp_alloc - Allocate memory for hibernation image.
1921 *
1922 * We first try to allocate as many highmem pages as there are
1923 * saveable highmem pages in the system.  If that fails, we allocate
1924 * non-highmem pages for the copies of the remaining highmem ones.
1925 *
1926 * In this approach it is likely that the copies of highmem pages will
1927 * also be located in high memory, because of the way in which
1928 * copy_data_pages() works.
1929 */
1930static int swsusp_alloc(struct memory_bitmap *copy_bm,
1931                        unsigned int nr_pages, unsigned int nr_highmem)
1932{
1933        if (nr_highmem > 0) {
1934                if (get_highmem_buffer(PG_ANY))
1935                        goto err_out;
1936                if (nr_highmem > alloc_highmem) {
1937                        nr_highmem -= alloc_highmem;
1938                        nr_pages += alloc_highmem_pages(copy_bm, nr_highmem);
1939                }
1940        }
1941        if (nr_pages > alloc_normal) {
1942                nr_pages -= alloc_normal;
1943                while (nr_pages-- > 0) {
1944                        struct page *page;
1945
1946                        page = alloc_image_page(GFP_ATOMIC);
1947                        if (!page)
1948                                goto err_out;
1949                        memory_bm_set_bit(copy_bm, page_to_pfn(page));
1950                }
1951        }
1952
1953        return 0;
1954
1955 err_out:
1956        swsusp_free();
1957        return -ENOMEM;
1958}
1959
1960asmlinkage __visible int swsusp_save(void)
1961{
1962        unsigned int nr_pages, nr_highmem;
1963
1964        pr_info("Creating hibernation image:\n");
1965
1966        drain_local_pages(NULL);
1967        nr_pages = count_data_pages();
1968        nr_highmem = count_highmem_pages();
1969        pr_info("Need to copy %u pages\n", nr_pages + nr_highmem);
1970
1971        if (!enough_free_mem(nr_pages, nr_highmem)) {
1972                pr_err("Not enough free memory\n");
1973                return -ENOMEM;
1974        }
1975
1976        if (swsusp_alloc(&copy_bm, nr_pages, nr_highmem)) {
1977                pr_err("Memory allocation failed\n");
1978                return -ENOMEM;
1979        }
1980
1981        /*
1982         * While allocating the suspend pagedir, new cold pages may
1983         * appear.  Drain them as well.
1984         */
1985        drain_local_pages(NULL);
1986        copy_data_pages(&copy_bm, &orig_bm);
1987
1988        /*
1989         * End of critical section. From now on, we can write to memory,
1990         * but we should not touch disk. This specially means we must _not_
1991         * touch swap space! Except we must write out our image of course.
1992         */
1993
1994        nr_pages += nr_highmem;
1995        nr_copy_pages = nr_pages;
1996        nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);
1997
1998        pr_info("Hibernation image created (%u pages copied)\n", nr_pages);
1999
2000        return 0;
2001}
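
/*
 * Example: on a 64-bit kernel with 4 KB pages each metadata page holds
 * PAGE_SIZE / sizeof(long) = 512 PFNs, so an image of 100000 copied
 * pages needs DIV_ROUND_UP(100000 * 8, 4096) = 196 metadata pages.
 */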
2002
2003#ifndef CONFIG_ARCH_HIBERNATION_HEADER
2004static int init_header_complete(struct swsusp_info *info)
2005{
2006        memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
2007        info->version_code = LINUX_VERSION_CODE;
2008        return 0;
2009}
2010
2011static char *check_image_kernel(struct swsusp_info *info)
2012{
2013        if (info->version_code != LINUX_VERSION_CODE)
2014                return "kernel version";
2015        if (strcmp(info->uts.sysname, init_utsname()->sysname))
2016                return "system type";
2017        if (strcmp(info->uts.release, init_utsname()->release))
2018                return "kernel release";
2019        if (strcmp(info->uts.version, init_utsname()->version))
2020                return "version";
2021        if (strcmp(info->uts.machine, init_utsname()->machine))
2022                return "machine";
2023        return NULL;
2024}
2025#endif /* CONFIG_ARCH_HIBERNATION_HEADER */
2026
2027unsigned long snapshot_get_image_size(void)
2028{
2029        return nr_copy_pages + nr_meta_pages + 1;
2030}
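
/*
 * Example: continuing the numbers above, the image consists of 100000
 * data pages + 196 metadata pages + 1 header page = 100197 pages,
 * i.e. roughly 391 MB with 4 KB pages.
 */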
2031
2032static int init_header(struct swsusp_info *info)
2033{
2034        memset(info, 0, sizeof(struct swsusp_info));
2035        info->num_physpages = get_num_physpages();
2036        info->image_pages = nr_copy_pages;
2037        info->pages = snapshot_get_image_size();
2038        info->size = info->pages;
2039        info->size <<= PAGE_SHIFT;
2040        return init_header_complete(info);
2041}
2042
2043/**
2044 * pack_pfns - Prepare PFNs for saving.
2045 * @buf: Memory buffer to store the PFNs in.
2046 * @bm: Memory bitmap.
2047 *
2048 * PFNs corresponding to set bits in @bm are stored in the area of memory
2049 * pointed to by @buf (1 page at a time).
2050 */
2051static inline void pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
2052{
2053        int j;
2054
2055        for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
2056                buf[j] = memory_bm_next_pfn(bm);
2057                if (unlikely(buf[j] == BM_END_OF_MAP))
2058                        break;
2059                /* Save page key for data page (s390 only). */
2060                page_key_read(buf + j);
2061        }
2062}
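
/*
 * Example: on a 64-bit kernel with 4 KB pages a single call stores up
 * to 512 PFNs in @buf; a shorter final run is terminated by the
 * BM_END_OF_MAP marker.
 */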
2063
2064/**
2065 * snapshot_read_next - Get the address to read the next image page from.
2066 * @handle: Snapshot handle to be used for the reading.
2067 *
2068 * On the first call, @handle should point to a zeroed snapshot_handle
2069 * structure.  The structure is then populated and a pointer to it should be
2070 * passed to this function on every subsequent call.
2071 *
2072 * On success, the function returns a positive number.  Then, the caller
2073 * is allowed to read up to the returned number of bytes from the memory
2074 * location computed by the data_of() macro.
2075 *
2076 * The function returns 0 to indicate the end of the data stream condition,
2077 * and negative numbers are returned on errors.  If that happens, the structure
2078 * pointed to by @handle is not updated and should not be used any more.
2079 */
2080int snapshot_read_next(struct snapshot_handle *handle)
2081{
2082        if (handle->cur > nr_meta_pages + nr_copy_pages)
2083                return 0;
2084
2085        if (!buffer) {
2086                /* This ensures the buffer is freed by swsusp_free() */
2087                buffer = get_image_page(GFP_ATOMIC, PG_ANY);
2088                if (!buffer)
2089                        return -ENOMEM;
2090        }
2091        if (!handle->cur) {
2092                int error;
2093
2094                error = init_header((struct swsusp_info *)buffer);
2095                if (error)
2096                        return error;
2097                handle->buffer = buffer;
2098                memory_bm_position_reset(&orig_bm);
2099                memory_bm_position_reset(&copy_bm);
2100        } else if (handle->cur <= nr_meta_pages) {
2101                clear_page(buffer);
2102                pack_pfns(buffer, &orig_bm);
2103        } else {
2104                struct page *page;
2105
2106                page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
2107                if (PageHighMem(page)) {
2108                        /*
2109                         * Highmem pages are copied to the buffer,
2110                         * because we can't return with a kmapped
2111                         * highmem page (we may not be called again).
2112                         */
2113                        void *kaddr;
2114
2115                        kaddr = kmap_atomic(page);
2116                        copy_page(buffer, kaddr);
2117                        kunmap_atomic(kaddr);
2118                        handle->buffer = buffer;
2119                } else {
2120                        handle->buffer = page_address(page);
2121                }
2122        }
2123        handle->cur++;
2124        return PAGE_SIZE;
2125}
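
/*
 * Illustrative use of snapshot_read_next() (a sketch only; the real
 * consumers are the swap-writing and snapshot-device code, and
 * save_image_page() is a hypothetical sink for the data):
 *
 *	struct snapshot_handle handle;
 *	int ret;
 *
 *	memset(&handle, 0, sizeof(handle));
 *	while ((ret = snapshot_read_next(&handle)) > 0)
 *		save_image_page(data_of(handle), PAGE_SIZE);
 *
 * A return value of 0 marks the end of the data stream and a negative
 * value an error.
 */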
2126
2127static void duplicate_memory_bitmap(struct memory_bitmap *dst,
2128                                    struct memory_bitmap *src)
2129{
2130        unsigned long pfn;
2131
2132        memory_bm_position_reset(src);
2133        pfn = memory_bm_next_pfn(src);
2134        while (pfn != BM_END_OF_MAP) {
2135                memory_bm_set_bit(dst, pfn);
2136                pfn = memory_bm_next_pfn(src);
2137        }
2138}
2139
2140/**
2141 * mark_unsafe_pages - Mark pages that were used before hibernation.
2142 *
2143 * Mark the pages that cannot be used for storing the image during restoration,
2144 * because they conflict with the pages that had been used before hibernation.
2145 */
2146static void mark_unsafe_pages(struct memory_bitmap *bm)
2147{
2148        unsigned long pfn;
2149
2150        /* Clear the "free"/"unsafe" bit for all PFNs */
2151        memory_bm_position_reset(free_pages_map);
2152        pfn = memory_bm_next_pfn(free_pages_map);
2153        while (pfn != BM_END_OF_MAP) {
2154                memory_bm_clear_current(free_pages_map);
2155                pfn = memory_bm_next_pfn(free_pages_map);
2156        }
2157
2158        /* Mark pages that correspond to the "original" PFNs as "unsafe" */
2159        duplicate_memory_bitmap(free_pages_map, bm);
2160
2161        allocated_unsafe_pages = 0;
2162}
2163
2164static int check_header(struct swsusp_info *info)
2165{
2166        char *reason;
2167
2168        reason = check_image_kernel(info);
2169        if (!reason && info->num_physpages != get_num_physpages())
2170                reason = "memory size";
2171        if (reason) {
2172                pr_err("Image mismatch: %s\n", reason);
2173                return -EPERM;
2174        }
2175        return 0;
2176}
2177
2178/**
2179 * load_header - Check the image header and copy the data from it.
2180 */
2181static int load_header(struct swsusp_info *info)
2182{
2183        int error;
2184
2185        restore_pblist = NULL;
2186        error = check_header(info);
2187        if (!error) {
2188                nr_copy_pages = info->image_pages;
2189                nr_meta_pages = info->pages - info->image_pages - 1;
2190        }
2191        return error;
2192}
2193
2194/**
2195 * unpack_orig_pfns - Set bits corresponding to given PFNs in a memory bitmap.
2196 * @bm: Memory bitmap.
2197 * @buf: Area of memory containing the PFNs.
2198 *
2199 * For each element of the array pointed to by @buf (1 page at a time), set the
2200 * corresponding bit in @bm.
2201 */
2202static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
2203{
2204        int j;
2205
2206        for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
2207                if (unlikely(buf[j] == BM_END_OF_MAP))
2208                        break;
2209
2210                /* Extract and buffer page key for data page (s390 only). */
2211                page_key_memorize(buf + j);
2212
2213                if (pfn_valid(buf[j]) && memory_bm_pfn_present(bm, buf[j]))
2214                        memory_bm_set_bit(bm, buf[j]);
2215                else
2216                        return -EFAULT;
2217        }
2218
2219        return 0;
2220}
2221
2222#ifdef CONFIG_HIGHMEM
2223/*
2224 * struct highmem_pbe is used for creating the list of highmem pages that
2225 * should be restored atomically during the resume from disk, because the page
2226 * frames they have occupied before the suspend are in use.
2227 */
2228struct highmem_pbe {
2229        struct page *copy_page; /* data is here now */
2230        struct page *orig_page; /* data was here before the suspend */
2231        struct highmem_pbe *next;
2232};
2233
2234/*
2235 * List of highmem PBEs needed for restoring the highmem pages that were
2236 * allocated before the suspend and included in the suspend image, but have
2237 * also been allocated by the "resume" kernel, so their contents cannot be
2238 * written directly to their "original" page frames.
2239 */
2240static struct highmem_pbe *highmem_pblist;
2241
2242/**
2243 * count_highmem_image_pages - Compute the number of highmem pages in the image.
2244 * @bm: Memory bitmap.
2245 *
2246 * The bits in @bm that correspond to image pages are assumed to be set.
2247 */
2248static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
2249{
2250        unsigned long pfn;
2251        unsigned int cnt = 0;
2252
2253        memory_bm_position_reset(bm);
2254        pfn = memory_bm_next_pfn(bm);
2255        while (pfn != BM_END_OF_MAP) {
2256                if (PageHighMem(pfn_to_page(pfn)))
2257                        cnt++;
2258
2259                pfn = memory_bm_next_pfn(bm);
2260        }
2261        return cnt;
2262}
2263
2264static unsigned int safe_highmem_pages;
2265
2266static struct memory_bitmap *safe_highmem_bm;
2267
2268/**
2269 * prepare_highmem_image - Allocate memory for loading highmem data from image.
2270 * @bm: Pointer to an uninitialized memory bitmap structure.
2271 * @nr_highmem_p: Pointer to the number of highmem image pages.
2272 *
2273 * Try to allocate as many highmem pages as there are highmem image pages
2274 * (@nr_highmem_p points to the variable containing the number of highmem image
2275 * pages).  The pages that are "safe" (i.e. will not be overwritten when the
2276 * hibernation image is restored entirely) have the corresponding bits set in
2277 * @bm (it must be uninitialized).
2278 *
2279 * NOTE: This function should not be called if there are no highmem image pages.
2280 */
2281static int prepare_highmem_image(struct memory_bitmap *bm,
2282                                 unsigned int *nr_highmem_p)
2283{
2284        unsigned int to_alloc;
2285
2286        if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
2287                return -ENOMEM;
2288
2289        if (get_highmem_buffer(PG_SAFE))
2290                return -ENOMEM;
2291
2292        to_alloc = count_free_highmem_pages();
2293        if (to_alloc > *nr_highmem_p)
2294                to_alloc = *nr_highmem_p;
2295        else
2296                *nr_highmem_p = to_alloc;
2297
2298        safe_highmem_pages = 0;
2299        while (to_alloc-- > 0) {
2300                struct page *page;
2301
2302                page = alloc_page(__GFP_HIGHMEM);
2303                if (!swsusp_page_is_free(page)) {
2304                        /* The page is "safe", set its bit in the bitmap */
2305                        memory_bm_set_bit(bm, page_to_pfn(page));
2306                        safe_highmem_pages++;
2307                }
2308                /* Mark the page as allocated */
2309                swsusp_set_page_forbidden(page);
2310                swsusp_set_page_free(page);
2311        }
2312        memory_bm_position_reset(bm);
2313        safe_highmem_bm = bm;
2314        return 0;
2315}
2316
2317static struct page *last_highmem_page;
2318
2319/**
2320 * get_highmem_page_buffer - Prepare a buffer to store a highmem image page.
2321 *
2322 * For a given highmem image page get a buffer that snapshot_write_next() should
2323 * return to its caller to write to.
2324 *
2325 * If the page is to be saved to its "original" page frame or a copy of
2326 * the page is to be made in the highmem, @buffer is returned.  Otherwise,
2327 * the copy of the page is to be made in normal memory, so the address of
2328 * the copy is returned.
2329 *
2330 * If @buffer is returned, the caller of snapshot_write_next() will write
2331 * the page's contents to @buffer, so they will have to be copied to the
2332 * right location on the next call to snapshot_write_next() and it is done
2333 * with the help of copy_last_highmem_page().  For this purpose, if
2334 * @buffer is returned, @last_highmem_page is set to the page to which
2335 * the data will have to be copied from @buffer.
2336 */
2337static void *get_highmem_page_buffer(struct page *page,
2338                                     struct chain_allocator *ca)
2339{
2340        struct highmem_pbe *pbe;
2341        void *kaddr;
2342
2343        if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) {
2344                /*
2345                 * We have allocated the "original" page frame and we can
2346                 * use it directly to store the loaded page.
2347                 */
2348                last_highmem_page = page;
2349                return buffer;
2350        }
2351        /*
2352         * The "original" page frame has not been allocated and we have to
2353         * use a "safe" page frame to store the loaded page.
2354         */
2355        pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
2356        if (!pbe) {
2357                swsusp_free();
2358                return ERR_PTR(-ENOMEM);
2359        }
2360        pbe->orig_page = page;
2361        if (safe_highmem_pages > 0) {
2362                struct page *tmp;
2363
2364                /* Copy of the page will be stored in high memory */
2365                kaddr = buffer;
2366                tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm));
2367                safe_highmem_pages--;
2368                last_highmem_page = tmp;
2369                pbe->copy_page = tmp;
2370        } else {
2371                /* Copy of the page will be stored in normal memory */
2372                kaddr = safe_pages_list;
2373                safe_pages_list = safe_pages_list->next;
2374                pbe->copy_page = virt_to_page(kaddr);
2375        }
2376        pbe->next = highmem_pblist;
2377        highmem_pblist = pbe;
2378        return kaddr;
2379}
2380
2381/**
2382 * copy_last_highmem_page - Copy the most recent highmem image page.
2383 *
2384 * Copy the contents of a highmem image page from @buffer, where the caller of
2385 * snapshot_write_next() has stored them, to the right location represented by
2386 * @last_highmem_page.
2387 */
2388static void copy_last_highmem_page(void)
2389{
2390        if (last_highmem_page) {
2391                void *dst;
2392
2393                dst = kmap_atomic(last_highmem_page);
2394                copy_page(dst, buffer);
2395                kunmap_atomic(dst);
2396                last_highmem_page = NULL;
2397        }
2398}
2399
2400static inline int last_highmem_page_copied(void)
2401{
2402        return !last_highmem_page;
2403}
2404
2405static inline void free_highmem_data(void)
2406{
2407        if (safe_highmem_bm)
2408                memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR);
2409
2410        if (buffer)
2411                free_image_page(buffer, PG_UNSAFE_CLEAR);
2412}
2413#else
2414static unsigned int count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }
2415
2416static inline int prepare_highmem_image(struct memory_bitmap *bm,
2417                                        unsigned int *nr_highmem_p) { return 0; }
2418
2419static inline void *get_highmem_page_buffer(struct page *page,
2420                                            struct chain_allocator *ca)
2421{
2422        return ERR_PTR(-EINVAL);
2423}
2424
2425static inline void copy_last_highmem_page(void) {}
2426static inline int last_highmem_page_copied(void) { return 1; }
2427static inline void free_highmem_data(void) {}
2428#endif /* CONFIG_HIGHMEM */
2429
2430#define PBES_PER_LINKED_PAGE    (LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))
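
/*
 * Example: with 4 KB pages on a 64-bit kernel, LINKED_PAGE_DATA_SIZE
 * is PAGE_SIZE - sizeof(void *) = 4088 bytes and struct pbe is three
 * pointers (24 bytes), so each linked page holds 170 PBEs.
 */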
2431
2432/**
2433 * prepare_image - Make room for loading hibernation image.
2434 * @new_bm: Uninitialized memory bitmap structure.
2435 * @bm: Memory bitmap with unsafe pages marked.
2436 *
2437 * Use @bm to mark the pages that will be overwritten in the process of
2438 * restoring the system memory state from the suspend image ("unsafe" pages)
2439 * and allocate memory for the image.
2440 *
2441 * The idea is to allocate a new memory bitmap first and then allocate
2442 * as many pages as needed for image data, but without specifying what those
2443 * pages will be used for just yet.  Instead, we mark them all as allocated and
2444 * create a list of "safe" pages to be used later.  On systems with high
2445 * memory a list of "safe" highmem pages is created too.
2446 */
2447static int prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
2448{
2449        unsigned int nr_pages, nr_highmem;
2450        struct linked_page *lp;
2451        int error;
2452
2453        /* If there is no highmem, the buffer will not be necessary */
2454        free_image_page(buffer, PG_UNSAFE_CLEAR);
2455        buffer = NULL;
2456
2457        nr_highmem = count_highmem_image_pages(bm);
2458        mark_unsafe_pages(bm);
2459
2460        error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
2461        if (error)
2462                goto Free;
2463
2464        duplicate_memory_bitmap(new_bm, bm);
2465        memory_bm_free(bm, PG_UNSAFE_KEEP);
2466        if (nr_highmem > 0) {
2467                error = prepare_highmem_image(bm, &nr_highmem);
2468                if (error)
2469                        goto Free;
2470        }
2471        /*
2472         * Reserve some safe pages for potential later use.
2473         *
2474         * NOTE: This way we make sure there will be enough safe pages for the
2475         * chain_alloc() in get_buffer().  It is a bit wasteful, but
2476         * nr_copy_pages cannot be greater than 50% of the memory anyway.
2477         *
2478         * Also, nr_copy_pages cannot be less than allocated_unsafe_pages.
2479         */
2480        nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
2481        nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
2482        while (nr_pages > 0) {
2483                lp = get_image_page(GFP_ATOMIC, PG_SAFE);
2484                if (!lp) {
2485                        error = -ENOMEM;
2486                        goto Free;
2487                }
2488                lp->next = safe_pages_list;
2489                safe_pages_list = lp;
2490                nr_pages--;
2491        }
2492        /* Preallocate memory for the image */
2493        nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
2494        while (nr_pages > 0) {
2495                lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
2496                if (!lp) {
2497                        error = -ENOMEM;
2498                        goto Free;
2499                }
2500                if (!swsusp_page_is_free(virt_to_page(lp))) {
2501                        /* The page is "safe", add it to the list */
2502                        lp->next = safe_pages_list;
2503                        safe_pages_list = lp;
2504                }
2505                /* Mark the page as allocated */
2506                swsusp_set_page_forbidden(virt_to_page(lp));
2507                swsusp_set_page_free(virt_to_page(lp));
2508                nr_pages--;
2509        }
2510        return 0;
2511
2512 Free:
2513        swsusp_free();
2514        return error;
2515}
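
/*
 * Example with illustrative numbers: restoring an image with
 * nr_copy_pages = 100000, no highmem pages and
 * allocated_unsafe_pages = 5000 reserves DIV_ROUND_UP(95000, 170) =
 * 559 linked pages for the PBE chains (170 PBEs per linked page, see
 * above) and then preallocates 95000 page frames for the image data
 * itself.
 */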
2516
2517/**
2518 * get_buffer - Get the address to store the next image data page.
2519 *
2520 * Get the address that snapshot_write_next() should return to its caller to
2521 * write to.
2522 */
2523static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
2524{
2525        struct pbe *pbe;
2526        struct page *page;
2527        unsigned long pfn = memory_bm_next_pfn(bm);
2528
2529        if (pfn == BM_END_OF_MAP)
2530                return ERR_PTR(-EFAULT);
2531
2532        page = pfn_to_page(pfn);
2533        if (PageHighMem(page))
2534                return get_highmem_page_buffer(page, ca);
2535
2536        if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page))
2537                /*
2538                 * We have allocated the "original" page frame and we can
2539                 * use it directly to store the loaded page.
2540                 */
2541                return page_address(page);
2542
2543        /*
2544         * The "original" page frame has not been allocated and we have to
2545         * use a "safe" page frame to store the loaded page.
2546         */
2547        pbe = chain_alloc(ca, sizeof(struct pbe));
2548        if (!pbe) {
2549                swsusp_free();
2550                return ERR_PTR(-ENOMEM);
2551        }
2552        pbe->orig_address = page_address(page);
2553        pbe->address = safe_pages_list;
2554        safe_pages_list = safe_pages_list->next;
2555        pbe->next = restore_pblist;
2556        restore_pblist = pbe;
2557        return pbe->address;
2558}
2559
2560/**
2561 * snapshot_write_next - Get the address to store the next image page.
2562 * @handle: Snapshot handle structure to guide the writing.
2563 *
2564 * On the first call, @handle should point to a zeroed snapshot_handle
2565 * structure.  The structure is then populated and a pointer to it should be
2566 * passed to this function on every subsequent call.
2567 *
2568 * On success, the function returns a positive number.  Then, the caller
2569 * is allowed to write up to the returned number of bytes to the memory
2570 * location computed by the data_of() macro.
2571 *
2572 * The function returns 0 to indicate the "end of file" condition.  Negative
2573 * numbers are returned on errors, in which case the structure pointed to by
2574 * @handle is not updated and should not be used any more.
2575 */
2576int snapshot_write_next(struct snapshot_handle *handle)
2577{
2578        static struct chain_allocator ca;
2579        int error = 0;
2580
2581        /* Check if we have already loaded the entire image */
2582        if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages)
2583                return 0;
2584
2585        handle->sync_read = 1;
2586
2587        if (!handle->cur) {
2588                if (!buffer)
2589                        /* This ensures the buffer is freed by swsusp_free() */
2590                        buffer = get_image_page(GFP_ATOMIC, PG_ANY);
2591
2592                if (!buffer)
2593                        return -ENOMEM;
2594
2595                handle->buffer = buffer;
2596        } else if (handle->cur == 1) {
2597                error = load_header(buffer);
2598                if (error)
2599                        return error;
2600
2601                safe_pages_list = NULL;
2602
2603                error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
2604                if (error)
2605                        return error;
2606
2607                /* Allocate buffer for page keys. */
2608                error = page_key_alloc(nr_copy_pages);
2609                if (error)
2610                        return error;
2611
2612                hibernate_restore_protection_begin();
2613        } else if (handle->cur <= nr_meta_pages + 1) {
2614                error = unpack_orig_pfns(buffer, &copy_bm);
2615                if (error)
2616                        return error;
2617
2618                if (handle->cur == nr_meta_pages + 1) {
2619                        error = prepare_image(&orig_bm, &copy_bm);
2620                        if (error)
2621                                return error;
2622
2623                        chain_init(&ca, GFP_ATOMIC, PG_SAFE);
2624                        memory_bm_position_reset(&orig_bm);
2625                        restore_pblist = NULL;
2626                        handle->buffer = get_buffer(&orig_bm, &ca);
2627                        handle->sync_read = 0;
2628                        if (IS_ERR(handle->buffer))
2629                                return PTR_ERR(handle->buffer);
2630                }
2631        } else {
2632                copy_last_highmem_page();
2633                /* Restore page key for data page (s390 only). */
2634                page_key_write(handle->buffer);
2635                hibernate_restore_protect_page(handle->buffer);
2636                handle->buffer = get_buffer(&orig_bm, &ca);
2637                if (IS_ERR(handle->buffer))
2638                        return PTR_ERR(handle->buffer);
2639                if (handle->buffer != buffer)
2640                        handle->sync_read = 0;
2641        }
2642        handle->cur++;
2643        return PAGE_SIZE;
2644}
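
/*
 * Illustrative use of snapshot_write_next() (a sketch only;
 * load_image_page() is a hypothetical source of image data standing in
 * for, e.g., the swap reader):
 *
 *	struct snapshot_handle handle;
 *	int ret;
 *
 *	memset(&handle, 0, sizeof(handle));
 *	while ((ret = snapshot_write_next(&handle)) > 0) {
 *		if (load_image_page(data_of(handle), PAGE_SIZE))
 *			break;
 *	}
 *	snapshot_write_finalize(&handle);
 *	if (!snapshot_image_loaded(&handle))
 *		ret = -ENODATA;
 */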
2645
2646/**
2647 * snapshot_write_finalize - Complete the loading of a hibernation image.
2648 *
2649 * Must be called after the last call to snapshot_write_next() in case the last
2650 * page in the image happens to be a highmem page and its contents should be
2651 * stored in highmem.  Additionally, it recycles bitmap memory that's not
2652 * necessary any more.
2653 */
2654void snapshot_write_finalize(struct snapshot_handle *handle)
2655{
2656        copy_last_highmem_page();
2657        /* Restore page key for data page (s390 only). */
2658        page_key_write(handle->buffer);
2659        page_key_free();
2660        hibernate_restore_protect_page(handle->buffer);
2661        /* Do that only if we have loaded the image entirely */
2662        if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) {
2663                memory_bm_recycle(&orig_bm);
2664                free_highmem_data();
2665        }
2666}
2667
2668int snapshot_image_loaded(struct snapshot_handle *handle)
2669{
2670        return !(!nr_copy_pages || !last_highmem_page_copied() ||
2671                        handle->cur <= nr_meta_pages + nr_copy_pages);
2672}
2673
2674#ifdef CONFIG_HIGHMEM
2675/* Assumes that @buf is ready and points to a "safe" page */
2676static inline void swap_two_pages_data(struct page *p1, struct page *p2,
2677                                       void *buf)
2678{
2679        void *kaddr1, *kaddr2;
2680
2681        kaddr1 = kmap_atomic(p1);
2682        kaddr2 = kmap_atomic(p2);
2683        copy_page(buf, kaddr1);
2684        copy_page(kaddr1, kaddr2);
2685        copy_page(kaddr2, buf);
2686        kunmap_atomic(kaddr2);
2687        kunmap_atomic(kaddr1);
2688}
2689
2690/**
2691 * restore_highmem - Put highmem image pages into their original locations.
2692 *
2693 * For each highmem page that was in use before hibernation and is included in
2694 * the image, and also has been allocated by the "restore" kernel, swap its
2695 * current contents with the previous (i.e. "before hibernation") ones.
2696 *
2697 * If the restore eventually fails, we can call this function once again and
2698 * restore the highmem state as seen by the restore kernel.
2699 */
2700int restore_highmem(void)
2701{
2702        struct highmem_pbe *pbe = highmem_pblist;
2703        void *buf;
2704
2705        if (!pbe)
2706                return 0;
2707
2708        buf = get_image_page(GFP_ATOMIC, PG_SAFE);
2709        if (!buf)
2710                return -ENOMEM;
2711
2712        while (pbe) {
2713                swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
2714                pbe = pbe->next;
2715        }
2716        free_image_page(buf, PG_UNSAFE_CLEAR);
2717        return 0;
2718}
2719#endif /* CONFIG_HIGHMEM */
2720