linux/kernel/power/snapshot.c
/*
 * linux/kernel/power/snapshot.c
 *
 * This file provides system snapshot/restore functionality for swsusp.
 *
 * Copyright (C) 1998-2005 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 *
 * This file is released under the GPLv2.
 *
 */

#include <linux/version.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/syscalls.h>
#include <linux/console.h>
#include <linux/highmem.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/compiler.h>
#include <linux/ktime.h>

#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#include "power.h"

#ifdef CONFIG_DEBUG_RODATA
static bool hibernate_restore_protection;
static bool hibernate_restore_protection_active;

void enable_restore_image_protection(void)
{
        hibernate_restore_protection = true;
}

static inline void hibernate_restore_protection_begin(void)
{
        hibernate_restore_protection_active = hibernate_restore_protection;
}

static inline void hibernate_restore_protection_end(void)
{
        hibernate_restore_protection_active = false;
}

static inline void hibernate_restore_protect_page(void *page_address)
{
        if (hibernate_restore_protection_active)
                set_memory_ro((unsigned long)page_address, 1);
}

static inline void hibernate_restore_unprotect_page(void *page_address)
{
        if (hibernate_restore_protection_active)
                set_memory_rw((unsigned long)page_address, 1);
}
#else
static inline void hibernate_restore_protection_begin(void) {}
static inline void hibernate_restore_protection_end(void) {}
static inline void hibernate_restore_protect_page(void *page_address) {}
static inline void hibernate_restore_unprotect_page(void *page_address) {}
#endif /* CONFIG_DEBUG_RODATA */
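
/*
 * Illustrative sketch (not part of the original file): the restore path is
 * expected to use the helpers above roughly as follows, protecting each page
 * frame as soon as its contents have been restored.  The loop construct and
 * copy helper named here are hypothetical placeholders.
 *
 *	hibernate_restore_protection_begin();
 *	for_each_restored_page(page) {			// hypothetical
 *		void *addr = page_address(page);
 *
 *		copy_image_data_to(addr);		// hypothetical
 *		hibernate_restore_protect_page(addr);	// now read-only
 *	}
 *	...
 *	hibernate_restore_unprotect_page(addr);		// before reuse/free
 *	hibernate_restore_protection_end();
 */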

static int swsusp_page_is_free(struct page *);
static void swsusp_set_page_forbidden(struct page *);
static void swsusp_unset_page_forbidden(struct page *);

/*
 * Number of bytes to reserve for memory allocations made by device drivers
 * from their ->freeze() and ->freeze_noirq() callbacks so that they don't
 * cause image creation to fail (tunable via /sys/power/reserved_size).
 */
unsigned long reserved_size;

void __init hibernate_reserved_size_init(void)
{
        reserved_size = SPARE_PAGES * PAGE_SIZE;
}

/*
 * Preferred image size in bytes (tunable via /sys/power/image_size).
 * When it is set to N, swsusp will do its best to ensure the image
 * size will not exceed N bytes, but if that is impossible, it will
 * try to create the smallest image possible.
 */
unsigned long image_size;

void __init hibernate_image_size_init(void)
{
        image_size = ((totalram_pages * 2) / 5) * PAGE_SIZE;
}
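
/*
 * Worked example (illustrative, assuming 4 KiB pages): on a machine with
 * 1 GiB of RAM, totalram_pages is 262144, so the default image_size is
 * ((262144 * 2) / 5) * 4096 = 104857 * 4096 bytes, i.e. about 410 MiB,
 * or 2/5 of RAM.  Writing to /sys/power/image_size overrides this default.
 */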

/*
 * List of PBEs needed for restoring the pages that were allocated before
 * the suspend and included in the suspend image, but have also been
 * allocated by the "resume" kernel, so their contents cannot be written
 * directly to their "original" page frames.
 */
struct pbe *restore_pblist;

/* struct linked_page is used to build chains of pages */

#define LINKED_PAGE_DATA_SIZE   (PAGE_SIZE - sizeof(void *))

struct linked_page {
        struct linked_page *next;
        char data[LINKED_PAGE_DATA_SIZE];
} __packed;

/*
 * List of "safe" pages (i.e. pages that were not used by the image kernel
 * before hibernation) that may be used as temporary storage for image kernel
 * memory contents.
 */
static struct linked_page *safe_pages_list;

/* Pointer to an auxiliary buffer (1 page) */
static void *buffer;

#define PG_ANY          0
#define PG_SAFE         1
#define PG_UNSAFE_CLEAR 1
#define PG_UNSAFE_KEEP  0

static unsigned int allocated_unsafe_pages;

/**
 * get_image_page - Allocate a page for a hibernation image.
 * @gfp_mask: GFP mask for the allocation.
 * @safe_needed: Get pages that were not used before hibernation (restore only)
 *
 * During image restoration, for storing the PBE list and the image data, we can
 * only use memory pages that do not conflict with the pages used before
 * hibernation.  The "unsafe" pages have PageNosaveFree set and we count them
 * using allocated_unsafe_pages.
 *
 * Each allocated image page is marked as PageNosave and PageNosaveFree so that
 * swsusp_free() can release it.
 */
static void *get_image_page(gfp_t gfp_mask, int safe_needed)
{
        void *res;

        res = (void *)get_zeroed_page(gfp_mask);
        if (safe_needed)
                while (res && swsusp_page_is_free(virt_to_page(res))) {
                        /* The page is unsafe, mark it for swsusp_free() */
                        swsusp_set_page_forbidden(virt_to_page(res));
                        allocated_unsafe_pages++;
                        res = (void *)get_zeroed_page(gfp_mask);
                }
        if (res) {
                swsusp_set_page_forbidden(virt_to_page(res));
                swsusp_set_page_free(virt_to_page(res));
        }
        return res;
}

static void *__get_safe_page(gfp_t gfp_mask)
{
        if (safe_pages_list) {
                void *ret = safe_pages_list;

                safe_pages_list = safe_pages_list->next;
                memset(ret, 0, PAGE_SIZE);
                return ret;
        }
        return get_image_page(gfp_mask, PG_SAFE);
}

unsigned long get_safe_page(gfp_t gfp_mask)
{
        return (unsigned long)__get_safe_page(gfp_mask);
}

static struct page *alloc_image_page(gfp_t gfp_mask)
{
        struct page *page;

        page = alloc_page(gfp_mask);
        if (page) {
                swsusp_set_page_forbidden(page);
                swsusp_set_page_free(page);
        }
        return page;
}

static void recycle_safe_page(void *page_address)
{
        struct linked_page *lp = page_address;

        lp->next = safe_pages_list;
        safe_pages_list = lp;
}

/**
 * free_image_page - Free a page allocated for hibernation image.
 * @addr: Address of the page to free.
 * @clear_nosave_free: If set, clear the PageNosaveFree bit for the page.
 *
 * The page to free should have been allocated by get_image_page() (page flags
 * set by it are affected).
 */
static inline void free_image_page(void *addr, int clear_nosave_free)
{
        struct page *page;

        BUG_ON(!virt_addr_valid(addr));

        page = virt_to_page(addr);

        swsusp_unset_page_forbidden(page);
        if (clear_nosave_free)
                swsusp_unset_page_free(page);

        __free_page(page);
}

static inline void free_list_of_pages(struct linked_page *list,
                                      int clear_page_nosave)
{
        while (list) {
                struct linked_page *lp = list->next;

                free_image_page(list, clear_page_nosave);
                list = lp;
        }
}

/*
 * struct chain_allocator is used for allocating small objects out of
 * a linked list of pages called 'the chain'.
 *
 * The chain grows each time there is no room for a new object in
 * the current page.  The allocated objects cannot be freed individually.
 * It is only possible to free them all at once, by freeing the entire
 * chain.
 *
 * NOTE: The chain allocator may be inefficient if the allocated objects
 * are not much smaller than PAGE_SIZE.
 */
struct chain_allocator {
        struct linked_page *chain;      /* the chain */
        unsigned int used_space;        /* total size of objects allocated out
                                           of the current page */
        gfp_t gfp_mask;         /* mask for allocating pages */
        int safe_needed;        /* if set, only "safe" pages are allocated */
};

static void chain_init(struct chain_allocator *ca, gfp_t gfp_mask,
                       int safe_needed)
{
        ca->chain = NULL;
        ca->used_space = LINKED_PAGE_DATA_SIZE;
        ca->gfp_mask = gfp_mask;
        ca->safe_needed = safe_needed;
}

static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
{
        void *ret;

        if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
                struct linked_page *lp;

                lp = ca->safe_needed ? __get_safe_page(ca->gfp_mask) :
                                        get_image_page(ca->gfp_mask, PG_ANY);
                if (!lp)
                        return NULL;

                lp->next = ca->chain;
                ca->chain = lp;
                ca->used_space = 0;
        }
        ret = ca->chain->data + ca->used_space;
        ca->used_space += size;
        return ret;
}
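
/*
 * Illustrative usage sketch (not part of the original file): the memory
 * bitmap code below routes all of its small allocations through one chain
 * allocator and frees them in a single pass, along these lines:
 *
 *	struct chain_allocator ca;
 *	struct rtree_node *node;
 *
 *	chain_init(&ca, GFP_KERNEL, PG_ANY);
 *	node = chain_alloc(&ca, sizeof(struct rtree_node));
 *	...
 *	free_list_of_pages(ca.chain, PG_UNSAFE_CLEAR);	// frees everything
 */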

/*
 * Data types related to memory bitmaps.
 *
 * struct memory_bitmap contains a list of struct mem_zone_bm_rtree objects,
 * one for each populated memory zone, a struct bm_position used for browsing
 * the bitmap, and a pointer to the list of pages used for allocating all of
 * the zone bitmap objects and bitmap block objects.
 *
 * NOTE: It has to be possible to lay out the bitmap in memory
 * using only allocations of order 0.  Additionally, the bitmap is
 * designed to work with an arbitrary number of zones (this is over the
 * top for now, but let's avoid making unnecessary assumptions ;-).
 *
 * The memory bitmap is organized as a radix tree to guarantee fast random
 * access to the bits.  There is one radix tree for each zone (as returned
 * from create_mem_extents).
 *
 * One radix tree is represented by one struct mem_zone_bm_rtree.  There are
 * two linked lists for the nodes of the tree, one for the inner nodes and
 * one for the leaf nodes.  The linked leaf nodes are used for fast linear
 * access of the memory bitmap.
 *
 * The struct rtree_node represents one node of the radix tree.
 */

#define BM_END_OF_MAP   (~0UL)

#define BM_BITS_PER_BLOCK       (PAGE_SIZE * BITS_PER_BYTE)
#define BM_BLOCK_SHIFT          (PAGE_SHIFT + 3)
#define BM_BLOCK_MASK           ((1UL << BM_BLOCK_SHIFT) - 1)
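
/*
 * Worked example (illustrative, assuming 4 KiB pages): BM_BLOCK_SHIFT is
 * 12 + 3 = 15, so one bitmap block (one page of bits) covers
 * 1 << 15 = 32768 page frames.  A PFN at offset 40000 from the start of
 * its zone therefore lives in bitmap block 40000 >> BM_BLOCK_SHIFT = 1,
 * at bit position 40000 & BM_BLOCK_MASK = 7232 within that block.
 */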

/*
 * struct rtree_node is a wrapper struct to link the nodes
 * of the rtree together for easy linear iteration over
 * bits and easy freeing.
 */
struct rtree_node {
        struct list_head list;
        unsigned long *data;
};

/*
 * struct mem_zone_bm_rtree represents a bitmap used for one
 * populated memory zone.
 */
struct mem_zone_bm_rtree {
        struct list_head list;          /* Link Zones together         */
        struct list_head nodes;         /* Radix Tree inner nodes      */
        struct list_head leaves;        /* Radix Tree leaves           */
        unsigned long start_pfn;        /* Zone start page frame       */
        unsigned long end_pfn;          /* Zone end page frame + 1     */
        struct rtree_node *rtree;       /* Radix Tree Root             */
        int levels;                     /* Number of Radix Tree Levels */
        unsigned int blocks;            /* Number of Bitmap Blocks     */
};

/* struct bm_position is used for browsing memory bitmaps */

struct bm_position {
        struct mem_zone_bm_rtree *zone;
        struct rtree_node *node;
        unsigned long node_pfn;
        int node_bit;
};

struct memory_bitmap {
        struct list_head zones;
        struct linked_page *p_list;     /* list of pages used to store zone
                                           bitmap objects and bitmap block
                                           objects */
        struct bm_position cur; /* most recently used bit position */
};

/* Functions that operate on memory bitmaps */

#define BM_ENTRIES_PER_LEVEL    (PAGE_SIZE / sizeof(unsigned long))
#if BITS_PER_LONG == 32
#define BM_RTREE_LEVEL_SHIFT    (PAGE_SHIFT - 2)
#else
#define BM_RTREE_LEVEL_SHIFT    (PAGE_SHIFT - 3)
#endif
#define BM_RTREE_LEVEL_MASK     ((1UL << BM_RTREE_LEVEL_SHIFT) - 1)
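
/*
 * Worked example (illustrative, assuming 4 KiB pages on a 64-bit system):
 * BM_RTREE_LEVEL_SHIFT is 12 - 3 = 9, so each radix tree node holds
 * BM_ENTRIES_PER_LEVEL = 512 entries.  A one-level tree can therefore index
 * 512 bitmap blocks, i.e. 512 * 32768 PFNs, which covers a 64 GiB zone;
 * larger zones simply get additional levels.
 */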

/**
 * alloc_rtree_node - Allocate a new node and add it to the radix tree.
 *
 * This function is used to allocate inner nodes as well as the
 * leaf nodes of the radix tree. It also adds the node to the
 * corresponding linked list passed in by the *list parameter.
 */
static struct rtree_node *alloc_rtree_node(gfp_t gfp_mask, int safe_needed,
                                           struct chain_allocator *ca,
                                           struct list_head *list)
{
        struct rtree_node *node;

        node = chain_alloc(ca, sizeof(struct rtree_node));
        if (!node)
                return NULL;

        node->data = get_image_page(gfp_mask, safe_needed);
        if (!node->data)
                return NULL;

        list_add_tail(&node->list, list);

        return node;
}

/**
 * add_rtree_block - Add a new leaf node to the radix tree.
 *
 * The leaf nodes need to be allocated in order to keep the leaves
 * linked list in order. This is guaranteed by the zone->blocks
 * counter.
 */
static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask,
                           int safe_needed, struct chain_allocator *ca)
{
        struct rtree_node *node, *block, **dst;
        unsigned int levels_needed, block_nr;
        int i;

        block_nr = zone->blocks;
        levels_needed = 0;

        /* How many levels do we need for this block nr? */
        while (block_nr) {
                levels_needed += 1;
                block_nr >>= BM_RTREE_LEVEL_SHIFT;
        }

        /* Make sure the rtree has enough levels */
        for (i = zone->levels; i < levels_needed; i++) {
                node = alloc_rtree_node(gfp_mask, safe_needed, ca,
                                        &zone->nodes);
                if (!node)
                        return -ENOMEM;

                node->data[0] = (unsigned long)zone->rtree;
                zone->rtree = node;
                zone->levels += 1;
        }

        /* Allocate new block */
        block = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->leaves);
        if (!block)
                return -ENOMEM;

        /* Now walk the rtree to insert the block */
        node = zone->rtree;
        dst = &zone->rtree;
        block_nr = zone->blocks;
        for (i = zone->levels; i > 0; i--) {
                int index;

                if (!node) {
                        node = alloc_rtree_node(gfp_mask, safe_needed, ca,
                                                &zone->nodes);
                        if (!node)
                                return -ENOMEM;
                        *dst = node;
                }

                index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
                index &= BM_RTREE_LEVEL_MASK;
                dst = (struct rtree_node **)&((*dst)->data[index]);
                node = *dst;
        }

        zone->blocks += 1;
        *dst = block;

        return 0;
}

static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
                               int clear_nosave_free);

/**
 * create_zone_bm_rtree - Create a radix tree for one zone.
 *
 * Allocates the mem_zone_bm_rtree structure and initializes it.
 * This function also allocates and builds the radix tree for the
 * zone.
 */
static struct mem_zone_bm_rtree *create_zone_bm_rtree(gfp_t gfp_mask,
                                                      int safe_needed,
                                                      struct chain_allocator *ca,
                                                      unsigned long start,
                                                      unsigned long end)
{
        struct mem_zone_bm_rtree *zone;
        unsigned int i, nr_blocks;
        unsigned long pages;

        pages = end - start;
        zone  = chain_alloc(ca, sizeof(struct mem_zone_bm_rtree));
        if (!zone)
                return NULL;

        INIT_LIST_HEAD(&zone->nodes);
        INIT_LIST_HEAD(&zone->leaves);
        zone->start_pfn = start;
        zone->end_pfn = end;
        nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);

        for (i = 0; i < nr_blocks; i++) {
                if (add_rtree_block(zone, gfp_mask, safe_needed, ca)) {
                        free_zone_bm_rtree(zone, PG_UNSAFE_CLEAR);
                        return NULL;
                }
        }

        return zone;
}

/**
 * free_zone_bm_rtree - Free the memory of the radix tree.
 *
 * Free all node pages of the radix tree. The mem_zone_bm_rtree
 * structure itself is not freed here nor are the rtree_node
 * structs.
 */
static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
                               int clear_nosave_free)
{
        struct rtree_node *node;

        list_for_each_entry(node, &zone->nodes, list)
                free_image_page(node->data, clear_nosave_free);

        list_for_each_entry(node, &zone->leaves, list)
                free_image_page(node->data, clear_nosave_free);
}

static void memory_bm_position_reset(struct memory_bitmap *bm)
{
        bm->cur.zone = list_entry(bm->zones.next, struct mem_zone_bm_rtree,
                                  list);
        bm->cur.node = list_entry(bm->cur.zone->leaves.next,
                                  struct rtree_node, list);
        bm->cur.node_pfn = 0;
        bm->cur.node_bit = 0;
}

static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);

struct mem_extent {
        struct list_head hook;
        unsigned long start;
        unsigned long end;
};

/**
 * free_mem_extents - Free a list of memory extents.
 * @list: List of extents to free.
 */
static void free_mem_extents(struct list_head *list)
{
        struct mem_extent *ext, *aux;

        list_for_each_entry_safe(ext, aux, list, hook) {
                list_del(&ext->hook);
                kfree(ext);
        }
}

/**
 * create_mem_extents - Create a list of memory extents.
 * @list: List to put the extents into.
 * @gfp_mask: Mask to use for memory allocations.
 *
 * The extents represent contiguous ranges of PFNs.
 */
static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
{
        struct zone *zone;

        INIT_LIST_HEAD(list);

        for_each_populated_zone(zone) {
                unsigned long zone_start, zone_end;
                struct mem_extent *ext, *cur, *aux;

                zone_start = zone->zone_start_pfn;
                zone_end = zone_end_pfn(zone);

                list_for_each_entry(ext, list, hook)
                        if (zone_start <= ext->end)
                                break;

                if (&ext->hook == list || zone_end < ext->start) {
                        /* New extent is necessary */
                        struct mem_extent *new_ext;

                        new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask);
                        if (!new_ext) {
                                free_mem_extents(list);
                                return -ENOMEM;
                        }
                        new_ext->start = zone_start;
                        new_ext->end = zone_end;
                        list_add_tail(&new_ext->hook, &ext->hook);
                        continue;
                }

                /* Merge this zone's range of PFNs with the existing one */
                if (zone_start < ext->start)
                        ext->start = zone_start;
                if (zone_end > ext->end)
                        ext->end = zone_end;

                /* More merging may be possible */
                cur = ext;
                list_for_each_entry_safe_continue(cur, aux, list, hook) {
                        if (zone_end < cur->start)
                                break;
                        if (zone_end < cur->end)
                                ext->end = cur->end;
                        list_del(&cur->hook);
                        kfree(cur);
                }
        }

        return 0;
}

/**
 * memory_bm_create - Allocate memory for a memory bitmap.
 */
static int memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask,
                            int safe_needed)
{
        struct chain_allocator ca;
        struct list_head mem_extents;
        struct mem_extent *ext;
        int error;

        chain_init(&ca, gfp_mask, safe_needed);
        INIT_LIST_HEAD(&bm->zones);

        error = create_mem_extents(&mem_extents, gfp_mask);
        if (error)
                return error;

        list_for_each_entry(ext, &mem_extents, hook) {
                struct mem_zone_bm_rtree *zone;

                zone = create_zone_bm_rtree(gfp_mask, safe_needed, &ca,
                                            ext->start, ext->end);
                if (!zone) {
                        error = -ENOMEM;
                        goto Error;
                }
                list_add_tail(&zone->list, &bm->zones);
        }

        bm->p_list = ca.chain;
        memory_bm_position_reset(bm);
 Exit:
        free_mem_extents(&mem_extents);
        return error;

 Error:
        bm->p_list = ca.chain;
        memory_bm_free(bm, PG_UNSAFE_CLEAR);
        goto Exit;
}

/**
 * memory_bm_free - Free memory occupied by the memory bitmap.
 * @bm: Memory bitmap.
 */
static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
{
        struct mem_zone_bm_rtree *zone;

        list_for_each_entry(zone, &bm->zones, list)
                free_zone_bm_rtree(zone, clear_nosave_free);

        free_list_of_pages(bm->p_list, clear_nosave_free);

        INIT_LIST_HEAD(&bm->zones);
}

/**
 * memory_bm_find_bit - Find the bit for a given PFN in a memory bitmap.
 *
 * Find the bit in memory bitmap @bm that corresponds to the given PFN.
 * The cur.zone, cur.node and cur.node_pfn members of @bm are updated.
 *
 * Walk the radix tree to find the page containing the bit that represents @pfn
 * and return the position of the bit in @addr and @bit_nr.
 */
static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
                              void **addr, unsigned int *bit_nr)
{
        struct mem_zone_bm_rtree *curr, *zone;
        struct rtree_node *node;
        int i, block_nr;

        zone = bm->cur.zone;

        if (pfn >= zone->start_pfn && pfn < zone->end_pfn)
                goto zone_found;

        zone = NULL;

        /* Find the right zone */
        list_for_each_entry(curr, &bm->zones, list) {
                if (pfn >= curr->start_pfn && pfn < curr->end_pfn) {
                        zone = curr;
                        break;
                }
        }

        if (!zone)
                return -EFAULT;

zone_found:
        /*
         * We have found the zone. Now walk the radix tree to find the leaf node
         * for our PFN.
         */
        node = bm->cur.node;
        if (((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
                goto node_found;

        node      = zone->rtree;
        block_nr  = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT;

        for (i = zone->levels; i > 0; i--) {
                int index;

                index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
                index &= BM_RTREE_LEVEL_MASK;
                BUG_ON(node->data[index] == 0);
                node = (struct rtree_node *)node->data[index];
        }

node_found:
        /* Update last position */
        bm->cur.zone = zone;
        bm->cur.node = node;
        bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK;

        /* Set return values */
        *addr = node->data;
        *bit_nr = (pfn - zone->start_pfn) & BM_BLOCK_MASK;

        return 0;
}

static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
{
        void *addr;
        unsigned int bit;
        int error;

        error = memory_bm_find_bit(bm, pfn, &addr, &bit);
        BUG_ON(error);
        set_bit(bit, addr);
}

static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn)
{
        void *addr;
        unsigned int bit;
        int error;

        error = memory_bm_find_bit(bm, pfn, &addr, &bit);
        if (!error)
                set_bit(bit, addr);

        return error;
}

static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
{
        void *addr;
        unsigned int bit;
        int error;

        error = memory_bm_find_bit(bm, pfn, &addr, &bit);
        BUG_ON(error);
        clear_bit(bit, addr);
}

static void memory_bm_clear_current(struct memory_bitmap *bm)
{
        int bit;

        bit = max(bm->cur.node_bit - 1, 0);
        clear_bit(bit, bm->cur.node->data);
}

static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
{
        void *addr;
        unsigned int bit;
        int error;

        error = memory_bm_find_bit(bm, pfn, &addr, &bit);
        BUG_ON(error);
        return test_bit(bit, addr);
}

static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
{
        void *addr;
        unsigned int bit;

        return !memory_bm_find_bit(bm, pfn, &addr, &bit);
}

/*
 * rtree_next_node - Jump to the next leaf node.
 *
 * Set the position to the beginning of the next node in the
 * memory bitmap. This is either the next node in the current
 * zone's radix tree or the first node in the radix tree of the
 * next zone.
 *
 * Return true if there is a next node, false otherwise.
 */
static bool rtree_next_node(struct memory_bitmap *bm)
{
        if (!list_is_last(&bm->cur.node->list, &bm->cur.zone->leaves)) {
                bm->cur.node = list_entry(bm->cur.node->list.next,
                                          struct rtree_node, list);
                bm->cur.node_pfn += BM_BITS_PER_BLOCK;
                bm->cur.node_bit  = 0;
                touch_softlockup_watchdog();
                return true;
        }

        /* No more nodes, go to the next zone */
        if (!list_is_last(&bm->cur.zone->list, &bm->zones)) {
                bm->cur.zone = list_entry(bm->cur.zone->list.next,
                                  struct mem_zone_bm_rtree, list);
                bm->cur.node = list_entry(bm->cur.zone->leaves.next,
                                          struct rtree_node, list);
                bm->cur.node_pfn = 0;
                bm->cur.node_bit = 0;
                return true;
        }

        /* No more zones */
        return false;
}

/**
 * memory_bm_next_pfn - Find the next set bit in a memory bitmap.
 * @bm: Memory bitmap.
 *
 * Starting from the last returned position this function searches for the next
 * set bit in @bm and returns the PFN represented by it.  If no more bits are
 * set, BM_END_OF_MAP is returned.
 *
 * It is required to run memory_bm_position_reset() before the first call to
 * this function for the given memory bitmap.
 */
static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
{
        unsigned long bits, pfn, pages;
        int bit;

        do {
                pages     = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn;
                bits      = min(pages - bm->cur.node_pfn, BM_BITS_PER_BLOCK);
                bit       = find_next_bit(bm->cur.node->data, bits,
                                          bm->cur.node_bit);
                if (bit < bits) {
                        pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit;
                        bm->cur.node_bit = bit + 1;
                        return pfn;
                }
        } while (rtree_next_node(bm));

        return BM_END_OF_MAP;
}
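
/*
 * Illustrative iteration idiom (not part of the original file): this is how
 * the rest of this file walks all set bits of a bitmap, as in swsusp_free()
 * and clear_free_pages() below; do_something_with() is a hypothetical helper:
 *
 *	unsigned long pfn;
 *
 *	memory_bm_position_reset(bm);
 *	for (pfn = memory_bm_next_pfn(bm); pfn != BM_END_OF_MAP;
 *	     pfn = memory_bm_next_pfn(bm))
 *		do_something_with(pfn_to_page(pfn));
 */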

/*
 * This structure represents a range of page frames the contents of which
 * should not be saved during hibernation.
 */
struct nosave_region {
        struct list_head list;
        unsigned long start_pfn;
        unsigned long end_pfn;
};

static LIST_HEAD(nosave_regions);

static void recycle_zone_bm_rtree(struct mem_zone_bm_rtree *zone)
{
        struct rtree_node *node;

        list_for_each_entry(node, &zone->nodes, list)
                recycle_safe_page(node->data);

        list_for_each_entry(node, &zone->leaves, list)
                recycle_safe_page(node->data);
}

static void memory_bm_recycle(struct memory_bitmap *bm)
{
        struct mem_zone_bm_rtree *zone;
        struct linked_page *p_list;

        list_for_each_entry(zone, &bm->zones, list)
                recycle_zone_bm_rtree(zone);

        p_list = bm->p_list;
        while (p_list) {
                struct linked_page *lp = p_list;

                p_list = lp->next;
                recycle_safe_page(lp);
        }
}

/**
 * __register_nosave_region - Register a region of unsaveable memory.
 *
 * Register a range of page frames the contents of which should not be saved
 * during hibernation (to be used in the early initialization code).
 */
void __init __register_nosave_region(unsigned long start_pfn,
                                     unsigned long end_pfn, int use_kmalloc)
{
        struct nosave_region *region;

        if (start_pfn >= end_pfn)
                return;

        if (!list_empty(&nosave_regions)) {
                /* Try to extend the previous region (they should be sorted) */
                region = list_entry(nosave_regions.prev,
                                        struct nosave_region, list);
                if (region->end_pfn == start_pfn) {
                        region->end_pfn = end_pfn;
                        goto Report;
                }
        }
        if (use_kmalloc) {
                /* During init, this shouldn't fail */
                region = kmalloc(sizeof(struct nosave_region), GFP_KERNEL);
                BUG_ON(!region);
        } else {
                /* This allocation cannot fail */
                region = memblock_virt_alloc(sizeof(struct nosave_region), 0);
        }
        region->start_pfn = start_pfn;
        region->end_pfn = end_pfn;
        list_add_tail(&region->list, &nosave_regions);
 Report:
        printk(KERN_INFO "PM: Registered nosave memory: [mem %#010llx-%#010llx]\n",
                (unsigned long long) start_pfn << PAGE_SHIFT,
                ((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
}
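
/*
 * Illustrative usage sketch (not part of the original file): early arch code
 * typically reaches this function through the register_nosave_region()
 * wrapper declared in <linux/suspend.h>, e.g. to exclude a firmware-owned
 * page frame from the image:
 *
 *	// exclude one page frame starting at phys_addr (hypothetical address)
 *	register_nosave_region(PFN_DOWN(phys_addr), PFN_DOWN(phys_addr) + 1);
 *
 * The wrapper and PFN helper named here should be checked against the headers
 * of the kernel tree at hand.
 */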

/*
 * Set bits in this map correspond to the page frames the contents of which
 * should not be saved during the suspend.
 */
static struct memory_bitmap *forbidden_pages_map;

/* Set bits in this map correspond to free page frames. */
static struct memory_bitmap *free_pages_map;

/*
 * Each page frame allocated for creating the image is marked by setting the
 * corresponding bits in forbidden_pages_map and free_pages_map simultaneously
 */

void swsusp_set_page_free(struct page *page)
{
        if (free_pages_map)
                memory_bm_set_bit(free_pages_map, page_to_pfn(page));
}

static int swsusp_page_is_free(struct page *page)
{
        return free_pages_map ?
                memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0;
}

void swsusp_unset_page_free(struct page *page)
{
        if (free_pages_map)
                memory_bm_clear_bit(free_pages_map, page_to_pfn(page));
}

static void swsusp_set_page_forbidden(struct page *page)
{
        if (forbidden_pages_map)
                memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page));
}

int swsusp_page_is_forbidden(struct page *page)
{
        return forbidden_pages_map ?
                memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0;
}

static void swsusp_unset_page_forbidden(struct page *page)
{
        if (forbidden_pages_map)
                memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
}

/**
 * mark_nosave_pages - Mark pages that should not be saved.
 * @bm: Memory bitmap.
 *
 * Set the bits in @bm that correspond to the page frames the contents of which
 * should not be saved.
 */
static void mark_nosave_pages(struct memory_bitmap *bm)
{
        struct nosave_region *region;

        if (list_empty(&nosave_regions))
                return;

        list_for_each_entry(region, &nosave_regions, list) {
                unsigned long pfn;

                pr_debug("PM: Marking nosave pages: [mem %#010llx-%#010llx]\n",
                         (unsigned long long) region->start_pfn << PAGE_SHIFT,
                         ((unsigned long long) region->end_pfn << PAGE_SHIFT)
                                - 1);

                for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
                        if (pfn_valid(pfn)) {
                                /*
                                 * It is safe to ignore the result of
                                 * mem_bm_set_bit_check() here, since we won't
                                 * touch the PFNs for which the error is
                                 * returned anyway.
                                 */
                                mem_bm_set_bit_check(bm, pfn);
                        }
        }
}

/**
 * create_basic_memory_bitmaps - Create bitmaps to hold basic page information.
 *
 * Create bitmaps needed for marking page frames that should not be saved and
 * free page frames.  The forbidden_pages_map and free_pages_map pointers are
 * only modified if everything goes well, because we don't want the bits to be
 * touched before both bitmaps are set up.
 */
int create_basic_memory_bitmaps(void)
{
        struct memory_bitmap *bm1, *bm2;
        int error = 0;

        if (forbidden_pages_map && free_pages_map)
                return 0;
        else
                BUG_ON(forbidden_pages_map || free_pages_map);

        bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
        if (!bm1)
                return -ENOMEM;

        error = memory_bm_create(bm1, GFP_KERNEL, PG_ANY);
        if (error)
                goto Free_first_object;

        bm2 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
        if (!bm2)
                goto Free_first_bitmap;

        error = memory_bm_create(bm2, GFP_KERNEL, PG_ANY);
        if (error)
                goto Free_second_object;

        forbidden_pages_map = bm1;
        free_pages_map = bm2;
        mark_nosave_pages(forbidden_pages_map);

        pr_debug("PM: Basic memory bitmaps created\n");

        return 0;

 Free_second_object:
        kfree(bm2);
 Free_first_bitmap:
        memory_bm_free(bm1, PG_UNSAFE_CLEAR);
 Free_first_object:
        kfree(bm1);
        return -ENOMEM;
}

/**
 * free_basic_memory_bitmaps - Free memory bitmaps holding basic information.
 *
 * Free memory bitmaps allocated by create_basic_memory_bitmaps().  The
 * auxiliary pointers are necessary so that the bitmaps themselves are not
 * referred to while they are being freed.
 */
void free_basic_memory_bitmaps(void)
{
        struct memory_bitmap *bm1, *bm2;

        if (WARN_ON(!(forbidden_pages_map && free_pages_map)))
                return;

        bm1 = forbidden_pages_map;
        bm2 = free_pages_map;
        forbidden_pages_map = NULL;
        free_pages_map = NULL;
        memory_bm_free(bm1, PG_UNSAFE_CLEAR);
        kfree(bm1);
        memory_bm_free(bm2, PG_UNSAFE_CLEAR);
        kfree(bm2);

        pr_debug("PM: Basic memory bitmaps freed\n");
}

void clear_free_pages(void)
{
#ifdef CONFIG_PAGE_POISONING_ZERO
        struct memory_bitmap *bm = free_pages_map;
        unsigned long pfn;

        if (WARN_ON(!(free_pages_map)))
                return;

        memory_bm_position_reset(bm);
        pfn = memory_bm_next_pfn(bm);
        while (pfn != BM_END_OF_MAP) {
                if (pfn_valid(pfn))
                        clear_highpage(pfn_to_page(pfn));

                pfn = memory_bm_next_pfn(bm);
        }
        memory_bm_position_reset(bm);
        pr_info("PM: free pages cleared after restore\n");
#endif /* CONFIG_PAGE_POISONING_ZERO */
}

/**
 * snapshot_additional_pages - Estimate the number of extra pages needed.
 * @zone: Memory zone to carry out the computation for.
 *
 * Estimate the number of additional pages needed for setting up the
 * hibernation image data structures for @zone (usually, the returned value is
 * greater than the exact number).
 */
unsigned int snapshot_additional_pages(struct zone *zone)
{
        unsigned int rtree, nodes;

        rtree = nodes = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
        rtree += DIV_ROUND_UP(rtree * sizeof(struct rtree_node),
                              LINKED_PAGE_DATA_SIZE);
        while (nodes > 1) {
                nodes = DIV_ROUND_UP(nodes, BM_ENTRIES_PER_LEVEL);
                rtree += nodes;
        }

        return 2 * rtree;
}
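
/*
 * Worked example (illustrative, assuming 4 KiB pages on a 64-bit system):
 * for a zone spanning 1048576 pages (4 GiB), the bitmap needs
 * DIV_ROUND_UP(1048576, 32768) = 32 leaf blocks, about one extra page for
 * the 32 rtree_node structs, and one inner level (32 fits in 512 entries),
 * so rtree = 32 + 1 + 1 = 34.  The result is doubled because two bitmaps of
 * this kind are used during hibernation, giving an estimate of 68 pages.
 */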

#ifdef CONFIG_HIGHMEM
/**
 * count_free_highmem_pages - Compute the total number of free highmem pages.
 *
 * The returned number is system-wide.
 */
static unsigned int count_free_highmem_pages(void)
{
        struct zone *zone;
        unsigned int cnt = 0;

        for_each_populated_zone(zone)
                if (is_highmem(zone))
                        cnt += zone_page_state(zone, NR_FREE_PAGES);

        return cnt;
}

/**
 * saveable_highmem_page - Check if a highmem page is saveable.
 *
 * Determine whether a highmem page should be included in a hibernation image.
 *
 * We should save the page if it isn't Nosave or NosaveFree or Reserved,
 * and it isn't part of a free chunk of pages.
 */
static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
{
        struct page *page;

        if (!pfn_valid(pfn))
                return NULL;

        page = pfn_to_page(pfn);
        if (page_zone(page) != zone)
                return NULL;

        BUG_ON(!PageHighMem(page));

        if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page) ||
            PageReserved(page))
                return NULL;

        if (page_is_guard(page))
                return NULL;

        return page;
}

/**
 * count_highmem_pages - Compute the total number of saveable highmem pages.
 */
static unsigned int count_highmem_pages(void)
{
        struct zone *zone;
        unsigned int n = 0;

        for_each_populated_zone(zone) {
                unsigned long pfn, max_zone_pfn;

                if (!is_highmem(zone))
                        continue;

                mark_free_pages(zone);
                max_zone_pfn = zone_end_pfn(zone);
                for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
                        if (saveable_highmem_page(zone, pfn))
                                n++;
        }
        return n;
}
#else
static inline void *saveable_highmem_page(struct zone *z, unsigned long p)
{
        return NULL;
}
#endif /* CONFIG_HIGHMEM */

/**
 * saveable_page - Check if the given page is saveable.
 *
 * Determine whether a non-highmem page should be included in a hibernation
 * image.
 *
 * We should save the page if it isn't Nosave, and is not in the range
 * of pages statically defined as 'unsaveable', and it isn't part of
 * a free chunk of pages.
 */
static struct page *saveable_page(struct zone *zone, unsigned long pfn)
{
        struct page *page;

        if (!pfn_valid(pfn))
                return NULL;

        page = pfn_to_page(pfn);
        if (page_zone(page) != zone)
                return NULL;

        BUG_ON(PageHighMem(page));

        if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
                return NULL;

        if (PageReserved(page)
            && (!kernel_page_present(page) || pfn_is_nosave(pfn)))
                return NULL;

        if (page_is_guard(page))
                return NULL;

        return page;
}

/**
 * count_data_pages - Compute the total number of saveable non-highmem pages.
 */
static unsigned int count_data_pages(void)
{
        struct zone *zone;
        unsigned long pfn, max_zone_pfn;
        unsigned int n = 0;

        for_each_populated_zone(zone) {
                if (is_highmem(zone))
                        continue;

                mark_free_pages(zone);
                max_zone_pfn = zone_end_pfn(zone);
                for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
                        if (saveable_page(zone, pfn))
                                n++;
        }
        return n;
}

/*
 * This is needed because copy_page() and memcpy() are not usable for copying
 * task structs.
 */
static inline void do_copy_page(long *dst, long *src)
{
        int n;

        for (n = PAGE_SIZE / sizeof(long); n; n--)
                *dst++ = *src++;
}

/**
 * safe_copy_page - Copy a page in a safe way.
 *
 * Check if the page we are going to copy is marked as present in the kernel
 * page tables (this always is the case if CONFIG_DEBUG_PAGEALLOC is not set
 * and in that case kernel_page_present() always returns 'true').
 */
static void safe_copy_page(void *dst, struct page *s_page)
{
        if (kernel_page_present(s_page)) {
                do_copy_page(dst, page_address(s_page));
        } else {
                kernel_map_pages(s_page, 1, 1);
                do_copy_page(dst, page_address(s_page));
                kernel_map_pages(s_page, 1, 0);
        }
}

#ifdef CONFIG_HIGHMEM
static inline struct page *page_is_saveable(struct zone *zone, unsigned long pfn)
{
        return is_highmem(zone) ?
                saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
}

static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
        struct page *s_page, *d_page;
        void *src, *dst;

        s_page = pfn_to_page(src_pfn);
        d_page = pfn_to_page(dst_pfn);
        if (PageHighMem(s_page)) {
                src = kmap_atomic(s_page);
                dst = kmap_atomic(d_page);
                do_copy_page(dst, src);
                kunmap_atomic(dst);
                kunmap_atomic(src);
        } else {
                if (PageHighMem(d_page)) {
                        /*
                         * The page pointed to by src may contain some kernel
                         * data modified by kmap_atomic()
                         */
                        safe_copy_page(buffer, s_page);
                        dst = kmap_atomic(d_page);
                        copy_page(dst, buffer);
                        kunmap_atomic(dst);
                } else {
                        safe_copy_page(page_address(d_page), s_page);
                }
        }
}
#else
#define page_is_saveable(zone, pfn)     saveable_page(zone, pfn)

static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
        safe_copy_page(page_address(pfn_to_page(dst_pfn)),
                                pfn_to_page(src_pfn));
}
#endif /* CONFIG_HIGHMEM */

static void copy_data_pages(struct memory_bitmap *copy_bm,
                            struct memory_bitmap *orig_bm)
{
        struct zone *zone;
        unsigned long pfn;

        for_each_populated_zone(zone) {
                unsigned long max_zone_pfn;

                mark_free_pages(zone);
                max_zone_pfn = zone_end_pfn(zone);
                for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
                        if (page_is_saveable(zone, pfn))
                                memory_bm_set_bit(orig_bm, pfn);
        }
        memory_bm_position_reset(orig_bm);
        memory_bm_position_reset(copy_bm);
        for (;;) {
                pfn = memory_bm_next_pfn(orig_bm);
                if (unlikely(pfn == BM_END_OF_MAP))
                        break;
                copy_data_page(memory_bm_next_pfn(copy_bm), pfn);
        }
}

/* Total number of image pages */
static unsigned int nr_copy_pages;
/* Number of pages needed for saving the original pfns of the image pages */
static unsigned int nr_meta_pages;
/*
 * Numbers of normal and highmem page frames allocated for hibernation image
 * before suspending devices.
 */
unsigned int alloc_normal, alloc_highmem;
/*
 * Memory bitmap used for marking saveable pages (during hibernation) or
 * hibernation image pages (during restore)
 */
static struct memory_bitmap orig_bm;
/*
 * Memory bitmap used during hibernation for marking allocated page frames that
 * will contain copies of saveable pages.  During restore it is initially used
 * for marking hibernation image pages, but then the set bits from it are
 * duplicated in @orig_bm and it is released.  On highmem systems it is next
 * used for marking "safe" highmem pages, but it has to be reinitialized for
 * this purpose.
 */
static struct memory_bitmap copy_bm;

/**
 * swsusp_free - Free pages allocated for hibernation image.
 *
 * Image pages are allocated before snapshot creation, so they need to be
 * released after resume.
 */
1446void swsusp_free(void)
1447{
1448        unsigned long fb_pfn, fr_pfn;
1449
1450        if (!forbidden_pages_map || !free_pages_map)
1451                goto out;
1452
1453        memory_bm_position_reset(forbidden_pages_map);
1454        memory_bm_position_reset(free_pages_map);
1455
1456loop:
1457        fr_pfn = memory_bm_next_pfn(free_pages_map);
1458        fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
1459
1460        /*
1461         * Find the next bit set in both bitmaps. This is guaranteed to
1462         * terminate when fb_pfn == fr_pfn == BM_END_OF_MAP.
1463         */
1464        do {
1465                if (fb_pfn < fr_pfn)
1466                        fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
1467                if (fr_pfn < fb_pfn)
1468                        fr_pfn = memory_bm_next_pfn(free_pages_map);
1469        } while (fb_pfn != fr_pfn);
1470
1471        if (fr_pfn != BM_END_OF_MAP && pfn_valid(fr_pfn)) {
1472                struct page *page = pfn_to_page(fr_pfn);
1473
1474                memory_bm_clear_current(forbidden_pages_map);
1475                memory_bm_clear_current(free_pages_map);
1476                hibernate_restore_unprotect_page(page_address(page));
1477                __free_page(page);
1478                goto loop;
1479        }
1480
1481out:
1482        nr_copy_pages = 0;
1483        nr_meta_pages = 0;
1484        restore_pblist = NULL;
1485        buffer = NULL;
1486        alloc_normal = 0;
1487        alloc_highmem = 0;
1488        hibernate_restore_protection_end();
1489}
1490
1491/* Helper functions used for the shrinking of memory. */
1492
1493#define GFP_IMAGE       (GFP_KERNEL | __GFP_NOWARN)
1494
1495/**
1496 * preallocate_image_pages - Allocate a number of pages for hibernation image.
1497 * @nr_pages: Number of page frames to allocate.
1498 * @mask: GFP flags to use for the allocation.
1499 *
1500 * Return value: Number of page frames actually allocated
1501 */
1502static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
1503{
1504        unsigned long nr_alloc = 0;
1505
1506        while (nr_pages > 0) {
1507                struct page *page;
1508
1509                page = alloc_image_page(mask);
1510                if (!page)
1511                        break;
1512                memory_bm_set_bit(&copy_bm, page_to_pfn(page));
1513                if (PageHighMem(page))
1514                        alloc_highmem++;
1515                else
1516                        alloc_normal++;
1517                nr_pages--;
1518                nr_alloc++;
1519        }
1520
1521        return nr_alloc;
1522}
1523
1524static unsigned long preallocate_image_memory(unsigned long nr_pages,
1525                                              unsigned long avail_normal)
1526{
1527        unsigned long alloc;
1528
1529        if (avail_normal <= alloc_normal)
1530                return 0;
1531
1532        alloc = avail_normal - alloc_normal;
1533        if (nr_pages < alloc)
1534                alloc = nr_pages;
1535
1536        return preallocate_image_pages(alloc, GFP_IMAGE);
1537}
1538
1539#ifdef CONFIG_HIGHMEM
1540static unsigned long preallocate_image_highmem(unsigned long nr_pages)
1541{
1542        return preallocate_image_pages(nr_pages, GFP_IMAGE | __GFP_HIGHMEM);
1543}
1544
1545/**
1546 *  __fraction - Compute (an approximation of) x * (multiplier / base).
1547 */
1548static unsigned long __fraction(u64 x, u64 multiplier, u64 base)
1549{
1550        x *= multiplier;
1551        do_div(x, base);
1552        return (unsigned long)x;
1553}
1554
1555static unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
1556                                                  unsigned long highmem,
1557                                                  unsigned long total)
1558{
1559        unsigned long alloc = __fraction(nr_pages, highmem, total);
1560
1561        return preallocate_image_pages(alloc, GFP_IMAGE | __GFP_HIGHMEM);
1562}
1563#else /* CONFIG_HIGHMEM */
1564static inline unsigned long preallocate_image_highmem(unsigned long nr_pages)
1565{
1566        return 0;
1567}
1568
1569static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
1570                                                         unsigned long highmem,
1571                                                         unsigned long total)
1572{
1573        return 0;
1574}
1575#endif /* CONFIG_HIGHMEM */
1576
1577/**
1578 * free_unnecessary_pages - Release preallocated pages not needed for the image.
1579 */
1580static unsigned long free_unnecessary_pages(void)
1581{
1582        unsigned long save, to_free_normal, to_free_highmem, free;
1583
1584        save = count_data_pages();
1585        if (alloc_normal >= save) {
1586                to_free_normal = alloc_normal - save;
1587                save = 0;
1588        } else {
1589                to_free_normal = 0;
1590                save -= alloc_normal;
1591        }
1592        save += count_highmem_pages();
1593        if (alloc_highmem >= save) {
1594                to_free_highmem = alloc_highmem - save;
1595        } else {
1596                to_free_highmem = 0;
1597                save -= alloc_highmem;
1598                if (to_free_normal > save)
1599                        to_free_normal -= save;
1600                else
1601                        to_free_normal = 0;
1602        }
1603        free = to_free_normal + to_free_highmem;
1604
1605        memory_bm_position_reset(&copy_bm);
1606
1607        while (to_free_normal > 0 || to_free_highmem > 0) {
1608                unsigned long pfn = memory_bm_next_pfn(&copy_bm);
1609                struct page *page = pfn_to_page(pfn);
1610
1611                if (PageHighMem(page)) {
1612                        if (!to_free_highmem)
1613                                continue;
1614                        to_free_highmem--;
1615                        alloc_highmem--;
1616                } else {
1617                        if (!to_free_normal)
1618                                continue;
1619                        to_free_normal--;
1620                        alloc_normal--;
1621                }
1622                memory_bm_clear_bit(&copy_bm, pfn);
1623                swsusp_unset_page_forbidden(page);
1624                swsusp_unset_page_free(page);
1625                __free_page(page);
1626        }
1627
1628        return free;
1629}
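/*
 * Illustrative example (editorial note, hypothetical numbers): suppose
 * count_data_pages() = 10000, alloc_normal = 12000, count_highmem_pages() =
 * 3000 and alloc_highmem = 2500.  The first branch gives to_free_normal =
 * 2000 and save = 0; save then becomes 3000, and since alloc_highmem < save,
 * to_free_highmem = 0 and the highmem shortfall of 500 pages is deducted
 * from to_free_normal, leaving 1500 normal pages to free: 500 preallocated
 * normal pages are kept to hold copies of highmem pages.
 */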
1630
1631/**
1632 * minimum_image_size - Estimate the minimum acceptable size of an image.
1633 * @saveable: Number of saveable pages in the system.
1634 *
1635 * We do not want to try too hard to free memory, so we estimate the minimum
1636 * acceptable size of a hibernation image and use it as the lower limit for
1637 * preallocating memory.
1638 *
1639 * We assume that the minimum image size should be proportional to
1640 *
1641 * [number of saveable pages] - [number of pages that can be freed in theory]
1642 *
1643 * where the second term is the sum of (1) reclaimable slab pages, (2) active
1644 * and (3) inactive anonymous pages, (4) active and (5) inactive file pages,
1645 * minus mapped file pages.
1646 */
1647static unsigned long minimum_image_size(unsigned long saveable)
1648{
1649        unsigned long size;
1650
1651        size = global_page_state(NR_SLAB_RECLAIMABLE)
1652                + global_node_page_state(NR_ACTIVE_ANON)
1653                + global_node_page_state(NR_INACTIVE_ANON)
1654                + global_node_page_state(NR_ACTIVE_FILE)
1655                + global_node_page_state(NR_INACTIVE_FILE)
1656                - global_node_page_state(NR_FILE_MAPPED);
1657
1658        return saveable <= size ? 0 : saveable - size;
1659}
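/*
 * Illustrative example (editorial note, hypothetical numbers): with 200000
 * saveable pages, 30000 reclaimable slab pages, 50000 + 20000 active and
 * inactive anonymous pages, 60000 + 40000 active and inactive file pages
 * and 10000 mapped file pages, size = 30000 + 50000 + 20000 + 60000 +
 * 40000 - 10000 = 190000, so the estimated minimum image size is
 * 200000 - 190000 = 10000 pages.
 */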
1660
1661/**
1662 * hibernate_preallocate_memory - Preallocate memory for hibernation image.
1663 *
1664 * To create a hibernation image it is necessary to make a copy of every page
1665 * frame in use.  We also need a number of page frames to be free during
1666 * hibernation for allocations made while saving the image and for device
1667 * drivers, in case they need to allocate memory from their hibernation
1668 * callbacks; these two numbers are given by PAGES_FOR_IO (a rough estimate)
1669 * and by reserved_size divided by PAGE_SIZE (tunable via
1670 * /sys/power/reserved_size), respectively.  To make this happen, we compute the
1671 * total number of available page frames and allocate at least
1672 *
1673 * ([page frames total] + PAGES_FOR_IO + [metadata pages]) / 2
1674 *  + 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
1675 *
1676 * of them, which corresponds to the maximum size of a hibernation image.
1677 *
1678 * If image_size is set below the number following from the above formula,
1679 * the preallocation of memory is continued until the total number of saveable
1680 * pages in the system is below the requested image size or the minimum
1681 * acceptable image size returned by minimum_image_size(), whichever is greater.
1682 */
1683int hibernate_preallocate_memory(void)
1684{
1685        struct zone *zone;
1686        unsigned long saveable, size, max_size, count, highmem, pages = 0;
1687        unsigned long alloc, save_highmem, pages_highmem, avail_normal;
1688        ktime_t start, stop;
1689        int error;
1690
1691        printk(KERN_INFO "PM: Preallocating image memory... ");
1692        start = ktime_get();
1693
1694        error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY);
1695        if (error)
1696                goto err_out;
1697
1698        error = memory_bm_create(&copy_bm, GFP_IMAGE, PG_ANY);
1699        if (error)
1700                goto err_out;
1701
1702        alloc_normal = 0;
1703        alloc_highmem = 0;
1704
1705        /* Count the number of saveable data pages. */
1706        save_highmem = count_highmem_pages();
1707        saveable = count_data_pages();
1708
1709        /*
1710         * Compute the total number of page frames we can use (count) and the
1711         * number of pages needed for image metadata (size).
1712         */
1713        count = saveable;
1714        saveable += save_highmem;
1715        highmem = save_highmem;
1716        size = 0;
1717        for_each_populated_zone(zone) {
1718                size += snapshot_additional_pages(zone);
1719                if (is_highmem(zone))
1720                        highmem += zone_page_state(zone, NR_FREE_PAGES);
1721                else
1722                        count += zone_page_state(zone, NR_FREE_PAGES);
1723        }
1724        avail_normal = count;
1725        count += highmem;
1726        count -= totalreserve_pages;
1727
1728        /* Add number of pages required for page keys (s390 only). */
1729        size += page_key_additional_pages(saveable);
1730
1731        /* Compute the maximum number of saveable pages to leave in memory. */
1732        max_size = (count - (size + PAGES_FOR_IO)) / 2
1733                        - 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE);
1734        /* Compute the desired number of image pages specified by image_size. */
1735        size = DIV_ROUND_UP(image_size, PAGE_SIZE);
1736        if (size > max_size)
1737                size = max_size;
1738        /*
1739         * If the desired number of image pages is at least as large as the
1740         * current number of saveable pages in memory, allocate page frames for
1741         * the image and we're done.
1742         */
1743        if (size >= saveable) {
1744                pages = preallocate_image_highmem(save_highmem);
1745                pages += preallocate_image_memory(saveable - pages, avail_normal);
1746                goto out;
1747        }
1748
1749        /* Estimate the minimum size of the image. */
1750        pages = minimum_image_size(saveable);
1751        /*
1752         * To avoid excessive pressure on the normal zone, leave room in it to
1753         * accommodate an image of the minimum size (unless it's already too
1754         * small, in which case don't preallocate pages from it at all).
1755         */
1756        if (avail_normal > pages)
1757                avail_normal -= pages;
1758        else
1759                avail_normal = 0;
1760        if (size < pages)
1761                size = min_t(unsigned long, pages, max_size);
1762
1763        /*
1764         * Let the memory management subsystem know that we're going to need a
1765         * large number of page frames to allocate and make it free some memory.
1766         * NOTE: If this is not done, performance will be hurt badly in some
1767         * test cases.
1768         */
1769        shrink_all_memory(saveable - size);
1770
1771        /*
1772         * The number of saveable pages in memory was too high, so apply some
1773         * pressure to decrease it.  First, make room for the largest possible
1774         * image and fail if that doesn't work.  Next, try to decrease the size
1775         * of the image as much as indicated by 'size' using allocations from
1776         * highmem and non-highmem zones separately.
1777         */
1778        pages_highmem = preallocate_image_highmem(highmem / 2);
1779        alloc = count - max_size;
1780        if (alloc > pages_highmem)
1781                alloc -= pages_highmem;
1782        else
1783                alloc = 0;
1784        pages = preallocate_image_memory(alloc, avail_normal);
1785        if (pages < alloc) {
1786                /* We have exhausted non-highmem pages, try highmem. */
1787                alloc -= pages;
1788                pages += pages_highmem;
1789                pages_highmem = preallocate_image_highmem(alloc);
1790                if (pages_highmem < alloc)
1791                        goto err_out;
1792                pages += pages_highmem;
1793                /*
1794                 * size is the desired number of saveable pages to leave in
1795                 * memory, so try to preallocate (all memory - size) pages.
1796                 */
1797                alloc = (count - pages) - size;
1798                pages += preallocate_image_highmem(alloc);
1799        } else {
1800                /*
1801                 * There are approximately max_size saveable pages at this point
1802                 * and we want to reduce this number to size.
1803                 */
1804                alloc = max_size - size;
1805                size = preallocate_highmem_fraction(alloc, highmem, count);
1806                pages_highmem += size;
1807                alloc -= size;
1808                size = preallocate_image_memory(alloc, avail_normal);
1809                pages_highmem += preallocate_image_highmem(alloc - size);
1810                pages += pages_highmem + size;
1811        }
1812
1813        /*
1814         * We only need as many page frames for the image as there are saveable
1815         * pages in memory, but we have allocated more.  Release the excess
1816         * ones now.
1817         */
1818        pages -= free_unnecessary_pages();
1819
1820 out:
1821        stop = ktime_get();
1822        printk(KERN_CONT "done (allocated %lu pages)\n", pages);
1823        swsusp_show_speed(start, stop, pages, "Allocated");
1824
1825        return 0;
1826
1827 err_out:
1828        printk(KERN_CONT "\n");
1829        swsusp_free();
1830        return -ENOMEM;
1831}
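/*
 * Illustrative example (editorial note, hypothetical numbers; PAGES_FOR_IO
 * assumed to be 1024 in this configuration): with count = 1000000 usable
 * page frames, size = 2000 metadata pages and reserved_size = 1 MB (256
 * pages with 4 KB page frames), max_size = (1000000 - (2000 + 1024)) / 2
 * - 2 * 256 = 498488 - 512 = 497976 pages.  If image_size requests fewer
 * pages than that, preallocation keeps going until the number of saveable
 * pages drops to the requested size or to the minimum_image_size()
 * estimate, whichever is greater.
 */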
1832
1833#ifdef CONFIG_HIGHMEM
1834/**
1835 * count_pages_for_highmem - Count non-highmem pages needed for copying highmem.
1836 *
1837 * Compute the number of non-highmem pages that will be necessary for creating
1838 * copies of highmem pages.
1839 */
1840static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
1841{
1842        unsigned int free_highmem = count_free_highmem_pages() + alloc_highmem;
1843
1844        if (free_highmem >= nr_highmem)
1845                nr_highmem = 0;
1846        else
1847                nr_highmem -= free_highmem;
1848
1849        return nr_highmem;
1850}
1851#else
1852static unsigned int count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
1853#endif /* CONFIG_HIGHMEM */
1854
1855/**
1856 * enough_free_mem - Check if there is enough free memory for the image.
1857 */
1858static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
1859{
1860        struct zone *zone;
1861        unsigned int free = alloc_normal;
1862
1863        for_each_populated_zone(zone)
1864                if (!is_highmem(zone))
1865                        free += zone_page_state(zone, NR_FREE_PAGES);
1866
1867        nr_pages += count_pages_for_highmem(nr_highmem);
1868        pr_debug("PM: Normal pages needed: %u + %u, available pages: %u\n",
1869                nr_pages, PAGES_FOR_IO, free);
1870
1871        return free > nr_pages + PAGES_FOR_IO;
1872}
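/*
 * Illustrative example (editorial note, hypothetical numbers): with
 * nr_pages = 100000 normal image pages, nr_highmem = 5000 and only 2000
 * free (or already preallocated) highmem pages, count_pages_for_highmem()
 * returns 3000, so more than 103000 + PAGES_FOR_IO normal page frames must
 * be free for the check to pass.
 */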
1873
1874#ifdef CONFIG_HIGHMEM
1875/**
1876 * get_highmem_buffer - Allocate a buffer for highmem pages.
1877 *
1878 * If there are some highmem pages in the hibernation image, we may need a
1879 * buffer to copy them and/or load their data.
1880 */
1881static inline int get_highmem_buffer(int safe_needed)
1882{
1883        buffer = get_image_page(GFP_ATOMIC | __GFP_COLD, safe_needed);
1884        return buffer ? 0 : -ENOMEM;
1885}
1886
1887/**
1888 * alloc_highmem_pages - Allocate some highmem pages for the image.
1889 *
1890 * Try to allocate as many pages as needed, but if the number of free highmem
1891 * pages is less than that, allocate them all.
1892 */
1893static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
1894                                               unsigned int nr_highmem)
1895{
1896        unsigned int to_alloc = count_free_highmem_pages();
1897
1898        if (to_alloc > nr_highmem)
1899                to_alloc = nr_highmem;
1900
1901        nr_highmem -= to_alloc;
1902        while (to_alloc-- > 0) {
1903                struct page *page;
1904
1905                page = alloc_image_page(__GFP_HIGHMEM|__GFP_KSWAPD_RECLAIM);
1906                memory_bm_set_bit(bm, page_to_pfn(page));
1907        }
1908        return nr_highmem;
1909}
1910#else
1911static inline int get_highmem_buffer(int safe_needed) { return 0; }
1912
1913static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
1914                                               unsigned int n) { return 0; }
1915#endif /* CONFIG_HIGHMEM */
1916
1917/**
1918 * swsusp_alloc - Allocate memory for hibernation image.
1919 *
1920 * We first try to allocate as many highmem pages as there are
1921 * saveable highmem pages in the system.  If not all of them can be allocated,
1922 * we allocate non-highmem pages for the copies of the remaining highmem ones.
1923 *
1924 * In this approach it is likely that the copies of highmem pages will
1925 * also be located in the high memory, because of the way in which
1926 * copy_data_pages() works.
1927 */
1928static int swsusp_alloc(struct memory_bitmap *orig_bm,
1929                        struct memory_bitmap *copy_bm,
1930                        unsigned int nr_pages, unsigned int nr_highmem)
1931{
1932        if (nr_highmem > 0) {
1933                if (get_highmem_buffer(PG_ANY))
1934                        goto err_out;
1935                if (nr_highmem > alloc_highmem) {
1936                        nr_highmem -= alloc_highmem;
1937                        nr_pages += alloc_highmem_pages(copy_bm, nr_highmem);
1938                }
1939        }
1940        if (nr_pages > alloc_normal) {
1941                nr_pages -= alloc_normal;
1942                while (nr_pages-- > 0) {
1943                        struct page *page;
1944
1945                        page = alloc_image_page(GFP_ATOMIC | __GFP_COLD);
1946                        if (!page)
1947                                goto err_out;
1948                        memory_bm_set_bit(copy_bm, page_to_pfn(page));
1949                }
1950        }
1951
1952        return 0;
1953
1954 err_out:
1955        swsusp_free();
1956        return -ENOMEM;
1957}
1958
1959asmlinkage __visible int swsusp_save(void)
1960{
1961        unsigned int nr_pages, nr_highmem;
1962
1963        printk(KERN_INFO "PM: Creating hibernation image:\n");
1964
1965        drain_local_pages(NULL);
1966        nr_pages = count_data_pages();
1967        nr_highmem = count_highmem_pages();
1968        printk(KERN_INFO "PM: Need to copy %u pages\n", nr_pages + nr_highmem);
1969
1970        if (!enough_free_mem(nr_pages, nr_highmem)) {
1971                printk(KERN_ERR "PM: Not enough free memory\n");
1972                return -ENOMEM;
1973        }
1974
1975        if (swsusp_alloc(&orig_bm, &copy_bm, nr_pages, nr_highmem)) {
1976                printk(KERN_ERR "PM: Memory allocation failed\n");
1977                return -ENOMEM;
1978        }
1979
1980        /*
1981         * While allocating the suspend pagedir, new cold pages may appear.
1982         * Kill them.
1983         */
1984        drain_local_pages(NULL);
1985        copy_data_pages(&copy_bm, &orig_bm);
1986
1987        /*
1988         * End of critical section. From now on, we can write to memory,
1989         * but we should not touch disk. This especially means we must _not_
1990         * touch swap space! Except we must write out our image of course.
1991         */
1992
1993        nr_pages += nr_highmem;
1994        nr_copy_pages = nr_pages;
1995        nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);
1996
1997        printk(KERN_INFO "PM: Hibernation image created (%d pages copied)\n",
1998                nr_pages);
1999
2000        return 0;
2001}
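/*
 * Sizing note (editorial, assuming 4 KB pages and 8-byte longs): each
 * metadata page holds PAGE_SIZE / sizeof(long) = 512 PFNs, so an image of
 * 100000 copied pages needs DIV_ROUND_UP(100000 * 8, 4096) = 196 metadata
 * pages.
 */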
2002
2003#ifndef CONFIG_ARCH_HIBERNATION_HEADER
2004static int init_header_complete(struct swsusp_info *info)
2005{
2006        memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
2007        info->version_code = LINUX_VERSION_CODE;
2008        return 0;
2009}
2010
2011static char *check_image_kernel(struct swsusp_info *info)
2012{
2013        if (info->version_code != LINUX_VERSION_CODE)
2014                return "kernel version";
2015        if (strcmp(info->uts.sysname, init_utsname()->sysname))
2016                return "system type";
2017        if (strcmp(info->uts.release, init_utsname()->release))
2018                return "kernel release";
2019        if (strcmp(info->uts.version, init_utsname()->version))
2020                return "version";
2021        if (strcmp(info->uts.machine, init_utsname()->machine))
2022                return "machine";
2023        return NULL;
2024}
2025#endif /* CONFIG_ARCH_HIBERNATION_HEADER */
2026
2027unsigned long snapshot_get_image_size(void)
2028{
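        /* Data pages, metadata pages and one page for the image header. */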
2029        return nr_copy_pages + nr_meta_pages + 1;
2030}
2031
2032static int init_header(struct swsusp_info *info)
2033{
2034        memset(info, 0, sizeof(struct swsusp_info));
2035        info->num_physpages = get_num_physpages();
2036        info->image_pages = nr_copy_pages;
2037        info->pages = snapshot_get_image_size();
2038        info->size = info->pages;
2039        info->size <<= PAGE_SHIFT;
2040        return init_header_complete(info);
2041}
2042
2043/**
2044 * pack_pfns - Prepare PFNs for saving.
2045 * @buf: Memory buffer to store the PFNs in.
2046 * @bm: Memory bitmap.
2047 *
2048 * PFNs corresponding to set bits in @bm are stored in the area of memory
2049 * pointed to by @buf (1 page at a time).
2050 */
2051static inline void pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
2052{
2053        int j;
2054
2055        for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
2056                buf[j] = memory_bm_next_pfn(bm);
2057                if (unlikely(buf[j] == BM_END_OF_MAP))
2058                        break;
2059                /* Save page key for data page (s390 only). */
2060                page_key_read(buf + j);
2061        }
2062}
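/*
 * Layout note (editorial, assuming 4 KB pages and 8-byte longs): each
 * buffer page packs up to 512 PFNs; a partially filled final page is
 * terminated by a stored BM_END_OF_MAP marker, which unpack_orig_pfns()
 * uses to stop on the read side.
 */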
2063
2064/**
2065 * snapshot_read_next - Get the address to read the next image page from.
2066 * @handle: Snapshot handle to be used for the reading.
2067 *
2068 * On the first call, @handle should point to a zeroed snapshot_handle
2069 * structure.  The structure is then populated and a pointer to it should be
2070 * passed to this function on each subsequent call.
2071 *
2072 * On success, the function returns a positive number.  Then, the caller
2073 * is allowed to read up to the returned number of bytes from the memory
2074 * location computed by the data_of() macro.
2075 *
2076 * The function returns 0 to indicate the end of the data stream, and
2077 * negative numbers are returned on errors.  If that happens, the structure
2078 * pointed to by @handle is not updated and should not be used any more.
2079 */
2080int snapshot_read_next(struct snapshot_handle *handle)
2081{
2082        if (handle->cur > nr_meta_pages + nr_copy_pages)
2083                return 0;
2084
2085        if (!buffer) {
2086                /* This causes the buffer to be freed by swsusp_free() */
2087                buffer = get_image_page(GFP_ATOMIC, PG_ANY);
2088                if (!buffer)
2089                        return -ENOMEM;
2090        }
2091        if (!handle->cur) {
2092                int error;
2093
2094                error = init_header((struct swsusp_info *)buffer);
2095                if (error)
2096                        return error;
2097                handle->buffer = buffer;
2098                memory_bm_position_reset(&orig_bm);
2099                memory_bm_position_reset(&copy_bm);
2100        } else if (handle->cur <= nr_meta_pages) {
2101                clear_page(buffer);
2102                pack_pfns(buffer, &orig_bm);
2103        } else {
2104                struct page *page;
2105
2106                page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
2107                if (PageHighMem(page)) {
2108                        /*
2109                         * Highmem pages are copied to the buffer,
2110                         * because we can't return with a kmapped
2111                         * highmem page (we may not be called again).
2112                         */
2113                        void *kaddr;
2114
2115                        kaddr = kmap_atomic(page);
2116                        copy_page(buffer, kaddr);
2117                        kunmap_atomic(kaddr);
2118                        handle->buffer = buffer;
2119                } else {
2120                        handle->buffer = page_address(page);
2121                }
2122        }
2123        handle->cur++;
2124        return PAGE_SIZE;
2125}
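/*
 * Illustrative caller sketch (editorial; error handling trimmed and
 * write_page() is a hypothetical helper): the image-saving code drives
 * this function in a loop of roughly the following shape:
 *
 *	struct snapshot_handle handle = {};
 *	int ret;
 *
 *	while ((ret = snapshot_read_next(&handle)) > 0)
 *		write_page(data_of(handle), ret);	// up to ret bytes valid
 *	// ret == 0: end of image data, ret < 0: error
 */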
2126
2127static void duplicate_memory_bitmap(struct memory_bitmap *dst,
2128                                    struct memory_bitmap *src)
2129{
2130        unsigned long pfn;
2131
2132        memory_bm_position_reset(src);
2133        pfn = memory_bm_next_pfn(src);
2134        while (pfn != BM_END_OF_MAP) {
2135                memory_bm_set_bit(dst, pfn);
2136                pfn = memory_bm_next_pfn(src);
2137        }
2138}
2139
2140/**
2141 * mark_unsafe_pages - Mark pages that were used before hibernation.
2142 *
2143 * Mark the pages that cannot be used for storing the image during restoration,
2144 * because they conflict with the pages that had been used before hibernation.
2145 */
2146static void mark_unsafe_pages(struct memory_bitmap *bm)
2147{
2148        unsigned long pfn;
2149
2150        /* Clear the "free"/"unsafe" bit for all PFNs */
2151        memory_bm_position_reset(free_pages_map);
2152        pfn = memory_bm_next_pfn(free_pages_map);
2153        while (pfn != BM_END_OF_MAP) {
2154                memory_bm_clear_current(free_pages_map);
2155                pfn = memory_bm_next_pfn(free_pages_map);
2156        }
2157
2158        /* Mark pages that correspond to the "original" PFNs as "unsafe" */
2159        duplicate_memory_bitmap(free_pages_map, bm);
2160
2161        allocated_unsafe_pages = 0;
2162}
2163
2164static int check_header(struct swsusp_info *info)
2165{
2166        char *reason;
2167
2168        reason = check_image_kernel(info);
2169        if (!reason && info->num_physpages != get_num_physpages())
2170                reason = "memory size";
2171        if (reason) {
2172                printk(KERN_ERR "PM: Image mismatch: %s\n", reason);
2173                return -EPERM;
2174        }
2175        return 0;
2176}
2177
2178/**
2179 * load_header - Check the image header and copy the data from it.
2180 */
2181static int load_header(struct swsusp_info *info)
2182{
2183        int error;
2184
2185        restore_pblist = NULL;
2186        error = check_header(info);
2187        if (!error) {
2188                nr_copy_pages = info->image_pages;
2189                nr_meta_pages = info->pages - info->image_pages - 1;
2190        }
2191        return error;
2192}
2193
2194/**
2195 * unpack_orig_pfns - Set bits corresponding to given PFNs in a memory bitmap.
2196 * @buf: Area of memory containing the PFNs.
2197 * @bm: Memory bitmap.
2198 *
2199 * For each element of the array pointed to by @buf (1 page at a time), set the
2200 * corresponding bit in @bm.
2201 */
2202static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
2203{
2204        int j;
2205
2206        for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
2207                if (unlikely(buf[j] == BM_END_OF_MAP))
2208                        break;
2209
2210                /* Extract and buffer page key for data page (s390 only). */
2211                page_key_memorize(buf + j);
2212
2213                if (pfn_valid(buf[j]) && memory_bm_pfn_present(bm, buf[j]))
2214                        memory_bm_set_bit(bm, buf[j]);
2215                else
2216                        return -EFAULT;
2217        }
2218
2219        return 0;
2220}
2221
2222#ifdef CONFIG_HIGHMEM
2223/*
2224 * struct highmem_pbe is used for creating the list of highmem pages that
2225 * should be restored atomically during the resume from disk, because the page
2226 * frames they have occupied before the suspend are in use.
2227 */
2228struct highmem_pbe {
2229        struct page *copy_page; /* data is here now */
2230        struct page *orig_page; /* data was here before the suspend */
2231        struct highmem_pbe *next;
2232};
2233
2234/*
2235 * List of highmem PBEs needed for restoring the highmem pages that were
2236 * allocated before the suspend and included in the suspend image, but have
2237 * also been allocated by the "resume" kernel, so their contents cannot be
2238 * written directly to their "original" page frames.
2239 */
2240static struct highmem_pbe *highmem_pblist;
2241
2242/**
2243 * count_highmem_image_pages - Compute the number of highmem pages in the image.
2244 * @bm: Memory bitmap.
2245 *
2246 * The bits in @bm that correspond to image pages are assumed to be set.
2247 */
2248static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
2249{
2250        unsigned long pfn;
2251        unsigned int cnt = 0;
2252
2253        memory_bm_position_reset(bm);
2254        pfn = memory_bm_next_pfn(bm);
2255        while (pfn != BM_END_OF_MAP) {
2256                if (PageHighMem(pfn_to_page(pfn)))
2257                        cnt++;
2258
2259                pfn = memory_bm_next_pfn(bm);
2260        }
2261        return cnt;
2262}
2263
2264static unsigned int safe_highmem_pages;
2265
2266static struct memory_bitmap *safe_highmem_bm;
2267
2268/**
2269 * prepare_highmem_image - Allocate memory for loading highmem data from image.
2270 * @bm: Pointer to an uninitialized memory bitmap structure.
2271 * @nr_highmem_p: Pointer to the number of highmem image pages.
2272 *
2273 * Try to allocate as many highmem pages as there are highmem image pages
2274 * (@nr_highmem_p points to the variable containing the number of highmem image
2275 * pages).  The pages that are "safe" (ie. will not be overwritten when the
2276 * hibernation image is restored entirely) have the corresponding bits set in
2277 * @bm (it must be uninitialized).
2278 *
2279 * NOTE: This function should not be called if there are no highmem image pages.
2280 */
2281static int prepare_highmem_image(struct memory_bitmap *bm,
2282                                 unsigned int *nr_highmem_p)
2283{
2284        unsigned int to_alloc;
2285
2286        if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
2287                return -ENOMEM;
2288
2289        if (get_highmem_buffer(PG_SAFE))
2290                return -ENOMEM;
2291
2292        to_alloc = count_free_highmem_pages();
2293        if (to_alloc > *nr_highmem_p)
2294                to_alloc = *nr_highmem_p;
2295        else
2296                *nr_highmem_p = to_alloc;
2297
2298        safe_highmem_pages = 0;
2299        while (to_alloc-- > 0) {
2300                struct page *page;
2301
2302                page = alloc_page(__GFP_HIGHMEM);
2303                if (!swsusp_page_is_free(page)) {
2304                        /* The page is "safe", set its bit in the bitmap */
2305                        memory_bm_set_bit(bm, page_to_pfn(page));
2306                        safe_highmem_pages++;
2307                }
2308                /* Mark the page as allocated */
2309                swsusp_set_page_forbidden(page);
2310                swsusp_set_page_free(page);
2311        }
2312        memory_bm_position_reset(bm);
2313        safe_highmem_bm = bm;
2314        return 0;
2315}
2316
2317static struct page *last_highmem_page;
2318
2319/**
2320 * get_highmem_page_buffer - Prepare a buffer to store a highmem image page.
2321 *
2322 * For a given highmem image page get a buffer that suspend_write_next() should
2323 * return to its caller to write to.
2324 *
2325 * If the page is to be saved to its "original" page frame or a copy of
2326 * the page is to be made in highmem, @buffer is returned.  Otherwise,
2327 * the copy of the page is to be made in normal memory, so the address of
2328 * the copy is returned.
2329 *
2330 * If @buffer is returned, the caller of suspend_write_next() will write
2331 * the page's contents to @buffer, so they will have to be copied to the
2332 * right location on the next call to suspend_write_next() and it is done
2333 * with the help of copy_last_highmem_page().  For this purpose, if
2334 * @buffer is returned, @last_highmem_page is set to the page to which
2335 * the data will have to be copied from @buffer.
2336 */
2337static void *get_highmem_page_buffer(struct page *page,
2338                                     struct chain_allocator *ca)
2339{
2340        struct highmem_pbe *pbe;
2341        void *kaddr;
2342
2343        if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) {
2344                /*
2345                 * We have allocated the "original" page frame and we can
2346                 * use it directly to store the loaded page.
2347                 */
2348                last_highmem_page = page;
2349                return buffer;
2350        }
2351        /*
2352         * The "original" page frame has not been allocated and we have to
2353         * use a "safe" page frame to store the loaded page.
2354         */
2355        pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
2356        if (!pbe) {
2357                swsusp_free();
2358                return ERR_PTR(-ENOMEM);
2359        }
2360        pbe->orig_page = page;
2361        if (safe_highmem_pages > 0) {
2362                struct page *tmp;
2363
2364                /* Copy of the page will be stored in high memory */
2365                kaddr = buffer;
2366                tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm));
2367                safe_highmem_pages--;
2368                last_highmem_page = tmp;
2369                pbe->copy_page = tmp;
2370        } else {
2371                /* Copy of the page will be stored in normal memory */
2372                kaddr = safe_pages_list;
2373                safe_pages_list = safe_pages_list->next;
2374                pbe->copy_page = virt_to_page(kaddr);
2375        }
2376        pbe->next = highmem_pblist;
2377        highmem_pblist = pbe;
2378        return kaddr;
2379}
2380
2381/**
2382 * copy_last_highmem_page - Copy the most recent highmem image page.
2383 *
2384 * Copy the contents of a highmem image page from @buffer, where the caller of
2385 * snapshot_write_next() has stored them, to the right location represented by
2386 * @last_highmem_page.
2387 */
2388static void copy_last_highmem_page(void)
2389{
2390        if (last_highmem_page) {
2391                void *dst;
2392
2393                dst = kmap_atomic(last_highmem_page);
2394                copy_page(dst, buffer);
2395                kunmap_atomic(dst);
2396                last_highmem_page = NULL;
2397        }
2398}
2399
2400static inline int last_highmem_page_copied(void)
2401{
2402        return !last_highmem_page;
2403}
2404
2405static inline void free_highmem_data(void)
2406{
2407        if (safe_highmem_bm)
2408                memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR);
2409
2410        if (buffer)
2411                free_image_page(buffer, PG_UNSAFE_CLEAR);
2412}
2413#else
2414static unsigned int count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }
2415
2416static inline int prepare_highmem_image(struct memory_bitmap *bm,
2417                                        unsigned int *nr_highmem_p) { return 0; }
2418
2419static inline void *get_highmem_page_buffer(struct page *page,
2420                                            struct chain_allocator *ca)
2421{
2422        return ERR_PTR(-EINVAL);
2423}
2424
2425static inline void copy_last_highmem_page(void) {}
2426static inline int last_highmem_page_copied(void) { return 1; }
2427static inline void free_highmem_data(void) {}
2428#endif /* CONFIG_HIGHMEM */
2429
2430#define PBES_PER_LINKED_PAGE    (LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))
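/*
 * Sizing note (editorial, assuming 4 KB pages and a three-pointer struct pbe
 * on a 64-bit build): LINKED_PAGE_DATA_SIZE is PAGE_SIZE minus the size of
 * the link pointer, i.e. 4096 - 8 = 4088 bytes, so each linked page holds
 * 4088 / 24 = 170 PBEs.
 */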
2431
2432/**
2433 * prepare_image - Make room for loading hibernation image.
2434 * @new_bm: Uninitialized memory bitmap structure.
2435 * @bm: Memory bitmap with unsafe pages marked.
2436 *
2437 * Use @bm to mark the pages that will be overwritten in the process of
2438 * restoring the system memory state from the suspend image ("unsafe" pages)
2439 * and allocate memory for the image.
2440 *
2441 * The idea is to allocate a new memory bitmap first and then allocate
2442 * as many pages as needed for image data, but without specifying what those
2443 * pages will be used for just yet.  Instead, we mark them all as allocated and
2444 * create a list of "safe" pages to be used later.  On systems with high
2445 * memory, a list of "safe" highmem pages is created too.
2446 */
2447static int prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
2448{
2449        unsigned int nr_pages, nr_highmem;
2450        struct linked_page *lp;
2451        int error;
2452
2453        /* If there is no highmem, the buffer will not be necessary */
2454        free_image_page(buffer, PG_UNSAFE_CLEAR);
2455        buffer = NULL;
2456
2457        nr_highmem = count_highmem_image_pages(bm);
2458        mark_unsafe_pages(bm);
2459
2460        error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
2461        if (error)
2462                goto Free;
2463
2464        duplicate_memory_bitmap(new_bm, bm);
2465        memory_bm_free(bm, PG_UNSAFE_KEEP);
2466        if (nr_highmem > 0) {
2467                error = prepare_highmem_image(bm, &nr_highmem);
2468                if (error)
2469                        goto Free;
2470        }
2471        /*
2472         * Reserve some safe pages for potential later use.
2473         *
2474         * NOTE: This way we make sure there will be enough safe pages for the
2475         * chain_alloc() in get_buffer().  It is a bit wasteful, but
2476         * nr_copy_pages cannot be greater than 50% of the memory anyway.
2477         *
2478 * Also, nr_copy_pages cannot be less than allocated_unsafe_pages.
2479         */
2480        nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
2481        nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
2482        while (nr_pages > 0) {
2483                lp = get_image_page(GFP_ATOMIC, PG_SAFE);
2484                if (!lp) {
2485                        error = -ENOMEM;
2486                        goto Free;
2487                }
2488                lp->next = safe_pages_list;
2489                safe_pages_list = lp;
2490                nr_pages--;
2491        }
2492        /* Preallocate memory for the image */
2493        nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
2494        while (nr_pages > 0) {
2495                lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
2496                if (!lp) {
2497                        error = -ENOMEM;
2498                        goto Free;
2499                }
2500                if (!swsusp_page_is_free(virt_to_page(lp))) {
2501                        /* The page is "safe", add it to the list */
2502                        lp->next = safe_pages_list;
2503                        safe_pages_list = lp;
2504                }
2505                /* Mark the page as allocated */
2506                swsusp_set_page_forbidden(virt_to_page(lp));
2507                swsusp_set_page_free(virt_to_page(lp));
2508                nr_pages--;
2509        }
2510        return 0;
2511
2512 Free:
2513        swsusp_free();
2514        return error;
2515}
2516
2517/**
2518 * get_buffer - Get the address to store the next image data page.
2519 *
2520 * Get the address that snapshot_write_next() should return to its caller to
2521 * write to.
2522 */
2523static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
2524{
2525        struct pbe *pbe;
2526        struct page *page;
2527        unsigned long pfn = memory_bm_next_pfn(bm);
2528
2529        if (pfn == BM_END_OF_MAP)
2530                return ERR_PTR(-EFAULT);
2531
2532        page = pfn_to_page(pfn);
2533        if (PageHighMem(page))
2534                return get_highmem_page_buffer(page, ca);
2535
2536        if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page))
2537                /*
2538                 * We have allocated the "original" page frame and we can
2539                 * use it directly to store the loaded page.
2540                 */
2541                return page_address(page);
2542
2543        /*
2544         * The "original" page frame has not been allocated and we have to
2545         * use a "safe" page frame to store the loaded page.
2546         */
2547        pbe = chain_alloc(ca, sizeof(struct pbe));
2548        if (!pbe) {
2549                swsusp_free();
2550                return ERR_PTR(-ENOMEM);
2551        }
2552        pbe->orig_address = page_address(page);
2553        pbe->address = safe_pages_list;
2554        safe_pages_list = safe_pages_list->next;
2555        pbe->next = restore_pblist;
2556        restore_pblist = pbe;
2557        return pbe->address;
2558}
2559
2560/**
2561 * snapshot_write_next - Get the address to store the next image page.
2562 * @handle: Snapshot handle structure to guide the writing.
2563 *
2564 * On the first call, @handle should point to a zeroed snapshot_handle
2565 * structure.  The structure gets populated then and a pointer to it should be
2566 * passed to this function every next time.
2567 *
2568 * On success, the function returns a positive number.  Then, the caller
2569 * is allowed to write up to the returned number of bytes to the memory
2570 * location computed by the data_of() macro.
2571 *
2572 * The function returns 0 to indicate the "end of file" condition.  Negative
2573 * numbers are returned on errors, in which cases the structure pointed to by
2574 * @handle is not updated and should not be used any more.
2575 */
2576int snapshot_write_next(struct snapshot_handle *handle)
2577{
2578        static struct chain_allocator ca;
2579        int error = 0;
2580
2581        /* Check if we have already loaded the entire image */
2582        if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages)
2583                return 0;
2584
2585        handle->sync_read = 1;
2586
2587        if (!handle->cur) {
2588                if (!buffer)
2589                        /* This causes the buffer to be freed by swsusp_free() */
2590                        buffer = get_image_page(GFP_ATOMIC, PG_ANY);
2591
2592                if (!buffer)
2593                        return -ENOMEM;
2594
2595                handle->buffer = buffer;
2596        } else if (handle->cur == 1) {
2597                error = load_header(buffer);
2598                if (error)
2599                        return error;
2600
2601                safe_pages_list = NULL;
2602
2603                error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
2604                if (error)
2605                        return error;
2606
2607                /* Allocate buffer for page keys. */
2608                error = page_key_alloc(nr_copy_pages);
2609                if (error)
2610                        return error;
2611
2612                hibernate_restore_protection_begin();
2613        } else if (handle->cur <= nr_meta_pages + 1) {
2614                error = unpack_orig_pfns(buffer, &copy_bm);
2615                if (error)
2616                        return error;
2617
2618                if (handle->cur == nr_meta_pages + 1) {
2619                        error = prepare_image(&orig_bm, &copy_bm);
2620                        if (error)
2621                                return error;
2622
2623                        chain_init(&ca, GFP_ATOMIC, PG_SAFE);
2624                        memory_bm_position_reset(&orig_bm);
2625                        restore_pblist = NULL;
2626                        handle->buffer = get_buffer(&orig_bm, &ca);
2627                        handle->sync_read = 0;
2628                        if (IS_ERR(handle->buffer))
2629                                return PTR_ERR(handle->buffer);
2630                }
2631        } else {
2632                copy_last_highmem_page();
2633                /* Restore page key for data page (s390 only). */
2634                page_key_write(handle->buffer);
2635                hibernate_restore_protect_page(handle->buffer);
2636                handle->buffer = get_buffer(&orig_bm, &ca);
2637                if (IS_ERR(handle->buffer))
2638                        return PTR_ERR(handle->buffer);
2639                if (handle->buffer != buffer)
2640                        handle->sync_read = 0;
2641        }
2642        handle->cur++;
2643        return PAGE_SIZE;
2644}
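/*
 * Illustrative caller sketch (editorial; error handling trimmed and
 * read_page() is a hypothetical helper): the image-loading code drives this
 * function in a loop of roughly the following shape, then checks that the
 * whole image was consumed:
 *
 *	struct snapshot_handle handle = {};
 *	int ret;
 *
 *	while ((ret = snapshot_write_next(&handle)) > 0)
 *		read_page(data_of(handle), ret);	// fill up to ret bytes
 *	snapshot_write_finalize(&handle);
 *	if (ret < 0 || !snapshot_image_loaded(&handle))
 *		return -ENODATA;	// incomplete or inconsistent image
 */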
2645
2646/**
2647 * snapshot_write_finalize - Complete the loading of a hibernation image.
2648 *
2649 * Must be called after the last call to snapshot_write_next() in case the last
2650 * page in the image happens to be a highmem page and its contents should be
2651 * stored in highmem.  Additionally, it recycles bitmap memory that's not
2652 * necessary any more.
2653 */
2654void snapshot_write_finalize(struct snapshot_handle *handle)
2655{
2656        copy_last_highmem_page();
2657        /* Restore page key for data page (s390 only). */
2658        page_key_write(handle->buffer);
2659        page_key_free();
2660        hibernate_restore_protect_page(handle->buffer);
2661        /* Do that only if we have loaded the image entirely */
2662        if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) {
2663                memory_bm_recycle(&orig_bm);
2664                free_highmem_data();
2665        }
2666}
2667
2668int snapshot_image_loaded(struct snapshot_handle *handle)
2669{
2670        return !(!nr_copy_pages || !last_highmem_page_copied() ||
2671                        handle->cur <= nr_meta_pages + nr_copy_pages);
2672}
2673
2674#ifdef CONFIG_HIGHMEM
2675/* Assumes that @buf is ready and points to a "safe" page */
2676static inline void swap_two_pages_data(struct page *p1, struct page *p2,
2677                                       void *buf)
2678{
2679        void *kaddr1, *kaddr2;
2680
2681        kaddr1 = kmap_atomic(p1);
2682        kaddr2 = kmap_atomic(p2);
2683        copy_page(buf, kaddr1);
2684        copy_page(kaddr1, kaddr2);
2685        copy_page(kaddr2, buf);
2686        kunmap_atomic(kaddr2);
2687        kunmap_atomic(kaddr1);
2688}
2689
2690/**
2691 * restore_highmem - Put highmem image pages into their original locations.
2692 *
2693 * For each highmem page that was in use before hibernation and is included in
2694 * the image, and also has been allocated by the "restore" kernel, swap its
2695 * current contents with the previous (ie. "before hibernation") ones.
2696 *
2697 * If the restore eventually fails, we can call this function once again and
2698 * restore the highmem state as seen by the restore kernel.
2699 */
2700int restore_highmem(void)
2701{
2702        struct highmem_pbe *pbe = highmem_pblist;
2703        void *buf;
2704
2705        if (!pbe)
2706                return 0;
2707
2708        buf = get_image_page(GFP_ATOMIC, PG_SAFE);
2709        if (!buf)
2710                return -ENOMEM;
2711
2712        while (pbe) {
2713                swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
2714                pbe = pbe->next;
2715        }
2716        free_image_page(buf, PG_UNSAFE_CLEAR);
2717        return 0;
2718}
2719#endif /* CONFIG_HIGHMEM */
2720