/*
 * linux/kernel/power/snapshot.c
 *
 * This file provides system snapshot/restore functionality for swsusp.
 *
 * Copyright (C) 1998-2005 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 *
 * This file is part of the Linux kernel, and is made available under
 * the terms of the GNU General Public License version 2.
 *
 */

#include <linux/version.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/syscalls.h>
#include <linux/console.h>
#include <linux/highmem.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/compiler.h>
#include <linux/ktime.h>

#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#include "power.h"

#ifdef CONFIG_DEBUG_RODATA
/*
 * If enabled via enable_restore_image_protection(), pages holding restored
 * image data are mapped read-only as soon as they have been written, so
 * that stray writes during the rest of the restore are caught immediately.
 */
static bool hibernate_restore_protection;
static bool hibernate_restore_protection_active;

void enable_restore_image_protection(void)
{
	hibernate_restore_protection = true;
}

static inline void hibernate_restore_protection_begin(void)
{
	hibernate_restore_protection_active = hibernate_restore_protection;
}

static inline void hibernate_restore_protection_end(void)
{
	hibernate_restore_protection_active = false;
}

static inline void hibernate_restore_protect_page(void *page_address)
{
	if (hibernate_restore_protection_active)
		set_memory_ro((unsigned long)page_address, 1);
}

static inline void hibernate_restore_unprotect_page(void *page_address)
{
	if (hibernate_restore_protection_active)
		set_memory_rw((unsigned long)page_address, 1);
}
#else
static inline void hibernate_restore_protection_begin(void) {}
static inline void hibernate_restore_protection_end(void) {}
static inline void hibernate_restore_protect_page(void *page_address) {}
static inline void hibernate_restore_unprotect_page(void *page_address) {}
#endif /* CONFIG_DEBUG_RODATA */

static int swsusp_page_is_free(struct page *);
static void swsusp_set_page_forbidden(struct page *);
static void swsusp_unset_page_forbidden(struct page *);

/*
 * Number of bytes to reserve for memory allocations made by device drivers
 * from their ->freeze() and ->freeze_noirq() callbacks so that they don't
 * cause image creation to fail (tolerable image size allowance).
 */
unsigned long reserved_size;

void __init hibernate_reserved_size_init(void)
{
	reserved_size = SPARE_PAGES * PAGE_SIZE;
}

/*
 * Preferred image size in bytes (tunable via /sys/power/image_size).
 * When it is set to N, swsusp will do its best to ensure the image
 * size will not exceed N bytes, but if that is impossible, it will
 * try to create the smallest image possible.
 */
unsigned long image_size;

void __init hibernate_image_size_init(void)
{
	image_size = ((totalram_pages * 2) / 5) * PAGE_SIZE;
}

/*
 * List of PBEs needed for restoring the pages that were allocated before
 * the suspend and included in the suspend image, but have also been
 * allocated by the "resume" kernel, so their contents cannot be written
 * directly to their "original" page frames.
 */
struct pbe *restore_pblist;

/* struct linked_page is used to build chains of pages */

#define LINKED_PAGE_DATA_SIZE	(PAGE_SIZE - sizeof(void *))

struct linked_page {
	struct linked_page *next;
	char data[LINKED_PAGE_DATA_SIZE];
} __packed;

/*
 * List of "safe" pages (i.e. pages that were not used by the image kernel
 * before hibernation) that may be used as temporary storage for image
 * kernel memory contents.
 */
static struct linked_page *safe_pages_list;

/* Pointer to an auxiliary buffer (1 page) */
static void *buffer;

#define PG_ANY		0
#define PG_SAFE		1
#define PG_UNSAFE_CLEAR	1
#define PG_UNSAFE_KEEP	0

static unsigned int allocated_unsafe_pages;

/**
 * get_image_page - Allocate a page for a hibernation image.
 * @gfp_mask: GFP mask for the allocation.
 * @safe_needed: Get pages that were not used before hibernation (restore only)
 *
 * During image restoration, for storing the PBE list and the image data, the
 * page frames used before hibernation must be avoided, because the image data
 * is going to be copied into them.  Pages that conflict in this way are
 * "unsafe": they are marked as "forbidden" and counted in
 * allocated_unsafe_pages, and the allocation is retried until a "safe" page
 * is returned.
 *
 * Every page returned is marked as "forbidden" and "free" so that
 * swsusp_free() can release it later.
 */
static void *get_image_page(gfp_t gfp_mask, int safe_needed)
{
	void *res;

	res = (void *)get_zeroed_page(gfp_mask);
	if (safe_needed)
		while (res && swsusp_page_is_free(virt_to_page(res))) {
			/* The page is unsafe, mark it for swsusp_free() */
			swsusp_set_page_forbidden(virt_to_page(res));
			allocated_unsafe_pages++;
			res = (void *)get_zeroed_page(gfp_mask);
		}
	if (res) {
		swsusp_set_page_forbidden(virt_to_page(res));
		swsusp_set_page_free(virt_to_page(res));
	}
	return res;
}

static void *__get_safe_page(gfp_t gfp_mask)
{
	if (safe_pages_list) {
		void *ret = safe_pages_list;

		safe_pages_list = safe_pages_list->next;
		memset(ret, 0, PAGE_SIZE);
		return ret;
	}
	return get_image_page(gfp_mask, PG_SAFE);
}

unsigned long get_safe_page(gfp_t gfp_mask)
{
	return (unsigned long)__get_safe_page(gfp_mask);
}

static struct page *alloc_image_page(gfp_t gfp_mask)
{
	struct page *page;

	page = alloc_page(gfp_mask);
	if (page) {
		swsusp_set_page_forbidden(page);
		swsusp_set_page_free(page);
	}
	return page;
}

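/* Return a no-longer-needed "safe" page to safe_pages_list for reuse. */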
static void recycle_safe_page(void *page_address)
{
	struct linked_page *lp = page_address;

	lp->next = safe_pages_list;
	safe_pages_list = lp;
}

/**
 * free_image_page - Free a page allocated for hibernation image.
 * @addr: Address of the page to free.
 * @clear_nosave_free: If set, clear the PageNosaveFree bit for the page.
 *
 * The page to free should have been allocated by get_image_page() (page flags
 * set by it are affected).
 */
static inline void free_image_page(void *addr, int clear_nosave_free)
{
	struct page *page;

	BUG_ON(!virt_addr_valid(addr));

	page = virt_to_page(addr);

	swsusp_unset_page_forbidden(page);
	if (clear_nosave_free)
		swsusp_unset_page_free(page);

	__free_page(page);
}

static inline void free_list_of_pages(struct linked_page *list,
				      int clear_page_nosave)
{
	while (list) {
		struct linked_page *lp = list->next;

		free_image_page(list, clear_page_nosave);
		list = lp;
	}
}

/*
 * struct chain_allocator is used for allocating small objects out of
 * a linked list of pages called 'the chain'.
 *
 * The chain grows each time when there is no room for a new object in
 * the current page.  The allocated objects cannot be freed individually.
 * It is only possible to free them all at once, by freeing the entire
 * chain.
 *
 * NOTE: The chain allocator may be inefficient if the allocated objects
 * are not much smaller than PAGE_SIZE.
 */
struct chain_allocator {
	struct linked_page *chain;	/* the chain */
	unsigned int used_space;	/* total size of objects allocated out
					   of the current page */
	gfp_t gfp_mask;		/* mask for allocating pages */
	int safe_needed;	/* if set, only "safe" pages are allocated */
};

static void chain_init(struct chain_allocator *ca, gfp_t gfp_mask,
		       int safe_needed)
{
	ca->chain = NULL;
	ca->used_space = LINKED_PAGE_DATA_SIZE;
	ca->gfp_mask = gfp_mask;
	ca->safe_needed = safe_needed;
}

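/* Carve @size bytes out of the chain, growing it by one page if necessary. */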
static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
{
	void *ret;

	if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
		struct linked_page *lp;

		lp = ca->safe_needed ? __get_safe_page(ca->gfp_mask) :
					get_image_page(ca->gfp_mask, PG_ANY);
		if (!lp)
			return NULL;

		lp->next = ca->chain;
		ca->chain = lp;
		ca->used_space = 0;
	}
	ret = ca->chain->data + ca->used_space;
	ca->used_space += size;
	return ret;
}

/*
 * Data types related to memory bitmaps.
 *
 * The memory bitmap is organized as a radix tree to guarantee fast random
 * access to the bits.  There is one radix tree for each populated memory
 * zone (as returned from create_mem_extents).
 *
 * One radix tree is represented by one struct mem_zone_bm_rtree.  There are
 * two linked lists for the nodes of the tree, one for the inner nodes and
 * one for the leaf nodes.  The linked leaf nodes are used for fast linear
 * access of the memory bitmap.
 *
 * The struct rtree_node represents one node of the radix tree.
 *
 * NOTE: It has to be possible to lay out the bitmap in memory using only
 * allocations of order 0.
 */

#define BM_END_OF_MAP	(~0UL)

#define BM_BITS_PER_BLOCK	(PAGE_SIZE * BITS_PER_BYTE)
#define BM_BLOCK_SHIFT		(PAGE_SHIFT + 3)
#define BM_BLOCK_MASK		((1UL << BM_BLOCK_SHIFT) - 1)

/*
 * struct rtree_node is a wrapper struct to link the nodes
 * of the rtree together for easy linear iteration over
 * bits and easy freeing
 */
struct rtree_node {
	struct list_head list;
	unsigned long *data;
};

/*
 * struct mem_zone_bm_rtree represents a bitmap used for one
 * populated memory zone.
 */
struct mem_zone_bm_rtree {
	struct list_head list;		/* Link Zones together         */
	struct list_head nodes;		/* Radix Tree inner nodes      */
	struct list_head leaves;	/* Radix Tree leaves           */
	unsigned long start_pfn;	/* Zone start page frame       */
	unsigned long end_pfn;		/* Zone end page frame + 1     */
	struct rtree_node *rtree;	/* Radix Tree Root             */
	int levels;			/* Number of Radix Tree Levels */
	unsigned int blocks;		/* Number of Bitmap Blocks     */
};

/* struct bm_position is used for browsing memory bitmaps */

struct bm_position {
	struct mem_zone_bm_rtree *zone;
	struct rtree_node *node;
	unsigned long node_pfn;
	int node_bit;
};

struct memory_bitmap {
	struct list_head zones;
	struct linked_page *p_list;	/* list of pages used to store zone
					   bitmap objects and bitmap block
					   objects */
	struct bm_position cur;	/* most recently used bit position */
};

/* Functions that operate on memory bitmaps */

#define BM_ENTRIES_PER_LEVEL	(PAGE_SIZE / sizeof(unsigned long))
#if BITS_PER_LONG == 32
#define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 2)
#else
#define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 3)
#endif
#define BM_RTREE_LEVEL_MASK	((1UL << BM_RTREE_LEVEL_SHIFT) - 1)

/**
 * alloc_rtree_node - Allocate a new node and add it to the radix tree.
 *
 * This function is used to allocate inner nodes as well as the
 * leaf nodes of the radix tree.  It also adds the node to the
 * corresponding linked list passed in by the *list parameter.
 */
static struct rtree_node *alloc_rtree_node(gfp_t gfp_mask, int safe_needed,
					   struct chain_allocator *ca,
					   struct list_head *list)
{
	struct rtree_node *node;

	node = chain_alloc(ca, sizeof(struct rtree_node));
	if (!node)
		return NULL;

	node->data = get_image_page(gfp_mask, safe_needed);
	if (!node->data)
		return NULL;

	list_add_tail(&node->list, list);

	return node;
}

/**
 * add_rtree_block - Add a new leaf node to the radix tree.
 *
 * The leaf nodes need to be allocated in order to keep the leaves
 * linked list in order.  This is guaranteed by the zone->blocks
 * counter.
 */
static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask,
			   int safe_needed, struct chain_allocator *ca)
{
	struct rtree_node *node, *block, **dst;
	unsigned int levels_needed, block_nr;
	int i;

	block_nr = zone->blocks;
	levels_needed = 0;

	/* How many levels do we need for this block nr? */
	while (block_nr) {
		levels_needed += 1;
		block_nr >>= BM_RTREE_LEVEL_SHIFT;
	}

	/* Make sure the rtree has enough levels */
	for (i = zone->levels; i < levels_needed; i++) {
		node = alloc_rtree_node(gfp_mask, safe_needed, ca,
					&zone->nodes);
		if (!node)
			return -ENOMEM;

		node->data[0] = (unsigned long)zone->rtree;
		zone->rtree = node;
		zone->levels += 1;
	}

	/* Allocate new block */
	block = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->leaves);
	if (!block)
		return -ENOMEM;

	/* Now walk the rtree to insert the block */
	node = zone->rtree;
	dst = &zone->rtree;
	block_nr = zone->blocks;
	for (i = zone->levels; i > 0; i--) {
		int index;

		if (!node) {
			node = alloc_rtree_node(gfp_mask, safe_needed, ca,
						&zone->nodes);
			if (!node)
				return -ENOMEM;
			*dst = node;
		}

		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
		index &= BM_RTREE_LEVEL_MASK;
		dst = (struct rtree_node **)&((*dst)->data[index]);
		node = *dst;
	}

	zone->blocks += 1;
	*dst = block;

	return 0;
}
485
486static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
487 int clear_nosave_free);
488
489
490
491
492
493
494
495
496static struct mem_zone_bm_rtree *create_zone_bm_rtree(gfp_t gfp_mask,
497 int safe_needed,
498 struct chain_allocator *ca,
499 unsigned long start,
500 unsigned long end)
501{
502 struct mem_zone_bm_rtree *zone;
503 unsigned int i, nr_blocks;
504 unsigned long pages;
505
506 pages = end - start;
507 zone = chain_alloc(ca, sizeof(struct mem_zone_bm_rtree));
508 if (!zone)
509 return NULL;
510
511 INIT_LIST_HEAD(&zone->nodes);
512 INIT_LIST_HEAD(&zone->leaves);
513 zone->start_pfn = start;
514 zone->end_pfn = end;
515 nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);
516
517 for (i = 0; i < nr_blocks; i++) {
518 if (add_rtree_block(zone, gfp_mask, safe_needed, ca)) {
519 free_zone_bm_rtree(zone, PG_UNSAFE_CLEAR);
520 return NULL;
521 }
522 }
523
524 return zone;
525}
526
527
528
529
530
531
532
533
534static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
535 int clear_nosave_free)
536{
537 struct rtree_node *node;
538
539 list_for_each_entry(node, &zone->nodes, list)
540 free_image_page(node->data, clear_nosave_free);
541
542 list_for_each_entry(node, &zone->leaves, list)
543 free_image_page(node->data, clear_nosave_free);
544}
545
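/* Rewind the bitmap's current position to its first zone's first leaf node. */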
static void memory_bm_position_reset(struct memory_bitmap *bm)
{
	bm->cur.zone = list_entry(bm->zones.next, struct mem_zone_bm_rtree,
				  list);
	bm->cur.node = list_entry(bm->cur.zone->leaves.next,
				  struct rtree_node, list);
	bm->cur.node_pfn = 0;
	bm->cur.node_bit = 0;
}

static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);

struct mem_extent {
	struct list_head hook;
	unsigned long start;
	unsigned long end;
};

/**
 * free_mem_extents - Free a list of memory extents.
 * @list: List of extents to free.
 */
static void free_mem_extents(struct list_head *list)
{
	struct mem_extent *ext, *aux;

	list_for_each_entry_safe(ext, aux, list, hook) {
		list_del(&ext->hook);
		kfree(ext);
	}
}

/**
 * create_mem_extents - Create a list of memory extents.
 * @list: List to put the extents into.
 * @gfp_mask: Mask to use for memory allocations.
 *
 * The extents represent contiguous ranges of PFNs covering the populated
 * zones; overlapping or adjacent zone ranges are merged into one extent.
 */
static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
{
	struct zone *zone;

	INIT_LIST_HEAD(list);

	for_each_populated_zone(zone) {
		unsigned long zone_start, zone_end;
		struct mem_extent *ext, *cur, *aux;

		zone_start = zone->zone_start_pfn;
		zone_end = zone_end_pfn(zone);

		list_for_each_entry(ext, list, hook)
			if (zone_start <= ext->end)
				break;

		if (&ext->hook == list || zone_end < ext->start) {
			/* New extent is necessary */
			struct mem_extent *new_ext;

			new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask);
			if (!new_ext) {
				free_mem_extents(list);
				return -ENOMEM;
			}
			new_ext->start = zone_start;
			new_ext->end = zone_end;
			list_add_tail(&new_ext->hook, &ext->hook);
			continue;
		}

		/* Merge this zone's range of PFNs with the existing one */
		if (zone_start < ext->start)
			ext->start = zone_start;
		if (zone_end > ext->end)
			ext->end = zone_end;

		/* More merging may be possible */
		cur = ext;
		list_for_each_entry_safe_continue(cur, aux, list, hook) {
			if (zone_end < cur->start)
				break;
			if (zone_end < cur->end)
				ext->end = cur->end;
			list_del(&cur->hook);
			kfree(cur);
		}
	}

	return 0;
}

/**
 * memory_bm_create - Allocate memory for a memory bitmap.
 */
static int memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask,
			    int safe_needed)
{
	struct chain_allocator ca;
	struct list_head mem_extents;
	struct mem_extent *ext;
	int error;

	chain_init(&ca, gfp_mask, safe_needed);
	INIT_LIST_HEAD(&bm->zones);

	error = create_mem_extents(&mem_extents, gfp_mask);
	if (error)
		return error;

	list_for_each_entry(ext, &mem_extents, hook) {
		struct mem_zone_bm_rtree *zone;

		zone = create_zone_bm_rtree(gfp_mask, safe_needed, &ca,
					    ext->start, ext->end);
		if (!zone) {
			error = -ENOMEM;
			goto Error;
		}
		list_add_tail(&zone->list, &bm->zones);
	}

	bm->p_list = ca.chain;
	memory_bm_position_reset(bm);
 Exit:
	free_mem_extents(&mem_extents);
	return error;

 Error:
	bm->p_list = ca.chain;
	memory_bm_free(bm, PG_UNSAFE_CLEAR);
	goto Exit;
}

/**
 * memory_bm_free - Free memory occupied by the memory bitmap.
 * @bm: Memory bitmap.
 */
static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
{
	struct mem_zone_bm_rtree *zone;

	list_for_each_entry(zone, &bm->zones, list)
		free_zone_bm_rtree(zone, clear_nosave_free);

	free_list_of_pages(bm->p_list, clear_nosave_free);

	INIT_LIST_HEAD(&bm->zones);
}

/**
 * memory_bm_find_bit - Find the bit for a given PFN in a memory bitmap.
 *
 * Find the bit in memory bitmap @bm that corresponds to the given PFN.
 * The cur.zone, cur.node and cur.node_pfn members of @bm are updated.
 *
 * Walk the radix tree to find the page containing the bit that represents
 * @pfn and return the position of the bit in @addr and @bit_nr.
 */
static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
			      void **addr, unsigned int *bit_nr)
{
	struct mem_zone_bm_rtree *curr, *zone;
	struct rtree_node *node;
	int i, block_nr;

	zone = bm->cur.zone;

	if (pfn >= zone->start_pfn && pfn < zone->end_pfn)
		goto zone_found;

	zone = NULL;

	/* Find the right zone */
	list_for_each_entry(curr, &bm->zones, list) {
		if (pfn >= curr->start_pfn && pfn < curr->end_pfn) {
			zone = curr;
			break;
		}
	}

	if (!zone)
		return -EFAULT;

zone_found:
	/*
	 * We have found the zone. Now walk the radix tree to find the leaf
	 * node for our PFN.
	 */
	node = bm->cur.node;
	if (((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
		goto node_found;

	node = zone->rtree;
	block_nr = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT;

	for (i = zone->levels; i > 0; i--) {
		int index;

		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
		index &= BM_RTREE_LEVEL_MASK;
		BUG_ON(node->data[index] == 0);
		node = (struct rtree_node *)node->data[index];
	}

node_found:
	/* Update last position */
	bm->cur.zone = zone;
	bm->cur.node = node;
	bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK;

	/* Set return values */
	*addr = node->data;
	*bit_nr = (pfn - zone->start_pfn) & BM_BLOCK_MASK;

	return 0;
}

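/*
 * The helpers below locate the bit for @pfn via memory_bm_find_bit() and
 * then set, clear or test it.  The "_check" variant reports a missing PFN
 * through its return value instead of triggering a BUG.
 */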
static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	set_bit(bit, addr);
}

static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	if (!error)
		set_bit(bit, addr);

	return error;
}

static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	clear_bit(bit, addr);
}

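/* Clear the bit for the PFN most recently returned by memory_bm_next_pfn(). */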
static void memory_bm_clear_current(struct memory_bitmap *bm)
{
	int bit;

	bit = max(bm->cur.node_bit - 1, 0);
	clear_bit(bit, bm->cur.node->data);
}

static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	return test_bit(bit, addr);
}

static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;

	return !memory_bm_find_bit(bm, pfn, &addr, &bit);
}

/*
 * rtree_next_node - Jump to the next leaf node.
 *
 * Set the position to the beginning of the next node in the
 * memory bitmap.  This is either the next node in the current
 * zone's radix tree or the first node in the radix tree of the
 * next zone.
 *
 * Return true if there is a next node, false otherwise.
 */
static bool rtree_next_node(struct memory_bitmap *bm)
{
	if (!list_is_last(&bm->cur.node->list, &bm->cur.zone->leaves)) {
		bm->cur.node = list_entry(bm->cur.node->list.next,
					  struct rtree_node, list);
		bm->cur.node_pfn += BM_BITS_PER_BLOCK;
		bm->cur.node_bit = 0;
		touch_softlockup_watchdog();
		return true;
	}

	/* No more nodes, goto next zone */
	if (!list_is_last(&bm->cur.zone->list, &bm->zones)) {
		bm->cur.zone = list_entry(bm->cur.zone->list.next,
					  struct mem_zone_bm_rtree, list);
		bm->cur.node = list_entry(bm->cur.zone->leaves.next,
					  struct rtree_node, list);
		bm->cur.node_pfn = 0;
		bm->cur.node_bit = 0;
		return true;
	}

	/* No more zones */
	return false;
}

/**
 * memory_bm_next_pfn - Find the next set bit in a memory bitmap.
 * @bm: Memory bitmap.
 *
 * Starting from the last returned position this function searches for the
 * next set bit in @bm and returns the PFN represented by it.  If no more bits
 * are set, BM_END_OF_MAP is returned.
 *
 * It is required to run memory_bm_position_reset() before the first call to
 * this function for the given memory bitmap.
 */
static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
{
	unsigned long bits, pfn, pages;
	int bit;

	do {
		pages = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn;
		bits = min(pages - bm->cur.node_pfn, BM_BITS_PER_BLOCK);
		bit = find_next_bit(bm->cur.node->data, bits,
				    bm->cur.node_bit);
		if (bit < bits) {
			pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit;
			bm->cur.node_bit = bit + 1;
			return pfn;
		}
	} while (rtree_next_node(bm));

	return BM_END_OF_MAP;
}

/*
 * This structure represents a range of page frames the contents of which
 * should not be saved during hibernation.
 */
struct nosave_region {
	struct list_head list;
	unsigned long start_pfn;
	unsigned long end_pfn;
};

static LIST_HEAD(nosave_regions);

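/* Return all of a zone's bitmap pages to the list of "safe" pages. */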
static void recycle_zone_bm_rtree(struct mem_zone_bm_rtree *zone)
{
	struct rtree_node *node;

	list_for_each_entry(node, &zone->nodes, list)
		recycle_safe_page(node->data);

	list_for_each_entry(node, &zone->leaves, list)
		recycle_safe_page(node->data);
}

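/*
 * Recycle the bitmap's storage: its node pages and the pages backing its
 * chain allocator go back onto safe_pages_list instead of being freed, so
 * that they can be reused while loading the image.
 */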
static void memory_bm_recycle(struct memory_bitmap *bm)
{
	struct mem_zone_bm_rtree *zone;
	struct linked_page *p_list;

	list_for_each_entry(zone, &bm->zones, list)
		recycle_zone_bm_rtree(zone);

	p_list = bm->p_list;
	while (p_list) {
		struct linked_page *lp = p_list;

		p_list = lp->next;
		recycle_safe_page(lp);
	}
}

/**
 * register_nosave_region - Register a region of unsaveable memory.
 *
 * Register a range of page frames the contents of which should not be saved
 * during hibernation (to be used in the early initialization code).
 */
void __init __register_nosave_region(unsigned long start_pfn,
				     unsigned long end_pfn, int use_kmalloc)
{
	struct nosave_region *region;

	if (start_pfn >= end_pfn)
		return;

	if (!list_empty(&nosave_regions)) {
		/* Try to extend the previous region (they should be sorted) */
		region = list_entry(nosave_regions.prev,
				    struct nosave_region, list);
		if (region->end_pfn == start_pfn) {
			region->end_pfn = end_pfn;
			goto Report;
		}
	}
	if (use_kmalloc) {
		/* The memory management subsystem is up and running. */
		region = kmalloc(sizeof(struct nosave_region), GFP_KERNEL);
		BUG_ON(!region);
	} else {
		/* This allocation cannot fail */
		region = memblock_virt_alloc(sizeof(struct nosave_region), 0);
	}
	region->start_pfn = start_pfn;
	region->end_pfn = end_pfn;
	list_add_tail(&region->list, &nosave_regions);
 Report:
	printk(KERN_INFO "PM: Registered nosave memory: [mem %#010llx-%#010llx]\n",
		(unsigned long long) start_pfn << PAGE_SHIFT,
		((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
}

/*
 * Set bits in this map correspond to the page frames the contents of which
 * should not be saved during the suspend.
 */
static struct memory_bitmap *forbidden_pages_map;

/* Set bits in this map correspond to free page frames. */
static struct memory_bitmap *free_pages_map;

/*
 * Each page frame allocated for creating the image is marked by setting the
 * corresponding bits in forbidden_pages_map and free_pages_map simultaneously.
 */

void swsusp_set_page_free(struct page *page)
{
	if (free_pages_map)
		memory_bm_set_bit(free_pages_map, page_to_pfn(page));
}

static int swsusp_page_is_free(struct page *page)
{
	return free_pages_map ?
		memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0;
}

void swsusp_unset_page_free(struct page *page)
{
	if (free_pages_map)
		memory_bm_clear_bit(free_pages_map, page_to_pfn(page));
}

static void swsusp_set_page_forbidden(struct page *page)
{
	if (forbidden_pages_map)
		memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page));
}

int swsusp_page_is_forbidden(struct page *page)
{
	return forbidden_pages_map ?
		memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0;
}

static void swsusp_unset_page_forbidden(struct page *page)
{
	if (forbidden_pages_map)
		memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
}

/**
 * mark_nosave_pages - Mark pages that should not be saved.
 * @bm: Memory bitmap.
 *
 * Set the bits in @bm that correspond to the page frames the contents of
 * which should not be saved.
 */
static void mark_nosave_pages(struct memory_bitmap *bm)
{
	struct nosave_region *region;

	if (list_empty(&nosave_regions))
		return;

	list_for_each_entry(region, &nosave_regions, list) {
		unsigned long pfn;

		pr_debug("PM: Marking nosave pages: [mem %#010llx-%#010llx]\n",
			 (unsigned long long) region->start_pfn << PAGE_SHIFT,
			 ((unsigned long long) region->end_pfn << PAGE_SHIFT)
				- 1);

		for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
			if (pfn_valid(pfn)) {
				/*
				 * It is safe to ignore the result of
				 * mem_bm_set_bit_check() here, since we won't
				 * touch the PFNs for which the error is
				 * returned anyway.
				 */
				mem_bm_set_bit_check(bm, pfn);
			}
	}
}

/**
 * create_basic_memory_bitmaps - Create bitmaps to hold basic page information.
 *
 * Create bitmaps needed for marking page frames that should not be saved and
 * free page frames.  The forbidden_pages_map and free_pages_map pointers are
 * only modified if everything goes well, because we don't want the bits to be
 * touched before both bitmaps are set up.
 */
int create_basic_memory_bitmaps(void)
{
	struct memory_bitmap *bm1, *bm2;
	int error = 0;

	if (forbidden_pages_map && free_pages_map)
		return 0;
	else
		BUG_ON(forbidden_pages_map || free_pages_map);

	bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
	if (!bm1)
		return -ENOMEM;

	error = memory_bm_create(bm1, GFP_KERNEL, PG_ANY);
	if (error)
		goto Free_first_object;

	bm2 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
	if (!bm2)
		goto Free_first_bitmap;

	error = memory_bm_create(bm2, GFP_KERNEL, PG_ANY);
	if (error)
		goto Free_second_object;

	forbidden_pages_map = bm1;
	free_pages_map = bm2;
	mark_nosave_pages(forbidden_pages_map);

	pr_debug("PM: Basic memory bitmaps created\n");

	return 0;

 Free_second_object:
	kfree(bm2);
 Free_first_bitmap:
	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
 Free_first_object:
	kfree(bm1);
	return -ENOMEM;
}

/**
 * free_basic_memory_bitmaps - Free memory bitmaps holding basic information.
 *
 * Free memory bitmaps allocated by create_basic_memory_bitmaps().  The
 * auxiliary pointers are necessary so that the bitmaps themselves are not
 * referred to while they are being freed.
 */
void free_basic_memory_bitmaps(void)
{
	struct memory_bitmap *bm1, *bm2;

	if (WARN_ON(!(forbidden_pages_map && free_pages_map)))
		return;

	bm1 = forbidden_pages_map;
	bm2 = free_pages_map;
	forbidden_pages_map = NULL;
	free_pages_map = NULL;
	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
	kfree(bm1);
	memory_bm_free(bm2, PG_UNSAFE_CLEAR);
	kfree(bm2);

	pr_debug("PM: Basic memory bitmaps freed\n");
}

void clear_free_pages(void)
{
#ifdef CONFIG_PAGE_POISONING_ZERO
	struct memory_bitmap *bm = free_pages_map;
	unsigned long pfn;

	if (WARN_ON(!(free_pages_map)))
		return;

	memory_bm_position_reset(bm);
	pfn = memory_bm_next_pfn(bm);
	while (pfn != BM_END_OF_MAP) {
		if (pfn_valid(pfn))
			clear_highpage(pfn_to_page(pfn));

		pfn = memory_bm_next_pfn(bm);
	}
	memory_bm_position_reset(bm);
	pr_info("PM: free pages cleared after restore\n");
#endif /* PAGE_POISONING_ZERO */
}

/**
 * snapshot_additional_pages - Estimate the number of extra pages needed.
 * @zone: Memory zone to carry out the computation for.
 *
 * Estimate the number of additional pages needed for setting up a hibernation
 * image data structures for @zone (usually, the returned value is greater
 * than the exact number).
 */
unsigned int snapshot_additional_pages(struct zone *zone)
{
	unsigned int rtree, nodes;

	rtree = nodes = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
	rtree += DIV_ROUND_UP(rtree * sizeof(struct rtree_node),
			      LINKED_PAGE_DATA_SIZE);
	while (nodes > 1) {
		nodes = DIV_ROUND_UP(nodes, BM_ENTRIES_PER_LEVEL);
		rtree += nodes;
	}

	return 2 * rtree;
}

#ifdef CONFIG_HIGHMEM
/**
 * count_free_highmem_pages - Compute the total number of free highmem pages.
 */
static unsigned int count_free_highmem_pages(void)
{
	struct zone *zone;
	unsigned int cnt = 0;

	for_each_populated_zone(zone)
		if (is_highmem(zone))
			cnt += zone_page_state(zone, NR_FREE_PAGES);

	return cnt;
}

/**
 * saveable_highmem_page - Check if a highmem page is saveable.
 *
 * Determine whether a highmem page should be included in a hibernation image.
 *
 * We should save the page if it isn't Nosave or NosaveFree, or Reserved,
 * and it isn't part of a free chunk of pages.
 */
static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_page(pfn);
	if (page_zone(page) != zone)
		return NULL;

	BUG_ON(!PageHighMem(page));

	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page) ||
	    PageReserved(page))
		return NULL;

	if (page_is_guard(page))
		return NULL;

	return page;
}

/**
 * count_highmem_pages - Compute the total number of saveable highmem pages.
 */
static unsigned int count_highmem_pages(void)
{
	struct zone *zone;
	unsigned int n = 0;

	for_each_populated_zone(zone) {
		unsigned long pfn, max_zone_pfn;

		if (!is_highmem(zone))
			continue;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_highmem_page(zone, pfn))
				n++;
	}
	return n;
}
#else
static inline void *saveable_highmem_page(struct zone *z, unsigned long p)
{
	return NULL;
}
#endif /* CONFIG_HIGHMEM */

/**
 * saveable_page - Check if the given page is saveable.
 *
 * Determine whether a non-highmem page should be included in a hibernation
 * image.
 *
 * We should save the page if it isn't Nosave, and is not in the range
 * of pages statically defined as 'unsaveable', and it isn't part of
 * a free chunk of pages.
 */
static struct page *saveable_page(struct zone *zone, unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_page(pfn);
	if (page_zone(page) != zone)
		return NULL;

	BUG_ON(PageHighMem(page));

	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
		return NULL;

	if (PageReserved(page)
	    && (!kernel_page_present(page) || pfn_is_nosave(pfn)))
		return NULL;

	if (page_is_guard(page))
		return NULL;

	return page;
}

/**
 * count_data_pages - Compute the total number of saveable non-highmem pages.
 */
static unsigned int count_data_pages(void)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;
	unsigned int n = 0;

	for_each_populated_zone(zone) {
		if (is_highmem(zone))
			continue;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_page(zone, pfn))
				n++;
	}
	return n;
}

/*
 * This is needed, because copy_page and memcpy are not usable for copying
 * task structs.
 */
static inline void do_copy_page(long *dst, long *src)
{
	int n;

	for (n = PAGE_SIZE / sizeof(long); n; n--)
		*dst++ = *src++;
}

/**
 * safe_copy_page - Copy a page in a safe way.
 *
 * Check if the page we are going to copy is marked as present in the kernel
 * page tables (this always is the case if CONFIG_DEBUG_PAGEALLOC is not set).
 * If it is not present, map it temporarily around the copy.
 */
static void safe_copy_page(void *dst, struct page *s_page)
{
	if (kernel_page_present(s_page)) {
		do_copy_page(dst, page_address(s_page));
	} else {
		kernel_map_pages(s_page, 1, 1);
		do_copy_page(dst, page_address(s_page));
		kernel_map_pages(s_page, 1, 0);
	}
}

#ifdef CONFIG_HIGHMEM
static inline struct page *page_is_saveable(struct zone *zone, unsigned long pfn)
{
	return is_highmem(zone) ?
		saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
}

static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
	struct page *s_page, *d_page;
	void *src, *dst;

	s_page = pfn_to_page(src_pfn);
	d_page = pfn_to_page(dst_pfn);
	if (PageHighMem(s_page)) {
		src = kmap_atomic(s_page);
		dst = kmap_atomic(d_page);
		do_copy_page(dst, src);
		kunmap_atomic(dst);
		kunmap_atomic(src);
	} else {
		if (PageHighMem(d_page)) {
			/*
			 * The page pointed to by src may contain some kernel
			 * data modified by kmap_atomic()
			 */
			safe_copy_page(buffer, s_page);
			dst = kmap_atomic(d_page);
			copy_page(dst, buffer);
			kunmap_atomic(dst);
		} else {
			safe_copy_page(page_address(d_page), s_page);
		}
	}
}
#else
#define page_is_saveable(zone, pfn)	saveable_page(zone, pfn)

static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
	safe_copy_page(page_address(pfn_to_page(dst_pfn)),
		       pfn_to_page(src_pfn));
}
#endif /* CONFIG_HIGHMEM */

static void copy_data_pages(struct memory_bitmap *copy_bm,
			    struct memory_bitmap *orig_bm)
{
	struct zone *zone;
	unsigned long pfn;

	for_each_populated_zone(zone) {
		unsigned long max_zone_pfn;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (page_is_saveable(zone, pfn))
				memory_bm_set_bit(orig_bm, pfn);
	}
	memory_bm_position_reset(orig_bm);
	memory_bm_position_reset(copy_bm);
	for (;;) {
		pfn = memory_bm_next_pfn(orig_bm);
		if (unlikely(pfn == BM_END_OF_MAP))
			break;
		copy_data_page(memory_bm_next_pfn(copy_bm), pfn);
	}
}

/* Total number of image pages */
static unsigned int nr_copy_pages;
/* Number of pages needed for saving the original pfns of the image pages */
static unsigned int nr_meta_pages;
/*
 * Numbers of normal and highmem page frames allocated for hibernation image
 * before suspending devices.
 */
unsigned int alloc_normal, alloc_highmem;
/*
 * Memory bitmap used for marking saveable pages (during hibernation) or
 * hibernation image pages (during restore)
 */
static struct memory_bitmap orig_bm;
/*
 * Memory bitmap used during hibernation for marking allocated pages that will
 * contain copies of saveable pages.  During restore it is initially used for
 * marking hibernation image pages, but then the set bits from it are
 * duplicated in orig_bm and it is released.  On highmem systems it is next
 * used for marking "safe" highmem pages, but it has to be reinitialized for
 * this purpose.
 */
static struct memory_bitmap copy_bm;

/**
 * swsusp_free - Free pages allocated for hibernation image.
 *
 * Image pages are allocated before snapshot creation, so they need to be
 * released after resume.
 */
void swsusp_free(void)
{
	unsigned long fb_pfn, fr_pfn;

	if (!forbidden_pages_map || !free_pages_map)
		goto out;

	memory_bm_position_reset(forbidden_pages_map);
	memory_bm_position_reset(free_pages_map);

loop:
	fr_pfn = memory_bm_next_pfn(free_pages_map);
	fb_pfn = memory_bm_next_pfn(forbidden_pages_map);

	/*
	 * Find the next bit set in both bitmaps. This is guaranteed to
	 * terminate when fb_pfn == fr_pfn == BM_END_OF_MAP.
	 */
	do {
		if (fb_pfn < fr_pfn)
			fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
		if (fr_pfn < fb_pfn)
			fr_pfn = memory_bm_next_pfn(free_pages_map);
	} while (fb_pfn != fr_pfn);

	if (fr_pfn != BM_END_OF_MAP && pfn_valid(fr_pfn)) {
		struct page *page = pfn_to_page(fr_pfn);

		memory_bm_clear_current(forbidden_pages_map);
		memory_bm_clear_current(free_pages_map);
		hibernate_restore_unprotect_page(page_address(page));
		__free_page(page);
		goto loop;
	}

out:
	nr_copy_pages = 0;
	nr_meta_pages = 0;
	restore_pblist = NULL;
	buffer = NULL;
	alloc_normal = 0;
	alloc_highmem = 0;
	hibernate_restore_protection_end();
}

/* Helper functions used for the shrinking of memory. */

#define GFP_IMAGE	(GFP_KERNEL | __GFP_NOWARN)

/**
 * preallocate_image_pages - Allocate a number of pages for hibernation image.
 * @nr_pages: Number of page frames to allocate.
 * @mask: GFP flags to use for the allocation.
 *
 * Return value: Number of page frames actually allocated
 */
static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
{
	unsigned long nr_alloc = 0;

	while (nr_pages > 0) {
		struct page *page;

		page = alloc_image_page(mask);
		if (!page)
			break;
		memory_bm_set_bit(&copy_bm, page_to_pfn(page));
		if (PageHighMem(page))
			alloc_highmem++;
		else
			alloc_normal++;
		nr_pages--;
		nr_alloc++;
	}

	return nr_alloc;
}

static unsigned long preallocate_image_memory(unsigned long nr_pages,
					      unsigned long avail_normal)
{
	unsigned long alloc;

	if (avail_normal <= alloc_normal)
		return 0;	/* nothing left to preallocate */

	alloc = avail_normal - alloc_normal;
	if (nr_pages < alloc)
		alloc = nr_pages;

	return preallocate_image_pages(alloc, GFP_IMAGE);
}

#ifdef CONFIG_HIGHMEM
static unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
	return preallocate_image_pages(nr_pages, GFP_IMAGE | __GFP_HIGHMEM);
}

/**
 * __fraction - Compute (an approximation of) x * (multiplier / base).
 */
static unsigned long __fraction(u64 x, u64 multiplier, u64 base)
{
	x *= multiplier;
	do_div(x, base);
	return (unsigned long)x;
}

static unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
						  unsigned long highmem,
						  unsigned long total)
{
	unsigned long alloc = __fraction(nr_pages, highmem, total);

	return preallocate_image_pages(alloc, GFP_IMAGE | __GFP_HIGHMEM);
}
#else
static inline unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
	return 0;
}

static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
							  unsigned long highmem,
							  unsigned long total)
{
	return 0;
}
#endif /* CONFIG_HIGHMEM */

/**
 * free_unnecessary_pages - Release preallocated pages not needed for the image.
 */
static unsigned long free_unnecessary_pages(void)
{
	unsigned long save, to_free_normal, to_free_highmem, free;

	save = count_data_pages();
	if (alloc_normal >= save) {
		to_free_normal = alloc_normal - save;
		save = 0;
	} else {
		to_free_normal = 0;
		save -= alloc_normal;
	}
	save += count_highmem_pages();
	if (alloc_highmem >= save) {
		to_free_highmem = alloc_highmem - save;
	} else {
		to_free_highmem = 0;
		save -= alloc_highmem;
		if (to_free_normal > save)
			to_free_normal -= save;
		else
			to_free_normal = 0;
	}
	free = to_free_normal + to_free_highmem;

	memory_bm_position_reset(&copy_bm);

	while (to_free_normal > 0 || to_free_highmem > 0) {
		unsigned long pfn = memory_bm_next_pfn(&copy_bm);
		struct page *page = pfn_to_page(pfn);

		if (PageHighMem(page)) {
			if (!to_free_highmem)
				continue;
			to_free_highmem--;
			alloc_highmem--;
		} else {
			if (!to_free_normal)
				continue;
			to_free_normal--;
			alloc_normal--;
		}
		memory_bm_clear_bit(&copy_bm, pfn);
		swsusp_unset_page_forbidden(page);
		swsusp_unset_page_free(page);
		__free_page(page);
	}

	return free;
}

/**
 * minimum_image_size - Estimate the minimum acceptable size of an image.
 * @saveable: Number of saveable pages in the system.
 *
 * We want to avoid attempting to free too much memory too hard, so estimate
 * the minimum acceptable size of a hibernation image to use as the lower
 * limit for preallocating memory.
 *
 * We assume that the minimum image size should be proportional to
 *
 * [number of saveable pages] - [number of pages that can be freed in theory]
 *
 * where the second term is the sum of (1) reclaimable slab pages, (2) active
 * and (3) inactive anonymous pages, (4) active and (5) inactive file pages,
 * minus mapped file pages.
 */
static unsigned long minimum_image_size(unsigned long saveable)
{
	unsigned long size;

	size = global_page_state(NR_SLAB_RECLAIMABLE)
		+ global_node_page_state(NR_ACTIVE_ANON)
		+ global_node_page_state(NR_INACTIVE_ANON)
		+ global_node_page_state(NR_ACTIVE_FILE)
		+ global_node_page_state(NR_INACTIVE_FILE)
		- global_node_page_state(NR_FILE_MAPPED);

	return saveable <= size ? 0 : saveable - size;
}

/**
 * hibernate_preallocate_memory - Preallocate memory for hibernation image.
 *
 * To create a hibernation image it is necessary to make a copy of every page
 * frame in use.  We also need a number of page frames to be free during
 * hibernation for allocations made while saving the image and for device
 * drivers, in case they need to allocate memory from their hibernation
 * callbacks (these two numbers are given by PAGES_FOR_IO (which is a rough
 * estimate) and reserved_size divided by PAGE_SIZE (which is tunable through
 * /sys/power/reserved_size), respectively.  To make this possible we compute
 * the total number of available page frames and allocate at least
 *
 * ([page frames total] - PAGES_FOR_IO - [metadata pages]) / 2
 *  - 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
 *
 * of them, which corresponds to the maximum size of a hibernation image.
 *
 * If image_size is set below the number following from the above formula,
 * the preallocation of memory is continued until the total number of saveable
 * pages in the system is below the requested image size or the minimum
 * acceptable image size returned by minimum_image_size(), whichever is
 * greater.
 */
int hibernate_preallocate_memory(void)
{
	struct zone *zone;
	unsigned long saveable, size, max_size, count, highmem, pages = 0;
	unsigned long alloc, save_highmem, pages_highmem, avail_normal;
	ktime_t start, stop;
	int error;

	printk(KERN_INFO "PM: Preallocating image memory... ");
	start = ktime_get();

	error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY);
	if (error)
		goto err_out;

	error = memory_bm_create(&copy_bm, GFP_IMAGE, PG_ANY);
	if (error)
		goto err_out;

	alloc_normal = 0;
	alloc_highmem = 0;

	/* Count the number of saveable data pages. */
	save_highmem = count_highmem_pages();
	saveable = count_data_pages();

	/*
	 * Compute the total number of page frames we can use (count) and the
	 * number of pages needed for image metadata (size).
	 */
	count = saveable;
	saveable += save_highmem;
	highmem = save_highmem;
	size = 0;
	for_each_populated_zone(zone) {
		size += snapshot_additional_pages(zone);
		if (is_highmem(zone))
			highmem += zone_page_state(zone, NR_FREE_PAGES);
		else
			count += zone_page_state(zone, NR_FREE_PAGES);
	}
	avail_normal = count;
	count += highmem;
	count -= totalreserve_pages;

	/* Add number of pages required for page keys (s390 only). */
	size += page_key_additional_pages(saveable);

	/* Compute the maximum number of saveable pages to leave in memory. */
	max_size = (count - (size + PAGES_FOR_IO)) / 2
			- 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE);
	/* Compute the desired number of image pages specified by image_size. */
	size = DIV_ROUND_UP(image_size, PAGE_SIZE);
	if (size > max_size)
		size = max_size;
	/*
	 * If the desired number of image pages is at least as large as the
	 * current number of saveable pages in memory, allocate page frames for
	 * the image and we're done.
	 */
	if (size >= saveable) {
		pages = preallocate_image_highmem(save_highmem);
		pages += preallocate_image_memory(saveable - pages, avail_normal);
		goto out;
	}

	/* Estimate the minimum size of the image. */
	pages = minimum_image_size(saveable);
	/*
	 * To avoid excessive pressure on the normal zone, leave room in it to
	 * accommodate an image of the minimum size (unless it's already too
	 * small, in which case don't preallocate pages from it at all).
	 */
	if (avail_normal > pages)
		avail_normal -= pages;
	else
		avail_normal = 0;
	if (size < pages)
		size = min_t(unsigned long, pages, max_size);

	/*
	 * Let the memory management subsystem know that we're going to need a
	 * large number of page frames to be allocated and make it free some
	 * memory.  NOTE: If this is not done, performance will be hurt badly
	 * in some test cases.
	 */
	shrink_all_memory(saveable - size);

	/*
	 * The number of saveable pages in memory was too high, so apply some
	 * pressure to decrease it.  First, make room for the largest possible
	 * image and fail if that doesn't work.  Next, try to decrease the size
	 * of the image as much as indicated by 'size' using allocations from
	 * highmem and non-highmem zones separately.
	 */
	pages_highmem = preallocate_image_highmem(highmem / 2);
	alloc = count - max_size;
	if (alloc > pages_highmem)
		alloc -= pages_highmem;
	else
		alloc = 0;
	pages = preallocate_image_memory(alloc, avail_normal);
	if (pages < alloc) {
		/* We have exhausted non-highmem pages, try highmem. */
		alloc -= pages;
		pages += pages_highmem;
		pages_highmem = preallocate_image_highmem(alloc);
		if (pages_highmem < alloc)
			goto err_out;
		pages += pages_highmem;
		/*
		 * size is the desired number of saveable pages to leave in
		 * memory, so try to preallocate (all memory - size) pages.
		 */
		alloc = (count - pages) - size;
		pages += preallocate_image_highmem(alloc);
	} else {
		/*
		 * There are approximately max_size saveable pages at this
		 * point and we cannot reduce the number of saveable pages
		 * with any more than (max_size - size) highmem and
		 * non-highmem allocations.
		 */
		alloc = max_size - size;
		size = preallocate_highmem_fraction(alloc, highmem, count);
		pages_highmem += size;
		alloc -= size;
		size = preallocate_image_memory(alloc, avail_normal);
		pages_highmem += preallocate_image_highmem(alloc - size);
		pages += pages_highmem + size;
	}

	/*
	 * We only need as many page frames for the image as there are saveable
	 * pages in memory, but we have allocated more.  Release the excessive
	 * ones now.
	 */
	pages -= free_unnecessary_pages();

 out:
	stop = ktime_get();
	printk(KERN_CONT "done (allocated %lu pages)\n", pages);
	swsusp_show_speed(start, stop, pages, "Allocated");

	return 0;

 err_out:
	printk(KERN_CONT "\n");
	swsusp_free();
	return -ENOMEM;
}

#ifdef CONFIG_HIGHMEM
/**
 * count_pages_for_highmem - Count non-highmem pages needed for copying highmem.
 *
 * Compute the number of non-highmem pages that will be necessary for creating
 * copies of highmem pages, if there are not enough free highmem page frames
 * to store the copies.
 */
static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
{
	unsigned int free_highmem = count_free_highmem_pages() + alloc_highmem;

	if (free_highmem >= nr_highmem)
		nr_highmem = 0;
	else
		nr_highmem -= free_highmem;

	return nr_highmem;
}
#else
static unsigned int count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
#endif /* CONFIG_HIGHMEM */

/**
 * enough_free_mem - Check if there is enough free memory for the image.
 */
static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
{
	struct zone *zone;
	unsigned int free = alloc_normal;

	for_each_populated_zone(zone)
		if (!is_highmem(zone))
			free += zone_page_state(zone, NR_FREE_PAGES);

	nr_pages += count_pages_for_highmem(nr_highmem);
	pr_debug("PM: Normal pages needed: %u + %u, available pages: %u\n",
		 nr_pages, PAGES_FOR_IO, free);

	return free > nr_pages + PAGES_FOR_IO;
}

#ifdef CONFIG_HIGHMEM
/**
 * get_highmem_buffer - Allocate a buffer for highmem pages.
 *
 * If there are some highmem pages in the hibernation image, we may need a
 * buffer to copy them and/or load their data.
 */
static inline int get_highmem_buffer(int safe_needed)
{
	buffer = get_image_page(GFP_ATOMIC | __GFP_COLD, safe_needed);
	return buffer ? 0 : -ENOMEM;
}

/**
 * alloc_highmem_pages - Allocate some highmem pages for the image.
 *
 * Try to allocate as many pages as needed, but if the number of free highmem
 * pages is less than that, allocate them all.
 */
static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
					       unsigned int nr_highmem)
{
	unsigned int to_alloc = count_free_highmem_pages();

	if (to_alloc > nr_highmem)
		to_alloc = nr_highmem;

	nr_highmem -= to_alloc;
	while (to_alloc-- > 0) {
		struct page *page;

		page = alloc_image_page(__GFP_HIGHMEM|__GFP_KSWAPD_RECLAIM);
		memory_bm_set_bit(bm, page_to_pfn(page));
	}
	return nr_highmem;
}
#else
static inline int get_highmem_buffer(int safe_needed) { return 0; }

static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
					       unsigned int n) { return 0; }
#endif /* CONFIG_HIGHMEM */

/**
 * swsusp_alloc - Allocate memory for hibernation image.
 *
 * We first try to allocate as many highmem pages as there are
 * saveable highmem pages in the system.  If that fails, we allocate
 * non-highmem pages for the copies of the remaining highmem ones.
 *
 * In this approach it is likely that the copies of highmem pages will
 * also be located in the high memory, because of the way in which
 * memory_bm_position_reset() works.
 */
static int swsusp_alloc(struct memory_bitmap *orig_bm,
			struct memory_bitmap *copy_bm,
			unsigned int nr_pages, unsigned int nr_highmem)
{
	if (nr_highmem > 0) {
		if (get_highmem_buffer(PG_ANY))
			goto err_out;
		if (nr_highmem > alloc_highmem) {
			nr_highmem -= alloc_highmem;
			nr_pages += alloc_highmem_pages(copy_bm, nr_highmem);
		}
	}
	if (nr_pages > alloc_normal) {
		nr_pages -= alloc_normal;
		while (nr_pages-- > 0) {
			struct page *page;

			page = alloc_image_page(GFP_ATOMIC | __GFP_COLD);
			if (!page)
				goto err_out;
			memory_bm_set_bit(copy_bm, page_to_pfn(page));
		}
	}

	return 0;

 err_out:
	swsusp_free();
	return -ENOMEM;
}
1958
1959asmlinkage __visible int swsusp_save(void)
1960{
1961 unsigned int nr_pages, nr_highmem;
1962
1963 printk(KERN_INFO "PM: Creating hibernation image:\n");
1964
1965 drain_local_pages(NULL);
1966 nr_pages = count_data_pages();
1967 nr_highmem = count_highmem_pages();
1968 printk(KERN_INFO "PM: Need to copy %u pages\n", nr_pages + nr_highmem);
1969
1970 if (!enough_free_mem(nr_pages, nr_highmem)) {
1971 printk(KERN_ERR "PM: Not enough free memory\n");
1972 return -ENOMEM;
1973 }
1974
1975 if (swsusp_alloc(&orig_bm, ©_bm, nr_pages, nr_highmem)) {
1976 printk(KERN_ERR "PM: Memory allocation failed\n");
1977 return -ENOMEM;
1978 }
1979
1980
1981
1982
1983
1984 drain_local_pages(NULL);
1985 copy_data_pages(©_bm, &orig_bm);
1986
1987
1988
1989
1990
1991
1992
1993 nr_pages += nr_highmem;
1994 nr_copy_pages = nr_pages;
1995 nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);
1996
1997 printk(KERN_INFO "PM: Hibernation image created (%d pages copied)\n",
1998 nr_pages);
1999
2000 return 0;
2001}

#ifndef CONFIG_ARCH_HIBERNATION_HEADER
static int init_header_complete(struct swsusp_info *info)
{
	memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
	info->version_code = LINUX_VERSION_CODE;
	return 0;
}

static char *check_image_kernel(struct swsusp_info *info)
{
	if (info->version_code != LINUX_VERSION_CODE)
		return "kernel version";
	if (strcmp(info->uts.sysname, init_utsname()->sysname))
		return "system type";
	if (strcmp(info->uts.release, init_utsname()->release))
		return "kernel release";
	if (strcmp(info->uts.version, init_utsname()->version))
		return "version";
	if (strcmp(info->uts.machine, init_utsname()->machine))
		return "machine";
	return NULL;
}
#endif /* CONFIG_ARCH_HIBERNATION_HEADER */

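/* Total number of image pages: data pages, metadata pages and the header. */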
unsigned long snapshot_get_image_size(void)
{
	return nr_copy_pages + nr_meta_pages + 1;
}

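/* Fill in the image header that is stored in the first page of the image. */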
static int init_header(struct swsusp_info *info)
{
	memset(info, 0, sizeof(struct swsusp_info));
	info->num_physpages = get_num_physpages();
	info->image_pages = nr_copy_pages;
	info->pages = snapshot_get_image_size();
	info->size = info->pages;
	info->size <<= PAGE_SHIFT;
	return init_header_complete(info);
}

/**
 * pack_pfns - Prepare PFNs for saving.
 * @bm: Memory bitmap.
 * @buf: Memory buffer to store the PFNs in.
 *
 * PFNs corresponding to set bits in @bm are stored in the area of memory
 * pointed to by @buf (1 page at a time).
 */
static inline void pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
		buf[j] = memory_bm_next_pfn(bm);
		if (unlikely(buf[j] == BM_END_OF_MAP))
			break;
		/* Save page key for data page (s390 only). */
		page_key_read(buf + j);
	}
}

/**
 * snapshot_read_next - Get the address to read the next image page from.
 * @handle: Snapshot handle to be used for the reading.
 *
 * On the first call, @handle should point to a zeroed snapshot_handle
 * structure.  The structure gets populated then and a pointer to it should be
 * passed to this function every next time.
 *
 * On success, the function returns a positive number.  Then, the caller
 * is allowed to read up to the returned number of bytes from the memory
 * location computed by the data_of() macro.
 *
 * The function returns 0 to indicate the end of the data stream condition,
 * and negative numbers are returned on errors.  If that happens, the structure
 * pointed to by @handle is not updated and should not be used any more.
 */
int snapshot_read_next(struct snapshot_handle *handle)
{
	if (handle->cur > nr_meta_pages + nr_copy_pages)
		return 0;

	if (!buffer) {
		/* This makes the buffer be freed by swsusp_free() */
		buffer = get_image_page(GFP_ATOMIC, PG_ANY);
		if (!buffer)
			return -ENOMEM;
	}
	if (!handle->cur) {
		int error;

		error = init_header((struct swsusp_info *)buffer);
		if (error)
			return error;
		handle->buffer = buffer;
		memory_bm_position_reset(&orig_bm);
		memory_bm_position_reset(&copy_bm);
	} else if (handle->cur <= nr_meta_pages) {
		clear_page(buffer);
		pack_pfns(buffer, &orig_bm);
	} else {
		struct page *page;

		page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
		if (PageHighMem(page)) {
			/*
			 * Highmem pages are copied to the buffer,
			 * because we can't return with a kmapped
			 * highmem page (we may not be called again).
			 */
			void *kaddr;

			kaddr = kmap_atomic(page);
			copy_page(buffer, kaddr);
			kunmap_atomic(kaddr);
			handle->buffer = buffer;
		} else {
			handle->buffer = page_address(page);
		}
	}
	handle->cur++;
	return PAGE_SIZE;
}

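/* Set bits in @dst for every PFN whose bit is set in @src. */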
static void duplicate_memory_bitmap(struct memory_bitmap *dst,
				    struct memory_bitmap *src)
{
	unsigned long pfn;

	memory_bm_position_reset(src);
	pfn = memory_bm_next_pfn(src);
	while (pfn != BM_END_OF_MAP) {
		memory_bm_set_bit(dst, pfn);
		pfn = memory_bm_next_pfn(src);
	}
}

/**
 * mark_unsafe_pages - Mark pages that were used before hibernation.
 *
 * Mark the pages that cannot be used for storing the image during restoration,
 * because they conflict with the pages that had been used before hibernation.
 */
static void mark_unsafe_pages(struct memory_bitmap *bm)
{
	unsigned long pfn;

	/* Clear the "free"/"unsafe" bit for all PFNs */
	memory_bm_position_reset(free_pages_map);
	pfn = memory_bm_next_pfn(free_pages_map);
	while (pfn != BM_END_OF_MAP) {
		memory_bm_clear_current(free_pages_map);
		pfn = memory_bm_next_pfn(free_pages_map);
	}

	/* Mark pages that correspond to the "original" PFNs as "unsafe" */
	duplicate_memory_bitmap(free_pages_map, bm);

	allocated_unsafe_pages = 0;
}

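/*
 * Reject the image if it was created by a different kernel or on a machine
 * with a different amount of memory.
 */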
static int check_header(struct swsusp_info *info)
{
	char *reason;

	reason = check_image_kernel(info);
	if (!reason && info->num_physpages != get_num_physpages())
		reason = "memory size";
	if (reason) {
		printk(KERN_ERR "PM: Image mismatch: %s\n", reason);
		return -EPERM;
	}
	return 0;
}

/**
 * load_header - Check the image header and copy the data from it.
 */
static int load_header(struct swsusp_info *info)
{
	int error;

	restore_pblist = NULL;
	error = check_header(info);
	if (!error) {
		nr_copy_pages = info->image_pages;
		nr_meta_pages = info->pages - info->image_pages - 1;
	}
	return error;
}

/**
 * unpack_orig_pfns - Retrieve PFNs and set bits for them in a memory bitmap.
 * @bm: Memory bitmap.
 * @buf: Area of memory containing the PFNs.
 *
 * For each element of the array pointed to by @buf (1 page at a time), set the
 * corresponding bit in @bm.
 */
static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
		if (unlikely(buf[j] == BM_END_OF_MAP))
			break;

		/* Extract and buffer page key for data page (s390 only). */
		page_key_memorize(buf + j);

		if (pfn_valid(buf[j]) && memory_bm_pfn_present(bm, buf[j]))
			memory_bm_set_bit(bm, buf[j]);
		else
			return -EFAULT;
	}

	return 0;
}

#ifdef CONFIG_HIGHMEM
/*
 * struct highmem_pbe is used for creating the list of highmem pages that
 * should be restored atomically during the resume from disk, because the page
 * frames they have occupied before the suspend are in use.
 */
struct highmem_pbe {
	struct page *copy_page;	/* data is here now */
	struct page *orig_page;	/* data was here before the suspend */
	struct highmem_pbe *next;
};

/*
 * List of highmem PBEs needed for restoring the highmem pages that were
 * allocated before the suspend and included in the suspend image, but have
 * also been allocated by the "resume" kernel, so their contents cannot be
 * written directly to their "original" page frames.
 */
static struct highmem_pbe *highmem_pblist;

/**
 * count_highmem_image_pages - Compute the number of highmem pages in the image.
 * @bm: Memory bitmap.
 *
 * The bits in @bm that correspond to image pages are assumed to be set.
 */
static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
{
	unsigned long pfn;
	unsigned int cnt = 0;

	memory_bm_position_reset(bm);
	pfn = memory_bm_next_pfn(bm);
	while (pfn != BM_END_OF_MAP) {
		if (PageHighMem(pfn_to_page(pfn)))
			cnt++;

		pfn = memory_bm_next_pfn(bm);
	}
	return cnt;
}

static unsigned int safe_highmem_pages;

static struct memory_bitmap *safe_highmem_bm;

/**
 * prepare_highmem_image - Allocate memory for loading highmem data from image.
 * @bm: Pointer to an uninitialized memory bitmap structure.
 * @nr_highmem_p: Pointer to the number of highmem image pages.
 *
 * Try to allocate as many highmem pages as there are highmem image pages
 * (@nr_highmem_p points to the variable containing the number of highmem image
 * pages).  The pages that are "safe" (ie. will not be overwritten when the
 * hibernation image is restored entirely) have the corresponding bits set in
 * @bm (it must be uninitialized).
 *
 * NOTE: This function should not be called if there are no highmem image pages.
 */
static int prepare_highmem_image(struct memory_bitmap *bm,
				 unsigned int *nr_highmem_p)
{
	unsigned int to_alloc;

	if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
		return -ENOMEM;

	if (get_highmem_buffer(PG_SAFE))
		return -ENOMEM;

	to_alloc = count_free_highmem_pages();
	if (to_alloc > *nr_highmem_p)
		to_alloc = *nr_highmem_p;
	else
		*nr_highmem_p = to_alloc;

	safe_highmem_pages = 0;
	while (to_alloc-- > 0) {
		struct page *page;

		page = alloc_page(__GFP_HIGHMEM);
		if (!swsusp_page_is_free(page)) {
			/* The page is "safe", set its bit in the bitmap */
			memory_bm_set_bit(bm, page_to_pfn(page));
			safe_highmem_pages++;
		}
		/* Mark the page as allocated */
		swsusp_set_page_forbidden(page);
		swsusp_set_page_free(page);
	}
	memory_bm_position_reset(bm);
	safe_highmem_bm = bm;
	return 0;
}

static struct page *last_highmem_page;

/**
 * get_highmem_page_buffer - Prepare a buffer to store a highmem image page.
 *
 * For a given highmem image page get a buffer that snapshot_write_next()
 * should return to its caller to write to.
 *
 * If the page is to be saved to its "original" page frame or a copy of
 * the page is to be made in the highmem, @buffer is returned.  Otherwise,
 * the copy of the page is to be made in normal memory, so the address of
 * the copy is returned.
 *
 * If @buffer is returned, the caller of snapshot_write_next() will write
 * the page's contents to @buffer, so they will have to be copied to the
 * right location on the next call to snapshot_write_next() and it is done
 * with the help of copy_last_highmem_page().  For this purpose, if
 * @buffer is returned, @last_highmem_page is set to the page to which
 * the data will have to be copied from @buffer.
 */
static void *get_highmem_page_buffer(struct page *page,
				     struct chain_allocator *ca)
{
	struct highmem_pbe *pbe;
	void *kaddr;

	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) {
		/*
		 * We have allocated the "original" page frame and we can
		 * use it directly to store the loaded page.
		 */
		last_highmem_page = page;
		return buffer;
	}
	/*
	 * The "original" page frame has not been allocated and we have to
	 * use a "safe" page frame to store the loaded page.
	 */
	pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
	if (!pbe) {
		swsusp_free();
		return ERR_PTR(-ENOMEM);
	}
	pbe->orig_page = page;
	if (safe_highmem_pages > 0) {
		struct page *tmp;

		/* Copy of the page will be stored in high memory */
		kaddr = buffer;
		tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm));
		safe_highmem_pages--;
		last_highmem_page = tmp;
		pbe->copy_page = tmp;
	} else {
		/* Copy of the page will be stored in normal memory */
		kaddr = safe_pages_list;
		safe_pages_list = safe_pages_list->next;
		pbe->copy_page = virt_to_page(kaddr);
	}
	pbe->next = highmem_pblist;
	highmem_pblist = pbe;
	return kaddr;
}

/**
 * copy_last_highmem_page - Copy the most recent highmem image page.
 *
 * Copy the contents of a highmem image from @buffer, where the caller of
 * snapshot_write_next() has stored them, to the right location represented by
 * @last_highmem_page.
 */
static void copy_last_highmem_page(void)
{
	if (last_highmem_page) {
		void *dst;

		dst = kmap_atomic(last_highmem_page);
		copy_page(dst, buffer);
		kunmap_atomic(dst);
		last_highmem_page = NULL;
	}
}

static inline int last_highmem_page_copied(void)
{
	return !last_highmem_page;
}

static inline void free_highmem_data(void)
{
	if (safe_highmem_bm)
		memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR);

	if (buffer)
		free_image_page(buffer, PG_UNSAFE_CLEAR);
}
#else
static unsigned int count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }

static inline int prepare_highmem_image(struct memory_bitmap *bm,
					unsigned int *nr_highmem_p) { return 0; }

static inline void *get_highmem_page_buffer(struct page *page,
					    struct chain_allocator *ca)
{
	return ERR_PTR(-EINVAL);
}

static inline void copy_last_highmem_page(void) {}
static inline int last_highmem_page_copied(void) { return 1; }
static inline void free_highmem_data(void) {}
#endif /* CONFIG_HIGHMEM */

#define PBES_PER_LINKED_PAGE	(LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))

/**
 * prepare_image - Make room for loading hibernation image.
 * @new_bm: Uninitialized memory bitmap structure.
 * @bm: Memory bitmap with unsafe pages marked.
 *
 * Use @bm to mark the pages that will be overwritten in the process of
 * restoring the system memory state from the suspend image ("unsafe" pages)
 * and allocate memory for the image.
 *
 * The idea is to allocate a new memory bitmap first and then allocate
 * as many pages as needed for image data, but without specifying what those
 * pages will be used for just yet.  Instead, we mark them all as allocated and
 * create a list of "safe" pages to be used later.  On systems with high
 * memory a list of "safe" highmem pages is created too.
 */
static int prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
{
	unsigned int nr_pages, nr_highmem;
	struct linked_page *lp;
	int error;

	/* If there is no highmem, the buffer will not be necessary */
	free_image_page(buffer, PG_UNSAFE_CLEAR);
	buffer = NULL;

	nr_highmem = count_highmem_image_pages(bm);
	mark_unsafe_pages(bm);

	error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
	if (error)
		goto Free;

	duplicate_memory_bitmap(new_bm, bm);
	memory_bm_free(bm, PG_UNSAFE_KEEP);
	if (nr_highmem > 0) {
		error = prepare_highmem_image(bm, &nr_highmem);
		if (error)
			goto Free;
	}
	/*
	 * Reserve some safe pages for potential later use.
	 *
	 * NOTE: This way we make sure there will be enough safe pages for the
	 * chain_alloc() in get_buffer().  It is a bit wasteful, but
	 * nr_copy_pages cannot be greater than 50% of the memory anyway.
	 *
	 * nr_copy_pages cannot be less than allocated_unsafe_pages too.
	 */
	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
	nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
	while (nr_pages > 0) {
		lp = get_image_page(GFP_ATOMIC, PG_SAFE);
		if (!lp) {
			error = -ENOMEM;
			goto Free;
		}
		lp->next = safe_pages_list;
		safe_pages_list = lp;
		nr_pages--;
	}
	/* Preallocate memory for the image */
	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
	while (nr_pages > 0) {
		lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
		if (!lp) {
			error = -ENOMEM;
			goto Free;
		}
		if (!swsusp_page_is_free(virt_to_page(lp))) {
			/* The page is "safe", add it to the list */
			lp->next = safe_pages_list;
			safe_pages_list = lp;
		}
		/* Mark the page as allocated */
		swsusp_set_page_forbidden(virt_to_page(lp));
		swsusp_set_page_free(virt_to_page(lp));
		nr_pages--;
	}
	return 0;

 Free:
	swsusp_free();
	return error;
}

/**
 * get_buffer - Get the address to store the next image data page.
 *
 * Get the address that snapshot_write_next() should return to its caller to
 * write to.
 */
static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
{
	struct pbe *pbe;
	struct page *page;
	unsigned long pfn = memory_bm_next_pfn(bm);

	if (pfn == BM_END_OF_MAP)
		return ERR_PTR(-EFAULT);

	page = pfn_to_page(pfn);
	if (PageHighMem(page))
		return get_highmem_page_buffer(page, ca);

	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page))
		/*
		 * We have allocated the "original" page frame and we can
		 * use it directly to store the loaded page.
		 */
		return page_address(page);

	/*
	 * The "original" page frame has not been allocated and we have to
	 * use a "safe" page frame to store the loaded page.
	 */
	pbe = chain_alloc(ca, sizeof(struct pbe));
	if (!pbe) {
		swsusp_free();
		return ERR_PTR(-ENOMEM);
	}
	pbe->orig_address = page_address(page);
	pbe->address = safe_pages_list;
	safe_pages_list = safe_pages_list->next;
	pbe->next = restore_pblist;
	restore_pblist = pbe;
	return pbe->address;
}

/**
 * snapshot_write_next - Get the address to store the next image page.
 * @handle: Snapshot handle structure to guide the loading.
 *
 * On the first call, @handle should point to a zeroed snapshot_handle
 * structure.  The structure gets populated then and a pointer to it should be
 * passed to this function every next time.
 *
 * On success, the function returns a positive number.  Then, the caller
 * is allowed to write up to the returned number of bytes to the memory
 * location computed by the data_of() macro.
 *
 * The function returns 0 to indicate the "end of image" condition.  Negative
 * numbers are returned on errors, in which cases the structure pointed to by
 * @handle is not updated and should not be used any more.
 */
int snapshot_write_next(struct snapshot_handle *handle)
{
	static struct chain_allocator ca;
	int error = 0;

	/* Check if we have already loaded the entire image */
	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages)
		return 0;

	handle->sync_read = 1;

	if (!handle->cur) {
		if (!buffer)
			/* This makes the buffer be freed by swsusp_free() */
			buffer = get_image_page(GFP_ATOMIC, PG_ANY);

		if (!buffer)
			return -ENOMEM;

		handle->buffer = buffer;
	} else if (handle->cur == 1) {
		error = load_header(buffer);
		if (error)
			return error;

		safe_pages_list = NULL;

		error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
		if (error)
			return error;

		/* Allocate buffer for page keys. */
		error = page_key_alloc(nr_copy_pages);
		if (error)
			return error;

		hibernate_restore_protection_begin();
	} else if (handle->cur <= nr_meta_pages + 1) {
		error = unpack_orig_pfns(buffer, &copy_bm);
		if (error)
			return error;

		if (handle->cur == nr_meta_pages + 1) {
			error = prepare_image(&orig_bm, &copy_bm);
			if (error)
				return error;

			chain_init(&ca, GFP_ATOMIC, PG_SAFE);
			memory_bm_position_reset(&orig_bm);
			restore_pblist = NULL;
			handle->buffer = get_buffer(&orig_bm, &ca);
			handle->sync_read = 0;
			if (IS_ERR(handle->buffer))
				return PTR_ERR(handle->buffer);
		}
	} else {
		copy_last_highmem_page();
		/* Restore page key for data page (s390 only). */
		page_key_write(handle->buffer);
		hibernate_restore_protect_page(handle->buffer);
		handle->buffer = get_buffer(&orig_bm, &ca);
		if (IS_ERR(handle->buffer))
			return PTR_ERR(handle->buffer);
		if (handle->buffer != buffer)
			handle->sync_read = 0;
	}
	handle->cur++;
	return PAGE_SIZE;
}

/**
 * snapshot_write_finalize - Complete the loading of a hibernation image.
 *
 * Must be called after the last call to snapshot_write_next() in case the last
 * page in the image happens to be a highmem page and its contents should be
 * stored in highmem.  Additionally, it recycles bitmap memory that's not
 * necessary any more.
 */
void snapshot_write_finalize(struct snapshot_handle *handle)
{
	copy_last_highmem_page();
	/* Restore page key for data page (s390 only). */
	page_key_write(handle->buffer);
	page_key_free();
	hibernate_restore_protect_page(handle->buffer);
	/* Do that only if we have loaded the image entirely */
	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) {
		memory_bm_recycle(&orig_bm);
		free_highmem_data();
	}
}

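/*
 * Return true only if the image, including any trailing highmem page,
 * has been loaded completely.
 */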
int snapshot_image_loaded(struct snapshot_handle *handle)
{
	return !(!nr_copy_pages || !last_highmem_page_copied() ||
			handle->cur <= nr_meta_pages + nr_copy_pages);
}

#ifdef CONFIG_HIGHMEM
/* Assumes that @buf is ready and points to a "safe" page */
static inline void swap_two_pages_data(struct page *p1, struct page *p2,
				       void *buf)
{
	void *kaddr1, *kaddr2;

	kaddr1 = kmap_atomic(p1);
	kaddr2 = kmap_atomic(p2);
	copy_page(buf, kaddr1);
	copy_page(kaddr1, kaddr2);
	copy_page(kaddr2, buf);
	kunmap_atomic(kaddr2);
	kunmap_atomic(kaddr1);
}

/**
 * restore_highmem - Put highmem image pages back into their original locations.
 *
 * For each highmem page that was in use before hibernation and is included in
 * the image, and also has been allocated by the "resume" kernel, swap its
 * current contents with the previous (ie. "before hibernation") ones.
 *
 * If the restore eventually fails, we can call this function once again and
 * restore the highmem state as seen by the restore kernel.
 */
int restore_highmem(void)
{
	struct highmem_pbe *pbe = highmem_pblist;
	void *buf;

	if (!pbe)
		return 0;

	buf = get_image_page(GFP_ATOMIC, PG_SAFE);
	if (!buf)
		return -ENOMEM;

	while (pbe) {
		swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
		pbe = pbe->next;
	}
	free_image_page(buf, PG_UNSAFE_CLEAR);
	return 0;
}
#endif /* CONFIG_HIGHMEM */