// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/kernel/power/snapshot.c
 *
 * This file provides system snapshot/restore functionality for swsusp.
 *
 * Copyright (C) 1998-2005 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 */

#define pr_fmt(fmt) "PM: " fmt

#include <linux/version.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/nmi.h>
#include <linux/syscalls.h>
#include <linux/console.h>
#include <linux/highmem.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/compiler.h>
#include <linux/ktime.h>
#include <linux/set_memory.h>

#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#include "power.h"

#if defined(CONFIG_STRICT_KERNEL_RWX) && defined(CONFIG_ARCH_HAS_SET_MEMORY)
static bool hibernate_restore_protection;
static bool hibernate_restore_protection_active;

void enable_restore_image_protection(void)
{
	hibernate_restore_protection = true;
}

static inline void hibernate_restore_protection_begin(void)
{
	hibernate_restore_protection_active = hibernate_restore_protection;
}

static inline void hibernate_restore_protection_end(void)
{
	hibernate_restore_protection_active = false;
}

static inline void hibernate_restore_protect_page(void *page_address)
{
	if (hibernate_restore_protection_active)
		set_memory_ro((unsigned long)page_address, 1);
}

static inline void hibernate_restore_unprotect_page(void *page_address)
{
	if (hibernate_restore_protection_active)
		set_memory_rw((unsigned long)page_address, 1);
}
#else
static inline void hibernate_restore_protection_begin(void) {}
static inline void hibernate_restore_protection_end(void) {}
static inline void hibernate_restore_protect_page(void *page_address) {}
static inline void hibernate_restore_unprotect_page(void *page_address) {}
#endif

static int swsusp_page_is_free(struct page *);
static void swsusp_set_page_forbidden(struct page *);
static void swsusp_unset_page_forbidden(struct page *);

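/*
 * Number of bytes to reserve for memory allocations made by device drivers
 * from their ->freeze() and ->freeze_noirq() callbacks so that they don't
 * cause image creation to fail (tunable via /sys/power/reserved_size).
 */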
unsigned long reserved_size;

void __init hibernate_reserved_size_init(void)
{
	reserved_size = SPARE_PAGES * PAGE_SIZE;
}

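/*
 * Preferred image size in bytes (tunable via /sys/power/image_size).
 * When it is set to N, swsusp will do its best to ensure the image
 * size will not exceed N bytes, but if that is impossible, it will
 * try to create the smallest image possible.
 */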
unsigned long image_size;

void __init hibernate_image_size_init(void)
{
	image_size = ((totalram_pages() * 2) / 5) * PAGE_SIZE;
}

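/*
 * List of PBEs needed for restoring the pages that were allocated before
 * the suspend and included in the suspend image, but have also been
 * allocated by the "resume" kernel, so their contents cannot be written
 * directly to their "original" page frames.
 */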
struct pbe *restore_pblist;

/* struct linked_page is used to build chains of pages */

#define LINKED_PAGE_DATA_SIZE	(PAGE_SIZE - sizeof(void *))

struct linked_page {
	struct linked_page *next;
	char data[LINKED_PAGE_DATA_SIZE];
} __packed;

/*
 * List of "safe" pages, i.e. pages that were not used by the image kernel
 * before hibernation.
 */
static struct linked_page *safe_pages_list;

/* Pointer to an auxiliary buffer (1 page) */
static void *buffer;

#define PG_ANY		0
#define PG_SAFE		1
#define PG_UNSAFE_CLEAR	1
#define PG_UNSAFE_KEEP	0

static unsigned int allocated_unsafe_pages;

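/**
 * get_image_page - Allocate a page for a hibernation image.
 * @gfp_mask: GFP mask for the allocation.
 * @safe_needed: Get pages that were not used before hibernation (restore only)
 *
 * During image restoration, for storing the PBE list and the image data, we can
 * only use memory pages that do not conflict with the pages used before
 * hibernation.  The "unsafe" pages have PageNosaveFree set and we count them
 * using allocated_unsafe_pages.
 *
 * Each allocated image page is marked as PageNosave and PageNosaveFree so that
 * swsusp_free() can release it.
 */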
static void *get_image_page(gfp_t gfp_mask, int safe_needed)
{
	void *res;

	res = (void *)get_zeroed_page(gfp_mask);
	if (safe_needed)
		while (res && swsusp_page_is_free(virt_to_page(res))) {
			/* The page is unsafe, mark it for swsusp_free() */
			swsusp_set_page_forbidden(virt_to_page(res));
			allocated_unsafe_pages++;
			res = (void *)get_zeroed_page(gfp_mask);
		}
	if (res) {
		swsusp_set_page_forbidden(virt_to_page(res));
		swsusp_set_page_free(virt_to_page(res));
	}
	return res;
}

static void *__get_safe_page(gfp_t gfp_mask)
{
	if (safe_pages_list) {
		void *ret = safe_pages_list;

		safe_pages_list = safe_pages_list->next;
		memset(ret, 0, PAGE_SIZE);
		return ret;
	}
	return get_image_page(gfp_mask, PG_SAFE);
}

unsigned long get_safe_page(gfp_t gfp_mask)
{
	return (unsigned long)__get_safe_page(gfp_mask);
}

static struct page *alloc_image_page(gfp_t gfp_mask)
{
	struct page *page;

	page = alloc_page(gfp_mask);
	if (page) {
		swsusp_set_page_forbidden(page);
		swsusp_set_page_free(page);
	}
	return page;
}

static void recycle_safe_page(void *page_address)
{
	struct linked_page *lp = page_address;

	lp->next = safe_pages_list;
	safe_pages_list = lp;
}

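/**
 * free_image_page - Free a page allocated for hibernation image.
 * @addr: Address of the page to free.
 * @clear_nosave_free: If set, clear the PageNosaveFree bit for the page.
 *
 * The page to free should have been allocated by get_image_page() (page flags
 * set by it are affected).
 */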
static inline void free_image_page(void *addr, int clear_nosave_free)
{
	struct page *page;

	BUG_ON(!virt_addr_valid(addr));

	page = virt_to_page(addr);

	swsusp_unset_page_forbidden(page);
	if (clear_nosave_free)
		swsusp_unset_page_free(page);

	__free_page(page);
}

static inline void free_list_of_pages(struct linked_page *list,
				      int clear_page_nosave)
{
	while (list) {
		struct linked_page *lp = list->next;

		free_image_page(list, clear_page_nosave);
		list = lp;
	}
}

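/*
 * struct chain_allocator is used for allocating small objects out of
 * a linked list of pages called 'the chain'.
 *
 * The chain grows each time when there is no room for a new object in
 * the current page.  The allocated objects cannot be freed individually.
 * It is only possible to free them all at once, by freeing the entire
 * chain.
 *
 * NOTE: The chain allocator may be inefficient if the allocated objects
 * are not much smaller than PAGE_SIZE.
 */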
struct chain_allocator {
	struct linked_page *chain;	/* the chain */
	unsigned int used_space;	/* total size of objects allocated out
					   of the current page */
	gfp_t gfp_mask;		/* mask for allocating pages */
	int safe_needed;	/* if set, only "safe" pages are allocated */
};

static void chain_init(struct chain_allocator *ca, gfp_t gfp_mask,
		       int safe_needed)
{
	ca->chain = NULL;
	ca->used_space = LINKED_PAGE_DATA_SIZE;
	ca->gfp_mask = gfp_mask;
	ca->safe_needed = safe_needed;
}

static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
{
	void *ret;

	if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
		struct linked_page *lp;

		lp = ca->safe_needed ? __get_safe_page(ca->gfp_mask) :
					get_image_page(ca->gfp_mask, PG_ANY);
		if (!lp)
			return NULL;

		lp->next = ca->chain;
		ca->chain = lp;
		ca->used_space = 0;
	}
	ret = ca->chain->data + ca->used_space;
	ca->used_space += size;
	return ret;
}

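/*
 * Data types related to memory bitmaps.
 *
 * The memory bitmap is organized as a radix tree to guarantee fast random
 * access to the bits.  There is one radix tree for each populated memory
 * zone (as returned from create_mem_extents()).
 *
 * One radix tree is represented by one struct mem_zone_bm_rtree.  There are
 * two linked lists for the nodes of the tree, one for the inner nodes and
 * one for the leave nodes.  The linked leave nodes are used for fast bit
 * searching.  The mechanism is explained in the memory_bm_find_bit()
 * function.
 *
 * NOTE: It has to be possible to lay out the bitmap in memory
 * using only allocations of order 0.
 */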
#define BM_END_OF_MAP	(~0UL)

#define BM_BITS_PER_BLOCK	(PAGE_SIZE * BITS_PER_BYTE)
#define BM_BLOCK_SHIFT		(PAGE_SHIFT + 3)
#define BM_BLOCK_MASK		((1UL << BM_BLOCK_SHIFT) - 1)

/*
 * struct rtree_node is a wrapper struct to link the nodes
 * of the rtree together for easy linear iteration over
 * bits and easy freeing
 */
struct rtree_node {
	struct list_head list;
	unsigned long *data;
};

/*
 * struct mem_zone_bm_rtree represents a bitmap used for one
 * populated memory zone.
 */
struct mem_zone_bm_rtree {
	struct list_head list;		/* Link Zones together         */
	struct list_head nodes;		/* Radix Tree inner nodes      */
	struct list_head leaves;	/* Radix Tree leaves           */
	unsigned long start_pfn;	/* Zone start page frame       */
	unsigned long end_pfn;		/* Zone end page frame + 1     */
	struct rtree_node *rtree;	/* Radix Tree Root             */
	int levels;			/* Number of Radix Tree Levels */
	unsigned int blocks;		/* Number of Bitmap Blocks     */
};

/* struct bm_position is used for browsing memory bitmaps */

struct bm_position {
	struct mem_zone_bm_rtree *zone;
	struct rtree_node *node;
	unsigned long node_pfn;
	int node_bit;
};

struct memory_bitmap {
	struct list_head zones;
	struct linked_page *p_list;	/* list of pages used to store zone
					   bitmap objects and bitmap block
					   objects */
	struct bm_position cur;	/* most recently used bit position */
};

/* Functions that operate on memory bitmaps */

#define BM_ENTRIES_PER_LEVEL	(PAGE_SIZE / sizeof(unsigned long))
#if BITS_PER_LONG == 32
#define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 2)
#else
#define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 3)
#endif
#define BM_RTREE_LEVEL_MASK	((1UL << BM_RTREE_LEVEL_SHIFT) - 1)

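/**
 * alloc_rtree_node - Allocate a new node and add it to the radix tree.
 *
 * This function is used to allocate inner nodes as well as the
 * leave nodes of the radix tree.  It also adds the node to the
 * corresponding linked list passed in by the *list parameter.
 */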
static struct rtree_node *alloc_rtree_node(gfp_t gfp_mask, int safe_needed,
					   struct chain_allocator *ca,
					   struct list_head *list)
{
	struct rtree_node *node;

	node = chain_alloc(ca, sizeof(struct rtree_node));
	if (!node)
		return NULL;

	node->data = get_image_page(gfp_mask, safe_needed);
	if (!node->data)
		return NULL;

	list_add_tail(&node->list, list);

	return node;
}

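/**
 * add_rtree_block - Add a new leave node to the memory bitmap.
 *
 * The function adds a new node to the radix tree.  If there is not yet
 * enough space in the radix tree for the new node, the radix tree is
 * extended by adding a new level at the top.
 */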
static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask,
			   int safe_needed, struct chain_allocator *ca)
{
	struct rtree_node *node, *block, **dst;
	unsigned int levels_needed, block_nr;
	int i;

	block_nr = zone->blocks;
	levels_needed = 0;

	/* How many levels do we need for this block nr? */
	while (block_nr) {
		levels_needed += 1;
		block_nr >>= BM_RTREE_LEVEL_SHIFT;
	}

	/* Make sure the rtree has enough levels */
	for (i = zone->levels; i < levels_needed; i++) {
		node = alloc_rtree_node(gfp_mask, safe_needed, ca,
					&zone->nodes);
		if (!node)
			return -ENOMEM;

		node->data[0] = (unsigned long)zone->rtree;
		zone->rtree = node;
		zone->levels += 1;
	}

	/* Allocate new block */
	block = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->leaves);
	if (!block)
		return -ENOMEM;

	/* Now walk the rtree to insert the block */
	node = zone->rtree;
	dst = &zone->rtree;
	block_nr = zone->blocks;
	for (i = zone->levels; i > 0; i--) {
		int index;

		if (!node) {
			node = alloc_rtree_node(gfp_mask, safe_needed, ca,
						&zone->nodes);
			if (!node)
				return -ENOMEM;
			*dst = node;
		}

		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
		index &= BM_RTREE_LEVEL_MASK;
		dst = (struct rtree_node **)&((*dst)->data[index]);
		node = *dst;
	}

	zone->blocks += 1;
	*dst = block;

	return 0;
}

static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
			       int clear_nosave_free);

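/**
 * create_zone_bm_rtree - Create a radix tree for one zone.
 *
 * Allocates the mem_zone_bm_rtree structure and initializes it.
 * This function also allocates and builds the radix tree for the
 * zone.
 */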
static struct mem_zone_bm_rtree *create_zone_bm_rtree(gfp_t gfp_mask,
						      int safe_needed,
						      struct chain_allocator *ca,
						      unsigned long start,
						      unsigned long end)
{
	struct mem_zone_bm_rtree *zone;
	unsigned int i, nr_blocks;
	unsigned long pages;

	pages = end - start;
	zone = chain_alloc(ca, sizeof(struct mem_zone_bm_rtree));
	if (!zone)
		return NULL;

	INIT_LIST_HEAD(&zone->nodes);
	INIT_LIST_HEAD(&zone->leaves);
	zone->start_pfn = start;
	zone->end_pfn = end;
	nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);

	for (i = 0; i < nr_blocks; i++) {
		if (add_rtree_block(zone, gfp_mask, safe_needed, ca)) {
			free_zone_bm_rtree(zone, PG_UNSAFE_CLEAR);
			return NULL;
		}
	}

	return zone;
}

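/**
 * free_zone_bm_rtree - Free the memory of the radix tree.
 *
 * Free all node pages of the radix tree.  The mem_zone_bm_rtree
 * structure itself is not freed here nor are the rtree_node
 * structs.
 */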
static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
			       int clear_nosave_free)
{
	struct rtree_node *node;

	list_for_each_entry(node, &zone->nodes, list)
		free_image_page(node->data, clear_nosave_free);

	list_for_each_entry(node, &zone->leaves, list)
		free_image_page(node->data, clear_nosave_free);
}

static void memory_bm_position_reset(struct memory_bitmap *bm)
{
	bm->cur.zone = list_entry(bm->zones.next, struct mem_zone_bm_rtree,
				  list);
	bm->cur.node = list_entry(bm->cur.zone->leaves.next,
				  struct rtree_node, list);
	bm->cur.node_pfn = 0;
	bm->cur.node_bit = 0;
}

static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);

struct mem_extent {
	struct list_head hook;
	unsigned long start;
	unsigned long end;
};

/**
 * free_mem_extents - Free a list of memory extents.
 * @list: List of extents to free.
 */
static void free_mem_extents(struct list_head *list)
{
	struct mem_extent *ext, *aux;

	list_for_each_entry_safe(ext, aux, list, hook) {
		list_del(&ext->hook);
		kfree(ext);
	}
}

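/**
 * create_mem_extents - Create a list of memory extents.
 * @list: List to put the extents into.
 * @gfp_mask: Mask to use for memory allocations.
 *
 * The extents represent contiguous ranges of PFNs.
 */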
static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
{
	struct zone *zone;

	INIT_LIST_HEAD(list);

	for_each_populated_zone(zone) {
		unsigned long zone_start, zone_end;
		struct mem_extent *ext, *cur, *aux;

		zone_start = zone->zone_start_pfn;
		zone_end = zone_end_pfn(zone);

		list_for_each_entry(ext, list, hook)
			if (zone_start <= ext->end)
				break;

		if (&ext->hook == list || zone_end < ext->start) {
			/* New extent is necessary */
			struct mem_extent *new_ext;

			new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask);
			if (!new_ext) {
				free_mem_extents(list);
				return -ENOMEM;
			}
			new_ext->start = zone_start;
			new_ext->end = zone_end;
			list_add_tail(&new_ext->hook, &ext->hook);
			continue;
		}

		/* Merge this zone's range of PFNs with the existing one */
		if (zone_start < ext->start)
			ext->start = zone_start;
		if (zone_end > ext->end)
			ext->end = zone_end;

		/* More merging may be possible */
		cur = ext;
		list_for_each_entry_safe_continue(cur, aux, list, hook) {
			if (zone_end < cur->start)
				break;
			if (zone_end < cur->end)
				ext->end = cur->end;
			list_del(&cur->hook);
			kfree(cur);
		}
	}

	return 0;
}

/**
 * memory_bm_create - Allocate memory for a memory bitmap.
 */
static int memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask,
			    int safe_needed)
{
	struct chain_allocator ca;
	struct list_head mem_extents;
	struct mem_extent *ext;
	int error;

	chain_init(&ca, gfp_mask, safe_needed);
	INIT_LIST_HEAD(&bm->zones);

	error = create_mem_extents(&mem_extents, gfp_mask);
	if (error)
		return error;

	list_for_each_entry(ext, &mem_extents, hook) {
		struct mem_zone_bm_rtree *zone;

		zone = create_zone_bm_rtree(gfp_mask, safe_needed, &ca,
					    ext->start, ext->end);
		if (!zone) {
			error = -ENOMEM;
			goto Error;
		}
		list_add_tail(&zone->list, &bm->zones);
	}

	bm->p_list = ca.chain;
	memory_bm_position_reset(bm);
 Exit:
	free_mem_extents(&mem_extents);
	return error;

 Error:
	bm->p_list = ca.chain;
	memory_bm_free(bm, PG_UNSAFE_CLEAR);
	goto Exit;
}

/**
 * memory_bm_free - Free memory occupied by the memory bitmap.
 * @bm: Memory bitmap.
 */
static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
{
	struct mem_zone_bm_rtree *zone;

	list_for_each_entry(zone, &bm->zones, list)
		free_zone_bm_rtree(zone, clear_nosave_free);

	free_list_of_pages(bm->p_list, clear_nosave_free);

	INIT_LIST_HEAD(&bm->zones);
}

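/**
 * memory_bm_find_bit - Find the bit for a given PFN in a memory bitmap.
 *
 * Find the bit in memory bitmap @bm that corresponds to the given PFN.
 * The cur.zone, cur.node and cur.node_pfn members of @bm are updated.
 *
 * Walk the radix tree to find the page containing the bit that represents
 * @pfn and return the position of the bit in @addr and @bit_nr.
 */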
static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
			      void **addr, unsigned int *bit_nr)
{
	struct mem_zone_bm_rtree *curr, *zone;
	struct rtree_node *node;
	int i, block_nr;

	zone = bm->cur.zone;

	if (pfn >= zone->start_pfn && pfn < zone->end_pfn)
		goto zone_found;

	zone = NULL;

	/* Find the right zone */
	list_for_each_entry(curr, &bm->zones, list) {
		if (pfn >= curr->start_pfn && pfn < curr->end_pfn) {
			zone = curr;
			break;
		}
	}

	if (!zone)
		return -EFAULT;

zone_found:
	/*
	 * We have found the zone. Now walk the radix tree to find the leaf
	 * node for our PFN.
	 */
	node = bm->cur.node;
	if (((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
		goto node_found;

	node = zone->rtree;
	block_nr = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT;

	for (i = zone->levels; i > 0; i--) {
		int index;

		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
		index &= BM_RTREE_LEVEL_MASK;
		BUG_ON(node->data[index] == 0);
		node = (struct rtree_node *)node->data[index];
	}

node_found:
	/* Update last position */
	bm->cur.zone = zone;
	bm->cur.node = node;
	bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK;

	/* Set return values */
	*addr = node->data;
	*bit_nr = (pfn - zone->start_pfn) & BM_BLOCK_MASK;

	return 0;
}

static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	set_bit(bit, addr);
}

static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	if (!error)
		set_bit(bit, addr);

	return error;
}

static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	clear_bit(bit, addr);
}

static void memory_bm_clear_current(struct memory_bitmap *bm)
{
	int bit;

	bit = max(bm->cur.node_bit - 1, 0);
	clear_bit(bit, bm->cur.node->data);
}

static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	return test_bit(bit, addr);
}

static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;

	return !memory_bm_find_bit(bm, pfn, &addr, &bit);
}

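/*
 * rtree_next_node - Jump to the next leaf node.
 *
 * Set the position to the beginning of the next node in the
 * memory bitmap.  This is either the next node in the current
 * zone's radix tree or the first node in the radix tree of the
 * next zone.
 *
 * Return true if there is a next node, false otherwise.
 */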
static bool rtree_next_node(struct memory_bitmap *bm)
{
	if (!list_is_last(&bm->cur.node->list, &bm->cur.zone->leaves)) {
		bm->cur.node = list_entry(bm->cur.node->list.next,
					  struct rtree_node, list);
		bm->cur.node_pfn += BM_BITS_PER_BLOCK;
		bm->cur.node_bit = 0;
		touch_softlockup_watchdog();
		return true;
	}

	/* No more nodes, goto next zone */
	if (!list_is_last(&bm->cur.zone->list, &bm->zones)) {
		bm->cur.zone = list_entry(bm->cur.zone->list.next,
					  struct mem_zone_bm_rtree, list);
		bm->cur.node = list_entry(bm->cur.zone->leaves.next,
					  struct rtree_node, list);
		bm->cur.node_pfn = 0;
		bm->cur.node_bit = 0;
		return true;
	}

	/* No more zones */
	return false;
}

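/**
 * memory_bm_next_pfn - Find the next set bit in a memory bitmap.
 * @bm: Memory bitmap.
 *
 * Starting from the last returned position this function searches for the
 * next set bit in @bm and returns the PFN represented by it.  If no more bits
 * are set, BM_END_OF_MAP is returned.
 *
 * It is required to run memory_bm_position_reset() before the first call to
 * this function for the given memory bitmap.
 */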
static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
{
	unsigned long bits, pfn, pages;
	int bit;

	do {
		pages = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn;
		bits = min(pages - bm->cur.node_pfn, BM_BITS_PER_BLOCK);
		bit = find_next_bit(bm->cur.node->data, bits,
				    bm->cur.node_bit);
		if (bit < bits) {
			pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit;
			bm->cur.node_bit = bit + 1;
			return pfn;
		}
	} while (rtree_next_node(bm));

	return BM_END_OF_MAP;
}

/*
 * This structure represents a range of page frames the contents of which
 * should not be saved during hibernation.
 */
struct nosave_region {
	struct list_head list;
	unsigned long start_pfn;
	unsigned long end_pfn;
};

static LIST_HEAD(nosave_regions);

static void recycle_zone_bm_rtree(struct mem_zone_bm_rtree *zone)
{
	struct rtree_node *node;

	list_for_each_entry(node, &zone->nodes, list)
		recycle_safe_page(node->data);

	list_for_each_entry(node, &zone->leaves, list)
		recycle_safe_page(node->data);
}

static void memory_bm_recycle(struct memory_bitmap *bm)
{
	struct mem_zone_bm_rtree *zone;
	struct linked_page *p_list;

	list_for_each_entry(zone, &bm->zones, list)
		recycle_zone_bm_rtree(zone);

	p_list = bm->p_list;
	while (p_list) {
		struct linked_page *lp = p_list;

		p_list = lp->next;
		recycle_safe_page(lp);
	}
}

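/**
 * register_nosave_region - Register a region of unsaveable memory.
 *
 * Register a range of page frames the contents of which should not be saved
 * during hibernation (to be used in the early initialization code).
 */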
void __init __register_nosave_region(unsigned long start_pfn,
				     unsigned long end_pfn, int use_kmalloc)
{
	struct nosave_region *region;

	if (start_pfn >= end_pfn)
		return;

	if (!list_empty(&nosave_regions)) {
		/* Try to extend the previous region (they should be sorted) */
		region = list_entry(nosave_regions.prev,
				    struct nosave_region, list);
		if (region->end_pfn == start_pfn) {
			region->end_pfn = end_pfn;
			goto Report;
		}
	}
	if (use_kmalloc) {
		/* During init, this shouldn't fail */
		region = kmalloc(sizeof(struct nosave_region), GFP_KERNEL);
		BUG_ON(!region);
	} else {
		/* This allocation cannot fail */
		region = memblock_alloc(sizeof(struct nosave_region),
					SMP_CACHE_BYTES);
		if (!region)
			panic("%s: Failed to allocate %zu bytes\n", __func__,
			      sizeof(struct nosave_region));
	}
	region->start_pfn = start_pfn;
	region->end_pfn = end_pfn;
	list_add_tail(&region->list, &nosave_regions);
 Report:
	pr_info("Registered nosave memory: [mem %#010llx-%#010llx]\n",
		(unsigned long long) start_pfn << PAGE_SHIFT,
		((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
}

/*
 * Set bits in this map correspond to the page frames the contents of which
 * should not be saved during the suspend.
 */
static struct memory_bitmap *forbidden_pages_map;

/* Set bits in this map correspond to free page frames. */
static struct memory_bitmap *free_pages_map;

/*
 * Each page frame allocated for creating the image is marked by setting the
 * corresponding bits in forbidden_pages_map and free_pages_map simultaneously
 */

void swsusp_set_page_free(struct page *page)
{
	if (free_pages_map)
		memory_bm_set_bit(free_pages_map, page_to_pfn(page));
}

static int swsusp_page_is_free(struct page *page)
{
	return free_pages_map ?
		memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0;
}

void swsusp_unset_page_free(struct page *page)
{
	if (free_pages_map)
		memory_bm_clear_bit(free_pages_map, page_to_pfn(page));
}

static void swsusp_set_page_forbidden(struct page *page)
{
	if (forbidden_pages_map)
		memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page));
}

int swsusp_page_is_forbidden(struct page *page)
{
	return forbidden_pages_map ?
		memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0;
}

static void swsusp_unset_page_forbidden(struct page *page)
{
	if (forbidden_pages_map)
		memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
}

/**
 * mark_nosave_pages - Mark pages that should not be saved.
 * @bm: Memory bitmap.
 *
 * Set the bits in @bm that correspond to the page frames the contents of
 * which should not be saved.
 */
static void mark_nosave_pages(struct memory_bitmap *bm)
{
	struct nosave_region *region;

	if (list_empty(&nosave_regions))
		return;

	list_for_each_entry(region, &nosave_regions, list) {
		unsigned long pfn;

		pr_debug("Marking nosave pages: [mem %#010llx-%#010llx]\n",
			 (unsigned long long) region->start_pfn << PAGE_SHIFT,
			 ((unsigned long long) region->end_pfn << PAGE_SHIFT)
				- 1);

		for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
			if (pfn_valid(pfn)) {
				/*
				 * It is safe to ignore the result of
				 * mem_bm_set_bit_check() here, since we won't
				 * touch the PFNs for which the error is
				 * returned anyway.
				 */
				mem_bm_set_bit_check(bm, pfn);
			}
	}
}

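/**
 * create_basic_memory_bitmaps - Create bitmaps to hold basic page information.
 *
 * Create bitmaps needed for marking page frames that should not be saved and
 * free page frames.  The forbidden_pages_map and free_pages_map pointers are
 * only modified if everything goes well, because we don't want the bits to be
 * touched before both bitmaps are set up.
 */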
int create_basic_memory_bitmaps(void)
{
	struct memory_bitmap *bm1, *bm2;
	int error = 0;

	if (forbidden_pages_map && free_pages_map)
		return 0;
	else
		BUG_ON(forbidden_pages_map || free_pages_map);

	bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
	if (!bm1)
		return -ENOMEM;

	error = memory_bm_create(bm1, GFP_KERNEL, PG_ANY);
	if (error)
		goto Free_first_object;

	bm2 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
	if (!bm2)
		goto Free_first_bitmap;

	error = memory_bm_create(bm2, GFP_KERNEL, PG_ANY);
	if (error)
		goto Free_second_object;

	forbidden_pages_map = bm1;
	free_pages_map = bm2;
	mark_nosave_pages(forbidden_pages_map);

	pr_debug("Basic memory bitmaps created\n");

	return 0;

 Free_second_object:
	kfree(bm2);
 Free_first_bitmap:
	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
 Free_first_object:
	kfree(bm1);
	return -ENOMEM;
}

/**
 * free_basic_memory_bitmaps - Free memory bitmaps holding basic information.
 *
 * Free memory bitmaps allocated by create_basic_memory_bitmaps().  The
 * auxiliary pointers are necessary so that the bitmaps themselves are not
 * referred to while they are being freed.
 */
void free_basic_memory_bitmaps(void)
{
	struct memory_bitmap *bm1, *bm2;

	if (WARN_ON(!(forbidden_pages_map && free_pages_map)))
		return;

	bm1 = forbidden_pages_map;
	bm2 = free_pages_map;
	forbidden_pages_map = NULL;
	free_pages_map = NULL;
	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
	kfree(bm1);
	memory_bm_free(bm2, PG_UNSAFE_CLEAR);
	kfree(bm2);

	pr_debug("Basic memory bitmaps freed\n");
}

void clear_free_pages(void)
{
#ifdef CONFIG_PAGE_POISONING_ZERO
	struct memory_bitmap *bm = free_pages_map;
	unsigned long pfn;

	if (WARN_ON(!(free_pages_map)))
		return;

	memory_bm_position_reset(bm);
	pfn = memory_bm_next_pfn(bm);
	while (pfn != BM_END_OF_MAP) {
		if (pfn_valid(pfn))
			clear_highpage(pfn_to_page(pfn));

		pfn = memory_bm_next_pfn(bm);
	}
	memory_bm_position_reset(bm);
	pr_info("free pages cleared after restore\n");
#endif /* PAGE_POISONING_ZERO */
}

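/**
 * snapshot_additional_pages - Estimate the number of extra pages needed.
 * @zone: Memory zone.
 *
 * Estimate the number of additional pages needed for setting up a hibernation
 * image data structures for @zone (usually, the returned value is greater
 * than the exact number).
 */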
unsigned int snapshot_additional_pages(struct zone *zone)
{
	unsigned int rtree, nodes;

	rtree = nodes = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
	rtree += DIV_ROUND_UP(rtree * sizeof(struct rtree_node),
			      LINKED_PAGE_DATA_SIZE);
	while (nodes > 1) {
		nodes = DIV_ROUND_UP(nodes, BM_ENTRIES_PER_LEVEL);
		rtree += nodes;
	}

	return 2 * rtree;
}

#ifdef CONFIG_HIGHMEM
/**
 * count_free_highmem_pages - Compute the total number of free highmem pages.
 *
 * The returned number is system-wide.
 */
static unsigned int count_free_highmem_pages(void)
{
	struct zone *zone;
	unsigned int cnt = 0;

	for_each_populated_zone(zone)
		if (is_highmem(zone))
			cnt += zone_page_state(zone, NR_FREE_PAGES);

	return cnt;
}

/**
 * saveable_highmem_page - Check if a highmem page is saveable.
 *
 * Determine whether a highmem page should be included in a hibernation image.
 *
 * We should save the page if it isn't Nosave or NosaveFree, or Reserved,
 * and it isn't part of a free chunk of pages.
 */
static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_online_page(pfn);
	if (!page || page_zone(page) != zone)
		return NULL;

	BUG_ON(!PageHighMem(page));

	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
		return NULL;

	if (PageReserved(page) || PageOffline(page))
		return NULL;

	if (page_is_guard(page))
		return NULL;

	return page;
}

/**
 * count_highmem_pages - Compute the total number of saveable highmem pages.
 */
static unsigned int count_highmem_pages(void)
{
	struct zone *zone;
	unsigned int n = 0;

	for_each_populated_zone(zone) {
		unsigned long pfn, max_zone_pfn;

		if (!is_highmem(zone))
			continue;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_highmem_page(zone, pfn))
				n++;
	}
	return n;
}
#else
static inline void *saveable_highmem_page(struct zone *z, unsigned long p)
{
	return NULL;
}
#endif /* CONFIG_HIGHMEM */

/**
 * saveable_page - Check if the given page is saveable.
 *
 * Determine whether a non-highmem page should be included in a hibernation
 * image.
 *
 * We should save the page if it isn't Nosave, and is not in the range
 * of pages statically defined as 'unsaveable', and it isn't part of
 * a free chunk of pages.
 */
static struct page *saveable_page(struct zone *zone, unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_online_page(pfn);
	if (!page || page_zone(page) != zone)
		return NULL;

	BUG_ON(PageHighMem(page));

	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
		return NULL;

	if (PageOffline(page))
		return NULL;

	if (PageReserved(page)
	    && (!kernel_page_present(page) || pfn_is_nosave(pfn)))
		return NULL;

	if (page_is_guard(page))
		return NULL;

	return page;
}

/**
 * count_data_pages - Compute the total number of saveable non-highmem pages.
 */
static unsigned int count_data_pages(void)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;
	unsigned int n = 0;

	for_each_populated_zone(zone) {
		if (is_highmem(zone))
			continue;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_page(zone, pfn))
				n++;
	}
	return n;
}

/*
 * This is needed, because copy_page and memcpy are not usable for copying
 * task structs.
 */
static inline void do_copy_page(long *dst, long *src)
{
	int n;

	for (n = PAGE_SIZE / sizeof(long); n; n--)
		*dst++ = *src++;
}

/**
 * safe_copy_page - Copy a page in a safe way.
 *
 * Check if the page we are going to copy is marked as present in the kernel
 * page tables.  If it is not, map it before copying and unmap it afterwards,
 * so that the copy never touches an unmapped page.
 */
static void safe_copy_page(void *dst, struct page *s_page)
{
	if (kernel_page_present(s_page)) {
		do_copy_page(dst, page_address(s_page));
	} else {
		kernel_map_pages(s_page, 1, 1);
		do_copy_page(dst, page_address(s_page));
		kernel_map_pages(s_page, 1, 0);
	}
}

#ifdef CONFIG_HIGHMEM
static inline struct page *page_is_saveable(struct zone *zone, unsigned long pfn)
{
	return is_highmem(zone) ?
		saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
}

static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
	struct page *s_page, *d_page;
	void *src, *dst;

	s_page = pfn_to_page(src_pfn);
	d_page = pfn_to_page(dst_pfn);
	if (PageHighMem(s_page)) {
		src = kmap_atomic(s_page);
		dst = kmap_atomic(d_page);
		do_copy_page(dst, src);
		kunmap_atomic(dst);
		kunmap_atomic(src);
	} else {
		if (PageHighMem(d_page)) {
			/*
			 * The page pointed to by src may contain some kernel
			 * data modified by kmap_atomic()
			 */
			safe_copy_page(buffer, s_page);
			dst = kmap_atomic(d_page);
			copy_page(dst, buffer);
			kunmap_atomic(dst);
		} else {
			safe_copy_page(page_address(d_page), s_page);
		}
	}
}
#else
#define page_is_saveable(zone, pfn)	saveable_page(zone, pfn)

static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
	safe_copy_page(page_address(pfn_to_page(dst_pfn)),
		       pfn_to_page(src_pfn));
}
#endif /* CONFIG_HIGHMEM */

static void copy_data_pages(struct memory_bitmap *copy_bm,
			    struct memory_bitmap *orig_bm)
{
	struct zone *zone;
	unsigned long pfn;

	for_each_populated_zone(zone) {
		unsigned long max_zone_pfn;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (page_is_saveable(zone, pfn))
				memory_bm_set_bit(orig_bm, pfn);
	}
	memory_bm_position_reset(orig_bm);
	memory_bm_position_reset(copy_bm);
	for(;;) {
		pfn = memory_bm_next_pfn(orig_bm);
		if (unlikely(pfn == BM_END_OF_MAP))
			break;
		copy_data_page(memory_bm_next_pfn(copy_bm), pfn);
	}
}

/* Total number of image pages */
static unsigned int nr_copy_pages;
/* Number of pages needed for saving the original pfns of the image pages */
static unsigned int nr_meta_pages;

/*
 * Numbers of normal and highmem page frames allocated for hibernation image
 * before suspending devices.
 */
static unsigned int alloc_normal, alloc_highmem;

/*
 * Memory bitmap used for marking saveable pages (during hibernation) or
 * hibernation image pages (during restore)
 */
static struct memory_bitmap orig_bm;

/*
 * Memory bitmap used during hibernation for marking allocated page frames
 * that will contain copies of saveable pages.  During restore it is initially
 * used for marking hibernation image pages, but then the set bits from it are
 * duplicated in @orig_bm and it is released.  On highmem systems it is next
 * used for marking "safe" highmem pages, but it has to be reinitialized for
 * this purpose.
 */
static struct memory_bitmap copy_bm;

/**
 * swsusp_free - Free pages allocated for hibernation image.
 *
 * Image pages are allocated before snapshot creation, so they need to be
 * released after resume.
 */
void swsusp_free(void)
{
	unsigned long fb_pfn, fr_pfn;

	if (!forbidden_pages_map || !free_pages_map)
		goto out;

	memory_bm_position_reset(forbidden_pages_map);
	memory_bm_position_reset(free_pages_map);

loop:
	fr_pfn = memory_bm_next_pfn(free_pages_map);
	fb_pfn = memory_bm_next_pfn(forbidden_pages_map);

	/*
	 * Find the next set bit present in both bitmaps: only page frames
	 * with both the "forbidden" and "free" bits set were allocated for
	 * the image, so advance whichever iterator lags behind until the
	 * two positions meet.
	 */
	do {
		if (fb_pfn < fr_pfn)
			fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
		if (fr_pfn < fb_pfn)
			fr_pfn = memory_bm_next_pfn(free_pages_map);
	} while (fb_pfn != fr_pfn);

	if (fr_pfn != BM_END_OF_MAP && pfn_valid(fr_pfn)) {
		struct page *page = pfn_to_page(fr_pfn);

		memory_bm_clear_current(forbidden_pages_map);
		memory_bm_clear_current(free_pages_map);
		hibernate_restore_unprotect_page(page_address(page));
		__free_page(page);
		goto loop;
	}

out:
	nr_copy_pages = 0;
	nr_meta_pages = 0;
	restore_pblist = NULL;
	buffer = NULL;
	alloc_normal = 0;
	alloc_highmem = 0;
	hibernate_restore_protection_end();
}

/* Helper functions used for the shrinking of memory. */

#define GFP_IMAGE	(GFP_KERNEL | __GFP_NOWARN)

/**
 * preallocate_image_pages - Allocate a number of pages for hibernation image.
 * @nr_pages: Number of page frames to allocate.
 * @mask: GFP flags to use for the allocation.
 *
 * Return value: Number of page frames actually allocated
 */
static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
{
	unsigned long nr_alloc = 0;

	while (nr_pages > 0) {
		struct page *page;

		page = alloc_image_page(mask);
		if (!page)
			break;
		memory_bm_set_bit(&copy_bm, page_to_pfn(page));
		if (PageHighMem(page))
			alloc_highmem++;
		else
			alloc_normal++;
		nr_pages--;
		nr_alloc++;
	}

	return nr_alloc;
}

static unsigned long preallocate_image_memory(unsigned long nr_pages,
					      unsigned long avail_normal)
{
	unsigned long alloc;

	if (avail_normal <= alloc_normal)
		return 0;

	alloc = avail_normal - alloc_normal;
	if (nr_pages < alloc)
		alloc = nr_pages;

	return preallocate_image_pages(alloc, GFP_IMAGE);
}

#ifdef CONFIG_HIGHMEM
static unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
	return preallocate_image_pages(nr_pages, GFP_IMAGE | __GFP_HIGHMEM);
}

/**
 * __fraction - Compute (an approximation of) x * (multiplier / base).
 */
static unsigned long __fraction(u64 x, u64 multiplier, u64 base)
{
	x *= multiplier;
	do_div(x, base);
	return (unsigned long)x;
}

static unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
						  unsigned long highmem,
						  unsigned long total)
{
	unsigned long alloc = __fraction(nr_pages, highmem, total);

	return preallocate_image_pages(alloc, GFP_IMAGE | __GFP_HIGHMEM);
}
#else /* CONFIG_HIGHMEM */
static inline unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
	return 0;
}

static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
							 unsigned long highmem,
							 unsigned long total)
{
	return 0;
}
#endif /* CONFIG_HIGHMEM */

/**
 * free_unnecessary_pages - Release preallocated pages not needed for the image.
 */
static unsigned long free_unnecessary_pages(void)
{
	unsigned long save, to_free_normal, to_free_highmem, free;

	save = count_data_pages();
	if (alloc_normal >= save) {
		to_free_normal = alloc_normal - save;
		save = 0;
	} else {
		to_free_normal = 0;
		save -= alloc_normal;
	}
	save += count_highmem_pages();
	if (alloc_highmem >= save) {
		to_free_highmem = alloc_highmem - save;
	} else {
		to_free_highmem = 0;
		save -= alloc_highmem;
		if (to_free_normal > save)
			to_free_normal -= save;
		else
			to_free_normal = 0;
	}
	free = to_free_normal + to_free_highmem;

	memory_bm_position_reset(&copy_bm);

	while (to_free_normal > 0 || to_free_highmem > 0) {
		unsigned long pfn = memory_bm_next_pfn(&copy_bm);
		struct page *page = pfn_to_page(pfn);

		if (PageHighMem(page)) {
			if (!to_free_highmem)
				continue;
			to_free_highmem--;
			alloc_highmem--;
		} else {
			if (!to_free_normal)
				continue;
			to_free_normal--;
			alloc_normal--;
		}
		memory_bm_clear_bit(&copy_bm, pfn);
		swsusp_unset_page_forbidden(page);
		swsusp_unset_page_free(page);
		__free_page(page);
	}

	return free;
}

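/**
 * minimum_image_size - Estimate the minimum acceptable size of an image.
 * @saveable: Number of saveable pages in the system.
 *
 * We want to avoid attempting to free too much memory too hard, so estimate
 * the minimum acceptable size of a hibernation image to use as the lower
 * limit for preallocating memory.
 *
 * We assume that the minimum image size should be proportional to
 *
 * [number of saveable pages] - [number of pages that can be freed in theory]
 *
 * where the second term is the sum of (1) reclaimable slab pages, (2) active
 * and (3) inactive anonymous pages, (4) active and (5) inactive file pages.
 */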
static unsigned long minimum_image_size(unsigned long saveable)
{
	unsigned long size;

	size = global_node_page_state(NR_SLAB_RECLAIMABLE)
		+ global_node_page_state(NR_ACTIVE_ANON)
		+ global_node_page_state(NR_INACTIVE_ANON)
		+ global_node_page_state(NR_ACTIVE_FILE)
		+ global_node_page_state(NR_INACTIVE_FILE);

	return saveable <= size ? 0 : saveable - size;
}

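/**
 * hibernate_preallocate_memory - Preallocate memory for hibernation image.
 *
 * To create a hibernation image it is necessary to make a copy of every page
 * frame in use.  We also need a number of page frames to be free during
 * hibernation for allocations made while saving the image and for device
 * drivers, in case they need to allocate memory from their hibernation
 * callbacks (these two numbers are given by PAGES_FOR_IO (which is a rough
 * estimate) and reserved_size divided by PAGE_SIZE (which is tunable through
 * /sys/power/reserved_size, respectively).  To make this happen, we compute
 * the total number of available page frames and allocate at least
 *
 * ([page frames total] + PAGES_FOR_IO + [metadata pages]) / 2
 *  + 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
 *
 * of them, which corresponds to the maximum size of a hibernation image.
 *
 * If image_size is set below the number following from the above formula,
 * the preallocation of memory is continued until the total number of saveable
 * pages in the system is below the requested image size or the minimum
 * acceptable image size returned by minimum_image_size(), whichever is
 * greater.
 */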
int hibernate_preallocate_memory(void)
{
	struct zone *zone;
	unsigned long saveable, size, max_size, count, highmem, pages = 0;
	unsigned long alloc, save_highmem, pages_highmem, avail_normal;
	ktime_t start, stop;
	int error;

	pr_info("Preallocating image memory... ");
	start = ktime_get();

	error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY);
	if (error)
		goto err_out;

	error = memory_bm_create(&copy_bm, GFP_IMAGE, PG_ANY);
	if (error)
		goto err_out;

	alloc_normal = 0;
	alloc_highmem = 0;

	/* Count the number of saveable data pages. */
	save_highmem = count_highmem_pages();
	saveable = count_data_pages();

	/*
	 * Compute the total number of page frames we can use (count) and the
	 * number of pages needed for image metadata (size).
	 */
	count = saveable;
	saveable += save_highmem;
	highmem = save_highmem;
	size = 0;
	for_each_populated_zone(zone) {
		size += snapshot_additional_pages(zone);
		if (is_highmem(zone))
			highmem += zone_page_state(zone, NR_FREE_PAGES);
		else
			count += zone_page_state(zone, NR_FREE_PAGES);
	}
	avail_normal = count;
	count += highmem;
	count -= totalreserve_pages;

	/* Add number of pages required for page keys (s390 only). */
	size += page_key_additional_pages(saveable);

	/* Compute the maximum number of saveable pages to leave in memory. */
	max_size = (count - (size + PAGES_FOR_IO)) / 2
			- 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE);
	/* Compute the desired number of image pages specified by image_size. */
	size = DIV_ROUND_UP(image_size, PAGE_SIZE);
	if (size > max_size)
		size = max_size;
	/*
	 * If the desired number of image pages is at least as large as the
	 * current number of saveable pages in memory, allocate page frames for
	 * the image and we're done.
	 */
	if (size >= saveable) {
		pages = preallocate_image_highmem(save_highmem);
		pages += preallocate_image_memory(saveable - pages, avail_normal);
		goto out;
	}

	/* Estimate the minimum size of the image. */
	pages = minimum_image_size(saveable);
	/*
	 * To avoid excessive pressure on the normal zone, leave room in it to
	 * accommodate an image of the minimum size (unless it's already too
	 * small, in which case don't preallocate pages from it at all).
	 */
	if (avail_normal > pages)
		avail_normal -= pages;
	else
		avail_normal = 0;
	if (size < pages)
		size = min_t(unsigned long, pages, max_size);

	/*
	 * Let the memory management subsystem know that we're going to need a
	 * large number of page frames to allocate and make it free some memory.
	 * NOTE: If this is not done, performance will be hurt badly in some
	 * test cases.
	 */
	shrink_all_memory(saveable - size);

	/*
	 * The number of saveable pages in memory was too high, so apply some
	 * pressure to decrease it.  First, make room for the largest possible
	 * image and fail if that doesn't work.  Next, try to decrease the size
	 * of the image as much as indicated by 'size' using allocations from
	 * highmem and non-highmem zones separately.
	 */
	pages_highmem = preallocate_image_highmem(highmem / 2);
	alloc = count - max_size;
	if (alloc > pages_highmem)
		alloc -= pages_highmem;
	else
		alloc = 0;
	pages = preallocate_image_memory(alloc, avail_normal);
	if (pages < alloc) {
		/* We have exhausted non-highmem pages, try highmem. */
		alloc -= pages;
		pages += pages_highmem;
		pages_highmem = preallocate_image_highmem(alloc);
		if (pages_highmem < alloc)
			goto err_out;
		pages += pages_highmem;
		/*
		 * size is the desired number of saveable pages to leave in
		 * memory, so try to preallocate (all memory - size) pages.
		 */
		alloc = (count - pages) - size;
		pages += preallocate_image_highmem(alloc);
	} else {
		/*
		 * There are approximately max_size saveable pages at this point
		 * and we want to reduce this number down to size.
		 */
		alloc = max_size - size;
		size = preallocate_highmem_fraction(alloc, highmem, count);
		pages_highmem += size;
		alloc -= size;
		size = preallocate_image_memory(alloc, avail_normal);
		pages_highmem += preallocate_image_highmem(alloc - size);
		pages += pages_highmem + size;
	}

	/*
	 * We only need as many page frames for the image as there are saveable
	 * pages in memory, but we have allocated more.  Release the excessive
	 * ones now.
	 */
	pages -= free_unnecessary_pages();

 out:
	stop = ktime_get();
	pr_cont("done (allocated %lu pages)\n", pages);
	swsusp_show_speed(start, stop, pages, "Allocated");

	return 0;

 err_out:
	pr_cont("\n");
	swsusp_free();
	return -ENOMEM;
}

#ifdef CONFIG_HIGHMEM
/**
 * count_pages_for_highmem - Count non-highmem pages needed for copying highmem.
 *
 * Compute the number of non-highmem pages that will be necessary for creating
 * copies of highmem pages.
 */
static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
{
	unsigned int free_highmem = count_free_highmem_pages() + alloc_highmem;

	if (free_highmem >= nr_highmem)
		nr_highmem = 0;
	else
		nr_highmem -= free_highmem;

	return nr_highmem;
}
#else
static unsigned int count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
#endif /* CONFIG_HIGHMEM */

/**
 * enough_free_mem - Check if there is enough free memory for the image.
 */
static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
{
	struct zone *zone;
	unsigned int free = alloc_normal;

	for_each_populated_zone(zone)
		if (!is_highmem(zone))
			free += zone_page_state(zone, NR_FREE_PAGES);

	nr_pages += count_pages_for_highmem(nr_highmem);
	pr_debug("Normal pages needed: %u + %u, available pages: %u\n",
		 nr_pages, PAGES_FOR_IO, free);

	return free > nr_pages + PAGES_FOR_IO;
}

#ifdef CONFIG_HIGHMEM
/**
 * get_highmem_buffer - Allocate a buffer for highmem pages.
 *
 * If there are some highmem pages in the hibernation image, we may need a
 * buffer to copy them and/or load their data.
 */
static inline int get_highmem_buffer(int safe_needed)
{
	buffer = get_image_page(GFP_ATOMIC, safe_needed);
	return buffer ? 0 : -ENOMEM;
}

/**
 * alloc_highmem_pages - Allocate some highmem pages for the image.
 *
 * Try to allocate as many pages as needed, but if the number of free highmem
 * pages is less than that, allocate them all.
 */
static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
					       unsigned int nr_highmem)
{
	unsigned int to_alloc = count_free_highmem_pages();

	if (to_alloc > nr_highmem)
		to_alloc = nr_highmem;

	nr_highmem -= to_alloc;
	while (to_alloc-- > 0) {
		struct page *page;

		page = alloc_image_page(__GFP_HIGHMEM|__GFP_KSWAPD_RECLAIM);
		memory_bm_set_bit(bm, page_to_pfn(page));
	}
	return nr_highmem;
}
#else
static inline int get_highmem_buffer(int safe_needed) { return 0; }

static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
					       unsigned int n) { return 0; }
#endif /* CONFIG_HIGHMEM */

/**
 * swsusp_alloc - Allocate memory for hibernation image.
 *
 * We first try to allocate as many highmem pages as there are
 * saveable highmem pages in the system.  If that fails, we allocate
 * non-highmem pages for the copies of the remaining highmem ones.
 *
 * In this approach it is likely that the copies of highmem pages will
 * also be located in the high memory, because of the way in which
 * free_pages_map is created.
 */
static int swsusp_alloc(struct memory_bitmap *copy_bm,
			unsigned int nr_pages, unsigned int nr_highmem)
{
	if (nr_highmem > 0) {
		if (get_highmem_buffer(PG_ANY))
			goto err_out;
		if (nr_highmem > alloc_highmem) {
			nr_highmem -= alloc_highmem;
			nr_pages += alloc_highmem_pages(copy_bm, nr_highmem);
		}
	}
	if (nr_pages > alloc_normal) {
		nr_pages -= alloc_normal;
		while (nr_pages-- > 0) {
			struct page *page;

			page = alloc_image_page(GFP_ATOMIC);
			if (!page)
				goto err_out;
			memory_bm_set_bit(copy_bm, page_to_pfn(page));
		}
	}

	return 0;

 err_out:
	swsusp_free();
	return -ENOMEM;
}

asmlinkage __visible int swsusp_save(void)
{
	unsigned int nr_pages, nr_highmem;

	pr_info("Creating hibernation image:\n");

	drain_local_pages(NULL);
	nr_pages = count_data_pages();
	nr_highmem = count_highmem_pages();
	pr_info("Need to copy %u pages\n", nr_pages + nr_highmem);

	if (!enough_free_mem(nr_pages, nr_highmem)) {
		pr_err("Not enough free memory\n");
		return -ENOMEM;
	}

	if (swsusp_alloc(&copy_bm, nr_pages, nr_highmem)) {
		pr_err("Memory allocation failed\n");
		return -ENOMEM;
	}

	/*
	 * During allocating of suspend pagedir, new cold pages may appear.
	 * Kill them.
	 */
	drain_local_pages(NULL);
	copy_data_pages(&copy_bm, &orig_bm);

	/*
	 * End of critical section. From now on, we can write to memory,
	 * but we should not touch disk. This specially means we must _not_
	 * touch swap space! Except we must write out our image of course.
	 */
	nr_pages += nr_highmem;
	nr_copy_pages = nr_pages;
	nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);

	pr_info("Hibernation image created (%d pages copied)\n", nr_pages);

	return 0;
}

#ifndef CONFIG_ARCH_HIBERNATION_HEADER
static int init_header_complete(struct swsusp_info *info)
{
	memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
	info->version_code = LINUX_VERSION_CODE;
	return 0;
}

static char *check_image_kernel(struct swsusp_info *info)
{
	if (info->version_code != LINUX_VERSION_CODE)
		return "kernel version";
	if (strcmp(info->uts.sysname,init_utsname()->sysname))
		return "system type";
	if (strcmp(info->uts.release,init_utsname()->release))
		return "kernel release";
	if (strcmp(info->uts.version,init_utsname()->version))
		return "version";
	if (strcmp(info->uts.machine,init_utsname()->machine))
		return "machine";
	return NULL;
}
#endif /* CONFIG_ARCH_HIBERNATION_HEADER */

unsigned long snapshot_get_image_size(void)
{
	return nr_copy_pages + nr_meta_pages + 1;
}

static int init_header(struct swsusp_info *info)
{
	memset(info, 0, sizeof(struct swsusp_info));
	info->num_physpages = get_num_physpages();
	info->image_pages = nr_copy_pages;
	info->pages = snapshot_get_image_size();
	info->size = info->pages;
	info->size <<= PAGE_SHIFT;
	return init_header_complete(info);
}

/**
 * pack_pfns - Prepare PFNs for saving.
 * @bm: Memory bitmap.
 * @buf: Memory buffer to store the PFNs in.
 *
 * PFNs corresponding to set bits in @bm are stored in the area of memory
 * pointed to by @buf (1 page at a time).
 */
static inline void pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
		buf[j] = memory_bm_next_pfn(bm);
		if (unlikely(buf[j] == BM_END_OF_MAP))
			break;
		/* Save page key for data page (s390 only). */
		page_key_read(buf + j);
	}
}

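/**
 * snapshot_read_next - Get the address to read the next image page from.
 * @handle: Snapshot handle to be used for the reading.
 *
 * On the first call, @handle should point to a zeroed snapshot_handle
 * structure.  The structure gets populated then and a pointer to it should be
 * passed to this function every next time.
 *
 * On success, the function returns a positive number.  Then, the caller
 * is allowed to read up to the returned number of bytes from the memory
 * location computed by the data_of() macro.
 *
 * The function returns 0 to indicate the end of the data stream condition,
 * and negative numbers are returned on errors.  If that happens, the structure
 * pointed to by @handle is not updated and should not be used any more.
 */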
int snapshot_read_next(struct snapshot_handle *handle)
{
	if (handle->cur > nr_meta_pages + nr_copy_pages)
		return 0;

	if (!buffer) {
		/* This makes the buffer be freed by swsusp_free() */
		buffer = get_image_page(GFP_ATOMIC, PG_ANY);
		if (!buffer)
			return -ENOMEM;
	}
	if (!handle->cur) {
		int error;

		error = init_header((struct swsusp_info *)buffer);
		if (error)
			return error;
		handle->buffer = buffer;
		memory_bm_position_reset(&orig_bm);
		memory_bm_position_reset(&copy_bm);
	} else if (handle->cur <= nr_meta_pages) {
		clear_page(buffer);
		pack_pfns(buffer, &orig_bm);
	} else {
		struct page *page;

		page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
		if (PageHighMem(page)) {
			/*
			 * Highmem pages are copied to the buffer,
			 * because we can't return with a kmapped
			 * highmem page (we may not be called again).
			 */
			void *kaddr;

			kaddr = kmap_atomic(page);
			copy_page(buffer, kaddr);
			kunmap_atomic(kaddr);
			handle->buffer = buffer;
		} else {
			handle->buffer = page_address(page);
		}
	}
	handle->cur++;
	return PAGE_SIZE;
}

static void duplicate_memory_bitmap(struct memory_bitmap *dst,
				    struct memory_bitmap *src)
{
	unsigned long pfn;

	memory_bm_position_reset(src);
	pfn = memory_bm_next_pfn(src);
	while (pfn != BM_END_OF_MAP) {
		memory_bm_set_bit(dst, pfn);
		pfn = memory_bm_next_pfn(src);
	}
}

/**
 * mark_unsafe_pages - Mark pages that were used before hibernation.
 *
 * Mark the pages that cannot be used for storing the image during restoration,
 * because they conflict with the pages that had been used before hibernation.
 */
static void mark_unsafe_pages(struct memory_bitmap *bm)
{
	unsigned long pfn;

	/* Clear the "free"/"unsafe" bit for all PFNs */
	memory_bm_position_reset(free_pages_map);
	pfn = memory_bm_next_pfn(free_pages_map);
	while (pfn != BM_END_OF_MAP) {
		memory_bm_clear_current(free_pages_map);
		pfn = memory_bm_next_pfn(free_pages_map);
	}

	/* Mark pages that correspond to the "original" PFNs as "unsafe" */
	duplicate_memory_bitmap(free_pages_map, bm);

	allocated_unsafe_pages = 0;
}

static int check_header(struct swsusp_info *info)
{
	char *reason;

	reason = check_image_kernel(info);
	if (!reason && info->num_physpages != get_num_physpages())
		reason = "memory size";
	if (reason) {
		pr_err("Image mismatch: %s\n", reason);
		return -EPERM;
	}
	return 0;
}

/**
 * load_header - Check the image header and copy the data from it.
 */
static int load_header(struct swsusp_info *info)
{
	int error;

	restore_pblist = NULL;
	error = check_header(info);
	if (!error) {
		nr_copy_pages = info->image_pages;
		nr_meta_pages = info->pages - info->image_pages - 1;
	}
	return error;
}

/**
 * unpack_orig_pfns - Set bits corresponding to given PFNs in a memory bitmap.
 * @bm: Memory bitmap.
 * @buf: Area of memory containing the PFNs.
 *
 * For each element of the array pointed to by @buf (1 page at a time), set the
 * corresponding bit in @bm.
 */
static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
		if (unlikely(buf[j] == BM_END_OF_MAP))
			break;

		/* Extract and buffer page key for data page (s390 only). */
		page_key_memorize(buf + j);

		if (pfn_valid(buf[j]) && memory_bm_pfn_present(bm, buf[j]))
			memory_bm_set_bit(bm, buf[j]);
		else
			return -EFAULT;
	}

	return 0;
}

#ifdef CONFIG_HIGHMEM
/*
 * struct highmem_pbe is used for creating the list of highmem pages that
 * should be restored atomically during the resume from disk, because the page
 * frames they have occupied before the suspend are in use.
 */
struct highmem_pbe {
	struct page *copy_page;	/* data is here now */
	struct page *orig_page;	/* data was here before the suspend */
	struct highmem_pbe *next;
};

/*
 * List of highmem PBEs needed for restoring the highmem pages that were
 * allocated before the suspend and included in the suspend image, but have
 * also been allocated by the "resume" kernel, so their contents cannot be
 * written directly to their "original" page frames.
 */
static struct highmem_pbe *highmem_pblist;

/**
 * count_highmem_image_pages - Compute the number of highmem pages in the image.
 * @bm: Memory bitmap.
 *
 * The bits in @bm that correspond to image pages are assumed to be set.
 */
static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
{
	unsigned long pfn;
	unsigned int cnt = 0;

	memory_bm_position_reset(bm);
	pfn = memory_bm_next_pfn(bm);
	while (pfn != BM_END_OF_MAP) {
		if (PageHighMem(pfn_to_page(pfn)))
			cnt++;

		pfn = memory_bm_next_pfn(bm);
	}
	return cnt;
}

static unsigned int safe_highmem_pages;

static struct memory_bitmap *safe_highmem_bm;

/**
 * prepare_highmem_image - Allocate memory for loading highmem data from image.
 * @bm: Pointer to an uninitialized memory bitmap structure.
 * @nr_highmem_p: Pointer to the number of highmem image pages.
 *
 * Try to allocate as many highmem pages as there are highmem image pages
 * (@nr_highmem_p points to the variable containing the number of highmem image
 * pages).  The pages that are "safe" (ie. will not be overwritten when the
 * hibernation image is restored entirely) have the corresponding bits set in
 * @bm (it must be uninitialized).
 *
 * NOTE: This function should not be called if there are no highmem image pages.
 */
static int prepare_highmem_image(struct memory_bitmap *bm,
				 unsigned int *nr_highmem_p)
{
	unsigned int to_alloc;

	if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
		return -ENOMEM;

	if (get_highmem_buffer(PG_SAFE))
		return -ENOMEM;

	to_alloc = count_free_highmem_pages();
	if (to_alloc > *nr_highmem_p)
		to_alloc = *nr_highmem_p;
	else
		*nr_highmem_p = to_alloc;

	safe_highmem_pages = 0;
	while (to_alloc-- > 0) {
		struct page *page;

		page = alloc_page(__GFP_HIGHMEM);
		if (!swsusp_page_is_free(page)) {
			/* The page is "safe", set its bit in the bitmap */
			memory_bm_set_bit(bm, page_to_pfn(page));
			safe_highmem_pages++;
		}
		/* Mark the page as allocated */
		swsusp_set_page_forbidden(page);
		swsusp_set_page_free(page);
	}
	memory_bm_position_reset(bm);
	safe_highmem_bm = bm;
	return 0;
}

static struct page *last_highmem_page;

/**
 * get_highmem_page_buffer - Prepare a buffer to store a highmem image page.
 *
 * For a given highmem image page, find the buffer that snapshot_write_next()
 * should return to its caller to write to.
 *
 * If the page is to be saved to its "original" page frame or a copy of the
 * page is to be made in highmem, @buffer is returned.  In that case the
 * caller of snapshot_write_next() will write the page's contents to @buffer,
 * and they will be copied to the right location on the next call to
 * snapshot_write_next() with the help of copy_last_highmem_page().
 *
 * If the copy of the page is to be made in normal memory, the address of the
 * copy is returned directly.
 */
static void *get_highmem_page_buffer(struct page *page,
				     struct chain_allocator *ca)
{
	struct highmem_pbe *pbe;
	void *kaddr;

	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) {
		/*
		 * We have allocated the "original" page frame and we can
		 * use it directly to store the loaded page.
		 */
		last_highmem_page = page;
		return buffer;
	}

	/*
	 * The "original" page frame has not been allocated and we have to
	 * use a "safe" page frame to store the loaded page.
	 */
	pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
	if (!pbe) {
		swsusp_free();
		return ERR_PTR(-ENOMEM);
	}
	pbe->orig_page = page;
	if (safe_highmem_pages > 0) {
		struct page *tmp;

		/* Copy of the page will be stored in high memory */
		kaddr = buffer;
		tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm));
		safe_highmem_pages--;
		last_highmem_page = tmp;
		pbe->copy_page = tmp;
	} else {
		/* Copy of the page will be stored in normal memory */
		kaddr = safe_pages_list;
		safe_pages_list = safe_pages_list->next;
		pbe->copy_page = virt_to_page(kaddr);
	}
	pbe->next = highmem_pblist;
	highmem_pblist = pbe;
	return kaddr;
}

/**
 * copy_last_highmem_page - Copy the most recent highmem image page.
 *
 * Copy the contents of a highmem image from @buffer, where the caller of
 * snapshot_write_next() has stored them, to the right location represented by
 * @last_highmem_page.
 */
static void copy_last_highmem_page(void)
{
	if (last_highmem_page) {
		void *dst;

		dst = kmap_atomic(last_highmem_page);
		copy_page(dst, buffer);
		kunmap_atomic(dst);
		last_highmem_page = NULL;
	}
}

static inline int last_highmem_page_copied(void)
{
	return !last_highmem_page;
}

static inline void free_highmem_data(void)
{
	if (safe_highmem_bm)
		memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR);

	if (buffer)
		free_image_page(buffer, PG_UNSAFE_CLEAR);
}
#else
static unsigned int count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }

static inline int prepare_highmem_image(struct memory_bitmap *bm,
					unsigned int *nr_highmem_p) { return 0; }

static inline void *get_highmem_page_buffer(struct page *page,
					    struct chain_allocator *ca)
{
	return ERR_PTR(-EINVAL);
}

static inline void copy_last_highmem_page(void) {}
static inline int last_highmem_page_copied(void) { return 1; }
static inline void free_highmem_data(void) {}
#endif /* CONFIG_HIGHMEM */

#define PBES_PER_LINKED_PAGE	(LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))

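/**
 * prepare_image - Make room for loading hibernation image.
 * @new_bm: Uninitialized memory bitmap structure.
 * @bm: Memory bitmap with unsafe pages marked.
 *
 * Use @bm to mark the pages that will be overwritten in the process of
 * restoring the system memory state from the suspend image ("unsafe" pages)
 * and allocate memory for the image.
 *
 * The idea is to allocate a new memory bitmap first and then allocate
 * as many pages as needed for image data, but without specifying what those
 * pages will be used for just yet.  Instead, we mark them all as allocated and
 * create a list of "safe" pages to be used later.  On systems with high
 * memory a list of "safe" highmem pages is created too.
 */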
static int prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
{
	unsigned int nr_pages, nr_highmem;
	struct linked_page *lp;
	int error;

	/* If there is no highmem, the buffer will not be necessary */
	free_image_page(buffer, PG_UNSAFE_CLEAR);
	buffer = NULL;

	nr_highmem = count_highmem_image_pages(bm);
	mark_unsafe_pages(bm);

	error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
	if (error)
		goto Free;

	duplicate_memory_bitmap(new_bm, bm);
	memory_bm_free(bm, PG_UNSAFE_KEEP);
	if (nr_highmem > 0) {
		error = prepare_highmem_image(bm, &nr_highmem);
		if (error)
			goto Free;
	}

	/*
	 * Reserve some safe pages for potential later use.
	 *
	 * NOTE: This way we make sure there will be enough safe pages for the
	 * chain_alloc() in get_buffer().  It is a bit wasteful, but
	 * nr_copy_pages cannot be greater than 50% of the memory anyway.
	 */
	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
	nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
	while (nr_pages > 0) {
		lp = get_image_page(GFP_ATOMIC, PG_SAFE);
		if (!lp) {
			error = -ENOMEM;
			goto Free;
		}
		lp->next = safe_pages_list;
		safe_pages_list = lp;
		nr_pages--;
	}
	/* Preallocate memory for the image */
	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
	while (nr_pages > 0) {
		lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
		if (!lp) {
			error = -ENOMEM;
			goto Free;
		}
		if (!swsusp_page_is_free(virt_to_page(lp))) {
			/* The page is "safe", add it to the list */
			lp->next = safe_pages_list;
			safe_pages_list = lp;
		}
		/* Mark the page as allocated */
		swsusp_set_page_forbidden(virt_to_page(lp));
		swsusp_set_page_free(virt_to_page(lp));
		nr_pages--;
	}
	return 0;

 Free:
	swsusp_free();
	return error;
}

/**
 * get_buffer - Get the address to store the next image data page.
 *
 * Get the address that snapshot_write_next() should return to its caller to
 * write to.
 */
static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
{
	struct pbe *pbe;
	struct page *page;
	unsigned long pfn = memory_bm_next_pfn(bm);

	if (pfn == BM_END_OF_MAP)
		return ERR_PTR(-EFAULT);

	page = pfn_to_page(pfn);
	if (PageHighMem(page))
		return get_highmem_page_buffer(page, ca);

	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page))
		/*
		 * We have allocated the "original" page frame and we can
		 * use it directly to store the loaded page.
		 */
		return page_address(page);

	/*
	 * The "original" page frame has not been allocated and we have to
	 * use a "safe" page frame to store the loaded page.
	 */
	pbe = chain_alloc(ca, sizeof(struct pbe));
	if (!pbe) {
		swsusp_free();
		return ERR_PTR(-ENOMEM);
	}
	pbe->orig_address = page_address(page);
	pbe->address = safe_pages_list;
	safe_pages_list = safe_pages_list->next;
	pbe->next = restore_pblist;
	restore_pblist = pbe;
	return pbe->address;
}

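/**
 * snapshot_write_next - Get the address to store the next image page.
 * @handle: Snapshot handle structure to guide the loading.
 *
 * On the first call, @handle should point to a zeroed snapshot_handle
 * structure.  The structure gets populated then and a pointer to it should be
 * passed to this function every next time.
 *
 * On success, the function returns a positive number.  Then, the caller
 * is allowed to write up to the returned number of bytes to the memory
 * location computed by the data_of() macro.
 *
 * The function returns 0 to indicate the "end of file" condition.  Negative
 * numbers are returned on errors, in which cases the structure pointed to by
 * @handle is not updated and should not be used any more.
 */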
int snapshot_write_next(struct snapshot_handle *handle)
{
	static struct chain_allocator ca;
	int error = 0;

	/* Check if we have already loaded the entire image */
	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages)
		return 0;

	handle->sync_read = 1;

	if (!handle->cur) {
		if (!buffer)
			/* This makes the buffer be freed by swsusp_free() */
			buffer = get_image_page(GFP_ATOMIC, PG_ANY);

		if (!buffer)
			return -ENOMEM;

		handle->buffer = buffer;
	} else if (handle->cur == 1) {
		error = load_header(buffer);
		if (error)
			return error;

		safe_pages_list = NULL;

		error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
		if (error)
			return error;

		/* Allocate buffer for page keys. */
		error = page_key_alloc(nr_copy_pages);
		if (error)
			return error;

		hibernate_restore_protection_begin();
	} else if (handle->cur <= nr_meta_pages + 1) {
		error = unpack_orig_pfns(buffer, &copy_bm);
		if (error)
			return error;

		if (handle->cur == nr_meta_pages + 1) {
			error = prepare_image(&orig_bm, &copy_bm);
			if (error)
				return error;

			chain_init(&ca, GFP_ATOMIC, PG_SAFE);
			memory_bm_position_reset(&orig_bm);
			restore_pblist = NULL;
			handle->buffer = get_buffer(&orig_bm, &ca);
			handle->sync_read = 0;
			if (IS_ERR(handle->buffer))
				return PTR_ERR(handle->buffer);
		}
	} else {
		copy_last_highmem_page();
		/* Restore page key for data page (s390 only). */
		page_key_write(handle->buffer);
		hibernate_restore_protect_page(handle->buffer);
		handle->buffer = get_buffer(&orig_bm, &ca);
		if (IS_ERR(handle->buffer))
			return PTR_ERR(handle->buffer);
		if (handle->buffer != buffer)
			handle->sync_read = 0;
	}
	handle->cur++;
	return PAGE_SIZE;
}

/**
 * snapshot_write_finalize - Complete the loading of a hibernation image.
 *
 * Must be called after the last call to snapshot_write_next() in case the last
 * page in the image happens to be a highmem page and its contents should be
 * stored in highmem.  Additionally, it recycles bitmap memory that's not
 * necessary any more.
 */
void snapshot_write_finalize(struct snapshot_handle *handle)
{
	copy_last_highmem_page();
	/* Restore page key for data page (s390 only). */
	page_key_write(handle->buffer);
	page_key_free();
	hibernate_restore_protect_page(handle->buffer);
	/* Do that only if we have loaded the image entirely */
	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) {
		memory_bm_recycle(&orig_bm);
		free_highmem_data();
	}
}

int snapshot_image_loaded(struct snapshot_handle *handle)
{
	return !(!nr_copy_pages || !last_highmem_page_copied() ||
			handle->cur <= nr_meta_pages + nr_copy_pages);
}

#ifdef CONFIG_HIGHMEM
/* Assume that nr_copy_pages cannot be greater than 50% of memory */
static inline void swap_two_pages_data(struct page *p1, struct page *p2,
				       void *buf)
{
	void *kaddr1, *kaddr2;

	kaddr1 = kmap_atomic(p1);
	kaddr2 = kmap_atomic(p2);
	copy_page(buf, kaddr1);
	copy_page(kaddr1, kaddr2);
	copy_page(kaddr2, buf);
	kunmap_atomic(kaddr2);
	kunmap_atomic(kaddr1);
}

/**
 * restore_highmem - Put highmem image pages into their original locations.
 *
 * For each highmem page that was in use before hibernation and is included in
 * the image, and also has been allocated by the "restore" kernel, swap its
 * current contents with the previous (ie. "before hibernation") ones.
 *
 * If the restore eventually fails, we can call this function once again and
 * restore the highmem state as seen by the restore kernel.
 */
int restore_highmem(void)
{
	struct highmem_pbe *pbe = highmem_pblist;
	void *buf;

	if (!pbe)
		return 0;

	buf = get_image_page(GFP_ATOMIC, PG_SAFE);
	if (!buf)
		return -ENOMEM;

	while (pbe) {
		swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
		pbe = pbe->next;
	}
	free_image_page(buf, PG_UNSAFE_CLEAR);
	return 0;
}
#endif /* CONFIG_HIGHMEM */