/*
 * kernel/power/snapshot.c
 *
 * System snapshot/restore functionality for hibernation (swsusp):
 * creating the in-memory hibernation image and restoring it.
 */
#define pr_fmt(fmt) "PM: " fmt

#include <linux/version.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/nmi.h>
#include <linux/syscalls.h>
#include <linux/console.h>
#include <linux/highmem.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/compiler.h>
#include <linux/ktime.h>
#include <linux/set_memory.h>

#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#include "power.h"

#if defined(CONFIG_STRICT_KERNEL_RWX) && defined(CONFIG_ARCH_HAS_SET_MEMORY)
static bool hibernate_restore_protection;
static bool hibernate_restore_protection_active;

void enable_restore_image_protection(void)
{
	hibernate_restore_protection = true;
}

static inline void hibernate_restore_protection_begin(void)
{
	hibernate_restore_protection_active = hibernate_restore_protection;
}

static inline void hibernate_restore_protection_end(void)
{
	hibernate_restore_protection_active = false;
}

static inline void hibernate_restore_protect_page(void *page_address)
{
	if (hibernate_restore_protection_active)
		set_memory_ro((unsigned long)page_address, 1);
}

static inline void hibernate_restore_unprotect_page(void *page_address)
{
	if (hibernate_restore_protection_active)
		set_memory_rw((unsigned long)page_address, 1);
}
#else
static inline void hibernate_restore_protection_begin(void) {}
static inline void hibernate_restore_protection_end(void) {}
static inline void hibernate_restore_protect_page(void *page_address) {}
static inline void hibernate_restore_unprotect_page(void *page_address) {}
#endif

static int swsusp_page_is_free(struct page *);
static void swsusp_set_page_forbidden(struct page *);
static void swsusp_unset_page_forbidden(struct page *);

/*
 * Number of bytes to reserve for memory allocations made by device drivers
 * from their ->freeze() and ->freeze_noirq() callbacks so they don't run
 * out of memory.
 */
unsigned long reserved_size;

void __init hibernate_reserved_size_init(void)
{
	reserved_size = SPARE_PAGES * PAGE_SIZE;
}

/*
 * Preferred image size in bytes (tunable via /sys/power/image_size).
 * When it is set to N, the image creation code will do its best to keep
 * the image size below N bytes, but if that is impossible, it will try
 * to create the smallest image possible.
 */
unsigned long image_size;

void __init hibernate_image_size_init(void)
{
	image_size = ((totalram_pages * 2) / 5) * PAGE_SIZE;
}

/*
 * List of PBEs needed for restoring the pages that were allocated before
 * the suspend and included in the suspend image, but have also been
 * allocated by the "resume" kernel, so their contents cannot be written
 * directly to their "original" page frames.
 */
struct pbe *restore_pblist;

/* struct linked_page is used to build chains of pages */

#define LINKED_PAGE_DATA_SIZE	(PAGE_SIZE - sizeof(void *))

struct linked_page {
	struct linked_page *next;
	char data[LINKED_PAGE_DATA_SIZE];
} __packed;

/*
 * List of "safe" pages (ie. pages that were not used by the image kernel
 * before hibernation) that may be used as temporary storage for kernel
 * memory contents during image restoration.
 */
static struct linked_page *safe_pages_list;

/* Pointer to an auxiliary buffer (1 page) */
static void *buffer;

#define PG_ANY		0
#define PG_SAFE		1
#define PG_UNSAFE_CLEAR	1
#define PG_UNSAFE_KEEP	0

static unsigned int allocated_unsafe_pages;

/**
 * get_image_page - Allocate a page for a hibernation image.
 * @gfp_mask: GFP mask for the allocation.
 * @safe_needed: Get pages that were not used before hibernation (restore only)
 *
 * During image restoration, for storing the PBE list and the image data, we
 * can only use memory pages that do not conflict with the pages used before
 * hibernation.  The "unsafe" pages have PageNosaveFree set and we count them
 * using allocated_unsafe_pages.
 *
 * Each allocated image page is marked as PageNosave and PageNosaveFree so
 * that swsusp_free() can release it.
 */
static void *get_image_page(gfp_t gfp_mask, int safe_needed)
{
	void *res;

	res = (void *)get_zeroed_page(gfp_mask);
	if (safe_needed)
		while (res && swsusp_page_is_free(virt_to_page(res))) {
			/* The page is unsafe, mark it for swsusp_free() */
			swsusp_set_page_forbidden(virt_to_page(res));
			allocated_unsafe_pages++;
			res = (void *)get_zeroed_page(gfp_mask);
		}
	if (res) {
		swsusp_set_page_forbidden(virt_to_page(res));
		swsusp_set_page_free(virt_to_page(res));
	}
	return res;
}

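/*
 * __get_safe_page - Return a zeroed "safe" page to store image data in.
 *
 * Take a page off safe_pages_list if the list is non-empty; otherwise fall
 * back to allocating a fresh safe page with get_image_page(PG_SAFE).
 */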
static void *__get_safe_page(gfp_t gfp_mask)
{
	if (safe_pages_list) {
		void *ret = safe_pages_list;

		safe_pages_list = safe_pages_list->next;
		memset(ret, 0, PAGE_SIZE);
		return ret;
	}
	return get_image_page(gfp_mask, PG_SAFE);
}

unsigned long get_safe_page(gfp_t gfp_mask)
{
	return (unsigned long)__get_safe_page(gfp_mask);
}

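/*
 * alloc_image_page - Allocate one page for the image and mark it as
 * PageNosave and PageNosaveFree so that swsusp_free() can find and
 * release it later.
 */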
static struct page *alloc_image_page(gfp_t gfp_mask)
{
	struct page *page;

	page = alloc_page(gfp_mask);
	if (page) {
		swsusp_set_page_forbidden(page);
		swsusp_set_page_free(page);
	}
	return page;
}

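/* recycle_safe_page - Put a no-longer-needed safe page back on the list. */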
static void recycle_safe_page(void *page_address)
{
	struct linked_page *lp = page_address;

	lp->next = safe_pages_list;
	safe_pages_list = lp;
}

/**
 * free_image_page - Free a page allocated for hibernation image.
 * @addr: Address of the page to free.
 * @clear_nosave_free: If set, clear the PageNosaveFree bit for the page.
 *
 * The page to free should have been allocated by get_image_page() (page
 * flags set by it are affected).
 */
static inline void free_image_page(void *addr, int clear_nosave_free)
{
	struct page *page;

	BUG_ON(!virt_addr_valid(addr));

	page = virt_to_page(addr);

	swsusp_unset_page_forbidden(page);
	if (clear_nosave_free)
		swsusp_unset_page_free(page);

	__free_page(page);
}

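/* free_list_of_pages - Free every page of a linked_page chain. */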
static inline void free_list_of_pages(struct linked_page *list,
				      int clear_page_nosave)
{
	while (list) {
		struct linked_page *lp = list->next;

		free_image_page(list, clear_page_nosave);
		list = lp;
	}
}

/*
 * struct chain_allocator is used for allocating small objects out of
 * a linked list of pages called 'the chain'.
 *
 * The chain grows each time there is no room for a new object in the
 * current page.  The allocated objects cannot be freed individually;
 * it is only possible to free them all at once, by freeing the entire
 * chain.
 *
 * NOTE: The chain allocator may be inefficient if the allocated objects
 * are not much smaller than PAGE_SIZE.
 */
struct chain_allocator {
	struct linked_page *chain;	/* the chain */
	unsigned int used_space;	/* total size of objects allocated
					   out of the current page */
	gfp_t gfp_mask;		/* mask for allocating pages */
	int safe_needed;	/* if set, only "safe" pages are allocated */
};

static void chain_init(struct chain_allocator *ca, gfp_t gfp_mask,
		       int safe_needed)
{
	ca->chain = NULL;
	ca->used_space = LINKED_PAGE_DATA_SIZE;
	ca->gfp_mask = gfp_mask;
	ca->safe_needed = safe_needed;
}

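/*
 * chain_alloc - Carve @size bytes out of the current page of the chain,
 * growing the chain with a fresh page when the current one is full.
 */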
static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
{
	void *ret;

	if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
		struct linked_page *lp;

		lp = ca->safe_needed ? __get_safe_page(ca->gfp_mask) :
					get_image_page(ca->gfp_mask, PG_ANY);
		if (!lp)
			return NULL;

		lp->next = ca->chain;
		ca->chain = lp;
		ca->used_space = 0;
	}
	ret = ca->chain->data + ca->used_space;
	ca->used_space += size;
	return ret;
}

/*
 * Data types related to memory bitmaps.
 *
 * A memory bitmap provides one bit per page frame.  For each populated
 * memory zone there is a struct mem_zone_bm_rtree containing a radix
 * tree of bitmap blocks.  Each leaf node of the tree holds one page
 * (BM_BITS_PER_BLOCK bits) of the bitmap, and the inner nodes map a
 * block number to the corresponding leaf, so looking up the bit for a
 * given PFN takes a number of steps proportional to the height of the
 * tree.
 *
 * struct memory_bitmap contains a list of these zone bitmaps, a list of
 * the pages backing the tree nodes (so that they can be freed or
 * recycled in one go), and a struct bm_position used for browsing the
 * bitmap sequentially: memory_bm_next_pfn() walks the set bits, and the
 * cached position also speeds up consecutive lookups of nearby PFNs.
 */

#define BM_END_OF_MAP	(~0UL)

#define BM_BITS_PER_BLOCK	(PAGE_SIZE * BITS_PER_BYTE)
#define BM_BLOCK_SHIFT		(PAGE_SHIFT + 3)
#define BM_BLOCK_MASK		((1UL << BM_BLOCK_SHIFT) - 1)

/*
 * struct rtree_node is a wrapper struct to link the nodes together with
 * a list while the actual node data is stored in a separate page.
 */
struct rtree_node {
	struct list_head list;
	unsigned long *data;
};

/*
 * struct mem_zone_bm_rtree represents a bitmap used for one memory zone.
 * It stores the radix tree and its wrapper nodes, as well as the zone's
 * PFN range.
 */
struct mem_zone_bm_rtree {
	struct list_head list;		/* Link zones together */
	struct list_head nodes;		/* Radix tree inner nodes */
	struct list_head leaves;	/* Radix tree leaves */
	unsigned long start_pfn;	/* Zone start page frame */
	unsigned long end_pfn;		/* Zone end page frame + 1 */
	struct rtree_node *rtree;	/* Radix tree root */
	int levels;			/* Number of radix tree levels */
	unsigned int blocks;		/* Number of bitmap blocks */
};

/* Structure holding the current position of the bitmap browsing */

struct bm_position {
	struct mem_zone_bm_rtree *zone;
	struct rtree_node *node;
	unsigned long node_pfn;
	int node_bit;
};

struct memory_bitmap {
	struct list_head zones;
	struct linked_page *p_list;	/* list of pages used to store zone
					   bitmap objects and bitmap block
					   objects */
	struct bm_position cur;		/* most recently used bit position */
};

/* Functions that operate on memory bitmaps */

#define BM_ENTRIES_PER_LEVEL	(PAGE_SIZE / sizeof(unsigned long))
#if BITS_PER_LONG == 32
#define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 2)
#else
#define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 3)
#endif
#define BM_RTREE_LEVEL_MASK	((1UL << BM_RTREE_LEVEL_SHIFT) - 1)

/**
 * alloc_rtree_node - Allocate a new node and add it to the radix tree.
 *
 * This function is used to allocate inner nodes as well as the leaf
 * nodes of the radix tree.  It also adds the node to the corresponding
 * linked list passed in via the @list parameter.
 */
static struct rtree_node *alloc_rtree_node(gfp_t gfp_mask, int safe_needed,
					   struct chain_allocator *ca,
					   struct list_head *list)
{
	struct rtree_node *node;

	node = chain_alloc(ca, sizeof(struct rtree_node));
	if (!node)
		return NULL;

	node->data = get_image_page(gfp_mask, safe_needed);
	if (!node->data)
		return NULL;

	list_add_tail(&node->list, list);

	return node;
}

/**
 * add_rtree_block - Add a new leaf node to the radix tree.
 *
 * The leaf nodes need to be allocated in order to keep the leaves
 * linked list in order.  This is guaranteed by the zone->blocks counter.
 */
static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask,
			   int safe_needed, struct chain_allocator *ca)
{
	struct rtree_node *node, *block, **dst;
	unsigned int levels_needed, block_nr;
	int i;

	block_nr = zone->blocks;
	levels_needed = 0;

	/* How many levels do we need for this block nr? */
	while (block_nr) {
		levels_needed += 1;
		block_nr >>= BM_RTREE_LEVEL_SHIFT;
	}

	/* Make sure the rtree has enough levels */
	for (i = zone->levels; i < levels_needed; i++) {
		node = alloc_rtree_node(gfp_mask, safe_needed, ca,
					&zone->nodes);
		if (!node)
			return -ENOMEM;

		node->data[0] = (unsigned long)zone->rtree;
		zone->rtree = node;
		zone->levels += 1;
	}

	/* Allocate new block */
	block = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->leaves);
	if (!block)
		return -ENOMEM;

	/* Now walk the rtree to insert the block */
	node = zone->rtree;
	dst = &zone->rtree;
	block_nr = zone->blocks;
	for (i = zone->levels; i > 0; i--) {
		int index;

		if (!node) {
			node = alloc_rtree_node(gfp_mask, safe_needed, ca,
						&zone->nodes);
			if (!node)
				return -ENOMEM;
			*dst = node;
		}

		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
		index &= BM_RTREE_LEVEL_MASK;
		dst = (struct rtree_node **)&((*dst)->data[index]);
		node = *dst;
	}

	zone->blocks += 1;
	*dst = block;

	return 0;
}

static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
			       int clear_nosave_free);

/**
 * create_zone_bm_rtree - Create a radix tree for one zone.
 *
 * Allocate the mem_zone_bm_rtree structure and initialize it.  This
 * function also allocates and builds the radix tree for the zone.
 */
static struct mem_zone_bm_rtree *create_zone_bm_rtree(gfp_t gfp_mask,
						      int safe_needed,
						      struct chain_allocator *ca,
						      unsigned long start,
						      unsigned long end)
{
	struct mem_zone_bm_rtree *zone;
	unsigned int i, nr_blocks;
	unsigned long pages;

	pages = end - start;
	zone = chain_alloc(ca, sizeof(struct mem_zone_bm_rtree));
	if (!zone)
		return NULL;

	INIT_LIST_HEAD(&zone->nodes);
	INIT_LIST_HEAD(&zone->leaves);
	zone->start_pfn = start;
	zone->end_pfn = end;
	nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);

	for (i = 0; i < nr_blocks; i++) {
		if (add_rtree_block(zone, gfp_mask, safe_needed, ca)) {
			free_zone_bm_rtree(zone, PG_UNSAFE_CLEAR);
			return NULL;
		}
	}

	return zone;
}

/**
 * free_zone_bm_rtree - Free the memory of the radix tree.
 *
 * Free all node pages of the radix tree.  The mem_zone_bm_rtree
 * structure itself is not freed here, nor are the rtree_node structs
 * (both live in chain-allocated memory).
 */
static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
			       int clear_nosave_free)
{
	struct rtree_node *node;

	list_for_each_entry(node, &zone->nodes, list)
		free_image_page(node->data, clear_nosave_free);

	list_for_each_entry(node, &zone->leaves, list)
		free_image_page(node->data, clear_nosave_free);
}

static void memory_bm_position_reset(struct memory_bitmap *bm)
{
	bm->cur.zone = list_entry(bm->zones.next, struct mem_zone_bm_rtree,
				  list);
	bm->cur.node = list_entry(bm->cur.zone->leaves.next,
				  struct rtree_node, list);
	bm->cur.node_pfn = 0;
	bm->cur.node_bit = 0;
}

static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);

struct mem_extent {
	struct list_head hook;
	unsigned long start;
	unsigned long end;
};

/**
 * free_mem_extents - Free a list of memory extents.
 * @list: List of extents to free.
 */
static void free_mem_extents(struct list_head *list)
{
	struct mem_extent *ext, *aux;

	list_for_each_entry_safe(ext, aux, list, hook) {
		list_del(&ext->hook);
		kfree(ext);
	}
}

/**
 * create_mem_extents - Create a list of memory extents.
 * @list: List to put the extents into.
 * @gfp_mask: Mask to use for memory allocations.
 *
 * The extents represent contiguous ranges of PFNs covering the populated
 * zones of memory; overlapping ranges are merged.
 */
static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
{
	struct zone *zone;

	INIT_LIST_HEAD(list);

	for_each_populated_zone(zone) {
		unsigned long zone_start, zone_end;
		struct mem_extent *ext, *cur, *aux;

		zone_start = zone->zone_start_pfn;
		zone_end = zone_end_pfn(zone);

		list_for_each_entry(ext, list, hook)
			if (zone_start <= ext->end)
				break;

		if (&ext->hook == list || zone_end < ext->start) {
			/* New extent is necessary */
			struct mem_extent *new_ext;

			new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask);
			if (!new_ext) {
				free_mem_extents(list);
				return -ENOMEM;
			}
			new_ext->start = zone_start;
			new_ext->end = zone_end;
			list_add_tail(&new_ext->hook, &ext->hook);
			continue;
		}

		/* Merge this zone's range of PFNs with the existing one */
		if (zone_start < ext->start)
			ext->start = zone_start;
		if (zone_end > ext->end)
			ext->end = zone_end;

		/* More merging may be possible */
		cur = ext;
		list_for_each_entry_safe_continue(cur, aux, list, hook) {
			if (zone_end < cur->start)
				break;
			if (zone_end < cur->end)
				ext->end = cur->end;
			list_del(&cur->hook);
			kfree(cur);
		}
	}

	return 0;
}

/**
 * memory_bm_create - Allocate memory for a memory bitmap.
 */
static int memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask,
			    int safe_needed)
{
	struct chain_allocator ca;
	struct list_head mem_extents;
	struct mem_extent *ext;
	int error;

	chain_init(&ca, gfp_mask, safe_needed);
	INIT_LIST_HEAD(&bm->zones);

	error = create_mem_extents(&mem_extents, gfp_mask);
	if (error)
		return error;

	list_for_each_entry(ext, &mem_extents, hook) {
		struct mem_zone_bm_rtree *zone;

		zone = create_zone_bm_rtree(gfp_mask, safe_needed, &ca,
					    ext->start, ext->end);
		if (!zone) {
			error = -ENOMEM;
			goto Error;
		}
		list_add_tail(&zone->list, &bm->zones);
	}

	bm->p_list = ca.chain;
	memory_bm_position_reset(bm);
 Exit:
	free_mem_extents(&mem_extents);
	return error;

 Error:
	bm->p_list = ca.chain;
	memory_bm_free(bm, PG_UNSAFE_CLEAR);
	goto Exit;
}

/**
 * memory_bm_free - Free memory occupied by the memory bitmap.
 * @bm: Memory bitmap.
 */
static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
{
	struct mem_zone_bm_rtree *zone;

	list_for_each_entry(zone, &bm->zones, list)
		free_zone_bm_rtree(zone, clear_nosave_free);

	free_list_of_pages(bm->p_list, clear_nosave_free);

	INIT_LIST_HEAD(&bm->zones);
}

/**
 * memory_bm_find_bit - Find the bit for a given PFN in a memory bitmap.
 *
 * Find the bit in memory bitmap @bm that corresponds to the given PFN.
 * The cur.zone, cur.node and cur.node_pfn members of @bm are updated.
 *
 * Walk the radix tree to find the page containing the bit that represents
 * @pfn and return the position of the bit in @addr and @bit_nr.
 */
static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
			      void **addr, unsigned int *bit_nr)
{
	struct mem_zone_bm_rtree *curr, *zone;
	struct rtree_node *node;
	int i, block_nr;

	zone = bm->cur.zone;

	if (pfn >= zone->start_pfn && pfn < zone->end_pfn)
		goto zone_found;

	zone = NULL;

	/* Find the right zone */
	list_for_each_entry(curr, &bm->zones, list) {
		if (pfn >= curr->start_pfn && pfn < curr->end_pfn) {
			zone = curr;
			break;
		}
	}

	if (!zone)
		return -EFAULT;

zone_found:
	/*
	 * We have found the zone.  Now walk the radix tree to find the
	 * leaf node for our PFN.
	 */
	node = bm->cur.node;
	if (((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
		goto node_found;

	node = zone->rtree;
	block_nr = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT;

	for (i = zone->levels; i > 0; i--) {
		int index;

		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
		index &= BM_RTREE_LEVEL_MASK;
		BUG_ON(node->data[index] == 0);
		node = (struct rtree_node *)node->data[index];
	}

node_found:
	/* Update last position */
	bm->cur.zone = zone;
	bm->cur.node = node;
	bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK;

	/* Set return values */
	*addr = node->data;
	*bit_nr = (pfn - zone->start_pfn) & BM_BLOCK_MASK;

	return 0;
}

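/*
 * Bit-manipulation helpers: set, clear and test the bitmap bit for a PFN.
 * The *_check variant returns an error instead of triggering BUG_ON()
 * when the PFN is not covered by the bitmap.
 */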
static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	set_bit(bit, addr);
}

static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	if (!error)
		set_bit(bit, addr);

	return error;
}

static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	clear_bit(bit, addr);
}

static void memory_bm_clear_current(struct memory_bitmap *bm)
{
	int bit;

	bit = max(bm->cur.node_bit - 1, 0);
	clear_bit(bit, bm->cur.node->data);
}

static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	return test_bit(bit, addr);
}

static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;

	return !memory_bm_find_bit(bm, pfn, &addr, &bit);
}

/*
 * rtree_next_node - Jump to the next leaf node.
 *
 * Set the position to the beginning of the next node in the memory
 * bitmap.  This is either the next node in the current zone's radix
 * tree or the first node in the radix tree of the next zone.
 *
 * Return true if there is a next node, false otherwise.
 */
static bool rtree_next_node(struct memory_bitmap *bm)
{
	if (!list_is_last(&bm->cur.node->list, &bm->cur.zone->leaves)) {
		bm->cur.node = list_entry(bm->cur.node->list.next,
					  struct rtree_node, list);
		bm->cur.node_pfn += BM_BITS_PER_BLOCK;
		bm->cur.node_bit = 0;
		touch_softlockup_watchdog();
		return true;
	}

	/* No more nodes, goto next zone */
	if (!list_is_last(&bm->cur.zone->list, &bm->zones)) {
		bm->cur.zone = list_entry(bm->cur.zone->list.next,
					  struct mem_zone_bm_rtree, list);
		bm->cur.node = list_entry(bm->cur.zone->leaves.next,
					  struct rtree_node, list);
		bm->cur.node_pfn = 0;
		bm->cur.node_bit = 0;
		return true;
	}

	/* No more zones */
	return false;
}

/**
 * memory_bm_next_pfn - Find the next set bit in a memory bitmap.
 * @bm: Memory bitmap.
 *
 * Starting from the last returned position this function searches for the
 * next set bit in @bm and returns the PFN represented by it.  If no more
 * bits are set, BM_END_OF_MAP is returned.
 *
 * It is required to run memory_bm_position_reset() before the first call
 * to this function for the given memory bitmap.
 */
static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
{
	unsigned long bits, pfn, pages;
	int bit;

	do {
		pages = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn;
		bits = min(pages - bm->cur.node_pfn, BM_BITS_PER_BLOCK);
		bit = find_next_bit(bm->cur.node->data, bits,
				    bm->cur.node_bit);
		if (bit < bits) {
			pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit;
			bm->cur.node_bit = bit + 1;
			return pfn;
		}
	} while (rtree_next_node(bm));

	return BM_END_OF_MAP;
}

/*
 * This structure represents a range of page frames the contents of which
 * should not be saved during hibernation.
 */
struct nosave_region {
	struct list_head list;
	unsigned long start_pfn;
	unsigned long end_pfn;
};

static LIST_HEAD(nosave_regions);

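/*
 * recycle_zone_bm_rtree - Put a zone's radix tree node pages back on the
 * list of safe pages so that they can be reused to store image data.
 */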
static void recycle_zone_bm_rtree(struct mem_zone_bm_rtree *zone)
{
	struct rtree_node *node;

	list_for_each_entry(node, &zone->nodes, list)
		recycle_safe_page(node->data);

	list_for_each_entry(node, &zone->leaves, list)
		recycle_safe_page(node->data);
}

static void memory_bm_recycle(struct memory_bitmap *bm)
{
	struct mem_zone_bm_rtree *zone;
	struct linked_page *p_list;

	list_for_each_entry(zone, &bm->zones, list)
		recycle_zone_bm_rtree(zone);

	p_list = bm->p_list;
	while (p_list) {
		struct linked_page *lp = p_list;

		p_list = lp->next;
		recycle_safe_page(lp);
	}
}

/**
 * __register_nosave_region - Register a region of unsaveable memory.
 *
 * Register a range of page frames the contents of which should not be
 * saved during hibernation (to be used in the early initialization code).
 */
void __init __register_nosave_region(unsigned long start_pfn,
				     unsigned long end_pfn, int use_kmalloc)
{
	struct nosave_region *region;

	if (start_pfn >= end_pfn)
		return;

	if (!list_empty(&nosave_regions)) {
		/* Try to extend the previous region (they should be sorted) */
		region = list_entry(nosave_regions.prev,
				    struct nosave_region, list);
		if (region->end_pfn == start_pfn) {
			region->end_pfn = end_pfn;
			goto Report;
		}
	}
	if (use_kmalloc) {
		/* The memory management is active, so we can use kmalloc() */
		region = kmalloc(sizeof(struct nosave_region), GFP_KERNEL);
		BUG_ON(!region);
	} else {
		/* This allocation cannot fail */
		region = memblock_virt_alloc(sizeof(struct nosave_region), 0);
	}
	region->start_pfn = start_pfn;
	region->end_pfn = end_pfn;
	list_add_tail(&region->list, &nosave_regions);
 Report:
	pr_info("Registered nosave memory: [mem %#010llx-%#010llx]\n",
		(unsigned long long) start_pfn << PAGE_SHIFT,
		((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
}

/*
 * Set bits in this map correspond to the page frames the contents of which
 * should not be saved during the suspend.
 */
static struct memory_bitmap *forbidden_pages_map;

/* Set bits in this map correspond to free page frames. */
static struct memory_bitmap *free_pages_map;

/*
 * Each page frame allocated for creating the image is marked by setting
 * the corresponding bits in forbidden_pages_map and free_pages_map
 * simultaneously.
 */

void swsusp_set_page_free(struct page *page)
{
	if (free_pages_map)
		memory_bm_set_bit(free_pages_map, page_to_pfn(page));
}

static int swsusp_page_is_free(struct page *page)
{
	return free_pages_map ?
		memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0;
}

void swsusp_unset_page_free(struct page *page)
{
	if (free_pages_map)
		memory_bm_clear_bit(free_pages_map, page_to_pfn(page));
}

static void swsusp_set_page_forbidden(struct page *page)
{
	if (forbidden_pages_map)
		memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page));
}

int swsusp_page_is_forbidden(struct page *page)
{
	return forbidden_pages_map ?
		memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0;
}

static void swsusp_unset_page_forbidden(struct page *page)
{
	if (forbidden_pages_map)
		memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
}

/**
 * mark_nosave_pages - Mark pages that should not be saved.
 * @bm: Memory bitmap.
 *
 * Set the bits in @bm that correspond to the page frames the contents of
 * which should not be saved.
 */
static void mark_nosave_pages(struct memory_bitmap *bm)
{
	struct nosave_region *region;

	if (list_empty(&nosave_regions))
		return;

	list_for_each_entry(region, &nosave_regions, list) {
		unsigned long pfn;

		pr_debug("Marking nosave pages: [mem %#010llx-%#010llx]\n",
			 (unsigned long long) region->start_pfn << PAGE_SHIFT,
			 ((unsigned long long) region->end_pfn << PAGE_SHIFT)
				- 1);

		for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
			if (pfn_valid(pfn)) {
				/*
				 * It is safe to ignore the result of
				 * mem_bm_set_bit_check() here, since we
				 * won't touch the PFNs for which the error
				 * is returned anyway.
				 */
				mem_bm_set_bit_check(bm, pfn);
			}
	}
}

/**
 * create_basic_memory_bitmaps - Create bitmaps to hold basic page
 * information.
 *
 * Create bitmaps needed for marking page frames that should not be saved
 * and free page frames.  The forbidden_pages_map and free_pages_map
 * pointers are only modified if everything goes well, because we don't
 * want the bits to be duplicated during the subsequent failing attempts
 * to create bitmaps.
 */
int create_basic_memory_bitmaps(void)
{
	struct memory_bitmap *bm1, *bm2;
	int error = 0;

	if (forbidden_pages_map && free_pages_map)
		return 0;
	else
		BUG_ON(forbidden_pages_map || free_pages_map);

	bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
	if (!bm1)
		return -ENOMEM;

	error = memory_bm_create(bm1, GFP_KERNEL, PG_ANY);
	if (error)
		goto Free_first_object;

	bm2 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
	if (!bm2)
		goto Free_first_bitmap;

	error = memory_bm_create(bm2, GFP_KERNEL, PG_ANY);
	if (error)
		goto Free_second_object;

	forbidden_pages_map = bm1;
	free_pages_map = bm2;
	mark_nosave_pages(forbidden_pages_map);

	pr_debug("Basic memory bitmaps created\n");

	return 0;

 Free_second_object:
	kfree(bm2);
 Free_first_bitmap:
	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
 Free_first_object:
	kfree(bm1);
	return -ENOMEM;
}

/**
 * free_basic_memory_bitmaps - Free memory bitmaps holding basic
 * information.
 *
 * Free memory bitmaps allocated by create_basic_memory_bitmaps().  The
 * auxiliary pointers are necessary so that the bitmaps themselves are not
 * referred to while they are being freed.
 */
void free_basic_memory_bitmaps(void)
{
	struct memory_bitmap *bm1, *bm2;

	if (WARN_ON(!(forbidden_pages_map && free_pages_map)))
		return;

	bm1 = forbidden_pages_map;
	bm2 = free_pages_map;
	forbidden_pages_map = NULL;
	free_pages_map = NULL;
	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
	kfree(bm1);
	memory_bm_free(bm2, PG_UNSAFE_CLEAR);
	kfree(bm2);

	pr_debug("Basic memory bitmaps freed\n");
}

void clear_free_pages(void)
{
#ifdef CONFIG_PAGE_POISONING_ZERO
	struct memory_bitmap *bm = free_pages_map;
	unsigned long pfn;

	if (WARN_ON(!(free_pages_map)))
		return;

	memory_bm_position_reset(bm);
	pfn = memory_bm_next_pfn(bm);
	while (pfn != BM_END_OF_MAP) {
		if (pfn_valid(pfn))
			clear_highpage(pfn_to_page(pfn));

		pfn = memory_bm_next_pfn(bm);
	}
	memory_bm_position_reset(bm);
	pr_info("free pages cleared after restore\n");
#endif
}

/**
 * snapshot_additional_pages - Estimate the number of extra pages needed.
 * @zone: Memory zone.
 *
 * Estimate the number of additional pages needed for setting up a
 * hibernation image data structures for @zone (usually, the returned
 * value is greater than the exact number).
 */
unsigned int snapshot_additional_pages(struct zone *zone)
{
	unsigned int rtree, nodes;

	rtree = nodes = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
	rtree += DIV_ROUND_UP(rtree * sizeof(struct rtree_node),
			      LINKED_PAGE_DATA_SIZE);
	while (nodes > 1) {
		nodes = DIV_ROUND_UP(nodes, BM_ENTRIES_PER_LEVEL);
		rtree += nodes;
	}

	return 2 * rtree;
}

#ifdef CONFIG_HIGHMEM
/**
 * count_free_highmem_pages - Compute the total number of free highmem
 * pages.
 *
 * The returned number is system-wide.
 */
static unsigned int count_free_highmem_pages(void)
{
	struct zone *zone;
	unsigned int cnt = 0;

	for_each_populated_zone(zone)
		if (is_highmem(zone))
			cnt += zone_page_state(zone, NR_FREE_PAGES);

	return cnt;
}

/**
 * saveable_highmem_page - Check if a highmem page is saveable.
 *
 * Determine whether a highmem page should be included in a hibernation
 * image.
 *
 * We should save the page if it isn't Nosave or NosaveFree, or Reserved,
 * and it isn't part of a free chunk of pages.
 */
static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_page(pfn);
	if (page_zone(page) != zone)
		return NULL;

	BUG_ON(!PageHighMem(page));

	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page) ||
	    PageReserved(page))
		return NULL;

	if (page_is_guard(page))
		return NULL;

	return page;
}

/**
 * count_highmem_pages - Compute the total number of saveable highmem
 * pages.
 */
static unsigned int count_highmem_pages(void)
{
	struct zone *zone;
	unsigned int n = 0;

	for_each_populated_zone(zone) {
		unsigned long pfn, max_zone_pfn;

		if (!is_highmem(zone))
			continue;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_highmem_page(zone, pfn))
				n++;
	}
	return n;
}
#else
static inline void *saveable_highmem_page(struct zone *z, unsigned long p)
{
	return NULL;
}
#endif

/**
 * saveable_page - Check if the given page is saveable.
 *
 * Determine whether a non-highmem page should be included in a hibernation
 * image.
 *
 * We should save the page if it isn't Nosave, and is not in the range of
 * pages statically defined as 'unsaveable', and it isn't part of a free
 * chunk of pages.
 */
static struct page *saveable_page(struct zone *zone, unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_page(pfn);
	if (page_zone(page) != zone)
		return NULL;

	BUG_ON(PageHighMem(page));

	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
		return NULL;

	if (PageReserved(page)
	    && (!kernel_page_present(page) || pfn_is_nosave(pfn)))
		return NULL;

	if (page_is_guard(page))
		return NULL;

	return page;
}

/**
 * count_data_pages - Compute the total number of saveable non-highmem
 * pages.
 */
static unsigned int count_data_pages(void)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;
	unsigned int n = 0;

	for_each_populated_zone(zone) {
		if (is_highmem(zone))
			continue;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_page(zone, pfn))
				n++;
	}
	return n;
}

/*
 * This is needed, because copy_page and memcpy are not usable for copying
 * task structs.
 */
static inline void do_copy_page(long *dst, long *src)
{
	int n;

	for (n = PAGE_SIZE / sizeof(long); n; n--)
		*dst++ = *src++;
}

/**
 * safe_copy_page - Copy a page in a safe way.
 *
 * Check if the page we are going to copy is marked as present in the
 * kernel page tables.  This always is the case if CONFIG_DEBUG_PAGEALLOC
 * or CONFIG_ARCH_HAS_SET_MEMORY is not set; in that case
 * kernel_page_present() always returns 'true'.
 */
static void safe_copy_page(void *dst, struct page *s_page)
{
	if (kernel_page_present(s_page)) {
		do_copy_page(dst, page_address(s_page));
	} else {
		kernel_map_pages(s_page, 1, 1);
		do_copy_page(dst, page_address(s_page));
		kernel_map_pages(s_page, 1, 0);
	}
}

#ifdef CONFIG_HIGHMEM
static inline struct page *page_is_saveable(struct zone *zone, unsigned long pfn)
{
	return is_highmem(zone) ?
		saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
}

static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
	struct page *s_page, *d_page;
	void *src, *dst;

	s_page = pfn_to_page(src_pfn);
	d_page = pfn_to_page(dst_pfn);
	if (PageHighMem(s_page)) {
		src = kmap_atomic(s_page);
		dst = kmap_atomic(d_page);
		do_copy_page(dst, src);
		kunmap_atomic(dst);
		kunmap_atomic(src);
	} else {
		if (PageHighMem(d_page)) {
			/*
			 * The page pointed to by src may contain some kernel
			 * data modified by kmap_atomic()
			 */
			safe_copy_page(buffer, s_page);
			dst = kmap_atomic(d_page);
			copy_page(dst, buffer);
			kunmap_atomic(dst);
		} else {
			safe_copy_page(page_address(d_page), s_page);
		}
	}
}
#else
#define page_is_saveable(zone, pfn)	saveable_page(zone, pfn)

static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
	safe_copy_page(page_address(pfn_to_page(dst_pfn)),
		       pfn_to_page(src_pfn));
}
#endif

static void copy_data_pages(struct memory_bitmap *copy_bm,
			    struct memory_bitmap *orig_bm)
{
	struct zone *zone;
	unsigned long pfn;

	for_each_populated_zone(zone) {
		unsigned long max_zone_pfn;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (page_is_saveable(zone, pfn))
				memory_bm_set_bit(orig_bm, pfn);
	}
	memory_bm_position_reset(orig_bm);
	memory_bm_position_reset(copy_bm);
	for (;;) {
		pfn = memory_bm_next_pfn(orig_bm);
		if (unlikely(pfn == BM_END_OF_MAP))
			break;
		copy_data_page(memory_bm_next_pfn(copy_bm), pfn);
	}
}

/* Total number of image pages */
static unsigned int nr_copy_pages;
/* Number of pages needed for saving the original PFNs of the image pages */
static unsigned int nr_meta_pages;
/*
 * Numbers of normal and highmem page frames allocated for hibernation
 * image before suspending devices.
 */
static unsigned int alloc_normal, alloc_highmem;
/*
 * Memory bitmap used for marking saveable pages (during hibernation) or
 * hibernation image pages (during restore).
 */
static struct memory_bitmap orig_bm;
/*
 * Memory bitmap used during hibernation for marking allocated pages that
 * will contain the copies of saveable pages.  During restore it is
 * initially used for marking hibernation image pages, but then the set
 * bits from it are duplicated in orig_bm and it is released.  On highmem
 * systems it is next used for marking "safe" highmem pages, but it has to
 * be reinitialized for this purpose.
 */
static struct memory_bitmap copy_bm;

/**
 * swsusp_free - Free pages allocated for hibernation image.
 *
 * Image pages are allocated before snapshot creation, so they need to be
 * released after resume.
 */
void swsusp_free(void)
{
	unsigned long fb_pfn, fr_pfn;

	if (!forbidden_pages_map || !free_pages_map)
		goto out;

	memory_bm_position_reset(forbidden_pages_map);
	memory_bm_position_reset(free_pages_map);

loop:
	fr_pfn = memory_bm_next_pfn(free_pages_map);
	fb_pfn = memory_bm_next_pfn(forbidden_pages_map);

	/*
	 * Find the next bit set in both bitmaps.  This is guaranteed to
	 * terminate when fb_pfn == fr_pfn == BM_END_OF_MAP.
	 */
	do {
		if (fb_pfn < fr_pfn)
			fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
		if (fr_pfn < fb_pfn)
			fr_pfn = memory_bm_next_pfn(free_pages_map);
	} while (fb_pfn != fr_pfn);

	if (fr_pfn != BM_END_OF_MAP && pfn_valid(fr_pfn)) {
		struct page *page = pfn_to_page(fr_pfn);

		memory_bm_clear_current(forbidden_pages_map);
		memory_bm_clear_current(free_pages_map);
		hibernate_restore_unprotect_page(page_address(page));
		__free_page(page);
		goto loop;
	}

out:
	nr_copy_pages = 0;
	nr_meta_pages = 0;
	restore_pblist = NULL;
	buffer = NULL;
	alloc_normal = 0;
	alloc_highmem = 0;
	hibernate_restore_protection_end();
}

/* Helper functions used for the shrinking of memory. */

#define GFP_IMAGE	(GFP_KERNEL | __GFP_NOWARN)

/**
 * preallocate_image_pages - Allocate a number of pages for hibernation
 * image.
 * @nr_pages: Number of page frames to allocate.
 * @mask: GFP flags to use for the allocation.
 *
 * Return value: Number of page frames actually allocated.
 */
static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
{
	unsigned long nr_alloc = 0;

	while (nr_pages > 0) {
		struct page *page;

		page = alloc_image_page(mask);
		if (!page)
			break;
		memory_bm_set_bit(&copy_bm, page_to_pfn(page));
		if (PageHighMem(page))
			alloc_highmem++;
		else
			alloc_normal++;
		nr_pages--;
		nr_alloc++;
	}

	return nr_alloc;
}

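/*
 * preallocate_image_memory - Allocate normal (lowmem) pages for the image,
 * limited by the number of normal pages still available (@avail_normal)
 * beyond what has already been allocated.
 */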
static unsigned long preallocate_image_memory(unsigned long nr_pages,
					      unsigned long avail_normal)
{
	unsigned long alloc;

	if (avail_normal <= alloc_normal)
		return 0;

	alloc = avail_normal - alloc_normal;
	if (nr_pages < alloc)
		alloc = nr_pages;

	return preallocate_image_pages(alloc, GFP_IMAGE);
}

#ifdef CONFIG_HIGHMEM
static unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
	return preallocate_image_pages(nr_pages, GFP_IMAGE | __GFP_HIGHMEM);
}

/*
 * __fraction - Compute (an approximation of) x * (multiplier / base).
 */
static unsigned long __fraction(u64 x, u64 multiplier, u64 base)
{
	x *= multiplier;
	do_div(x, base);
	return (unsigned long)x;
}

static unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
						  unsigned long highmem,
						  unsigned long total)
{
	unsigned long alloc = __fraction(nr_pages, highmem, total);

	return preallocate_image_pages(alloc, GFP_IMAGE | __GFP_HIGHMEM);
}
#else
static inline unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
	return 0;
}

static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
							 unsigned long highmem,
							 unsigned long total)
{
	return 0;
}
#endif

/*
 * free_unnecessary_pages - Release preallocated pages not needed for the
 * image.
 */
static unsigned long free_unnecessary_pages(void)
{
	unsigned long save, to_free_normal, to_free_highmem, free;

	save = count_data_pages();
	if (alloc_normal >= save) {
		to_free_normal = alloc_normal - save;
		save = 0;
	} else {
		to_free_normal = 0;
		save -= alloc_normal;
	}
	save += count_highmem_pages();
	if (alloc_highmem >= save) {
		to_free_highmem = alloc_highmem - save;
	} else {
		to_free_highmem = 0;
		save -= alloc_highmem;
		if (to_free_normal > save)
			to_free_normal -= save;
		else
			to_free_normal = 0;
	}
	free = to_free_normal + to_free_highmem;

	memory_bm_position_reset(&copy_bm);

	while (to_free_normal > 0 || to_free_highmem > 0) {
		unsigned long pfn = memory_bm_next_pfn(&copy_bm);
		struct page *page = pfn_to_page(pfn);

		if (PageHighMem(page)) {
			if (!to_free_highmem)
				continue;
			to_free_highmem--;
			alloc_highmem--;
		} else {
			if (!to_free_normal)
				continue;
			to_free_normal--;
			alloc_normal--;
		}
		memory_bm_clear_bit(&copy_bm, pfn);
		swsusp_unset_page_forbidden(page);
		swsusp_unset_page_free(page);
		__free_page(page);
	}

	return free;
}

/**
 * minimum_image_size - Estimate the minimum acceptable size of an image.
 * @saveable: Number of saveable pages in the system.
 *
 * We want to avoid attempting to free too much memory too hard, so
 * estimate the minimum acceptable size of a hibernation image to use as
 * the lower limit for preallocating memory.
 *
 * We assume that the minimum image size should be proportional to
 *
 * [number of saveable pages] - [number of pages that can be freed in theory]
 *
 * where the second term is the sum of (1) reclaimable slab pages, (2)
 * active and (3) inactive anonymous pages, (4) active and (5) inactive
 * file pages.
 */
static unsigned long minimum_image_size(unsigned long saveable)
{
	unsigned long size;

	size = global_node_page_state(NR_SLAB_RECLAIMABLE)
		+ global_node_page_state(NR_ACTIVE_ANON)
		+ global_node_page_state(NR_INACTIVE_ANON)
		+ global_node_page_state(NR_ACTIVE_FILE)
		+ global_node_page_state(NR_INACTIVE_FILE);

	return saveable <= size ? 0 : saveable - size;
}

/**
 * hibernate_preallocate_memory - Preallocate memory for hibernation image.
 *
 * To create a hibernation image it is necessary to make a copy of every
 * page frame in use.  We also need a number of page frames to be free
 * during hibernation for allocations made while saving the image and for
 * device drivers, in case they need to allocate memory from their
 * hibernation callbacks (these two numbers are given by PAGES_FOR_IO,
 * which is a rough estimate, and reserved_size divided by PAGE_SIZE,
 * which is tunable through /sys/power/reserved_size, respectively).  To
 * make this happen, we compute the total number of available page frames
 * and preallocate up to max_size of them, which corresponds to the
 * maximum size of a hibernation image.
 *
 * If image_size is set below the number following from the above formula,
 * the preallocation of memory is continued until the total number of
 * saveable pages in the system is below the requested image size or the
 * minimum acceptable image size returned by minimum_image_size(),
 * whichever is greater.
 */
int hibernate_preallocate_memory(void)
{
	struct zone *zone;
	unsigned long saveable, size, max_size, count, highmem, pages = 0;
	unsigned long alloc, save_highmem, pages_highmem, avail_normal;
	ktime_t start, stop;
	int error;

	pr_info("Preallocating image memory... ");
	start = ktime_get();

	error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY);
	if (error)
		goto err_out;

	error = memory_bm_create(&copy_bm, GFP_IMAGE, PG_ANY);
	if (error)
		goto err_out;

	alloc_normal = 0;
	alloc_highmem = 0;

	/* Count the number of saveable data pages. */
	save_highmem = count_highmem_pages();
	saveable = count_data_pages();

	/*
	 * Compute the total number of page frames we can use (count) and
	 * the number of pages needed for image metadata (size).
	 */
	count = saveable;
	saveable += save_highmem;
	highmem = save_highmem;
	size = 0;
	for_each_populated_zone(zone) {
		size += snapshot_additional_pages(zone);
		if (is_highmem(zone))
			highmem += zone_page_state(zone, NR_FREE_PAGES);
		else
			count += zone_page_state(zone, NR_FREE_PAGES);
	}
	avail_normal = count;
	count += highmem;
	count -= totalreserve_pages;

	/* Add number of pages required for page keys (s390 only). */
	size += page_key_additional_pages(saveable);

	/* Compute the maximum number of saveable pages to leave in memory. */
	max_size = (count - (size + PAGES_FOR_IO)) / 2
			- 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE);
	/* Compute the desired number of image pages specified by image_size. */
	size = DIV_ROUND_UP(image_size, PAGE_SIZE);
	if (size > max_size)
		size = max_size;
	/*
	 * If the desired number of image pages is at least as large as the
	 * current number of saveable pages in memory, allocate page frames
	 * for the image and we're done.
	 */
	if (size >= saveable) {
		pages = preallocate_image_highmem(save_highmem);
		pages += preallocate_image_memory(saveable - pages, avail_normal);
		goto out;
	}

	/* Estimate the minimum size of the image. */
	pages = minimum_image_size(saveable);
	/*
	 * To avoid excessive pressure on the normal zone, leave room in it
	 * to accommodate an image of the minimum size (unless it's already
	 * too small, in which case don't preallocate pages from it at all).
	 */
	if (avail_normal > pages)
		avail_normal -= pages;
	else
		avail_normal = 0;
	if (size < pages)
		size = min_t(unsigned long, pages, max_size);

	/*
	 * Let the memory management subsystem know that we're going to
	 * need a large number of page frames to allocate and make it free
	 * some memory.  NOTE: If this is not done, performance will be hurt
	 * badly in some test cases.
	 */
	shrink_all_memory(saveable - size);

	/*
	 * The number of saveable pages in memory was too high, so apply
	 * some pressure to decrease it.  First, make room for the largest
	 * possible image and fail if that doesn't work.  Next, try to
	 * decrease the size of the image as much as indicated by 'size'
	 * using allocations from highmem and non-highmem zones separately.
	 */
	pages_highmem = preallocate_image_highmem(highmem / 2);
	alloc = count - max_size;
	if (alloc > pages_highmem)
		alloc -= pages_highmem;
	else
		alloc = 0;
	pages = preallocate_image_memory(alloc, avail_normal);
	if (pages < alloc) {
		/* We have exhausted non-highmem pages, try highmem. */
		alloc -= pages;
		pages += pages_highmem;
		pages_highmem = preallocate_image_highmem(alloc);
		if (pages_highmem < alloc)
			goto err_out;
		pages += pages_highmem;
		/*
		 * size is the desired number of saveable pages to leave in
		 * memory, so try to preallocate (all memory - size) pages.
		 */
		alloc = (count - pages) - size;
		pages += preallocate_image_highmem(alloc);
	} else {
		/*
		 * There are approximately max_size saveable pages at this
		 * point and we cannot reduce the number of saveable pages
		 * further by allocating more non-highmem memory, so
		 * preallocate the remainder from highmem and non-highmem
		 * proportionally.
		 */
		alloc = max_size - size;
		size = preallocate_highmem_fraction(alloc, highmem, count);
		pages_highmem += size;
		alloc -= size;
		size = preallocate_image_memory(alloc, avail_normal);
		pages_highmem += preallocate_image_highmem(alloc - size);
		pages += pages_highmem + size;
	}

	/*
	 * We only need as many page frames for the image as there are
	 * saveable pages in memory, but we have allocated more.  Release
	 * the excessive ones now.
	 */
	pages -= free_unnecessary_pages();

 out:
	stop = ktime_get();
	pr_cont("done (allocated %lu pages)\n", pages);
	swsusp_show_speed(start, stop, pages, "Allocated");

	return 0;

 err_out:
	pr_cont("\n");
	swsusp_free();
	return -ENOMEM;
}

#ifdef CONFIG_HIGHMEM
/**
 * count_pages_for_highmem - Count non-highmem pages needed for copying
 * highmem.
 *
 * Compute the number of non-highmem pages that will be necessary for
 * creating copies of highmem pages.
 */
static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
{
	unsigned int free_highmem = count_free_highmem_pages() + alloc_highmem;

	if (free_highmem >= nr_highmem)
		nr_highmem = 0;
	else
		nr_highmem -= free_highmem;

	return nr_highmem;
}
#else
static unsigned int count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
#endif

/**
 * enough_free_mem - Check if there is enough free memory for the image.
 */
static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
{
	struct zone *zone;
	unsigned int free = alloc_normal;

	for_each_populated_zone(zone)
		if (!is_highmem(zone))
			free += zone_page_state(zone, NR_FREE_PAGES);

	nr_pages += count_pages_for_highmem(nr_highmem);
	pr_debug("Normal pages needed: %u + %u, available pages: %u\n",
		 nr_pages, PAGES_FOR_IO, free);

	return free > nr_pages + PAGES_FOR_IO;
}

#ifdef CONFIG_HIGHMEM
/**
 * get_highmem_buffer - Allocate a buffer for highmem pages.
 *
 * If there are some highmem pages in the hibernation image, we may need
 * a buffer to copy them and/or load their data.
 */
static inline int get_highmem_buffer(int safe_needed)
{
	buffer = get_image_page(GFP_ATOMIC, safe_needed);
	return buffer ? 0 : -ENOMEM;
}

/**
 * alloc_highmem_pages - Allocate some highmem pages for the image.
 *
 * Try to allocate as many pages as needed, but if the number of free
 * highmem pages is less than that, allocate them all.
 */
static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
					       unsigned int nr_highmem)
{
	unsigned int to_alloc = count_free_highmem_pages();

	if (to_alloc > nr_highmem)
		to_alloc = nr_highmem;

	nr_highmem -= to_alloc;
	while (to_alloc-- > 0) {
		struct page *page;

		page = alloc_image_page(__GFP_HIGHMEM|__GFP_KSWAPD_RECLAIM);
		memory_bm_set_bit(bm, page_to_pfn(page));
	}
	return nr_highmem;
}
#else
static inline int get_highmem_buffer(int safe_needed) { return 0; }

static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
					       unsigned int n) { return 0; }
#endif

/**
 * swsusp_alloc - Allocate memory for hibernation image.
 *
 * We first try to allocate as many highmem pages as there are saveable
 * highmem pages in the system.  If that fails, we allocate non-highmem
 * pages for the copies of the remaining highmem ones.
 *
 * In this approach it is likely that the copies of highmem pages will
 * also be located in the high memory, because of the way in which
 * copy_data_pages() works.
 */
static int swsusp_alloc(struct memory_bitmap *copy_bm,
			unsigned int nr_pages, unsigned int nr_highmem)
{
	if (nr_highmem > 0) {
		if (get_highmem_buffer(PG_ANY))
			goto err_out;
		if (nr_highmem > alloc_highmem) {
			nr_highmem -= alloc_highmem;
			nr_pages += alloc_highmem_pages(copy_bm, nr_highmem);
		}
	}
	if (nr_pages > alloc_normal) {
		nr_pages -= alloc_normal;
		while (nr_pages-- > 0) {
			struct page *page;

			page = alloc_image_page(GFP_ATOMIC);
			if (!page)
				goto err_out;
			memory_bm_set_bit(copy_bm, page_to_pfn(page));
		}
	}

	return 0;

 err_out:
	swsusp_free();
	return -ENOMEM;
}

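/*
 * swsusp_save - Create the hibernation image: count the saveable pages,
 * verify and allocate enough memory for their copies, and copy the data.
 */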
asmlinkage __visible int swsusp_save(void)
{
	unsigned int nr_pages, nr_highmem;

	pr_info("Creating hibernation image:\n");

	drain_local_pages(NULL);
	nr_pages = count_data_pages();
	nr_highmem = count_highmem_pages();
	pr_info("Need to copy %u pages\n", nr_pages + nr_highmem);

	if (!enough_free_mem(nr_pages, nr_highmem)) {
		pr_err("Not enough free memory\n");
		return -ENOMEM;
	}

	if (swsusp_alloc(&copy_bm, nr_pages, nr_highmem)) {
		pr_err("Memory allocation failed\n");
		return -ENOMEM;
	}

	/*
	 * During allocating of suspend pagedir, new cold pages may appear.
	 * Kill them.
	 */
	drain_local_pages(NULL);
	copy_data_pages(&copy_bm, &orig_bm);

	/*
	 * End of critical section.  From now on, we can write to memory,
	 * but we should not touch disk.  This specially means we must _not_
	 * touch swap space!  Except we must write out our image of course.
	 */
	nr_pages += nr_highmem;
	nr_copy_pages = nr_pages;
	nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);

	pr_info("Hibernation image created (%d pages copied)\n", nr_pages);

	return 0;
}

#ifndef CONFIG_ARCH_HIBERNATION_HEADER
static int init_header_complete(struct swsusp_info *info)
{
	memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
	info->version_code = LINUX_VERSION_CODE;
	return 0;
}

static char *check_image_kernel(struct swsusp_info *info)
{
	if (info->version_code != LINUX_VERSION_CODE)
		return "kernel version";
	if (strcmp(info->uts.sysname, init_utsname()->sysname))
		return "system type";
	if (strcmp(info->uts.release, init_utsname()->release))
		return "kernel release";
	if (strcmp(info->uts.version, init_utsname()->version))
		return "version";
	if (strcmp(info->uts.machine, init_utsname()->machine))
		return "machine";
	return NULL;
}
#endif

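/*
 * snapshot_get_image_size - Total image size in pages: the copied data
 * pages, the metadata pages holding their original PFNs, plus one page
 * for the image header.
 */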
unsigned long snapshot_get_image_size(void)
{
	return nr_copy_pages + nr_meta_pages + 1;
}

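/*
 * init_header - Fill in the image header: page counts, total size in
 * bytes, number of physical pages, and the kernel identification used by
 * check_image_kernel() on restore.
 */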
static int init_header(struct swsusp_info *info)
{
	memset(info, 0, sizeof(struct swsusp_info));
	info->num_physpages = get_num_physpages();
	info->image_pages = nr_copy_pages;
	info->pages = snapshot_get_image_size();
	info->size = info->pages;
	info->size <<= PAGE_SHIFT;
	return init_header_complete(info);
}

/**
 * pack_pfns - Prepare PFNs for saving.
 * @buf: Memory buffer to store the PFNs in.
 * @bm: Memory bitmap.
 *
 * PFNs corresponding to set bits in @bm are stored in the area of memory
 * pointed to by @buf (1 page at a time).
 */
static inline void pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
		buf[j] = memory_bm_next_pfn(bm);
		if (unlikely(buf[j] == BM_END_OF_MAP))
			break;
		/* Save page key for data page (s390 only). */
		page_key_read(buf + j);
	}
}

/**
 * snapshot_read_next - Get the address to read the next image page from.
 * @handle: Snapshot handle to be used for the reading.
 *
 * On the first call, @handle should point to a zeroed snapshot_handle
 * structure.  The structure gets populated then and a pointer to it should
 * be passed to this function every next time.
 *
 * On success, the function returns a positive number.  Then, the caller
 * is allowed to read up to the returned number of bytes from the memory
 * location computed by the data_of() macro.
 *
 * The function returns 0 to indicate the end of the data stream condition,
 * and negative numbers are returned on errors.  If that happens, the
 * structure pointed to by @handle is not updated and should not be used
 * any more.
 */
int snapshot_read_next(struct snapshot_handle *handle)
{
	if (handle->cur > nr_meta_pages + nr_copy_pages)
		return 0;

	if (!buffer) {
		/* This makes the buffer be freed by swsusp_free() */
		buffer = get_image_page(GFP_ATOMIC, PG_ANY);
		if (!buffer)
			return -ENOMEM;
	}
	if (!handle->cur) {
		int error;

		error = init_header((struct swsusp_info *)buffer);
		if (error)
			return error;
		handle->buffer = buffer;
		memory_bm_position_reset(&orig_bm);
		memory_bm_position_reset(&copy_bm);
	} else if (handle->cur <= nr_meta_pages) {
		clear_page(buffer);
		pack_pfns(buffer, &orig_bm);
	} else {
		struct page *page;

		page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
		if (PageHighMem(page)) {
			/*
			 * Highmem pages are copied to the buffer, because
			 * we can't return with a kmapped highmem page
			 * (we may not be called again).
			 */
			void *kaddr;

			kaddr = kmap_atomic(page);
			copy_page(buffer, kaddr);
			kunmap_atomic(kaddr);
			handle->buffer = buffer;
		} else {
			handle->buffer = page_address(page);
		}
	}
	handle->cur++;
	return PAGE_SIZE;
}

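/*
 * duplicate_memory_bitmap - Set in @dst every bit that is set in @src.
 */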
static void duplicate_memory_bitmap(struct memory_bitmap *dst,
				    struct memory_bitmap *src)
{
	unsigned long pfn;

	memory_bm_position_reset(src);
	pfn = memory_bm_next_pfn(src);
	while (pfn != BM_END_OF_MAP) {
		memory_bm_set_bit(dst, pfn);
		pfn = memory_bm_next_pfn(src);
	}
}

/**
 * mark_unsafe_pages - Mark pages that were used before hibernation.
 *
 * Mark the pages that cannot be used for storing the image during
 * restoration, because they conflict with the pages that had been used
 * before hibernation.
 */
static void mark_unsafe_pages(struct memory_bitmap *bm)
{
	unsigned long pfn;

	/* Clear the "free"/"unsafe" bit for all PFNs */
	memory_bm_position_reset(free_pages_map);
	pfn = memory_bm_next_pfn(free_pages_map);
	while (pfn != BM_END_OF_MAP) {
		memory_bm_clear_current(free_pages_map);
		pfn = memory_bm_next_pfn(free_pages_map);
	}

	/* Mark pages that correspond to the "original" PFNs as "unsafe" */
	duplicate_memory_bitmap(free_pages_map, bm);

	allocated_unsafe_pages = 0;
}

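/*
 * check_header - Verify that the image was created by a compatible kernel
 * on a machine with the same amount of memory.
 */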
static int check_header(struct swsusp_info *info)
{
	char *reason;

	reason = check_image_kernel(info);
	if (!reason && info->num_physpages != get_num_physpages())
		reason = "memory size";
	if (reason) {
		pr_err("Image mismatch: %s\n", reason);
		return -EPERM;
	}
	return 0;
}

/**
 * load_header - Check the image header and copy the data from it.
 */
static int load_header(struct swsusp_info *info)
{
	int error;

	restore_pblist = NULL;
	error = check_header(info);
	if (!error) {
		nr_copy_pages = info->image_pages;
		nr_meta_pages = info->pages - info->image_pages - 1;
	}
	return error;
}

/**
 * unpack_orig_pfns - Set bits corresponding to given PFNs in a memory
 * bitmap.
 * @buf: Area of memory containing the PFNs.
 * @bm: Memory bitmap.
 *
 * For each element of the array pointed to by @buf (1 page at a time),
 * set the corresponding bit in @bm.
 */
static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
		if (unlikely(buf[j] == BM_END_OF_MAP))
			break;

		/* Extract and buffer page key for data page (s390 only). */
		page_key_memorize(buf + j);

		if (pfn_valid(buf[j]) && memory_bm_pfn_present(bm, buf[j]))
			memory_bm_set_bit(bm, buf[j]);
		else
			return -EFAULT;
	}

	return 0;
}

#ifdef CONFIG_HIGHMEM
/*
 * struct highmem_pbe is used for creating the list of highmem pages that
 * should be restored atomically during the resume from disk, because the
 * page frames they have occupied before the suspend are in use.
 */
struct highmem_pbe {
	struct page *copy_page;	/* data is here now */
	struct page *orig_page;	/* data was here before the suspend */
	struct highmem_pbe *next;
};

/*
 * List of highmem PBEs needed for restoring the highmem pages that were
 * allocated before the suspend and included in the suspend image, but have
 * also been allocated by the "resume" kernel, so their contents cannot be
 * written directly to their "original" page frames.
 */
static struct highmem_pbe *highmem_pblist;

/**
 * count_highmem_image_pages - Compute the number of highmem pages in the
 * image.
 * @bm: Memory bitmap.
 *
 * The bits in @bm that correspond to image pages are assumed to be set.
 */
static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
{
	unsigned long pfn;
	unsigned int cnt = 0;

	memory_bm_position_reset(bm);
	pfn = memory_bm_next_pfn(bm);
	while (pfn != BM_END_OF_MAP) {
		if (PageHighMem(pfn_to_page(pfn)))
			cnt++;

		pfn = memory_bm_next_pfn(bm);
	}
	return cnt;
}

static unsigned int safe_highmem_pages;

static struct memory_bitmap *safe_highmem_bm;

/**
 * prepare_highmem_image - Allocate memory for loading highmem data from
 * image.
 * @bm: Pointer to an uninitialized memory bitmap structure.
 * @nr_highmem_p: Pointer to the number of highmem image pages.
 *
 * Try to allocate as many highmem pages as there are highmem image pages
 * (@nr_highmem_p points to the variable containing the number of highmem
 * image pages).  The pages that are "safe" (ie. will not be overwritten
 * when the hibernation image is restored entirely) have the corresponding
 * bits set in @bm (it must be uninitialized).
 *
 * NOTE: This function should not be called if there are no highmem image
 * pages.
 */
static int prepare_highmem_image(struct memory_bitmap *bm,
				 unsigned int *nr_highmem_p)
{
	unsigned int to_alloc;

	if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
		return -ENOMEM;

	if (get_highmem_buffer(PG_SAFE))
		return -ENOMEM;

	to_alloc = count_free_highmem_pages();
	if (to_alloc > *nr_highmem_p)
		to_alloc = *nr_highmem_p;
	else
		*nr_highmem_p = to_alloc;

	safe_highmem_pages = 0;
	while (to_alloc-- > 0) {
		struct page *page;

		page = alloc_page(__GFP_HIGHMEM);
		if (!swsusp_page_is_free(page)) {
			/* The page is "safe", set its bit in the bitmap */
			memory_bm_set_bit(bm, page_to_pfn(page));
			safe_highmem_pages++;
		}
		/* Mark the page as allocated */
		swsusp_set_page_forbidden(page);
		swsusp_set_page_free(page);
	}
	memory_bm_position_reset(bm);
	safe_highmem_bm = bm;
	return 0;
}

static struct page *last_highmem_page;

/**
 * get_highmem_page_buffer - Prepare a buffer to store a highmem image
 * page.
 *
 * If the page is to be saved to its "original" page frame or a copy of the
 * page is to be made in a highmem page frame, the copy will be stored in
 * the buffer returned and the page will be copied to its "original" page
 * frame from there later, after the "original" page frame has been
 * kmapped.
 *
 * If the page is to be saved to a "safe" normal page, its address is
 * returned and the data will be copied there directly.
 */
static void *get_highmem_page_buffer(struct page *page,
				     struct chain_allocator *ca)
{
	struct highmem_pbe *pbe;
	void *kaddr;

	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) {
		/*
		 * We have allocated the "original" page frame and we can
		 * use it directly to store the loaded page.
		 */
		last_highmem_page = page;
		return buffer;
	}
	/*
	 * The "original" page frame has not been allocated and we have to
	 * use a "safe" page frame to store the loaded page.
	 */
	pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
	if (!pbe) {
		swsusp_free();
		return ERR_PTR(-ENOMEM);
	}
	pbe->orig_page = page;
	if (safe_highmem_pages > 0) {
		struct page *tmp;

		/* Copy of the page will be stored in high memory */
		kaddr = buffer;
		tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm));
		safe_highmem_pages--;
		last_highmem_page = tmp;
		pbe->copy_page = tmp;
	} else {
		/* Copy of the page will be stored in normal memory */
		kaddr = safe_pages_list;
		safe_pages_list = safe_pages_list->next;
		pbe->copy_page = virt_to_page(kaddr);
	}
	pbe->next = highmem_pblist;
	highmem_pblist = pbe;
	return kaddr;
}

/**
 * copy_last_highmem_page - Copy the most recent highmem image page.
 *
 * Copy the contents of a highmem image from @buffer, where the caller of
 * snapshot_write_next() has placed them, to the right location represented
 * by @last_highmem_page.
 */
static void copy_last_highmem_page(void)
{
	if (last_highmem_page) {
		void *dst;

		dst = kmap_atomic(last_highmem_page);
		copy_page(dst, buffer);
		kunmap_atomic(dst);
		last_highmem_page = NULL;
	}
}

static inline int last_highmem_page_copied(void)
{
	return !last_highmem_page;
}

static inline void free_highmem_data(void)
{
	if (safe_highmem_bm)
		memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR);

	if (buffer)
		free_image_page(buffer, PG_UNSAFE_CLEAR);
}
#else
static unsigned int count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }

static inline int prepare_highmem_image(struct memory_bitmap *bm,
					unsigned int *nr_highmem_p) { return 0; }

static inline void *get_highmem_page_buffer(struct page *page,
					    struct chain_allocator *ca)
{
	return ERR_PTR(-EINVAL);
}

static inline void copy_last_highmem_page(void) {}
static inline int last_highmem_page_copied(void) { return 1; }
static inline void free_highmem_data(void) {}
#endif

2429
2430#define PBES_PER_LINKED_PAGE (LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))
2431
2432
2433
2434
2435
2436
2437
2438
2439
2440
2441
2442
2443
2444
2445
2446
2447static int prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
2448{
2449 unsigned int nr_pages, nr_highmem;
2450 struct linked_page *lp;
2451 int error;
2452
2453
2454 free_image_page(buffer, PG_UNSAFE_CLEAR);
2455 buffer = NULL;
2456
2457 nr_highmem = count_highmem_image_pages(bm);
2458 mark_unsafe_pages(bm);
2459
2460 error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
2461 if (error)
2462 goto Free;
2463
2464 duplicate_memory_bitmap(new_bm, bm);
2465 memory_bm_free(bm, PG_UNSAFE_KEEP);
2466 if (nr_highmem > 0) {
2467 error = prepare_highmem_image(bm, &nr_highmem);
2468 if (error)
2469 goto Free;
2470 }
2471
2472
2473
2474
2475
2476
2477
2478
2479
2480 nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
2481 nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
2482 while (nr_pages > 0) {
2483 lp = get_image_page(GFP_ATOMIC, PG_SAFE);
2484 if (!lp) {
2485 error = -ENOMEM;
2486 goto Free;
2487 }
2488 lp->next = safe_pages_list;
2489 safe_pages_list = lp;
2490 nr_pages--;
2491 }
2492
2493 nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
2494 while (nr_pages > 0) {
2495 lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
2496 if (!lp) {
2497 error = -ENOMEM;
2498 goto Free;
2499 }
2500 if (!swsusp_page_is_free(virt_to_page(lp))) {
2501
2502 lp->next = safe_pages_list;
2503 safe_pages_list = lp;
2504 }
2505
2506 swsusp_set_page_forbidden(virt_to_page(lp));
2507 swsusp_set_page_free(virt_to_page(lp));
2508 nr_pages--;
2509 }
2510 return 0;
2511
2512 Free:
2513 swsusp_free();
2514 return error;
2515}
/**
 * get_buffer - Get the address to store the next image data page.
 *
 * Get the address that snapshot_write_next() should return to its caller
 * to write to.
 */
static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
{
	struct pbe *pbe;
	struct page *page;
	unsigned long pfn = memory_bm_next_pfn(bm);

	if (pfn == BM_END_OF_MAP)
		return ERR_PTR(-EFAULT);

	page = pfn_to_page(pfn);
	if (PageHighMem(page))
		return get_highmem_page_buffer(page, ca);

	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page))
		/*
		 * We have allocated the "original" page frame and we can
		 * use it directly to store the loaded page.
		 */
		return page_address(page);

	/*
	 * The "original" page frame has not been allocated and we have to
	 * use a "safe" page frame to store the loaded page.
	 */
	pbe = chain_alloc(ca, sizeof(struct pbe));
	if (!pbe) {
		swsusp_free();
		return ERR_PTR(-ENOMEM);
	}
	pbe->orig_address = page_address(page);
	pbe->address = safe_pages_list;
	safe_pages_list = safe_pages_list->next;
	pbe->next = restore_pblist;
	restore_pblist = pbe;
	return pbe->address;
}

/**
 * snapshot_write_next - Get the address to store the next image page.
 * @handle: Snapshot handle structure.
 *
 * On the first call, @handle should point to a zeroed snapshot_handle
 * structure.  The structure gets populated then and a pointer to it should
 * be passed to this function every next time.
 *
 * On success, the function returns a positive number.  Then, the caller
 * is allowed to write up to the returned number of bytes to the memory
 * location computed by the data_of() macro.
 *
 * The function returns 0 to indicate the "end of file" condition.  Negative
 * numbers are returned on errors, in which cases the structure pointed to
 * by @handle is not updated and should not be used any more.
 */
int snapshot_write_next(struct snapshot_handle *handle)
{
	static struct chain_allocator ca;
	int error = 0;

	/* Check if we have already loaded the entire image */
	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages)
		return 0;

	handle->sync_read = 1;

	if (!handle->cur) {
		if (!buffer)
			/* This makes the buffer be freed by swsusp_free() */
			buffer = get_image_page(GFP_ATOMIC, PG_ANY);

		if (!buffer)
			return -ENOMEM;

		handle->buffer = buffer;
	} else if (handle->cur == 1) {
		error = load_header(buffer);
		if (error)
			return error;

		safe_pages_list = NULL;

		error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
		if (error)
			return error;

		/* Allocate buffer for page keys. */
		error = page_key_alloc(nr_copy_pages);
		if (error)
			return error;

		hibernate_restore_protection_begin();
	} else if (handle->cur <= nr_meta_pages + 1) {
		error = unpack_orig_pfns(buffer, &copy_bm);
		if (error)
			return error;

		if (handle->cur == nr_meta_pages + 1) {
			error = prepare_image(&orig_bm, &copy_bm);
			if (error)
				return error;

			chain_init(&ca, GFP_ATOMIC, PG_SAFE);
			memory_bm_position_reset(&orig_bm);
			restore_pblist = NULL;
			handle->buffer = get_buffer(&orig_bm, &ca);
			handle->sync_read = 0;
			if (IS_ERR(handle->buffer))
				return PTR_ERR(handle->buffer);
		}
	} else {
		copy_last_highmem_page();
		/* Restore page key for data page (s390 only). */
		page_key_write(handle->buffer);
		hibernate_restore_protect_page(handle->buffer);
		handle->buffer = get_buffer(&orig_bm, &ca);
		if (IS_ERR(handle->buffer))
			return PTR_ERR(handle->buffer);
		if (handle->buffer != buffer)
			handle->sync_read = 0;
	}
	handle->cur++;
	return PAGE_SIZE;
}

/**
 * snapshot_write_finalize - Complete the loading of a hibernation image.
 *
 * Must be called after the last call to snapshot_write_next() in case the
 * last page in the image happens to be a highmem page and its contents
 * should be stored in highmem.  Additionally, it recycles bitmap memory
 * that's not necessary any more.
 */
void snapshot_write_finalize(struct snapshot_handle *handle)
{
	copy_last_highmem_page();
	/* Restore page key for data page (s390 only). */
	page_key_write(handle->buffer);
	page_key_free();
	hibernate_restore_protect_page(handle->buffer);
	/* Do that only if we have loaded the image entirely */
	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) {
		memory_bm_recycle(&orig_bm);
		free_highmem_data();
	}
}

int snapshot_image_loaded(struct snapshot_handle *handle)
{
	return !(!nr_copy_pages || !last_highmem_page_copied() ||
			handle->cur <= nr_meta_pages + nr_copy_pages);
}

#ifdef CONFIG_HIGHMEM
/*
 * swap_two_pages_data - Swap the contents of two (possibly highmem) pages.
 * @buf must be a buffer of size PAGE_SIZE located in normal (non-highmem)
 * memory.
 */
static inline void swap_two_pages_data(struct page *p1, struct page *p2,
				       void *buf)
{
	void *kaddr1, *kaddr2;

	kaddr1 = kmap_atomic(p1);
	kaddr2 = kmap_atomic(p2);
	copy_page(buf, kaddr1);
	copy_page(kaddr1, kaddr2);
	copy_page(kaddr2, buf);
	kunmap_atomic(kaddr2);
	kunmap_atomic(kaddr1);
}

/**
 * restore_highmem - Put highmem image pages into their original locations.
 *
 * For each highmem page that was in use before hibernation and is included
 * in the image, and also has been allocated by the "restore" kernel, swap
 * its current contents with the previous (ie. "before hibernation") ones.
 *
 * If the restore eventually fails, we can call this function once again
 * and restore the highmem state as seen by the restore kernel.
 */
int restore_highmem(void)
{
	struct highmem_pbe *pbe = highmem_pblist;
	void *buf;

	if (!pbe)
		return 0;

	buf = get_image_page(GFP_ATOMIC, PG_SAFE);
	if (!buf)
		return -ENOMEM;

	while (pbe) {
		swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
		pbe = pbe->next;
	}
	free_image_page(buf, PG_UNSAFE_CLEAR);
	return 0;
}
#endif