// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/kernel/power/snapshot.c
 *
 * This file provides system snapshot/restore functionality for swsusp.
 *
 * Copyright (C) 1998-2005 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@rjwysocki.net>
 */

#define pr_fmt(fmt) "PM: " fmt

#include <linux/version.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/nmi.h>
#include <linux/syscalls.h>
#include <linux/console.h>
#include <linux/highmem.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/compiler.h>
#include <linux/ktime.h>
#include <linux/set_memory.h>

#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#include "power.h"

#if defined(CONFIG_STRICT_KERNEL_RWX) && defined(CONFIG_ARCH_HAS_SET_MEMORY)
static bool hibernate_restore_protection;
static bool hibernate_restore_protection_active;

void enable_restore_image_protection(void)
{
	hibernate_restore_protection = true;
}

static inline void hibernate_restore_protection_begin(void)
{
	hibernate_restore_protection_active = hibernate_restore_protection;
}

static inline void hibernate_restore_protection_end(void)
{
	hibernate_restore_protection_active = false;
}

static inline void hibernate_restore_protect_page(void *page_address)
{
	if (hibernate_restore_protection_active)
		set_memory_ro((unsigned long)page_address, 1);
}

static inline void hibernate_restore_unprotect_page(void *page_address)
{
	if (hibernate_restore_protection_active)
		set_memory_rw((unsigned long)page_address, 1);
}
#else
static inline void hibernate_restore_protection_begin(void) {}
static inline void hibernate_restore_protection_end(void) {}
static inline void hibernate_restore_protect_page(void *page_address) {}
static inline void hibernate_restore_unprotect_page(void *page_address) {}
#endif

static int swsusp_page_is_free(struct page *);
static void swsusp_set_page_forbidden(struct page *);
static void swsusp_unset_page_forbidden(struct page *);

/*
 * Number of bytes to reserve for memory allocations made by device drivers
 * from their ->freeze() and ->freeze_noirq() callbacks so that they don't
 * cause image creation to fail (tunable via /sys/power/reserved_size).
 */
unsigned long reserved_size;

void __init hibernate_reserved_size_init(void)
{
	reserved_size = SPARE_PAGES * PAGE_SIZE;
}

/*
 * Preferred image size in bytes (tunable via /sys/power/image_size).
 * When it is set to N, swsusp will do its best to ensure the image
 * size will not exceed N bytes, but if that is impossible, it will
 * try to create the smallest image possible.
 */
unsigned long image_size;

void __init hibernate_image_size_init(void)
{
	image_size = ((totalram_pages() * 2) / 5) * PAGE_SIZE;
}

/*
 * List of PBEs needed for restoring the pages that were allocated before
 * the suspend and included in the suspend image, but have also been
 * allocated by the "resume" kernel, so their contents cannot be written
 * directly to their "original" page frames.
 */
struct pbe *restore_pblist;

/* struct linked_page is used to build chains of pages */

#define LINKED_PAGE_DATA_SIZE	(PAGE_SIZE - sizeof(void *))

struct linked_page {
	struct linked_page *next;
	char data[LINKED_PAGE_DATA_SIZE];
} __packed;

/*
 * List of "safe" pages (i.e. pages that were not used by the image kernel
 * before hibernation) that may be used as temporary storage for image kernel
 * memory contents.
 */
static struct linked_page *safe_pages_list;

/* Pointer to an auxiliary buffer (1 page) */
static void *buffer;

#define PG_ANY		0
#define PG_SAFE		1
#define PG_UNSAFE_CLEAR	1
#define PG_UNSAFE_KEEP	0

static unsigned int allocated_unsafe_pages;

/**
 * get_image_page - Allocate a page for a hibernation image.
 * @gfp_mask: GFP mask for the allocation.
 * @safe_needed: Get pages that were not used before hibernation (restore only)
 *
 * During image restoration, for storing the PBE list and the image data, we
 * can only use memory pages that do not conflict with the pages used before
 * hibernation.  The "unsafe" pages have PageNosaveFree set and we count them
 * using allocated_unsafe_pages.
 *
 * Each allocated image page is marked as PageNosave and PageNosaveFree so that
 * swsusp_free() can release it.
 */
static void *get_image_page(gfp_t gfp_mask, int safe_needed)
{
	void *res;

	res = (void *)get_zeroed_page(gfp_mask);
	if (safe_needed)
		while (res && swsusp_page_is_free(virt_to_page(res))) {
			/* The page is unsafe, mark it for swsusp_free() */
			swsusp_set_page_forbidden(virt_to_page(res));
			allocated_unsafe_pages++;
			res = (void *)get_zeroed_page(gfp_mask);
		}
	if (res) {
		swsusp_set_page_forbidden(virt_to_page(res));
		swsusp_set_page_free(virt_to_page(res));
	}
	return res;
}

static void *__get_safe_page(gfp_t gfp_mask)
{
	if (safe_pages_list) {
		void *ret = safe_pages_list;

		safe_pages_list = safe_pages_list->next;
		memset(ret, 0, PAGE_SIZE);
		return ret;
	}
	return get_image_page(gfp_mask, PG_SAFE);
}

unsigned long get_safe_page(gfp_t gfp_mask)
{
	return (unsigned long)__get_safe_page(gfp_mask);
}

static struct page *alloc_image_page(gfp_t gfp_mask)
{
	struct page *page;

	page = alloc_page(gfp_mask);
	if (page) {
		swsusp_set_page_forbidden(page);
		swsusp_set_page_free(page);
	}
	return page;
}

static void recycle_safe_page(void *page_address)
{
	struct linked_page *lp = page_address;

	lp->next = safe_pages_list;
	safe_pages_list = lp;
}

/**
 * free_image_page - Free a page allocated for hibernation image.
 * @addr: Address of the page to free.
 * @clear_nosave_free: If set, clear the PageNosaveFree bit for the page.
 *
 * The page to free should have been allocated by get_image_page() (page flags
 * set by it are affected).
 */
static inline void free_image_page(void *addr, int clear_nosave_free)
{
	struct page *page;

	BUG_ON(!virt_addr_valid(addr));

	page = virt_to_page(addr);

	swsusp_unset_page_forbidden(page);
	if (clear_nosave_free)
		swsusp_unset_page_free(page);

	__free_page(page);
}

static inline void free_list_of_pages(struct linked_page *list,
				      int clear_page_nosave)
{
	while (list) {
		struct linked_page *lp = list->next;

		free_image_page(list, clear_page_nosave);
		list = lp;
	}
}

/*
 * struct chain_allocator is used for allocating small objects out of
 * a linked list of pages called 'the chain'.
 *
 * The chain grows each time when there is no room for a new object in
 * the current page.  The allocated objects cannot be freed individually.
 * It is only possible to free them all at once, by freeing the entire
 * chain.
 *
 * NOTE: The chain allocator may be inefficient if the allocated objects
 * are not much smaller than PAGE_SIZE.
 */
struct chain_allocator {
	struct linked_page *chain;	/* the chain */
	unsigned int used_space;	/* total size of objects allocated out
					   of the current page */
	gfp_t gfp_mask;		/* mask for allocating pages */
	int safe_needed;	/* if set, only "safe" pages are allocated */
};

static void chain_init(struct chain_allocator *ca, gfp_t gfp_mask,
		       int safe_needed)
{
	ca->chain = NULL;
	ca->used_space = LINKED_PAGE_DATA_SIZE;
	ca->gfp_mask = gfp_mask;
	ca->safe_needed = safe_needed;
}

static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
{
	void *ret;

	if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
		struct linked_page *lp;

		lp = ca->safe_needed ? __get_safe_page(ca->gfp_mask) :
					get_image_page(ca->gfp_mask, PG_ANY);
		if (!lp)
			return NULL;

		lp->next = ca->chain;
		ca->chain = lp;
		ca->used_space = 0;
	}
	ret = ca->chain->data + ca->used_space;
	ca->used_space += size;
	return ret;
}
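
/*
 * Illustrative usage sketch (not part of the original file): callers carve
 * many small objects out of whole pages and release them all at once, e.g.:
 *
 *	struct chain_allocator ca;
 *	struct pbe *pbe;
 *
 *	chain_init(&ca, GFP_ATOMIC, PG_SAFE);
 *	pbe = chain_alloc(&ca, sizeof(struct pbe));
 *
 * Individual objects cannot be freed; the whole chain goes in one shot (see
 * how memory_bm_free() passes bm->p_list to free_list_of_pages() below).
 */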

/*
 * Data types related to memory bitmaps.
 *
 * struct memory_bitmap contains a pointer to the main list of zone bitmap
 * objects, a struct bm_position used for browsing the bitmap, and a pointer
 * to the list of pages used for allocating all of the zone bitmap objects
 * and bitmap block objects.
 *
 * NOTE: It has to be possible to lay out the bitmap in memory using only
 * allocations of order 0.
 *
 * The memory bitmap is organized as a radix tree to guarantee fast random
 * access to the bits.  There is one radix tree for each zone (as returned
 * from create_mem_extents).
 *
 * One radix tree is represented by one struct mem_zone_bm_rtree.  There are
 * two linked lists for the nodes of the tree, one for the inner nodes and
 * one for the leave nodes.  The linked leave nodes are used for fast linear
 * access of the memory bitmap.
 *
 * The struct rtree_node represents one node of the radix tree.
 */
#define BM_END_OF_MAP	(~0UL)

#define BM_BITS_PER_BLOCK	(PAGE_SIZE * BITS_PER_BYTE)
#define BM_BLOCK_SHIFT		(PAGE_SHIFT + 3)
#define BM_BLOCK_MASK		((1UL << BM_BLOCK_SHIFT) - 1)
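
/*
 * Worked example (illustrative, assuming 4 KiB pages): BM_BITS_PER_BLOCK is
 * 4096 * 8 = 32768, so a single bitmap block tracks 32768 page frames, i.e.
 * 128 MiB of physical memory, and BM_BLOCK_SHIFT is 12 + 3 = 15.
 */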

/*
 * struct rtree_node is a wrapper struct to link the nodes
 * of the rtree together for easy linear iteration over
 * bits and easy freeing
 */
struct rtree_node {
	struct list_head list;
	unsigned long *data;
};

/*
 * struct mem_zone_bm_rtree represents a bitmap used for one
 * populated memory zone.
 */
struct mem_zone_bm_rtree {
	struct list_head list;		/* Link zones together         */
	struct list_head nodes;		/* Radix tree inner nodes      */
	struct list_head leaves;	/* Radix tree leaves           */
	unsigned long start_pfn;	/* Zone start page frame       */
	unsigned long end_pfn;		/* Zone end page frame + 1     */
	struct rtree_node *rtree;	/* Radix tree root             */
	int levels;			/* Number of radix tree levels */
	unsigned int blocks;		/* Number of bitmap blocks     */
};

/* struct bm_position is used for browsing memory bitmaps */
struct bm_position {
	struct mem_zone_bm_rtree *zone;
	struct rtree_node *node;
	unsigned long node_pfn;
	int node_bit;
};

struct memory_bitmap {
	struct list_head zones;
	struct linked_page *p_list;	/* list of pages used to store zone
					   bitmap objects and bitmap block
					   objects */
	struct bm_position cur;		/* most recently used bit position */
};

/* Functions that operate on memory bitmaps */

#define BM_ENTRIES_PER_LEVEL	(PAGE_SIZE / sizeof(unsigned long))
#if BITS_PER_LONG == 32
#define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 2)
#else
#define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 3)
#endif
#define BM_RTREE_LEVEL_MASK	((1UL << BM_RTREE_LEVEL_SHIFT) - 1)
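
/*
 * Worked example (illustrative): on a 64-bit machine with 4 KiB pages,
 * BM_ENTRIES_PER_LEVEL is 4096 / 8 = 512 and BM_RTREE_LEVEL_SHIFT is
 * 12 - 3 = 9, so every inner node fans out to 512 children.  One leaf
 * covers 128 MiB of memory, a one-level tree covers 512 * 128 MiB = 64 GiB,
 * a two-level tree 32 TiB, and so on.
 */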

/*
 *	alloc_rtree_node - Allocate a new node and add it to the radix tree.
 *
 *	This function is used to allocate inner nodes as well as the
 *	leave nodes of the radix tree.  It also adds the node to the
 *	corresponding linked list passed in by the *list parameter.
 */
static struct rtree_node *alloc_rtree_node(gfp_t gfp_mask, int safe_needed,
					   struct chain_allocator *ca,
					   struct list_head *list)
{
	struct rtree_node *node;

	node = chain_alloc(ca, sizeof(struct rtree_node));
	if (!node)
		return NULL;

	node->data = get_image_page(gfp_mask, safe_needed);
	if (!node->data)
		return NULL;

	list_add_tail(&node->list, list);

	return node;
}

/*
 *	add_rtree_block - Add a new leave node to the memory bitmap.
 *
 *	The radix tree is extended by one level when the new block number
 *	exceeds the capacity of the current depth.  All blocks of a zone
 *	have to be added in ascending block-number order, because the tree
 *	can only grow on the right-hand side.
 */
static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask,
			   int safe_needed, struct chain_allocator *ca)
{
	struct rtree_node *node, *block, **dst;
	unsigned int levels_needed, block_nr;
	int i;

	block_nr = zone->blocks;
	levels_needed = 0;

	/* How many levels do we need for this block nr? */
	while (block_nr) {
		levels_needed += 1;
		block_nr >>= BM_RTREE_LEVEL_SHIFT;
	}

	/* Make sure the rtree has enough levels */
	for (i = zone->levels; i < levels_needed; i++) {
		node = alloc_rtree_node(gfp_mask, safe_needed, ca,
					&zone->nodes);
		if (!node)
			return -ENOMEM;

		node->data[0] = (unsigned long)zone->rtree;
		zone->rtree = node;
		zone->levels += 1;
	}

	/* Allocate new block */
	block = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->leaves);
	if (!block)
		return -ENOMEM;

	/* Now add the block to the rtree */
	node = zone->rtree;
	dst = &zone->rtree;
	block_nr = zone->blocks;
	for (i = zone->levels; i > 0; i--) {
		int index;

		if (!node) {
			node = alloc_rtree_node(gfp_mask, safe_needed, ca,
						&zone->nodes);
			if (!node)
				return -ENOMEM;
			*dst = node;
		}

		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
		index &= BM_RTREE_LEVEL_MASK;
		dst = (struct rtree_node **)&((*dst)->data[index]);
		node = *dst;
	}

	zone->blocks += 1;
	*dst = block;

	return 0;
}

static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
			       int clear_nosave_free);

/*
 *	create_zone_bm_rtree - Create a radix tree for one zone.
 *
 *	Allocates the mem_zone_bm_rtree structure and initializes it.
 *	This function also allocates and builds the radix tree for the
 *	zone.
 */
static struct mem_zone_bm_rtree *create_zone_bm_rtree(gfp_t gfp_mask,
						      int safe_needed,
						      struct chain_allocator *ca,
						      unsigned long start,
						      unsigned long end)
{
	struct mem_zone_bm_rtree *zone;
	unsigned int i, nr_blocks;
	unsigned long pages;

	pages = end - start;
	zone = chain_alloc(ca, sizeof(struct mem_zone_bm_rtree));
	if (!zone)
		return NULL;

	INIT_LIST_HEAD(&zone->nodes);
	INIT_LIST_HEAD(&zone->leaves);
	zone->start_pfn = start;
	zone->end_pfn = end;
	nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);

	for (i = 0; i < nr_blocks; i++) {
		if (add_rtree_block(zone, gfp_mask, safe_needed, ca)) {
			free_zone_bm_rtree(zone, PG_UNSAFE_CLEAR);
			return NULL;
		}
	}

	return zone;
}

/*
 *	free_zone_bm_rtree - Free the memory of the radix tree.
 *
 *	Free all node pages of the radix tree.  The mem_zone_bm_rtree
 *	structure itself is not freed here nor are the rtree_node
 *	structs.
 */
static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
			       int clear_nosave_free)
{
	struct rtree_node *node;

	list_for_each_entry(node, &zone->nodes, list)
		free_image_page(node->data, clear_nosave_free);

	list_for_each_entry(node, &zone->leaves, list)
		free_image_page(node->data, clear_nosave_free);
}

static void memory_bm_position_reset(struct memory_bitmap *bm)
{
	bm->cur.zone = list_entry(bm->zones.next, struct mem_zone_bm_rtree,
				  list);
	bm->cur.node = list_entry(bm->cur.zone->leaves.next,
				  struct rtree_node, list);
	bm->cur.node_pfn = 0;
	bm->cur.node_bit = 0;
}

static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);

struct mem_extent {
	struct list_head hook;
	unsigned long start;
	unsigned long end;
};

/**
 * free_mem_extents - Free a list of memory extents.
 * @list: List of extents to free.
 */
static void free_mem_extents(struct list_head *list)
{
	struct mem_extent *ext, *aux;

	list_for_each_entry_safe(ext, aux, list, hook) {
		list_del(&ext->hook);
		kfree(ext);
	}
}

/**
 * create_mem_extents - Create a list of memory extents.
 * @list: List to put the extents into.
 * @gfp_mask: Mask to use for memory allocations.
 *
 * The extents represent contiguous ranges of PFNs.
 */
static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
{
	struct zone *zone;

	INIT_LIST_HEAD(list);

	for_each_populated_zone(zone) {
		unsigned long zone_start, zone_end;
		struct mem_extent *ext, *cur, *aux;

		zone_start = zone->zone_start_pfn;
		zone_end = zone_end_pfn(zone);

		list_for_each_entry(ext, list, hook)
			if (zone_start <= ext->end)
				break;

		if (&ext->hook == list || zone_end < ext->start) {
			/* New extent is necessary */
			struct mem_extent *new_ext;

			new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask);
			if (!new_ext) {
				free_mem_extents(list);
				return -ENOMEM;
			}
			new_ext->start = zone_start;
			new_ext->end = zone_end;
			list_add_tail(&new_ext->hook, &ext->hook);
			continue;
		}

		/* Merge this zone's range of PFNs with the existing one */
		if (zone_start < ext->start)
			ext->start = zone_start;
		if (zone_end > ext->end)
			ext->end = zone_end;

		/* More merging may be possible */
		cur = ext;
		list_for_each_entry_safe_continue(cur, aux, list, hook) {
			if (zone_end < cur->start)
				break;
			if (zone_end < cur->end)
				ext->end = cur->end;
			list_del(&cur->hook);
			kfree(cur);
		}
	}

	return 0;
}

/**
 * memory_bm_create - Allocate memory for a memory bitmap.
 */
static int memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask,
			    int safe_needed)
{
	struct chain_allocator ca;
	struct list_head mem_extents;
	struct mem_extent *ext;
	int error;

	chain_init(&ca, gfp_mask, safe_needed);
	INIT_LIST_HEAD(&bm->zones);

	error = create_mem_extents(&mem_extents, gfp_mask);
	if (error)
		return error;

	list_for_each_entry(ext, &mem_extents, hook) {
		struct mem_zone_bm_rtree *zone;

		zone = create_zone_bm_rtree(gfp_mask, safe_needed, &ca,
					    ext->start, ext->end);
		if (!zone) {
			error = -ENOMEM;
			goto Error;
		}
		list_add_tail(&zone->list, &bm->zones);
	}

	bm->p_list = ca.chain;
	memory_bm_position_reset(bm);
 Exit:
	free_mem_extents(&mem_extents);
	return error;

 Error:
	bm->p_list = ca.chain;
	memory_bm_free(bm, PG_UNSAFE_CLEAR);
	goto Exit;
}

/**
 * memory_bm_free - Free memory occupied by the memory bitmap.
 * @bm: Memory bitmap.
 */
static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
{
	struct mem_zone_bm_rtree *zone;

	list_for_each_entry(zone, &bm->zones, list)
		free_zone_bm_rtree(zone, clear_nosave_free);

	free_list_of_pages(bm->p_list, clear_nosave_free);

	INIT_LIST_HEAD(&bm->zones);
}

/**
 * memory_bm_find_bit - Find the bit for a given PFN in a memory bitmap.
 *
 * Find the bit in memory bitmap @bm that corresponds to the given PFN.
 * The cur.zone, cur.node and cur.node_pfn members of @bm are updated.
 *
 * Walk the radix tree to find the page containing the bit that represents
 * @pfn and return the position of the bit in @addr and @bit_nr.
 */
static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
			      void **addr, unsigned int *bit_nr)
{
	struct mem_zone_bm_rtree *curr, *zone;
	struct rtree_node *node;
	int i, block_nr;

	zone = bm->cur.zone;

	if (pfn >= zone->start_pfn && pfn < zone->end_pfn)
		goto zone_found;

	zone = NULL;

	/* Find the right zone */
	list_for_each_entry(curr, &bm->zones, list) {
		if (pfn >= curr->start_pfn && pfn < curr->end_pfn) {
			zone = curr;
			break;
		}
	}

	if (!zone)
		return -EFAULT;

zone_found:
	/*
	 * We have found the zone.  Now walk the radix tree to find the leaf
	 * node for our PFN.
	 */
	node = bm->cur.node;
	if (((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
		goto node_found;

	node = zone->rtree;
	block_nr = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT;

	for (i = zone->levels; i > 0; i--) {
		int index;

		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
		index &= BM_RTREE_LEVEL_MASK;
		BUG_ON(node->data[index] == 0);
		node = (struct rtree_node *)node->data[index];
	}

node_found:
	/* Update last position */
	bm->cur.zone = zone;
	bm->cur.node = node;
	bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK;

	/* Set return values */
	*addr = node->data;
	*bit_nr = (pfn - zone->start_pfn) & BM_BLOCK_MASK;

	return 0;
}
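
/*
 * Worked example (illustrative, 64-bit, 4 KiB pages): in a zone starting at
 * PFN 0, the bit for PFN 40000 lives in block 40000 >> 15 = 1, i.e. the
 * second leaf page, at bit number 40000 & 32767 = 7232 within that page.
 */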

static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	set_bit(bit, addr);
}

static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	if (!error)
		set_bit(bit, addr);

	return error;
}

static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	clear_bit(bit, addr);
}

static void memory_bm_clear_current(struct memory_bitmap *bm)
{
	int bit;

	bit = max(bm->cur.node_bit - 1, 0);
	clear_bit(bit, bm->cur.node->data);
}

static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	return test_bit(bit, addr);
}

static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;

	return !memory_bm_find_bit(bm, pfn, &addr, &bit);
}

/*
 * rtree_next_node - Jump to the next leaf node.
 *
 * Set the position to the beginning of the next node in the
 * memory bitmap.  This is either the next node in the current
 * zone's radix tree or the first node in the radix tree of the
 * next zone.
 *
 * Return true if there is a next node, false otherwise.
 */
static bool rtree_next_node(struct memory_bitmap *bm)
{
	if (!list_is_last(&bm->cur.node->list, &bm->cur.zone->leaves)) {
		bm->cur.node = list_entry(bm->cur.node->list.next,
					  struct rtree_node, list);
		bm->cur.node_pfn += BM_BITS_PER_BLOCK;
		bm->cur.node_bit = 0;
		touch_softlockup_watchdog();
		return true;
	}

	/* No more nodes, goto next zone */
	if (!list_is_last(&bm->cur.zone->list, &bm->zones)) {
		bm->cur.zone = list_entry(bm->cur.zone->list.next,
					  struct mem_zone_bm_rtree, list);
		bm->cur.node = list_entry(bm->cur.zone->leaves.next,
					  struct rtree_node, list);
		bm->cur.node_pfn = 0;
		bm->cur.node_bit = 0;
		return true;
	}

	/* No more zones */
	return false;
}

/**
 * memory_bm_next_pfn - Find the next set bit in a memory bitmap.
 * @bm: Memory bitmap.
 *
 * Starting from the last returned position this function searches for the
 * next set bit in @bm and returns the PFN represented by it.  If no more bits
 * are set, BM_END_OF_MAP is returned.
 *
 * It is required to run memory_bm_position_reset() before the first call to
 * this function for the given memory bitmap.
 */
static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
{
	unsigned long bits, pfn, pages;
	int bit;

	do {
		pages = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn;
		bits = min(pages - bm->cur.node_pfn, BM_BITS_PER_BLOCK);
		bit = find_next_bit(bm->cur.node->data, bits,
				    bm->cur.node_bit);
		if (bit < bits) {
			pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit;
			bm->cur.node_bit = bit + 1;
			return pfn;
		}
	} while (rtree_next_node(bm));

	return BM_END_OF_MAP;
}
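
/*
 * Usage sketch (mirrors the callers below, e.g. duplicate_memory_bitmap()):
 * walking all set bits requires a position reset first.  do_something() is a
 * placeholder:
 *
 *	memory_bm_position_reset(bm);
 *	for (pfn = memory_bm_next_pfn(bm); pfn != BM_END_OF_MAP;
 *	     pfn = memory_bm_next_pfn(bm))
 *		do_something(pfn);
 */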

/*
 * This structure represents a range of page frames the contents of which
 * should not be saved during hibernation.
 */
struct nosave_region {
	struct list_head list;
	unsigned long start_pfn;
	unsigned long end_pfn;
};

static LIST_HEAD(nosave_regions);

static void recycle_zone_bm_rtree(struct mem_zone_bm_rtree *zone)
{
	struct rtree_node *node;

	list_for_each_entry(node, &zone->nodes, list)
		recycle_safe_page(node->data);

	list_for_each_entry(node, &zone->leaves, list)
		recycle_safe_page(node->data);
}

static void memory_bm_recycle(struct memory_bitmap *bm)
{
	struct mem_zone_bm_rtree *zone;
	struct linked_page *p_list;

	list_for_each_entry(zone, &bm->zones, list)
		recycle_zone_bm_rtree(zone);

	p_list = bm->p_list;
	while (p_list) {
		struct linked_page *lp = p_list;

		p_list = lp->next;
		recycle_safe_page(lp);
	}
}

/**
 * register_nosave_region - Register a region of unsaveable memory.
 *
 * Register a range of page frames the contents of which should not be saved
 * during hibernation (to be used in the early initialization processes).
 */
void __init __register_nosave_region(unsigned long start_pfn,
				     unsigned long end_pfn, int use_kmalloc)
{
	struct nosave_region *region;

	if (start_pfn >= end_pfn)
		return;

	if (!list_empty(&nosave_regions)) {
		/* Try to extend the previous region (they should be sorted) */
		region = list_entry(nosave_regions.prev,
				    struct nosave_region, list);
		if (region->end_pfn == start_pfn) {
			region->end_pfn = end_pfn;
			goto Report;
		}
	}
	if (use_kmalloc) {
		/* The memory this region refers to is still in use */
		region = kmalloc(sizeof(struct nosave_region), GFP_KERNEL);
		BUG_ON(!region);
	} else {
		/* This allocation cannot fail */
		region = memblock_alloc(sizeof(struct nosave_region),
					SMP_CACHE_BYTES);
	}
	region->start_pfn = start_pfn;
	region->end_pfn = end_pfn;
	list_add_tail(&region->list, &nosave_regions);
 Report:
	pr_info("Registered nosave memory: [mem %#010llx-%#010llx]\n",
		(unsigned long long) start_pfn << PAGE_SHIFT,
		((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
}

/*
 * Set bits in this map correspond to the page frames the contents of which
 * should not be saved during the suspend.
 */
static struct memory_bitmap *forbidden_pages_map;

/* Set bits in this map correspond to free page frames. */
static struct memory_bitmap *free_pages_map;

/*
 * Each page frame allocated for creating the image is marked by setting the
 * corresponding bits in forbidden_pages_map and free_pages_map simultaneously.
 */
void swsusp_set_page_free(struct page *page)
{
	if (free_pages_map)
		memory_bm_set_bit(free_pages_map, page_to_pfn(page));
}

static int swsusp_page_is_free(struct page *page)
{
	return free_pages_map ?
		memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0;
}

void swsusp_unset_page_free(struct page *page)
{
	if (free_pages_map)
		memory_bm_clear_bit(free_pages_map, page_to_pfn(page));
}

static void swsusp_set_page_forbidden(struct page *page)
{
	if (forbidden_pages_map)
		memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page));
}

int swsusp_page_is_forbidden(struct page *page)
{
	return forbidden_pages_map ?
		memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0;
}

static void swsusp_unset_page_forbidden(struct page *page)
{
	if (forbidden_pages_map)
		memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
}

/**
 * mark_nosave_pages - Mark pages that should not be saved.
 * @bm: Memory bitmap.
 *
 * Set the bits in @bm that correspond to the page frames the contents of which
 * should not be saved.
 */
static void mark_nosave_pages(struct memory_bitmap *bm)
{
	struct nosave_region *region;

	if (list_empty(&nosave_regions))
		return;

	list_for_each_entry(region, &nosave_regions, list) {
		unsigned long pfn;

		pr_debug("Marking nosave pages: [mem %#010llx-%#010llx]\n",
			 (unsigned long long) region->start_pfn << PAGE_SHIFT,
			 ((unsigned long long) region->end_pfn << PAGE_SHIFT)
				- 1);

		for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
			if (pfn_valid(pfn)) {
				/*
				 * It is safe to ignore the result of
				 * mem_bm_set_bit_check() here, since we won't
				 * touch the PFNs for which the error is
				 * returned anyway.
				 */
				mem_bm_set_bit_check(bm, pfn);
			}
	}
}

/**
 * create_basic_memory_bitmaps - Create bitmaps to hold basic page information.
 *
 * Create bitmaps needed for marking page frames that should not be saved and
 * free page frames.  The forbidden_pages_map and free_pages_map pointers are
 * only modified if everything goes well, because we don't want the bits to be
 * touched before both bitmaps are set up.
 */
int create_basic_memory_bitmaps(void)
{
	struct memory_bitmap *bm1, *bm2;
	int error = 0;

	if (forbidden_pages_map && free_pages_map)
		return 0;
	else
		BUG_ON(forbidden_pages_map || free_pages_map);

	bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
	if (!bm1)
		return -ENOMEM;

	error = memory_bm_create(bm1, GFP_KERNEL, PG_ANY);
	if (error)
		goto Free_first_object;

	bm2 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
	if (!bm2)
		goto Free_first_bitmap;

	error = memory_bm_create(bm2, GFP_KERNEL, PG_ANY);
	if (error)
		goto Free_second_object;

	forbidden_pages_map = bm1;
	free_pages_map = bm2;
	mark_nosave_pages(forbidden_pages_map);

	pr_debug("Basic memory bitmaps created\n");

	return 0;

 Free_second_object:
	kfree(bm2);
 Free_first_bitmap:
	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
 Free_first_object:
	kfree(bm1);
	return -ENOMEM;
}

/**
 * free_basic_memory_bitmaps - Free memory bitmaps holding basic information.
 *
 * Free memory bitmaps allocated by create_basic_memory_bitmaps().  The
 * auxiliary pointers are necessary so that the bitmaps themselves are not
 * referred to while they are being freed.
 */
void free_basic_memory_bitmaps(void)
{
	struct memory_bitmap *bm1, *bm2;

	if (WARN_ON(!(forbidden_pages_map && free_pages_map)))
		return;

	bm1 = forbidden_pages_map;
	bm2 = free_pages_map;
	forbidden_pages_map = NULL;
	free_pages_map = NULL;
	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
	kfree(bm1);
	memory_bm_free(bm2, PG_UNSAFE_CLEAR);
	kfree(bm2);

	pr_debug("Basic memory bitmaps freed\n");
}

void clear_free_pages(void)
{
#ifdef CONFIG_PAGE_POISONING_ZERO
	struct memory_bitmap *bm = free_pages_map;
	unsigned long pfn;

	if (WARN_ON(!(free_pages_map)))
		return;

	memory_bm_position_reset(bm);
	pfn = memory_bm_next_pfn(bm);
	while (pfn != BM_END_OF_MAP) {
		if (pfn_valid(pfn))
			clear_highpage(pfn_to_page(pfn));

		pfn = memory_bm_next_pfn(bm);
	}
	memory_bm_position_reset(bm);
	pr_info("free pages cleared after restore\n");
#endif /* CONFIG_PAGE_POISONING_ZERO */
}

/**
 * snapshot_additional_pages - Estimate the number of extra pages needed.
 * @zone: Memory zone to carry out the computation for.
 *
 * Estimate the number of additional pages needed for setting up a hibernation
 * image data structures for @zone (usually, the returned value is greater
 * than the exact number).
 */
unsigned int snapshot_additional_pages(struct zone *zone)
{
	unsigned int rtree, nodes;

	rtree = nodes = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
	rtree += DIV_ROUND_UP(rtree * sizeof(struct rtree_node),
			      LINKED_PAGE_DATA_SIZE);
	while (nodes > 1) {
		nodes = DIV_ROUND_UP(nodes, BM_ENTRIES_PER_LEVEL);
		rtree += nodes;
	}

	return 2 * rtree;
}
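
/*
 * Worked example (illustrative, 64-bit, 4 KiB pages): a 4 GiB zone spans
 * 1048576 page frames and needs 1048576 / 32768 = 32 leaf pages; the 32
 * struct rtree_node wrappers fit into a single chain page and the tree needs
 * one inner node, so rtree = 32 + 1 + 1 = 34 and the function returns 68,
 * i.e. 34 pages for each of the two bitmaps used while creating the image.
 */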

#ifdef CONFIG_HIGHMEM
/**
 * count_free_highmem_pages - Compute the total number of free highmem pages.
 *
 * The returned number is system-wide.
 */
static unsigned int count_free_highmem_pages(void)
{
	struct zone *zone;
	unsigned int cnt = 0;

	for_each_populated_zone(zone)
		if (is_highmem(zone))
			cnt += zone_page_state(zone, NR_FREE_PAGES);

	return cnt;
}

/**
 * saveable_highmem_page - Check if a highmem page is saveable.
 *
 * Determine whether a highmem page should be included in a hibernation image.
 *
 * We should save the page if it isn't Nosave or NosaveFree, or Reserved,
 * and it isn't part of a free chunk of pages.
 */
static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_page(pfn);
	if (page_zone(page) != zone)
		return NULL;

	BUG_ON(!PageHighMem(page));

	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page) ||
	    PageReserved(page))
		return NULL;

	if (page_is_guard(page))
		return NULL;

	return page;
}

/**
 * count_highmem_pages - Compute the total number of saveable highmem pages.
 */
static unsigned int count_highmem_pages(void)
{
	struct zone *zone;
	unsigned int n = 0;

	for_each_populated_zone(zone) {
		unsigned long pfn, max_zone_pfn;

		if (!is_highmem(zone))
			continue;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_highmem_page(zone, pfn))
				n++;
	}
	return n;
}
#else
static inline void *saveable_highmem_page(struct zone *z, unsigned long p)
{
	return NULL;
}
#endif /* CONFIG_HIGHMEM */

/**
 * saveable_page - Check if the given page is saveable.
 *
 * Determine whether a non-highmem page should be included in a hibernation
 * image.
 *
 * We should save the page if it isn't Nosave, and is not in the range of pages
 * statically defined as 'unsaveable', and it isn't part of a free chunk of
 * pages.
 */
static struct page *saveable_page(struct zone *zone, unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_page(pfn);
	if (page_zone(page) != zone)
		return NULL;

	BUG_ON(PageHighMem(page));

	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
		return NULL;

	if (PageReserved(page)
	    && (!kernel_page_present(page) || pfn_is_nosave(pfn)))
		return NULL;

	if (page_is_guard(page))
		return NULL;

	return page;
}

/**
 * count_data_pages - Compute the total number of saveable non-highmem pages.
 */
static unsigned int count_data_pages(void)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;
	unsigned int n = 0;

	for_each_populated_zone(zone) {
		if (is_highmem(zone))
			continue;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_page(zone, pfn))
				n++;
	}
	return n;
}

/*
 * This is needed, because copy_page and memcpy are not usable for copying
 * task structs.
 */
static inline void do_copy_page(long *dst, long *src)
{
	int n;

	for (n = PAGE_SIZE / sizeof(long); n; n--)
		*dst++ = *src++;
}

/**
 * safe_copy_page - Copy a page in a safe way.
 *
 * Check if the page we are going to copy is marked as present in the kernel
 * page tables (this always is the case if CONFIG_DEBUG_PAGEALLOC is not set,
 * and in that case kernel_page_present() always returns 'true').  If the page
 * is not present, map it temporarily for the duration of the copy.
 */
static void safe_copy_page(void *dst, struct page *s_page)
{
	if (kernel_page_present(s_page)) {
		do_copy_page(dst, page_address(s_page));
	} else {
		kernel_map_pages(s_page, 1, 1);
		do_copy_page(dst, page_address(s_page));
		kernel_map_pages(s_page, 1, 0);
	}
}

#ifdef CONFIG_HIGHMEM
static inline struct page *page_is_saveable(struct zone *zone, unsigned long pfn)
{
	return is_highmem(zone) ?
		saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
}

static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
	struct page *s_page, *d_page;
	void *src, *dst;

	s_page = pfn_to_page(src_pfn);
	d_page = pfn_to_page(dst_pfn);
	if (PageHighMem(s_page)) {
		src = kmap_atomic(s_page);
		dst = kmap_atomic(d_page);
		do_copy_page(dst, src);
		kunmap_atomic(dst);
		kunmap_atomic(src);
	} else {
		if (PageHighMem(d_page)) {
			/*
			 * The page pointed to by src may contain some kernel
			 * data modified by kmap_atomic()
			 */
			safe_copy_page(buffer, s_page);
			dst = kmap_atomic(d_page);
			copy_page(dst, buffer);
			kunmap_atomic(dst);
		} else {
			safe_copy_page(page_address(d_page), s_page);
		}
	}
}
#else
#define page_is_saveable(zone, pfn)	saveable_page(zone, pfn)

static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
	safe_copy_page(page_address(pfn_to_page(dst_pfn)),
		       pfn_to_page(src_pfn));
}
#endif /* CONFIG_HIGHMEM */

static void copy_data_pages(struct memory_bitmap *copy_bm,
			    struct memory_bitmap *orig_bm)
{
	struct zone *zone;
	unsigned long pfn;

	for_each_populated_zone(zone) {
		unsigned long max_zone_pfn;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (page_is_saveable(zone, pfn))
				memory_bm_set_bit(orig_bm, pfn);
	}
	memory_bm_position_reset(orig_bm);
	memory_bm_position_reset(copy_bm);
	for(;;) {
		pfn = memory_bm_next_pfn(orig_bm);
		if (unlikely(pfn == BM_END_OF_MAP))
			break;
		copy_data_page(memory_bm_next_pfn(copy_bm), pfn);
	}
}

/* Total number of image pages */
static unsigned int nr_copy_pages;
/* Number of pages needed for saving the original pfns of the image pages */
static unsigned int nr_meta_pages;
/*
 * Numbers of normal and highmem page frames allocated for hibernation image
 * before suspending devices.
 */
static unsigned int alloc_normal, alloc_highmem;
/*
 * Memory bitmap used for marking saveable pages (during hibernation) or
 * hibernation image pages (during restore)
 */
static struct memory_bitmap orig_bm;
/*
 * Memory bitmap used during hibernation for marking allocated page frames
 * that will contain copies of saveable pages.  During restore it is initially
 * used for marking hibernation image pages, but then the set bits from it are
 * duplicated in @orig_bm and it is released.  On highmem systems it is next
 * used for marking "safe" highmem pages, but it has to be reinitialized for
 * this purpose.
 */
static struct memory_bitmap copy_bm;

/**
 * swsusp_free - Free pages allocated for hibernation image.
 *
 * Image pages are allocated before snapshot creation, so they need to be
 * released after resume.
 */
void swsusp_free(void)
{
	unsigned long fb_pfn, fr_pfn;

	if (!forbidden_pages_map || !free_pages_map)
		goto out;

	memory_bm_position_reset(forbidden_pages_map);
	memory_bm_position_reset(free_pages_map);

loop:
	fr_pfn = memory_bm_next_pfn(free_pages_map);
	fb_pfn = memory_bm_next_pfn(forbidden_pages_map);

	/*
	 * Find the next bit set in both bitmaps.  This is guaranteed to
	 * terminate when fb_pfn == fr_pfn == BM_END_OF_MAP.
	 */
	do {
		if (fb_pfn < fr_pfn)
			fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
		if (fr_pfn < fb_pfn)
			fr_pfn = memory_bm_next_pfn(free_pages_map);
	} while (fb_pfn != fr_pfn);

	if (fr_pfn != BM_END_OF_MAP && pfn_valid(fr_pfn)) {
		struct page *page = pfn_to_page(fr_pfn);

		memory_bm_clear_current(forbidden_pages_map);
		memory_bm_clear_current(free_pages_map);
		hibernate_restore_unprotect_page(page_address(page));
		__free_page(page);
		goto loop;
	}

out:
	nr_copy_pages = 0;
	nr_meta_pages = 0;
	restore_pblist = NULL;
	buffer = NULL;
	alloc_normal = 0;
	alloc_highmem = 0;
	hibernate_restore_protection_end();
}

/* Helper functions used for the shrinking of memory. */

#define GFP_IMAGE	(GFP_KERNEL | __GFP_NOWARN)

/**
 * preallocate_image_pages - Allocate a number of pages for hibernation image.
 * @nr_pages: Number of page frames to allocate.
 * @mask: GFP flags to use for the allocation.
 *
 * Return value: Number of page frames actually allocated.
 */
static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
{
	unsigned long nr_alloc = 0;

	while (nr_pages > 0) {
		struct page *page;

		page = alloc_image_page(mask);
		if (!page)
			break;
		memory_bm_set_bit(&copy_bm, page_to_pfn(page));
		if (PageHighMem(page))
			alloc_highmem++;
		else
			alloc_normal++;
		nr_pages--;
		nr_alloc++;
	}

	return nr_alloc;
}

static unsigned long preallocate_image_memory(unsigned long nr_pages,
					      unsigned long avail_normal)
{
	unsigned long alloc;

	if (avail_normal <= alloc_normal)
		return 0;

	alloc = avail_normal - alloc_normal;
	if (nr_pages < alloc)
		alloc = nr_pages;

	return preallocate_image_pages(alloc, GFP_IMAGE);
}

#ifdef CONFIG_HIGHMEM
static unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
	return preallocate_image_pages(nr_pages, GFP_IMAGE | __GFP_HIGHMEM);
}

/**
 * __fraction - Compute (an approximation of) x * (multiplier / base).
 */
static unsigned long __fraction(u64 x, u64 multiplier, u64 base)
{
	x *= multiplier;
	do_div(x, base);
	return (unsigned long)x;
}
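
/*
 * Worked example (illustrative): __fraction(1000, 1, 4) computes
 * 1000 * 1 / 4 = 250 using do_div(), so the 64-bit division also works on
 * 32-bit architectures where a plain '/' on a u64 is not available.
 */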

static unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
						  unsigned long highmem,
						  unsigned long total)
{
	unsigned long alloc = __fraction(nr_pages, highmem, total);

	return preallocate_image_pages(alloc, GFP_IMAGE | __GFP_HIGHMEM);
}
#else
static inline unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
	return 0;
}

static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
							 unsigned long highmem,
							 unsigned long total)
{
	return 0;
}
#endif /* CONFIG_HIGHMEM */

/**
 * free_unnecessary_pages - Release preallocated pages not needed for the image.
 */
static unsigned long free_unnecessary_pages(void)
{
	unsigned long save, to_free_normal, to_free_highmem, free;

	save = count_data_pages();
	if (alloc_normal >= save) {
		to_free_normal = alloc_normal - save;
		save = 0;
	} else {
		to_free_normal = 0;
		save -= alloc_normal;
	}
	save += count_highmem_pages();
	if (alloc_highmem >= save) {
		to_free_highmem = alloc_highmem - save;
	} else {
		to_free_highmem = 0;
		save -= alloc_highmem;
		if (to_free_normal > save)
			to_free_normal -= save;
		else
			to_free_normal = 0;
	}
	free = to_free_normal + to_free_highmem;

	memory_bm_position_reset(&copy_bm);

	while (to_free_normal > 0 || to_free_highmem > 0) {
		unsigned long pfn = memory_bm_next_pfn(&copy_bm);
		struct page *page = pfn_to_page(pfn);

		if (PageHighMem(page)) {
			if (!to_free_highmem)
				continue;
			to_free_highmem--;
			alloc_highmem--;
		} else {
			if (!to_free_normal)
				continue;
			to_free_normal--;
			alloc_normal--;
		}
		memory_bm_clear_bit(&copy_bm, pfn);
		swsusp_unset_page_forbidden(page);
		swsusp_unset_page_free(page);
		__free_page(page);
	}

	return free;
}

/**
 * minimum_image_size - Estimate the minimum acceptable size of an image.
 * @saveable: Number of saveable pages in the system.
 *
 * We want to avoid attempting to free too much memory too hard, so estimate
 * the minimum acceptable size of a hibernation image to use as the lower
 * limit for preallocating memory.
 *
 * We assume that the minimum image size should be proportional to
 *
 * [number of saveable pages] - [number of pages that can be freed in theory]
 *
 * where the second term is the sum of (1) reclaimable slab pages, (2) active
 * and (3) inactive anonymous pages, (4) active and (5) inactive file pages.
 */
static unsigned long minimum_image_size(unsigned long saveable)
{
	unsigned long size;

	size = global_node_page_state(NR_SLAB_RECLAIMABLE)
		+ global_node_page_state(NR_ACTIVE_ANON)
		+ global_node_page_state(NR_INACTIVE_ANON)
		+ global_node_page_state(NR_ACTIVE_FILE)
		+ global_node_page_state(NR_INACTIVE_FILE);

	return saveable <= size ? 0 : saveable - size;
}

/**
 * hibernate_preallocate_memory - Preallocate memory for hibernation image.
 *
 * To create a hibernation image it is necessary to make a copy of every page
 * frame in use.  We also need a number of page frames to be free during
 * hibernation for allocations made while saving the image and for device
 * drivers, in case they need to allocate memory from their hibernation
 * callbacks (these two numbers are given by PAGES_FOR_IO (which is a rough
 * estimate) and reserved_size divided by PAGE_SIZE (which is tunable through
 * /sys/power/reserved_size), respectively).  To make this happen, we compute
 * the total number of available page frames and allocate at least
 *
 * ([page frames total] + PAGES_FOR_IO + [metadata pages]) / 2
 *  + 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
 *
 * of them, which corresponds to the maximum size of a hibernation image.
 *
 * If image_size is set below the number following from the above formula,
 * the preallocation of memory is continued until the total number of saveable
 * pages in the system is below the requested image size or the minimum
 * acceptable image size returned by minimum_image_size(), whichever is
 * greater.
 */
int hibernate_preallocate_memory(void)
{
	struct zone *zone;
	unsigned long saveable, size, max_size, count, highmem, pages = 0;
	unsigned long alloc, save_highmem, pages_highmem, avail_normal;
	ktime_t start, stop;
	int error;

	pr_info("Preallocating image memory... ");
	start = ktime_get();

	error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY);
	if (error)
		goto err_out;

	error = memory_bm_create(&copy_bm, GFP_IMAGE, PG_ANY);
	if (error)
		goto err_out;

	alloc_normal = 0;
	alloc_highmem = 0;

	/* Count the number of saveable data pages. */
	save_highmem = count_highmem_pages();
	saveable = count_data_pages();

	/*
	 * Compute the total number of page frames we can use (count) and the
	 * number of pages needed for image metadata (size).
	 */
	count = saveable;
	saveable += save_highmem;
	highmem = save_highmem;
	size = 0;
	for_each_populated_zone(zone) {
		size += snapshot_additional_pages(zone);
		if (is_highmem(zone))
			highmem += zone_page_state(zone, NR_FREE_PAGES);
		else
			count += zone_page_state(zone, NR_FREE_PAGES);
	}
	avail_normal = count;
	count += highmem;
	count -= totalreserve_pages;

	/* Add number of pages required for page keys (s390 only). */
	size += page_key_additional_pages(saveable);

	/* Compute the maximum number of saveable pages to leave in memory. */
	max_size = (count - (size + PAGES_FOR_IO)) / 2
			- 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE);
	/* Compute the desired number of image pages specified by image_size. */
	size = DIV_ROUND_UP(image_size, PAGE_SIZE);
	if (size > max_size)
		size = max_size;
	/*
	 * If the desired number of image pages is at least as large as the
	 * current number of saveable pages in memory, allocate page frames for
	 * the image and we're done.
	 */
	if (size >= saveable) {
		pages = preallocate_image_highmem(save_highmem);
		pages += preallocate_image_memory(saveable - pages, avail_normal);
		goto out;
	}

	/* Estimate the minimum size of the image. */
	pages = minimum_image_size(saveable);
	/*
	 * To avoid excessive pressure on the normal zone, leave room in it to
	 * accommodate an image of the minimum size (unless it's already too
	 * small, in which case don't preallocate pages from it at all).
	 */
	if (avail_normal > pages)
		avail_normal -= pages;
	else
		avail_normal = 0;
	if (size < pages)
		size = min_t(unsigned long, pages, max_size);

	/*
	 * Let the memory management subsystem know that we're going to need a
	 * large number of page frames to allocate and make it free some memory.
	 * NOTE: If this is not done, performance will be hurt badly in some
	 * test cases.
	 */
	shrink_all_memory(saveable - size);

	/*
	 * The number of saveable pages in memory was too high, so apply some
	 * pressure to decrease it.  First, make room for the largest possible
	 * image and fail if that doesn't work.  Next, try to decrease the size
	 * of the image as much as indicated by 'size' using allocations from
	 * highmem and non-highmem zones separately.
	 */
	pages_highmem = preallocate_image_highmem(highmem / 2);
	alloc = count - max_size;
	if (alloc > pages_highmem)
		alloc -= pages_highmem;
	else
		alloc = 0;
	pages = preallocate_image_memory(alloc, avail_normal);
	if (pages < alloc) {
		/* We have exhausted non-highmem pages, try highmem. */
		alloc -= pages;
		pages += pages_highmem;
		pages_highmem = preallocate_image_highmem(alloc);
		if (pages_highmem < alloc)
			goto err_out;
		pages += pages_highmem;
		/*
		 * size is the desired number of saveable pages to leave in
		 * memory, so try to preallocate (all memory - size) pages.
		 */
		alloc = (count - pages) - size;
		pages += preallocate_image_highmem(alloc);
	} else {
		/*
		 * There are approximately max_size saveable pages at this
		 * point and we want to reduce this number down to size.
		 */
		alloc = max_size - size;
		size = preallocate_highmem_fraction(alloc, highmem, count);
		pages_highmem += size;
		alloc -= size;
		size = preallocate_image_memory(alloc, avail_normal);
		pages_highmem += preallocate_image_highmem(alloc - size);
		pages += pages_highmem + size;
	}

	/*
	 * We only need as many page frames for the image as there are saveable
	 * pages in memory, but we have allocated more.  Release the excessive
	 * ones now.
	 */
	pages -= free_unnecessary_pages();

 out:
	stop = ktime_get();
	pr_cont("done (allocated %lu pages)\n", pages);
	swsusp_show_speed(start, stop, pages, "Allocated");

	return 0;

 err_out:
	pr_cont("\n");
	swsusp_free();
	return -ENOMEM;
}

#ifdef CONFIG_HIGHMEM
/**
 * count_pages_for_highmem - Count non-highmem pages needed for copying highmem.
 *
 * Compute the number of non-highmem pages that will be necessary for creating
 * copies of highmem pages.
 */
static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
{
	unsigned int free_highmem = count_free_highmem_pages() + alloc_highmem;

	if (free_highmem >= nr_highmem)
		nr_highmem = 0;
	else
		nr_highmem -= free_highmem;

	return nr_highmem;
}
#else
static unsigned int count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
#endif /* CONFIG_HIGHMEM */

/**
 * enough_free_mem - Check if there is enough free memory for the image.
 */
static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
{
	struct zone *zone;
	unsigned int free = alloc_normal;

	for_each_populated_zone(zone)
		if (!is_highmem(zone))
			free += zone_page_state(zone, NR_FREE_PAGES);

	nr_pages += count_pages_for_highmem(nr_highmem);
	pr_debug("Normal pages needed: %u + %u, available pages: %u\n",
		 nr_pages, PAGES_FOR_IO, free);

	return free > nr_pages + PAGES_FOR_IO;
}

#ifdef CONFIG_HIGHMEM
/**
 * get_highmem_buffer - Allocate a buffer for highmem pages.
 *
 * If there are some highmem pages in the hibernation image, we may need a
 * buffer to copy them and/or load their data.
 */
static inline int get_highmem_buffer(int safe_needed)
{
	buffer = get_image_page(GFP_ATOMIC, safe_needed);
	return buffer ? 0 : -ENOMEM;
}

/**
 * alloc_highmem_pages - Allocate some highmem pages for the image.
 *
 * Try to allocate as many pages as needed, but if the number of free highmem
 * pages is less than that, allocate them all.
 */
static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
					       unsigned int nr_highmem)
{
	unsigned int to_alloc = count_free_highmem_pages();

	if (to_alloc > nr_highmem)
		to_alloc = nr_highmem;

	nr_highmem -= to_alloc;
	while (to_alloc-- > 0) {
		struct page *page;

		page = alloc_image_page(__GFP_HIGHMEM|__GFP_KSWAPD_RECLAIM);
		memory_bm_set_bit(bm, page_to_pfn(page));
	}
	return nr_highmem;
}
#else
static inline int get_highmem_buffer(int safe_needed) { return 0; }

static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
					       unsigned int n) { return 0; }
#endif /* CONFIG_HIGHMEM */

/**
 * swsusp_alloc - Allocate memory for hibernation image.
 *
 * We first try to allocate as many highmem pages as there are saveable
 * highmem pages in the system.  If that fails, we allocate non-highmem pages
 * for the copies of the remaining highmem ones.
 *
 * In this approach it is likely that the copies of highmem pages will also be
 * located in the high memory, because of the way in which free_pages_map is
 * created.
 */
static int swsusp_alloc(struct memory_bitmap *copy_bm,
			unsigned int nr_pages, unsigned int nr_highmem)
{
	if (nr_highmem > 0) {
		if (get_highmem_buffer(PG_ANY))
			goto err_out;
		if (nr_highmem > alloc_highmem) {
			nr_highmem -= alloc_highmem;
			nr_pages += alloc_highmem_pages(copy_bm, nr_highmem);
		}
	}
	if (nr_pages > alloc_normal) {
		nr_pages -= alloc_normal;
		while (nr_pages-- > 0) {
			struct page *page;

			page = alloc_image_page(GFP_ATOMIC);
			if (!page)
				goto err_out;
			memory_bm_set_bit(copy_bm, page_to_pfn(page));
		}
	}

	return 0;

 err_out:
	swsusp_free();
	return -ENOMEM;
}

asmlinkage __visible int swsusp_save(void)
{
	unsigned int nr_pages, nr_highmem;

	pr_info("Creating hibernation image:\n");

	drain_local_pages(NULL);
	nr_pages = count_data_pages();
	nr_highmem = count_highmem_pages();
	pr_info("Need to copy %u pages\n", nr_pages + nr_highmem);

	if (!enough_free_mem(nr_pages, nr_highmem)) {
		pr_err("Not enough free memory\n");
		return -ENOMEM;
	}

	if (swsusp_alloc(&copy_bm, nr_pages, nr_highmem)) {
		pr_err("Memory allocation failed\n");
		return -ENOMEM;
	}

	/*
	 * During allocating of suspend pagedir, new cold pages may appear.
	 * Kill them.
	 */
	drain_local_pages(NULL);
	copy_data_pages(&copy_bm, &orig_bm);

	/*
	 * End of critical section.  From now on, we can write to memory,
	 * but we should not touch disk.  This specially means we must _not_
	 * touch swap space!  Except we must write out our image of course.
	 */

	nr_pages += nr_highmem;
	nr_copy_pages = nr_pages;
	nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);

	pr_info("Hibernation image created (%d pages copied)\n", nr_pages);

	return 0;
}

#ifndef CONFIG_ARCH_HIBERNATION_HEADER
static int init_header_complete(struct swsusp_info *info)
{
	memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
	info->version_code = LINUX_VERSION_CODE;
	return 0;
}

static char *check_image_kernel(struct swsusp_info *info)
{
	if (info->version_code != LINUX_VERSION_CODE)
		return "kernel version";
	if (strcmp(info->uts.sysname,init_utsname()->sysname))
		return "system type";
	if (strcmp(info->uts.release,init_utsname()->release))
		return "kernel release";
	if (strcmp(info->uts.version,init_utsname()->version))
		return "version";
	if (strcmp(info->uts.machine,init_utsname()->machine))
		return "machine";
	return NULL;
}
#endif /* CONFIG_ARCH_HIBERNATION_HEADER */

unsigned long snapshot_get_image_size(void)
{
	return nr_copy_pages + nr_meta_pages + 1;
}
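
/*
 * Illustrative note: the image consists of one header page (struct
 * swsusp_info), nr_meta_pages pages of packed PFNs and nr_copy_pages data
 * pages, hence the "+ 1" above.  For example, on a 64-bit machine an image
 * with 100000 data pages needs DIV_ROUND_UP(100000 * 8, 4096) = 196 meta
 * pages, so snapshot_get_image_size() returns 100197.
 */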

static int init_header(struct swsusp_info *info)
{
	memset(info, 0, sizeof(struct swsusp_info));
	info->num_physpages = get_num_physpages();
	info->image_pages = nr_copy_pages;
	info->pages = snapshot_get_image_size();
	info->size = info->pages;
	info->size <<= PAGE_SHIFT;
	return init_header_complete(info);
}

/**
 * pack_pfns - Prepare PFNs for saving.
 * @bm: Memory bitmap.
 * @buf: Memory buffer to store the PFNs in.
 *
 * PFNs corresponding to set bits in @bm are stored in the area of memory
 * pointed to by @buf (1 page at a time).
 */
static inline void pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
		buf[j] = memory_bm_next_pfn(bm);
		if (unlikely(buf[j] == BM_END_OF_MAP))
			break;
		/* Save page key for data page (s390 only). */
		page_key_read(buf + j);
	}
}
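
/*
 * Illustrative layout (64-bit): each meta page holds PAGE_SIZE / 8 = 512
 * PFNs, so an image with 1000 data pages is described by two meta pages,
 * the second one holding 488 PFNs followed by a BM_END_OF_MAP terminator.
 */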

/**
 * snapshot_read_next - Get the address to read the next image page from.
 * @handle: Snapshot handle to be used for the reading.
 *
 * On the first call, @handle should point to a zeroed snapshot_handle
 * structure.  The structure gets populated then and a pointer to it should be
 * passed to this function every next time.
 *
 * On success, the function returns a positive number.  Then, the caller
 * is allowed to read up to the returned number of bytes from the memory
 * location computed by the data_of() macro.
 *
 * The function returns 0 to indicate the end of the data stream condition,
 * and negative numbers are returned on errors.  If that happens, the structure
 * pointed to by @handle is not updated and should not be used any more.
 */
int snapshot_read_next(struct snapshot_handle *handle)
{
	if (handle->cur > nr_meta_pages + nr_copy_pages)
		return 0;

	if (!buffer) {
		/* This makes the buffer be freed by swsusp_free() */
		buffer = get_image_page(GFP_ATOMIC, PG_ANY);
		if (!buffer)
			return -ENOMEM;
	}
	if (!handle->cur) {
		int error;

		error = init_header((struct swsusp_info *)buffer);
		if (error)
			return error;
		handle->buffer = buffer;
		memory_bm_position_reset(&orig_bm);
		memory_bm_position_reset(&copy_bm);
	} else if (handle->cur <= nr_meta_pages) {
		clear_page(buffer);
		pack_pfns(buffer, &orig_bm);
	} else {
		struct page *page;

		page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
		if (PageHighMem(page)) {
			/*
			 * Highmem pages are copied to the buffer,
			 * because we can't return with a kmapped
			 * highmem page (we may not be called again).
			 */
			void *kaddr;

			kaddr = kmap_atomic(page);
			copy_page(buffer, kaddr);
			kunmap_atomic(kaddr);
			handle->buffer = buffer;
		} else {
			handle->buffer = page_address(page);
		}
	}
	handle->cur++;
	return PAGE_SIZE;
}

static void duplicate_memory_bitmap(struct memory_bitmap *dst,
				    struct memory_bitmap *src)
{
	unsigned long pfn;

	memory_bm_position_reset(src);
	pfn = memory_bm_next_pfn(src);
	while (pfn != BM_END_OF_MAP) {
		memory_bm_set_bit(dst, pfn);
		pfn = memory_bm_next_pfn(src);
	}
}

/**
 * mark_unsafe_pages - Mark pages that were used before hibernation.
 *
 * Mark the pages that cannot be used for storing the image during restoration,
 * because they conflict with the pages that had been used before hibernation.
 */
static void mark_unsafe_pages(struct memory_bitmap *bm)
{
	unsigned long pfn;

	/* Clear the "free"/"unsafe" bit for all PFNs */
	memory_bm_position_reset(free_pages_map);
	pfn = memory_bm_next_pfn(free_pages_map);
	while (pfn != BM_END_OF_MAP) {
		memory_bm_clear_current(free_pages_map);
		pfn = memory_bm_next_pfn(free_pages_map);
	}

	/* Mark pages that correspond to the "original" PFNs as "unsafe" */
	duplicate_memory_bitmap(free_pages_map, bm);

	allocated_unsafe_pages = 0;
}

static int check_header(struct swsusp_info *info)
{
	char *reason;

	reason = check_image_kernel(info);
	if (!reason && info->num_physpages != get_num_physpages())
		reason = "memory size";
	if (reason) {
		pr_err("Image mismatch: %s\n", reason);
		return -EPERM;
	}
	return 0;
}

/**
 * load_header - Check the image header and copy the data from it.
 */
static int load_header(struct swsusp_info *info)
{
	int error;

	restore_pblist = NULL;
	error = check_header(info);
	if (!error) {
		nr_copy_pages = info->image_pages;
		nr_meta_pages = info->pages - info->image_pages - 1;
	}
	return error;
}

/**
 * unpack_orig_pfns - Retrieve PFNs and pack them into a bitmap.
 * @bm: Memory bitmap.
 * @buf: Area of memory containing the PFNs.
 *
 * For each element of the array pointed to by @buf (1 page at a time), set the
 * corresponding bit in @bm.
 */
static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
		if (unlikely(buf[j] == BM_END_OF_MAP))
			break;

		/* Extract and buffer page key for data page (s390 only). */
		page_key_memorize(buf + j);

		if (pfn_valid(buf[j]) && memory_bm_pfn_present(bm, buf[j]))
			memory_bm_set_bit(bm, buf[j]);
		else
			return -EFAULT;
	}

	return 0;
}

#ifdef CONFIG_HIGHMEM
/*
 * struct highmem_pbe is used for creating the list of highmem pages that
 * should be restored atomically during the resume from disk, because the page
 * frames they have occupied before the suspend are in use.
 */
struct highmem_pbe {
	struct page *copy_page;	/* data is here now */
	struct page *orig_page;	/* data was here before the suspend */
	struct highmem_pbe *next;
};

/*
 * List of highmem PBEs needed for restoring the highmem pages that were
 * allocated before the suspend and included in the suspend image, but have
 * also been allocated by the "resume" kernel, so their contents cannot be
 * written directly to their "original" page frames.
 */
static struct highmem_pbe *highmem_pblist;

/**
 * count_highmem_image_pages - Compute the number of highmem pages in the image.
 * @bm: Memory bitmap.
 *
 * The bits in @bm that correspond to image pages are assumed to be set.
 */
static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
{
	unsigned long pfn;
	unsigned int cnt = 0;

	memory_bm_position_reset(bm);
	pfn = memory_bm_next_pfn(bm);
	while (pfn != BM_END_OF_MAP) {
		if (PageHighMem(pfn_to_page(pfn)))
			cnt++;

		pfn = memory_bm_next_pfn(bm);
	}
	return cnt;
}

static unsigned int safe_highmem_pages;

static struct memory_bitmap *safe_highmem_bm;

/**
 * prepare_highmem_image - Allocate memory for loading highmem data from image.
 * @bm: Pointer to an uninitialized memory bitmap structure.
 * @nr_highmem_p: Pointer to the number of highmem image pages.
 *
 * Try to allocate as many highmem pages as there are highmem image pages
 * (@nr_highmem_p points to the variable containing the number of highmem image
 * pages).  The pages that are "safe" (ie. will not be overwritten when the
 * hibernation image is restored entirely) have the corresponding bits set in
 * @bm (it must be uninitialized).
 *
 * NOTE: This function should not be called if there are no highmem image pages.
 */
static int prepare_highmem_image(struct memory_bitmap *bm,
				 unsigned int *nr_highmem_p)
{
	unsigned int to_alloc;

	if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
		return -ENOMEM;

	if (get_highmem_buffer(PG_SAFE))
		return -ENOMEM;

	to_alloc = count_free_highmem_pages();
	if (to_alloc > *nr_highmem_p)
		to_alloc = *nr_highmem_p;
	else
		*nr_highmem_p = to_alloc;

	safe_highmem_pages = 0;
	while (to_alloc-- > 0) {
		struct page *page;

		page = alloc_page(__GFP_HIGHMEM);
		if (!swsusp_page_is_free(page)) {
			/* The page is "safe", set its bit in the bitmap */
			memory_bm_set_bit(bm, page_to_pfn(page));
			safe_highmem_pages++;
		}
		/* Mark the page as allocated */
		swsusp_set_page_forbidden(page);
		swsusp_set_page_free(page);
	}
	memory_bm_position_reset(bm);
	safe_highmem_bm = bm;
	return 0;
}

static struct page *last_highmem_page;

/**
 * get_highmem_page_buffer - Prepare a buffer to store a highmem image page.
 *
 * For a given highmem image page get a buffer that suspend_write_next() should
 * return to its caller to write to.
 *
 * If the page is to be saved to its "original" page frame or a copy of
 * the page is to be made in the highmem, @buffer is returned.  Otherwise,
 * the copy of the page is to be made in normal memory, so the address of
 * the copy is returned.
 *
 * If @buffer is returned, the caller of suspend_write_next() will write
 * the page's contents to @buffer, so they will have to be copied to the
 * right location on the next call to suspend_write_next() and it is done
 * with the help of copy_last_highmem_page().  For this purpose, if
 * @buffer is returned, @last_highmem_page is set to the page to which
 * the data will have to be copied from @buffer.
 */
static void *get_highmem_page_buffer(struct page *page,
				     struct chain_allocator *ca)
{
	struct highmem_pbe *pbe;
	void *kaddr;

	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) {
		/*
		 * We have allocated the "original" page frame and we can
		 * use it directly to store the loaded page.
		 */
		last_highmem_page = page;
		return buffer;
	}

	/*
	 * The "original" page frame has not been allocated and we have to
	 * use a "safe" page frame to store the loaded page.
	 */
	pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
	if (!pbe) {
		swsusp_free();
		return ERR_PTR(-ENOMEM);
	}
	pbe->orig_page = page;
	if (safe_highmem_pages > 0) {
		struct page *tmp;

		/* Copy of the page will be stored in high memory */
		kaddr = buffer;
		tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm));
		safe_highmem_pages--;
		last_highmem_page = tmp;
		pbe->copy_page = tmp;
	} else {
		/* Copy of the page will be stored in normal memory */
		kaddr = safe_pages_list;
		safe_pages_list = safe_pages_list->next;
		pbe->copy_page = virt_to_page(kaddr);
	}
	pbe->next = highmem_pblist;
	highmem_pblist = pbe;
	return kaddr;
}

/**
 * copy_last_highmem_page - Copy the most recent highmem image page.
 *
 * Copy the contents of a highmem image from @buffer, where the caller of
 * snapshot_write_next() has stored them, to the right location represented by
 * @last_highmem_page .
 */
static void copy_last_highmem_page(void)
{
	if (last_highmem_page) {
		void *dst;

		dst = kmap_atomic(last_highmem_page);
		copy_page(dst, buffer);
		kunmap_atomic(dst);
		last_highmem_page = NULL;
	}
}

static inline int last_highmem_page_copied(void)
{
	return !last_highmem_page;
}

static inline void free_highmem_data(void)
{
	if (safe_highmem_bm)
		memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR);

	if (buffer)
		free_image_page(buffer, PG_UNSAFE_CLEAR);
}
#else
static unsigned int count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }

static inline int prepare_highmem_image(struct memory_bitmap *bm,
					unsigned int *nr_highmem_p) { return 0; }

static inline void *get_highmem_page_buffer(struct page *page,
					    struct chain_allocator *ca)
{
	return ERR_PTR(-EINVAL);
}

static inline void copy_last_highmem_page(void) {}
static inline int last_highmem_page_copied(void) { return 1; }
static inline void free_highmem_data(void) {}
#endif /* CONFIG_HIGHMEM */

#define PBES_PER_LINKED_PAGE	(LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))
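
/*
 * Worked example (illustrative, 64-bit, 4 KiB pages): LINKED_PAGE_DATA_SIZE
 * is 4096 - 8 = 4088 bytes and struct pbe holds three pointers (24 bytes),
 * so each linked page provides room for 4088 / 24 = 170 PBEs.
 */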

/**
 * prepare_image - Make room for loading hibernation image.
 * @new_bm: Uninitialized memory bitmap structure.
 * @bm: Memory bitmap with unsafe pages marked.
 *
 * Use @bm to mark the pages that will be overwritten in the process of
 * restoring the system memory state from the suspend image ("unsafe" pages)
 * and allocate memory for the image.
 *
 * The idea is to allocate a new memory bitmap first and then allocate
 * as many pages as needed for image data, but without specifying what those
 * pages will be used for just yet.  Instead, we mark them all as allocated and
 * create a list of "safe" pages to be used later.  On systems with high
 * memory a list of "safe" highmem pages is created too.
 */
static int prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
{
	unsigned int nr_pages, nr_highmem;
	struct linked_page *lp;
	int error;

	/* If there is no highmem, the buffer will not be necessary */
	free_image_page(buffer, PG_UNSAFE_CLEAR);
	buffer = NULL;

	nr_highmem = count_highmem_image_pages(bm);
	mark_unsafe_pages(bm);

	error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
	if (error)
		goto Free;

	duplicate_memory_bitmap(new_bm, bm);
	memory_bm_free(bm, PG_UNSAFE_KEEP);
	if (nr_highmem > 0) {
		error = prepare_highmem_image(bm, &nr_highmem);
		if (error)
			goto Free;
	}

	/*
	 * Reserve some safe pages for potential later use.
	 *
	 * NOTE: This way we make sure there will be enough safe pages for the
	 * chain_alloc() in get_buffer().  It is a bit wasteful, but
	 * nr_copy_pages cannot be greater than 50% of the memory anyway.
	 *
	 * nr_copy_pages cannot be less than allocated_unsafe_pages too.
	 */
	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
	nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
	while (nr_pages > 0) {
		lp = get_image_page(GFP_ATOMIC, PG_SAFE);
		if (!lp) {
			error = -ENOMEM;
			goto Free;
		}
		lp->next = safe_pages_list;
		safe_pages_list = lp;
		nr_pages--;
	}
	/* Preallocate memory for the image */
	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
	while (nr_pages > 0) {
		lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
		if (!lp) {
			error = -ENOMEM;
			goto Free;
		}
		if (!swsusp_page_is_free(virt_to_page(lp))) {
			/* The page is "safe", add it to the list */
			lp->next = safe_pages_list;
			safe_pages_list = lp;
		}
		/* Mark the page as allocated */
		swsusp_set_page_forbidden(virt_to_page(lp));
		swsusp_set_page_free(virt_to_page(lp));
		nr_pages--;
	}
	return 0;

 Free:
	swsusp_free();
	return error;
}

/**
 * get_buffer - Get the address to store the next image data page.
 *
 * Get the address that snapshot_write_next() should return to its caller to
 * write to.
 */
static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
{
	struct pbe *pbe;
	struct page *page;
	unsigned long pfn = memory_bm_next_pfn(bm);

	if (pfn == BM_END_OF_MAP)
		return ERR_PTR(-EFAULT);

	page = pfn_to_page(pfn);
	if (PageHighMem(page))
		return get_highmem_page_buffer(page, ca);

	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page))
		/*
		 * We have allocated the "original" page frame and we can
		 * use it directly to store the loaded page.
		 */
		return page_address(page);

	/*
	 * The "original" page frame has not been allocated and we have to
	 * use a "safe" page frame to store the loaded page.
	 */
	pbe = chain_alloc(ca, sizeof(struct pbe));
	if (!pbe) {
		swsusp_free();
		return ERR_PTR(-ENOMEM);
	}
	pbe->orig_address = page_address(page);
	pbe->address = safe_pages_list;
	safe_pages_list = safe_pages_list->next;
	pbe->next = restore_pblist;
	restore_pblist = pbe;
	return pbe->address;
}

/**
 * snapshot_write_next - Get the address to store the next image page.
 * @handle: Snapshot handle structure to guide the loading.
 *
 * On the first call, @handle should point to a zeroed snapshot_handle
 * structure.  The structure gets populated then and a pointer to it should be
 * passed to this function every next time.
 *
 * On success, the function returns a positive number.  Then, the caller
 * is allowed to write up to the returned number of bytes to the memory
 * location computed by the data_of() macro.
 *
 * The function returns 0 to indicate the "end of file" condition.  Negative
 * numbers are returned on errors, in which cases the structure pointed to by
 * @handle is not updated and should not be used any more.
 */
int snapshot_write_next(struct snapshot_handle *handle)
{
	static struct chain_allocator ca;
	int error = 0;

	/* Check if we have already loaded the entire image */
	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages)
		return 0;

	handle->sync_read = 1;

	if (!handle->cur) {
		if (!buffer)
			/* This makes the buffer be freed by swsusp_free() */
			buffer = get_image_page(GFP_ATOMIC, PG_ANY);

		if (!buffer)
			return -ENOMEM;

		handle->buffer = buffer;
	} else if (handle->cur == 1) {
		error = load_header(buffer);
		if (error)
			return error;

		safe_pages_list = NULL;

		error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
		if (error)
			return error;

		/* Allocate buffer for page keys. */
		error = page_key_alloc(nr_copy_pages);
		if (error)
			return error;

		hibernate_restore_protection_begin();
	} else if (handle->cur <= nr_meta_pages + 1) {
		error = unpack_orig_pfns(buffer, &copy_bm);
		if (error)
			return error;

		if (handle->cur == nr_meta_pages + 1) {
			error = prepare_image(&orig_bm, &copy_bm);
			if (error)
				return error;

			chain_init(&ca, GFP_ATOMIC, PG_SAFE);
			memory_bm_position_reset(&orig_bm);
			restore_pblist = NULL;
			handle->buffer = get_buffer(&orig_bm, &ca);
			handle->sync_read = 0;
			if (IS_ERR(handle->buffer))
				return PTR_ERR(handle->buffer);
		}
	} else {
		copy_last_highmem_page();
		/* Restore page key for data page (s390 only). */
		page_key_write(handle->buffer);
		hibernate_restore_protect_page(handle->buffer);
		handle->buffer = get_buffer(&orig_bm, &ca);
		if (IS_ERR(handle->buffer))
			return PTR_ERR(handle->buffer);
		if (handle->buffer != buffer)
			handle->sync_read = 0;
	}
	handle->cur++;
	return PAGE_SIZE;
}

/**
 * snapshot_write_finalize - Complete the loading of a hibernation image.
 *
 * Must be called after the last call to snapshot_write_next() in case the last
 * page in the image happens to be a highmem page and its contents should be
 * stored in highmem.  Additionally, it recycles bitmap memory that's not
 * necessary any more.
 */
void snapshot_write_finalize(struct snapshot_handle *handle)
{
	copy_last_highmem_page();
	/* Restore page key for data page (s390 only). */
	page_key_write(handle->buffer);
	page_key_free();
	hibernate_restore_protect_page(handle->buffer);
	/* Do that only if we have loaded the image entirely */
	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) {
		memory_bm_recycle(&orig_bm);
		free_highmem_data();
	}
}

int snapshot_image_loaded(struct snapshot_handle *handle)
{
	return !(!nr_copy_pages || !last_highmem_page_copied() ||
		 handle->cur <= nr_meta_pages + nr_copy_pages);
}

#ifdef CONFIG_HIGHMEM
/* Assumes that @buf is ready and points to a "safe" page */
static inline void swap_two_pages_data(struct page *p1, struct page *p2,
				       void *buf)
{
	void *kaddr1, *kaddr2;

	kaddr1 = kmap_atomic(p1);
	kaddr2 = kmap_atomic(p2);
	copy_page(buf, kaddr1);
	copy_page(kaddr1, kaddr2);
	copy_page(kaddr2, buf);
	kunmap_atomic(kaddr2);
	kunmap_atomic(kaddr1);
}

/**
 * restore_highmem - Put highmem image pages into their original locations.
 *
 * For each highmem page that was in use before hibernation and is included in
 * the image, and also has been allocated by the "restore" kernel, swap its
 * current contents with the previous (ie. "before hibernation") ones.
 *
 * If the restore eventually fails, we can call this function once again and
 * restore the highmem state as seen by the restore kernel.
 */
int restore_highmem(void)
{
	struct highmem_pbe *pbe = highmem_pblist;
	void *buf;

	if (!pbe)
		return 0;

	buf = get_image_page(GFP_ATOMIC, PG_SAFE);
	if (!buf)
		return -ENOMEM;

	while (pbe) {
		swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
		pbe = pbe->next;
	}
	free_image_page(buf, PG_UNSAFE_CLEAR);
	return 0;
}
#endif /* CONFIG_HIGHMEM */