1
2
3
4
5
6
7
8
9
10
11
12
13#include <linux/version.h>
14#include <linux/module.h>
15#include <linux/mm.h>
16#include <linux/suspend.h>
17#include <linux/delay.h>
18#include <linux/bitops.h>
19#include <linux/spinlock.h>
20#include <linux/kernel.h>
21#include <linux/pm.h>
22#include <linux/device.h>
23#include <linux/init.h>
24#include <linux/bootmem.h>
25#include <linux/syscalls.h>
26#include <linux/console.h>
27#include <linux/highmem.h>
28#include <linux/list.h>
29#include <linux/slab.h>
30#include <linux/compiler.h>
31
32#include <asm/uaccess.h>
33#include <asm/mmu_context.h>
34#include <asm/pgtable.h>
35#include <asm/tlbflush.h>
36#include <asm/io.h>
37
38#include "power.h"
39
/* Forward declarations of page-flag helpers defined later in this file. */
static int swsusp_page_is_free(struct page *);
static void swsusp_set_page_forbidden(struct page *);
static void swsusp_unset_page_forbidden(struct page *);
43
44
45
46
47
48
/*
 * Number of bytes to reserve for memory allocations made by device drivers
 * from their hibernation callbacks, so that they do not get page allocation
 * failures while the hibernation image is being created.
 */
unsigned long reserved_size;

void __init hibernate_reserved_size_init(void)
{
	reserved_size = SPARE_PAGES * PAGE_SIZE;
}
55
56
57
58
59
60
61
/*
 * Preferred image size in bytes (default: 2/5 of total RAM).  The image
 * creation code will do its best to keep the image below this size.
 */
unsigned long image_size;

void __init hibernate_image_size_init(void)
{
	image_size = ((totalram_pages * 2) / 5) * PAGE_SIZE;
}
68
69
70
71
72
73
/*
 * List of PBEs needed for restoring the pages that were allocated before
 * the suspend and included in the suspend image, but have also been
 * allocated by the "resume" kernel, so their contents cannot be written
 * directly to their "original" page frames.
 */
struct pbe *restore_pblist;

/* Pointer to one auxiliary page used as a bounce buffer. */
static void *buffer;
78
79
80
81
82
83
84
85
86
87
88
/*
 * @safe_needed semantics: during resume, only "safe" page frames (ones
 * that were free before the suspend) may be used for the image data and
 * metadata, so they do not conflict with the image being restored.
 * Unsafe pages allocated along the way are counted in
 * allocated_unsafe_pages and marked for later release.
 */
#define PG_ANY		0
#define PG_SAFE		1
#define PG_UNSAFE_CLEAR	1
#define PG_UNSAFE_KEEP	0

static unsigned int allocated_unsafe_pages;
95
/*
 * get_image_page - allocate a zeroed page for the image.
 *
 * If @safe_needed is set, keep allocating until a page is obtained that
 * was free before hibernation (i.e. not marked in free_pages_map); pages
 * rejected along the way are marked forbidden and counted in
 * allocated_unsafe_pages so swsusp_free() can reclaim them.
 * The returned page is marked both forbidden and free so that
 * swsusp_free() can identify it as an image page.
 */
static void *get_image_page(gfp_t gfp_mask, int safe_needed)
{
	void *res;

	res = (void *)get_zeroed_page(gfp_mask);
	if (safe_needed)
		while (res && swsusp_page_is_free(virt_to_page(res))) {
			/* The page is unsafe, mark it for swsusp_free() */
			swsusp_set_page_forbidden(virt_to_page(res));
			allocated_unsafe_pages++;
			res = (void *)get_zeroed_page(gfp_mask);
		}
	if (res) {
		swsusp_set_page_forbidden(virt_to_page(res));
		swsusp_set_page_free(virt_to_page(res));
	}
	return res;
}
114
/* Allocate a page that does not conflict with any page used before suspend. */
unsigned long get_safe_page(gfp_t gfp_mask)
{
	return (unsigned long)get_image_page(gfp_mask, PG_SAFE);
}
119
120static struct page *alloc_image_page(gfp_t gfp_mask)
121{
122 struct page *page;
123
124 page = alloc_page(gfp_mask);
125 if (page) {
126 swsusp_set_page_forbidden(page);
127 swsusp_set_page_free(page);
128 }
129 return page;
130}
131
132
133
134
135
136
/*
 * free_image_page - release a page allocated for the image.
 * @addr: virtual address of the page (must be a valid lowmem address).
 * @clear_nosave_free: also clear the "free" marker bit if set.
 *
 * Undoes the marking done by get_image_page()/alloc_image_page() and
 * returns the page to the page allocator.
 */
static inline void free_image_page(void *addr, int clear_nosave_free)
{
	struct page *page;

	BUG_ON(!virt_addr_valid(addr));

	page = virt_to_page(addr);

	swsusp_unset_page_forbidden(page);
	if (clear_nosave_free)
		swsusp_unset_page_free(page);

	__free_page(page);
}
151
152
153
/* One page split into a "next" pointer plus as much payload as fits. */
#define LINKED_PAGE_DATA_SIZE	(PAGE_SIZE - sizeof(void *))

struct linked_page {
	struct linked_page *next;
	char data[LINKED_PAGE_DATA_SIZE];
} __packed;
160
161static inline void
162free_list_of_pages(struct linked_page *list, int clear_page_nosave)
163{
164 while (list) {
165 struct linked_page *lp = list->next;
166
167 free_image_page(list, clear_page_nosave);
168 list = lp;
169 }
170}
171
172
173
174
175
176
177
178
179
180
181
182
183
184
/*
 * struct chain_allocator - simple bump allocator backed by a chain of
 * linked pages.
 * @chain:	 the chain of allocated pages
 * @used_space:	 number of bytes already used in the current page
 * @gfp_mask:	 mask for allocating new pages
 * @safe_needed: if set, only "safe" pages are allocated
 */
struct chain_allocator {
	struct linked_page *chain;
	unsigned int used_space;
	gfp_t gfp_mask;
	int safe_needed;
};
193
194static void
195chain_init(struct chain_allocator *ca, gfp_t gfp_mask, int safe_needed)
196{
197 ca->chain = NULL;
198 ca->used_space = LINKED_PAGE_DATA_SIZE;
199 ca->gfp_mask = gfp_mask;
200 ca->safe_needed = safe_needed;
201}
202
/*
 * chain_alloc - carve @size bytes out of the current linked page, grabbing
 * a fresh page first when the remaining space is too small.  Returns NULL
 * on allocation failure.  NOTE: @size must not exceed
 * LINKED_PAGE_DATA_SIZE, otherwise the returned span overruns the page.
 */
static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
{
	void *ret;

	if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
		struct linked_page *lp;

		lp = get_image_page(ca->gfp_mask, ca->safe_needed);
		if (!lp)
			return NULL;

		lp->next = ca->chain;
		ca->chain = lp;
		ca->used_space = 0;
	}
	ret = ca->chain->data + ca->used_space;
	ca->used_space += size;
	return ret;
}
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
/* Value returned by memory_bm_next_pfn() when the bitmap is exhausted. */
#define BM_END_OF_MAP	(~0UL)

/* Number of page frames covered by one page of bitmap data. */
#define BM_BITS_PER_BLOCK	(PAGE_SIZE * BITS_PER_BYTE)

struct bm_block {
	struct list_head hook;		/* hook into a list of bitmap blocks */
	unsigned long start_pfn;	/* pfn represented by the first bit */
	unsigned long end_pfn;		/* pfn represented by the last bit + 1 */
	unsigned long *data;		/* bitmap representing the page frames */
};

/* Number of bits (page frames) covered by @bb. */
static inline unsigned long bm_block_bits(struct bm_block *bb)
{
	return bb->end_pfn - bb->start_pfn;
}
268
269
270
/* Cursor used when traversing a memory bitmap. */
struct bm_position {
	struct bm_block *block;
	int bit;
};

struct memory_bitmap {
	struct list_head blocks;	/* list of bitmap blocks */
	struct linked_page *p_list;	/* list of pages used to store the
					 * bitmap block objects themselves */
	struct bm_position cur;		/* most recently used bit position,
					 * cached to speed up nearby lookups */
};
284
285
286
287static void memory_bm_position_reset(struct memory_bitmap *bm)
288{
289 bm->cur.block = list_entry(bm->blocks.next, struct bm_block, hook);
290 bm->cur.bit = 0;
291}
292
293static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);
294
295
296
297
298
299
300
301static int create_bm_block_list(unsigned long pages,
302 struct list_head *list,
303 struct chain_allocator *ca)
304{
305 unsigned int nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);
306
307 while (nr_blocks-- > 0) {
308 struct bm_block *bb;
309
310 bb = chain_alloc(ca, sizeof(struct bm_block));
311 if (!bb)
312 return -ENOMEM;
313 list_add(&bb->hook, list);
314 }
315
316 return 0;
317}
318
/* A contiguous range of page frames, used while building a memory bitmap. */
struct mem_extent {
	struct list_head hook;
	unsigned long start;
	unsigned long end;
};
324
325
326
327
328
329static void free_mem_extents(struct list_head *list)
330{
331 struct mem_extent *ext, *aux;
332
333 list_for_each_entry_safe(ext, aux, list, hook) {
334 list_del(&ext->hook);
335 kfree(ext);
336 }
337}
338
339
340
341
342
343
344
/*
 * create_mem_extents - build a sorted list of PFN extents covering all
 * populated zones.
 * @list: list to put the extents into.
 * @gfp_mask: mask to use for memory allocations.
 *
 * Extents are kept sorted by start PFN; overlapping or touching zone
 * ranges are merged into a single extent.  On allocation failure the
 * partially-built list is freed and -ENOMEM is returned.
 */
static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
{
	struct zone *zone;

	INIT_LIST_HEAD(list);

	for_each_populated_zone(zone) {
		unsigned long zone_start, zone_end;
		struct mem_extent *ext, *cur, *aux;

		zone_start = zone->zone_start_pfn;
		zone_end = zone_end_pfn(zone);

		/* Find the first extent the zone's range may overlap. */
		list_for_each_entry(ext, list, hook)
			if (zone_start <= ext->end)
				break;

		if (&ext->hook == list || zone_end < ext->start) {
			/* New extent is necessary */
			struct mem_extent *new_ext;

			new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask);
			if (!new_ext) {
				free_mem_extents(list);
				return -ENOMEM;
			}
			new_ext->start = zone_start;
			new_ext->end = zone_end;
			list_add_tail(&new_ext->hook, &ext->hook);
			continue;
		}

		/* Merge this zone's range of PFNs with the existing one */
		if (zone_start < ext->start)
			ext->start = zone_start;
		if (zone_end > ext->end)
			ext->end = zone_end;

		/* More merging may be possible with the following extents */
		cur = ext;
		list_for_each_entry_safe_continue(cur, aux, list, hook) {
			if (zone_end < cur->start)
				break;
			if (zone_end < cur->end)
				ext->end = cur->end;
			list_del(&cur->hook);
			kfree(cur);
		}
	}

	return 0;
}
397
398
399
400
/*
 * memory_bm_create - allocate and initialize a memory bitmap covering all
 * populated zones.  Bitmap block objects come from a chain allocator;
 * each block's bitmap data is one image page.
 * Returns 0 on success; on failure everything allocated so far is freed.
 */
static int
memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, int safe_needed)
{
	struct chain_allocator ca;
	struct list_head mem_extents;
	struct mem_extent *ext;
	int error;

	chain_init(&ca, gfp_mask, safe_needed);
	INIT_LIST_HEAD(&bm->blocks);

	error = create_mem_extents(&mem_extents, gfp_mask);
	if (error)
		return error;

	list_for_each_entry(ext, &mem_extents, hook) {
		struct bm_block *bb;
		unsigned long pfn = ext->start;
		unsigned long pages = ext->end - ext->start;

		/* Remember the current tail so the new blocks can be found. */
		bb = list_entry(bm->blocks.prev, struct bm_block, hook);

		error = create_bm_block_list(pages, bm->blocks.prev, &ca);
		if (error)
			goto Error;

		/* Assign a bitmap page and a PFN range to each new block. */
		list_for_each_entry_continue(bb, &bm->blocks, hook) {
			bb->data = get_image_page(gfp_mask, safe_needed);
			if (!bb->data) {
				error = -ENOMEM;
				goto Error;
			}

			bb->start_pfn = pfn;
			if (pages >= BM_BITS_PER_BLOCK) {
				pfn += BM_BITS_PER_BLOCK;
				pages -= BM_BITS_PER_BLOCK;
			} else {
				/* This is executed only once in the loop */
				pfn += pages;
			}
			bb->end_pfn = pfn;
		}
	}

	bm->p_list = ca.chain;
	memory_bm_position_reset(bm);
 Exit:
	free_mem_extents(&mem_extents);
	return error;

 Error:
	bm->p_list = ca.chain;
	memory_bm_free(bm, PG_UNSAFE_CLEAR);
	goto Exit;
}
457
458
459
460
461static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
462{
463 struct bm_block *bb;
464
465 list_for_each_entry(bb, &bm->blocks, hook)
466 if (bb->data)
467 free_image_page(bb->data, clear_nosave_free);
468
469 free_list_of_pages(bm->p_list, clear_nosave_free);
470
471 INIT_LIST_HEAD(&bm->blocks);
472}
473
474
475
476
477
478
/*
 * memory_bm_find_bit - locate the bit representing @pfn.
 *
 * Starts the search from the most recently used block (bm->cur.block) and
 * walks backward or forward through the sorted block list as needed, which
 * makes lookups of nearby PFNs cheap.  On success the cursor is advanced
 * just past @pfn, the block's bitmap address is stored in *@addr and the
 * bit index within it in *@bit_nr.
 *
 * Returns 0 on success, -EFAULT if @pfn is not covered by the bitmap.
 */
static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
				void **addr, unsigned int *bit_nr)
{
	struct bm_block *bb;

	/* Walk backward if @pfn lies before the cached block ... */
	bb = bm->cur.block;
	if (pfn < bb->start_pfn)
		list_for_each_entry_continue_reverse(bb, &bm->blocks, hook)
			if (pfn >= bb->start_pfn)
				break;

	/* ... or forward if it lies after it. */
	if (pfn >= bb->end_pfn)
		list_for_each_entry_continue(bb, &bm->blocks, hook)
			if (pfn >= bb->start_pfn && pfn < bb->end_pfn)
				break;

	/* Ran off the end of the list: @pfn is not in the bitmap. */
	if (&bb->hook == &bm->blocks)
		return -EFAULT;

	/* Cache the block and advance the cursor past @pfn. */
	bm->cur.block = bb;
	pfn -= bb->start_pfn;
	bm->cur.bit = pfn + 1;
	*bit_nr = pfn;
	*addr = bb->data;
	return 0;
}
510
/* Set the bit for @pfn; @pfn must be covered by the bitmap (BUG otherwise). */
static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	unsigned int bit;
	void *addr;
	int err = memory_bm_find_bit(bm, pfn, &addr, &bit);

	BUG_ON(err);
	set_bit(bit, addr);
}
521
/* Like memory_bm_set_bit(), but return an error instead of BUG()ing. */
static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn)
{
	unsigned int bit;
	void *addr;
	int err = memory_bm_find_bit(bm, pfn, &addr, &bit);

	if (err)
		return err;

	set_bit(bit, addr);
	return 0;
}
533
/* Clear the bit for @pfn; @pfn must be covered by the bitmap. */
static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	unsigned int bit;
	void *addr;
	int err = memory_bm_find_bit(bm, pfn, &addr, &bit);

	BUG_ON(err);
	clear_bit(bit, addr);
}
544
/* Return the bit value for @pfn; @pfn must be covered by the bitmap. */
static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	unsigned int bit;
	void *addr;
	int err = memory_bm_find_bit(bm, pfn, &addr, &bit);

	BUG_ON(err);
	return test_bit(bit, addr);
}
555
556static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
557{
558 void *addr;
559 unsigned int bit;
560
561 return !memory_bm_find_bit(bm, pfn, &addr, &bit);
562}
563
564
565
566
567
568
569
570
571
572
/*
 * memory_bm_next_pfn - find the PFN of the next set bit in @bm and advance
 * the cursor past it.  Returns BM_END_OF_MAP (and resets the cursor) when
 * no more set bits remain.
 *
 * memory_bm_position_reset() must be called before the first call.
 */
static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
{
	struct bm_block *bb;
	int bit;

	bb = bm->cur.block;
	do {
		bit = bm->cur.bit;
		bit = find_next_bit(bb->data, bm_block_bits(bb), bit);
		if (bit < bm_block_bits(bb))
			goto Return_pfn;

		/* No set bit left in this block; move to the next one. */
		bb = list_entry(bb->hook.next, struct bm_block, hook);
		bm->cur.block = bb;
		bm->cur.bit = 0;
	} while (&bb->hook != &bm->blocks);

	memory_bm_position_reset(bm);
	return BM_END_OF_MAP;

 Return_pfn:
	bm->cur.bit = bit + 1;
	return bb->start_pfn + bit;
}
597
598
599
600
601
602
/*
 * Range of page frames whose contents should not be saved during
 * hibernation.  The list is kept sorted by start_pfn (regions are
 * registered in address order during early init).
 */
struct nosave_region {
	struct list_head list;
	unsigned long start_pfn;
	unsigned long end_pfn;
};

static LIST_HEAD(nosave_regions);
610
611
612
613
614
615
616
617void __init
618__register_nosave_region(unsigned long start_pfn, unsigned long end_pfn,
619 int use_kmalloc)
620{
621 struct nosave_region *region;
622
623 if (start_pfn >= end_pfn)
624 return;
625
626 if (!list_empty(&nosave_regions)) {
627
628 region = list_entry(nosave_regions.prev,
629 struct nosave_region, list);
630 if (region->end_pfn == start_pfn) {
631 region->end_pfn = end_pfn;
632 goto Report;
633 }
634 }
635 if (use_kmalloc) {
636
637 region = kmalloc(sizeof(struct nosave_region), GFP_KERNEL);
638 BUG_ON(!region);
639 } else
640
641 region = memblock_virt_alloc(sizeof(struct nosave_region), 0);
642 region->start_pfn = start_pfn;
643 region->end_pfn = end_pfn;
644 list_add_tail(®ion->list, &nosave_regions);
645 Report:
646 printk(KERN_INFO "PM: Registered nosave memory: [mem %#010llx-%#010llx]\n",
647 (unsigned long long) start_pfn << PAGE_SHIFT,
648 ((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
649}
650
651
652
653
654
/* Bitmap of page frames that must not be included in the image. */
static struct memory_bitmap *forbidden_pages_map;

/* Bitmap of page frames that were free before the suspend. */
static struct memory_bitmap *free_pages_map;
659
660
661
662
663
664
665void swsusp_set_page_free(struct page *page)
666{
667 if (free_pages_map)
668 memory_bm_set_bit(free_pages_map, page_to_pfn(page));
669}
670
671static int swsusp_page_is_free(struct page *page)
672{
673 return free_pages_map ?
674 memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0;
675}
676
677void swsusp_unset_page_free(struct page *page)
678{
679 if (free_pages_map)
680 memory_bm_clear_bit(free_pages_map, page_to_pfn(page));
681}
682
683static void swsusp_set_page_forbidden(struct page *page)
684{
685 if (forbidden_pages_map)
686 memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page));
687}
688
689int swsusp_page_is_forbidden(struct page *page)
690{
691 return forbidden_pages_map ?
692 memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0;
693}
694
695static void swsusp_unset_page_forbidden(struct page *page)
696{
697 if (forbidden_pages_map)
698 memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
699}
700
701
702
703
704
705
/*
 * mark_nosave_pages - set bits in @bm for every valid PFN inside the
 * registered nosave regions, so those pages are treated as unsaveable.
 */
static void mark_nosave_pages(struct memory_bitmap *bm)
{
	struct nosave_region *region;

	if (list_empty(&nosave_regions))
		return;

	list_for_each_entry(region, &nosave_regions, list) {
		unsigned long pfn;

		pr_debug("PM: Marking nosave pages: [mem %#010llx-%#010llx]\n",
			 (unsigned long long) region->start_pfn << PAGE_SHIFT,
			 ((unsigned long long) region->end_pfn << PAGE_SHIFT)
				- 1);

		for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
			if (pfn_valid(pfn)) {
				/*
				 * The checking variant is used (and its
				 * error ignored) because a nosave region
				 * may extend past the zones covered by the
				 * bitmap; PFNs outside the bitmap don't
				 * need marking anyway.
				 */
				mem_bm_set_bit_check(bm, pfn);
			}
	}
}
733
734
735
736
737
738
739
740
741
/*
 * create_basic_memory_bitmaps - allocate and populate the forbidden and
 * free page bitmaps used by the swsusp_* page-flag helpers.
 *
 * Idempotent: returns 0 immediately if both bitmaps already exist; BUGs
 * if only one of them does (inconsistent state).
 * Returns 0 on success or -ENOMEM on allocation failure (everything
 * allocated so far is released).
 */
int create_basic_memory_bitmaps(void)
{
	struct memory_bitmap *bm1, *bm2;
	int error = 0;

	if (forbidden_pages_map && free_pages_map)
		return 0;
	else
		BUG_ON(forbidden_pages_map || free_pages_map);

	bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
	if (!bm1)
		return -ENOMEM;

	error = memory_bm_create(bm1, GFP_KERNEL, PG_ANY);
	if (error)
		goto Free_first_object;

	bm2 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
	if (!bm2)
		goto Free_first_bitmap;

	error = memory_bm_create(bm2, GFP_KERNEL, PG_ANY);
	if (error)
		goto Free_second_object;

	/* Publish the bitmaps only when both are fully constructed. */
	forbidden_pages_map = bm1;
	free_pages_map = bm2;
	mark_nosave_pages(forbidden_pages_map);

	pr_debug("PM: Basic memory bitmaps created\n");

	return 0;

 Free_second_object:
	kfree(bm2);
 Free_first_bitmap:
	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
 Free_first_object:
	kfree(bm1);
	return -ENOMEM;
}
784
785
786
787
788
789
790
791
792void free_basic_memory_bitmaps(void)
793{
794 struct memory_bitmap *bm1, *bm2;
795
796 if (WARN_ON(!(forbidden_pages_map && free_pages_map)))
797 return;
798
799 bm1 = forbidden_pages_map;
800 bm2 = free_pages_map;
801 forbidden_pages_map = NULL;
802 free_pages_map = NULL;
803 memory_bm_free(bm1, PG_UNSAFE_CLEAR);
804 kfree(bm1);
805 memory_bm_free(bm2, PG_UNSAFE_CLEAR);
806 kfree(bm2);
807
808 pr_debug("PM: Basic memory bitmaps freed\n");
809}
810
811
812
813
814
815
816
817unsigned int snapshot_additional_pages(struct zone *zone)
818{
819 unsigned int res;
820
821 res = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
822 res += DIV_ROUND_UP(res * sizeof(struct bm_block),
823 LINKED_PAGE_DATA_SIZE);
824 return 2 * res;
825}
826
827#ifdef CONFIG_HIGHMEM
828
829
830
831
832
833static unsigned int count_free_highmem_pages(void)
834{
835 struct zone *zone;
836 unsigned int cnt = 0;
837
838 for_each_populated_zone(zone)
839 if (is_highmem(zone))
840 cnt += zone_page_state(zone, NR_FREE_PAGES);
841
842 return cnt;
843}
844
845
846
847
848
849
850
851
/*
 * saveable_highmem_page - return the page at @pfn if it is a highmem page
 * in @zone whose contents should be saved, NULL otherwise.
 *
 * Pages that are forbidden (image/metadata pages), marked free, reserved,
 * or debug guard pages are not saveable.
 */
static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_page(pfn);
	/* Zones may overlap in PFN space; only count pages of @zone. */
	if (page_zone(page) != zone)
		return NULL;

	BUG_ON(!PageHighMem(page));

	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page) ||
	    PageReserved(page))
		return NULL;

	if (page_is_guard(page))
		return NULL;

	return page;
}
874
875
876
877
878
879
880static unsigned int count_highmem_pages(void)
881{
882 struct zone *zone;
883 unsigned int n = 0;
884
885 for_each_populated_zone(zone) {
886 unsigned long pfn, max_zone_pfn;
887
888 if (!is_highmem(zone))
889 continue;
890
891 mark_free_pages(zone);
892 max_zone_pfn = zone_end_pfn(zone);
893 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
894 if (saveable_highmem_page(zone, pfn))
895 n++;
896 }
897 return n;
898}
899#else
/* !CONFIG_HIGHMEM: there are no highmem pages to save. */
static inline void *saveable_highmem_page(struct zone *z, unsigned long p)
{
	return NULL;
}
904#endif
905
906
907
908
909
910
911
912
913
/*
 * saveable_page - return the page at @pfn if it is a non-highmem page in
 * @zone whose contents should be saved, NULL otherwise.
 *
 * Forbidden pages, free pages, debug guard pages, and reserved pages that
 * are either unmapped or explicitly marked nosave are not saveable.
 */
static struct page *saveable_page(struct zone *zone, unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_page(pfn);
	/* Zones may overlap in PFN space; only count pages of @zone. */
	if (page_zone(page) != zone)
		return NULL;

	BUG_ON(PageHighMem(page));

	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
		return NULL;

	if (PageReserved(page)
	    && (!kernel_page_present(page) || pfn_is_nosave(pfn)))
		return NULL;

	if (page_is_guard(page))
		return NULL;

	return page;
}
939
940
941
942
943
944
945static unsigned int count_data_pages(void)
946{
947 struct zone *zone;
948 unsigned long pfn, max_zone_pfn;
949 unsigned int n = 0;
950
951 for_each_populated_zone(zone) {
952 if (is_highmem(zone))
953 continue;
954
955 mark_free_pages(zone);
956 max_zone_pfn = zone_end_pfn(zone);
957 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
958 if (saveable_page(zone, pfn))
959 n++;
960 }
961 return n;
962}
963
964
965
966
/*
 * Copy one page, word by word.  An open-coded loop is used instead of
 * copy_page()/memcpy() — presumably because those are not safe for every
 * source page in the atomic snapshot context (e.g. task structs); TODO
 * confirm against the upstream rationale.
 */
static inline void do_copy_page(long *dst, long *src)
{
	int n;

	for (n = PAGE_SIZE / sizeof(long); n; n--)
		*dst++ = *src++;
}
974
975
976
977
978
979
980
981
/*
 * safe_copy_page - copy @s_page to @dst, temporarily mapping the source
 * into the kernel page tables if it is currently unmapped (which can
 * happen with CONFIG_DEBUG_PAGEALLOC-style page poisoning/unmapping).
 */
static void safe_copy_page(void *dst, struct page *s_page)
{
	if (kernel_page_present(s_page)) {
		do_copy_page(dst, page_address(s_page));
	} else {
		kernel_map_pages(s_page, 1, 1);
		do_copy_page(dst, page_address(s_page));
		kernel_map_pages(s_page, 1, 0);
	}
}
992
993
994#ifdef CONFIG_HIGHMEM
/* Dispatch to the highmem or lowmem saveable-page check for @zone. */
static inline struct page *
page_is_saveable(struct zone *zone, unsigned long pfn)
{
	if (is_highmem(zone))
		return saveable_highmem_page(zone, pfn);
	return saveable_page(zone, pfn);
}
1001
/*
 * copy_data_page - copy the page at @src_pfn to the one at @dst_pfn,
 * handling every combination of highmem/lowmem source and destination.
 */
static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
	struct page *s_page, *d_page;
	void *src, *dst;

	s_page = pfn_to_page(src_pfn);
	d_page = pfn_to_page(dst_pfn);
	if (PageHighMem(s_page)) {
		/* Both ends mapped temporarily via kmap_atomic. */
		src = kmap_atomic(s_page);
		dst = kmap_atomic(d_page);
		do_copy_page(dst, src);
		kunmap_atomic(dst);
		kunmap_atomic(src);
	} else {
		if (PageHighMem(d_page)) {
			/*
			 * The source may be unmapped (DEBUG_PAGEALLOC), so
			 * bounce through the lowmem buffer page before
			 * copying into the kmapped destination.
			 */
			safe_copy_page(buffer, s_page);
			dst = kmap_atomic(d_page);
			copy_page(dst, buffer);
			kunmap_atomic(dst);
		} else {
			safe_copy_page(page_address(d_page), s_page);
		}
	}
}
1029#else
/* !CONFIG_HIGHMEM: only the lowmem checks and copies are needed. */
#define page_is_saveable(zone, pfn)	saveable_page(zone, pfn)

static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
	safe_copy_page(page_address(pfn_to_page(dst_pfn)),
				pfn_to_page(src_pfn));
}
1037#endif
1038
/*
 * copy_data_pages - mark every saveable page in @orig_bm, then walk
 * @orig_bm and @copy_bm in lockstep, copying each saveable page into its
 * preallocated image page.
 */
static void
copy_data_pages(struct memory_bitmap *copy_bm, struct memory_bitmap *orig_bm)
{
	struct zone *zone;
	unsigned long pfn;

	for_each_populated_zone(zone) {
		unsigned long max_zone_pfn;

		/* Mark free pages so page_is_saveable() can skip them. */
		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (page_is_saveable(zone, pfn))
				memory_bm_set_bit(orig_bm, pfn);
	}
	memory_bm_position_reset(orig_bm);
	memory_bm_position_reset(copy_bm);
	for(;;) {
		pfn = memory_bm_next_pfn(orig_bm);
		if (unlikely(pfn == BM_END_OF_MAP))
			break;
		copy_data_page(memory_bm_next_pfn(copy_bm), pfn);
	}
}
1063
1064
/* Total number of image (data) pages. */
static unsigned int nr_copy_pages;
/* Number of pages needed for storing the list of image page PFNs. */
static unsigned int nr_meta_pages;

/*
 * Numbers of normal and highmem page frames allocated for the image so
 * far (updated by the preallocation/allocation helpers below).
 */
unsigned int alloc_normal, alloc_highmem;

/*
 * Memory bitmap used for marking saveable pages (during hibernation) or
 * hibernation image pages (during restore).
 */
static struct memory_bitmap orig_bm;

/*
 * Memory bitmap used during hibernation for marking allocated image
 * pages (during restore it is initially used for marking the hibernation
 * image pages, but then its set bits are duplicated in @orig_bm and it is
 * released).
 */
static struct memory_bitmap copy_bm;
1087
1088
1089
1090
1091
1092
1093
1094
/*
 * swsusp_free - release every page that was allocated for the hibernation
 * image (identified by having both the forbidden and the free marker set)
 * and reset the image bookkeeping state.
 */
void swsusp_free(void)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;

	for_each_populated_zone(zone) {
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (pfn_valid(pfn)) {
				struct page *page = pfn_to_page(pfn);

				if (swsusp_page_is_forbidden(page) &&
				    swsusp_page_is_free(page)) {
					swsusp_unset_page_forbidden(page);
					swsusp_unset_page_free(page);
					__free_page(page);
				}
			}
	}
	nr_copy_pages = 0;
	nr_meta_pages = 0;
	restore_pblist = NULL;
	buffer = NULL;
	alloc_normal = 0;
	alloc_highmem = 0;
}
1121
1122
1123
/* Image-page allocation mask; failures are expected and handled by callers. */
#define GFP_IMAGE	(GFP_KERNEL | __GFP_NOWARN)
1125
1126
1127
1128
1129
1130
1131
1132
1133static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
1134{
1135 unsigned long nr_alloc = 0;
1136
1137 while (nr_pages > 0) {
1138 struct page *page;
1139
1140 page = alloc_image_page(mask);
1141 if (!page)
1142 break;
1143 memory_bm_set_bit(©_bm, page_to_pfn(page));
1144 if (PageHighMem(page))
1145 alloc_highmem++;
1146 else
1147 alloc_normal++;
1148 nr_pages--;
1149 nr_alloc++;
1150 }
1151
1152 return nr_alloc;
1153}
1154
1155static unsigned long preallocate_image_memory(unsigned long nr_pages,
1156 unsigned long avail_normal)
1157{
1158 unsigned long alloc;
1159
1160 if (avail_normal <= alloc_normal)
1161 return 0;
1162
1163 alloc = avail_normal - alloc_normal;
1164 if (nr_pages < alloc)
1165 alloc = nr_pages;
1166
1167 return preallocate_image_pages(alloc, GFP_IMAGE);
1168}
1169
1170#ifdef CONFIG_HIGHMEM
/* Preallocate up to @nr_pages image pages from highmem. */
static unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
	return preallocate_image_pages(nr_pages, GFP_IMAGE | __GFP_HIGHMEM);
}
1175
1176
1177
1178
/* Return (x * multiplier) / base using 64-bit intermediate arithmetic. */
static unsigned long __fraction(u64 x, u64 multiplier, u64 base)
{
	x *= multiplier;
	do_div(x, base);
	return (unsigned long)x;
}
1185
/*
 * Preallocate the highmem share of @nr_pages, proportional to the ratio
 * of highmem pages to total pages (@highmem / @total).
 */
static unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
						unsigned long highmem,
						unsigned long total)
{
	unsigned long alloc = __fraction(nr_pages, highmem, total);

	return preallocate_image_pages(alloc, GFP_IMAGE | __GFP_HIGHMEM);
}
1194#else
/* !CONFIG_HIGHMEM: no highmem pages can be preallocated. */
static inline unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
	return 0;
}

static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
						unsigned long highmem,
						unsigned long total)
{
	return 0;
}
1206#endif
1207
1208
1209
1210
1211static void free_unnecessary_pages(void)
1212{
1213 unsigned long save, to_free_normal, to_free_highmem;
1214
1215 save = count_data_pages();
1216 if (alloc_normal >= save) {
1217 to_free_normal = alloc_normal - save;
1218 save = 0;
1219 } else {
1220 to_free_normal = 0;
1221 save -= alloc_normal;
1222 }
1223 save += count_highmem_pages();
1224 if (alloc_highmem >= save) {
1225 to_free_highmem = alloc_highmem - save;
1226 } else {
1227 to_free_highmem = 0;
1228 save -= alloc_highmem;
1229 if (to_free_normal > save)
1230 to_free_normal -= save;
1231 else
1232 to_free_normal = 0;
1233 }
1234
1235 memory_bm_position_reset(©_bm);
1236
1237 while (to_free_normal > 0 || to_free_highmem > 0) {
1238 unsigned long pfn = memory_bm_next_pfn(©_bm);
1239 struct page *page = pfn_to_page(pfn);
1240
1241 if (PageHighMem(page)) {
1242 if (!to_free_highmem)
1243 continue;
1244 to_free_highmem--;
1245 alloc_highmem--;
1246 } else {
1247 if (!to_free_normal)
1248 continue;
1249 to_free_normal--;
1250 alloc_normal--;
1251 }
1252 memory_bm_clear_bit(©_bm, pfn);
1253 swsusp_unset_page_forbidden(page);
1254 swsusp_unset_page_free(page);
1255 __free_page(page);
1256 }
1257}
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
/*
 * minimum_image_size - lower bound on the image size, computed as the
 * number of saveable pages minus the pages the MM subsystem should be
 * able to reclaim (reclaimable slab, anon and file LRU pages, excluding
 * mapped file pages).  Returns 0 if everything saveable fits in the
 * reclaimable estimate.
 */
static unsigned long minimum_image_size(unsigned long saveable)
{
	unsigned long size;

	size = global_page_state(NR_SLAB_RECLAIMABLE)
		+ global_page_state(NR_ACTIVE_ANON)
		+ global_page_state(NR_INACTIVE_ANON)
		+ global_page_state(NR_ACTIVE_FILE)
		+ global_page_state(NR_INACTIVE_FILE)
		- global_page_state(NR_FILE_MAPPED);

	return saveable <= size ? 0 : saveable - size;
}
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311int hibernate_preallocate_memory(void)
1312{
1313 struct zone *zone;
1314 unsigned long saveable, size, max_size, count, highmem, pages = 0;
1315 unsigned long alloc, save_highmem, pages_highmem, avail_normal;
1316 struct timeval start, stop;
1317 int error;
1318
1319 printk(KERN_INFO "PM: Preallocating image memory... ");
1320 do_gettimeofday(&start);
1321
1322 error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY);
1323 if (error)
1324 goto err_out;
1325
1326 error = memory_bm_create(©_bm, GFP_IMAGE, PG_ANY);
1327 if (error)
1328 goto err_out;
1329
1330 alloc_normal = 0;
1331 alloc_highmem = 0;
1332
1333
1334 save_highmem = count_highmem_pages();
1335 saveable = count_data_pages();
1336
1337
1338
1339
1340
1341 count = saveable;
1342 saveable += save_highmem;
1343 highmem = save_highmem;
1344 size = 0;
1345 for_each_populated_zone(zone) {
1346 size += snapshot_additional_pages(zone);
1347 if (is_highmem(zone))
1348 highmem += zone_page_state(zone, NR_FREE_PAGES);
1349 else
1350 count += zone_page_state(zone, NR_FREE_PAGES);
1351 }
1352 avail_normal = count;
1353 count += highmem;
1354 count -= totalreserve_pages;
1355
1356
1357 size += page_key_additional_pages(saveable);
1358
1359
1360 max_size = (count - (size + PAGES_FOR_IO)) / 2
1361 - 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE);
1362
1363 size = DIV_ROUND_UP(image_size, PAGE_SIZE);
1364 if (size > max_size)
1365 size = max_size;
1366
1367
1368
1369
1370
1371 if (size >= saveable) {
1372 pages = preallocate_image_highmem(save_highmem);
1373 pages += preallocate_image_memory(saveable - pages, avail_normal);
1374 goto out;
1375 }
1376
1377
1378 pages = minimum_image_size(saveable);
1379
1380
1381
1382
1383
1384 if (avail_normal > pages)
1385 avail_normal -= pages;
1386 else
1387 avail_normal = 0;
1388 if (size < pages)
1389 size = min_t(unsigned long, pages, max_size);
1390
1391
1392
1393
1394
1395
1396
1397 shrink_all_memory(saveable - size);
1398
1399
1400
1401
1402
1403
1404
1405
1406 pages_highmem = preallocate_image_highmem(highmem / 2);
1407 alloc = count - max_size;
1408 if (alloc > pages_highmem)
1409 alloc -= pages_highmem;
1410 else
1411 alloc = 0;
1412 pages = preallocate_image_memory(alloc, avail_normal);
1413 if (pages < alloc) {
1414
1415 alloc -= pages;
1416 pages += pages_highmem;
1417 pages_highmem = preallocate_image_highmem(alloc);
1418 if (pages_highmem < alloc)
1419 goto err_out;
1420 pages += pages_highmem;
1421
1422
1423
1424
1425 alloc = (count - pages) - size;
1426 pages += preallocate_image_highmem(alloc);
1427 } else {
1428
1429
1430
1431
1432 alloc = max_size - size;
1433 size = preallocate_highmem_fraction(alloc, highmem, count);
1434 pages_highmem += size;
1435 alloc -= size;
1436 size = preallocate_image_memory(alloc, avail_normal);
1437 pages_highmem += preallocate_image_highmem(alloc - size);
1438 pages += pages_highmem + size;
1439 }
1440
1441
1442
1443
1444
1445
1446 free_unnecessary_pages();
1447
1448 out:
1449 do_gettimeofday(&stop);
1450 printk(KERN_CONT "done (allocated %lu pages)\n", pages);
1451 swsusp_show_speed(&start, &stop, pages, "Allocated");
1452
1453 return 0;
1454
1455 err_out:
1456 printk(KERN_CONT "\n");
1457 swsusp_free();
1458 return -ENOMEM;
1459}
1460
1461#ifdef CONFIG_HIGHMEM
1462
1463
1464
1465
1466
1467static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
1468{
1469 unsigned int free_highmem = count_free_highmem_pages() + alloc_highmem;
1470
1471 if (free_highmem >= nr_highmem)
1472 nr_highmem = 0;
1473 else
1474 nr_highmem -= free_highmem;
1475
1476 return nr_highmem;
1477}
1478#else
/* !CONFIG_HIGHMEM: no highmem pages ever need normal-page backing. */
static unsigned int
count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
1481#endif
1482
1483
1484
1485
1486
1487
1488static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
1489{
1490 struct zone *zone;
1491 unsigned int free = alloc_normal;
1492
1493 for_each_populated_zone(zone)
1494 if (!is_highmem(zone))
1495 free += zone_page_state(zone, NR_FREE_PAGES);
1496
1497 nr_pages += count_pages_for_highmem(nr_highmem);
1498 pr_debug("PM: Normal pages needed: %u + %u, available pages: %u\n",
1499 nr_pages, PAGES_FOR_IO, free);
1500
1501 return free > nr_pages + PAGES_FOR_IO;
1502}
1503
1504#ifdef CONFIG_HIGHMEM
1505
1506
1507
1508
1509
/*
 * Allocate the lowmem bounce buffer page used when copying highmem pages.
 * Returns 0 on success, -ENOMEM on failure.
 */
static inline int get_highmem_buffer(int safe_needed)
{
	buffer = get_image_page(GFP_ATOMIC | __GFP_COLD, safe_needed);
	return buffer ? 0 : -ENOMEM;
}
1515
1516
1517
1518
1519
1520
1521
1522static inline unsigned int
1523alloc_highmem_pages(struct memory_bitmap *bm, unsigned int nr_highmem)
1524{
1525 unsigned int to_alloc = count_free_highmem_pages();
1526
1527 if (to_alloc > nr_highmem)
1528 to_alloc = nr_highmem;
1529
1530 nr_highmem -= to_alloc;
1531 while (to_alloc-- > 0) {
1532 struct page *page;
1533
1534 page = alloc_image_page(__GFP_HIGHMEM);
1535 memory_bm_set_bit(bm, page_to_pfn(page));
1536 }
1537 return nr_highmem;
1538}
1539#else
/* !CONFIG_HIGHMEM: no bounce buffer and no highmem pages to allocate. */
static inline int get_highmem_buffer(int safe_needed) { return 0; }

static inline unsigned int
alloc_highmem_pages(struct memory_bitmap *bm, unsigned int n) { return 0; }
1544#endif
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
/*
 * swsusp_alloc - allocate the remaining memory needed for the image:
 * @nr_pages normal and @nr_highmem highmem pages, on top of whatever was
 * already preallocated (alloc_normal / alloc_highmem).  Allocated pages
 * are recorded in @copy_bm.  On failure everything is released via
 * swsusp_free() and -ENOMEM is returned.
 */
static int
swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm,
		unsigned int nr_pages, unsigned int nr_highmem)
{
	if (nr_highmem > 0) {
		if (get_highmem_buffer(PG_ANY))
			goto err_out;
		if (nr_highmem > alloc_highmem) {
			nr_highmem -= alloc_highmem;
			/* Highmem shortfall becomes extra normal pages. */
			nr_pages += alloc_highmem_pages(copy_bm, nr_highmem);
		}
	}
	if (nr_pages > alloc_normal) {
		nr_pages -= alloc_normal;
		while (nr_pages-- > 0) {
			struct page *page;

			page = alloc_image_page(GFP_ATOMIC | __GFP_COLD);
			if (!page)
				goto err_out;
			memory_bm_set_bit(copy_bm, page_to_pfn(page));
		}
	}

	return 0;

 err_out:
	swsusp_free();
	return -ENOMEM;
}
1588
1589asmlinkage __visible int swsusp_save(void)
1590{
1591 unsigned int nr_pages, nr_highmem;
1592
1593 printk(KERN_INFO "PM: Creating hibernation image:\n");
1594
1595 drain_local_pages(NULL);
1596 nr_pages = count_data_pages();
1597 nr_highmem = count_highmem_pages();
1598 printk(KERN_INFO "PM: Need to copy %u pages\n", nr_pages + nr_highmem);
1599
1600 if (!enough_free_mem(nr_pages, nr_highmem)) {
1601 printk(KERN_ERR "PM: Not enough free memory\n");
1602 return -ENOMEM;
1603 }
1604
1605 if (swsusp_alloc(&orig_bm, ©_bm, nr_pages, nr_highmem)) {
1606 printk(KERN_ERR "PM: Memory allocation failed\n");
1607 return -ENOMEM;
1608 }
1609
1610
1611
1612
1613 drain_local_pages(NULL);
1614 copy_data_pages(©_bm, &orig_bm);
1615
1616
1617
1618
1619
1620
1621
1622 nr_pages += nr_highmem;
1623 nr_copy_pages = nr_pages;
1624 nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);
1625
1626 printk(KERN_INFO "PM: Hibernation image created (%d pages copied)\n",
1627 nr_pages);
1628
1629 return 0;
1630}
1631
1632#ifndef CONFIG_ARCH_HIBERNATION_HEADER
1633static int init_header_complete(struct swsusp_info *info)
1634{
1635 memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
1636 info->version_code = LINUX_VERSION_CODE;
1637 return 0;
1638}
1639
1640static char *check_image_kernel(struct swsusp_info *info)
1641{
1642 if (info->version_code != LINUX_VERSION_CODE)
1643 return "kernel version";
1644 if (strcmp(info->uts.sysname,init_utsname()->sysname))
1645 return "system type";
1646 if (strcmp(info->uts.release,init_utsname()->release))
1647 return "kernel release";
1648 if (strcmp(info->uts.version,init_utsname()->version))
1649 return "version";
1650 if (strcmp(info->uts.machine,init_utsname()->machine))
1651 return "machine";
1652 return NULL;
1653}
1654#endif
1655
1656unsigned long snapshot_get_image_size(void)
1657{
1658 return nr_copy_pages + nr_meta_pages + 1;
1659}
1660
1661static int init_header(struct swsusp_info *info)
1662{
1663 memset(info, 0, sizeof(struct swsusp_info));
1664 info->num_physpages = get_num_physpages();
1665 info->image_pages = nr_copy_pages;
1666 info->pages = snapshot_get_image_size();
1667 info->size = info->pages;
1668 info->size <<= PAGE_SHIFT;
1669 return init_header_complete(info);
1670}
1671
1672
1673
1674
1675
1676
1677static inline void
1678pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
1679{
1680 int j;
1681
1682 for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
1683 buf[j] = memory_bm_next_pfn(bm);
1684 if (unlikely(buf[j] == BM_END_OF_MAP))
1685 break;
1686
1687 page_key_read(buf + j);
1688 }
1689}
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708int snapshot_read_next(struct snapshot_handle *handle)
1709{
1710 if (handle->cur > nr_meta_pages + nr_copy_pages)
1711 return 0;
1712
1713 if (!buffer) {
1714
1715 buffer = get_image_page(GFP_ATOMIC, PG_ANY);
1716 if (!buffer)
1717 return -ENOMEM;
1718 }
1719 if (!handle->cur) {
1720 int error;
1721
1722 error = init_header((struct swsusp_info *)buffer);
1723 if (error)
1724 return error;
1725 handle->buffer = buffer;
1726 memory_bm_position_reset(&orig_bm);
1727 memory_bm_position_reset(©_bm);
1728 } else if (handle->cur <= nr_meta_pages) {
1729 clear_page(buffer);
1730 pack_pfns(buffer, &orig_bm);
1731 } else {
1732 struct page *page;
1733
1734 page = pfn_to_page(memory_bm_next_pfn(©_bm));
1735 if (PageHighMem(page)) {
1736
1737
1738
1739
1740 void *kaddr;
1741
1742 kaddr = kmap_atomic(page);
1743 copy_page(buffer, kaddr);
1744 kunmap_atomic(kaddr);
1745 handle->buffer = buffer;
1746 } else {
1747 handle->buffer = page_address(page);
1748 }
1749 }
1750 handle->cur++;
1751 return PAGE_SIZE;
1752}
1753
1754
1755
1756
1757
1758
1759
1760static int mark_unsafe_pages(struct memory_bitmap *bm)
1761{
1762 struct zone *zone;
1763 unsigned long pfn, max_zone_pfn;
1764
1765
1766 for_each_populated_zone(zone) {
1767 max_zone_pfn = zone_end_pfn(zone);
1768 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1769 if (pfn_valid(pfn))
1770 swsusp_unset_page_free(pfn_to_page(pfn));
1771 }
1772
1773
1774 memory_bm_position_reset(bm);
1775 do {
1776 pfn = memory_bm_next_pfn(bm);
1777 if (likely(pfn != BM_END_OF_MAP)) {
1778 if (likely(pfn_valid(pfn)))
1779 swsusp_set_page_free(pfn_to_page(pfn));
1780 else
1781 return -EFAULT;
1782 }
1783 } while (pfn != BM_END_OF_MAP);
1784
1785 allocated_unsafe_pages = 0;
1786
1787 return 0;
1788}
1789
1790static void
1791duplicate_memory_bitmap(struct memory_bitmap *dst, struct memory_bitmap *src)
1792{
1793 unsigned long pfn;
1794
1795 memory_bm_position_reset(src);
1796 pfn = memory_bm_next_pfn(src);
1797 while (pfn != BM_END_OF_MAP) {
1798 memory_bm_set_bit(dst, pfn);
1799 pfn = memory_bm_next_pfn(src);
1800 }
1801}
1802
1803static int check_header(struct swsusp_info *info)
1804{
1805 char *reason;
1806
1807 reason = check_image_kernel(info);
1808 if (!reason && info->num_physpages != get_num_physpages())
1809 reason = "memory size";
1810 if (reason) {
1811 printk(KERN_ERR "PM: Image mismatch: %s\n", reason);
1812 return -EPERM;
1813 }
1814 return 0;
1815}
1816
1817
1818
1819
1820
1821static int
1822load_header(struct swsusp_info *info)
1823{
1824 int error;
1825
1826 restore_pblist = NULL;
1827 error = check_header(info);
1828 if (!error) {
1829 nr_copy_pages = info->image_pages;
1830 nr_meta_pages = info->pages - info->image_pages - 1;
1831 }
1832 return error;
1833}
1834
1835
1836
1837
1838
1839static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
1840{
1841 int j;
1842
1843 for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
1844 if (unlikely(buf[j] == BM_END_OF_MAP))
1845 break;
1846
1847
1848 page_key_memorize(buf + j);
1849
1850 if (memory_bm_pfn_present(bm, buf[j]))
1851 memory_bm_set_bit(bm, buf[j]);
1852 else
1853 return -EFAULT;
1854 }
1855
1856 return 0;
1857}
1858
1859
1860
1861
1862static struct linked_page *safe_pages_list;
1863
1864#ifdef CONFIG_HIGHMEM
1865
1866
1867
1868
/*
 * struct highmem_pbe - page backup entry for a highmem page.
 * @copy_page: page frame the image data was actually stored in.
 * @orig_page: highmem page frame the data belongs to.
 * @next:      next entry in the highmem_pblist chain.
 */
struct highmem_pbe {
	struct page *copy_page;
	struct page *orig_page;
	struct highmem_pbe *next;
};
1874
1875
1876
1877
1878
1879
1880static struct highmem_pbe *highmem_pblist;
1881
1882
1883
1884
1885
1886
1887
1888static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
1889{
1890 unsigned long pfn;
1891 unsigned int cnt = 0;
1892
1893 memory_bm_position_reset(bm);
1894 pfn = memory_bm_next_pfn(bm);
1895 while (pfn != BM_END_OF_MAP) {
1896 if (PageHighMem(pfn_to_page(pfn)))
1897 cnt++;
1898
1899 pfn = memory_bm_next_pfn(bm);
1900 }
1901 return cnt;
1902}
1903
1904
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914
1915
/* Number of safe highmem pages still available as copy destinations. */
static unsigned int safe_highmem_pages;

/* Bitmap marking the highmem pages that are safe to store image data in. */
static struct memory_bitmap *safe_highmem_bm;
1919
/*
 * prepare_highmem_image - Allocate highmem page frames for restored data.
 * @bm:            bitmap to be (re)created and filled with the safe pages.
 * @nr_highmem_p:  in: highmem pages needed; out: pages actually covered.
 *
 * Allocate up to *nr_highmem_p free highmem pages, record the "safe" ones
 * (those not occupied by the image) in @bm, and mark every allocated page
 * forbidden+free so the restore code can recognize it.  Also makes sure a
 * safe transfer buffer exists (get_highmem_buffer(PG_SAFE)).
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int
prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
{
	unsigned int to_alloc;

	if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
		return -ENOMEM;

	if (get_highmem_buffer(PG_SAFE))
		return -ENOMEM;

	/* Never try to allocate more than there are free highmem pages. */
	to_alloc = count_free_highmem_pages();
	if (to_alloc > *nr_highmem_p)
		to_alloc = *nr_highmem_p;
	else
		*nr_highmem_p = to_alloc;

	safe_highmem_pages = 0;
	while (to_alloc-- > 0) {
		struct page *page;

		page = alloc_page(__GFP_HIGHMEM);
		/*
		 * NOTE(review): the alloc_page() result is not NULL-checked;
		 * to_alloc is bounded by count_free_highmem_pages(), so the
		 * allocation presumably cannot fail here — confirm.
		 */
		if (!swsusp_page_is_free(page)) {
			/* The page is "safe": record it in the bitmap. */
			memory_bm_set_bit(bm, page_to_pfn(page));
			safe_highmem_pages++;
		}
		/* Mark the page as allocated by the restore machinery. */
		swsusp_set_page_forbidden(page);
		swsusp_set_page_free(page);
	}
	memory_bm_position_reset(bm);
	safe_highmem_bm = bm;
	return 0;
}
1955
1956
1957
1958
1959
1960
1961
1962
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973static struct page *last_highmem_page;
1974
/*
 * get_highmem_page_buffer - Get the address to store a highmem image page.
 * @page: original highmem page the data belongs to.
 * @ca:   chain allocator for highmem_pbe entries.
 *
 * If @page is both forbidden and free, the "original" frame itself was
 * allocated by the restore code, so the data is staged in the transfer
 * buffer and copied into place later by copy_last_highmem_page().
 * Otherwise a highmem_pbe is recorded and either a safe highmem page
 * (data staged via the buffer) or a safe lowmem page (written directly)
 * is used as the copy destination.
 *
 * Returns the destination address or ERR_PTR(-ENOMEM).
 */
static void *
get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
{
	struct highmem_pbe *pbe;
	void *kaddr;

	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) {
		/*
		 * We have allocated the "original" page frame, so the data
		 * can be loaded into it directly (via the buffer).
		 */
		last_highmem_page = page;
		return buffer;
	}
	/*
	 * The "original" page frame was not allocated: a "safe" page frame
	 * has to hold the data until the frame can be restored.
	 */
	pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
	if (!pbe) {
		swsusp_free();
		return ERR_PTR(-ENOMEM);
	}
	pbe->orig_page = page;
	if (safe_highmem_pages > 0) {
		struct page *tmp;

		/* The copy will be stored in a safe highmem page. */
		kaddr = buffer;
		tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm));
		safe_highmem_pages--;
		last_highmem_page = tmp;
		pbe->copy_page = tmp;
	} else {
		/* The copy will be stored in a safe lowmem page. */
		kaddr = safe_pages_list;
		safe_pages_list = safe_pages_list->next;
		pbe->copy_page = virt_to_page(kaddr);
	}
	pbe->next = highmem_pblist;
	highmem_pblist = pbe;
	return kaddr;
}
2016
2017
2018
2019
2020
2021
2022
2023static void copy_last_highmem_page(void)
2024{
2025 if (last_highmem_page) {
2026 void *dst;
2027
2028 dst = kmap_atomic(last_highmem_page);
2029 copy_page(dst, buffer);
2030 kunmap_atomic(dst);
2031 last_highmem_page = NULL;
2032 }
2033}
2034
2035static inline int last_highmem_page_copied(void)
2036{
2037 return !last_highmem_page;
2038}
2039
2040static inline void free_highmem_data(void)
2041{
2042 if (safe_highmem_bm)
2043 memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR);
2044
2045 if (buffer)
2046 free_image_page(buffer, PG_UNSAFE_CLEAR);
2047}
#else
/* !CONFIG_HIGHMEM stubs: there are no highmem pages to handle. */
static inline int get_safe_write_buffer(void) { return 0; }

static unsigned int
count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }

static inline int
prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
{
	return 0;
}

static inline void *
get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
{
	/* Should never be reached when CONFIG_HIGHMEM is unset. */
	return ERR_PTR(-EINVAL);
}

static inline void copy_last_highmem_page(void) {}
static inline int last_highmem_page_copied(void) { return 1; }
static inline void free_highmem_data(void) {}
#endif
2070
2071
2072
2073
2074
2075
2076
2077
2078
2079
2080
2081
2082
2083
2084
2085#define PBES_PER_LINKED_PAGE (LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))
2086
/*
 * prepare_image - Make room for loading the hibernation image.
 * @new_bm: bitmap to be created in safe storage as a copy of @bm.
 * @bm:     bitmap of image PFNs; its storage is recycled for highmem use.
 *
 * Mark the page frames occupied by the image as "unsafe", move the image
 * bitmap into safely-allocated storage (@new_bm), prepare highmem copy
 * frames if needed, and build safe_pages_list with enough safe lowmem
 * frames to hold every remaining data page.  The temporary sp_list
 * reservation holds back some safe pages while safe_pages_list is built,
 * presumably so chain_alloc() in get_buffer() can still find safe frames
 * afterwards — TODO confirm against the chain allocator.
 *
 * Returns 0 on success or a negative error code (after swsusp_free()).
 */
static int
prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
{
	unsigned int nr_pages, nr_highmem;
	struct linked_page *sp_list, *lp;
	int error;

	/* Drop the transfer buffer; it may sit in an "unsafe" page frame. */
	free_image_page(buffer, PG_UNSAFE_CLEAR);
	buffer = NULL;

	nr_highmem = count_highmem_image_pages(bm);
	error = mark_unsafe_pages(bm);
	if (error)
		goto Free;

	error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
	if (error)
		goto Free;

	duplicate_memory_bitmap(new_bm, bm);
	memory_bm_free(bm, PG_UNSAFE_KEEP);
	if (nr_highmem > 0) {
		/* @bm is re-created here as the safe-highmem bitmap. */
		error = prepare_highmem_image(bm, &nr_highmem);
		if (error)
			goto Free;
	}
	/*
	 * Reserve some safe pages up front so that enough of them remain
	 * for later allocations once safe_pages_list has been built.
	 */
	sp_list = NULL;
	/* nr_copy_pages cannot be less than allocated_unsafe_pages */
	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
	nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
	while (nr_pages > 0) {
		lp = get_image_page(GFP_ATOMIC, PG_SAFE);
		if (!lp) {
			error = -ENOMEM;
			goto Free;
		}
		lp->next = sp_list;
		sp_list = lp;
		nr_pages--;
	}
	/* Collect safe page frames for the image data. */
	safe_pages_list = NULL;
	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
	while (nr_pages > 0) {
		lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
		if (!lp) {
			error = -ENOMEM;
			goto Free;
		}
		if (!swsusp_page_is_free(virt_to_page(lp))) {
			/* The page is "safe": add it to the list. */
			lp->next = safe_pages_list;
			safe_pages_list = lp;
		}
		/* Mark the page as allocated by the restore machinery. */
		swsusp_set_page_forbidden(virt_to_page(lp));
		swsusp_set_page_free(virt_to_page(lp));
		nr_pages--;
	}
	/* Release the reserved pages again for later allocations. */
	while (sp_list) {
		lp = sp_list->next;
		free_image_page(sp_list, PG_UNSAFE_CLEAR);
		sp_list = lp;
	}
	return 0;

 Free:
	swsusp_free();
	return error;
}
2165
2166
2167
2168
2169
2170
/*
 * get_buffer - Get the address to store the next image data page.
 * @bm: bitmap of original PFNs, advanced by one on each call.
 * @ca: chain allocator for struct pbe entries.
 *
 * Returns the address the next data page should be written to: the page's
 * own address if its frame is already owned by the restore code ("safe"),
 * a highmem-specific destination via get_highmem_page_buffer(), or a page
 * taken from safe_pages_list with a PBE recorded so the data can be moved
 * into place later.  Returns ERR_PTR(-EFAULT) past the end of the bitmap
 * and ERR_PTR(-ENOMEM) if a PBE cannot be allocated.
 */
static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
{
	struct pbe *pbe;
	struct page *page;
	unsigned long pfn = memory_bm_next_pfn(bm);

	if (pfn == BM_END_OF_MAP)
		return ERR_PTR(-EFAULT);

	page = pfn_to_page(pfn);
	if (PageHighMem(page))
		return get_highmem_page_buffer(page, ca);

	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page))
		/*
		 * We have allocated the "original" page frame, so the data
		 * can be loaded into it directly.
		 */
		return page_address(page);

	/*
	 * The "original" page frame was not allocated: a "safe" page frame
	 * has to hold the data until the frame can be restored.
	 */
	pbe = chain_alloc(ca, sizeof(struct pbe));
	if (!pbe) {
		swsusp_free();
		return ERR_PTR(-ENOMEM);
	}
	pbe->orig_address = page_address(page);
	pbe->address = safe_pages_list;
	safe_pages_list = safe_pages_list->next;
	pbe->next = restore_pblist;
	restore_pblist = pbe;
	return pbe->address;
}
2205
2206
2207
2208
2209
2210
2211
2212
2213
2214
2215
2216
2217
2218
2219
2220
2221
2222
2223int snapshot_write_next(struct snapshot_handle *handle)
2224{
2225 static struct chain_allocator ca;
2226 int error = 0;
2227
2228
2229 if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages)
2230 return 0;
2231
2232 handle->sync_read = 1;
2233
2234 if (!handle->cur) {
2235 if (!buffer)
2236
2237 buffer = get_image_page(GFP_ATOMIC, PG_ANY);
2238
2239 if (!buffer)
2240 return -ENOMEM;
2241
2242 handle->buffer = buffer;
2243 } else if (handle->cur == 1) {
2244 error = load_header(buffer);
2245 if (error)
2246 return error;
2247
2248 error = memory_bm_create(©_bm, GFP_ATOMIC, PG_ANY);
2249 if (error)
2250 return error;
2251
2252
2253 error = page_key_alloc(nr_copy_pages);
2254 if (error)
2255 return error;
2256
2257 } else if (handle->cur <= nr_meta_pages + 1) {
2258 error = unpack_orig_pfns(buffer, ©_bm);
2259 if (error)
2260 return error;
2261
2262 if (handle->cur == nr_meta_pages + 1) {
2263 error = prepare_image(&orig_bm, ©_bm);
2264 if (error)
2265 return error;
2266
2267 chain_init(&ca, GFP_ATOMIC, PG_SAFE);
2268 memory_bm_position_reset(&orig_bm);
2269 restore_pblist = NULL;
2270 handle->buffer = get_buffer(&orig_bm, &ca);
2271 handle->sync_read = 0;
2272 if (IS_ERR(handle->buffer))
2273 return PTR_ERR(handle->buffer);
2274 }
2275 } else {
2276 copy_last_highmem_page();
2277
2278 page_key_write(handle->buffer);
2279 handle->buffer = get_buffer(&orig_bm, &ca);
2280 if (IS_ERR(handle->buffer))
2281 return PTR_ERR(handle->buffer);
2282 if (handle->buffer != buffer)
2283 handle->sync_read = 0;
2284 }
2285 handle->cur++;
2286 return PAGE_SIZE;
2287}
2288
2289
2290
2291
2292
2293
2294
2295
2296
2297void snapshot_write_finalize(struct snapshot_handle *handle)
2298{
2299 copy_last_highmem_page();
2300
2301 page_key_write(handle->buffer);
2302 page_key_free();
2303
2304 if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) {
2305 memory_bm_free(&orig_bm, PG_UNSAFE_CLEAR);
2306 free_highmem_data();
2307 }
2308}
2309
2310int snapshot_image_loaded(struct snapshot_handle *handle)
2311{
2312 return !(!nr_copy_pages || !last_highmem_page_copied() ||
2313 handle->cur <= nr_meta_pages + nr_copy_pages);
2314}
2315
2316#ifdef CONFIG_HIGHMEM
2317
/* Exchange the contents of pages @p1 and @p2 using @buf as scratch space. */
static inline void
swap_two_pages_data(struct page *p1, struct page *p2, void *buf)
{
	void *va1 = kmap_atomic(p1);
	void *va2 = kmap_atomic(p2);

	copy_page(buf, va1);	/* buf <- p1       */
	copy_page(va1, va2);	/* p1  <- p2       */
	copy_page(va2, buf);	/* p2  <- old p1   */

	kunmap_atomic(va2);
	kunmap_atomic(va1);
}
2331
2332
2333
2334
2335
2336
2337
2338
2339
2340
2341
2342int restore_highmem(void)
2343{
2344 struct highmem_pbe *pbe = highmem_pblist;
2345 void *buf;
2346
2347 if (!pbe)
2348 return 0;
2349
2350 buf = get_image_page(GFP_ATOMIC, PG_SAFE);
2351 if (!buf)
2352 return -ENOMEM;
2353
2354 while (pbe) {
2355 swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
2356 pbe = pbe->next;
2357 }
2358 free_image_page(buf, PG_UNSAFE_CLEAR);
2359 return 0;
2360}
2361#endif
2362