/*
 * linux/kernel/power/snapshot.c
 *
 * This file provides system snapshot/restore functionality for swsusp.
 *
 * Copyright (C) 1998-2005 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 *
 * This file is released under the GPLv2.
 */
#include <linux/version.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/syscalls.h>
#include <linux/console.h>
#include <linux/highmem.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/compiler.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#include "power.h"

static int swsusp_page_is_free(struct page *);
static void swsusp_set_page_forbidden(struct page *);
static void swsusp_unset_page_forbidden(struct page *);
/*
 * Number of bytes to reserve for memory allocations made by device drivers
 * from their ->freeze() and ->freeze_noirq() callbacks so that they don't
 * cause image creation to fail (tunable via /sys/power/reserved_size).
 */
unsigned long reserved_size;

void __init hibernate_reserved_size_init(void)
{
	reserved_size = SPARE_PAGES * PAGE_SIZE;
}
/*
 * Preferred image size in bytes (tunable via /sys/power/image_size).
 * When it is set to N, swsusp will do its best to ensure the image
 * size will not exceed N bytes, but if that is impossible, it will
 * try to create the smallest image possible.
 */
unsigned long image_size;

void __init hibernate_image_size_init(void)
{
	image_size = ((totalram_pages * 2) / 5) * PAGE_SIZE;
}
/*
 * List of PBEs needed for restoring the pages that were allocated before
 * the suspend and included in the suspend image, but have also been
 * allocated by the "resume" kernel, so their contents cannot be written
 * directly to their "original" page frames.
 */
struct pbe *restore_pblist;

/* Pointer to an auxiliary buffer (1 page) */
static void *buffer;

/*
 * @safe_needed - on resume, for storing the PBE list and the image, we can
 * only use memory pages that do not conflict with the pages used before
 * the suspend.  The "unsafe" pages are counted in allocated_unsafe_pages.
 *
 * Each allocated image page is marked both "forbidden" and "free" so that
 * swsusp_free() can release it.
 */
#define PG_ANY		0
#define PG_SAFE		1
#define PG_UNSAFE_CLEAR	1
#define PG_UNSAFE_KEEP	0

static unsigned int allocated_unsafe_pages;

static void *get_image_page(gfp_t gfp_mask, int safe_needed)
{
	void *res;

	res = (void *)get_zeroed_page(gfp_mask);
	if (safe_needed)
		while (res && swsusp_page_is_free(virt_to_page(res))) {
			/* The page is unsafe, mark it for swsusp_free() */
			swsusp_set_page_forbidden(virt_to_page(res));
			allocated_unsafe_pages++;
			res = (void *)get_zeroed_page(gfp_mask);
		}
	if (res) {
		swsusp_set_page_forbidden(virt_to_page(res));
		swsusp_set_page_free(virt_to_page(res));
	}
	return res;
}

unsigned long get_safe_page(gfp_t gfp_mask)
{
	return (unsigned long)get_image_page(gfp_mask, PG_SAFE);
}

static struct page *alloc_image_page(gfp_t gfp_mask)
{
	struct page *page;

	page = alloc_page(gfp_mask);
	if (page) {
		swsusp_set_page_forbidden(page);
		swsusp_set_page_free(page);
	}
	return page;
}

/*
 * free_image_page - free a page allocated with get_image_page() or
 * alloc_image_page() (the page flags set by them must be cleared first).
 */
static inline void free_image_page(void *addr, int clear_nosave_free)
{
	struct page *page;

	BUG_ON(!virt_addr_valid(addr));

	page = virt_to_page(addr);

	swsusp_unset_page_forbidden(page);
	if (clear_nosave_free)
		swsusp_unset_page_free(page);

	__free_page(page);
}

/* struct linked_page is used to build chains of pages */

#define LINKED_PAGE_DATA_SIZE	(PAGE_SIZE - sizeof(void *))

struct linked_page {
	struct linked_page *next;
	char data[LINKED_PAGE_DATA_SIZE];
} __packed;

static inline void
free_list_of_pages(struct linked_page *list, int clear_page_nosave)
{
	while (list) {
		struct linked_page *lp = list->next;

		free_image_page(list, clear_page_nosave);
		list = lp;
	}
}

/*
 * struct chain_allocator is used for allocating small objects out of
 * a linked list of pages called 'the chain'.
 *
 * The chain grows each time when there is no room for a new object in
 * the current page.  The allocated objects cannot be freed individually.
 * It is only possible to free them all at once, by freeing the entire
 * chain.
 *
 * NOTE: The chain allocator may be inefficient if the allocated objects
 * are not much smaller than PAGE_SIZE.
 */
struct chain_allocator {
	struct linked_page *chain;	/* the chain */
	unsigned int used_space;	/* total size of objects allocated out
					   of the current page */
	gfp_t gfp_mask;		/* mask for allocating pages */
	int safe_needed;	/* if set, only "safe" pages are allocated */
};

static void
chain_init(struct chain_allocator *ca, gfp_t gfp_mask, int safe_needed)
{
	ca->chain = NULL;
	ca->used_space = LINKED_PAGE_DATA_SIZE;
	ca->gfp_mask = gfp_mask;
	ca->safe_needed = safe_needed;
}

static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
{
	void *ret;

	if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
		struct linked_page *lp;

		lp = get_image_page(ca->gfp_mask, ca->safe_needed);
		if (!lp)
			return NULL;

		lp->next = ca->chain;
		ca->chain = lp;
		ca->used_space = 0;
	}
	ret = ca->chain->data + ca->used_space;
	ca->used_space += size;
	return ret;
}
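
/*
 * Illustrative sketch only (not an API beyond what is defined above): a
 * typical user initializes a chain allocator and carves objects out of it,
 * as the bitmap code below does, in effect:
 *
 *	struct chain_allocator ca;
 *	struct bm_block *bb;
 *
 *	chain_init(&ca, GFP_KERNEL, PG_ANY);
 *	bb = chain_alloc(&ca, sizeof(struct bm_block));
 *
 * Because chain_init() sets used_space to LINKED_PAGE_DATA_SIZE, the first
 * chain_alloc() always grabs a fresh page.  All objects are released
 * together by passing the chain to free_list_of_pages().
 */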

/*
 * Data types related to memory bitmaps.
 *
 * A memory bitmap is a list of objects of type struct bm_block, each of
 * which contains a pointer to one page of bitmap data (allocated with
 * get_image_page()) along with the pfns that correspond to the start and
 * the end of the memory area the block represents.
 *
 * struct memory_bitmap contains the list of bitmap blocks, a pointer to
 * the list of pages used for allocating the block objects, and a
 * struct bm_position used for browsing the bitmap.
 *
 * NOTE: It has to be possible to lay out the bitmap in memory using only
 * allocations of order 0.
 */

#define BM_END_OF_MAP	(~0UL)

#define BM_BITS_PER_BLOCK	(PAGE_SIZE * BITS_PER_BYTE)

struct bm_block {
	struct list_head hook;		/* hook into a list of bitmap blocks */
	unsigned long start_pfn;	/* pfn represented by the first bit */
	unsigned long end_pfn;		/* pfn represented by the last bit plus 1 */
	unsigned long *data;		/* bitmap representing pages */
};
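
/*
 * For example (illustrative): with 4 KiB pages, BM_BITS_PER_BLOCK is
 * 4096 * 8 = 32768 bits, so a single bm_block tracks 32768 page frames,
 * i.e. 128 MiB of memory per page of bitmap data.
 */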

static inline unsigned long bm_block_bits(struct bm_block *bb)
{
	return bb->end_pfn - bb->start_pfn;
}

/* struct bm_position is used for browsing memory bitmaps */

struct bm_position {
	struct bm_block *block;
	int bit;
};

struct memory_bitmap {
	struct list_head blocks;	/* list of bitmap blocks */
	struct linked_page *p_list;	/* list of pages used to store the
					   bitmap block objects */
	struct bm_position cur;		/* most recently used bit position */
};

/* Functions that operate on memory bitmaps */

static void memory_bm_position_reset(struct memory_bitmap *bm)
{
	bm->cur.block = list_entry(bm->blocks.next, struct bm_block, hook);
	bm->cur.bit = 0;
}

static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);

/*
 * create_bm_block_list - create a list of block bitmap objects
 * @pages - number of pages to track
 * @list - list to put the allocated blocks into
 * @ca - chain allocator to be used for allocating memory
 */
static int create_bm_block_list(unsigned long pages,
				struct list_head *list,
				struct chain_allocator *ca)
{
	unsigned int nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);

	while (nr_blocks-- > 0) {
		struct bm_block *bb;

		bb = chain_alloc(ca, sizeof(struct bm_block));
		if (!bb)
			return -ENOMEM;
		list_add(&bb->hook, list);
	}

	return 0;
}
struct mem_extent {
	struct list_head hook;
	unsigned long start;
	unsigned long end;
};

/*
 * free_mem_extents - free a list of memory extents
 * @list - list of extents to empty
 */
static void free_mem_extents(struct list_head *list)
{
	struct mem_extent *ext, *aux;

	list_for_each_entry_safe(ext, aux, list, hook) {
		list_del(&ext->hook);
		kfree(ext);
	}
}

/*
 * create_mem_extents - create a list of memory extents representing
 *                      contiguous ranges of PFNs
 * @list - list to put the extents into
 * @gfp_mask - mask to use for memory allocations
 */
static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
{
	struct zone *zone;

	INIT_LIST_HEAD(list);

	for_each_populated_zone(zone) {
		unsigned long zone_start, zone_end;
		struct mem_extent *ext, *cur, *aux;

		zone_start = zone->zone_start_pfn;
		zone_end = zone->zone_start_pfn + zone->spanned_pages;

		list_for_each_entry(ext, list, hook)
			if (zone_start <= ext->end)
				break;

		if (&ext->hook == list || zone_end < ext->start) {
			/* New extent is necessary */
			struct mem_extent *new_ext;

			new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask);
			if (!new_ext) {
				free_mem_extents(list);
				return -ENOMEM;
			}
			new_ext->start = zone_start;
			new_ext->end = zone_end;
			list_add_tail(&new_ext->hook, &ext->hook);
			continue;
		}

		/* Merge this zone's range of PFNs with the existing one */
		if (zone_start < ext->start)
			ext->start = zone_start;
		if (zone_end > ext->end)
			ext->end = zone_end;

		/* More merging may be possible */
		cur = ext;
		list_for_each_entry_safe_continue(cur, aux, list, hook) {
			if (zone_end < cur->start)
				break;
			if (zone_end < cur->end)
				ext->end = cur->end;
			list_del(&cur->hook);
			kfree(cur);
		}
	}

	return 0;
}

/*
 * memory_bm_create - allocate memory for a memory bitmap
 */
static int
memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, int safe_needed)
{
	struct chain_allocator ca;
	struct list_head mem_extents;
	struct mem_extent *ext;
	int error;

	chain_init(&ca, gfp_mask, safe_needed);
	INIT_LIST_HEAD(&bm->blocks);

	error = create_mem_extents(&mem_extents, gfp_mask);
	if (error)
		return error;

	list_for_each_entry(ext, &mem_extents, hook) {
		struct bm_block *bb;
		unsigned long pfn = ext->start;
		unsigned long pages = ext->end - ext->start;

		bb = list_entry(bm->blocks.prev, struct bm_block, hook);

		error = create_bm_block_list(pages, bm->blocks.prev, &ca);
		if (error)
			goto Error;

		list_for_each_entry_continue(bb, &bm->blocks, hook) {
			bb->data = get_image_page(gfp_mask, safe_needed);
			if (!bb->data) {
				error = -ENOMEM;
				goto Error;
			}

			bb->start_pfn = pfn;
			if (pages >= BM_BITS_PER_BLOCK) {
				pfn += BM_BITS_PER_BLOCK;
				pages -= BM_BITS_PER_BLOCK;
			} else {
				/* This is executed only once in the loop */
				pfn += pages;
			}
			bb->end_pfn = pfn;
		}
	}

	bm->p_list = ca.chain;
	memory_bm_position_reset(bm);
 Exit:
	free_mem_extents(&mem_extents);
	return error;

 Error:
	bm->p_list = ca.chain;
	memory_bm_free(bm, PG_UNSAFE_CLEAR);
	goto Exit;
}

/*
 * memory_bm_free - free memory occupied by the memory bitmap @bm
 */
static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
{
	struct bm_block *bb;

	list_for_each_entry(bb, &bm->blocks, hook)
		if (bb->data)
			free_image_page(bb->data, clear_nosave_free);

	free_list_of_pages(bm->p_list, clear_nosave_free);

	INIT_LIST_HEAD(&bm->blocks);
}

/*
 * memory_bm_find_bit - find the bit in the bitmap @bm that corresponds
 * to the given pfn.  The cur.block member of @bm is updated on success.
 */
static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
			      void **addr, unsigned int *bit_nr)
{
	struct bm_block *bb;

	/*
	 * Check if the pfn corresponds to the current bitmap block and find
	 * the block where it fits if this is not the case.
	 */
	bb = bm->cur.block;
	if (pfn < bb->start_pfn)
		list_for_each_entry_continue_reverse(bb, &bm->blocks, hook)
			if (pfn >= bb->start_pfn)
				break;

	if (pfn >= bb->end_pfn)
		list_for_each_entry_continue(bb, &bm->blocks, hook)
			if (pfn >= bb->start_pfn && pfn < bb->end_pfn)
				break;

	if (&bb->hook == &bm->blocks)
		return -EFAULT;

	/* The block has been found */
	bm->cur.block = bb;
	pfn -= bb->start_pfn;
	bm->cur.bit = pfn + 1;
	*bit_nr = pfn;
	*addr = bb->data;
	return 0;
}

static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	set_bit(bit, addr);
}

static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	if (!error)
		set_bit(bit, addr);
	return error;
}

static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	clear_bit(bit, addr);
}

static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	return test_bit(bit, addr);
}

static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;

	return !memory_bm_find_bit(bm, pfn, &addr, &bit);
}

/*
 * memory_bm_next_pfn - find the pfn that corresponds to the next set bit
 * in the bitmap @bm.  If the pfn cannot be found, BM_END_OF_MAP is
 * returned.
 *
 * It is required to run memory_bm_position_reset() before the first call
 * to this function.
 */
static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
{
	struct bm_block *bb;
	int bit;

	bb = bm->cur.block;
	do {
		bit = bm->cur.bit;
		bit = find_next_bit(bb->data, bm_block_bits(bb), bit);
		if (bit < bm_block_bits(bb))
			goto Return_pfn;

		bb = list_entry(bb->hook.next, struct bm_block, hook);
		bm->cur.block = bb;
		bm->cur.bit = 0;
	} while (&bb->hook != &bm->blocks);

	memory_bm_position_reset(bm);
	return BM_END_OF_MAP;

 Return_pfn:
	bm->cur.bit = bit + 1;
	return bb->start_pfn + bit;
}
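
/*
 * Illustrative sketch only: the canonical way to walk all set bits, as the
 * callers below do (do_something() is a hypothetical placeholder):
 *
 *	memory_bm_position_reset(bm);
 *	for (pfn = memory_bm_next_pfn(bm); pfn != BM_END_OF_MAP;
 *	     pfn = memory_bm_next_pfn(bm))
 *		do_something(pfn);
 */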

/*
 * This structure represents a range of page frames the contents of which
 * should not be saved during the suspend.
 */
struct nosave_region {
	struct list_head list;
	unsigned long start_pfn;
	unsigned long end_pfn;
};

static LIST_HEAD(nosave_regions);

/*
 * register_nosave_region - register a range of page frames the contents
 * of which should not be saved during the suspend (to be used in the early
 * initialization code)
 */
void __init
__register_nosave_region(unsigned long start_pfn, unsigned long end_pfn,
			 int use_kmalloc)
{
	struct nosave_region *region;

	if (start_pfn >= end_pfn)
		return;

	if (!list_empty(&nosave_regions)) {
		/* Try to extend the previous region (they should be sorted) */
		region = list_entry(nosave_regions.prev,
				    struct nosave_region, list);
		if (region->end_pfn == start_pfn) {
			region->end_pfn = end_pfn;
			goto Report;
		}
	}
	if (use_kmalloc) {
		/* During init, this shouldn't fail */
		region = kmalloc(sizeof(struct nosave_region), GFP_KERNEL);
		BUG_ON(!region);
	} else
		/* This allocation cannot fail */
		region = alloc_bootmem(sizeof(struct nosave_region));
	region->start_pfn = start_pfn;
	region->end_pfn = end_pfn;
	list_add_tail(&region->list, &nosave_regions);
 Report:
	printk(KERN_INFO "PM: Registered nosave memory: [mem %#010llx-%#010llx]\n",
		(unsigned long long) start_pfn << PAGE_SHIFT,
		((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
}
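
/*
 * Illustrative sketch only: architecture setup code typically marks such
 * ranges through the register_nosave_region() wrapper declared in
 * <linux/suspend.h>, e.g.:
 *
 *	register_nosave_region(PFN_DOWN(start_phys), PFN_UP(end_phys));
 */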

/*
 * Set bits in this map correspond to the page frames the contents of which
 * should not be saved during the suspend.
 */
static struct memory_bitmap *forbidden_pages_map;

/* Set bits in this map correspond to free page frames. */
static struct memory_bitmap *free_pages_map;

/*
 * Each page frame allocated for creating the image is marked by setting the
 * corresponding bits in forbidden_pages_map and free_pages_map simultaneously.
 */
void swsusp_set_page_free(struct page *page)
{
	if (free_pages_map)
		memory_bm_set_bit(free_pages_map, page_to_pfn(page));
}

static int swsusp_page_is_free(struct page *page)
{
	return free_pages_map ?
		memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0;
}

void swsusp_unset_page_free(struct page *page)
{
	if (free_pages_map)
		memory_bm_clear_bit(free_pages_map, page_to_pfn(page));
}

static void swsusp_set_page_forbidden(struct page *page)
{
	if (forbidden_pages_map)
		memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page));
}

int swsusp_page_is_forbidden(struct page *page)
{
	return forbidden_pages_map ?
		memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0;
}

static void swsusp_unset_page_forbidden(struct page *page)
{
	if (forbidden_pages_map)
		memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
}

/*
 * mark_nosave_pages - set bits corresponding to the page frames the
 * contents of which should not be saved in a given bitmap.
 */
static void mark_nosave_pages(struct memory_bitmap *bm)
{
	struct nosave_region *region;

	if (list_empty(&nosave_regions))
		return;

	list_for_each_entry(region, &nosave_regions, list) {
		unsigned long pfn;

		pr_debug("PM: Marking nosave pages: [mem %#010llx-%#010llx]\n",
			 (unsigned long long) region->start_pfn << PAGE_SHIFT,
			 ((unsigned long long) region->end_pfn << PAGE_SHIFT)
				- 1);

		for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
			if (pfn_valid(pfn)) {
				/*
				 * It is safe to ignore the result of
				 * mem_bm_set_bit_check() here, since we won't
				 * touch the PFNs for which the error is
				 * returned anyway.
				 */
				mem_bm_set_bit_check(bm, pfn);
			}
	}
}

/*
 * create_basic_memory_bitmaps - create bitmaps needed for marking page
 * frames that should not be saved and free page frames.  The pointers
 * forbidden_pages_map and free_pages_map are only modified if everything
 * goes well, because we don't want the bits to be used before both are
 * fully initialized.
 */
int create_basic_memory_bitmaps(void)
{
	struct memory_bitmap *bm1, *bm2;
	int error = 0;

	BUG_ON(forbidden_pages_map || free_pages_map);

	bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
	if (!bm1)
		return -ENOMEM;

	error = memory_bm_create(bm1, GFP_KERNEL, PG_ANY);
	if (error)
		goto Free_first_object;

	bm2 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
	if (!bm2)
		goto Free_first_bitmap;

	error = memory_bm_create(bm2, GFP_KERNEL, PG_ANY);
	if (error)
		goto Free_second_object;

	forbidden_pages_map = bm1;
	free_pages_map = bm2;
	mark_nosave_pages(forbidden_pages_map);

	pr_debug("PM: Basic memory bitmaps created\n");

	return 0;

 Free_second_object:
	kfree(bm2);
 Free_first_bitmap:
	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
 Free_first_object:
	kfree(bm1);
	return -ENOMEM;
}

/*
 * free_basic_memory_bitmaps - free memory bitmaps allocated by
 * create_basic_memory_bitmaps().  The auxiliary pointers are necessary
 * so that the bitmaps themselves are not referred to while they are being
 * freed.
 */
void free_basic_memory_bitmaps(void)
{
	struct memory_bitmap *bm1, *bm2;

	BUG_ON(!(forbidden_pages_map && free_pages_map));

	bm1 = forbidden_pages_map;
	bm2 = free_pages_map;
	forbidden_pages_map = NULL;
	free_pages_map = NULL;
	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
	kfree(bm1);
	memory_bm_free(bm2, PG_UNSAFE_CLEAR);
	kfree(bm2);

	pr_debug("PM: Basic memory bitmaps freed\n");
}

/*
 * snapshot_additional_pages - estimate the number of additional pages
 * needed for setting up the suspend image data structures for given zone
 * (usually the returned value is greater than the exact number)
 */
unsigned int snapshot_additional_pages(struct zone *zone)
{
	unsigned int res;

	res = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
	res += DIV_ROUND_UP(res * sizeof(struct bm_block),
			    LINKED_PAGE_DATA_SIZE);
	return 2 * res;
}
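
/*
 * Example (illustrative, assuming a 64-bit build with 4 KiB pages): a zone
 * spanning 1 GiB (262144 page frames) needs
 * DIV_ROUND_UP(262144, 32768) = 8 bitmap pages plus one linked page for
 * the eight bm_block objects, doubled for the two bitmaps used during
 * hibernation, i.e. 18 additional pages.
 */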

#ifdef CONFIG_HIGHMEM
/*
 * count_free_highmem_pages - compute the total number of free highmem
 * pages, values of NR_FREE_PAGES for all zones.
 */
static unsigned int count_free_highmem_pages(void)
{
	struct zone *zone;
	unsigned int cnt = 0;

	for_each_populated_zone(zone)
		if (is_highmem(zone))
			cnt += zone_page_state(zone, NR_FREE_PAGES);

	return cnt;
}

/*
 * saveable_highmem_page - Determine whether a highmem page should be
 * included in the suspend image.
 *
 * We should save the page if it isn't "forbidden" or "free", or Reserved,
 * and it isn't a part of a free chunk of pages.
 */
static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_page(pfn);
	if (page_zone(page) != zone)
		return NULL;

	BUG_ON(!PageHighMem(page));

	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
		return NULL;

	if (PageReserved(page) || PageOffline(page))
		return NULL;

	if (page_is_guard(page))
		return NULL;

	return page;
}

/*
 * count_highmem_pages - compute the total number of saveable highmem pages.
 */
static unsigned int count_highmem_pages(void)
{
	struct zone *zone;
	unsigned int n = 0;

	for_each_populated_zone(zone) {
		unsigned long pfn, max_zone_pfn;

		if (!is_highmem(zone))
			continue;

		mark_free_pages(zone);
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_highmem_page(zone, pfn))
				n++;
	}
	return n;
}
#else
static inline void *saveable_highmem_page(struct zone *z, unsigned long p)
{
	return NULL;
}
#endif /* CONFIG_HIGHMEM */

/*
 * saveable_page - Determine whether a non-highmem page should be included
 * in the suspend image.
 *
 * We should save the page if it isn't "forbidden", and is not in the range
 * of pages statically defined as 'unsaveable', and it isn't a part of a
 * free chunk of pages.
 */
static struct page *saveable_page(struct zone *zone, unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_page(pfn);
	if (page_zone(page) != zone)
		return NULL;

	BUG_ON(PageHighMem(page));

	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
		return NULL;

	if (PageOffline(page))
		return NULL;

	if (PageReserved(page)
	    && (!kernel_page_present(page) || pfn_is_nosave(pfn)))
		return NULL;

	if (page_is_guard(page))
		return NULL;

	return page;
}

/*
 * count_data_pages - compute the total number of saveable non-highmem pages.
 */
static unsigned int count_data_pages(void)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;
	unsigned int n = 0;

	for_each_populated_zone(zone) {
		if (is_highmem(zone))
			continue;

		mark_free_pages(zone);
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_page(zone, pfn))
				n++;
	}
	return n;
}

/*
 * This is needed, because copy_page and memcpy are not usable for copying
 * task structs.
 */
static inline void do_copy_page(long *dst, long *src)
{
	int n;

	for (n = PAGE_SIZE / sizeof(long); n; n--)
		*dst++ = *src++;
}

/*
 * safe_copy_page - check if the page we are going to copy is marked as
 * present in the kernel page tables (this always is the case if
 * CONFIG_DEBUG_PAGEALLOC is not set and in that case kernel_page_present()
 * always returns 'true').
 */
static void safe_copy_page(void *dst, struct page *s_page)
{
	if (kernel_page_present(s_page)) {
		do_copy_page(dst, page_address(s_page));
	} else {
		kernel_map_pages(s_page, 1, 1);
		do_copy_page(dst, page_address(s_page));
		kernel_map_pages(s_page, 1, 0);
	}
}

#ifdef CONFIG_HIGHMEM
static inline struct page *
page_is_saveable(struct zone *zone, unsigned long pfn)
{
	return is_highmem(zone) ?
		saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
}

static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
	struct page *s_page, *d_page;
	void *src, *dst;

	s_page = pfn_to_page(src_pfn);
	d_page = pfn_to_page(dst_pfn);
	if (PageHighMem(s_page)) {
		src = kmap_atomic(s_page);
		dst = kmap_atomic(d_page);
		do_copy_page(dst, src);
		kunmap_atomic(dst);
		kunmap_atomic(src);
	} else {
		if (PageHighMem(d_page)) {
			/*
			 * The page pointed to by src may contain some kernel
			 * data modified by kmap_atomic()
			 */
			safe_copy_page(buffer, s_page);
			dst = kmap_atomic(d_page);
			copy_page(dst, buffer);
			kunmap_atomic(dst);
		} else {
			safe_copy_page(page_address(d_page), s_page);
		}
	}
}
#else
#define page_is_saveable(zone, pfn)	saveable_page(zone, pfn)

static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
	safe_copy_page(page_address(pfn_to_page(dst_pfn)),
			pfn_to_page(src_pfn));
}
#endif /* CONFIG_HIGHMEM */

static void
copy_data_pages(struct memory_bitmap *copy_bm, struct memory_bitmap *orig_bm)
{
	struct zone *zone;
	unsigned long pfn;

	for_each_populated_zone(zone) {
		unsigned long max_zone_pfn;

		mark_free_pages(zone);
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (page_is_saveable(zone, pfn))
				memory_bm_set_bit(orig_bm, pfn);
	}
	memory_bm_position_reset(orig_bm);
	memory_bm_position_reset(copy_bm);
	for (;;) {
		pfn = memory_bm_next_pfn(orig_bm);
		if (unlikely(pfn == BM_END_OF_MAP))
			break;
		copy_data_page(memory_bm_next_pfn(copy_bm), pfn);
	}
}

/* Total number of image pages */
static unsigned int nr_copy_pages;
/* Number of pages needed for saving the original pfns of the image pages */
static unsigned int nr_meta_pages;
/*
 * Numbers of normal and highmem page frames allocated for hibernation image
 * before suspending devices.
 */
unsigned int alloc_normal, alloc_highmem;
/*
 * Memory bitmap used for marking saveable pages (during hibernation) or
 * hibernation image pages (during restore)
 */
static struct memory_bitmap orig_bm;
/*
 * Memory bitmap used during hibernation for marking allocated page frames
 * that will contain copies of saveable pages.  During restore it is initially
 * used for marking hibernation image pages, but then the set bits from it are
 * duplicated in orig_bm and it is released.  On highmem systems it is next
 * used for marking "safe" highmem pages, but it has to be reinitialized for
 * this purpose.
 */
static struct memory_bitmap copy_bm;

/*
 * swsusp_free - free pages allocated for the suspend.
 *
 * Suspend pages are allocated before the atomic copy is made, so we
 * need to release them after the resume.
 */
void swsusp_free(void)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;

	for_each_populated_zone(zone) {
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (pfn_valid(pfn)) {
				struct page *page = pfn_to_page(pfn);

				if (swsusp_page_is_forbidden(page) &&
				    swsusp_page_is_free(page)) {
					swsusp_unset_page_forbidden(page);
					swsusp_unset_page_free(page);
					__free_page(page);
				}
			}
	}
	nr_copy_pages = 0;
	nr_meta_pages = 0;
	restore_pblist = NULL;
	buffer = NULL;
	alloc_normal = 0;
	alloc_highmem = 0;
}

/* Helper functions used for the shrinking of memory. */

#define GFP_IMAGE	(GFP_KERNEL | __GFP_NOWARN)

/*
 * preallocate_image_pages - Allocate a number of pages for hibernation image
 * @nr_pages: Number of page frames to allocate.
 * @mask: GFP flags to use for the allocation.
 *
 * Return value: Number of page frames actually allocated
 */
static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
{
	unsigned long nr_alloc = 0;

	while (nr_pages > 0) {
		struct page *page;

		page = alloc_image_page(mask);
		if (!page)
			break;
		memory_bm_set_bit(&copy_bm, page_to_pfn(page));
		if (PageHighMem(page))
			alloc_highmem++;
		else
			alloc_normal++;
		nr_pages--;
		nr_alloc++;
	}

	return nr_alloc;
}

static unsigned long preallocate_image_memory(unsigned long nr_pages,
					      unsigned long avail_normal)
{
	unsigned long alloc;

	if (avail_normal <= alloc_normal)
		return 0;

	alloc = avail_normal - alloc_normal;
	if (nr_pages < alloc)
		alloc = nr_pages;

	return preallocate_image_pages(alloc, GFP_IMAGE);
}

#ifdef CONFIG_HIGHMEM
static unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
	return preallocate_image_pages(nr_pages, GFP_IMAGE | __GFP_HIGHMEM);
}

/*
 *  __fraction - Compute (an approximation of) x * (multiplier / base)
 */
static unsigned long __fraction(u64 x, u64 multiplier, u64 base)
{
	x *= multiplier;
	do_div(x, base);
	return (unsigned long)x;
}
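
/*
 * E.g. (illustrative): __fraction(100, 1, 3) evaluates 100 * 1 / 3 via
 * do_div() and returns 33, i.e. the result is truncated, not rounded.
 */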

static unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
						unsigned long highmem,
						unsigned long total)
{
	unsigned long alloc = __fraction(nr_pages, highmem, total);

	return preallocate_image_pages(alloc, GFP_IMAGE | __GFP_HIGHMEM);
}
#else
static inline unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
	return 0;
}

static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
						unsigned long highmem,
						unsigned long total)
{
	return 0;
}
#endif /* CONFIG_HIGHMEM */

/*
 * free_unnecessary_pages - Release preallocated pages not needed for the image
 */
static void free_unnecessary_pages(void)
{
	unsigned long save, to_free_normal, to_free_highmem;

	save = count_data_pages();
	if (alloc_normal >= save) {
		to_free_normal = alloc_normal - save;
		save = 0;
	} else {
		to_free_normal = 0;
		save -= alloc_normal;
	}
	save += count_highmem_pages();
	if (alloc_highmem >= save) {
		to_free_highmem = alloc_highmem - save;
	} else {
		to_free_highmem = 0;
		save -= alloc_highmem;
		if (to_free_normal > save)
			to_free_normal -= save;
		else
			to_free_normal = 0;
	}

	memory_bm_position_reset(&copy_bm);

	while (to_free_normal > 0 || to_free_highmem > 0) {
		unsigned long pfn = memory_bm_next_pfn(&copy_bm);
		struct page *page = pfn_to_page(pfn);

		if (PageHighMem(page)) {
			if (!to_free_highmem)
				continue;
			to_free_highmem--;
			alloc_highmem--;
		} else {
			if (!to_free_normal)
				continue;
			to_free_normal--;
			alloc_normal--;
		}
		memory_bm_clear_bit(&copy_bm, pfn);
		swsusp_unset_page_forbidden(page);
		swsusp_unset_page_free(page);
		__free_page(page);
	}
}

/*
 * minimum_image_size - Estimate the minimum acceptable size of an image
 * @saveable: Number of saveable pages in the system.
 *
 * We want to avoid attempting to free too much memory too hard, so estimate
 * the minimum acceptable size of a hibernation image to use as the lower
 * limit for preallocating memory.
 *
 * We assume that the minimum image size should be proportional to
 *
 * [number of saveable pages] - [number of pages that can be freed in theory]
 *
 * where the second term is the sum of (1) reclaimable slab pages, (2) active
 * and (3) inactive anonymous pages, (4) active and (5) inactive file pages,
 * minus mapped file pages.
 */
static unsigned long minimum_image_size(unsigned long saveable)
{
	unsigned long size;

	size = global_page_state(NR_SLAB_RECLAIMABLE)
		+ global_page_state(NR_ACTIVE_ANON)
		+ global_page_state(NR_INACTIVE_ANON)
		+ global_page_state(NR_ACTIVE_FILE)
		+ global_page_state(NR_INACTIVE_FILE)
		- global_page_state(NR_FILE_MAPPED);

	return saveable <= size ? 0 : saveable - size;
}

/*
 * hibernate_preallocate_memory - Preallocate memory for hibernation image
 *
 * To create a hibernation image it is necessary to make a copy of every page
 * frame in use.  We also need a number of page frames to be free during
 * hibernation for allocations made while saving the image and for device
 * drivers, in case they need to allocate memory from their hibernation
 * callbacks (these two numbers are given by PAGES_FOR_IO (which is a rough
 * estimate) and reserved_size divided by PAGE_SIZE (which is tunable through
 * /sys/power/reserved_size), respectively).  To make this happen, we compute
 * the total number of available page frames and allocate at least
 *
 * ([page frames total] + PAGES_FOR_IO + [metadata pages]) / 2
 *  + 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
 *
 * of them, which corresponds to the maximum size of a hibernation image.
 *
 * If image_size is set below the number following from the above formula,
 * the preallocation of memory is continued until the total number of saveable
 * pages in the system is below the requested image size or the minimum
 * acceptable image size returned by minimum_image_size(), whichever is
 * greater.
 */
int hibernate_preallocate_memory(void)
{
	struct zone *zone;
	unsigned long saveable, size, max_size, count, highmem, pages = 0;
	unsigned long alloc, save_highmem, pages_highmem, avail_normal;
	struct timeval start, stop;
	int error;

	printk(KERN_INFO "PM: Preallocating image memory... ");
	do_gettimeofday(&start);

	error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY);
	if (error)
		goto err_out;

	error = memory_bm_create(&copy_bm, GFP_IMAGE, PG_ANY);
	if (error)
		goto err_out;

	alloc_normal = 0;
	alloc_highmem = 0;

	/* Count the number of saveable data pages. */
	save_highmem = count_highmem_pages();
	saveable = count_data_pages();

	/*
	 * Compute the total number of page frames we can use (count) and the
	 * number of pages needed for image metadata (size).
	 */
	count = saveable;
	saveable += save_highmem;
	highmem = save_highmem;
	size = 0;
	for_each_populated_zone(zone) {
		size += snapshot_additional_pages(zone);
		if (is_highmem(zone))
			highmem += zone_page_state(zone, NR_FREE_PAGES);
		else
			count += zone_page_state(zone, NR_FREE_PAGES);
	}
	avail_normal = count;
	count += highmem;
	count -= totalreserve_pages;

	/* Add number of pages required for page keys (s390 only). */
	size += page_key_additional_pages(saveable);

	/* Compute the maximum number of saveable pages to leave in memory. */
	max_size = (count - (size + PAGES_FOR_IO)) / 2
			- 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE);
	/* Compute the desired number of image pages specified by image_size. */
	size = DIV_ROUND_UP(image_size, PAGE_SIZE);
	if (size > max_size)
		size = max_size;
	/*
	 * If the desired number of image pages is at least as large as the
	 * current number of saveable pages in memory, allocate page frames for
	 * the image and we're done.
	 */
	if (size >= saveable) {
		pages = preallocate_image_highmem(save_highmem);
		pages += preallocate_image_memory(saveable - pages, avail_normal);
		goto out;
	}

	/* Estimate the minimum size of the image. */
	pages = minimum_image_size(saveable);
	/*
	 * To avoid excessive pressure on the normal zone, leave room in it to
	 * accommodate an image of the minimum size (unless it's already too
	 * small, in which case don't preallocate pages from it at all).
	 */
	if (avail_normal > pages)
		avail_normal -= pages;
	else
		avail_normal = 0;
	if (size < pages)
		size = min_t(unsigned long, pages, max_size);

	/*
	 * Let the memory management subsystem know that we're going to need a
	 * large number of page frames to allocate and make it free some memory.
	 * NOTE: If this is not done, performance will be hurt badly in some
	 * test cases.
	 */
	shrink_all_memory(saveable - size);

	/*
	 * The number of saveable pages in memory was too high, so apply some
	 * pressure to decrease it.  First, make room for the largest possible
	 * image and fail if that doesn't work.  Next, try to decrease the size
	 * of the image as much as indicated by 'size' using allocations from
	 * highmem and non-highmem zones separately.
	 */
	pages_highmem = preallocate_image_highmem(highmem / 2);
	alloc = count - max_size;
	if (alloc > pages_highmem)
		alloc -= pages_highmem;
	else
		alloc = 0;
	pages = preallocate_image_memory(alloc, avail_normal);
	if (pages < alloc) {
		/* We have exhausted non-highmem pages, try highmem. */
		alloc -= pages;
		pages += pages_highmem;
		pages_highmem = preallocate_image_highmem(alloc);
		if (pages_highmem < alloc)
			goto err_out;
		pages += pages_highmem;
		/*
		 * size is the desired number of saveable pages to leave in
		 * memory, so try to preallocate (all memory - size) pages.
		 */
		alloc = (count - pages) - size;
		pages += preallocate_image_highmem(alloc);
	} else {
		/*
		 * There are approximately max_size saveable pages at this
		 * point and we want to reduce this number down to size.
		 */
		alloc = max_size - size;
		size = preallocate_highmem_fraction(alloc, highmem, count);
		pages_highmem += size;
		alloc -= size;
		size = preallocate_image_memory(alloc, avail_normal);
		pages_highmem += preallocate_image_highmem(alloc - size);
		pages += pages_highmem + size;
	}

	/*
	 * We only need as many page frames for the image as there are saveable
	 * pages in memory, but we have allocated more.  Release the excessive
	 * ones now.
	 */
	free_unnecessary_pages();

 out:
	do_gettimeofday(&stop);
	printk(KERN_CONT "done (allocated %lu pages)\n", pages);
	swsusp_show_speed(&start, &stop, pages, "Allocated");

	return 0;

 err_out:
	printk(KERN_CONT "\n");
	swsusp_free();
	return -ENOMEM;
}

#ifdef CONFIG_HIGHMEM
/*
 * count_pages_for_highmem - compute the number of non-highmem pages
 * that will be necessary for creating copies of highmem pages.
 */
static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
{
	unsigned int free_highmem = count_free_highmem_pages() + alloc_highmem;

	if (free_highmem >= nr_highmem)
		nr_highmem = 0;
	else
		nr_highmem -= free_highmem;

	return nr_highmem;
}
#else
static unsigned int
count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
#endif /* CONFIG_HIGHMEM */

/*
 * enough_free_mem - Make sure we have enough free memory for the snapshot
 * image.
 */
static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
{
	struct zone *zone;
	unsigned int free = alloc_normal;

	for_each_populated_zone(zone)
		if (!is_highmem(zone))
			free += zone_page_state(zone, NR_FREE_PAGES);

	nr_pages += count_pages_for_highmem(nr_highmem);
	pr_debug("PM: Normal pages needed: %u + %u, available pages: %u\n",
		nr_pages, PAGES_FOR_IO, free);

	return free > nr_pages + PAGES_FOR_IO;
}

#ifdef CONFIG_HIGHMEM
/*
 * get_highmem_buffer - if there are some highmem pages in the suspend
 * image, we may need the buffer to copy them and/or load their data.
 */
static inline int get_highmem_buffer(int safe_needed)
{
	buffer = get_image_page(GFP_ATOMIC | __GFP_COLD, safe_needed);
	return buffer ? 0 : -ENOMEM;
}

/*
 * alloc_highmem_pages - allocate some highmem pages for the image.
 * Try to allocate as many pages as needed, but if the number of free
 * highmem pages is less than that, allocate them all.
 */
static inline unsigned int
alloc_highmem_pages(struct memory_bitmap *bm, unsigned int nr_highmem)
{
	unsigned int to_alloc = count_free_highmem_pages();

	if (to_alloc > nr_highmem)
		to_alloc = nr_highmem;

	nr_highmem -= to_alloc;
	while (to_alloc-- > 0) {
		struct page *page;

		page = alloc_image_page(__GFP_HIGHMEM);
		memory_bm_set_bit(bm, page_to_pfn(page));
	}
	return nr_highmem;
}
#else
static inline int get_highmem_buffer(int safe_needed) { return 0; }

static inline unsigned int
alloc_highmem_pages(struct memory_bitmap *bm, unsigned int n) { return 0; }
#endif /* CONFIG_HIGHMEM */

/*
 * swsusp_alloc - allocate memory for the suspend image
 *
 * We first try to allocate as many highmem pages as there are
 * saveable highmem pages in the system.  If that fails, we allocate
 * non-highmem pages for the copies of the remaining highmem ones.
 */
static int
swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm,
		unsigned int nr_pages, unsigned int nr_highmem)
{
	if (nr_highmem > 0) {
		if (get_highmem_buffer(PG_ANY))
			goto err_out;
		if (nr_highmem > alloc_highmem) {
			nr_highmem -= alloc_highmem;
			nr_pages += alloc_highmem_pages(copy_bm, nr_highmem);
		}
	}
	if (nr_pages > alloc_normal) {
		nr_pages -= alloc_normal;
		while (nr_pages-- > 0) {
			struct page *page;

			page = alloc_image_page(GFP_ATOMIC | __GFP_COLD);
			if (!page)
				goto err_out;
			memory_bm_set_bit(copy_bm, page_to_pfn(page));
		}
	}

	return 0;

 err_out:
	swsusp_free();
	return -ENOMEM;
}

asmlinkage int swsusp_save(void)
{
	unsigned int nr_pages, nr_highmem;

	printk(KERN_INFO "PM: Creating hibernation image:\n");

	drain_local_pages(NULL);
	nr_pages = count_data_pages();
	nr_highmem = count_highmem_pages();
	printk(KERN_INFO "PM: Need to copy %u pages\n", nr_pages + nr_highmem);

	if (!enough_free_mem(nr_pages, nr_highmem)) {
		printk(KERN_ERR "PM: Not enough free memory\n");
		return -ENOMEM;
	}

	if (swsusp_alloc(&orig_bm, &copy_bm, nr_pages, nr_highmem)) {
		printk(KERN_ERR "PM: Memory allocation failed\n");
		return -ENOMEM;
	}

	/*
	 * During allocating of suspend pagedir, new cold pages may appear.
	 * Kill them.
	 */
	drain_local_pages(NULL);
	copy_data_pages(&copy_bm, &orig_bm);

	/*
	 * End of critical section. From now on, we can write to memory,
	 * but we should not touch disk. This specially means we must _not_
	 * touch swap space! Except we must write out our image of course.
	 */
	nr_pages += nr_highmem;
	nr_copy_pages = nr_pages;
	nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);

	printk(KERN_INFO "PM: Hibernation image created (%d pages copied)\n",
		nr_pages);

	return 0;
}

#ifndef CONFIG_ARCH_HIBERNATION_HEADER
static int init_header_complete(struct swsusp_info *info)
{
	memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
	info->version_code = LINUX_VERSION_CODE;
	return 0;
}

static char *check_image_kernel(struct swsusp_info *info)
{
	if (info->version_code != LINUX_VERSION_CODE)
		return "kernel version";
	if (strcmp(info->uts.sysname, init_utsname()->sysname))
		return "system type";
	if (strcmp(info->uts.release, init_utsname()->release))
		return "kernel release";
	if (strcmp(info->uts.version, init_utsname()->version))
		return "version";
	if (strcmp(info->uts.machine, init_utsname()->machine))
		return "machine";
	return NULL;
}
#endif /* CONFIG_ARCH_HIBERNATION_HEADER */

unsigned long snapshot_get_image_size(void)
{
	return nr_copy_pages + nr_meta_pages + 1;
}
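
/*
 * The image thus consists of one header page (struct swsusp_info),
 * nr_meta_pages of packed pfns and nr_copy_pages of page data.  For
 * example (illustrative, 64-bit build with 4 KiB pages): an image of
 * 100000 data pages needs DIV_ROUND_UP(100000 * 8, 4096) = 196 metadata
 * pages, for a total of 100197 pages.
 */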

static int init_header(struct swsusp_info *info)
{
	memset(info, 0, sizeof(struct swsusp_info));
	info->num_physpages = num_physpages;
	info->image_pages = nr_copy_pages;
	info->pages = snapshot_get_image_size();
	info->size = info->pages;
	info->size <<= PAGE_SHIFT;
	return init_header_complete(info);
}

/*
 * pack_pfns - pfns corresponding to the set bits found in the bitmap @bm
 * are stored in the array @buf[] (1 page at a time)
 */
static inline void
pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
		buf[j] = memory_bm_next_pfn(bm);
		if (unlikely(buf[j] == BM_END_OF_MAP))
			break;
		/* Save page key for data page (s390 only). */
		page_key_read(buf + j);
	}
}

/*
 * snapshot_read_next - used for reading the system memory snapshot.
 *
 * On the first call to it @handle should point to a zeroed
 * snapshot_handle structure.  The structure gets updated and a pointer
 * to it should be passed to this function every next time.
 *
 * On success the function returns a positive number.  Then, the caller
 * is allowed to read up to the returned number of bytes from the memory
 * location computed by the data_of() macro.
 *
 * The function returns 0 to indicate the end of the data stream condition,
 * and a negative number is returned on error.  In such cases the
 * structure pointed to by @handle is not updated and should not be used
 * any more.
 */
int snapshot_read_next(struct snapshot_handle *handle)
{
	if (handle->cur > nr_meta_pages + nr_copy_pages)
		return 0;

	if (!buffer) {
		/* This makes the buffer be freed by swsusp_free() */
		buffer = get_image_page(GFP_ATOMIC, PG_ANY);
		if (!buffer)
			return -ENOMEM;
	}
	if (!handle->cur) {
		int error;

		error = init_header((struct swsusp_info *)buffer);
		if (error)
			return error;
		handle->buffer = buffer;
		memory_bm_position_reset(&orig_bm);
		memory_bm_position_reset(&copy_bm);
	} else if (handle->cur <= nr_meta_pages) {
		clear_page(buffer);
		pack_pfns(buffer, &orig_bm);
	} else {
		struct page *page;

		page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
		if (PageHighMem(page)) {
			/*
			 * Highmem pages are copied to the buffer,
			 * because we can't return with a kmapped
			 * highmem page (we may not be called again).
			 */
			void *kaddr;

			kaddr = kmap_atomic(page);
			copy_page(buffer, kaddr);
			kunmap_atomic(kaddr);
			handle->buffer = buffer;
		} else {
			handle->buffer = page_address(page);
		}
	}
	handle->cur++;
	return PAGE_SIZE;
}
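
/*
 * Illustrative sketch only: a consumer such as the swap writer calls this
 * in a loop (data_of() is defined in power.h; write_page_somewhere() is a
 * hypothetical sink for the returned page):
 *
 *	struct snapshot_handle handle;
 *	int ret;
 *
 *	memset(&handle, 0, sizeof(handle));
 *	while ((ret = snapshot_read_next(&handle)) > 0)
 *		write_page_somewhere(data_of(handle));
 */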

/*
 * mark_unsafe_pages - mark the pages that cannot be used for storing
 * the image during resume, because they conflict with the pages that
 * had been used before suspend
 */
static int mark_unsafe_pages(struct memory_bitmap *bm)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;

	/* Clear page flags */
	for_each_populated_zone(zone) {
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (pfn_valid(pfn))
				swsusp_unset_page_free(pfn_to_page(pfn));
	}

	/* Mark pages that correspond to the "original" pfns as "unsafe" */
	memory_bm_position_reset(bm);
	do {
		pfn = memory_bm_next_pfn(bm);
		if (likely(pfn != BM_END_OF_MAP)) {
			if (likely(pfn_valid(pfn)))
				swsusp_set_page_free(pfn_to_page(pfn));
			else
				return -EFAULT;
		}
	} while (pfn != BM_END_OF_MAP);

	allocated_unsafe_pages = 0;

	return 0;
}

static void
duplicate_memory_bitmap(struct memory_bitmap *dst, struct memory_bitmap *src)
{
	unsigned long pfn;

	memory_bm_position_reset(src);
	pfn = memory_bm_next_pfn(src);
	while (pfn != BM_END_OF_MAP) {
		memory_bm_set_bit(dst, pfn);
		pfn = memory_bm_next_pfn(src);
	}
}

static int check_header(struct swsusp_info *info)
{
	char *reason;

	reason = check_image_kernel(info);
	if (!reason && info->num_physpages != num_physpages)
		reason = "memory size";
	if (reason) {
		printk(KERN_ERR "PM: Image mismatch: %s\n", reason);
		return -EPERM;
	}
	return 0;
}

/*
 * load_header - check the image header and copy the data from it
 */
static int
load_header(struct swsusp_info *info)
{
	int error;

	restore_pblist = NULL;
	error = check_header(info);
	if (!error) {
		nr_copy_pages = info->image_pages;
		nr_meta_pages = info->pages - info->image_pages - 1;
	}
	return error;
}

/*
 * unpack_orig_pfns - for each element of @buf[] (1 page at a time),
 * set the corresponding bit in the memory bitmap @bm
 */
static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
		if (unlikely(buf[j] == BM_END_OF_MAP))
			break;

		/* Extract and buffer page key for data page (s390 only). */
		page_key_memorize(buf + j);

		if (memory_bm_pfn_present(bm, buf[j]))
			memory_bm_set_bit(bm, buf[j]);
		else
			return -EFAULT;
	}

	return 0;
}

/*
 * List of "safe" pages that may be used to store data loaded from the
 * suspend image
 */
static struct linked_page *safe_pages_list;

#ifdef CONFIG_HIGHMEM
/*
 * struct highmem_pbe is used for creating the list of highmem pages that
 * should be restored atomically during the resume from disk, because the
 * page frames they have occupied before the suspend are in use.
 */
struct highmem_pbe {
	struct page *copy_page;	/* data is here now */
	struct page *orig_page;	/* data was here before the suspend */
	struct highmem_pbe *next;
};

/*
 * List of highmem PBEs needed for restoring the highmem pages that were
 * allocated before the suspend and included in the suspend image, but have
 * also been allocated by the "resume" kernel, so their contents cannot be
 * written directly to their "original" page frames.
 */
static struct highmem_pbe *highmem_pblist;

/*
 * count_highmem_image_pages - compute the number of highmem pages in the
 * suspend image.  The bits in the memory bitmap @bm that correspond to the
 * image pages are assumed to be set.
 */
static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
{
	unsigned long pfn;
	unsigned int cnt = 0;

	memory_bm_position_reset(bm);
	pfn = memory_bm_next_pfn(bm);
	while (pfn != BM_END_OF_MAP) {
		if (PageHighMem(pfn_to_page(pfn)))
			cnt++;

		pfn = memory_bm_next_pfn(bm);
	}
	return cnt;
}

/*
 * prepare_highmem_image - try to allocate as many highmem pages as
 * there are highmem image pages (@nr_highmem_p points to the variable
 * containing the number of highmem image pages).  The pages that are
 * "safe" (ie. will not be overwritten when the suspend image is
 * restored) have the corresponding bits set in @bm (it must be
 * uninitialized).
 *
 * NOTE: This function should not be called if there are no highmem
 * image pages.
 */
static unsigned int safe_highmem_pages;

static struct memory_bitmap *safe_highmem_bm;

static int
prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
{
	unsigned int to_alloc;

	if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
		return -ENOMEM;

	if (get_highmem_buffer(PG_SAFE))
		return -ENOMEM;

	to_alloc = count_free_highmem_pages();
	if (to_alloc > *nr_highmem_p)
		to_alloc = *nr_highmem_p;
	else
		*nr_highmem_p = to_alloc;

	safe_highmem_pages = 0;
	while (to_alloc-- > 0) {
		struct page *page;

		page = alloc_page(__GFP_HIGHMEM);
		if (!swsusp_page_is_free(page)) {
			/* The page is "safe", set its bit in the bitmap */
			memory_bm_set_bit(bm, page_to_pfn(page));
			safe_highmem_pages++;
		}
		/* Mark the page as allocated */
		swsusp_set_page_forbidden(page);
		swsusp_set_page_free(page);
	}
	memory_bm_position_reset(bm);
	safe_highmem_bm = bm;
	return 0;
}

/*
 * get_highmem_page_buffer - for given highmem image page find the buffer
 * that suspend_write_next() should set for its caller to write to.
 *
 * If the page is to be saved to its "original" page frame or a copy of
 * the page is to be made in the highmem, @buffer is returned.  Otherwise,
 * the copy of the page is to be made in normal memory, so the address of
 * the copy is returned.
 *
 * If @buffer is returned, the caller of suspend_write_next() will write
 * the page's contents to @buffer, so they will have to be copied to the
 * right location on the next call to suspend_write_next() and it is done
 * with the help of copy_last_highmem_page().  For this purpose, if
 * @buffer is returned, @last_highmem_page is set to the page to which
 * the data will have to be copied from @buffer.
 */
static struct page *last_highmem_page;

static void *
get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
{
	struct highmem_pbe *pbe;
	void *kaddr;

	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) {
		/*
		 * We have allocated the "original" page frame and we can
		 * use it directly to store the loaded page.
		 */
		last_highmem_page = page;
		return buffer;
	}
	/*
	 * The "original" page frame has not been allocated and we have to
	 * use a "safe" page frame to store the loaded page.
	 */
	pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
	if (!pbe) {
		swsusp_free();
		return ERR_PTR(-ENOMEM);
	}
	pbe->orig_page = page;
	if (safe_highmem_pages > 0) {
		struct page *tmp;

		/* Copy of the page will be stored in high memory */
		kaddr = buffer;
		tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm));
		safe_highmem_pages--;
		last_highmem_page = tmp;
		pbe->copy_page = tmp;
	} else {
		/* Copy of the page will be stored in normal memory */
		kaddr = safe_pages_list;
		safe_pages_list = safe_pages_list->next;
		pbe->copy_page = virt_to_page(kaddr);
	}
	pbe->next = highmem_pblist;
	highmem_pblist = pbe;
	return kaddr;
}

/*
 * copy_last_highmem_page - copy the contents of a highmem image from
 * @buffer, where the caller of snapshot_write_next() has placed them,
 * to the right location represented by @last_highmem_page .
 */
static void copy_last_highmem_page(void)
{
	if (last_highmem_page) {
		void *dst;

		dst = kmap_atomic(last_highmem_page);
		copy_page(dst, buffer);
		kunmap_atomic(dst);
		last_highmem_page = NULL;
	}
}

static inline int last_highmem_page_copied(void)
{
	return !last_highmem_page;
}

static inline void free_highmem_data(void)
{
	if (safe_highmem_bm)
		memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR);

	if (buffer)
		free_image_page(buffer, PG_UNSAFE_CLEAR);
}
#else
static inline int get_safe_write_buffer(void) { return 0; }

static unsigned int
count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }

static inline int
prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
{
	return 0;
}

static inline void *
get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
{
	return ERR_PTR(-EINVAL);
}

static inline void copy_last_highmem_page(void) {}
static inline int last_highmem_page_copied(void) { return 1; }
static inline void free_highmem_data(void) {}
#endif /* CONFIG_HIGHMEM */

/*
 * prepare_image - use the memory bitmap @bm to mark the pages that will
 * be overwritten in the process of restoring the system memory state
 * from the suspend image ("unsafe" pages) and allocate memory for the
 * image.
 *
 * The idea is to allocate a new memory bitmap first and then allocate
 * as many pages as needed for the image data, but not to assign these
 * pages to specific tasks initially.  Instead, we just mark them as
 * allocated and create a list of "safe" pages that will be used
 * later.  On systems with high memory a list of "safe" highmem pages is
 * also created.
 */

#define PBES_PER_LINKED_PAGE	(LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))

static int
prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
{
	unsigned int nr_pages, nr_highmem;
	struct linked_page *sp_list, *lp;
	int error;

	/* If there is no highmem, the buffer will not be necessary */
	free_image_page(buffer, PG_UNSAFE_CLEAR);
	buffer = NULL;

	nr_highmem = count_highmem_image_pages(bm);
	error = mark_unsafe_pages(bm);
	if (error)
		goto Free;

	error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
	if (error)
		goto Free;

	duplicate_memory_bitmap(new_bm, bm);
	memory_bm_free(bm, PG_UNSAFE_KEEP);
	if (nr_highmem > 0) {
		error = prepare_highmem_image(bm, &nr_highmem);
		if (error)
			goto Free;
	}
	/*
	 * Reserve some safe pages for potential later use.
	 *
	 * NOTE: This way we make sure there will be enough safe pages for the
	 * chain_alloc() in get_buffer().  It is a bit wasteful, but simple.
	 */
	sp_list = NULL;
	/* nr_copy_pages cannot be less than allocated_unsafe_pages */
	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
	nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
	while (nr_pages > 0) {
		lp = get_image_page(GFP_ATOMIC, PG_SAFE);
		if (!lp) {
			error = -ENOMEM;
			goto Free;
		}
		lp->next = sp_list;
		sp_list = lp;
		nr_pages--;
	}
	/* Preallocate memory for the image */
	safe_pages_list = NULL;
	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
	while (nr_pages > 0) {
		lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
		if (!lp) {
			error = -ENOMEM;
			goto Free;
		}
		if (!swsusp_page_is_free(virt_to_page(lp))) {
			/* The page is "safe", add it to the list */
			lp->next = safe_pages_list;
			safe_pages_list = lp;
		}
		/* Mark the page as allocated */
		swsusp_set_page_forbidden(virt_to_page(lp));
		swsusp_set_page_free(virt_to_page(lp));
		nr_pages--;
	}
	/* Free the reserved safe pages so that chain_alloc() can use them */
	while (sp_list) {
		lp = sp_list->next;
		free_image_page(sp_list, PG_UNSAFE_CLEAR);
		sp_list = lp;
	}
	return 0;

 Free:
	swsusp_free();
	return error;
}

/*
 * get_buffer - compute the address that snapshot_write_next() should
 * set for its caller to write to.
 */
static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
{
	struct pbe *pbe;
	struct page *page;
	unsigned long pfn = memory_bm_next_pfn(bm);

	if (pfn == BM_END_OF_MAP)
		return ERR_PTR(-EFAULT);

	page = pfn_to_page(pfn);
	if (PageHighMem(page))
		return get_highmem_page_buffer(page, ca);

	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page))
		/*
		 * We have allocated the "original" page frame and we can
		 * use it directly to store the loaded page.
		 */
		return page_address(page);

	/*
	 * The "original" page frame has not been allocated and we have to
	 * use a "safe" page frame to store the loaded page.
	 */
	pbe = chain_alloc(ca, sizeof(struct pbe));
	if (!pbe) {
		swsusp_free();
		return ERR_PTR(-ENOMEM);
	}
	pbe->orig_address = page_address(page);
	pbe->address = safe_pages_list;
	safe_pages_list = safe_pages_list->next;
	pbe->next = restore_pblist;
	restore_pblist = pbe;
	return pbe->address;
}

/*
 * snapshot_write_next - used for writing the system memory snapshot.
 *
 * On the first call to it @handle should point to a zeroed
 * snapshot_handle structure.  The structure gets updated and a pointer
 * to it should be passed to this function every next time.
 *
 * On success the function returns a positive number.  Then, the caller
 * is allowed to write up to the returned number of bytes to the memory
 * location computed by the data_of() macro.
 *
 * The function returns 0 to indicate the "end of file" condition,
 * and a negative number is returned on error.  In such cases the
 * structure pointed to by @handle is not updated and should not be used
 * any more.
 */
int snapshot_write_next(struct snapshot_handle *handle)
{
	static struct chain_allocator ca;
	int error = 0;

	/* Check if we have already loaded the entire image */
	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages)
		return 0;

	handle->sync_read = 1;

	if (!handle->cur) {
		if (!buffer)
			/* This makes the buffer be freed by swsusp_free() */
			buffer = get_image_page(GFP_ATOMIC, PG_ANY);

		if (!buffer)
			return -ENOMEM;

		handle->buffer = buffer;
	} else if (handle->cur == 1) {
		error = load_header(buffer);
		if (error)
			return error;

		error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
		if (error)
			return error;

		/* Allocate buffer for page keys. */
		error = page_key_alloc(nr_copy_pages);
		if (error)
			return error;

	} else if (handle->cur <= nr_meta_pages + 1) {
		error = unpack_orig_pfns(buffer, &copy_bm);
		if (error)
			return error;

		if (handle->cur == nr_meta_pages + 1) {
			error = prepare_image(&orig_bm, &copy_bm);
			if (error)
				return error;

			chain_init(&ca, GFP_ATOMIC, PG_SAFE);
			memory_bm_position_reset(&orig_bm);
			restore_pblist = NULL;
			handle->buffer = get_buffer(&orig_bm, &ca);
			handle->sync_read = 0;
			if (IS_ERR(handle->buffer))
				return PTR_ERR(handle->buffer);
		}
	} else {
		copy_last_highmem_page();
		/* Restore page key for data page (s390 only). */
		page_key_write(handle->buffer);
		handle->buffer = get_buffer(&orig_bm, &ca);
		if (IS_ERR(handle->buffer))
			return PTR_ERR(handle->buffer);
		if (handle->buffer != buffer)
			handle->sync_read = 0;
	}
	handle->cur++;
	return PAGE_SIZE;
}
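
/*
 * Illustrative sketch only: the image loader mirrors the read side
 * (fill_page_from_source() is a hypothetical source of image data):
 *
 *	struct snapshot_handle handle;
 *	int ret;
 *
 *	memset(&handle, 0, sizeof(handle));
 *	while ((ret = snapshot_write_next(&handle)) > 0) {
 *		fill_page_from_source(data_of(handle));
 *		if (snapshot_image_loaded(&handle))
 *			break;
 *	}
 *	snapshot_write_finalize(&handle);
 */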

/*
 * snapshot_write_finalize - must be called after the last call to
 * snapshot_write_next() in case the last page in the image happens
 * to be a highmem page and its contents should be stored in the
 * highmem.  Additionally, it releases the memory that will not be
 * used any more.
 */
void snapshot_write_finalize(struct snapshot_handle *handle)
{
	copy_last_highmem_page();
	/* Restore page key for data page (s390 only). */
	page_key_write(handle->buffer);
	page_key_free();
	/* Free only if we have loaded the image entirely */
	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) {
		memory_bm_free(&orig_bm, PG_UNSAFE_CLEAR);
		free_highmem_data();
	}
}

int snapshot_image_loaded(struct snapshot_handle *handle)
{
	return !(!nr_copy_pages || !last_highmem_page_copied() ||
			handle->cur <= nr_meta_pages + nr_copy_pages);
}

#ifdef CONFIG_HIGHMEM
/* Assumes that @buf is ready and points to a "safe" page */
static inline void
swap_two_pages_data(struct page *p1, struct page *p2, void *buf)
{
	void *kaddr1, *kaddr2;

	kaddr1 = kmap_atomic(p1);
	kaddr2 = kmap_atomic(p2);
	copy_page(buf, kaddr1);
	copy_page(kaddr1, kaddr2);
	copy_page(kaddr2, buf);
	kunmap_atomic(kaddr2);
	kunmap_atomic(kaddr1);
}

/*
 * restore_highmem - for each highmem page that was allocated before
 * the suspend and included in the suspend image, and also has been
 * allocated by the "resume" kernel, swap its current (ie. "before
 * resume") contents with the previous (ie. "before suspend") one.
 *
 * If the resume eventually fails, we can call this function once again
 * and restore the "before resume" highmem state.
 */
int restore_highmem(void)
{
	struct highmem_pbe *pbe = highmem_pblist;
	void *buf;

	if (!pbe)
		return 0;

	buf = get_image_page(GFP_ATOMIC, PG_SAFE);
	if (!buf)
		return -ENOMEM;

	while (pbe) {
		swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
		pbe = pbe->next;
	}
	free_image_page(buf, PG_UNSAFE_CLEAR);
	return 0;
}
#endif /* CONFIG_HIGHMEM */