1
2
3
4
5
6
7
8
9
10
11
12
13#include <linux/version.h>
14#include <linux/module.h>
15#include <linux/mm.h>
16#include <linux/suspend.h>
17#include <linux/delay.h>
18#include <linux/bitops.h>
19#include <linux/spinlock.h>
20#include <linux/kernel.h>
21#include <linux/pm.h>
22#include <linux/device.h>
23#include <linux/init.h>
24#include <linux/bootmem.h>
25#include <linux/syscalls.h>
26#include <linux/console.h>
27#include <linux/highmem.h>
28#include <linux/list.h>
29
30#include <asm/uaccess.h>
31#include <asm/mmu_context.h>
32#include <asm/pgtable.h>
33#include <asm/tlbflush.h>
34#include <asm/io.h>
35
36#include "power.h"
37
38static int swsusp_page_is_free(struct page *);
39static void swsusp_set_page_forbidden(struct page *);
40static void swsusp_unset_page_forbidden(struct page *);
41
42
43
44
45
46
47
/*
 * Preferred image size in bytes (default 500 MB); the image creation code
 * does its best not to exceed it.
 */
unsigned long image_size = 500 * 1024 * 1024;

/*
 * List of PBEs needed for restoring the pages that were allocated before
 * the suspend and included in the image, but have also been allocated by
 * the "resume" kernel, so their contents cannot be written directly to
 * their "original" page frames.
 */
struct pbe *restore_pblist;

/* Auxiliary one-page buffer (allocated lazily, freed by swsusp_free()). */
static void *buffer;

/*
 * Allocation-safety flags for get_image_page()/memory_bm_create():
 * during resume only page frames that were free before suspend may be used
 * ("safe" pages); the rejected unsafe pages are counted in
 * allocated_unsafe_pages and marked so swsusp_free() can release them.
 */
#define PG_ANY		0
#define PG_SAFE		1
#define PG_UNSAFE_CLEAR	1
#define PG_UNSAFE_KEEP	0

/* Number of "unsafe" pages rejected (and kept aside) by get_image_page(). */
static unsigned int allocated_unsafe_pages;
76
/*
 * get_image_page - allocate a zeroed page for image data.
 * @gfp_mask: allocation flags.
 * @safe_needed: if set, keep discarding pages whose frames are flagged in
 *	free_pages_map (i.e. were in use before suspend) until a safe frame
 *	is found; each rejected page is marked forbidden and counted in
 *	allocated_unsafe_pages so swsusp_free() can reclaim it.
 *
 * The returned page is marked both forbidden and free, which is the
 * signature swsusp_free() uses to identify image pages.  Returns the
 * page's virtual address, or NULL on allocation failure.
 */
static void *get_image_page(gfp_t gfp_mask, int safe_needed)
{
	void *res;

	res = (void *)get_zeroed_page(gfp_mask);
	if (safe_needed)
		while (res && swsusp_page_is_free(virt_to_page(res))) {
			/* The page is unsafe, mark it for swsusp_free() */
			swsusp_set_page_forbidden(virt_to_page(res));
			allocated_unsafe_pages++;
			res = (void *)get_zeroed_page(gfp_mask);
		}
	if (res) {
		swsusp_set_page_forbidden(virt_to_page(res));
		swsusp_set_page_free(virt_to_page(res));
	}
	return res;
}
95
96unsigned long get_safe_page(gfp_t gfp_mask)
97{
98 return (unsigned long)get_image_page(gfp_mask, PG_SAFE);
99}
100
/*
 * Allocate a page for the image and mark it forbidden + free so that
 * swsusp_free() will recognize and release it.  Returns NULL on failure.
 */
static struct page *alloc_image_page(gfp_t gfp_mask)
{
	struct page *page;

	page = alloc_page(gfp_mask);
	if (page) {
		swsusp_set_page_forbidden(page);
		swsusp_set_page_free(page);
	}
	return page;
}
112
113
114
115
116
117
/*
 * free_image_page - release a page allocated for the image.
 * @addr: virtual address of the page.
 * @clear_nosave_free: if set (PG_UNSAFE_CLEAR), also clear the page's
 *	"free" bit in free_pages_map.
 */
static inline void free_image_page(void *addr, int clear_nosave_free)
{
	struct page *page;

	BUG_ON(!virt_addr_valid(addr));

	page = virt_to_page(addr);

	swsusp_unset_page_forbidden(page);
	if (clear_nosave_free)
		swsusp_unset_page_free(page);

	__free_page(page);
}
132
133
134
/*
 * Pages holding auxiliary image data are chained together through their
 * first words, so they can all be released by free_list_of_pages().
 */
#define LINKED_PAGE_DATA_SIZE (PAGE_SIZE - sizeof(void *))

struct linked_page {
	struct linked_page *next;		/* next page in the chain */
	char data[LINKED_PAGE_DATA_SIZE];	/* usable payload */
} __attribute__((packed));
141
142static inline void
143free_list_of_pages(struct linked_page *list, int clear_page_nosave)
144{
145 while (list) {
146 struct linked_page *lp = list->next;
147
148 free_image_page(list, clear_page_nosave);
149 list = lp;
150 }
151}
152
153
154
155
156
157
158
159
160
161
162
163
164
165
/*
 * struct chain_allocator - sub-page allocator carving small objects out of
 * a chain of linked pages (used to build memory bitmaps without a separate
 * allocation per object).
 */
struct chain_allocator {
	struct linked_page *chain;	/* the chain of pages */
	unsigned int used_space;	/* bytes already handed out from the
					 * current page
					 */
	gfp_t gfp_mask;		/* mask used when allocating new pages */
	int safe_needed;	/* if set, only "safe" pages are allocated */
};
174
175static void
176chain_init(struct chain_allocator *ca, gfp_t gfp_mask, int safe_needed)
177{
178 ca->chain = NULL;
179 ca->used_space = LINKED_PAGE_DATA_SIZE;
180 ca->gfp_mask = gfp_mask;
181 ca->safe_needed = safe_needed;
182}
183
/*
 * Allocate @size bytes from the chain, grabbing a fresh linked page when
 * the current one cannot hold the request.  @size must not exceed
 * LINKED_PAGE_DATA_SIZE.  Returns NULL if a new page cannot be obtained.
 */
static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
{
	void *ret;

	if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
		struct linked_page *lp;

		lp = get_image_page(ca->gfp_mask, ca->safe_needed);
		if (!lp)
			return NULL;

		/* Push the new page on the front of the chain. */
		lp->next = ca->chain;
		ca->chain = lp;
		ca->used_space = 0;
	}
	ret = ca->chain->data + ca->used_space;
	ca->used_space += size;
	return ret;
}
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
/* Sentinel returned by memory_bm_next_pfn() when the bitmap is exhausted. */
#define BM_END_OF_MAP	(~0UL)

/* Number of page frames covered by one bitmap block (one page of bits). */
#define BM_BITS_PER_BLOCK	(PAGE_SIZE * BITS_PER_BYTE)

/* One block of a memory bitmap, covering pfns [start_pfn, end_pfn). */
struct bm_block {
	struct list_head hook;		/* hook into the list of blocks */
	unsigned long start_pfn;	/* pfn represented by the first bit */
	unsigned long end_pfn;		/* pfn represented by the last bit + 1 */
	unsigned long *data;		/* page holding the bits */
};
244
245static inline unsigned long bm_block_bits(struct bm_block *bb)
246{
247 return bb->end_pfn - bb->start_pfn;
248}
249
250
251
/* Cursor used when iterating over the bits of a memory bitmap. */
struct bm_position {
	struct bm_block *block;	/* current bitmap block */
	int bit;		/* next bit to examine inside @block */
};

struct memory_bitmap {
	struct list_head blocks;	/* list of bm_block structures */
	struct linked_page *p_list;	/* chain of pages backing the
					 * bm_block objects (via the chain
					 * allocator)
					 */
	struct bm_position cur;	/* most recently used bit position */
};
265
266
267
/* Reset the bitmap cursor to the first bit of the first block. */
static void memory_bm_position_reset(struct memory_bitmap *bm)
{
	bm->cur.block = list_entry(bm->blocks.next, struct bm_block, hook);
	bm->cur.bit = 0;
}
273
274static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);
275
276
277
278
279
280
281
282static int create_bm_block_list(unsigned long pages,
283 struct list_head *list,
284 struct chain_allocator *ca)
285{
286 unsigned int nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);
287
288 while (nr_blocks-- > 0) {
289 struct bm_block *bb;
290
291 bb = chain_alloc(ca, sizeof(struct bm_block));
292 if (!bb)
293 return -ENOMEM;
294 list_add(&bb->hook, list);
295 }
296
297 return 0;
298}
299
/* A contiguous range of pfns [start, end) spanned by one or more zones. */
struct mem_extent {
	struct list_head hook;
	unsigned long start;
	unsigned long end;
};
305
306
307
308
309
310static void free_mem_extents(struct list_head *list)
311{
312 struct mem_extent *ext, *aux;
313
314 list_for_each_entry_safe(ext, aux, list, hook) {
315 list_del(&ext->hook);
316 kfree(ext);
317 }
318}
319
320
321
322
323
324
325
/*
 * create_mem_extents - build a sorted list of non-overlapping pfn extents
 * covering all populated zones.
 * @list: list head to populate (initialized here).
 * @gfp_mask: mask used when allocating extent objects.
 *
 * Returns 0 on success; on -ENOMEM the partially built list is freed.
 */
static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
{
	struct zone *zone;

	INIT_LIST_HEAD(list);

	for_each_populated_zone(zone) {
		unsigned long zone_start, zone_end;
		struct mem_extent *ext, *cur, *aux;

		zone_start = zone->zone_start_pfn;
		zone_end = zone->zone_start_pfn + zone->spanned_pages;

		/* Find the first extent the zone could touch or precede. */
		list_for_each_entry(ext, list, hook)
			if (zone_start <= ext->end)
				break;

		if (&ext->hook == list || zone_end < ext->start) {
			/* The zone is disjoint: insert a new extent. */
			struct mem_extent *new_ext;

			new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask);
			if (!new_ext) {
				free_mem_extents(list);
				return -ENOMEM;
			}
			new_ext->start = zone_start;
			new_ext->end = zone_end;
			list_add_tail(&new_ext->hook, &ext->hook);
			continue;
		}

		/* Merge the zone's range into the existing extent. */
		if (zone_start < ext->start)
			ext->start = zone_start;
		if (zone_end > ext->end)
			ext->end = zone_end;

		/* The grown extent may now swallow following extents. */
		cur = ext;
		list_for_each_entry_safe_continue(cur, aux, list, hook) {
			if (zone_end < cur->start)
				break;
			if (zone_end < cur->end)
				ext->end = cur->end;
			list_del(&cur->hook);
			kfree(cur);
		}
	}

	return 0;
}
378
379
380
381
/*
 * memory_bm_create - allocate and initialize a memory bitmap covering all
 * populated zones.
 * @bm: bitmap to set up.
 * @gfp_mask: allocation flags.
 * @safe_needed: if set, only "safe" pages are used (see get_image_page()).
 *
 * Returns 0 on success; on error the partially built bitmap is freed.
 */
static int
memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, int safe_needed)
{
	struct chain_allocator ca;
	struct list_head mem_extents;
	struct mem_extent *ext;
	int error;

	chain_init(&ca, gfp_mask, safe_needed);
	INIT_LIST_HEAD(&bm->blocks);

	error = create_mem_extents(&mem_extents, gfp_mask);
	if (error)
		return error;

	list_for_each_entry(ext, &mem_extents, hook) {
		struct bm_block *bb;
		unsigned long pfn = ext->start;
		unsigned long pages = ext->end - ext->start;

		/* Remember the current tail so the loop below starts at the
		 * first newly added block. */
		bb = list_entry(bm->blocks.prev, struct bm_block, hook);

		error = create_bm_block_list(pages, bm->blocks.prev, &ca);
		if (error)
			goto Error;

		/* Assign bit storage and pfn ranges to the new blocks. */
		list_for_each_entry_continue(bb, &bm->blocks, hook) {
			bb->data = get_image_page(gfp_mask, safe_needed);
			if (!bb->data) {
				error = -ENOMEM;
				goto Error;
			}

			bb->start_pfn = pfn;
			if (pages >= BM_BITS_PER_BLOCK) {
				pfn += BM_BITS_PER_BLOCK;
				pages -= BM_BITS_PER_BLOCK;
			} else {
				/* This is executed only once in the loop */
				pfn += pages;
			}
			bb->end_pfn = pfn;
		}
	}

	bm->p_list = ca.chain;
	memory_bm_position_reset(bm);
 Exit:
	free_mem_extents(&mem_extents);
	return error;

 Error:
	bm->p_list = ca.chain;
	memory_bm_free(bm, PG_UNSAFE_CLEAR);
	goto Exit;
}
438
439
440
441
/*
 * memory_bm_free - free all memory occupied by the memory bitmap @bm.
 * @clear_nosave_free: passed through to free_image_page().
 */
static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
{
	struct bm_block *bb;

	list_for_each_entry(bb, &bm->blocks, hook)
		if (bb->data)
			free_image_page(bb->data, clear_nosave_free);

	/* The bm_block objects themselves live in the chain pages. */
	free_list_of_pages(bm->p_list, clear_nosave_free);

	INIT_LIST_HEAD(&bm->blocks);
}
454
455
456
457
458
459
/*
 * memory_bm_find_bit - locate the bit of @bm that corresponds to @pfn.
 * On success, stores the block's bit array in *@addr and the bit index in
 * *@bit_nr, and caches the block (with the cursor advanced past the bit)
 * in bm->cur.  Returns -EFAULT if the pfn is not covered by the bitmap.
 */
static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
			      void **addr, unsigned int *bit_nr)
{
	struct bm_block *bb;

	/*
	 * Check if the pfn corresponds to the current bitmap block and
	 * search backwards/forwards from it otherwise (the cached block
	 * makes consecutive lookups of nearby pfns cheap).
	 */
	bb = bm->cur.block;
	if (pfn < bb->start_pfn)
		list_for_each_entry_continue_reverse(bb, &bm->blocks, hook)
			if (pfn >= bb->start_pfn)
				break;

	if (pfn >= bb->end_pfn)
		list_for_each_entry_continue(bb, &bm->blocks, hook)
			if (pfn >= bb->start_pfn && pfn < bb->end_pfn)
				break;

	/* Ran off either end of the block list: pfn not in the bitmap. */
	if (&bb->hook == &bm->blocks)
		return -EFAULT;

	/* The block has been found */
	bm->cur.block = bb;
	pfn -= bb->start_pfn;
	bm->cur.bit = pfn + 1;
	*bit_nr = pfn;
	*addr = bb->data;
	return 0;
}
491
/* Set the bit for @pfn in @bm; BUG if the pfn is outside the bitmap. */
static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	unsigned int bit;
	void *addr;
	int err;

	err = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(err);
	set_bit(bit, addr);
}
502
/*
 * Set the bit for @pfn in @bm if the pfn is covered; returns 0 on success
 * or the error from memory_bm_find_bit() (no BUG for out-of-range pfns).
 */
static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn)
{
	unsigned int bit;
	void *addr;
	int err;

	err = memory_bm_find_bit(bm, pfn, &addr, &bit);
	if (err)
		return err;
	set_bit(bit, addr);
	return 0;
}
514
/* Clear the bit for @pfn in @bm; BUG if the pfn is outside the bitmap. */
static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	unsigned int bit;
	void *addr;
	int err;

	err = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(err);
	clear_bit(bit, addr);
}
525
/* Test the bit for @pfn in @bm; BUG if the pfn is outside the bitmap. */
static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	unsigned int bit;
	void *addr;
	int err;

	err = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(err);
	return test_bit(bit, addr);
}
536
537static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
538{
539 void *addr;
540 unsigned int bit;
541
542 return !memory_bm_find_bit(bm, pfn, &addr, &bit);
543}
544
545
546
547
548
549
550
551
552
553
/*
 * memory_bm_next_pfn - find the pfn of the next set bit in @bm, advancing
 * the internal cursor past it.  Returns BM_END_OF_MAP (and resets the
 * cursor) when no more set bits exist.
 *
 * memory_bm_position_reset() must be called before the first invocation.
 */
static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
{
	struct bm_block *bb;
	int bit;

	bb = bm->cur.block;
	do {
		bit = bm->cur.bit;
		bit = find_next_bit(bb->data, bm_block_bits(bb), bit);
		if (bit < bm_block_bits(bb))
			goto Return_pfn;

		/* Current block exhausted; move to the next one. */
		bb = list_entry(bb->hook.next, struct bm_block, hook);
		bm->cur.block = bb;
		bm->cur.bit = 0;
	} while (&bb->hook != &bm->blocks);

	memory_bm_position_reset(bm);
	return BM_END_OF_MAP;

 Return_pfn:
	bm->cur.bit = bit + 1;
	return bb->start_pfn + bit;
}
578
579
580
581
582
583
/*
 * struct nosave_region - a range of page frames whose contents should not
 * be saved during hibernation (registered by early init code).
 */
struct nosave_region {
	struct list_head list;
	unsigned long start_pfn;	/* first pfn of the region */
	unsigned long end_pfn;		/* first pfn past the region */
};

/* Sorted list of registered nosave regions. */
static LIST_HEAD(nosave_regions);
591
592
593
594
595
596
597
598void __init
599__register_nosave_region(unsigned long start_pfn, unsigned long end_pfn,
600 int use_kmalloc)
601{
602 struct nosave_region *region;
603
604 if (start_pfn >= end_pfn)
605 return;
606
607 if (!list_empty(&nosave_regions)) {
608
609 region = list_entry(nosave_regions.prev,
610 struct nosave_region, list);
611 if (region->end_pfn == start_pfn) {
612 region->end_pfn = end_pfn;
613 goto Report;
614 }
615 }
616 if (use_kmalloc) {
617
618 region = kmalloc(sizeof(struct nosave_region), GFP_KERNEL);
619 BUG_ON(!region);
620 } else
621
622 region = alloc_bootmem(sizeof(struct nosave_region));
623 region->start_pfn = start_pfn;
624 region->end_pfn = end_pfn;
625 list_add_tail(®ion->list, &nosave_regions);
626 Report:
627 printk(KERN_INFO "PM: Registered nosave memory: %016lx - %016lx\n",
628 start_pfn << PAGE_SHIFT, end_pfn << PAGE_SHIFT);
629}
630
631
632
633
634
/* Bitmap of page frames that must not be saved (set via
 * swsusp_set_page_forbidden()); NULL until the basic bitmaps exist. */
static struct memory_bitmap *forbidden_pages_map;

/* Bitmap of page frames flagged "free" for swsusp (set via
 * swsusp_set_page_free()); NULL until the basic bitmaps exist. */
static struct memory_bitmap *free_pages_map;
639
640
641
642
643
644
645void swsusp_set_page_free(struct page *page)
646{
647 if (free_pages_map)
648 memory_bm_set_bit(free_pages_map, page_to_pfn(page));
649}
650
651static int swsusp_page_is_free(struct page *page)
652{
653 return free_pages_map ?
654 memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0;
655}
656
657void swsusp_unset_page_free(struct page *page)
658{
659 if (free_pages_map)
660 memory_bm_clear_bit(free_pages_map, page_to_pfn(page));
661}
662
663static void swsusp_set_page_forbidden(struct page *page)
664{
665 if (forbidden_pages_map)
666 memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page));
667}
668
669int swsusp_page_is_forbidden(struct page *page)
670{
671 return forbidden_pages_map ?
672 memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0;
673}
674
675static void swsusp_unset_page_forbidden(struct page *page)
676{
677 if (forbidden_pages_map)
678 memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
679}
680
681
682
683
684
685
/*
 * mark_nosave_pages - set the bits corresponding to all registered nosave
 * regions in the bitmap @bm.
 */
static void mark_nosave_pages(struct memory_bitmap *bm)
{
	struct nosave_region *region;

	if (list_empty(&nosave_regions))
		return;

	list_for_each_entry(region, &nosave_regions, list) {
		unsigned long pfn;

		pr_debug("PM: Marking nosave pages: %016lx - %016lx\n",
				region->start_pfn << PAGE_SHIFT,
				region->end_pfn << PAGE_SHIFT);

		for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
			if (pfn_valid(pfn)) {
				/*
				 * It is safe to ignore the result of
				 * mem_bm_set_bit_check() here, since we won't
				 * touch the PFNs for which the error is
				 * returned anyway.
				 */
				mem_bm_set_bit_check(bm, pfn);
			}
	}
}
712
713
714
715
716
717
718
719
720
/*
 * create_basic_memory_bitmaps - create the forbidden/free page bitmaps.
 *
 * The global pointers forbidden_pages_map and free_pages_map are only set
 * once BOTH bitmaps have been created successfully, so the page-flag
 * helpers never see a half-initialized state.  Returns 0 or -ENOMEM.
 */
int create_basic_memory_bitmaps(void)
{
	struct memory_bitmap *bm1, *bm2;
	int error = 0;

	BUG_ON(forbidden_pages_map || free_pages_map);

	bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
	if (!bm1)
		return -ENOMEM;

	error = memory_bm_create(bm1, GFP_KERNEL, PG_ANY);
	if (error)
		goto Free_first_object;

	bm2 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
	if (!bm2)
		goto Free_first_bitmap;

	error = memory_bm_create(bm2, GFP_KERNEL, PG_ANY);
	if (error)
		goto Free_second_object;

	forbidden_pages_map = bm1;
	free_pages_map = bm2;
	mark_nosave_pages(forbidden_pages_map);

	pr_debug("PM: Basic memory bitmaps created\n");

	return 0;

 Free_second_object:
	kfree(bm2);
 Free_first_bitmap:
	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
 Free_first_object:
	kfree(bm1);
	return -ENOMEM;
}
760
761
762
763
764
765
766
767
768void free_basic_memory_bitmaps(void)
769{
770 struct memory_bitmap *bm1, *bm2;
771
772 BUG_ON(!(forbidden_pages_map && free_pages_map));
773
774 bm1 = forbidden_pages_map;
775 bm2 = free_pages_map;
776 forbidden_pages_map = NULL;
777 free_pages_map = NULL;
778 memory_bm_free(bm1, PG_UNSAFE_CLEAR);
779 kfree(bm1);
780 memory_bm_free(bm2, PG_UNSAFE_CLEAR);
781 kfree(bm2);
782
783 pr_debug("PM: Basic memory bitmaps freed\n");
784}
785
786
787
788
789
790
791
/*
 * snapshot_additional_pages - estimate the number of extra pages needed
 * for the bitmap data structures covering @zone (the estimate may exceed
 * the exact number).
 */
unsigned int snapshot_additional_pages(struct zone *zone)
{
	unsigned int res;

	/* Pages of bits plus pages of bm_block descriptors ... */
	res = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
	res += DIV_ROUND_UP(res * sizeof(struct bm_block), PAGE_SIZE);
	/* ... doubled, because two bitmaps (orig_bm and copy_bm) are used. */
	return 2 * res;
}
800
801#ifdef CONFIG_HIGHMEM
802
803
804
805
806
807static unsigned int count_free_highmem_pages(void)
808{
809 struct zone *zone;
810 unsigned int cnt = 0;
811
812 for_each_populated_zone(zone)
813 if (is_highmem(zone))
814 cnt += zone_page_state(zone, NR_FREE_PAGES);
815
816 return cnt;
817}
818
819
820
821
822
823
824
825
/*
 * saveable_highmem_page - determine whether the highmem page frame @pfn
 * (expected to belong to @zone) should be included in the image.
 *
 * A page is saveable unless it is forbidden, flagged free, or reserved.
 * Returns the page on success, NULL otherwise.
 */
static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_page(pfn);
	if (page_zone(page) != zone)
		return NULL;

	BUG_ON(!PageHighMem(page));

	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page) ||
	    PageReserved(page))
		return NULL;

	return page;
}
845
846
847
848
849
850
/*
 * count_highmem_pages - compute the total number of saveable highmem
 * pages in the system.
 */
static unsigned int count_highmem_pages(void)
{
	struct zone *zone;
	unsigned int n = 0;

	for_each_populated_zone(zone) {
		unsigned long pfn, max_zone_pfn;

		if (!is_highmem(zone))
			continue;

		/* Flag free pages so saveable_highmem_page() skips them. */
		mark_free_pages(zone);
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_highmem_page(zone, pfn))
				n++;
	}
	return n;
}
870#else
/* !CONFIG_HIGHMEM stub: there are no highmem pages to save. */
static inline void *saveable_highmem_page(struct zone *z, unsigned long p)
{
	return NULL;
}
875#endif
876
877
878
879
880
881
882
883
884
/*
 * saveable_page - determine whether the non-highmem page frame @pfn
 * (expected to belong to @zone) should be included in the image.
 *
 * A page is saveable unless it is forbidden, flagged free, or reserved
 * while also being unmapped or inside a nosave range.  Returns the page
 * on success, NULL otherwise.
 */
static struct page *saveable_page(struct zone *zone, unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_page(pfn);
	if (page_zone(page) != zone)
		return NULL;

	BUG_ON(PageHighMem(page));

	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
		return NULL;

	if (PageReserved(page)
	    && (!kernel_page_present(page) || pfn_is_nosave(pfn)))
		return NULL;

	return page;
}
907
908
909
910
911
912
/*
 * count_data_pages - compute the total number of saveable non-highmem
 * pages in the system.
 */
static unsigned int count_data_pages(void)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;
	unsigned int n = 0;

	for_each_populated_zone(zone) {
		if (is_highmem(zone))
			continue;

		/* Flag free pages so saveable_page() skips them. */
		mark_free_pages(zone);
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_page(zone, pfn))
				n++;
	}
	return n;
}
931
932
933
934
935static inline void do_copy_page(long *dst, long *src)
936{
937 int n;
938
939 for (n = PAGE_SIZE / sizeof(long); n; n--)
940 *dst++ = *src++;
941}
942
943
944
945
946
947
948
949
/*
 * safe_copy_page - copy @s_page into @dst, temporarily mapping the source
 * into the kernel page tables if it is not currently present (which can
 * happen with CONFIG_DEBUG_PAGEALLOC).
 */
static void safe_copy_page(void *dst, struct page *s_page)
{
	if (kernel_page_present(s_page)) {
		do_copy_page(dst, page_address(s_page));
	} else {
		kernel_map_pages(s_page, 1, 1);
		do_copy_page(dst, page_address(s_page));
		kernel_map_pages(s_page, 1, 0);
	}
}
960
961
962#ifdef CONFIG_HIGHMEM
/* Dispatch to the highmem or lowmem saveability check for @pfn. */
static inline struct page *
page_is_saveable(struct zone *zone, unsigned long pfn)
{
	if (is_highmem(zone))
		return saveable_highmem_page(zone, pfn);
	return saveable_page(zone, pfn);
}
969
/*
 * copy_data_page - copy the page frame @src_pfn to @dst_pfn, using
 * kmap_atomic() for highmem pages and bouncing through the global buffer
 * when only the destination is highmem.
 */
static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
	struct page *s_page, *d_page;
	void *src, *dst;

	s_page = pfn_to_page(src_pfn);
	d_page = pfn_to_page(dst_pfn);
	if (PageHighMem(s_page)) {
		src = kmap_atomic(s_page, KM_USER0);
		dst = kmap_atomic(d_page, KM_USER1);
		do_copy_page(dst, src);
		kunmap_atomic(src, KM_USER0);
		kunmap_atomic(dst, KM_USER1);
	} else {
		if (PageHighMem(d_page)) {
			/*
			 * Copy through the buffer first so safe_copy_page()
			 * never runs while an atomic kmap is held.
			 */
			safe_copy_page(buffer, s_page);
			dst = kmap_atomic(d_page, KM_USER0);
			memcpy(dst, buffer, PAGE_SIZE);
			kunmap_atomic(dst, KM_USER0);
		} else {
			safe_copy_page(page_address(d_page), s_page);
		}
	}
}
997#else
/* Without highmem, every saveable page is a lowmem page. */
#define page_is_saveable(zone, pfn)	saveable_page(zone, pfn)

/* Copy the page frame @src_pfn to @dst_pfn (both directly addressable). */
static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
	safe_copy_page(page_address(pfn_to_page(dst_pfn)),
			pfn_to_page(src_pfn));
}
1005#endif
1006
/*
 * copy_data_pages - mark every saveable page in @orig_bm, then copy each
 * of them into one of the page frames flagged in @copy_bm, walking the
 * two bitmaps in lockstep.
 */
static void
copy_data_pages(struct memory_bitmap *copy_bm, struct memory_bitmap *orig_bm)
{
	struct zone *zone;
	unsigned long pfn;

	for_each_populated_zone(zone) {
		unsigned long max_zone_pfn;

		mark_free_pages(zone);
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (page_is_saveable(zone, pfn))
				memory_bm_set_bit(orig_bm, pfn);
	}
	memory_bm_position_reset(orig_bm);
	memory_bm_position_reset(copy_bm);
	for(;;) {
		pfn = memory_bm_next_pfn(orig_bm);
		if (unlikely(pfn == BM_END_OF_MAP))
			break;
		copy_data_page(memory_bm_next_pfn(copy_bm), pfn);
	}
}
1031
1032
/* Number of data pages in the last created hibernation image. */
static unsigned int nr_copy_pages;

/* Number of pages needed for the image metadata (packed pfn lists). */
static unsigned int nr_meta_pages;

/*
 * Numbers of normal and highmem page frames allocated for the image
 * before suspending devices.
 */
unsigned int alloc_normal, alloc_highmem;

/*
 * Memory bitmap used for marking saveable pages (during hibernation) or
 * image pages (during restore).
 */
static struct memory_bitmap orig_bm;

/*
 * Memory bitmap used during hibernation for marking the allocated page
 * frames that will hold copies of saveable pages.
 */
static struct memory_bitmap copy_bm;
1055
1056
1057
1058
1059
1060
1061
1062
/*
 * swsusp_free - release the pages allocated for the image and reset the
 * image bookkeeping state.
 *
 * Image pages are identified by having BOTH the forbidden and free flags
 * set (see get_image_page()/alloc_image_page()).
 */
void swsusp_free(void)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;

	for_each_populated_zone(zone) {
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (pfn_valid(pfn)) {
				struct page *page = pfn_to_page(pfn);

				if (swsusp_page_is_forbidden(page) &&
				    swsusp_page_is_free(page)) {
					swsusp_unset_page_forbidden(page);
					swsusp_unset_page_free(page);
					__free_page(page);
				}
			}
	}
	nr_copy_pages = 0;
	nr_meta_pages = 0;
	restore_pblist = NULL;
	buffer = NULL;
	alloc_normal = 0;
	alloc_highmem = 0;
}
1089
1090
1091
/* Allocation flags for preallocating image memory; allocation failures
 * are handled gracefully, so warnings are suppressed. */
#define GFP_IMAGE	(GFP_KERNEL | __GFP_NOWARN)
1093
1094
1095
1096
1097
1098
1099
1100
1101static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
1102{
1103 unsigned long nr_alloc = 0;
1104
1105 while (nr_pages > 0) {
1106 struct page *page;
1107
1108 page = alloc_image_page(mask);
1109 if (!page)
1110 break;
1111 memory_bm_set_bit(©_bm, page_to_pfn(page));
1112 if (PageHighMem(page))
1113 alloc_highmem++;
1114 else
1115 alloc_normal++;
1116 nr_pages--;
1117 nr_alloc++;
1118 }
1119
1120 return nr_alloc;
1121}
1122
1123static unsigned long preallocate_image_memory(unsigned long nr_pages)
1124{
1125 return preallocate_image_pages(nr_pages, GFP_IMAGE);
1126}
1127
1128#ifdef CONFIG_HIGHMEM
/* Preallocate @nr_pages highmem image pages. */
static unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
	return preallocate_image_pages(nr_pages, GFP_IMAGE | __GFP_HIGHMEM);
}
1133
1134
1135
1136
1137static unsigned long __fraction(u64 x, u64 multiplier, u64 base)
1138{
1139 x *= multiplier;
1140 do_div(x, base);
1141 return (unsigned long)x;
1142}
1143
1144static unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
1145 unsigned long highmem,
1146 unsigned long total)
1147{
1148 unsigned long alloc = __fraction(nr_pages, highmem, total);
1149
1150 return preallocate_image_pages(alloc, GFP_IMAGE | __GFP_HIGHMEM);
1151}
1152#else
/* !CONFIG_HIGHMEM stub: no highmem pages can be preallocated. */
static inline unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
	return 0;
}
1157
/* !CONFIG_HIGHMEM stub: no highmem pages can be preallocated. */
static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
						unsigned long highmem,
						unsigned long total)
{
	return 0;
}
1164#endif
1165
1166
1167
1168
1169static void free_unnecessary_pages(void)
1170{
1171 unsigned long save_highmem, to_free_normal, to_free_highmem;
1172
1173 to_free_normal = alloc_normal - count_data_pages();
1174 save_highmem = count_highmem_pages();
1175 if (alloc_highmem > save_highmem) {
1176 to_free_highmem = alloc_highmem - save_highmem;
1177 } else {
1178 to_free_highmem = 0;
1179 to_free_normal -= save_highmem - alloc_highmem;
1180 }
1181
1182 memory_bm_position_reset(©_bm);
1183
1184 while (to_free_normal > 0 && to_free_highmem > 0) {
1185 unsigned long pfn = memory_bm_next_pfn(©_bm);
1186 struct page *page = pfn_to_page(pfn);
1187
1188 if (PageHighMem(page)) {
1189 if (!to_free_highmem)
1190 continue;
1191 to_free_highmem--;
1192 alloc_highmem--;
1193 } else {
1194 if (!to_free_normal)
1195 continue;
1196 to_free_normal--;
1197 alloc_normal--;
1198 }
1199 memory_bm_clear_bit(©_bm, pfn);
1200 swsusp_unset_page_forbidden(page);
1201 swsusp_unset_page_free(page);
1202 __free_page(page);
1203 }
1204}
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
/*
 * minimum_image_size - estimate the minimum acceptable image size.
 * @saveable: number of saveable pages in the system.
 *
 * The estimate is [saveable] minus [pages that can be freed in theory]:
 * reclaimable slab plus active/inactive anon and file pages, less mapped
 * file pages.  Used as the lower bound when preallocating image memory.
 */
static unsigned long minimum_image_size(unsigned long saveable)
{
	unsigned long size;

	size = global_page_state(NR_SLAB_RECLAIMABLE)
		+ global_page_state(NR_ACTIVE_ANON)
		+ global_page_state(NR_INACTIVE_ANON)
		+ global_page_state(NR_ACTIVE_FILE)
		+ global_page_state(NR_INACTIVE_FILE)
		- global_page_state(NR_FILE_MAPPED);

	return saveable <= size ? 0 : saveable - size;
}
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256int hibernate_preallocate_memory(void)
1257{
1258 struct zone *zone;
1259 unsigned long saveable, size, max_size, count, highmem, pages = 0;
1260 unsigned long alloc, save_highmem, pages_highmem;
1261 struct timeval start, stop;
1262 int error;
1263
1264 printk(KERN_INFO "PM: Preallocating image memory... ");
1265 do_gettimeofday(&start);
1266
1267 error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY);
1268 if (error)
1269 goto err_out;
1270
1271 error = memory_bm_create(©_bm, GFP_IMAGE, PG_ANY);
1272 if (error)
1273 goto err_out;
1274
1275 alloc_normal = 0;
1276 alloc_highmem = 0;
1277
1278
1279 save_highmem = count_highmem_pages();
1280 saveable = count_data_pages();
1281
1282
1283
1284
1285
1286 count = saveable;
1287 saveable += save_highmem;
1288 highmem = save_highmem;
1289 size = 0;
1290 for_each_populated_zone(zone) {
1291 size += snapshot_additional_pages(zone);
1292 if (is_highmem(zone))
1293 highmem += zone_page_state(zone, NR_FREE_PAGES);
1294 else
1295 count += zone_page_state(zone, NR_FREE_PAGES);
1296 }
1297 count += highmem;
1298 count -= totalreserve_pages;
1299
1300
1301 max_size = (count - (size + PAGES_FOR_IO)) / 2 - 2 * SPARE_PAGES;
1302 size = DIV_ROUND_UP(image_size, PAGE_SIZE);
1303 if (size > max_size)
1304 size = max_size;
1305
1306
1307
1308
1309 if (size >= saveable) {
1310 pages = preallocate_image_highmem(save_highmem);
1311 pages += preallocate_image_memory(saveable - pages);
1312 goto out;
1313 }
1314
1315
1316 pages = minimum_image_size(saveable);
1317 if (size < pages)
1318 size = min_t(unsigned long, pages, max_size);
1319
1320
1321
1322
1323
1324
1325
1326 shrink_all_memory(saveable - size);
1327
1328
1329
1330
1331
1332
1333
1334
1335 pages_highmem = preallocate_image_highmem(highmem / 2);
1336 alloc = (count - max_size) - pages_highmem;
1337 pages = preallocate_image_memory(alloc);
1338 if (pages < alloc)
1339 goto err_out;
1340 size = max_size - size;
1341 alloc = size;
1342 size = preallocate_highmem_fraction(size, highmem, count);
1343 pages_highmem += size;
1344 alloc -= size;
1345 pages += preallocate_image_memory(alloc);
1346 pages += pages_highmem;
1347
1348
1349
1350
1351
1352
1353 free_unnecessary_pages();
1354
1355 out:
1356 do_gettimeofday(&stop);
1357 printk(KERN_CONT "done (allocated %lu pages)\n", pages);
1358 swsusp_show_speed(&start, &stop, pages, "Allocated");
1359
1360 return 0;
1361
1362 err_out:
1363 printk(KERN_CONT "\n");
1364 swsusp_free();
1365 return -ENOMEM;
1366}
1367
1368#ifdef CONFIG_HIGHMEM
1369
1370
1371
1372
1373
1374static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
1375{
1376 unsigned int free_highmem = count_free_highmem_pages() + alloc_highmem;
1377
1378 if (free_highmem >= nr_highmem)
1379 nr_highmem = 0;
1380 else
1381 nr_highmem -= free_highmem;
1382
1383 return nr_highmem;
1384}
1385#else
/* !CONFIG_HIGHMEM stub: no lowmem frames are needed for highmem copies. */
static unsigned int
count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
1388#endif
1389
1390
1391
1392
1393
1394
/*
 * enough_free_mem - check there is enough free (or preallocated) memory
 * for a snapshot of @nr_pages lowmem and @nr_highmem highmem pages, plus
 * PAGES_FOR_IO spare pages.
 */
static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
{
	struct zone *zone;
	unsigned int free = alloc_normal;

	for_each_populated_zone(zone)
		if (!is_highmem(zone))
			free += zone_page_state(zone, NR_FREE_PAGES);

	/* Highmem shortfall must be covered by lowmem frames. */
	nr_pages += count_pages_for_highmem(nr_highmem);
	pr_debug("PM: Normal pages needed: %u + %u, available pages: %u\n",
		nr_pages, PAGES_FOR_IO, free);

	return free > nr_pages + PAGES_FOR_IO;
}
1410
1411#ifdef CONFIG_HIGHMEM
1412
1413
1414
1415
1416
/*
 * get_highmem_buffer - allocate the bounce buffer needed for copying and
 * loading highmem image pages.  Returns 0 or -ENOMEM.
 */
static inline int get_highmem_buffer(int safe_needed)
{
	buffer = get_image_page(GFP_ATOMIC | __GFP_COLD, safe_needed);
	return buffer ? 0 : -ENOMEM;
}
1422
1423
1424
1425
1426
1427
1428
/*
 * alloc_highmem_pages - allocate up to @nr_highmem highmem pages for the
 * image, bounded by the number of free highmem pages, flagging each in
 * @bm.  Returns the number of pages that could NOT be allocated from
 * highmem.
 */
static inline unsigned int
alloc_highmem_pages(struct memory_bitmap *bm, unsigned int nr_highmem)
{
	unsigned int to_alloc = count_free_highmem_pages();

	if (to_alloc > nr_highmem)
		to_alloc = nr_highmem;

	nr_highmem -= to_alloc;
	while (to_alloc-- > 0) {
		struct page *page;

		page = alloc_image_page(__GFP_HIGHMEM);
		/* NOTE(review): page is not NULL-checked before use;
		 * presumably the free-page count above guarantees the
		 * allocations succeed — confirm. */
		memory_bm_set_bit(bm, page_to_pfn(page));
	}
	return nr_highmem;
}
1446#else
/* !CONFIG_HIGHMEM stubs: no highmem buffer or pages are needed. */
static inline int get_highmem_buffer(int safe_needed) { return 0; }

static inline unsigned int
alloc_highmem_pages(struct memory_bitmap *bm, unsigned int n) { return 0; }
1451#endif
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
/*
 * swsusp_alloc - allocate the remaining memory needed for the image.
 * @nr_pages: saveable lowmem pages; @nr_highmem: saveable highmem pages.
 *
 * Highmem copies are allocated from highmem first; any shortfall is added
 * to the lowmem requirement.  Pages already preallocated (alloc_normal /
 * alloc_highmem) are counted against the requirements.  On failure all
 * image memory is released via swsusp_free().
 */
static int
swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm,
		unsigned int nr_pages, unsigned int nr_highmem)
{
	int error = 0;

	if (nr_highmem > 0) {
		error = get_highmem_buffer(PG_ANY);
		if (error)
			goto err_out;
		if (nr_highmem > alloc_highmem) {
			nr_highmem -= alloc_highmem;
			nr_pages += alloc_highmem_pages(copy_bm, nr_highmem);
		}
	}
	if (nr_pages > alloc_normal) {
		nr_pages -= alloc_normal;
		while (nr_pages-- > 0) {
			struct page *page;

			page = alloc_image_page(GFP_ATOMIC | __GFP_COLD);
			if (!page)
				goto err_out;
			memory_bm_set_bit(copy_bm, page_to_pfn(page));
		}
	}

	return 0;

 err_out:
	swsusp_free();
	return error;
}
1498
1499asmlinkage int swsusp_save(void)
1500{
1501 unsigned int nr_pages, nr_highmem;
1502
1503 printk(KERN_INFO "PM: Creating hibernation image: \n");
1504
1505 drain_local_pages(NULL);
1506 nr_pages = count_data_pages();
1507 nr_highmem = count_highmem_pages();
1508 printk(KERN_INFO "PM: Need to copy %u pages\n", nr_pages + nr_highmem);
1509
1510 if (!enough_free_mem(nr_pages, nr_highmem)) {
1511 printk(KERN_ERR "PM: Not enough free memory\n");
1512 return -ENOMEM;
1513 }
1514
1515 if (swsusp_alloc(&orig_bm, ©_bm, nr_pages, nr_highmem)) {
1516 printk(KERN_ERR "PM: Memory allocation failed\n");
1517 return -ENOMEM;
1518 }
1519
1520
1521
1522
1523 drain_local_pages(NULL);
1524 copy_data_pages(©_bm, &orig_bm);
1525
1526
1527
1528
1529
1530
1531
1532 nr_pages += nr_highmem;
1533 nr_copy_pages = nr_pages;
1534 nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);
1535
1536 printk(KERN_INFO "PM: Hibernation image created (%d pages copied)\n",
1537 nr_pages);
1538
1539 return 0;
1540}
1541
1542#ifndef CONFIG_ARCH_HIBERNATION_HEADER
/* Fill in the kernel identification fields of the image header. */
static int init_header_complete(struct swsusp_info *info)
{
	memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
	info->version_code = LINUX_VERSION_CODE;
	return 0;
}
1549
1550static char *check_image_kernel(struct swsusp_info *info)
1551{
1552 if (info->version_code != LINUX_VERSION_CODE)
1553 return "kernel version";
1554 if (strcmp(info->uts.sysname,init_utsname()->sysname))
1555 return "system type";
1556 if (strcmp(info->uts.release,init_utsname()->release))
1557 return "kernel release";
1558 if (strcmp(info->uts.version,init_utsname()->version))
1559 return "version";
1560 if (strcmp(info->uts.machine,init_utsname()->machine))
1561 return "machine";
1562 return NULL;
1563}
1564#endif
1565
1566unsigned long snapshot_get_image_size(void)
1567{
1568 return nr_copy_pages + nr_meta_pages + 1;
1569}
1570
/* Initialize the image header with size information and kernel identity. */
static int init_header(struct swsusp_info *info)
{
	memset(info, 0, sizeof(struct swsusp_info));
	info->num_physpages = num_physpages;
	info->image_pages = nr_copy_pages;
	info->pages = snapshot_get_image_size();
	info->size = info->pages;
	info->size <<= PAGE_SHIFT;	/* size in bytes */
	return init_header_complete(info);
}
1581
1582
1583
1584
1585
1586
1587static inline void
1588pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
1589{
1590 int j;
1591
1592 for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
1593 buf[j] = memory_bm_next_pfn(bm);
1594 if (unlikely(buf[j] == BM_END_OF_MAP))
1595 break;
1596 }
1597}
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621int snapshot_read_next(struct snapshot_handle *handle, size_t count)
1622{
1623 if (handle->cur > nr_meta_pages + nr_copy_pages)
1624 return 0;
1625
1626 if (!buffer) {
1627
1628 buffer = get_image_page(GFP_ATOMIC, PG_ANY);
1629 if (!buffer)
1630 return -ENOMEM;
1631 }
1632 if (!handle->offset) {
1633 int error;
1634
1635 error = init_header((struct swsusp_info *)buffer);
1636 if (error)
1637 return error;
1638 handle->buffer = buffer;
1639 memory_bm_position_reset(&orig_bm);
1640 memory_bm_position_reset(©_bm);
1641 }
1642 if (handle->prev < handle->cur) {
1643 if (handle->cur <= nr_meta_pages) {
1644 memset(buffer, 0, PAGE_SIZE);
1645 pack_pfns(buffer, &orig_bm);
1646 } else {
1647 struct page *page;
1648
1649 page = pfn_to_page(memory_bm_next_pfn(©_bm));
1650 if (PageHighMem(page)) {
1651
1652
1653
1654
1655 void *kaddr;
1656
1657 kaddr = kmap_atomic(page, KM_USER0);
1658 memcpy(buffer, kaddr, PAGE_SIZE);
1659 kunmap_atomic(kaddr, KM_USER0);
1660 handle->buffer = buffer;
1661 } else {
1662 handle->buffer = page_address(page);
1663 }
1664 }
1665 handle->prev = handle->cur;
1666 }
1667 handle->buf_offset = handle->cur_offset;
1668 if (handle->cur_offset + count >= PAGE_SIZE) {
1669 count = PAGE_SIZE - handle->cur_offset;
1670 handle->cur_offset = 0;
1671 handle->cur++;
1672 } else {
1673 handle->cur_offset += count;
1674 }
1675 handle->offset += count;
1676 return count;
1677}
1678
1679
1680
1681
1682
1683
1684
1685static int mark_unsafe_pages(struct memory_bitmap *bm)
1686{
1687 struct zone *zone;
1688 unsigned long pfn, max_zone_pfn;
1689
1690
1691 for_each_populated_zone(zone) {
1692 max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
1693 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1694 if (pfn_valid(pfn))
1695 swsusp_unset_page_free(pfn_to_page(pfn));
1696 }
1697
1698
1699 memory_bm_position_reset(bm);
1700 do {
1701 pfn = memory_bm_next_pfn(bm);
1702 if (likely(pfn != BM_END_OF_MAP)) {
1703 if (likely(pfn_valid(pfn)))
1704 swsusp_set_page_free(pfn_to_page(pfn));
1705 else
1706 return -EFAULT;
1707 }
1708 } while (pfn != BM_END_OF_MAP);
1709
1710 allocated_unsafe_pages = 0;
1711
1712 return 0;
1713}
1714
1715static void
1716duplicate_memory_bitmap(struct memory_bitmap *dst, struct memory_bitmap *src)
1717{
1718 unsigned long pfn;
1719
1720 memory_bm_position_reset(src);
1721 pfn = memory_bm_next_pfn(src);
1722 while (pfn != BM_END_OF_MAP) {
1723 memory_bm_set_bit(dst, pfn);
1724 pfn = memory_bm_next_pfn(src);
1725 }
1726}
1727
1728static int check_header(struct swsusp_info *info)
1729{
1730 char *reason;
1731
1732 reason = check_image_kernel(info);
1733 if (!reason && info->num_physpages != num_physpages)
1734 reason = "memory size";
1735 if (reason) {
1736 printk(KERN_ERR "PM: Image mismatch: %s\n", reason);
1737 return -EPERM;
1738 }
1739 return 0;
1740}
1741
1742
1743
1744
1745
1746static int
1747load_header(struct swsusp_info *info)
1748{
1749 int error;
1750
1751 restore_pblist = NULL;
1752 error = check_header(info);
1753 if (!error) {
1754 nr_copy_pages = info->image_pages;
1755 nr_meta_pages = info->pages - info->image_pages - 1;
1756 }
1757 return error;
1758}
1759
1760
1761
1762
1763
1764static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
1765{
1766 int j;
1767
1768 for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
1769 if (unlikely(buf[j] == BM_END_OF_MAP))
1770 break;
1771
1772 if (memory_bm_pfn_present(bm, buf[j]))
1773 memory_bm_set_bit(bm, buf[j]);
1774 else
1775 return -EFAULT;
1776 }
1777
1778 return 0;
1779}
1780
1781
1782
1783
/* List of "safe" pages (not overwritten by image data) usable as copy targets. */
static struct linked_page *safe_pages_list;
1785
1786#ifdef CONFIG_HIGHMEM
1787
1788
1789
1790
/*
 * Page backup entry for a highmem page: records where the loaded data is
 * now (copy_page) and which highmem frame it originally belonged to
 * (orig_page), so restore_highmem() can put it back.
 */
struct highmem_pbe {
	struct page *copy_page;		/* data is here now */
	struct page *orig_page;		/* data was here before hibernation */
	struct highmem_pbe *next;
};

/*
 * Head of the singly linked list of highmem_pbe structures built by
 * get_highmem_page_buffer() and consumed by restore_highmem().
 */
static struct highmem_pbe *highmem_pblist;
1803
1804
1805
1806
1807
1808
1809
1810static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
1811{
1812 unsigned long pfn;
1813 unsigned int cnt = 0;
1814
1815 memory_bm_position_reset(bm);
1816 pfn = memory_bm_next_pfn(bm);
1817 while (pfn != BM_END_OF_MAP) {
1818 if (PageHighMem(pfn_to_page(pfn)))
1819 cnt++;
1820
1821 pfn = memory_bm_next_pfn(bm);
1822 }
1823 return cnt;
1824}
1825
1826
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836
1837
1838static unsigned int safe_highmem_pages;
1839
1840static struct memory_bitmap *safe_highmem_bm;
1841
1842static int
1843prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
1844{
1845 unsigned int to_alloc;
1846
1847 if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
1848 return -ENOMEM;
1849
1850 if (get_highmem_buffer(PG_SAFE))
1851 return -ENOMEM;
1852
1853 to_alloc = count_free_highmem_pages();
1854 if (to_alloc > *nr_highmem_p)
1855 to_alloc = *nr_highmem_p;
1856 else
1857 *nr_highmem_p = to_alloc;
1858
1859 safe_highmem_pages = 0;
1860 while (to_alloc-- > 0) {
1861 struct page *page;
1862
1863 page = alloc_page(__GFP_HIGHMEM);
1864 if (!swsusp_page_is_free(page)) {
1865
1866 memory_bm_set_bit(bm, page_to_pfn(page));
1867 safe_highmem_pages++;
1868 }
1869
1870 swsusp_set_page_forbidden(page);
1871 swsusp_set_page_free(page);
1872 }
1873 memory_bm_position_reset(bm);
1874 safe_highmem_bm = bm;
1875 return 0;
1876}
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893
1894
/*
 * Highmem page most recently returned via the bounce buffer; its real
 * contents still live in 'buffer' until copy_last_highmem_page() runs.
 */
static struct page *last_highmem_page;

/*
 * Return the address where the next highmem image page should be loaded.
 *
 * If @page (the original highmem frame) is itself "safe" (forbidden +
 * free), the caller may load straight into the bounce buffer and
 * copy_last_highmem_page() will move the data into @page later.
 * Otherwise a highmem_pbe is allocated from @ca to record the pair of
 * pages so restore_highmem() can swap them after loading.
 *
 * Returns a buffer address, or ERR_PTR(-ENOMEM) on allocation failure
 * (in which case everything allocated so far is freed via swsusp_free()).
 */
static void *
get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
{
	struct highmem_pbe *pbe;
	void *kaddr;

	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) {
		/* The original frame is safe: load into the bounce buffer
		 * and remember to copy it into @page afterwards. */
		last_highmem_page = page;
		return buffer;
	}

	/* The original frame is occupied; track the copy with a PBE. */
	pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
	if (!pbe) {
		swsusp_free();
		return ERR_PTR(-ENOMEM);
	}
	pbe->orig_page = page;
	if (safe_highmem_pages > 0) {
		struct page *tmp;

		/* Copy into a safe highmem page via the bounce buffer. */
		kaddr = buffer;
		tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm));
		safe_highmem_pages--;
		last_highmem_page = tmp;
		pbe->copy_page = tmp;
	} else {
		/* No safe highmem pages left: take a safe lowmem page. */
		kaddr = safe_pages_list;
		safe_pages_list = safe_pages_list->next;
		pbe->copy_page = virt_to_page(kaddr);
	}
	pbe->next = highmem_pblist;
	highmem_pblist = pbe;
	return kaddr;
}
1938
1939
1940
1941
1942
1943
1944
1945static void copy_last_highmem_page(void)
1946{
1947 if (last_highmem_page) {
1948 void *dst;
1949
1950 dst = kmap_atomic(last_highmem_page, KM_USER0);
1951 memcpy(dst, buffer, PAGE_SIZE);
1952 kunmap_atomic(dst, KM_USER0);
1953 last_highmem_page = NULL;
1954 }
1955}
1956
1957static inline int last_highmem_page_copied(void)
1958{
1959 return !last_highmem_page;
1960}
1961
1962static inline void free_highmem_data(void)
1963{
1964 if (safe_highmem_bm)
1965 memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR);
1966
1967 if (buffer)
1968 free_image_page(buffer, PG_UNSAFE_CLEAR);
1969}
1970#else
/* !CONFIG_HIGHMEM: no highmem pages exist, so all of the highmem image
 * handling collapses to no-ops / empty counts. */
static inline int get_safe_write_buffer(void) { return 0; }

static unsigned int
count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }

static inline int
prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
{
	return 0;
}

/* Never called with nr_highmem == 0, hence -EINVAL if it happens anyway. */
static inline void *
get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
{
	return ERR_PTR(-EINVAL);
}

static inline void copy_last_highmem_page(void) {}
static inline int last_highmem_page_copied(void) { return 1; }
static inline void free_highmem_data(void) {}
1991#endif
1992
1993
1994
1995
1996
1997
1998
1999
2000
2001
2002
2003
2004
2005
2006
/* Number of struct pbe entries that fit into one linked page's data area. */
#define PBES_PER_LINKED_PAGE (LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))

/*
 * Prepare the memory needed for loading the rest of the image.
 *
 * On entry @bm holds the "original" PFNs unpacked from the metadata pages.
 * The function marks those frames "unsafe", copies the bitmap into the
 * safely-allocated @new_bm, lets the highmem code grab its pages, and then
 * preallocates:
 *   - enough safe linked pages (sp_list) to hold the chain allocator's
 *     PBE structures, so that chain_alloc() cannot later steal pages we
 *     are about to put on safe_pages_list, and
 *   - nr_copy_pages - nr_highmem - allocated_unsafe_pages safe data pages
 *     threaded onto safe_pages_list.
 * The temporary sp_list reservation is released before returning.
 *
 * Returns 0 on success; on error everything is freed via swsusp_free().
 */
static int
prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
{
	unsigned int nr_pages, nr_highmem;
	struct linked_page *sp_list, *lp;
	int error;

	/* The bounce buffer is no longer needed; free it (it will be
	 * reallocated on demand).  NOTE(review): assumes free_image_page()
	 * tolerates the current 'buffer' value here — confirm against its
	 * definition. */
	free_image_page(buffer, PG_UNSAFE_CLEAR);
	buffer = NULL;

	nr_highmem = count_highmem_image_pages(bm);
	error = mark_unsafe_pages(bm);
	if (error)
		goto Free;

	/* Rebuild the bitmap out of safe pages only. */
	error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
	if (error)
		goto Free;

	duplicate_memory_bitmap(new_bm, bm);
	memory_bm_free(bm, PG_UNSAFE_KEEP);
	if (nr_highmem > 0) {
		error = prepare_highmem_image(bm, &nr_highmem);
		if (error)
			goto Free;
	}

	/* Reserve safe pages for the chain allocator's PBE storage so the
	 * data-page allocation below cannot consume them. */
	sp_list = NULL;

	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
	nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
	while (nr_pages > 0) {
		lp = get_image_page(GFP_ATOMIC, PG_SAFE);
		if (!lp) {
			error = -ENOMEM;
			goto Free;
		}
		lp->next = sp_list;
		sp_list = lp;
		nr_pages--;
	}

	/* Allocate the data pages; only the "safe" ones go on the list,
	 * the unsafe ones are merely marked allocated and left alone. */
	safe_pages_list = NULL;
	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
	while (nr_pages > 0) {
		lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
		if (!lp) {
			error = -ENOMEM;
			goto Free;
		}
		if (!swsusp_page_is_free(virt_to_page(lp))) {
			/* The page is "safe": thread it onto the list. */
			lp->next = safe_pages_list;
			safe_pages_list = lp;
		}
		/* Mark the page as allocated. */
		swsusp_set_page_forbidden(virt_to_page(lp));
		swsusp_set_page_free(virt_to_page(lp));
		nr_pages--;
	}
	/* Release the temporary reservation; chain_alloc() can now reuse
	 * these frames for PBE storage. */
	while (sp_list) {
		lp = sp_list->next;
		free_image_page(sp_list, PG_UNSAFE_CLEAR);
		sp_list = lp;
	}
	return 0;

 Free:
	swsusp_free();
	return error;
}
2087
2088
2089
2090
2091
2092
/*
 * Return the address into which the next image data page should be loaded.
 *
 * The next PFN is taken from @bm (the bitmap of original frames).  Highmem
 * frames are delegated to get_highmem_page_buffer().  If the original
 * lowmem frame is "safe" (forbidden + free) the data can be loaded into it
 * directly; otherwise a safe page is taken from safe_pages_list and a
 * struct pbe (allocated from @ca) records the pair on restore_pblist so
 * the data can be moved into place later.
 *
 * Returns an address, ERR_PTR(-EFAULT) if @bm is exhausted, or
 * ERR_PTR(-ENOMEM) after freeing everything via swsusp_free().
 */
static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
{
	struct pbe *pbe;
	struct page *page;
	unsigned long pfn = memory_bm_next_pfn(bm);

	if (pfn == BM_END_OF_MAP)
		return ERR_PTR(-EFAULT);

	page = pfn_to_page(pfn);
	if (PageHighMem(page))
		return get_highmem_page_buffer(page, ca);

	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page))
		/* The original frame is safe: load the data straight into
		 * its final destination. */
		return page_address(page);

	/* The original frame is occupied: load into a safe page and record
	 * the (original, copy) pair for later relocation. */
	pbe = chain_alloc(ca, sizeof(struct pbe));
	if (!pbe) {
		swsusp_free();
		return ERR_PTR(-ENOMEM);
	}
	pbe->orig_address = page_address(page);
	pbe->address = safe_pages_list;
	safe_pages_list = safe_pages_list->next;
	pbe->next = restore_pblist;
	restore_pblist = pbe;
	return pbe->address;
}
2127
2128
2129
2130
2131
2132
2133
2134
2135
2136
2137
2138
2139
2140
2141
2142
2143
2144
2145
2146
2147
2148
2149
2150int snapshot_write_next(struct snapshot_handle *handle, size_t count)
2151{
2152 static struct chain_allocator ca;
2153 int error = 0;
2154
2155
2156 if (handle->prev && handle->cur > nr_meta_pages + nr_copy_pages)
2157 return 0;
2158
2159 if (handle->offset == 0) {
2160 if (!buffer)
2161
2162 buffer = get_image_page(GFP_ATOMIC, PG_ANY);
2163
2164 if (!buffer)
2165 return -ENOMEM;
2166
2167 handle->buffer = buffer;
2168 }
2169 handle->sync_read = 1;
2170 if (handle->prev < handle->cur) {
2171 if (handle->prev == 0) {
2172 error = load_header(buffer);
2173 if (error)
2174 return error;
2175
2176 error = memory_bm_create(©_bm, GFP_ATOMIC, PG_ANY);
2177 if (error)
2178 return error;
2179
2180 } else if (handle->prev <= nr_meta_pages) {
2181 error = unpack_orig_pfns(buffer, ©_bm);
2182 if (error)
2183 return error;
2184
2185 if (handle->prev == nr_meta_pages) {
2186 error = prepare_image(&orig_bm, ©_bm);
2187 if (error)
2188 return error;
2189
2190 chain_init(&ca, GFP_ATOMIC, PG_SAFE);
2191 memory_bm_position_reset(&orig_bm);
2192 restore_pblist = NULL;
2193 handle->buffer = get_buffer(&orig_bm, &ca);
2194 handle->sync_read = 0;
2195 if (IS_ERR(handle->buffer))
2196 return PTR_ERR(handle->buffer);
2197 }
2198 } else {
2199 copy_last_highmem_page();
2200 handle->buffer = get_buffer(&orig_bm, &ca);
2201 if (IS_ERR(handle->buffer))
2202 return PTR_ERR(handle->buffer);
2203 if (handle->buffer != buffer)
2204 handle->sync_read = 0;
2205 }
2206 handle->prev = handle->cur;
2207 }
2208 handle->buf_offset = handle->cur_offset;
2209 if (handle->cur_offset + count >= PAGE_SIZE) {
2210 count = PAGE_SIZE - handle->cur_offset;
2211 handle->cur_offset = 0;
2212 handle->cur++;
2213 } else {
2214 handle->cur_offset += count;
2215 }
2216 handle->offset += count;
2217 return count;
2218}
2219
2220
2221
2222
2223
2224
2225
2226
2227
2228void snapshot_write_finalize(struct snapshot_handle *handle)
2229{
2230 copy_last_highmem_page();
2231
2232 if (handle->prev && handle->cur > nr_meta_pages + nr_copy_pages) {
2233 memory_bm_free(&orig_bm, PG_UNSAFE_CLEAR);
2234 free_highmem_data();
2235 }
2236}
2237
2238int snapshot_image_loaded(struct snapshot_handle *handle)
2239{
2240 return !(!nr_copy_pages || !last_highmem_page_copied() ||
2241 handle->cur <= nr_meta_pages + nr_copy_pages);
2242}
2243
2244#ifdef CONFIG_HIGHMEM
2245
2246static inline void
2247swap_two_pages_data(struct page *p1, struct page *p2, void *buf)
2248{
2249 void *kaddr1, *kaddr2;
2250
2251 kaddr1 = kmap_atomic(p1, KM_USER0);
2252 kaddr2 = kmap_atomic(p2, KM_USER1);
2253 memcpy(buf, kaddr1, PAGE_SIZE);
2254 memcpy(kaddr1, kaddr2, PAGE_SIZE);
2255 memcpy(kaddr2, buf, PAGE_SIZE);
2256 kunmap_atomic(kaddr1, KM_USER0);
2257 kunmap_atomic(kaddr2, KM_USER1);
2258}
2259
2260
2261
2262
2263
2264
2265
2266
2267
2268
2269
2270int restore_highmem(void)
2271{
2272 struct highmem_pbe *pbe = highmem_pblist;
2273 void *buf;
2274
2275 if (!pbe)
2276 return 0;
2277
2278 buf = get_image_page(GFP_ATOMIC, PG_SAFE);
2279 if (!buf)
2280 return -ENOMEM;
2281
2282 while (pbe) {
2283 swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
2284 pbe = pbe->next;
2285 }
2286 free_image_page(buf, PG_UNSAFE_CLEAR);
2287 return 0;
2288}
2289#endif
2290