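/*
 * linux/kernel/power/snapshot.c
 *
 * This file provides system snapshot/restore functionality for swsusp.
 */
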
#include <linux/version.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/syscalls.h>
#include <linux/console.h>
#include <linux/highmem.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#include "power.h"

static int swsusp_page_is_free(struct page *);
static void swsusp_set_page_forbidden(struct page *);
static void swsusp_unset_page_forbidden(struct page *);
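
/*
 * List of PBEs needed for restoring the pages that were allocated before
 * the suspend and included in the suspend image, but have also been
 * allocated by the "resume" kernel, so their contents cannot be written
 * directly to their "original" page frames.
 */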
struct pbe *restore_pblist;
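
/* Pointer to an auxiliary buffer (1 page) */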
static void *buffer;
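
/**
 *	@safe_needed - on resume, for storing the PBE list and the image,
 *	we can only use memory pages that do not conflict with the pages
 *	used before suspend.
 *
 *	The unsafe pages are marked by means of the forbidden and free page
 *	bitmaps and counted in allocated_unsafe_pages.
 */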
#define PG_ANY		0
#define PG_SAFE		1
#define PG_UNSAFE_CLEAR	1
#define PG_UNSAFE_KEEP	0

static unsigned int allocated_unsafe_pages;

static void *get_image_page(gfp_t gfp_mask, int safe_needed)
{
	void *res;

	res = (void *)get_zeroed_page(gfp_mask);
	if (safe_needed)
		while (res && swsusp_page_is_free(virt_to_page(res))) {
			/* The page is unsafe, mark it for swsusp_free() */
			swsusp_set_page_forbidden(virt_to_page(res));
			allocated_unsafe_pages++;
			res = (void *)get_zeroed_page(gfp_mask);
		}
	if (res) {
		swsusp_set_page_forbidden(virt_to_page(res));
		swsusp_set_page_free(virt_to_page(res));
	}
	return res;
}

unsigned long get_safe_page(gfp_t gfp_mask)
{
	return (unsigned long)get_image_page(gfp_mask, PG_SAFE);
}

static struct page *alloc_image_page(gfp_t gfp_mask)
{
	struct page *page;

	page = alloc_page(gfp_mask);
	if (page) {
		swsusp_set_page_forbidden(page);
		swsusp_set_page_free(page);
	}
	return page;
}
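
/**
 *	free_image_page - free page represented by @addr, allocated with
 *	get_image_page (page flags set by it must be cleared)
 */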
static inline void free_image_page(void *addr, int clear_nosave_free)
{
	struct page *page;

	BUG_ON(!virt_addr_valid(addr));

	page = virt_to_page(addr);

	swsusp_unset_page_forbidden(page);
	if (clear_nosave_free)
		swsusp_unset_page_free(page);

	__free_page(page);
}
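
/* struct linked_page is used to build chains of pages */
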
#define LINKED_PAGE_DATA_SIZE	(PAGE_SIZE - sizeof(void *))

struct linked_page {
	struct linked_page *next;
	char data[LINKED_PAGE_DATA_SIZE];
} __attribute__((packed));

static inline void
free_list_of_pages(struct linked_page *list, int clear_page_nosave)
{
	while (list) {
		struct linked_page *lp = list->next;

		free_image_page(list, clear_page_nosave);
		list = lp;
	}
}
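
/**
 *	struct chain_allocator is used for allocating small objects out of
 *	a linked list of pages called 'the chain'.
 *
 *	The chain grows each time when there is no room for a new object in
 *	the current page.  The allocated objects cannot be freed individually.
 *	It is only possible to free them all at once, by freeing the entire
 *	chain.
 *
 *	NOTE: The chain allocator may be inefficient if the allocated objects
 *	are not much smaller than PAGE_SIZE.
 */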
struct chain_allocator {
	struct linked_page *chain;	/* the chain */
	unsigned int used_space;	/* total size of objects allocated out
					 * of the current page
					 */
	gfp_t gfp_mask;		/* mask for allocating pages */
	int safe_needed;	/* if set, only "safe" pages are allocated */
};

static void
chain_init(struct chain_allocator *ca, gfp_t gfp_mask, int safe_needed)
{
	ca->chain = NULL;
	ca->used_space = LINKED_PAGE_DATA_SIZE;
	ca->gfp_mask = gfp_mask;
	ca->safe_needed = safe_needed;
}

static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
{
	void *ret;

	if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
		struct linked_page *lp;

		lp = get_image_page(ca->gfp_mask, ca->safe_needed);
		if (!lp)
			return NULL;

		lp->next = ca->chain;
		ca->chain = lp;
		ca->used_space = 0;
	}
	ret = ca->chain->data + ca->used_space;
	ca->used_space += size;
	return ret;
}

static void chain_free(struct chain_allocator *ca, int clear_page_nosave)
{
	free_list_of_pages(ca->chain, clear_page_nosave);
	memset(ca, 0, sizeof(struct chain_allocator));
}
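
/**
 *	Data types related to memory bitmaps.
 *
 *	A memory bitmap consists of many linked lists of objects.  The main
 *	list's elements are of type struct zone_bitmap and each of them
 *	corresponds to one zone of memory installed in the system.  Each
 *	zone_bitmap object contains a list of objects of type struct bm_block
 *	that represent the blocks of bit chunks in which information is
 *	stored.
 *
 *	struct memory_bitmap contains a pointer to the main list of zone
 *	bitmap objects, a struct bm_position used for browsing the bitmap,
 *	and a pointer to the list of pages used for allocating all of the
 *	zone bitmap objects and bitmap block objects.
 *
 *	NOTE: It has to be possible to lay out the bitmap in memory using
 *	only allocations of order 0.
 */
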
#define BM_END_OF_MAP	(~0UL)

#define BM_CHUNKS_PER_BLOCK	(PAGE_SIZE / sizeof(long))
#define BM_BITS_PER_CHUNK	(sizeof(long) << 3)
#define BM_BITS_PER_BLOCK	(PAGE_SIZE << 3)

struct bm_block {
	struct bm_block *next;		/* next element of the list */
	unsigned long start_pfn;	/* pfn represented by the first bit */
	unsigned long end_pfn;	/* pfn represented by the last bit plus 1 */
	unsigned int size;	/* number of bit chunks */
	unsigned long *data;	/* chunks of bits representing pages */
};

struct zone_bitmap {
	struct zone_bitmap *next;	/* next element of the list */
	unsigned long start_pfn;	/* minimal pfn in this zone */
	unsigned long end_pfn;		/* maximal pfn in this zone plus 1 */
	struct bm_block *bm_blocks;	/* list of bitmap blocks */
	struct bm_block *cur_block;	/* recently used bitmap block */
};
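
/* struct bm_position is used for browsing memory bitmaps */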
struct bm_position {
	struct zone_bitmap *zone_bm;
	struct bm_block *block;
	int chunk;
	int bit;
};

struct memory_bitmap {
	struct zone_bitmap *zone_bm_list;	/* list of zone bitmaps */
	struct linked_page *p_list;	/* list of pages used to store zone
					 * bitmap objects and bitmap block
					 * objects
					 */
	struct bm_position cur;	/* most recently used bit position */
};
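
/* Functions that operate on memory bitmaps */
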
static inline void memory_bm_reset_chunk(struct memory_bitmap *bm)
{
	bm->cur.chunk = 0;
	bm->cur.bit = -1;
}

static void memory_bm_position_reset(struct memory_bitmap *bm)
{
	struct zone_bitmap *zone_bm;

	zone_bm = bm->zone_bm_list;
	bm->cur.zone_bm = zone_bm;
	bm->cur.block = zone_bm->bm_blocks;
	memory_bm_reset_chunk(bm);
}

static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);
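
/**
 *	create_bm_block_list - create a list of block bitmap objects
 */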
static inline struct bm_block *
create_bm_block_list(unsigned int nr_blocks, struct chain_allocator *ca)
{
	struct bm_block *bblist = NULL;

	while (nr_blocks-- > 0) {
		struct bm_block *bb;

		bb = chain_alloc(ca, sizeof(struct bm_block));
		if (!bb)
			return NULL;

		bb->next = bblist;
		bblist = bb;
	}
	return bblist;
}
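
/**
 *	create_zone_bm_list - create a list of zone bitmap objects
 */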
static inline struct zone_bitmap *
create_zone_bm_list(unsigned int nr_zones, struct chain_allocator *ca)
{
	struct zone_bitmap *zbmlist = NULL;

	while (nr_zones-- > 0) {
		struct zone_bitmap *zbm;

		zbm = chain_alloc(ca, sizeof(struct zone_bitmap));
		if (!zbm)
			return NULL;

		zbm->next = zbmlist;
		zbmlist = zbm;
	}
	return zbmlist;
}
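
/**
 *	memory_bm_create - allocate memory for a memory bitmap
 */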
static int
memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, int safe_needed)
{
	struct chain_allocator ca;
	struct zone *zone;
	struct zone_bitmap *zone_bm;
	struct bm_block *bb;
	unsigned int nr;

	chain_init(&ca, gfp_mask, safe_needed);

	/* Compute the number of populated zones */
	nr = 0;
	for_each_zone(zone)
		if (populated_zone(zone))
			nr++;

	/* Allocate the list of zone bitmap objects */
	zone_bm = create_zone_bm_list(nr, &ca);
	bm->zone_bm_list = zone_bm;
	if (!zone_bm) {
		chain_free(&ca, PG_UNSAFE_CLEAR);
		return -ENOMEM;
	}

	/* Initialize the zone bitmap objects */
	for_each_zone(zone) {
		unsigned long pfn;

		if (!populated_zone(zone))
			continue;

		zone_bm->start_pfn = zone->zone_start_pfn;
		zone_bm->end_pfn = zone->zone_start_pfn + zone->spanned_pages;

		nr = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
		bb = create_bm_block_list(nr, &ca);
		zone_bm->bm_blocks = bb;
		zone_bm->cur_block = bb;
		if (!bb)
			goto Free;

		nr = zone->spanned_pages;
		pfn = zone->zone_start_pfn;
		/* Initialize the bitmap block objects */
		while (bb) {
			unsigned long *ptr;

			ptr = get_image_page(gfp_mask, safe_needed);
			bb->data = ptr;
			if (!ptr)
				goto Free;

			bb->start_pfn = pfn;
			if (nr >= BM_BITS_PER_BLOCK) {
				pfn += BM_BITS_PER_BLOCK;
				bb->size = BM_CHUNKS_PER_BLOCK;
				nr -= BM_BITS_PER_BLOCK;
			} else {
				/* This is executed only once in the loop */
				pfn += nr;
				bb->size = DIV_ROUND_UP(nr, BM_BITS_PER_CHUNK);
			}
			bb->end_pfn = pfn;
			bb = bb->next;
		}
		zone_bm = zone_bm->next;
	}
	bm->p_list = ca.chain;
	memory_bm_position_reset(bm);
	return 0;

 Free:
	bm->p_list = ca.chain;
	memory_bm_free(bm, PG_UNSAFE_CLEAR);
	return -ENOMEM;
}
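
/**
 *	memory_bm_free - free memory occupied by the memory bitmap @bm
 */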
static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
{
	struct zone_bitmap *zone_bm;

	/* Free the list of bit blocks for each zone_bitmap object */
	zone_bm = bm->zone_bm_list;
	while (zone_bm) {
		struct bm_block *bb;

		bb = zone_bm->bm_blocks;
		while (bb) {
			if (bb->data)
				free_image_page(bb->data, clear_nosave_free);
			bb = bb->next;
		}
		zone_bm = zone_bm->next;
	}
	free_list_of_pages(bm->p_list, clear_nosave_free);
	bm->zone_bm_list = NULL;
}
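
/**
 *	memory_bm_find_bit - find the bit in the bitmap @bm that corresponds
 *	to the given pfn.  The cur.zone_bm member of @bm and the cur_block
 *	member of the current zone bitmap are updated.
 */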
static void memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
				void **addr, unsigned int *bit_nr)
{
	struct zone_bitmap *zone_bm;
	struct bm_block *bb;

	/* Check if the pfn is in the current zone */
	zone_bm = bm->cur.zone_bm;
	if (pfn < zone_bm->start_pfn || pfn >= zone_bm->end_pfn) {
		zone_bm = bm->zone_bm_list;
		/* We don't assume that the zones are sorted by pfns */
		while (pfn < zone_bm->start_pfn || pfn >= zone_bm->end_pfn) {
			zone_bm = zone_bm->next;

			BUG_ON(!zone_bm);
		}
		bm->cur.zone_bm = zone_bm;
	}
	/* Check if the pfn corresponds to the current bitmap block */
	bb = zone_bm->cur_block;
	if (pfn < bb->start_pfn)
		bb = zone_bm->bm_blocks;

	while (pfn >= bb->end_pfn) {
		bb = bb->next;

		BUG_ON(!bb);
	}
	zone_bm->cur_block = bb;
	pfn -= bb->start_pfn;
	*bit_nr = pfn % BM_BITS_PER_CHUNK;
	*addr = bb->data + pfn / BM_BITS_PER_CHUNK;
}

static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;

	memory_bm_find_bit(bm, pfn, &addr, &bit);
	set_bit(bit, addr);
}

static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;

	memory_bm_find_bit(bm, pfn, &addr, &bit);
	clear_bit(bit, addr);
}

static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;

	memory_bm_find_bit(bm, pfn, &addr, &bit);
	return test_bit(bit, addr);
}
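
/* Two auxiliary functions for memory_bm_next_pfn */

/* Find the first set bit in the given chunk, if there is one */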
static inline int next_bit_in_chunk(int bit, unsigned long *chunk_p)
{
	bit++;
	while (bit < BM_BITS_PER_CHUNK) {
		if (test_bit(bit, chunk_p))
			return bit;

		bit++;
	}
	return -1;
}
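
/* Find the first non-zero chunk in the given block, if there is one */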
static inline int next_chunk_in_block(int n, struct bm_block *bb)
{
	n++;
	while (n < bb->size) {
		if (bb->data[n])
			return n;

		n++;
	}
	return -1;
}
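
/**
 *	memory_bm_next_pfn - find the pfn that corresponds to the next set bit
 *	in the bitmap @bm.  If the pfn cannot be found, BM_END_OF_MAP is
 *	returned.
 *
 *	It is required to run memory_bm_position_reset() before the first call
 *	of this function.
 */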
static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
{
	struct zone_bitmap *zone_bm;
	struct bm_block *bb;
	int chunk;
	int bit;

	do {
		bb = bm->cur.block;
		do {
			chunk = bm->cur.chunk;
			bit = bm->cur.bit;
			do {
				bit = next_bit_in_chunk(bit, bb->data + chunk);
				if (bit >= 0)
					goto Return_pfn;

				chunk = next_chunk_in_block(chunk, bb);
				bit = -1;
			} while (chunk >= 0);
			bb = bb->next;
			bm->cur.block = bb;
			memory_bm_reset_chunk(bm);
		} while (bb);
		zone_bm = bm->cur.zone_bm->next;
		if (zone_bm) {
			bm->cur.zone_bm = zone_bm;
			bm->cur.block = zone_bm->bm_blocks;
			memory_bm_reset_chunk(bm);
		}
	} while (zone_bm);
	memory_bm_position_reset(bm);
	return BM_END_OF_MAP;

 Return_pfn:
	bm->cur.chunk = chunk;
	bm->cur.bit = bit;
	return bb->start_pfn + chunk * BM_BITS_PER_CHUNK + bit;
}
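
/**
 *	This structure represents a range of page frames the contents of which
 *	should not be saved during the suspend.
 */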
struct nosave_region {
	struct list_head list;
	unsigned long start_pfn;
	unsigned long end_pfn;
};

static LIST_HEAD(nosave_regions);
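
/**
 *	register_nosave_region - register a range of page frames the contents
 *	of which should not be saved during the suspend (to be used in the
 *	early initialization code)
 */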
void __init
__register_nosave_region(unsigned long start_pfn, unsigned long end_pfn,
			 int use_kmalloc)
{
	struct nosave_region *region;

	if (start_pfn >= end_pfn)
		return;

	if (!list_empty(&nosave_regions)) {
		/* Try to extend the previous region (they should be sorted) */
		region = list_entry(nosave_regions.prev,
					struct nosave_region, list);
		if (region->end_pfn == start_pfn) {
			region->end_pfn = end_pfn;
			goto Report;
		}
	}
	if (use_kmalloc) {
		/* during init, this shouldn't fail */
		region = kmalloc(sizeof(struct nosave_region), GFP_KERNEL);
		BUG_ON(!region);
	} else
		/* This allocation cannot fail */
		region = alloc_bootmem_low(sizeof(struct nosave_region));
	region->start_pfn = start_pfn;
	region->end_pfn = end_pfn;
	list_add_tail(&region->list, &nosave_regions);
 Report:
	printk("swsusp: Registered nosave memory region: %016lx - %016lx\n",
		start_pfn << PAGE_SHIFT, end_pfn << PAGE_SHIFT);
}
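
/*
 * Set bits in this map correspond to the page frames the contents of which
 * should not be saved during the suspend.
 */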
static struct memory_bitmap *forbidden_pages_map;
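
/* Set bits in this map correspond to free page frames */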
static struct memory_bitmap *free_pages_map;
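
/*
 * Each page frame allocated for creating the image is marked by setting the
 * corresponding bits in forbidden_pages_map and free_pages_map simultaneously
 */
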
void swsusp_set_page_free(struct page *page)
{
	if (free_pages_map)
		memory_bm_set_bit(free_pages_map, page_to_pfn(page));
}

static int swsusp_page_is_free(struct page *page)
{
	return free_pages_map ?
		memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0;
}

void swsusp_unset_page_free(struct page *page)
{
	if (free_pages_map)
		memory_bm_clear_bit(free_pages_map, page_to_pfn(page));
}

static void swsusp_set_page_forbidden(struct page *page)
{
	if (forbidden_pages_map)
		memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page));
}

int swsusp_page_is_forbidden(struct page *page)
{
	return forbidden_pages_map ?
		memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0;
}

static void swsusp_unset_page_forbidden(struct page *page)
{
	if (forbidden_pages_map)
		memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
}
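
/**
 *	mark_nosave_pages - set bits corresponding to the page frames the
 *	contents of which should not be saved in a given bitmap.
 */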
static void mark_nosave_pages(struct memory_bitmap *bm)
{
	struct nosave_region *region;

	if (list_empty(&nosave_regions))
		return;

	list_for_each_entry(region, &nosave_regions, list) {
		unsigned long pfn;

		printk("swsusp: Marking nosave pages: %016lx - %016lx\n",
				region->start_pfn << PAGE_SHIFT,
				region->end_pfn << PAGE_SHIFT);

		for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
			if (pfn_valid(pfn))
				memory_bm_set_bit(bm, pfn);
	}
}
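
/**
 *	create_basic_memory_bitmaps - create bitmaps needed for marking page
 *	frames that should not be saved and free page frames.  The pointers
 *	forbidden_pages_map and free_pages_map are only modified if everything
 *	goes well, because we don't want the bits to be used before both
 *	bitmaps are set up.
 */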
int create_basic_memory_bitmaps(void)
{
	struct memory_bitmap *bm1, *bm2;
	int error = 0;

	BUG_ON(forbidden_pages_map || free_pages_map);

	bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
	if (!bm1)
		return -ENOMEM;

	error = memory_bm_create(bm1, GFP_KERNEL, PG_ANY);
	if (error)
		goto Free_first_object;

	bm2 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
	if (!bm2)
		goto Free_first_bitmap;

	error = memory_bm_create(bm2, GFP_KERNEL, PG_ANY);
	if (error)
		goto Free_second_object;

	forbidden_pages_map = bm1;
	free_pages_map = bm2;
	mark_nosave_pages(forbidden_pages_map);

	printk("swsusp: Basic memory bitmaps created\n");

	return 0;

 Free_second_object:
	kfree(bm2);
 Free_first_bitmap:
	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
 Free_first_object:
	kfree(bm1);
	return -ENOMEM;
}
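
/**
 *	free_basic_memory_bitmaps - free memory bitmaps allocated by
 *	create_basic_memory_bitmaps.  The auxiliary pointers are necessary
 *	so that the bitmaps themselves are not referred to while they are
 *	being freed.
 */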
void free_basic_memory_bitmaps(void)
{
	struct memory_bitmap *bm1, *bm2;

	BUG_ON(!(forbidden_pages_map && free_pages_map));

	bm1 = forbidden_pages_map;
	bm2 = free_pages_map;
	forbidden_pages_map = NULL;
	free_pages_map = NULL;
	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
	kfree(bm1);
	memory_bm_free(bm2, PG_UNSAFE_CLEAR);
	kfree(bm2);

	printk("swsusp: Basic memory bitmaps freed\n");
}
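
/**
 *	snapshot_additional_pages - estimate the number of additional pages
 *	needed for setting up the suspend image data structures for given zone
 *	(usually the returned value is greater than the exact number)
 */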
unsigned int snapshot_additional_pages(struct zone *zone)
{
	unsigned int res;

	res = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
	res += DIV_ROUND_UP(res * sizeof(struct bm_block), PAGE_SIZE);
	return 2 * res;
}

#ifdef CONFIG_HIGHMEM
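/**
 *	count_free_highmem_pages - compute the total number of free highmem
 *	pages, system-wide.
 */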
static unsigned int count_free_highmem_pages(void)
{
	struct zone *zone;
	unsigned int cnt = 0;

	for_each_zone(zone)
		if (populated_zone(zone) && is_highmem(zone))
			cnt += zone_page_state(zone, NR_FREE_PAGES);

	return cnt;
}
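
/**
 *	saveable_highmem_page - Determine whether a highmem page should be
 *	included in the suspend image.
 *
 *	We should save the page if it isn't Nosave or NosaveFree, or Reserved,
 *	and it isn't a part of a free chunk of pages.
 */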
static struct page *saveable_highmem_page(unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_page(pfn);

	BUG_ON(!PageHighMem(page));

	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page) ||
	    PageReserved(page))
		return NULL;

	return page;
}
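
/**
 *	count_highmem_pages - compute the total number of saveable highmem
 *	pages.
 */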
unsigned int count_highmem_pages(void)
{
	struct zone *zone;
	unsigned int n = 0;

	for_each_zone(zone) {
		unsigned long pfn, max_zone_pfn;

		if (!is_highmem(zone))
			continue;

		mark_free_pages(zone);
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_highmem_page(pfn))
				n++;
	}
	return n;
}
#else
static inline void *saveable_highmem_page(unsigned long pfn) { return NULL; }
static inline unsigned int count_highmem_pages(void) { return 0; }
#endif /* CONFIG_HIGHMEM */
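
/**
 *	saveable_page - Determine whether a non-highmem page should be
 *	included in the suspend image.
 *
 *	We should save the page if it isn't Nosave, and is not in the range
 *	of pages statically defined as 'unsaveable', and it isn't a part of
 *	a free chunk of pages.
 */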
static struct page *saveable_page(unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_page(pfn);

	BUG_ON(PageHighMem(page));

	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
		return NULL;

	if (PageReserved(page) && pfn_is_nosave(pfn))
		return NULL;

	return page;
}
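
/**
 *	count_data_pages - compute the total number of saveable non-highmem
 *	pages.
 */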
unsigned int count_data_pages(void)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;
	unsigned int n = 0;

	for_each_zone(zone) {
		if (is_highmem(zone))
			continue;

		mark_free_pages(zone);
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_page(pfn))
				n++;
	}
	return n;
}
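
/* This is needed, because copy_page and memcpy are not usable for copying
 * task structs.
 */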
static inline void do_copy_page(long *dst, long *src)
{
	int n;

	for (n = PAGE_SIZE / sizeof(long); n; n--)
		*dst++ = *src++;
}

#ifdef CONFIG_HIGHMEM
static inline struct page *
page_is_saveable(struct zone *zone, unsigned long pfn)
{
	return is_highmem(zone) ?
		saveable_highmem_page(pfn) : saveable_page(pfn);
}

static inline void
copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
	struct page *s_page, *d_page;
	void *src, *dst;

	s_page = pfn_to_page(src_pfn);
	d_page = pfn_to_page(dst_pfn);
	if (PageHighMem(s_page)) {
		src = kmap_atomic(s_page, KM_USER0);
		dst = kmap_atomic(d_page, KM_USER1);
		do_copy_page(dst, src);
		kunmap_atomic(src, KM_USER0);
		kunmap_atomic(dst, KM_USER1);
	} else {
		src = page_address(s_page);
		if (PageHighMem(d_page)) {
			/* Page pointed to by src may contain some kernel
			 * data modified by kmap_atomic()
			 */
			do_copy_page(buffer, src);
			dst = kmap_atomic(pfn_to_page(dst_pfn), KM_USER0);
			memcpy(dst, buffer, PAGE_SIZE);
			kunmap_atomic(dst, KM_USER0);
		} else {
			dst = page_address(d_page);
			do_copy_page(dst, src);
		}
	}
}
#else
#define page_is_saveable(zone, pfn)	saveable_page(pfn)

static inline void
copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
	do_copy_page(page_address(pfn_to_page(dst_pfn)),
			page_address(pfn_to_page(src_pfn)));
}
#endif /* CONFIG_HIGHMEM */

static void
copy_data_pages(struct memory_bitmap *copy_bm, struct memory_bitmap *orig_bm)
{
	struct zone *zone;
	unsigned long pfn;

	for_each_zone(zone) {
		unsigned long max_zone_pfn;

		mark_free_pages(zone);
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (page_is_saveable(zone, pfn))
				memory_bm_set_bit(orig_bm, pfn);
	}
	memory_bm_position_reset(orig_bm);
	memory_bm_position_reset(copy_bm);
	for (;;) {
		pfn = memory_bm_next_pfn(orig_bm);
		if (unlikely(pfn == BM_END_OF_MAP))
			break;
		copy_data_page(memory_bm_next_pfn(copy_bm), pfn);
	}
}

/* Total number of image pages */
static unsigned int nr_copy_pages;
/* Number of pages needed for saving the original pfns of the image pages */
static unsigned int nr_meta_pages;
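
/**
 *	swsusp_free - free pages allocated for the suspend.
 *
 *	Suspend pages are allocated before the atomic copy is made, so we
 *	need to release them after the resume.
 */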
void swsusp_free(void)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;

	for_each_zone(zone) {
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (pfn_valid(pfn)) {
				struct page *page = pfn_to_page(pfn);

				if (swsusp_page_is_forbidden(page) &&
				    swsusp_page_is_free(page)) {
					swsusp_unset_page_forbidden(page);
					swsusp_unset_page_free(page);
					__free_page(page);
				}
			}
	}
	nr_copy_pages = 0;
	nr_meta_pages = 0;
	restore_pblist = NULL;
	buffer = NULL;
}

#ifdef CONFIG_HIGHMEM
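/**
 *	count_pages_for_highmem - compute the number of non-highmem pages
 *	that will be necessary for creating copies of highmem pages.
 */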
static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
{
	unsigned int free_highmem = count_free_highmem_pages();

	if (free_highmem >= nr_highmem)
		nr_highmem = 0;
	else
		nr_highmem -= free_highmem;

	return nr_highmem;
}
#else
static unsigned int
count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
#endif /* CONFIG_HIGHMEM */
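
/**
 *	enough_free_mem - Make sure we have enough free memory for the
 *	snapshot image.
 */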
static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
{
	struct zone *zone;
	unsigned int free = 0, meta = 0;

	for_each_zone(zone) {
		meta += snapshot_additional_pages(zone);
		if (!is_highmem(zone))
			free += zone_page_state(zone, NR_FREE_PAGES);
	}

	nr_pages += count_pages_for_highmem(nr_highmem);
	pr_debug("swsusp: Normal pages needed: %u + %u + %u, available pages: %u\n",
		nr_pages, PAGES_FOR_IO, meta, free);

	return free > nr_pages + PAGES_FOR_IO + meta;
}

#ifdef CONFIG_HIGHMEM
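/**
 *	get_highmem_buffer - if there are some highmem pages in the suspend
 *	image, we may need the buffer to copy them and/or load their data.
 */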
static inline int get_highmem_buffer(int safe_needed)
{
	buffer = get_image_page(GFP_ATOMIC | __GFP_COLD, safe_needed);
	return buffer ? 0 : -ENOMEM;
}
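
/**
 *	alloc_highmem_image_pages - allocate some highmem pages for the image.
 *	Try to allocate as many pages as needed, but if the number of free
 *	highmem pages is lesser than that, allocate them all.
 */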
static inline unsigned int
alloc_highmem_image_pages(struct memory_bitmap *bm, unsigned int nr_highmem)
{
	unsigned int to_alloc = count_free_highmem_pages();

	if (to_alloc > nr_highmem)
		to_alloc = nr_highmem;

	nr_highmem -= to_alloc;
	while (to_alloc-- > 0) {
		struct page *page;

		page = alloc_image_page(__GFP_HIGHMEM);
		memory_bm_set_bit(bm, page_to_pfn(page));
	}
	return nr_highmem;
}
#else
static inline int get_highmem_buffer(int safe_needed) { return 0; }

static inline unsigned int
alloc_highmem_image_pages(struct memory_bitmap *bm, unsigned int n) { return 0; }
#endif /* CONFIG_HIGHMEM */
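
/**
 *	swsusp_alloc - allocate memory for the suspend image
 *
 *	We first try to allocate as many highmem pages as there are
 *	saveable highmem pages in the system.  If that fails, we allocate
 *	non-highmem pages for the copies of the remaining highmem ones.
 */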
static int
swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm,
		unsigned int nr_pages, unsigned int nr_highmem)
{
	int error;

	error = memory_bm_create(orig_bm, GFP_ATOMIC | __GFP_COLD, PG_ANY);
	if (error)
		goto Free;

	error = memory_bm_create(copy_bm, GFP_ATOMIC | __GFP_COLD, PG_ANY);
	if (error)
		goto Free;

	if (nr_highmem > 0) {
		error = get_highmem_buffer(PG_ANY);
		if (error)
			goto Free;

		nr_pages += alloc_highmem_image_pages(copy_bm, nr_highmem);
	}
	while (nr_pages-- > 0) {
		struct page *page = alloc_image_page(GFP_ATOMIC | __GFP_COLD);

		if (!page)
			goto Free;

		memory_bm_set_bit(copy_bm, page_to_pfn(page));
	}
	return 0;

 Free:
	swsusp_free();
	return -ENOMEM;
}
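
/* Memory bitmap used for marking saveable pages (during suspend) or the
 * suspend image pages (during resume)
 */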
static struct memory_bitmap orig_bm;
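
/* Memory bitmap used on suspend for marking the allocated pages that will
 * contain the copies of saveable pages.  During resume they are initially
 * marked as used (ie. "unsafe") and then the pages allocated for storing
 * the image data are unmarked, so that they can hold the data loaded from
 * the image later on.
 */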
static struct memory_bitmap copy_bm;

asmlinkage int swsusp_save(void)
{
	unsigned int nr_pages, nr_highmem;

	printk("swsusp: critical section:\n");

	drain_local_pages();
	nr_pages = count_data_pages();
	nr_highmem = count_highmem_pages();
	printk("swsusp: Need to copy %u pages\n", nr_pages + nr_highmem);

	if (!enough_free_mem(nr_pages, nr_highmem)) {
		printk(KERN_ERR "swsusp: Not enough free memory\n");
		return -ENOMEM;
	}

	if (swsusp_alloc(&orig_bm, &copy_bm, nr_pages, nr_highmem)) {
		printk(KERN_ERR "swsusp: Memory allocation failed\n");
		return -ENOMEM;
	}

	/* During allocating of suspend pagedir, new cold pages may appear.
	 * Kill them.
	 */
	drain_local_pages();
	copy_data_pages(&copy_bm, &orig_bm);

	/*
	 * End of critical section. From now on, we can write to memory,
	 * but we should not touch disk. This specially means we must _not_
	 * touch swap space! Except we must write out our image of course.
	 */

	nr_pages += nr_highmem;
	nr_copy_pages = nr_pages;
	nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);

	printk("swsusp: critical section: done (%d pages copied)\n", nr_pages);

	return 0;
}

#ifndef CONFIG_ARCH_HIBERNATION_HEADER
static int init_header_complete(struct swsusp_info *info)
{
	memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
	info->version_code = LINUX_VERSION_CODE;
	return 0;
}

static char *check_image_kernel(struct swsusp_info *info)
{
	if (info->version_code != LINUX_VERSION_CODE)
		return "kernel version";
	if (strcmp(info->uts.sysname, init_utsname()->sysname))
		return "system type";
	if (strcmp(info->uts.release, init_utsname()->release))
		return "kernel release";
	if (strcmp(info->uts.version, init_utsname()->version))
		return "version";
	if (strcmp(info->uts.machine, init_utsname()->machine))
		return "machine";
	return NULL;
}
#endif /* CONFIG_ARCH_HIBERNATION_HEADER */

static int init_header(struct swsusp_info *info)
{
	memset(info, 0, sizeof(struct swsusp_info));
	info->num_physpages = num_physpages;
	info->image_pages = nr_copy_pages;
	info->pages = nr_copy_pages + nr_meta_pages + 1;
	info->size = info->pages;
	info->size <<= PAGE_SHIFT;
	return init_header_complete(info);
}
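
/**
 *	pack_pfns - pfns corresponding to the set bits found in the bitmap @bm
 *	are stored in the array @buf[] (1 page at a time)
 */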
static inline void
pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
		buf[j] = memory_bm_next_pfn(bm);
		if (unlikely(buf[j] == BM_END_OF_MAP))
			break;
	}
}
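
/**
 *	snapshot_read_next - used for reading the system memory snapshot.
 *
 *	On the first call to it @handle should point to a zeroed
 *	snapshot_handle structure.  The structure gets updated and a pointer
 *	to it should be passed to this function every next time.
 *
 *	The @count parameter should contain the number of bytes the caller
 *	wants to read from the snapshot.  It must not be zero.
 *
 *	On success the function returns a positive number.  Then, the caller
 *	is allowed to read up to the returned number of bytes from the memory
 *	location computed by the data_of() macro.  The number returned may be
 *	smaller than @count, but this only happens if the read would cross a
 *	page boundary otherwise.
 *
 *	The function returns 0 to indicate the end of data stream condition,
 *	and a negative number is returned on error.  In such cases the
 *	structure pointed to by @handle is not updated and should not be used
 *	any more.
 */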
int snapshot_read_next(struct snapshot_handle *handle, size_t count)
{
	if (handle->cur > nr_meta_pages + nr_copy_pages)
		return 0;

	if (!buffer) {
		/* This makes the buffer be freed by swsusp_free() */
		buffer = get_image_page(GFP_ATOMIC, PG_ANY);
		if (!buffer)
			return -ENOMEM;
	}
	if (!handle->offset) {
		int error;

		error = init_header((struct swsusp_info *)buffer);
		if (error)
			return error;
		handle->buffer = buffer;
		memory_bm_position_reset(&orig_bm);
		memory_bm_position_reset(&copy_bm);
	}
	if (handle->prev < handle->cur) {
		if (handle->cur <= nr_meta_pages) {
			memset(buffer, 0, PAGE_SIZE);
			pack_pfns(buffer, &orig_bm);
		} else {
			struct page *page;

			page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
			if (PageHighMem(page)) {
				/* Highmem pages are copied to the buffer,
				 * because we can't return with a kmapped
				 * highmem page (we may not be called again).
				 */
				void *kaddr;

				kaddr = kmap_atomic(page, KM_USER0);
				memcpy(buffer, kaddr, PAGE_SIZE);
				kunmap_atomic(kaddr, KM_USER0);
				handle->buffer = buffer;
			} else {
				handle->buffer = page_address(page);
			}
		}
		handle->prev = handle->cur;
	}
	handle->buf_offset = handle->cur_offset;
	if (handle->cur_offset + count >= PAGE_SIZE) {
		count = PAGE_SIZE - handle->cur_offset;
		handle->cur_offset = 0;
		handle->cur++;
	} else {
		handle->cur_offset += count;
	}
	handle->offset += count;
	return count;
}
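
/**
 *	mark_unsafe_pages - mark the pages that cannot be used for storing
 *	the image during resume, because they conflict with the pages that
 *	had been used before suspend
 */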
static int mark_unsafe_pages(struct memory_bitmap *bm)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;

	/* Clear page flags */
	for_each_zone(zone) {
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (pfn_valid(pfn))
				swsusp_unset_page_free(pfn_to_page(pfn));
	}

	/* Mark pages that correspond to the "original" pfns as "unsafe" */
	memory_bm_position_reset(bm);
	do {
		pfn = memory_bm_next_pfn(bm);
		if (likely(pfn != BM_END_OF_MAP)) {
			if (likely(pfn_valid(pfn)))
				swsusp_set_page_free(pfn_to_page(pfn));
			else
				return -EFAULT;
		}
	} while (pfn != BM_END_OF_MAP);

	allocated_unsafe_pages = 0;

	return 0;
}

static void
duplicate_memory_bitmap(struct memory_bitmap *dst, struct memory_bitmap *src)
{
	unsigned long pfn;

	memory_bm_position_reset(src);
	pfn = memory_bm_next_pfn(src);
	while (pfn != BM_END_OF_MAP) {
		memory_bm_set_bit(dst, pfn);
		pfn = memory_bm_next_pfn(src);
	}
}

static int check_header(struct swsusp_info *info)
{
	char *reason;

	reason = check_image_kernel(info);
	if (!reason && info->num_physpages != num_physpages)
		reason = "memory size";
	if (reason) {
		printk(KERN_ERR "swsusp: Resume mismatch: %s\n", reason);
		return -EPERM;
	}
	return 0;
}
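
/**
 *	load_header - check the image header and copy data from it
 */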
static int
load_header(struct swsusp_info *info)
{
	int error;

	restore_pblist = NULL;
	error = check_header(info);
	if (!error) {
		nr_copy_pages = info->image_pages;
		nr_meta_pages = info->pages - info->image_pages - 1;
	}
	return error;
}
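
/**
 *	unpack_orig_pfns - for each element of @buf[] (1 page at a time) set
 *	the corresponding bit in the memory bitmap @bm
 */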
static inline void
unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
		if (unlikely(buf[j] == BM_END_OF_MAP))
			break;

		memory_bm_set_bit(bm, buf[j]);
	}
}
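
/* List of "safe" pages that may be used to store data loaded from the
 * suspend image
 */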
static struct linked_page *safe_pages_list;

#ifdef CONFIG_HIGHMEM
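/* struct highmem_pbe is used for creating the list of highmem pages that
 * should be restored atomically during the resume
 */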
struct highmem_pbe {
	struct page *copy_page;	/* data is here now */
	struct page *orig_page;	/* data was here before the suspend */
	struct highmem_pbe *next;
};
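
/* List of highmem PBEs needed for restoring the highmem pages that were
 * allocated before the suspend and included in the suspend image, but have
 * also been allocated by the "resume" kernel, so their contents cannot be
 * written directly to their "original" page frames.
 */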
static struct highmem_pbe *highmem_pblist;
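
/**
 *	count_highmem_image_pages - compute the number of highmem pages in the
 *	suspend image.  The bits in the memory bitmap @bm that correspond to
 *	the image pages are assumed to be set.
 */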
static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
{
	unsigned long pfn;
	unsigned int cnt = 0;

	memory_bm_position_reset(bm);
	pfn = memory_bm_next_pfn(bm);
	while (pfn != BM_END_OF_MAP) {
		if (PageHighMem(pfn_to_page(pfn)))
			cnt++;

		pfn = memory_bm_next_pfn(bm);
	}
	return cnt;
}
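
/**
 *	prepare_highmem_image - try to allocate as many highmem pages as
 *	there are highmem image pages (@nr_highmem_p points to the variable
 *	containing the number of highmem image pages).  The pages that are
 *	"safe" (ie. will not be overwritten when the suspend image is
 *	restored) have the corresponding bits set in @bm (it must be
 *	uninitialized).
 *
 *	NOTE: This function should not be called if there are no highmem
 *	image pages.
 */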
static unsigned int safe_highmem_pages;

static struct memory_bitmap *safe_highmem_bm;

static int
prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
{
	unsigned int to_alloc;

	if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
		return -ENOMEM;

	if (get_highmem_buffer(PG_SAFE))
		return -ENOMEM;

	to_alloc = count_free_highmem_pages();
	if (to_alloc > *nr_highmem_p)
		to_alloc = *nr_highmem_p;
	else
		*nr_highmem_p = to_alloc;

	safe_highmem_pages = 0;
	while (to_alloc-- > 0) {
		struct page *page;

		page = alloc_page(__GFP_HIGHMEM);
		if (!swsusp_page_is_free(page)) {
			/* The page is "safe", set its bit in the bitmap */
			memory_bm_set_bit(bm, page_to_pfn(page));
			safe_highmem_pages++;
		}
		/* Mark the page as allocated */
		swsusp_set_page_forbidden(page);
		swsusp_set_page_free(page);
	}
	memory_bm_position_reset(bm);
	safe_highmem_bm = bm;
	return 0;
}
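
/**
 *	get_highmem_page_buffer - for given highmem image page find the buffer
 *	that snapshot_write_next() should set for its caller to write to.
 *
 *	If the page is to be saved to its "original" page frame or a copy of
 *	the page is to be made in the highmem, @buffer is returned.  Otherwise,
 *	the copy of the page is to be made in normal memory, so the address of
 *	the copy is returned.
 *
 *	If @buffer is returned, the caller of snapshot_write_next() will write
 *	the page's contents to @buffer, so they will have to be copied to the
 *	right location on the next call of snapshot_write_next() and it is
 *	done with the help of copy_last_highmem_page().  For this purpose, if
 *	@buffer is returned, @last_highmem_page is set to the page to which
 *	the data will have to be copied from @buffer.
 */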
static struct page *last_highmem_page;

static void *
get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
{
	struct highmem_pbe *pbe;
	void *kaddr;

	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) {
		/* We have allocated the "original" page frame and we can
		 * use it directly to store the loaded page.
		 */
		last_highmem_page = page;
		return buffer;
	}
	/* The "original" page frame has not been allocated and we have to
	 * use a "safe" page frame to store the loaded page.
	 */
	pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
	if (!pbe) {
		swsusp_free();
		return NULL;
	}
	pbe->orig_page = page;
	if (safe_highmem_pages > 0) {
		struct page *tmp;

		/* Copy of the page will be stored in high memory */
		kaddr = buffer;
		tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm));
		safe_highmem_pages--;
		last_highmem_page = tmp;
		pbe->copy_page = tmp;
	} else {
		/* Copy of the page will be stored in normal memory */
		kaddr = safe_pages_list;
		safe_pages_list = safe_pages_list->next;
		pbe->copy_page = virt_to_page(kaddr);
	}
	pbe->next = highmem_pblist;
	highmem_pblist = pbe;
	return kaddr;
}
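
/**
 *	copy_last_highmem_page - copy the contents of a highmem image from
 *	@buffer, where the caller of snapshot_write_next() has placed them,
 *	to the right location represented by @last_highmem_page .
 */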
static void copy_last_highmem_page(void)
{
	if (last_highmem_page) {
		void *dst;

		dst = kmap_atomic(last_highmem_page, KM_USER0);
		memcpy(dst, buffer, PAGE_SIZE);
		kunmap_atomic(dst, KM_USER0);
		last_highmem_page = NULL;
	}
}

static inline int last_highmem_page_copied(void)
{
	return !last_highmem_page;
}

static inline void free_highmem_data(void)
{
	if (safe_highmem_bm)
		memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR);

	if (buffer)
		free_image_page(buffer, PG_UNSAFE_CLEAR);
}
#else
static inline int get_safe_write_buffer(void) { return 0; }

static unsigned int
count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }

static inline int
prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
{
	return 0;
}

static inline void *
get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
{
	return NULL;
}

static inline void copy_last_highmem_page(void) {}
static inline int last_highmem_page_copied(void) { return 1; }
static inline void free_highmem_data(void) {}
#endif /* CONFIG_HIGHMEM */
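
/**
 *	prepare_image - use the memory bitmap @bm to mark the pages that will
 *	be overwritten in the process of restoring the system memory state
 *	from the suspend image ("unsafe" pages) and allocate memory for the
 *	image.
 *
 *	The idea is to allocate a new memory bitmap first and then allocate
 *	as many pages as needed for the image data, but not to assign these
 *	pages to specific tasks initially.  Instead, we just mark them as
 *	allocated and create a list of "safe" pages that will be used
 *	later.
 */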
#define PBES_PER_LINKED_PAGE	(LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))

static int
prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
{
	unsigned int nr_pages, nr_highmem;
	struct linked_page *sp_list, *lp;
	int error;

	/* If there is no highmem, the buffer will not be necessary */
	free_image_page(buffer, PG_UNSAFE_CLEAR);
	buffer = NULL;

	nr_highmem = count_highmem_image_pages(bm);
	error = mark_unsafe_pages(bm);
	if (error)
		goto Free;

	error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
	if (error)
		goto Free;

	duplicate_memory_bitmap(new_bm, bm);
	memory_bm_free(bm, PG_UNSAFE_KEEP);
	if (nr_highmem > 0) {
		error = prepare_highmem_image(bm, &nr_highmem);
		if (error)
			goto Free;
	}
	/* Reserve some safe pages for potential later use.
	 *
	 * NOTE: This way we make sure there will be enough safe pages for the
	 * chain_alloc() in get_buffer().  It is a bit wasteful, but
	 * nr_copy_pages cannot be greater than 50% of the memory anyway.
	 */
	sp_list = NULL;
	/* nr_copy_pages cannot be less than allocated_unsafe_pages */
	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
	nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
	while (nr_pages > 0) {
		lp = get_image_page(GFP_ATOMIC, PG_SAFE);
		if (!lp) {
			error = -ENOMEM;
			goto Free;
		}
		lp->next = sp_list;
		sp_list = lp;
		nr_pages--;
	}
	/* Preallocate memory for the image */
	safe_pages_list = NULL;
	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
	while (nr_pages > 0) {
		lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
		if (!lp) {
			error = -ENOMEM;
			goto Free;
		}
		if (!swsusp_page_is_free(virt_to_page(lp))) {
			/* The page is "safe", add it to the list */
			lp->next = safe_pages_list;
			safe_pages_list = lp;
		}
		/* Mark the page as allocated */
		swsusp_set_page_forbidden(virt_to_page(lp));
		swsusp_set_page_free(virt_to_page(lp));
		nr_pages--;
	}
	/* Free the reserved safe pages so that chain_alloc() can use them */
	while (sp_list) {
		lp = sp_list->next;
		free_image_page(sp_list, PG_UNSAFE_CLEAR);
		sp_list = lp;
	}
	return 0;

 Free:
	swsusp_free();
	return error;
}
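
/**
 *	get_buffer - compute the address that snapshot_write_next() should
 *	return to its caller to write to.
 */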
static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
{
	struct pbe *pbe;
	struct page *page = pfn_to_page(memory_bm_next_pfn(bm));

	if (PageHighMem(page))
		return get_highmem_page_buffer(page, ca);

	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page))
		/* We have allocated the "original" page frame and we can
		 * use it directly to store the loaded page.
		 */
		return page_address(page);

	/* The "original" page frame has not been allocated and we have to
	 * use a "safe" page frame to store the loaded page.
	 */
	pbe = chain_alloc(ca, sizeof(struct pbe));
	if (!pbe) {
		swsusp_free();
		return NULL;
	}
	pbe->orig_address = page_address(page);
	pbe->address = safe_pages_list;
	safe_pages_list = safe_pages_list->next;
	pbe->next = restore_pblist;
	restore_pblist = pbe;
	return pbe->address;
}
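
/**
 *	snapshot_write_next - used for writing the system memory snapshot.
 *
 *	On the first call to it @handle should point to a zeroed
 *	snapshot_handle structure.  The structure gets updated and a pointer
 *	to it should be passed to this function every next time.
 *
 *	The @count parameter should contain the number of bytes the caller
 *	wants to write to the image.  It must not be zero.
 *
 *	On success the function returns a positive number.  Then, the caller
 *	is allowed to write up to the returned number of bytes to the memory
 *	location computed by the data_of() macro.  The number returned may be
 *	smaller than @count, but this only happens if the write would cross a
 *	page boundary otherwise.
 *
 *	The function returns 0 to indicate the "end of file" condition, and a
 *	negative number is returned on error.  In such cases the structure
 *	pointed to by @handle is not updated and should not be used any more.
 */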
int snapshot_write_next(struct snapshot_handle *handle, size_t count)
{
	static struct chain_allocator ca;
	int error = 0;

	/* Check if we have already loaded the entire image */
	if (handle->prev && handle->cur > nr_meta_pages + nr_copy_pages)
		return 0;

	if (handle->offset == 0) {
		if (!buffer)
			/* This makes the buffer be freed by swsusp_free() */
			buffer = get_image_page(GFP_ATOMIC, PG_ANY);

		if (!buffer)
			return -ENOMEM;

		handle->buffer = buffer;
	}
	handle->sync_read = 1;
	if (handle->prev < handle->cur) {
		if (handle->prev == 0) {
			error = load_header(buffer);
			if (error)
				return error;

			error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
			if (error)
				return error;

		} else if (handle->prev <= nr_meta_pages) {
			unpack_orig_pfns(buffer, &copy_bm);
			if (handle->prev == nr_meta_pages) {
				error = prepare_image(&orig_bm, &copy_bm);
				if (error)
					return error;

				chain_init(&ca, GFP_ATOMIC, PG_SAFE);
				memory_bm_position_reset(&orig_bm);
				restore_pblist = NULL;
				handle->buffer = get_buffer(&orig_bm, &ca);
				handle->sync_read = 0;
				if (!handle->buffer)
					return -ENOMEM;
			}
		} else {
			copy_last_highmem_page();
			handle->buffer = get_buffer(&orig_bm, &ca);
			if (handle->buffer != buffer)
				handle->sync_read = 0;
		}
		handle->prev = handle->cur;
	}
	handle->buf_offset = handle->cur_offset;
	if (handle->cur_offset + count >= PAGE_SIZE) {
		count = PAGE_SIZE - handle->cur_offset;
		handle->cur_offset = 0;
		handle->cur++;
	} else {
		handle->cur_offset += count;
	}
	handle->offset += count;
	return count;
}
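
/**
 *	snapshot_write_finalize - must be called after the last call to
 *	snapshot_write_next() in case the last page in the image happens
 *	to be a highmem page and its contents should be stored in the
 *	highmem.  Additionally, it releases the memory that will not be
 *	used any more.
 */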
void snapshot_write_finalize(struct snapshot_handle *handle)
{
	copy_last_highmem_page();
	/* Free only if we have loaded the image entirely */
	if (handle->prev && handle->cur > nr_meta_pages + nr_copy_pages) {
		memory_bm_free(&orig_bm, PG_UNSAFE_CLEAR);
		free_highmem_data();
	}
}

int snapshot_image_loaded(struct snapshot_handle *handle)
{
	return !(!nr_copy_pages || !last_highmem_page_copied() ||
			handle->cur <= nr_meta_pages + nr_copy_pages);
}

#ifdef CONFIG_HIGHMEM
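/* swap_two_pages_data - swap the contents of pages @p1 and @p2 using @buf as
 * a temporary storage area
 */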
static inline void
swap_two_pages_data(struct page *p1, struct page *p2, void *buf)
{
	void *kaddr1, *kaddr2;

	kaddr1 = kmap_atomic(p1, KM_USER0);
	kaddr2 = kmap_atomic(p2, KM_USER1);
	memcpy(buf, kaddr1, PAGE_SIZE);
	memcpy(kaddr1, kaddr2, PAGE_SIZE);
	memcpy(kaddr2, buf, PAGE_SIZE);
	kunmap_atomic(kaddr1, KM_USER0);
	kunmap_atomic(kaddr2, KM_USER1);
}
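
/**
 *	restore_highmem - for each highmem page that was allocated before
 *	the suspend and included in the suspend image, and also has been
 *	allocated by the "resume" kernel, swap its current (ie. "before
 *	resume") contents with the previous (ie. "before suspend") one.
 *
 *	If the resume eventually fails, we can call this function once
 *	again and restore the "before resume" highmem state.
 */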
int restore_highmem(void)
{
	struct highmem_pbe *pbe = highmem_pblist;
	void *buf;

	if (!pbe)
		return 0;

	buf = get_image_page(GFP_ATOMIC, PG_SAFE);
	if (!buf)
		return -ENOMEM;

	while (pbe) {
		swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
		pbe = pbe->next;
	}
	free_image_page(buf, PG_UNSAFE_CLEAR);
	return 0;
}
#endif /* CONFIG_HIGHMEM */