/*
 * Memory merging support.
 *
 * This code enables dynamic sharing of identical pages found in different
 * memory areas, even if they are not shared by fork().
 */
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/rwsem.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/spinlock.h>
#include <linux/jhash.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/memory.h>
#include <linux/mmu_notifier.h>
#include <linux/swap.h>
#include <linux/ksm.h>
#include <linux/hash.h>
#include <linux/freezer.h>
#include <linux/oom.h>

#include <asm/tlbflush.h>
#include "internal.h"

/*
 * A few notes about the KSM scanning process, to make it easier to understand
 * the data structures below:
 *
 * In order to reduce excessive scanning, KSM sorts the memory pages by their
 * contents into a data structure that holds pointers to the pages' locations.
 *
 * Since the contents of the pages may change at any moment, KSM cannot just
 * insert the pages into a normal sorted tree and expect it to find anything.
 * Therefore KSM uses two data structures - the stable and the unstable tree.
 *
 * The stable tree holds pointers to all the merged pages (ksm pages), sorted
 * by their contents.  Because each such page is write-protected, searching on
 * this tree is fully assured to be working (except when pages are unmapped),
 * and therefore this tree is called the stable tree.
 *
 * In addition to the stable tree, KSM uses a second data structure called the
 * unstable tree: this tree holds pointers to pages which have been found to
 * be "unchanged for a period of time".  The unstable tree sorts these pages
 * by their contents, but since they are not write-protected, KSM cannot rely
 * upon the unstable tree to work correctly - the unstable tree is liable to
 * be corrupted as its contents are modified, and so it is called unstable.
 *
 * KSM solves this problem by several techniques:
 *
 * 1) The unstable tree is flushed every time KSM completes scanning all
 *    memory areas, and then it is rebuilt again from the beginning.
 * 2) KSM will only insert into the unstable tree, pages whose hash value
 *    has not changed since the previous scan of all memory areas.
 * 3) The unstable tree is a RedBlack Tree - so its balancing is based on the
 *    colors of the nodes and not on their contents, assuring that even when
 *    the tree gets "corrupted" it won't get out of balance, so scanning time
 *    remains the same (also, searching and inserting nodes in an rbtree uses
 *    the same algorithm, so we have no overhead when we flush and rebuild).
 * 4) KSM never flushes the stable tree, which means that even if it were to
 *    take 10 attempts to find a page in the unstable tree, once it is found,
 *    it is secured in the stable tree.  (When we scan a new page, we first
 *    compare it against the stable tree, and then against the unstable tree.)
 */
/**
 * struct mm_slot - ksm information per mm that is being scanned
 * @link: link to the mm_slots hash list
 * @mm_list: link into the mm_slots list, rooted in ksm_mm_head
 * @rmap_list: head for this mm_slot's singly-linked list of rmap_items
 * @mm: the mm that this information is valid for
 */
struct mm_slot {
	struct hlist_node link;
	struct list_head mm_list;
	struct rmap_item *rmap_list;
	struct mm_struct *mm;
};

/**
 * struct ksm_scan - cursor for scanning
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 * @rmap_list: link to the next rmap to be scanned in the rmap_list
 * @seqnr: count of completed full scans (and wrapping seqnr for unstable tree)
 *
 * There is only the one ksm_scan instance of this cursor structure.
 */
struct ksm_scan {
	struct mm_slot *mm_slot;
	unsigned long address;
	struct rmap_item **rmap_list;
	unsigned long seqnr;
};

/**
 * struct stable_node - node of the stable rbtree
 * @node: rb node of this ksm page in the stable tree
 * @hlist: hlist head of rmap_items using this ksm page
 * @kpfn: page frame number of this ksm page
 */
struct stable_node {
	struct rb_node node;
	struct hlist_head hlist;
	unsigned long kpfn;
};

/**
 * struct rmap_item - reverse mapping item for virtual addresses
 * @rmap_list: next rmap_item in mm_slot's singly-linked rmap_list
 * @anon_vma: pointer to anon_vma for this mm,address, when in stable tree
 * @mm: the memory structure this rmap_item is pointing into
 * @address: the virtual address this rmap_item tracks (+ flags in low bits)
 * @oldchecksum: previous checksum of the page at that virtual address
 * @node: rb node of this rmap_item in the unstable tree
 * @head: pointer to stable_node heading this list in the stable tree
 * @hlist: link into hlist of rmap_items hanging off that stable_node
 */
struct rmap_item {
	struct rmap_item *rmap_list;
	struct anon_vma *anon_vma;	/* when stable */
	struct mm_struct *mm;
	unsigned long address;		/* + low bits used for flags below */
	unsigned int oldchecksum;	/* when unstable */
	union {
		struct rb_node node;	/* when node of unstable tree */
		struct {		/* when listed from stable tree */
			struct stable_node *head;
			struct hlist_node hlist;
		};
	};
};

#define SEQNR_MASK	0x0ff	/* low bits of unstable tree seqnr */
#define UNSTABLE_FLAG	0x100	/* is a node of the unstable tree */
#define STABLE_FLAG	0x200	/* is listed from the stable tree */
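
/*
 * Illustrative note: because the tracked address is always page-aligned,
 * its low bits double as tree state.  For example, unstable_tree_search_insert()
 * below marks a newly inserted node with
 *
 *	rmap_item->address |= UNSTABLE_FLAG;
 *	rmap_item->address |= (ksm_scan.seqnr & SEQNR_MASK);
 *
 * and the plain virtual address is recovered with
 *
 *	addr = rmap_item->address & PAGE_MASK;
 *
 * which is why the removal paths end with "rmap_item->address &= PAGE_MASK".
 */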

/* The stable and unstable tree heads */
static struct rb_root root_stable_tree = RB_ROOT;
static struct rb_root root_unstable_tree = RB_ROOT;

#define MM_SLOTS_HASH_SHIFT 10
#define MM_SLOTS_HASH_HEADS (1 << MM_SLOTS_HASH_SHIFT)
static struct hlist_head mm_slots_hash[MM_SLOTS_HASH_HEADS];

static struct mm_slot ksm_mm_head = {
	.mm_list = LIST_HEAD_INIT(ksm_mm_head.mm_list),
};
static struct ksm_scan ksm_scan = {
	.mm_slot = &ksm_mm_head,
};

static struct kmem_cache *rmap_item_cache;
static struct kmem_cache *stable_node_cache;
static struct kmem_cache *mm_slot_cache;

/* The number of nodes in the stable tree */
static unsigned long ksm_pages_shared;

/* The number of page slots additionally sharing those nodes */
static unsigned long ksm_pages_sharing;

/* The number of nodes in the unstable tree */
static unsigned long ksm_pages_unshared;

/* The number of rmap_items in use: to calculate pages_volatile */
static unsigned long ksm_rmap_items;

/* Number of pages ksmd should scan in one batch */
static unsigned int ksm_thread_pages_to_scan = 100;

/* Milliseconds ksmd should sleep between batches */
static unsigned int ksm_thread_sleep_millisecs = 20;

#define KSM_RUN_STOP	0
#define KSM_RUN_MERGE	1
#define KSM_RUN_UNMERGE	2
static unsigned int ksm_run = KSM_RUN_STOP;

static DECLARE_WAIT_QUEUE_HEAD(ksm_thread_wait);
static DEFINE_MUTEX(ksm_thread_mutex);
static DEFINE_SPINLOCK(ksm_mmlist_lock);

#define KSM_KMEM_CACHE(__struct, __flags) kmem_cache_create("ksm_"#__struct,\
		sizeof(struct __struct), __alignof__(struct __struct),\
		(__flags), NULL)
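
/*
 * For example, KSM_KMEM_CACHE(rmap_item, 0) above expands to
 *
 *	kmem_cache_create("ksm_rmap_item", sizeof(struct rmap_item),
 *			  __alignof__(struct rmap_item), 0, NULL);
 */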

static int __init ksm_slab_init(void)
{
	rmap_item_cache = KSM_KMEM_CACHE(rmap_item, 0);
	if (!rmap_item_cache)
		goto out;

	stable_node_cache = KSM_KMEM_CACHE(stable_node, 0);
	if (!stable_node_cache)
		goto out_free1;

	mm_slot_cache = KSM_KMEM_CACHE(mm_slot, 0);
	if (!mm_slot_cache)
		goto out_free2;

	return 0;

out_free2:
	kmem_cache_destroy(stable_node_cache);
out_free1:
	kmem_cache_destroy(rmap_item_cache);
out:
	return -ENOMEM;
}

static void __init ksm_slab_free(void)
{
	kmem_cache_destroy(mm_slot_cache);
	kmem_cache_destroy(stable_node_cache);
	kmem_cache_destroy(rmap_item_cache);
	mm_slot_cache = NULL;
}

static inline struct rmap_item *alloc_rmap_item(void)
{
	struct rmap_item *rmap_item;

	rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL);
	if (rmap_item)
		ksm_rmap_items++;
	return rmap_item;
}

static inline void free_rmap_item(struct rmap_item *rmap_item)
{
	ksm_rmap_items--;
	rmap_item->mm = NULL;	/* debug safety */
	kmem_cache_free(rmap_item_cache, rmap_item);
}

static inline struct stable_node *alloc_stable_node(void)
{
	return kmem_cache_alloc(stable_node_cache, GFP_KERNEL);
}

static inline void free_stable_node(struct stable_node *stable_node)
{
	kmem_cache_free(stable_node_cache, stable_node);
}

static inline struct mm_slot *alloc_mm_slot(void)
{
	if (!mm_slot_cache)	/* initialization failed */
		return NULL;
	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
}

static inline void free_mm_slot(struct mm_slot *mm_slot)
{
	kmem_cache_free(mm_slot_cache, mm_slot);
}

static struct mm_slot *get_mm_slot(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	struct hlist_head *bucket;
	struct hlist_node *node;

	bucket = &mm_slots_hash[hash_ptr(mm, MM_SLOTS_HASH_SHIFT)];
	hlist_for_each_entry(mm_slot, node, bucket, link) {
		if (mm == mm_slot->mm)
			return mm_slot;
	}
	return NULL;
}

static void insert_to_mm_slots_hash(struct mm_struct *mm,
				    struct mm_slot *mm_slot)
{
	struct hlist_head *bucket;

	bucket = &mm_slots_hash[hash_ptr(mm, MM_SLOTS_HASH_SHIFT)];
	mm_slot->mm = mm;
	hlist_add_head(&mm_slot->link, bucket);
}

static inline int in_stable_tree(struct rmap_item *rmap_item)
{
	return rmap_item->address & STABLE_FLAG;
}

/*
 * ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's
 * page tables after it has exited.  There is no exit flag to test for:
 * instead we rely on mm_users having dropped to zero, which is what
 * happens once all other references to the mm are gone.
 */
static inline bool ksm_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0;
}

/*
 * We use break_ksm to break COW on a ksm page: it's a stripped down
 *
 *	if (get_user_pages(current, mm, addr, 1, 1, 1, &page, NULL) == 1)
 *		put_page(page);
 *
 * but taking great care only to touch a ksm page, in a VM_MERGEABLE vma,
 * in case the application has unmapped and remapped mm,addr meanwhile.
 * Could a ksm page appear anywhere else?  Actually yes, in a VM_PFNMAP
 * mmap of /dev/mem or /dev/kmem, where we would not want to touch it.
 */
329static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
330{
331 struct page *page;
332 int ret = 0;
333
334 do {
335 cond_resched();
336 page = follow_page(vma, addr, FOLL_GET);
337 if (IS_ERR_OR_NULL(page))
338 break;
339 if (PageKsm(page))
340 ret = handle_mm_fault(vma->vm_mm, vma, addr,
341 FAULT_FLAG_WRITE);
342 else
343 ret = VM_FAULT_WRITE;
344 put_page(page);
345 } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_OOM)));
	/*
	 * We must loop because handle_mm_fault() may back out if there's
	 * any difficulty e.g. if pte accessed bit gets updated concurrently.
	 *
	 * VM_FAULT_WRITE is what we have been hoping for: it indicates that
	 * COW has been broken, even if the vma does not permit VM_WRITE;
	 * but note that a concurrent fault might break PageKsm for us.
	 *
	 * VM_FAULT_SIGBUS could occur if we race with truncation of the
	 * backing file, which also invalidates anonymous pages: that's
	 * okay, that truncation will have unmapped the PageKsm for us.
	 *
	 * VM_FAULT_OOM is unlikely here, since ksmd has no mm of its own
	 * and is usually breaking ksm just to undo a merge it made a
	 * moment before; but a fault in a limited mem_cgroup can still
	 * fail that way.  If it does, we might have more kernel pages
	 * allocated than we're counting as nodes in the stable tree;
	 * the error is unlikely to actually persist, so just report -ENOMEM.
	 */
374 return (ret & VM_FAULT_OOM) ? -ENOMEM : 0;
375}
376
377static void break_cow(struct rmap_item *rmap_item)
378{
379 struct mm_struct *mm = rmap_item->mm;
380 unsigned long addr = rmap_item->address;
381 struct vm_area_struct *vma;

	/*
	 * It is not an accident that whenever we want to break COW
	 * to undo, we also need to drop a reference to the anon_vma.
	 */
387 put_anon_vma(rmap_item->anon_vma);
388
389 down_read(&mm->mmap_sem);
390 if (ksm_test_exit(mm))
391 goto out;
392 vma = find_vma(mm, addr);
393 if (!vma || vma->vm_start > addr)
394 goto out;
395 if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
396 goto out;
397 break_ksm(vma, addr);
398out:
399 up_read(&mm->mmap_sem);
400}
401
402static struct page *page_trans_compound_anon(struct page *page)
403{
404 if (PageTransCompound(page)) {
405 struct page *head = compound_trans_head(page);
		/*
		 * head may actually be split and freed from under
		 * us but it's ok here.
		 */
410 if (PageAnon(head))
411 return head;
412 }
413 return NULL;
414}
415
416static struct page *get_mergeable_page(struct rmap_item *rmap_item)
417{
418 struct mm_struct *mm = rmap_item->mm;
419 unsigned long addr = rmap_item->address;
420 struct vm_area_struct *vma;
421 struct page *page;
422
423 down_read(&mm->mmap_sem);
424 if (ksm_test_exit(mm))
425 goto out;
426 vma = find_vma(mm, addr);
427 if (!vma || vma->vm_start > addr)
428 goto out;
429 if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
430 goto out;
431
432 page = follow_page(vma, addr, FOLL_GET);
433 if (IS_ERR_OR_NULL(page))
434 goto out;
435 if (PageAnon(page) || page_trans_compound_anon(page)) {
436 flush_anon_page(vma, page, addr);
437 flush_dcache_page(page);
438 } else {
439 put_page(page);
440out: page = NULL;
441 }
442 up_read(&mm->mmap_sem);
443 return page;
444}
445
446static void remove_node_from_stable_tree(struct stable_node *stable_node)
447{
448 struct rmap_item *rmap_item;
449 struct hlist_node *hlist;
450
451 hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) {
452 if (rmap_item->hlist.next)
453 ksm_pages_sharing--;
454 else
455 ksm_pages_shared--;
456 put_anon_vma(rmap_item->anon_vma);
457 rmap_item->address &= PAGE_MASK;
458 cond_resched();
459 }
460
461 rb_erase(&stable_node->node, &root_stable_tree);
462 free_stable_node(stable_node);
463}
464

/*
 * get_ksm_page: checks if the page indicated by the stable node
 * is still its ksm page, despite having held no reference to it.
 * In which case we can trust the content of the page, and it
 * returns the gotten page; but if the page has now been zapped,
 * remove the stale node from the stable tree and return NULL.
 *
 * You would expect the stable_node to hold a reference to the ksm page.
 * But if it increments the page's count, swapping out has to wait for
 * ksmd to come around again before it can free the page, which may take
 * seconds or even minutes: much too unpleasant a latency.  So instead we
 * keep the maximum count to which ksmd holds a reference at zero: while
 * checking the page here, we take only a transient reference under
 * rcu_read_lock, rechecking page->mapping against the expected stable_node
 * "mapping" both before and after get_page_unless_zero(), so that a page
 * which has been freed or reused meanwhile is never mistaken for a ksm page.
 */
494static struct page *get_ksm_page(struct stable_node *stable_node)
495{
496 struct page *page;
497 void *expected_mapping;
498
499 page = pfn_to_page(stable_node->kpfn);
500 expected_mapping = (void *)stable_node +
501 (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
502 rcu_read_lock();
503 if (page->mapping != expected_mapping)
504 goto stale;
505 if (!get_page_unless_zero(page))
506 goto stale;
507 if (page->mapping != expected_mapping) {
508 put_page(page);
509 goto stale;
510 }
511 rcu_read_unlock();
512 return page;
513stale:
514 rcu_read_unlock();
515 remove_node_from_stable_tree(stable_node);
516 return NULL;
517}
518

/*
 * Removing rmap_item from stable or unstable tree.
 * This function will clean the information from the stable/unstable tree.
 */
523static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
524{
525 if (rmap_item->address & STABLE_FLAG) {
526 struct stable_node *stable_node;
527 struct page *page;
528
529 stable_node = rmap_item->head;
530 page = get_ksm_page(stable_node);
531 if (!page)
532 goto out;
533
534 lock_page(page);
535 hlist_del(&rmap_item->hlist);
536 unlock_page(page);
537 put_page(page);
538
539 if (stable_node->hlist.first)
540 ksm_pages_sharing--;
541 else
542 ksm_pages_shared--;
543
544 put_anon_vma(rmap_item->anon_vma);
545 rmap_item->address &= PAGE_MASK;
546
547 } else if (rmap_item->address & UNSTABLE_FLAG) {
548 unsigned char age;

		/*
		 * Usually ksmd can and must skip the rb_erase, because
		 * root_unstable_tree was already reset to RB_ROOT.
		 * But be careful when an mm is exiting: do the rb_erase
		 * if this rmap_item was inserted by this scan, rather
		 * than left over from before.
		 */
556 age = (unsigned char)(ksm_scan.seqnr - rmap_item->address);
557 BUG_ON(age > 1);
558 if (!age)
559 rb_erase(&rmap_item->node, &root_unstable_tree);
560
561 ksm_pages_unshared--;
562 rmap_item->address &= PAGE_MASK;
563 }
564out:
565 cond_resched();
566}
567
568static void remove_trailing_rmap_items(struct mm_slot *mm_slot,
569 struct rmap_item **rmap_list)
570{
571 while (*rmap_list) {
572 struct rmap_item *rmap_item = *rmap_list;
573 *rmap_list = rmap_item->rmap_list;
574 remove_rmap_item_from_tree(rmap_item);
575 free_rmap_item(rmap_item);
576 }
577}
578

/*
 * Though it's very tempting to unmerge rmap_items from stable tree rather
 * than check every pte of a given vma, the locking doesn't quite work for
 * that - an rmap_item is assigned to the stable tree after inserting ksm
 * page and upping mmap_sem.  Nor does it fit with the way we skip dup'ing
 * rmap_items from parent to child at fork time (so as not to waste time
 * if exit comes before the next scan reaches it).
 *
 * Similarly, although we'd like to remove rmap_items (so updating counts
 * and freeing memory) when unmerging an area, it's easier to leave that
 * to the next pass of ksmd - consider, for example, how ksmd might be
 * in cmp_and_merge_page on one of the rmap_items we would be removing.
 */
592static int unmerge_ksm_pages(struct vm_area_struct *vma,
593 unsigned long start, unsigned long end)
594{
595 unsigned long addr;
596 int err = 0;
597
598 for (addr = start; addr < end && !err; addr += PAGE_SIZE) {
599 if (ksm_test_exit(vma->vm_mm))
600 break;
601 if (signal_pending(current))
602 err = -ERESTARTSYS;
603 else
604 err = break_ksm(vma, addr);
605 }
606 return err;
607}
608
609#ifdef CONFIG_SYSFS
/*
 * Only called through the sysfs control interface:
 */
613static int unmerge_and_remove_all_rmap_items(void)
614{
615 struct mm_slot *mm_slot;
616 struct mm_struct *mm;
617 struct vm_area_struct *vma;
618 int err = 0;
619
620 spin_lock(&ksm_mmlist_lock);
621 ksm_scan.mm_slot = list_entry(ksm_mm_head.mm_list.next,
622 struct mm_slot, mm_list);
623 spin_unlock(&ksm_mmlist_lock);
624
625 for (mm_slot = ksm_scan.mm_slot;
626 mm_slot != &ksm_mm_head; mm_slot = ksm_scan.mm_slot) {
627 mm = mm_slot->mm;
628 down_read(&mm->mmap_sem);
629 for (vma = mm->mmap; vma; vma = vma->vm_next) {
630 if (ksm_test_exit(mm))
631 break;
632 if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
633 continue;
634 err = unmerge_ksm_pages(vma,
635 vma->vm_start, vma->vm_end);
636 if (err)
637 goto error;
638 }
639
640 remove_trailing_rmap_items(mm_slot, &mm_slot->rmap_list);
641
642 spin_lock(&ksm_mmlist_lock);
643 ksm_scan.mm_slot = list_entry(mm_slot->mm_list.next,
644 struct mm_slot, mm_list);
645 if (ksm_test_exit(mm)) {
646 hlist_del(&mm_slot->link);
647 list_del(&mm_slot->mm_list);
648 spin_unlock(&ksm_mmlist_lock);
649
650 free_mm_slot(mm_slot);
651 clear_bit(MMF_VM_MERGEABLE, &mm->flags);
652 up_read(&mm->mmap_sem);
653 mmdrop(mm);
654 } else {
655 spin_unlock(&ksm_mmlist_lock);
656 up_read(&mm->mmap_sem);
657 }
658 }
659
660 ksm_scan.seqnr = 0;
661 return 0;
662
663error:
664 up_read(&mm->mmap_sem);
665 spin_lock(&ksm_mmlist_lock);
666 ksm_scan.mm_slot = &ksm_mm_head;
667 spin_unlock(&ksm_mmlist_lock);
668 return err;
669}
670#endif
671
672static u32 calc_checksum(struct page *page)
673{
674 u32 checksum;
675 void *addr = kmap_atomic(page, KM_USER0);
676 checksum = jhash2(addr, PAGE_SIZE / 4, 17);
677 kunmap_atomic(addr, KM_USER0);
678 return checksum;
679}
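
/*
 * Note: this checksum is never used to find a matching page - only to notice
 * whether a page has stayed unchanged between two scans (see the checksum
 * test in cmp_and_merge_page() below).  jhash2() hashes the page contents as
 * PAGE_SIZE/4 32-bit words with initval 17; full equality is always decided
 * by memcmp_pages().
 */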
680
681static int memcmp_pages(struct page *page1, struct page *page2)
682{
683 char *addr1, *addr2;
684 int ret;
685
686 addr1 = kmap_atomic(page1, KM_USER0);
687 addr2 = kmap_atomic(page2, KM_USER1);
688 ret = memcmp(addr1, addr2, PAGE_SIZE);
689 kunmap_atomic(addr2, KM_USER1);
690 kunmap_atomic(addr1, KM_USER0);
691 return ret;
692}
693
694static inline int pages_identical(struct page *page1, struct page *page2)
695{
696 return !memcmp_pages(page1, page2);
697}
698
699static int write_protect_page(struct vm_area_struct *vma, struct page *page,
700 pte_t *orig_pte)
701{
702 struct mm_struct *mm = vma->vm_mm;
703 unsigned long addr;
704 pte_t *ptep;
705 spinlock_t *ptl;
706 int swapped;
707 int err = -EFAULT;
708
709 addr = page_address_in_vma(page, vma);
710 if (addr == -EFAULT)
711 goto out;
712
713 BUG_ON(PageTransCompound(page));
714 ptep = page_check_address(page, mm, addr, &ptl, 0);
715 if (!ptep)
716 goto out;
717
718 if (pte_write(*ptep) || pte_dirty(*ptep)) {
719 pte_t entry;
720
721 swapped = PageSwapCache(page);
722 flush_cache_page(vma, addr, page_to_pfn(page));
		/*
		 * Ok this is tricky: when get_user_pages_fast() runs it
		 * doesn't take any lock, therefore the check that we are
		 * going to make with the pagecount against the mapcount is
		 * racy, and O_DIRECT can happen right after the check.
		 * So we clear the pte and flush the tlb before the check;
		 * this assures us that no O_DIRECT can happen after the
		 * check or in the middle of the check.
		 */
732 entry = ptep_clear_flush(vma, addr, ptep);
		/*
		 * Check that no O_DIRECT or similar I/O is in progress on
		 * the page.
		 */
737 if (page_mapcount(page) + 1 + swapped != page_count(page)) {
738 set_pte_at(mm, addr, ptep, entry);
739 goto out_unlock;
740 }
741 if (pte_dirty(entry))
742 set_page_dirty(page);
743 entry = pte_mkclean(pte_wrprotect(entry));
744 set_pte_at_notify(mm, addr, ptep, entry);
745 }
746 *orig_pte = *ptep;
747 err = 0;
748
749out_unlock:
750 pte_unmap_unlock(ptep, ptl);
751out:
752 return err;
753}
754
/**
 * replace_page - replace page in vma by new ksm page
 * @vma:      vma that holds the pte pointing to page
 * @page:     the page we are replacing by kpage
 * @kpage:    the ksm page we replace page by
 * @orig_pte: the original value of the pte
 *
 * Returns 0 on success, -EFAULT on failure.
 */
764static int replace_page(struct vm_area_struct *vma, struct page *page,
765 struct page *kpage, pte_t orig_pte)
766{
767 struct mm_struct *mm = vma->vm_mm;
768 pgd_t *pgd;
769 pud_t *pud;
770 pmd_t *pmd;
771 pte_t *ptep;
772 spinlock_t *ptl;
773 unsigned long addr;
774 int err = -EFAULT;
775
776 addr = page_address_in_vma(page, vma);
777 if (addr == -EFAULT)
778 goto out;
779
780 pgd = pgd_offset(mm, addr);
781 if (!pgd_present(*pgd))
782 goto out;
783
784 pud = pud_offset(pgd, addr);
785 if (!pud_present(*pud))
786 goto out;
787
788 pmd = pmd_offset(pud, addr);
789 BUG_ON(pmd_trans_huge(*pmd));
790 if (!pmd_present(*pmd))
791 goto out;
792
793 ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
794 if (!pte_same(*ptep, orig_pte)) {
795 pte_unmap_unlock(ptep, ptl);
796 goto out;
797 }
798
799 get_page(kpage);
800 page_add_anon_rmap(kpage, vma, addr);
801
802 flush_cache_page(vma, addr, pte_pfn(*ptep));
803 ptep_clear_flush(vma, addr, ptep);
804 set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot));
805
806 page_remove_rmap(page);
807 if (!page_mapped(page))
808 try_to_free_swap(page);
809 put_page(page);
810
811 pte_unmap_unlock(ptep, ptl);
812 err = 0;
813out:
814 return err;
815}

static int page_trans_compound_anon_split(struct page *page)
{
	int ret = 0;
	struct page *transhuge_head = page_trans_compound_anon(page);
	if (transhuge_head) {
		/* Get the reference on the head to split it */
		if (get_page_unless_zero(transhuge_head)) {
			/*
			 * Recheck we got the reference while the head
			 * was still anonymous.
			 */
			if (PageAnon(transhuge_head))
				ret = split_huge_page(transhuge_head);
			else
				/*
				 * Retry later if split_huge_page ran
				 * from under us.
				 */
				ret = 1;
			put_page(transhuge_head);
		} else
			/* Retry later if split_huge_page ran from under us */
			ret = 1;
	}
	return ret;
}
843
/*
 * try_to_merge_one_page - take two pages and merge them into one
 * @vma: the vma that holds the pte pointing to page
 * @page: the PageAnon page that we want to replace with kpage
 * @kpage: the PageKsm page that we want to map instead of page,
 *         or NULL the first time when we want to use page as kpage.
 *
 * This function returns 0 if the pages were merged, -EFAULT otherwise.
 */
853static int try_to_merge_one_page(struct vm_area_struct *vma,
854 struct page *page, struct page *kpage)
855{
856 pte_t orig_pte = __pte(0);
857 int err = -EFAULT;
858
859 if (page == kpage)
860 return 0;
861
862 if (!(vma->vm_flags & VM_MERGEABLE))
863 goto out;
864 if (PageTransCompound(page) && page_trans_compound_anon_split(page))
865 goto out;
866 BUG_ON(PageTransCompound(page));
867 if (!PageAnon(page))
868 goto out;

	/*
	 * We need the page lock to read a stable PageSwapCache in
	 * write_protect_page().  We use trylock_page() instead of
	 * lock_page() because we don't want to wait here - we
	 * prefer to continue scanning and merging different pages,
	 * then come back to this page when it is unlocked.
	 */
877 if (!trylock_page(page))
878 goto out;
	/*
	 * If this anonymous page is mapped only here, its pte may need
	 * to be write-protected.  If it's mapped elsewhere, all of its
	 * ptes are necessarily already write-protected.  But in either
	 * case, we need to lock and check page_count is not raised.
	 */
885 if (write_protect_page(vma, page, &orig_pte) == 0) {
886 if (!kpage) {
			/*
			 * While we hold page lock, upgrade page from
			 * PageAnon+anon_vma to PageKsm+NULL stable_node:
			 * stable_tree_insert() will update stable_node.
			 */
892 set_page_stable_node(page, NULL);
893 mark_page_accessed(page);
894 err = 0;
895 } else if (pages_identical(page, kpage))
896 err = replace_page(vma, page, kpage, orig_pte);
897 }
898
899 if ((vma->vm_flags & VM_LOCKED) && kpage && !err) {
900 munlock_vma_page(page);
901 if (!PageMlocked(kpage)) {
902 unlock_page(page);
903 lock_page(kpage);
904 mlock_vma_page(kpage);
905 page = kpage;
906 }
907 }
908
909 unlock_page(page);
910out:
911 return err;
912}
913
/*
 * try_to_merge_with_ksm_page - like try_to_merge_two_pages,
 * but no new kernel page is allocated: kpage must already be a ksm page.
 *
 * This function returns 0 if the pages were merged, -EFAULT otherwise.
 */
920static int try_to_merge_with_ksm_page(struct rmap_item *rmap_item,
921 struct page *page, struct page *kpage)
922{
923 struct mm_struct *mm = rmap_item->mm;
924 struct vm_area_struct *vma;
925 int err = -EFAULT;
926
927 down_read(&mm->mmap_sem);
928 if (ksm_test_exit(mm))
929 goto out;
930 vma = find_vma(mm, rmap_item->address);
931 if (!vma || vma->vm_start > rmap_item->address)
932 goto out;
933
934 err = try_to_merge_one_page(vma, page, kpage);
935 if (err)
936 goto out;

	/* Must get reference to anon_vma while still holding mmap_sem */
939 rmap_item->anon_vma = vma->anon_vma;
940 get_anon_vma(vma->anon_vma);
941out:
942 up_read(&mm->mmap_sem);
943 return err;
944}
945
/*
 * try_to_merge_two_pages - take two identical pages and prepare them
 * to be merged into one page.
 *
 * This function returns the kpage if we successfully merged two identical
 * pages into one ksm page, NULL otherwise.
 *
 * Note that this function upgrades page to ksm page: if one of the pages
 * is already a ksm page, try_to_merge_with_ksm_page should be used.
 */
956static struct page *try_to_merge_two_pages(struct rmap_item *rmap_item,
957 struct page *page,
958 struct rmap_item *tree_rmap_item,
959 struct page *tree_page)
960{
961 int err;
962
963 err = try_to_merge_with_ksm_page(rmap_item, page, NULL);
964 if (!err) {
965 err = try_to_merge_with_ksm_page(tree_rmap_item,
966 tree_page, page);
		/*
		 * If that fails, we have a ksm page with only one pte
		 * pointing to it: so break it.
		 */
971 if (err)
972 break_cow(rmap_item);
973 }
974 return err ? NULL : page;
975}
976
/*
 * stable_tree_search - search for page inside the stable tree
 *
 * This function checks if there is a page inside the stable tree
 * with identical content to the page that we are scanning right now.
 *
 * This function returns the stable tree node of identical content if found,
 * NULL otherwise.
 */
986static struct page *stable_tree_search(struct page *page)
987{
988 struct rb_node *node = root_stable_tree.rb_node;
989 struct stable_node *stable_node;
990
991 stable_node = page_stable_node(page);
992 if (stable_node) {
993 get_page(page);
994 return page;
995 }
996
997 while (node) {
998 struct page *tree_page;
999 int ret;
1000
1001 cond_resched();
1002 stable_node = rb_entry(node, struct stable_node, node);
1003 tree_page = get_ksm_page(stable_node);
1004 if (!tree_page)
1005 return NULL;
1006
1007 ret = memcmp_pages(page, tree_page);
1008
1009 if (ret < 0) {
1010 put_page(tree_page);
1011 node = node->rb_left;
1012 } else if (ret > 0) {
1013 put_page(tree_page);
1014 node = node->rb_right;
1015 } else
1016 return tree_page;
1017 }
1018
1019 return NULL;
1020}
1021
/*
 * stable_tree_insert - insert rmap_item pointing to new ksm page
 * into the stable tree.
 *
 * This function returns the stable tree node just allocated on success,
 * NULL otherwise.
 */
1029static struct stable_node *stable_tree_insert(struct page *kpage)
1030{
1031 struct rb_node **new = &root_stable_tree.rb_node;
1032 struct rb_node *parent = NULL;
1033 struct stable_node *stable_node;
1034
1035 while (*new) {
1036 struct page *tree_page;
1037 int ret;
1038
1039 cond_resched();
1040 stable_node = rb_entry(*new, struct stable_node, node);
1041 tree_page = get_ksm_page(stable_node);
1042 if (!tree_page)
1043 return NULL;
1044
1045 ret = memcmp_pages(kpage, tree_page);
1046 put_page(tree_page);
1047
1048 parent = *new;
1049 if (ret < 0)
1050 new = &parent->rb_left;
1051 else if (ret > 0)
1052 new = &parent->rb_right;
1053 else {
			/*
			 * It is not a bug that stable_tree_search() didn't
			 * find this node: because at that time our page was
			 * not yet write-protected, so may have changed since.
			 */
1059 return NULL;
1060 }
1061 }
1062
1063 stable_node = alloc_stable_node();
1064 if (!stable_node)
1065 return NULL;
1066
1067 rb_link_node(&stable_node->node, parent, new);
1068 rb_insert_color(&stable_node->node, &root_stable_tree);
1069
1070 INIT_HLIST_HEAD(&stable_node->hlist);
1071
1072 stable_node->kpfn = page_to_pfn(kpage);
1073 set_page_stable_node(kpage, stable_node);
1074
1075 return stable_node;
1076}
1077

/*
 * unstable_tree_search_insert - search for identical page,
 * else insert rmap_item into the unstable tree.
 *
 * This function searches for a page in the unstable tree identical to the
 * page currently being scanned; and if no identical page is found in the
 * tree, we insert rmap_item as a new object into the unstable tree.
 *
 * This function returns NULL if the page was not found in the tree,
 * else the rmap_item attached to the identical page is returned.
 *
 * This function does both searching and inserting, because they share
 * the same walking algorithm in an rbtree.
 */
1092static
1093struct rmap_item *unstable_tree_search_insert(struct rmap_item *rmap_item,
1094 struct page *page,
1095 struct page **tree_pagep)
1096
1097{
1098 struct rb_node **new = &root_unstable_tree.rb_node;
1099 struct rb_node *parent = NULL;
1100
1101 while (*new) {
1102 struct rmap_item *tree_rmap_item;
1103 struct page *tree_page;
1104 int ret;
1105
1106 cond_resched();
1107 tree_rmap_item = rb_entry(*new, struct rmap_item, node);
1108 tree_page = get_mergeable_page(tree_rmap_item);
1109 if (IS_ERR_OR_NULL(tree_page))
1110 return NULL;
1111
		/*
		 * Don't substitute a ksm page for a forked page.
		 */
1115 if (page == tree_page) {
1116 put_page(tree_page);
1117 return NULL;
1118 }
1119
1120 ret = memcmp_pages(page, tree_page);
1121
1122 parent = *new;
1123 if (ret < 0) {
1124 put_page(tree_page);
1125 new = &parent->rb_left;
1126 } else if (ret > 0) {
1127 put_page(tree_page);
1128 new = &parent->rb_right;
1129 } else {
1130 *tree_pagep = tree_page;
1131 return tree_rmap_item;
1132 }
1133 }
1134
1135 rmap_item->address |= UNSTABLE_FLAG;
1136 rmap_item->address |= (ksm_scan.seqnr & SEQNR_MASK);
1137 rb_link_node(&rmap_item->node, parent, new);
1138 rb_insert_color(&rmap_item->node, &root_unstable_tree);
1139
1140 ksm_pages_unshared++;
1141 return NULL;
1142}
1143
/*
 * stable_tree_append - add another rmap_item to the linked list of
 * rmap_items hanging off a given node of the stable tree, all sharing
 * the same ksm page.
 */
1149static void stable_tree_append(struct rmap_item *rmap_item,
1150 struct stable_node *stable_node)
1151{
1152 rmap_item->head = stable_node;
1153 rmap_item->address |= STABLE_FLAG;
1154 hlist_add_head(&rmap_item->hlist, &stable_node->hlist);
1155
1156 if (rmap_item->hlist.next)
1157 ksm_pages_sharing++;
1158 else
1159 ksm_pages_shared++;
1160}
1161
/*
 * cmp_and_merge_page - first see if page can be merged into the stable tree;
 * if not, compare checksum to previous and if it's the same, see if page can
 * be inserted into the unstable tree, or merged with a page already there and
 * both transferred to the stable tree.
 *
 * @page: the page that we are searching identical page to.
 * @rmap_item: the reverse mapping into the virtual address of this page
 */
1171static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
1172{
1173 struct rmap_item *tree_rmap_item;
1174 struct page *tree_page = NULL;
1175 struct stable_node *stable_node;
1176 struct page *kpage;
1177 unsigned int checksum;
1178 int err;
1179
1180 remove_rmap_item_from_tree(rmap_item);

	/* We first start with searching the page inside the stable tree */
1183 kpage = stable_tree_search(page);
1184 if (kpage) {
1185 err = try_to_merge_with_ksm_page(rmap_item, page, kpage);
1186 if (!err) {
			/*
			 * The page was successfully merged:
			 * add its rmap_item to the stable tree.
			 */
1191 lock_page(kpage);
1192 stable_tree_append(rmap_item, page_stable_node(kpage));
1193 unlock_page(kpage);
1194 }
1195 put_page(kpage);
1196 return;
1197 }

	/*
	 * If the hash value of the page has changed from the last time
	 * we calculated it, this page is changing frequently: therefore we
	 * don't want to insert it in the unstable tree, and we don't want
	 * to waste our time searching for something identical to it there.
	 */
1205 checksum = calc_checksum(page);
1206 if (rmap_item->oldchecksum != checksum) {
1207 rmap_item->oldchecksum = checksum;
1208 return;
1209 }
1210
1211 tree_rmap_item =
1212 unstable_tree_search_insert(rmap_item, page, &tree_page);
1213 if (tree_rmap_item) {
1214 kpage = try_to_merge_two_pages(rmap_item, page,
1215 tree_rmap_item, tree_page);
1216 put_page(tree_page);
		/*
		 * As soon as we merge this page, we want to remove the
		 * rmap_item of the page we have merged with from the unstable
		 * tree, and insert it instead as new node in the stable tree.
		 */
1222 if (kpage) {
1223 remove_rmap_item_from_tree(tree_rmap_item);
1224
1225 lock_page(kpage);
1226 stable_node = stable_tree_insert(kpage);
1227 if (stable_node) {
1228 stable_tree_append(tree_rmap_item, stable_node);
1229 stable_tree_append(rmap_item, stable_node);
1230 }
1231 unlock_page(kpage);

			/*
			 * If we fail to insert the page into the stable tree,
			 * we will have 2 virtual addresses that are pointing
			 * to a ksm page left outside the stable tree,
			 * in which case we need to break_cow on both.
			 */
1239 if (!stable_node) {
1240 break_cow(tree_rmap_item);
1241 break_cow(rmap_item);
1242 }
1243 }
1244 }
1245}
1246
1247static struct rmap_item *get_next_rmap_item(struct mm_slot *mm_slot,
1248 struct rmap_item **rmap_list,
1249 unsigned long addr)
1250{
1251 struct rmap_item *rmap_item;
1252
1253 while (*rmap_list) {
1254 rmap_item = *rmap_list;
1255 if ((rmap_item->address & PAGE_MASK) == addr)
1256 return rmap_item;
1257 if (rmap_item->address > addr)
1258 break;
1259 *rmap_list = rmap_item->rmap_list;
1260 remove_rmap_item_from_tree(rmap_item);
1261 free_rmap_item(rmap_item);
1262 }
1263
1264 rmap_item = alloc_rmap_item();
1265 if (rmap_item) {
		/* It has already been zeroed */
1267 rmap_item->mm = mm_slot->mm;
1268 rmap_item->address = addr;
1269 rmap_item->rmap_list = *rmap_list;
1270 *rmap_list = rmap_item;
1271 }
1272 return rmap_item;
1273}
1274
1275static struct rmap_item *scan_get_next_rmap_item(struct page **page)
1276{
1277 struct mm_struct *mm;
1278 struct mm_slot *slot;
1279 struct vm_area_struct *vma;
1280 struct rmap_item *rmap_item;
1281
1282 if (list_empty(&ksm_mm_head.mm_list))
1283 return NULL;
1284
1285 slot = ksm_scan.mm_slot;
1286 if (slot == &ksm_mm_head) {
		/*
		 * A number of pages can hang around indefinitely on per-cpu
		 * pagevecs, raised page count preventing write_protect_page
		 * from merging them.  Though it doesn't really matter much,
		 * it is puzzling to see some stuck in pages_volatile until
		 * other activity jostles them out; so drain them here
		 * (here rather than on entry to ksm_do_scan, so that we
		 * don't IPI too often when pages_to_scan is set low).
		 */
1297 lru_add_drain_all();
1298
1299 root_unstable_tree = RB_ROOT;
1300
1301 spin_lock(&ksm_mmlist_lock);
1302 slot = list_entry(slot->mm_list.next, struct mm_slot, mm_list);
1303 ksm_scan.mm_slot = slot;
1304 spin_unlock(&ksm_mmlist_lock);
1305
		/*
		 * Although we tested list_empty() above, a racing __ksm_exit
		 * of the last mm on the list may have removed it since then.
		 */
1309 if (slot == &ksm_mm_head)
1310 return NULL;
1311next_mm:
1312 ksm_scan.address = 0;
1313 ksm_scan.rmap_list = &slot->rmap_list;
1314 }
1315
1316 mm = slot->mm;
1317 down_read(&mm->mmap_sem);
1318 if (ksm_test_exit(mm))
1319 vma = NULL;
1320 else
1321 vma = find_vma(mm, ksm_scan.address);
1322
1323 for (; vma; vma = vma->vm_next) {
1324 if (!(vma->vm_flags & VM_MERGEABLE))
1325 continue;
1326 if (ksm_scan.address < vma->vm_start)
1327 ksm_scan.address = vma->vm_start;
1328 if (!vma->anon_vma)
1329 ksm_scan.address = vma->vm_end;
1330
1331 while (ksm_scan.address < vma->vm_end) {
1332 if (ksm_test_exit(mm))
1333 break;
1334 *page = follow_page(vma, ksm_scan.address, FOLL_GET);
1335 if (IS_ERR_OR_NULL(*page)) {
1336 ksm_scan.address += PAGE_SIZE;
1337 cond_resched();
1338 continue;
1339 }
1340 if (PageAnon(*page) ||
1341 page_trans_compound_anon(*page)) {
1342 flush_anon_page(vma, *page, ksm_scan.address);
1343 flush_dcache_page(*page);
1344 rmap_item = get_next_rmap_item(slot,
1345 ksm_scan.rmap_list, ksm_scan.address);
1346 if (rmap_item) {
1347 ksm_scan.rmap_list =
1348 &rmap_item->rmap_list;
1349 ksm_scan.address += PAGE_SIZE;
1350 } else
1351 put_page(*page);
1352 up_read(&mm->mmap_sem);
1353 return rmap_item;
1354 }
1355 put_page(*page);
1356 ksm_scan.address += PAGE_SIZE;
1357 cond_resched();
1358 }
1359 }
1360
1361 if (ksm_test_exit(mm)) {
1362 ksm_scan.address = 0;
1363 ksm_scan.rmap_list = &slot->rmap_list;
1364 }
1365
	/*
	 * Nuke all the rmap_items that are above this current rmap:
	 * because there were no VM_MERGEABLE vmas with such addresses.
	 */
1369 remove_trailing_rmap_items(slot, ksm_scan.rmap_list);
1370
1371 spin_lock(&ksm_mmlist_lock);
1372 ksm_scan.mm_slot = list_entry(slot->mm_list.next,
1373 struct mm_slot, mm_list);
1374 if (ksm_scan.address == 0) {
		/*
		 * We've completed a full scan of all vmas, holding mmap_sem
		 * throughout, and found no VM_MERGEABLE: so do the same as
		 * __ksm_exit does to remove this mm from all our lists now.
		 * This applies either when cleaning up after __ksm_exit
		 * (but beware: we can reach here even before __ksm_exit),
		 * or when all VM_MERGEABLE areas have been unmapped (and
		 * mmap_sem then protects against race with MADV_MERGEABLE).
		 */
1384 hlist_del(&slot->link);
1385 list_del(&slot->mm_list);
1386 spin_unlock(&ksm_mmlist_lock);
1387
1388 free_mm_slot(slot);
1389 clear_bit(MMF_VM_MERGEABLE, &mm->flags);
1390 up_read(&mm->mmap_sem);
1391 mmdrop(mm);
1392 } else {
1393 spin_unlock(&ksm_mmlist_lock);
1394 up_read(&mm->mmap_sem);
1395 }

	/* Repeat until we've completed scanning the whole list */
1398 slot = ksm_scan.mm_slot;
1399 if (slot != &ksm_mm_head)
1400 goto next_mm;
1401
1402 ksm_scan.seqnr++;
1403 return NULL;
1404}
1405
/**
 * ksm_do_scan  - the ksm scanner main worker function.
 * @scan_npages:  number of pages we want to scan before we return.
 */
1410static void ksm_do_scan(unsigned int scan_npages)
1411{
1412 struct rmap_item *rmap_item;
1413 struct page *uninitialized_var(page);
1414
1415 while (scan_npages-- && likely(!freezing(current))) {
1416 cond_resched();
1417 rmap_item = scan_get_next_rmap_item(&page);
1418 if (!rmap_item)
1419 return;
1420 if (!PageKsm(page) || !in_stable_tree(rmap_item))
1421 cmp_and_merge_page(page, rmap_item);
1422 put_page(page);
1423 }
1424}
1425
1426static int ksmd_should_run(void)
1427{
1428 return (ksm_run & KSM_RUN_MERGE) && !list_empty(&ksm_mm_head.mm_list);
1429}
1430
1431static int ksm_scan_thread(void *nothing)
1432{
1433 set_freezable();
1434 set_user_nice(current, 5);
1435
1436 while (!kthread_should_stop()) {
1437 mutex_lock(&ksm_thread_mutex);
1438 if (ksmd_should_run())
1439 ksm_do_scan(ksm_thread_pages_to_scan);
1440 mutex_unlock(&ksm_thread_mutex);
1441
1442 try_to_freeze();
1443
1444 if (ksmd_should_run()) {
1445 schedule_timeout_interruptible(
1446 msecs_to_jiffies(ksm_thread_sleep_millisecs));
1447 } else {
1448 wait_event_freezable(ksm_thread_wait,
1449 ksmd_should_run() || kthread_should_stop());
1450 }
1451 }
1452 return 0;
1453}
1454
1455int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
1456 unsigned long end, int advice, unsigned long *vm_flags)
1457{
1458 struct mm_struct *mm = vma->vm_mm;
1459 int err;
1460
1461 switch (advice) {
1462 case MADV_MERGEABLE:
		/*
		 * Be somewhat over-protective for now!
		 */
1466 if (*vm_flags & (VM_MERGEABLE | VM_SHARED | VM_MAYSHARE |
1467 VM_PFNMAP | VM_IO | VM_DONTEXPAND |
1468 VM_RESERVED | VM_HUGETLB | VM_INSERTPAGE |
1469 VM_NONLINEAR | VM_MIXEDMAP | VM_SAO))
1470 return 0;
1471
1472 if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) {
1473 err = __ksm_enter(mm);
1474 if (err)
1475 return err;
1476 }
1477
1478 *vm_flags |= VM_MERGEABLE;
1479 break;
1480
1481 case MADV_UNMERGEABLE:
1482 if (!(*vm_flags & VM_MERGEABLE))
1483 return 0;
1484
1485 if (vma->anon_vma) {
1486 err = unmerge_ksm_pages(vma, start, end);
1487 if (err)
1488 return err;
1489 }
1490
1491 *vm_flags &= ~VM_MERGEABLE;
1492 break;
1493 }
1494
1495 return 0;
1496}
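
/*
 * Example (userspace, not kernel code): a minimal sketch of how an
 * application opts anonymous memory into KSM with madvise(MADV_MERGEABLE),
 * which ends up in ksm_madvise() above.  Error handling is trimmed.
 *
 *	#include <string.h>
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		size_t len = 64 << 20;
 *		char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *		if (buf == MAP_FAILED)
 *			return 1;
 *
 *		madvise(buf, len, MADV_MERGEABLE);	// mark vma VM_MERGEABLE
 *		memset(buf, 0x5a, len);			// identical pages for ksmd to merge
 *		pause();				// give ksmd time to scan
 *		return 0;
 *	}
 */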
1497
1498int __ksm_enter(struct mm_struct *mm)
1499{
1500 struct mm_slot *mm_slot;
1501 int needs_wakeup;
1502
1503 mm_slot = alloc_mm_slot();
1504 if (!mm_slot)
1505 return -ENOMEM;
1506
	/* Check ksm_run too?  Would need tighter locking */
1508 needs_wakeup = list_empty(&ksm_mm_head.mm_list);
1509
1510 spin_lock(&ksm_mmlist_lock);
1511 insert_to_mm_slots_hash(mm, mm_slot);
	/*
	 * Insert just behind the scanning cursor, to let the area settle
	 * down a little; when fork is followed by immediate exec, we don't
	 * want ksmd to waste time setting up and tearing down an rmap_list.
	 */
1517 list_add_tail(&mm_slot->mm_list, &ksm_scan.mm_slot->mm_list);
1518 spin_unlock(&ksm_mmlist_lock);
1519
1520 set_bit(MMF_VM_MERGEABLE, &mm->flags);
1521 atomic_inc(&mm->mm_count);
1522
1523 if (needs_wakeup)
1524 wake_up_interruptible(&ksm_thread_wait);
1525
1526 return 0;
1527}
1528
1529void __ksm_exit(struct mm_struct *mm)
1530{
1531 struct mm_slot *mm_slot;
1532 int easy_to_free = 0;
1533
	/*
	 * This process is exiting: if it's straightforward (as is the
	 * case when ksmd was never running), free mm_slot immediately.
	 * But if it's at the cursor or has rmap_items linked to it, use
	 * mmap_sem to synchronize with any break_cows before pagetables
	 * are freed, and leave the mm_slot on the list for ksmd to free.
	 * Beware: ksm may already have noticed it exiting and freed the slot.
	 */
1543 spin_lock(&ksm_mmlist_lock);
1544 mm_slot = get_mm_slot(mm);
1545 if (mm_slot && ksm_scan.mm_slot != mm_slot) {
1546 if (!mm_slot->rmap_list) {
1547 hlist_del(&mm_slot->link);
1548 list_del(&mm_slot->mm_list);
1549 easy_to_free = 1;
1550 } else {
1551 list_move(&mm_slot->mm_list,
1552 &ksm_scan.mm_slot->mm_list);
1553 }
1554 }
1555 spin_unlock(&ksm_mmlist_lock);
1556
1557 if (easy_to_free) {
1558 free_mm_slot(mm_slot);
1559 clear_bit(MMF_VM_MERGEABLE, &mm->flags);
1560 mmdrop(mm);
1561 } else if (mm_slot) {
1562 down_write(&mm->mmap_sem);
1563 up_write(&mm->mmap_sem);
1564 }
1565}
1566
1567struct page *ksm_does_need_to_copy(struct page *page,
1568 struct vm_area_struct *vma, unsigned long address)
1569{
1570 struct page *new_page;
1571
1572 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
1573 if (new_page) {
1574 copy_user_highpage(new_page, page, address, vma);
1575
1576 SetPageDirty(new_page);
1577 __SetPageUptodate(new_page);
1578 SetPageSwapBacked(new_page);
1579 __set_page_locked(new_page);
1580
1581 if (page_evictable(new_page, vma))
1582 lru_cache_add_lru(new_page, LRU_ACTIVE_ANON);
1583 else
1584 add_page_to_unevictable_list(new_page);
1585 }
1586
1587 return new_page;
1588}
1589
1590int page_referenced_ksm(struct page *page, struct mem_cgroup *memcg,
1591 unsigned long *vm_flags)
1592{
1593 struct stable_node *stable_node;
1594 struct rmap_item *rmap_item;
1595 struct hlist_node *hlist;
1596 unsigned int mapcount = page_mapcount(page);
1597 int referenced = 0;
1598 int search_new_forks = 0;
1599
1600 VM_BUG_ON(!PageKsm(page));
1601 VM_BUG_ON(!PageLocked(page));
1602
1603 stable_node = page_stable_node(page);
1604 if (!stable_node)
1605 return 0;
1606again:
1607 hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) {
1608 struct anon_vma *anon_vma = rmap_item->anon_vma;
1609 struct anon_vma_chain *vmac;
1610 struct vm_area_struct *vma;
1611
1612 anon_vma_lock(anon_vma);
1613 list_for_each_entry(vmac, &anon_vma->head, same_anon_vma) {
1614 vma = vmac->vma;
1615 if (rmap_item->address < vma->vm_start ||
1616 rmap_item->address >= vma->vm_end)
1617 continue;
			/*
			 * Initially we examine only the vma which covers this
			 * rmap_item; but later, if there is still work to do,
			 * we examine covering vmas in other mms: in case they
			 * were forked from the original since ksmd passed.
			 */
1624 if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
1625 continue;
1626
1627 if (memcg && !mm_match_cgroup(vma->vm_mm, memcg))
1628 continue;
1629
1630 referenced += page_referenced_one(page, vma,
1631 rmap_item->address, &mapcount, vm_flags);
1632 if (!search_new_forks || !mapcount)
1633 break;
1634 }
1635 anon_vma_unlock(anon_vma);
1636 if (!mapcount)
1637 goto out;
1638 }
1639 if (!search_new_forks++)
1640 goto again;
1641out:
1642 return referenced;
1643}
1644
1645int try_to_unmap_ksm(struct page *page, enum ttu_flags flags)
1646{
1647 struct stable_node *stable_node;
1648 struct hlist_node *hlist;
1649 struct rmap_item *rmap_item;
1650 int ret = SWAP_AGAIN;
1651 int search_new_forks = 0;
1652
1653 VM_BUG_ON(!PageKsm(page));
1654 VM_BUG_ON(!PageLocked(page));
1655
1656 stable_node = page_stable_node(page);
1657 if (!stable_node)
1658 return SWAP_FAIL;
1659again:
1660 hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) {
1661 struct anon_vma *anon_vma = rmap_item->anon_vma;
1662 struct anon_vma_chain *vmac;
1663 struct vm_area_struct *vma;
1664
1665 anon_vma_lock(anon_vma);
1666 list_for_each_entry(vmac, &anon_vma->head, same_anon_vma) {
1667 vma = vmac->vma;
1668 if (rmap_item->address < vma->vm_start ||
1669 rmap_item->address >= vma->vm_end)
1670 continue;
			/*
			 * Initially we examine only the vma which covers this
			 * rmap_item; but later, if there is still work to do,
			 * we examine covering vmas in other mms: in case they
			 * were forked from the original since ksmd passed.
			 */
1677 if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
1678 continue;
1679
1680 ret = try_to_unmap_one(page, vma,
1681 rmap_item->address, flags);
1682 if (ret != SWAP_AGAIN || !page_mapped(page)) {
1683 anon_vma_unlock(anon_vma);
1684 goto out;
1685 }
1686 }
1687 anon_vma_unlock(anon_vma);
1688 }
1689 if (!search_new_forks++)
1690 goto again;
1691out:
1692 return ret;
1693}
1694
1695#ifdef CONFIG_MIGRATION
1696int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *,
1697 struct vm_area_struct *, unsigned long, void *), void *arg)
1698{
1699 struct stable_node *stable_node;
1700 struct hlist_node *hlist;
1701 struct rmap_item *rmap_item;
1702 int ret = SWAP_AGAIN;
1703 int search_new_forks = 0;
1704
1705 VM_BUG_ON(!PageKsm(page));
1706 VM_BUG_ON(!PageLocked(page));
1707
1708 stable_node = page_stable_node(page);
1709 if (!stable_node)
1710 return ret;
1711again:
1712 hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) {
1713 struct anon_vma *anon_vma = rmap_item->anon_vma;
1714 struct anon_vma_chain *vmac;
1715 struct vm_area_struct *vma;
1716
1717 anon_vma_lock(anon_vma);
1718 list_for_each_entry(vmac, &anon_vma->head, same_anon_vma) {
1719 vma = vmac->vma;
1720 if (rmap_item->address < vma->vm_start ||
1721 rmap_item->address >= vma->vm_end)
1722 continue;
			/*
			 * Initially we examine only the vma which covers this
			 * rmap_item; but later, if there is still work to do,
			 * we examine covering vmas in other mms: in case they
			 * were forked from the original since ksmd passed.
			 */
1729 if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
1730 continue;
1731
1732 ret = rmap_one(page, vma, rmap_item->address, arg);
1733 if (ret != SWAP_AGAIN) {
1734 anon_vma_unlock(anon_vma);
1735 goto out;
1736 }
1737 }
1738 anon_vma_unlock(anon_vma);
1739 }
1740 if (!search_new_forks++)
1741 goto again;
1742out:
1743 return ret;
1744}
1745
1746void ksm_migrate_page(struct page *newpage, struct page *oldpage)
1747{
1748 struct stable_node *stable_node;
1749
1750 VM_BUG_ON(!PageLocked(oldpage));
1751 VM_BUG_ON(!PageLocked(newpage));
1752 VM_BUG_ON(newpage->mapping != oldpage->mapping);
1753
1754 stable_node = page_stable_node(newpage);
1755 if (stable_node) {
1756 VM_BUG_ON(stable_node->kpfn != page_to_pfn(oldpage));
1757 stable_node->kpfn = page_to_pfn(newpage);
1758 }
1759}
1760#endif
1761
1762#ifdef CONFIG_MEMORY_HOTREMOVE
1763static struct stable_node *ksm_check_stable_tree(unsigned long start_pfn,
1764 unsigned long end_pfn)
1765{
1766 struct rb_node *node;
1767
1768 for (node = rb_first(&root_stable_tree); node; node = rb_next(node)) {
1769 struct stable_node *stable_node;
1770
1771 stable_node = rb_entry(node, struct stable_node, node);
1772 if (stable_node->kpfn >= start_pfn &&
1773 stable_node->kpfn < end_pfn)
1774 return stable_node;
1775 }
1776 return NULL;
1777}
1778
1779static int ksm_memory_callback(struct notifier_block *self,
1780 unsigned long action, void *arg)
1781{
1782 struct memory_notify *mn = arg;
1783 struct stable_node *stable_node;
1784
1785 switch (action) {
1786 case MEM_GOING_OFFLINE:
		/*
		 * Keep it very simple for now: just lock out ksmd and
		 * MADV_UNMERGEABLE while any memory is going offline.
		 * mutex_lock_nested() is necessary because lockdep was
		 * alarmed that here we take ksm_thread_mutex inside the
		 * notifier chain mutex, and later take the notifier chain
		 * mutex inside ksm_thread_mutex to unlock it.  But that's
		 * safe because both are inside mem_hotplug_mutex.
		 */
1796 mutex_lock_nested(&ksm_thread_mutex, SINGLE_DEPTH_NESTING);
1797 break;
1798
1799 case MEM_OFFLINE:
		/*
		 * Most of the work is done by page migration; but there might
		 * be a few stable_nodes left over, still pointing to struct
		 * pages which have been offlined: prune those from the tree.
		 */
1805 while ((stable_node = ksm_check_stable_tree(mn->start_pfn,
1806 mn->start_pfn + mn->nr_pages)) != NULL)
1807 remove_node_from_stable_tree(stable_node);
		/* fallthrough */

1810 case MEM_CANCEL_OFFLINE:
1811 mutex_unlock(&ksm_thread_mutex);
1812 break;
1813 }
1814 return NOTIFY_OK;
1815}
1816#endif
1817
1818#ifdef CONFIG_SYSFS
/*
 * This all compiles without CONFIG_SYSFS, but is a waste of space.
 */

1823#define KSM_ATTR_RO(_name) \
1824 static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
1825#define KSM_ATTR(_name) \
1826 static struct kobj_attribute _name##_attr = \
1827 __ATTR(_name, 0644, _name##_show, _name##_store)
1828
1829static ssize_t sleep_millisecs_show(struct kobject *kobj,
1830 struct kobj_attribute *attr, char *buf)
1831{
1832 return sprintf(buf, "%u\n", ksm_thread_sleep_millisecs);
1833}
1834
1835static ssize_t sleep_millisecs_store(struct kobject *kobj,
1836 struct kobj_attribute *attr,
1837 const char *buf, size_t count)
1838{
1839 unsigned long msecs;
1840 int err;
1841
1842 err = strict_strtoul(buf, 10, &msecs);
1843 if (err || msecs > UINT_MAX)
1844 return -EINVAL;
1845
1846 ksm_thread_sleep_millisecs = msecs;
1847
1848 return count;
1849}
1850KSM_ATTR(sleep_millisecs);
1851
1852static ssize_t pages_to_scan_show(struct kobject *kobj,
1853 struct kobj_attribute *attr, char *buf)
1854{
1855 return sprintf(buf, "%u\n", ksm_thread_pages_to_scan);
1856}
1857
1858static ssize_t pages_to_scan_store(struct kobject *kobj,
1859 struct kobj_attribute *attr,
1860 const char *buf, size_t count)
1861{
1862 int err;
1863 unsigned long nr_pages;
1864
1865 err = strict_strtoul(buf, 10, &nr_pages);
1866 if (err || nr_pages > UINT_MAX)
1867 return -EINVAL;
1868
1869 ksm_thread_pages_to_scan = nr_pages;
1870
1871 return count;
1872}
1873KSM_ATTR(pages_to_scan);
1874
1875static ssize_t run_show(struct kobject *kobj, struct kobj_attribute *attr,
1876 char *buf)
1877{
1878 return sprintf(buf, "%u\n", ksm_run);
1879}
1880
1881static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr,
1882 const char *buf, size_t count)
1883{
1884 int err;
1885 unsigned long flags;
1886
1887 err = strict_strtoul(buf, 10, &flags);
1888 if (err || flags > UINT_MAX)
1889 return -EINVAL;
1890 if (flags > KSM_RUN_UNMERGE)
1891 return -EINVAL;

	/*
	 * KSM_RUN_MERGE sets ksmd running, and 0 stops it running.
	 * KSM_RUN_UNMERGE stops it running and unmerges all rmap_items,
	 * breaking COW to free the pages_shared (but leaves mm_slots
	 * on the list for when ksmd may be set running again).
	 */
1900 mutex_lock(&ksm_thread_mutex);
1901 if (ksm_run != flags) {
1902 ksm_run = flags;
1903 if (flags & KSM_RUN_UNMERGE) {
1904 int oom_score_adj;
1905
1906 oom_score_adj = test_set_oom_score_adj(OOM_SCORE_ADJ_MAX);
1907 err = unmerge_and_remove_all_rmap_items();
1908 test_set_oom_score_adj(oom_score_adj);
1909 if (err) {
1910 ksm_run = KSM_RUN_STOP;
1911 count = err;
1912 }
1913 }
1914 }
1915 mutex_unlock(&ksm_thread_mutex);
1916
1917 if (flags & KSM_RUN_MERGE)
1918 wake_up_interruptible(&ksm_thread_wait);
1919
1920 return count;
1921}
1922KSM_ATTR(run);
1923
1924static ssize_t pages_shared_show(struct kobject *kobj,
1925 struct kobj_attribute *attr, char *buf)
1926{
1927 return sprintf(buf, "%lu\n", ksm_pages_shared);
1928}
1929KSM_ATTR_RO(pages_shared);
1930
1931static ssize_t pages_sharing_show(struct kobject *kobj,
1932 struct kobj_attribute *attr, char *buf)
1933{
1934 return sprintf(buf, "%lu\n", ksm_pages_sharing);
1935}
1936KSM_ATTR_RO(pages_sharing);
1937
1938static ssize_t pages_unshared_show(struct kobject *kobj,
1939 struct kobj_attribute *attr, char *buf)
1940{
1941 return sprintf(buf, "%lu\n", ksm_pages_unshared);
1942}
1943KSM_ATTR_RO(pages_unshared);
1944
1945static ssize_t pages_volatile_show(struct kobject *kobj,
1946 struct kobj_attribute *attr, char *buf)
1947{
1948 long ksm_pages_volatile;
1949
1950 ksm_pages_volatile = ksm_rmap_items - ksm_pages_shared
1951 - ksm_pages_sharing - ksm_pages_unshared;
	/*
	 * It was not worth any locking to calculate that statistic,
	 * but it might therefore sometimes be negative: conceal that.
	 */
1956 if (ksm_pages_volatile < 0)
1957 ksm_pages_volatile = 0;
1958 return sprintf(buf, "%ld\n", ksm_pages_volatile);
1959}
1960KSM_ATTR_RO(pages_volatile);
1961
1962static ssize_t full_scans_show(struct kobject *kobj,
1963 struct kobj_attribute *attr, char *buf)
1964{
1965 return sprintf(buf, "%lu\n", ksm_scan.seqnr);
1966}
1967KSM_ATTR_RO(full_scans);
1968
1969static struct attribute *ksm_attrs[] = {
1970 &sleep_millisecs_attr.attr,
1971 &pages_to_scan_attr.attr,
1972 &run_attr.attr,
1973 &pages_shared_attr.attr,
1974 &pages_sharing_attr.attr,
1975 &pages_unshared_attr.attr,
1976 &pages_volatile_attr.attr,
1977 &full_scans_attr.attr,
1978 NULL,
1979};
1980
1981static struct attribute_group ksm_attr_group = {
1982 .attrs = ksm_attrs,
1983 .name = "ksm",
1984};
1985#endif
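
/*
 * Example (userspace, not kernel code): the attributes above appear under
 * /sys/kernel/mm/ksm/.  A minimal sketch, run as root, that starts ksmd and
 * reads back one of the counters; error handling is trimmed.
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		FILE *f = fopen("/sys/kernel/mm/ksm/run", "w");
 *		if (!f)
 *			return 1;
 *		fputs("1", f);			// KSM_RUN_MERGE: start ksmd
 *		fclose(f);
 *
 *		f = fopen("/sys/kernel/mm/ksm/pages_sharing", "r");
 *		if (f) {
 *			unsigned long val;
 *			if (fscanf(f, "%lu", &val) == 1)
 *				printf("pages_sharing: %lu\n", val);
 *			fclose(f);
 *		}
 *		return 0;
 *	}
 */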
1986
1987static int __init ksm_init(void)
1988{
1989 struct task_struct *ksm_thread;
1990 int err;
1991
1992 err = ksm_slab_init();
1993 if (err)
1994 goto out;
1995
1996 ksm_thread = kthread_run(ksm_scan_thread, NULL, "ksmd");
1997 if (IS_ERR(ksm_thread)) {
1998 printk(KERN_ERR "ksm: creating kthread failed\n");
1999 err = PTR_ERR(ksm_thread);
2000 goto out_free;
2001 }
2002
2003#ifdef CONFIG_SYSFS
2004 err = sysfs_create_group(mm_kobj, &ksm_attr_group);
2005 if (err) {
2006 printk(KERN_ERR "ksm: register sysfs failed\n");
2007 kthread_stop(ksm_thread);
2008 goto out_free;
2009 }
2010#else
2011 ksm_run = KSM_RUN_MERGE;
2012
2013#endif
2014
2015#ifdef CONFIG_MEMORY_HOTREMOVE
	/*
	 * Choose a high priority since the callback takes ksm_thread_mutex:
	 * later callbacks could only be taking locks which nest within that.
	 */
2020 hotplug_memory_notifier(ksm_memory_callback, 100);
2021#endif
2022 return 0;
2023
2024out_free:
2025 ksm_slab_free();
2026out:
2027 return err;
2028}
2029module_init(ksm_init)
2030