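/*
 * mm/rmap.c - physical page reverse mapping
 *
 * Maintains the data structures that map a physical page back to every
 * virtual mapping of it: anonymous pages are tracked through anon_vma
 * chains, file pages through the address_space i_mmap interval tree.
 * This is what lets the kernel unmap, age or migrate a page given only
 * its struct page.
 */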
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/backing-dev.h>
#include <linux/page_idle.h>

#include <asm/tlbflush.h>

#include <trace/events/tlb.h>

#include "internal.h"
70
71static struct kmem_cache *anon_vma_cachep;
72static struct kmem_cache *anon_vma_chain_cachep;
73
74static inline struct anon_vma *anon_vma_alloc(void)
75{
76 struct anon_vma *anon_vma;
77
78 anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
79 if (anon_vma) {
80 atomic_set(&anon_vma->refcount, 1);
81 anon_vma->degree = 1;
82 anon_vma->parent = anon_vma;
 /*
  * Initialise the root of a fresh anon_vma to point to itself; if the
  * structure ends up below a parent anon_vma, anon_vma_fork() re-points
  * root at the parent's root.
  */
87 anon_vma->root = anon_vma;
88 }
89
90 return anon_vma;
91}
92
93static inline void anon_vma_free(struct anon_vma *anon_vma)
94{
95 VM_BUG_ON(atomic_read(&anon_vma->refcount));

 /*
  * Synchronize against page_lock_anon_vma_read(): a reader that lost
  * the race with the final refcount drop may still be inside the root
  * rwsem critical section, so take and release that lock once before
  * freeing the structure out from under it.
  */
114 might_sleep();
115 if (rwsem_is_locked(&anon_vma->root->rwsem)) {
116 anon_vma_lock_write(anon_vma);
117 anon_vma_unlock_write(anon_vma);
118 }
119
120 kmem_cache_free(anon_vma_cachep, anon_vma);
121}
122
123static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp)
124{
125 return kmem_cache_alloc(anon_vma_chain_cachep, gfp);
126}
127
128static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
129{
130 kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
131}
132
133static void anon_vma_chain_link(struct vm_area_struct *vma,
134 struct anon_vma_chain *avc,
135 struct anon_vma *anon_vma)
136{
137 avc->vma = vma;
138 avc->anon_vma = anon_vma;
139 list_add(&avc->same_vma, &vma->anon_vma_chain);
140 anon_vma_interval_tree_insert(avc, &anon_vma->rb_root);
141}
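
/**
 * anon_vma_prepare - attach an anon_vma to a memory region
 * @vma: the memory region in question
 *
 * Make sure the mapping described by @vma has an anon_vma attached, so
 * that anonymous pages faulted into it can be added to the reverse map.
 * The anon_vma is allocated lazily, on the first anonymous fault, and a
 * mergeable neighbour's anon_vma is reused when possible to keep the
 * number of anon_vma structures down.  Returns 0 on success, -ENOMEM on
 * allocation failure.
 */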
170int anon_vma_prepare(struct vm_area_struct *vma)
171{
172 struct anon_vma *anon_vma = vma->anon_vma;
173 struct anon_vma_chain *avc;
174
175 might_sleep();
176 if (unlikely(!anon_vma)) {
177 struct mm_struct *mm = vma->vm_mm;
178 struct anon_vma *allocated;
179
180 avc = anon_vma_chain_alloc(GFP_KERNEL);
181 if (!avc)
182 goto out_enomem;
183
184 anon_vma = find_mergeable_anon_vma(vma);
185 allocated = NULL;
186 if (!anon_vma) {
187 anon_vma = anon_vma_alloc();
188 if (unlikely(!anon_vma))
189 goto out_enomem_free_avc;
190 allocated = anon_vma;
191 }
192
193 anon_vma_lock_write(anon_vma);
194
195 spin_lock(&mm->page_table_lock);
196 if (likely(!vma->anon_vma)) {
197 vma->anon_vma = anon_vma;
198 anon_vma_chain_link(vma, avc, anon_vma);
199
200 anon_vma->degree++;
201 allocated = NULL;
202 avc = NULL;
203 }
204 spin_unlock(&mm->page_table_lock);
205 anon_vma_unlock_write(anon_vma);
206
207 if (unlikely(allocated))
208 put_anon_vma(allocated);
209 if (unlikely(avc))
210 anon_vma_chain_free(avc);
211 }
212 return 0;
213
214 out_enomem_free_avc:
215 anon_vma_chain_free(avc);
216 out_enomem:
217 return -ENOMEM;
218}
219
220
221
222
223
224
225
226
227
228static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma)
229{
230 struct anon_vma *new_root = anon_vma->root;
231 if (new_root != root) {
232 if (WARN_ON_ONCE(root))
233 up_write(&root->rwsem);
234 root = new_root;
235 down_write(&root->rwsem);
236 }
237 return root;
238}
239
240static inline void unlock_anon_vma_root(struct anon_vma *root)
241{
242 if (root)
243 up_write(&root->rwsem);
244}
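
/*
 * anon_vma_clone - clone the anon_vma chain from src to dst
 *
 * Chain dst to every anon_vma that src is chained to, so that pages
 * already mapped through src stay reachable from dst (used when a VMA
 * is split, copied or forked).  If dst has no anon_vma of its own yet,
 * an existing anon_vma with no other users may be picked for it.
 * Returns 0 on success, -ENOMEM on failure, in which case the partially
 * built chain is torn down again.
 */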
258int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
259{
260 struct anon_vma_chain *avc, *pavc;
261 struct anon_vma *root = NULL;
262
263 list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
264 struct anon_vma *anon_vma;
265
266 avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN);
267 if (unlikely(!avc)) {
268 unlock_anon_vma_root(root);
269 root = NULL;
270 avc = anon_vma_chain_alloc(GFP_KERNEL);
271 if (!avc)
272 goto enomem_failure;
273 }
274 anon_vma = pavc->anon_vma;
275 root = lock_anon_vma_root(root, anon_vma);
276 anon_vma_chain_link(dst, avc, anon_vma);

 /*
  * If dst has no anon_vma yet, reuse an existing one that currently
  * backs no VMA and has only a single child (degree < 2); src's own
  * anon_vma is deliberately never reused here.
  */
286 if (!dst->anon_vma && anon_vma != src->anon_vma &&
287 anon_vma->degree < 2)
288 dst->anon_vma = anon_vma;
289 }
290 if (dst->anon_vma)
291 dst->anon_vma->degree++;
292 unlock_anon_vma_root(root);
293 return 0;
294
295 enomem_failure:
 /*
  * Clear dst->anon_vma so that unlink_anon_vmas() below does not
  * decrement a degree that was never incremented on this path; callers
  * ignore dst->anon_vma when anon_vma_clone() fails, so this is safe.
  */
302 dst->anon_vma = NULL;
303 unlink_anon_vmas(dst);
304 return -ENOMEM;
305}
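
/*
 * anon_vma_fork - give a child VMA its anon_vma at fork time
 *
 * Attach @vma (the child) to the anon_vmas of @pvma (the parent) via
 * anon_vma_clone() and, unless one of them could be reused directly,
 * allocate a fresh anon_vma for the child that is chained below the
 * parent's.  Returns 0 on success, -ENOMEM on failure.
 */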
312int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
313{
314 struct anon_vma_chain *avc;
315 struct anon_vma *anon_vma;
316 int error;
317
318
319 if (!pvma->anon_vma)
320 return 0;
321
322
323 vma->anon_vma = NULL;

 /*
  * First attach the child to the parent's anon_vmas; anon_vma_clone()
  * may also pick one of them as vma->anon_vma directly.
  */
329 error = anon_vma_clone(vma, pvma);
330 if (error)
331 return error;
332
333
334 if (vma->anon_vma)
335 return 0;
336
337
338 anon_vma = anon_vma_alloc();
339 if (!anon_vma)
340 goto out_error;
341 avc = anon_vma_chain_alloc(GFP_KERNEL);
342 if (!avc)
343 goto out_error_free_anon_vma;
344
345
346
347
348
349 anon_vma->root = pvma->anon_vma->root;
350 anon_vma->parent = pvma->anon_vma;
 /*
  * An anon_vma can outlive the process it belongs to, and its lock
  * lives in the root, so pin the root until this anon_vma is freed.
  */
356 get_anon_vma(anon_vma->root);
357
358 vma->anon_vma = anon_vma;
359 anon_vma_lock_write(anon_vma);
360 anon_vma_chain_link(vma, avc, anon_vma);
361 anon_vma->parent->degree++;
362 anon_vma_unlock_write(anon_vma);
363
364 return 0;
365
366 out_error_free_anon_vma:
367 put_anon_vma(anon_vma);
368 out_error:
369 unlink_anon_vmas(vma);
370 return -ENOMEM;
371}
372
373void unlink_anon_vmas(struct vm_area_struct *vma)
374{
375 struct anon_vma_chain *avc, *next;
376 struct anon_vma *root = NULL;

 /*
  * Unlink each anon_vma chained to the VMA.  The chain is ordered from
  * newest to oldest.
  */
382 list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
383 struct anon_vma *anon_vma = avc->anon_vma;
384
385 root = lock_anon_vma_root(root, anon_vma);
386 anon_vma_interval_tree_remove(avc, &anon_vma->rb_root);
387
388
389
390
391
392 if (RB_EMPTY_ROOT(&anon_vma->rb_root)) {
393 anon_vma->parent->degree--;
394 continue;
395 }
396
397 list_del(&avc->same_vma);
398 anon_vma_chain_free(avc);
399 }
400 if (vma->anon_vma)
401 vma->anon_vma->degree--;
402 unlock_anon_vma_root(root);
403
404
405
406
407
408
409 list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
410 struct anon_vma *anon_vma = avc->anon_vma;
411
412 VM_WARN_ON(anon_vma->degree);
413 put_anon_vma(anon_vma);
414
415 list_del(&avc->same_vma);
416 anon_vma_chain_free(avc);
417 }
418}
419
420static void anon_vma_ctor(void *data)
421{
422 struct anon_vma *anon_vma = data;
423
424 init_rwsem(&anon_vma->rwsem);
425 atomic_set(&anon_vma->refcount, 0);
426 anon_vma->rb_root = RB_ROOT;
427}
428
429void __init anon_vma_init(void)
430{
431 anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
432 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT,
433 anon_vma_ctor);
434 anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
435 SLAB_PANIC|SLAB_ACCOUNT);
436}
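
/*
 * page_get_anon_vma - take a reference on the anon_vma of an anonymous page
 *
 * There is no serialization against page_remove_rmap() here, so the best
 * this can do is return an anon_vma that is guaranteed to still exist
 * (it is pinned with a refcount) but that may no longer be the one the
 * page is mapped through.  Returns NULL if the page is not (or no
 * longer) mapped anonymously.
 */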
461struct anon_vma *page_get_anon_vma(struct page *page)
462{
463 struct anon_vma *anon_vma = NULL;
464 unsigned long anon_mapping;
465
466 rcu_read_lock();
467 anon_mapping = (unsigned long)READ_ONCE(page->mapping);
468 if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
469 goto out;
470 if (!page_mapped(page))
471 goto out;
472
473 anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
474 if (!atomic_inc_not_zero(&anon_vma->refcount)) {
475 anon_vma = NULL;
476 goto out;
477 }

 /*
  * SLAB_DESTROY_BY_RCU only guarantees that this memory is still an
  * anon_vma, not that it is still *this page's* anon_vma: if the page
  * was unmapped after the check above, drop the reference and give up.
  */
486 if (!page_mapped(page)) {
487 rcu_read_unlock();
488 put_anon_vma(anon_vma);
489 return NULL;
490 }
491out:
492 rcu_read_unlock();
493
494 return anon_vma;
495}
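
/*
 * Similar to page_get_anon_vma() but returns the anon_vma locked for
 * read.  The fast path trylocks the root rwsem under RCU; if that fails,
 * the anon_vma is pinned with a refcount, the lock is taken the slow
 * (sleeping) way and the temporary reference is dropped again.  Returns
 * NULL if the page is no longer mapped anonymously.
 */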
504struct anon_vma *page_lock_anon_vma_read(struct page *page)
505{
506 struct anon_vma *anon_vma = NULL;
507 struct anon_vma *root_anon_vma;
508 unsigned long anon_mapping;
509
510 rcu_read_lock();
511 anon_mapping = (unsigned long)READ_ONCE(page->mapping);
512 if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
513 goto out;
514 if (!page_mapped(page))
515 goto out;
516
517 anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
518 root_anon_vma = READ_ONCE(anon_vma->root);
519 if (down_read_trylock(&root_anon_vma->rwsem)) {
520
521
522
523
524
525 if (!page_mapped(page)) {
526 up_read(&root_anon_vma->rwsem);
527 anon_vma = NULL;
528 }
529 goto out;
530 }
531
532
533 if (!atomic_inc_not_zero(&anon_vma->refcount)) {
534 anon_vma = NULL;
535 goto out;
536 }
537
538 if (!page_mapped(page)) {
539 rcu_read_unlock();
540 put_anon_vma(anon_vma);
541 return NULL;
542 }
543
544
545 rcu_read_unlock();
546 anon_vma_lock_read(anon_vma);
547
548 if (atomic_dec_and_test(&anon_vma->refcount)) {
 /*
  * We raced with the final put and now hold the last reference: the
  * anon_vma is no longer usable, so unlock, free it directly and
  * report no anon_vma to the caller.
  */
554 anon_vma_unlock_read(anon_vma);
555 __put_anon_vma(anon_vma);
556 anon_vma = NULL;
557 }
558
559 return anon_vma;
560
561out:
562 rcu_read_unlock();
563 return anon_vma;
564}
565
566void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
567{
568 anon_vma_unlock_read(anon_vma);
569}
570
571#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
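
/*
 * Flush TLB entries for pages whose unmapping was batched with
 * TTU_BATCH_FLUSH.  Both the local CPU and any remote CPUs recorded in
 * the batch's cpumask are flushed; this must happen before the unmapped
 * pages are freed or submitted for I/O, otherwise stale, possibly
 * writable, TLB entries could still reach them.
 */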
578void try_to_unmap_flush(void)
579{
 struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
581 int cpu;
582
583 if (!tlb_ubc->flush_required)
584 return;
585
586 cpu = get_cpu();
587
588 if (cpumask_test_cpu(cpu, &tlb_ubc->cpumask)) {
589 count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
590 local_flush_tlb();
591 trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
592 }
593
594 if (cpumask_any_but(&tlb_ubc->cpumask, cpu) < nr_cpu_ids)
595 flush_tlb_others(&tlb_ubc->cpumask, NULL, 0, TLB_FLUSH_ALL);
596 cpumask_clear(&tlb_ubc->cpumask);
597 tlb_ubc->flush_required = false;
598 tlb_ubc->writable = false;
599 put_cpu();
600}
601
602
603void try_to_unmap_flush_dirty(void)
604{
 struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
606
607 if (tlb_ubc->writable)
608 try_to_unmap_flush();
609}
610
611static void set_tlb_ubc_flush_pending(struct mm_struct *mm,
612 struct page *page, bool writable)
613{
 struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
615
616 cpumask_or(&tlb_ubc->cpumask, &tlb_ubc->cpumask, mm_cpumask(mm));
617 tlb_ubc->flush_required = true;

 /*
  * If the PTE was dirty it is safest to assume it was also writable;
  * the caller must use try_to_unmap_flush_dirty() or
  * try_to_unmap_flush() before the page is queued for I/O.
  */
624 if (writable)
625 tlb_ubc->writable = true;
626}
627
628
629
630
631
632static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
633{
634 bool should_defer = false;
635
636 if (!(flags & TTU_BATCH_FLUSH))
637 return false;
638
639
640 if (cpumask_any_but(mm_cpumask(mm), get_cpu()) < nr_cpu_ids)
641 should_defer = true;
642 put_cpu();
643
644 return should_defer;
645}
646#else
647static void set_tlb_ubc_flush_pending(struct mm_struct *mm,
648 struct page *page, bool writable)
649{
650}
651
652static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
653{
654 return false;
655}
656#endif
657
658
659
660
661
662unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
663{
664 unsigned long address;
665 if (PageAnon(page)) {
666 struct anon_vma *page__anon_vma = page_anon_vma(page);
667
668
669
670
671 if (!vma->anon_vma || !page__anon_vma ||
672 vma->anon_vma->root != page__anon_vma->root)
673 return -EFAULT;
674 } else if (page->mapping) {
675 if (!vma->vm_file || vma->vm_file->f_mapping != page->mapping)
676 return -EFAULT;
677 } else
678 return -EFAULT;
679 address = __vma_address(page, vma);
680 if (unlikely(address < vma->vm_start || address >= vma->vm_end))
681 return -EFAULT;
682 return address;
683}
684
685pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
686{
687 pgd_t *pgd;
688 pud_t *pud;
689 pmd_t *pmd = NULL;
690 pmd_t pmde;
691
692 pgd = pgd_offset(mm, address);
693 if (!pgd_present(*pgd))
694 goto out;
695
696 pud = pud_offset(pgd, address);
697 if (!pud_present(*pud))
698 goto out;
699
700 pmd = pmd_offset(pud, address);

 /*
  * Read the pmd once (the barrier() stops the compiler re-reading it)
  * and test present and !trans_huge on that snapshot: THP code can
  * change the pmd underneath us without the anon_vma write lock, and a
  * huge pmd must never be dereferenced as a pointer to a pte page.
  */
706 pmde = *pmd;
707 barrier();
708 if (!pmd_present(pmde) || pmd_trans_huge(pmde))
709 pmd = NULL;
710out:
711 return pmd;
712}
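
/*
 * __page_check_address - check that @page is mapped at @address into @mm
 *
 * On success the mapped pte is returned with its page table lock held
 * and *@ptlp set; the caller must release it with pte_unmap_unlock().
 * If @sync is false, a racy check of pte_present() may bail out early
 * without taking the lock.  Returns NULL if the page is not mapped
 * there.
 */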
723pte_t *__page_check_address(struct page *page, struct mm_struct *mm,
724 unsigned long address, spinlock_t **ptlp, int sync)
725{
726 pmd_t *pmd;
727 pte_t *pte;
728 spinlock_t *ptl;
729
730 if (unlikely(PageHuge(page))) {
731
732 pte = huge_pte_offset(mm, address);
733 if (!pte)
734 return NULL;
735
736 ptl = huge_pte_lockptr(page_hstate(page), mm, pte);
737 goto check;
738 }
739
740 pmd = mm_find_pmd(mm, address);
741 if (!pmd)
742 return NULL;
743
744 pte = pte_offset_map(pmd, address);
745
746 if (!sync && !pte_present(*pte)) {
747 pte_unmap(pte);
748 return NULL;
749 }
750
751 ptl = pte_lockptr(mm, pmd);
752check:
753 spin_lock(ptl);
754 if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) {
755 *ptlp = ptl;
756 return pte;
757 }
758 pte_unmap_unlock(pte, ptl);
759 return NULL;
760}
761
762
763
764
765
766
767
768
769
770
771int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
772{
773 unsigned long address;
774 pte_t *pte;
775 spinlock_t *ptl;
776
777 address = __vma_address(page, vma);
778 if (unlikely(address < vma->vm_start || address >= vma->vm_end))
779 return 0;
780 pte = page_check_address(page, vma->vm_mm, address, &ptl, 1);
781 if (!pte)
782 return 0;
783 pte_unmap_unlock(pte, ptl);
784
785 return 1;
786}
787
788#ifdef CONFIG_TRANSPARENT_HUGEPAGE
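
/*
 * page_check_address_transhuge - like __page_check_address(), but also
 * handles pages mapped by a huge pmd.  On success, returns true with
 * *@pmdp, *@ptep and *@ptlp describing the mapping and its locked page
 * table lock; *@ptep is NULL when the page is mapped by a huge pmd, and
 * *@pmdp is NULL for hugetlb pages.
 */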
796bool page_check_address_transhuge(struct page *page, struct mm_struct *mm,
797 unsigned long address, pmd_t **pmdp,
798 pte_t **ptep, spinlock_t **ptlp)
799{
800 pgd_t *pgd;
801 pud_t *pud;
802 pmd_t *pmd;
803 pte_t *pte;
804 spinlock_t *ptl;
805
806 if (unlikely(PageHuge(page))) {
807
808 pte = huge_pte_offset(mm, address);
809 if (!pte)
810 return false;
811
812 ptl = huge_pte_lockptr(page_hstate(page), mm, pte);
813 pmd = NULL;
814 goto check_pte;
815 }
816
817 pgd = pgd_offset(mm, address);
818 if (!pgd_present(*pgd))
819 return false;
820 pud = pud_offset(pgd, address);
821 if (!pud_present(*pud))
822 return false;
823 pmd = pmd_offset(pud, address);
824
825 if (pmd_trans_huge(*pmd)) {
826 ptl = pmd_lock(mm, pmd);
827 if (!pmd_present(*pmd))
828 goto unlock_pmd;
829 if (unlikely(!pmd_trans_huge(*pmd))) {
830 spin_unlock(ptl);
831 goto map_pte;
832 }
833
834 if (pmd_page(*pmd) != page)
835 goto unlock_pmd;
836
837 pte = NULL;
838 goto found;
839unlock_pmd:
840 spin_unlock(ptl);
841 return false;
842 } else {
843 pmd_t pmde = *pmd;
844
845 barrier();
846 if (!pmd_present(pmde) || pmd_trans_huge(pmde))
847 return false;
848 }
849map_pte:
850 pte = pte_offset_map(pmd, address);
851 if (!pte_present(*pte)) {
852 pte_unmap(pte);
853 return false;
854 }
855
856 ptl = pte_lockptr(mm, pmd);
857check_pte:
858 spin_lock(ptl);
859
860 if (!pte_present(*pte)) {
861 pte_unmap_unlock(pte, ptl);
862 return false;
863 }
864
865
866 if (pte_pfn(*pte) - page_to_pfn(page) >= hpage_nr_pages(page)) {
867 pte_unmap_unlock(pte, ptl);
868 return false;
869 }
870found:
871 *ptep = pte;
872 *pmdp = pmd;
873 *ptlp = ptl;
874 return true;
875}
876#endif
877
878struct page_referenced_arg {
879 int mapcount;
880 int referenced;
881 unsigned long vm_flags;
882 struct mem_cgroup *memcg;
883};
884
885
886
887static int page_referenced_one(struct page *page, struct vm_area_struct *vma,
888 unsigned long address, void *arg)
889{
890 struct mm_struct *mm = vma->vm_mm;
891 struct page_referenced_arg *pra = arg;
892 pmd_t *pmd;
893 pte_t *pte;
894 spinlock_t *ptl;
895 int referenced = 0;
896
897 if (!page_check_address_transhuge(page, mm, address, &pmd, &pte, &ptl))
898 return SWAP_AGAIN;
899
900 if (vma->vm_flags & VM_LOCKED) {
901 if (pte)
902 pte_unmap(pte);
903 spin_unlock(ptl);
904 pra->vm_flags |= VM_LOCKED;
905 return SWAP_FAIL;
906 }
907
908 if (pte) {
909 if (ptep_clear_flush_young_notify(vma, address, pte)) {
910
911
912
913
914
915
916
917 if (likely(!(vma->vm_flags & VM_SEQ_READ)))
918 referenced++;
919 }
920 pte_unmap(pte);
921 } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
922 if (pmdp_clear_flush_young_notify(vma, address, pmd))
923 referenced++;
924 } else {
925
926 WARN_ON_ONCE(1);
927 }
928 spin_unlock(ptl);
929
930 if (referenced)
931 clear_page_idle(page);
932 if (test_and_clear_page_young(page))
933 referenced++;
934
935 if (referenced) {
936 pra->referenced++;
937 pra->vm_flags |= vma->vm_flags;
938 }
939
940 pra->mapcount--;
941 if (!pra->mapcount)
942 return SWAP_SUCCESS;
943
944 return SWAP_AGAIN;
945}
946
947static bool invalid_page_referenced_vma(struct vm_area_struct *vma, void *arg)
948{
949 struct page_referenced_arg *pra = arg;
950 struct mem_cgroup *memcg = pra->memcg;
951
952 if (!mm_match_cgroup(vma->vm_mm, memcg))
953 return true;
954
955 return false;
956}
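
/**
 * page_referenced - test if the page was referenced
 * @page: the page to test
 * @is_locked: caller holds the page lock
 * @memcg: only count references from VMAs belonging to this memory cgroup
 * @vm_flags: collects the vm_flags of the VMAs that referenced the page
 *
 * Quick test_and_clear_referenced over all mappings of a page: returns
 * the number of ptes (or pmds) from which the page was referenced.
 */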
968int page_referenced(struct page *page,
969 int is_locked,
970 struct mem_cgroup *memcg,
971 unsigned long *vm_flags)
972{
973 int ret;
974 int we_locked = 0;
975 struct page_referenced_arg pra = {
976 .mapcount = total_mapcount(page),
977 .memcg = memcg,
978 };
979 struct rmap_walk_control rwc = {
980 .rmap_one = page_referenced_one,
981 .arg = (void *)&pra,
982 .anon_lock = page_lock_anon_vma_read,
983 };
984
985 *vm_flags = 0;
986 if (!page_mapped(page))
987 return 0;
988
989 if (!page_rmapping(page))
990 return 0;
991
992 if (!is_locked && (!PageAnon(page) || PageKsm(page))) {
993 we_locked = trylock_page(page);
994 if (!we_locked)
995 return 1;
996 }
997
998
999
1000
1001
1002
1003 if (memcg) {
1004 rwc.invalid_vma = invalid_page_referenced_vma;
1005 }
1006
1007 ret = rmap_walk(page, &rwc);
1008 *vm_flags = pra.vm_flags;
1009
1010 if (we_locked)
1011 unlock_page(page);
1012
1013 return pra.referenced;
1014}
1015
1016static int page_mkclean_one(struct page *page, struct vm_area_struct *vma,
1017 unsigned long address, void *arg)
1018{
1019 struct mm_struct *mm = vma->vm_mm;
1020 pte_t *pte;
1021 spinlock_t *ptl;
1022 int ret = 0;
1023 int *cleaned = arg;
1024
1025 pte = page_check_address(page, mm, address, &ptl, 1);
1026 if (!pte)
1027 goto out;
1028
1029 if (pte_dirty(*pte) || pte_write(*pte)) {
1030 pte_t entry;
1031
1032 flush_cache_page(vma, address, pte_pfn(*pte));
1033 entry = ptep_clear_flush(vma, address, pte);
1034 entry = pte_wrprotect(entry);
1035 entry = pte_mkclean(entry);
1036 set_pte_at(mm, address, pte, entry);
1037 ret = 1;
1038 }
1039
1040 pte_unmap_unlock(pte, ptl);
1041
1042 if (ret) {
1043 mmu_notifier_invalidate_page(mm, address);
1044 (*cleaned)++;
1045 }
1046out:
1047 return SWAP_AGAIN;
1048}
1049
1050static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg)
1051{
1052 if (vma->vm_flags & VM_SHARED)
1053 return false;
1054
1055 return true;
1056}
1057
1058int page_mkclean(struct page *page)
1059{
1060 int cleaned = 0;
1061 struct address_space *mapping;
1062 struct rmap_walk_control rwc = {
1063 .arg = (void *)&cleaned,
1064 .rmap_one = page_mkclean_one,
1065 .invalid_vma = invalid_mkclean_vma,
1066 };
1067
1068 BUG_ON(!PageLocked(page));
1069
1070 if (!page_mapped(page))
1071 return 0;
1072
1073 mapping = page_mapping(page);
1074 if (!mapping)
1075 return 0;
1076
1077 rmap_walk(page, &rwc);
1078
1079 return cleaned;
1080}
1081EXPORT_SYMBOL_GPL(page_mkclean);
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma)
1094{
1095 struct anon_vma *anon_vma = vma->anon_vma;
1096
1097 page = compound_head(page);
1098
1099 VM_BUG_ON_PAGE(!PageLocked(page), page);
1100 VM_BUG_ON_VMA(!anon_vma, vma);
1101
1102 anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
1103
1104
1105
1106
1107
1108 WRITE_ONCE(page->mapping, (struct address_space *) anon_vma);
1109}
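
/**
 * __page_set_anon_rmap - set up a new anonymous rmap
 * @page: the page to add the mapping to
 * @vma: the vma the page belongs to
 * @address: the user virtual address mapped
 * @exclusive: the page is exclusively owned by the current process
 */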
1118static void __page_set_anon_rmap(struct page *page,
1119 struct vm_area_struct *vma, unsigned long address, int exclusive)
1120{
1121 struct anon_vma *anon_vma = vma->anon_vma;
1122
1123 BUG_ON(!anon_vma);
1124
1125 if (PageAnon(page))
1126 return;
1127
1128
1129
1130
1131
1132
1133 if (!exclusive)
1134 anon_vma = anon_vma->root;
1135
1136 anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
1137 page->mapping = (struct address_space *) anon_vma;
1138 page->index = linear_page_index(vma, address);
1139}
1140
1141
1142
1143
1144
1145
1146
1147static void __page_check_anon_rmap(struct page *page,
1148 struct vm_area_struct *vma, unsigned long address)
1149{
1150#ifdef CONFIG_DEBUG_VM
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163 BUG_ON(page_anon_vma(page)->root != vma->anon_vma->root);
1164 BUG_ON(page_to_pgoff(page) != linear_page_index(vma, address));
1165#endif
1166}
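
/**
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page: the page to add the mapping to
 * @vma: the vma the page belongs to
 * @address: the user virtual address mapped
 * @compound: the page is mapped as a compound page (huge pmd)
 *
 * The caller needs to hold the pte lock, and the page must be locked in
 * the anon_vma case to serialize the mapping/index checks against
 * __page_set_anon_rmap().
 */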
1180void page_add_anon_rmap(struct page *page,
1181 struct vm_area_struct *vma, unsigned long address, bool compound)
1182{
1183 do_page_add_anon_rmap(page, vma, address, compound ? RMAP_COMPOUND : 0);
1184}
1185
1186
1187
1188
1189
1190
1191void do_page_add_anon_rmap(struct page *page,
1192 struct vm_area_struct *vma, unsigned long address, int flags)
1193{
1194 bool compound = flags & RMAP_COMPOUND;
1195 bool first;
1196
1197 if (compound) {
1198 atomic_t *mapcount;
1199 VM_BUG_ON_PAGE(!PageLocked(page), page);
1200 VM_BUG_ON_PAGE(!PageTransHuge(page), page);
1201 mapcount = compound_mapcount_ptr(page);
1202 first = atomic_inc_and_test(mapcount);
1203 } else {
1204 first = atomic_inc_and_test(&page->_mapcount);
1205 }
1206
1207 if (first) {
1208 int nr = compound ? hpage_nr_pages(page) : 1;
1209
1210
1211
1212
1213
1214
1215 if (compound)
1216 __inc_node_page_state(page, NR_ANON_THPS);
1217 __mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, nr);
1218 }
1219 if (unlikely(PageKsm(page)))
1220 return;
1221
1222 VM_BUG_ON_PAGE(!PageLocked(page), page);
1223
1224
1225 if (first)
1226 __page_set_anon_rmap(page, vma, address,
1227 flags & RMAP_EXCLUSIVE);
1228 else
1229 __page_check_anon_rmap(page, vma, address);
1230}
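
/**
 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
 * @page: the page to add the mapping to
 * @vma: the vma the page belongs to
 * @address: the user virtual address mapped
 * @compound: the page is mapped by a huge pmd
 *
 * Same as page_add_anon_rmap but must only be called on *new* pages:
 * the mapcount can be set directly rather than inc-and-tested, and the
 * page does not need to be locked.
 */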
1243void page_add_new_anon_rmap(struct page *page,
1244 struct vm_area_struct *vma, unsigned long address, bool compound)
1245{
1246 int nr = compound ? hpage_nr_pages(page) : 1;
1247
1248 VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
1249 __SetPageSwapBacked(page);
1250 if (compound) {
1251 VM_BUG_ON_PAGE(!PageTransHuge(page), page);
1252
1253 atomic_set(compound_mapcount_ptr(page), 0);
1254 __inc_node_page_state(page, NR_ANON_THPS);
1255 } else {
1256
1257 VM_BUG_ON_PAGE(PageTransCompound(page), page);
1258
1259 atomic_set(&page->_mapcount, 0);
1260 }
1261 __mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, nr);
1262 __page_set_anon_rmap(page, vma, address, 1);
1263}
1264
1265
1266
1267
1268
1269
1270
1271void page_add_file_rmap(struct page *page, bool compound)
1272{
1273 int i, nr = 1;
1274
1275 VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page);
1276 lock_page_memcg(page);
1277 if (compound && PageTransHuge(page)) {
1278 for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) {
1279 if (atomic_inc_and_test(&page[i]._mapcount))
1280 nr++;
1281 }
1282 if (!atomic_inc_and_test(compound_mapcount_ptr(page)))
1283 goto out;
1284 VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
1285 __inc_node_page_state(page, NR_SHMEM_PMDMAPPED);
1286 } else {
1287 if (PageTransCompound(page) && page_mapping(page)) {
1288 VM_WARN_ON_ONCE(!PageLocked(page));
1289
1290 SetPageDoubleMap(compound_head(page));
1291 if (PageMlocked(page))
1292 clear_page_mlock(compound_head(page));
1293 }
1294 if (!atomic_inc_and_test(&page->_mapcount))
1295 goto out;
1296 }
1297 __mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, nr);
1298 mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
1299out:
1300 unlock_page_memcg(page);
1301}
1302
1303static void page_remove_file_rmap(struct page *page, bool compound)
1304{
1305 int i, nr = 1;
1306
1307 VM_BUG_ON_PAGE(compound && !PageHead(page), page);
1308 lock_page_memcg(page);
1309
1310
1311 if (unlikely(PageHuge(page))) {
1312
1313 atomic_dec(compound_mapcount_ptr(page));
1314 goto out;
1315 }
1316
1317
1318 if (compound && PageTransHuge(page)) {
1319 for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) {
1320 if (atomic_add_negative(-1, &page[i]._mapcount))
1321 nr++;
1322 }
1323 if (!atomic_add_negative(-1, compound_mapcount_ptr(page)))
1324 goto out;
1325 VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
1326 __dec_node_page_state(page, NR_SHMEM_PMDMAPPED);
1327 } else {
1328 if (!atomic_add_negative(-1, &page->_mapcount))
1329 goto out;
1330 }
1331
1332
1333
1334
1335
1336
1337 __mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, -nr);
1338 mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
1339
1340 if (unlikely(PageMlocked(page)))
1341 clear_page_mlock(page);
1342out:
1343 unlock_page_memcg(page);
1344}
1345
1346static void page_remove_anon_compound_rmap(struct page *page)
1347{
1348 int i, nr;
1349
1350 if (!atomic_add_negative(-1, compound_mapcount_ptr(page)))
1351 return;
1352
1353
1354 if (unlikely(PageHuge(page)))
1355 return;
1356
1357 if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
1358 return;
1359
1360 __dec_node_page_state(page, NR_ANON_THPS);
1361
1362 if (TestClearPageDoubleMap(page)) {
1363
1364
1365
1366
1367 for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) {
1368 if (atomic_add_negative(-1, &page[i]._mapcount))
1369 nr++;
1370 }
1371 } else {
1372 nr = HPAGE_PMD_NR;
1373 }
1374
1375 if (unlikely(PageMlocked(page)))
1376 clear_page_mlock(page);
1377
1378 if (nr) {
1379 __mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, -nr);
1380 deferred_split_huge_page(page);
1381 }
1382}
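
/**
 * page_remove_rmap - take down pte mapping from a page
 * @page: page to remove mapping from
 * @compound: the mapping being removed is a huge pmd mapping
 *
 * The caller needs to hold the pte lock.
 */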
1391void page_remove_rmap(struct page *page, bool compound)
1392{
1393 if (!PageAnon(page))
1394 return page_remove_file_rmap(page, compound);
1395
1396 if (compound)
1397 return page_remove_anon_compound_rmap(page);
1398
1399
1400 if (!atomic_add_negative(-1, &page->_mapcount))
1401 return;
1402
1403
1404
1405
1406
1407
1408 __dec_node_page_state(page, NR_ANON_MAPPED);
1409
1410 if (unlikely(PageMlocked(page)))
1411 clear_page_mlock(page);
1412
1413 if (PageTransCompound(page))
1414 deferred_split_huge_page(compound_head(page));
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425}
1426
1427struct rmap_private {
1428 enum ttu_flags flags;
1429 int lazyfreed;
1430};
1431
1432
1433
1434
1435static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1436 unsigned long address, void *arg)
1437{
1438 struct mm_struct *mm = vma->vm_mm;
1439 pte_t *pte;
1440 pte_t pteval;
1441 spinlock_t *ptl;
1442 int ret = SWAP_AGAIN;
1443 struct rmap_private *rp = arg;
1444 enum ttu_flags flags = rp->flags;
1445
1446
1447 if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED))
1448 goto out;
1449
1450 if (flags & TTU_SPLIT_HUGE_PMD) {
1451 split_huge_pmd_address(vma, address,
1452 flags & TTU_MIGRATION, page);
1453
1454 if (page_mapcount(page) == 0)
1455 goto out;
1456 }
1457
1458 pte = page_check_address(page, mm, address, &ptl,
1459 PageTransCompound(page));
1460 if (!pte)
1461 goto out;

 /*
  * An mlock()ed page cannot be unmapped here, and a recently referenced
  * page is better kept mapped; the two checks below bail out in those
  * cases.
  */
1468 if (!(flags & TTU_IGNORE_MLOCK)) {
1469 if (vma->vm_flags & VM_LOCKED) {
1470
1471 if (!PageTransCompound(page)) {
1472
1473
1474
1475
1476 mlock_vma_page(page);
1477 }
1478 ret = SWAP_MLOCK;
1479 goto out_unmap;
1480 }
1481 if (flags & TTU_MUNLOCK)
1482 goto out_unmap;
1483 }
1484 if (!(flags & TTU_IGNORE_ACCESS)) {
1485 if (ptep_clear_flush_young_notify(vma, address, pte)) {
1486 ret = SWAP_FAIL;
1487 goto out_unmap;
1488 }
1489 }
1490
1491
1492 flush_cache_page(vma, address, page_to_pfn(page));
1493 if (should_defer_flush(mm, flags)) {
1494
1495
1496
1497
1498
1499
1500
1501 pteval = ptep_get_and_clear(mm, address, pte);
1502
1503 set_tlb_ubc_flush_pending(mm, page, pte_dirty(pteval));
1504 } else {
1505 pteval = ptep_clear_flush(vma, address, pte);
1506 }
1507
1508
1509 if (pte_dirty(pteval))
1510 set_page_dirty(page);
1511
1512
1513 update_hiwater_rss(mm);
1514
1515 if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
1516 if (PageHuge(page)) {
1517 hugetlb_count_sub(1 << compound_order(page), mm);
1518 } else {
1519 dec_mm_counter(mm, mm_counter(page));
1520 }
1521 set_pte_at(mm, address, pte,
1522 swp_entry_to_pte(make_hwpoison_entry(page)));
1523 } else if (pte_unused(pteval)) {
1524
1525
1526
1527
1528
1529 dec_mm_counter(mm, mm_counter(page));
1530 } else if (IS_ENABLED(CONFIG_MIGRATION) && (flags & TTU_MIGRATION)) {
1531 swp_entry_t entry;
1532 pte_t swp_pte;
1533
1534
1535
1536
1537
1538 entry = make_migration_entry(page, pte_write(pteval));
1539 swp_pte = swp_entry_to_pte(entry);
1540 if (pte_soft_dirty(pteval))
1541 swp_pte = pte_swp_mksoft_dirty(swp_pte);
1542 set_pte_at(mm, address, pte, swp_pte);
1543 } else if (PageAnon(page)) {
1544 swp_entry_t entry = { .val = page_private(page) };
1545 pte_t swp_pte;
1546
1547
1548
1549
1550 VM_BUG_ON_PAGE(!PageSwapCache(page), page);
1551
1552 if (!PageDirty(page) && (flags & TTU_LZFREE)) {
1553
1554 dec_mm_counter(mm, MM_ANONPAGES);
1555 rp->lazyfreed++;
1556 goto discard;
1557 }
1558
1559 if (swap_duplicate(entry) < 0) {
1560 set_pte_at(mm, address, pte, pteval);
1561 ret = SWAP_FAIL;
1562 goto out_unmap;
1563 }
1564 if (list_empty(&mm->mmlist)) {
1565 spin_lock(&mmlist_lock);
1566 if (list_empty(&mm->mmlist))
1567 list_add(&mm->mmlist, &init_mm.mmlist);
1568 spin_unlock(&mmlist_lock);
1569 }
1570 dec_mm_counter(mm, MM_ANONPAGES);
1571 inc_mm_counter(mm, MM_SWAPENTS);
1572 swp_pte = swp_entry_to_pte(entry);
1573 if (pte_soft_dirty(pteval))
1574 swp_pte = pte_swp_mksoft_dirty(swp_pte);
1575 set_pte_at(mm, address, pte, swp_pte);
1576 } else
1577 dec_mm_counter(mm, mm_counter_file(page));
1578
1579discard:
1580 page_remove_rmap(page, PageHuge(page));
1581 put_page(page);
1582
1583out_unmap:
1584 pte_unmap_unlock(pte, ptl);
1585 if (ret != SWAP_FAIL && ret != SWAP_MLOCK && !(flags & TTU_MUNLOCK))
1586 mmu_notifier_invalidate_page(mm, address);
1587out:
1588 return ret;
1589}
1590
1591bool is_vma_temporary_stack(struct vm_area_struct *vma)
1592{
1593 int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);
1594
1595 if (!maybe_stack)
1596 return false;
1597
1598 if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
1599 VM_STACK_INCOMPLETE_SETUP)
1600 return true;
1601
1602 return false;
1603}
1604
1605static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
1606{
1607 return is_vma_temporary_stack(vma);
1608}
1609
1610static int page_mapcount_is_zero(struct page *page)
1611{
1612 return !page_mapcount(page);
1613}
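
/**
 * try_to_unmap - try to remove all page table mappings to a page
 * @page: the page to get unmapped
 * @flags: TTU_* action and modifier flags
 *
 * Walks all mappings of @page and removes them.  Returns SWAP_SUCCESS
 * if all mappings went away, SWAP_AGAIN if some remain, SWAP_FAIL on
 * error, SWAP_MLOCK if the page is mlocked and was not unmapped, or
 * SWAP_LZFREE if a clean anonymous page was dropped without being
 * swapped.
 */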
1629int try_to_unmap(struct page *page, enum ttu_flags flags)
1630{
1631 int ret;
1632 struct rmap_private rp = {
1633 .flags = flags,
1634 .lazyfreed = 0,
1635 };
1636
1637 struct rmap_walk_control rwc = {
1638 .rmap_one = try_to_unmap_one,
1639 .arg = &rp,
1640 .done = page_mapcount_is_zero,
1641 .anon_lock = page_lock_anon_vma_read,
1642 };
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652 if ((flags & TTU_MIGRATION) && !PageKsm(page) && PageAnon(page))
1653 rwc.invalid_vma = invalid_migration_vma;
1654
1655 if (flags & TTU_RMAP_LOCKED)
1656 ret = rmap_walk_locked(page, &rwc);
1657 else
1658 ret = rmap_walk(page, &rwc);
1659
1660 if (ret != SWAP_MLOCK && !page_mapcount(page)) {
1661 ret = SWAP_SUCCESS;
1662 if (rp.lazyfreed && !PageDirty(page))
1663 ret = SWAP_LZFREE;
1664 }
1665 return ret;
1666}
1667
1668static int page_not_mapped(struct page *page)
1669{
1670 return !page_mapped(page);
1671};
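
/*
 * try_to_munlock - check whether a page is still mapped into a VM_LOCKED
 * VMA, and if so mlock it again.  Called from the munlock code; returns
 * SWAP_MLOCK when the page was found in (and re-mlocked into) a locked
 * VMA, SWAP_AGAIN otherwise.
 */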
1688int try_to_munlock(struct page *page)
1689{
1690 int ret;
1691 struct rmap_private rp = {
1692 .flags = TTU_MUNLOCK,
1693 .lazyfreed = 0,
1694 };
1695
1696 struct rmap_walk_control rwc = {
1697 .rmap_one = try_to_unmap_one,
1698 .arg = &rp,
1699 .done = page_not_mapped,
1700 .anon_lock = page_lock_anon_vma_read,
1701
1702 };
1703
1704 VM_BUG_ON_PAGE(!PageLocked(page) || PageLRU(page), page);
1705
1706 ret = rmap_walk(page, &rwc);
1707 return ret;
1708}
1709
1710void __put_anon_vma(struct anon_vma *anon_vma)
1711{
1712 struct anon_vma *root = anon_vma->root;
1713
1714 anon_vma_free(anon_vma);
1715 if (root != anon_vma && atomic_dec_and_test(&root->refcount))
1716 anon_vma_free(root);
1717}
1718
1719static struct anon_vma *rmap_walk_anon_lock(struct page *page,
1720 struct rmap_walk_control *rwc)
1721{
1722 struct anon_vma *anon_vma;
1723
1724 if (rwc->anon_lock)
1725 return rwc->anon_lock(page);
1726
1727
1728
1729
1730
1731
1732
1733 anon_vma = page_anon_vma(page);
1734 if (!anon_vma)
1735 return NULL;
1736
1737 anon_vma_lock_read(anon_vma);
1738 return anon_vma;
1739}
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
1756 bool locked)
1757{
1758 struct anon_vma *anon_vma;
1759 pgoff_t pgoff;
1760 struct anon_vma_chain *avc;
1761 int ret = SWAP_AGAIN;
1762
1763 if (locked) {
1764 anon_vma = page_anon_vma(page);
1765
1766 VM_BUG_ON_PAGE(!anon_vma, page);
1767 } else {
1768 anon_vma = rmap_walk_anon_lock(page, rwc);
1769 }
1770 if (!anon_vma)
1771 return ret;
1772
1773 pgoff = page_to_pgoff(page);
1774 anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
1775 struct vm_area_struct *vma = avc->vma;
1776 unsigned long address = vma_address(page, vma);
1777
1778 cond_resched();
1779
1780 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
1781 continue;
1782
1783 ret = rwc->rmap_one(page, vma, address, rwc->arg);
1784 if (ret != SWAP_AGAIN)
1785 break;
1786 if (rwc->done && rwc->done(page))
1787 break;
1788 }
1789
1790 if (!locked)
1791 anon_vma_unlock_read(anon_vma);
1792 return ret;
1793}
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
1809 bool locked)
1810{
1811 struct address_space *mapping = page_mapping(page);
1812 pgoff_t pgoff;
1813 struct vm_area_struct *vma;
1814 int ret = SWAP_AGAIN;
1815
1816
1817
1818
1819
1820
1821
1822 VM_BUG_ON_PAGE(!PageLocked(page), page);
1823
1824 if (!mapping)
1825 return ret;
1826
1827 pgoff = page_to_pgoff(page);
1828 if (!locked)
1829 i_mmap_lock_read(mapping);
1830 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
1831 unsigned long address = vma_address(page, vma);
1832
1833 cond_resched();
1834
1835 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
1836 continue;
1837
1838 ret = rwc->rmap_one(page, vma, address, rwc->arg);
1839 if (ret != SWAP_AGAIN)
1840 goto done;
1841 if (rwc->done && rwc->done(page))
1842 goto done;
1843 }
1844
1845done:
1846 if (!locked)
1847 i_mmap_unlock_read(mapping);
1848 return ret;
1849}
1850
1851int rmap_walk(struct page *page, struct rmap_walk_control *rwc)
1852{
1853 if (unlikely(PageKsm(page)))
1854 return rmap_walk_ksm(page, rwc);
1855 else if (PageAnon(page))
1856 return rmap_walk_anon(page, rwc, false);
1857 else
1858 return rmap_walk_file(page, rwc, false);
1859}
1860
1861
1862int rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc)
1863{
1864
1865 VM_BUG_ON_PAGE(PageKsm(page), page);
1866 if (PageAnon(page))
1867 return rmap_walk_anon(page, rwc, true);
1868 else
1869 return rmap_walk_file(page, rwc, true);
1870}
1871
1872#ifdef CONFIG_HUGETLB_PAGE
1873
1874
1875
1876
1877
1878static void __hugepage_set_anon_rmap(struct page *page,
1879 struct vm_area_struct *vma, unsigned long address, int exclusive)
1880{
1881 struct anon_vma *anon_vma = vma->anon_vma;
1882
1883 BUG_ON(!anon_vma);
1884
1885 if (PageAnon(page))
1886 return;
1887 if (!exclusive)
1888 anon_vma = anon_vma->root;
1889
1890 anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
1891 page->mapping = (struct address_space *) anon_vma;
1892 page->index = linear_page_index(vma, address);
1893}
1894
1895void hugepage_add_anon_rmap(struct page *page,
1896 struct vm_area_struct *vma, unsigned long address)
1897{
1898 struct anon_vma *anon_vma = vma->anon_vma;
1899 int first;
1900
1901 BUG_ON(!PageLocked(page));
1902 BUG_ON(!anon_vma);
1903
1904 first = atomic_inc_and_test(compound_mapcount_ptr(page));
1905 if (first)
1906 __hugepage_set_anon_rmap(page, vma, address, 0);
1907}
1908
1909void hugepage_add_new_anon_rmap(struct page *page,
1910 struct vm_area_struct *vma, unsigned long address)
1911{
1912 BUG_ON(address < vma->vm_start || address >= vma->vm_end);
1913 atomic_set(compound_mapcount_ptr(page), 0);
1914 __hugepage_set_anon_rmap(page, vma, address, 1);
1915}
1916#endif
1917