/*
 *  Copyright (C) 2009  Red Hat, Inc.
 *
 *  This work is licensed under the terms of the GNU GPL, version 2. See
 *  the COPYING file in the top-level directory.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/coredump.h>
#include <linux/sched/numa_balancing.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/shrinker.h>
#include <linux/mm_inline.h>
#include <linux/swapops.h>
#include <linux/dax.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/pfn_t.h>
#include <linux/mman.h>
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include <linux/migrate.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/shmem_fs.h>
#include <linux/oom.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"

/*
 * By default, transparent hugepage support is disabled in order to avoid
 * risking an increased memory footprint for applications without a
 * guaranteed benefit. When transparent hugepage support is enabled, it is
 * used for all mappings, and khugepaged scans all mappings. Defrag is
 * invoked by khugepaged hugepage allocations and by page faults for all
 * hugepage allocations.
 */
unsigned long transparent_hugepage_flags __read_mostly =
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
	(1<<TRANSPARENT_HUGEPAGE_FLAG)|
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
	(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
#endif
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);

static struct shrinker deferred_split_shrinker;

static atomic_t huge_zero_refcount;
struct page *huge_zero_page __read_mostly;

static struct page *get_huge_zero_page(void)
{
	struct page *zero_page;
retry:
	if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
		return READ_ONCE(huge_zero_page);

	zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
			HPAGE_PMD_ORDER);
	if (!zero_page) {
		count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
		return NULL;
	}
	count_vm_event(THP_ZERO_PAGE_ALLOC);
	preempt_disable();
	if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
		preempt_enable();
		__free_pages(zero_page, compound_order(zero_page));
		goto retry;
	}

	/* We take additional reference here. It will be put back by shrinker */
	atomic_set(&huge_zero_refcount, 2);
	preempt_enable();
	return READ_ONCE(huge_zero_page);
}

static void put_huge_zero_page(void)
{
	/*
	 * Counter should never go to zero here. Only shrinker can put
	 * last reference.
	 */
	BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
}

struct page *mm_get_huge_zero_page(struct mm_struct *mm)
{
	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		return READ_ONCE(huge_zero_page);

	if (!get_huge_zero_page())
		return NULL;

	if (test_and_set_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		put_huge_zero_page();

	return READ_ONCE(huge_zero_page);
}

void mm_put_huge_zero_page(struct mm_struct *mm)
{
	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		put_huge_zero_page();
}

static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
					struct shrink_control *sc)
{
	/* we can free zero page only if last reference remains */
	return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
}

static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
		struct page *zero_page = xchg(&huge_zero_page, NULL);
		BUG_ON(zero_page == NULL);
		__free_pages(zero_page, compound_order(zero_page));
		return HPAGE_PMD_NR;
	}

	return 0;
}

static struct shrinker huge_zero_page_shrinker = {
	.count_objects = shrink_huge_zero_page_count,
	.scan_objects = shrink_huge_zero_page_scan,
	.seeks = DEFAULT_SEEKS,
};
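
/*
 * Illustrative note (not from the original source): the huge zero page
 * refcounting used above is
 *
 *	refcount == 0: no huge zero page is allocated
 *	refcount == 1: only the shrinker's implicit reference remains,
 *		       so the page may be freed
 *	refcount >= 2: one reference per mm with MMF_HUGE_ZERO_PAGE set,
 *		       plus the shrinker's reference
 *
 * which is why get_huge_zero_page() starts the counter at 2 and
 * shrink_huge_zero_page_scan() only frees the page on a 1 -> 0 transition.
 */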

#ifdef CONFIG_SYSFS
static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	if (test_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags))
		return sprintf(buf, "[always] madvise never\n");
	else if (test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags))
		return sprintf(buf, "always [madvise] never\n");
	else
		return sprintf(buf, "always madvise [never]\n");
}

static ssize_t enabled_store(struct kobject *kobj,
			     struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	ssize_t ret = count;

	if (!memcmp("always", buf,
		    min(sizeof("always")-1, count))) {
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
	} else if (!memcmp("madvise", buf,
			   min(sizeof("madvise")-1, count))) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else if (!memcmp("never", buf,
			   min(sizeof("never")-1, count))) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else
		ret = -EINVAL;

	if (ret > 0) {
		int err = start_stop_khugepaged();
		if (err)
			ret = err;
	}
	return ret;
}
static struct kobj_attribute enabled_attr =
	__ATTR(enabled, 0644, enabled_show, enabled_store);
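
/*
 * Example (illustrative): the attribute above appears as
 * /sys/kernel/mm/transparent_hugepage/enabled and is driven from userspace
 * with e.g.
 *
 *	echo madvise > /sys/kernel/mm/transparent_hugepage/enabled
 *	cat /sys/kernel/mm/transparent_hugepage/enabled
 */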

ssize_t single_hugepage_flag_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf,
				enum transparent_hugepage_flag flag)
{
	return sprintf(buf, "%d\n",
		       !!test_bit(flag, &transparent_hugepage_flags));
}

ssize_t single_hugepage_flag_store(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 const char *buf, size_t count,
				 enum transparent_hugepage_flag flag)
{
	unsigned long value;
	int ret;

	ret = kstrtoul(buf, 10, &value);
	if (ret < 0)
		return ret;
	if (value > 1)
		return -EINVAL;

	if (value)
		set_bit(flag, &transparent_hugepage_flags);
	else
		clear_bit(flag, &transparent_hugepage_flags);

	return count;
}

static ssize_t defrag_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
		return sprintf(buf, "[always] defer defer+madvise madvise never\n");
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
		return sprintf(buf, "always [defer] defer+madvise madvise never\n");
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
		return sprintf(buf, "always defer [defer+madvise] madvise never\n");
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
		return sprintf(buf, "always defer defer+madvise [madvise] never\n");
	return sprintf(buf, "always defer defer+madvise madvise [never]\n");
}

static ssize_t defrag_store(struct kobject *kobj,
			    struct kobj_attribute *attr,
			    const char *buf, size_t count)
{
	if (!memcmp("always", buf,
		    min(sizeof("always")-1, count))) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
	} else if (!memcmp("defer+madvise", buf,
		    min(sizeof("defer+madvise")-1, count))) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
	} else if (!memcmp("defer", buf,
		    min(sizeof("defer")-1, count))) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
	} else if (!memcmp("madvise", buf,
			   min(sizeof("madvise")-1, count))) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else if (!memcmp("never", buf,
			   min(sizeof("never")-1, count))) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else
		return -EINVAL;

	return count;
}
static struct kobj_attribute defrag_attr =
	__ATTR(defrag, 0644, defrag_show, defrag_store);
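
/*
 * Example (illustrative): the defrag policy is selected the same way, e.g.
 *
 *	echo defer+madvise > /sys/kernel/mm/transparent_hugepage/defrag
 */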

static ssize_t use_zero_page_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static ssize_t use_zero_page_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static struct kobj_attribute use_zero_page_attr =
	__ATTR(use_zero_page, 0644, use_zero_page_show, use_zero_page_store);

static ssize_t hpage_pmd_size_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", HPAGE_PMD_SIZE);
}
static struct kobj_attribute hpage_pmd_size_attr =
	__ATTR_RO(hpage_pmd_size);
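
/*
 * Example (illustrative): on x86-64 with 4K base pages this read-only
 * attribute reports the PMD hugepage size in bytes:
 *
 *	$ cat /sys/kernel/mm/transparent_hugepage/hpage_pmd_size
 *	2097152
 */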

#ifdef CONFIG_DEBUG_VM
static ssize_t debug_cow_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
}
static ssize_t debug_cow_store(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
}
static struct kobj_attribute debug_cow_attr =
	__ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store);
#endif /* CONFIG_DEBUG_VM */

static struct attribute *hugepage_attr[] = {
	&enabled_attr.attr,
	&defrag_attr.attr,
	&use_zero_page_attr.attr,
	&hpage_pmd_size_attr.attr,
#if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE)
	&shmem_enabled_attr.attr,
#endif
#ifdef CONFIG_DEBUG_VM
	&debug_cow_attr.attr,
#endif
	NULL,
};

static const struct attribute_group hugepage_attr_group = {
	.attrs = hugepage_attr,
};

static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	int err;

	*hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
	if (unlikely(!*hugepage_kobj)) {
		pr_err("failed to create transparent hugepage kobject\n");
		return -ENOMEM;
	}

	err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
	if (err) {
		pr_err("failed to register transparent hugepage group\n");
		goto delete_obj;
	}

	err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
	if (err) {
		pr_err("failed to register transparent hugepage group\n");
		goto remove_hp_group;
	}

	return 0;

remove_hp_group:
	sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
delete_obj:
	kobject_put(*hugepage_kobj);
	return err;
}

static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
	sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
	sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
	kobject_put(hugepage_kobj);
}
#else
static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	return 0;
}

static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
}
#endif /* CONFIG_SYSFS */

static int __init hugepage_init(void)
{
	int err;
	struct kobject *hugepage_kobj;

	if (!has_transparent_hugepage()) {
		transparent_hugepage_flags = 0;
		return -EINVAL;
	}

	/*
	 * hugepages can't be allocated by the buddy allocator
	 */
	MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER >= MAX_ORDER);
	/*
	 * we use page->mapping and page->index in second tail page
	 * as list_head: assuming THP order >= 2
	 */
	MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER < 2);

	err = hugepage_init_sysfs(&hugepage_kobj);
	if (err)
		goto err_sysfs;

	err = khugepaged_init();
	if (err)
		goto err_slab;

	err = register_shrinker(&huge_zero_page_shrinker);
	if (err)
		goto err_hzp_shrinker;
	err = register_shrinker(&deferred_split_shrinker);
	if (err)
		goto err_split_shrinker;

	/*
	 * By default disable transparent hugepages on smaller systems,
	 * where the extra memory used could hurt more than TLB overhead
	 * is likely to save.  The admin can still enable it through /sys.
	 */
	if (totalram_pages < (512 << (20 - PAGE_SHIFT))) {
		transparent_hugepage_flags = 0;
		return 0;
	}

	err = start_stop_khugepaged();
	if (err)
		goto err_khugepaged;

	return 0;
err_khugepaged:
	unregister_shrinker(&deferred_split_shrinker);
err_split_shrinker:
	unregister_shrinker(&huge_zero_page_shrinker);
err_hzp_shrinker:
	khugepaged_destroy();
err_slab:
	hugepage_exit_sysfs(hugepage_kobj);
err_sysfs:
	return err;
}
subsys_initcall(hugepage_init);

static int __init setup_transparent_hugepage(char *str)
{
	int ret = 0;
	if (!str)
		goto out;
	if (!strcmp(str, "always")) {
		set_bit(TRANSPARENT_HUGEPAGE_FLAG,
			&transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			&transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	}
out:
	if (!ret)
		pr_warn("transparent_hugepage= cannot parse, ignored\n");
	return ret;
}
__setup("transparent_hugepage=", setup_transparent_hugepage);
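
/*
 * Example (illustrative): the early parameter above accepts the same
 * keywords as the sysfs knob, e.g. booting with
 *
 *	transparent_hugepage=madvise
 *
 * on the kernel command line.
 */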

pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pmd = pmd_mkwrite(pmd);
	return pmd;
}

static inline struct list_head *page_deferred_list(struct page *page)
{
	/*
	 * ->lru in the tail pages is occupied by compound_head.
	 * Let's use ->mapping in the second tail page as list_head.
	 */
	return (struct list_head *)&page[2].mapping;
}

void prep_transhuge_page(struct page *page)
{
	/*
	 * we use page->mapping and page->index in the second tail page
	 * as list_head: assuming THP order >= 2
	 */
	INIT_LIST_HEAD(page_deferred_list(page));
	set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR);
}
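
/*
 * Usage sketch (illustrative), mirroring do_huge_pmd_anonymous_page() below:
 *
 *	page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
 *	if (page)
 *		prep_transhuge_page(page);
 *
 * i.e. a freshly allocated compound page must be prepped before it is
 * mapped so that the deferred-split list head and the THP destructor are
 * set up.
 */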

unsigned long __thp_get_unmapped_area(struct file *filp, unsigned long len,
		loff_t off, unsigned long flags, unsigned long size)
{
	unsigned long addr;
	loff_t off_end = off + len;
	loff_t off_align = round_up(off, size);
	unsigned long len_pad;

	if (off_end <= off_align || (off_end - off_align) < size)
		return 0;

	len_pad = len + size;
	if (len_pad < len || (off + len_pad) < off)
		return 0;

	addr = current->mm->get_unmapped_area(filp, 0, len_pad,
					      off >> PAGE_SHIFT, flags);
	if (IS_ERR_VALUE(addr))
		return 0;

	addr += (off - addr) & (size - 1);
	return addr;
}
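
/*
 * Worked example (illustrative, hypothetical numbers): with size = 2MB,
 * off = 0x201000 and len = 0x600000, __thp_get_unmapped_area() above asks
 * for len + 2MB of free address space. If get_unmapped_area() hands back a
 * 2MB-aligned addr of 0x7f0000000000, the final adjustment
 *
 *	addr += (off - addr) & (size - 1);	-> addr = 0x7f0000001000
 *
 * makes addr congruent to off modulo 2MB, so every 2MB-aligned file offset
 * (0x400000, 0x600000, ...) lands on a 2MB-aligned virtual address and can
 * be mapped with a PMD entry.
 */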

unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	loff_t off = (loff_t)pgoff << PAGE_SHIFT;

	if (addr)
		goto out;
	if (!IS_DAX(filp->f_mapping->host) || !IS_ENABLED(CONFIG_FS_DAX_PMD))
		goto out;

	addr = __thp_get_unmapped_area(filp, len, off, flags, PMD_SIZE);
	if (addr)
		return addr;

out:
	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
}
EXPORT_SYMBOL_GPL(thp_get_unmapped_area);

static int __do_huge_pmd_anonymous_page(struct vm_fault *vmf, struct page *page,
		gfp_t gfp)
{
	struct vm_area_struct *vma = vmf->vma;
	struct mem_cgroup *memcg;
	pgtable_t pgtable;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
	int ret = 0;

	VM_BUG_ON_PAGE(!PageCompound(page), page);

	if (mem_cgroup_try_charge(page, vma->vm_mm, gfp | __GFP_NORETRY, &memcg,
				  true)) {
		put_page(page);
		count_vm_event(THP_FAULT_FALLBACK);
		return VM_FAULT_FALLBACK;
	}

	pgtable = pte_alloc_one(vma->vm_mm, haddr);
	if (unlikely(!pgtable)) {
		ret = VM_FAULT_OOM;
		goto release;
	}

	clear_huge_page(page, vmf->address, HPAGE_PMD_NR);
	/*
	 * The memory barrier inside __SetPageUptodate makes sure that
	 * clear_huge_page writes become visible before the set_pmd_at()
	 * write.
	 */
	__SetPageUptodate(page);

	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
	if (unlikely(!pmd_none(*vmf->pmd))) {
		goto unlock_release;
	} else {
		pmd_t entry;

		ret = check_stable_address_space(vma->vm_mm);
		if (ret)
			goto unlock_release;

		/* Deliver the page fault to userland */
		if (userfaultfd_missing(vma)) {
			int ret;

			spin_unlock(vmf->ptl);
			mem_cgroup_cancel_charge(page, memcg, true);
			put_page(page);
			pte_free(vma->vm_mm, pgtable);
			ret = handle_userfault(vmf, VM_UFFD_MISSING);
			VM_BUG_ON(ret & VM_FAULT_FALLBACK);
			return ret;
		}

		entry = mk_huge_pmd(page, vma->vm_page_prot);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		page_add_new_anon_rmap(page, vma, haddr, true);
		mem_cgroup_commit_charge(page, memcg, false, true);
		lru_cache_add_active_or_unevictable(page, vma);
		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
		set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
		add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
		mm_inc_nr_ptes(vma->vm_mm);
		spin_unlock(vmf->ptl);
		count_vm_event(THP_FAULT_ALLOC);
	}

	return 0;
unlock_release:
	spin_unlock(vmf->ptl);
release:
	if (pgtable)
		pte_free(vma->vm_mm, pgtable);
	mem_cgroup_cancel_charge(page, memcg, true);
	put_page(page);
	return ret;
}

/*
 * always: directly stall for all thp allocations
 * defer: wake kswapd and fail if not immediately available
 * defer+madvise: wake kswapd and directly stall for MADV_HUGEPAGE, otherwise
 *		  fail if not immediately available
 * madvise: directly stall for MADV_HUGEPAGE, otherwise fail if not immediately
 *	    available
 * never: never stall for any thp allocation
 */
static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma)
{
	const bool vma_madvised = !!(vma->vm_flags & VM_HUGEPAGE);

	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY);
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM;
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT | (vma_madvised ? __GFP_DIRECT_RECLAIM :
							     __GFP_KSWAPD_RECLAIM);
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT | (vma_madvised ? __GFP_DIRECT_RECLAIM :
							     0);
	return GFP_TRANSHUGE_LIGHT;
}

/* Caller must hold page table lock. */
static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
		struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
		struct page *zero_page)
{
	pmd_t entry;
	if (!pmd_none(*pmd))
		return false;
	entry = mk_pmd(zero_page, vma->vm_page_prot);
	entry = pmd_mkhuge(entry);
	if (pgtable)
		pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, haddr, pmd, entry);
	mm_inc_nr_ptes(mm);
	return true;
}

int do_huge_pmd_anonymous_page(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	gfp_t gfp;
	struct page *page;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;

	if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
		return VM_FAULT_FALLBACK;
	if (unlikely(anon_vma_prepare(vma)))
		return VM_FAULT_OOM;
	if (unlikely(khugepaged_enter(vma, vma->vm_flags)))
		return VM_FAULT_OOM;
	if (!(vmf->flags & FAULT_FLAG_WRITE) &&
			!mm_forbids_zeropage(vma->vm_mm) &&
			transparent_hugepage_use_zero_page()) {
		pgtable_t pgtable;
		struct page *zero_page;
		bool set;
		int ret;
		pgtable = pte_alloc_one(vma->vm_mm, haddr);
		if (unlikely(!pgtable))
			return VM_FAULT_OOM;
		zero_page = mm_get_huge_zero_page(vma->vm_mm);
		if (unlikely(!zero_page)) {
			pte_free(vma->vm_mm, pgtable);
			count_vm_event(THP_FAULT_FALLBACK);
			return VM_FAULT_FALLBACK;
		}
		vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
		ret = 0;
		set = false;
		if (pmd_none(*vmf->pmd)) {
			ret = check_stable_address_space(vma->vm_mm);
			if (ret) {
				spin_unlock(vmf->ptl);
			} else if (userfaultfd_missing(vma)) {
				spin_unlock(vmf->ptl);
				ret = handle_userfault(vmf, VM_UFFD_MISSING);
				VM_BUG_ON(ret & VM_FAULT_FALLBACK);
			} else {
				set_huge_zero_page(pgtable, vma->vm_mm, vma,
						   haddr, vmf->pmd, zero_page);
				spin_unlock(vmf->ptl);
				set = true;
			}
		} else
			spin_unlock(vmf->ptl);
		if (!set)
			pte_free(vma->vm_mm, pgtable);
		return ret;
	}
	gfp = alloc_hugepage_direct_gfpmask(vma);
	page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
	if (unlikely(!page)) {
		count_vm_event(THP_FAULT_FALLBACK);
		return VM_FAULT_FALLBACK;
	}
	prep_transhuge_page(page);
	return __do_huge_pmd_anonymous_page(vmf, page, gfp);
}
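
/*
 * Illustrative summary (not from the original source) of the anonymous THP
 * fault policy implemented above:
 *
 *	read fault + use_zero_page enabled  -> map the huge zero page
 *	write fault (or zero page disabled) -> allocate a THP with the gfp
 *					       mask chosen by the defrag mode
 *	allocation or charge failure        -> return VM_FAULT_FALLBACK and
 *					       let the caller map base pages
 */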

static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write,
		pgtable_t pgtable)
{
	struct mm_struct *mm = vma->vm_mm;
	pmd_t entry;
	spinlock_t *ptl;

	ptl = pmd_lock(mm, pmd);
	entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
	if (pfn_t_devmap(pfn))
		entry = pmd_mkdevmap(entry);
	if (write) {
		entry = pmd_mkyoung(pmd_mkdirty(entry));
		entry = maybe_pmd_mkwrite(entry, vma);
	}

	if (pgtable) {
		pgtable_trans_huge_deposit(mm, pmd, pgtable);
		mm_inc_nr_ptes(mm);
	}

	set_pmd_at(mm, addr, pmd, entry);
	update_mmu_cache_pmd(vma, addr, pmd);
	spin_unlock(ptl);
}

int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
			pmd_t *pmd, pfn_t pfn, bool write)
{
	pgprot_t pgprot = vma->vm_page_prot;
	pgtable_t pgtable = NULL;
	/*
	 * If we had pmd_special, we could avoid all these restrictions,
	 * but we need to be consistent with PTEs and architectures that
	 * can't support a 'special' bit.
	 */
	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
						(VM_PFNMAP|VM_MIXEDMAP));
	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
	BUG_ON(!pfn_t_devmap(pfn));

	if (addr < vma->vm_start || addr >= vma->vm_end)
		return VM_FAULT_SIGBUS;

	if (arch_needs_pgtable_deposit()) {
		pgtable = pte_alloc_one(vma->vm_mm, addr);
		if (!pgtable)
			return VM_FAULT_OOM;
	}

	track_pfn_insert(vma, &pgprot, pfn);

	insert_pfn_pmd(vma, addr, pmd, pfn, pgprot, write, pgtable);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pud = pud_mkwrite(pud);
	return pud;
}

static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, pfn_t pfn, pgprot_t prot, bool write)
{
	struct mm_struct *mm = vma->vm_mm;
	pud_t entry;
	spinlock_t *ptl;

	ptl = pud_lock(mm, pud);
	entry = pud_mkhuge(pfn_t_pud(pfn, prot));
	if (pfn_t_devmap(pfn))
		entry = pud_mkdevmap(entry);
	if (write) {
		entry = pud_mkyoung(pud_mkdirty(entry));
		entry = maybe_pud_mkwrite(entry, vma);
	}
	set_pud_at(mm, addr, pud, entry);
	update_mmu_cache_pud(vma, addr, pud);
	spin_unlock(ptl);
}

int vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
			pud_t *pud, pfn_t pfn, bool write)
{
	pgprot_t pgprot = vma->vm_page_prot;
	/*
	 * If we had pud_special, we could avoid all these restrictions,
	 * but we need to be consistent with PTEs and architectures that
	 * can't support a 'special' bit.
	 */
	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
						(VM_PFNMAP|VM_MIXEDMAP));
	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
	BUG_ON(!pfn_t_devmap(pfn));

	if (addr < vma->vm_start || addr >= vma->vm_end)
		return VM_FAULT_SIGBUS;

	track_pfn_insert(vma, &pgprot, pfn);

	insert_pfn_pud(vma, addr, pud, pfn, pgprot, write);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud);
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */

static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags)
{
	pmd_t _pmd;

	_pmd = pmd_mkyoung(*pmd);
	if (flags & FOLL_WRITE)
		_pmd = pmd_mkdirty(_pmd);
	if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
				pmd, _pmd, flags & FOLL_WRITE))
		update_mmu_cache_pmd(vma, addr, pmd);
}

struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags)
{
	unsigned long pfn = pmd_pfn(*pmd);
	struct mm_struct *mm = vma->vm_mm;
	struct dev_pagemap *pgmap;
	struct page *page;

	assert_spin_locked(pmd_lockptr(mm, pmd));

	/*
	 * When we COW a devmap PMD entry, we split it into PTEs, so we should
	 * not be in this function with `flags & FOLL_GET` set.
	 */
	WARN_ONCE(flags & FOLL_COW, "mm: In follow_devmap_pmd with FOLL_COW set");

	if (flags & FOLL_WRITE && !pmd_write(*pmd))
		return NULL;

	if (pmd_present(*pmd) && pmd_devmap(*pmd))
		/* pass */;
	else
		return NULL;

	if (flags & FOLL_TOUCH)
		touch_pmd(vma, addr, pmd, flags);

	/*
	 * device mapped pages can only be returned if the
	 * caller will manage the page reference count.
	 */
	if (!(flags & FOLL_GET))
		return ERR_PTR(-EEXIST);

	pfn += (addr & ~PMD_MASK) >> PAGE_SHIFT;
	pgmap = get_dev_pagemap(pfn, NULL);
	if (!pgmap)
		return ERR_PTR(-EFAULT);
	page = pfn_to_page(pfn);
	get_page(page);
	put_dev_pagemap(pgmap);

	return page;
}

int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
		  struct vm_area_struct *vma)
{
	spinlock_t *dst_ptl, *src_ptl;
	struct page *src_page;
	pmd_t pmd;
	pgtable_t pgtable = NULL;
	int ret = -ENOMEM;

	/* Skip if can be re-filled on fault */
	if (!vma_is_anonymous(vma))
		return 0;

	pgtable = pte_alloc_one(dst_mm, addr);
	if (unlikely(!pgtable))
		goto out;

	dst_ptl = pmd_lock(dst_mm, dst_pmd);
	src_ptl = pmd_lockptr(src_mm, src_pmd);
	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);

	ret = -EAGAIN;
	pmd = *src_pmd;

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
	if (unlikely(is_swap_pmd(pmd))) {
		swp_entry_t entry = pmd_to_swp_entry(pmd);

		VM_BUG_ON(!is_pmd_migration_entry(pmd));
		if (is_write_migration_entry(entry)) {
			make_migration_entry_read(&entry);
			pmd = swp_entry_to_pmd(entry);
			if (pmd_swp_soft_dirty(*src_pmd))
				pmd = pmd_swp_mksoft_dirty(pmd);
			set_pmd_at(src_mm, addr, src_pmd, pmd);
		}
		add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
		mm_inc_nr_ptes(dst_mm);
		pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
		set_pmd_at(dst_mm, addr, dst_pmd, pmd);
		ret = 0;
		goto out_unlock;
	}
#endif

	if (unlikely(!pmd_trans_huge(pmd))) {
		pte_free(dst_mm, pgtable);
		goto out_unlock;
	}
	/*
	 * When page table lock is held, the huge zero pmd should not be
	 * under splitting since we don't split the page itself, only pmd to
	 * a page table.
	 */
	if (is_huge_zero_pmd(pmd)) {
		struct page *zero_page;
		/*
		 * get_huge_zero_page() will never allocate a new page here,
		 * since we already have a zero page to copy. It just takes a
		 * reference.
		 */
		zero_page = mm_get_huge_zero_page(dst_mm);
		set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd,
				zero_page);
		ret = 0;
		goto out_unlock;
	}

	src_page = pmd_page(pmd);
	VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
	get_page(src_page);
	page_dup_rmap(src_page, true);
	add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
	mm_inc_nr_ptes(dst_mm);
	pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);

	pmdp_set_wrprotect(src_mm, addr, src_pmd);
	pmd = pmd_mkold(pmd_wrprotect(pmd));
	set_pmd_at(dst_mm, addr, dst_pmd, pmd);

	ret = 0;
out_unlock:
	spin_unlock(src_ptl);
	spin_unlock(dst_ptl);
out:
	return ret;
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void touch_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, int flags)
{
	pud_t _pud;

	_pud = pud_mkyoung(*pud);
	if (flags & FOLL_WRITE)
		_pud = pud_mkdirty(_pud);
	if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK,
				pud, _pud, flags & FOLL_WRITE))
		update_mmu_cache_pud(vma, addr, pud);
}

struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, int flags)
{
	unsigned long pfn = pud_pfn(*pud);
	struct mm_struct *mm = vma->vm_mm;
	struct dev_pagemap *pgmap;
	struct page *page;

	assert_spin_locked(pud_lockptr(mm, pud));

	if (flags & FOLL_WRITE && !pud_write(*pud))
		return NULL;

	if (pud_present(*pud) && pud_devmap(*pud))
		/* pass */;
	else
		return NULL;

	if (flags & FOLL_TOUCH)
		touch_pud(vma, addr, pud, flags);

	/*
	 * device mapped pages can only be returned if the
	 * caller will manage the page reference count.
	 */
	if (!(flags & FOLL_GET))
		return ERR_PTR(-EEXIST);

	pfn += (addr & ~PUD_MASK) >> PAGE_SHIFT;
	pgmap = get_dev_pagemap(pfn, NULL);
	if (!pgmap)
		return ERR_PTR(-EFAULT);
	page = pfn_to_page(pfn);
	get_page(page);
	put_dev_pagemap(pgmap);

	return page;
}

int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
		  struct vm_area_struct *vma)
{
	spinlock_t *dst_ptl, *src_ptl;
	pud_t pud;
	int ret;

	dst_ptl = pud_lock(dst_mm, dst_pud);
	src_ptl = pud_lockptr(src_mm, src_pud);
	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);

	ret = -EAGAIN;
	pud = *src_pud;
	if (unlikely(!pud_trans_huge(pud) && !pud_devmap(pud)))
		goto out_unlock;

	/*
	 * When page table lock is held, the huge zero pud should not be
	 * under splitting since we don't split the page itself, only pud to
	 * a page table.
	 */
	if (is_huge_zero_pud(pud)) {
		/* No huge zero pud yet */
	}

	pudp_set_wrprotect(src_mm, addr, src_pud);
	pud = pud_mkold(pud_wrprotect(pud));
	set_pud_at(dst_mm, addr, dst_pud, pud);

	ret = 0;
out_unlock:
	spin_unlock(src_ptl);
	spin_unlock(dst_ptl);
	return ret;
}

void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
	pud_t entry;
	unsigned long haddr;
	bool write = vmf->flags & FAULT_FLAG_WRITE;

	vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud);
	if (unlikely(!pud_same(*vmf->pud, orig_pud)))
		goto unlock;

	entry = pud_mkyoung(orig_pud);
	if (write)
		entry = pud_mkdirty(entry);
	haddr = vmf->address & HPAGE_PUD_MASK;
	if (pudp_set_access_flags(vmf->vma, haddr, vmf->pud, entry, write))
		update_mmu_cache_pud(vmf->vma, vmf->address, vmf->pud);

unlock:
	spin_unlock(vmf->ptl);
}
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */

void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd)
{
	pmd_t entry;
	unsigned long haddr;
	bool write = vmf->flags & FAULT_FLAG_WRITE;

	vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
	if (unlikely(!pmd_same(*vmf->pmd, orig_pmd)))
		goto unlock;

	entry = pmd_mkyoung(orig_pmd);
	if (write)
		entry = pmd_mkdirty(entry);
	haddr = vmf->address & HPAGE_PMD_MASK;
	if (pmdp_set_access_flags(vmf->vma, haddr, vmf->pmd, entry, write))
		update_mmu_cache_pmd(vmf->vma, vmf->address, vmf->pmd);

unlock:
	spin_unlock(vmf->ptl);
}

static int do_huge_pmd_wp_page_fallback(struct vm_fault *vmf, pmd_t orig_pmd,
		struct page *page)
{
	struct vm_area_struct *vma = vmf->vma;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
	struct mem_cgroup *memcg;
	pgtable_t pgtable;
	pmd_t _pmd;
	int ret = 0, i;
	struct page **pages;
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */

	pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR,
			GFP_KERNEL);
	if (unlikely(!pages)) {
		ret |= VM_FAULT_OOM;
		goto out;
	}

	for (i = 0; i < HPAGE_PMD_NR; i++) {
		pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE, vma,
					       vmf->address, page_to_nid(page));
		if (unlikely(!pages[i] ||
			     mem_cgroup_try_charge(pages[i], vma->vm_mm,
				     GFP_KERNEL, &memcg, false))) {
			if (pages[i])
				put_page(pages[i]);
			while (--i >= 0) {
				memcg = (void *)page_private(pages[i]);
				set_page_private(pages[i], 0);
				mem_cgroup_cancel_charge(pages[i], memcg,
						false);
				put_page(pages[i]);
			}
			kfree(pages);
			ret |= VM_FAULT_OOM;
			goto out;
		}
		set_page_private(pages[i], (unsigned long)memcg);
	}

	for (i = 0; i < HPAGE_PMD_NR; i++) {
		copy_user_highpage(pages[i], page + i,
				   haddr + PAGE_SIZE * i, vma);
		__SetPageUptodate(pages[i]);
		cond_resched();
	}

	mmun_start = haddr;
	mmun_end   = haddr + HPAGE_PMD_SIZE;
	mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end);

	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
	if (unlikely(!pmd_same(*vmf->pmd, orig_pmd)))
		goto out_free_pages;
	VM_BUG_ON_PAGE(!PageHead(page), page);

	/*
	 * Leave pmd empty until pte is filled. Note we must notify here as
	 * a concurrent CPU thread might write to the new page before the
	 * call to mmu_notifier_invalidate_range_end() happens, which can
	 * lead to a device seeing memory writes in a different order than
	 * the CPU.
	 *
	 * See Documentation/vm/mmu_notifier.txt
	 */
	pmdp_huge_clear_flush_notify(vma, haddr, vmf->pmd);

	pgtable = pgtable_trans_huge_withdraw(vma->vm_mm, vmf->pmd);
	pmd_populate(vma->vm_mm, &_pmd, pgtable);

	for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
		pte_t entry;
		entry = mk_pte(pages[i], vma->vm_page_prot);
		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
		memcg = (void *)page_private(pages[i]);
		set_page_private(pages[i], 0);
		page_add_new_anon_rmap(pages[i], vmf->vma, haddr, false);
		mem_cgroup_commit_charge(pages[i], memcg, false, false);
		lru_cache_add_active_or_unevictable(pages[i], vma);
		vmf->pte = pte_offset_map(&_pmd, haddr);
		VM_BUG_ON(!pte_none(*vmf->pte));
		set_pte_at(vma->vm_mm, haddr, vmf->pte, entry);
		pte_unmap(vmf->pte);
	}
	kfree(pages);

	smp_wmb(); /* make pte visible before pmd */
	pmd_populate(vma->vm_mm, vmf->pmd, pgtable);
	page_remove_rmap(page, true);
	spin_unlock(vmf->ptl);

	/*
	 * No need to double call mmu_notifier->invalidate_range() callback as
	 * the above pmdp_huge_clear_flush_notify() did already call it.
	 */
	mmu_notifier_invalidate_range_only_end(vma->vm_mm, mmun_start,
						mmun_end);

	ret |= VM_FAULT_WRITE;
	put_page(page);

out:
	return ret;

out_free_pages:
	spin_unlock(vmf->ptl);
	mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);
	for (i = 0; i < HPAGE_PMD_NR; i++) {
		memcg = (void *)page_private(pages[i]);
		set_page_private(pages[i], 0);
		mem_cgroup_cancel_charge(pages[i], memcg, false);
		put_page(pages[i]);
	}
	kfree(pages);
	goto out;
}

int do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
{
	struct vm_area_struct *vma = vmf->vma;
	struct page *page = NULL, *new_page;
	struct mem_cgroup *memcg;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */
	gfp_t huge_gfp;			/* for allocation and charge */
	int ret = 0;

	vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd);
	VM_BUG_ON_VMA(!vma->anon_vma, vma);
	if (is_huge_zero_pmd(orig_pmd))
		goto alloc;
	spin_lock(vmf->ptl);
	if (unlikely(!pmd_same(*vmf->pmd, orig_pmd)))
		goto out_unlock;

	page = pmd_page(orig_pmd);
	VM_BUG_ON_PAGE(!PageCompound(page) || !PageHead(page), page);
	/*
	 * We can only reuse the page if nobody else maps the huge page or it's
	 * part.
	 */
	if (!trylock_page(page)) {
		get_page(page);
		spin_unlock(vmf->ptl);
		lock_page(page);
		spin_lock(vmf->ptl);
		if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
			unlock_page(page);
			put_page(page);
			goto out_unlock;
		}
		put_page(page);
	}
	if (reuse_swap_page(page, NULL)) {
		pmd_t entry;
		entry = pmd_mkyoung(orig_pmd);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1))
			update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
		ret |= VM_FAULT_WRITE;
		unlock_page(page);
		goto out_unlock;
	}
	unlock_page(page);
	get_page(page);
	spin_unlock(vmf->ptl);
alloc:
	if (transparent_hugepage_enabled(vma) &&
	    !transparent_hugepage_debug_cow()) {
		huge_gfp = alloc_hugepage_direct_gfpmask(vma);
		new_page = alloc_hugepage_vma(huge_gfp, vma, haddr, HPAGE_PMD_ORDER);
	} else
		new_page = NULL;

	if (likely(new_page)) {
		prep_transhuge_page(new_page);
	} else {
		if (!page) {
			split_huge_pmd(vma, vmf->pmd, vmf->address);
			ret |= VM_FAULT_FALLBACK;
		} else {
			ret = do_huge_pmd_wp_page_fallback(vmf, orig_pmd, page);
			if (ret & VM_FAULT_OOM) {
				split_huge_pmd(vma, vmf->pmd, vmf->address);
				ret |= VM_FAULT_FALLBACK;
			}
			put_page(page);
		}
		count_vm_event(THP_FAULT_FALLBACK);
		goto out;
	}

	if (unlikely(mem_cgroup_try_charge(new_page, vma->vm_mm,
					huge_gfp | __GFP_NORETRY, &memcg, true))) {
		put_page(new_page);
		split_huge_pmd(vma, vmf->pmd, vmf->address);
		if (page)
			put_page(page);
		ret |= VM_FAULT_FALLBACK;
		count_vm_event(THP_FAULT_FALLBACK);
		goto out;
	}

	count_vm_event(THP_FAULT_ALLOC);

	if (!page)
		clear_huge_page(new_page, vmf->address, HPAGE_PMD_NR);
	else
		copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
	__SetPageUptodate(new_page);

	mmun_start = haddr;
	mmun_end   = haddr + HPAGE_PMD_SIZE;
	mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end);

	spin_lock(vmf->ptl);
	if (page)
		put_page(page);
	if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
		spin_unlock(vmf->ptl);
		mem_cgroup_cancel_charge(new_page, memcg, true);
		put_page(new_page);
		goto out_mn;
	} else {
		pmd_t entry;
		entry = mk_huge_pmd(new_page, vma->vm_page_prot);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		pmdp_huge_clear_flush_notify(vma, haddr, vmf->pmd);
		page_add_new_anon_rmap(new_page, vma, haddr, true);
		mem_cgroup_commit_charge(new_page, memcg, false, true);
		lru_cache_add_active_or_unevictable(new_page, vma);
		set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
		update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
		if (!page) {
			add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
		} else {
			VM_BUG_ON_PAGE(!PageHead(page), page);
			page_remove_rmap(page, true);
			put_page(page);
		}
		ret |= VM_FAULT_WRITE;
	}
	spin_unlock(vmf->ptl);
out_mn:
	/*
	 * No need to double call mmu_notifier->invalidate_range() callback as
	 * the above pmdp_huge_clear_flush_notify() did already call it.
	 */
	mmu_notifier_invalidate_range_only_end(vma->vm_mm, mmun_start,
					       mmun_end);
out:
	return ret;
out_unlock:
	spin_unlock(vmf->ptl);
	return ret;
}

/*
 * FOLL_FORCE can write to even unwritable pmd's, but only
 * after we've gone through a COW cycle and they are dirty.
 */
static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
{
	return pmd_write(pmd) ||
	       ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd));
}

struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
				   unsigned long addr,
				   pmd_t *pmd,
				   unsigned int flags)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page *page = NULL;

	assert_spin_locked(pmd_lockptr(mm, pmd));

	if (flags & FOLL_WRITE && !can_follow_write_pmd(*pmd, flags))
		goto out;

	/* Avoid dumping huge zero page */
	if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
		return ERR_PTR(-EFAULT);

	/* Full NUMA hinting faults to serialise migration in fault paths */
	if ((flags & FOLL_NUMA) && pmd_protnone(*pmd))
		goto out;

	page = pmd_page(*pmd);
	VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page);
	if (flags & FOLL_TOUCH)
		touch_pmd(vma, addr, pmd, flags);
	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
		/*
		 * We don't mlock() pte-mapped THPs. This way we can avoid
		 * leaking mlocked pages into non-VM_LOCKED VMAs.
		 *
		 * For anon THP:
		 *
		 * In most cases the pmd is the only mapping of the page as we
		 * break COW for the mlock() -- see gup_flags |= FOLL_WRITE for
		 * writable private mappings in populate_vma_page_range().
		 *
		 * The only scenario when we have the page shared here is if we
		 * mlocking read-only mapping shared over fork(). We skip
		 * mlocking such pages.
		 *
		 * For file THP:
		 *
		 * We can expect PageDoubleMap() to be stable under page lock:
		 * for file pages we set it in page_add_file_rmap(), which
		 * requires page to be not under splitting (we never go in
		 * here with mapcount > 1 + 1).
		 */
		if (PageAnon(page) && compound_mapcount(page) != 1)
			goto skip_mlock;
		if (PageDoubleMap(page) || !page->mapping)
			goto skip_mlock;
		if (!trylock_page(page))
			goto skip_mlock;
		lru_add_drain();
		if (page->mapping && !PageDoubleMap(page))
			mlock_vma_page(page);
		unlock_page(page);
	}
skip_mlock:
	page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
	VM_BUG_ON_PAGE(!PageCompound(page) && !is_zone_device_page(page), page);
	if (flags & FOLL_GET)
		get_page(page);

out:
	return page;
}

/* NUMA hinting page fault entry point for trans huge pmds */
int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
{
	struct vm_area_struct *vma = vmf->vma;
	struct anon_vma *anon_vma = NULL;
	struct page *page;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
	int page_nid = -1, this_nid = numa_node_id();
	int target_nid, last_cpupid = -1;
	bool page_locked;
	bool migrated = false;
	bool was_writable;
	int flags = 0;

	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
	if (unlikely(!pmd_same(pmd, *vmf->pmd)))
		goto out_unlock;

	/*
	 * If there are potential migrations, wait for completion and retry
	 * without disrupting NUMA hinting information. Do not relock and
	 * check_same as the page may no longer be mapped.
	 */
	if (unlikely(pmd_trans_migrating(*vmf->pmd))) {
		page = pmd_page(*vmf->pmd);
		if (!get_page_unless_zero(page))
			goto out_unlock;
		spin_unlock(vmf->ptl);
		wait_on_page_locked(page);
		put_page(page);
		goto out;
	}

	page = pmd_page(pmd);
	BUG_ON(is_huge_zero_page(page));
	page_nid = page_to_nid(page);
	last_cpupid = page_cpupid_last(page);
	count_vm_numa_event(NUMA_HINT_FAULTS);
	if (page_nid == this_nid) {
		count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
		flags |= TNF_FAULT_LOCAL;
	}

	/* See similar comment in do_numa_page for explanation */
	if (!pmd_savedwrite(pmd))
		flags |= TNF_NO_GROUP;

	/*
	 * Acquire the page lock to serialise THP migrations but avoid dropping
	 * page_table_lock if at all possible
	 */
	page_locked = trylock_page(page);
	target_nid = mpol_misplaced(page, vma, haddr);
	if (target_nid == -1) {
		/* If the page was locked, there are no parallel migrations */
		if (page_locked)
			goto clear_pmdnuma;
	}

	/* Migration could have started since the pmd_trans_migrating check */
	if (!page_locked) {
		page_nid = -1;
		if (!get_page_unless_zero(page))
			goto out_unlock;
		spin_unlock(vmf->ptl);
		wait_on_page_locked(page);
		put_page(page);
		goto out;
	}

	/*
	 * Page is misplaced. Page lock serialises migrations. Acquire anon_vma
	 * to serialise splits
	 */
	get_page(page);
	spin_unlock(vmf->ptl);
	anon_vma = page_lock_anon_vma_read(page);

	/* Confirm the PMD did not change while page_table_lock was released */
	spin_lock(vmf->ptl);
	if (unlikely(!pmd_same(pmd, *vmf->pmd))) {
		unlock_page(page);
		put_page(page);
		page_nid = -1;
		goto out_unlock;
	}

	/* Bail if we fail to protect against THP splits for any reason */
	if (unlikely(!anon_vma)) {
		put_page(page);
		page_nid = -1;
		goto clear_pmdnuma;
	}

	/*
	 * Since we took the NUMA fault, we must have observed the !accessible
	 * bit. Make sure all other CPUs agree with that, to avoid them
	 * modifying the page we're about to migrate.
	 *
	 * Must be done under PTL such that we'll observe the relevant
	 * inc_tlb_flush_pending().
	 *
	 * We are not sure a pending tlb flush here is for a huge page
	 * mapping or not. Hence use the tlb range variant
	 */
	if (mm_tlb_flush_pending(vma->vm_mm))
		flush_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE);

	/*
	 * Migrate the THP to the requested node, returns with page unlocked
	 * and access rights restored.
	 */
	spin_unlock(vmf->ptl);

	migrated = migrate_misplaced_transhuge_page(vma->vm_mm, vma,
				vmf->pmd, pmd, vmf->address, page, target_nid);
	if (migrated) {
		flags |= TNF_MIGRATED;
		page_nid = target_nid;
	} else
		flags |= TNF_MIGRATE_FAIL;

	goto out;
clear_pmdnuma:
	BUG_ON(!PageLocked(page));
	was_writable = pmd_savedwrite(pmd);
	pmd = pmd_modify(pmd, vma->vm_page_prot);
	pmd = pmd_mkyoung(pmd);
	if (was_writable)
		pmd = pmd_mkwrite(pmd);
	set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd);
	update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
	unlock_page(page);
out_unlock:
	spin_unlock(vmf->ptl);

out:
	if (anon_vma)
		page_unlock_anon_vma_read(anon_vma);

	if (page_nid != -1)
		task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR,
				flags);

	return 0;
}

/*
 * Return true if we do MADV_FREE successfully on entire pmd page.
 * Otherwise, return false.
 */
bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
		pmd_t *pmd, unsigned long addr, unsigned long next)
{
	spinlock_t *ptl;
	pmd_t orig_pmd;
	struct page *page;
	struct mm_struct *mm = tlb->mm;
	bool ret = false;

	tlb_remove_check_page_size_change(tlb, HPAGE_PMD_SIZE);

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (!ptl)
		goto out_unlocked;

	orig_pmd = *pmd;
	if (is_huge_zero_pmd(orig_pmd))
		goto out;

	if (unlikely(!pmd_present(orig_pmd))) {
		VM_BUG_ON(thp_migration_supported() &&
				  !is_pmd_migration_entry(orig_pmd));
		goto out;
	}

	page = pmd_page(orig_pmd);
	/*
	 * If other processes are mapping this page, we couldn't discard
	 * the page unless they all do MADV_FREE so let's skip the page.
	 */
	if (page_mapcount(page) != 1)
		goto out;

	if (!trylock_page(page))
		goto out;

	/*
	 * If the caller asked to free only part of the huge page, split
	 * it first so MADV_FREE operates on the covered base pages only.
	 */
	if (next - addr != HPAGE_PMD_SIZE) {
		get_page(page);
		spin_unlock(ptl);
		split_huge_page(page);
		unlock_page(page);
		put_page(page);
		goto out_unlocked;
	}

	if (PageDirty(page))
		ClearPageDirty(page);
	unlock_page(page);

	if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) {
		pmdp_invalidate(vma, addr, pmd);
		orig_pmd = pmd_mkold(orig_pmd);
		orig_pmd = pmd_mkclean(orig_pmd);

		set_pmd_at(mm, addr, pmd, orig_pmd);
		tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
	}

	mark_page_lazyfree(page);
	ret = true;
out:
	spin_unlock(ptl);
out_unlocked:
	return ret;
}

static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd)
{
	pgtable_t pgtable;

	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
	pte_free(mm, pgtable);
	mm_dec_nr_ptes(mm);
}

int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
		 pmd_t *pmd, unsigned long addr)
{
	pmd_t orig_pmd;
	spinlock_t *ptl;

	tlb_remove_check_page_size_change(tlb, HPAGE_PMD_SIZE);

	ptl = __pmd_trans_huge_lock(pmd, vma);
	if (!ptl)
		return 0;
	/*
	 * For architectures like ppc64 we look at deposited pgtable
	 * when calling pmdp_huge_get_and_clear. So do the
	 * pgtable_trans_huge_withdraw after finishing pmdp related
	 * operations.
	 */
	orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd,
			tlb->fullmm);
	tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
	if (vma_is_dax(vma)) {
		if (arch_needs_pgtable_deposit())
			zap_deposited_table(tlb->mm, pmd);
		spin_unlock(ptl);
		if (is_huge_zero_pmd(orig_pmd))
			tlb_remove_page_size(tlb, pmd_page(orig_pmd), HPAGE_PMD_SIZE);
	} else if (is_huge_zero_pmd(orig_pmd)) {
		zap_deposited_table(tlb->mm, pmd);
		spin_unlock(ptl);
		tlb_remove_page_size(tlb, pmd_page(orig_pmd), HPAGE_PMD_SIZE);
	} else {
		struct page *page = NULL;
		int flush_needed = 1;

		if (pmd_present(orig_pmd)) {
			page = pmd_page(orig_pmd);
			page_remove_rmap(page, true);
			VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
			VM_BUG_ON_PAGE(!PageHead(page), page);
		} else if (thp_migration_supported()) {
			swp_entry_t entry;

			VM_BUG_ON(!is_pmd_migration_entry(orig_pmd));
			entry = pmd_to_swp_entry(orig_pmd);
			page = pfn_to_page(swp_offset(entry));
			flush_needed = 0;
		} else
			WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");

		if (PageAnon(page)) {
			zap_deposited_table(tlb->mm, pmd);
			add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
		} else {
			if (arch_needs_pgtable_deposit())
				zap_deposited_table(tlb->mm, pmd);
			add_mm_counter(tlb->mm, MM_FILEPAGES, -HPAGE_PMD_NR);
		}

		spin_unlock(ptl);
		if (flush_needed)
			tlb_remove_page_size(tlb, page, HPAGE_PMD_SIZE);
	}
	return 1;
}

#ifndef pmd_move_must_withdraw
static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl,
					 spinlock_t *old_pmd_ptl,
					 struct vm_area_struct *vma)
{
	/*
	 * With split pmd lock we also need to move the preallocated
	 * PTE page table if new_pmd is on a different PMD page table.
	 *
	 * We also don't deposit and withdraw tables for file pages.
	 */
	return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);
}
#endif

static pmd_t move_soft_dirty_pmd(pmd_t pmd)
{
#ifdef CONFIG_MEM_SOFT_DIRTY
	if (unlikely(is_pmd_migration_entry(pmd)))
		pmd = pmd_swp_mksoft_dirty(pmd);
	else if (pmd_present(pmd))
		pmd = pmd_mksoft_dirty(pmd);
#endif
	return pmd;
}

bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
		  unsigned long new_addr, unsigned long old_end,
		  pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush)
{
	spinlock_t *old_ptl, *new_ptl;
	pmd_t pmd;
	struct mm_struct *mm = vma->vm_mm;
	bool force_flush = false;

	if ((old_addr & ~HPAGE_PMD_MASK) ||
	    (new_addr & ~HPAGE_PMD_MASK) ||
	    old_end - old_addr < HPAGE_PMD_SIZE)
		return false;

	/*
	 * The destination pmd shouldn't be established; free_pgtables()
	 * should have released it.
	 */
	if (WARN_ON(!pmd_none(*new_pmd))) {
		VM_BUG_ON(pmd_trans_huge(*new_pmd));
		return false;
	}

	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_sem prevents deadlock.
	 */
	old_ptl = __pmd_trans_huge_lock(old_pmd, vma);
	if (old_ptl) {
		new_ptl = pmd_lockptr(mm, new_pmd);
		if (new_ptl != old_ptl)
			spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
		pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
		if (pmd_present(pmd) && pmd_dirty(pmd))
			force_flush = true;
		VM_BUG_ON(!pmd_none(*new_pmd));

		if (pmd_move_must_withdraw(new_ptl, old_ptl, vma)) {
			pgtable_t pgtable;
			pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
			pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
		}
		pmd = move_soft_dirty_pmd(pmd);
		set_pmd_at(mm, new_addr, new_pmd, pmd);
		if (new_ptl != old_ptl)
			spin_unlock(new_ptl);
		if (force_flush)
			flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
		else
			*need_flush = true;
		spin_unlock(old_ptl);
		return true;
	}
	return false;
}

/*
 * Returns
 *  - 0 if the PMD could not be locked
 *  - 1 if the PMD was locked but protections are unchanged and no TLB flush
 *    is necessary
 *  - HPAGE_PMD_NR if protections changed and a TLB flush is necessary
 */
int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, pgprot_t newprot, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	spinlock_t *ptl;
	pmd_t entry;
	bool preserve_write;
	int ret;

	ptl = __pmd_trans_huge_lock(pmd, vma);
	if (!ptl)
		return 0;

	preserve_write = prot_numa && pmd_write(*pmd);
	ret = 1;

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
	if (is_swap_pmd(*pmd)) {
		swp_entry_t entry = pmd_to_swp_entry(*pmd);

		VM_BUG_ON(!is_pmd_migration_entry(*pmd));
		if (is_write_migration_entry(entry)) {
			pmd_t newpmd;
			/*
			 * A protection check is difficult so
			 * just be safe and disable write
			 */
			make_migration_entry_read(&entry);
			newpmd = swp_entry_to_pmd(entry);
			if (pmd_swp_soft_dirty(*pmd))
				newpmd = pmd_swp_mksoft_dirty(newpmd);
			set_pmd_at(mm, addr, pmd, newpmd);
		}
		goto unlock;
	}
#endif

	/*
	 * Avoid trapping faults against the zero page. The read-only
	 * data is likely to be read-cached on the local CPU and
	 * local/remote hits to the zero page are not interesting.
	 */
	if (prot_numa && is_huge_zero_pmd(*pmd))
		goto unlock;

	if (prot_numa && pmd_protnone(*pmd))
		goto unlock;

	/*
	 * In case prot_numa, we are under down_read(mmap_sem). It's critical
	 * to not clear pmd intermittently to avoid race with MADV_DONTNEED
	 * which is also under down_read(mmap_sem):
	 *
	 *	CPU0:				CPU1:
	 *				change_huge_pmd(prot_numa=1)
	 *				 pmdp_huge_get_and_clear_notify()
	 * madvise_dontneed()
	 *  zap_pmd_range()
	 *   pmd_trans_huge(*pmd) == 0 (without ptl)
	 *   // skip the pmd
	 *				 set_pmd_at();
	 *				 // pmd is re-established
	 *
	 * The race makes MADV_DONTNEED miss the huge pmd and don't clear it
	 * which may break userspace.
	 *
	 * pmdp_invalidate() is required to make sure we don't miss
	 * dirty/young flags set by hardware.
	 */
	entry = pmdp_invalidate(vma, addr, pmd);

	entry = pmd_modify(entry, newprot);
	if (preserve_write)
		entry = pmd_mk_savedwrite(entry);
	ret = HPAGE_PMD_NR;
	set_pmd_at(mm, addr, pmd, entry);
	BUG_ON(vma_is_anonymous(vma) && !preserve_write && pmd_write(entry));
unlock:
	spin_unlock(ptl);
	return ret;
}

/*
 * Returns page table lock pointer if a given pmd maps a thp, NULL otherwise.
 *
 * Note that if it returns page table lock pointer, this routine returns
 * without unlocking page table lock. So callers must unlock it.
 */
spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
{
	spinlock_t *ptl;
	ptl = pmd_lock(vma->vm_mm, pmd);
	if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) ||
			pmd_devmap(*pmd)))
		return ptl;
	spin_unlock(ptl);
	return NULL;
}
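
/*
 * Usage sketch (illustrative), as in zap_huge_pmd()/madvise_free_huge_pmd():
 *
 *	ptl = __pmd_trans_huge_lock(pmd, vma);
 *	if (!ptl)
 *		return 0;	// not a huge pmd, fall back to the pte path
 *	... operate on the huge pmd with the lock held ...
 *	spin_unlock(ptl);
 */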

/*
 * Returns page table lock pointer if a given pud maps a thp, NULL otherwise.
 *
 * Note that if it returns page table lock pointer, this routine returns
 * without unlocking page table lock. So callers must unlock it.
 */
spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma)
{
	spinlock_t *ptl;

	ptl = pud_lock(vma->vm_mm, pud);
	if (likely(pud_trans_huge(*pud) || pud_devmap(*pud)))
		return ptl;
	spin_unlock(ptl);
	return NULL;
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
		 pud_t *pud, unsigned long addr)
{
	pud_t orig_pud;
	spinlock_t *ptl;

	ptl = __pud_trans_huge_lock(pud, vma);
	if (!ptl)
		return 0;
	/*
	 * For architectures like ppc64 we look at the deposited pgtable
	 * when calling pudp_huge_get_and_clear. So do the
	 * pgtable_trans_huge_withdraw after finishing pudp related
	 * operations.
	 */
	orig_pud = pudp_huge_get_and_clear_full(tlb->mm, addr, pud,
			tlb->fullmm);
	tlb_remove_pud_tlb_entry(tlb, pud, addr);
	if (vma_is_dax(vma)) {
		spin_unlock(ptl);
		/* No zero page support yet */
	} else {
		/* No support for anonymous PUD pages yet */
		BUG();
	}
	return 1;
}

static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud,
		unsigned long haddr)
{
	VM_BUG_ON(haddr & ~HPAGE_PUD_MASK);
	VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
	VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma);
	VM_BUG_ON(!pud_trans_huge(*pud) && !pud_devmap(*pud));

	count_vm_event(THP_SPLIT_PUD);

	pudp_huge_clear_flush_notify(vma, haddr, pud);
}

void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
		unsigned long address)
{
	spinlock_t *ptl;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long haddr = address & HPAGE_PUD_MASK;

	mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PUD_SIZE);
	ptl = pud_lock(mm, pud);
	if (unlikely(!pud_trans_huge(*pud) && !pud_devmap(*pud)))
		goto out;
	__split_huge_pud_locked(vma, pud, haddr);

out:
	spin_unlock(ptl);
	/*
	 * No need to double call mmu_notifier->invalidate_range() callback as
	 * the above pudp_huge_clear_flush_notify() did already call it.
	 */
	mmu_notifier_invalidate_range_only_end(mm, haddr, haddr +
					       HPAGE_PUD_SIZE);
}
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */

static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
		unsigned long haddr, pmd_t *pmd)
{
	struct mm_struct *mm = vma->vm_mm;
	pgtable_t pgtable;
	pmd_t _pmd;
	int i;

	/*
	 * Leave pmd empty until pte is filled. Note that it is fine to delay
	 * notification until mmu_notifier_invalidate_range_end() as we are
	 * replacing a zero pmd write protected page with a zero pte write
	 * protected page.
	 *
	 * See Documentation/vm/mmu_notifier.txt
	 */
	pmdp_huge_clear_flush(vma, haddr, pmd);

	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
	pmd_populate(mm, &_pmd, pgtable);

	for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
		pte_t *pte, entry;
		entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot);
		entry = pte_mkspecial(entry);
		pte = pte_offset_map(&_pmd, haddr);
		VM_BUG_ON(!pte_none(*pte));
		set_pte_at(mm, haddr, pte, entry);
		pte_unmap(pte);
	}
	smp_wmb(); /* make pte visible before pmd */
	pmd_populate(mm, pmd, pgtable);
}

static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long haddr, bool freeze)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;
	pgtable_t pgtable;
	pmd_t old_pmd, _pmd;
	bool young, write, soft_dirty, pmd_migration = false;
	unsigned long addr;
	int i;

	VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);
	VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
	VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma);
	VM_BUG_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd)
				&& !pmd_devmap(*pmd));

	count_vm_event(THP_SPLIT_PMD);

	if (!vma_is_anonymous(vma)) {
		_pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd);
		/*
		 * For file-backed and DAX mappings there is nothing to remap
		 * at pte granularity: clear the pmd, drop the deposited page
		 * table if the architecture requires one, and unaccount the
		 * mapping. The pages can be re-faulted from the page cache.
		 */
		if (arch_needs_pgtable_deposit())
			zap_deposited_table(mm, pmd);
		if (vma_is_dax(vma))
			return;
		page = pmd_page(_pmd);
		if (!PageReferenced(page) && pmd_young(_pmd))
			SetPageReferenced(page);
		page_remove_rmap(page, true);
		put_page(page);
		add_mm_counter(mm, MM_FILEPAGES, -HPAGE_PMD_NR);
		return;
	} else if (is_huge_zero_pmd(*pmd)) {
		/*
		 * FIXME: Do we want to invalidate secondary mmu by calling
		 * mmu_notifier_invalidate_range() see comments below inside
		 * __split_huge_pmd() ?
		 *
		 * We are going from a zero huge page write protected to zero
		 * small pages also write protected so it does not seem useful
		 * to invalidate secondary mmu at this time.
		 */
		return __split_huge_zero_page_pmd(vma, haddr, pmd);
	}

	/*
	 * Up to this point the pmd is present and huge and userland has the
	 * whole access to the hugepage during the split (which happens in
	 * place). If we overwrite the pmd with the not-huge version pointing
	 * to the pte here (which of course we could if all CPUs were bug
	 * free), userland could trigger a small page size TLB miss on the
	 * small sized TLB while the hugepage TLB entry is still established in
	 * the huge TLB. Some CPU doesn't like that.
	 * See http://support.amd.com/us/Processor_TechDocs/41322.pdf, Erratum
	 * 383 on page 93. Intel should be safe but it also warns that it's
	 * only safe if the permission and cache attributes of the two entries
	 * loaded in the two TLBs are identical (which should be the case
	 * here). But it is generally safer to never allow small and huge TLB
	 * entries for the same virtual address to be loaded simultaneously.
	 * So instead of doing "pmd_populate(); flush_pmd_tlb_range();" we
	 * first mark the current pmd notpresent (atomically because here the
	 * pmd_trans_huge must remain set at all times on the pmd until the
	 * split is complete for this pmd), then we flush the SMP TLB and
	 * finally we write the non-huge version of the pmd entry with
	 * pmd_populate.
	 */
	old_pmd = pmdp_invalidate(vma, haddr, pmd);

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
	pmd_migration = is_pmd_migration_entry(old_pmd);
	if (pmd_migration) {
		swp_entry_t entry;

		entry = pmd_to_swp_entry(old_pmd);
		page = pfn_to_page(swp_offset(entry));
	} else
#endif
		page = pmd_page(old_pmd);
	VM_BUG_ON_PAGE(!page_count(page), page);
	page_ref_add(page, HPAGE_PMD_NR - 1);
	if (pmd_dirty(old_pmd))
		SetPageDirty(page);
	write = pmd_write(old_pmd);
	young = pmd_young(old_pmd);
	soft_dirty = pmd_soft_dirty(old_pmd);

	/*
	 * Withdraw the table only after we mark the pmd entry invalid.
	 * This is critical for some architectures (Power).
	 */
	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
	pmd_populate(mm, &_pmd, pgtable);

	for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
		pte_t entry, *pte;
		/*
		 * Note that NUMA hinting access restrictions are not
		 * transferred to avoid any possibility of altering
		 * permissions across VMAs.
		 */
		if (freeze || pmd_migration) {
			swp_entry_t swp_entry;
			swp_entry = make_migration_entry(page + i, write);
			entry = swp_entry_to_pte(swp_entry);
			if (soft_dirty)
				entry = pte_swp_mksoft_dirty(entry);
		} else {
			entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot));
			entry = maybe_mkwrite(entry, vma);
			if (!write)
				entry = pte_wrprotect(entry);
			if (!young)
				entry = pte_mkold(entry);
			if (soft_dirty)
				entry = pte_mksoft_dirty(entry);
		}
		pte = pte_offset_map(&_pmd, addr);
		BUG_ON(!pte_none(*pte));
		set_pte_at(mm, addr, pte, entry);
		atomic_inc(&page[i]._mapcount);
		pte_unmap(pte);
	}

	/*
	 * Set PG_double_map before dropping compound_mapcount to avoid
	 * false-negative page_mapped().
	 */
	if (compound_mapcount(page) > 1 && !TestSetPageDoubleMap(page)) {
		for (i = 0; i < HPAGE_PMD_NR; i++)
			atomic_inc(&page[i]._mapcount);
	}

	if (atomic_add_negative(-1, compound_mapcount_ptr(page))) {
		/* Last compound_mapcount is gone. */
		__dec_node_page_state(page, NR_ANON_THPS);
		if (TestClearPageDoubleMap(page)) {
			/* No need in mapcount reference anymore */
			for (i = 0; i < HPAGE_PMD_NR; i++)
				atomic_dec(&page[i]._mapcount);
		}
	}

	smp_wmb(); /* make pte visible before pmd */
	pmd_populate(mm, pmd, pgtable);

	if (freeze) {
		for (i = 0; i < HPAGE_PMD_NR; i++) {
			page_remove_rmap(page + i, false);
			put_page(page + i);
		}
	}
}
2216
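/*
 * Split the huge pmd mapping @address: take the pmd lock, check that the
 * pmd still maps the expected page (when @page is given), and delegate to
 * __split_huge_pmd_locked() under mmu notifier protection.
 */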
void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct page *page)
{
	spinlock_t *ptl;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long haddr = address & HPAGE_PMD_MASK;

	mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PMD_SIZE);
	ptl = pmd_lock(mm, pmd);

	/*
	 * If the caller asks to set up migration entries, we need a page to
	 * check the pmd against. Otherwise we can end up replacing the
	 * wrong page.
	 */
	VM_BUG_ON(freeze && !page);
	if (page && page != pmd_page(*pmd))
		goto out;

	if (pmd_trans_huge(*pmd)) {
		page = pmd_page(*pmd);
		if (PageMlocked(page))
			clear_page_mlock(page);
	} else if (!(pmd_devmap(*pmd) || is_pmd_migration_entry(*pmd)))
		goto out;
	__split_huge_pmd_locked(vma, pmd, haddr, freeze);
out:
	spin_unlock(ptl);
	/*
	 * No need to double call the mmu_notifier->invalidate_range()
	 * callback. There are 3 cases to consider inside
	 * __split_huge_pmd_locked():
	 *  1) pmdp_huge_clear_flush_notify() calls invalidate_range(),
	 *     obviously.
	 *  2) __split_huge_zero_page_pmd() replaces a read-only zero huge
	 *     page with read-only zero small pages; any write fault will
	 *     trigger a flush_notify before pointing to a new page, so it
	 *     is fine if the secondary mmu keeps pointing to the old zero
	 *     page in the meantime.
	 *  3) Splitting a huge pmd into ptes pointing to the same page
	 *     needs no secondary tlb invalidation: the entries are all
	 *     still valid, and any further changes to the page will be
	 *     caught by the CPU MMU cache coherency protocol, so the page
	 *     table can be updated without notifying MMU notifiers.
	 */
	mmu_notifier_invalidate_range_only_end(mm, haddr, haddr +
					       HPAGE_PMD_SIZE);
}

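/*
 * Walk the page tables down to the pmd covering @address and split it if
 * it is huge. A missing level simply means there is no huge pmd to split.
 */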
void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
		bool freeze, struct page *page)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(vma->vm_mm, address);
	if (!pgd_present(*pgd))
		return;

	p4d = p4d_offset(pgd, address);
	if (!p4d_present(*p4d))
		return;

	pud = pud_offset(p4d, address);
	if (!pud_present(*pud))
		return;

	pmd = pmd_offset(pud, address);

	__split_huge_pmd(vma, pmd, address, freeze, page);
}

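/*
 * Called when a vma's boundaries are about to move: split any huge pmd
 * that straddles a boundary which will no longer be huge-page aligned, so
 * that no pmd mapping can cross the new vma limits.
 */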
void vma_adjust_trans_huge(struct vm_area_struct *vma,
			     unsigned long start,
			     unsigned long end,
			     long adjust_next)
{
	/*
	 * If the new start address isn't hpage aligned and it could
	 * previously contain a hugepage: check if we need to split
	 * a huge pmd.
	 */
	if (start & ~HPAGE_PMD_MASK &&
	    (start & HPAGE_PMD_MASK) >= vma->vm_start &&
	    (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
		split_huge_pmd_address(vma, start, false, NULL);

	/*
	 * If the new end address isn't hpage aligned and it could
	 * previously contain a hugepage: check if we need to split
	 * a huge pmd.
	 */
	if (end & ~HPAGE_PMD_MASK &&
	    (end & HPAGE_PMD_MASK) >= vma->vm_start &&
	    (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
		split_huge_pmd_address(vma, end, false, NULL);

	/*
	 * If we're also updating vma->vm_next->vm_start, and the new
	 * vm_next->vm_start isn't hpage aligned and it could previously
	 * contain a hugepage: check if we need to split a huge pmd.
	 */
	if (adjust_next > 0) {
		struct vm_area_struct *next = vma->vm_next;
		unsigned long nstart = next->vm_start;
		nstart += adjust_next << PAGE_SHIFT;
		if (nstart & ~HPAGE_PMD_MASK &&
		    (nstart & HPAGE_PMD_MASK) >= next->vm_start &&
		    (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end)
			split_huge_pmd_address(next, nstart, false, NULL);
	}
}

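/*
 * "Freeze" @page for splitting: unmap it from all of its mappings while
 * its refcount is pinned. For anonymous pages the ptes are replaced with
 * migration entries (TTU_SPLIT_FREEZE) so unfreeze_page() can re-establish
 * the mappings after the split.
 */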
static void freeze_page(struct page *page)
{
	enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS |
		TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD;
	bool unmap_success;

	VM_BUG_ON_PAGE(!PageHead(page), page);

	if (PageAnon(page))
		ttu_flags |= TTU_SPLIT_FREEZE;

	unmap_success = try_to_unmap(page, ttu_flags);
	VM_BUG_ON_PAGE(!unmap_success, page);
}

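/*
 * Undo freeze_page(): replace the migration entries with real ptes again.
 * After a successful split each former subpage is remapped individually.
 */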
static void unfreeze_page(struct page *page)
{
	int i;
	if (PageTransHuge(page)) {
		remove_migration_ptes(page, page, true);
	} else {
		for (i = 0; i < HPAGE_PMD_NR; i++)
			remove_migration_ptes(page + i, page + i, true);
	}
}

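/*
 * Turn tail page number @tail of the compound page @head into an
 * independent page. Called with the lru lock held and the head page's
 * refcount frozen.
 */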
static void __split_huge_page_tail(struct page *head, int tail,
		struct lruvec *lruvec, struct list_head *list)
{
	struct page *page_tail = head + tail;

	VM_BUG_ON_PAGE(atomic_read(&page_tail->_mapcount) != -1, page_tail);
	VM_BUG_ON_PAGE(page_ref_count(page_tail) != 0, page_tail);

	/*
	 * tail_page->_refcount is zero and not changing from under us. But
	 * get_page_unless_zero() may be running from under us on the
	 * tail_page. If we used atomic_set() below instead of atomic_inc()
	 * or atomic_add(), we would then run atomic_set() concurrently with
	 * get_page_unless_zero(), and atomic_set() is implemented in C, not
	 * using locked ops. spin_unlock on x86 sometimes uses locked ops
	 * because of PPro errata 66, 92, so unless somebody can guarantee
	 * atomic_set() here would be safe on all archs (and not only on
	 * x86), it's safer to use atomic_inc()/atomic_add().
	 */
	if (PageAnon(head) && !PageSwapCache(head)) {
		page_ref_inc(page_tail);
	} else {
		/* Additional pin to radix tree */
		page_ref_add(page_tail, 2);
	}

	page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
	page_tail->flags |= (head->flags &
			((1L << PG_referenced) |
			 (1L << PG_swapbacked) |
			 (1L << PG_swapcache) |
			 (1L << PG_mlocked) |
			 (1L << PG_uptodate) |
			 (1L << PG_active) |
			 (1L << PG_locked) |
			 (1L << PG_unevictable) |
			 (1L << PG_dirty)));

	/*
	 * After clearing PageTail the gup refcount can be released.
	 * Page flags must also be visible before we make the page
	 * non-compound.
	 */
	smp_wmb();

	clear_compound_head(page_tail);

	if (page_is_young(head))
		set_page_young(page_tail);
	if (page_is_idle(head))
		set_page_idle(page_tail);

	/* ->mapping in first tail page is compound_mapcount */
	VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
			page_tail);
	page_tail->mapping = head->mapping;

	page_tail->index = head->index + tail;
	page_cpupid_xchg_last(page_tail, page_cpupid_last(head));
	lru_add_page_tail(head, page_tail, lruvec, list);
}

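/*
 * Split the compound page behind @page into independent pages. Called
 * with the zone lru lock held (irqs off, @flags from spin_lock_irqsave)
 * and, for file pages, with the mapping's tree_lock held; both are
 * dropped here.
 */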
static void __split_huge_page(struct page *page, struct list_head *list,
		unsigned long flags)
{
	struct page *head = compound_head(page);
	struct zone *zone = page_zone(head);
	struct lruvec *lruvec;
	pgoff_t end = -1;
	int i;

	lruvec = mem_cgroup_page_lruvec(head, zone->zone_pgdat);

	/* complete memcg works before add pages to LRU */
	mem_cgroup_split_huge_fixup(head);

	if (!PageAnon(page))
		end = DIV_ROUND_UP(i_size_read(head->mapping->host), PAGE_SIZE);

	for (i = HPAGE_PMD_NR - 1; i >= 1; i--) {
		__split_huge_page_tail(head, i, lruvec, list);
		/* Some pages can be beyond i_size: drop them from page cache */
		if (head[i].index >= end) {
			__ClearPageDirty(head + i);
			__delete_from_page_cache(head + i, NULL);
			if (IS_ENABLED(CONFIG_SHMEM) && PageSwapBacked(head))
				shmem_uncharge(head->mapping->host, 1);
			put_page(head + i);
		}
	}

	ClearPageCompound(head);
	/* See comment in __split_huge_page_tail() */
	if (PageAnon(head)) {
		/* Additional pin to radix tree of swap cache */
		if (PageSwapCache(head))
			page_ref_add(head, 2);
		else
			page_ref_inc(head);
	} else {
		/* Additional pin to radix tree */
		page_ref_add(head, 2);
		spin_unlock(&head->mapping->tree_lock);
	}

	spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags);

	unfreeze_page(head);

	for (i = 0; i < HPAGE_PMD_NR; i++) {
		struct page *subpage = head + i;
		if (subpage == page)
			continue;
		unlock_page(subpage);

		/*
		 * Subpages may be freed if there wasn't any mapping,
		 * like if add_to_swap() is running on a lru page that
		 * had its mapping zapped. And freeing these pages
		 * requires taking the lru_lock, so we do the put_page
		 * of the tail pages after the split is complete.
		 */
		put_page(subpage);
	}
}

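/*
 * Return the total number of mappings of @page: one for each pmd-level
 * mapping of the compound page plus one for each pte-level mapping of a
 * subpage, compensating for the double accounting done under
 * PageDoubleMap().
 */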
int total_mapcount(struct page *page)
{
	int i, compound, ret;

	VM_BUG_ON_PAGE(PageTail(page), page);

	if (likely(!PageCompound(page)))
		return atomic_read(&page->_mapcount) + 1;

	compound = compound_mapcount(page);
	if (PageHuge(page))
		return compound;
	ret = compound;
	for (i = 0; i < HPAGE_PMD_NR; i++)
		ret += atomic_read(&page[i]._mapcount) + 1;
	/* File pages have compound_mapcount included in _mapcount */
	if (!PageAnon(page))
		return ret - compound * HPAGE_PMD_NR;
	if (PageDoubleMap(page))
		ret -= HPAGE_PMD_NR;
	return ret;
}

/*
 * This calculates accurately how many mappings a transparent hugepage
 * has (unlike page_mapcount(), which isn't fully accurate). This full
 * accuracy is primarily needed to know whether copy-on-write faults can
 * reuse the page and change the mapping to read-write instead of
 * copying it. At the same time this returns the total_mapcount too.
 *
 * The function returns the highest mapcount any one of the subpages
 * has. If the return value is one, even if different processes are
 * mapping different subpages of the transparent hugepage, they can
 * all reuse it, because each process is reusing a different subpage.
 *
 * The total_mapcount instead counts all virtual mappings of the
 * subpages. If the total_mapcount is equal to "one", it tells the
 * caller that all mappings belong to the same "mm" and in turn the
 * anon_vma of the transparent hugepage can become the vma->anon_vma
 * local one, as no other process may be mapping any of the subpages.
 *
 * It would be more accurate to replace page_mapcount() with
 * page_trans_huge_mapcount(); however, we only use
 * page_trans_huge_mapcount() in the copy-on-write faults where we
 * need full accuracy to avoid breaking page pinning, because
 * page_trans_huge_mapcount() is slower than page_mapcount().
 */
int page_trans_huge_mapcount(struct page *page, int *total_mapcount)
{
	int i, ret, _total_mapcount, mapcount;

	/* hugetlbfs shouldn't call it */
	VM_BUG_ON_PAGE(PageHuge(page), page);

	if (likely(!PageTransCompound(page))) {
		mapcount = atomic_read(&page->_mapcount) + 1;
		if (total_mapcount)
			*total_mapcount = mapcount;
		return mapcount;
	}

	page = compound_head(page);

	_total_mapcount = ret = 0;
	for (i = 0; i < HPAGE_PMD_NR; i++) {
		mapcount = atomic_read(&page[i]._mapcount) + 1;
		ret = max(ret, mapcount);
		_total_mapcount += mapcount;
	}
	if (PageDoubleMap(page)) {
		ret -= 1;
		_total_mapcount -= HPAGE_PMD_NR;
	}
	mapcount = compound_mapcount(page);
	ret += mapcount;
	_total_mapcount += mapcount;
	if (total_mapcount)
		*total_mapcount = _total_mapcount;
	return ret;
}

/* Racy check whether the huge page can be split */
bool can_split_huge_page(struct page *page, int *pextra_pins)
{
	int extra_pins;

	/* Additional pins from radix tree */
	if (PageAnon(page))
		extra_pins = PageSwapCache(page) ? HPAGE_PMD_NR : 0;
	else
		extra_pins = HPAGE_PMD_NR;
	if (pextra_pins)
		*pextra_pins = extra_pins;
	return total_mapcount(page) == page_count(page) - extra_pins - 1;
}

/*
 * This function splits a huge page into normal pages. @page can point to
 * any subpage of the huge page to split; the split does not change the
 * position of @page.
 *
 * The caller must hold the only extra pin on @page, otherwise the split
 * fails with -EBUSY. The huge page must be locked.
 *
 * If @list is null, tail pages will be added to the LRU list, otherwise
 * to @list.
 *
 * Both head page and tail pages will inherit mapping, flags, and so on
 * from the hugepage.
 *
 * The GUP pin and PG_locked are transferred to @page. The remaining
 * subpages can be freed if they are not mapped.
 *
 * Returns 0 if the hugepage was split successfully, otherwise -EBUSY if
 * the page is pinned or if the anon_vma disappeared from under us.
 */
int split_huge_page_to_list(struct page *page, struct list_head *list)
{
	struct page *head = compound_head(page);
	struct pglist_data *pgdata = NODE_DATA(page_to_nid(head));
	struct anon_vma *anon_vma = NULL;
	struct address_space *mapping = NULL;
	int count, mapcount, extra_pins, ret;
	bool mlocked;
	unsigned long flags;

	VM_BUG_ON_PAGE(is_huge_zero_page(page), page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageCompound(page), page);

	if (PageWriteback(page))
		return -EBUSY;

	if (PageAnon(head)) {
		/*
		 * The caller does not necessarily hold an mmap_sem that
		 * would prevent the anon_vma from disappearing, so we first
		 * take a reference to it and then lock the anon_vma for
		 * write. This is similar to page_lock_anon_vma_read(),
		 * except the write lock is taken to serialise against
		 * parallel split or collapse operations.
		 */
		anon_vma = page_get_anon_vma(head);
		if (!anon_vma) {
			ret = -EBUSY;
			goto out;
		}
		mapping = NULL;
		anon_vma_lock_write(anon_vma);
	} else {
		mapping = head->mapping;

		/* Truncated ? */
		if (!mapping) {
			ret = -EBUSY;
			goto out;
		}

		anon_vma = NULL;
		i_mmap_lock_read(mapping);
	}

	/*
	 * Racy check whether the page can be split, done before
	 * freeze_page() splits the PMDs.
	 */
	if (!can_split_huge_page(head, &extra_pins)) {
		ret = -EBUSY;
		goto out_unlock;
	}

	mlocked = PageMlocked(page);
	freeze_page(head);
	VM_BUG_ON_PAGE(compound_mapcount(head), head);

	/* Make sure the page is not on a per-cpu pagevec as it takes a pin */
	if (mlocked)
		lru_add_drain();

	/* prevent PageLRU from going away from under us, and freeze lru stats */
	spin_lock_irqsave(zone_lru_lock(page_zone(head)), flags);

	if (mapping) {
		void **pslot;

		spin_lock(&mapping->tree_lock);
		pslot = radix_tree_lookup_slot(&mapping->page_tree,
				page_index(head));
		/*
		 * Check if the head page is present in the radix tree.
		 * We assume all tails are present too, if the head is there.
		 */
		if (radix_tree_deref_slot_protected(pslot,
					&mapping->tree_lock) != head)
			goto fail;
	}

	/* Prevent deferred_split_scan() touching ->_refcount */
	spin_lock(&pgdata->split_queue_lock);
	count = page_count(head);
	mapcount = total_mapcount(head);
	if (!mapcount && page_ref_freeze(head, 1 + extra_pins)) {
		if (!list_empty(page_deferred_list(head))) {
			pgdata->split_queue_len--;
			list_del(page_deferred_list(head));
		}
		if (mapping)
			__dec_node_page_state(page, NR_SHMEM_THPS);
		spin_unlock(&pgdata->split_queue_lock);
		__split_huge_page(page, list, flags);
		if (PageSwapCache(head)) {
			swp_entry_t entry = { .val = page_private(head) };

			ret = split_swap_cluster(entry);
		} else
			ret = 0;
	} else {
		if (IS_ENABLED(CONFIG_DEBUG_VM) && mapcount) {
			pr_alert("total_mapcount: %d, page_count(): %d\n",
					mapcount, count);
			if (PageTail(page))
				dump_page(head, NULL);
			dump_page(page, "total_mapcount(head) > 0");
			BUG();
		}
		spin_unlock(&pgdata->split_queue_lock);
fail:		if (mapping)
			spin_unlock(&mapping->tree_lock);
		spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags);
		unfreeze_page(head);
		ret = -EBUSY;
	}

out_unlock:
	if (anon_vma) {
		anon_vma_unlock_write(anon_vma);
		put_anon_vma(anon_vma);
	}
	if (mapping)
		i_mmap_unlock_read(mapping);
out:
	count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
	return ret;
}

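/*
 * Compound page destructor for THP: make sure the page is off the
 * deferred split queue before handing it back to the page allocator.
 */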
void free_transhuge_page(struct page *page)
{
	struct pglist_data *pgdata = NODE_DATA(page_to_nid(page));
	unsigned long flags;

	spin_lock_irqsave(&pgdata->split_queue_lock, flags);
	if (!list_empty(page_deferred_list(page))) {
		pgdata->split_queue_len--;
		list_del(page_deferred_list(page));
	}
	spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
	free_compound_page(page);
}

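/*
 * Queue a partially unmapped THP on the per-node deferred split queue.
 * The deferred_split_shrinker will split it under memory pressure,
 * releasing the memory that is no longer mapped.
 */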
void deferred_split_huge_page(struct page *page)
{
	struct pglist_data *pgdata = NODE_DATA(page_to_nid(page));
	unsigned long flags;

	VM_BUG_ON_PAGE(!PageTransHuge(page), page);

	spin_lock_irqsave(&pgdata->split_queue_lock, flags);
	if (list_empty(page_deferred_list(page))) {
		count_vm_event(THP_DEFERRED_SPLIT_PAGE);
		list_add_tail(page_deferred_list(page), &pgdata->split_queue);
		pgdata->split_queue_len++;
	}
	spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
}

static unsigned long deferred_split_count(struct shrinker *shrink,
		struct shrink_control *sc)
{
	struct pglist_data *pgdata = NODE_DATA(sc->nid);
	return READ_ONCE(pgdata->split_queue_len);
}

static unsigned long deferred_split_scan(struct shrinker *shrink,
		struct shrink_control *sc)
{
	struct pglist_data *pgdata = NODE_DATA(sc->nid);
	unsigned long flags;
	LIST_HEAD(list), *pos, *next;
	struct page *page;
	int split = 0;

	spin_lock_irqsave(&pgdata->split_queue_lock, flags);
	/* Take pin on all head pages to avoid freeing them under us */
	list_for_each_safe(pos, next, &pgdata->split_queue) {
		page = list_entry((void *)pos, struct page, mapping);
		page = compound_head(page);
		if (get_page_unless_zero(page)) {
			list_move(page_deferred_list(page), &list);
		} else {
			/* We lost race with put_compound_page() */
			list_del_init(page_deferred_list(page));
			pgdata->split_queue_len--;
		}
		if (!--sc->nr_to_scan)
			break;
	}
	spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);

	list_for_each_safe(pos, next, &list) {
		page = list_entry((void *)pos, struct page, mapping);
		if (!trylock_page(page))
			goto next;
		/* split_huge_page() removes page from list on success */
		if (!split_huge_page(page))
			split++;
		unlock_page(page);
next:
		put_page(page);
	}

	spin_lock_irqsave(&pgdata->split_queue_lock, flags);
	list_splice_tail(&list, &pgdata->split_queue);
	spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);

	/*
	 * Stop the shrinker if we didn't split any page, but the queue is
	 * empty. This can happen if the pages were freed under us.
	 */
	if (!split && list_empty(&pgdata->split_queue))
		return SHRINK_STOP;
	return split;
}

static struct shrinker deferred_split_shrinker = {
	.count_objects = deferred_split_count,
	.scan_objects = deferred_split_scan,
	.seeks = DEFAULT_SEEKS,
	.flags = SHRINKER_NUMA_AWARE,
};

#ifdef CONFIG_DEBUG_FS
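/*
 * Debugfs knob: writing 1 to <debugfs>/split_huge_pages (typically
 * /sys/kernel/debug/split_huge_pages) walks every populated zone and
 * tries to split every THP found on the LRU, reporting the result via
 * pr_info().
 */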
static int split_huge_pages_set(void *data, u64 val)
{
	struct zone *zone;
	struct page *page;
	unsigned long pfn, max_zone_pfn;
	unsigned long total = 0, split = 0;

	if (val != 1)
		return -EINVAL;

	for_each_populated_zone(zone) {
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
			if (!pfn_valid(pfn))
				continue;

			page = pfn_to_page(pfn);
			if (!get_page_unless_zero(page))
				continue;

			if (zone != page_zone(page))
				goto next;

			if (!PageHead(page) || PageHuge(page) || !PageLRU(page))
				goto next;

			total++;
			lock_page(page);
			if (!split_huge_page(page))
				split++;
			unlock_page(page);
next:
			put_page(page);
		}
	}

	pr_info("%lu of %lu THP split\n", split, total);

	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(split_huge_pages_fops, NULL, split_huge_pages_set,
		"%llu\n");

static int __init split_huge_pages_debugfs(void)
{
	void *ret;

	ret = debugfs_create_file("split_huge_pages", 0200, NULL, NULL,
			&split_huge_pages_fops);
	if (!ret)
		pr_warn("Failed to create split_huge_pages in debugfs\n");
	return 0;
}
late_initcall(split_huge_pages_debugfs);
#endif

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
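/*
 * Replace the huge pmd mapping of @page found by @pvmw with a pmd
 * migration entry, dirtying the page if the pmd was dirty. Counterpart
 * of remove_migration_pmd() below.
 */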
void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
		struct page *page)
{
	struct vm_area_struct *vma = pvmw->vma;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address = pvmw->address;
	pmd_t pmdval;
	swp_entry_t entry;
	pmd_t pmdswp;

	if (!(pvmw->pmd && !pvmw->pte))
		return;

	mmu_notifier_invalidate_range_start(mm, address,
			address + HPAGE_PMD_SIZE);

	flush_cache_range(vma, address, address + HPAGE_PMD_SIZE);
	pmdval = *pvmw->pmd;
	pmdp_invalidate(vma, address, pvmw->pmd);
	if (pmd_dirty(pmdval))
		set_page_dirty(page);
	entry = make_migration_entry(page, pmd_write(pmdval));
	pmdswp = swp_entry_to_pmd(entry);
	if (pmd_soft_dirty(pmdval))
		pmdswp = pmd_swp_mksoft_dirty(pmdswp);
	set_pmd_at(mm, address, pvmw->pmd, pmdswp);
	page_remove_rmap(page, true);
	put_page(page);

	mmu_notifier_invalidate_range_end(mm, address,
			address + HPAGE_PMD_SIZE);
}

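/*
 * Restore a huge pmd for the migration target page @new: convert the pmd
 * migration entry back into a present huge pmd and hook @new into the
 * rmap.
 */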
void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
{
	struct vm_area_struct *vma = pvmw->vma;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address = pvmw->address;
	unsigned long mmun_start = address & HPAGE_PMD_MASK;
	pmd_t pmde;
	swp_entry_t entry;

	if (!(pvmw->pmd && !pvmw->pte))
		return;

	entry = pmd_to_swp_entry(*pvmw->pmd);
	get_page(new);
	pmde = pmd_mkold(mk_huge_pmd(new, vma->vm_page_prot));
	if (pmd_swp_soft_dirty(*pvmw->pmd))
		pmde = pmd_mksoft_dirty(pmde);
	if (is_write_migration_entry(entry))
		pmde = maybe_pmd_mkwrite(pmde, vma);

	flush_cache_range(vma, mmun_start, mmun_start + HPAGE_PMD_SIZE);
	page_add_anon_rmap(new, vma, mmun_start, true);
	set_pmd_at(mm, mmun_start, pvmw->pmd, pmde);
	if (vma->vm_flags & VM_LOCKED)
		mlock_vma_page(new);
	update_mmu_cache_pmd(vma, address, pvmw->pmd);
}
#endif
