/*
 * Common page table handling for 64-bit Book3S processors, covering
 * both the hash and radix MMU modes. Includes the transparent hugepage
 * (THP) pmd helpers and the memory hotplug section mapping hooks.
 */

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <misc/cxl-base.h>

#include <asm/pgalloc.h>
#include <asm/tlb.h>

#include "mmu_decl.h"
#include <trace/events/thp.h>

int (*register_process_table)(unsigned long base, unsigned long page_size,
			      unsigned long tbl_size);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * This is called when relaxing access to a hugepage, e.g. from the page
 * fault path when only a minor update such as setting _PAGE_ACCESSED is
 * needed. If the entry actually changed, update the access flags in the
 * pmd and flush the TLB range covered by the huge page.
 */
int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp, pmd_t entry, int dirty)
{
	int changed;
#ifdef CONFIG_DEBUG_VM
	WARN_ON(!pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
	assert_spin_locked(&vma->vm_mm->page_table_lock);
#endif
	changed = !pmd_same(*(pmdp), entry);
	if (changed) {
		__ptep_set_access_flags(vma->vm_mm, pmdp_ptep(pmdp),
					pmd_pte(entry), address);
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}
	return changed;
}
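
/*
 * Illustrative sketch only, not code from this file: generic THP fault
 * handling relaxes the access bits on a huge pmd roughly along these
 * lines. The names orig_pmd, haddr and write are assumptions standing in
 * for caller state read under the pmd lock:
 *
 *	pmd_t entry = pmd_mkyoung(orig_pmd);
 *
 *	if (write)
 *		entry = pmd_mkdirty(entry);
 *	if (pmdp_set_access_flags(vma, haddr, pmdp, entry, write))
 *		update_mmu_cache_pmd(vma, address, pmdp);
 */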

int pmdp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long address, pmd_t *pmdp)
{
	return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp);
}

/*
 * Set a new huge pmd. Updates of an existing pmd entry must go via
 * pmd_hugepage_update() instead.
 */
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
#ifdef CONFIG_DEBUG_VM
	WARN_ON(pte_present(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
	assert_spin_locked(&mm->page_table_lock);
	WARN_ON(!(pmd_trans_huge(pmd) || pmd_devmap(pmd)));
#endif
	trace_hugepage_set_pmd(addr, pmd_val(pmd));
	return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
}

static void do_nothing(void *unused)
{

}

/*
 * Serialize against lockless page table walkers, which walk the page
 * tables with local interrupts disabled and may cast a pmd_t to a pte_t.
 * Since the two formats differ, a pmd must not change between pointing
 * to a page table and pointing to a huge page while such a walk is in
 * progress. Callers clear the pmd and then call this; the IPI to every
 * CPU in the mm's cpumask (with wait == 1) returns only once any
 * concurrent lockless walker, running with IRQs off, has finished.
 */
void serialize_against_pte_lookup(struct mm_struct *mm)
{
	smp_mb();
	smp_call_function_many(mm_cpumask(mm), do_nothing, NULL, 1);
}
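
/*
 * Illustrative caller pattern, a sketch rather than code from this file:
 * a helper that demotes a huge pmd back to a page table pointer would
 * typically clear the pmd, flush, serialize, and only then reuse or free
 * the underlying page table, e.g.:
 *
 *	old_pmd = *pmdp;
 *	pmd_clear(pmdp);
 *	flush_pmd_tlb_range(vma, addr, addr + HPAGE_PMD_SIZE);
 *	serialize_against_pte_lookup(vma->vm_mm);
 *	(from here on, no lockless walker can still see the old huge pmd)
 */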

/*
 * Invalidate a pmd entry before switching it from a huge page mapping
 * back to a regular page table pointer.
 */
void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		     pmd_t *pmdp)
{
	pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0);
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	/*
	 * This ensures that generic code relying on IRQ disabling
	 * to prevent a parallel THP split keeps working as expected.
	 */
	serialize_against_pte_lookup(vma->vm_mm);
}

static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
{
	return __pmd(pmd_val(pmd) | pgprot_val(pgprot));
}

pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
{
	unsigned long pmdv;

	pmdv = (pfn << PAGE_SHIFT) & PTE_RPN_MASK;
	return pmd_set_protbits(__pmd(pmdv), pgprot);
}

pmd_t mk_pmd(struct page *page, pgprot_t pgprot)
{
	return pfn_pmd(page_to_pfn(page), pgprot);
}

pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	unsigned long pmdv;

	pmdv = pmd_val(pmd);
	pmdv &= _HPAGE_CHG_MASK;
	return pmd_set_protbits(__pmd(pmdv), newprot);
}
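
/*
 * Rough usage sketch, not code from this file: pmd_modify() keeps only
 * the bits in _HPAGE_CHG_MASK (which include the pfn) and ORs in the new
 * protection, so generic code changing protections on a huge pmd can do
 * roughly:
 *
 *	entry = pmdp_huge_get_and_clear(mm, addr, pmdp);
 *	entry = pmd_modify(entry, newprot);
 *	set_pmd_at(mm, addr, pmdp, entry);
 */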

/*
 * Called at the end of handling a user page fault that was serviced by
 * updating a huge pmd entry in the Linux page tables. There is currently
 * nothing to do here on Book3S 64.
 */
void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
			  pmd_t *pmd)
{
	return;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/* For use by kexec */
void mmu_cleanup_all(void)
{
	if (radix_enabled())
		radix__mmu_cleanup_all();
	else if (mmu_hash_ops.hpte_clear_all)
		mmu_hash_ops.hpte_clear_all();
}

#ifdef CONFIG_MEMORY_HOTPLUG
int create_section_mapping(unsigned long start, unsigned long end)
{
	if (radix_enabled())
		return radix__create_section_mapping(start, end);

	return hash__create_section_mapping(start, end);
}

int remove_section_mapping(unsigned long start, unsigned long end)
{
	if (radix_enabled())
		return radix__remove_section_mapping(start, end);

	return hash__remove_section_mapping(start, end);
}
#endif /* CONFIG_MEMORY_HOTPLUG */