// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 */

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/memblock.h>
#include <misc/cxl-base.h>

#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/trace.h>
#include <asm/powernv.h>

#include <mm/mmu_decl.h>
#include <trace/events/thp.h>

unsigned long __pmd_frag_nr;
EXPORT_SYMBOL(__pmd_frag_nr);
unsigned long __pmd_frag_size_shift;
EXPORT_SYMBOL(__pmd_frag_size_shift);

int (*register_process_table)(unsigned long base, unsigned long page_size,
			      unsigned long tbl_size);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * This is called when relaxing access to a hugepage. It's also called in
 * the page fault path when we don't hit any of the major fault cases, ie,
 * a minor update of _PAGE_ACCESSED, _PAGE_DIRTY, etc. The generic code will
 * have already handled those cases, so we need not worry about them here.
 */
int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp, pmd_t entry, int dirty)
{
	int changed;
#ifdef CONFIG_DEBUG_VM
	WARN_ON(!pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
	assert_spin_locked(pmd_lockptr(vma->vm_mm, pmdp));
#endif
	changed = !pmd_same(*(pmdp), entry);
	if (changed) {
		/*
		 * We can use MMU_PAGE_2M here, because only radix
		 * path look at the psize.
		 */
		__ptep_set_access_flags(vma, pmdp_ptep(pmdp),
					pmd_pte(entry), address, MMU_PAGE_2M);
	}
	return changed;
}
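
/*
 * Hedged usage sketch (the caller lives in generic mm code, not in this
 * file): the THP minor-fault path is expected to use this roughly as
 *
 *	entry = pmd_mkyoung(orig_pmd);
 *	if (pmdp_set_access_flags(vma, haddr, pmdp, entry, dirty))
 *		update_mmu_cache_pmd(vma, address, pmdp);
 *
 * with haddr aligned to HPAGE_PMD_SIZE and the pmd lock held.
 */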

int pmdp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long address, pmd_t *pmdp)
{
	return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp);
}

/*
 * set a new huge pmd. We should not be called for updating
 * an existing pmd entry. That should go via pmd_hugepage_update.
 */
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
#ifdef CONFIG_DEBUG_VM
	/*
	 * Make sure hardware valid bit is not set. We don't do
	 * tlb flush for this update.
	 */

	WARN_ON(pte_hw_valid(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
	assert_spin_locked(pmd_lockptr(mm, pmdp));
	WARN_ON(!(pmd_large(pmd)));
#endif
	trace_hugepage_set_pmd(addr, pmd_val(pmd));
	return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
}

static void do_nothing(void *unused)
{

}

/*
 * Serialize against find_current_mm_pte which does lock-less
 * lookup in page tables with local interrupts disabled. For huge pages
 * it casts pmd_t to pte_t. Since format of pte_t is different from
 * pmd_t we want to prevent transit from pmd pointing to page table
 * to pmd pointing to huge page (and back) while interrupts are disabled.
 * We clear pmd to possibly replace it with page table pointer in
 * different code paths. So make sure we wait for the parallel
 * find_current_mm_pte to finish.
 */
void serialize_against_pte_lookup(struct mm_struct *mm)
{
	smp_mb();
	smp_call_function_many(mm_cpumask(mm), do_nothing, NULL, 1);
}
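
/*
 * Hedged usage sketch: a caller first makes the pmd unusable for a lockless
 * walker and only then waits out walkers still running with IRQs disabled,
 * as pmdp_invalidate() below does:
 *
 *	old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_PRESENT, _PAGE_INVALID);
 *	serialize_against_pte_lookup(mm);
 *
 * After the call returns, no CPU can still be dereferencing the old value.
 */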

/*
 * We use this to invalidate a pmdp entry before switching from a
 * hugepte to regular pmd entry.
 */
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		      pmd_t *pmdp)
{
	unsigned long old_pmd;

	old_pmd = pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, _PAGE_INVALID);
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	/*
	 * This ensures that generic code that rely on IRQ disabling
	 * to prevent a parallel THP split work as expected.
	 *
	 * Marking the entry with _PAGE_INVALID && ~_PAGE_PRESENT requires
	 * a special case check in pmd_access_permitted.
	 */
	serialize_against_pte_lookup(vma->vm_mm);
	return __pmd(old_pmd);
}

static pmd_t *pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
{
	return __pmd(pmd_val(pmd) | pgprot_val(pgprot));
}

pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
{
	unsigned long pmdv;

	pmdv = (pfn << PAGE_SHIFT) & PTE_RPN_MASK;
	return pmd_set_protbits(__pmd(pmdv), pgprot);
}

pmd_t mk_pmd(struct page *page, pgprot_t pgprot)
{
	return pfn_pmd(page_to_pfn(page), pgprot);
}

pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	unsigned long pmdv;

	pmdv = pmd_val(pmd);
	pmdv &= _HPAGE_CHG_MASK;
	return pmd_set_protbits(__pmd(pmdv), newprot);
}
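
/*
 * Hedged usage sketch: a huge pmd is assembled from a pfn plus protection
 * bits, and pmd_modify() keeps only the bits in _HPAGE_CHG_MASK, e.g.
 *
 *	pmd_t pmd = mk_pmd(page, vma->vm_page_prot);
 *	pmd = pmd_mkhuge(pmd_modify(pmd, newprot));
 */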

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a HUGE PMD entry in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux HUGE PMD entry.
 */
void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
			  pmd_t *pmd)
{
	if (radix_enabled())
		prefetch((void *)addr);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/* For use by kexec */
void mmu_cleanup_all(void)
{
	if (radix_enabled())
		radix__mmu_cleanup_all();
	else if (mmu_hash_ops.hpte_clear_all)
		mmu_hash_ops.hpte_clear_all();
}

#ifdef CONFIG_MEMORY_HOTPLUG
int __meminit create_section_mapping(unsigned long start, unsigned long end, int nid)
{
	if (radix_enabled())
		return radix__create_section_mapping(start, end, nid);

	return hash__create_section_mapping(start, end, nid);
}

int __meminit remove_section_mapping(unsigned long start, unsigned long end)
{
	if (radix_enabled())
		return radix__remove_section_mapping(start, end);

	return hash__remove_section_mapping(start, end);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

void __init mmu_partition_table_init(void)
{
	unsigned long patb_size = 1UL << PATB_SIZE_SHIFT;
	unsigned long ptcr;

	BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 36), "Partition table size too large.");
	/* Initialize the Partition Table with no entries */
	partition_tb = memblock_alloc(patb_size, patb_size);
	if (!partition_tb)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, patb_size, patb_size);

	/*
	 * update partition table control register,
	 * 64 K size.
	 */
	ptcr = __pa(partition_tb) | (PATB_SIZE_SHIFT - 12);
	mtspr(SPRN_PTCR, ptcr);
	powernv_set_nmmu_ptcr(ptcr);
}
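
/*
 * Worked example of the PTCR encoding above: the low bits hold
 * log2(table size) - 12, so with PATB_SIZE_SHIFT == 16 the size field is 4
 * and the hardware reads a 64K partition table at the physical address
 * stored in the upper bits of the register.
 */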

void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
				   unsigned long dw1)
{
	unsigned long old = be64_to_cpu(partition_tb[lpid].patb0);

	partition_tb[lpid].patb0 = cpu_to_be64(dw0);
	partition_tb[lpid].patb1 = cpu_to_be64(dw1);

	/*
	 * Global flush of TLBs and partition table caches for this lpid.
	 * The type of flush (hash or radix) depends on what the previous
	 * use of this partition ID was, not the new use.
	 */
	asm volatile("ptesync" : : : "memory");
	if (old & PATB_HR) {
		asm volatile(PPC_TLBIE_5(%0,%1,2,0,1) : :
			     "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
		asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
			     "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
		trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 1);
	} else {
		asm volatile(PPC_TLBIE_5(%0,%1,2,0,0) : :
			     "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
		trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 0);
	}

	asm volatile("eieio; tlbsync; ptesync" : : : "memory");
}
EXPORT_SYMBOL_GPL(mmu_partition_table_set_entry);
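
/*
 * Note on the flush sequence above (our reading of the tlbie arguments):
 * RIC=2 means "invalidate all". If the previous user of the LPID was
 * radix (PATB_HR), the invalidation is issued with both PRS=0 and PRS=1
 * so that partition-scoped and process-scoped cached entries are flushed;
 * hash only needs the partition-scoped form.
 */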

static pmd_t *get_pmd_from_cache(struct mm_struct *mm)
{
	void *pmd_frag, *ret;

	if (PMD_FRAG_NR == 1)
		return NULL;

	spin_lock(&mm->page_table_lock);
	ret = mm->context.pmd_frag;
	if (ret) {
		pmd_frag = ret + PMD_FRAG_SIZE;
		/*
		 * If we have taken up all the fragments mark PTE page NULL
		 */
		if (((unsigned long)pmd_frag & ~PAGE_MASK) == 0)
			pmd_frag = NULL;
		mm->context.pmd_frag = pmd_frag;
	}
	spin_unlock(&mm->page_table_lock);
	return (pmd_t *)ret;
}

static pmd_t *__alloc_for_pmdcache(struct mm_struct *mm)
{
	void *ret = NULL;
	struct page *page;
	gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO;

	if (mm == &init_mm)
		gfp &= ~__GFP_ACCOUNT;
	page = alloc_page(gfp);
	if (!page)
		return NULL;
	if (!pgtable_pmd_page_ctor(page)) {
		__free_pages(page, 0);
		return NULL;
	}

	atomic_set(&page->pt_frag_refcount, 1);

	ret = page_address(page);
	/*
	 * If we support only one fragment just return the
	 * allocated page.
	 */
	if (PMD_FRAG_NR == 1)
		return ret;

	spin_lock(&mm->page_table_lock);
	/*
	 * If we find pgtable_page set, we return
	 * the allocated page with single fragment
	 * count.
	 */
	if (likely(!mm->context.pmd_frag)) {
		atomic_set(&page->pt_frag_refcount, PMD_FRAG_NR);
		mm->context.pmd_frag = ret + PMD_FRAG_SIZE;
	}
	spin_unlock(&mm->page_table_lock);

	return (pmd_t *)ret;
}

pmd_t *pmd_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr)
{
	pmd_t *pmd;

	pmd = get_pmd_from_cache(mm);
	if (pmd)
		return pmd;

	return __alloc_for_pmdcache(mm);
}
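
/*
 * Hedged worked example of the fragment scheme, assuming
 * PMD_FRAG_NR == PAGE_SIZE / PMD_FRAG_SIZE (which the wrap check in
 * get_pmd_from_cache() relies on): with PMD_FRAG_NR == 4, the first
 * pmd_fragment_alloc() allocates a page, sets pt_frag_refcount to 4 and
 * stashes the second fragment in mm->context.pmd_frag; the next three
 * calls are satisfied from the cache without touching the page allocator.
 */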

void pmd_fragment_free(unsigned long *pmd)
{
	struct page *page = virt_to_page(pmd);

	BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0);
	if (atomic_dec_and_test(&page->pt_frag_refcount)) {
		pgtable_pmd_page_dtor(page);
		__free_page(page);
	}
}

static inline void pgtable_free(void *table, int index)
{
	switch (index) {
	case PTE_INDEX:
		pte_fragment_free(table, 0);
		break;
	case PMD_INDEX:
		pmd_fragment_free(table);
		break;
	case PUD_INDEX:
		kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), table);
		break;
#if defined(CONFIG_PPC_4K_PAGES) && defined(CONFIG_HUGETLB_PAGE)
		/* 16M hugepd directory at pud level */
	case HTLB_16M_INDEX:
		BUILD_BUG_ON(H_16M_CACHE_INDEX <= 0);
		kmem_cache_free(PGT_CACHE(H_16M_CACHE_INDEX), table);
		break;
		/* 16G hugepd directory at the pgd level */
	case HTLB_16G_INDEX:
		BUILD_BUG_ON(H_16G_CACHE_INDEX <= 0);
		kmem_cache_free(PGT_CACHE(H_16G_CACHE_INDEX), table);
		break;
#endif
		/* We don't free pgd table via RCU callback */
	default:
		BUG();
	}
}

#ifdef CONFIG_SMP
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index)
{
	unsigned long pgf = (unsigned long)table;

	BUG_ON(index > MAX_PGTABLE_INDEX_SIZE);
	pgf |= index;
	tlb_remove_table(tlb, (void *)pgf);
}

void __tlb_remove_table(void *_table)
{
	void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
	unsigned int index = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;

	return pgtable_free(table, index);
}
#else
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index)
{
	return pgtable_free(table, index);
}
#endif
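
/*
 * Worked example of the pointer tagging used by pgtable_free_tlb(): page
 * table fragments are aligned beyond MAX_PGTABLE_INDEX_SIZE, so the low
 * bits of the table pointer are free. With table == 0xc000000001238000
 * (illustrative only) and index == PMD_INDEX, the tagged value
 * 0xc000000001238000 | PMD_INDEX is queued, and __tlb_remove_table()
 * later masks with MAX_PGTABLE_INDEX_SIZE to recover pointer and index.
 */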

#ifdef CONFIG_PROC_FS
atomic_long_t direct_pages_count[MMU_PAGE_COUNT];

void arch_report_meminfo(struct seq_file *m)
{
	/*
	 * Hash maps the memory with one size mmu_linear_psize.
	 * So don't bother to print these on hash
	 */
	if (!radix_enabled())
		return;
	seq_printf(m, "DirectMap4k:    %8lu kB\n",
		   atomic_long_read(&direct_pages_count[MMU_PAGE_4K]) << 2);
	seq_printf(m, "DirectMap64k:   %8lu kB\n",
		   atomic_long_read(&direct_pages_count[MMU_PAGE_64K]) << 6);
	seq_printf(m, "DirectMap2M:    %8lu kB\n",
		   atomic_long_read(&direct_pages_count[MMU_PAGE_2M]) << 11);
	seq_printf(m, "DirectMap1G:    %8lu kB\n",
		   atomic_long_read(&direct_pages_count[MMU_PAGE_1G]) << 20);
}
#endif /* CONFIG_PROC_FS */
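
/*
 * The shifts above convert page counts to kB: a 4K page is 4 kB
 * (count << 2), a 64K page 64 kB (count << 6), a 2M page 2048 kB
 * (count << 11) and a 1G page 1048576 kB (count << 20).
 */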

pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr,
			     pte_t *ptep)
{
	unsigned long pte_val;

	/*
	 * Clear the _PAGE_PRESENT so that no hardware parallel update is
	 * possible. Also keep the pte_present true so that we don't take
	 * wrong fault.
	 */
	pte_val = pte_update(vma->vm_mm, addr, ptep, _PAGE_PRESENT, _PAGE_INVALID, 0);

	return __pte(pte_val);
}

void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
			     pte_t *ptep, pte_t old_pte, pte_t pte)
{
	if (radix_enabled())
		return radix__ptep_modify_prot_commit(vma, addr,
						      ptep, old_pte, pte);
	set_pte_at(vma->vm_mm, addr, ptep, pte);
}
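
/*
 * Hedged usage sketch: the two helpers bracket a read-modify-write of a
 * pte so that no hardware update can slip into the window:
 *
 *	old_pte = ptep_modify_prot_start(vma, addr, ptep);
 *	new_pte = pte_mkdirty(old_pte);		(or any protection change)
 *	ptep_modify_prot_commit(vma, addr, ptep, old_pte, new_pte);
 */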

/*
 * For hash translation mode, we use the deposited table to store hash slot
 * information and they are stored at PTRS_PER_PMD offset from related pmd
 * location. Hence a pmd move requires deposit and withdraw.
 *
 * For radix translation with split pmd ptl, we store the deposited table in the
 * pmd page. Hence if we have different pmd page we need to withdraw during pmd
 * move.
 *
 * With hash we use deposited table always irrespective of anon or not.
 * With radix we use deposited table only for anonymous mapping.
 */
int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
			   struct spinlock *old_pmd_ptl,
			   struct vm_area_struct *vma)
{
	if (radix_enabled())
		return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);

	return true;
}

int ioremap_range(unsigned long ea, phys_addr_t pa, unsigned long size, pgprot_t prot, int nid)
{
	unsigned long i;

	if (radix_enabled())
		return radix__ioremap_range(ea, pa, size, prot, nid);

	for (i = 0; i < size; i += PAGE_SIZE) {
		int err = map_kernel_page(ea + i, pa + i, prot);
		if (err) {
			if (slab_is_available())
				unmap_kernel_range(ea, size);
			else
				WARN_ON_ONCE(1);
			return err;
		}
	}

	return 0;
}
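
/*
 * Hedged usage sketch: a caller mapping device memory might do
 *
 *	err = ioremap_range(ea, pa, size, pgprot_noncached(PAGE_KERNEL), nid);
 *
 * with ea, pa and size page aligned; on hash, a partially built mapping is
 * torn down above via unmap_kernel_range() once the slab allocator is up.
 */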