/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 */

#ifndef _ASM_RISCV_PGTABLE_H
#define _ASM_RISCV_PGTABLE_H

#include <linux/mmzone.h>
#include <linux/sizes.h>

#include <asm/pgtable-bits.h>

#ifndef CONFIG_MMU
#define KERNEL_LINK_ADDR PAGE_OFFSET
#else

#define ADDRESS_SPACE_END (UL(-1))

#ifdef CONFIG_64BIT
/* Leave 2GB for the kernel and BPF at the end of the address space */
#define KERNEL_LINK_ADDR (ADDRESS_SPACE_END - SZ_2G + 1)
#else
#define KERNEL_LINK_ADDR PAGE_OFFSET
#endif

/* Half of the kernel virtual address space is given to vmalloc,
 * placed directly below the linear mapping (PAGE_OFFSET). */
#define VMALLOC_SIZE (KERN_VIRT_SIZE >> 1)
#define VMALLOC_END (PAGE_OFFSET - 1)
#define VMALLOC_START (PAGE_OFFSET - VMALLOC_SIZE)

#define BPF_JIT_REGION_SIZE (SZ_128M)
#ifdef CONFIG_64BIT
/* On 64-bit, the 128M BPF JIT region sits directly below the module area */
#define BPF_JIT_REGION_START (BPF_JIT_REGION_END - BPF_JIT_REGION_SIZE)
#define BPF_JIT_REGION_END (MODULES_END)
#else
/* On 32-bit, it sits directly below the linear mapping, atop vmalloc */
#define BPF_JIT_REGION_START (PAGE_OFFSET - BPF_JIT_REGION_SIZE)
#define BPF_JIT_REGION_END (VMALLOC_END)
#endif

/* Modules always live in the 2GB window just before the kernel image */
#ifdef CONFIG_64BIT
#define MODULES_VADDR (PFN_ALIGN((unsigned long)&_end) - SZ_2G)
#define MODULES_END (PFN_ALIGN((unsigned long)&_start))
#endif

50
/*
 * Roughly size the vmemmap space to be large enough to fit enough
 * struct pages to map half the virtual address space (the "- 1");
 * position vmemmap directly below the VMALLOC region.
 */
#define VMEMMAP_SHIFT \
	(CONFIG_VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)
#define VMEMMAP_SIZE BIT(VMEMMAP_SHIFT)
#define VMEMMAP_END (VMALLOC_START - 1)
#define VMEMMAP_START (VMALLOC_START - VMEMMAP_SIZE)

/*
 * Define vmemmap for pfn_to_page & page_to_pfn calls. Needed if the
 * kernel is configured with CONFIG_SPARSEMEM_VMEMMAP enabled.
 */
#define vmemmap ((struct page *)VMEMMAP_START)

/* 16M PCI I/O window directly below the vmemmap region */
#define PCI_IO_SIZE SZ_16M
#define PCI_IO_END VMEMMAP_START
#define PCI_IO_START (PCI_IO_END - PCI_IO_SIZE)

/* Fixmap area sits directly below the PCI I/O window */
#define FIXADDR_TOP PCI_IO_START
#ifdef CONFIG_64BIT
#define FIXADDR_SIZE PMD_SIZE
#else
#define FIXADDR_SIZE PGDIR_SIZE
#endif
#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)

#endif

/*
 * Offset into the XIP kernel image used by XIP_FIXUP() below to locate
 * the RAM alias of the writable part of the image; 0 when not XIP.
 */
#ifdef CONFIG_XIP_KERNEL
#define XIP_OFFSET SZ_8M
#else
#define XIP_OFFSET 0
#endif
82
83#ifndef __ASSEMBLY__
84
85
86#include <asm-generic/pgtable-nopud.h>
87#include <asm/page.h>
88#include <asm/tlbflush.h>
89#include <linux/mm_types.h>
90
91#ifdef CONFIG_64BIT
92#include <asm/pgtable-64.h>
93#else
94#include <asm/pgtable-32.h>
95#endif
96
#ifdef CONFIG_XIP_KERNEL
/*
 * Translate an address inside the execute-in-place (XIP) kernel image
 * into its RAM alias.  The RAM copy of the image's writable data starts
 * XIP_OFFSET bytes into the image (note the "- XIP_OFFSET" adjustment),
 * so the window that needs fixing up spans XIP_OFFSET * 2 bytes from
 * CONFIG_XIP_PHYS_ADDR.  Addresses outside that window pass through
 * unchanged.
 *
 * The window was previously hard-coded as SZ_16M, which silently relied
 * on XIP_OFFSET == SZ_8M; derive it from XIP_OFFSET so the two can never
 * drift apart.
 */
#define XIP_FIXUP(addr) ({ \
	uintptr_t __a = (uintptr_t)(addr); \
	(__a >= CONFIG_XIP_PHYS_ADDR && \
	 __a < CONFIG_XIP_PHYS_ADDR + XIP_OFFSET * 2) ? \
		__a - CONFIG_XIP_PHYS_ADDR + CONFIG_PHYS_RAM_BASE - XIP_OFFSET :\
		__a; \
	})
#else
#define XIP_FIXUP(addr) (addr)
#endif
107
#ifdef CONFIG_MMU

/* Number of entries in the page global directory */
#define PTRS_PER_PGD (PAGE_SIZE / sizeof(pgd_t))
/* Number of entries in the page table */
#define PTRS_PER_PTE (PAGE_SIZE / sizeof(pte_t))

/* Number of PGD entries that a user-space task may use */
#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)

/* Page protection bits: every user mapping is present, accessed, user */
#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER)

#define PAGE_NONE __pgprot(_PAGE_PROT_NONE)
#define PAGE_READ __pgprot(_PAGE_BASE | _PAGE_READ)
#define PAGE_WRITE __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE)
#define PAGE_EXEC __pgprot(_PAGE_BASE | _PAGE_EXEC)
#define PAGE_READ_EXEC __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
#define PAGE_WRITE_EXEC __pgprot(_PAGE_BASE | _PAGE_READ | \
 _PAGE_EXEC | _PAGE_WRITE)

/* Copy-on-write variants map to their read-only counterparts */
#define PAGE_COPY PAGE_READ
#define PAGE_COPY_EXEC PAGE_EXEC
#define PAGE_COPY_READ_EXEC PAGE_READ_EXEC
#define PAGE_SHARED PAGE_WRITE
#define PAGE_SHARED_EXEC PAGE_WRITE_EXEC

/* Kernel mappings: global, pre-dirtied/accessed, not user-visible */
#define _PAGE_KERNEL (_PAGE_READ \
 | _PAGE_WRITE \
 | _PAGE_PRESENT \
 | _PAGE_ACCESSED \
 | _PAGE_DIRTY \
 | _PAGE_GLOBAL)

#define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_READ __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL | _PAGE_EXEC)
#define PAGE_KERNEL_READ_EXEC __pgprot((_PAGE_KERNEL & ~_PAGE_WRITE) \
 | _PAGE_EXEC)

#define PAGE_TABLE __pgprot(_PAGE_TABLE)

/*
 * ioremap falls back to ordinary kernel-mapping attributes; the base
 * RISC-V ISA gives no PTE bits here to alter memory attributes
 * (no svpbmt support visible in this file) — so _PAGE_IOREMAP is
 * simply _PAGE_KERNEL.
 */
#define _PAGE_IOREMAP _PAGE_KERNEL
154
/* Kernel master page directory */
extern pgd_t swapper_pg_dir[];

/* MAP_PRIVATE permissions: xwr (copy-on-write) */
#define __P000 PAGE_NONE
#define __P001 PAGE_READ
#define __P010 PAGE_COPY
#define __P011 PAGE_COPY
#define __P100 PAGE_EXEC
#define __P101 PAGE_READ_EXEC
#define __P110 PAGE_COPY_EXEC
#define __P111 PAGE_COPY_READ_EXEC

/* MAP_SHARED permissions: xwr */
#define __S000 PAGE_NONE
#define __S001 PAGE_READ
#define __S010 PAGE_SHARED
#define __S011 PAGE_SHARED
#define __S100 PAGE_EXEC
#define __S101 PAGE_READ_EXEC
#define __S110 PAGE_SHARED_EXEC
#define __S111 PAGE_SHARED_EXEC
176
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_present(pmd_t pmd)
{
	/*
	 * Checking for _PAGE_LEAF is needed too because:
	 * when splitting a THP, split_huge_page() will temporarily clear
	 * the present bit; in this situation, pmd_present() and
	 * pmd_trans_huge() still need to return true.
	 */
	return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE | _PAGE_LEAF));
}
#else
static inline int pmd_present(pmd_t pmd)
{
	/* PROT_NONE mappings count as present even with _PAGE_PRESENT clear */
	return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
}
#endif
194
/* True if the pmd entry is completely empty */
static inline int pmd_none(pmd_t pmd)
{
	return (pmd_val(pmd) == 0);
}

/*
 * A "bad" pmd is one that cannot be walked: either not present at all,
 * or a leaf (R/W/X set) where a next-level table pointer was expected.
 */
static inline int pmd_bad(pmd_t pmd)
{
	return !pmd_present(pmd) || (pmd_val(pmd) & _PAGE_LEAF);
}

/* A leaf pmd maps a huge page directly rather than pointing to a PTE table */
#define pmd_leaf pmd_leaf
static inline int pmd_leaf(pmd_t pmd)
{
	return pmd_present(pmd) && (pmd_val(pmd) & _PAGE_LEAF);
}

/* Install a pmd entry with a plain store (no barriers here) */
static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}
220
/* Build a pgd entry from a page frame number and protection bits */
static inline pgd_t pfn_pgd(unsigned long pfn, pgprot_t prot)
{
	return __pgd((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
}

/* Extract the page frame number from a pgd entry */
static inline unsigned long _pgd_pfn(pgd_t pgd)
{
	return pgd_val(pgd) >> _PAGE_PFN_SHIFT;
}

/* struct page backing the memory a pmd entry points at */
static inline struct page *pmd_page(pmd_t pmd)
{
	return pfn_to_page(pmd_val(pmd) >> _PAGE_PFN_SHIFT);
}

/* Kernel virtual address of the PTE table a pmd entry points at */
static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)pfn_to_virt(pmd_val(pmd) >> _PAGE_PFN_SHIFT);
}

/* pmd/pud entries share the pte bit layout, so conversion is a cast */
static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}
250
/* Yields the page frame number (PFN) of a page table entry */
static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) >> _PAGE_PFN_SHIFT);
}

#define pte_page(x) pfn_to_page(pte_pfn(x))

/* Constructs a page table entry from a PFN and protection bits */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
	return __pte((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
}

#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
266
/* PROT_NONE ptes count as present even with _PAGE_PRESENT clear */
static inline int pte_present(pte_t pte)
{
	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
}

static inline int pte_none(pte_t pte)
{
	return (pte_val(pte) == 0);
}

/*
 * The predicates below return the raw masked bit (nonzero == true),
 * not a normalized 0/1.
 */
static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & _PAGE_WRITE;
}

static inline int pte_exec(pte_t pte)
{
	return pte_val(pte) & _PAGE_EXEC;
}

/* A huge pte is a present leaf (any of R/W/X set) */
static inline int pte_huge(pte_t pte)
{
	return pte_present(pte) && (pte_val(pte) & _PAGE_LEAF);
}

static inline int pte_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & _PAGE_ACCESSED;
}

static inline int pte_special(pte_t pte)
{
	return pte_val(pte) & _PAGE_SPECIAL;
}
306
307
308
309static inline pte_t pte_wrprotect(pte_t pte)
310{
311 return __pte(pte_val(pte) & ~(_PAGE_WRITE));
312}
313
314
315
316static inline pte_t pte_mkwrite(pte_t pte)
317{
318 return __pte(pte_val(pte) | _PAGE_WRITE);
319}
320
321
322
323static inline pte_t pte_mkdirty(pte_t pte)
324{
325 return __pte(pte_val(pte) | _PAGE_DIRTY);
326}
327
328static inline pte_t pte_mkclean(pte_t pte)
329{
330 return __pte(pte_val(pte) & ~(_PAGE_DIRTY));
331}
332
333static inline pte_t pte_mkyoung(pte_t pte)
334{
335 return __pte(pte_val(pte) | _PAGE_ACCESSED);
336}
337
338static inline pte_t pte_mkold(pte_t pte)
339{
340 return __pte(pte_val(pte) & ~(_PAGE_ACCESSED));
341}
342
343static inline pte_t pte_mkspecial(pte_t pte)
344{
345 return __pte(pte_val(pte) | _PAGE_SPECIAL);
346}
347
348static inline pte_t pte_mkhuge(pte_t pte)
349{
350 return pte;
351}
352
#ifdef CONFIG_NUMA_BALANCING
/*
 * See the comment in include/asm-generic/pgtable.h : protnone means the
 * PROT_NONE bit is set while the hardware present bit is clear.
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE)) == _PAGE_PROT_NONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif
367
368
/* Modify page protection bits, keeping the _PAGE_CHG_MASK bits intact */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}

#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd " PTE_FMT ".\n", __FILE__, __LINE__, pgd_val(e))
376
377
378
/* Commit new configuration to MMU hardware */
static inline void update_mmu_cache(struct vm_area_struct *vma,
	unsigned long address, pte_t *ptep)
{
	/*
	 * The kernel assumes that TLBs don't cache invalid entries, but
	 * in RISC-V, SFENCE.VMA specifies an ordering constraint, not a
	 * cache flush; it is necessary even after writing invalid entries.
	 * Relying on flush_tlb_fix_spurious_fault would suffice, but
	 * the extra traps reduce performance.  So, eagerly SFENCE.VMA.
	 */
	local_flush_tlb_page(address);
}

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
	unsigned long address, pmd_t *pmdp)
{
	pte_t *ptep = (pte_t *)pmdp;

	update_mmu_cache(vma, address, ptep);
}
399
#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return pte_val(pte_a) == pte_val(pte_b);
}

/*
 * Certain architectures need to do special things when PTEs within
 * a page table are directly modified.  Thus, the following hook is
 * made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
}

void flush_icache_pte(pte_t pte);

static inline void set_pte_at(struct mm_struct *mm,
	unsigned long addr, pte_t *ptep, pte_t pteval)
{
	/* New executable mappings must be made visible to the icache */
	if (pte_present(pteval) && pte_exec(pteval))
		flush_icache_pte(pteval);

	set_pte(ptep, pteval);
}

static inline void pte_clear(struct mm_struct *mm,
	unsigned long addr, pte_t *ptep)
{
	set_pte_at(mm, addr, ptep, __pte(0));
}
432
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pte_t *ptep,
					pte_t entry, int dirty)
{
	if (!pte_same(*ptep, entry))
		set_pte_at(vma->vm_mm, address, ptep, entry);
	/*
	 * update_mmu_cache will unconditionally execute, handling both
	 * the case that the PTE changed and the spurious fault case.
	 */
	return true;
}
446
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
	unsigned long address, pte_t *ptep)
{
	/* Atomically swap in an empty entry and return the old one */
	return __pte(atomic_long_xchg((atomic_long_t *)ptep, 0));
}

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	/* Cheap non-atomic check first; only take the atomic path if set */
	if (!pte_young(*ptep))
		return 0;
	return test_and_clear_bit(_PAGE_ACCESSED_OFFSET, &pte_val(*ptep));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
	unsigned long address, pte_t *ptep)
{
	/* Atomically clear the write bit without disturbing other bits */
	atomic_long_and(~(unsigned long)_PAGE_WRITE, (atomic_long_t *)ptep);
}
470
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	/*
	 * This comment is borrowed from x86, but applies equally well to
	 * RISC-V:
	 *
	 * Clearing the accessed bit without a TLB flush
	 * doesn't cause data corruption. [ It could cause incorrect
	 * page aging and the (mistaken) reclaim of hot pages, but the
	 * chance of that should be relatively low. ]
	 *
	 * So as a performance optimization don't flush the TLB when
	 * clearing the accessed bit, it will eventually be flushed by
	 * a context switch or a VM operation anyway. [ In the rare
	 * event of it not getting flushed for a long time the delay
	 * shouldn't really matter because there's no real memory
	 * pressure for swapout to react to. ]
	 */
	return ptep_test_and_clear_young(vma, address, ptep);
}
492
493
494
495
/* pte and pmd entries share the same bit layout, so conversion is a cast */
static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

/* No dedicated huge bit in the pmd format: the entry is returned as-is */
static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return pmd;
}

/* Make the pmd invisible to both pmd_present() variants */
static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) & ~(_PAGE_PRESENT|_PAGE_PROT_NONE));
}

/* Physical address encoded in a pmd entry */
#define __pmd_to_phys(pmd) (pmd_val(pmd) >> _PAGE_PFN_SHIFT << PAGE_SHIFT)

/* PFN of the huge page a pmd maps, masked down to PMD granularity */
static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return ((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT);
}

/* Change pmd protection bits; delegates to pte_modify via the shared layout */
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}
522
523#define pmd_write pmd_write
524static inline int pmd_write(pmd_t pmd)
525{
526 return pte_write(pmd_pte(pmd));
527}
528
529static inline int pmd_dirty(pmd_t pmd)
530{
531 return pte_dirty(pmd_pte(pmd));
532}
533
534static inline int pmd_young(pmd_t pmd)
535{
536 return pte_young(pmd_pte(pmd));
537}
538
539static inline pmd_t pmd_mkold(pmd_t pmd)
540{
541 return pte_pmd(pte_mkold(pmd_pte(pmd)));
542}
543
544static inline pmd_t pmd_mkyoung(pmd_t pmd)
545{
546 return pte_pmd(pte_mkyoung(pmd_pte(pmd)));
547}
548
549static inline pmd_t pmd_mkwrite(pmd_t pmd)
550{
551 return pte_pmd(pte_mkwrite(pmd_pte(pmd)));
552}
553
554static inline pmd_t pmd_wrprotect(pmd_t pmd)
555{
556 return pte_pmd(pte_wrprotect(pmd_pte(pmd)));
557}
558
559static inline pmd_t pmd_mkclean(pmd_t pmd)
560{
561 return pte_pmd(pte_mkclean(pmd_pte(pmd)));
562}
563
564static inline pmd_t pmd_mkdirty(pmd_t pmd)
565{
566 return pte_pmd(pte_mkdirty(pmd_pte(pmd)));
567}
568
/* Install pmd/pud entries through the pte path to get icache flushing */
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
				pmd_t *pmdp, pmd_t pmd)
{
	return set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd));
}

static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
				pud_t *pudp, pud_t pud)
{
	return set_pte_at(mm, addr, (pte_t *)pudp, pud_pte(pud));
}
580
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* A transparent huge page is simply a leaf pmd */
static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_leaf(pmd);
}

/*
 * The pmd-level hooks below all delegate to the pte-level helpers,
 * which is valid because pmd and pte entries share one bit layout.
 */
#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp)
{
	return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					unsigned long address, pmd_t *pmdp)
{
	return pte_pmd(ptep_get_and_clear(mm, address, (pte_t *)pmdp));
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
					unsigned long address, pmd_t *pmdp)
{
	ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
}

#define pmdp_establish pmdp_establish
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
				unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	/* Atomically install the new pmd and return the previous entry */
	return __pmd(atomic_long_xchg((atomic_long_t *)pmdp, pmd_val(pmd)));
}
#endif
623
/*
 * Encode and decode a swap entry
 *
 * Format of swap PTE:
 *	bit            0:	_PAGE_PRESENT (zero)
 *	bit            1:	_PAGE_PROT_NONE (zero)
 *	bits      2 to 6:	swap type
 *	bits 7 to XLEN-1:	swap offset
 */
#define __SWP_TYPE_SHIFT 2
#define __SWP_TYPE_BITS 5
#define __SWP_TYPE_MASK ((1UL << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

#define MAX_SWAPFILES_CHECK() \
	BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

#define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x) ((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type, offset) ((swp_entry_t) \
	{ ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
648
/*
 * In the RV64 Linux scheme, we give the user half of the virtual-address
 * space and give the kernel the other (upper) half.
 */
#ifdef CONFIG_64BIT
#define KERN_VIRT_START (-(BIT(CONFIG_VA_BITS)) + TASK_SIZE)
#else
#define KERN_VIRT_START FIXADDR_START
#endif

/*
 * Task size is half the PGD span on RV64, and everything below the
 * fixmap on RV32.  Note that PGDIR_SIZE must evenly divide TASK_SIZE.
 */
#ifdef CONFIG_64BIT
#define TASK_SIZE (PGDIR_SIZE * PTRS_PER_PGD / 2)
#else
#define TASK_SIZE FIXADDR_START
#endif
668
#else /* CONFIG_MMU */

/* Dummy definitions for the no-MMU case */
#define PAGE_SHARED __pgprot(0)
#define PAGE_KERNEL __pgprot(0)
#define swapper_pg_dir NULL
#define TASK_SIZE 0xffffffffUL
#define VMALLOC_START 0
#define VMALLOC_END TASK_SIZE

#endif /* !CONFIG_MMU */
679
#define kern_addr_valid(addr) (1) /* FIXME */

extern char _start[];
extern void *_dtb_early_va;
extern uintptr_t _dtb_early_pa;
#if defined(CONFIG_XIP_KERNEL) && defined(CONFIG_MMU)
/*
 * On XIP kernels the early DTB globals live in the RAM alias of the
 * image, so accesses must be redirected through XIP_FIXUP.
 */
#define dtb_early_va (*(void **)XIP_FIXUP(&_dtb_early_va))
#define dtb_early_pa (*(uintptr_t *)XIP_FIXUP(&_dtb_early_pa))
#else
#define dtb_early_va _dtb_early_va
#define dtb_early_pa _dtb_early_pa
#endif

void paging_init(void);
void misc_mem_init(void);

/*
 * ZERO_PAGE is a global shared page that is always zero,
 * used for zero-mapped memory areas, etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_RISCV_PGTABLE_H */
706