#ifndef _ASM_RISCV_PGALLOC_H
#define _ASM_RISCV_PGALLOC_H

#include <linux/mm.h>
#include <asm/tlb.h>

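/*
 * Page-table population helpers: install a freshly allocated lower-level
 * table into the level above it.  The page's physical frame number is
 * shifted into the PPN field of the entry and _PAGE_TABLE marks the entry
 * as a non-leaf pointer to the next level.
 */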
static inline void pmd_populate_kernel(struct mm_struct *mm,
	pmd_t *pmd, pte_t *pte)
{
	unsigned long pfn = virt_to_pfn(pte);

	set_pmd(pmd, __pmd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
}

static inline void pmd_populate(struct mm_struct *mm,
	pmd_t *pmd, pgtable_t pte)
{
	unsigned long pfn = virt_to_pfn(page_address(pte));

	set_pmd(pmd, __pmd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
}

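/*
 * With a real (non-folded) PMD level, a PUD entry is populated the same
 * way: point it at the PMD page and tag it with _PAGE_TABLE.
 */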
#ifndef __PAGETABLE_PMD_FOLDED
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	unsigned long pfn = virt_to_pfn(pmd);

	set_pud(pud, __pud((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
}
#endif /* __PAGETABLE_PMD_FOLDED */

#define pmd_pgtable(pmd)	pmd_page(pmd)

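/*
 * Allocate a page-global directory for a new mm: the user half is cleared
 * and the kernel half is copied from init_mm so every process shares the
 * kernel mappings.  This is typically reached via the generic fork path
 * (mm_init()/mm_alloc_pgd()).
 */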
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;

	pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
	if (likely(pgd != NULL)) {
		memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
		/* Copy kernel mappings */
		memcpy(pgd + USER_PTRS_PER_PGD,
			init_mm.pgd + USER_PTRS_PER_PGD,
			(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
	}
	return pgd;
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	free_page((unsigned long)pgd);
}

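/*
 * PMD allocation is only needed when the PMD level is not folded into the
 * PGD, i.e. on configurations with more than two page-table levels.
 */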
#ifndef __PAGETABLE_PMD_FOLDED

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return (pmd_t *)__get_free_page(
		GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_ZERO);
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	free_page((unsigned long)pmd);
}

#define __pmd_free_tlb(tlb, pmd, addr)  pmd_free((tlb)->mm, pmd)

#endif /* __PAGETABLE_PMD_FOLDED */

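/*
 * Kernel PTE pages need no struct-page constructor; a zeroed page is
 * returned directly.
 */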
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
	unsigned long address)
{
	return (pte_t *)__get_free_page(
		GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_ZERO);
}

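/*
 * User PTE pages go through pgtable_page_ctor() so the split page-table
 * lock and page-table accounting are set up before the page is used.
 */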
static inline struct page *pte_alloc_one(struct mm_struct *mm,
	unsigned long address)
{
	struct page *pte;

	pte = alloc_page(GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_ZERO);
	if (!pte)
		return NULL;
	/* pgtable_page_ctor() can fail to allocate the split ptlock */
	if (!pgtable_page_ctor(pte)) {
		__free_page(pte);
		return NULL;
	}
	return pte;
}

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_page((unsigned long)pte);
}

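/*
 * Freeing a user PTE page must undo pgtable_page_ctor() via
 * pgtable_page_dtor() before the page goes back to the allocator.
 */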
static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
{
	pgtable_page_dtor(pte);
	__free_page(pte);
}

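/*
 * When tearing down page tables under an mmu_gather, run the destructor
 * immediately but hand the page to tlb_remove_page() so it is only freed
 * once the TLB has been flushed.
 */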
#define __pte_free_tlb(tlb, pte, buf)		\
do {						\
	pgtable_page_dtor(pte);			\
	tlb_remove_page((tlb), pte);		\
} while (0)

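/* No per-arch page-table cache is maintained, so there is nothing to trim. */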
static inline void check_pgt_cache(void)
{
}

#endif /* _ASM_RISCV_PGALLOC_H */