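/*
 * PARISC64 Huge TLB page support.
 */
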
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

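/*
 * Pick a huge-page aligned address for a hugetlbfs mapping.  The length
 * must be a multiple of the huge page size; the actual search is left to
 * the generic arch_get_unmapped_area().
 */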
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;

	if (addr)
		addr = ALIGN(addr, huge_page_size(h));

	/* we need to make sure the colouring is OK */
	return arch_get_unmapped_area(file, addr, len, pgoff, flags);
}


pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	/* We must align the address, because our caller will run
	 * set_huge_pte_at() on whatever we return, which writes out
	 * all of the sub-ptes for the hugepage range.  So we have
	 * to give it the first such sub-pte.
	 */
	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (pud) {
		pmd = pmd_alloc(mm, pud, addr);
		if (pmd)
			pte = pte_alloc_map(mm, pmd, addr);
	}
	return pte;
}

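/*
 * Look up the first sub-pte of the huge mapping covering @addr, or return
 * NULL if any of the page-table levels is not populated.
 */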
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	if (!pgd_none(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd))
				pte = pte_offset_map(pmd, addr);
		}
	}
	return pte;
}

/* Purge data and instruction TLB entries.  Must be called holding
 * the pa_tlb_lock.  The TLB purge instructions are slow on SMP
 * machines since the purge must be broadcast to all CPUs.
 */
static inline void purge_tlb_entries_huge(struct mm_struct *mm, unsigned long addr)
{
	int i;

	/* We may use multiple physical huge pages (e.g. 2x1 MB) to emulate
	 * Linux standard huge pages (e.g. 2 MB) */
	BUILD_BUG_ON(REAL_HPAGE_SHIFT > HPAGE_SHIFT);

	addr &= HPAGE_MASK;
	addr |= _HUGE_PAGE_SIZE_ENCODING_DEFAULT;

	for (i = 0; i < (1 << (HPAGE_SHIFT-REAL_HPAGE_SHIFT)); i++) {
		purge_tlb_entries(mm, addr);
		addr += (1UL << REAL_HPAGE_SHIFT);
	}
}

/* __set_huge_pte_at() must be called holding the pa_tlb_lock. */
static void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	unsigned long addr_start;
	int i;

	addr &= HPAGE_MASK;
	addr_start = addr;

	/* Write one pte for each base page backing the huge page; the
	 * physical address in the pte advances by PAGE_SIZE each step. */
	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
		set_pte(ptep, entry);
		ptep++;

		addr += PAGE_SIZE;
		pte_val(entry) += PAGE_SIZE;
	}

	purge_tlb_entries_huge(mm, addr_start);
}

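/*
 * Install a huge mapping: all sub-ptes are written and the stale TLB
 * entries purged while holding the TLB purge lock.
 */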
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	unsigned long flags;

	purge_tlb_start(flags);
	__set_huge_pte_at(mm, addr, ptep, entry);
	purge_tlb_end(flags);
}

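/*
 * Clear a huge mapping and return the old pte so the caller can inspect
 * the previous access and dirty state.
 */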
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	unsigned long flags;
	pte_t entry;

	purge_tlb_start(flags);
	entry = *ptep;
	__set_huge_pte_at(mm, addr, ptep, __pte(0));
	purge_tlb_end(flags);

	return entry;
}

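/* Rewrite every sub-pte of the huge mapping with the write bit cleared. */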
void huge_ptep_set_wrprotect(struct mm_struct *mm,
				unsigned long addr, pte_t *ptep)
{
	unsigned long flags;
	pte_t old_pte;

	purge_tlb_start(flags);
	old_pte = *ptep;
	__set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
	purge_tlb_end(flags);
}

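/*
 * Update the pte of an existing huge mapping (access/dirty/protection
 * changes).  Returns non-zero if the pte actually changed, so the caller
 * knows whether a flush is needed.
 */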
int huge_ptep_set_access_flags(struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep,
				pte_t pte, int dirty)
{
	unsigned long flags;
	int changed;

	purge_tlb_start(flags);
	changed = !pte_same(*ptep, pte);
	if (changed) {
		__set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
	}
	purge_tlb_end(flags);
	return changed;
}

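/*
 * parisc huge pages are built from runs of normal-sized ptes, so a pmd or
 * pud entry never maps a huge page directly.
 */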
int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}