1
2
3
4
5
6
7
8
9
10
11
12
13#ifndef _S390_PGALLOC_H
14#define _S390_PGALLOC_H
15
16#include <linux/threads.h>
17#include <linux/gfp.h>
18#include <linux/mm.h>
19
/* s390 keeps no per-cpu page table cache, so there is nothing to trim. */
#define check_pgt_cache()	do {} while (0)

/* Allocate/free a crst (region/segment) table; implemented in mm/pgtable.c. */
unsigned long *crst_table_alloc(struct mm_struct *, int);
void crst_table_free(struct mm_struct *, unsigned long *);

/* Allocate/free a page table page; implemented in mm/pgtable.c. */
unsigned long *page_table_alloc(struct mm_struct *);
void page_table_free(struct mm_struct *, unsigned long *);
/* Tear down the noexec shadow page tables for a mm. */
void disable_noexec(struct mm_struct *, struct task_struct *);
28
/*
 * Initialize a table of n bytes at s so that every unsigned long slot
 * holds val.  The first slot is stored directly; the MVC sequence then
 * replicates it across the first 256-byte block and copies that block
 * over each remaining block.  n must be a multiple of 256.
 */
static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
{
	/* addrtype spans the whole n-byte region so the "m"/"=m"
	 * constraints tell the compiler every byte is read and written. */
	typedef struct { char _[n]; } addrtype;

	*s = val;		/* seed the first entry */
	n = (n / 256) - 1;	/* number of 256-byte blocks still to fill */
	asm volatile(
#ifdef CONFIG_64BIT
		/* propagate the 8-byte seed through the first 256 bytes */
		"	mvc	8(248,%0),0(%0)\n"
#else
		/* 31 bit: 4-byte entries, same overlapping-copy trick */
		"	mvc	4(252,%0),0(%0)\n"
#endif
		/* copy the filled block into each following block */
		"0:	mvc	256(256,%0),0(%0)\n"
		"	la	%0,256(%0)\n"
		"	brct	%1,0b\n"
		: "+a" (s), "+d" (n), "=m" (*(addrtype *) s)
		: "m" (*(addrtype *) s));
}
47
/*
 * Fill all 2048 entries of a crst table with the given empty-entry
 * value; if the table has a noexec shadow, initialize that the same way.
 */
static inline void crst_table_init(unsigned long *crst, unsigned long entry)
{
	unsigned long *shadow;

	clear_table(crst, entry, sizeof(unsigned long) * 2048);
	shadow = get_shadow_table(crst);
	if (shadow)
		clear_table(shadow, entry, sizeof(unsigned long) * 2048);
}
55
56#ifndef __s390x__
57
/*
 * On 31 bit the hierarchy tops out at the segment table, so an empty
 * top-level entry is always an empty segment table entry.
 */
static inline unsigned long pgd_entry_type(struct mm_struct *mm)
{
	return _SEGMENT_ENTRY_EMPTY;
}
62
/*
 * 31 bit uses a two-level page table: pud and pmd fold into the pgd.
 * These levels must never be allocated or populated separately, hence
 * the BUG() stubs.  The (pud_t *)2 / (pmd_t *)2 results are dummy
 * non-NULL values that are never dereferenced.
 */
#define pud_alloc_one(mm,address)		({ BUG(); ((pud_t *)2); })
#define pud_free(mm, x)				do { } while (0)

#define pmd_alloc_one(mm,address)		({ BUG(); ((pmd_t *)2); })
#define pmd_free(mm, x)				do { } while (0)

#define pgd_populate(mm, pgd, pud)		BUG()
#define pgd_populate_kernel(mm, pgd, pud)	BUG()

#define pud_populate(mm, pud, pmd)		BUG()
#define pud_populate_kernel(mm, pud, pmd)	BUG()
74
75#else
76
77static inline unsigned long pgd_entry_type(struct mm_struct *mm)
78{
79 if (mm->context.asce_limit <= (1UL << 31))
80 return _SEGMENT_ENTRY_EMPTY;
81 if (mm->context.asce_limit <= (1UL << 42))
82 return _REGION3_ENTRY_EMPTY;
83 return _REGION2_ENTRY_EMPTY;
84}
85
/* Grow/shrink the number of page table levels of a mm to cover 'limit'. */
int crst_table_upgrade(struct mm_struct *, unsigned long limit);
void crst_table_downgrade(struct mm_struct *, unsigned long limit);
88
89static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
90{
91 unsigned long *table = crst_table_alloc(mm, mm->context.noexec);
92 if (table)
93 crst_table_init(table, _REGION3_ENTRY_EMPTY);
94 return (pud_t *) table;
95}
96#define pud_free(mm, pud) crst_table_free(mm, (unsigned long *) pud)
97
98static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
99{
100 unsigned long *table = crst_table_alloc(mm, mm->context.noexec);
101 if (table)
102 crst_table_init(table, _SEGMENT_ENTRY_EMPTY);
103 return (pmd_t *) table;
104}
105#define pmd_free(mm, pmd) crst_table_free(mm, (unsigned long *) pmd)
106
/*
 * Link a pud table into a pgd slot as a region-second table entry.
 * Kernel variant: does not touch the noexec shadow tables.
 */
static inline void pgd_populate_kernel(struct mm_struct *mm,
				       pgd_t *pgd, pud_t *pud)
{
	pgd_val(*pgd) = _REGION2_ENTRY | __pa(pud);
}
112
113static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
114{
115 pgd_populate_kernel(mm, pgd, pud);
116 if (mm->context.noexec) {
117 pgd = get_shadow_table(pgd);
118 pud = get_shadow_table(pud);
119 pgd_populate_kernel(mm, pgd, pud);
120 }
121}
122
/*
 * Link a pmd table into a pud slot as a region-third table entry.
 * Kernel variant: does not touch the noexec shadow tables.
 */
static inline void pud_populate_kernel(struct mm_struct *mm,
				       pud_t *pud, pmd_t *pmd)
{
	pud_val(*pud) = _REGION3_ENTRY | __pa(pmd);
}
128
129static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
130{
131 pud_populate_kernel(mm, pud, pmd);
132 if (mm->context.noexec) {
133 pud = get_shadow_table(pud);
134 pmd = get_shadow_table(pmd);
135 pud_populate_kernel(mm, pud, pmd);
136 }
137}
138
139#endif
140
141static inline pgd_t *pgd_alloc(struct mm_struct *mm)
142{
143 spin_lock_init(&mm->context.list_lock);
144 INIT_LIST_HEAD(&mm->context.crst_list);
145 INIT_LIST_HEAD(&mm->context.pgtable_list);
146 return (pgd_t *) crst_table_alloc(mm, s390_noexec);
147}
148#define pgd_free(mm, pgd) crst_table_free(mm, (unsigned long *) pgd)
149
/*
 * Link a page table into a pmd slot as a segment table entry.
 * Kernel variant: does not touch the noexec shadow tables.
 */
static inline void pmd_populate_kernel(struct mm_struct *mm,
				       pmd_t *pmd, pte_t *pte)
{
	pmd_val(*pmd) = _SEGMENT_ENTRY + __pa(pte);
}
155
156static inline void pmd_populate(struct mm_struct *mm,
157 pmd_t *pmd, pgtable_t pte)
158{
159 pmd_populate_kernel(mm, pmd, pte);
160 if (mm->context.noexec) {
161 pmd = get_shadow_table(pmd);
162 pmd_populate_kernel(mm, pmd, pte + PTRS_PER_PTE);
163 }
164}
165
/* Recover the page table address stored in a pmd entry by masking off
 * the low flag bits (the table is aligned to its own size). */
#define pmd_pgtable(pmd) \
	(pgtable_t)(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE)

/* page table entry allocation/freeing: kernel and user page tables
 * are both drawn from the per-mm page_table_alloc() pool. */
#define pte_alloc_one_kernel(mm, vmaddr) ((pte_t *) page_table_alloc(mm))
#define pte_alloc_one(mm, vmaddr) ((pte_t *) page_table_alloc(mm))

#define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte)
#define pte_free(mm, pte) page_table_free(mm, (unsigned long *) pte)
177
178#endif
179