/* ----------------------------------------------------------------------- *
 *
 *   Copyright 2014 Intel Corporation; author: H. Peter Anvin
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License version 2 as
 *   published by the Free Software Foundation.
 *
 * ----------------------------------------------------------------------- */

/*
 * The IRET instruction, when returning to a 16-bit segment, only
 * restores the bottom 16 bits of the user space stack pointer.  This
 * causes some 16-bit software to break, but it also leaks kernel state
 * to user space.  This works around this by creating percpu "ministacks",
 * each of which is mapped 2^16 times 64K apart.  When we detect that the
 * return SS is on the LDT, we copy the IRET frame to the ministack and
 * use the relevant alias to return to userspace.  The ministacks are
 * mapped readonly, so if the IRET fault we promote #GP to #DF which is
 * an IST vector and thus has its own stack; we then do the fixup in the
 * #DF handler.
 *
 * This file sets up the ministacks and the related page tables.  The
 * actual ministack invocation is in entry_64.S.
 */
#include <linux/init.h>
#include <linux/init_task.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/random.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/setup.h>
#include <asm/espfix.h>

/*
 * Note: we only need 6*8 = 48 bytes for the espfix stack, but round
 * it up to a cache line to avoid unnecessary sharing.
 */
#define ESPFIX_STACK_SIZE	(8*8UL)
#define ESPFIX_STACKS_PER_PAGE	(PAGE_SIZE/ESPFIX_STACK_SIZE)

/* There is address space for how many espfix pages? */
#define ESPFIX_PAGE_SPACE	(1UL << (P4D_SHIFT-PAGE_SHIFT-16))

#define ESPFIX_MAX_CPUS		(ESPFIX_STACKS_PER_PAGE * ESPFIX_PAGE_SPACE)
#if CONFIG_NR_CPUS > ESPFIX_MAX_CPUS
# error "Need more virtual address space for the ESPFIX hack"
#endif
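
/*
 * Capacity check, worked through with the usual x86-64 values (an
 * illustration, not additional configuration): PAGE_SIZE = 4096 and
 * ESPFIX_STACK_SIZE = 64, so ESPFIX_STACKS_PER_PAGE = 64.  With
 * P4D_SHIFT = 39 and PAGE_SHIFT = 12, ESPFIX_PAGE_SPACE =
 * 1 << (39 - 12 - 16) = 2048 pages.  That allows up to
 * 64 * 2048 = 131072 CPUs before the #error above triggers.
 */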

#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)

/* This contains the *bottom* address of the espfix stack */
DEFINE_PER_CPU_READ_MOSTLY(unsigned long, espfix_stack);
DEFINE_PER_CPU_READ_MOSTLY(unsigned long, espfix_waddr);

/* Initialization mutex - should this be a spinlock? */
static DEFINE_MUTEX(espfix_init_mutex);

/* Page allocation bitmap - each page serves ESPFIX_STACKS_PER_PAGE CPUs */
#define ESPFIX_MAX_PAGES	DIV_ROUND_UP(CONFIG_NR_CPUS, ESPFIX_STACKS_PER_PAGE)
static void *espfix_pages[ESPFIX_MAX_PAGES];

static __page_aligned_bss pud_t espfix_pud_page[PTRS_PER_PUD]
	__aligned(PAGE_SIZE);

static unsigned int page_random, slot_random;

/*
 * This returns the bottom address of the espfix stack for a specific CPU.
 * The math allows for a non-power-of-two ESPFIX_STACK_SIZE, in which case
 * we have to account for some amount of padding at the end of each page.
 */
static inline unsigned long espfix_base_addr(unsigned int cpu)
{
	unsigned long page, slot;
	unsigned long addr;

	page = (cpu / ESPFIX_STACKS_PER_PAGE) ^ page_random;
	slot = (cpu + slot_random) % ESPFIX_STACKS_PER_PAGE;
	addr = (page << PAGE_SHIFT) + (slot * ESPFIX_STACK_SIZE);
	addr = (addr & 0xffffUL) | ((addr & ~0xffffUL) << 16);
	addr += ESPFIX_BASE_ADDR;
	return addr;
}
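
/*
 * Worked example (illustrative only, randomization taken as zero):
 * CPU 0 gets page 0, slot 0, so addr == ESPFIX_BASE_ADDR; CPU 1 gets
 * slot 1 in the same page, 0x40 bytes higher.  The bit splice above
 * keeps the low 16 bits of the linear offset in place and shifts the
 * rest up by 16, so offset 0x10000 becomes 0x100000000: consecutive
 * 64K blocks of "compressed" stack space end up 4G apart, leaving the
 * intervening space free for the 2^16 aliases mapped 64K apart.
 */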

#define PTE_STRIDE		(65536/PAGE_SIZE)
#define ESPFIX_PTE_CLONES	(PTRS_PER_PTE/PTE_STRIDE)
#define ESPFIX_PMD_CLONES	PTRS_PER_PMD
#define ESPFIX_PUD_CLONES	(65536/(ESPFIX_PTE_CLONES*ESPFIX_PMD_CLONES))
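
/*
 * Clone-count arithmetic with 4K pages and 512-entry tables (again just
 * an illustration): PTE_STRIDE = 65536/4096 = 16, so each PTE page
 * holds 512/16 = 32 clones of a stack mapping; every one of the 512 PMD
 * entries is cloned, and 65536/(32*512) = 4 PUD entries are cloned.
 * Together that yields 32 * 512 * 4 = 65536 aliases of each ministack,
 * 64K apart.
 */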

/* Upper-level entries are read-only and NX; the CPU applies both to the whole range below */
#define PGTABLE_PROT	((_KERNPG_TABLE & ~_PAGE_RW) | _PAGE_NX)

static void init_espfix_random(void)
{
	unsigned long rand;

	/*
	 * This is run before the entropy pools are initialized,
	 * but this is hopefully better than nothing.
	 */
	if (!arch_get_random_long(&rand)) {
		/* The constant is an arbitrary large prime */
		rand = rdtsc();
		rand *= 0xc345c6b72fd16123UL;
	}

	slot_random = rand % ESPFIX_STACKS_PER_PAGE;
	page_random = (rand / ESPFIX_STACKS_PER_PAGE)
		& (ESPFIX_PAGE_SPACE - 1);
}
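
/*
 * With the usual values worked out above, slot_random contributes 6 bits
 * of entropy (64 slots) and page_random 11 bits (2048 pages), so the
 * ministack addresses are perturbed by up to 17 bits even when the
 * boot-time entropy pool is still cold.
 */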

void __init init_espfix_bsp(void)
{
	pgd_t *pgd;
	p4d_t *p4d;

	/* Install the espfix pud into the kernel page directory */
	pgd = &init_top_pgt[pgd_index(ESPFIX_BASE_ADDR)];
	p4d = p4d_alloc(&init_mm, pgd, ESPFIX_BASE_ADDR);
	p4d_populate(&init_mm, p4d, espfix_pud_page);

	/* Randomize the locations */
	init_espfix_random();

	/* The rest is the same as for any other processor */
	init_espfix_ap(0);
}

void init_espfix_ap(int cpu)
{
	unsigned int page;
	unsigned long addr;
	pud_t pud, *pud_p;
	pmd_t pmd, *pmd_p;
	pte_t pte, *pte_p;
	int n, node;
	void *stack_page;
	pteval_t ptemask;

	/* We only have to do this once... */
	if (likely(per_cpu(espfix_stack, cpu)))
		return;		/* Already initialized */

	addr = espfix_base_addr(cpu);
	page = cpu/ESPFIX_STACKS_PER_PAGE;

	/* Did another CPU already set this up? */
	stack_page = READ_ONCE(espfix_pages[page]);
	if (likely(stack_page))
		goto done;

	mutex_lock(&espfix_init_mutex);

	/* Did we race on the lock? */
	stack_page = READ_ONCE(espfix_pages[page]);
	if (stack_page)
		goto unlock_done;

	node = cpu_to_node(cpu);
	ptemask = __supported_pte_mask;

	pud_p = &espfix_pud_page[pud_index(addr)];
	pud = *pud_p;
	if (!pud_present(pud)) {
		struct page *page = alloc_pages_node(node, PGALLOC_GFP, 0);

		pmd_p = (pmd_t *)page_address(page);
		pud = __pud(__pa(pmd_p) | (PGTABLE_PROT & ptemask));
		paravirt_alloc_pmd(&init_mm, __pa(pmd_p) >> PAGE_SHIFT);
		for (n = 0; n < ESPFIX_PUD_CLONES; n++)
			set_pud(&pud_p[n], pud);
	}
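
	/*
	 * Illustration of the clone counts: each PUD entry maps 1G, and
	 * the 65536 aliases of a ministack span 65536 * 64K = 4G, which
	 * is why ESPFIX_PUD_CLONES == 4 entries all point at the same
	 * PMD page.
	 */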

	pmd_p = pmd_offset(&pud, addr);
	pmd = *pmd_p;
	if (!pmd_present(pmd)) {
		struct page *page = alloc_pages_node(node, PGALLOC_GFP, 0);

		pte_p = (pte_t *)page_address(page);
		pmd = __pmd(__pa(pte_p) | (PGTABLE_PROT & ptemask));
		paravirt_alloc_pte(&init_mm, __pa(pte_p) >> PAGE_SHIFT);
		for (n = 0; n < ESPFIX_PMD_CLONES; n++)
			set_pmd(&pmd_p[n], pmd);
	}

	pte_p = pte_offset_kernel(&pmd, addr);
	stack_page = page_address(alloc_pages_node(node, GFP_KERNEL, 0));
	/* The aliases are mapped read-only; a faulting IRET is fixed up in the #DF handler */
	pte = __pte(__pa(stack_page) | (__PAGE_KERNEL_RO & ptemask));
	for (n = 0; n < ESPFIX_PTE_CLONES; n++)
		set_pte(&pte_p[n*PTE_STRIDE], pte);
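
	/*
	 * At this point the one physical stack page is visible at
	 * ESPFIX_PTE_CLONES * ESPFIX_PMD_CLONES * ESPFIX_PUD_CLONES
	 * = 65536 read-only virtual addresses, 64K apart, so there is
	 * an alias whose low 16 bits match any possible truncated
	 * user stack pointer.
	 */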

	/* Job is done for this CPU and any CPU which shares this page */
	WRITE_ONCE(espfix_pages[page], stack_page);

unlock_done:
	mutex_unlock(&espfix_init_mutex);
done:
	per_cpu(espfix_stack, cpu) = addr;	/* the read-only alias address */
	per_cpu(espfix_waddr, cpu) = (unsigned long)stack_page
		+ (addr & ~PAGE_MASK);		/* the writable kernel address */
}