// SPDX-License-Identifier: GPL-2.0
/*
 * Device driver to expose SGX enclave page cache (EPC) memory to KVM guests.
 *
 * Copyright(c) 2021 Intel Corporation.
 */

#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/xarray.h>
#include <asm/sgx.h>
#include <uapi/asm/sgx.h>

#include "encls.h"
#include "sgx.h"

struct sgx_vepc {
	struct xarray page_array;
	struct mutex lock;
};

/*
 * SECS pages that could not yet be EREMOVE'd because they still have
 * child pages in other virtual EPC instances ("zombie" SECS pages),
 * plus the lock protecting the list.
 */
static struct mutex zombie_secs_pages_lock;
static struct list_head zombie_secs_pages;

static int __sgx_vepc_fault(struct sgx_vepc *vepc,
			    struct vm_area_struct *vma, unsigned long addr)
{
	struct sgx_epc_page *epc_page;
	unsigned long index, pfn;
	int ret;

	WARN_ON(!mutex_is_locked(&vepc->lock));

	/* Calculate the index of the page in the vepc's page_array */
	index = vma->vm_pgoff + PFN_DOWN(addr - vma->vm_start);

	epc_page = xa_load(&vepc->page_array, index);
	if (epc_page)
		return 0;

	epc_page = sgx_alloc_epc_page(vepc, false);
	if (IS_ERR(epc_page))
		return PTR_ERR(epc_page);

	ret = xa_err(xa_store(&vepc->page_array, index, epc_page, GFP_KERNEL));
	if (ret)
		goto err_free;

	pfn = PFN_DOWN(sgx_get_epc_phys_addr(epc_page));

	ret = vmf_insert_pfn(vma, addr, pfn);
	if (ret != VM_FAULT_NOPAGE) {
		ret = -EFAULT;
		goto err_delete;
	}

	return 0;

err_delete:
	xa_erase(&vepc->page_array, index);
err_free:
	sgx_free_epc_page(epc_page);
	return ret;
}

static vm_fault_t sgx_vepc_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct sgx_vepc *vepc = vma->vm_private_data;
	int ret;

	mutex_lock(&vepc->lock);
	ret = __sgx_vepc_fault(vepc, vma, vmf->address);
	mutex_unlock(&vepc->lock);

	if (!ret)
		return VM_FAULT_NOPAGE;

	if (ret == -EBUSY && (vmf->flags & FAULT_FLAG_ALLOW_RETRY)) {
		mmap_read_unlock(vma->vm_mm);
		return VM_FAULT_RETRY;
	}

	return VM_FAULT_SIGBUS;
}

static const struct vm_operations_struct sgx_vepc_vm_ops = {
	.fault = sgx_vepc_fault,
};

static int sgx_vepc_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct sgx_vepc *vepc = file->private_data;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_ops = &sgx_vepc_vm_ops;
	/* PFN map; don't dump these pages and don't copy the VMA on fork() */
	vma->vm_flags |= VM_PFNMAP | VM_IO | VM_DONTDUMP | VM_DONTCOPY;
	vma->vm_private_data = vepc;

	return 0;
}

static int sgx_vepc_free_page(struct sgx_epc_page *epc_page)
{
	int ret;

	/*
	 * Take a previously guest-owned EPC page and return it to the
	 * general EPC page pool.  The guest cannot be trusted to have
	 * left the page in a sane state, so EREMOVE it unconditionally;
	 * a superfluous EREMOVE on an already-removed page is harmless.
	 */
	ret = __eremove(sgx_get_epc_virt_addr(epc_page));
	if (ret) {
		/*
		 * Only SGX_CHILD_PRESENT is expected here: it means this is
		 * an SECS page that still has child pages, and it can be
		 * EREMOVE'd again once all of its children are gone (see
		 * sgx_vepc_release() below).  The caller (KVM) must ensure
		 * no logical processor is still executing in the guest
		 * enclave; otherwise EREMOVE returns SGX_ENCLAVE_ACT, which
		 * cannot be handled here.
		 */
		WARN_ONCE(ret != SGX_CHILD_PRESENT, EREMOVE_ERROR_MESSAGE,
			  ret, ret);
		return ret;
	}

	sgx_free_epc_page(epc_page);

	return 0;
}

static int sgx_vepc_release(struct inode *inode, struct file *file)
{
	struct sgx_vepc *vepc = file->private_data;
	struct sgx_epc_page *epc_page, *tmp, *entry;
	unsigned long index;

	LIST_HEAD(secs_pages);

	xa_for_each(&vepc->page_array, index, entry) {
		/*
		 * Remove all normal, child pages first.  sgx_vepc_free_page()
		 * fails (and the page is kept in the array) if EREMOVE fails,
		 * which is expected for SECS pages: those can only be
		 * EREMOVE'd after all of their child pages, and are retried
		 * below.
		 */
		if (sgx_vepc_free_page(entry))
			continue;

		xa_erase(&vepc->page_array, index);
	}

	/*
	 * Retry EREMOVE'ing the remaining pages.  This cleans up any SECS
	 * pages whose children all lived in this virtual EPC instance.
	 */
	xa_for_each(&vepc->page_array, index, entry) {
		epc_page = entry;
		/*
		 * An EREMOVE failure here means the SECS page still has
		 * children in some other virtual EPC instance.  Remove it
		 * from the tracking array and park it on the zombie list.
		 */
		if (sgx_vepc_free_page(epc_page))
			list_add_tail(&epc_page->list, &secs_pages);

		xa_erase(&vepc->page_array, index);
	}

	/*
	 * SECS pages are "pinned" by their child pages and can only be
	 * EREMOVE'd once all children are gone.  A child page in this
	 * instance may have been pinning an SECS page freed in an earlier
	 * release(), i.e. a zombie.  Since children were just EREMOVE'd
	 * above, retry all zombies in the hope that some were unpinned.
	 */
	mutex_lock(&zombie_secs_pages_lock);
	list_for_each_entry_safe(epc_page, tmp, &zombie_secs_pages, list) {
		/*
		 * Speculatively take the page off the zombie list; if
		 * EREMOVE succeeds it is freed, otherwise it is thrown
		 * back onto the list via secs_pages below.
		 */
		list_del(&epc_page->list);

		if (sgx_vepc_free_page(epc_page))
			list_add_tail(&epc_page->list, &secs_pages);
	}

	if (!list_empty(&secs_pages))
		list_splice_tail(&secs_pages, &zombie_secs_pages);
	mutex_unlock(&zombie_secs_pages_lock);

	xa_destroy(&vepc->page_array);
	kfree(vepc);

	return 0;
}

static int sgx_vepc_open(struct inode *inode, struct file *file)
{
	struct sgx_vepc *vepc;

	vepc = kzalloc(sizeof(struct sgx_vepc), GFP_KERNEL);
	if (!vepc)
		return -ENOMEM;
	mutex_init(&vepc->lock);
	xa_init(&vepc->page_array);

	file->private_data = vepc;

	return 0;
}

static const struct file_operations sgx_vepc_fops = {
	.owner		= THIS_MODULE,
	.open		= sgx_vepc_open,
	.release	= sgx_vepc_release,
	.mmap		= sgx_vepc_mmap,
};

static struct miscdevice sgx_vepc_dev = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "sgx_vepc",
	.nodename	= "sgx_vepc",
	.fops		= &sgx_vepc_fops,
};

int __init sgx_vepc_init(void)
{
	/* Virtual EPC is only useful for guests, which require VMX */
	if (!cpu_feature_enabled(X86_FEATURE_VMX))
		return -ENODEV;

	INIT_LIST_HEAD(&zombie_secs_pages);
	mutex_init(&zombie_secs_pages_lock);

	return misc_register(&sgx_vepc_dev);
}

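/*
 * Illustrative usage sketch (not part of the driver): a VMM such as QEMU is
 * expected to back a guest's EPC by opening /dev/sgx_vepc and mmap()ing it
 * with MAP_SHARED; EPC pages are then allocated on demand by the fault
 * handler above.  Hypothetical userspace example, sizes are illustrative:
 *
 *	int fd = open("/dev/sgx_vepc", O_RDWR);
 *	if (fd < 0)
 *		err(1, "open /dev/sgx_vepc");
 *
 *	// Must be MAP_SHARED, see sgx_vepc_mmap().
 *	void *epc = mmap(NULL, 64UL << 20, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, 0);
 *	if (epc == MAP_FAILED)
 *		err(1, "mmap");
 */
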
/**
 * sgx_virt_ecreate() - Run ECREATE on behalf of a guest
 * @pageinfo:	Pointer to a PAGEINFO structure
 * @secs:	Userspace pointer to the SECS page
 * @trapnr:	Trap number to inject into the guest on ECREATE fault
 *
 * Run ECREATE on behalf of a guest after KVM has trapped its ECREATE.
 *
 * Return:
 * - 0:		ECREATE was successful.
 * - -EFAULT:	ECREATE faulted; @trapnr holds the trap number to inject.
 * - -EINVAL:	@secs is not a valid userspace pointer.
 */
int sgx_virt_ecreate(struct sgx_pageinfo *pageinfo, void __user *secs,
		     int *trapnr)
{
	int ret;

	/*
	 * @secs is an untrusted, userspace-provided address.  It comes from
	 * KVM and is assumed to be a valid pointer somewhere in userspace;
	 * the ECREATE below can legitimately fault on it if the mapping
	 * does not exist.  WARN on a pointer that is not even a plausible
	 * userspace address, since that indicates a caller (KVM) bug.
	 */
	if (WARN_ON_ONCE(!access_ok(secs, PAGE_SIZE)))
		return -EINVAL;

	__uaccess_begin();
	ret = __ecreate(pageinfo, (void *)secs);
	__uaccess_end();

	if (encls_faulted(ret)) {
		*trapnr = ENCLS_TRAPNR(ret);
		return -EFAULT;
	}

	/* ECREATE doesn't return an error code: it either faults or succeeds. */
	WARN_ON_ONCE(ret);
	return 0;
}
EXPORT_SYMBOL_GPL(sgx_virt_ecreate);

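/*
 * Illustrative caller sketch (not part of this file): a hypervisor that
 * traps the guest's ENCLS[ECREATE] is expected to translate the guest's
 * PAGEINFO and SECS addresses into host pointers and then do roughly the
 * following; the variable names are hypothetical:
 *
 *	int trapnr, ret;
 *
 *	ret = sgx_virt_ecreate(&pageinfo, secs_hva, &trapnr);
 *	if (ret == -EFAULT)
 *		;	// inject 'trapnr' into the guest
 *	else if (ret)
 *		;	// -EINVAL: caller bug, see the WARN above
 */
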
static int __sgx_virt_einit(void __user *sigstruct, void __user *token,
			    void __user *secs)
{
	int ret;

	/*
	 * Make sure all userspace pointers from the caller (KVM) are valid.
	 * This does not validate the contents; EINIT itself will fault or
	 * fail on bad input.  EINITTOKEN is 304 bytes per the SDM.
	 */
#define SGX_EINITTOKEN_SIZE	304
	if (WARN_ON_ONCE(!access_ok(sigstruct, sizeof(struct sgx_sigstruct)) ||
			 !access_ok(token, SGX_EINITTOKEN_SIZE) ||
			 !access_ok(secs, PAGE_SIZE)))
		return -EINVAL;

	__uaccess_begin();
	ret = __einit((void *)sigstruct, (void *)token, (void *)secs);
	__uaccess_end();

	return ret;
}

/**
 * sgx_virt_einit() - Run EINIT on behalf of a guest
 * @sigstruct:		Userspace pointer to a SIGSTRUCT structure
 * @token:		Userspace pointer to an EINITTOKEN structure
 * @secs:		Userspace pointer to the SECS page
 * @lepubkeyhash:	Pointer to the guest's *virtual* SGX_LEPUBKEYHASH MSR
 *			values
 * @trapnr:		Trap number to inject into the guest on EINIT fault
 *
 * Run EINIT on behalf of a guest after KVM has trapped its EINIT.  If SGX_LC
 * is available in the host, the hardware launch-enclave public key hash MSRs
 * are written with the guest's virtual values first, so that EINIT runs with
 * the MSR values the guest expects.
 *
 * Return:
 * - 0:		EINIT was successful.
 * - <0:	on error.
 */
int sgx_virt_einit(void __user *sigstruct, void __user *token,
		   void __user *secs, u64 *lepubkeyhash, int *trapnr)
{
	int ret;

	if (!cpu_feature_enabled(X86_FEATURE_SGX_LC)) {
		ret = __sgx_virt_einit(sigstruct, token, secs);
	} else {
		preempt_disable();

		/*
		 * Write the guest's virtual SGX_LEPUBKEYHASH MSR values into
		 * hardware; preemption stays disabled so EINIT runs on the
		 * same CPU with those values still in place.
		 */
		sgx_update_lepubkeyhash(lepubkeyhash);

		ret = __sgx_virt_einit(sigstruct, token, secs);
		preempt_enable();
	}

	/* Propagate the -EINVAL from the WARN_ON_ONCE() in __sgx_virt_einit() */
	if (ret == -EINVAL)
		return ret;

	if (encls_faulted(ret)) {
		*trapnr = ENCLS_TRAPNR(ret);
		return -EFAULT;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(sgx_virt_einit);