// SPDX-License-Identifier: GPL-2.0
/*
 * Common Ultravisor functions and initialization
 *
 * Copyright IBM Corp. 2019, 2020
 */
#define KMSG_COMPONENT "prot_virt"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/sizes.h>
#include <linux/bitmap.h>
#include <linux/memblock.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/uv.h>
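
/* the bootdata_preserved fields come from ones in arch/s390/boot/uv.c */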
#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
int __bootdata_preserved(prot_virt_guest);
#endif

struct uv_info __bootdata_preserved(uv_info);

#if IS_ENABLED(CONFIG_KVM)
int __bootdata_preserved(prot_virt_host);
EXPORT_SYMBOL(prot_virt_host);
EXPORT_SYMBOL(uv_info);
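
/* Initialize the Ultravisor, handing it the donated base storage area */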
static int __init uv_init(unsigned long stor_base, unsigned long stor_len)
{
	struct uv_cb_init uvcb = {
		.header.cmd = UVC_CMD_INIT_UV,
		.header.len = sizeof(uvcb),
		.stor_origin = stor_base,
		.stor_len = stor_len,
	};

	if (uv_call(0, (uint64_t)&uvcb)) {
		pr_err("Ultravisor init failed with rc: 0x%x rrc: 0x%x\n",
		       uvcb.header.rc, uvcb.header.rrc);
		return -1;
	}
	return 0;
}
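
/*
 * Allocate the base storage area the Ultravisor needs, donate it via
 * uv_init() and disable protected virtualization host support if either
 * step fails.
 */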
void __init setup_uv(void)
{
	unsigned long uv_stor_base;

	if (!is_prot_virt_host())
		return;

	uv_stor_base = (unsigned long)memblock_alloc_try_nid(
		uv_info.uv_base_stor_len, SZ_1M, SZ_2G,
		MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
	if (!uv_stor_base) {
		pr_warn("Failed to reserve %lu bytes for ultravisor base storage\n",
			uv_info.uv_base_stor_len);
		goto fail;
	}

	if (uv_init(uv_stor_base, uv_info.uv_base_stor_len)) {
		memblock_free(uv_stor_base, uv_info.uv_base_stor_len);
		goto fail;
	}

	pr_info("Reserving %luMB as ultravisor base storage\n",
		uv_info.uv_base_stor_len >> 20);
	return;
fail:
	pr_info("Disabling support for protected virtualization\n");
	prot_virt_host = 0;
}
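
/*
 * Requests the Ultravisor to pin the page in the shared state. This will
 * cause an intercept when the guest attempts to unshare the pinned page.
 */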
static int uv_pin_shared(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_PIN_PAGE_SHARED,
		.header.len = sizeof(uvcb),
		.paddr = paddr,
	};

	if (uv_call(0, (u64)&uvcb))
		return -EINVAL;
	return 0;
}
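
/*
 * Requests the Ultravisor to destroy a guest page and make it
 * accessible to the host. The destroy clears the page instead of
 * exporting it.
 *
 * If the destroy fails, the page is instead exported and still made
 * accessible to the host.
 */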
int uv_destroy_page(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_DESTR_SEC_STOR,
		.header.len = sizeof(uvcb),
		.paddr = paddr
	};

	if (uv_call(0, (u64)&uvcb)) {
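		/*
		 * Older firmware uses rc 0x107 / rrc 0xd to indicate a page
		 * that is already non-secure; treat that as success, like
		 * newer firmware does.
		 */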
		if (uvcb.header.rc == 0x107 && uvcb.header.rrc == 0xd)
			return 0;
		return -EINVAL;
	}
	return 0;
}
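
/*
 * Requests the Ultravisor to encrypt a guest page and make it
 * accessible to the host for paging (export).
 *
 * @paddr: Absolute host address of the page to be exported
 */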
int uv_convert_from_secure(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_CONV_FROM_SEC_STOR,
		.header.len = sizeof(uvcb),
		.paddr = paddr
	};

	if (uv_call(0, (u64)&uvcb))
		return -EINVAL;
	return 0;
}
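
/*
 * Calculate the expected page reference count for a page that would
 * otherwise have no further pins: one per mapping, plus one for the
 * swap cache or the page cache, plus one for attached private data.
 */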
static int expected_page_refs(struct page *page)
{
	int res;

	res = page_mapcount(page);
	if (PageSwapCache(page)) {
		res++;
	} else if (page_mapping(page)) {
		res++;
		if (page_has_private(page))
			res++;
	}
	return res;
}
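
/*
 * Try to convert the page mapped by *ptep using the passed-in UV call
 * control block. The page reference count is frozen at its expected
 * value around the Ultravisor call so that no new references can be
 * taken while the page changes state.
 */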
static int make_secure_pte(pte_t *ptep, unsigned long addr,
			   struct page *exp_page, struct uv_cb_header *uvcb)
{
	pte_t entry = READ_ONCE(*ptep);
	struct page *page;
	int expected, rc = 0;

	if (!pte_present(entry))
		return -ENXIO;
	if (pte_val(entry) & _PAGE_INVALID)
		return -ENXIO;

	page = pte_page(entry);
	if (page != exp_page)
		return -ENXIO;
	if (PageWriteback(page))
		return -EAGAIN;
	expected = expected_page_refs(page);
	if (!page_ref_freeze(page, expected))
		return -EBUSY;
	set_bit(PG_arch_1, &page->flags);
	rc = uv_call(0, (u64)uvcb);
	page_ref_unfreeze(page, expected);
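	/* Return -ENXIO if the page was not mapped, -EINVAL otherwise */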
	if (rc)
		rc = uvcb->rc == 0x10a ? -ENXIO : -EINVAL;
	return rc;
}
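
/*
 * Requests the Ultravisor to make a page accessible to a guest.
 * If it is brought in for the first time, it will be cleared. If it
 * has been exported before, it will be decrypted and integrity
 * checked.
 */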
int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
{
	struct vm_area_struct *vma;
	bool local_drain = false;
	spinlock_t *ptelock;
	unsigned long uaddr;
	struct page *page;
	pte_t *ptep;
	int rc;

again:
	rc = -EFAULT;
	mmap_read_lock(gmap->mm);

	uaddr = __gmap_translate(gmap, gaddr);
	if (IS_ERR_VALUE(uaddr))
		goto out;
	vma = find_vma(gmap->mm, uaddr);
	if (!vma)
		goto out;
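
	/*
	 * Secure pages cannot be huge; bail out if userspace backed this
	 * range with hugetlb pages.
	 */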
	if (is_vm_hugetlb_page(vma))
		goto out;

	rc = -ENXIO;
	page = follow_page(vma, uaddr, FOLL_WRITE);
	if (IS_ERR_OR_NULL(page))
		goto out;

	lock_page(page);
	ptep = get_locked_pte(gmap->mm, uaddr, &ptelock);
	rc = make_secure_pte(ptep, uaddr, page, uvcb);
	pte_unmap_unlock(ptep, ptelock);
	unlock_page(page);
out:
	mmap_read_unlock(gmap->mm);

	if (rc == -EAGAIN) {
		wait_on_page_writeback(page);
	} else if (rc == -EBUSY) {
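		/*
		 * If a local drain has already been tried and the page
		 * refcount still does not match the expected value, drain
		 * the pagevecs on all CPUs and let the caller retry.
		 */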
		if (local_drain) {
			lru_add_drain_all();
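			/* We give up here, and let the caller try again */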
			return -EAGAIN;
		}
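		/*
		 * The refcount did not match the expected value, most
		 * likely because of references held in the local pagevecs.
		 * Drain them on this CPU and retry.
		 */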
		lru_add_drain();
		local_drain = true;
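		/* And now we try again immediately after draining */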
		goto again;
	} else if (rc == -ENXIO) {
		if (gmap_fault(gmap, gaddr, FAULT_FLAG_WRITE))
			return -EFAULT;
		return -EAGAIN;
	}
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_make_secure);

int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr)
{
	struct uv_cb_cts uvcb = {
		.header.cmd = UVC_CMD_CONV_TO_SEC_STOR,
		.header.len = sizeof(uvcb),
		.guest_handle = gmap->guest_handle,
		.gaddr = gaddr,
	};

	return gmap_make_secure(gmap, gaddr, &uvcb);
}
EXPORT_SYMBOL_GPL(gmap_convert_to_secure);
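
/*
 * To be called with the page locked or with an extra reference! This will
 * prevent gmap_make_secure from touching the page concurrently. Having two
 * parallel arch_make_page_accessible calls is fine, as the UV calls become
 * no-ops if the page is already exported.
 */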
int arch_make_page_accessible(struct page *page)
{
	int rc = 0;
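
	/* Hugepages cannot be protected, so there is nothing to do */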
	if (PageHuge(page))
		return 0;
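
	/*
	 * PG_arch_1 only indicates that the page *might* be secure: the bit
	 * is set before conversion and can therefore overindicate. If it is
	 * clear, the page is definitely not secure and nothing needs to be
	 * done.
	 */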
	if (!test_bit(PG_arch_1, &page->flags))
		return 0;

	rc = uv_pin_shared(page_to_phys(page));
	if (!rc) {
		clear_bit(PG_arch_1, &page->flags);
		return 0;
	}

	rc = uv_convert_from_secure(page_to_phys(page));
	if (!rc) {
		clear_bit(PG_arch_1, &page->flags);
		return 0;
	}

	return rc;
}
EXPORT_SYMBOL_GPL(arch_make_page_accessible);

#endif

#if defined(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) || IS_ENABLED(CONFIG_KVM)
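/* sysfs files under /sys/firmware/uv/query/ exposing the UV query data */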
static ssize_t uv_query_facilities(struct kobject *kobj,
				   struct kobj_attribute *attr, char *page)
{
	return scnprintf(page, PAGE_SIZE, "%lx\n%lx\n%lx\n%lx\n",
			 uv_info.inst_calls_list[0],
			 uv_info.inst_calls_list[1],
			 uv_info.inst_calls_list[2],
			 uv_info.inst_calls_list[3]);
}

static struct kobj_attribute uv_query_facilities_attr =
	__ATTR(facilities, 0444, uv_query_facilities, NULL);

static ssize_t uv_query_feature_indications(struct kobject *kobj,
					    struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.uv_feature_indications);
}

static struct kobj_attribute uv_query_feature_indications_attr =
	__ATTR(feature_indications, 0444, uv_query_feature_indications, NULL);

static ssize_t uv_query_max_guest_cpus(struct kobject *kobj,
				       struct kobj_attribute *attr, char *page)
{
	return scnprintf(page, PAGE_SIZE, "%d\n",
			 uv_info.max_guest_cpu_id + 1);
}

static struct kobj_attribute uv_query_max_guest_cpus_attr =
	__ATTR(max_cpus, 0444, uv_query_max_guest_cpus, NULL);

static ssize_t uv_query_max_guest_vms(struct kobject *kobj,
				      struct kobj_attribute *attr, char *page)
{
	return scnprintf(page, PAGE_SIZE, "%d\n",
			 uv_info.max_num_sec_conf);
}

static struct kobj_attribute uv_query_max_guest_vms_attr =
	__ATTR(max_guests, 0444, uv_query_max_guest_vms, NULL);

static ssize_t uv_query_max_guest_addr(struct kobject *kobj,
				       struct kobj_attribute *attr, char *page)
{
	return scnprintf(page, PAGE_SIZE, "%lx\n",
			 uv_info.max_sec_stor_addr);
}

static struct kobj_attribute uv_query_max_guest_addr_attr =
	__ATTR(max_address, 0444, uv_query_max_guest_addr, NULL);

static struct attribute *uv_query_attrs[] = {
	&uv_query_facilities_attr.attr,
	&uv_query_feature_indications_attr.attr,
	&uv_query_max_guest_cpus_attr.attr,
	&uv_query_max_guest_vms_attr.attr,
	&uv_query_max_guest_addr_attr.attr,
	NULL,
};

static struct attribute_group uv_query_attr_group = {
	.attrs = uv_query_attrs,
};

static ssize_t uv_is_prot_virt_guest(struct kobject *kobj,
				     struct kobj_attribute *attr, char *page)
{
	int val = 0;

#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
	val = prot_virt_guest;
#endif
	return scnprintf(page, PAGE_SIZE, "%d\n", val);
}

static ssize_t uv_is_prot_virt_host(struct kobject *kobj,
				    struct kobj_attribute *attr, char *page)
{
	int val = 0;

#if IS_ENABLED(CONFIG_KVM)
	val = prot_virt_host;
#endif

	return scnprintf(page, PAGE_SIZE, "%d\n", val);
}

static struct kobj_attribute uv_prot_virt_guest =
	__ATTR(prot_virt_guest, 0444, uv_is_prot_virt_guest, NULL);

static struct kobj_attribute uv_prot_virt_host =
	__ATTR(prot_virt_host, 0444, uv_is_prot_virt_host, NULL);

static const struct attribute *uv_prot_virt_attrs[] = {
	&uv_prot_virt_guest.attr,
	&uv_prot_virt_host.attr,
	NULL,
};

static struct kset *uv_query_kset;
static struct kobject *uv_kobj;
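
/*
 * Create the /sys/firmware/uv hierarchy: the prot_virt_guest and
 * prot_virt_host files plus the query/ kset with the UV query data.
 */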
static int __init uv_info_init(void)
{
	int rc = -ENOMEM;

	if (!test_facility(158))
		return 0;

	uv_kobj = kobject_create_and_add("uv", firmware_kobj);
	if (!uv_kobj)
		return -ENOMEM;

	rc = sysfs_create_files(uv_kobj, uv_prot_virt_attrs);
	if (rc)
		goto out_kobj;

	uv_query_kset = kset_create_and_add("query", NULL, uv_kobj);
	if (!uv_query_kset) {
		rc = -ENOMEM;
		goto out_ind_files;
	}

	rc = sysfs_create_group(&uv_query_kset->kobj, &uv_query_attr_group);
	if (!rc)
		return 0;

	kset_unregister(uv_query_kset);
out_ind_files:
	sysfs_remove_files(uv_kobj, uv_prot_virt_attrs);
out_kobj:
	kobject_del(uv_kobj);
	kobject_put(uv_kobj);
	return rc;
}
device_initcall(uv_info_init);
#endif