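/*
 * x86_64-specific EFI runtime services support: page table setup for the
 * EFI memory map and the mixed-mode (32-bit firmware) thunking layer.
 */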
#define pr_fmt(fmt) "efi: " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/ioport.h>
#include <linux/mc146818rtc.h>
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/ucs2_string.h>
#include <linux/mem_encrypt.h>
#include <linux/sched/task.h>

#include <asm/setup.h>
#include <asm/page.h>
#include <asm/e820/api.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/proto.h>
#include <asm/efi.h>
#include <asm/cacheflush.h>
#include <asm/fixmap.h>
#include <asm/realmode.h>
#include <asm/time.h>
#include <asm/pgalloc.h>

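/*
 * Runtime services regions get their virtual addresses handed out top-down,
 * starting at EFI_VA_START and bounded below by EFI_VA_END; see
 * efi_map_region().
 */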
static u64 efi_va = EFI_VA_START;

struct efi_scratch efi_scratch;

static void __init early_code_mapping_set_exec(int executable)
{
        efi_memory_desc_t *md;

        if (!(__supported_pte_mask & _PAGE_NX))
                return;

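        /* Toggle NX on EFI runtime and boot services code regions. */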
        for_each_efi_memory_desc(md) {
                if (md->type == EFI_RUNTIME_SERVICES_CODE ||
                    md->type == EFI_BOOT_SERVICES_CODE)
                        efi_set_executable(md, executable);
        }
}

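/*
 * Switch to page tables the firmware can be called with. With efi=old_map
 * the kernel's low mappings are temporarily aliased 1:1; otherwise we simply
 * switch to the dedicated efi_mm. The saved PGD entries are returned so that
 * efi_call_phys_epilog() can undo the change.
 */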
pgd_t * __init efi_call_phys_prolog(void)
{
        unsigned long vaddr, addr_pgd, addr_p4d, addr_pud;
        pgd_t *save_pgd, *pgd_k, *pgd_efi;
        p4d_t *p4d, *p4d_k, *p4d_efi;
        pud_t *pud;

        int pgd;
        int n_pgds, i, j;

        if (!efi_enabled(EFI_OLD_MEMMAP)) {
                efi_switch_mm(&efi_mm);
                return NULL;
        }

        early_code_mapping_set_exec(1);

        n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);
        save_pgd = kmalloc_array(n_pgds, sizeof(*save_pgd), GFP_KERNEL);

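        /*
         * Build a 1:1 mapping of low memory: for every kernel PGD entry that
         * covers physical RAM, save the current entry and repopulate it so
         * that virtual address X maps to physical address X, mirroring the
         * direct mapping one PUD at a time.
         */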
        for (pgd = 0; pgd < n_pgds; pgd++) {
                addr_pgd = (unsigned long)(pgd * PGDIR_SIZE);
                vaddr = (unsigned long)__va(pgd * PGDIR_SIZE);
                pgd_efi = pgd_offset_k(addr_pgd);
                save_pgd[pgd] = *pgd_efi;

                p4d = p4d_alloc(&init_mm, pgd_efi, addr_pgd);
                if (!p4d) {
                        pr_err("Failed to allocate p4d table!\n");
                        goto out;
                }

                for (i = 0; i < PTRS_PER_P4D; i++) {
                        addr_p4d = addr_pgd + i * P4D_SIZE;
                        p4d_efi = p4d + p4d_index(addr_p4d);

                        pud = pud_alloc(&init_mm, p4d_efi, addr_p4d);
                        if (!pud) {
                                pr_err("Failed to allocate pud table!\n");
                                goto out;
                        }

                        for (j = 0; j < PTRS_PER_PUD; j++) {
                                addr_pud = addr_p4d + j * PUD_SIZE;

                                if (addr_pud > (max_pfn << PAGE_SHIFT))
                                        break;

                                vaddr = (unsigned long)__va(addr_pud);

                                pgd_k = pgd_offset_k(vaddr);
                                p4d_k = p4d_offset(pgd_k, vaddr);
                                pud[j] = *pud_offset(p4d_k, vaddr);
                        }
                }
                pgd_offset_k(pgd * PGDIR_SIZE)->pgd &= ~_PAGE_NX;
        }

out:
        __flush_tlb_all();

        return save_pgd;
}

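/*
 * Undo efi_call_phys_prolog(): restore the saved PGD entries, free the page
 * tables that were allocated for the 1:1 mapping, and flush the TLB.
 */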
void __init efi_call_phys_epilog(pgd_t *save_pgd)
{
        int pgd_idx, i;
        int nr_pgds;
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;

        if (!efi_enabled(EFI_OLD_MEMMAP)) {
                efi_switch_mm(efi_scratch.prev_mm);
                return;
        }

        nr_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);

        for (pgd_idx = 0; pgd_idx < nr_pgds; pgd_idx++) {
                pgd = pgd_offset_k(pgd_idx * PGDIR_SIZE);
                set_pgd(pgd_offset_k(pgd_idx * PGDIR_SIZE), save_pgd[pgd_idx]);

                if (!pgd_present(*pgd))
                        continue;

                for (i = 0; i < PTRS_PER_P4D; i++) {
                        p4d = p4d_offset(pgd,
                                         pgd_idx * PGDIR_SIZE + i * P4D_SIZE);

                        if (!p4d_present(*p4d))
                                continue;

                        pud = (pud_t *)p4d_page_vaddr(*p4d);
                        pud_free(&init_mm, pud);
                }

                p4d = (p4d_t *)pgd_page_vaddr(*pgd);
                p4d_free(&init_mm, p4d);
        }

        kfree(save_pgd);

        __flush_tlb_all();
        early_code_mapping_set_exec(0);
}

EXPORT_SYMBOL_GPL(efi_mm);

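/*
 * EFI runtime mappings live in their own set of page tables (efi_mm) so that
 * the EFI region (EFI_VA_END to EFI_VA_START) never has to be inserted into
 * the regular kernel page tables; everything else is shared with the kernel
 * via efi_sync_low_kernel_mappings().
 */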
int __init efi_alloc_page_tables(void)
{
        pgd_t *pgd, *efi_pgd;
        p4d_t *p4d;
        pud_t *pud;
        gfp_t gfp_mask;

        if (efi_enabled(EFI_OLD_MEMMAP))
                return 0;

        gfp_mask = GFP_KERNEL | __GFP_ZERO;
        efi_pgd = (pgd_t *)__get_free_pages(gfp_mask, PGD_ALLOCATION_ORDER);
        if (!efi_pgd)
                return -ENOMEM;

        pgd = efi_pgd + pgd_index(EFI_VA_END);
        p4d = p4d_alloc(&init_mm, pgd, EFI_VA_END);
        if (!p4d) {
                free_pages((unsigned long)efi_pgd, PGD_ALLOCATION_ORDER);
                return -ENOMEM;
        }

        pud = pud_alloc(&init_mm, p4d, EFI_VA_END);
        if (!pud) {
                if (pgtable_l5_enabled())
                        free_page((unsigned long)pgd_page_vaddr(*pgd));
                free_pages((unsigned long)efi_pgd, PGD_ALLOCATION_ORDER);
                return -ENOMEM;
        }

        efi_mm.pgd = efi_pgd;
        mm_init_cpumask(&efi_mm);
        init_new_context(NULL, &efi_mm);

        return 0;
}

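/*
 * Copy the kernel's mappings below the EFI region into the EFI page tables,
 * so that kernel virtual addresses passed as arguments to runtime services
 * stay addressable while efi_mm is live.
 */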
void efi_sync_low_kernel_mappings(void)
{
        unsigned num_entries;
        pgd_t *pgd_k, *pgd_efi;
        p4d_t *p4d_k, *p4d_efi;
        pud_t *pud_k, *pud_efi;
        pgd_t *efi_pgd = efi_mm.pgd;

        if (efi_enabled(EFI_OLD_MEMMAP))
                return;

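        /*
         * All PGD entries from PAGE_OFFSET up to (but not including) the one
         * that covers the EFI region can be copied wholesale from the kernel
         * page tables; the checks below assert that the EFI region shares
         * its PGD entry with MODULES_END.
         */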
        MAYBE_BUILD_BUG_ON(pgd_index(EFI_VA_END) != pgd_index(MODULES_END));
        MAYBE_BUILD_BUG_ON((EFI_VA_START & PGDIR_MASK) !=
                           (EFI_VA_END & PGDIR_MASK));

        pgd_efi = efi_pgd + pgd_index(PAGE_OFFSET);
        pgd_k = pgd_offset_k(PAGE_OFFSET);

        num_entries = pgd_index(EFI_VA_END) - pgd_index(PAGE_OFFSET);
        memcpy(pgd_efi, pgd_k, sizeof(pgd_t) * num_entries);

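        /*
         * The PGD entry covering the EFI region cannot be shared outright.
         * With 5-level paging, copy its P4D entries up to the one containing
         * the EFI region instead.
         */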
        BUILD_BUG_ON(p4d_index(EFI_VA_END) != p4d_index(MODULES_END));
        BUILD_BUG_ON((EFI_VA_START & P4D_MASK) != (EFI_VA_END & P4D_MASK));

        pgd_efi = efi_pgd + pgd_index(EFI_VA_END);
        pgd_k = pgd_offset_k(EFI_VA_END);
        p4d_efi = p4d_offset(pgd_efi, 0);
        p4d_k = p4d_offset(pgd_k, 0);

        num_entries = p4d_index(EFI_VA_END);
        memcpy(p4d_efi, p4d_k, sizeof(p4d_t) * num_entries);

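        /*
         * Within that last P4D entry, share every PUD entry except the ones
         * mapping the EFI region itself: copy the entries below EFI_VA_END
         * and the entries from EFI_VA_START upwards, leaving the hole in
         * between to the EFI mappings.
         */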
        BUILD_BUG_ON((EFI_VA_START & ~PUD_MASK) != 0);
        BUILD_BUG_ON((EFI_VA_END & ~PUD_MASK) != 0);

        p4d_efi = p4d_offset(pgd_efi, EFI_VA_END);
        p4d_k = p4d_offset(pgd_k, EFI_VA_END);
        pud_efi = pud_offset(p4d_efi, 0);
        pud_k = pud_offset(p4d_k, 0);

        num_entries = pud_index(EFI_VA_END);
        memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries);

        pud_efi = pud_offset(p4d_efi, EFI_VA_START);
        pud_k = pud_offset(p4d_k, EFI_VA_START);

        num_entries = PTRS_PER_PUD - pud_index(EFI_VA_START);
        memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries);
}

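/*
 * Translate a kernel virtual address to a physical address for the 32-bit
 * thunk calls, falling back to a page-table walk for addresses that
 * virt_to_phys() cannot handle (typically stack variables when
 * CONFIG_VMAP_STACK is enabled).
 */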
static inline phys_addr_t
virt_to_phys_or_null_size(void *va, unsigned long size)
{
        bool bad_size;

        if (!va)
                return 0;

        if (virt_addr_valid(va))
                return virt_to_phys(va);

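        /*
         * A fully aligned variable on the stack cannot cross a page
         * boundary, so a single slow_virt_to_phys() lookup is enough; warn
         * about sizes that are not a power of two (e.g. strings on the
         * stack), since those could span two pages.
         */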
        bad_size = size > PAGE_SIZE || !is_power_of_2(size);

        WARN_ON(!IS_ALIGNED((unsigned long)va, size) || bad_size);

        return slow_virt_to_phys(va);
}

#define virt_to_phys_or_null(addr)                                      \
        virt_to_phys_or_null_size((addr), sizeof(*(addr)))

int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
{
        unsigned long pfn, text, pf;
        struct page *page;
        unsigned npages;
        pgd_t *pgd = efi_mm.pgd;

        if (efi_enabled(EFI_OLD_MEMMAP))
                return 0;

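        /*
         * The new memory map may live in pages that are not yet covered by
         * the EFI page tables, so identity-map it here before
         * SetVirtualAddressMap() is called with its physical address.
         */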
        pfn = pa_memmap >> PAGE_SHIFT;
        pf = _PAGE_NX | _PAGE_RW | _PAGE_ENC;
        if (kernel_map_pages_in_pgd(pgd, pfn, pa_memmap, num_pages, pf)) {
                pr_err("Error ident-mapping new memmap (0x%lx)!\n", pa_memmap);
                return 1;
        }

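        /*
         * Some firmware keeps writing to the first physical page during
         * SetVirtualAddressMap() even though it is marked as conventional
         * memory, so keep a 1:1 mapping of page 0 to avoid early faults;
         * trim_bios_range() keeps that page away from the allocators anyway.
         */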
        pf = _PAGE_RW;
        if (sev_active())
                pf |= _PAGE_ENC;

        if (kernel_map_pages_in_pgd(pgd, 0x0, 0x0, 1, pf)) {
                pr_err("Failed to create 1:1 mapping for the first page!\n");
                return 1;
        }

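        /*
         * For mixed mode, calls into the 32-bit firmware need the kernel
         * text mapped 1:1 and a stack that is addressable with 32-bit
         * pointers, so allocate one below 4GB.
         */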
        if (!IS_ENABLED(CONFIG_EFI_MIXED) || efi_is_native())
                return 0;

        page = alloc_page(GFP_KERNEL|__GFP_DMA32);
        if (!page)
                panic("Unable to allocate EFI runtime stack < 4GB\n");

        efi_scratch.phys_stack = virt_to_phys(page_address(page));
        efi_scratch.phys_stack += PAGE_SIZE;

        npages = (_etext - _text) >> PAGE_SHIFT;
        text = __pa(_text);
        pfn = text >> PAGE_SHIFT;

        pf = _PAGE_RW | _PAGE_ENC;
        if (kernel_map_pages_in_pgd(pgd, pfn, text, npages, pf)) {
                pr_err("Failed to map kernel text 1:1\n");
                return 1;
        }

        return 0;
}

static void __init __map_region(efi_memory_desc_t *md, u64 va)
{
        unsigned long flags = _PAGE_RW;
        unsigned long pfn;
        pgd_t *pgd = efi_mm.pgd;

        if (!(md->attribute & EFI_MEMORY_WB))
                flags |= _PAGE_PCD;

        if (sev_active() && md->type != EFI_MEMORY_MAPPED_IO)
                flags |= _PAGE_ENC;

        pfn = md->phys_addr >> PAGE_SHIFT;
        if (kernel_map_pages_in_pgd(pgd, pfn, va, md->num_pages, flags))
                pr_warn("Error mapping PA 0x%llx -> VA 0x%llx!\n",
                        md->phys_addr, va);
}

void __init efi_map_region(efi_memory_desc_t *md)
{
        unsigned long size = md->num_pages << PAGE_SHIFT;
        u64 pa = md->phys_addr;

        if (efi_enabled(EFI_OLD_MEMMAP))
                return old_map_region(md);

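        /*
         * Always keep a 1:1 mapping of the region as well, as a safety net
         * for firmware that keeps dereferencing physical pointers after
         * SetVirtualAddressMap().
         */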
        __map_region(md, md->phys_addr);

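        /*
         * In mixed mode the firmware is 32-bit, so hand it the physical
         * address as the "virtual" address and skip the top-down VA
         * allocation below.
         */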
        if (!efi_is_native() && IS_ENABLED(CONFIG_EFI_MIXED)) {
                md->virt_addr = md->phys_addr;
                return;
        }

        efi_va -= size;

        /* Is the physical address 2MB-aligned? */
        if (!(pa & (PMD_SIZE - 1))) {
                efi_va &= PMD_MASK;
        } else {
                u64 pa_offset = pa & (PMD_SIZE - 1);
                u64 prev_va = efi_va;

                /* Keep the same offset within the 2MB page. */
                efi_va = (efi_va & PMD_MASK) + pa_offset;

                if (efi_va > prev_va)
                        efi_va -= PMD_SIZE;
        }

        if (efi_va < EFI_VA_END) {
                pr_warn(FW_WARN "VA address range overflow!\n");
                return;
        }

        /* Do the VA map. */
        __map_region(md, efi_va);
        md->virt_addr = efi_va;
}

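/*
 * kexec'd kernels reuse the virtual addresses chosen by the first kernel:
 * map the region both 1:1 and at the md->virt_addr recorded in the memory
 * descriptor that was handed over.
 */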
void __init efi_map_region_fixed(efi_memory_desc_t *md)
{
        __map_region(md, md->phys_addr);
        __map_region(md, md->virt_addr);
}

void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
                                 u32 type, u64 attribute)
{
        unsigned long last_map_pfn;

        if (type == EFI_MEMORY_MAPPED_IO)
                return ioremap(phys_addr, size);

        last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size);
        if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size) {
                unsigned long top = last_map_pfn << PAGE_SHIFT;
                efi_ioremap(top, size - (top - phys_addr), type, attribute);
        }

        if (!(attribute & EFI_MEMORY_WB))
                efi_memory_uc((u64)(unsigned long)__va(phys_addr), size);

        return (void __iomem *)__va(phys_addr);
}

void __init parse_efi_setup(u64 phys_addr, u32 data_len)
{
        efi_setup = phys_addr + sizeof(struct setup_data);
}

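/* Update both the 1:1 and the virtual mapping of a region with new flags. */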
static int __init efi_update_mappings(efi_memory_desc_t *md, unsigned long pf)
{
        unsigned long pfn;
        pgd_t *pgd = efi_mm.pgd;
        int err1, err2;

        /* Update the 1:1 mapping first. */
        pfn = md->phys_addr >> PAGE_SHIFT;
        err1 = kernel_map_pages_in_pgd(pgd, pfn, md->phys_addr, md->num_pages, pf);
        if (err1) {
                pr_err("Error while updating 1:1 mapping PA 0x%llx -> VA 0x%llx!\n",
                       md->phys_addr, md->virt_addr);
        }

        err2 = kernel_map_pages_in_pgd(pgd, pfn, md->virt_addr, md->num_pages, pf);
        if (err2) {
                pr_err("Error while updating VA mapping PA 0x%llx -> VA 0x%llx!\n",
                       md->phys_addr, md->virt_addr);
        }

        return err1 || err2;
}

static int __init efi_update_mem_attr(struct mm_struct *mm, efi_memory_desc_t *md)
{
        unsigned long pf = 0;

        if (md->attribute & EFI_MEMORY_XP)
                pf |= _PAGE_NX;

        if (!(md->attribute & EFI_MEMORY_RO))
                pf |= _PAGE_RW;

        if (sev_active())
                pf |= _PAGE_ENC;

        return efi_update_mappings(md, pf);
}

void __init efi_runtime_update_mappings(void)
{
        efi_memory_desc_t *md;

        if (efi_enabled(EFI_OLD_MEMMAP)) {
                if (__supported_pte_mask & _PAGE_NX)
                        runtime_code_page_mkexec();
                return;
        }

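        /*
         * If the firmware published an EFI Memory Attributes Table, prefer
         * it for restricting runtime region permissions.
         */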
        if (efi_enabled(EFI_MEM_ATTR)) {
                efi_memattr_apply_permissions(NULL, efi_update_mem_attr);
                return;
        }

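        /*
         * Otherwise fall back to EFI_PROPERTIES_TABLE-style permissions,
         * but only when the firmware advertised non-executable PE data
         * (EFI_NX_PE_DATA); without that, leave the mappings alone.
         */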
        if (!efi_enabled(EFI_NX_PE_DATA))
                return;

        for_each_efi_memory_desc(md) {
                unsigned long pf = 0;

                if (!(md->attribute & EFI_MEMORY_RUNTIME))
                        continue;

                if (!(md->attribute & EFI_MEMORY_WB))
                        pf |= _PAGE_PCD;

                if ((md->attribute & EFI_MEMORY_XP) ||
                    (md->type == EFI_RUNTIME_SERVICES_DATA))
                        pf |= _PAGE_NX;

                if (!(md->attribute & EFI_MEMORY_RO) &&
                    (md->type != EFI_RUNTIME_SERVICES_CODE))
                        pf |= _PAGE_RW;

                if (sev_active())
                        pf |= _PAGE_ENC;

                efi_update_mappings(md, pf);
        }
}

void __init efi_dump_pagetable(void)
{
#ifdef CONFIG_EFI_PGT_DUMP
        if (efi_enabled(EFI_OLD_MEMMAP))
                ptdump_walk_pgd_level(NULL, swapper_pg_dir);
        else
                ptdump_walk_pgd_level(NULL, efi_mm.pgd);
#endif
}

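/*
 * Switch the calling thread to the given mm (typically efi_mm) and remember
 * the previous one in efi_scratch.prev_mm so the caller can switch back.
 * There is no locking against concurrent users here; callers are expected to
 * serialize EFI calls themselves.
 */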
void efi_switch_mm(struct mm_struct *mm)
{
        task_lock(current);
        efi_scratch.prev_mm = current->active_mm;
        current->active_mm = mm;
        switch_mm(efi_scratch.prev_mm, mm, NULL);
        task_unlock(current);
}

#ifdef CONFIG_EFI_MIXED
extern efi_status_t efi64_thunk(u32, ...);

static DEFINE_SPINLOCK(efi_runtime_lock);

#define runtime_service32(func)                                           \
({                                                                        \
        u32 table = (u32)(unsigned long)efi.systab;                       \
        u32 *rt, *___f;                                                   \
                                                                          \
        rt = (u32 *)(table + offsetof(efi_system_table_32_t, runtime));  \
        ___f = (u32 *)(*rt + offsetof(efi_runtime_services_32_t, func)); \
        *___f;                                                            \
})

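/*
 * Call a 32-bit EFI runtime service: look up its pointer in the 32-bit
 * runtime services table and invoke it through the efi64_thunk() trampoline,
 * wrapped in the usual arch_efi_call_virt_setup()/teardown() pair.
 */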
#define efi_thunk(f, ...)                                               \
({                                                                      \
        efi_status_t __s;                                               \
        u32 __func;                                                     \
                                                                        \
        arch_efi_call_virt_setup();                                     \
                                                                        \
        __func = runtime_service32(f);                                  \
        __s = efi64_thunk(__func, __VA_ARGS__);                         \
                                                                        \
        arch_efi_call_virt_teardown();                                  \
                                                                        \
        __s;                                                            \
})

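/*
 * SetVirtualAddressMap() is special: it is invoked once through its physical
 * entry point with the efi_mm page tables live, rather than through the
 * runtime services table used by efi_thunk().
 */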
efi_status_t efi_thunk_set_virtual_address_map(
        void *phys_set_virtual_address_map,
        unsigned long memory_map_size,
        unsigned long descriptor_size,
        u32 descriptor_version,
        efi_memory_desc_t *virtual_map)
{
        efi_status_t status;
        unsigned long flags;
        u32 func;

        efi_sync_low_kernel_mappings();
        local_irq_save(flags);

        efi_switch_mm(&efi_mm);

        func = (u32)(unsigned long)phys_set_virtual_address_map;
        status = efi64_thunk(func, memory_map_size, descriptor_size,
                             descriptor_version, virtual_map);

        efi_switch_mm(efi_scratch.prev_mm);
        local_irq_restore(flags);

        return status;
}

static efi_status_t efi_thunk_get_time(efi_time_t *tm, efi_time_cap_t *tc)
{
        efi_status_t status;
        u32 phys_tm, phys_tc;
        unsigned long flags;

        spin_lock(&rtc_lock);
        spin_lock_irqsave(&efi_runtime_lock, flags);

        phys_tm = virt_to_phys_or_null(tm);
        phys_tc = virt_to_phys_or_null(tc);

        status = efi_thunk(get_time, phys_tm, phys_tc);

        spin_unlock_irqrestore(&efi_runtime_lock, flags);
        spin_unlock(&rtc_lock);

        return status;
}

static efi_status_t efi_thunk_set_time(efi_time_t *tm)
{
        efi_status_t status;
        u32 phys_tm;
        unsigned long flags;

        spin_lock(&rtc_lock);
        spin_lock_irqsave(&efi_runtime_lock, flags);

        phys_tm = virt_to_phys_or_null(tm);

        status = efi_thunk(set_time, phys_tm);

        spin_unlock_irqrestore(&efi_runtime_lock, flags);
        spin_unlock(&rtc_lock);

        return status;
}

static efi_status_t
efi_thunk_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending,
                          efi_time_t *tm)
{
        efi_status_t status;
        u32 phys_enabled, phys_pending, phys_tm;
        unsigned long flags;

        spin_lock(&rtc_lock);
        spin_lock_irqsave(&efi_runtime_lock, flags);

        phys_enabled = virt_to_phys_or_null(enabled);
        phys_pending = virt_to_phys_or_null(pending);
        phys_tm = virt_to_phys_or_null(tm);

        status = efi_thunk(get_wakeup_time, phys_enabled,
                           phys_pending, phys_tm);

        spin_unlock_irqrestore(&efi_runtime_lock, flags);
        spin_unlock(&rtc_lock);

        return status;
}

static efi_status_t
efi_thunk_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
{
        efi_status_t status;
        u32 phys_tm;
        unsigned long flags;

        spin_lock(&rtc_lock);
        spin_lock_irqsave(&efi_runtime_lock, flags);

        phys_tm = virt_to_phys_or_null(tm);

        status = efi_thunk(set_wakeup_time, enabled, phys_tm);

        spin_unlock_irqrestore(&efi_runtime_lock, flags);
        spin_unlock(&rtc_lock);

        return status;
}

static unsigned long efi_name_size(efi_char16_t *name)
{
        return ucs2_strsize(name, EFI_VAR_NAME_LEN) + 1;
}

static efi_status_t
efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor,
                       u32 *attr, unsigned long *data_size, void *data)
{
        efi_status_t status;
        u32 phys_name, phys_vendor, phys_attr;
        u32 phys_data_size, phys_data;
        unsigned long flags;

        spin_lock_irqsave(&efi_runtime_lock, flags);

        phys_data_size = virt_to_phys_or_null(data_size);
        phys_vendor = virt_to_phys_or_null(vendor);
        phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
        phys_attr = virt_to_phys_or_null(attr);
        phys_data = virt_to_phys_or_null_size(data, *data_size);

        status = efi_thunk(get_variable, phys_name, phys_vendor,
                           phys_attr, phys_data_size, phys_data);

        spin_unlock_irqrestore(&efi_runtime_lock, flags);

        return status;
}

static efi_status_t
efi_thunk_set_variable(efi_char16_t *name, efi_guid_t *vendor,
                       u32 attr, unsigned long data_size, void *data)
{
        u32 phys_name, phys_vendor, phys_data;
        efi_status_t status;
        unsigned long flags;

        spin_lock_irqsave(&efi_runtime_lock, flags);

        phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
        phys_vendor = virt_to_phys_or_null(vendor);
        phys_data = virt_to_phys_or_null_size(data, data_size);

        status = efi_thunk(set_variable, phys_name, phys_vendor,
                           attr, data_size, phys_data);

        spin_unlock_irqrestore(&efi_runtime_lock, flags);

        return status;
}

static efi_status_t
efi_thunk_set_variable_nonblocking(efi_char16_t *name, efi_guid_t *vendor,
                                   u32 attr, unsigned long data_size,
                                   void *data)
{
        u32 phys_name, phys_vendor, phys_data;
        efi_status_t status;
        unsigned long flags;

        if (!spin_trylock_irqsave(&efi_runtime_lock, flags))
                return EFI_NOT_READY;

        phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
        phys_vendor = virt_to_phys_or_null(vendor);
        phys_data = virt_to_phys_or_null_size(data, data_size);

        status = efi_thunk(set_variable, phys_name, phys_vendor,
                           attr, data_size, phys_data);

        spin_unlock_irqrestore(&efi_runtime_lock, flags);

        return status;
}

static efi_status_t
efi_thunk_get_next_variable(unsigned long *name_size,
                            efi_char16_t *name,
                            efi_guid_t *vendor)
{
        efi_status_t status;
        u32 phys_name_size, phys_name, phys_vendor;
        unsigned long flags;

        spin_lock_irqsave(&efi_runtime_lock, flags);

        phys_name_size = virt_to_phys_or_null(name_size);
        phys_vendor = virt_to_phys_or_null(vendor);
        phys_name = virt_to_phys_or_null_size(name, *name_size);

        status = efi_thunk(get_next_variable, phys_name_size,
                           phys_name, phys_vendor);

        spin_unlock_irqrestore(&efi_runtime_lock, flags);

        return status;
}

static efi_status_t
efi_thunk_get_next_high_mono_count(u32 *count)
{
        efi_status_t status;
        u32 phys_count;
        unsigned long flags;

        spin_lock_irqsave(&efi_runtime_lock, flags);

        phys_count = virt_to_phys_or_null(count);
        status = efi_thunk(get_next_high_mono_count, phys_count);

        spin_unlock_irqrestore(&efi_runtime_lock, flags);

        return status;
}

static void
efi_thunk_reset_system(int reset_type, efi_status_t status,
                       unsigned long data_size, efi_char16_t *data)
{
        u32 phys_data;
        unsigned long flags;

        spin_lock_irqsave(&efi_runtime_lock, flags);

        phys_data = virt_to_phys_or_null_size(data, data_size);

        efi_thunk(reset_system, reset_type, status, data_size, phys_data);

        spin_unlock_irqrestore(&efi_runtime_lock, flags);
}

static efi_status_t
efi_thunk_update_capsule(efi_capsule_header_t **capsules,
                         unsigned long count, unsigned long sg_list)
{
        /*
         * Capsule updates are not supported through the mixed-mode thunk:
         * the capsule data would have to be remapped below 4GB and repacked
         * for the 32-bit calling convention first.
         */
        return EFI_UNSUPPORTED;
}

static efi_status_t
efi_thunk_query_variable_info(u32 attr, u64 *storage_space,
                              u64 *remaining_space,
                              u64 *max_variable_size)
{
        efi_status_t status;
        u32 phys_storage, phys_remaining, phys_max;
        unsigned long flags;

        if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
                return EFI_UNSUPPORTED;

        spin_lock_irqsave(&efi_runtime_lock, flags);

        phys_storage = virt_to_phys_or_null(storage_space);
        phys_remaining = virt_to_phys_or_null(remaining_space);
        phys_max = virt_to_phys_or_null(max_variable_size);

        status = efi_thunk(query_variable_info, attr, phys_storage,
                           phys_remaining, phys_max);

        spin_unlock_irqrestore(&efi_runtime_lock, flags);

        return status;
}

static efi_status_t
efi_thunk_query_variable_info_nonblocking(u32 attr, u64 *storage_space,
                                          u64 *remaining_space,
                                          u64 *max_variable_size)
{
        efi_status_t status;
        u32 phys_storage, phys_remaining, phys_max;
        unsigned long flags;

        if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
                return EFI_UNSUPPORTED;

        if (!spin_trylock_irqsave(&efi_runtime_lock, flags))
                return EFI_NOT_READY;

        phys_storage = virt_to_phys_or_null(storage_space);
        phys_remaining = virt_to_phys_or_null(remaining_space);
        phys_max = virt_to_phys_or_null(max_variable_size);

        status = efi_thunk(query_variable_info, attr, phys_storage,
                           phys_remaining, phys_max);

        spin_unlock_irqrestore(&efi_runtime_lock, flags);

        return status;
}

static efi_status_t
efi_thunk_query_capsule_caps(efi_capsule_header_t **capsules,
                             unsigned long count, u64 *max_size,
                             int *reset_type)
{
        /*
         * As with UpdateCapsule(), querying capsule capabilities would
         * require repacking the capsule headers for the 32-bit firmware,
         * which is not implemented.
         */
        return EFI_UNSUPPORTED;
}

void efi_thunk_runtime_setup(void)
{
        efi.get_time = efi_thunk_get_time;
        efi.set_time = efi_thunk_set_time;
        efi.get_wakeup_time = efi_thunk_get_wakeup_time;
        efi.set_wakeup_time = efi_thunk_set_wakeup_time;
        efi.get_variable = efi_thunk_get_variable;
        efi.get_next_variable = efi_thunk_get_next_variable;
        efi.set_variable = efi_thunk_set_variable;
        efi.set_variable_nonblocking = efi_thunk_set_variable_nonblocking;
        efi.get_next_high_mono_count = efi_thunk_get_next_high_mono_count;
        efi.reset_system = efi_thunk_reset_system;
        efi.query_variable_info = efi_thunk_query_variable_info;
        efi.query_variable_info_nonblocking = efi_thunk_query_variable_info_nonblocking;
        efi.update_capsule = efi_thunk_update_capsule;
        efi.query_capsule_caps = efi_thunk_query_capsule_caps;
}
#endif