/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>
#include <linux/mem_encrypt.h>
#include <linux/efi.h>

#include <asm/set_memory.h>
#include <asm/e820/api.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>
#include <asm/setup.h>

#include "physaddr.h"

struct ioremap_mem_flags {
	bool system_ram;
	bool desc_other;
};

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			enum page_cache_mode pcm)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WT:
		err = _set_memory_wt(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}

static bool __ioremap_check_ram(struct resource *res)
{
	unsigned long start_pfn, stop_pfn;
	unsigned long i;

	if ((res->flags & IORESOURCE_SYSTEM_RAM) != IORESOURCE_SYSTEM_RAM)
		return false;

	start_pfn = (res->start + PAGE_SIZE - 1) >> PAGE_SHIFT;
	stop_pfn = (res->end + 1) >> PAGE_SHIFT;
	if (stop_pfn > start_pfn) {
		for (i = 0; i < (stop_pfn - start_pfn); ++i)
			if (pfn_valid(start_pfn + i) &&
			    !PageReserved(pfn_to_page(start_pfn + i)))
				return true;
	}

	return false;
}

static int __ioremap_check_desc_other(struct resource *res)
{
	return (res->desc != IORES_DESC_NONE);
}

static int __ioremap_res_check(struct resource *res, void *arg)
{
	struct ioremap_mem_flags *flags = arg;

	if (!flags->system_ram)
		flags->system_ram = __ioremap_check_ram(res);

	if (!flags->desc_other)
		flags->desc_other = __ioremap_check_desc_other(res);

	return flags->system_ram && flags->desc_other;
}

/*
 * To avoid multiple resource walks, this function walks resources marked as
 * IORESOURCE_MEM and IORESOURCE_BUSY, looking for system RAM and/or a
 * resource described by something other than IORES_DESC_NONE
 * (e.g. IORES_DESC_ACPI_TABLES).
 */
static void __ioremap_check_mem(resource_size_t addr, unsigned long size,
				struct ioremap_mem_flags *flags)
{
	u64 start, end;

	start = (u64)addr;
	end = start + size - 1;
	memset(flags, 0, sizeof(*flags));

	walk_mem_res(start, end, flags, __ioremap_res_check);
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. It transparently creates kernel huge I/O mappings when
 * the physical address is aligned to a huge page size (1GB or 2MB) and
 * the requested size is at least the huge page size.
 *
 * NOTE: MTRRs can override PAT memory types with a 4KB granularity.
 * Therefore, the mapping code falls back to using a smaller page toward 4KB
 * when a mapping range is covered by a non-WB type of MTRR.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, enum page_cache_mode pcm, void *caller)
{
	unsigned long offset, vaddr;
	resource_size_t last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct ioremap_mem_flags mem_flags;
	struct vm_struct *area;
	enum page_cache_mode new_pcm;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	__ioremap_check_mem(phys_addr, size, &mem_flags);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (mem_flags.system_ram) {
		WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
			  &phys_addr, &last_addr);
		return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PHYSICAL_PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
				 pcm, &new_pcm);
	if (retval) {
		printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
		return NULL;
	}

	if (pcm != new_pcm) {
		if (!is_new_memtype_allowed(phys_addr, size, pcm, new_pcm)) {
			printk(KERN_ERR
			       "ioremap error for 0x%llx-0x%llx, requested 0x%x, got 0x%x\n",
			       (unsigned long long)phys_addr,
			       (unsigned long long)(phys_addr + size),
			       pcm, new_pcm);
			goto err_free_memtype;
		}
		pcm = new_pcm;
	}

	/*
	 * If the page being mapped is in memory and SEV is active then
	 * make sure the memory encryption attribute is enabled in the
	 * resulting mapping.
	 */
	prot = PAGE_KERNEL_IO;
	if (sev_active() && mem_flags.desc_other)
		prot = pgprot_encrypted(prot);

	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC));
		break;
	case _PAGE_CACHE_MODE_UC_MINUS:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));
		break;
	case _PAGE_CACHE_MODE_WC:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WC));
		break;
	case _PAGE_CACHE_MODE_WT:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WT));
		break;
	case _PAGE_CACHE_MODE_WB:
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		goto err_free_memtype;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;

	if (kernel_map_sync_memtype(phys_addr, size, pcm))
		goto err_free_area;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
		goto err_free_area;

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	/*
	 * Check if the request spans more than any BAR in the iomem resource
	 * tree.
	 */
	if (iomem_map_sanity_check(unaligned_phys_addr, unaligned_size))
		pr_warn("caller %pS mapping multiple BARs\n", caller);

	return ret_addr;
err_free_area:
	free_vm_area(area);
err_free_memtype:
	free_memtype(phys_addr, phys_addr + size);
	return NULL;
}

/**
 * ioremap_nocache     -   map bus memory into CPU space
 * @phys_addr:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. On newer hardware more intelligent handling of the caching
 * attributes is desirable, though there is no guarantee.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled() ? _PAGE_CACHE_MODE_UC : _PAGE_CACHE_MODE_UC_MINUS;
	 *
	 * Until all X drivers are converted to use ioremap_wc(), we will use
	 * UC MINUS. Drivers that are certain they need or can already
	 * be converted over to strong UC can use ioremap_uc().
	 */
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);
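
/*
 * A minimal usage sketch (hypothetical driver code, not part of this file,
 * shown for illustration only): map a device BAR with ioremap_nocache(),
 * access a register through the mmio helpers, then drop the mapping with
 * iounmap(). The BAR index and register offset are made-up values.
 *
 *	void __iomem *regs;
 *	u32 val;
 *
 *	regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *			       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	val = readl(regs + 0x10);	(0x10: made-up register offset)
 *	iounmap(regs);
 */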

/**
 * ioremap_uc     -   map bus memory into CPU space as strongly uncachable
 * @phys_addr:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_uc performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked with a strong
 * preference as completely uncachable on the CPU when possible. For non-PAT
 * systems this ends up setting the page-attribute flags PCD=1, PWT=1. For PAT
 * systems this will set the PAT entry for the page to strong UC. This call
 * will honor existing caching rules from things like the PCI bus. Note that
 * there are other caches and buffers on many busses. On newer hardware more
 * intelligent handling of the caching attributes is desirable, though there
 * is no guarantee.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_uc(resource_size_t phys_addr, unsigned long size)
{
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(ioremap_uc);

/**
 * ioremap_wc	-	map memory into CPU space write combined
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WC,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wc);
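
/*
 * A minimal usage sketch (hypothetical, for illustration only): write-combined
 * mappings suit framebuffer-like apertures where streaming writes dominate and
 * the strong ordering of UC is not required. fb_phys and fb_size are assumed
 * to come from the device's BAR.
 *
 *	void __iomem *fb = ioremap_wc(fb_phys, fb_size);
 *
 *	if (fb) {
 *		memset_io(fb, 0, fb_size);	(clear the aperture)
 *		iounmap(fb);
 *	}
 */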

/**
 * ioremap_wt	-	map memory into CPU space write through
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write through.
 * Write through stores data into memory while keeping the cache up-to-date.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wt(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WT,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wt);

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
			   unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size,
				pgprot2cachemode(__pgprot(prot_val)),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * The PCI/ISA range is special-cased by not instantiating a
	 * vm_area and by simply returning an address into the kernel
	 * mapping of ISA space, so such an address should never have
	 * been handed out by ioremap(). Handle that here.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS)) {
		WARN(1, "iounmap() called for ISA range not obtained using ioremap()\n");
		return;
	}

	mmiotrace_iounmap(addr);

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	/*
	 * Use the vm area unlocked, assuming the caller ensures there isn't
	 * another iounmap for the same address in parallel. Reuse of the
	 * virtual address is prevented by leaving it in the global lists
	 * until we're done with it. cpa takes care of the direct mappings.
	 */
	p = find_vm_area((void __force *)addr);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

int __init arch_ioremap_pud_supported(void)
{
#ifdef CONFIG_X86_64
	return boot_cpu_has(X86_FEATURE_GBPAGES);
#else
	return 0;
#endif
}

int __init arch_ioremap_pmd_supported(void)
{
	return boot_cpu_has(X86_FEATURE_PSE);
}

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(phys_addr_t phys)
{
	unsigned long start = phys & PAGE_MASK;
	unsigned long offset = phys & ~PAGE_MASK;
	void *vaddr;

	/* memremap() maps if RAM, otherwise falls back to ioremap() */
	vaddr = memremap(start, PAGE_SIZE, MEMREMAP_WB);

	/* Only add the offset on success and return NULL if memremap() failed */
	if (vaddr)
		vaddr += offset;

	return vaddr;
}

void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
	memunmap((void *)((unsigned long)addr & PAGE_MASK));
}

/*
 * Examine the physical address to determine if it is an area of memory
 * that should be mapped decrypted. If the memory is not part of the
 * kernel usable area it was accessed and created decrypted, so these
 * areas should be mapped decrypted. And since the encryption key can
 * change across reboots, persistent memory should also be mapped
 * decrypted.
 *
 * If SEV is active, that implies that BIOS/UEFI also ran encrypted, so
 * only persistent memory should be mapped decrypted.
 */
static bool memremap_should_map_decrypted(resource_size_t phys_addr,
					  unsigned long size)
{
	int is_pmem;

	/*
	 * Check if the address is part of a persistent memory region.
	 * This check covers areas added by E820, EFI and ACPI.
	 */
	is_pmem = region_intersects(phys_addr, size, IORESOURCE_MEM,
				    IORES_DESC_PERSISTENT_MEMORY);
	if (is_pmem != REGION_DISJOINT)
		return true;

	/*
	 * Check if the non-volatile attribute is set for an EFI
	 * reserved area.
	 */
	if (efi_enabled(EFI_BOOT)) {
		switch (efi_mem_type(phys_addr)) {
		case EFI_RESERVED_TYPE:
			if (efi_mem_attributes(phys_addr) & EFI_MEMORY_NV)
				return true;
			break;
		default:
			break;
		}
	}

	/* Check if the address is outside the kernel usable area */
	switch (e820__get_entry_type(phys_addr, phys_addr + size - 1)) {
	case E820_TYPE_RESERVED:
	case E820_TYPE_ACPI:
	case E820_TYPE_NVS:
	case E820_TYPE_UNUSABLE:
		/* For SEV, these areas are encrypted */
		if (sev_active())
			break;
		/* Fallthrough */

	case E820_TYPE_PRAM:
		return true;
	default:
		break;
	}

	return false;
}

/*
 * Examine the physical address to determine if it is EFI data. Check
 * it against the boot params structure and the EFI tables and memory types.
 */
static bool memremap_is_efi_data(resource_size_t phys_addr,
				 unsigned long size)
{
	u64 paddr;

	/* Check if the address is part of EFI boot/runtime data */
	if (!efi_enabled(EFI_BOOT))
		return false;

	paddr = boot_params.efi_info.efi_memmap_hi;
	paddr <<= 32;
	paddr |= boot_params.efi_info.efi_memmap;
	if (phys_addr == paddr)
		return true;

	paddr = boot_params.efi_info.efi_systab_hi;
	paddr <<= 32;
	paddr |= boot_params.efi_info.efi_systab;
	if (phys_addr == paddr)
		return true;

	if (efi_is_table_address(phys_addr))
		return true;

	switch (efi_mem_type(phys_addr)) {
	case EFI_BOOT_SERVICES_DATA:
	case EFI_RUNTIME_SERVICES_DATA:
		return true;
	default:
		break;
	}

	return false;
}

/*
 * Examine the physical address to determine if it is boot data by checking
 * it against the boot params structure and the kernel's setup_data chain.
 */
static bool memremap_is_setup_data(resource_size_t phys_addr,
				   unsigned long size)
{
	struct setup_data *data;
	u64 paddr, paddr_next;

	paddr = boot_params.hdr.setup_data;
	while (paddr) {
		unsigned int len;

		if (phys_addr == paddr)
			return true;

		data = memremap(paddr, sizeof(*data),
				MEMREMAP_WB | MEMREMAP_DEC);
		if (!data)	/* treat a failed mapping as "not setup_data" */
			return false;

		paddr_next = data->next;
		len = data->len;

		memunmap(data);

		if ((phys_addr > paddr) && (phys_addr < (paddr + len)))
			return true;

		paddr = paddr_next;
	}

	return false;
}

/*
 * Examine the physical address to determine if it is boot data by checking
 * it against the boot params setup_data chain (early boot version).
 */
static bool __init early_memremap_is_setup_data(resource_size_t phys_addr,
						unsigned long size)
{
	struct setup_data *data;
	u64 paddr, paddr_next;

	paddr = boot_params.hdr.setup_data;
	while (paddr) {
		unsigned int len;

		if (phys_addr == paddr)
			return true;

		data = early_memremap_decrypted(paddr, sizeof(*data));
		if (!data)	/* treat a failed mapping as "not setup_data" */
			return false;

		paddr_next = data->next;
		len = data->len;

		early_memunmap(data, sizeof(*data));

		if ((phys_addr > paddr) && (phys_addr < (paddr + len)))
			return true;

		paddr = paddr_next;
	}

	return false;
}

/*
 * Architecture function to determine if RAM remap is allowed. By default, a
 * RAM remap will map the data as encrypted. Determine if a RAM remap should
 * not be done so that the data will be mapped decrypted.
 */
bool arch_memremap_can_ram_remap(resource_size_t phys_addr, unsigned long size,
				 unsigned long flags)
{
	if (!mem_encrypt_active())
		return true;

	if (flags & MEMREMAP_ENC)
		return true;

	if (flags & MEMREMAP_DEC)
		return false;

	if (sme_active()) {
		if (memremap_is_setup_data(phys_addr, size) ||
		    memremap_is_efi_data(phys_addr, size))
			return false;
	}

	return !memremap_should_map_decrypted(phys_addr, size);
}

/*
 * Architecture override of __weak function to adjust the protection attributes
 * used when remapping memory. By default, early_memremap() will map the data
 * as encrypted. Determine if an encrypted mapping should not be done and set
 * the appropriate protection attributes.
 */
pgprot_t __init early_memremap_pgprot_adjust(resource_size_t phys_addr,
					     unsigned long size,
					     pgprot_t prot)
{
	bool encrypted_prot;

	if (!mem_encrypt_active())
		return prot;

	encrypted_prot = true;

	if (sme_active()) {
		if (early_memremap_is_setup_data(phys_addr, size) ||
		    memremap_is_efi_data(phys_addr, size))
			encrypted_prot = false;
	}

	if (encrypted_prot && memremap_should_map_decrypted(phys_addr, size))
		encrypted_prot = false;

	return encrypted_prot ? pgprot_encrypted(prot)
			      : pgprot_decrypted(prot);
}

bool phys_mem_access_encrypted(unsigned long phys_addr, unsigned long size)
{
	return arch_memremap_can_ram_remap(phys_addr, size, 0);
}

#ifdef CONFIG_ARCH_USE_MEMREMAP_PROT
/* Remap memory with encryption */
void __init *early_memremap_encrypted(resource_size_t phys_addr,
				      unsigned long size)
{
	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_ENC);
}

/*
 * Remap memory with encryption and write-protected - cannot be called
 * before pat_init() is called
 */
void __init *early_memremap_encrypted_wp(resource_size_t phys_addr,
					 unsigned long size)
{
	/* Be sure the write-protect PAT entry is set for write-protect */
	if (__pte2cachemode_tbl[_PAGE_CACHE_MODE_WP] != _PAGE_CACHE_MODE_WP)
		return NULL;

	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_ENC_WP);
}

/* Remap memory without encryption */
void __init *early_memremap_decrypted(resource_size_t phys_addr,
				      unsigned long size)
{
	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_NOENC);
}
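
/*
 * A minimal usage sketch (hypothetical early-boot code, for illustration
 * only): the boot loader writes setup_data unencrypted, so an early consumer
 * would map it with the decrypted variant, much as
 * early_memremap_is_setup_data() above does. paddr is assumed to hold the
 * physical address of a setup_data entry.
 *
 *	struct setup_data *sd;
 *
 *	sd = early_memremap_decrypted(paddr, sizeof(*sd));
 *	if (sd) {
 *		pr_info("setup_data type %u len %u\n", sd->type, sd->len);
 *		early_memunmap(sd, sizeof(*sd));
 *	}
 */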

/*
 * Remap memory without encryption and write-protected - cannot be called
 * before pat_init() is called
 */
void __init *early_memremap_decrypted_wp(resource_size_t phys_addr,
					 unsigned long size)
{
	/* Be sure the write-protect PAT entry is set for write-protect */
	if (__pte2cachemode_tbl[_PAGE_CACHE_MODE_WP] != _PAGE_CACHE_MODE_WP)
		return NULL;

	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_NOENC_WP);
}
#endif	/* CONFIG_ARCH_USE_MEMREMAP_PROT */

static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3_pa());
	pgd_t *pgd = &base[pgd_index(addr)];
	p4d_t *p4d = p4d_offset(pgd, addr);
	pud_t *pud = pud_offset(p4d, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

bool __init is_early_ioremap_ptep(pte_t *ptep)
{
	return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)];
}

void __init early_ioremap_init(void)
{
	pmd_t *pmd;

#ifdef CONFIG_X86_64
	BUILD_BUG_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#else
	WARN_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#endif

	early_ioremap_setup();

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
#undef __FIXADDR_TOP
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
		       fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
		       fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:   %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init __early_set_fixmap(enum fixed_addresses idx,
			       phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	/* Sanitize 'flags' against any unsupported bits: */
	pgprot_val(flags) &= __default_kernel_pte_mask;

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	__flush_tlb_one_kernel(addr);
}