linux/lib/ioremap.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/io.h>
#include <linux/export.h>
#include <asm/cacheflush.h>
#include <asm/pgtable.h>

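/*
 * Per page-table-level flags recording whether the architecture allows
 * ioremap() to install a huge mapping at that level, and whether huge
 * ioremap mappings were disabled on the command line.
 */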
#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static int __read_mostly ioremap_p4d_capable;
static int __read_mostly ioremap_pud_capable;
static int __read_mostly ioremap_pmd_capable;
static int __read_mostly ioremap_huge_disabled;

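/* Boot parameter "nohugeiomap": force ioremap() to use base-page mappings only. */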
static int __init set_nohugeiomap(char *str)
{
        ioremap_huge_disabled = 1;
        return 0;
}
early_param("nohugeiomap", set_nohugeiomap);

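/*
 * Record, once at boot, which page-table levels the architecture allows
 * ioremap() to map with huge entries, unless "nohugeiomap" was given.
 */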
void __init ioremap_huge_init(void)
{
        if (!ioremap_huge_disabled) {
                if (arch_ioremap_pud_supported())
                        ioremap_pud_capable = 1;
                if (arch_ioremap_pmd_supported())
                        ioremap_pmd_capable = 1;
        }
}

static inline int ioremap_p4d_enabled(void)
{
        return ioremap_p4d_capable;
}

static inline int ioremap_pud_enabled(void)
{
        return ioremap_pud_capable;
}

static inline int ioremap_pmd_enabled(void)
{
        return ioremap_pmd_capable;
}

#else   /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static inline int ioremap_p4d_enabled(void) { return 0; }
static inline int ioremap_pud_enabled(void) { return 0; }
static inline int ioremap_pmd_enabled(void) { return 0; }
#endif  /* CONFIG_HAVE_ARCH_HUGE_VMAP */

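/*
 * Populate the leaf level with base-page PTEs.  This is the fallback when a
 * huge mapping cannot be installed at a higher level.
 */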
static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
                unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
        pte_t *pte;
        u64 pfn;

        pfn = phys_addr >> PAGE_SHIFT;
        pte = pte_alloc_kernel(pmd, addr);
        if (!pte)
                return -ENOMEM;
        do {
                BUG_ON(!pte_none(*pte));
                set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
                pfn++;
        } while (pte++, addr += PAGE_SIZE, addr != end);
        return 0;
}

static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
                unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
        pmd_t *pmd;
        unsigned long next;

        phys_addr -= addr;
        pmd = pmd_alloc(&init_mm, pud, addr);
        if (!pmd)
                return -ENOMEM;
        do {
                next = pmd_addr_end(addr, end);

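                /*
                 * If this PMD-sized chunk is aligned both virtually and
                 * physically, try to free any PTE table left behind by a
                 * previous mapping and install a single huge PMD; otherwise
                 * fall back to base-page PTEs below.
                 */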
                if (ioremap_pmd_enabled() &&
                    ((next - addr) == PMD_SIZE) &&
                    IS_ALIGNED(phys_addr + addr, PMD_SIZE) &&
                    pmd_free_pte_page(pmd, addr)) {
                        if (pmd_set_huge(pmd, phys_addr + addr, prot))
                                continue;
                }

                if (ioremap_pte_range(pmd, addr, next, phys_addr + addr, prot))
                        return -ENOMEM;
        } while (pmd++, addr = next, addr != end);
        return 0;
}

static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr,
                unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
        pud_t *pud;
        unsigned long next;

        phys_addr -= addr;
        pud = pud_alloc(&init_mm, p4d, addr);
        if (!pud)
                return -ENOMEM;
        do {
                next = pud_addr_end(addr, end);

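                /*
                 * Same idea one level up: free any stale PMD-level table and
                 * map an aligned PUD-sized chunk with a single huge PUD;
                 * otherwise descend to the PMD level.
                 */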
                if (ioremap_pud_enabled() &&
                    ((next - addr) == PUD_SIZE) &&
                    IS_ALIGNED(phys_addr + addr, PUD_SIZE) &&
                    pud_free_pmd_page(pud, addr)) {
                        if (pud_set_huge(pud, phys_addr + addr, prot))
                                continue;
                }

                if (ioremap_pmd_range(pud, addr, next, phys_addr + addr, prot))
                        return -ENOMEM;
        } while (pud++, addr = next, addr != end);
        return 0;
}

static inline int ioremap_p4d_range(pgd_t *pgd, unsigned long addr,
                unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
        p4d_t *p4d;
        unsigned long next;

        phys_addr -= addr;
        p4d = p4d_alloc(&init_mm, pgd, addr);
        if (!p4d)
                return -ENOMEM;
        do {
                next = p4d_addr_end(addr, end);

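                /*
                 * Attempt a huge P4D mapping for an aligned P4D-sized chunk;
                 * otherwise descend to the PUD level.
                 */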
                if (ioremap_p4d_enabled() &&
                    ((next - addr) == P4D_SIZE) &&
                    IS_ALIGNED(phys_addr + addr, P4D_SIZE)) {
                        if (p4d_set_huge(p4d, phys_addr + addr, prot))
                                continue;
                }

                if (ioremap_pud_range(p4d, addr, next, phys_addr + addr, prot))
                        return -ENOMEM;
        } while (p4d++, addr = next, addr != end);
        return 0;
}

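/**
 * ioremap_page_range - map a physical address range into kernel virtual
 *                      addresses [addr, end)
 * @addr:      start of the virtual range (must be below @end)
 * @end:       end of the virtual range (exclusive)
 * @phys_addr: physical address to map from
 * @prot:      page protection flags for the new mappings
 *
 * Walks the kernel page tables from the PGD down, installing huge mappings
 * where the architecture allows it and base-page PTEs otherwise.  Returns 0
 * on success or -ENOMEM if a page-table allocation fails.
 */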
int ioremap_page_range(unsigned long addr,
                       unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
        pgd_t *pgd;
        unsigned long start;
        unsigned long next;
        int err;

        might_sleep();
        BUG_ON(addr >= end);

        start = addr;
        phys_addr -= addr;
        pgd = pgd_offset_k(addr);
        do {
                next = pgd_addr_end(addr, end);
                err = ioremap_p4d_range(pgd, addr, next, phys_addr+addr, prot);
                if (err)
                        break;
        } while (pgd++, addr = next, addr != end);

        flush_cache_vmap(start, end);

        return err;
}