linux/arch/powerpc/mm/ioremap.c
// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/vmalloc.h>
#include <asm/io-workarounds.h>

unsigned long ioremap_bot;
EXPORT_SYMBOL(ioremap_bot);

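/*
 * Map @size bytes of physical memory at @addr as non-cached kernel
 * memory (pgprot_noncached).  Goes through the I/O workaround hook
 * when it is active.
 */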
void __iomem *ioremap(phys_addr_t addr, unsigned long size)
{
        pgprot_t prot = pgprot_noncached(PAGE_KERNEL);
        void *caller = __builtin_return_address(0);

        if (iowa_is_active())
                return iowa_ioremap(addr, size, prot, caller);
        return __ioremap_caller(addr, size, prot, caller);
}
EXPORT_SYMBOL(ioremap);

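/*
 * Same as ioremap(), but with write-combining protection
 * (pgprot_noncached_wc) instead of plain non-cached.
 */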
void __iomem *ioremap_wc(phys_addr_t addr, unsigned long size)
{
        pgprot_t prot = pgprot_noncached_wc(PAGE_KERNEL);
        void *caller = __builtin_return_address(0);

        if (iowa_is_active())
                return iowa_ioremap(addr, size, prot, caller);
        return __ioremap_caller(addr, size, prot, caller);
}
EXPORT_SYMBOL(ioremap_wc);

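/*
 * Same as ioremap(), but the range is mapped cacheable
 * (pgprot_cached) for coherent device memory.
 */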
void __iomem *ioremap_coherent(phys_addr_t addr, unsigned long size)
{
        pgprot_t prot = pgprot_cached(PAGE_KERNEL);
        void *caller = __builtin_return_address(0);

        if (iowa_is_active())
                return iowa_ioremap(addr, size, prot, caller);
        return __ioremap_caller(addr, size, prot, caller);
}

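/*
 * ioremap() variant taking raw PTE flags from the caller.  The flags
 * are sanitised before use: writable mappings are made dirty, and
 * _PAGE_USER / _PAGE_EXEC are cleared so they cannot end up in the
 * ioremap space.
 */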
void __iomem *ioremap_prot(phys_addr_t addr, unsigned long size, unsigned long flags)
{
        pte_t pte = __pte(flags);
        void *caller = __builtin_return_address(0);

        /* writeable implies dirty for kernel addresses */
        if (pte_write(pte))
                pte = pte_mkdirty(pte);

        /* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
        pte = pte_exprotect(pte);
        pte = pte_mkprivileged(pte);

        if (iowa_is_active())
                return iowa_ioremap(addr, size, pte_pgprot(pte), caller);
        return __ioremap_caller(addr, size, pte_pgprot(pte), caller);
}
EXPORT_SYMBOL(ioremap_prot);

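/*
 * Map the range one page at a time with map_kernel_page() at a
 * caller-chosen virtual address, for use before the vmalloc allocator
 * is available.  Nothing is unmapped if a page in the middle fails.
 */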
int early_ioremap_range(unsigned long ea, phys_addr_t pa,
                        unsigned long size, pgprot_t prot)
{
        unsigned long i;

        for (i = 0; i < size; i += PAGE_SIZE) {
                int err = map_kernel_page(ea + i, pa + i, prot);

                if (WARN_ON_ONCE(err))  /* Should clean up */
                        return err;
        }

        return 0;
}

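/*
 * Common helper for the ioremap variants: reserve a virtual area in
 * the IOREMAP region and install the mapping with
 * ioremap_page_range().  @offset is the sub-page offset added back to
 * the returned address.
 */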
void __iomem *do_ioremap(phys_addr_t pa, phys_addr_t offset, unsigned long size,
                         pgprot_t prot, void *caller)
{
        struct vm_struct *area;
        int ret;
        unsigned long va;

        area = __get_vm_area_caller(size, VM_IOREMAP, IOREMAP_START, IOREMAP_END, caller);
        if (area == NULL)
                return NULL;

        area->phys_addr = pa;
        va = (unsigned long)area->addr;

        ret = ioremap_page_range(va, va + size, pa, prot);
        if (!ret)
                return (void __iomem *)area->addr + offset;

        unmap_kernel_range(va, size);
        free_vm_area(area);

        return NULL;
}

#ifdef CONFIG_ZONE_DEVICE
/*
 * Override the generic version in mm/memremap.c.
 *
 * With hash translation, the direct-map range is mapped with just one
 * page size selected by htab_init_page_sizes(). Consult
 * mmu_psize_defs[] to determine the minimum page size alignment.
 */
unsigned long memremap_compat_align(void)
{
        unsigned int shift = mmu_psize_defs[mmu_linear_psize].shift;

        if (radix_enabled())
                return SUBSECTION_SIZE;
        return max(SUBSECTION_SIZE, 1UL << shift);
}
EXPORT_SYMBOL_GPL(memremap_compat_align);
#endif