linux/mm/early_ioremap.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Provide common bits of early_ioremap() support for architectures needing
 * temporary mappings during boot before ioremap() is available.
 *
 * This is mostly a direct copy of the x86 early_ioremap implementation.
 *
 * (C) Copyright 1995, 1996, 2014 Linus Torvalds
 */
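
/*
 * Typical use (an illustrative sketch only; fw_blob_phys and the 64-byte
 * size are made-up values): map a boot-time data blob, consume it, and
 * unmap it again. Mappings made here must be torn down before the
 * boot-time slots are retired.
 *
 *	void __init consume_fw_blob(phys_addr_t fw_blob_phys)
 *	{
 *		void *blob = early_memremap(fw_blob_phys, 64);
 *
 *		if (!blob)
 *			return;
 *		// ... parse the 64-byte blob ...
 *		early_memunmap(blob, 64);
 *	}
 */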
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <asm/fixmap.h>
#include <asm/early_ioremap.h>

#ifdef CONFIG_MMU
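/*
 * Set via the "early_ioremap_debug" kernel parameter; when enabled, every
 * boot-time map and unmap below is reported through WARN().
 */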
static int early_ioremap_debug __initdata;

static int __init early_ioremap_debug_setup(char *str)
{
        early_ioremap_debug = 1;

        return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

/*
 * Nonzero once early_ioremap_reset() has run: from then on, fixmap updates
 * must go through the __late_set_fixmap()/__late_clear_fixmap() hooks.
 */
static int after_paging_init __initdata;

pgprot_t __init __weak early_memremap_pgprot_adjust(resource_size_t phys_addr,
                                                    unsigned long size,
                                                    pgprot_t prot)
{
        return prot;
}

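/*
 * An architecture can provide a non-weak early_memremap_pgprot_adjust() to
 * rewrite the protections used for early_memremap() mappings. A sketch
 * (range_needs_decryption() is a hypothetical helper; pgprot_decrypted()
 * falls back to a no-op when the architecture does not define it):
 *
 *	pgprot_t __init early_memremap_pgprot_adjust(resource_size_t phys_addr,
 *						     unsigned long size,
 *						     pgprot_t prot)
 *	{
 *		if (range_needs_decryption(phys_addr, size))
 *			prot = pgprot_decrypted(prot);
 *		return prot;
 *	}
 */
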
void __init __weak early_ioremap_shutdown(void)
{
}

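/*
 * Called by the architecture once its final page tables are live: gives the
 * arch a last chance to tidy up (early_ioremap_shutdown()) and switches this
 * code over to the __late_set_fixmap()/__late_clear_fixmap() paths.
 */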
void __init early_ioremap_reset(void)
{
        early_ioremap_shutdown();
        after_paging_init = 1;
}

/*
 * Generally, ioremap() is available after paging_init() has been called.
 * Architectures wanting to allow early_ioremap() after paging_init() can
 * define __late_set_fixmap and __late_clear_fixmap to do the right thing;
 * a sketch follows the default stubs below.
 */
#ifndef __late_set_fixmap
static inline void __init __late_set_fixmap(enum fixed_addresses idx,
                                            phys_addr_t phys, pgprot_t prot)
{
        BUG();
}
#endif

#ifndef __late_clear_fixmap
static inline void __init __late_clear_fixmap(enum fixed_addresses idx)
{
        BUG();
}
#endif
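
/*
 * On an architecture whose __set_fixmap() works after paging_init(), the
 * late hooks can simply forward to it; x86 does roughly this (a sketch for
 * illustration only, not copied verbatim from its headers):
 *
 *	#define __late_set_fixmap(idx, phys, flags)	__set_fixmap(idx, phys, flags)
 *	#define __late_clear_fixmap(idx)		__set_fixmap(idx, 0, __pgprot(0))
 */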

/*
 * Boot-time mapping slots: prev_map[] holds the currently mapped cookie for
 * each slot (NULL when free), prev_size[] the size that was requested, and
 * slot_virt[] the fixed virtual base address of each slot.
 */
static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long slot_virt[FIX_BTMAPS_SLOTS] __initdata;

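/*
 * Precompute each slot's virtual base address so __early_ioremap() only has
 * to add the intra-page offset. The architecture calls this before the
 * first early_ioremap() user runs.
 */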
void __init early_ioremap_setup(void)
{
        int i;

        for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
                if (WARN_ON(prev_map[i]))
                        break;

        for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
                slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
}

static int __init check_early_ioremap_leak(void)
{
        int count = 0;
        int i;

        for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
                if (prev_map[i])
                        count++;

        if (WARN(count, KERN_WARNING
                 "Debug warning: early ioremap leak of %d areas detected.\n"
                 "Please boot with early_ioremap_debug and report the dmesg.\n",
                 count))
                return 1;
        return 0;
}
late_initcall(check_early_ioremap_leak);

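/*
 * Note on the index arithmetic below: a higher fixmap index corresponds to
 * a lower virtual address, so slot "slot" starts at index
 * FIX_BTMAP_BEGIN - NR_FIX_BTMAPS * slot, and the mapping loop decrements
 * idx as phys_addr advances. Worked example (illustrative values; the real
 * ones are set by the architecture): with NR_FIX_BTMAPS == 64 and 4 KiB
 * pages, each of the FIX_BTMAPS_SLOTS slots can map up to
 * 64 pages == 256 KiB.
 */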
static void __init __iomem *
__early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
{
        unsigned long offset;
        resource_size_t last_addr;
        unsigned int nrpages;
        enum fixed_addresses idx;
        int i, slot;

        WARN_ON(system_state >= SYSTEM_RUNNING);

        /* Find a free boot-time mapping slot. */
        slot = -1;
        for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
                if (!prev_map[i]) {
                        slot = i;
                        break;
                }
        }

        if (WARN(slot < 0, "%s(%pa, %08lx): no free slot found\n",
                 __func__, &phys_addr, size))
                return NULL;

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (WARN_ON(!size || last_addr < phys_addr))
                return NULL;

        /* Remember the requested size for the check in early_iounmap(). */
        prev_size[slot] = size;
        /*
         * Mappings have to be page-aligned
         */
        offset = offset_in_page(phys_addr);
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr + 1) - phys_addr;

        /*
         * Mappings have to fit in the FIX_BTMAP area.
         */
        nrpages = size >> PAGE_SHIFT;
        if (WARN_ON(nrpages > NR_FIX_BTMAPS))
                return NULL;

        /*
         * Ok, go for it..
         */
        idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
        while (nrpages > 0) {
                if (after_paging_init)
                        __late_set_fixmap(idx, phys_addr, prot);
                else
                        __early_set_fixmap(idx, phys_addr, prot);
                phys_addr += PAGE_SIZE;
                --idx;
                --nrpages;
        }
        /* Note: the loop above has advanced phys_addr past the mapping. */
        WARN(early_ioremap_debug, "%s(%pa, %08lx) [%d] => %08lx + %08lx\n",
             __func__, &phys_addr, size, slot, offset, slot_virt[slot]);

        prev_map[slot] = (void __iomem *)(offset + slot_virt[slot]);
        return prev_map[slot];
}

void __init early_iounmap(void __iomem *addr, unsigned long size)
{
        unsigned long virt_addr;
        unsigned long offset;
        unsigned int nrpages;
        enum fixed_addresses idx;
        int i, slot;

        /* Find the slot this cookie came from. */
        slot = -1;
        for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
                if (prev_map[i] == addr) {
                        slot = i;
                        break;
                }
        }

        if (WARN(slot < 0, "early_iounmap(%p, %08lx): no matching slot found\n",
                 addr, size))
                return;

        if (WARN(prev_size[slot] != size,
                 "early_iounmap(%p, %08lx) [%d] size not consistent with %08lx\n",
                 addr, size, slot, prev_size[slot]))
                return;

        WARN(early_ioremap_debug, "early_iounmap(%p, %08lx) [%d]\n",
             addr, size, slot);

        virt_addr = (unsigned long)addr;
        if (WARN_ON(virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)))
                return;

        offset = offset_in_page(virt_addr);
        nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

        idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
        while (nrpages > 0) {
                if (after_paging_init)
                        __late_clear_fixmap(idx);
                else
                        __early_set_fixmap(idx, 0, FIXMAP_PAGE_CLEAR);
                --idx;
                --nrpages;
        }
        prev_map[slot] = NULL;
}

/* Remap an IO device */
void __init __iomem *
early_ioremap(resource_size_t phys_addr, unsigned long size)
{
        return __early_ioremap(phys_addr, size, FIXMAP_PAGE_IO);
}
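
/*
 * Example (illustrative only; uart_phys and the 0x100 window size are
 * made-up values): an early console probe might do
 *
 *	void __iomem *base = early_ioremap(uart_phys, 0x100);
 *
 *	if (base) {
 *		// ... readl()/writel() against base ...
 *		early_iounmap(base, 0x100);
 *	}
 */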

/* Remap memory */
void __init *
early_memremap(resource_size_t phys_addr, unsigned long size)
{
        pgprot_t prot = early_memremap_pgprot_adjust(phys_addr, size,
                                                     FIXMAP_PAGE_NORMAL);

        return (__force void *)__early_ioremap(phys_addr, size, prot);
}

#ifdef FIXMAP_PAGE_RO
void __init *
early_memremap_ro(resource_size_t phys_addr, unsigned long size)
{
        pgprot_t prot = early_memremap_pgprot_adjust(phys_addr, size,
                                                     FIXMAP_PAGE_RO);

        return (__force void *)__early_ioremap(phys_addr, size, prot);
}
#endif

#ifdef CONFIG_ARCH_USE_MEMREMAP_PROT
void __init *
early_memremap_prot(resource_size_t phys_addr, unsigned long size,
                    unsigned long prot_val)
{
        return (__force void *)__early_ioremap(phys_addr, size,
                                               __pgprot(prot_val));
}
#endif

#define MAX_MAP_CHUNK   (NR_FIX_BTMAPS << PAGE_SHIFT)

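/*
 * Copy @size bytes from physical address @src into @dest using temporary
 * early mappings. Because one slot can map at most MAX_MAP_CHUNK bytes
 * (NR_FIX_BTMAPS pages), the copy proceeds in chunks: each iteration maps
 * the page-aligned window around the current source position, copies out
 * up to MAX_MAP_CHUNK - slop bytes (slop being the offset of @src within
 * its page), and unmaps again.
 */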
void __init copy_from_early_mem(void *dest, phys_addr_t src, unsigned long size)
{
        unsigned long slop, clen;
        char *p;

        while (size) {
                slop = offset_in_page(src);
                clen = size;
                if (clen > MAX_MAP_CHUNK - slop)
                        clen = MAX_MAP_CHUNK - slop;
                p = early_memremap(src & PAGE_MASK, clen + slop);
                memcpy(dest, p + slop, clen);
                early_memunmap(p, clen + slop);
                dest += clen;
                src += clen;
                size -= clen;
        }
}

#else /* CONFIG_MMU */

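/*
 * Without an MMU there is nothing to map: callers get the physical address
 * handed straight back, and unmapping is a no-op.
 */
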
void __init __iomem *
early_ioremap(resource_size_t phys_addr, unsigned long size)
{
        return (__force void __iomem *)phys_addr;
}

/* Remap memory */
void __init *
early_memremap(resource_size_t phys_addr, unsigned long size)
{
        return (void *)phys_addr;
}

void __init *
early_memremap_ro(resource_size_t phys_addr, unsigned long size)
{
        return (void *)phys_addr;
}

void __init early_iounmap(void __iomem *addr, unsigned long size)
{
}

#endif /* CONFIG_MMU */

void __init early_memunmap(void *addr, unsigned long size)
{
        early_iounmap((__force void __iomem *)addr, size);
}