linux/mm/early_ioremap.c
/*
 * Provide common bits of early_ioremap() support for architectures needing
 * temporary mappings during boot before ioremap() is available.
 *
 * This is mostly a direct copy of the x86 early_ioremap implementation.
 *
 * (C) Copyright 1995, 1996, 2014 Linus Torvalds
 *
 */
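
/*
 * Typical boot-time usage, sketched below (REG_OFF and the local
 * variables are illustrative only, not part of this API):
 *
 *	void __iomem *base = early_ioremap(phys_addr, size);
 *	u32 val = readl(base + REG_OFF);
 *	early_iounmap(base, size);
 *
 * For normal RAM, use early_memremap()/early_memunmap() instead, which
 * hand out and take back a plain pointer.
 */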
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <asm/fixmap.h>
#include <asm/early_ioremap.h>

#ifdef CONFIG_MMU
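/* Set via the "early_ioremap_debug" kernel command-line parameter. */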
static int early_ioremap_debug __initdata;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static int after_paging_init __initdata;

void __init __weak early_ioremap_shutdown(void)
{
}

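/*
 * Called once the kernel page tables are live: give the architecture a
 * chance to tear down its early handling via the (optionally overridden)
 * early_ioremap_shutdown() hook, then route any later map/unmap requests
 * to the __late_* fixmap helpers below.
 */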
void __init early_ioremap_reset(void)
{
	early_ioremap_shutdown();
	after_paging_init = 1;
}

/*
 * Generally, ioremap() is available after paging_init() has been called.
 * Architectures wanting to allow early_ioremap after paging_init() can
 * define __late_set_fixmap and __late_clear_fixmap to do the right thing.
 */
#ifndef __late_set_fixmap
static inline void __init __late_set_fixmap(enum fixed_addresses idx,
					    phys_addr_t phys, pgprot_t prot)
{
	BUG();
}
#endif

#ifndef __late_clear_fixmap
static inline void __init __late_clear_fixmap(enum fixed_addresses idx)
{
	BUG();
}
#endif
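
/*
 * One way an architecture can wire these up, assuming its regular
 * __set_fixmap() works once paging_init() has run (a sketch modelled on
 * the x86 definitions, not built here):
 *
 *	#define __late_set_fixmap(idx, phys, prot) __set_fixmap(idx, phys, prot)
 *	#define __late_clear_fixmap(idx) __early_set_fixmap(idx, 0, __pgprot(0))
 */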
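/*
 * Boot-time mapping slots: prev_map[] holds the virtual address handed
 * out for each live mapping, prev_size[] the size that was requested for
 * it, and slot_virt[] the fixed virtual base address of each slot.
 */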
static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long slot_virt[FIX_BTMAPS_SLOTS] __initdata;

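/*
 * Called by the architecture once the fixmap area is usable: complain if
 * any slot is unexpectedly busy and precompute each slot's virtual base
 * address.
 */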
void __init early_ioremap_setup(void)
{
	int i;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		if (WARN_ON(prev_map[i]))
			break;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
}

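/* Late sanity check: any mapping still live this far into boot leaked. */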
static int __init check_early_ioremap_leak(void)
{
	int count = 0;
	int i;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		if (prev_map[i])
			count++;

	if (WARN(count, KERN_WARNING
		 "Debug warning: early ioremap leak of %d areas detected.\n"
		 "Please boot with early_ioremap_debug and report the dmesg output.\n",
		 count))
		return 1;
	return 0;
}
late_initcall(check_early_ioremap_leak);

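/*
 * Create a boot-time mapping of @size bytes at @phys_addr with protection
 * @prot: claim the first free slot, page-align the range, point the
 * slot's fixmap entries at it one page at a time, and return the slot's
 * virtual base address plus the sub-page offset of @phys_addr.
 */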
static void __init __iomem *
__early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
{
	unsigned long offset;
	resource_size_t last_addr;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int i, slot;

	WARN_ON(system_state != SYSTEM_BOOTING);

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (!prev_map[i]) {
			slot = i;
			break;
		}
	}

	if (WARN(slot < 0, "%s(%08llx, %08lx): no free slot found\n",
		 __func__, (u64)phys_addr, size))
		return NULL;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (WARN_ON(!size || last_addr < phys_addr))
		return NULL;

	prev_size[slot] = size;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = offset_in_page(phys_addr);
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (WARN_ON(nrpages > NR_FIX_BTMAPS))
		return NULL;

	/*
	 * Ok, go for it: populate this slot's fixmap entries.
	 */
	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	while (nrpages > 0) {
		if (after_paging_init)
			__late_set_fixmap(idx, phys_addr, prot);
		else
			__early_set_fixmap(idx, phys_addr, prot);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	WARN(early_ioremap_debug, "%s(%08llx, %08lx) [%d] => %08lx + %08lx\n",
	     __func__, (u64)phys_addr, size, slot, offset, slot_virt[slot]);

	prev_map[slot] = (void __iomem *)(offset + slot_virt[slot]);
	return prev_map[slot];
}

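/*
 * Tear down a mapping handed out by __early_ioremap(): find the slot
 * that owns @addr, check that @size matches the original request, then
 * clear the slot's fixmap entries and mark the slot free again.
 */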
void __init early_iounmap(void __iomem *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int i, slot;

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (prev_map[i] == addr) {
			slot = i;
			break;
		}
	}

	if (WARN(slot < 0, "early_iounmap(%p, %08lx): no matching slot found\n",
		 addr, size))
		return;

	if (WARN(prev_size[slot] != size,
		 "early_iounmap(%p, %08lx) [%d]: size does not match mapped size %08lx\n",
		 addr, size, slot, prev_size[slot]))
		return;

	WARN(early_ioremap_debug, "early_iounmap(%p, %08lx) [%d]\n",
	     addr, size, slot);

	virt_addr = (unsigned long)addr;
	if (WARN_ON(virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)))
		return;

	offset = offset_in_page(virt_addr);
	nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	while (nrpages > 0) {
		if (after_paging_init)
			__late_clear_fixmap(idx);
		else
			__early_set_fixmap(idx, 0, FIXMAP_PAGE_CLEAR);
		--idx;
		--nrpages;
	}
	prev_map[slot] = NULL;
}

/* Remap an IO device */
void __init __iomem *
early_ioremap(resource_size_t phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, FIXMAP_PAGE_IO);
}

/* Remap memory */
void __init *
early_memremap(resource_size_t phys_addr, unsigned long size)
{
	return (__force void *)__early_ioremap(phys_addr, size,
					       FIXMAP_PAGE_NORMAL);
}
#ifdef FIXMAP_PAGE_RO
void __init *
early_memremap_ro(resource_size_t phys_addr, unsigned long size)
{
	return (__force void *)__early_ioremap(phys_addr, size, FIXMAP_PAGE_RO);
}
#endif

#define MAX_MAP_CHUNK	(NR_FIX_BTMAPS << PAGE_SHIFT)

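/*
 * Copy from a physical range that has no permanent mapping yet, going
 * through a temporary early mapping at most MAX_MAP_CHUNK bytes at a
 * time, since one slot can map at most NR_FIX_BTMAPS pages. As a worked
 * example (assuming 4 KiB pages and NR_FIX_BTMAPS == 64, the x86
 * values): MAX_MAP_CHUNK is 256 KiB, so copying 1 MiB from a
 * page-aligned source takes four map/copy/unmap rounds.
 */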
void __init copy_from_early_mem(void *dest, phys_addr_t src, unsigned long size)
{
	unsigned long slop, clen;
	char *p;

	while (size) {
		slop = offset_in_page(src);
		clen = size;
		if (clen > MAX_MAP_CHUNK - slop)
			clen = MAX_MAP_CHUNK - slop;
		p = early_memremap(src & PAGE_MASK, clen + slop);
		memcpy(dest, p + slop, clen);
		early_memunmap(p, clen + slop);
		dest += clen;
		src += clen;
		size -= clen;
	}
}

#else /* CONFIG_MMU */

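/*
 * Without an MMU there is nothing to map: physical addresses are usable
 * directly, so the helpers below simply cast the address through.
 */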
void __init __iomem *
early_ioremap(resource_size_t phys_addr, unsigned long size)
{
	return (__force void __iomem *)phys_addr;
}

/* Remap memory */
void __init *
early_memremap(resource_size_t phys_addr, unsigned long size)
{
	return (void *)phys_addr;
}

void __init *
early_memremap_ro(resource_size_t phys_addr, unsigned long size)
{
	return (void *)phys_addr;
}

void __init early_iounmap(void __iomem *addr, unsigned long size)
{
}

#endif /* CONFIG_MMU */

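/* Counterpart of early_memremap(): drop the __iomem qualifier and unmap. */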
void __init early_memunmap(void *addr, unsigned long size)
{
	early_iounmap((__force void __iomem *)addr, size);
}