linux/arch/arm/mm/idmap.c
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm_types.h>

#include <asm/cputype.h>
#include <asm/idmap.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/system_info.h>

/*
 * Note: accesses outside of the kernel image and the identity map area
 * are not supported on any CPU using the idmap tables as its current
 * page tables.
 */
pgd_t *idmap_pgd;
long long arch_phys_to_idmap_offset;

#ifdef CONFIG_ARM_LPAE
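/*
 * Populate [addr, end) with section-sized identity mappings at the PMD
 * level.  If the PUD entry still points into the swapper's PMD tables
 * (L_PGD_SWAPPER), allocate a private PMD first and copy the kernel's
 * entries across so the kernel image stays mapped.
 */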
static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
        unsigned long prot)
{
        pmd_t *pmd;
        unsigned long next;

        if (pud_none_or_clear_bad(pud) || (pud_val(*pud) & L_PGD_SWAPPER)) {
                pmd = pmd_alloc_one(&init_mm, addr);
                if (!pmd) {
                        pr_warn("Failed to allocate identity pmd.\n");
                        return;
                }
                /*
                 * Copy the original PMD to ensure that the PMD entries for
                 * the kernel image are preserved.
                 */
                if (!pud_none(*pud))
                        memcpy(pmd, pmd_offset(pud, 0),
                               PTRS_PER_PMD * sizeof(pmd_t));
                pud_populate(&init_mm, pud, pmd);
                pmd += pmd_index(addr);
        } else
                pmd = pmd_offset(pud, addr);

        do {
                next = pmd_addr_end(addr, end);
                *pmd = __pmd((addr & PMD_MASK) | prot);
                flush_pmd_entry(pmd);
        } while (pmd++, addr = next, addr != end);
}
#else   /* !CONFIG_ARM_LPAE */
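/*
 * Without LPAE the Linux PMD is folded into the PGD: each entry covers
 * 2MiB as a pair of 1MiB hardware sections, so both halves are written.
 */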
static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
        unsigned long prot)
{
        pmd_t *pmd = pmd_offset(pud, addr);

        addr = (addr & PMD_MASK) | prot;
        pmd[0] = __pmd(addr);
        addr += SECTION_SIZE;
        pmd[1] = __pmd(addr);
        flush_pmd_entry(pmd);
}
#endif  /* CONFIG_ARM_LPAE */

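/* Walk [addr, end) one PUD entry at a time, mapping each sub-range. */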
static void idmap_add_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
        unsigned long prot)
{
        pud_t *pud = pud_offset(pgd, addr);
        unsigned long next;

        do {
                next = pud_addr_end(addr, end);
                idmap_add_pmd(pud, addr, next, prot);
        } while (pud++, addr = next, addr != end);
}

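/*
 * Install a 1:1 (virtual == physical) section mapping for the
 * [text_start, text_end) range of the kernel image into the given page
 * tables.  virt_to_idmap() also applies any platform-specific offset
 * held in arch_phys_to_idmap_offset.
 */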
static void identity_mapping_add(pgd_t *pgd, const char *text_start,
                                 const char *text_end, unsigned long prot)
{
        unsigned long addr, end;
        unsigned long next;

        addr = virt_to_idmap(text_start);
        end = virt_to_idmap(text_end);
        pr_info("Setting up static identity map for 0x%lx - 0x%lx\n", addr, end);

        prot |= PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AF;

        if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale_family())
                prot |= PMD_BIT4;

        pgd += pgd_index(addr);
        do {
                next = pgd_addr_end(addr, end);
                idmap_add_pud(pgd, addr, next, prot);
        } while (pgd++, addr = next, addr != end);
}

extern char __idmap_text_start[], __idmap_text_end[];

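/*
 * Allocate the static identity page tables and map the .idmap.text
 * section into them.  Runs as an early initcall so the tables are in
 * place before they are first used.
 */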
static int __init init_static_idmap(void)
{
        idmap_pgd = pgd_alloc(&init_mm);
        if (!idmap_pgd)
                return -ENOMEM;

        identity_mapping_add(idmap_pgd, __idmap_text_start,
                             __idmap_text_end, 0);

        /* Flush L1 for the hardware to see this page table content */
        flush_cache_louis();

        return 0;
}
early_initcall(init_static_idmap);

/*
 * In order to soft-boot, we need to switch to a 1:1 mapping for the
 * cpu_reset functions. This will then ensure that we have predictable
 * results when turning off the mmu.
 */
void setup_mm_for_reboot(void)
{
        /* Switch to the identity mapping. */
        cpu_switch_mm(idmap_pgd, &init_mm);
        local_flush_bp_all();

#ifdef CONFIG_CPU_HAS_ASID
        /*
         * We don't have a clean ASID for the identity mapping, which
         * may clash with virtual addresses of the previous page tables
         * and therefore potentially in the TLB.
         */
        local_flush_tlb_all();
#endif
}

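/*
 * A minimal sketch of a typical caller (not part of this file): the
 * soft-reboot path is expected to switch to the identity map right
 * before turning the MMU off, roughly:
 *
 *        setup_mm_for_reboot();
 *        cpu_reset(reboot_entry_phys);
 *
 * "reboot_entry_phys" is an illustrative name for the physical address
 * of the reset entry point; the real caller lives elsewhere in the
 * kernel.
 */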