linux/arch/arm/mm/fault-armv.c
/*
 *  linux/arch/arm/mm/fault-armv.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Modifications for ARM processor (c) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/gfp.h>

#include <asm/bugs.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#include "mm.h"

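/*
 * Memory type forced onto aliased shared mappings: bufferable
 * (uncached, but with the write buffer enabled) by default, demoted
 * to fully uncached by check_writebuffer_bugs() below if the write
 * buffer turns out to alias on virtual addresses.
 */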
static pteval_t shared_pte_mask = L_PTE_MT_BUFFERABLE;

#if __LINUX_ARM_ARCH__ < 6
/*
 * We take the easy way out of this problem - we make the
 * PTE uncacheable.  However, we leave the write buffer on.
 *
 * Note that the pte lock held when calling update_mmu_cache must also
 * guard the pte (somewhere else in the same mm) that we modify here.
 * Therefore those configurations which might call adjust_pte (those
 * without CONFIG_CPU_CACHE_VIPT) cannot support split page_table_lock.
 */
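/*
 * The problem in question: with a VIVT data cache, two virtual
 * mappings of the same physical page occupy different cache lines,
 * so a write through one mapping can sit in the cache unseen by
 * reads through the other.  Making every aliased mapping uncacheable
 * sidesteps this entirely, at the cost of slower access.
 */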
static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
        unsigned long pfn, pte_t *ptep)
{
        pte_t entry = *ptep;
        int ret;

        /*
         * If this page is present, it's actually being shared.
         */
        ret = pte_present(entry);

        /*
         * If this page isn't present, or is already using the shared
         * memory type, there is nothing for us to do.
         */
        if (ret && (pte_val(entry) & L_PTE_MT_MASK) != shared_pte_mask) {
                flush_cache_page(vma, address, pfn);
                outer_flush_range((pfn << PAGE_SHIFT),
                                  (pfn << PAGE_SHIFT) + PAGE_SIZE);
                pte_val(entry) &= ~L_PTE_MT_MASK;
                pte_val(entry) |= shared_pte_mask;
                set_pte_at(vma->vm_mm, address, ptep, entry);
                flush_tlb_page(vma, address);
        }

        return ret;
}

#if USE_SPLIT_PTE_PTLOCKS
/*
 * If we are using split PTE locks, then we need to take the page
 * table lock here.  Otherwise we are using the shared
 * mm->page_table_lock, which is already held and thus cannot be
 * taken again.
 */
static inline void do_pte_lock(spinlock_t *ptl)
{
        /*
         * Use nested version here to indicate that we are already
         * holding one similar spinlock.
         */
        spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
}

static inline void do_pte_unlock(spinlock_t *ptl)
{
        spin_unlock(ptl);
}
#else /* !USE_SPLIT_PTE_PTLOCKS */
static inline void do_pte_lock(spinlock_t *ptl) {}
static inline void do_pte_unlock(spinlock_t *ptl) {}
#endif /* USE_SPLIT_PTE_PTLOCKS */

static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
        unsigned long pfn)
{
        spinlock_t *ptl;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        int ret;

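        /*
         * Walk the page tables by hand; if any level is missing or
         * bad, there is no PTE here and hence nothing to adjust.
         */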
        pgd = pgd_offset(vma->vm_mm, address);
        if (pgd_none_or_clear_bad(pgd))
                return 0;

        pud = pud_offset(pgd, address);
        if (pud_none_or_clear_bad(pud))
                return 0;

        pmd = pmd_offset(pud, address);
        if (pmd_none_or_clear_bad(pmd))
                return 0;

        /*
         * This is called while another page table is mapped, so we
         * must use the nested version.  This also means we need to
         * open-code the spin-locking.
         */
        ptl = pte_lockptr(vma->vm_mm, pmd);
        pte = pte_offset_map(pmd, address);
        do_pte_lock(ptl);

        ret = do_adjust_pte(vma, address, pfn, pte);

        do_pte_unlock(ptl);
        pte_unmap(pte);

        return ret;
}

static void
make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
        unsigned long addr, pte_t *ptep, unsigned long pfn)
{
        struct mm_struct *mm = vma->vm_mm;
        struct vm_area_struct *mpnt;
        unsigned long offset;
        pgoff_t pgoff;
        int aliases = 0;

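        /* Offset of the faulting page within the mapped object, in pages. */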
        pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);

        /*
         * If we have any shared mappings that are in the same mm
         * space, then we need to handle them specially to maintain
         * cache coherency.
         */
        flush_dcache_mmap_lock(mapping);
        vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
                /*
                 * If this VMA is not in our MM, we can ignore it.
                 * Note that we intentionally mask out the VMA
                 * that we are fixing up.
                 */
                if (mpnt->vm_mm != mm || mpnt == vma)
                        continue;
                if (!(mpnt->vm_flags & VM_MAYSHARE))
                        continue;
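                /*
                 * Translate the file offset back to a virtual address
                 * in this VMA and fix up the PTE mapped there.
                 */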
                offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
                aliases += adjust_pte(mpnt, mpnt->vm_start + offset, pfn);
        }
        flush_dcache_mmap_unlock(mapping);
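        /*
         * If any other live mapping of this page was found in our mm,
         * the faulting PTE must switch to the safe memory type too.
         */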
        if (aliases)
                do_adjust_pte(vma, addr, pfn, ptep);
}

/*
 * Take care of architecture-specific things when placing a new PTE into
 * a page table, or changing an existing PTE.  Basically, there are two
 * things that we need to take care of:
 *
 *  1. If PG_dcache_clean is not set for the page, we need to ensure
 *     that any cache entries for the kernel's virtual memory
 *     range are written back to the page.
 *  2. If we have multiple shared mappings of the same space in
 *     an object, we need to deal with the cache aliasing issues.
 *
 * Note that the pte lock will be held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
        pte_t *ptep)
{
        unsigned long pfn = pte_pfn(*ptep);
        struct address_space *mapping;
        struct page *page;

        if (!pfn_valid(pfn))
                return;

        /*
         * The zero page is never written to, so never has any dirty
         * cache lines, and therefore never needs to be flushed.
         */
        page = pfn_to_page(pfn);
        if (page == ZERO_PAGE(0))
                return;

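        /*
         * page_mapping_file() returns NULL for anonymous pages.  If
         * PG_dcache_clean was not yet set, the kernel mapping of this
         * page may hold dirty cache lines: write them back and mark
         * the page clean.
         */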
        mapping = page_mapping_file(page);
        if (!test_and_set_bit(PG_dcache_clean, &page->flags))
                __flush_dcache_page(mapping, page);
        if (mapping) {
                if (cache_is_vivt())
                        make_coherent(mapping, vma, addr, ptep, pfn);
                else if (vma->vm_flags & VM_EXEC)
                        __flush_icache_all();
        }
}
#endif  /* __LINUX_ARM_ARCH__ < 6 */

/*
 * Check whether the write buffer has physical address aliasing
 * issues.  If it has, we need to avoid them for the case where
 * we have several shared mappings of the same object in user
 * space.
 */
static int __init check_writebuffer(unsigned long *p1, unsigned long *p2)
{
        register unsigned long zero = 0, one = 1, val;

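        /*
         * p1 and p2 are two virtual aliases of the same physical word.
         * Write 1 through one alias and 0 through the other, then read
         * back through the first: if the write buffer handles aliases
         * by physical address we read back 0, while a stale 1 means it
         * matched on virtual address and the work-around is needed.
         */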
        local_irq_disable();
        mb();
        *p1 = one;
        mb();
        *p2 = zero;
        mb();
        val = *p1;
        mb();
        local_irq_enable();
        return val != zero;
}

void __init check_writebuffer_bugs(void)
{
        struct page *page;
        const char *reason;
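        /* Assume the worst: any failure below keeps the work-around enabled. */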
        unsigned long v = 1;

        pr_info("CPU: Testing write buffer coherency: ");

        page = alloc_page(GFP_KERNEL);
        if (page) {
                unsigned long *p1, *p2;
                pgprot_t prot = __pgprot_modify(PAGE_KERNEL,
                                        L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE);

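                /*
                 * Map the same physical page at two different virtual
                 * addresses, using the bufferable memory type under test.
                 */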
                p1 = vmap(&page, 1, VM_IOREMAP, prot);
                p2 = vmap(&page, 1, VM_IOREMAP, prot);

                if (p1 && p2) {
                        v = check_writebuffer(p1, p2);
                        reason = "enabling work-around";
                } else {
                        reason = "unable to map memory";
                }

                vunmap(p1);
                vunmap(p2);
                put_page(page);
        } else {
                reason = "unable to grab page";
        }

        if (v) {
                pr_cont("failed, %s\n", reason);
                shared_pte_mask = L_PTE_MT_UNCACHED;
        } else {
                pr_cont("ok\n");
        }
}