/*
 *  linux/arch/arm/mm/flush.c
 *
 *  Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>

#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/highmem.h>
#include <asm/smp_plat.h>
#include <asm/tlbflush.h>

#include "mm.h"

#ifdef CONFIG_CPU_CACHE_VIPT

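/*
 * Clean and invalidate one page of D-cache through a kernel alias.  The
 * page is temporarily mapped at FLUSH_ALIAS_START at the same cache
 * colour as the original user address, so the alias indexes the same
 * cache lines as the user mapping; the MCRR performs a block clean and
 * invalidate over that range and the final MCR drains the write buffer.
 */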
static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
{
        unsigned long to = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
        const int zero = 0;

        set_top_pte(to, pfn_pte(pfn, PAGE_KERNEL));

        asm(    "mcrr   p15, 0, %1, %0, c14\n"
        "       mcr     p15, 0, %2, c7, c10, 4"
            :
            : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES), "r" (zero)
            : "cc");
}

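/*
 * Make a range within a page coherent with the I-cache through a kernel
 * alias of the same cache colour as the user address, by running
 * flush_icache_range() over the aliased virtual addresses.
 */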
static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len)
{
        unsigned long va = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
        unsigned long offset = vaddr & (PAGE_SIZE - 1);
        unsigned long to;

        set_top_pte(va, pfn_pte(pfn, PAGE_KERNEL));
        to = va + offset;
        flush_icache_range(to, to + len);
}

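/*
 * Flush the cache state for an entire address space.  VIVT caches are
 * handled by vivt_flush_cache_mm(); aliasing VIPT caches get a full
 * D-cache clean and invalidate (c7, c14, 0) followed by a write buffer
 * drain, since any colour may hold data for this mm.
 */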
void flush_cache_mm(struct mm_struct *mm)
{
        if (cache_is_vivt()) {
                vivt_flush_cache_mm(mm);
                return;
        }

        if (cache_is_vipt_aliasing()) {
                asm(    "mcr    p15, 0, %0, c7, c14, 0\n"
                "       mcr     p15, 0, %0, c7, c10, 4"
                    :
                    : "r" (0)
                    : "cc");
        }
}

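/*
 * Flush a range of user addresses before the underlying mappings are
 * changed.  Aliasing VIPT caches take the same whole D-cache clean and
 * invalidate as flush_cache_mm(); executable ranges additionally have
 * the I-cache invalidated so stale instructions are not fetched.
 */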
void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
        if (cache_is_vivt()) {
                vivt_flush_cache_range(vma, start, end);
                return;
        }

        if (cache_is_vipt_aliasing()) {
                asm(    "mcr    p15, 0, %0, c7, c14, 0\n"
                "       mcr     p15, 0, %0, c7, c10, 4"
                    :
                    : "r" (0)
                    : "cc");
        }

        if (vma->vm_flags & VM_EXEC)
                __flush_icache_all();
}

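/*
 * Flush a single user page.  For an aliasing VIPT D-cache it is enough
 * to flush one alias of the right colour plus the whole I-cache; an
 * ASID-tagged VIVT I-cache also needs a full invalidate for executable
 * mappings.
 */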
void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
        if (cache_is_vivt()) {
                vivt_flush_cache_page(vma, user_addr, pfn);
                return;
        }

        if (cache_is_vipt_aliasing()) {
                flush_pfn_alias(pfn, user_addr);
                __flush_icache_all();
        }

        if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
                __flush_icache_all();
}

#else
#define flush_pfn_alias(pfn,vaddr)              do { } while (0)
#define flush_icache_alias(pfn,vaddr,len)       do { } while (0)
#endif

static void flush_ptrace_access_other(void *args)
{
        __flush_icache_all();
}

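/*
 * Propagate a kernel-space write to a user page (performed through
 * kaddr, as in copy_to_user_page() below) to the user mapping at uaddr,
 * including I-cache maintenance when the VMA is executable.
 */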
static
void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
                         unsigned long uaddr, void *kaddr, unsigned long len)
{
        if (cache_is_vivt()) {
                if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
                        unsigned long addr = (unsigned long)kaddr;
                        __cpuc_coherent_kern_range(addr, addr + len);
                }
                return;
        }

        if (cache_is_vipt_aliasing()) {
                flush_pfn_alias(page_to_pfn(page), uaddr);
                __flush_icache_all();
                return;
        }

        /* VIPT non-aliasing D-cache */
        if (vma->vm_flags & VM_EXEC) {
                unsigned long addr = (unsigned long)kaddr;
                if (icache_is_vipt_aliasing())
                        flush_icache_alias(page_to_pfn(page), uaddr, len);
                else
                        __cpuc_coherent_kern_range(addr, addr + len);
                if (cache_ops_need_broadcast())
                        smp_call_function(flush_ptrace_access_other,
                                          NULL, 1);
        }
}

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 *
 * Note that this code needs to run on the current CPU.
 */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
                       unsigned long uaddr, void *dst, const void *src,
                       unsigned long len)
{
#ifdef CONFIG_SMP
        preempt_disable();
#endif
        memcpy(dst, src, len);
        flush_ptrace_access(vma, page, uaddr, dst, len);
#ifdef CONFIG_SMP
        preempt_enable();
#endif
}
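/*
 * Illustrative sketch, not part of the original file: the generic
 * access_process_vm()/ptrace path is the typical caller and looks
 * roughly like
 *
 *  maddr = kmap(page);
 *  copy_to_user_page(vma, page, addr, maddr + offset, buf, bytes);
 *  set_page_dirty_lock(page);
 *  kunmap(page);
 *
 * which is why the cache maintenance must run on the current CPU while
 * the kernel mapping is still in place.
 */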

void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
        /*
         * Writeback any data associated with the kernel mapping of this
         * page.  This ensures that data in the physical page is mutually
         * coherent with the kernel's mapping.
         */
        if (!PageHighMem(page)) {
                __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
        } else {
                void *addr;

                if (cache_is_vipt_nonaliasing()) {
                        addr = kmap_atomic(page);
                        __cpuc_flush_dcache_area(addr, PAGE_SIZE);
                        kunmap_atomic(addr);
                } else {
                        addr = kmap_high_get(page);
                        if (addr) {
                                __cpuc_flush_dcache_area(addr, PAGE_SIZE);
                                kunmap_high(page);
                        }
                }
        }

        /*
         * If this is a page cache page, and we have an aliasing VIPT cache,
         * we only need to do one flush - which would be at the relevant
         * userspace colour, which is congruent with page->index.
         */
        if (mapping && cache_is_vipt_aliasing())
                flush_pfn_alias(page_to_pfn(page),
                                page->index << PAGE_CACHE_SHIFT);
}

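/*
 * Write back every user-space alias of this page that is visible in the
 * current mm: walk the mapping's interval tree and flush each shared
 * VMA's view of the page with flush_cache_page().
 */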
static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
{
        struct mm_struct *mm = current->active_mm;
        struct vm_area_struct *mpnt;
        pgoff_t pgoff;

        /*
         * There are possible user space mappings of this page:
         * - VIVT cache: we need to also write back and invalidate all user
         *   data in the current VM view associated with this page.
         * - aliasing VIPT: we only need to find one mapping of this page.
         */
        pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

        flush_dcache_mmap_lock(mapping);
        vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
                unsigned long offset;

                /*
                 * If this VMA is not in our MM, we can ignore it.
                 */
                if (mpnt->vm_mm != mm)
                        continue;
                if (!(mpnt->vm_flags & VM_MAYSHARE))
                        continue;
                offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
                flush_cache_page(mpnt, mpnt->vm_start + offset, page_to_pfn(page));
        }
        flush_dcache_mmap_unlock(mapping);
}

#if __LINUX_ARM_ARCH__ >= 6
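/*
 * Keep the I-cache and D-cache in step when a user PTE is established.
 * The D-cache side is handled lazily: PG_dcache_clean records whether
 * the kernel mapping has already been written back, so the flush is
 * done at most once per dirtying of the page.
 */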
void __sync_icache_dcache(pte_t pteval)
{
        unsigned long pfn;
        struct page *page;
        struct address_space *mapping;

        if (cache_is_vipt_nonaliasing() && !pte_exec(pteval))
                /* only flush non-aliasing VIPT caches for exec mappings */
                return;
        pfn = pte_pfn(pteval);
        if (!pfn_valid(pfn))
                return;

        page = pfn_to_page(pfn);
        if (cache_is_vipt_aliasing())
                mapping = page_mapping(page);
        else
                mapping = NULL;

        if (!test_and_set_bit(PG_dcache_clean, &page->flags))
                __flush_dcache_page(mapping, page);

        if (pte_exec(pteval))
                __flush_icache_all();
}
#endif

/*
 * Ensure cache coherency between kernel mapping and userspace mapping
 * of this page.
 *
 * We have three cases to consider:
 *  - VIPT non-aliasing cache: fully coherent so nothing required.
 *  - VIVT: fully aliasing, so we need to handle every alias in our
 *          current VM view.
 *  - VIPT aliasing: need to handle one alias in our current VM view.
 *
 * If we need to handle aliasing:
 *  If the page only exists in the page cache and there are no user
 *  space mappings, we can be lazy and remember that we may have dirty
 *  kernel cache lines for later.  Otherwise, we assume we have
 *  aliasing mappings.
 *
 * Note that we disable the lazy flush for SMP configurations where
 * the cache maintenance operations are not automatically broadcast.
 */
void flush_dcache_page(struct page *page)
{
        struct address_space *mapping;

        /*
         * The zero page is never written to, so never has any dirty
         * cache lines, and therefore never needs to be flushed.
         */
        if (page == ZERO_PAGE(0))
                return;

        mapping = page_mapping(page);

        if (!cache_ops_need_broadcast() &&
            mapping && !mapping_mapped(mapping))
                clear_bit(PG_dcache_clean, &page->flags);
        else {
                __flush_dcache_page(mapping, page);
                if (mapping && cache_is_vivt())
                        __flush_dcache_aliases(mapping, page);
                else if (mapping)
                        __flush_icache_all();
                set_bit(PG_dcache_clean, &page->flags);
        }
}
EXPORT_SYMBOL(flush_dcache_page);
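/*
 * Illustrative sketch, not part of the original file: code that writes
 * to a page cache page through its kernel mapping is expected to follow
 * up with flush_dcache_page(), roughly
 *
 *  addr = kmap(page);
 *  memcpy(addr, data, len);
 *  kunmap(page);
 *  flush_dcache_page(page);
 *
 * so that a user mapping of a different cache colour (or a VIVT alias)
 * sees the new data.
 */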

/*
 * Ensure cache coherency for the kernel mapping of this page. We can
 * assume that the page is pinned via kmap.
 *
 * If the page only exists in the page cache and there are no user
 * space mappings, this is a no-op since the page was already marked
 * dirty at creation.  Otherwise, we need to flush the dirty kernel
 * cache lines directly.
 */
void flush_kernel_dcache_page(struct page *page)
{
        if (cache_is_vivt() || cache_is_vipt_aliasing()) {
                struct address_space *mapping;

                mapping = page_mapping(page);

                if (!mapping || mapping_mapped(mapping)) {
                        void *addr;

                        addr = page_address(page);
                        /*
                         * kmap_atomic() doesn't set the page virtual
                         * address for highmem pages, and
                         * kunmap_atomic() takes care of cache
                         * flushing already.
                         */
                        if (!IS_ENABLED(CONFIG_HIGHMEM) || addr)
                                __cpuc_flush_dcache_area(addr, PAGE_SIZE);
                }
        }
}
EXPORT_SYMBOL(flush_kernel_dcache_page);
 335
 336/*
 337 * Flush an anonymous page so that users of get_user_pages()
 338 * can safely access the data.  The expected sequence is:
 339 *
 340 *  get_user_pages()
 341 *    -> flush_anon_page
 342 *  memcpy() to/from page
 343 *  if written to page, flush_dcache_page()
 344 */
 345void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
 346{
 347        unsigned long pfn;
 348
 349        /* VIPT non-aliasing caches need do nothing */
 350        if (cache_is_vipt_nonaliasing())
 351                return;
 352
 353        /*
 354         * Write back and invalidate userspace mapping.
 355         */
 356        pfn = page_to_pfn(page);
 357        if (cache_is_vivt()) {
 358                flush_cache_page(vma, vmaddr, pfn);
 359        } else {
 360                /*
 361                 * For aliasing VIPT, we can flush an alias of the
 362                 * userspace address only.
 363                 */
 364                flush_pfn_alias(pfn, vmaddr);
 365                __flush_icache_all();
 366        }
 367
 368        /*
 369         * Invalidate kernel mapping.  No data should be contained
 370         * in this mapping of the page.  FIXME: this is overkill
 371         * since we actually ask for a write-back and invalidate.
 372         */
 373        __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
 374}
 375