linux/arch/arm64/include/asm/cacheflush.h
/*
 * Based on arch/arm/include/asm/cacheflush.h
 *
 * Copyright (C) 1999-2002 Russell King.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_CACHEFLUSH_H
#define __ASM_CACHEFLUSH_H

#include <linux/mm.h>

/*
 * This flag is used to indicate that the page pointed to by a pte is clean
 * and does not require cleaning before returning it to the user.
 */
#define PG_dcache_clean PG_arch_1

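/*
 * Usage sketch (illustrative, along the lines of __sync_icache_dcache()
 * in arch/arm64/mm/flush.c): the flag lets arch code clean a page
 * lazily, only the first time it is mapped into user space:
 *
 *      if (!test_and_set_bit(PG_dcache_clean, &page->flags)) {
 *              __flush_dcache_area(page_address(page), PAGE_SIZE);
 *              __flush_icache_all();
 *      }
 */
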
/*
 *      MM Cache Management
 *      ===================
 *
 *      These methods are implemented in arch/arm64/mm/cache.S.
 *
 *      Start addresses are inclusive and end addresses are exclusive; start
 *      addresses should be rounded down, end addresses up.
 *
 *      See Documentation/cachetlb.txt for more information. Please note that
 *      the implementation assumes non-aliasing VIPT D-cache and (aliasing)
 *      VIPT or ASID-tagged VIVT I-cache.
 *
 *      flush_cache_all()
 *
 *              Unconditionally clean and invalidate the entire cache.
 *
 *      flush_cache_mm(mm)
 *
 *              Clean and invalidate all user space cache entries
 *              before a change of page tables.
 *
 *      flush_icache_range(start, end)
 *
 *              Ensure coherency between the I-cache and the D-cache in the
 *              region described by start, end.
 *              - start  - virtual start address
 *              - end    - virtual end address
 *
 *      __flush_cache_user_range(start, end)
 *
 *              Ensure coherency between the I-cache and the D-cache in the
 *              region described by start, end.
 *              - start  - virtual start address
 *              - end    - virtual end address
 *
 *      __flush_dcache_area(kaddr, size)
 *
 *              Ensure that the data held in the page is written back.
 *              - kaddr  - page address
 *              - size   - region size
 */
extern void flush_cache_all(void);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_icache_range(unsigned long start, unsigned long end);
extern void __flush_dcache_area(void *addr, size_t len);
extern void __flush_cache_user_range(unsigned long start, unsigned long end);

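/*
 * Usage sketch (illustrative; "patch_addr" and "new_insn" are made-up
 * names): code that rewrites instructions through the kernel mapping
 * must make the I-cache coherent with the D-cache before the new
 * instructions can be executed:
 *
 *      *patch_addr = new_insn;
 *      flush_icache_range((unsigned long)patch_addr,
 *                         (unsigned long)(patch_addr + 1));
 */
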
/*
 * The D-cache is non-aliasing (see the note above), so user addresses
 * need no explicit flushing here and these can be no-ops.
 */
static inline void flush_cache_mm(struct mm_struct *mm)
{
}

static inline void flush_cache_page(struct vm_area_struct *vma,
                                    unsigned long user_addr, unsigned long pfn)
{
}

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 */
extern void copy_to_user_page(struct vm_area_struct *, struct page *,
        unsigned long, void *, const void *, unsigned long);
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
        do {                                                    \
                memcpy(dst, src, len);                          \
        } while (0)

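/*
 * Usage sketch (illustrative; mirrors the write path of
 * __access_remote_vm() in mm/memory.c, e.g. when a debugger pokes a
 * tracee's text): copy_to_user_page() both copies the data and keeps
 * the caches coherent for the target range:
 *
 *      maddr = kmap(page);
 *      copy_to_user_page(vma, page, addr, maddr + offset, buf, bytes);
 *      set_page_dirty_lock(page);
 *      kunmap(page);
 */
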
#define flush_cache_dup_mm(mm) flush_cache_mm(mm)

/*
 * flush_dcache_page is used when the kernel has written to the page
 * cache page at virtual address page->virtual.
 *
 * If this page isn't mapped (i.e., page_mapping == NULL), or it might
 * have userspace mappings, then we _must_ always clean + invalidate
 * the dcache entries associated with the kernel mapping.
 *
 * Otherwise we can defer the operation, and clean the cache when we are
 * about to change to user space.  This is the same method as used on SPARC64.
 * See update_mmu_cache for the user space part.
 */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);

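/*
 * Usage sketch (illustrative): a filesystem or driver that writes to a
 * page cache page through its kernel mapping follows the write with
 * flush_dcache_page() so that user space mappings observe the data:
 *
 *      void *kaddr = kmap_atomic(page);
 *      memcpy(kaddr + offset, data, len);
 *      kunmap_atomic(kaddr);
 *      flush_dcache_page(page);
 */
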
static inline void __flush_icache_all(void)
{
        /* Invalidate all I-caches to the PoU, Inner Shareable domain. */
        asm("ic ialluis");
}

#define flush_dcache_mmap_lock(mapping) \
        spin_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
        spin_unlock_irq(&(mapping)->tree_lock)

/*
 * We don't appear to need to do anything here.  In fact, if we did, we'd
 * duplicate cache flushing elsewhere performed by flush_dcache_page().
 */
#define flush_icache_page(vma, page)    do { } while (0)

/*
 * flush_cache_vmap() is used when creating mappings (e.g. via vmap,
 * vmalloc, ioremap, etc.) in kernel space for pages.  On non-VIPT
 * caches, since the direct-mappings of these pages may contain cached
 * data, we need to do a full cache flush to ensure that writebacks
 * don't corrupt data placed into these pages via the new mappings.
 */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
        /*
         * set_pte_at() called from vmap_pte_range() does not
         * have a DSB after cleaning the cache line.
         */
        dsb();
}

static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
}

#endif