linux/arch/arm64/include/asm/cacheflush.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/cacheflush.h
 *
 * Copyright (C) 1999-2002 Russell King.
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_CACHEFLUSH_H
#define __ASM_CACHEFLUSH_H

#include <linux/kgdb.h>
#include <linux/mm.h>

/*
 * This flag is used to indicate that the page pointed to by a pte is clean
 * and does not require cleaning before returning it to the user.
 */
#define PG_dcache_clean PG_arch_1

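/*
 * Illustrative sketch (not part of this header): arch fault-handling code
 * can use the flag to perform the expensive sync at most once per page,
 * when the page is first mapped into userspace, along the lines of:
 *
 *	if (!test_bit(PG_dcache_clean, &page->flags)) {
 *		unsigned long kaddr = (unsigned long)page_address(page);
 *
 *		sync_icache_aliases(kaddr, kaddr + PAGE_SIZE);
 *		set_bit(PG_dcache_clean, &page->flags);
 *	}
 */
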
/*
 *      MM Cache Management
 *      ===================
 *
 *      arch/arm64/mm/cache.S implements these methods.
 *
 *      Start addresses are inclusive and end addresses are exclusive; start
 *      addresses should be rounded down, end addresses up.
 *
 *      See Documentation/core-api/cachetlb.rst for more information. Please
 *      note that the implementation assumes non-aliasing VIPT D-cache and
 *      (aliasing) VIPT I-cache.
 *
 *      All functions below apply to the interval [start, end)
 *              - start  - virtual start address (inclusive)
 *              - end    - virtual end address (exclusive)
 *
 *      caches_clean_inval_pou(start, end)
 *
 *              Ensure coherency between the I-cache and the D-cache region to
 *              the Point of Unification.
 *
 *      caches_clean_inval_user_pou(start, end)
 *
 *              Ensure coherency between the I-cache and the D-cache region to
 *              the Point of Unification.
 *              Use only if the region might access user memory.
 *
 *      icache_inval_pou(start, end)
 *
 *              Invalidate I-cache region to the Point of Unification.
 *
 *      dcache_clean_inval_poc(start, end)
 *
 *              Clean and invalidate D-cache region to the Point of Coherency.
 *
 *      dcache_inval_poc(start, end)
 *
 *              Invalidate D-cache region to the Point of Coherency.
 *
 *      dcache_clean_poc(start, end)
 *
 *              Clean D-cache region to the Point of Coherency.
 *
 *      dcache_clean_pop(start, end)
 *
 *              Clean D-cache region to the Point of Persistence.
 *
 *      dcache_clean_pou(start, end)
 *
 *              Clean D-cache region to the Point of Unification.
 */
extern void caches_clean_inval_pou(unsigned long start, unsigned long end);
extern void icache_inval_pou(unsigned long start, unsigned long end);
extern void dcache_clean_inval_poc(unsigned long start, unsigned long end);
extern void dcache_inval_poc(unsigned long start, unsigned long end);
extern void dcache_clean_poc(unsigned long start, unsigned long end);
extern void dcache_clean_pop(unsigned long start, unsigned long end);
extern void dcache_clean_pou(unsigned long start, unsigned long end);
extern long caches_clean_inval_user_pou(unsigned long start, unsigned long end);
extern void sync_icache_aliases(unsigned long start, unsigned long end);

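/*
 * Usage sketch (illustrative; 'buf' and 'len' are hypothetical): following
 * the rounding rule above, a caller cleaning and invalidating a buffer for
 * a non-coherent observer would align the interval to cache lines:
 *
 *	unsigned long start = (unsigned long)buf;
 *	unsigned long end = start + len;
 *
 *	dcache_clean_inval_poc(round_down(start, cache_line_size()),
 *			       round_up(end, cache_line_size()));
 */
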
static inline void flush_icache_range(unsigned long start, unsigned long end)
{
	caches_clean_inval_pou(start, end);

	/*
	 * IPI all online CPUs so that they undergo a context synchronization
	 * event and are forced to refetch the new instructions.
	 */

	/*
	 * KGDB performs cache maintenance with interrupts disabled, so we
	 * will deadlock trying to IPI the secondary CPUs. In theory, we can
	 * set CACHE_FLUSH_IS_SAFE to 0 to avoid this known issue, but that
	 * just means that KGDB will elide the maintenance altogether! As it
	 * turns out, KGDB uses IPIs to round up the secondary CPUs during
	 * the patching operation, so we don't need extra IPIs here anyway.
	 * In which case, add a KGDB-specific bodge and return early.
	 */
	if (in_dbg_master())
		return;

	kick_all_cpus_sync();
}
#define flush_icache_range flush_icache_range

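/*
 * Usage sketch (illustrative; 'dst', 'insns' and 'size' are hypothetical):
 * after writing new instructions to a kernel mapping, publish them to all
 * CPUs before they can be executed:
 *
 *	memcpy(dst, insns, size);
 *	flush_icache_range((unsigned long)dst, (unsigned long)dst + size);
 */
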
/*
 * Cache maintenance functions used by the DMA API. Not to be used directly.
 */
extern void __dma_map_area(const void *, size_t, int);
extern void __dma_unmap_area(const void *, size_t, int);
extern void __dma_flush_area(const void *, size_t);

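/*
 * They are invoked from the arch DMA code; roughly (a sketch of the call
 * pattern, not a verbatim copy of the arch implementation):
 *
 *	void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
 *				      enum dma_data_direction dir)
 *	{
 *		__dma_map_area(phys_to_virt(paddr), size, dir);
 *	}
 */
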
/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 */
extern void copy_to_user_page(struct vm_area_struct *, struct page *,
	unsigned long, void *, const void *, unsigned long);
#define copy_to_user_page copy_to_user_page

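/*
 * Conceptually (a sketch, not the exact implementation): copy the bytes
 * via the kernel mapping, then keep the target's instruction view coherent
 * if the mapping is executable:
 *
 *	memcpy(dst, src, len);
 *	if (vma->vm_flags & VM_EXEC)
 *		sync_icache_aliases((unsigned long)dst,
 *				    (unsigned long)dst + len);
 */
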
/*
 * flush_dcache_page is used when the kernel has written to the page
 * cache page at virtual address page->virtual.
 *
 * If this page isn't mapped (i.e., page_mapping == NULL), or it might
 * have userspace mappings, then we _must_ always clean + invalidate
 * the dcache entries associated with the kernel mapping.
 *
 * Otherwise we can defer the operation, and clean the cache when we are
 * about to change to user space.  This is the same method as used on SPARC64.
 * See update_mmu_cache for the user space part.
 */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);

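/*
 * The deferral looks roughly like this (a sketch of the scheme described
 * above, not a verbatim copy): flush_dcache_page() only marks the page as
 * needing maintenance, and the sync happens later, e.g. via
 * update_mmu_cache():
 *
 *	void flush_dcache_page(struct page *page)
 *	{
 *		if (test_bit(PG_dcache_clean, &page->flags))
 *			clear_bit(PG_dcache_clean, &page->flags);
 *	}
 */
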
static __always_inline void icache_inval_all_pou(void)
{
	/*
	 * CTR_EL0.DIC indicates that I-cache invalidation to the PoU is
	 * not required for instruction-to-data coherence, so there is
	 * nothing to do on such CPUs.
	 */
	if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC))
		return;

	/* Invalidate all I-caches to the PoU, Inner Shareable domain. */
	asm("ic ialluis");
	dsb(ish);
}

#include <asm-generic/cacheflush.h>

#endif /* __ASM_CACHEFLUSH_H */