/* linux/arch/powerpc/include/asm/cacheflush.h */
   1/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Low-level data/instruction cache maintenance helpers for powerpc.
 */
   4#ifndef _ASM_POWERPC_CACHEFLUSH_H
   5#define _ASM_POWERPC_CACHEFLUSH_H
   6
   7#include <linux/mm.h>
   8#include <asm/cputable.h>
   9#include <asm/cpu_has_feature.h>
  10
  11#ifdef CONFIG_PPC_BOOK3S_64
  12/*
  13 * Book3s has no ptesync after setting a pte, so without this ptesync it's
  14 * possible for a kernel virtual mapping access to return a spurious fault
  15 * if it's accessed right after the pte is set. The page fault handler does
  16 * not expect this type of fault. flush_cache_vmap is not exactly the right
  17 * place to put this, but it seems to work well enough.
  18 */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
	/*
	 * @start/@end are unused: only the ordering matters here. The
	 * ptesync orders the preceding PTE stores before any subsequent
	 * access through the new mapping (see the rationale above).
	 */
	asm volatile("ptesync" ::: "memory");
}
  23#define flush_cache_vmap flush_cache_vmap
  24#endif /* CONFIG_PPC_BOOK3S_64 */
  25
/*
 * powerpc provides its own flush_dcache_page(); this tells the generic
 * code (and <asm-generic/cacheflush.h>, included below) not to supply a
 * no-op fallback.
 */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *page);

/*
 * Make the instruction cache consistent with memory for [start, stop).
 * The #define marks the arch override for <asm-generic/cacheflush.h>.
 */
void flush_icache_range(unsigned long start, unsigned long stop);
#define flush_icache_range flush_icache_range

/*
 * Icache maintenance for @len bytes of @page mapped at user address
 * @addr in @vma. Arch override, flagged via the #define as above.
 */
void flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
		unsigned long addr, int len);
#define flush_icache_user_page flush_icache_user_page

/* Dcache/icache coherency helpers: by struct page, or by kernel address. */
void flush_dcache_icache_page(struct page *page);
void __flush_dcache_icache(void *page);
  38
  39/**
  40 * flush_dcache_range(): Write any modified data cache blocks out to memory and
  41 * invalidate them. Does not invalidate the corresponding instruction cache
  42 * blocks.
  43 *
  44 * @start: the start address
  45 * @stop: the stop address (exclusive)
  46 */
  47static inline void flush_dcache_range(unsigned long start, unsigned long stop)
  48{
  49        unsigned long shift = l1_dcache_shift();
  50        unsigned long bytes = l1_dcache_bytes();
  51        void *addr = (void *)(start & ~(bytes - 1));
  52        unsigned long size = stop - (unsigned long)addr + (bytes - 1);
  53        unsigned long i;
  54
  55        if (IS_ENABLED(CONFIG_PPC64))
  56                mb();   /* sync */
  57
  58        for (i = 0; i < size >> shift; i++, addr += bytes)
  59                dcbf(addr);
  60        mb();   /* sync */
  61
  62}
  63
  64/*
  65 * Write any modified data cache blocks out to memory.
  66 * Does not invalidate the corresponding cache lines (especially for
  67 * any corresponding instruction cache).
  68 */
  69static inline void clean_dcache_range(unsigned long start, unsigned long stop)
  70{
  71        unsigned long shift = l1_dcache_shift();
  72        unsigned long bytes = l1_dcache_bytes();
  73        void *addr = (void *)(start & ~(bytes - 1));
  74        unsigned long size = stop - (unsigned long)addr + (bytes - 1);
  75        unsigned long i;
  76
  77        for (i = 0; i < size >> shift; i++, addr += bytes)
  78                dcbst(addr);
  79        mb();   /* sync */
  80}
  81
  82/*
  83 * Like above, but invalidate the D-cache.  This is used by the 8xx
  84 * to invalidate the cache so the PPC core doesn't get stale data
  85 * from the CPM (no cache snooping here :-).
  86 */
  87static inline void invalidate_dcache_range(unsigned long start,
  88                                           unsigned long stop)
  89{
  90        unsigned long shift = l1_dcache_shift();
  91        unsigned long bytes = l1_dcache_bytes();
  92        void *addr = (void *)(start & ~(bytes - 1));
  93        unsigned long size = stop - (unsigned long)addr + (bytes - 1);
  94        unsigned long i;
  95
  96        for (i = 0; i < size >> shift; i++, addr += bytes)
  97                dcbi(addr);
  98        mb();   /* sync */
  99}
 100
 101#include <asm-generic/cacheflush.h>
 102
 103#endif /* _ASM_POWERPC_CACHEFLUSH_H */
 104