linux/arch/powerpc/include/asm/cache.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_CACHE_H
#define _ASM_POWERPC_CACHE_H

#ifdef __KERNEL__


/* bytes per L1 cache line */
#if defined(CONFIG_PPC_8xx) || defined(CONFIG_403GCX)
#define L1_CACHE_SHIFT          4
#define MAX_COPY_PREFETCH       1
#define IFETCH_ALIGN_SHIFT      2
#elif defined(CONFIG_PPC_E500MC)
#define L1_CACHE_SHIFT          6
#define MAX_COPY_PREFETCH       4
#define IFETCH_ALIGN_SHIFT      3
#elif defined(CONFIG_PPC32)
#define MAX_COPY_PREFETCH       4
#define IFETCH_ALIGN_SHIFT      3       /* 603 fetches 2 insn at a time */
#if defined(CONFIG_PPC_47x)
#define L1_CACHE_SHIFT          7
#else
#define L1_CACHE_SHIFT          5
#endif
#else /* CONFIG_PPC64 */
#define L1_CACHE_SHIFT          7
#define IFETCH_ALIGN_SHIFT      4 /* POWER8,9 */
#endif

#define L1_CACHE_BYTES          (1 << L1_CACHE_SHIFT)

#define SMP_CACHE_BYTES         L1_CACHE_BYTES

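/*
 * Alignment used for performance-critical assembly code so that an
 * instruction-fetch group is not split across a fetch boundary.
 */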
#define IFETCH_ALIGN_BYTES      (1 << IFETCH_ALIGN_SHIFT)

#if !defined(__ASSEMBLY__)
#ifdef CONFIG_PPC64

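/*
 * On ppc64 the cache geometry is discovered at boot (from the device
 * tree) and stored in ppc64_caches, so line and block sizes are runtime
 * values rather than the compile-time constants used on 32-bit.
 */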
struct ppc_cache_info {
        u32 size;
        u32 line_size;
        u32 block_size; /* L1 only */
        u32 log_block_size;
        u32 blocks_per_page;
        u32 sets;
        u32 assoc;
};

struct ppc64_caches {
        struct ppc_cache_info l1d;
        struct ppc_cache_info l1i;
        struct ppc_cache_info l2;
        struct ppc_cache_info l3;
};

extern struct ppc64_caches ppc64_caches;

static inline u32 l1_cache_shift(void)
{
        return ppc64_caches.l1d.log_block_size;
}

static inline u32 l1_cache_bytes(void)
{
        return ppc64_caches.l1d.block_size;
}
#else
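/* On 32-bit the L1 cache geometry is a compile-time constant. */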
static inline u32 l1_cache_shift(void)
{
        return L1_CACHE_SHIFT;
}

static inline u32 l1_cache_bytes(void)
{
        return L1_CACHE_BYTES;
}
#endif
#endif /* ! __ASSEMBLY__ */

#if defined(__ASSEMBLY__)
/*
 * For a snooping icache, we still need a dummy icbi to purge all the
 * prefetched instructions from the ifetch buffers. We also need a sync
 * before the icbi to order any stores to memory that might have
 * modified instructions with respect to the icbi.
 */
#define PURGE_PREFETCHED_INS    \
        sync;                   \
        icbi    0,r3;           \
        sync;                   \
        isync

#else
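/*
 * Data marked __read_mostly is grouped in its own section so that it does
 * not share cache lines with frequently written data.
 */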
#define __read_mostly __attribute__((__section__(".data..read_mostly")))

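/*
 * L2CR/L3CR control the external L2/L3 caches on 6xx/7xx (Book3S-32)
 * CPUs; everywhere else these accessors are no-ops.
 */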
#ifdef CONFIG_PPC_BOOK3S_32
extern long _get_L2CR(void);
extern long _get_L3CR(void);
extern void _set_L2CR(unsigned long);
extern void _set_L3CR(unsigned long);
#else
#define _get_L2CR()     0L
#define _get_L3CR()     0L
#define _set_L2CR(val)  do { } while(0)
#define _set_L3CR(val)  do { } while(0)
#endif

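/* Zero the cache block containing addr without first fetching it from memory. */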
static inline void dcbz(void *addr)
{
        __asm__ __volatile__ ("dcbz 0, %0" : : "r"(addr) : "memory");
}

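/* Invalidate the cache block containing addr; dirty data is discarded, not written back. */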
static inline void dcbi(void *addr)
{
        __asm__ __volatile__ ("dcbi 0, %0" : : "r"(addr) : "memory");
}

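/* Flush: write the cache block containing addr back to memory and invalidate it. */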
static inline void dcbf(void *addr)
{
        __asm__ __volatile__ ("dcbf 0, %0" : : "r"(addr) : "memory");
}

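/* Store: write the cache block containing addr back to memory, leaving it valid in the cache. */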
static inline void dcbst(void *addr)
{
        __asm__ __volatile__ ("dcbst 0, %0" : : "r"(addr) : "memory");
}
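
/*
 * Illustrative sketch only (not part of the original header): these
 * primitives are normally applied one cache block at a time across a
 * buffer, using the runtime block size. The helper name below is
 * hypothetical; the kernel's real range helpers live elsewhere
 * (asm/cacheflush.h).
 */
static inline void example_clean_range(void *start, unsigned long size)
{
        unsigned long bytes = l1_cache_bytes();
        unsigned long addr = (unsigned long)start & ~(bytes - 1UL);
        unsigned long end = (unsigned long)start + size;

        for (; addr < end; addr += bytes)
                dcbst((void *)addr);    /* write each block back to memory */
        __asm__ __volatile__ ("sync" : : : "memory");   /* order the write-backs */
}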
#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_CACHE_H */