linux/arch/tile/include/asm/cache.h
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#ifndef _ASM_TILE_CACHE_H
#define _ASM_TILE_CACHE_H

#include <arch/chip.h>

/* bytes per L1 data cache line */
#define L1_CACHE_SHIFT          CHIP_L1D_LOG_LINE_SIZE()
#define L1_CACHE_BYTES          (1 << L1_CACHE_SHIFT)

/* bytes per L2 cache line */
#define L2_CACHE_SHIFT          CHIP_L2_LOG_LINE_SIZE()
#define L2_CACHE_BYTES          (1 << L2_CACHE_SHIFT)
#define L2_CACHE_ALIGN(x)       (((x)+(L2_CACHE_BYTES-1)) & -L2_CACHE_BYTES)
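
/*
 * Worked example (illustrative, not from the original source): if
 * CHIP_L2_LOG_LINE_SIZE() is 6, then L2_CACHE_BYTES is 64 and
 * -L2_CACHE_BYTES is the two's-complement mask ~63, so
 *
 *   L2_CACHE_ALIGN(100) == (100 + 63) & ~63 == 163 & ~63 == 128
 *
 * i.e. the macro rounds its argument up to the next L2 line boundary.
 */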

/*
 * TILEPro I/O is not always coherent (networking typically uses coherent
 * I/O, but PCI traffic does not) and setting ARCH_DMA_MINALIGN to the
 * L2 cacheline size helps ensure that kernel heap allocations are aligned.
 * TILE-Gx I/O is always coherent when used on hash-for-home pages.
 *
 * However, it's possible at runtime to request not to use hash-for-home
 * for the kernel heap, in which case the kernel will use flush-and-inval
 * to manage coherence.  As a result, we use L2_CACHE_BYTES for the
 * DMA minimum alignment to avoid false sharing in the kernel heap.
 */
#define ARCH_DMA_MINALIGN       L2_CACHE_BYTES
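
/*
 * Illustrative consequence (this assumes the generic kmalloc()
 * minimum-alignment behavior, which is not defined in this header):
 * with ARCH_DMA_MINALIGN set, a heap allocation used as a DMA buffer
 * starts on an L2 line boundary,
 *
 *   buf = kmalloc(len, GFP_KERNEL);   // L2-line-aligned start
 *
 * so a flush-and-inval over the buffer does not evict cache lines
 * that unrelated heap objects are still using.
 */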

/* use the cache line size for the L2, which is where it counts */
#define SMP_CACHE_BYTES_SHIFT   L2_CACHE_SHIFT
#define SMP_CACHE_BYTES         L2_CACHE_BYTES
#define INTERNODE_CACHE_SHIFT   L2_CACHE_SHIFT
#define INTERNODE_CACHE_BYTES   L2_CACHE_BYTES
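
/*
 * Illustrative use (assumes the generic <linux/cache.h> helpers built
 * on SMP_CACHE_BYTES; "struct foo" is hypothetical):
 *
 *   struct foo {
 *           long mostly_read_fields[4];
 *           spinlock_t lock ____cacheline_aligned_in_smp;
 *   };
 *
 * places the contended lock at the start of a fresh L2 line, away
 * from the read-mostly fields above it.
 */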

/* Group together read-mostly things to avoid cache false sharing */
#define __read_mostly __attribute__((__section__(".data..read_mostly")))
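
/*
 * Example (the variable name is hypothetical): a flag set rarely but
 * read on hot paths can be tagged
 *
 *   static int foo_enabled __read_mostly;
 *
 * so the linker groups it into .data..read_mostly with other rarely
 * written data, instead of next to frequently written variables.
 */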

/*
 * Originally we used small TLB pages for kernel data and grouped some
 * things together as ro-after-init, enforcing the property at the end
 * of initialization by making those pages read-only and non-coherent.
 * This allowed better cache utilization since cache inclusion did not
 * need to be maintained.  However, doing this requires an extra TLB
 * entry, which on balance is more of a performance hit than the
 * non-coherence is a performance gain, so we now just make "read
 * mostly" and "ro-after-init" be synonyms.  We keep the attribute
 * separate in case we change our minds at a future date.
 */
#define __ro_after_init __read_mostly
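
/*
 * Example (hypothetical variable): data written only during init and
 * treated as read-only afterwards; note that on tile this currently
 * expands to plain __read_mostly and enforces no write protection:
 *
 *   static unsigned long boot_cpu_mask __ro_after_init;
 */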

#endif /* _ASM_TILE_CACHE_H */