   1/* SPDX-License-Identifier: GPL-2.0-or-later */
   2/*
   3 * Copyright 2016,2017 IBM Corporation.
   4 */
   5#ifndef _ASM_POWERPC_XIVE_REGS_H
   6#define _ASM_POWERPC_XIVE_REGS_H
   7
   8/*
   9 * "magic" Event State Buffer (ESB) MMIO offsets.
  10 *
  11 * Each interrupt source has a 2-bit state machine called ESB
  12 * which can be controlled by MMIO. It's made of 2 bits, P and
  13 * Q. P indicates that an interrupt is pending (has been sent
  14 * to a queue and is waiting for an EOI). Q indicates that the
  15 * interrupt has been triggered while pending.
  16 *
  17 * This acts as a coalescing mechanism in order to guarantee
  18 * that a given interrupt only occurs at most once in a queue.
  19 *
  20 * When doing an EOI, the Q bit will indicate if the interrupt
  21 * needs to be re-triggered.
  22 *
  23 * The following offsets into the ESB MMIO allow to read or
  24 * manipulate the PQ bits. They must be used with an 8-bytes
  25 * load instruction. They all return the previous state of the
  26 * interrupt (atomically).
  27 *
  28 * Additionally, some ESB pages support doing an EOI via a
  29 * store at 0 and some ESBs support doing a trigger via a
  30 * separate trigger page.
  31 */
  32#define XIVE_ESB_STORE_EOI      0x400 /* Store */
  33#define XIVE_ESB_LOAD_EOI       0x000 /* Load */
  34#define XIVE_ESB_GET            0x800 /* Load */
  35#define XIVE_ESB_SET_PQ_00      0xc00 /* Load */
  36#define XIVE_ESB_SET_PQ_01      0xd00 /* Load */
  37#define XIVE_ESB_SET_PQ_10      0xe00 /* Load */
  38#define XIVE_ESB_SET_PQ_11      0xf00 /* Load */
  39
  40/*
  41 * Load-after-store ordering
  42 *
  43 * Adding this offset to the load address will enforce
  44 * load-after-store ordering. This is required to use StoreEOI.
  45 */
  46#define XIVE_ESB_LD_ST_MO       0x40 /* Load-after-store ordering */
  47
  48#define XIVE_ESB_VAL_P          0x2
  49#define XIVE_ESB_VAL_Q          0x1
  50#define XIVE_ESB_INVALID        0xFF
  51
  52/*
  53 * Thread Management (aka "TM") registers
  54 */
  55
  56/* TM register offsets */
  57#define TM_QW0_USER             0x000 /* All rings */
  58#define TM_QW1_OS               0x010 /* Ring 0..2 */
  59#define TM_QW2_HV_POOL          0x020 /* Ring 0..1 */
  60#define TM_QW3_HV_PHYS          0x030 /* Ring 0..1 */
  61
  62/* Byte offsets inside a QW             QW0 QW1 QW2 QW3 */
  63#define TM_NSR                  0x0  /*  +   +   -   +  */
  64#define TM_CPPR                 0x1  /*  -   +   -   +  */
  65#define TM_IPB                  0x2  /*  -   +   +   +  */
  66#define TM_LSMFB                0x3  /*  -   +   +   +  */
  67#define TM_ACK_CNT              0x4  /*  -   +   -   -  */
  68#define TM_INC                  0x5  /*  -   +   -   +  */
  69#define TM_AGE                  0x6  /*  -   +   -   +  */
  70#define TM_PIPR                 0x7  /*  -   +   -   +  */
  71
  72#define TM_WORD0                0x0
  73#define TM_WORD1                0x4
  74
  75/*
  76 * QW word 2 contains the valid bit at the top and other fields
  77 * depending on the QW.
  78 */
  79#define TM_WORD2                0x8
  80#define   TM_QW0W2_VU           PPC_BIT32(0)
  81#define   TM_QW0W2_LOGIC_SERV   PPC_BITMASK32(1,31) // XX 2,31 ?
  82#define   TM_QW1W2_VO           PPC_BIT32(0)
  83#define   TM_QW1W2_OS_CAM       PPC_BITMASK32(8,31)
  84#define   TM_QW2W2_VP           PPC_BIT32(0)
  85#define   TM_QW2W2_POOL_CAM     PPC_BITMASK32(8,31)
  86#define   TM_QW3W2_VT           PPC_BIT32(0)
  87#define   TM_QW3W2_LP           PPC_BIT32(6)
  88#define   TM_QW3W2_LE           PPC_BIT32(7)
  89#define   TM_QW3W2_T            PPC_BIT32(31)
  90
  91/*
  92 * In addition to normal loads to "peek" and writes (only when invalid)
  93 * using 4 and 8 bytes accesses, the above registers support these
  94 * "special" byte operations:
  95 *
  96 *   - Byte load from QW0[NSR] - User level NSR (EBB)
  97 *   - Byte store to QW0[NSR] - User level NSR (EBB)
  98 *   - Byte load/store to QW1[CPPR] and QW3[CPPR] - CPPR access
  99 *   - Byte load from QW3[TM_WORD2] - Read VT||00000||LP||LE on thrd 0
 100 *                                    otherwise VT||0000000
 101 *   - Byte store to QW3[TM_WORD2] - Set VT bit (and LP/LE if present)
 102 *
 103 * Then we have all these "special" CI ops at these offset that trigger
 104 * all sorts of side effects:
 105 */
 106#define TM_SPC_ACK_EBB          0x800   /* Load8 ack EBB to reg*/
 107#define TM_SPC_ACK_OS_REG       0x810   /* Load16 ack OS irq to reg */
 108#define TM_SPC_PUSH_USR_CTX     0x808   /* Store32 Push/Validate user context */
 109#define TM_SPC_PULL_USR_CTX     0x808   /* Load32 Pull/Invalidate user context */
 110#define TM_SPC_SET_OS_PENDING   0x812   /* Store8 Set OS irq pending bit */
 111#define TM_SPC_PULL_OS_CTX      0x818   /* Load32/Load64 Pull/Invalidate OS context to reg */
 112#define TM_SPC_PULL_POOL_CTX    0x828   /* Load32/Load64 Pull/Invalidate Pool context to reg*/
 113#define TM_SPC_ACK_HV_REG       0x830   /* Load16 ack HV irq to reg */
 114#define TM_SPC_PULL_USR_CTX_OL  0xc08   /* Store8 Pull/Inval usr ctx to odd line */
 115#define TM_SPC_ACK_OS_EL        0xc10   /* Store8 ack OS irq to even line */
 116#define TM_SPC_ACK_HV_POOL_EL   0xc20   /* Store8 ack HV evt pool to even line */
 117#define TM_SPC_ACK_HV_EL        0xc30   /* Store8 ack HV irq to even line */
 118/* XXX more... */
 119
 120/* NSR fields for the various QW ack types */
 121#define TM_QW0_NSR_EB           PPC_BIT8(0)
 122#define TM_QW1_NSR_EO           PPC_BIT8(0)
 123#define TM_QW3_NSR_HE           PPC_BITMASK8(0,1)
 124#define  TM_QW3_NSR_HE_NONE     0
 125#define  TM_QW3_NSR_HE_POOL     1
 126#define  TM_QW3_NSR_HE_PHYS     2
 127#define  TM_QW3_NSR_HE_LSI      3
 128#define TM_QW3_NSR_I            PPC_BIT8(2)
 129#define TM_QW3_NSR_GRP_LVL      PPC_BIT8(3,7)
 130
 131#endif /* _ASM_POWERPC_XIVE_REGS_H */
 132