linux/arch/tile/include/asm/irqflags.h
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#ifndef _ASM_TILE_IRQFLAGS_H
#define _ASM_TILE_IRQFLAGS_H

#include <arch/interrupts.h>
#include <arch/chip.h>

/*
 * The set of interrupts we want to mask when interrupts are nominally
 * disabled.  The remainder are effectively "NMI" interrupts from
 * the point of view of the generic Linux code.  Note that synchronous
 * interrupts (aka "non-queued") are not blocked by the mask in any case.
 */
#define LINUX_MASKABLE_INTERRUPTS \
        (~((_AC(1,ULL) << INT_PERF_COUNT) | (_AC(1,ULL) << INT_AUX_PERF_COUNT)))

#if CHIP_HAS_SPLIT_INTR_MASK()
/* The same macro, but for the two 32-bit SPRs separately. */
#define LINUX_MASKABLE_INTERRUPTS_LO (-1)
#define LINUX_MASKABLE_INTERRUPTS_HI \
        (~((1 << (INT_PERF_COUNT - 32)) | (1 << (INT_AUX_PERF_COUNT - 32))))
#endif
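
/*
 * For illustration only (not part of the original header): assuming
 * hypothetical interrupt numbers INT_PERF_COUNT == 33 and
 * INT_AUX_PERF_COUNT == 49, LINUX_MASKABLE_INTERRUPTS above is a 64-bit
 * value with every bit set except bits 33 and 49, so writing it to the
 * INTERRUPT_MASK_SET SPRs blocks every maskable interrupt while leaving
 * the two perf-count "NMIs" deliverable:
 *
 *      unsigned long long m = LINUX_MASKABLE_INTERRUPTS;
 *      // m == ~((1ULL << 33) | (1ULL << 49)), i.e. 0xfffdfffdffffffffULL
 *      // with these assumed interrupt numbers
 */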

#ifndef __ASSEMBLY__

/* NOTE: we can't include <linux/percpu.h> due to #include dependencies. */
#include <asm/percpu.h>
#include <arch/spr_def.h>

/*
 * Set and clear kernel interrupt masks.
 *
 * NOTE: __insn_mtspr() is a compiler builtin marked as a memory
 * clobber.  We rely on it being equivalent to a compiler barrier in
 * this code since arch_local_irq_save() and friends must act as
 * compiler barriers.  This compiler semantic is baked into enough
 * places that the compiler will maintain it going forward.
 */
#if CHIP_HAS_SPLIT_INTR_MASK()
#if INT_PERF_COUNT < 32 || INT_AUX_PERF_COUNT < 32 || INT_MEM_ERROR >= 32
# error Fix assumptions about which word various interrupts are in
#endif
#define interrupt_mask_set(n) do { \
        int __n = (n); \
        int __mask = 1 << (__n & 0x1f); \
        if (__n < 32) \
                __insn_mtspr(SPR_INTERRUPT_MASK_SET_K_0, __mask); \
        else \
                __insn_mtspr(SPR_INTERRUPT_MASK_SET_K_1, __mask); \
} while (0)
#define interrupt_mask_reset(n) do { \
        int __n = (n); \
        int __mask = 1 << (__n & 0x1f); \
        if (__n < 32) \
                __insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_0, __mask); \
        else \
                __insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_1, __mask); \
} while (0)
#define interrupt_mask_check(n) ({ \
        int __n = (n); \
        (((__n < 32) ? \
         __insn_mfspr(SPR_INTERRUPT_MASK_K_0) : \
         __insn_mfspr(SPR_INTERRUPT_MASK_K_1)) \
          >> (__n & 0x1f)) & 1; \
})
#define interrupt_mask_set_mask(mask) do { \
        unsigned long long __m = (mask); \
        __insn_mtspr(SPR_INTERRUPT_MASK_SET_K_0, (unsigned long)(__m)); \
        __insn_mtspr(SPR_INTERRUPT_MASK_SET_K_1, (unsigned long)(__m>>32)); \
} while (0)
#define interrupt_mask_reset_mask(mask) do { \
        unsigned long long __m = (mask); \
        __insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_0, (unsigned long)(__m)); \
        __insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_1, (unsigned long)(__m>>32)); \
} while (0)
#define interrupt_mask_save_mask() \
        (__insn_mfspr(SPR_INTERRUPT_MASK_SET_K_0) | \
         (((unsigned long long)__insn_mfspr(SPR_INTERRUPT_MASK_SET_K_1))<<32))
#define interrupt_mask_restore_mask(mask) do { \
        unsigned long long __m = (mask); \
        __insn_mtspr(SPR_INTERRUPT_MASK_K_0, (unsigned long)(__m)); \
        __insn_mtspr(SPR_INTERRUPT_MASK_K_1, (unsigned long)(__m>>32)); \
} while (0)
#else
#define interrupt_mask_set(n) \
        __insn_mtspr(SPR_INTERRUPT_MASK_SET_K, (1UL << (n)))
#define interrupt_mask_reset(n) \
        __insn_mtspr(SPR_INTERRUPT_MASK_RESET_K, (1UL << (n)))
#define interrupt_mask_check(n) \
        ((__insn_mfspr(SPR_INTERRUPT_MASK_K) >> (n)) & 1)
#define interrupt_mask_set_mask(mask) \
        __insn_mtspr(SPR_INTERRUPT_MASK_SET_K, (mask))
#define interrupt_mask_reset_mask(mask) \
        __insn_mtspr(SPR_INTERRUPT_MASK_RESET_K, (mask))
#define interrupt_mask_save_mask() \
        __insn_mfspr(SPR_INTERRUPT_MASK_K)
#define interrupt_mask_restore_mask(mask) \
        __insn_mtspr(SPR_INTERRUPT_MASK_K, (mask))
#endif
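
/*
 * Usage sketch (not part of the original header): the per-interrupt
 * helpers above take a raw interrupt number and pick the right SPR
 * (and, on split-mask chips, the right 32-bit half) automatically.
 * For a hypothetical interrupt number "irq":
 *
 *      interrupt_mask_set(irq);                // block delivery of "irq"
 *      BUG_ON(!interrupt_mask_check(irq));     // its mask bit is now set
 *      interrupt_mask_reset(irq);              // allow delivery again
 */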

/*
 * The set of interrupts we want active if irqs are enabled.
 * Note that in particular, the tile timer interrupt comes and goes
 * from this set, since we have no other way to turn off the timer.
 * Likewise, INTCTRL_K is removed and re-added during device
 * interrupts, as is the hardwall UDN_FIREWALL interrupt.
 * We use a low bit (MEM_ERROR) as our sentinel value and make sure it
 * is always claimed as an "active interrupt" so we can query that bit
 * to know our current state.
 */
DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
#define INITIAL_INTERRUPTS_ENABLED (1ULL << INT_MEM_ERROR)
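
/*
 * Illustration (not part of the original header): because INT_MEM_ERROR
 * is always claimed in interrupts_enabled_mask, its hardware mask bit
 * tracks the logical irq state, which is what arch_irqs_disabled()
 * below relies on:
 *
 *      arch_local_irq_disable();
 *      BUG_ON(!interrupt_mask_check(INT_MEM_ERROR));   // sentinel masked
 *      arch_local_irq_enable();
 *      BUG_ON(interrupt_mask_check(INT_MEM_ERROR));    // sentinel unmasked
 */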

#ifdef CONFIG_DEBUG_PREEMPT
/* Due to inclusion issues, we can't rely on <linux/smp.h> here. */
extern unsigned int debug_smp_processor_id(void);
# define smp_processor_id() debug_smp_processor_id()
#endif

/* Disable interrupts. */
#define arch_local_irq_disable() \
        interrupt_mask_set_mask(LINUX_MASKABLE_INTERRUPTS)

/* Disable all interrupts, including NMIs. */
#define arch_local_irq_disable_all() \
        interrupt_mask_set_mask(-1ULL)

/*
 * Read the per-cpu set of interrupts we want enabled when irqs are enabled.
 * We avoid the preemption warning here via raw_cpu_ptr since even
 * if irqs are already enabled, it's harmless to read the wrong cpu's
 * enabled mask.
 */
#define arch_local_irqs_enabled() \
        (*raw_cpu_ptr(&interrupts_enabled_mask))

/* Re-enable all maskable interrupts. */
#define arch_local_irq_enable() \
        interrupt_mask_reset_mask(arch_local_irqs_enabled())

/* Disable or enable interrupts based on flag argument. */
#define arch_local_irq_restore(disabled) do { \
        if (disabled) \
                arch_local_irq_disable(); \
        else \
                arch_local_irq_enable(); \
} while (0)

/* Return true if "flags" argument means interrupts are disabled. */
#define arch_irqs_disabled_flags(flags) ((flags) != 0)

/* Return true if interrupts are currently disabled. */
#define arch_irqs_disabled() interrupt_mask_check(INT_MEM_ERROR)

/* Save whether interrupts are currently disabled. */
#define arch_local_save_flags() arch_irqs_disabled()

/* Save whether interrupts are currently disabled, then disable them. */
#define arch_local_irq_save() ({ \
        unsigned long __flags = arch_local_save_flags(); \
        arch_local_irq_disable(); \
        __flags; })
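
/*
 * Typical usage sketch (not part of the original header): this is the
 * pattern the generic local_irq_save()/local_irq_restore() wrappers
 * reduce to on this architecture.  Note that the saved "flags" value is
 * simply 0 or 1 (enabled or disabled), per arch_local_save_flags() above:
 *
 *      unsigned long flags = arch_local_irq_save();
 *      ...critical section, no maskable interrupts can fire...
 *      arch_local_irq_restore(flags);
 */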

/* Prevent the given interrupt from being enabled next time we enable irqs. */
#define arch_local_irq_mask(interrupt) \
        this_cpu_and(interrupts_enabled_mask, ~(1ULL << (interrupt)))

/* Prevent the given interrupt from being enabled immediately. */
#define arch_local_irq_mask_now(interrupt) do { \
        arch_local_irq_mask(interrupt); \
        interrupt_mask_set(interrupt); \
} while (0)

/* Allow the given interrupt to be enabled next time we enable irqs. */
#define arch_local_irq_unmask(interrupt) \
        this_cpu_or(interrupts_enabled_mask, (1ULL << (interrupt)))

/* Allow the given interrupt to be enabled immediately, if !irqs_disabled. */
#define arch_local_irq_unmask_now(interrupt) do { \
        arch_local_irq_unmask(interrupt); \
        if (!irqs_disabled()) \
                interrupt_mask_reset(interrupt); \
} while (0)
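
/*
 * Sketch (not part of the original header) of how a per-cpu interrupt
 * source such as the tile timer is switched on and off with the
 * mask/unmask helpers above; INT_TILE_TIMER is used here purely as an
 * illustrative interrupt number:
 *
 *      arch_local_irq_unmask_now(INT_TILE_TIMER);      // start taking ticks
 *      ...
 *      arch_local_irq_mask_now(INT_TILE_TIMER);        // stop timer irqs
 */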

#else /* __ASSEMBLY__ */

/* We provide a somewhat more restricted set for assembly. */

#ifdef __tilegx__

#if INT_MEM_ERROR != 0
# error Fix IRQS_DISABLED() macro
#endif

/* Return 0 or 1 to indicate whether interrupts are currently disabled. */
#define IRQS_DISABLED(tmp)                                      \
        mfspr   tmp, SPR_INTERRUPT_MASK_K;                      \
        andi    tmp, tmp, 1

/* Load up a pointer to &interrupts_enabled_mask. */
#define GET_INTERRUPTS_ENABLED_MASK_PTR(reg)                    \
        moveli reg, hw2_last(interrupts_enabled_mask);          \
        shl16insli reg, reg, hw1(interrupts_enabled_mask);      \
        shl16insli reg, reg, hw0(interrupts_enabled_mask);      \
        add     reg, reg, tp

/* Disable interrupts. */
#define IRQ_DISABLE(tmp0, tmp1)                                 \
        moveli  tmp0, hw2_last(LINUX_MASKABLE_INTERRUPTS);      \
        shl16insli tmp0, tmp0, hw1(LINUX_MASKABLE_INTERRUPTS);  \
        shl16insli tmp0, tmp0, hw0(LINUX_MASKABLE_INTERRUPTS);  \
        mtspr   SPR_INTERRUPT_MASK_SET_K, tmp0

/* Disable all interrupts, including NMIs (used by NMI entry). */
#define IRQ_DISABLE_ALL(tmp)                                    \
        movei   tmp, -1;                                        \
        mtspr   SPR_INTERRUPT_MASK_SET_K, tmp

/* Enable interrupts. */
#define IRQ_ENABLE_LOAD(tmp0, tmp1)                             \
        GET_INTERRUPTS_ENABLED_MASK_PTR(tmp0);                  \
        ld      tmp0, tmp0
#define IRQ_ENABLE_APPLY(tmp0, tmp1)                            \
        mtspr   SPR_INTERRUPT_MASK_RESET_K, tmp0

#else /* !__tilegx__ */

/*
 * Return 0 or 1 to indicate whether interrupts are currently disabled.
 * Note that it's important that we use a bit from the "low" mask word,
 * since when we are enabling, that is the word we write first, so if we
 * are interrupted after only writing half of the mask, the interrupt
 * handler will correctly observe that we have interrupts enabled, and
 * will enable interrupts itself on return from the interrupt handler
 * (making the original code's write of the "high" mask word idempotent).
 */
#define IRQS_DISABLED(tmp)                                      \
        mfspr   tmp, SPR_INTERRUPT_MASK_K_0;                    \
        shri    tmp, tmp, INT_MEM_ERROR;                        \
        andi    tmp, tmp, 1

/* Load up a pointer to &interrupts_enabled_mask. */
#define GET_INTERRUPTS_ENABLED_MASK_PTR(reg)                    \
        moveli  reg, lo16(interrupts_enabled_mask);             \
        auli    reg, reg, ha16(interrupts_enabled_mask);        \
        add     reg, reg, tp

/* Disable interrupts. */
#define IRQ_DISABLE(tmp0, tmp1)                                 \
        {                                                       \
         movei  tmp0, LINUX_MASKABLE_INTERRUPTS_LO;             \
         moveli tmp1, lo16(LINUX_MASKABLE_INTERRUPTS_HI)        \
        };                                                      \
        {                                                       \
         mtspr  SPR_INTERRUPT_MASK_SET_K_0, tmp0;               \
         auli   tmp1, tmp1, ha16(LINUX_MASKABLE_INTERRUPTS_HI)  \
        };                                                      \
        mtspr   SPR_INTERRUPT_MASK_SET_K_1, tmp1

/* Disable all interrupts, including NMIs (used by NMI entry). */
#define IRQ_DISABLE_ALL(tmp)                                    \
        movei   tmp, -1;                                        \
        mtspr   SPR_INTERRUPT_MASK_SET_K_0, tmp;                \
        mtspr   SPR_INTERRUPT_MASK_SET_K_1, tmp

/* Enable interrupts. */
#define IRQ_ENABLE_LOAD(tmp0, tmp1)                             \
        GET_INTERRUPTS_ENABLED_MASK_PTR(tmp0);                  \
        {                                                       \
         lw     tmp0, tmp0;                                     \
         addi   tmp1, tmp0, 4                                   \
        };                                                      \
        lw      tmp1, tmp1
#define IRQ_ENABLE_APPLY(tmp0, tmp1)                            \
        mtspr   SPR_INTERRUPT_MASK_RESET_K_0, tmp0;             \
        mtspr   SPR_INTERRUPT_MASK_RESET_K_1, tmp1
#endif

#define IRQ_ENABLE(tmp0, tmp1)                                  \
        IRQ_ENABLE_LOAD(tmp0, tmp1);                            \
        IRQ_ENABLE_APPLY(tmp0, tmp1)

/*
 * Do the CPU's IRQ-state tracing from assembly code. We call a
 * C function, but almost everywhere we do, we don't mind clobbering
 * all the caller-saved registers.
 */
#ifdef CONFIG_TRACE_IRQFLAGS
# define TRACE_IRQS_ON  jal trace_hardirqs_on
# define TRACE_IRQS_OFF jal trace_hardirqs_off
#else
# define TRACE_IRQS_ON
# define TRACE_IRQS_OFF
#endif

#endif /* __ASSEMBLY__ */

#endif /* _ASM_TILE_IRQFLAGS_H */