linux/arch/powerpc/include/asm/interrupt.h
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_INTERRUPT_H
#define _ASM_POWERPC_INTERRUPT_H

/* BookE/4xx */
#define INTERRUPT_CRITICAL_INPUT  0x100

/* BookE */
#define INTERRUPT_DEBUG           0xd00
#ifdef CONFIG_BOOKE
#define INTERRUPT_PERFMON         0x260
#define INTERRUPT_DOORBELL        0x280
#endif

/* BookS/4xx/8xx */
#define INTERRUPT_MACHINE_CHECK   0x200

/* BookS/8xx */
#define INTERRUPT_SYSTEM_RESET    0x100

/* BookS */
#define INTERRUPT_DATA_SEGMENT    0x380
#define INTERRUPT_INST_SEGMENT    0x480
#define INTERRUPT_TRACE           0xd00
#define INTERRUPT_H_DATA_STORAGE  0xe00
#define INTERRUPT_HMI             0xe60
#define INTERRUPT_H_FAC_UNAVAIL   0xf80
#ifdef CONFIG_PPC_BOOK3S
#define INTERRUPT_DOORBELL        0xa00
#define INTERRUPT_PERFMON         0xf00
#define INTERRUPT_ALTIVEC_UNAVAIL 0xf20
#endif

/* BookE/BookS/4xx/8xx */
#define INTERRUPT_DATA_STORAGE    0x300
#define INTERRUPT_INST_STORAGE    0x400
#define INTERRUPT_EXTERNAL        0x500
#define INTERRUPT_ALIGNMENT       0x600
#define INTERRUPT_PROGRAM         0x700
#define INTERRUPT_SYSCALL         0xc00
#define INTERRUPT_TRACE           0xd00

/* BookE/BookS/44x */
#define INTERRUPT_FP_UNAVAIL      0x800

/* BookE/BookS/44x/8xx */
#define INTERRUPT_DECREMENTER     0x900

#ifndef INTERRUPT_PERFMON
#define INTERRUPT_PERFMON         0x0
#endif

/* 8xx */
#define INTERRUPT_SOFT_EMU_8xx          0x1000
#define INTERRUPT_INST_TLB_MISS_8xx     0x1100
#define INTERRUPT_DATA_TLB_MISS_8xx     0x1200
#define INTERRUPT_INST_TLB_ERROR_8xx    0x1300
#define INTERRUPT_DATA_TLB_ERROR_8xx    0x1400
#define INTERRUPT_DATA_BREAKPOINT_8xx   0x1c00
#define INTERRUPT_INST_BREAKPOINT_8xx   0x1d00

/* 603 */
#define INTERRUPT_INST_TLB_MISS_603             0x1000
#define INTERRUPT_DATA_LOAD_TLB_MISS_603        0x1100
#define INTERRUPT_DATA_STORE_TLB_MISS_603       0x1200

#ifndef __ASSEMBLY__

#include <linux/context_tracking.h>
#include <linux/hardirq.h>
#include <asm/cputime.h>
#include <asm/ftrace.h>
#include <asm/kprobes.h>
#include <asm/runlatch.h>

#ifdef CONFIG_PPC_BOOK3S_64
extern char __end_soft_masked[];
bool search_kernel_soft_mask_table(unsigned long addr);
unsigned long search_kernel_restart_table(unsigned long addr);

DECLARE_STATIC_KEY_FALSE(interrupt_exit_not_reentrant);

static inline bool is_implicit_soft_masked(struct pt_regs *regs)
{
        if (regs->msr & MSR_PR)
                return false;

        if (regs->nip >= (unsigned long)__end_soft_masked)
                return false;

        return search_kernel_soft_mask_table(regs->nip);
}

static inline void srr_regs_clobbered(void)
{
        local_paca->srr_valid = 0;
        local_paca->hsrr_valid = 0;
}
#else
static inline bool is_implicit_soft_masked(struct pt_regs *regs)
{
        return false;
}

static inline void srr_regs_clobbered(void)
{
}
#endif

static inline void nap_adjust_return(struct pt_regs *regs)
{
#ifdef CONFIG_PPC_970_NAP
        if (unlikely(test_thread_local_flags(_TLF_NAPPING))) {
                /* Can avoid a test-and-clear because NMIs do not call this */
                clear_thread_local_flags(_TLF_NAPPING);
                regs_set_return_ip(regs, (unsigned long)power4_idle_nap_return);
        }
#endif
}

struct interrupt_state {
};

static inline void booke_restore_dbcr0(void)
{
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
        unsigned long dbcr0 = current->thread.debug.dbcr0;

        if (IS_ENABLED(CONFIG_PPC32) && unlikely(dbcr0 & DBCR0_IDM)) {
                mtspr(SPRN_DBSR, -1);
                mtspr(SPRN_DBCR0, global_dbcr0[smp_processor_id()]);
        }
#endif
}

static inline void interrupt_enter_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
#ifdef CONFIG_PPC32
        if (!arch_irq_disabled_regs(regs))
                trace_hardirqs_off();

        if (user_mode(regs)) {
                kuep_lock();
                account_cpu_user_entry();
        } else {
                kuap_save_and_lock(regs);
        }
#endif

#ifdef CONFIG_PPC64
        if (irq_soft_mask_set_return(IRQS_ALL_DISABLED) == IRQS_ENABLED)
                trace_hardirqs_off();
        local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

        if (user_mode(regs)) {
                CT_WARN_ON(ct_state() != CONTEXT_USER);
                user_exit_irqoff();

                account_cpu_user_entry();
                account_stolen_time();
        } else {
                /*
                 * CT_WARN_ON comes here via program_check_exception,
                 * so avoid recursion.
                 */
                if (TRAP(regs) != INTERRUPT_PROGRAM) {
                        CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
                        BUG_ON(is_implicit_soft_masked(regs));
                }
#ifdef CONFIG_PPC_BOOK3S
                /* Move this under a debugging check */
                if (arch_irq_disabled_regs(regs))
                        BUG_ON(search_kernel_restart_table(regs->nip));
#endif
        }
        if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
                BUG_ON(!arch_irq_disabled_regs(regs) && !(regs->msr & MSR_EE));
#endif

        booke_restore_dbcr0();
}

/*
 * Care should be taken to note that interrupt_exit_prepare and
 * interrupt_async_exit_prepare do not necessarily return immediately to
 * regs context (e.g., if regs is usermode, we don't necessarily return to
 * user mode). Other interrupts might be taken between here and return,
 * context switch / preemption may occur in the exit path after this, or a
 * signal may be delivered, etc.
 *
 * The real interrupt exit code is platform specific, e.g.,
 * interrupt_exit_user_prepare / interrupt_exit_kernel_prepare for 64s.
 *
 * However interrupt_nmi_exit_prepare does return directly to regs, because
 * NMIs do not do "exit work" or replay soft-masked interrupts.
 */
static inline void interrupt_exit_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
}

static inline void interrupt_async_enter_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
#ifdef CONFIG_PPC_BOOK3S_64
        if (cpu_has_feature(CPU_FTR_CTRL) &&
            !test_thread_local_flags(_TLF_RUNLATCH))
                __ppc64_runlatch_on();
#endif

        interrupt_enter_prepare(regs, state);
        irq_enter();
}

static inline void interrupt_async_exit_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
        /*
         * Adjust at exit so the main handler sees the true NIA. This must
         * come before irq_exit() because irq_exit can enable interrupts, and
         * if another interrupt is taken before nap_adjust_return has run
         * here, then that interrupt would return directly to idle nap return.
         */
        nap_adjust_return(regs);

        irq_exit();
        interrupt_exit_prepare(regs, state);
}

struct interrupt_nmi_state {
#ifdef CONFIG_PPC64
        u8 irq_soft_mask;
        u8 irq_happened;
        u8 ftrace_enabled;
        u64 softe;
#endif
};

static inline bool nmi_disables_ftrace(struct pt_regs *regs)
{
        /* Allow DEC and PMI to be traced when they are soft-NMI */
        if (IS_ENABLED(CONFIG_PPC_BOOK3S_64)) {
                if (TRAP(regs) == INTERRUPT_DECREMENTER)
                        return false;
                if (TRAP(regs) == INTERRUPT_PERFMON)
                        return false;
        }
        if (IS_ENABLED(CONFIG_PPC_BOOK3E)) {
                if (TRAP(regs) == INTERRUPT_PERFMON)
                        return false;
        }

        return true;
}

static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
{
#ifdef CONFIG_PPC64
        state->irq_soft_mask = local_paca->irq_soft_mask;
        state->irq_happened = local_paca->irq_happened;
        state->softe = regs->softe;

        /*
         * Set IRQS_ALL_DISABLED unconditionally so irqs_disabled() does
         * the right thing, and set IRQ_HARD_DIS. We do not want to reconcile
         * because that goes through irq tracing which we don't want in NMI.
         */
        local_paca->irq_soft_mask = IRQS_ALL_DISABLED;
        local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

        if (is_implicit_soft_masked(regs)) {
                // Adjust regs->softe to match the implicit soft-mask, so
                // arch_irq_disabled_regs(regs) behaves as expected.
                regs->softe = IRQS_ALL_DISABLED;
        }
        if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
                BUG_ON(!arch_irq_disabled_regs(regs) && !(regs->msr & MSR_EE));

        /* Don't do any per-CPU operations until interrupt state is fixed */

        if (nmi_disables_ftrace(regs)) {
                state->ftrace_enabled = this_cpu_get_ftrace_enabled();
                this_cpu_set_ftrace_enabled(0);
        }
#endif

        /*
         * Do not use nmi_enter() for pseries hash guest taking a real-mode
         * NMI because not everything it touches is within the RMA limit.
         */
        if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) ||
                        !firmware_has_feature(FW_FEATURE_LPAR) ||
                        radix_enabled() || (mfmsr() & MSR_DR))
                nmi_enter();
}

static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
{
        if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) ||
                        !firmware_has_feature(FW_FEATURE_LPAR) ||
                        radix_enabled() || (mfmsr() & MSR_DR))
                nmi_exit();

        /*
         * nmi does not call nap_adjust_return because nmi should not create
         * new work to do (must use irq_work for that).
         */

#ifdef CONFIG_PPC64
#ifdef CONFIG_PPC_BOOK3S
        if (arch_irq_disabled_regs(regs)) {
                unsigned long rst = search_kernel_restart_table(regs->nip);
                if (rst)
                        regs_set_return_ip(regs, rst);
        }
#endif

        if (nmi_disables_ftrace(regs))
                this_cpu_set_ftrace_enabled(state->ftrace_enabled);

        /* Check we didn't change the pending interrupt mask. */
        WARN_ON_ONCE((state->irq_happened | PACA_IRQ_HARD_DIS) != local_paca->irq_happened);
        regs->softe = state->softe;
        local_paca->irq_happened = state->irq_happened;
        local_paca->irq_soft_mask = state->irq_soft_mask;
#endif
}

/*
 * Don't use noinstr here like x86, but rather add NOKPROBE_SYMBOL to each
 * function definition. The reason for this is the noinstr section is placed
 * after the main text section, i.e., very far away from the interrupt entry
 * asm. That creates problems with fitting linker stubs when building large
 * kernels.
 */
#define interrupt_handler __visible noinline notrace __no_kcsan __no_sanitize_address

/**
 * DECLARE_INTERRUPT_HANDLER_RAW - Declare raw interrupt handler function
 * @func:       Function name of the entry point
 * @returns:    Returns a value back to asm caller
 */
#define DECLARE_INTERRUPT_HANDLER_RAW(func)                             \
        __visible long func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_RAW - Define raw interrupt handler function
 * @func:       Function name of the entry point
 * @returns:    Returns a value back to asm caller
 *
 * @func is called from ASM entry code.
 *
 * This is a plain function which does no tracing, reconciling, etc.
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 *
 * Raw interrupt handlers must not enable or disable interrupts, or
 * schedule. Tracing and instrumentation (ftrace, lockdep, etc) are not
 * advisable either; they may be possible in a pinch, but at the least
 * the trace will look odd.
 *
 * A raw handler may call one of the other interrupt handler functions
 * to be converted into that interrupt context without these restrictions.
 *
 * On PPC64, _RAW handlers may return with fast_interrupt_return.
 *
 * Specific handlers may have additional restrictions.
 */
#define DEFINE_INTERRUPT_HANDLER_RAW(func)                              \
static __always_inline long ____##func(struct pt_regs *regs);           \
                                                                        \
interrupt_handler long func(struct pt_regs *regs)                       \
{                                                                       \
        long ret;                                                       \
                                                                        \
        ret = ____##func (regs);                                        \
                                                                        \
        return ret;                                                     \
}                                                                       \
NOKPROBE_SYMBOL(func);                                                  \
                                                                        \
static __always_inline long ____##func(struct pt_regs *regs)

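/*
 * Minimal usage sketch (illustrative only; "do_example_raw" is a
 * hypothetical name, not a handler declared in this file). The macro
 * supplies the prototype, the body is appended in braces, and the return
 * value is handed back to the asm entry code:
 *
 *      DEFINE_INTERRUPT_HANDLER_RAW(do_example_raw)
 *      {
 *              // Minimal, non-instrumented work only; see the
 *              // restrictions documented above.
 *              return 0;
 *      }
 */
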
/**
 * DECLARE_INTERRUPT_HANDLER - Declare synchronous interrupt handler function
 * @func:       Function name of the entry point
 */
#define DECLARE_INTERRUPT_HANDLER(func)                                 \
        __visible void func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER - Define synchronous interrupt handler function
 * @func:       Function name of the entry point
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER(func)                                  \
static __always_inline void ____##func(struct pt_regs *regs);           \
                                                                        \
interrupt_handler void func(struct pt_regs *regs)                       \
{                                                                       \
        struct interrupt_state state;                                   \
                                                                        \
        interrupt_enter_prepare(regs, &state);                          \
                                                                        \
        ____##func (regs);                                              \
                                                                        \
        interrupt_exit_prepare(regs, &state);                           \
}                                                                       \
NOKPROBE_SYMBOL(func);                                                  \
                                                                        \
static __always_inline void ____##func(struct pt_regs *regs)

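/*
 * Usage sketch (hypothetical name): the generated wrapper brackets the
 * body with interrupt_enter_prepare()/interrupt_exit_prepare(), so the
 * body runs with reconciled irq and context-tracking state:
 *
 *      DEFINE_INTERRUPT_HANDLER(do_example_exception)
 *      {
 *              if (user_mode(regs))
 *                      _exception(SIGTRAP, regs, TRAP_UNK, regs->nip);
 *      }
 */
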
/**
 * DECLARE_INTERRUPT_HANDLER_RET - Declare synchronous interrupt handler function
 * @func:       Function name of the entry point
 * @returns:    Returns a value back to asm caller
 */
#define DECLARE_INTERRUPT_HANDLER_RET(func)                             \
        __visible long func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_RET - Define synchronous interrupt handler function
 * @func:       Function name of the entry point
 * @returns:    Returns a value back to asm caller
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER_RET(func)                              \
static __always_inline long ____##func(struct pt_regs *regs);           \
                                                                        \
interrupt_handler long func(struct pt_regs *regs)                       \
{                                                                       \
        struct interrupt_state state;                                   \
        long ret;                                                       \
                                                                        \
        interrupt_enter_prepare(regs, &state);                          \
                                                                        \
        ret = ____##func (regs);                                        \
                                                                        \
        interrupt_exit_prepare(regs, &state);                           \
                                                                        \
        return ret;                                                     \
}                                                                       \
NOKPROBE_SYMBOL(func);                                                  \
                                                                        \
static __always_inline long ____##func(struct pt_regs *regs)

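/*
 * Usage sketch (hypothetical name): identical to DEFINE_INTERRUPT_HANDLER
 * except that the body's return value is passed back to the asm caller
 * after exit preparation runs:
 *
 *      DEFINE_INTERRUPT_HANDLER_RET(do_example_fault)
 *      {
 *              return 0;       // meaning of the value is handler-specific
 *      }
 */
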
/**
 * DECLARE_INTERRUPT_HANDLER_ASYNC - Declare asynchronous interrupt handler function
 * @func:       Function name of the entry point
 */
#define DECLARE_INTERRUPT_HANDLER_ASYNC(func)                           \
        __visible void func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_ASYNC - Define asynchronous interrupt handler function
 * @func:       Function name of the entry point
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER_ASYNC(func)                            \
static __always_inline void ____##func(struct pt_regs *regs);           \
                                                                        \
interrupt_handler void func(struct pt_regs *regs)                       \
{                                                                       \
        struct interrupt_state state;                                   \
                                                                        \
        interrupt_async_enter_prepare(regs, &state);                    \
                                                                        \
        ____##func (regs);                                              \
                                                                        \
        interrupt_async_exit_prepare(regs, &state);                     \
}                                                                       \
NOKPROBE_SYMBOL(func);                                                  \
                                                                        \
static __always_inline void ____##func(struct pt_regs *regs)

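/*
 * Usage sketch (hypothetical names): async handlers additionally get
 * irq_enter()/irq_exit() around the body, as timer_interrupt and do_IRQ
 * below do, so the body runs in hardirq context and must not sleep:
 *
 *      DEFINE_INTERRUPT_HANDLER_ASYNC(do_example_irq)
 *      {
 *              // handle_example_device_irq() is a made-up helper.
 *              handle_example_device_irq(regs);
 *      }
 */
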
/**
 * DECLARE_INTERRUPT_HANDLER_NMI - Declare NMI interrupt handler function
 * @func:       Function name of the entry point
 * @returns:    Returns a value back to asm caller
 */
#define DECLARE_INTERRUPT_HANDLER_NMI(func)                             \
        __visible long func(struct pt_regs *regs)

/**
 * DEFINE_INTERRUPT_HANDLER_NMI - Define NMI interrupt handler function
 * @func:       Function name of the entry point
 * @returns:    Returns a value back to asm caller
 *
 * @func is called from ASM entry code.
 *
 * The macro is written so it acts as function definition. Append the
 * body with a pair of curly brackets.
 */
#define DEFINE_INTERRUPT_HANDLER_NMI(func)                              \
static __always_inline long ____##func(struct pt_regs *regs);           \
                                                                        \
interrupt_handler long func(struct pt_regs *regs)                       \
{                                                                       \
        struct interrupt_nmi_state state;                               \
        long ret;                                                       \
                                                                        \
        interrupt_nmi_enter_prepare(regs, &state);                      \
                                                                        \
        ret = ____##func (regs);                                        \
                                                                        \
        interrupt_nmi_exit_prepare(regs, &state);                       \
                                                                        \
        return ret;                                                     \
}                                                                       \
NOKPROBE_SYMBOL(func);                                                  \
                                                                        \
static __always_inline long ____##func(struct pt_regs *regs)

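/*
 * Usage sketch (hypothetical name): the body runs between
 * interrupt_nmi_enter_prepare() and interrupt_nmi_exit_prepare(), so it
 * must not create new work directly (use irq_work for that, as noted in
 * interrupt_nmi_exit_prepare above):
 *
 *      DEFINE_INTERRUPT_HANDLER_NMI(do_example_nmi)
 *      {
 *              // Return value goes back to the asm caller.
 *              return 0;
 *      }
 */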

/* Interrupt handlers */
/* kernel/traps.c */
DECLARE_INTERRUPT_HANDLER_NMI(system_reset_exception);
#ifdef CONFIG_PPC_BOOK3S_64
DECLARE_INTERRUPT_HANDLER_ASYNC(machine_check_exception);
#else
DECLARE_INTERRUPT_HANDLER_NMI(machine_check_exception);
#endif
DECLARE_INTERRUPT_HANDLER(SMIException);
DECLARE_INTERRUPT_HANDLER(handle_hmi_exception);
DECLARE_INTERRUPT_HANDLER(unknown_exception);
DECLARE_INTERRUPT_HANDLER_ASYNC(unknown_async_exception);
DECLARE_INTERRUPT_HANDLER_NMI(unknown_nmi_exception);
DECLARE_INTERRUPT_HANDLER(instruction_breakpoint_exception);
DECLARE_INTERRUPT_HANDLER(RunModeException);
DECLARE_INTERRUPT_HANDLER(single_step_exception);
DECLARE_INTERRUPT_HANDLER(program_check_exception);
DECLARE_INTERRUPT_HANDLER(emulation_assist_interrupt);
DECLARE_INTERRUPT_HANDLER(alignment_exception);
DECLARE_INTERRUPT_HANDLER(StackOverflow);
DECLARE_INTERRUPT_HANDLER(stack_overflow_exception);
DECLARE_INTERRUPT_HANDLER(kernel_fp_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(altivec_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(vsx_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(facility_unavailable_exception);
DECLARE_INTERRUPT_HANDLER(fp_unavailable_tm);
DECLARE_INTERRUPT_HANDLER(altivec_unavailable_tm);
DECLARE_INTERRUPT_HANDLER(vsx_unavailable_tm);
DECLARE_INTERRUPT_HANDLER_NMI(performance_monitor_exception_nmi);
DECLARE_INTERRUPT_HANDLER_ASYNC(performance_monitor_exception_async);
DECLARE_INTERRUPT_HANDLER_RAW(performance_monitor_exception);
DECLARE_INTERRUPT_HANDLER(DebugException);
DECLARE_INTERRUPT_HANDLER(altivec_assist_exception);
DECLARE_INTERRUPT_HANDLER(CacheLockingException);
DECLARE_INTERRUPT_HANDLER(SPEFloatingPointException);
DECLARE_INTERRUPT_HANDLER(SPEFloatingPointRoundException);
DECLARE_INTERRUPT_HANDLER_NMI(WatchdogException);
DECLARE_INTERRUPT_HANDLER(kernel_bad_stack);

/* slb.c */
DECLARE_INTERRUPT_HANDLER_RAW(do_slb_fault);
DECLARE_INTERRUPT_HANDLER(do_bad_slb_fault);

/* hash_utils.c */
DECLARE_INTERRUPT_HANDLER_RAW(do_hash_fault);

/* fault.c */
DECLARE_INTERRUPT_HANDLER(do_page_fault);
DECLARE_INTERRUPT_HANDLER(do_bad_page_fault_segv);

/* process.c */
DECLARE_INTERRUPT_HANDLER(do_break);

/* time.c */
DECLARE_INTERRUPT_HANDLER_ASYNC(timer_interrupt);

/* mce.c */
DECLARE_INTERRUPT_HANDLER_NMI(machine_check_early);
DECLARE_INTERRUPT_HANDLER_NMI(hmi_exception_realmode);

DECLARE_INTERRUPT_HANDLER_ASYNC(TAUException);

/* irq.c */
DECLARE_INTERRUPT_HANDLER_ASYNC(do_IRQ);

void __noreturn unrecoverable_exception(struct pt_regs *regs);

void replay_system_reset(void);
void replay_soft_interrupts(void);

static inline void interrupt_cond_local_irq_enable(struct pt_regs *regs)
{
        if (!arch_irq_disabled_regs(regs))
                local_irq_enable();
}

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_INTERRUPT_H */