linux/arch/powerpc/kernel/interrupt.c
// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/context_tracking.h>
#include <linux/err.h>
#include <linux/compat.h>
#include <linux/sched/debug.h> /* for show_regs */

#include <asm/kup.h>
#include <asm/cputime.h>
#include <asm/hw_irq.h>
#include <asm/interrupt.h>
#include <asm/kprobes.h>
#include <asm/paca.h>
#include <asm/ptrace.h>
#include <asm/reg.h>
#include <asm/signal.h>
#include <asm/switch_to.h>
#include <asm/syscall.h>
#include <asm/time.h>
#include <asm/tm.h>
#include <asm/unistd.h>

#if defined(CONFIG_PPC_ADV_DEBUG_REGS) && defined(CONFIG_PPC32)
unsigned long global_dbcr0[NR_CPUS];
#endif

typedef long (*syscall_fn)(long, long, long, long, long, long);

#ifdef CONFIG_PPC_BOOK3S_64
DEFINE_STATIC_KEY_FALSE(interrupt_exit_not_reentrant);
static inline bool exit_must_hard_disable(void)
{
        return static_branch_unlikely(&interrupt_exit_not_reentrant);
}
#else
static inline bool exit_must_hard_disable(void)
{
        return true;
}
#endif

/*
 * local irqs must be disabled. Returns false if the caller must re-enable
 * them, check for new work, and try again.
 *
 * This should be called with local irqs disabled, but if they were previously
 * enabled when the interrupt handler returns (indicating a process-context /
 * synchronous interrupt) then irqs_enabled should be true.
 *
 * If restartable is true then EE/RI can be left on because interrupts are
 * handled with a restart sequence.
 */
static notrace __always_inline bool prep_irq_for_enabled_exit(bool restartable)
{
        /* This must be done with RI=1 because tracing may touch vmaps */
        trace_hardirqs_on();

        if (exit_must_hard_disable() || !restartable)
                __hard_EE_RI_disable();

#ifdef CONFIG_PPC64
        /* This pattern matches prep_irq_for_idle */
        if (unlikely(lazy_irq_pending_nocheck())) {
                if (exit_must_hard_disable() || !restartable) {
                        local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
                        __hard_RI_enable();
                }
                trace_hardirqs_off();

                return false;
        }
#endif
        return true;
}

/* Has to run notrace because it is entered not completely "reconciled" */
notrace long system_call_exception(long r3, long r4, long r5,
                                   long r6, long r7, long r8,
                                   unsigned long r0, struct pt_regs *regs)
{
        syscall_fn f;

        kuap_lock();

        regs->orig_gpr3 = r3;

        if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
                BUG_ON(irq_soft_mask_return() != IRQS_ALL_DISABLED);

        trace_hardirqs_off(); /* finish reconciling */

        CT_WARN_ON(ct_state() == CONTEXT_KERNEL);
        user_exit_irqoff();

        BUG_ON(regs_is_unrecoverable(regs));
        BUG_ON(!(regs->msr & MSR_PR));
        BUG_ON(arch_irq_disabled_regs(regs));

#ifdef CONFIG_PPC_PKEY
        if (mmu_has_feature(MMU_FTR_PKEY)) {
                unsigned long amr, iamr;
                bool flush_needed = false;
                /*
                 * When entering from userspace, the AMR/IAMR usually differ
                 * from the kernel default values, so don't bother comparing.
                 */
                amr = mfspr(SPRN_AMR);
                iamr = mfspr(SPRN_IAMR);
                regs->amr  = amr;
                regs->iamr = iamr;
                if (mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) {
                        mtspr(SPRN_AMR, AMR_KUAP_BLOCKED);
                        flush_needed = true;
                }
                if (mmu_has_feature(MMU_FTR_BOOK3S_KUEP)) {
                        mtspr(SPRN_IAMR, AMR_KUEP_BLOCKED);
                        flush_needed = true;
                }
                if (flush_needed)
                        isync();
        } else
#endif
                kuap_assert_locked();

        booke_restore_dbcr0();

        account_cpu_user_entry();

        account_stolen_time();

        /*
         * This is not required for the syscall exit path, but makes the
         * stack frame look nicer. If this was initialised in the first stack
         * frame, or if the unwinder was taught the first stack frame always
         * returns to user with IRQS_ENABLED, this store could be avoided!
         */
        irq_soft_mask_regs_set_state(regs, IRQS_ENABLED);

        /*
         * If a system call is made with TM active, set _TIF_RESTOREALL to
         * prevent RFSCV being used to return to userspace, because the POWER9
         * TM implementation has problems with this instruction returning to
         * transactional state. Final register values are not relevant because
         * the transaction will be aborted upon return anyway. Or, in the case
         * of an unsupported scv vector raising SIGILL, the return state does
         * not matter much because it's an edge case.
         */
        if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
                        unlikely(MSR_TM_TRANSACTIONAL(regs->msr)))
                set_bits(_TIF_RESTOREALL, &current_thread_info()->flags);

        /*
         * If the system call was made with a transaction active, doom it and
         * return without performing the system call, unless it was an
         * unsupported scv vector, in which case it's treated like an illegal
         * instruction.
         */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        if (unlikely(MSR_TM_TRANSACTIONAL(regs->msr)) &&
            !trap_is_unsupported_scv(regs)) {
                /* Enable TM in the kernel, and disable EE (for scv) */
                hard_irq_disable();
                mtmsr(mfmsr() | MSR_TM);

                /* tabort, this dooms the transaction, nothing else */
                asm volatile(".long 0x7c00071d | ((%0) << 16)"
                                :: "r"(TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT));

                /*
                 * Userspace will never see the return value. Execution will
                 * resume after the tbegin. of the aborted transaction with the
                 * checkpointed register state. A context switch could occur,
                 * or a signal could be delivered to the process, before the
                 * doomed transaction context is resumed, but that should all
                 * be handled as expected.
                 */
                return -ENOSYS;
        }
#endif // CONFIG_PPC_TRANSACTIONAL_MEM

        local_irq_enable();

        if (unlikely(read_thread_flags() & _TIF_SYSCALL_DOTRACE)) {
                if (unlikely(trap_is_unsupported_scv(regs))) {
                        /* Unsupported scv vector */
                        _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
                        return regs->gpr[3];
                }
                /*
                 * We use the return value of do_syscall_trace_enter() as the
                 * syscall number. If the syscall was rejected for any reason,
                 * do_syscall_trace_enter() returns an invalid syscall number
                 * and the test against NR_syscalls below will fail; in that
                 * case the return value to use is already in regs->gpr[3].
                 */
                r0 = do_syscall_trace_enter(regs);
                if (unlikely(r0 >= NR_syscalls))
                        return regs->gpr[3];
                r3 = regs->gpr[3];
                r4 = regs->gpr[4];
                r5 = regs->gpr[5];
                r6 = regs->gpr[6];
                r7 = regs->gpr[7];
                r8 = regs->gpr[8];

        } else if (unlikely(r0 >= NR_syscalls)) {
                if (unlikely(trap_is_unsupported_scv(regs))) {
                        /* Unsupported scv vector */
                        _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
                        return regs->gpr[3];
                }
                return -ENOSYS;
        }

        /* May be faster to do array_index_nospec? */
        barrier_nospec();

        if (unlikely(is_compat_task())) {
                f = (void *)compat_sys_call_table[r0];

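                /* Compat syscalls pass 32-bit arguments; clear the upper halves */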
                r3 &= 0x00000000ffffffffULL;
                r4 &= 0x00000000ffffffffULL;
                r5 &= 0x00000000ffffffffULL;
                r6 &= 0x00000000ffffffffULL;
                r7 &= 0x00000000ffffffffULL;
                r8 &= 0x00000000ffffffffULL;

        } else {
                f = (void *)sys_call_table[r0];
        }

        return f(r3, r4, r5, r6, r7, r8);
}

static notrace void booke_load_dbcr0(void)
{
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
        unsigned long dbcr0 = current->thread.debug.dbcr0;

        /*
         * Check to see if the dbcr0 register is set up to debug.
         * Use the internal debug mode bit to do this.
         */
        if (likely(!(dbcr0 & DBCR0_IDM)))
                return;

        mtmsr(mfmsr() & ~MSR_DE);
        if (IS_ENABLED(CONFIG_PPC32)) {
                isync();
                global_dbcr0[smp_processor_id()] = mfspr(SPRN_DBCR0);
        }
        mtspr(SPRN_DBCR0, dbcr0);
        mtspr(SPRN_DBSR, -1);
#endif
}

static void check_return_regs_valid(struct pt_regs *regs)
{
#ifdef CONFIG_PPC_BOOK3S_64
        unsigned long trap, srr0, srr1;
        static bool warned;
        u8 *validp;
        char *h;

        if (trap_is_scv(regs))
                return;

        trap = TRAP(regs);
        // External interrupts taken in HV mode use HSRRs, like 0xea0
        if (cpu_has_feature(CPU_FTR_HVMODE) && trap == INTERRUPT_EXTERNAL)
                trap = 0xea0;

        switch (trap) {
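        /* These interrupt types are delivered via HSRR0/HSRR1 rather than SRR0/SRR1 */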
        case 0x980:
        case INTERRUPT_H_DATA_STORAGE:
        case 0xe20:
        case 0xe40:
        case INTERRUPT_HMI:
        case 0xe80:
        case 0xea0:
        case INTERRUPT_H_FAC_UNAVAIL:
        case 0x1200:
        case 0x1500:
        case 0x1600:
        case 0x1800:
                validp = &local_paca->hsrr_valid;
                if (!*validp)
                        return;

                srr0 = mfspr(SPRN_HSRR0);
                srr1 = mfspr(SPRN_HSRR1);
                h = "H";

                break;
        default:
                validp = &local_paca->srr_valid;
                if (!*validp)
                        return;

                srr0 = mfspr(SPRN_SRR0);
                srr1 = mfspr(SPRN_SRR1);
                h = "";
                break;
        }

        if (srr0 == regs->nip && srr1 == regs->msr)
                return;

        /*
         * An NMI / soft-NMI interrupt may have come in after we found
         * srr_valid and before the SRRs are loaded. The interrupt then
         * comes in and clobbers SRRs and clears srr_valid. Then we load
         * the SRRs here and test them above and find they don't match.
         *
         * Test validity again after that, to catch such false positives.
         *
         * This test in general will have some window for false negatives
         * and may not catch and fix all such cases if an NMI comes in
         * later and clobbers SRRs without clearing srr_valid, but hopefully
         * such things will get caught most of the time, statistically often
         * enough to get a warning out.
         */
        barrier();

        if (!*validp)
                return;

        if (!warned) {
                warned = true;
                printk("%sSRR0 was: %lx should be: %lx\n", h, srr0, regs->nip);
                printk("%sSRR1 was: %lx should be: %lx\n", h, srr1, regs->msr);
                show_regs(regs);
        }

        *validp = 0; /* fixup */
#endif
}

static notrace unsigned long
interrupt_exit_user_prepare_main(unsigned long ret, struct pt_regs *regs)
{
        unsigned long ti_flags;

again:
        ti_flags = read_thread_flags();
        while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) {
                local_irq_enable();
                if (ti_flags & _TIF_NEED_RESCHED) {
                        schedule();
                } else {
                        /*
                         * SIGPENDING must restore signal handler function
                         * argument GPRs, and some non-volatiles (e.g., r1).
                         * Restore all for now. This could be made lighter.
                         */
                        if (ti_flags & _TIF_SIGPENDING)
                                ret |= _TIF_RESTOREALL;
                        do_notify_resume(regs, ti_flags);
                }
                local_irq_disable();
                ti_flags = read_thread_flags();
        }

        if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && IS_ENABLED(CONFIG_PPC_FPU)) {
                if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
                                unlikely((ti_flags & _TIF_RESTORE_TM))) {
                        restore_tm_state(regs);
                } else {
                        unsigned long mathflags = MSR_FP;

                        if (cpu_has_feature(CPU_FTR_VSX))
                                mathflags |= MSR_VEC | MSR_VSX;
                        else if (cpu_has_feature(CPU_FTR_ALTIVEC))
                                mathflags |= MSR_VEC;

                        /*
                         * If userspace MSR has all available FP bits set,
                         * then they are live and no need to restore. If not,
                         * it means the regs were given up and restore_math
                         * may decide to restore them (to avoid taking an FP
                         * fault).
                         */
                        if ((regs->msr & mathflags) != mathflags)
                                restore_math(regs);
                }
        }

        check_return_regs_valid(regs);

        user_enter_irqoff();
        if (!prep_irq_for_enabled_exit(true)) {
                user_exit_irqoff();
                local_irq_enable();
                local_irq_disable();
                goto again;
        }

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        local_paca->tm_scratch = regs->msr;
#endif

        booke_load_dbcr0();

        account_cpu_user_exit();

        /* Restore user access locks last */
        kuap_user_restore(regs);

        return ret;
}

/*
 * This should be called after a syscall returns, with r3 the return value
 * from the syscall. If this function returns non-zero, the system call
 * exit assembly should additionally load all GPRs, as well as CTR and XER,
 * from the interrupt frame.
 *
 * The function graph tracer cannot trace the return side of this function,
 * because RI=0 and soft mask state is "unreconciled", so it is marked notrace.
 */
notrace unsigned long syscall_exit_prepare(unsigned long r3,
                                           struct pt_regs *regs,
                                           long scv)
{
        unsigned long ti_flags;
        unsigned long ret = 0;
        bool is_not_scv = !IS_ENABLED(CONFIG_PPC_BOOK3S_64) || !scv;

        CT_WARN_ON(ct_state() == CONTEXT_USER);

        kuap_assert_locked();

        regs->result = r3;

        /* Check whether the syscall is issued inside a restartable sequence */
        rseq_syscall(regs);

        ti_flags = read_thread_flags();

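        /*
         * With the sc instruction, a failed syscall is reported by setting
         * CR0[SO] and returning the error as a positive value; scv returns
         * a negative errno directly, so skip the conversion for scv.
         */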
        if (unlikely(r3 >= (unsigned long)-MAX_ERRNO) && is_not_scv) {
                if (likely(!(ti_flags & (_TIF_NOERROR | _TIF_RESTOREALL)))) {
                        r3 = -r3;
                        regs->ccr |= 0x10000000; /* Set SO bit in CR */
                }
        }

        if (unlikely(ti_flags & _TIF_PERSYSCALL_MASK)) {
                if (ti_flags & _TIF_RESTOREALL)
                        ret = _TIF_RESTOREALL;
                else
                        regs->gpr[3] = r3;
                clear_bits(_TIF_PERSYSCALL_MASK, &current_thread_info()->flags);
        } else {
                regs->gpr[3] = r3;
        }

        if (unlikely(ti_flags & _TIF_SYSCALL_DOTRACE)) {
                do_syscall_trace_leave(regs);
                ret |= _TIF_RESTOREALL;
        }

        local_irq_disable();
        ret = interrupt_exit_user_prepare_main(ret, regs);

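        /* Stash the result so syscall_exit_restart() can reuse it if the exit is restarted */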
#ifdef CONFIG_PPC64
        regs->exit_result = ret;
#endif

        return ret;
}

#ifdef CONFIG_PPC64
notrace unsigned long syscall_exit_restart(unsigned long r3, struct pt_regs *regs)
{
        /*
         * This is called when detecting a soft-pending interrupt as well as
         * an alternate-return interrupt. So we can't just have the alternate
         * return path clear SRR1[EE] and set PACA_IRQ_HARD_DIS (unless
         * the soft-pending case were to fix things up as well). RI might be
         * disabled, in which case it gets re-enabled by __hard_irq_disable().
         */
        __hard_irq_disable();
        local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

#ifdef CONFIG_PPC_BOOK3S_64
        set_kuap(AMR_KUAP_BLOCKED);
#endif

        trace_hardirqs_off();
        user_exit_irqoff();
        account_cpu_user_entry();

        BUG_ON(!user_mode(regs));

        regs->exit_result = interrupt_exit_user_prepare_main(regs->exit_result, regs);

        return regs->exit_result;
}
#endif

notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs)
{
        unsigned long ret;

        BUG_ON(regs_is_unrecoverable(regs));
        BUG_ON(arch_irq_disabled_regs(regs));
        CT_WARN_ON(ct_state() == CONTEXT_USER);

        /*
         * We don't need to restore AMR on the way back to userspace for KUAP.
         * AMR can only have been unlocked if we interrupted the kernel.
         */
        kuap_assert_locked();

        local_irq_disable();

        ret = interrupt_exit_user_prepare_main(0, regs);

#ifdef CONFIG_PPC64
        regs->exit_result = ret;
#endif

        return ret;
}

void preempt_schedule_irq(void);

notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs)
{
        unsigned long flags;
        unsigned long ret = 0;
        unsigned long kuap;
        bool stack_store = read_thread_flags() & _TIF_EMULATE_STACK_STORE;

        if (regs_is_unrecoverable(regs))
                unrecoverable_exception(regs);
        /*
         * CT_WARN_ON comes here via program_check_exception,
         * so avoid recursion.
         */
        if (TRAP(regs) != INTERRUPT_PROGRAM)
                CT_WARN_ON(ct_state() == CONTEXT_USER);

        kuap = kuap_get_and_assert_locked();

        local_irq_save(flags);

        if (!arch_irq_disabled_regs(regs)) {
                /* Returning to a kernel context with local irqs enabled. */
                WARN_ON_ONCE(!(regs->msr & MSR_EE));
again:
                if (IS_ENABLED(CONFIG_PREEMPT)) {
                        /* Return to preemptible kernel context */
                        if (unlikely(read_thread_flags() & _TIF_NEED_RESCHED)) {
                                if (preempt_count() == 0)
                                        preempt_schedule_irq();
                        }
                }

                check_return_regs_valid(regs);

                /*
                 * Stack store exit can't be restarted because the interrupt
                 * stack frame might have been clobbered.
                 */
                if (!prep_irq_for_enabled_exit(unlikely(stack_store))) {
                        /*
                         * Replay pending soft-masked interrupts now. Don't
                         * just local_irq_enable(); local_irq_disable(); because
                         * if we are returning from an asynchronous interrupt
                         * here, another one might hit after irqs are enabled,
                         * and it would exit via this same path allowing
                         * another to fire, and so on unbounded.
                         */
                        hard_irq_disable();
                        replay_soft_interrupts();
                        /* Took an interrupt, may have more exit work to do. */
                        goto again;
                }
#ifdef CONFIG_PPC64
                /*
                 * An interrupt may clear MSR[EE] and set this concurrently,
                 * but it will be marked pending and the exit will be retried.
                 * This leaves a racy window where MSR[EE]=0 and HARD_DIS is
                 * clear, until interrupt_exit_kernel_restart() calls
                 * hard_irq_disable(), which will set HARD_DIS again.
                 */
                local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;

        } else {
                check_return_regs_valid(regs);

                if (unlikely(stack_store))
                        __hard_EE_RI_disable();
                /*
                 * Returning to a kernel context with local irqs disabled.
                 * Here, if EE was enabled in the interrupted context, enable
                 * it on return as well. A problem exists here where a soft
                 * masked interrupt may have cleared MSR[EE] and set HARD_DIS
                 * here, and it will still exist on return to the caller. This
                 * will be resolved by the masked interrupt firing again.
                 */
                if (regs->msr & MSR_EE)
                        local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
#endif /* CONFIG_PPC64 */
        }

        if (unlikely(stack_store)) {
                clear_bits(_TIF_EMULATE_STACK_STORE, &current_thread_info()->flags);
                ret = 1;
        }

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        local_paca->tm_scratch = regs->msr;
#endif

        /*
         * 64s does not want to mfspr(SPRN_AMR) here, because this comes after
         * mtmsr, which would cause Read-After-Write stalls. Hence, take the
         * AMR value from the check above.
         */
        kuap_kernel_restore(regs, kuap);

        return ret;
}

#ifdef CONFIG_PPC64
notrace unsigned long interrupt_exit_user_restart(struct pt_regs *regs)
{
        __hard_irq_disable();
        local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

#ifdef CONFIG_PPC_BOOK3S_64
        set_kuap(AMR_KUAP_BLOCKED);
#endif

        trace_hardirqs_off();
        user_exit_irqoff();
        account_cpu_user_entry();

        BUG_ON(!user_mode(regs));

        regs->exit_result |= interrupt_exit_user_prepare(regs);

        return regs->exit_result;
}

/*
 * No real need to return a value here because the stack store case does not
 * get restarted.
 */
notrace unsigned long interrupt_exit_kernel_restart(struct pt_regs *regs)
{
        __hard_irq_disable();
        local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

#ifdef CONFIG_PPC_BOOK3S_64
        set_kuap(AMR_KUAP_BLOCKED);
#endif

        if (regs->softe == IRQS_ENABLED)
                trace_hardirqs_off();

        BUG_ON(user_mode(regs));

        return interrupt_exit_kernel_prepare(regs);
}
#endif
