linux/arch/x86/kernel/traps.c
/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 *  Pentium III FXSR, SSE support
 *      Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * Handle hardware traps and faults.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/context_tracking.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kgdb.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kexec.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/io.h>

#ifdef CONFIG_EISA
#include <linux/ioport.h>
#include <linux/eisa.h>
#endif

#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif

#include <asm/kmemcheck.h>
#include <asm/stacktrace.h>
#include <asm/processor.h>
#include <asm/debugreg.h>
#include <linux/atomic.h>
#include <asm/ftrace.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/i387.h>
#include <asm/fpu-internal.h>
#include <asm/mce.h>
#include <asm/fixmap.h>
#include <asm/mach_traps.h>
#include <asm/alternative.h>

#ifdef CONFIG_X86_64
#include <asm/x86_init.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>

/* No need to be aligned, but done to keep all IDTs defined the same way. */
gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
#else
#include <asm/processor-flags.h>
#include <asm/setup.h>

asmlinkage int system_call(void);
#endif

/* Must be page-aligned because the real IDT is used in a fixmap. */
gate_desc idt_table[NR_VECTORS] __page_aligned_bss;

DECLARE_BITMAP(used_vectors, NR_VECTORS);
EXPORT_SYMBOL_GPL(used_vectors);

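/*
 * Re-enable interrupts only if the interrupted context had them enabled
 * (IF set in the saved flags).  The preempt_ variants additionally keep
 * preemption disabled across the window; they are used by handlers that
 * run on an IST stack, where we must not schedule.
 */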
static inline void conditional_sti(struct pt_regs *regs)
{
        if (regs->flags & X86_EFLAGS_IF)
                local_irq_enable();
}

static inline void preempt_conditional_sti(struct pt_regs *regs)
{
        preempt_count_inc();
        if (regs->flags & X86_EFLAGS_IF)
                local_irq_enable();
}

static inline void conditional_cli(struct pt_regs *regs)
{
        if (regs->flags & X86_EFLAGS_IF)
                local_irq_disable();
}

static inline void preempt_conditional_cli(struct pt_regs *regs)
{
        if (regs->flags & X86_EFLAGS_IF)
                local_irq_disable();
        preempt_count_dec();
}

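/*
 * Returns 0 when the trap has been handled here (vm86 handled it, or it
 * was a kernel-mode fault that was fixed up or died), and -1 when the
 * caller should go on and deliver a signal to the task.
 */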
static nokprobe_inline int
do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
                  struct pt_regs *regs, long error_code)
{
#ifdef CONFIG_X86_32
        if (regs->flags & X86_VM_MASK) {
                /*
                 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
                 * On nmi (interrupt 2), do_trap should not be called.
                 */
                if (trapnr < X86_TRAP_UD) {
                        if (!handle_vm86_trap((struct kernel_vm86_regs *) regs,
                                                error_code, trapnr))
                                return 0;
                }
                return -1;
        }
#endif
        if (!user_mode(regs)) {
                if (!fixup_exception(regs)) {
                        tsk->thread.error_code = error_code;
                        tsk->thread.trap_nr = trapnr;
                        die(str, regs, error_code);
                }
                return 0;
        }

        return -1;
}

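/*
 * Build the siginfo for traps that carry useful fault information (divide
 * error, invalid opcode, alignment check).  For everything else return
 * SEND_SIG_PRIV, which makes force_sig_info() deliver a plain
 * kernel-generated signal.
 */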
static siginfo_t *fill_trap_info(struct pt_regs *regs, int signr, int trapnr,
                                siginfo_t *info)
{
        unsigned long siaddr;
        int sicode;

        switch (trapnr) {
        default:
                return SEND_SIG_PRIV;

        case X86_TRAP_DE:
                sicode = FPE_INTDIV;
                siaddr = uprobe_get_trap_addr(regs);
                break;
        case X86_TRAP_UD:
                sicode = ILL_ILLOPN;
                siaddr = uprobe_get_trap_addr(regs);
                break;
        case X86_TRAP_AC:
                sicode = BUS_ADRALN;
                siaddr = 0;
                break;
        }

        info->si_signo = signr;
        info->si_errno = 0;
        info->si_code = sicode;
        info->si_addr = (void __user *)siaddr;
        return info;
}

static void
do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
        long error_code, siginfo_t *info)
{
        struct task_struct *tsk = current;

        if (!do_trap_no_signal(tsk, trapnr, str, regs, error_code))
                return;
        /*
         * We want error_code and trap_nr set for userspace faults and
         * kernelspace faults which result in die(), but not
         * kernelspace faults which are fixed up.  die() gives the
         * process no chance to handle the signal and notice the
         * kernel fault information, so that won't result in polluting
         * the information about previously queued, but not yet
         * delivered, faults.  See also do_general_protection below.
         */
        tsk->thread.error_code = error_code;
        tsk->thread.trap_nr = trapnr;

#ifdef CONFIG_X86_64
        if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
            printk_ratelimit()) {
                pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
                        tsk->comm, tsk->pid, str,
                        regs->ip, regs->sp, error_code);
                print_vma_addr(" in ", regs->ip);
                pr_cont("\n");
        }
#endif

        force_sig_info(signr, info ?: SEND_SIG_PRIV, tsk);
}
NOKPROBE_SYMBOL(do_trap);

static void do_error_trap(struct pt_regs *regs, long error_code, char *str,
                          unsigned long trapnr, int signr)
{
        enum ctx_state prev_state = exception_enter();
        siginfo_t info;

        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) !=
                        NOTIFY_STOP) {
                conditional_sti(regs);
                do_trap(trapnr, signr, str, regs, error_code,
                        fill_trap_info(regs, signr, trapnr, &info));
        }

        exception_exit(prev_state);
}

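/*
 * DO_ERROR() generates a handler named do_<name> that simply funnels the
 * trap into do_error_trap() with the given vector, signal and message.
 */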
#define DO_ERROR(trapnr, signr, str, name)                              \
dotraplinkage void do_##name(struct pt_regs *regs, long error_code)     \
{                                                                       \
        do_error_trap(regs, error_code, str, trapnr, signr);            \
}

DO_ERROR(X86_TRAP_DE,     SIGFPE,  "divide error",              divide_error)
DO_ERROR(X86_TRAP_OF,     SIGSEGV, "overflow",                  overflow)
DO_ERROR(X86_TRAP_BR,     SIGSEGV, "bounds",                    bounds)
DO_ERROR(X86_TRAP_UD,     SIGILL,  "invalid opcode",            invalid_op)
DO_ERROR(X86_TRAP_OLD_MF, SIGFPE,  "coprocessor segment overrun",coprocessor_segment_overrun)
DO_ERROR(X86_TRAP_TS,     SIGSEGV, "invalid TSS",               invalid_TSS)
DO_ERROR(X86_TRAP_NP,     SIGBUS,  "segment not present",       segment_not_present)
#ifdef CONFIG_X86_32
DO_ERROR(X86_TRAP_SS,     SIGBUS,  "stack segment",             stack_segment)
#endif
DO_ERROR(X86_TRAP_AC,     SIGBUS,  "alignment check",           alignment_check)

#ifdef CONFIG_X86_64
/* Runs on IST stack */
dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code)
{
        enum ctx_state prev_state;

        prev_state = exception_enter();
        if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
                       X86_TRAP_SS, SIGBUS) != NOTIFY_STOP) {
                preempt_conditional_sti(regs);
                do_trap(X86_TRAP_SS, SIGBUS, "stack segment", regs, error_code, NULL);
                preempt_conditional_cli(regs);
        }
        exception_exit(prev_state);
}

dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
{
        static const char str[] = "double fault";
        struct task_struct *tsk = current;

        exception_enter();
        /* Return not checked because a double fault cannot be ignored */
        notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);

        tsk->thread.error_code = error_code;
        tsk->thread.trap_nr = X86_TRAP_DF;

#ifdef CONFIG_DOUBLEFAULT
        df_debug(regs, error_code);
#endif
        /*
         * This is always a kernel trap and never fixable (and thus must
         * never return).
         */
        for (;;)
                die(str, regs, error_code);
}
#endif

dotraplinkage void
do_general_protection(struct pt_regs *regs, long error_code)
{
        struct task_struct *tsk;
        enum ctx_state prev_state;

        prev_state = exception_enter();
        conditional_sti(regs);

#ifdef CONFIG_X86_32
        if (regs->flags & X86_VM_MASK) {
                local_irq_enable();
                handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
                goto exit;
        }
#endif

        tsk = current;
        if (!user_mode(regs)) {
                if (fixup_exception(regs))
                        goto exit;

                tsk->thread.error_code = error_code;
                tsk->thread.trap_nr = X86_TRAP_GP;
                if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
                               X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
                        die("general protection fault", regs, error_code);
                goto exit;
        }

        tsk->thread.error_code = error_code;
        tsk->thread.trap_nr = X86_TRAP_GP;

        if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
                        printk_ratelimit()) {
                pr_info("%s[%d] general protection ip:%lx sp:%lx error:%lx",
                        tsk->comm, task_pid_nr(tsk),
                        regs->ip, regs->sp, error_code);
                print_vma_addr(" in ", regs->ip);
                pr_cont("\n");
        }

        force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
exit:
        exception_exit(prev_state);
}
NOKPROBE_SYMBOL(do_general_protection);

/* May run on IST stack. */
dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
{
        enum ctx_state prev_state;

#ifdef CONFIG_DYNAMIC_FTRACE
        /*
         * ftrace must be first, everything else may cause a recursive crash.
         * See note by declaration of modifying_ftrace_code in ftrace.c
         */
        if (unlikely(atomic_read(&modifying_ftrace_code)) &&
            ftrace_int3_handler(regs))
                return;
#endif
        if (poke_int3_handler(regs))
                return;

        prev_state = exception_enter();
#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
        if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
                                SIGTRAP) == NOTIFY_STOP)
                goto exit;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */

#ifdef CONFIG_KPROBES
        if (kprobe_int3_handler(regs))
                goto exit;
#endif

        if (notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
                        SIGTRAP) == NOTIFY_STOP)
                goto exit;

        /*
         * Let others (NMI) know that the debug stack is in use
         * as we may switch to the interrupt stack.
         */
        debug_stack_usage_inc();
        preempt_conditional_sti(regs);
        do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
        preempt_conditional_cli(regs);
        debug_stack_usage_dec();
exit:
        exception_exit(prev_state);
}
NOKPROBE_SYMBOL(do_int3);

#ifdef CONFIG_X86_64
/*
 * Help handler running on IST stack to switch back to user stack
 * for scheduling or signal handling. The actual stack switch is done in
 * entry.S
 */
asmlinkage __visible struct pt_regs *sync_regs(struct pt_regs *eregs)
{
        struct pt_regs *regs = eregs;
        /* Did already sync */
        if (eregs == (struct pt_regs *)eregs->sp)
                ;
        /* Exception from user space */
        else if (user_mode(eregs))
                regs = task_pt_regs(current);
        /*
         * Exception from kernel and interrupts are enabled. Move to
         * kernel process stack.
         */
        else if (eregs->flags & X86_EFLAGS_IF)
                regs = (struct pt_regs *)(eregs->sp -= sizeof(struct pt_regs));
        if (eregs != regs)
                *regs = *eregs;
        return regs;
}
NOKPROBE_SYMBOL(sync_regs);
#endif

/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel. Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses. Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space. Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code)
 *
 * May run on IST stack.
 */
dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
{
        struct task_struct *tsk = current;
        enum ctx_state prev_state;
        int user_icebp = 0;
        unsigned long dr6;
        int si_code;

        prev_state = exception_enter();

        get_debugreg(dr6, 6);

        /* Filter out all the reserved bits which are preset to 1 */
        dr6 &= ~DR6_RESERVED;

        /*
         * If dr6 has no reason to give us about the origin of this trap,
         * then it's very likely the result of an icebp/int01 trap.
         * User wants a sigtrap for that.
         */
        if (!dr6 && user_mode(regs))
                user_icebp = 1;

        /* Catch kmemcheck conditions first of all! */
        if ((dr6 & DR_STEP) && kmemcheck_trap(regs))
                goto exit;

        /* DR6 may or may not be cleared by the CPU */
        set_debugreg(0, 6);

        /*
         * The processor cleared BTF, so don't mark that we need it set.
         */
        clear_tsk_thread_flag(tsk, TIF_BLOCKSTEP);

        /* Store the virtualized DR6 value */
        tsk->thread.debugreg6 = dr6;

#ifdef CONFIG_KPROBES
        if (kprobe_debug_handler(regs))
                goto exit;
#endif

        if (notify_die(DIE_DEBUG, "debug", regs, (long)&dr6, error_code,
                                                        SIGTRAP) == NOTIFY_STOP)
                goto exit;

        /*
         * Let others (NMI) know that the debug stack is in use
         * as we may switch to the interrupt stack.
         */
        debug_stack_usage_inc();

        /* It's safe to allow irq's after DR6 has been saved */
        preempt_conditional_sti(regs);

        if (regs->flags & X86_VM_MASK) {
                handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
                                        X86_TRAP_DB);
                preempt_conditional_cli(regs);
                debug_stack_usage_dec();
                goto exit;
        }

        /*
         * Single-stepping through system calls: ignore any exceptions in
         * kernel space, but re-enable TF when returning to user mode.
         *
         * We already checked v86 mode above, so we can check for kernel mode
         * by just checking the CPL of CS.
         */
        if ((dr6 & DR_STEP) && !user_mode(regs)) {
                tsk->thread.debugreg6 &= ~DR_STEP;
                set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
                regs->flags &= ~X86_EFLAGS_TF;
        }
        si_code = get_si_code(tsk->thread.debugreg6);
        if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
                send_sigtrap(tsk, regs, error_code, si_code);
        preempt_conditional_cli(regs);
        debug_stack_usage_dec();

exit:
        exception_exit(prev_state);
}
NOKPROBE_SYMBOL(do_debug);

/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
static void math_error(struct pt_regs *regs, int error_code, int trapnr)
{
        struct task_struct *task = current;
        siginfo_t info;
        unsigned short err;
        char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
                                                "simd exception";

        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP)
                return;
        conditional_sti(regs);

        if (!user_mode_vm(regs)) {
                if (!fixup_exception(regs)) {
                        task->thread.error_code = error_code;
                        task->thread.trap_nr = trapnr;
                        die(str, regs, error_code);
                }
                return;
        }

        /*
         * Save the info for the exception handler and clear the error.
         */
        save_init_fpu(task);
        task->thread.trap_nr = trapnr;
        task->thread.error_code = error_code;
        info.si_signo = SIGFPE;
        info.si_errno = 0;
        info.si_addr = (void __user *)uprobe_get_trap_addr(regs);
        if (trapnr == X86_TRAP_MF) {
                unsigned short cwd, swd;
                /*
                 * (~cwd & swd) will mask out exceptions that are not set to unmasked
                 * status.  0x3f is the exception bits in these regs, 0x200 is the
                 * C1 reg you need in case of a stack fault, 0x040 is the stack
                 * fault bit.  We should only be taking one exception at a time,
                 * so if this combination doesn't produce any single exception,
                 * then we have a bad program that isn't synchronizing its FPU usage
                 * and it will suffer the consequences since we won't be able to
                 * fully reproduce the context of the exception
                 */
                cwd = get_fpu_cwd(task);
                swd = get_fpu_swd(task);

                err = swd & ~cwd;
        } else {
                /*
                 * The SIMD FPU exceptions are handled a little differently, as there
                 * is only a single status/control register.  Thus, to determine which
                 * unmasked exception was caught we must mask the exception mask bits
                 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
                 */
                unsigned short mxcsr = get_fpu_mxcsr(task);
                err = ~(mxcsr >> 7) & mxcsr;
        }

        if (err & 0x001) {      /* Invalid op */
                /*
                 * swd & 0x240 == 0x040: Stack Underflow
                 * swd & 0x240 == 0x240: Stack Overflow
                 * User must clear the SF bit (0x40) if set
                 */
                info.si_code = FPE_FLTINV;
        } else if (err & 0x004) { /* Divide by Zero */
                info.si_code = FPE_FLTDIV;
        } else if (err & 0x008) { /* Overflow */
                info.si_code = FPE_FLTOVF;
        } else if (err & 0x012) { /* Denormal, Underflow */
                info.si_code = FPE_FLTUND;
        } else if (err & 0x020) { /* Precision */
                info.si_code = FPE_FLTRES;
        } else {
                /*
                 * If we're using IRQ 13, or supposedly even some trap
                 * X86_TRAP_MF implementations, it's possible
                 * we get a spurious trap, which is not an error.
                 */
                return;
        }
        force_sig_info(SIGFPE, &info, task);
}

dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
{
        enum ctx_state prev_state;

        prev_state = exception_enter();
        math_error(regs, error_code, X86_TRAP_MF);
        exception_exit(prev_state);
}

dotraplinkage void
do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
{
        enum ctx_state prev_state;

        prev_state = exception_enter();
        math_error(regs, error_code, X86_TRAP_XF);
        exception_exit(prev_state);
}

dotraplinkage void
do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
{
        conditional_sti(regs);
#if 0
        /* No need to warn about this any longer. */
        pr_info("Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
#endif
}

asmlinkage __visible void __attribute__((weak)) smp_thermal_interrupt(void)
{
}

asmlinkage __visible void __attribute__((weak)) smp_threshold_interrupt(void)
{
}

/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 *
 * Must be called with kernel preemption disabled (eg with local
 * interrupts disabled, as in the case of do_device_not_available).
 */
void math_state_restore(void)
{
        struct task_struct *tsk = current;

        if (!tsk_used_math(tsk)) {
                local_irq_enable();
                /*
                 * does a slab alloc which can sleep
                 */
                if (init_fpu(tsk)) {
                        /*
                         * ran out of memory!
                         */
                        do_group_exit(SIGKILL);
                        return;
                }
                local_irq_disable();
        }

        __thread_fpu_begin(tsk);

        /*
         * Paranoid restore. send a SIGSEGV if we fail to restore the state.
         */
        if (unlikely(restore_fpu_checking(tsk))) {
                drop_init_fpu(tsk);
                force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
                return;
        }

        tsk->thread.fpu_counter++;
}
EXPORT_SYMBOL_GPL(math_state_restore);

dotraplinkage void
do_device_not_available(struct pt_regs *regs, long error_code)
{
        enum ctx_state prev_state;

        prev_state = exception_enter();
        BUG_ON(use_eager_fpu());

#ifdef CONFIG_MATH_EMULATION
        if (read_cr0() & X86_CR0_EM) {
                struct math_emu_info info = { };

                conditional_sti(regs);

                info.regs = regs;
                math_emulate(&info);
                exception_exit(prev_state);
                return;
        }
#endif
        math_state_restore(); /* interrupts still off */
#ifdef CONFIG_X86_32
        conditional_sti(regs);
#endif
        exception_exit(prev_state);
}
NOKPROBE_SYMBOL(do_device_not_available);

#ifdef CONFIG_X86_32
dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
{
        siginfo_t info;
        enum ctx_state prev_state;

        prev_state = exception_enter();
        local_irq_enable();

        info.si_signo = SIGILL;
        info.si_errno = 0;
        info.si_code = ILL_BADSTK;
        info.si_addr = NULL;
        if (notify_die(DIE_TRAP, "iret exception", regs, error_code,
                        X86_TRAP_IRET, SIGILL) != NOTIFY_STOP) {
                do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, error_code,
                        &info);
        }
        exception_exit(prev_state);
}
#endif

/* Set of traps needed for early debugging. */
void __init early_trap_init(void)
{
        set_intr_gate_ist(X86_TRAP_DB, &debug, DEBUG_STACK);
        /* int3 can be called from all */
        set_system_intr_gate_ist(X86_TRAP_BP, &int3, DEBUG_STACK);
#ifdef CONFIG_X86_32
        set_intr_gate(X86_TRAP_PF, page_fault);
#endif
        load_idt(&idt_descr);
}

void __init early_trap_pf_init(void)
{
#ifdef CONFIG_X86_64
        set_intr_gate(X86_TRAP_PF, page_fault);
#endif
}

void __init trap_init(void)
{
        int i;

#ifdef CONFIG_EISA
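        /*
         * Probe for an EISA machine: the BIOS places the 4-byte "EISA"
         * signature at physical address 0x0FFFD9.
         */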
        void __iomem *p = early_ioremap(0x0FFFD9, 4);

        if (readl(p) == 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24))
                EISA_bus = 1;
        early_iounmap(p, 4);
#endif

        set_intr_gate(X86_TRAP_DE, divide_error);
        set_intr_gate_ist(X86_TRAP_NMI, &nmi, NMI_STACK);
        /* int4 can be called from all */
        set_system_intr_gate(X86_TRAP_OF, &overflow);
        set_intr_gate(X86_TRAP_BR, bounds);
        set_intr_gate(X86_TRAP_UD, invalid_op);
        set_intr_gate(X86_TRAP_NM, device_not_available);
#ifdef CONFIG_X86_32
        set_task_gate(X86_TRAP_DF, GDT_ENTRY_DOUBLEFAULT_TSS);
#else
        set_intr_gate_ist(X86_TRAP_DF, &double_fault, DOUBLEFAULT_STACK);
#endif
        set_intr_gate(X86_TRAP_OLD_MF, coprocessor_segment_overrun);
        set_intr_gate(X86_TRAP_TS, invalid_TSS);
        set_intr_gate(X86_TRAP_NP, segment_not_present);
        set_intr_gate_ist(X86_TRAP_SS, &stack_segment, STACKFAULT_STACK);
        set_intr_gate(X86_TRAP_GP, general_protection);
        set_intr_gate(X86_TRAP_SPURIOUS, spurious_interrupt_bug);
        set_intr_gate(X86_TRAP_MF, coprocessor_error);
        set_intr_gate(X86_TRAP_AC, alignment_check);
#ifdef CONFIG_X86_MCE
        set_intr_gate_ist(X86_TRAP_MC, &machine_check, MCE_STACK);
#endif
        set_intr_gate(X86_TRAP_XF, simd_coprocessor_error);

        /* Reserve all the builtin and the syscall vector: */
        for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
                set_bit(i, used_vectors);

#ifdef CONFIG_IA32_EMULATION
        set_system_intr_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
        set_bit(IA32_SYSCALL_VECTOR, used_vectors);
#endif

#ifdef CONFIG_X86_32
        set_system_trap_gate(SYSCALL_VECTOR, &system_call);
        set_bit(SYSCALL_VECTOR, used_vectors);
#endif

        /*
         * Set the IDT descriptor to a fixed read-only location, so that the
         * "sidt" instruction will not leak the location of the kernel, and
         * to defend the IDT against arbitrary memory write vulnerabilities.
         * It will be reloaded in cpu_init().
         */
        __set_fixmap(FIX_RO_IDT, __pa_symbol(idt_table), PAGE_KERNEL_RO);
        idt_descr.address = fix_to_virt(FIX_RO_IDT);

        /*
         * Should be a barrier for any external CPU state:
         */
        cpu_init();

        x86_init.irqs.trap_init();

#ifdef CONFIG_X86_64
        memcpy(&debug_idt_table, &idt_table, IDT_ENTRIES * 16);
        set_nmi_gate(X86_TRAP_DB, &debug);
        set_nmi_gate(X86_TRAP_BP, &int3);
#endif
}