linux/arch/ia64/kernel/traps.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Architecture-specific trap handling.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 05/12/00 grao <goutham.rao@intel.com> : added isr in siginfo for SIGFPE
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/tty.h>
#include <linux/vt_kern.h>              /* For unblank_screen() */
#include <linux/export.h>
#include <linux/extable.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/delay.h>                /* for ssleep() */
#include <linux/kdebug.h>
#include <linux/uaccess.h>

#include <asm/fpswa.h>
#include <asm/intrinsics.h>
#include <asm/processor.h>
#include <asm/exception.h>
#include <asm/setup.h>

fpswa_interface_t *fpswa_interface;
EXPORT_SYMBOL(fpswa_interface);

void __init
trap_init (void)
{
        if (ia64_boot_param->fpswa)
                /* FPSWA fixup: make the interface pointer a kernel virtual address: */
                fpswa_interface = __va(ia64_boot_param->fpswa);
}

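/*
 * Print an oops, taint the kernel and kill the current task.  The
 * lock_owner/lock_owner_depth bookkeeping lets die() nest on the same CPU
 * (e.g. when another fault is taken while already oopsing) without
 * deadlocking on die.lock; beyond two nested levels only a short notice is
 * printed.
 */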
int
die (const char *str, struct pt_regs *regs, long err)
{
        static struct {
                spinlock_t lock;
                u32 lock_owner;
                int lock_owner_depth;
        } die = {
                .lock = __SPIN_LOCK_UNLOCKED(die.lock),
                .lock_owner = -1,
                .lock_owner_depth = 0
        };
        static int die_counter;
        int cpu = get_cpu();

        if (die.lock_owner != cpu) {
                console_verbose();
                spin_lock_irq(&die.lock);
                die.lock_owner = cpu;
                die.lock_owner_depth = 0;
                bust_spinlocks(1);
        }
        put_cpu();

        if (++die.lock_owner_depth < 3) {
                printk("%s[%d]: %s %ld [%d]\n",
                current->comm, task_pid_nr(current), str, err, ++die_counter);
                if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV)
                    != NOTIFY_STOP)
                        show_regs(regs);
                else
                        regs = NULL;
        } else
                printk(KERN_ERR "Recursive die() failure, output suppressed\n");

        bust_spinlocks(0);
        die.lock_owner = -1;
        add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
        spin_unlock_irq(&die.lock);

        if (!regs)
                return 1;

        if (panic_on_oops)
                panic("Fatal exception");

        do_exit(SIGSEGV);
        return 0;
}

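/* Oops via die() only if the fault happened in kernel mode; returns 0 for user mode. */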
int
die_if_kernel (char *str, struct pt_regs *regs, long err)
{
        if (!user_mode(regs))
                return die(str, regs, err);
        return 0;
}

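/*
 * Handle a Break Instruction fault: map the break immediate to a signal and
 * si_code for the faulting task.  Break 0 and out-of-range break numbers in
 * kernel mode are fatal via die_if_kernel(); debugger-range breaks are first
 * offered to the die notifier chain.
 */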
void
__kprobes ia64_bad_break (unsigned long break_num, struct pt_regs *regs)
{
        siginfo_t siginfo;
        int sig, code;

        /* SIGILL, SIGFPE, SIGSEGV, and SIGBUS want these fields initialized: */
        clear_siginfo(&siginfo);
        siginfo.si_addr = (void __user *) (regs->cr_iip + ia64_psr(regs)->ri);
        siginfo.si_imm = break_num;
        siginfo.si_flags = 0;           /* clear __ISR_VALID */
        siginfo.si_isr = 0;

        switch (break_num) {
              case 0: /* unknown error (used by GCC for __builtin_abort()) */
                if (notify_die(DIE_BREAK, "break 0", regs, break_num, TRAP_BRKPT, SIGTRAP)
                                == NOTIFY_STOP)
                        return;
                if (die_if_kernel("bugcheck!", regs, break_num))
                        return;
                sig = SIGILL; code = ILL_ILLOPC;
                break;

              case 1: /* integer divide by zero */
                sig = SIGFPE; code = FPE_INTDIV;
                break;

              case 2: /* integer overflow */
                sig = SIGFPE; code = FPE_INTOVF;
                break;

              case 3: /* range check/bounds check */
                sig = SIGFPE; code = FPE_FLTSUB;
                break;

              case 4: /* null pointer dereference */
                sig = SIGSEGV; code = SEGV_MAPERR;
                break;

              case 5: /* misaligned data */
                sig = SIGSEGV; code = BUS_ADRALN;
                break;

              case 6: /* decimal overflow */
                sig = SIGFPE; code = __FPE_DECOVF;
                break;

              case 7: /* decimal divide by zero */
                sig = SIGFPE; code = __FPE_DECDIV;
                break;

              case 8: /* packed decimal error */
                sig = SIGFPE; code = __FPE_DECERR;
                break;

              case 9: /* invalid ASCII digit */
                sig = SIGFPE; code = __FPE_INVASC;
                break;

              case 10: /* invalid decimal digit */
                sig = SIGFPE; code = __FPE_INVDEC;
                break;

              case 11: /* paragraph stack overflow */
                sig = SIGSEGV; code = __SEGV_PSTKOVF;
                break;

              case 0x3f000 ... 0x3ffff: /* bundle-update in progress */
                sig = SIGILL; code = __ILL_BNDMOD;
                break;

              default:
                if ((break_num < 0x40000 || break_num > 0x100000)
                    && die_if_kernel("Bad break", regs, break_num))
                        return;

                if (break_num < 0x80000) {
                        sig = SIGILL; code = __ILL_BREAK;
                } else {
                        if (notify_die(DIE_BREAK, "bad break", regs, break_num, TRAP_BRKPT, SIGTRAP)
                                        == NOTIFY_STOP)
                                return;
                        sig = SIGTRAP; code = TRAP_BRKPT;
                }
        }
        siginfo.si_signo = sig;
        siginfo.si_errno = 0;
        siginfo.si_code = code;
        force_sig_info(sig, &siginfo, current);
}

/*
 * disabled_fph_fault() is called when a user-level process attempts to access f32..f127
 * and it doesn't own the fp-high register partition.  When this happens, we save the
 * current fph partition in the task_struct of the fpu-owner (if necessary) and then load
 * the fp-high partition of the current task (if necessary).  Note that the kernel has
 * access to fph by the time we get here, as the IVT's "Disabled FP-Register" handler takes
 * care of clearing psr.dfh.
 */
static inline void
disabled_fph_fault (struct pt_regs *regs)
{
        struct ia64_psr *psr = ia64_psr(regs);

        /* first, grant user-level access to fph partition: */
        psr->dfh = 0;

        /*
         * Make sure that no other task gets in on this processor
         * while we're claiming the FPU
         */
        preempt_disable();
#ifndef CONFIG_SMP
        {
                struct task_struct *fpu_owner
                        = (struct task_struct *)ia64_get_kr(IA64_KR_FPU_OWNER);

                if (ia64_is_local_fpu_owner(current)) {
                        preempt_enable_no_resched();
                        return;
                }

                if (fpu_owner)
                        ia64_flush_fph(fpu_owner);
        }
#endif /* !CONFIG_SMP */
        ia64_set_local_fpu_owner(current);
        if ((current->thread.flags & IA64_THREAD_FPH_VALID) != 0) {
                __ia64_load_fpu(current->thread.fph);
                psr->mfh = 0;
        } else {
                __ia64_init_fpu();
                /*
                 * Set mfh because the state in thread.fph does not match the state in
                 * the fph partition.
                 */
                psr->mfh = 1;
        }
        preempt_enable_no_resched();
}

static inline int
fp_emulate (int fp_fault, void *bundle, long *ipsr, long *fpsr, long *isr, long *pr, long *ifs,
            struct pt_regs *regs)
{
        fp_state_t fp_state;
        fpswa_ret_t ret;

        if (!fpswa_interface)
                return -1;

        memset(&fp_state, 0, sizeof(fp_state_t));

        /*
         * compute fp_state.  only FP registers f6 - f11 are used by the
         * kernel, so set those bits in the mask and set the low volatile
         * pointer to point to these registers.
         */
        fp_state.bitmask_low64 = 0xfc0;  /* bit6..bit11 */

        fp_state.fp_state_low_volatile = (fp_state_low_volatile_t *) &regs->f6;
        /*
         * unsigned long (*EFI_FPSWA) (
         *      unsigned long    trap_type,
         *      void             *Bundle,
         *      unsigned long    *pipsr,
         *      unsigned long    *pfsr,
         *      unsigned long    *pisr,
         *      unsigned long    *ppreds,
         *      unsigned long    *pifs,
         *      void             *fp_state);
         */
        ret = (*fpswa_interface->fpswa)((unsigned long) fp_fault, bundle,
                                        (unsigned long *) ipsr, (unsigned long *) fpsr,
                                        (unsigned long *) isr, (unsigned long *) pr,
                                        (unsigned long *) ifs, &fp_state);

        return ret.status;
}

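/*
 * Rate limiting for the "floating-point assist fault" console message:
 * both the per-CPU state (cpulast) and the global state (last) allow at
 * most a few messages per 5 second window, so a task looping on FP
 * software assists cannot flood the log.
 */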
struct fpu_swa_msg {
        unsigned long count;
        unsigned long time;
};
static DEFINE_PER_CPU(struct fpu_swa_msg, cpulast);
DECLARE_PER_CPU(struct fpu_swa_msg, cpulast);
static struct fpu_swa_msg last __cacheline_aligned;


/*
 * Handle floating-point assist faults and traps.
 */
static int
handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr)
{
        long exception, bundle[2];
        unsigned long fault_ip;

        fault_ip = regs->cr_iip;
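        /*
         * For an fp trap (as opposed to a fault) cr_iip already points past
         * the excepting instruction; when ri == 0 that instruction is in the
         * previous 16-byte bundle, so back up to fetch the right bundle.
         */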
        if (!fp_fault && (ia64_psr(regs)->ri == 0))
                fault_ip -= 16;
        if (copy_from_user(bundle, (void __user *) fault_ip, sizeof(bundle)))
                return -1;

        if (!(current->thread.flags & IA64_THREAD_FPEMU_NOPRINT))  {
                unsigned long count, current_jiffies = jiffies;
                struct fpu_swa_msg *cp = this_cpu_ptr(&cpulast);

                if (unlikely(current_jiffies > cp->time))
                        cp->count = 0;
                if (unlikely(cp->count < 5)) {
                        cp->count++;
                        cp->time = current_jiffies + 5 * HZ;

                        /* minimize races by grabbing a copy of count BEFORE checking last.time. */
                        count = last.count;
                        barrier();

                        /*
                         * Lower 4 bits are used as a count. Upper bits are a sequence
                         * number that is updated when count is reset. The cmpxchg will
                         * fail if seqno has changed. This minimizes multiple CPUs
                         * resetting the count.
                         */
                        if (current_jiffies > last.time)
                                (void) cmpxchg_acq(&last.count, count, 16 + (count & ~15));

                        /* use fetchadd to atomically update the count */
                        if ((last.count & 15) < 5 && (ia64_fetchadd(1, &last.count, acq) & 15) < 5) {
                                last.time = current_jiffies + 5 * HZ;
                                printk(KERN_WARNING
                                        "%s(%d): floating-point assist fault at ip %016lx, isr %016lx\n",
                                        current->comm, task_pid_nr(current), regs->cr_iip + ia64_psr(regs)->ri, isr);
                        }
                }
        }

        exception = fp_emulate(fp_fault, bundle, &regs->cr_ipsr, &regs->ar_fpsr, &isr, &regs->pr,
                               &regs->cr_ifs, regs);
        if (fp_fault) {
                if (exception == 0) {
                        /* emulation was successful */
                        ia64_increment_ip(regs);
                } else if (exception == -1) {
                        printk(KERN_ERR "handle_fpu_swa: fp_emulate() returned -1\n");
                        return -1;
                } else {
                        struct siginfo siginfo;

                        /* is next instruction a trap? */
                        if (exception & 2) {
                                ia64_increment_ip(regs);
                        }
                        clear_siginfo(&siginfo);
                        siginfo.si_signo = SIGFPE;
                        siginfo.si_errno = 0;
                        siginfo.si_code = FPE_FLTUNK;   /* default code */
                        siginfo.si_addr = (void __user *) (regs->cr_iip + ia64_psr(regs)->ri);
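                        /*
                         * Refine si_code from the ISR exception bits:
                         * invalid operation, denormal operand/underflow,
                         * or divide by zero.
                         */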
                        if (isr & 0x11) {
                                siginfo.si_code = FPE_FLTINV;
                        } else if (isr & 0x22) {
                                /* denormal operand gets the same si_code as underflow
                                 * see arch/i386/kernel/traps.c:math_error()  */
                                siginfo.si_code = FPE_FLTUND;
                        } else if (isr & 0x44) {
                                siginfo.si_code = FPE_FLTDIV;
                        }
                        siginfo.si_isr = isr;
                        siginfo.si_flags = __ISR_VALID;
                        siginfo.si_imm = 0;
                        force_sig_info(SIGFPE, &siginfo, current);
                }
        } else {
                if (exception == -1) {
                        printk(KERN_ERR "handle_fpu_swa: fp_emulate() returned -1\n");
                        return -1;
                } else if (exception != 0) {
                        /* raise exception */
                        struct siginfo siginfo;

                        clear_siginfo(&siginfo);
                        siginfo.si_signo = SIGFPE;
                        siginfo.si_errno = 0;
                        siginfo.si_code = FPE_FLTUNK;   /* default code */
                        siginfo.si_addr = (void __user *) (regs->cr_iip + ia64_psr(regs)->ri);
                        if (isr & 0x880) {
                                siginfo.si_code = FPE_FLTOVF;
                        } else if (isr & 0x1100) {
                                siginfo.si_code = FPE_FLTUND;
                        } else if (isr & 0x2200) {
                                siginfo.si_code = FPE_FLTRES;
                        }
                        siginfo.si_isr = isr;
                        siginfo.si_flags = __ISR_VALID;
                        siginfo.si_imm = 0;
                        force_sig_info(SIGFPE, &siginfo, current);
                }
        }
        return 0;
}

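/*
 * Return value of ia64_illegal_op_fault() for its low-level caller: when the
 * optional long-branch (brl) emulator handles the instruction it fills in
 * fkt/arg1..arg3; otherwise fkt is 0 and a SIGILL has been raised (or the
 * kernel has oopsed via die_if_kernel()).
 */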
struct illegal_op_return {
        unsigned long fkt, arg1, arg2, arg3;
};

struct illegal_op_return
ia64_illegal_op_fault (unsigned long ec, long arg1, long arg2, long arg3,
                       long arg4, long arg5, long arg6, long arg7,
                       struct pt_regs regs)
{
        struct illegal_op_return rv;
        struct siginfo si;
        char buf[128];

#ifdef CONFIG_IA64_BRL_EMU
        {
                extern struct illegal_op_return ia64_emulate_brl (struct pt_regs *, unsigned long);

                rv = ia64_emulate_brl(&regs, ec);
                if (rv.fkt != (unsigned long) -1)
                        return rv;
        }
#endif

        sprintf(buf, "IA-64 Illegal operation fault");
        rv.fkt = 0;
        if (die_if_kernel(buf, &regs, 0))
                return rv;

        clear_siginfo(&si);
        si.si_signo = SIGILL;
        si.si_code = ILL_ILLOPC;
        si.si_addr = (void __user *) (regs.cr_iip + ia64_psr(&regs)->ri);
        force_sig_info(SIGILL, &si, current);
        return rv;
}

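/*
 * Common handler for the remaining IA-64 faults and traps, dispatched on the
 * interruption vector number.  Cases not handled or fixed up here are fatal
 * in kernel mode and deliver a signal in user mode.
 */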
void __kprobes
ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
            unsigned long iim, unsigned long itir, long arg5, long arg6,
            long arg7, struct pt_regs regs)
{
        unsigned long code, error = isr, iip;
        char buf[128];
        int result, sig;
        static const char *reason[] = {
                "IA-64 Illegal Operation fault",
                "IA-64 Privileged Operation fault",
                "IA-64 Privileged Register fault",
                "IA-64 Reserved Register/Field fault",
                "Disabled Instruction Set Transition fault",
                "Unknown fault 5", "Unknown fault 6", "Unknown fault 7", "Illegal Hazard fault",
                "Unknown fault 9", "Unknown fault 10", "Unknown fault 11", "Unknown fault 12",
                "Unknown fault 13", "Unknown fault 14", "Unknown fault 15"
        };

        if ((isr & IA64_ISR_NA) && ((isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH)) {
                /*
                 * This fault was due to lfetch.fault, set "ed" bit in the psr to cancel
                 * the lfetch.
                 */
                ia64_psr(&regs)->ed = 1;
                return;
        }

        iip = regs.cr_iip + ia64_psr(&regs)->ri;

        switch (vector) {
              case 24: /* General Exception */
                code = (isr >> 4) & 0xf;
                sprintf(buf, "General Exception: %s%s", reason[code],
                        (code == 3) ? ((isr & (1UL << 37))
                                       ? " (RSE access)" : " (data access)") : "");
                if (code == 8) {
# ifdef CONFIG_IA64_PRINT_HAZARDS
                        printk("%s[%d]: possible hazard @ ip=%016lx (pr = %016lx)\n",
                               current->comm, task_pid_nr(current),
                               regs.cr_iip + ia64_psr(&regs)->ri, regs.pr);
# endif
                        return;
                }
                break;

              case 25: /* Disabled FP-Register */
                if (isr & 2) {
                        disabled_fph_fault(&regs);
                        return;
                }
                sprintf(buf, "Disabled FPL fault---not supposed to happen!");
                break;

              case 26: /* NaT Consumption */
                if (user_mode(&regs)) {
                        struct siginfo siginfo;
                        void __user *addr;

                        if (((isr >> 4) & 0xf) == 2) {
                                /* NaT page consumption */
                                sig = SIGSEGV;
                                code = SEGV_ACCERR;
                                addr = (void __user *) ifa;
                        } else {
                                /* register NaT consumption */
                                sig = SIGILL;
                                code = ILL_ILLOPN;
                                addr = (void __user *) (regs.cr_iip
                                                        + ia64_psr(&regs)->ri);
                        }
                        clear_siginfo(&siginfo);
                        siginfo.si_signo = sig;
                        siginfo.si_code = code;
                        siginfo.si_errno = 0;
                        siginfo.si_addr = addr;
                        siginfo.si_imm = vector;
                        siginfo.si_flags = __ISR_VALID;
                        siginfo.si_isr = isr;
                        force_sig_info(sig, &siginfo, current);
                        return;
                } else if (ia64_done_with_exception(&regs))
                        return;
                sprintf(buf, "NaT consumption");
                break;

              case 31: /* Unsupported Data Reference */
                if (user_mode(&regs)) {
                        struct siginfo siginfo;

                        clear_siginfo(&siginfo);
                        siginfo.si_signo = SIGILL;
                        siginfo.si_code = ILL_ILLOPN;
                        siginfo.si_errno = 0;
                        siginfo.si_addr = (void __user *) iip;
                        siginfo.si_imm = vector;
                        siginfo.si_flags = __ISR_VALID;
                        siginfo.si_isr = isr;
                        force_sig_info(SIGILL, &siginfo, current);
                        return;
                }
                sprintf(buf, "Unsupported data reference");
                break;

              case 29: /* Debug */
              case 35: /* Taken Branch Trap */
              case 36: /* Single Step Trap */
              {
                struct siginfo siginfo;

                clear_siginfo(&siginfo);
                if (fsys_mode(current, &regs)) {
                        extern char __kernel_syscall_via_break[];
                        /*
                         * Got a trap in fsys-mode: Taken Branch Trap
                         * and Single Step trap need special handling;
                         * Debug trap is ignored (we disable it here
                         * and re-enable it in the lower-privilege trap).
                         */
                        if (unlikely(vector == 29)) {
                                set_thread_flag(TIF_DB_DISABLED);
                                ia64_psr(&regs)->db = 0;
                                ia64_psr(&regs)->lp = 1;
                                return;
                        }
                        /* re-do the system call via break 0x100000: */
                        regs.cr_iip = (unsigned long) __kernel_syscall_via_break;
                        ia64_psr(&regs)->ri = 0;
                        ia64_psr(&regs)->cpl = 3;
                        return;
                }
                switch (vector) {
                      default:
                      case 29:
                        siginfo.si_code = TRAP_HWBKPT;
#ifdef CONFIG_ITANIUM
                        /*
                         * Erratum 10 (IFA may contain incorrect address) now has
                         * "NoFix" status.  There are no plans for fixing this.
                         */
                        if (ia64_psr(&regs)->is == 0)
                          ifa = regs.cr_iip;
#endif
                        break;
                      case 35: siginfo.si_code = TRAP_BRANCH; ifa = 0; break;
                      case 36: siginfo.si_code = TRAP_TRACE; ifa = 0; break;
                }
                if (notify_die(DIE_FAULT, "ia64_fault", &regs, vector, siginfo.si_code, SIGTRAP)
                                == NOTIFY_STOP)
                        return;
                siginfo.si_signo = SIGTRAP;
                siginfo.si_errno = 0;
                siginfo.si_addr  = (void __user *) ifa;
                siginfo.si_imm   = 0;
                siginfo.si_flags = __ISR_VALID;
                siginfo.si_isr   = isr;
                force_sig_info(SIGTRAP, &siginfo, current);
                return;
              }

              case 32: /* fp fault */
              case 33: /* fp trap */
                result = handle_fpu_swa((vector == 32) ? 1 : 0, &regs, isr);
                if ((result < 0) || (current->thread.flags & IA64_THREAD_FPEMU_SIGFPE)) {
                        struct siginfo siginfo;

                        clear_siginfo(&siginfo);
                        siginfo.si_signo = SIGFPE;
                        siginfo.si_errno = 0;
                        siginfo.si_code = FPE_FLTINV;
                        siginfo.si_addr = (void __user *) iip;
                        siginfo.si_flags = __ISR_VALID;
                        siginfo.si_isr = isr;
                        siginfo.si_imm = 0;
                        force_sig_info(SIGFPE, &siginfo, current);
                }
                return;

              case 34:
                if (isr & 0x2) {
                        /* Lower-Privilege Transfer Trap */

                        /* If we disabled debug traps during an fsyscall,
                         * re-enable them here.
                         */
                        if (test_thread_flag(TIF_DB_DISABLED)) {
                                clear_thread_flag(TIF_DB_DISABLED);
                                ia64_psr(&regs)->db = 1;
                        }

                        /*
                         * Just clear PSR.lp and then return immediately:
                         * all the interesting work (e.g., signal delivery)
                         * is done in the kernel exit path.
                         */
                        ia64_psr(&regs)->lp = 0;
                        return;
                } else {
                        /* Unimplemented Instr. Address Trap */
                        if (user_mode(&regs)) {
                                struct siginfo siginfo;

                                clear_siginfo(&siginfo);
                                siginfo.si_signo = SIGILL;
                                siginfo.si_code = ILL_BADIADDR;
                                siginfo.si_errno = 0;
                                siginfo.si_flags = 0;
                                siginfo.si_isr = 0;
                                siginfo.si_imm = 0;
                                siginfo.si_addr = (void __user *) iip;
                                force_sig_info(SIGILL, &siginfo, current);
                                return;
                        }
                        sprintf(buf, "Unimplemented Instruction Address fault");
                }
                break;

              case 45:
                printk(KERN_ERR "Unexpected IA-32 exception (Trap 45)\n");
                printk(KERN_ERR "  iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx\n",
                       iip, ifa, isr);
                force_sig(SIGSEGV, current);
                return;

              case 46:
                printk(KERN_ERR "Unexpected IA-32 intercept trap (Trap 46)\n");
                printk(KERN_ERR "  iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx, iim - 0x%lx\n",
                       iip, ifa, isr, iim);
                force_sig(SIGSEGV, current);
                return;

              case 47:
                sprintf(buf, "IA-32 Interruption Fault (int 0x%lx)", isr >> 16);
                break;

              default:
                sprintf(buf, "Fault %lu", vector);
                break;
        }
        if (!die_if_kernel(buf, &regs, error))
                force_sig(SIGILL, current);
}