/*
 *  Meta exception handling.
 *
 *  Copyright (C) 2005,2006,2007,2008,2009,2012 Imagination Technologies Ltd.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of this archive
 * for more details.
 */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/signal.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/preempt.h>
#include <linux/ptrace.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/kdebug.h>
#include <linux/kexec.h>
#include <linux/unistd.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/syscalls.h>

#include <asm/bug.h>
#include <asm/core_reg.h>
#include <asm/irqflags.h>
#include <asm/siginfo.h>
#include <asm/traps.h>
#include <asm/hwthread.h>
#include <asm/setup.h>
#include <asm/switch.h>
#include <asm/user_gateway.h>
#include <asm/syscall.h>
#include <asm/syscalls.h>

/* Passing syscall arguments as long long is quicker. */
typedef unsigned int (*LPSYSCALL) (unsigned long long,
                                   unsigned long long,
                                   unsigned long long);
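/*
 * Each long long carries two adjacent 32-bit arguments, so all six
 * syscall arguments can be fetched with three 64-bit loads from the
 * DX register pairs (see switch1_handler() below).
 */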

/*
 * Users of LNKSET should compare the bus error bits obtained from DEFR
 * against TXDEFR_LNKSET_SUCCESS only, as the failure code will vary
 * between different core revisions.
 */
#define TXDEFR_LNKSET_SUCCESS 0x02000000
#define TXDEFR_LNKSET_FAILURE 0x04000000
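/*
 * Illustrative check following the rule above (a sketch only;
 * head_end() below does this for real):
 *
 *      unsigned int txdefr;
 *
 *      txdefr = __core_reg_get(TXDEFR) & TXDEFR_BUS_STATE_BITS;
 *      if (txdefr & TXDEFR_LNKSET_SUCCESS)
 *              ... the LNKSET succeeded ...
 *
 * Testing TXDEFR_LNKSET_FAILURE directly would break on core revisions
 * with a different failure code.
 */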

/*
 * Our global TBI handle.  Initialised from setup.c/setup_arch.
 */
DECLARE_PER_CPU(PTBI, pTBI);

#ifdef CONFIG_SMP
static DEFINE_PER_CPU(unsigned int, trigger_mask);
#else
unsigned int global_trigger_mask;
EXPORT_SYMBOL(global_trigger_mask);
#endif

unsigned long per_cpu__stack_save[NR_CPUS];

static const char * const trap_names[] = {
        [TBIXXF_SIGNUM_IIF] = "Illegal instruction fault",
        [TBIXXF_SIGNUM_PGF] = "Privilege violation",
        [TBIXXF_SIGNUM_DHF] = "Unaligned data access fault",
        [TBIXXF_SIGNUM_IGF] = "Code fetch general read failure",
        [TBIXXF_SIGNUM_DGF] = "Data access general read/write fault",
        [TBIXXF_SIGNUM_IPF] = "Code fetch page fault",
        [TBIXXF_SIGNUM_DPF] = "Data access page fault",
        [TBIXXF_SIGNUM_IHF] = "Instruction breakpoint",
        [TBIXXF_SIGNUM_DWF] = "Read-only data access fault",
};

const char *trap_name(int trapno)
{
        if (trapno >= 0 && trapno < ARRAY_SIZE(trap_names)
                        && trap_names[trapno])
                return trap_names[trapno];
        return "Unknown fault";
}

static DEFINE_SPINLOCK(die_lock);

void __noreturn die(const char *str, struct pt_regs *regs,
                    long err, unsigned long addr)
{
        static int die_counter;

        oops_enter();

        spin_lock_irq(&die_lock);
        console_verbose();
        bust_spinlocks(1);
        pr_err("%s: err %04lx (%s) addr %08lx [#%d]\n", str, err & 0xffff,
               trap_name(err & 0xffff), addr, ++die_counter);

        print_modules();
        show_regs(regs);

        pr_err("Process: %s (pid: %d, stack limit = %p)\n", current->comm,
               task_pid_nr(current), task_stack_page(current) + THREAD_SIZE);

        bust_spinlocks(0);
        add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
        if (kexec_should_crash(current))
                crash_kexec(regs);

        if (in_interrupt())
                panic("Fatal exception in interrupt");

        if (panic_on_oops)
                panic("Fatal exception");

        spin_unlock_irq(&die_lock);
        oops_exit();
        do_exit(SIGSEGV);
}

#ifdef CONFIG_METAG_DSP
/*
 * The ECH encoding specifies the size of a DSPRAM as,
 *
 *              "slots" / 4
 *
 * A "slot" is the size of two DSPRAM bank entries; an entry from
 * DSPRAM bank A and an entry from DSPRAM bank B. One DSPRAM bank
 * entry is 4 bytes.
 */
#define SLOT_SZ 8
static inline unsigned int decode_dspram_size(unsigned int size)
{
        unsigned int _sz = size & 0x7f;

        return _sz * SLOT_SZ * 4;
}
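
/*
 * Worked example: an encoded size of 2 means 2 * 4 = 8 slots, and at
 * 8 bytes per slot that is 64 bytes, i.e. 2 * SLOT_SZ * 4 above.
 */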

static void dspram_save(struct meta_ext_context *dsp_ctx,
                        unsigned int ramA_sz, unsigned int ramB_sz)
{
        unsigned int ram_sz[2];
        int i;

        ram_sz[0] = ramA_sz;
        ram_sz[1] = ramB_sz;

        for (i = 0; i < 2; i++) {
                if (ram_sz[i] != 0) {
                        unsigned int sz;

                        if (i == 0)
                                sz = decode_dspram_size(ram_sz[i] >> 8);
                        else
                                sz = decode_dspram_size(ram_sz[i]);

                        if (dsp_ctx->ram[i] == NULL) {
                                dsp_ctx->ram[i] = kmalloc(sz, GFP_KERNEL);

                                if (dsp_ctx->ram[i] == NULL)
                                        panic("couldn't save DSP context");
                        } else {
                                if (ram_sz[i] > dsp_ctx->ram_sz[i]) {
                                        kfree(dsp_ctx->ram[i]);

                                        dsp_ctx->ram[i] = kmalloc(sz,
                                                                  GFP_KERNEL);

                                        if (dsp_ctx->ram[i] == NULL)
                                                panic("couldn't save DSP context");
                                }
                        }

                        if (i == 0)
                                __TBIDspramSaveA(ram_sz[i], dsp_ctx->ram[i]);
                        else
                                __TBIDspramSaveB(ram_sz[i], dsp_ctx->ram[i]);

                        dsp_ctx->ram_sz[i] = ram_sz[i];
                }
        }
}
#endif /* CONFIG_METAG_DSP */

/*
 * Allow interrupts to be nested and save any "extended" register
 * context state, e.g. DSP regs and RAMs.
 */
static void nest_interrupts(TBIRES State, unsigned long mask)
{
#ifdef CONFIG_METAG_DSP
        struct meta_ext_context *dsp_ctx;
        unsigned int D0_8;

        /*
         * D0.8 may contain an ECH encoding. The upper 16 bits
         * tell us what DSP resources the current process is
         * using. OR the bits into the SaveMask so that
         * __TBINestInts() knows what resources to save as
         * part of this context.
         *
         * Don't save the context if we're nesting interrupts in the
         * kernel because the kernel doesn't use DSP hardware.
         */
        D0_8 = __core_reg_get(D0.8);

        if (D0_8 && (State.Sig.SaveMask & TBICTX_PRIV_BIT)) {
                State.Sig.SaveMask |= (D0_8 >> 16);

                dsp_ctx = current->thread.dsp_context;
                if (dsp_ctx == NULL) {
                        dsp_ctx = kzalloc(sizeof(*dsp_ctx), GFP_KERNEL);
                        if (dsp_ctx == NULL)
                                panic("couldn't save DSP context: ENOMEM");

                        current->thread.dsp_context = dsp_ctx;
                }

                current->thread.user_flags |= (D0_8 & 0xffff0000);
                __TBINestInts(State, &dsp_ctx->regs, mask);
                dspram_save(dsp_ctx, D0_8 & 0x7f00, D0_8 & 0x007f);
        } else
                __TBINestInts(State, NULL, mask);
#else
        __TBINestInts(State, NULL, mask);
#endif
}

void head_end(TBIRES State, unsigned long mask)
{
        unsigned int savemask = (unsigned short)State.Sig.SaveMask;
        unsigned int ctx_savemask = (unsigned short)State.Sig.pCtx->SaveMask;

        if (savemask & TBICTX_PRIV_BIT) {
                ctx_savemask |= TBICTX_PRIV_BIT;
                current->thread.user_flags = savemask;
        }

        /* Always undo the sleep bit */
        ctx_savemask &= ~TBICTX_WAIT_BIT;

        /* Always save the catch buffer and RD pipe if they are dirty */
        savemask |= TBICTX_XCBF_BIT;

        /* Only save the catch and RD if we have not already done so.
         * Note - the RD bits are in the pCtx only, and not in the
         * State.SaveMask.
         */
        if ((savemask & TBICTX_CBUF_BIT) ||
            (ctx_savemask & TBICTX_CBRP_BIT)) {
                /* Have we already saved the buffers though?
                 * - See TestTrack 5071 */
                if (ctx_savemask & TBICTX_XCBF_BIT) {
                        /* Strip off the bits so the call to __TBINestInts
                         * won't save the buffers again. */
                        savemask &= ~TBICTX_CBUF_BIT;
                        ctx_savemask &= ~TBICTX_CBRP_BIT;
                }
        }

#ifdef CONFIG_METAG_META21
        {
                unsigned int depth, txdefr;

                /*
                 * Save TXDEFR state.
                 *
                 * The process may have been interrupted after a LNKSET, but
                 * before it could read the DEFR state, so we mustn't lose that
                 * state or it could end up retrying an atomic operation that
                 * succeeded.
                 *
                 * All interrupts are disabled at this point so we
                 * don't need to perform any locking. We must do this
                 * dance before we use LNKGET or LNKSET.
                 */
                BUG_ON(current->thread.int_depth > HARDIRQ_BITS);

                depth = current->thread.int_depth++;

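                /*
                 * One bit of txdefr_failure records the LNKSET outcome
                 * per interrupt nesting level; tail_end_sys() consumes
                 * this depth's bit on the way back out.
                 */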
                txdefr = __core_reg_get(TXDEFR);

                txdefr &= TXDEFR_BUS_STATE_BITS;
                if (txdefr & TXDEFR_LNKSET_SUCCESS)
                        current->thread.txdefr_failure &= ~(1 << depth);
                else
                        current->thread.txdefr_failure |= (1 << depth);
        }
#endif

        State.Sig.SaveMask = savemask;
        State.Sig.pCtx->SaveMask = ctx_savemask;

        nest_interrupts(State, mask);

#ifdef CONFIG_METAG_POISON_CATCH_BUFFERS
        /* Poison the catch registers.  This shows up any mistakes we have
         * made in their handling MUCH quicker.
         */
        __core_reg_set(TXCATCH0, 0x87650021);
        __core_reg_set(TXCATCH1, 0x87654322);
        __core_reg_set(TXCATCH2, 0x87654323);
        __core_reg_set(TXCATCH3, 0x87654324);
#endif /* CONFIG_METAG_POISON_CATCH_BUFFERS */
}

TBIRES tail_end_sys(TBIRES State, int syscall, int *restart)
{
        struct pt_regs *regs = (struct pt_regs *)State.Sig.pCtx;
        unsigned long flags;

        local_irq_disable();

        if (user_mode(regs)) {
                flags = current_thread_info()->flags;
                if (flags & _TIF_WORK_MASK &&
                    do_work_pending(regs, flags, syscall)) {
                        *restart = 1;
                        return State;
                }

#ifdef CONFIG_METAG_FPU
                if (current->thread.fpu_context &&
                    current->thread.fpu_context->needs_restore) {
                        __TBICtxFPURestore(State, current->thread.fpu_context);
                        /*
                         * Clearing this bit ensures the FP unit is not made
                         * active again unless it is used.
                         */
                        State.Sig.SaveMask &= ~TBICTX_FPAC_BIT;
                        current->thread.fpu_context->needs_restore = false;
                }
                State.Sig.TrigMask |= TBI_TRIG_BIT(TBID_SIGNUM_DFR);
#endif
        }

        /* TBI will turn interrupts back on at some point. */
        if (!irqs_disabled_flags((unsigned long)State.Sig.TrigMask))
                trace_hardirqs_on();

#ifdef CONFIG_METAG_DSP
        /*
         * If we previously saved an extended context then restore it
         * now. Otherwise, clear D0.8 because this process is not
         * using DSP hardware.
         */
        if (State.Sig.pCtx->SaveMask & TBICTX_XEXT_BIT) {
                unsigned int D0_8;
                struct meta_ext_context *dsp_ctx = current->thread.dsp_context;

                /* Make sure we're going to return to userland. */
                BUG_ON(current->thread.int_depth != 1);

                if (dsp_ctx->ram_sz[0] > 0)
                        __TBIDspramRestoreA(dsp_ctx->ram_sz[0],
                                            dsp_ctx->ram[0]);
                if (dsp_ctx->ram_sz[1] > 0)
                        __TBIDspramRestoreB(dsp_ctx->ram_sz[1],
                                            dsp_ctx->ram[1]);

                State.Sig.SaveMask |= State.Sig.pCtx->SaveMask;
                __TBICtxRestore(State, current->thread.dsp_context);
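                /*
                 * Reassemble D0.8: the saved ECH resource bits go in
                 * the top half, the DSPRAM size encodings in the
                 * bottom half.
                 */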
                D0_8 = __core_reg_get(D0.8);
                D0_8 |= current->thread.user_flags & 0xffff0000;
                D0_8 |= (dsp_ctx->ram_sz[1] | dsp_ctx->ram_sz[0]) & 0xffff;
                __core_reg_set(D0.8, D0_8);
        } else
                __core_reg_set(D0.8, 0);
#endif /* CONFIG_METAG_DSP */

#ifdef CONFIG_METAG_META21
        {
                unsigned int depth, txdefr;

                /*
                 * If there hasn't been a LNKSET since the last LNKGET then the
                 * link flag will be set, causing the next LNKSET to succeed if
                 * the addresses match. The two LNK operations may not be a pair
                 * (e.g. see atomic_read()), so the LNKSET should fail.
                 * We use a conditional-never LNKSET to clear the link flag
                 * without side effects.
                 */
                asm volatile("LNKSETDNV [D0Re0],D0Re0");

                depth = --current->thread.int_depth;

                BUG_ON(user_mode(regs) && depth);

                txdefr = __core_reg_get(TXDEFR);

                txdefr &= ~TXDEFR_BUS_STATE_BITS;

                /* Do we need to restore a failure code into TXDEFR? */
                if (current->thread.txdefr_failure & (1 << depth))
                        txdefr |= (TXDEFR_LNKSET_FAILURE | TXDEFR_BUS_TRIG_BIT);
                else
                        txdefr |= (TXDEFR_LNKSET_SUCCESS | TXDEFR_BUS_TRIG_BIT);

                __core_reg_set(TXDEFR, txdefr);
        }
#endif
        return State;
}

#ifdef CONFIG_SMP
/*
 * If we took an interrupt in the middle of __kuser_get_tls then we need
 * to rewind the PC to the start of the function in case the process
 * gets migrated to another thread (SMP only) and it reads the wrong tls
 * data.
 */
static inline void _restart_critical_section(TBIRES State)
{
        unsigned long get_tls_start;
        unsigned long get_tls_end;

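        /*
         * __kuser_get_tls executes from the user gateway page, so
         * rebase its link-time offsets onto that mapping before
         * comparing them with the interrupted PC.
         */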
        get_tls_start = (unsigned long)__kuser_get_tls -
                (unsigned long)&__user_gateway_start;

        get_tls_start += USER_GATEWAY_PAGE;

        get_tls_end = (unsigned long)__kuser_get_tls_end -
                (unsigned long)&__user_gateway_start;

        get_tls_end += USER_GATEWAY_PAGE;

        if ((State.Sig.pCtx->CurrPC >= get_tls_start) &&
            (State.Sig.pCtx->CurrPC < get_tls_end))
                State.Sig.pCtx->CurrPC = get_tls_start;
}
#else
/*
 * If we took an interrupt in the middle of
 * __kuser_cmpxchg then we need to rewind the PC to the
 * start of the function.
 */
static inline void _restart_critical_section(TBIRES State)
{
        unsigned long cmpxchg_start;
        unsigned long cmpxchg_end;

        cmpxchg_start = (unsigned long)__kuser_cmpxchg -
                (unsigned long)&__user_gateway_start;

        cmpxchg_start += USER_GATEWAY_PAGE;

        cmpxchg_end = (unsigned long)__kuser_cmpxchg_end -
                (unsigned long)&__user_gateway_start;

        cmpxchg_end += USER_GATEWAY_PAGE;

        if ((State.Sig.pCtx->CurrPC >= cmpxchg_start) &&
            (State.Sig.pCtx->CurrPC < cmpxchg_end))
                State.Sig.pCtx->CurrPC = cmpxchg_start;
}
#endif

/* Used by kick_handler() */
void restart_critical_section(TBIRES State)
{
        _restart_critical_section(State);
}

TBIRES trigger_handler(TBIRES State, int SigNum, int Triggers, int Inst,
                       PTBI pTBI)
{
        head_end(State, ~INTS_OFF_MASK);

        /* If we interrupted user code handle any critical sections. */
        if (State.Sig.SaveMask & TBICTX_PRIV_BIT)
                _restart_critical_section(State);

        trace_hardirqs_off();

        do_IRQ(SigNum, (struct pt_regs *)State.Sig.pCtx);

        return tail_end(State);
}

static unsigned int load_fault(PTBICTXEXTCB0 pbuf)
{
        return pbuf->CBFlags & TXCATCH0_READ_BIT;
}

static unsigned long fault_address(PTBICTXEXTCB0 pbuf)
{
        return pbuf->CBAddr;
}

static void unhandled_fault(struct pt_regs *regs, unsigned long addr,
                            int signo, int code, int trapno)
{
        if (user_mode(regs)) {
                siginfo_t info;

                if (show_unhandled_signals && unhandled_signal(current, signo)
                    && printk_ratelimit()) {

                        pr_info("pid %d unhandled fault: pc 0x%08x, addr 0x%08lx, trap %d (%s)\n",
                                current->pid, regs->ctx.CurrPC, addr,
                                trapno, trap_name(trapno));
                        print_vma_addr(" in ", regs->ctx.CurrPC);
                        print_vma_addr(" rtp in ", regs->ctx.DX[4].U1);
                        printk("\n");
                        show_regs(regs);
                }

                info.si_signo = signo;
                info.si_errno = 0;
                info.si_code = code;
                info.si_addr = (__force void __user *)addr;
                info.si_trapno = trapno;
                force_sig_info(signo, &info, current);
        } else {
                die("Oops", regs, trapno, addr);
        }
}

static int handle_data_fault(PTBICTXEXTCB0 pcbuf, struct pt_regs *regs,
                             unsigned int data_address, int trapno)
{
        int ret;

        ret = do_page_fault(regs, data_address, !load_fault(pcbuf), trapno);

        return ret;
}

static unsigned long get_inst_fault_address(struct pt_regs *regs)
{
        return regs->ctx.CurrPC;
}

TBIRES fault_handler(TBIRES State, int SigNum, int Triggers,
                     int Inst, PTBI pTBI)
{
        struct pt_regs *regs = (struct pt_regs *)State.Sig.pCtx;
        PTBICTXEXTCB0 pcbuf = (PTBICTXEXTCB0)&regs->extcb0;
        unsigned long data_address;

        head_end(State, ~INTS_OFF_MASK);

        /* Hardware breakpoint or data watch */
        if ((SigNum == TBIXXF_SIGNUM_IHF) ||
            ((SigNum == TBIXXF_SIGNUM_DHF) &&
             (pcbuf[0].CBFlags & (TXCATCH0_WATCH1_BIT |
                                  TXCATCH0_WATCH0_BIT)))) {
                State = __TBIUnExpXXX(State, SigNum, Triggers, Inst,
                                      pTBI);
                return tail_end(State);
        }

        local_irq_enable();

        data_address = fault_address(pcbuf);

        switch (SigNum) {
        case TBIXXF_SIGNUM_IGF:
                /* 1st-level entry invalid (instruction fetch) */
        case TBIXXF_SIGNUM_IPF: {
                /* 2nd-level entry invalid (instruction fetch) */
                unsigned long addr = get_inst_fault_address(regs);
                do_page_fault(regs, addr, 0, SigNum);
                break;
        }

        case TBIXXF_SIGNUM_DGF:
                /* 1st-level entry invalid (data access) */
        case TBIXXF_SIGNUM_DPF:
                /* 2nd-level entry invalid (data access) */
        case TBIXXF_SIGNUM_DWF:
                /* Write to read only page */
                handle_data_fault(pcbuf, regs, data_address, SigNum);
                break;

        case TBIXXF_SIGNUM_IIF:
                /* Illegal instruction */
                unhandled_fault(regs, regs->ctx.CurrPC, SIGILL, ILL_ILLOPC,
                                SigNum);
                break;

        case TBIXXF_SIGNUM_DHF:
                /* Unaligned access */
                unhandled_fault(regs, data_address, SIGBUS, BUS_ADRALN,
                                SigNum);
                break;
        case TBIXXF_SIGNUM_PGF:
                /* Privilege violation */
                unhandled_fault(regs, data_address, SIGSEGV, SEGV_ACCERR,
                                SigNum);
                break;
        default:
                BUG();
                break;
        }

        return tail_end(State);
}

static bool switch_is_syscall(unsigned int inst)
{
        return inst == __METAG_SW_ENCODING(SYS);
}

static bool switch_is_legacy_syscall(unsigned int inst)
{
        return inst == __METAG_SW_ENCODING(SYS_LEGACY);
}

static inline void step_over_switch(struct pt_regs *regs, unsigned int inst)
{
        regs->ctx.CurrPC += 4;
}

static inline int test_syscall_work(void)
{
        return current_thread_info()->flags & _TIF_WORK_SYSCALL_MASK;
}

TBIRES switch1_handler(TBIRES State, int SigNum, int Triggers,
                       int Inst, PTBI pTBI)
{
        struct pt_regs *regs = (struct pt_regs *)State.Sig.pCtx;
        unsigned int sysnumber;
        unsigned long long a1_a2, a3_a4, a5_a6;
        LPSYSCALL syscall_entry;
        int restart;

        head_end(State, ~INTS_OFF_MASK);

        /*
         * If this is not a syscall SWITCH it could be a breakpoint.
         */
        if (!switch_is_syscall(Inst)) {
                /*
                 * Alert the user if they're trying to use legacy system
                 * calls. This suggests they need to update their C
                 * library and build against up to date kernel headers.
                 */
                if (switch_is_legacy_syscall(Inst))
                        pr_warn_once("WARNING: A legacy syscall was made. Your userland needs updating.\n");
                /*
                 * We don't know how to handle the SWITCH and cannot
                 * safely ignore it, so treat all unknown switches
                 * (including breakpoints) as traps.
                 */
                force_sig(SIGTRAP, current);
                return tail_end(State);
        }

        local_irq_enable();

restart_syscall:
        restart = 0;
        sysnumber = regs->ctx.DX[0].U1;

        if (test_syscall_work())
                sysnumber = syscall_trace_enter(regs);

        /* Skip over the SWITCH instruction - or you just get 'stuck' on it! */
        step_over_switch(regs, Inst);

        if (sysnumber >= __NR_syscalls) {
                pr_debug("unknown syscall number: %d\n", sysnumber);
                syscall_entry = (LPSYSCALL) sys_ni_syscall;
        } else {
                syscall_entry = (LPSYSCALL) sys_call_table[sysnumber];
        }

        /* Use 64bit loads for speed. */
        a5_a6 = *(unsigned long long *)&regs->ctx.DX[1];
        a3_a4 = *(unsigned long long *)&regs->ctx.DX[2];
        a1_a2 = *(unsigned long long *)&regs->ctx.DX[3];

        /* Here is the actual call to the syscall handler function. */
        regs->ctx.DX[0].U0 = syscall_entry(a1_a2, a3_a4, a5_a6);

        if (test_syscall_work())
                syscall_trace_leave(regs);

        State = tail_end_sys(State, sysnumber, &restart);
        /* Handlerless restarts shouldn't go via userland */
        if (restart)
                goto restart_syscall;
        return State;
}

TBIRES switchx_handler(TBIRES State, int SigNum, int Triggers,
                       int Inst, PTBI pTBI)
{
        struct pt_regs *regs = (struct pt_regs *)State.Sig.pCtx;

        /*
         * This can be caused by any user process simply executing an unusual
         * SWITCH instruction. If there's no DA, __TBIUnExpXXX will cause the
         * thread to stop, so signal a SIGTRAP instead.
         */
        head_end(State, ~INTS_OFF_MASK);
        if (user_mode(regs))
                force_sig(SIGTRAP, current);
        else
                State = __TBIUnExpXXX(State, SigNum, Triggers, Inst, pTBI);
        return tail_end(State);
}

#ifdef CONFIG_METAG_META21
TBIRES fpe_handler(TBIRES State, int SigNum, int Triggers, int Inst, PTBI pTBI)
{
        struct pt_regs *regs = (struct pt_regs *)State.Sig.pCtx;
        unsigned int error_state = Triggers;
        siginfo_t info;

        head_end(State, ~INTS_OFF_MASK);

        local_irq_enable();

        info.si_signo = SIGFPE;

        if (error_state & TXSTAT_FPE_INVALID_BIT)
                info.si_code = FPE_FLTINV;
        else if (error_state & TXSTAT_FPE_DIVBYZERO_BIT)
                info.si_code = FPE_FLTDIV;
        else if (error_state & TXSTAT_FPE_OVERFLOW_BIT)
                info.si_code = FPE_FLTOVF;
        else if (error_state & TXSTAT_FPE_UNDERFLOW_BIT)
                info.si_code = FPE_FLTUND;
        else if (error_state & TXSTAT_FPE_INEXACT_BIT)
                info.si_code = FPE_FLTRES;
        else
                info.si_code = 0;
        info.si_errno = 0;
        info.si_addr = (__force void __user *)regs->ctx.CurrPC;
        force_sig_info(SIGFPE, &info, current);

        return tail_end(State);
}
#endif

#ifdef CONFIG_METAG_SUSPEND_MEM
struct traps_context {
        PTBIAPIFN fnSigs[TBID_SIGNUM_MAX + 1];
};

static struct traps_context *metag_traps_context;

int traps_save_context(void)
{
        unsigned long cpu = smp_processor_id();
        PTBI _pTBI = per_cpu(pTBI, cpu);
        struct traps_context *context;

        context = kzalloc(sizeof(*context), GFP_ATOMIC);
        if (!context)
                return -ENOMEM;

        memcpy(context->fnSigs, (void *)_pTBI->fnSigs, sizeof(context->fnSigs));

        metag_traps_context = context;
        return 0;
}

int traps_restore_context(void)
{
        unsigned long cpu = smp_processor_id();
        PTBI _pTBI = per_cpu(pTBI, cpu);
        struct traps_context *context = metag_traps_context;

        metag_traps_context = NULL;

        memcpy((void *)_pTBI->fnSigs, context->fnSigs, sizeof(context->fnSigs));

        kfree(context);
        return 0;
}
#endif

#ifdef CONFIG_SMP
static inline unsigned int _get_trigger_mask(void)
{
        unsigned long cpu = smp_processor_id();
        return per_cpu(trigger_mask, cpu);
}

unsigned int get_trigger_mask(void)
{
        return _get_trigger_mask();
}
EXPORT_SYMBOL(get_trigger_mask);

static void set_trigger_mask(unsigned int mask)
{
        unsigned long cpu = smp_processor_id();
        per_cpu(trigger_mask, cpu) = mask;
}

void arch_local_irq_enable(void)
{
        preempt_disable();
        arch_local_irq_restore(_get_trigger_mask());
        preempt_enable_no_resched();
}
EXPORT_SYMBOL(arch_local_irq_enable);
#else
static void set_trigger_mask(unsigned int mask)
{
        global_trigger_mask = mask;
}
#endif

void per_cpu_trap_init(unsigned long cpu)
{
        TBIRES int_context;
        unsigned int thread = cpu_2_hwthread_id[cpu];

        set_trigger_mask(TBI_INTS_INIT(thread) | /* interrupts */
                         TBI_TRIG_BIT(TBID_SIGNUM_LWK) | /* low level kick */
                         TBI_TRIG_BIT(TBID_SIGNUM_SW1));

        /* non-priv - use current stack */
        int_context.Sig.pCtx = NULL;
        /* Start with interrupts off */
        int_context.Sig.TrigMask = INTS_OFF_MASK;
        int_context.Sig.SaveMask = 0;

        /* And call __TBIASyncTrigger() */
        __TBIASyncTrigger(int_context);
}

void __init trap_init(void)
{
        unsigned long cpu = smp_processor_id();
        PTBI _pTBI = per_cpu(pTBI, cpu);

        _pTBI->fnSigs[TBID_SIGNUM_XXF] = fault_handler;
        _pTBI->fnSigs[TBID_SIGNUM_SW0] = switchx_handler;
        _pTBI->fnSigs[TBID_SIGNUM_SW1] = switch1_handler;
        _pTBI->fnSigs[TBID_SIGNUM_SW2] = switchx_handler;
        _pTBI->fnSigs[TBID_SIGNUM_SW3] = switchx_handler;
        _pTBI->fnSigs[TBID_SIGNUM_LWK] = kick_handler;

#ifdef CONFIG_METAG_META21
        _pTBI->fnSigs[TBID_SIGNUM_DFR] = __TBIHandleDFR;
        _pTBI->fnSigs[TBID_SIGNUM_FPE] = fpe_handler;
#endif

        per_cpu_trap_init(cpu);
}

void tbi_startup_interrupt(int irq)
{
        unsigned long cpu = smp_processor_id();
        PTBI _pTBI = per_cpu(pTBI, cpu);

        BUG_ON(irq > TBID_SIGNUM_MAX);

        /* For TR1 and TR2, the thread id is encoded in the irq number */
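        /* Up to 4 hardware threads per core, hence the "% 4". */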
        if (irq >= TBID_SIGNUM_T10 && irq < TBID_SIGNUM_TR3)
                cpu = hwthread_id_2_cpu[(irq - TBID_SIGNUM_T10) % 4];

        set_trigger_mask(get_trigger_mask() | TBI_TRIG_BIT(irq));

        _pTBI->fnSigs[irq] = trigger_handler;
}

void tbi_shutdown_interrupt(int irq)
{
        unsigned long cpu = smp_processor_id();
        PTBI _pTBI = per_cpu(pTBI, cpu);

        BUG_ON(irq > TBID_SIGNUM_MAX);

        set_trigger_mask(get_trigger_mask() & ~TBI_TRIG_BIT(irq));

        _pTBI->fnSigs[irq] = __TBIUnExpXXX;
}

int ret_from_fork(TBIRES arg)
{
        struct task_struct *prev = arg.Switch.pPara;
        struct task_struct *tsk = current;
        struct pt_regs *regs = task_pt_regs(tsk);
        int (*fn)(void *);
        TBIRES Next;

        schedule_tail(prev);

        if (tsk->flags & PF_KTHREAD) {
                fn = (void *)regs->ctx.DX[4].U1;
                BUG_ON(!fn);

                fn((void *)regs->ctx.DX[3].U1);
        }

        if (test_syscall_work())
                syscall_trace_leave(regs);

        preempt_disable();

        Next.Sig.TrigMask = get_trigger_mask();
        Next.Sig.SaveMask = 0;
        Next.Sig.pCtx = &regs->ctx;

        set_gateway_tls(current->thread.tls_ptr);

        preempt_enable_no_resched();

        /* And interrupts should come back on when we resume the real usermode
         * code. Call __TBIASyncResume()
         */
        __TBIASyncResume(tail_end(Next));
        /* ASyncResume should NEVER return */
        BUG();
        return 0;
}

void show_trace(struct task_struct *tsk, unsigned long *sp,
                struct pt_regs *regs)
{
        unsigned long addr;
#ifdef CONFIG_FRAME_POINTER
        unsigned long fp, fpnew;
        unsigned long stack;
#endif

        if (regs && user_mode(regs))
                return;

        printk("\nCall trace: ");
#ifdef CONFIG_KALLSYMS
        printk("\n");
#endif

        if (!tsk)
                tsk = current;

#ifdef CONFIG_FRAME_POINTER
        if (regs) {
                print_ip_sym(regs->ctx.CurrPC);
                fp = regs->ctx.AX[1].U0;
        } else {
                fp = __core_reg_get(A0FrP);
        }

        /* Detect when the frame pointer has been used for other purposes and
         * doesn't point to the stack (it may point completely elsewhere,
         * which kstack_end may not detect).
         */
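        /* A frame record holds the saved frame pointer at fp + 0 and the
         * return address at fp + 4, hence the fp + 8 bound below.
         */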
        stack = (unsigned long)task_stack_page(tsk);
        while (fp >= stack && fp + 8 <= stack + THREAD_SIZE) {
                addr = __raw_readl((unsigned long *)(fp + 4)) - 4;
                if (kernel_text_address(addr))
                        print_ip_sym(addr);
                else
                        break;
                /* stack grows up, so frame pointers must decrease */
                fpnew = __raw_readl((unsigned long *)(fp + 0));
                if (fpnew >= fp)
                        break;
                fp = fpnew;
        }
#else
        while (!kstack_end(sp)) {
                addr = (*sp--) - 4;
                if (kernel_text_address(addr))
                        print_ip_sym(addr);
        }
#endif

        printk("\n");

        debug_show_held_locks(tsk);
}

void show_stack(struct task_struct *tsk, unsigned long *sp)
{
        if (!tsk)
                tsk = current;
        if (tsk == current)
                sp = (unsigned long *)current_stack_pointer;
        else
                sp = (unsigned long *)tsk->thread.kernel_context->AX[0].U0;

        show_trace(tsk, sp, NULL);
}