/* linux/arch/metag/kernel/traps.c */
   1/*
   2 *  Meta exception handling.
   3 *
   4 *  Copyright (C) 2005,2006,2007,2008,2009,2012 Imagination Technologies Ltd.
   5 *
   6 * This file is subject to the terms and conditions of the GNU General Public
   7 * License.  See the file COPYING in the main directory of this archive
   8 * for more details.
   9 */
  10
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/preempt.h>
#include <linux/ptrace.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/kdebug.h>
#include <linux/kexec.h>
#include <linux/unistd.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/syscalls.h>

#include <asm/bug.h>
#include <asm/core_reg.h>
#include <asm/irqflags.h>
#include <asm/siginfo.h>
#include <asm/traps.h>
#include <asm/hwthread.h>
#include <asm/switch.h>
#include <asm/user_gateway.h>
#include <asm/syscall.h>
#include <asm/syscalls.h>
  40
  41/* Passing syscall arguments as long long is quicker. */
  42typedef unsigned int (*LPSYSCALL) (unsigned long long,
  43                                   unsigned long long,
  44                                   unsigned long long);
  45
  46/*
  47 * Users of LNKSET should compare the bus error bits obtained from DEFR
  48 * against TXDEFR_LNKSET_SUCCESS only as the failure code will vary between
  49 * different cores revisions.
  50 */
  51#define TXDEFR_LNKSET_SUCCESS 0x02000000
  52#define TXDEFR_LNKSET_FAILURE 0x04000000
  53
  54/*
  55 * Our global TBI handle.  Initialised from setup.c/setup_arch.
  56 */
  57DECLARE_PER_CPU(PTBI, pTBI);
  58
  59#ifdef CONFIG_SMP
  60static DEFINE_PER_CPU(unsigned int, trigger_mask);
  61#else
  62unsigned int global_trigger_mask;
  63EXPORT_SYMBOL(global_trigger_mask);
  64#endif
  65
  66unsigned long per_cpu__stack_save[NR_CPUS];
  67
  68static const char * const trap_names[] = {
  69        [TBIXXF_SIGNUM_IIF] = "Illegal instruction fault",
  70        [TBIXXF_SIGNUM_PGF] = "Privilege violation",
  71        [TBIXXF_SIGNUM_DHF] = "Unaligned data access fault",
  72        [TBIXXF_SIGNUM_IGF] = "Code fetch general read failure",
  73        [TBIXXF_SIGNUM_DGF] = "Data access general read/write fault",
  74        [TBIXXF_SIGNUM_IPF] = "Code fetch page fault",
  75        [TBIXXF_SIGNUM_DPF] = "Data access page fault",
  76        [TBIXXF_SIGNUM_IHF] = "Instruction breakpoint",
  77        [TBIXXF_SIGNUM_DWF] = "Read-only data access fault",
  78};
  79
  80const char *trap_name(int trapno)
  81{
  82        if (trapno >= 0 && trapno < ARRAY_SIZE(trap_names)
  83                        && trap_names[trapno])
  84                return trap_names[trapno];
  85        return "Unknown fault";
  86}
  87
  88static DEFINE_SPINLOCK(die_lock);
  89
/*
 * die() - report a fatal kernel fault and kill the current task.
 * @str:  short description, e.g. "Oops".
 * @regs: register state at the time of the fault.
 * @err:  error code; the low 16 bits are the trap number.
 * @addr: faulting address.
 *
 * Prints registers, loaded modules and process information, taints the
 * kernel, optionally crash-kexecs or panics, and finally exits the
 * current task with SIGSEGV.  Does not return normally.
 */
void die(const char *str, struct pt_regs *regs, long err,
	 unsigned long addr)
{
	static int die_counter;	/* distinguishes successive oopses in the log */

	oops_enter();

	/* Serialise concurrent oopses and force console output through. */
	spin_lock_irq(&die_lock);
	console_verbose();
	bust_spinlocks(1);
	pr_err("%s: err %04lx (%s) addr %08lx [#%d]\n", str, err & 0xffff,
	       trap_name(err & 0xffff), addr, ++die_counter);

	print_modules();
	show_regs(regs);

	pr_err("Process: %s (pid: %d, stack limit = %p)\n", current->comm,
	       task_pid_nr(current), task_stack_page(current) + THREAD_SIZE);

	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	if (kexec_should_crash(current))
		crash_kexec(regs);

	/* An oops in interrupt context cannot be fixed by killing a task. */
	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops)
		panic("Fatal exception");

	spin_unlock_irq(&die_lock);
	oops_exit();
	do_exit(SIGSEGV);
}
 124
 125#ifdef CONFIG_METAG_DSP
 126/*
 127 * The ECH encoding specifies the size of a DSPRAM as,
 128 *
 129 *              "slots" / 4
 130 *
 131 * A "slot" is the size of two DSPRAM bank entries; an entry from
 132 * DSPRAM bank A and an entry from DSPRAM bank B. One DSPRAM bank
 133 * entry is 4 bytes.
 134 */
 135#define SLOT_SZ 8
 136static inline unsigned int decode_dspram_size(unsigned int size)
 137{
 138        unsigned int _sz = size & 0x7f;
 139
 140        return _sz * SLOT_SZ * 4;
 141}
 142
/*
 * dspram_save() - save the DSPRAM banks into the extended context.
 * @dsp_ctx: per-thread extended context to save into.
 * @ramA_sz: encoded bank A size (ECH bits, still shifted up by 8).
 * @ramB_sz: encoded bank B size (ECH low bits).
 *
 * Save buffers are allocated lazily and only grown when the encoded
 * size exceeds the size recorded from the previous save.  Allocation
 * failure is fatal because the DSP state would otherwise be lost.
 */
static void dspram_save(struct meta_ext_context *dsp_ctx,
			unsigned int ramA_sz, unsigned int ramB_sz)
{
	unsigned int ram_sz[2];
	int i;

	ram_sz[0] = ramA_sz;
	ram_sz[1] = ramB_sz;

	for (i = 0; i < 2; i++) {
		if (ram_sz[i] != 0) {
			unsigned int sz;

			/* Bank A's size field sits 8 bits higher than B's. */
			if (i == 0)
				sz = decode_dspram_size(ram_sz[i] >> 8);
			else
				sz = decode_dspram_size(ram_sz[i]);

			if (dsp_ctx->ram[i] == NULL) {
				dsp_ctx->ram[i] = kmalloc(sz, GFP_KERNEL);

				if (dsp_ctx->ram[i] == NULL)
					panic("couldn't save DSP context");
			} else {
				/*
				 * NOTE(review): this compares raw encoded
				 * sizes rather than decoded byte counts;
				 * assumed OK because the encoding is
				 * monotonic -- confirm.
				 */
				if (ram_sz[i] > dsp_ctx->ram_sz[i]) {
					kfree(dsp_ctx->ram[i]);

					dsp_ctx->ram[i] = kmalloc(sz,
								  GFP_KERNEL);

					if (dsp_ctx->ram[i] == NULL)
						panic("couldn't save DSP context");
				}
			}

			/* Let the TBI copy the RAM contents into our buffer. */
			if (i == 0)
				__TBIDspramSaveA(ram_sz[i], dsp_ctx->ram[i]);
			else
				__TBIDspramSaveB(ram_sz[i], dsp_ctx->ram[i]);

			/* Remember the encoded size for the grow check above. */
			dsp_ctx->ram_sz[i] = ram_sz[i];
		}
	}
}
 187#endif /* CONFIG_METAG_DSP */
 188
 189/*
 190 * Allow interrupts to be nested and save any "extended" register
 191 * context state, e.g. DSP regs and RAMs.
 192 */
static void nest_interrupts(TBIRES State, unsigned long mask)
{
#ifdef CONFIG_METAG_DSP
	struct meta_ext_context *dsp_ctx;
	unsigned int D0_8;

	/*
	 * D0.8 may contain an ECH encoding. The upper 16 bits
	 * tell us what DSP resources the current process is
	 * using. OR the bits into the SaveMask so that
	 * __TBINestInts() knows what resources to save as
	 * part of this context.
	 *
	 * Don't save the context if we're nesting interrupts in the
	 * kernel because the kernel doesn't use DSP hardware.
	 */
	D0_8 = __core_reg_get(D0.8);

	if (D0_8 && (State.Sig.SaveMask & TBICTX_PRIV_BIT)) {
		State.Sig.SaveMask |= (D0_8 >> 16);

		/* Lazily allocate the per-thread extended context. */
		dsp_ctx = current->thread.dsp_context;
		if (dsp_ctx == NULL) {
			/*
			 * NOTE(review): GFP_KERNEL may sleep; assumed safe at
			 * this point of the trap path -- confirm.
			 */
			dsp_ctx = kzalloc(sizeof(*dsp_ctx), GFP_KERNEL);
			if (dsp_ctx == NULL)
				panic("couldn't save DSP context: ENOMEM");

			current->thread.dsp_context = dsp_ctx;
		}

		current->thread.user_flags |= (D0_8 & 0xffff0000);
		__TBINestInts(State, &dsp_ctx->regs, mask);
		/* Bank A size bits live in 14:8, bank B in 6:0. */
		dspram_save(dsp_ctx, D0_8 & 0x7f00, D0_8 & 0x007f);
	} else
		__TBINestInts(State, NULL, mask);
#else
	/* No DSP hardware: no extended context to save, just nest. */
	__TBINestInts(State, NULL, mask);
#endif
}
 232
/*
 * head_end() - common entry work for trap and interrupt handlers.
 * @State: TBI state handed to the handler.
 * @mask:  trigger mask passed down to nest_interrupts().
 *
 * Normalises the save masks (catch buffer / RD pipe / sleep bit),
 * records the user flags of a privileged interruption, preserves the
 * TXDEFR bus state on Meta 2.1, and re-enables interrupt nesting.
 */
void head_end(TBIRES State, unsigned long mask)
{
	unsigned int savemask = (unsigned short)State.Sig.SaveMask;
	unsigned int ctx_savemask = (unsigned short)State.Sig.pCtx->SaveMask;

	if (savemask & TBICTX_PRIV_BIT) {
		ctx_savemask |= TBICTX_PRIV_BIT;
		current->thread.user_flags = savemask;
	}

	/* Always undo the sleep bit */
	ctx_savemask &= ~TBICTX_WAIT_BIT;

	/* Always save the catch buffer and RD pipe if they are dirty */
	savemask |= TBICTX_XCBF_BIT;

	/* Only save the catch and RD if we have not already done so.
	 * Note - the RD bits are in the pCtx only, and not in the
	 * State.SaveMask.
	 */
	if ((savemask & TBICTX_CBUF_BIT) ||
	    (ctx_savemask & TBICTX_CBRP_BIT)) {
		/* Have we already saved the buffers though?
		 * - See TestTrack 5071 */
		if (ctx_savemask & TBICTX_XCBF_BIT) {
			/* Strip off the bits so the call to __TBINestInts
			 * won't save the buffers again. */
			savemask &= ~TBICTX_CBUF_BIT;
			ctx_savemask &= ~TBICTX_CBRP_BIT;
		}
	}

#ifdef CONFIG_METAG_META21
	{
		unsigned int depth, txdefr;

		/*
		 * Save TXDEFR state.
		 *
		 * The process may have been interrupted after a LNKSET, but
		 * before it could read the DEFR state, so we mustn't lose that
		 * state or it could end up retrying an atomic operation that
		 * succeeded.
		 *
		 * All interrupts are disabled at this point so we
		 * don't need to perform any locking. We must do this
		 * dance before we use LNKGET or LNKSET.
		 */
		BUG_ON(current->thread.int_depth > HARDIRQ_BITS);

		depth = current->thread.int_depth++;

		txdefr = __core_reg_get(TXDEFR);

		/* Record success/failure per nesting depth as a bitmask. */
		txdefr &= TXDEFR_BUS_STATE_BITS;
		if (txdefr & TXDEFR_LNKSET_SUCCESS)
			current->thread.txdefr_failure &= ~(1 << depth);
		else
			current->thread.txdefr_failure |= (1 << depth);
	}
#endif

	State.Sig.SaveMask = savemask;
	State.Sig.pCtx->SaveMask = ctx_savemask;

	nest_interrupts(State, mask);

#ifdef CONFIG_METAG_POISON_CATCH_BUFFERS
	/* Poison the catch registers.  This shows up any mistakes we have
	 * made in their handling MUCH quicker.
	 */
	__core_reg_set(TXCATCH0, 0x87650021);
	__core_reg_set(TXCATCH1, 0x87654322);
	__core_reg_set(TXCATCH2, 0x87654323);
	__core_reg_set(TXCATCH3, 0x87654324);
#endif /* CONFIG_METAG_POISON_CATCH_BUFFERS */
}
 310
/*
 * tail_end_sys() - common exit work before returning from a handler.
 * @State:   TBI state to be returned to the interrupted context.
 * @syscall: syscall number forwarded to do_work_pending().
 * @restart: out-flag set to 1 when the caller should re-run the
 *           syscall without returning to userland first.
 *
 * Runs pending work for userland returns, restores FPU and DSP
 * extended context, and re-materialises the TXDEFR LNKSET result
 * saved by head_end() on Meta 2.1.
 */
TBIRES tail_end_sys(TBIRES State, int syscall, int *restart)
{
	struct pt_regs *regs = (struct pt_regs *)State.Sig.pCtx;
	unsigned long flags;

	local_irq_disable();

	if (user_mode(regs)) {
		/* Signals, reschedule, etc. before dropping to userland. */
		flags = current_thread_info()->flags;
		if (flags & _TIF_WORK_MASK &&
		    do_work_pending(regs, flags, syscall)) {
			*restart = 1;
			return State;
		}

#ifdef CONFIG_METAG_FPU
		if (current->thread.fpu_context &&
		    current->thread.fpu_context->needs_restore) {
			__TBICtxFPURestore(State, current->thread.fpu_context);
			/*
			 * Clearing this bit ensures the FP unit is not made
			 * active again unless it is used.
			 */
			State.Sig.SaveMask &= ~TBICTX_FPAC_BIT;
			current->thread.fpu_context->needs_restore = false;
		}
		State.Sig.TrigMask |= TBI_TRIG_BIT(TBID_SIGNUM_DFR);
#endif
	}

	/* TBI will turn interrupts back on at some point. */
	if (!irqs_disabled_flags((unsigned long)State.Sig.TrigMask))
		trace_hardirqs_on();

#ifdef CONFIG_METAG_DSP
	/*
	 * If we previously saved an extended context then restore it
	 * now. Otherwise, clear D0.8 because this process is not
	 * using DSP hardware.
	 */
	if (State.Sig.pCtx->SaveMask & TBICTX_XEXT_BIT) {
		unsigned int D0_8;
		struct meta_ext_context *dsp_ctx = current->thread.dsp_context;

		/* Make sure we're going to return to userland. */
		BUG_ON(current->thread.int_depth != 1);

		if (dsp_ctx->ram_sz[0] > 0)
			__TBIDspramRestoreA(dsp_ctx->ram_sz[0],
					    dsp_ctx->ram[0]);
		if (dsp_ctx->ram_sz[1] > 0)
			__TBIDspramRestoreB(dsp_ctx->ram_sz[1],
					    dsp_ctx->ram[1]);

		State.Sig.SaveMask |= State.Sig.pCtx->SaveMask;
		__TBICtxRestore(State, current->thread.dsp_context);
		/* Rebuild D0.8's ECH encoding from the saved state. */
		D0_8 = __core_reg_get(D0.8);
		D0_8 |= current->thread.user_flags & 0xffff0000;
		D0_8 |= (dsp_ctx->ram_sz[1] | dsp_ctx->ram_sz[0]) & 0xffff;
		__core_reg_set(D0.8, D0_8);
	} else
		__core_reg_set(D0.8, 0);
#endif /* CONFIG_METAG_DSP */

#ifdef CONFIG_METAG_META21
	{
		unsigned int depth, txdefr;

		/*
		 * If there hasn't been a LNKSET since the last LNKGET then the
		 * link flag will be set, causing the next LNKSET to succeed if
		 * the addresses match. The two LNK operations may not be a pair
		 * (e.g. see atomic_read()), so the LNKSET should fail.
		 * We use a conditional-never LNKSET to clear the link flag
		 * without side effects.
		 */
		asm volatile("LNKSETDNV [D0Re0],D0Re0");

		depth = --current->thread.int_depth;

		BUG_ON(user_mode(regs) && depth);

		txdefr = __core_reg_get(TXDEFR);

		txdefr &= ~TXDEFR_BUS_STATE_BITS;

		/* Do we need to restore a failure code into TXDEFR? */
		if (current->thread.txdefr_failure & (1 << depth))
			txdefr |= (TXDEFR_LNKSET_FAILURE | TXDEFR_BUS_TRIG_BIT);
		else
			txdefr |= (TXDEFR_LNKSET_SUCCESS | TXDEFR_BUS_TRIG_BIT);

		__core_reg_set(TXDEFR, txdefr);
	}
#endif
	return State;
}
 408
 409#ifdef CONFIG_SMP
 410/*
 411 * If we took an interrupt in the middle of __kuser_get_tls then we need
 412 * to rewind the PC to the start of the function in case the process
 413 * gets migrated to another thread (SMP only) and it reads the wrong tls
 414 * data.
 415 */
static inline void _restart_critical_section(TBIRES State)
{
	unsigned long get_tls_start;
	unsigned long get_tls_end;

	/* Translate link-time addresses into user gateway page addresses. */
	get_tls_start = (unsigned long)__kuser_get_tls -
		(unsigned long)&__user_gateway_start;

	get_tls_start += USER_GATEWAY_PAGE;

	get_tls_end = (unsigned long)__kuser_get_tls_end -
		(unsigned long)&__user_gateway_start;

	get_tls_end += USER_GATEWAY_PAGE;

	/* Rewind the PC if it was interrupted inside the critical region. */
	if ((State.Sig.pCtx->CurrPC >= get_tls_start) &&
	    (State.Sig.pCtx->CurrPC < get_tls_end))
		State.Sig.pCtx->CurrPC = get_tls_start;
}
 435#else
 436/*
 437 * If we took an interrupt in the middle of
 438 * __kuser_cmpxchg then we need to rewind the PC to the
 439 * start of the function.
 440 */
static inline void _restart_critical_section(TBIRES State)
{
	unsigned long cmpxchg_start;
	unsigned long cmpxchg_end;

	/* Translate link-time addresses into user gateway page addresses. */
	cmpxchg_start = (unsigned long)__kuser_cmpxchg -
		(unsigned long)&__user_gateway_start;

	cmpxchg_start += USER_GATEWAY_PAGE;

	cmpxchg_end = (unsigned long)__kuser_cmpxchg_end -
		(unsigned long)&__user_gateway_start;

	cmpxchg_end += USER_GATEWAY_PAGE;

	/* Rewind the PC if it was interrupted inside the critical region. */
	if ((State.Sig.pCtx->CurrPC >= cmpxchg_start) &&
	    (State.Sig.pCtx->CurrPC < cmpxchg_end))
		State.Sig.pCtx->CurrPC = cmpxchg_start;
}
 460#endif
 461
 462/* Used by kick_handler() */
 463void restart_critical_section(TBIRES State)
 464{
 465        _restart_critical_section(State);
 466}
 467
/*
 * trigger_handler() - TBI entry point for hardware interrupt triggers.
 * Rewinds an interrupted user-gateway critical section if necessary,
 * then hands the trigger number to do_IRQ().
 */
TBIRES trigger_handler(TBIRES State, int SigNum, int Triggers, int Inst,
		       PTBI pTBI)
{
	head_end(State, ~INTS_OFF_MASK);

	/* If we interrupted user code handle any critical sections. */
	if (State.Sig.SaveMask & TBICTX_PRIV_BIT)
		_restart_critical_section(State);

	trace_hardirqs_off();

	do_IRQ(SigNum, (struct pt_regs *)State.Sig.pCtx);

	return tail_end(State);
}
 483
 484static unsigned int load_fault(PTBICTXEXTCB0 pbuf)
 485{
 486        return pbuf->CBFlags & TXCATCH0_READ_BIT;
 487}
 488
 489static unsigned long fault_address(PTBICTXEXTCB0 pbuf)
 490{
 491        return pbuf->CBAddr;
 492}
 493
 494static void unhandled_fault(struct pt_regs *regs, unsigned long addr,
 495                            int signo, int code, int trapno)
 496{
 497        if (user_mode(regs)) {
 498                siginfo_t info;
 499
 500                if (show_unhandled_signals && unhandled_signal(current, signo)
 501                    && printk_ratelimit()) {
 502
 503                        pr_info("pid %d unhandled fault: pc 0x%08x, addr 0x%08lx, trap %d (%s)\n",
 504                                current->pid, regs->ctx.CurrPC, addr,
 505                                trapno, trap_name(trapno));
 506                        print_vma_addr(" in ", regs->ctx.CurrPC);
 507                        print_vma_addr(" rtp in ", regs->ctx.DX[4].U1);
 508                        printk("\n");
 509                        show_regs(regs);
 510                }
 511
 512                info.si_signo = signo;
 513                info.si_errno = 0;
 514                info.si_code = code;
 515                info.si_addr = (__force void __user *)addr;
 516                info.si_trapno = trapno;
 517                force_sig_info(signo, &info, current);
 518        } else {
 519                die("Oops", regs, trapno, addr);
 520        }
 521}
 522
 523static int handle_data_fault(PTBICTXEXTCB0 pcbuf, struct pt_regs *regs,
 524                             unsigned int data_address, int trapno)
 525{
 526        int ret;
 527
 528        ret = do_page_fault(regs, data_address, !load_fault(pcbuf), trapno);
 529
 530        return ret;
 531}
 532
 533static unsigned long get_inst_fault_address(struct pt_regs *regs)
 534{
 535        return regs->ctx.CurrPC;
 536}
 537
/*
 * fault_handler() - TBI entry point for memory/protection faults.
 * Page faults are routed to do_page_fault(); illegal instructions,
 * unaligned accesses and privilege violations become signals via
 * unhandled_fault(); hardware breakpoints and data watchpoints are
 * passed back to the TBI/debugger with __TBIUnExpXXX().
 */
TBIRES fault_handler(TBIRES State, int SigNum, int Triggers,
		     int Inst, PTBI pTBI)
{
	struct pt_regs *regs = (struct pt_regs *)State.Sig.pCtx;
	PTBICTXEXTCB0 pcbuf = (PTBICTXEXTCB0)&regs->extcb0;
	unsigned long data_address;

	head_end(State, ~INTS_OFF_MASK);

	/* Hardware breakpoint or data watch */
	if ((SigNum == TBIXXF_SIGNUM_IHF) ||
	    ((SigNum == TBIXXF_SIGNUM_DHF) &&
	     (pcbuf[0].CBFlags & (TXCATCH0_WATCH1_BIT |
				  TXCATCH0_WATCH0_BIT)))) {
		State = __TBIUnExpXXX(State, SigNum, Triggers, Inst,
				      pTBI);
		return tail_end(State);
	}

	local_irq_enable();

	data_address = fault_address(pcbuf);

	switch (SigNum) {
	case TBIXXF_SIGNUM_IGF:
		/* 1st-level entry invalid (instruction fetch) */
	case TBIXXF_SIGNUM_IPF: {
		/* 2nd-level entry invalid (instruction fetch) */
		unsigned long addr = get_inst_fault_address(regs);
		do_page_fault(regs, addr, 0, SigNum);
		break;
	}

	case TBIXXF_SIGNUM_DGF:
		/* 1st-level entry invalid (data access) */
	case TBIXXF_SIGNUM_DPF:
		/* 2nd-level entry invalid (data access) */
	case TBIXXF_SIGNUM_DWF:
		/* Write to read only page */
		handle_data_fault(pcbuf, regs, data_address, SigNum);
		break;

	case TBIXXF_SIGNUM_IIF:
		/* Illegal instruction */
		unhandled_fault(regs, regs->ctx.CurrPC, SIGILL, ILL_ILLOPC,
				SigNum);
		break;

	case TBIXXF_SIGNUM_DHF:
		/* Unaligned access */
		unhandled_fault(regs, data_address, SIGBUS, BUS_ADRALN,
				SigNum);
		break;
	case TBIXXF_SIGNUM_PGF:
		/* Privilege violation */
		unhandled_fault(regs, data_address, SIGSEGV, SEGV_ACCERR,
				SigNum);
		break;
	default:
		/* All fault signal numbers should be covered above. */
		BUG();
		break;
	}

	return tail_end(State);
}
 603
 604static bool switch_is_syscall(unsigned int inst)
 605{
 606        return inst == __METAG_SW_ENCODING(SYS);
 607}
 608
 609static bool switch_is_legacy_syscall(unsigned int inst)
 610{
 611        return inst == __METAG_SW_ENCODING(SYS_LEGACY);
 612}
 613
 614static inline void step_over_switch(struct pt_regs *regs, unsigned int inst)
 615{
 616        regs->ctx.CurrPC += 4;
 617}
 618
 619static inline int test_syscall_work(void)
 620{
 621        return current_thread_info()->flags & _TIF_WORK_SYSCALL_MASK;
 622}
 623
 624TBIRES switch1_handler(TBIRES State, int SigNum, int Triggers,
 625                       int Inst, PTBI pTBI)
 626{
 627        struct pt_regs *regs = (struct pt_regs *)State.Sig.pCtx;
 628        unsigned int sysnumber;
 629        unsigned long long a1_a2, a3_a4, a5_a6;
 630        LPSYSCALL syscall_entry;
 631        int restart;
 632
 633        head_end(State, ~INTS_OFF_MASK);
 634
 635        /*
 636         * If this is not a syscall SWITCH it could be a breakpoint.
 637         */
 638        if (!switch_is_syscall(Inst)) {
 639                /*
 640                 * Alert the user if they're trying to use legacy system
 641                 * calls. This suggests they need to update their C
 642                 * library and build against up to date kernel headers.
 643                 */
 644                if (switch_is_legacy_syscall(Inst))
 645                        pr_warn_once("WARNING: A legacy syscall was made. Your userland needs updating.\n");
 646                /*
 647                 * We don't know how to handle the SWITCH and cannot
 648                 * safely ignore it, so treat all unknown switches
 649                 * (including breakpoints) as traps.
 650                 */
 651                force_sig(SIGTRAP, current);
 652                return tail_end(State);
 653        }
 654
 655        local_irq_enable();
 656
 657restart_syscall:
 658        restart = 0;
 659        sysnumber = regs->ctx.DX[0].U1;
 660
 661        if (test_syscall_work())
 662                sysnumber = syscall_trace_enter(regs);
 663
 664        /* Skip over the SWITCH instruction - or you just get 'stuck' on it! */
 665        step_over_switch(regs, Inst);
 666
 667        if (sysnumber >= __NR_syscalls) {
 668                pr_debug("unknown syscall number: %d\n", sysnumber);
 669                syscall_entry = (LPSYSCALL) sys_ni_syscall;
 670        } else {
 671                syscall_entry = (LPSYSCALL) sys_call_table[sysnumber];
 672        }
 673
 674        /* Use 64bit loads for speed. */
 675        a5_a6 = *(unsigned long long *)&regs->ctx.DX[1];
 676        a3_a4 = *(unsigned long long *)&regs->ctx.DX[2];
 677        a1_a2 = *(unsigned long long *)&regs->ctx.DX[3];
 678
 679        /* here is the actual call to the syscall handler functions */
 680        regs->ctx.DX[0].U0 = syscall_entry(a1_a2, a3_a4, a5_a6);
 681
 682        if (test_syscall_work())
 683                syscall_trace_leave(regs);
 684
 685        State = tail_end_sys(State, sysnumber, &restart);
 686        /* Handlerless restarts shouldn't go via userland */
 687        if (restart)
 688                goto restart_syscall;
 689        return State;
 690}
 691
/*
 * switchx_handler() - TBI entry point for non-syscall SWITCH codes.
 */
TBIRES switchx_handler(TBIRES State, int SigNum, int Triggers,
		       int Inst, PTBI pTBI)
{
	struct pt_regs *regs = (struct pt_regs *)State.Sig.pCtx;

	/*
	 * This can be caused by any user process simply executing an unusual
	 * SWITCH instruction. If there's no DA, __TBIUnExpXXX will cause the
	 * thread to stop, so signal a SIGTRAP instead.
	 */
	head_end(State, ~INTS_OFF_MASK);
	if (user_mode(regs))
		force_sig(SIGTRAP, current);
	else
		State = __TBIUnExpXXX(State, SigNum, Triggers, Inst, pTBI);
	return tail_end(State);
}
 709
 710#ifdef CONFIG_METAG_META21
 711TBIRES fpe_handler(TBIRES State, int SigNum, int Triggers, int Inst, PTBI pTBI)
 712{
 713        struct pt_regs *regs = (struct pt_regs *)State.Sig.pCtx;
 714        unsigned int error_state = Triggers;
 715        siginfo_t info;
 716
 717        head_end(State, ~INTS_OFF_MASK);
 718
 719        local_irq_enable();
 720
 721        info.si_signo = SIGFPE;
 722
 723        if (error_state & TXSTAT_FPE_INVALID_BIT)
 724                info.si_code = FPE_FLTINV;
 725        else if (error_state & TXSTAT_FPE_DIVBYZERO_BIT)
 726                info.si_code = FPE_FLTDIV;
 727        else if (error_state & TXSTAT_FPE_OVERFLOW_BIT)
 728                info.si_code = FPE_FLTOVF;
 729        else if (error_state & TXSTAT_FPE_UNDERFLOW_BIT)
 730                info.si_code = FPE_FLTUND;
 731        else if (error_state & TXSTAT_FPE_INEXACT_BIT)
 732                info.si_code = FPE_FLTRES;
 733        else
 734                info.si_code = 0;
 735        info.si_errno = 0;
 736        info.si_addr = (__force void __user *)regs->ctx.CurrPC;
 737        force_sig_info(SIGFPE, &info, current);
 738
 739        return tail_end(State);
 740}
 741#endif
 742
 743#ifdef CONFIG_METAG_SUSPEND_MEM
 744struct traps_context {
 745        PTBIAPIFN fnSigs[TBID_SIGNUM_MAX + 1];
 746};
 747
 748static struct traps_context *metag_traps_context;
 749
/*
 * traps_save_context() - snapshot this CPU's TBI signal handler table
 * so it can be reinstated after a memory suspend.
 * Returns 0 on success or -ENOMEM if the snapshot cannot be allocated.
 */
int traps_save_context(void)
{
	unsigned long cpu = smp_processor_id();
	PTBI _pTBI = per_cpu(pTBI, cpu);
	struct traps_context *context;

	context = kzalloc(sizeof(*context), GFP_ATOMIC);
	if (!context)
		return -ENOMEM;

	memcpy(context->fnSigs, (void *)_pTBI->fnSigs, sizeof(context->fnSigs));

	metag_traps_context = context;
	return 0;
}
 765
 766int traps_restore_context(void)
 767{
 768        unsigned long cpu = smp_processor_id();
 769        PTBI _pTBI = per_cpu(pTBI, cpu);
 770        struct traps_context *context = metag_traps_context;
 771
 772        metag_traps_context = NULL;
 773
 774        memcpy((void *)_pTBI->fnSigs, context->fnSigs, sizeof(context->fnSigs));
 775
 776        kfree(context);
 777        return 0;
 778}
 779#endif
 780
 781#ifdef CONFIG_SMP
 782static inline unsigned int _get_trigger_mask(void)
 783{
 784        unsigned long cpu = smp_processor_id();
 785        return per_cpu(trigger_mask, cpu);
 786}
 787
 788unsigned int get_trigger_mask(void)
 789{
 790        return _get_trigger_mask();
 791}
 792EXPORT_SYMBOL(get_trigger_mask);
 793
 794static void set_trigger_mask(unsigned int mask)
 795{
 796        unsigned long cpu = smp_processor_id();
 797        per_cpu(trigger_mask, cpu) = mask;
 798}
 799
/*
 * Enable interrupts by restoring this CPU's full trigger mask.
 * Preemption is disabled around the per-cpu read so we don't restore
 * another CPU's mask after a migration.
 */
void arch_local_irq_enable(void)
{
	preempt_disable();
	arch_local_irq_restore(_get_trigger_mask());
	preempt_enable_no_resched();
}
EXPORT_SYMBOL(arch_local_irq_enable);
 807#else
/* UP build: a single global mask stands in for the per-cpu copies. */
static void set_trigger_mask(unsigned int mask)
{
	global_trigger_mask = mask;
}
 812#endif
 813
/*
 * per_cpu_trap_init() - per-CPU trap/trigger initialisation.
 * @cpu: logical CPU being brought up.
 *
 * Programs the CPU's trigger mask with the standard interrupt and kick
 * triggers, then enters interrupt context via __TBIASyncTrigger().
 */
void __cpuinit per_cpu_trap_init(unsigned long cpu)
{
	TBIRES int_context;
	unsigned int thread = cpu_2_hwthread_id[cpu];

	set_trigger_mask(TBI_INTS_INIT(thread) | /* interrupts */
			 TBI_TRIG_BIT(TBID_SIGNUM_LWK) | /* low level kick */
			 TBI_TRIG_BIT(TBID_SIGNUM_SW1) |
			 TBI_TRIG_BIT(TBID_SIGNUM_SWS));

	/* non-priv - use current stack */
	int_context.Sig.pCtx = NULL;
	/* Start with interrupts off */
	int_context.Sig.TrigMask = INTS_OFF_MASK;
	int_context.Sig.SaveMask = 0;

	/* And call __TBIASyncTrigger() */
	__TBIASyncTrigger(int_context);
}
 833
/*
 * trap_init() - boot-time trap setup for the boot CPU.
 * Installs the architecture trap handlers into the TBI signal function
 * table and then performs the common per-CPU initialisation.
 */
void __init trap_init(void)
{
	unsigned long cpu = smp_processor_id();
	PTBI _pTBI = per_cpu(pTBI, cpu);

	_pTBI->fnSigs[TBID_SIGNUM_XXF] = fault_handler;
	_pTBI->fnSigs[TBID_SIGNUM_SW0] = switchx_handler;
	_pTBI->fnSigs[TBID_SIGNUM_SW1] = switch1_handler;
	_pTBI->fnSigs[TBID_SIGNUM_SW2] = switchx_handler;
	_pTBI->fnSigs[TBID_SIGNUM_SW3] = switchx_handler;
	_pTBI->fnSigs[TBID_SIGNUM_SWK] = kick_handler;

#ifdef CONFIG_METAG_META21
	_pTBI->fnSigs[TBID_SIGNUM_DFR] = __TBIHandleDFR;
	_pTBI->fnSigs[TBID_SIGNUM_FPE] = fpe_handler;
#endif

	per_cpu_trap_init(cpu);
}
 853
/*
 * tbi_startup_interrupt() - enable TBI signal @irq and route it to the
 * kernel's trigger_handler().
 */
void tbi_startup_interrupt(int irq)
{
	unsigned long cpu = smp_processor_id();
	PTBI _pTBI = per_cpu(pTBI, cpu);

	BUG_ON(irq > TBID_SIGNUM_MAX);

	/* For TR1 and TR2, the thread id is encoded in the irq number */
	if (irq >= TBID_SIGNUM_T10 && irq < TBID_SIGNUM_TR3)
		cpu = hwthread_id_2_cpu[(irq - TBID_SIGNUM_T10) % 4];

	set_trigger_mask(get_trigger_mask() | TBI_TRIG_BIT(irq));

	_pTBI->fnSigs[irq] = trigger_handler;
}
 869
/*
 * tbi_shutdown_interrupt() - disable TBI signal @irq and restore the
 * default unexpected-trigger handler for it.
 */
void tbi_shutdown_interrupt(int irq)
{
	unsigned long cpu = smp_processor_id();
	PTBI _pTBI = per_cpu(pTBI, cpu);

	BUG_ON(irq > TBID_SIGNUM_MAX);

	set_trigger_mask(get_trigger_mask() & ~TBI_TRIG_BIT(irq));

	_pTBI->fnSigs[irq] = __TBIUnExpXXX;
}
 881
/*
 * ret_from_fork() - first code executed by a newly forked task.
 * @arg: TBI switch result; arg.Switch.pPara carries the previous task
 *       for schedule_tail().
 *
 * Kernel threads first run their thread function (taken from the saved
 * DX registers); then the task resumes via __TBIASyncResume(), which
 * never returns here.
 */
int ret_from_fork(TBIRES arg)
{
	struct task_struct *prev = arg.Switch.pPara;
	struct task_struct *tsk = current;
	struct pt_regs *regs = task_pt_regs(tsk);
	int (*fn)(void *);
	TBIRES Next;

	schedule_tail(prev);

	if (tsk->flags & PF_KTHREAD) {
		/* Thread function and argument were stashed in DX[4]/DX[3]. */
		fn = (void *)regs->ctx.DX[4].U1;
		BUG_ON(!fn);

		fn((void *)regs->ctx.DX[3].U1);
	}

	if (test_syscall_work())
		syscall_trace_leave(regs);

	/* Keep per-cpu state consistent while building the resume context. */
	preempt_disable();

	Next.Sig.TrigMask = get_trigger_mask();
	Next.Sig.SaveMask = 0;
	Next.Sig.pCtx = &regs->ctx;

	set_gateway_tls(current->thread.tls_ptr);

	preempt_enable_no_resched();

	/* And interrupts should come back on when we resume the real usermode
	 * code. Call __TBIASyncResume()
	 */
	__TBIASyncResume(tail_end(Next));
	/* ASyncResume should NEVER return */
	BUG();
	return 0;
}
 920
/*
 * show_trace() - print a kernel call trace for @tsk.
 * @tsk:  task to trace (NULL means current).
 * @sp:   stack pointer used for the non-frame-pointer scan.
 * @regs: register state, or NULL when tracing a sleeping task.
 *
 * With CONFIG_FRAME_POINTER the stack is walked via the saved frame
 * pointer chain, validating that each frame lies inside the task's
 * stack; otherwise the raw stack is scanned for kernel text addresses.
 */
void show_trace(struct task_struct *tsk, unsigned long *sp,
		struct pt_regs *regs)
{
	unsigned long addr;
#ifdef CONFIG_FRAME_POINTER
	unsigned long fp, fpnew;
	unsigned long stack;
#endif

	/* Nothing useful to print for a userland-only context. */
	if (regs && user_mode(regs))
		return;

	printk("\nCall trace: ");
#ifdef CONFIG_KALLSYMS
	printk("\n");
#endif

	if (!tsk)
		tsk = current;

#ifdef CONFIG_FRAME_POINTER
	if (regs) {
		print_ip_sym(regs->ctx.CurrPC);
		fp = regs->ctx.AX[1].U0;
	} else {
		fp = __core_reg_get(A0FrP);
	}

	/* detect when the frame pointer has been used for other purposes and
	 * doesn't point to the stack (it may point completely elsewhere which
	 * kstack_end may not detect).
	 */
	stack = (unsigned long)task_stack_page(tsk);
	while (fp >= stack && fp + 8 <= stack + THREAD_SIZE) {
		/* Return address is stored at fp + 4; -4 maps to the call. */
		addr = __raw_readl((unsigned long *)(fp + 4)) - 4;
		if (kernel_text_address(addr))
			print_ip_sym(addr);
		else
			break;
		/* stack grows up, so frame pointers must decrease */
		fpnew = __raw_readl((unsigned long *)(fp + 0));
		if (fpnew >= fp)
			break;
		fp = fpnew;
	}
#else
	/* No frame pointers: scan the stack for plausible text addresses. */
	while (!kstack_end(sp)) {
		addr = (*sp--) - 4;
		if (kernel_text_address(addr))
			print_ip_sym(addr);
	}
#endif

	printk("\n");

	debug_show_held_locks(tsk);
}
 978
/*
 * show_stack() - dump a task's kernel stack trace.
 * For the current task the live stack pointer is used; for any other
 * task the stack pointer saved in its switched-out kernel context.
 */
void show_stack(struct task_struct *tsk, unsigned long *sp)
{
	if (!tsk)
		tsk = current;
	if (tsk == current)
		sp = (unsigned long *)current_stack_pointer;
	else
		sp = (unsigned long *)tsk->thread.kernel_context->AX[0].U0;

	show_trace(tsk, sp, NULL);
}
 990