linux/arch/blackfin/mach-common/interrupt.S
/*
 * Interrupt Entries
 *
 * Copyright 2005-2009 Analog Devices Inc.
 *               D. Jeff Dionne <jeff@ryeham.ee.ryerson.ca>
 *               Kenneth Albanowski <kjahds@kjahds.com>
 *
 * Licensed under the GPL-2 or later.
 */

#include <asm/blackfin.h>
#include <mach/irq.h>
#include <linux/linkage.h>
#include <asm/entry.h>
#include <asm/asm-offsets.h>
#include <asm/trace.h>
#include <asm/traps.h>
#include <asm/thread_info.h>

#include <asm/context.S>

.extern _ret_from_exception

#ifdef CONFIG_I_ENTRY_L1
.section .l1.text
#else
.text
#endif

.align 4	/* just in case */

/* Common interrupt entry code.  First we do CLI, then push
 * RETI, to keep interrupts disabled, but to allow this state to be changed
 * by local_bh_enable.
 * R0 contains the interrupt number, while R1 may contain the value of IPEND,
 * or garbage if IPEND won't be needed by the ISR.  */
__common_int_entry:
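	/* The pushes below build the struct pt_regs frame that C code later
	 * indexes through the PT_* offsets from asm-offsets; RETI (the
	 * interrupted PC) is saved into the pt_regs pc slot.
	 */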
	[--sp] = fp;
	[--sp] = usp;

	[--sp] = i0;
	[--sp] = i1;
	[--sp] = i2;
	[--sp] = i3;

	[--sp] = m0;
	[--sp] = m1;
	[--sp] = m2;
	[--sp] = m3;

	[--sp] = l0;
	[--sp] = l1;
	[--sp] = l2;
	[--sp] = l3;

	[--sp] = b0;
	[--sp] = b1;
	[--sp] = b2;
	[--sp] = b3;
	[--sp] = a0.x;
	[--sp] = a0.w;
	[--sp] = a1.x;
	[--sp] = a1.w;

	[--sp] = LC0;
	[--sp] = LC1;
	[--sp] = LT0;
	[--sp] = LT1;
	[--sp] = LB0;
	[--sp] = LB1;

	[--sp] = ASTAT;

	[--sp] = r0;	/* Skip reserved */
	[--sp] = RETS;
	r2 = RETI;
	[--sp] = r2;
	[--sp] = RETX;
	[--sp] = RETN;
	[--sp] = RETE;
	[--sp] = SEQSTAT;
	[--sp] = r1;	/* IPEND - R1 may or may not be set up before jumping here. */

	/* Switch to the other method of keeping interrupts disabled.  */
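	/* Note: with CONFIG_DEBUG_HWERR, STI 0x3f leaves EMU, RST, NMI, EVX,
	 * IRPTEN and IVHW (IMASK bits 0-5) unmasked, so hardware errors are
	 * still reported close to the offending instruction; plain CLI masks
	 * everything.
	 */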
#ifdef CONFIG_DEBUG_HWERR
	r1 = 0x3f;
	sti r1;
#else
	cli r1;
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
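	/* R0 (the vector number) is caller-saved, so preserve it across the
	 * trace call.
	 */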
	[--sp] = r0;
	sp += -12;
	call _trace_hardirqs_off;
	sp += 12;
	r0 = [sp++];
#endif
	[--sp] = RETI;	/* orig_pc */
	/* Clear all L registers: C code expects linear addressing, and a
	 * nonzero L register would silently enable circular buffering.
	 */
	r1 = 0 (x);
	l0 = r1;
	l1 = r1;
	l2 = r1;
	l3 = r1;
#ifdef CONFIG_FRAME_POINTER
	fp = 0;
#endif

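	/* The ANOMALY_283_315_WORKAROUND macro works around silicon anomalies
	 * 05000283/05000315 on affected parts, using the named pointer and
	 * data registers as scratch.
	 */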
	ANOMALY_283_315_WORKAROUND(p5, r7)

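	/* R1 = pointer to the saved registers, the second C argument (R0
	 * still holds the vector number).  The "SP += -12" reserves the
	 * outgoing-argument area the Blackfin C calling convention expects
	 * callers to provide below SP.
	 */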
	r1 = sp;
	SP += -12;
#ifdef CONFIG_IPIPE
	call ___ipipe_grab_irq;
	SP += 12;
	cc = r0 == 0;
	if cc jump .Lcommon_restore_context;
#else /* CONFIG_IPIPE */

#ifdef CONFIG_PREEMPT
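	/* thread_info sits at the bottom of the kernel stack, so masking SP
	 * with ALIGN_PAGE_MASK yields its address; bump the preempt count
	 * around the handler.
	 */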
	r7 = sp;
	r4.l = lo(ALIGN_PAGE_MASK);
	r4.h = hi(ALIGN_PAGE_MASK);
	r7 = r7 & r4;
	p5 = r7;
	r7 = [p5 + TI_PREEMPT];	/* get preempt count */
	r7 += 1;		/* increment it */
	[p5 + TI_PREEMPT] = r7;
#endif
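	/* pseudo_long_call (asm/entry.h) is either a plain call, or an
	 * indirect call through the given scratch P register on
	 * configurations where the target may sit out of direct call range
	 * (e.g. L1/ROM layouts).
	 */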
	pseudo_long_call _do_irq, p2;

#ifdef CONFIG_PREEMPT
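	/* R7 and P5 are callee-saved, so they still hold the preempt count
	 * and the thread_info pointer here.
	 */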
	r7 += -1;
	[p5 + TI_PREEMPT] = r7;	/* restore preempt count */
#endif

	SP += 12;
#endif /* CONFIG_IPIPE */
	pseudo_long_call _return_from_int, p2;
.Lcommon_restore_context:
	RESTORE_CONTEXT
	rti;

/* interrupt routine for ivhw - 5 */
ENTRY(_evt_ivhw)
	/* A single action can kick off multiple memory transactions (like a
	 * cache line fetch) and thus raise multiple hardware errors; let's
	 * catch them all.  First make sure all the actions have completed,
	 * so the core sees every pending hardware error.
	 */
	SSYNC;
	SSYNC;

	SAVE_ALL_SYS
#ifdef CONFIG_FRAME_POINTER
	fp = 0;
#endif

	ANOMALY_283_315_WORKAROUND(p5, r7)

	/* Handle all stacked hardware errors.
	 * To make sure we don't hang forever, only do it 10 times.
	 */
	R0 = 0;
	R2 = 10;
1:
	P0.L = LO(ILAT);
	P0.H = HI(ILAT);
	R1 = [P0];
	CC = BITTST(R1, EVT_IVHW_P);
	IF ! CC JUMP 2f;
	/* OK, a hardware error is pending - clear it (ILAT is
	 * write-1-to-clear, so write the EVT_IVHW mask, not the bit
	 * position).
	 */
	R1 = EVT_IVHW;
	[P0] = R1;
	R0 += 1;
	CC = R0 == R2;	/* compare the loop counter, not the mask */
	if CC JUMP 2f;
	JUMP 1b;
2:
	/* We are going to dump something out, so make sure we print IPEND properly */
	p2.l = lo(IPEND);
	p2.h = hi(IPEND);
	r0 = [p2];
	[sp + PT_IPEND] = r0;

	/* set the EXCAUSE to HWERR for trap_c */
	r0 = [sp + PT_SEQSTAT];
	R1.L = LO(VEC_HWERR);
	R1.H = HI(VEC_HWERR);
	R0 = R0 | R1;
	[sp + PT_SEQSTAT] = R0;

	r0 = sp;	/* stack frame pt_regs pointer argument ==> r0 */
	SP += -12;
	pseudo_long_call _trap_c, p5;
	SP += 12;

#ifdef EBIU_ERRMST
	/* make sure EBIU_ERRMST is clear */
	p0.l = LO(EBIU_ERRMST);
	p0.h = HI(EBIU_ERRMST);
	r0.l = (CORE_ERROR | CORE_MERROR);
	w[p0] = r0.l;
#endif

	pseudo_long_call _ret_from_exception, p2;

.Lcommon_restore_all_sys:
	RESTORE_ALL_SYS
	rti;
ENDPROC(_evt_ivhw)

/* Interrupt routine for evt2 (NMI).
 * For inner circle type details, please see:
 * http://docs.blackfin.uclinux.org/doku.php?id=linux-kernel:nmi
 */
ENTRY(_evt_nmi)
#ifndef CONFIG_NMI_WATCHDOG
.weak _evt_nmi
#else
	/* No need to take account of CPLBs; this handler will not return. */
	SAVE_ALL_SYS
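	/* Pass the pt_regs pointer to _do_nmi in R0, and record RETN (the
	 * address the NMI would return to) in the PC slot for reporting.
	 */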
	r0 = sp;
	r1 = retn;
	[sp + PT_PC] = r1;
	trace_buffer_save(p4, r5);

	ANOMALY_283_315_WORKAROUND(p4, r5)

	SP += -12;
	call _do_nmi;
	SP += 12;
1:
	jump 1b;
#endif
	rtn;
ENDPROC(_evt_nmi)

/* interrupt routine for core timer - 6 */
ENTRY(_evt_timer)
	TIMER_INTERRUPT_ENTRY(EVT_IVTMR_P)

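/* The INTERRUPT_ENTRY()/TIMER_INTERRUPT_ENTRY() macros (asm/entry.h) push the
 * interrupted scratch state, load the vector number into R0, and jump to
 * __common_int_entry above.
 */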
/* interrupt routine for evt7 - 7 */
ENTRY(_evt_evt7)
	INTERRUPT_ENTRY(EVT_IVG7_P)
ENTRY(_evt_evt8)
	INTERRUPT_ENTRY(EVT_IVG8_P)
ENTRY(_evt_evt9)
	INTERRUPT_ENTRY(EVT_IVG9_P)
ENTRY(_evt_evt10)
	INTERRUPT_ENTRY(EVT_IVG10_P)
ENTRY(_evt_evt11)
	INTERRUPT_ENTRY(EVT_IVG11_P)
ENTRY(_evt_evt12)
	INTERRUPT_ENTRY(EVT_IVG12_P)
ENTRY(_evt_evt13)
	INTERRUPT_ENTRY(EVT_IVG13_P)

/* interrupt routine for system_call - 15 */
ENTRY(_evt_system_call)
	SAVE_CONTEXT_SYSCALL
#ifdef CONFIG_FRAME_POINTER
	fp = 0;
#endif
	pseudo_long_call _system_call, p2;
	jump .Lcommon_restore_context;
ENDPROC(_evt_system_call)

#ifdef CONFIG_IPIPE
/*
 * __ipipe_call_irqtail: lowers the current priority level to EVT15
 * before running a user-defined routine, then raises the priority
 * level to EVT14 to prepare the caller for a normal interrupt
 * return through RTI.
 *
 * We currently use this feature on two occasions:
 *
 * - before branching to __ipipe_irq_tail_hook as requested by a high
 *   priority domain (e.g. Xenomai) after the pipeline delivered an
 *   interrupt, in order to start its rescheduling procedure: we may
 *   not switch tasks while IRQ levels are nested on the Blackfin, so
 *   we have to fake an interrupt return in order to reschedule
 *   immediately.
 *
 * - before branching to __ipipe_sync_root(), in order to play any
 *   interrupts pending for the root domain (i.e. the Linux kernel).
 *   This lowers the core priority level enough that Linux IRQ handlers
 *   never delay interrupts handled by high priority domains; we defer
 *   those handlers until this point instead. This is a substitute for
 *   using a threaded interrupt model for the Linux kernel.
 *
 * r0: address of user-defined routine
 * context: caller must have preempted EVT15, hw interrupts must be off.
 */
ENTRY(___ipipe_call_irqtail)
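	/* Point RETI at the label below and execute RTI: this "returns" into
	 * our own code, dropping out of the current interrupt level into the
	 * preempted EVT15 context (see the constraints above).
	 */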
	p0 = r0;
	r0.l = 1f;
	r0.h = 1f;
	reti = r0;
	rti;
1:
	[--sp] = rets;
	[--sp] = ( r7:4, p5:3 );
	sp += -12;
	call (p0);
	sp += 12;
	( r7:4, p5:3 ) = [sp++];
	rets = [sp++];

#ifdef CONFIG_DEBUG_HWERR
	/* enable irq14 & hwerr interrupt, until we transition to _evt_evt14 */
	r0 = (EVT_IVG14 | EVT_IVHW | \
		EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
#else
	/* Only enable irq14 interrupt, until we transition to _evt_evt14 */
	r0 = (EVT_IVG14 | \
		EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
#endif
	sti r0;
	raise 14;		/* Branches to _evt_evt14 */
2:
	jump 2b;		/* Likely paranoid. */
ENDPROC(___ipipe_call_irqtail)

#endif /* CONFIG_IPIPE */