linux/arch/blackfin/mach-common/interrupt.S
/*
 * Interrupt Entries
 *
 * Copyright 2005-2009 Analog Devices Inc.
 *               D. Jeff Dionne <jeff@ryeham.ee.ryerson.ca>
 *               Kenneth Albanowski <kjahds@kjahds.com>
 *
 * Licensed under the GPL-2 or later.
 */

#include <asm/blackfin.h>
#include <mach/irq.h>
#include <linux/linkage.h>
#include <asm/entry.h>
#include <asm/asm-offsets.h>
#include <asm/trace.h>
#include <asm/traps.h>
#include <asm/thread_info.h>

#include <asm/context.S>

.extern _ret_from_exception

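/* With CONFIG_I_ENTRY_L1, the interrupt entry paths below are placed in
 * on-chip L1 instruction SRAM to minimize interrupt latency; otherwise
 * they live in the normal .text section.
 */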
#ifdef CONFIG_I_ENTRY_L1
.section .l1.text
#else
.text
#endif

.align 4        /* just in case */

/* Common interrupt entry code.  First we do CLI, then push
 * RETI, to keep interrupts disabled, but to allow this state to be changed
 * by local_bh_enable.
 * R0 contains the interrupt number, while R1 may contain the value of IPEND,
 * or garbage if IPEND won't be needed by the ISR.  */
__common_int_entry:
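        /* The pushes below build a struct pt_regs frame on the kernel
         * stack; the push order must mirror the pt_regs layout so that
         * the PT_* constants from asm-offsets (used further down, e.g.
         * PT_IPEND and PT_SEQSTAT) index the right slots.
         */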
        [--sp] = fp;
        [--sp] = usp;

        [--sp] = i0;
        [--sp] = i1;
        [--sp] = i2;
        [--sp] = i3;

        [--sp] = m0;
        [--sp] = m1;
        [--sp] = m2;
        [--sp] = m3;

        [--sp] = l0;
        [--sp] = l1;
        [--sp] = l2;
        [--sp] = l3;

        [--sp] = b0;
        [--sp] = b1;
        [--sp] = b2;
        [--sp] = b3;
        [--sp] = a0.x;
        [--sp] = a0.w;
        [--sp] = a1.x;
        [--sp] = a1.w;

        [--sp] = LC0;
        [--sp] = LC1;
        [--sp] = LT0;
        [--sp] = LT1;
        [--sp] = LB0;
        [--sp] = LB1;

        [--sp] = ASTAT;

        [--sp] = r0;    /* Skip reserved */
        [--sp] = RETS;
        r2 = RETI;
        [--sp] = r2;
        [--sp] = RETX;
        [--sp] = RETN;
        [--sp] = RETE;
        [--sp] = SEQSTAT;
        [--sp] = r1;    /* IPEND - R1 may or may not be set up before jumping here. */

        /* Switch to other method of keeping interrupts disabled.  */
#ifdef CONFIG_DEBUG_HWERR
        r1 = 0x3f;
        sti r1;
#else
        cli r1;
#endif
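        /* Note: with CONFIG_DEBUG_HWERR, the STI mask (0x3f) leaves the
         * low core events (emulation, reset, NMI, exceptions) and the
         * hardware error event (IVHW) enabled, so hardware errors are
         * reported as they happen; plain CLI would leave them latched
         * behind the masked interrupts.
         */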
        [--sp] = RETI;  /* orig_pc */
        /* Clear all L registers.  */
        r1 = 0 (x);
        l0 = r1;
        l1 = r1;
        l2 = r1;
        l3 = r1;
#ifdef CONFIG_FRAME_POINTER
        fp = 0;
#endif

        ANOMALY_283_315_WORKAROUND(p5, r7)
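        /* ANOMALY_283_315_WORKAROUND issues a speculatively killed MMR
         * read to work around silicon anomalies 05000283/05000315; it
         * expands to nothing on parts without those anomalies.
         */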

        r1 = sp;
        SP += -12;
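        /* r0 still holds the IRQ number and r1 now points at the saved
         * pt_regs; the 12-byte decrement reserves the outgoing-argument
         * area the Blackfin C calling convention expects a caller to
         * provide before calling into C code.
         */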
#ifdef CONFIG_IPIPE
        call ___ipipe_grab_irq;
        SP += 12;
        cc = r0 == 0;
        if cc jump .Lcommon_restore_context;
#else /* CONFIG_IPIPE */
        call _do_irq;
        SP += 12;
#endif /* CONFIG_IPIPE */
        call _return_from_int;
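        /* On the I-pipe path, a zero return from ___ipipe_grab_irq means
         * there is nothing for the root domain to do yet, so we skip
         * _return_from_int.  Either way we end up in RESTORE_CONTEXT
         * (asm/context.S), which pops the pt_regs frame built above
         * before RTI resumes the interrupted context.
         */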
.Lcommon_restore_context:
        RESTORE_CONTEXT
        rti;

/* interrupt routine for ivhw - 5 */
ENTRY(_evt_ivhw)
        /* A single action can kick off multiple memory transactions
         * (like a cache line fetch), which can in turn raise multiple
         * hardware errors; let's catch them all.  First make sure all
         * the actions have completed, so the core sees the hardware
         * errors.
         */
        SSYNC;
        SSYNC;

        SAVE_ALL_SYS
#ifdef CONFIG_FRAME_POINTER
        fp = 0;
#endif

        ANOMALY_283_315_WORKAROUND(p5, r7)

        /* Handle all stacked hardware errors.
         * To make sure we don't hang forever, only do it 10 times.
         */
        R0 = 0;
        R2 = 10;
1:
        P0.L = LO(ILAT);
        P0.H = HI(ILAT);
        R1 = [P0];
        CC = BITTST(R1, EVT_IVHW_P);
        IF ! CC JUMP 2f;
        /* OK, a hardware error is pending - clear the latched bit */
        R1 = EVT_IVHW;
        [P0] = R1;
        /* Bound the loop: give up after clearing 10 stacked errors */
        R0 += 1;
        CC = R0 == R2;
        if CC JUMP 2f;
        JUMP 1b;
2:
        /* We are going to dump something out, so make sure we print IPEND properly */
        p2.l = lo(IPEND);
        p2.h = hi(IPEND);
        r0 = [p2];
        [sp + PT_IPEND] = r0;

        /* set the EXCAUSE to HWERR for trap_c */
        r0 = [sp + PT_SEQSTAT];
        R1.L = LO(VEC_HWERR);
        R1.H = HI(VEC_HWERR);
        R0 = R0 | R1;
        [sp + PT_SEQSTAT] = R0;
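        /* trap_c() decodes the cause from the EXCAUSE field of the saved
         * SEQSTAT, so OR-ing in VEC_HWERR routes this hardware error
         * through the same reporting path as a core exception.
         */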

        r0 = sp;        /* stack frame pt_regs pointer argument ==> r0 */
        SP += -12;
        call _trap_c;
        SP += 12;

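        /* On parts that have an EBIU_ERRMST register, the sticky error
         * status bits are cleared by writing 1s back to them; clearing
         * CORE_ERROR/CORE_MERROR here re-arms the EBIU so the next bus
         * error is latched cleanly.
         */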
#ifdef EBIU_ERRMST
        /* make sure EBIU_ERRMST is clear */
        p0.l = LO(EBIU_ERRMST);
        p0.h = HI(EBIU_ERRMST);
        r0.l = (CORE_ERROR | CORE_MERROR);
        w[p0] = r0.l;
#endif

        call _ret_from_exception;

.Lcommon_restore_all_sys:
        RESTORE_ALL_SYS
        rti;
ENDPROC(_evt_ivhw)

/* Interrupt routine for evt2 (NMI).
 * We don't actually use this, so just return.
 * For the full background, please see:
 * http://docs.blackfin.uclinux.org/doku.php?id=linux-kernel:nmi
 */
ENTRY(_evt_nmi)
.weak _evt_nmi
        rtn;
ENDPROC(_evt_nmi)

/* interrupt routine for core timer - 6 */
ENTRY(_evt_timer)
        TIMER_INTERRUPT_ENTRY(EVT_IVTMR_P)

/* interrupt routines for evt7 through evt13 - 7..13 */
ENTRY(_evt_evt7)
        INTERRUPT_ENTRY(EVT_IVG7_P)
ENTRY(_evt_evt8)
        INTERRUPT_ENTRY(EVT_IVG8_P)
ENTRY(_evt_evt9)
        INTERRUPT_ENTRY(EVT_IVG9_P)
ENTRY(_evt_evt10)
        INTERRUPT_ENTRY(EVT_IVG10_P)
ENTRY(_evt_evt11)
        INTERRUPT_ENTRY(EVT_IVG11_P)
ENTRY(_evt_evt12)
        INTERRUPT_ENTRY(EVT_IVG12_P)
ENTRY(_evt_evt13)
        INTERRUPT_ENTRY(EVT_IVG13_P)
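
/* Each of the entries above expands INTERRUPT_ENTRY(N) (or, for the core
 * timer, TIMER_INTERRUPT_ENTRY) from asm/entry.h into a short stub that
 * saves the scratch state, loads the event's bit position N into R0 (and
 * IPEND into R1 where needed), then transfers to __common_int_entry.
 */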

/* interrupt routine for system_call - 15 */
ENTRY(_evt_system_call)
        SAVE_CONTEXT_SYSCALL
#ifdef CONFIG_FRAME_POINTER
        fp = 0;
#endif
        call _system_call;
        jump .Lcommon_restore_context;
ENDPROC(_evt_system_call)

#ifdef CONFIG_IPIPE
/*
 * __ipipe_call_irqtail: lowers the current priority level to EVT15
 * before running a user-defined routine, then raises the priority
 * level to EVT14 to prepare the caller for a normal interrupt
 * return through RTI.
 *
 * We currently use this facility on two occasions:
 *
 * - to branch to __ipipe_irq_tail_hook when a high priority domain
 *   (such as Xenomai) requests it after the pipeline has delivered
 *   an interrupt, in order to start its rescheduling procedure.
 *   Since we may not switch tasks while IRQ levels are nested on
 *   the Blackfin, we have to fake an interrupt return so that we
 *   may reschedule immediately.
 *
 * - to branch to sync_root_irqs, in order to replay any interrupts
 *   pending for the root domain (i.e. the Linux kernel). This lowers
 *   the core priority level enough that Linux IRQ handlers can
 *   never delay interrupts handled by high priority domains; we defer
 *   those handlers until this point instead. This is a substitute
 *   for using a threaded interrupt model for the Linux kernel.
 *
 * r0: address of user-defined routine
 * context: caller must have preempted EVT15, hw interrupts must be off.
 */
ENTRY(___ipipe_call_irqtail)
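        /* Point RETI at the local label 1 below: the RTI then "returns"
         * into that code at the preempted EVT15 priority, which is how
         * the drop to the lower event level described above is achieved.
         */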
        p0 = r0;
        r0.l = 1f;
        r0.h = 1f;
        reti = r0;
        rti;
1:
        [--sp] = rets;
        [--sp] = ( r7:4, p5:3 );
        sp += -12;
        call (p0);
        sp += 12;
        ( r7:4, p5:3 ) = [sp++];
        rets = [sp++];

#ifdef CONFIG_DEBUG_HWERR
        /* enable irq14 & hwerr interrupt, until we transition to _evt_evt14 */
        r0 = (EVT_IVG14 | EVT_IVHW | \
                EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
#else
        /* Only enable irq14 interrupt, until we transition to _evt_evt14 */
        r0 = (EVT_IVG14 | \
                EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
#endif
        sti r0;
        raise 14;               /* Branches to _evt_evt14 */
2:
        jump 2b;                /* Likely paranoid. */
ENDPROC(___ipipe_call_irqtail)

#endif /* CONFIG_IPIPE */