/* -*- mode: asm -*-
 *
 *  linux/arch/m68k/kernel/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file README.legal in the main directory of this archive
 * for more details.
 *
 * Linux/m68k support by Hamish Macdonald
 *
 * 68060 fixes by Jesper Skov
 *
 */

/*
 * entry.S  contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 */

/*
 * 12/03/96 Jes: Currently we only support m68k single-cpu systems, so
 *               all pointers that used to be 'current' are now entry
 *               number 0 in the 'current_set' list.
 *
 *  6/05/00 RZ:  added writeback completion after return from sighandler
 *               for 68040
 */
#include <linux/linkage.h>
#include <asm/errno.h>
#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/traps.h>
#include <asm/unistd.h>
#include <asm/asm-offsets.h>
#include <asm/entry.h>

| Entry points referenced from C code and from the exception-vector setup.
.globl system_call, buserr, trap, resume
.globl sys_call_table
.globl __sys_fork, __sys_clone, __sys_vfork
.globl ret_from_interrupt, bad_interrupt
.globl auto_irqhandler_fixup
.globl user_irqvec_fixup

.text
| fork(2) glue: the C handler expects the extra callee-saved registers
| on the stack, so push a switch-stack frame around the call and drop
| it (24 bytes, judging by the lea) before returning to the exit path.
ENTRY(__sys_fork)
	SAVE_SWITCH_STACK
	jbsr	sys_fork
	lea	%sp@(24),%sp		| pop the switch-stack frame
	rts

| clone(2) glue: like fork, but m68k_clone() also gets a pointer to the
| register frame above the switch stack (pushed below), hence 28 bytes
| (frame + argument) to pop afterwards.
ENTRY(__sys_clone)
	SAVE_SWITCH_STACK
	pea	%sp@(SWITCH_STACK_SIZE)	| arg: pointer to the saved pt_regs
	jbsr	m68k_clone
	lea	%sp@(28),%sp		| pop argument + switch-stack frame
	rts

| vfork(2) glue: same frame handling as __sys_fork above.
ENTRY(__sys_vfork)
	SAVE_SWITCH_STACK
	jbsr	sys_vfork
	lea	%sp@(24),%sp		| pop the switch-stack frame
	rts

| sigreturn(2): do_sigreturn() may rewrite the saved registers, so give
| it a full switch-stack frame and restore from it afterwards.
ENTRY(sys_sigreturn)
	SAVE_SWITCH_STACK
	jbsr	do_sigreturn
	RESTORE_SWITCH_STACK
	rts

| rt_sigreturn(2): identical frame handling to sys_sigreturn above.
ENTRY(sys_rt_sigreturn)
	SAVE_SWITCH_STACK
	jbsr	do_rt_sigreturn
	RESTORE_SWITCH_STACK
	rts

| Bus-error exception: save everything and hand the exception stack
| frame to the C handler buserr_c().
ENTRY(buserr)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%sp,%sp@-		| stack frame pointer argument
	jbsr	buserr_c
	addql	#4,%sp
	jra	ret_from_exception

| Generic processor exception: save everything and dispatch the frame
| to the C handler trap_c().
ENTRY(trap)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%sp,%sp@-		| stack frame pointer argument
	jbsr	trap_c
	addql	#4,%sp
	jra	ret_from_exception

	| After a fork we jump here directly from resume,
	| so that %d1 contains the previous task
	| schedule_tail now used regardless of CONFIG_SMP
ENTRY(ret_from_fork)
	movel	%d1,%sp@-		| schedule_tail(prev)
	jsr	schedule_tail
	addql	#4,%sp
	jra	ret_from_exception

| First return of a freshly created kernel thread (reached from resume,
| %d1 = previous task, like ret_from_fork).
ENTRY(ret_from_kernel_thread)
	| a3 contains the kernel thread payload, d7 - its argument
	movel	%d1,%sp@-		| finish the switch: schedule_tail(prev)
	jsr	schedule_tail
	movel	%d7,(%sp)		| reuse the slot as the payload's argument
	jsr	%a3@			| call the thread function
	addql	#4,%sp
	jra	ret_from_exception

#if defined(CONFIG_COLDFIRE) || !defined(CONFIG_MMU)

#ifdef TRAP_DBG_INTERRUPT

| Debug interrupt (ColdFire/nommu): pass the frame to the C handler.
.globl dbginterrupt
ENTRY(dbginterrupt)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%sp,%sp@-		/* stack frame pointer argument */
	jsr	dbginterrupt_c
	addql	#4,%sp
	jra	ret_from_exception
#endif

| Reschedule request: record the frame base via set_esp0(), then
| tail-call schedule() with ret_from_exception as its return address.
ENTRY(reschedule)
	/* save top of frame */
	pea	%sp@
	jbsr	set_esp0
	addql	#4,%sp
	pea	ret_from_exception	| schedule() "returns" to ret_from_exception
	jmp	schedule

| Userspace trampoline: issues sigreturn(2) when a signal handler returns.
ENTRY(ret_from_user_signal)
	moveq #__NR_sigreturn,%d0
	trap #0

| Userspace trampoline: issues rt_sigreturn(2) after an RT signal handler.
ENTRY(ret_from_user_rt_signal)
	movel #__NR_rt_sigreturn,%d0
	trap #0

#else

| Syscall entry with tracing: report entry to the tracer, then reload
| and revalidate the syscall number (the tracer may have changed it)
| before redoing the dispatch.
do_trace_entry:
	movel	#-ENOSYS,%sp@(PT_OFF_D0)| needed for strace
	subql	#4,%sp			| dummy return address slot
	SAVE_SWITCH_STACK
	jbsr	syscall_trace
	RESTORE_SWITCH_STACK
	addql	#4,%sp
	movel	%sp@(PT_OFF_ORIG_D0),%d0	| reload (possibly rewritten) nr
	cmpl	#NR_syscalls,%d0
	jcs	syscall
badsys:
	movel	#-ENOSYS,%sp@(PT_OFF_D0)	| invalid syscall number
	jra	ret_from_syscall

| Report syscall exit to the tracer, then take the common exit path.
do_trace_exit:
	subql	#4,%sp			| dummy return address slot
	SAVE_SWITCH_STACK
	jbsr	syscall_trace
	RESTORE_SWITCH_STACK
	addql	#4,%sp
	jra	.Lret_from_exception

| Return path after signal delivery set up a handler frame.
ENTRY(ret_from_signal)
	movel	%curptr@(TASK_STACK),%a1
	tstb	%a1@(TINFO_FLAGS+2)	| top bit of this byte = syscall-trace flag
	jge	1f			| not traced: skip the tracer callback
	jbsr	syscall_trace
1:	RESTORE_SWITCH_STACK
	addql	#4,%sp
/* on 68040 complete pending writebacks if any */
#ifdef CONFIG_M68040
	bfextu	%sp@(PT_OFF_FORMATVEC){#0,#4},%d0	| frame format nibble
	subql	#7,%d0				| bus error frame ?
	jbne	1f
	movel	%sp,%sp@-
	jbsr	berr_040cleanup
	addql	#4,%sp
1:
#endif
	jra	.Lret_from_exception

| Trap #0 entry point: dispatch a system call; %d0 holds the number.
ENTRY(system_call)
	SAVE_ALL_SYS

	GET_CURRENT(%d1)		| %d1 = thread_info; %curptr = task — verify against entry.h
	movel	%d1,%a1

	| save top of frame
	movel	%sp,%curptr@(TASK_THREAD+THREAD_ESP0)

	| syscall trace?
	tstb	%a1@(TINFO_FLAGS+2)	| top bit = syscall-trace flag
	jmi	do_trace_entry
	cmpl	#NR_syscalls,%d0	| range-check the syscall number
	jcc	badsys
syscall:
	jbsr	@(sys_call_table,%d0:l:4)@(0)	| memory-indirect call via the table
	movel	%d0,%sp@(PT_OFF_D0)	| save the return value
ret_from_syscall:
	|oriw	#0x0700,%sr
	movel	%curptr@(TASK_STACK),%a1
	movew	%a1@(TINFO_FLAGS+2),%d0	| any exit work pending?
	jne	syscall_exit_work
1:	RESTORE_ALL

| Slow syscall exit: shift the work flags (in %d0) into the condition
| codes one group at a time and dispatch accordingly.
syscall_exit_work:
	btst	#5,%sp@(PT_OFF_SR)	| check if returning to kernel
	bnes	1b			| if so, skip resched, signals
	lslw	#1,%d0			| carry = syscall-trace flag
	jcs	do_trace_exit
	jmi	do_delayed_trace	| sign = delayed-trace flag
	lslw	#8,%d0			| low byte left: signal/resched work
	jne	do_signal_return
	pea	resume_userspace	| schedule() returns to resume_userspace
	jra	schedule


| Common exception exit: when returning to user mode, re-enable
| interrupts and check for pending work first.
ENTRY(ret_from_exception)
.Lret_from_exception:
	btst	#5,%sp@(PT_OFF_SR)	| check if returning to kernel
	bnes	1f			| if so, skip resched, signals
	| only allow interrupts when we are really the last one on the
	| kernel stack, otherwise stack overflow can occur during
	| heavy interrupt load
	andw	#ALLOWINT,%sr

resume_userspace:
	movel	%curptr@(TASK_STACK),%a1
	moveb	%a1@(TINFO_FLAGS+3),%d0	| low work-flag byte (signals/resched)
	jne	exit_work
1:	RESTORE_ALL

| Pending work found before returning to user mode.
exit_work:
	| save top of frame
	movel	%sp,%curptr@(TASK_THREAD+THREAD_ESP0)
	lslb	#1,%d0			| shift out the resched bit
	jne	do_signal_return	| signal work remains?
	pea	resume_userspace	| schedule() returns to resume_userspace
	jra	schedule


| Deliver pending signals / notify-resume work, then retry the exit path.
do_signal_return:
	|andw	#ALLOWINT,%sr
	subql	#4,%sp			| dummy return address
	SAVE_SWITCH_STACK
	pea	%sp@(SWITCH_STACK_SIZE)	| arg: pointer to the saved pt_regs
	bsrl	do_notify_resume
	addql	#4,%sp
	RESTORE_SWITCH_STACK
	addql	#4,%sp			| drop the dummy return address
	jbra	resume_userspace

| A single-step trap was deferred across the syscall: clear the trace
| bit in the saved SR and raise SIGTRAP on the current task ourselves.
do_delayed_trace:
	bclr	#7,%sp@(PT_OFF_SR)	| clear trace bit in SR
	pea	1			| send SIGTRAP
	movel	%curptr,%sp@-
	pea	LSIGTRAP
	jbsr	send_sig		| send_sig(SIGTRAP, current, 1)
	addql	#8,%sp
	addql	#4,%sp
	jbra	resume_userspace


/* This is the main interrupt handler for autovector interrupts */

ENTRY(auto_inthandler)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%d0,%a1
	addqb	#1,%a1@(TINFO_PREEMPT+1)	| bump interrupt nesting count
					|  put exception # in d0
	bfextu	%sp@(PT_OFF_FORMATVEC){#4,#10},%d0
	subw	#VEC_SPUR,%d0		| vector number -> IRQ number

	movel	%sp,%sp@-
	movel	%d0,%sp@-		|  put vector # on stack
auto_irqhandler_fixup = . + 2		| patch point: the jsr's address operand
	jsr	do_IRQ			|  process the IRQ
	addql	#8,%sp			|  pop parameters off stack

ret_from_interrupt:
	movel	%curptr@(TASK_STACK),%a1
	subqb	#1,%a1@(TINFO_PREEMPT+1)	| drop interrupt nesting count
	jeq	ret_from_last_interrupt	| outermost interrupt: extra checks
2:	RESTORE_ALL

	ALIGN
| Taken when the outermost interrupt unwinds: run softirqs if any are
| pending, but only if the interrupted context had interrupts enabled.
ret_from_last_interrupt:
	moveq	#(~ALLOWINT>>8)&0xff,%d0
	andb	%sp@(PT_OFF_SR),%d0	| interrupted context had IRQs masked?
	jne	2b			| then just return to it

	/* check if we need to do software interrupts */
	tstl	irq_stat+CPUSTAT_SOFTIRQ_PENDING
	jeq	.Lret_from_exception
	pea	ret_from_exception	| do_softirq() returns to ret_from_exception
	jra	do_softirq

/* Handler for user defined interrupt vectors */

ENTRY(user_inthandler)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%d0,%a1
	addqb	#1,%a1@(TINFO_PREEMPT+1)	| bump interrupt nesting count
					|  put exception # in d0
	bfextu	%sp@(PT_OFF_FORMATVEC){#4,#10},%d0
user_irqvec_fixup = . + 2		| patch point: the subw's immediate
	subw	#VEC_USER,%d0

	movel	%sp,%sp@-
	movel	%d0,%sp@-		|  put vector # on stack
	jsr	do_IRQ			|  process the IRQ
	addql	#8,%sp			|  pop parameters off stack

	movel	%curptr@(TASK_STACK),%a1
	subqb	#1,%a1@(TINFO_PREEMPT+1)	| drop interrupt nesting count
	jeq	ret_from_last_interrupt
	RESTORE_ALL

/* Handler for uninitialized and spurious interrupts */

ENTRY(bad_inthandler)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%d0,%a1
	addqb	#1,%a1@(TINFO_PREEMPT+1)	| bump interrupt nesting count

	movel	%sp,%sp@-		| stack frame pointer argument
	jsr	handle_badint
	addql	#4,%sp

	movel	%curptr@(TASK_STACK),%a1
	subqb	#1,%a1@(TINFO_PREEMPT+1)	| drop interrupt nesting count
	jeq	ret_from_last_interrupt
	RESTORE_ALL


| Context switch (switch_to): prev task in %a0, next task in %a1;
| returns with the previous task in %d1 (consumed by ret_from_fork /
| ret_from_kernel_thread for schedule_tail).
resume:
	/*
	 * Beware - when entering resume, prev (the current task) is
	 * in a0, next (the new task) is in a1,so don't change these
	 * registers until their contents are no longer needed.
	 */

	/* save sr */
	movew	%sr,%a0@(TASK_THREAD+THREAD_SR)

	/* save fs (sfc,%dfc) (may be pointing to kernel memory) */
	movec	%sfc,%d0
	movew	%d0,%a0@(TASK_THREAD+THREAD_FS)

	/* save usp */
	/* it is better to use a movel here instead of a movew 8*) */
	movec	%usp,%d0
	movel	%d0,%a0@(TASK_THREAD+THREAD_USP)

	/* save non-scratch registers on stack */
	SAVE_SWITCH_STACK

	/* save current kernel stack pointer */
	movel	%sp,%a0@(TASK_THREAD+THREAD_KSP)

	/* save floating point context */
#ifndef CONFIG_M68KFPU_EMU_ONLY
#ifdef CONFIG_M68KFPU_EMU
	tstl	m68k_fputype		| no hardware FPU: skip the save entirely
	jeq	3f
#endif
	fsave	%a0@(TASK_THREAD+THREAD_FPSTATE)

#if defined(CONFIG_M68060)
#if !defined(CPU_M68060_ONLY)
	btst	#3,m68k_cputype+3	| running on a 68060?
	beqs	1f
#endif
	/* The 060 FPU keeps status in bits 15-8 of the first longword */
	tstb	%a0@(TASK_THREAD+THREAD_FPSTATE+2)
	jeq	3f			| null frame: no live FP state to save
#if !defined(CPU_M68060_ONLY)
	jra	2f
#endif
#endif /* CONFIG_M68060 */
#if !defined(CPU_M68060_ONLY)
1:	tstb	%a0@(TASK_THREAD+THREAD_FPSTATE)
	jeq	3f			| null frame: no live FP state to save
#endif
2:	fmovemx	%fp0-%fp7,%a0@(TASK_THREAD+THREAD_FPREG)
	fmoveml	%fpcr/%fpsr/%fpiar,%a0@(TASK_THREAD+THREAD_FPCNTL)
3:
#endif	/* CONFIG_M68KFPU_EMU_ONLY */
	/* Return previous task in %d1 */
	movel	%curptr,%d1

	/* switch to new task (a1 contains new task) */
	movel	%a1,%curptr

	/* restore floating point context */
#ifndef CONFIG_M68KFPU_EMU_ONLY
#ifdef CONFIG_M68KFPU_EMU
	tstl	m68k_fputype		| no hardware FPU: skip the restore
	jeq	4f
#endif
#if defined(CONFIG_M68060)
#if !defined(CPU_M68060_ONLY)
	btst	#3,m68k_cputype+3	| running on a 68060?
	beqs	1f
#endif
	/* The 060 FPU keeps status in bits 15-8 of the first longword */
	tstb	%a1@(TASK_THREAD+THREAD_FPSTATE+2)
	jeq	3f			| null frame: nothing to reload
#if !defined(CPU_M68060_ONLY)
	jra	2f
#endif
#endif /* CONFIG_M68060 */
#if !defined(CPU_M68060_ONLY)
1:	tstb	%a1@(TASK_THREAD+THREAD_FPSTATE)
	jeq	3f			| null frame: nothing to reload
#endif
2:	fmovemx	%a1@(TASK_THREAD+THREAD_FPREG),%fp0-%fp7
	fmoveml	%a1@(TASK_THREAD+THREAD_FPCNTL),%fpcr/%fpsr/%fpiar
3:	frestore %a1@(TASK_THREAD+THREAD_FPSTATE)
4:
#endif	/* CONFIG_M68KFPU_EMU_ONLY */

	/* restore the kernel stack pointer */
	movel	%a1@(TASK_THREAD+THREAD_KSP),%sp

	/* restore non-scratch registers */
	RESTORE_SWITCH_STACK

	/* restore user stack pointer */
	movel	%a1@(TASK_THREAD+THREAD_USP),%a0
	movel	%a0,%usp

	/* restore fs (sfc,%dfc) */
	movew	%a1@(TASK_THREAD+THREAD_FS),%a0
	movec	%a0,%sfc
	movec	%a0,%dfc

	/* restore status register */
	movew	%a1@(TASK_THREAD+THREAD_SR),%sr

	rts

#endif /* CONFIG_MMU && !CONFIG_COLDFIRE */
