/* linux/arch/sparc/kernel/rtrap_64.S */
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * rtrap.S: Preparing for return from trap on Sparc V9.
 *
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

  10#include <asm/asi.h>
  11#include <asm/pstate.h>
  12#include <asm/ptrace.h>
  13#include <asm/spitfire.h>
  14#include <asm/head.h>
  15#include <asm/visasm.h>
  16#include <asm/processor.h>
  17
  18#ifdef CONFIG_CONTEXT_TRACKING
  19# define SCHEDULE_USER schedule_user
  20#else
  21# define SCHEDULE_USER schedule
  22#endif
  23
  24                .text
  25                .align                  32
  26__handle_preemption:
  27                call                    SCHEDULE_USER
  28661:             wrpr                   %g0, RTRAP_PSTATE, %pstate
  29                /* If userspace is using ADI, it could potentially pass
  30                 * a pointer with version tag embedded in it. To maintain
  31                 * the ADI security, we must re-enable PSTATE.mcde before
  32                 * we continue execution in the kernel for another thread.
  33                 */
  34                .section .sun_m7_1insn_patch, "ax"
  35                .word   661b
  36                 wrpr                   %g0, RTRAP_PSTATE|PSTATE_MCDE, %pstate
  37                .previous
  38                ba,pt                   %xcc, __handle_preemption_continue
  39                 wrpr                   %g0, RTRAP_PSTATE_IRQOFF, %pstate
  40
  41__handle_user_windows:
  42                call                    fault_in_user_windows
  43661:             wrpr                   %g0, RTRAP_PSTATE, %pstate
  44                /* If userspace is using ADI, it could potentially pass
  45                 * a pointer with version tag embedded in it. To maintain
  46                 * the ADI security, we must re-enable PSTATE.mcde before
  47                 * we continue execution in the kernel for another thread.
  48                 */
  49                .section .sun_m7_1insn_patch, "ax"
  50                .word   661b
  51                 wrpr                   %g0, RTRAP_PSTATE|PSTATE_MCDE, %pstate
  52                .previous
  53                ba,pt                   %xcc, __handle_preemption_continue
  54                 wrpr                   %g0, RTRAP_PSTATE_IRQOFF, %pstate
  55
  56__handle_userfpu:
  57                rd                      %fprs, %l5
  58                andcc                   %l5, FPRS_FEF, %g0
  59                sethi                   %hi(TSTATE_PEF), %o0
  60                be,a,pn                 %icc, __handle_userfpu_continue
  61                 andn                   %l1, %o0, %l1
  62                ba,a,pt                 %xcc, __handle_userfpu_continue
  63
  64__handle_signal:
  65                mov                     %l5, %o1
  66                add                     %sp, PTREGS_OFF, %o0
  67                mov                     %l0, %o2
  68                call                    do_notify_resume
  69661:             wrpr                   %g0, RTRAP_PSTATE, %pstate
  70                /* If userspace is using ADI, it could potentially pass
  71                 * a pointer with version tag embedded in it. To maintain
  72                 * the ADI security, we must re-enable PSTATE.mcde before
  73                 * we continue execution in the kernel for another thread.
  74                 */
  75                .section .sun_m7_1insn_patch, "ax"
  76                .word   661b
  77                 wrpr                   %g0, RTRAP_PSTATE|PSTATE_MCDE, %pstate
  78                .previous
  79                wrpr                    %g0, RTRAP_PSTATE_IRQOFF, %pstate
  80
  81                /* Signal delivery can modify pt_regs tstate, so we must
  82                 * reload it.
  83                 */
  84                ldx                     [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
  85                sethi                   %hi(0xf << 20), %l4
  86                and                     %l1, %l4, %l4
  87                andn                    %l1, %l4, %l1
  88                ba,pt                   %xcc, __handle_preemption_continue
  89                 srl                    %l4, 20, %l4
  90
  91                /* When returning from a NMI (%pil==15) interrupt we want to
  92                 * avoid running softirqs, doing IRQ tracing, preempting, etc.
  93                 */
  94                .globl                  rtrap_nmi
  95rtrap_nmi:      ldx                     [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
  96                sethi                   %hi(0xf << 20), %l4
  97                and                     %l1, %l4, %l4
  98                andn                    %l1, %l4, %l1
  99                srl                     %l4, 20, %l4
 100                ba,pt                   %xcc, rtrap_no_irq_enable
 101                nop
 102                /* Do not actually set the %pil here.  We will do that
 103                 * below after we clear PSTATE_IE in the %pstate register.
 104                 * If we re-enable interrupts here, we can recurse down
 105                 * the hardirq stack potentially endlessly, causing a
 106                 * stack overflow.
 107                 */
 108
 109                .align                  64
 110                .globl                  rtrap_irq, rtrap, irqsz_patchme, rtrap_xcall
 111rtrap_irq:
 112rtrap:
 113                /* mm/ultra.S:xcall_report_regs KNOWS about this load. */
 114                ldx                     [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
 115rtrap_xcall:
 116                sethi                   %hi(0xf << 20), %l4
 117                and                     %l1, %l4, %l4
 118                andn                    %l1, %l4, %l1
 119                srl                     %l4, 20, %l4
 120#ifdef CONFIG_TRACE_IRQFLAGS
 121                brnz,pn                 %l4, rtrap_no_irq_enable
 122                 nop
 123                call                    trace_hardirqs_on
 124                 nop
 125                /* Do not actually set the %pil here.  We will do that
 126                 * below after we clear PSTATE_IE in the %pstate register.
 127                 * If we re-enable interrupts here, we can recurse down
 128                 * the hardirq stack potentially endlessly, causing a
 129                 * stack overflow.
 130                 *
 131                 * It is tempting to put this test and trace_hardirqs_on
 132                 * call at the 'rt_continue' label, but that will not work
 133                 * as that path hits unconditionally and we do not want to
 134                 * execute this in NMI return paths, for example.
 135                 */
 136#endif
 137rtrap_no_irq_enable:
 138                andcc                   %l1, TSTATE_PRIV, %l3
 139                bne,pn                  %icc, to_kernel
 140                 nop
 141
 142                /* We must hold IRQs off and atomically test schedule+signal
 143                 * state, then hold them off all the way back to userspace.
 144                 * If we are returning to kernel, none of this matters.  Note
 145                 * that we are disabling interrupts via PSTATE_IE, not using
 146                 * %pil.
 147                 *
 148                 * If we do not do this, there is a window where we would do
 149                 * the tests, later the signal/resched event arrives but we do
 150                 * not process it since we are still in kernel mode.  It would
 151                 * take until the next local IRQ before the signal/resched
 152                 * event would be handled.
 153                 *
 154                 * This also means that if we have to deal with user
 155                 * windows, we have to redo all of these sched+signal checks
 156                 * with IRQs disabled.
 157                 */
 158to_user:        wrpr                    %g0, RTRAP_PSTATE_IRQOFF, %pstate
 159                wrpr                    0, %pil
 160__handle_preemption_continue:
 161                ldx                     [%g6 + TI_FLAGS], %l0
 162                sethi                   %hi(_TIF_USER_WORK_MASK), %o0
 163                or                      %o0, %lo(_TIF_USER_WORK_MASK), %o0
 164                andcc                   %l0, %o0, %g0
 165                sethi                   %hi(TSTATE_PEF), %o0
 166                be,pt                   %xcc, user_nowork
 167                 andcc                  %l1, %o0, %g0
 168                andcc                   %l0, _TIF_NEED_RESCHED, %g0
 169                bne,pn                  %xcc, __handle_preemption
 170                 andcc                  %l0, _TIF_DO_NOTIFY_RESUME_MASK, %g0
 171                bne,pn                  %xcc, __handle_signal
 172                 ldub                   [%g6 + TI_WSAVED], %o2
 173                brnz,pn                 %o2, __handle_user_windows
 174                 nop
 175                sethi                   %hi(TSTATE_PEF), %o0
 176                andcc                   %l1, %o0, %g0
 177
 178                /* This fpdepth clear is necessary for non-syscall rtraps only */
 179user_nowork:
 180                bne,pn                  %xcc, __handle_userfpu
 181                 stb                    %g0, [%g6 + TI_FPDEPTH]
 182__handle_userfpu_continue:
 183
 184rt_continue:    ldx                     [%sp + PTREGS_OFF + PT_V9_G1], %g1
 185                ldx                     [%sp + PTREGS_OFF + PT_V9_G2], %g2
 186
 187                ldx                     [%sp + PTREGS_OFF + PT_V9_G3], %g3
 188                ldx                     [%sp + PTREGS_OFF + PT_V9_G4], %g4
 189                ldx                     [%sp + PTREGS_OFF + PT_V9_G5], %g5
 190                brz,pt                  %l3, 1f
 191                mov                     %g6, %l2
 192
 193                /* Must do this before thread reg is clobbered below.  */
 194                LOAD_PER_CPU_BASE(%g5, %g6, %i0, %i1, %i2)
 1951:
 196                ldx                     [%sp + PTREGS_OFF + PT_V9_G6], %g6
 197                ldx                     [%sp + PTREGS_OFF + PT_V9_G7], %g7
 198
 199                /* Normal globals are restored, go to trap globals.  */
 200661:            wrpr                    %g0, RTRAP_PSTATE_AG_IRQOFF, %pstate
 201                nop
 202                .section                .sun4v_2insn_patch, "ax"
 203                .word                   661b
 204                wrpr                    %g0, RTRAP_PSTATE_IRQOFF, %pstate
 205                SET_GL(1)
 206                .previous
 207
 208                mov                     %l2, %g6
 209
 210                ldx                     [%sp + PTREGS_OFF + PT_V9_I0], %i0
 211                ldx                     [%sp + PTREGS_OFF + PT_V9_I1], %i1
 212
 213                ldx                     [%sp + PTREGS_OFF + PT_V9_I2], %i2
 214                ldx                     [%sp + PTREGS_OFF + PT_V9_I3], %i3
 215                ldx                     [%sp + PTREGS_OFF + PT_V9_I4], %i4
 216                ldx                     [%sp + PTREGS_OFF + PT_V9_I5], %i5
 217                ldx                     [%sp + PTREGS_OFF + PT_V9_I6], %i6
 218                ldx                     [%sp + PTREGS_OFF + PT_V9_I7], %i7
 219                ldx                     [%sp + PTREGS_OFF + PT_V9_TPC], %l2
 220                ldx                     [%sp + PTREGS_OFF + PT_V9_TNPC], %o2
 221
 222                ld                      [%sp + PTREGS_OFF + PT_V9_Y], %o3
 223                wr                      %o3, %g0, %y
 224                wrpr                    %l4, 0x0, %pil
 225                wrpr                    %g0, 0x1, %tl
 226                andn                    %l1, TSTATE_SYSCALL, %l1
 227                wrpr                    %l1, %g0, %tstate
 228                wrpr                    %l2, %g0, %tpc
 229                wrpr                    %o2, %g0, %tnpc
 230
 231                brnz,pn                 %l3, kern_rtt
 232                 mov                    PRIMARY_CONTEXT, %l7
 233
 234661:            ldxa                    [%l7 + %l7] ASI_DMMU, %l0
 235                .section                .sun4v_1insn_patch, "ax"
 236                .word                   661b
 237                ldxa                    [%l7 + %l7] ASI_MMU, %l0
 238                .previous
 239
 240                sethi                   %hi(sparc64_kern_pri_nuc_bits), %l1
 241                ldx                     [%l1 + %lo(sparc64_kern_pri_nuc_bits)], %l1
 242                or                      %l0, %l1, %l0
 243
 244661:            stxa                    %l0, [%l7] ASI_DMMU
 245                .section                .sun4v_1insn_patch, "ax"
 246                .word                   661b
 247                stxa                    %l0, [%l7] ASI_MMU
 248                .previous
 249
 250                sethi                   %hi(KERNBASE), %l7
 251                flush                   %l7
 252                rdpr                    %wstate, %l1
 253                rdpr                    %otherwin, %l2
 254                srl                     %l1, 3, %l1
 255
 256661:            wrpr                    %l2, %g0, %canrestore
 257                .section                .fast_win_ctrl_1insn_patch, "ax"
 258                .word                   661b
 259                .word                   0x89880000      ! normalw
 260                .previous
 261
 262                wrpr                    %l1, %g0, %wstate
 263                brnz,pt                 %l2, user_rtt_restore
 264661:             wrpr                   %g0, %g0, %otherwin
 265                .section                .fast_win_ctrl_1insn_patch, "ax"
 266                .word                   661b
 267                 nop
 268                .previous
 269
 270                ldx                     [%g6 + TI_FLAGS], %g3
 271                wr                      %g0, ASI_AIUP, %asi
 272                rdpr                    %cwp, %g1
 273                andcc                   %g3, _TIF_32BIT, %g0
 274                sub                     %g1, 1, %g1
 275                bne,pt                  %xcc, user_rtt_fill_32bit
 276                 wrpr                   %g1, %cwp
 277                ba,a,pt                 %xcc, user_rtt_fill_64bit
 278                 nop
 279
 280user_rtt_fill_fixup_dax:
 281                ba,pt   %xcc, user_rtt_fill_fixup_common
 282                 mov    1, %g3
 283
 284user_rtt_fill_fixup_mna:
 285                ba,pt   %xcc, user_rtt_fill_fixup_common
 286                 mov    2, %g3
 287
 288user_rtt_fill_fixup:
 289                ba,pt   %xcc, user_rtt_fill_fixup_common
 290                 clr    %g3
 291
 292user_rtt_pre_restore:
 293                add                     %g1, 1, %g1
 294                wrpr                    %g1, 0x0, %cwp
 295
 296user_rtt_restore:
 297                restore
 298                rdpr                    %canrestore, %g1
 299                wrpr                    %g1, 0x0, %cleanwin
 300                retry
 301                nop
 302
 303kern_rtt:       rdpr                    %canrestore, %g1
 304                brz,pn                  %g1, kern_rtt_fill
 305                 nop
 306kern_rtt_restore:
 307                stw                     %g0, [%sp + PTREGS_OFF + PT_V9_MAGIC]
 308                restore
 309                retry
 310
 311to_kernel:
 312#ifdef CONFIG_PREEMPT
 313                ldsw                    [%g6 + TI_PRE_COUNT], %l5
 314                brnz                    %l5, kern_fpucheck
 315                 ldx                    [%g6 + TI_FLAGS], %l5
 316                andcc                   %l5, _TIF_NEED_RESCHED, %g0
 317                be,pt                   %xcc, kern_fpucheck
 318                 nop
 319                cmp                     %l4, 0
 320                bne,pn                  %xcc, kern_fpucheck
 321                 nop
 322                call                    preempt_schedule_irq
 323                 nop
 324                ba,pt                   %xcc, rtrap
 325#endif
 326kern_fpucheck:  ldub                    [%g6 + TI_FPDEPTH], %l5
 327                brz,pt                  %l5, rt_continue
 328                 srl                    %l5, 1, %o0
 329                add                     %g6, TI_FPSAVED, %l6
 330                ldub                    [%l6 + %o0], %l2
 331                sub                     %l5, 2, %l5
 332
 333                add                     %g6, TI_GSR, %o1
 334                andcc                   %l2, (FPRS_FEF|FPRS_DU), %g0
 335                be,pt                   %icc, 2f
 336                 and                    %l2, FPRS_DL, %l6
 337                andcc                   %l2, FPRS_FEF, %g0
 338                be,pn                   %icc, 5f
 339                 sll                    %o0, 3, %o5
 340                rd                      %fprs, %g1
 341
 342                wr                      %g1, FPRS_FEF, %fprs
 343                ldx                     [%o1 + %o5], %g1
 344                add                     %g6, TI_XFSR, %o1
 345                sll                     %o0, 8, %o2
 346                add                     %g6, TI_FPREGS, %o3
 347                brz,pn                  %l6, 1f
 348                 add                    %g6, TI_FPREGS+0x40, %o4
 349
 350                membar                  #Sync
 351                ldda                    [%o3 + %o2] ASI_BLK_P, %f0
 352                ldda                    [%o4 + %o2] ASI_BLK_P, %f16
 353                membar                  #Sync
 3541:              andcc                   %l2, FPRS_DU, %g0
 355                be,pn                   %icc, 1f
 356                 wr                     %g1, 0, %gsr
 357                add                     %o2, 0x80, %o2
 358                membar                  #Sync
 359                ldda                    [%o3 + %o2] ASI_BLK_P, %f32
 360                ldda                    [%o4 + %o2] ASI_BLK_P, %f48
 3611:              membar                  #Sync
 362                ldx                     [%o1 + %o5], %fsr
 3632:              stb                     %l5, [%g6 + TI_FPDEPTH]
 364                ba,pt                   %xcc, rt_continue
 365                 nop
 3665:              wr                      %g0, FPRS_FEF, %fprs
 367                sll                     %o0, 8, %o2
 368
 369                add                     %g6, TI_FPREGS+0x80, %o3
 370                add                     %g6, TI_FPREGS+0xc0, %o4
 371                membar                  #Sync
 372                ldda                    [%o3 + %o2] ASI_BLK_P, %f32
 373                ldda                    [%o4 + %o2] ASI_BLK_P, %f48
 374                membar                  #Sync
 375                wr                      %g0, FPRS_DU, %fprs
 376                ba,pt                   %xcc, rt_continue
 377                 stb                    %l5, [%g6 + TI_FPDEPTH]
 378