linux/arch/sh/include/asm/processor_32.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * arch/sh/include/asm/processor_32.h
 *
 * Copyright (C) 1999, 2000  Niibe Yutaka
 * Copyright (C) 2002, 2003  Paul Mundt
 */

#ifndef __ASM_SH_PROCESSOR_32_H
#define __ASM_SH_PROCESSOR_32_H
#ifdef __KERNEL__

#include <linux/compiler.h>
#include <linux/linkage.h>
#include <asm/page.h>
#include <asm/types.h>
#include <asm/hw_breakpoint.h>

/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
#define current_text_addr() ({ void *pc; __asm__("mova  1f, %0\n.align 2\n1:":"=z" (pc)); pc; })
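
/*
 * Example (editor's sketch, not part of the original header): callers can
 * grab the current program counter for diagnostics; the pr_info() call is
 * purely illustrative.
 *
 *      void *pc = current_text_addr();
 *      pr_info("executing near %p\n", pc);
 */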

/* Core Processor Version Register */
#define CCN_PVR         0xff000030
#define CCN_CVR         0xff000040
#define CCN_PRR         0xff000044

/*
 * User space process size: 2GB.
 *
 * Since SH7709 and SH7750 have "area 7", we can't use 0x7c000000--0x7fffffff
 */
#define TASK_SIZE       0x7c000000UL

#define STACK_TOP       TASK_SIZE
#define STACK_TOP_MAX   STACK_TOP

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE      PAGE_ALIGN(TASK_SIZE / 3)
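
/*
 * Worked example (editor's note, assuming the common 4 KiB PAGE_SIZE; sh
 * also supports larger page sizes): TASK_SIZE / 3 = 0x7c000000 / 3 =
 * 0x29555555, which PAGE_ALIGN() rounds up to 0x29556000, so mmap()
 * searches start roughly 661 MiB into the user address space.
 */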

/*
 * Bits of the SR register
 *
 * FD-bit:
 *     When set, the processor is not allowed to use the FPU, and any
 *     floating-point operation raises an exception.
 *
 * IMASK-bit:
 *     Interrupt level mask
 */
#define SR_DSP          0x00001000
#define SR_IMASK        0x000000f0
#define SR_FD           0x00008000
#define SR_MD           0x40000000
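
/*
 * Example (editor's sketch, not part of the original header): reading the
 * FPU-disable bit and the interrupt mask level out of a saved SR value,
 * assuming regs points at a struct pt_regs from the exception entry path.
 *
 *      static inline int fpu_disabled(struct pt_regs *regs)
 *      {
 *              return (regs->sr & SR_FD) != 0;
 *      }
 *
 *      static inline unsigned long irq_mask_level(struct pt_regs *regs)
 *      {
 *              return (regs->sr & SR_IMASK) >> 4;
 *      }
 */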

/*
 * DSP structure and data
 */
struct sh_dsp_struct {
        unsigned long dsp_regs[14];
        long status;
};

/*
 * FPU structure and data
 */

struct sh_fpu_hard_struct {
        unsigned long fp_regs[16];
        unsigned long xfp_regs[16];
        unsigned long fpscr;
        unsigned long fpul;

        long status; /* software status information */
};

/* Dummy FPU emulator */
struct sh_fpu_soft_struct {
        unsigned long fp_regs[16];
        unsigned long xfp_regs[16];
        unsigned long fpscr;
        unsigned long fpul;

        unsigned char lookahead;
        unsigned long entry_pc;
};

union thread_xstate {
        struct sh_fpu_hard_struct hardfpu;
        struct sh_fpu_soft_struct softfpu;
};

struct thread_struct {
        /* Saved registers when thread is descheduled */
        unsigned long sp;
        unsigned long pc;

        /* Various thread flags, see SH_THREAD_xxx */
        unsigned long flags;

        /* Saved state of the ptrace hardware breakpoints */
        struct perf_event *ptrace_bps[HBP_NUM];

#ifdef CONFIG_SH_DSP
        /* DSP status information */
        struct sh_dsp_struct dsp_status;
#endif

        /* Extended processor state */
        union thread_xstate *xstate;

        /*
         * fpu_counter contains the number of consecutive context switches
         * during which the FPU was used. If this exceeds a threshold, the
         * lazy FPU saving becomes eager to avoid the trap. The counter is an
         * unsigned char, so after 256 switches it wraps and the behaviour
         * turns lazy again; this copes with bursty apps that only use the
         * FPU for a short time (a usage sketch follows this structure).
         */
        unsigned char fpu_counter;
};
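
/*
 * Sketch (editor's addition, with a hypothetical helper restore_fpu_state()):
 * how a switch-in path might consult fpu_counter to pick eager vs. lazy FPU
 * handling. The real arch code differs in detail; this only illustrates the
 * counter's intent described above.
 *
 *      if (next->thread.fpu_counter > 5)
 *              restore_fpu_state(next);   // task uses the FPU constantly: restore eagerly
 *      else
 *              disable_fpu();             // lazy path: fault in on the first FPU insn
 */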

#define INIT_THREAD  {                                          \
        .sp = sizeof(init_stack) + (long) &init_stack,          \
        .flags = 0,                                             \
}

/* Forward declaration, a strange C thing */
struct task_struct;

extern void start_thread(struct pt_regs *regs, unsigned long new_pc, unsigned long new_sp);

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

/*
 * FPU lazy state save handling.
 */

static __inline__ void disable_fpu(void)
{
        unsigned long __dummy;

        /* Set FD flag in SR */
        __asm__ __volatile__("stc       sr, %0\n\t"
                             "or        %1, %0\n\t"
                             "ldc       %0, sr"
                             : "=&r" (__dummy)
                             : "r" (SR_FD));
}

static __inline__ void enable_fpu(void)
{
        unsigned long __dummy;

        /* Clear out FD flag in SR */
        __asm__ __volatile__("stc       sr, %0\n\t"
                             "and       %1, %0\n\t"
                             "ldc       %0, sr"
                             : "=&r" (__dummy)
                             : "r" (~SR_FD));
}
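
/*
 * Example (editor's sketch, not part of the original header): kernel code
 * that needs to touch FPU registers directly would bracket the access, so
 * the FD bit is set again afterwards and user space keeps faulting its FPU
 * state in lazily.
 *
 *      enable_fpu();
 *      ... issue FPU instructions / save or restore FPU registers ...
 *      disable_fpu();
 */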

/* Double precision, NaNs as NaNs, rounding to nearest, no exceptions */
#define FPSCR_INIT  0x00080000

#define FPSCR_CAUSE_MASK        0x0001f000      /* Cause bits */
#define FPSCR_FLAG_MASK         0x0000007c      /* Flag bits */
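
/*
 * Example (editor's sketch, assuming tsk->thread.xstate has been allocated):
 * extracting the cause and flag fields from a saved FPSCR value.
 *
 *      unsigned long fpscr = tsk->thread.xstate->hardfpu.fpscr;
 *      unsigned long cause = (fpscr & FPSCR_CAUSE_MASK) >> 12;
 *      unsigned long flag  = (fpscr & FPSCR_FLAG_MASK) >> 2;
 */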

/*
 * Return saved PC of a blocked thread.
 */
#define thread_saved_pc(tsk)    (tsk->thread.pc)

void show_trace(struct task_struct *tsk, unsigned long *sp,
                struct pt_regs *regs);

#ifdef CONFIG_DUMP_CODE
void show_code(struct pt_regs *regs);
#else
static inline void show_code(struct pt_regs *regs)
{
}
#endif

extern unsigned long get_wchan(struct task_struct *p);

#define KSTK_EIP(tsk)  (task_pt_regs(tsk)->pc)
#define KSTK_ESP(tsk)  (task_pt_regs(tsk)->regs[15])
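
/*
 * Example (editor's sketch): procfs-style reporting of a task's user-space
 * program counter and stack pointer would go through these accessors.
 *
 *      unsigned long pc = KSTK_EIP(tsk);
 *      unsigned long sp = KSTK_ESP(tsk);
 */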

#if defined(CONFIG_CPU_SH2A) || defined(CONFIG_CPU_SH4)

#define PREFETCH_STRIDE         L1_CACHE_BYTES
#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW

static inline void prefetch(const void *x)
{
        __builtin_prefetch(x, 0, 3);
}

static inline void prefetchw(const void *x)
{
        __builtin_prefetch(x, 1, 3);
}
#endif
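
/*
 * Example (editor's sketch, with a hypothetical struct item and process()
 * helper): a common pattern is to prefetch the next node while working on
 * the current one.
 *
 *      for (p = head; p; p = p->next) {
 *              prefetch(p->next);
 *              process(p);
 *      }
 */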

#endif /* __KERNEL__ */
#endif /* __ASM_SH_PROCESSOR_32_H */