1
2
3
4
5
6
7
8#ifndef __ASM_SH_PROCESSOR_32_H
9#define __ASM_SH_PROCESSOR_32_H
10#ifdef __KERNEL__
11
12#include <linux/compiler.h>
13#include <linux/linkage.h>
14#include <asm/page.h>
15#include <asm/types.h>
16#include <asm/hw_breakpoint.h>
17
18
19
20
21
/*
 * Return the current instruction pointer: mova loads the PC-relative
 * address of label 1 (the next aligned location) into the output.
 * The "=z" constraint pins the output to r0, which mova requires.
 */
#define current_text_addr() ({ void *pc; __asm__("mova 1f, %0\n.align 2\n1:":"=z" (pc)); pc; })

/*
 * Memory-mapped CPU identification registers.
 * NOTE(review): presumably Processor Version / Cache Version / Product
 * registers in the control-register area -- confirm against the CPU manual.
 */
#define CCN_PVR 0xff000030
#define CCN_CVR 0xff000040
#define CCN_PRR 0xff000044
28
29
30
31
32
33
/*
 * User-space address-space limit: user mappings live below TASK_SIZE.
 */
#define TASK_SIZE 0x7c000000UL

/* The initial user stack grows down from the top of the user address space. */
#define STACK_TOP TASK_SIZE
#define STACK_TOP_MAX STACK_TOP

/*
 * Where to start searching for free areas in mmap():
 * one third of the way into the address space, page-aligned.
 */
#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE / 3)
43
44
45
46
47
48
49
50
51
52
53
/* Bits in the Status Register (SR). */
#define SR_DSP 0x00001000	/* DSP mode enable -- TODO confirm against CPU manual */
#define SR_IMASK 0x000000f0	/* interrupt mask level field */
#define SR_FD 0x00008000	/* FPU disable: set by disable_fpu(), cleared by enable_fpu() */
#define SR_MD 0x40000000	/* processor mode bit (privileged when set) -- TODO confirm */
58
59
60
61
/*
 * Saved DSP engine context, embedded in thread_struct when
 * CONFIG_SH_DSP is enabled (see dsp_status below).
 */
struct sh_dsp_struct {
	unsigned long dsp_regs[14];	/* DSP register file */
	long status;			/* software status word */
};
66
67
68
69
70
/*
 * Hardware FPU register image saved/restored on context switch.
 */
struct sh_fpu_hard_struct {
	unsigned long fp_regs[16];	/* front-bank FP registers */
	unsigned long xfp_regs[16];	/* extended bank -- presumably the second FP bank; confirm */
	unsigned long fpscr;		/* FPU status/control register */
	unsigned long fpul;		/* FPU communication register */

	long status;	/* software status information */
};
79
80
/*
 * Software-emulated FPU state. The register image mirrors
 * sh_fpu_hard_struct; the trailing fields are emulator bookkeeping.
 */
struct sh_fpu_soft_struct {
	unsigned long fp_regs[16];
	unsigned long xfp_regs[16];
	unsigned long fpscr;
	unsigned long fpul;

	unsigned char lookahead;	/* emulator lookahead flag -- TODO confirm semantics */
	unsigned long entry_pc;		/* PC at emulation entry -- TODO confirm */
};
90
/*
 * Per-thread extended (FPU) state: either the real hardware image or
 * the software-emulated one -- a thread only ever uses one of the two.
 */
union thread_xstate {
	struct sh_fpu_hard_struct hardfpu;
	struct sh_fpu_soft_struct softfpu;
};
95
/* Per-thread architecture state. */
struct thread_struct {
	/* Saved kernel stack pointer and program counter for context switch. */
	unsigned long sp;
	unsigned long pc;

	/* Thread flags -- NOTE(review): flag bit definitions not visible here. */
	unsigned long flags;

	/* ptrace hardware breakpoints (perf events), one per HBP slot. */
	struct perf_event *ptrace_bps[HBP_NUM];

#ifdef CONFIG_SH_DSP
	/* Saved DSP engine state. */
	struct sh_dsp_struct dsp_status;
#endif

	/* Lazily allocated extended (FPU) state; NULL until first use -- confirm. */
	union thread_xstate *xstate;
};
115
/*
 * Initial thread_struct for the boot task: stack pointer at the top of
 * init_stack (the stack grows down), remaining members zeroed.
 */
#define INIT_THREAD { \
	.sp = sizeof(init_stack) + (long) &init_stack, \
	.flags = 0, \
}
120
121
struct task_struct;

/* Set up user-mode registers so the task resumes at new_pc with stack new_sp. */
extern void start_thread(struct pt_regs *regs, unsigned long new_pc, unsigned long new_sp);

/* Release architecture-specific thread resources. */
extern void release_thread(struct task_struct *);

/* No per-mm segment state on this architecture: both are no-ops. */
#define copy_segments(p, mm) do { } while(0)
#define release_segments(mm) do { } while(0)
132
133
134
135
136
137static __inline__ void disable_fpu(void)
138{
139 unsigned long __dummy;
140
141
142 __asm__ __volatile__("stc sr, %0\n\t"
143 "or %1, %0\n\t"
144 "ldc %0, sr"
145 : "=&r" (__dummy)
146 : "r" (SR_FD));
147}
148
149static __inline__ void enable_fpu(void)
150{
151 unsigned long __dummy;
152
153
154 __asm__ __volatile__("stc sr, %0\n\t"
155 "and %1, %0\n\t"
156 "ldc %0, sr"
157 : "=&r" (__dummy)
158 : "r" (~SR_FD));
159}
160
161
/* Initial FPSCR value -- NOTE(review): 0x00080000 looks like the PR
 * (double-precision) bit; confirm against the FPU manual. */
#define FPSCR_INIT 0x00080000

#define FPSCR_CAUSE_MASK 0x0001f000	/* FPU exception cause field */
#define FPSCR_FLAG_MASK 0x0000007c	/* FPU exception flag field */
166
167
168
169
/* Saved program counter of a (blocked) task. */
#define thread_saved_pc(tsk) (tsk->thread.pc)

/* Dump a stack trace for tsk starting at sp. */
void show_trace(struct task_struct *tsk, unsigned long *sp,
		struct pt_regs *regs);

#ifdef CONFIG_DUMP_CODE
void show_code(struct pt_regs *regs);
#else
/* Stub when code dumping is not configured. */
static inline void show_code(struct pt_regs *regs)
{
}
#endif

/* Address the task is sleeping in, for /proc wchan reporting -- confirm. */
extern unsigned long get_wchan(struct task_struct *p);

/* User-mode PC and stack pointer of a task, taken from its saved pt_regs
 * (r15 is the stack pointer on SH). */
#define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc)
#define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[15])
187
#if defined(CONFIG_CPU_SH2A) || defined(CONFIG_CPU_SH4)

/* These CPUs support prefetching; hook the generic prefetch interface. */
#define PREFETCH_STRIDE L1_CACHE_BYTES
#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW

/* Prefetch for read: rw=0, maximum temporal locality (3). */
static inline void prefetch(const void *x)
{
	__builtin_prefetch(x, 0, 3);
}

/* Prefetch for write: rw=1, maximum temporal locality (3). */
static inline void prefetchw(const void *x)
{
	__builtin_prefetch(x, 1, 3);
}
#endif
204
205#endif
206#endif
207