1
2
3
4
5
6
7
8#ifndef __ASM_SH_PROCESSOR_32_H
9#define __ASM_SH_PROCESSOR_32_H
10#ifdef __KERNEL__
11
12#include <linux/compiler.h>
13#include <linux/linkage.h>
14#include <asm/page.h>
15#include <asm/types.h>
16#include <asm/hw_breakpoint.h>
17
18
19
20
21
/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").  Uses the SH "mova"
 * PC-relative move; the "z" constraint pins the output to r0,
 * which mova requires as its destination.
 */
#define current_text_addr() ({ void *pc; __asm__("mova 1f, %0\n.align 2\n1:":"=z" (pc)); pc; })
23
24
/*
 * Memory-mapped CCN register addresses used for CPU probing:
 * processor version (PVR), cache version (CVR) and product (PRR)
 * registers.
 */
#define CCN_PVR 0xff000030
#define CCN_CVR 0xff000040
#define CCN_PRR 0xff000044
28
29
30
31
32
33
/*
 * User space process size: 2GB - 64MB (0x7c000000).
 * Anything above this address belongs to the kernel.
 */
#define TASK_SIZE 0x7c000000UL

/* User stacks grow down from the top of the user address space. */
#define STACK_TOP TASK_SIZE
#define STACK_TOP_MAX STACK_TOP

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE (TASK_SIZE / 3)
43
44
45
46
47
48
49
50
51
52
53
/*
 * Bits in the status register (SR):
 *  SR_DSP   - DSP unit enable
 *  SR_IMASK - interrupt mask level field
 *  SR_FD    - FPU disable (set/cleared by disable_fpu()/enable_fpu()
 *             below; FP insns trap while it is set)
 *  SR_MD    - privileged (kernel) mode
 */
#define SR_DSP 0x00001000
#define SR_IMASK 0x000000f0
#define SR_FD 0x00008000
#define SR_MD 0x40000000
58
59
60
61
/*
 * Saved DSP unit context (embedded in thread_struct when
 * CONFIG_SH_DSP is enabled).
 */
struct sh_dsp_struct {
	unsigned long dsp_regs[14];	/* DSP register file */
	long status;			/* DSP status word */
};
66
67
68
69
70
/* Hardware FPU register context. */
struct sh_fpu_hard_struct {
	unsigned long fp_regs[16];	/* FR register bank */
	unsigned long xfp_regs[16];	/* XF (second) register bank */
	unsigned long fpscr;		/* FP status/control register */
	unsigned long fpul;		/* FP communication register */

	long status;	/* software status information */
};
79
80
/*
 * Software (emulated) FPU context.  The leading fields deliberately
 * mirror sh_fpu_hard_struct's layout.
 */
struct sh_fpu_soft_struct {
	unsigned long fp_regs[16];
	unsigned long xfp_regs[16];
	unsigned long fpscr;
	unsigned long fpul;

	unsigned char lookahead;	/* emulator lookahead flag */
	unsigned long entry_pc;		/* NOTE(review): presumably the PC at
					 * which emulation was entered -- confirm
					 * against the math-emu code. */
};
90
/*
 * Per-thread extended (FPU) state; only one flavour is in use for a
 * given thread, hence the union.
 */
union thread_xstate {
	struct sh_fpu_hard_struct hardfpu;
	struct sh_fpu_soft_struct softfpu;
};
95
/* Architecture-specific per-thread state. */
struct thread_struct {
	/* Saved registers when thread is descheduled */
	unsigned long sp;
	unsigned long pc;

	/* Various thread flags */
	unsigned long flags;

	/* Per-thread hardware breakpoints installed via ptrace */
	struct perf_event *ptrace_bps[HBP_NUM];

#ifdef CONFIG_SH_DSP
	/* Saved DSP context */
	struct sh_dsp_struct dsp_status;
#endif

	/* Extended processor state (FPU), allocated separately */
	union thread_xstate *xstate;
};
115
/*
 * Initial thread_struct for the init task: stack pointer at the top
 * (highest address) of init_stack, no flags set.
 */
#define INIT_THREAD { \
	.sp = sizeof(init_stack) + (long) &init_stack, \
	.flags = 0, \
}
120
121
/* Forward declaration; full definition lives in <linux/sched.h>. */
struct task_struct;

/*
 * Prepare @regs so the current task enters user space at @new_pc
 * with user stack pointer @new_sp.
 */
extern void start_thread(struct pt_regs *regs, unsigned long new_pc, unsigned long new_sp);

/* Free architecture-specific state when a task exits. */
extern void release_thread(struct task_struct *);

/* Arch hook invoked before thread state is copied at fork time. */
void prepare_to_copy(struct task_struct *tsk);

/*
 * Create a kernel thread running fn(arg).
 * NOTE(review): return convention (pid vs. negative errno) not visible
 * here -- confirm against the arch implementation.
 */
extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);

/* SH has no per-task segment state to copy or release; no-ops. */
#define copy_segments(p, mm) do { } while(0)
#define release_segments(mm) do { } while(0)
140
141
142
143
144
/*
 * Disable the FPU by setting the SR.FD bit: FP instructions executed
 * afterwards will raise an FPU-disable exception.  The SR
 * read-modify-write is kept in a single volatile asm block so the
 * compiler can neither cache nor reorder it.
 */
static __inline__ void disable_fpu(void)
{
	unsigned long __dummy;	/* scratch register for the SR value */

	/* Set the FD (FPU disable) bit in SR. */
	__asm__ __volatile__("stc sr, %0\n\t"
			     "or %1, %0\n\t"
			     "ldc %0, sr"
			     : "=&r" (__dummy)
			     : "r" (SR_FD));
}
156
/*
 * Enable the FPU by clearing the SR.FD bit (inverse of disable_fpu()).
 * Single volatile asm block for the same reordering reasons.
 */
static __inline__ void enable_fpu(void)
{
	unsigned long __dummy;	/* scratch register for the SR value */

	/* Clear the FD (FPU disable) bit in SR. */
	__asm__ __volatile__("stc sr, %0\n\t"
			     "and %1, %0\n\t"
			     "ldc %0, sr"
			     : "=&r" (__dummy)
			     : "r" (~SR_FD));
}
168
169
/* Initial FPSCR value (bit 19 set -- presumably DN/flush-denormals;
 * confirm against the CPU manual). */
#define FPSCR_INIT 0x00080000

#define FPSCR_CAUSE_MASK 0x0001f000	/* FPU exception cause bits */
#define FPSCR_FLAG_MASK 0x0000007c	/* FPU exception flag bits */
174
175
176
177
/*
 * Return the saved kernel PC of a (descheduled) task.
 * The argument is parenthesized so that expression arguments
 * (e.g. a conditional) expand correctly: '->' binds tighter than
 * almost every operator a caller might pass in.
 */
#define thread_saved_pc(tsk) ((tsk)->thread.pc)
179
/* Dump a kernel backtrace for @tsk starting from stack pointer @sp. */
void show_trace(struct task_struct *tsk, unsigned long *sp,
		struct pt_regs *regs);

#ifdef CONFIG_DUMP_CODE
void show_code(struct pt_regs *regs);
#else
/* Code dumping compiled out: empty stub keeps callers unconditional. */
static inline void show_code(struct pt_regs *regs)
{
}
#endif

/* Address @p is sleeping at, if any (for /proc/<pid>/wchan). */
extern unsigned long get_wchan(struct task_struct *p);

/* User-visible PC and stack pointer of a task, taken from its saved
 * pt_regs (r15 serves as the stack pointer on SH). */
#define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc)
#define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[15])
195
#if defined(CONFIG_CPU_SH2A) || defined(CONFIG_CPU_SH4)

/* SH-2A/SH-4 can prefetch; advertise one-cacheline-stride read and
 * write prefetch support to generic code. */
#define PREFETCH_STRIDE L1_CACHE_BYTES
#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
201
/* Hint that *x will soon be read; keep it in all cache levels. */
static inline void prefetch(const void *x)
{
	/* rw=0 (read) and locality=3 are the builtin's defaults. */
	__builtin_prefetch(x);
}
206
/* Hint that *x will soon be written; keep it in all cache levels. */
static inline void prefetchw(const void *x)
{
	/* rw=1 (write); locality=3 is the builtin's default. */
	__builtin_prefetch(x, 1);
}
211#endif
212
213#endif
214#endif
215