#ifndef __SPARC64_SYSTEM_H
#define __SPARC64_SYSTEM_H

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/visasm.h>

#ifndef __ASSEMBLY__

#include <linux/irqflags.h>

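/* Sparc (general) CPU types. */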
enum sparc_cpu {
	sun4		= 0x00,
	sun4c		= 0x01,
	sun4m		= 0x02,
	sun4d		= 0x03,
	sun4e		= 0x04,
	sun4u		= 0x05,
	sun_unknown	= 0x06,
	ap1000		= 0x07,
};

#define sparc_cpu_model sun4u

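/* This can never be a sun4c or sun4 kernel; sparc64 is always sun4u. */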
#define ARCH_SUN4C_SUN4 0
#define ARCH_SUN4 0
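/* Work around Spitfire Errata #51: a memory barrier occurring soon after
 * a mispredicted branch can cause the chip to stop executing instructions
 * until a trap occurs, hanging the CPU forever if interrupts are disabled.
 * Putting the membar in the delay slot of a "branch always, predicted
 * taken" avoids the problem case.
 */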
#define membar_safe(type) \
do {	__asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
			     " membar " type "\n" \
			     "1:\n" \
			     : : : "memory"); \
} while (0)

#define mb() \
	membar_safe("#LoadLoad | #LoadStore | #StoreStore | #StoreLoad")
#define rmb() \
	membar_safe("#LoadLoad")
#define wmb() \
	membar_safe("#StoreStore")
#define membar_storeload() \
	membar_safe("#StoreLoad")
#define membar_storeload_storestore() \
	membar_safe("#StoreLoad | #StoreStore")
#define membar_storeload_loadload() \
	membar_safe("#StoreLoad | #LoadLoad")
#define membar_storestore_loadstore() \
	membar_safe("#StoreStore | #LoadStore")

#endif /* !(__ASSEMBLY__) */

#define nop() __asm__ __volatile__ ("nop")

#define read_barrier_depends() do { } while(0)
#define set_mb(__var, __value) \
	do { __var = __value; membar_storeload_storestore(); } while(0)

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	__asm__ __volatile__("":::"memory")
#define smp_rmb()	__asm__ __volatile__("":::"memory")
#define smp_wmb()	__asm__ __volatile__("":::"memory")
#define smp_read_barrier_depends()	do { } while(0)
#endif

#define flushi(addr)	__asm__ __volatile__ ("flush %0" : : "r" (addr) : "memory")

#define flushw_all()	__asm__ __volatile__("flushw")

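/* Performance counter register access. */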
#define read_pcr(__p)  __asm__ __volatile__("rd %%pcr, %0" : "=r" (__p))
#define write_pcr(__p) __asm__ __volatile__("wr %0, 0x0, %%pcr" : : "r" (__p))
#define read_pic(__p)  __asm__ __volatile__("rd %%pic, %0" : "=r" (__p))
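/* Blackbird errata workaround.  See commentary in
 * smp.c:smp_percpu_timer_interrupt() for more information.
 */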
#define reset_pic() \
	__asm__ __volatile__("ba,pt %xcc, 99f\n\t" \
			     ".align 64\n" \
			     "99:wr %g0, 0x0, %pic\n\t" \
			     "rd %pic, %g0")

#ifndef __ASSEMBLY__

extern void sun_do_break(void);
extern int stop_a_enabled;

extern void synchronize_user_stack(void);

extern void __flushw_user(void);
#define flushw_user() __flushw_user()

#define flush_user_windows flushw_user
#define flush_register_windows flushw_all

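/* Don't hold the runqueue lock over context switch. */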
#define __ARCH_WANT_UNLOCKED_CTXSW
#define prepare_arch_switch(next)	\
do {					\
	flushw_all();			\
} while (0)
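/* The clobber list below tells gcc that essentially every non-fixed
 * register is destroyed across the low-level switch, so 'next' and
 * 'last' must live in the few that remain.  When the task has
 * TIF_PERFCTR set, the performance counters are sampled before the
 * switch and reprogrammed afterwards so each task is charged correctly.
 */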
#define switch_to(prev, next, last)					\
do {	if (test_thread_flag(TIF_PERFCTR)) {				\
		unsigned long __tmp;					\
		read_pcr(__tmp);					\
		current_thread_info()->pcr_reg = __tmp;			\
		read_pic(__tmp);					\
		current_thread_info()->kernel_cntd0 += (unsigned int)(__tmp);\
		current_thread_info()->kernel_cntd1 += ((__tmp) >> 32);	\
	}								\
	flush_tlb_pending();						\
	save_and_clear_fpu();						\
	/* If you are tempted to conditionalize the following */	\
	/* so that ASI is only written if it changes, think again. */	\
	__asm__ __volatile__("wr %%g0, %0, %%asi"			\
	: : "r" (__thread_flag_byte_ptr(task_thread_info(next))[TI_FLAG_BYTE_CURRENT_DS]));\
	trap_block[current_thread_info()->cpu].thread =			\
		task_thread_info(next);					\
	__asm__ __volatile__(						\
	"mov	%%g4, %%g7\n\t"						\
	"stx	%%i6, [%%sp + 2047 + 0x70]\n\t"				\
	"stx	%%i7, [%%sp + 2047 + 0x78]\n\t"				\
	"rdpr	%%wstate, %%o5\n\t"					\
	"stx	%%o6, [%%g6 + %6]\n\t"					\
	"stb	%%o5, [%%g6 + %5]\n\t"					\
	"rdpr	%%cwp, %%o5\n\t"					\
	"stb	%%o5, [%%g6 + %8]\n\t"					\
	"mov	%4, %%g6\n\t"						\
	"ldub	[%4 + %8], %%g1\n\t"					\
	"wrpr	%%g1, %%cwp\n\t"					\
	"ldx	[%%g6 + %6], %%o6\n\t"					\
	"ldub	[%%g6 + %5], %%o5\n\t"					\
	"ldub	[%%g6 + %7], %%o7\n\t"					\
	"wrpr	%%o5, 0x0, %%wstate\n\t"				\
	"ldx	[%%sp + 2047 + 0x70], %%i6\n\t"				\
	"ldx	[%%sp + 2047 + 0x78], %%i7\n\t"				\
	"ldx	[%%g6 + %9], %%g4\n\t"					\
	"brz,pt %%o7, 1f\n\t"						\
	" mov	%%g7, %0\n\t"						\
	"sethi	%%hi(ret_from_syscall), %%g1\n\t"			\
	"jmpl	%%g1 + %%lo(ret_from_syscall), %%g0\n\t"		\
	" nop\n\t"							\
	"1:\n\t"							\
	: "=&r" (last), "=r" (current), "=r" (current_thread_info_reg),	\
	  "=r" (__local_per_cpu_offset)					\
	: "0" (task_thread_info(next)),					\
	  "i" (TI_WSTATE), "i" (TI_KSP), "i" (TI_NEW_CHILD),		\
	  "i" (TI_CWP), "i" (TI_TASK)					\
	: "cc",								\
		"g1", "g2", "g3", "g7",					\
		"l1", "l2", "l3", "l4", "l5", "l6", "l7",		\
		"i0", "i1", "i2", "i3", "i4", "i5",			\
		"o0", "o1", "o2", "o3", "o4", "o5", "o7");		\
									\
	if (test_thread_flag(TIF_PERFCTR)) {				\
		write_pcr(current_thread_info()->pcr_reg);		\
		reset_pic();						\
	}								\
} while(0)

/* Atomically exchange *m and val via a cas loop; the leading and
 * trailing membars give the operation full memory-barrier semantics.
 */
static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"	membar #StoreLoad | #LoadLoad\n"
"	mov %0, %1\n"
"1:	lduw [%4], %2\n"
"	cas [%4], %2, %0\n"
"	cmp %2, %0\n"
"	bne,a,pn %%icc, 1b\n"
"	 mov %1, %0\n"
"	membar #StoreLoad | #StoreStore\n"
	: "=&r" (val), "=&r" (tmp1), "=&r" (tmp2)
	: "0" (val), "r" (m)
	: "cc", "memory");
	return val;
}

/* As xchg32(), but for 64-bit values, using casx. */
static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long val)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"	membar #StoreLoad | #LoadLoad\n"
"	mov %0, %1\n"
"1:	ldx [%4], %2\n"
"	casx [%4], %2, %0\n"
"	cmp %2, %0\n"
"	bne,a,pn %%xcc, 1b\n"
"	 mov %1, %0\n"
"	membar #StoreLoad | #StoreStore\n"
	: "=&r" (val), "=&r" (tmp1), "=&r" (tmp2)
	: "0" (val), "r" (m)
	: "cc", "memory");
	return val;
}

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

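/* This function doesn't exist, so using xchg() with an unsupported
 * operand size produces a link-time error.
 */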
extern void __xchg_called_with_bad_pointer(void);

static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
				   int size)
{
	switch (size) {
	case 4:
		return xchg32(ptr, x);
	case 8:
		return xchg64(ptr, x);
	}
	__xchg_called_with_bad_pointer();
	return x;
}
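
/*
 * Usage sketch (illustrative only; 'pending' is a made-up variable):
 *
 *	static unsigned long pending = 1UL;
 *	unsigned long old = xchg(&pending, 0UL);
 *
 * xchg() stores the new value atomically and returns the previous
 * contents, so 'old' tells the caller whether the flag was still set.
 */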

extern void die_if_kernel(char *str, struct pt_regs *regs) __attribute__ ((noreturn));
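/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */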
#define __HAVE_ARCH_CMPXCHG 1

static inline unsigned long
__cmpxchg_u32(volatile int *m, int old, int new)
{
	__asm__ __volatile__("membar #StoreLoad | #LoadLoad\n"
			     "cas [%2], %3, %0\n\t"
			     "membar #StoreLoad | #StoreStore"
			     : "=&r" (new)
			     : "0" (new), "r" (m), "r" (old)
			     : "memory");

	return new;
}

static inline unsigned long
__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
{
	__asm__ __volatile__("membar #StoreLoad | #LoadLoad\n"
			     "casx [%2], %3, %0\n\t"
			     "membar #StoreLoad | #StoreStore"
			     : "=&r" (new)
			     : "0" (new), "r" (m), "r" (old)
			     : "memory");

	return new;
}
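/* This function doesn't exist, so using cmpxchg() with an unsupported
 * operand size produces a link-time error.
 */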
extern void __cmpxchg_called_with_bad_pointer(void);

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
	case 8:
		return __cmpxchg_u64(ptr, old, new);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,o,n)						\
	({								\
		__typeof__(*(ptr)) _o_ = (o);				\
		__typeof__(*(ptr)) _n_ = (n);				\
		(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
					       (unsigned long)_n_, sizeof(*(ptr))); \
	})
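
/*
 * Usage sketch (illustrative only; 'refcount' is a made-up variable):
 *
 *	int cur = refcount;
 *	for (;;) {
 *		int prev = cmpxchg(&refcount, cur, cur + 1);
 *		if (prev == cur)
 *			break;
 *		cur = prev;
 *	}
 *
 * cmpxchg() returns the value found in memory, so the update succeeded
 * iff the returned value equals the expected 'cur'.
 */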

#endif /* !(__ASSEMBLY__) */

#define arch_align_stack(x) (x)

#endif /* !(__SPARC64_SYSTEM_H) */