/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003, 2004, 2007  Maciej W. Rozycki
 */
#include <linux/context_tracking.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/stddef.h>

#include <asm/bugs.h>
#include <asm/compiler.h>
#include <asm/cpu.h>
#include <asm/fpu.h>
#include <asm/mipsregs.h>
#include <asm/setup.h>

static char bug64hit[] __initdata =
	"reliable operation impossible!\n%s";
static char nowar[] __initdata =
	"Please report to <linux-mips@linux-mips.org>.";
static char r4kwar[] __initdata =
	"Enable CPU_R4000_WORKAROUNDS to rectify.";
static char daddiwar[] __initdata =
	"Enable CPU_DADDI_WORKAROUNDS to rectify.";

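/*
 * Emit "mod" NOPs at an "align"-byte boundary so that the code which
 * follows starts at a precisely controlled offset within a cache line.
 * Both arguments have to be compile-time constants as they are used as
 * assembler directive operands.
 */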
static inline void align_mod(const int align, const int mod)
{
	asm volatile(
		".set push\n\t"
		".set noreorder\n\t"
		".balign %0\n\t"
		".rept %1\n\t"
		"nop\n\t"
		".endr\n\t"
		".set pop"
		:
		: GCC_IMM_ASM() (align), GCC_IMM_ASM() (mod));
}

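/*
 * Run the multiply/shift sequence that is sensitive to the R4000
 * erratum at the given code alignment.  *v1 receives the result of the
 * dsll32 issued right after mult (the one that may be corrupted), *w
 * the result of a second, unaffected dsll32 used as the reference, and
 * *v2 the result of equivalent compiler-generated code, which is what
 * the workaround relies on.
 */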
static inline void mult_sh_align_mod(long *v1, long *v2, long *w,
				     const int align, const int mod)
{
	unsigned long flags;
	int m1, m2;
	long p, s, lv1, lv2, lw;

	/*
	 * We want the multiply and the shift to be isolated from the
	 * rest of the code to disable gcc optimizations.  Hence the
	 * asm statements that execute nothing, but make gcc not know
	 * what the values of m1, m2 and s are and what lv2 and p are
	 * used for.
	 */

	local_irq_save(flags);
	/*
	 * The following code leads to a wrong result of the first
	 * dsll32 when executed on R4000 rev. 2.2 or 3.0 (PRId
	 * 00000422 or 00000430, respectively).
	 *
	 * See the "MIPS R4000PC/SC Errata, Processor Revision 2.2
	 * and 3.0" document by MIPS Technologies, Inc. for details.
	 */
	asm volatile(
		""
		: "=r" (m1), "=r" (m2), "=r" (s)
		: "0" (5), "1" (8), "2" (5));
	align_mod(align, mod);
	/*
	 * The trailing nop is needed to fulfill the two-instruction
	 * requirement between reading hi/lo and starting a mult/div.
	 * Leaving it out may cause gas to insert a nop itself,
	 * breaking the desired alignment of the next chunk.
	 */
	asm volatile(
		".set push\n\t"
		".set noat\n\t"
		".set noreorder\n\t"
		".set nomacro\n\t"
		"mult %2, %3\n\t"
		"dsll32 %0, %4, %5\n\t"
		"mflo $0\n\t"
		"dsll32 %1, %4, %5\n\t"
		"nop\n\t"
		".set pop"
		: "=&r" (lv1), "=r" (lw)
		: "r" (m1), "r" (m2), "r" (s), "I" (0)
		: "hi", "lo", GCC_REG_ACCUM);
	/*
	 * Now run the same multiply and shift as ordinary compiler-
	 * generated code at the same alignment.  The empty asm
	 * statements keep gcc from constant-folding the operands or
	 * reordering the computation around the sequence above.
	 */
	asm volatile(
		""
		: "=r" (m1), "=r" (m2), "=r" (s)
		: "0" (m1), "1" (m2), "2" (s));
	align_mod(align, mod);
	p = m1 * m2;
	lv2 = s << 32;
	asm volatile(
		""
		: "=r" (lv2)
		: "0" (lv2), "r" (p));
	local_irq_restore(flags);

	*v1 = lv1;
	*v2 = lv2;
	*w = lw;
}

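/*
 * Probe for the R4000 multiply/shift erratum by running the test
 * sequence at every offset within an aligned 32-byte block and
 * comparing the results against the reference values.  If the bug is
 * present and the compiler-generated workaround code does not produce
 * correct results either, the kernel cannot run reliably and we panic.
 */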
static inline void check_mult_sh(void)
{
	long v1[8], v2[8], w[8];
	int bug, fix, i;

	printk("Checking for the multiply/shift bug... ");

	/*
	 * Testing discovered false negatives for certain code offsets
	 * into cache lines.  Hence we test all possible offsets for
	 * the worst assumption of an R4000 with a 128-byte cache
	 * line.
	 *
	 * We can't use a loop as alignment directives need to be
	 * immediates.
	 */
	mult_sh_align_mod(&v1[0], &v2[0], &w[0], 32, 0);
	mult_sh_align_mod(&v1[1], &v2[1], &w[1], 32, 1);
	mult_sh_align_mod(&v1[2], &v2[2], &w[2], 32, 2);
	mult_sh_align_mod(&v1[3], &v2[3], &w[3], 32, 3);
	mult_sh_align_mod(&v1[4], &v2[4], &w[4], 32, 4);
	mult_sh_align_mod(&v1[5], &v2[5], &w[5], 32, 5);
	mult_sh_align_mod(&v1[6], &v2[6], &w[6], 32, 6);
	mult_sh_align_mod(&v1[7], &v2[7], &w[7], 32, 7);

	bug = 0;
	for (i = 0; i < 8; i++)
		if (v1[i] != w[i])
			bug = 1;

	if (bug == 0) {
		pr_cont("no.\n");
		return;
	}

	pr_cont("yes, workaround... ");

	fix = 1;
	for (i = 0; i < 8; i++)
		if (v2[i] != w[i])
			fix = 0;

	if (fix == 1) {
		pr_cont("yes.\n");
		return;
	}

	pr_cont("no.\n");
	panic(bug64hit, !R4000_WAR ? r4kwar : nowar);
}

static volatile int daddi_ov;

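/*
 * Overflow exception handler installed while probing for the daddi
 * erratum: record that the exception fired and step over the faulting
 * instruction.
 */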
asmlinkage void __init do_daddi_ov(struct pt_regs *regs)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	daddi_ov = 1;
	regs->cp0_epc += 4;
	exception_exit(prev_state);
}

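/*
 * Probe for the R4000 daddi erratum: on affected CPUs a daddi that
 * should raise an overflow exception fails to do so.  The probe
 * temporarily installs do_daddi_ov() as the overflow handler, so it
 * can only run once the exception vectors have been set up; hence it
 * is called from check_bugs64() rather than check_bugs64_early().
 */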
static inline void check_daddi(void)
{
	extern asmlinkage void handle_daddi_ov(void);
	unsigned long flags;
	void *handler;
	long v, tmp;

	printk("Checking for the daddi bug... ");

	local_irq_save(flags);
	handler = set_except_vector(EXCCODE_OV, handle_daddi_ov);
	/*
	 * The following code fails to trigger an overflow exception
	 * when executed on R4000 rev. 2.2 or 3.0 (PRId 00000422 or
	 * 00000430, respectively).
	 *
	 * See the "MIPS R4000PC/SC Errata, Processor Revision 2.2
	 * and 3.0" document by MIPS Technologies, Inc. for details.
	 */
	asm volatile(
		".set push\n\t"
		".set noat\n\t"
		".set noreorder\n\t"
		".set nomacro\n\t"
		"addiu %1, $0, %2\n\t"
		"dsrl %1, %1, 1\n\t"
#ifdef HAVE_AS_SET_DADDI
		".set daddi\n\t"
#endif
		"daddi %0, %1, %3\n\t"
		".set pop"
		: "=r" (v), "=&r" (tmp)
		: "I" (0xffffffffffffdb9aUL), "I" (0x1234));
	set_except_vector(EXCCODE_OV, handler);
	local_irq_restore(flags);

	if (daddi_ov) {
		pr_cont("no.\n");
		return;
	}

	pr_cont("yes, workaround... ");

	local_irq_save(flags);
	handler = set_except_vector(EXCCODE_OV, handle_daddi_ov);
	asm volatile(
		"addiu %1, $0, %2\n\t"
		"dsrl %1, %1, 1\n\t"
		"daddi %0, %1, %3"
		: "=r" (v), "=&r" (tmp)
		: "I" (0xffffffffffffdb9aUL), "I" (0x1234));
	set_except_vector(EXCCODE_OV, handler);
	local_irq_restore(flags);

	if (daddi_ov) {
		pr_cont("yes.\n");
		return;
	}

	pr_cont("no.\n");
	panic(bug64hit, !DADDI_WAR ? daddiwar : nowar);
}

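/*
 * 1 if the CPU suffers from the daddiu erratum, 0 if it does not, -1
 * until the probe below has run.  MIPS R6 CPUs are known not to be
 * affected, so the flag starts out as 0 there.
 */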
int daddiu_bug = IS_ENABLED(CONFIG_CPU_MIPSR6) ? 0 : -1;

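/*
 * Probe for the daddiu erratum by comparing the result of a daddiu
 * against the same addition carried out with daddu, recording the
 * outcome in daddiu_bug.  If the erratum is present and the workaround
 * code emitted by the toolchain is broken as well, we panic.
 */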
static inline void check_daddiu(void)
{
	long v, w, tmp;

	printk("Checking for the daddiu bug... ");

	/*
	 * The following code leads to a wrong result of daddiu when
	 * executed on R4400 rev. 1.0 (PRId 00000440).
	 *
	 * See the "MIPS R4400PC/SC Errata, Processor Revision 1.0"
	 * document by MIPS Technologies, Inc. for details.
	 *
	 * According to the R4000 rev. 2.2 and 3.0 errata the problem
	 * may affect those processors (PRId 00000422 and 00000430)
	 * as well, although testing has not triggered it there so far.
	 */
	asm volatile(
		".set push\n\t"
		".set noat\n\t"
		".set noreorder\n\t"
		".set nomacro\n\t"
		"addiu %2, $0, %3\n\t"
		"dsrl %2, %2, 1\n\t"
#ifdef HAVE_AS_SET_DADDI
		".set daddi\n\t"
#endif
		"daddiu %0, %2, %4\n\t"
		"addiu %1, $0, %4\n\t"
		"daddu %1, %2\n\t"
		".set pop"
		: "=&r" (v), "=&r" (w), "=&r" (tmp)
		: "I" (0xffffffffffffdb9aUL), "I" (0x1234));

	daddiu_bug = v != w;

	if (!daddiu_bug) {
		pr_cont("no.\n");
		return;
	}

	pr_cont("yes, workaround... ");

	asm volatile(
		"addiu %2, $0, %3\n\t"
		"dsrl %2, %2, 1\n\t"
		"daddiu %0, %2, %4\n\t"
		"addiu %1, $0, %4\n\t"
		"daddu %1, %2"
		: "=&r" (v), "=&r" (w), "=&r" (tmp)
		: "I" (0xffffffffffffdb9aUL), "I" (0x1234));

	if (v == w) {
		pr_cont("yes.\n");
		return;
	}

	pr_cont("no.\n");
	panic(bug64hit, !DADDI_WAR ? daddiwar : nowar);
}

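/*
 * Early probes: the multiply/shift and daddiu checks take no
 * exceptions, so they can run before the trap handlers are set up.
 * MIPS R6 CPUs do not suffer from these R4000/R4400 errata, so the
 * probes are skipped there.
 */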
void __init check_bugs64_early(void)
{
	if (!IS_ENABLED(CONFIG_CPU_MIPSR6)) {
		check_mult_sh();
		check_daddiu();
	}
}

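/*
 * Late probe: the daddi check installs a temporary overflow exception
 * handler, so it has to wait until the exception vectors have been
 * set up.
 */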
void __init check_bugs64(void)
{
	if (!IS_ENABLED(CONFIG_CPU_MIPSR6))
		check_daddi();
}