/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *      Gareth Hughes <gareth@valinux.com>, May 2000
 * x86-64 work by Andi Kleen 2002
 */
#ifndef _ASM_X86_I387_H
#define _ASM_X86_I387_H

#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/regset.h>
#include <linux/hardirq.h>
#include <asm/asm.h>
#include <asm/processor.h>
#include <asm/sigcontext.h>
#include <asm/user.h>
#include <asm/uaccess.h>
#include <asm/xsave.h>

extern unsigned int sig_xstate_size;
extern void fpu_init(void);
extern void mxcsr_feature_mask_init(void);
extern int init_fpu(struct task_struct *child);
extern asmlinkage void math_state_restore(void);
extern void __math_state_restore(void);
extern void init_thread_xstate(void);
extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);

extern user_regset_active_fn fpregs_active, xfpregs_active;
extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get;
extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set;

extern struct _fpx_sw_bytes fx_sw_reserved;
#ifdef CONFIG_IA32_EMULATION
extern unsigned int sig_xstate_ia32_size;
extern struct _fpx_sw_bytes fx_sw_reserved_ia32;
struct _fpstate_ia32;
struct _xstate_ia32;
extern int save_i387_xstate_ia32(void __user *buf);
extern int restore_i387_xstate_ia32(void __user *buf);
#endif

#define X87_FSW_ES      (1 << 7)        /* Exception Summary */
#ifdef CONFIG_X86_64

/* Ignore delayed exceptions from user space */
static inline void tolerant_fwait(void)
{
        asm volatile("1:    fwait\n"
                     "2:\n"
                     _ASM_EXTABLE(1b, 2b));
}

static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
{
        int err;

        asm volatile("1:  rex64/fxrstor (%[fx])\n\t"
                     "2:\n"
                     ".section .fixup,\"ax\"\n"
                     "3:  movl $-1,%[err]\n"
                     "    jmp  2b\n"
                     ".previous\n"
                     _ASM_EXTABLE(1b, 3b)
                     : [err] "=r" (err)
#if 0 /* See comment in fxsave() below. */
                     : [fx] "r" (fx), "m" (*fx), "0" (0));
#else
                     : [fx] "cdaSDb" (fx), "m" (*fx), "0" (0));
#endif
        return err;
}

/* AMD CPUs don't save/restore FDP/FIP/FOP unless an exception
   is pending. Clear the x87 state here by setting it to fixed
   values. The kernel data segment can be sometimes 0 and sometimes
   new user value. Both should be ok.
   Use the PDA as safe address because it should be already in L1. */
static inline void clear_fpu_state(struct task_struct *tsk)
{
        struct xsave_struct *xstate = &tsk->thread.xstate->xsave;
        struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave;

        /*
         * xsave header may indicate the init state of the FP.
         */
        if ((task_thread_info(tsk)->status & TS_XSAVE) &&
            !(xstate->xsave_hdr.xstate_bv & XSTATE_FP))
                return;

        if (unlikely(fx->swd & X87_FSW_ES))
                asm volatile("fnclex");
        alternative_input(ASM_NOP8 ASM_NOP2,
                          "emms\n\t"            /* clear stack tags */
                          "fildl %%gs:0",       /* load to clear state */
                          X86_FEATURE_FXSAVE_LEAK);
}

static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
{
        int err;

        asm volatile("1:  rex64/fxsave (%[fx])\n\t"
                     "2:\n"
                     ".section .fixup,\"ax\"\n"
                     "3:  movl $-1,%[err]\n"
                     "    jmp  2b\n"
                     ".previous\n"
                     _ASM_EXTABLE(1b, 3b)
                     : [err] "=r" (err), "=m" (*fx)
#if 0 /* See comment in fxsave() below. */
                     : [fx] "r" (fx), "0" (0));
#else
                     : [fx] "cdaSDb" (fx), "0" (0));
#endif
        if (unlikely(err) &&
            __clear_user(fx, sizeof(struct i387_fxsave_struct)))
                err = -EFAULT;
        /* No need to clear here because the caller clears USED_MATH */
        return err;
}

static inline void fxsave(struct task_struct *tsk)
{
        /* Using "rex64; fxsave %0" is broken because, if the memory operand
           uses any extended registers for addressing, a second REX prefix
           will be generated (to the assembler, rex64 followed by semicolon
           is a separate instruction), and hence the 64-bitness is lost. */
#if 0
        /* Using "fxsaveq %0" would be the ideal choice, but is only supported
           starting with gas 2.16. */
        __asm__ __volatile__("fxsaveq %0"
                             : "=m" (tsk->thread.xstate->fxsave));
#elif 0
        /* Using, as a workaround, the properly prefixed form below isn't
           accepted by any binutils version so far released, complain to
           h.j.lu@intel.com for fixing it:
           http://www.sourceware.org/bugzilla/show_bug.cgi?id=2291 */
        __asm__ __volatile__("rex64/fxsave %0"
                             : "=m" (tsk->thread.xstate->fxsave));
#else
        /* This, however, we can work around by forcing the compiler to select
           an addressing mode that doesn't require extended registers. */
        __asm__ __volatile__("rex64/fxsave (%1)"
                             : "=m" (tsk->thread.xstate->fxsave)
                             : "cdaSDb" (&tsk->thread.xstate->fxsave));
#endif
}

static inline void __save_init_fpu(struct task_struct *tsk)
{
        if (task_thread_info(tsk)->status & TS_XSAVE)
                xsave(tsk);
        else
                fxsave(tsk);

        clear_fpu_state(tsk);
        task_thread_info(tsk)->status &= ~TS_USEDFPU;
}

#else  /* CONFIG_X86_32 */

#ifdef CONFIG_MATH_EMULATION
extern void finit_task(struct task_struct *tsk);
#else
static inline void finit_task(struct task_struct *tsk)
{
}
#endif

static inline void tolerant_fwait(void)
{
        asm volatile("fnclex ; fwait");
}


static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
{
        /*
         * The "nop" is needed to make the instructions the same
         * length.
         */
        alternative_input(
                "nop ; frstor %1",
                "fxrstor %1",
                X86_FEATURE_FXSR,
                "m" (*fx));

        return 0;
}

/* We need a safe address that is cheap to find and that is already
   in L1 during context switch. The best choices are unfortunately
   different for UP and SMP */
#ifdef CONFIG_SMP
#define safe_address (__per_cpu_offset[0])
#else
#define safe_address (kstat_cpu(0).cpustat.user)
#endif

/*
 * These must be called with preempt disabled
 */
static inline void __save_init_fpu(struct task_struct *tsk)
{
        if (task_thread_info(tsk)->status & TS_XSAVE) {
                struct xsave_struct *xstate = &tsk->thread.xstate->xsave;
                struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave;

                xsave(tsk);

                /*
                 * xsave header may indicate the init state of the FP.
                 */
                if (!(xstate->xsave_hdr.xstate_bv & XSTATE_FP))
                        goto end;

                if (unlikely(fx->swd & X87_FSW_ES))
                        asm volatile("fnclex");

                /*
                 * we can do a simple return here or be paranoid :)
                 */
                goto clear_state;
        }

        /* Use more nops than strictly needed in case the compiler
           varies code */
        alternative_input(
                "fnsave %[fx] ;fwait;" GENERIC_NOP8 GENERIC_NOP4,
                "fxsave %[fx]\n"
                "bt $7,%[fsw] ; jnc 1f ; fnclex\n1:",
                X86_FEATURE_FXSR,
                [fx] "m" (tsk->thread.xstate->fxsave),
                [fsw] "m" (tsk->thread.xstate->fxsave.swd) : "memory");
clear_state:
        /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
           is pending.  Clear the x87 state here by setting it to fixed
           values. safe_address is a random variable that should be in L1 */
        alternative_input(
                GENERIC_NOP8 GENERIC_NOP2,
                "emms\n\t"              /* clear stack tags */
                "fildl %[addr]",        /* set F?P to defined value */
                X86_FEATURE_FXSAVE_LEAK,
                [addr] "m" (safe_address));
end:
        task_thread_info(tsk)->status &= ~TS_USEDFPU;
}

#endif  /* CONFIG_X86_64 */

static inline int restore_fpu_checking(struct task_struct *tsk)
{
        if (task_thread_info(tsk)->status & TS_XSAVE)
                return xrstor_checking(&tsk->thread.xstate->xsave);
        else
                return fxrstor_checking(&tsk->thread.xstate->fxsave);
}

/*
 * Signal frame handlers...
 */
extern int save_i387_xstate(void __user *buf);
extern int restore_i387_xstate(void __user *buf);

static inline void __unlazy_fpu(struct task_struct *tsk)
{
        if (task_thread_info(tsk)->status & TS_USEDFPU) {
                __save_init_fpu(tsk);
                stts();
        } else
                tsk->fpu_counter = 0;
}

static inline void __clear_fpu(struct task_struct *tsk)
{
        if (task_thread_info(tsk)->status & TS_USEDFPU) {
                tolerant_fwait();
                task_thread_info(tsk)->status &= ~TS_USEDFPU;
                stts();
        }
}

static inline void kernel_fpu_begin(void)
{
        struct thread_info *me = current_thread_info();
        preempt_disable();
        if (me->status & TS_USEDFPU)
                __save_init_fpu(me->task);
        else
                clts();
}

static inline void kernel_fpu_end(void)
{
        stts();
        preempt_enable();
}
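
/*
 * Illustrative usage sketch (not part of this header): kernel code that
 * wants to execute FPU/SSE instructions must bracket them with the pair
 * above and must not sleep in between, since kernel_fpu_begin() leaves
 * preemption disabled. The function name below is hypothetical.
 *
 *      static void copy_block_sse(void *dst, const void *src)
 *      {
 *              kernel_fpu_begin();
 *              ... SSE loads/stores from src to dst ...
 *              kernel_fpu_end();
 *      }
 */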

static inline bool irq_fpu_usable(void)
{
        struct pt_regs *regs;

        return !in_interrupt() || !(regs = get_irq_regs()) ||
                user_mode(regs) || (read_cr0() & X86_CR0_TS);
}

/*
 * Some instructions like VIA's padlock instructions generate a spurious
 * DNA fault but don't modify SSE registers. And these instructions
 * get used from interrupt context as well. To prevent these kernel
 * instructions in interrupt context interacting wrongly with other
 * user/kernel fpu usage, we should use them only in the context of
 * irq_ts_save/restore()
 */
static inline int irq_ts_save(void)
{
        /*
         * If in process context and not atomic, we can take a spurious
         * DNA fault. Otherwise, doing clts() in process context requires
         * disabling preemption or some heavy lifting like
         * kernel_fpu_begin()
         */
        if (!in_atomic())
                return 0;

        if (read_cr0() & X86_CR0_TS) {
                clts();
                return 1;
        }

        return 0;
}

static inline void irq_ts_restore(int TS_state)
{
        if (TS_state)
                stts();
}
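
/*
 * Illustrative usage sketch (an assumption, modeled on the VIA Padlock
 * use case described above): save TS before issuing an instruction that
 * could otherwise raise a spurious DNA fault in atomic context, then
 * restore it afterwards.
 *
 *      int ts_state;
 *
 *      ts_state = irq_ts_save();
 *      ... issue the padlock instruction ...
 *      irq_ts_restore(ts_state);
 */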

#ifdef CONFIG_X86_64

static inline void save_init_fpu(struct task_struct *tsk)
{
        __save_init_fpu(tsk);
        stts();
}

#define unlazy_fpu      __unlazy_fpu
#define clear_fpu       __clear_fpu

#else  /* CONFIG_X86_32 */

/*
 * These disable preemption on their own and are safe to use anywhere.
 */
static inline void save_init_fpu(struct task_struct *tsk)
{
        preempt_disable();
        __save_init_fpu(tsk);
        stts();
        preempt_enable();
}

static inline void unlazy_fpu(struct task_struct *tsk)
{
        preempt_disable();
        __unlazy_fpu(tsk);
        preempt_enable();
}

static inline void clear_fpu(struct task_struct *tsk)
{
        preempt_disable();
        __clear_fpu(tsk);
        preempt_enable();
}

#endif  /* CONFIG_X86_64 */


/*
 * i387 state interaction
 */
static inline unsigned short get_fpu_cwd(struct task_struct *tsk)
{
        if (cpu_has_fxsr) {
                return tsk->thread.xstate->fxsave.cwd;
        } else {
                return (unsigned short)tsk->thread.xstate->fsave.cwd;
        }
}

static inline unsigned short get_fpu_swd(struct task_struct *tsk)
{
        if (cpu_has_fxsr) {
                return tsk->thread.xstate->fxsave.swd;
        } else {
                return (unsigned short)tsk->thread.xstate->fsave.swd;
        }
}

static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
{
        if (cpu_has_xmm) {
                return tsk->thread.xstate->fxsave.mxcsr;
        } else {
                return MXCSR_DEFAULT;
        }
}
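
/*
 * Illustrative sketch (not part of this header): an x87 exception handler
 * can combine these accessors to find the unmasked pending exceptions,
 * since a status-word exception bit only signals when the matching
 * control-word mask bit is clear:
 *
 *      unsigned short cwd = get_fpu_cwd(task);
 *      unsigned short swd = get_fpu_swd(task);
 *      unsigned short err = swd & ~cwd;        [unmasked exception bits]
 */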

#endif  /* _ASM_X86_I387_H */