1
2
3
4
5
6
7
8
9
10#ifndef _ASM_X86_I387_H
11#define _ASM_X86_I387_H
12
13#ifndef __ASSEMBLY__
14
15#include <linux/sched.h>
16#include <linux/kernel_stat.h>
17#include <linux/regset.h>
18#include <linux/hardirq.h>
19#include <linux/slab.h>
20#include <asm/asm.h>
21#include <asm/cpufeature.h>
22#include <asm/processor.h>
23#include <asm/sigcontext.h>
24#include <asm/user.h>
25#include <asm/uaccess.h>
26#include <asm/xsave.h>
27
/* Core FPU init / save-restore entry points (defined in i387.c / traps.c). */
extern unsigned int sig_xstate_size;
extern void fpu_init(void);
extern void mxcsr_feature_mask_init(void);
extern int init_fpu(struct task_struct *child);
extern asmlinkage void math_state_restore(void);
extern void __math_state_restore(void);
extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);

/* ptrace/coredump regset callbacks for the i387 and extended state. */
extern user_regset_active_fn fpregs_active, xfpregs_active;
extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get,
				xstateregs_get;
extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set,
				xstateregs_set;

/*
 * xstateregs_active == fpregs_active: the xstate regset is considered
 * active exactly when the FP regset is.
 */
#define xstateregs_active fpregs_active
47
/* Software-reserved bytes layout stored in the signal frame's fxsave area. */
extern struct _fpx_sw_bytes fx_sw_reserved;
#ifdef CONFIG_IA32_EMULATION
/* 32-bit compat variants for ia32 signal frames on a 64-bit kernel. */
extern unsigned int sig_xstate_ia32_size;
extern struct _fpx_sw_bytes fx_sw_reserved_ia32;
struct _fpstate_ia32;
struct _xstate_ia32;
extern int save_i387_xstate_ia32(void __user *buf);
extern int restore_i387_xstate_ia32(void __user *buf);
#endif
57
#ifdef CONFIG_MATH_EMULATION
extern void finit_soft_fpu(struct i387_soft_struct *soft);
#else
/* No math emulation configured: initializing soft-FPU state is a no-op. */
static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
#endif

#define X87_FSW_ES (1 << 7)	/* x87 status word: Exception Summary bit */
65
/* True if the CPU supports the XSAVEOPT instruction. */
static __always_inline __pure bool use_xsaveopt(void)
{
	return static_cpu_has(X86_FEATURE_XSAVEOPT);
}
70
/* True if the CPU supports XSAVE/XRSTOR for extended state management. */
static __always_inline __pure bool use_xsave(void)
{
	return static_cpu_has(X86_FEATURE_XSAVE);
}
75
/* True if the CPU supports FXSAVE/FXRSTOR. */
static __always_inline __pure bool use_fxsr(void)
{
	return static_cpu_has(X86_FEATURE_FXSR);
}
80
81extern void __sanitize_i387_state(struct task_struct *);
82
/*
 * Scrub the task's saved i387 state, but only when XSAVEOPT is in use —
 * only then is the extra pass in __sanitize_i387_state() needed.
 */
static inline void sanitize_i387_state(struct task_struct *tsk)
{
	if (use_xsaveopt())
		__sanitize_i387_state(tsk);
}
89
90#ifdef CONFIG_X86_64
/*
 * Restore the FPU registers from *fx with fault checking.
 *
 * Returns 0 on success; -1 if the fxrstor faulted (the exception table
 * entry redirects a fault at label 1 to the fixup at label 3, which
 * overwrites err).  err starts at 0 via the "0" (0) input constraint.
 */
static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
{
	int err;

#ifdef CONFIG_AS_FXSAVEQ
	/* Assembler understands fxsaveq/fxrstorq: use the explicit 64-bit form. */
	asm volatile("1: fxrstorq %[fx]\n\t"
		     "2:\n"
		     ".section .fixup,\"ax\"\n"
		     "3: movl $-1,%[err]\n"
		     "   jmp 2b\n"
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)
		     : [err] "=r" (err)
		     : [fx] "m" (*fx), "0" (0));
#else
	/*
	 * Older assembler: force a REX.W prefix by hand.  The "R" constraint
	 * keeps the address in a legacy register so no second REX prefix is
	 * generated for the memory operand.
	 */
	asm volatile("1: rex64/fxrstor (%[fx])\n\t"
		     "2:\n"
		     ".section .fixup,\"ax\"\n"
		     "3: movl $-1,%[err]\n"
		     "   jmp 2b\n"
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)
		     : [err] "=r" (err)
		     : [fx] "R" (fx), "m" (*fx), "0" (0));
#endif
	return err;
}
119
/*
 * Save the FPU registers to a user-space fxsave buffer.
 *
 * Returns 0 on success, -EFAULT if the buffer is not writable, or -1 if
 * the fxsave itself faulted (in which case the partially written buffer
 * is cleared so no kernel data leaks to user space).
 */
static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
{
	int err;

	/*
	 * Clear the bytes not touched by the fxsave and reserved
	 * for the SW usage.
	 */
	err = __clear_user(&fx->sw_reserved,
			   sizeof(struct _fpx_sw_bytes));
	if (unlikely(err))
		return -EFAULT;

#ifdef CONFIG_AS_FXSAVEQ
	asm volatile("1: fxsaveq %[fx]\n\t"
		     "2:\n"
		     ".section .fixup,\"ax\"\n"
		     "3: movl $-1,%[err]\n"
		     "   jmp 2b\n"
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)
		     : [err] "=r" (err), [fx] "=m" (*fx)
		     : "0" (0));
#else
	/* See the rex64 comment in fpu_fxsave() below. */
	asm volatile("1: rex64/fxsave (%[fx])\n\t"
		     "2:\n"
		     ".section .fixup,\"ax\"\n"
		     "3: movl $-1,%[err]\n"
		     "   jmp 2b\n"
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)
		     : [err] "=r" (err), "=m" (*fx)
		     : [fx] "R" (fx), "0" (0));
#endif
	/* On fault, wipe whatever was partially written. */
	if (unlikely(err) &&
	    __clear_user(fx, sizeof(struct i387_fxsave_struct)))
		err = -EFAULT;

	return err;
}
161
/* Save the FPU registers into fpu->state->fxsave (64-bit kernel). */
static inline void fpu_fxsave(struct fpu *fpu)
{
	/*
	 * A bare "rex64; fxsave %0" is broken: if the memory operand uses
	 * extended registers for addressing, the assembler emits a second
	 * REX prefix and the 64-bitness of the first is lost.
	 */
#ifdef CONFIG_AS_FXSAVEQ
	/* fxsaveq is the clean encoding, but needs a new-enough assembler. */
	__asm__ __volatile__("fxsaveq %0"
			     : "=m" (fpu->state->fxsave));
#else
	/*
	 * Work around the prefix problem by forcing the compiler to pick an
	 * addressing mode that doesn't need extended registers: "R" keeps
	 * the pointer in a legacy (non-r8..r15) register.
	 */
	asm volatile("rex64/fxsave (%[fx])"
		     : "=m" (fpu->state->fxsave)
		     : [fx] "R" (&fpu->state->fxsave));
#endif
}
188
189#else
190
191
/*
 * Restore the FPU registers from *fx (32-bit kernel).
 *
 * Uses frstor on CPUs without FXSR, patched to fxrstor at boot via the
 * alternatives mechanism.  Always returns 0: unlike the 64-bit variant
 * there is no fault checking here.
 */
static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
{
	/*
	 * The "nop" makes the two alternative instruction sequences the
	 * same length so they can be patched over each other.
	 */
	alternative_input(
		"nop ; frstor %1",
		"fxrstor %1",
		X86_FEATURE_FXSR,
		"m" (*fx));

	return 0;
}
206
/* Save the FPU registers into fpu->state->fxsave (32-bit kernel). */
static inline void fpu_fxsave(struct fpu *fpu)
{
	asm volatile("fxsave %[fx]"
		     : [fx] "=m" (fpu->state->fxsave));
}
212
213#endif
214
215
216
217
/*
 * Dummy operand for the "fildl" in fpu_save_init(): an address that is
 * cheap to form and likely already cache-hot during a context switch.
 * The best choice unfortunately differs between UP and SMP builds.
 */
#ifdef CONFIG_SMP
#define safe_address (__per_cpu_offset[0])
#else
#define safe_address (kstat_cpu(0).cpustat.user)
#endif
223
224
225
226
/*
 * Save the FPU state of @fpu using the best mechanism available
 * (xsave > fxsave > fnsave), then apply two x87 fixups needed on the
 * fx-format paths.
 */
static inline void fpu_save_init(struct fpu *fpu)
{
	if (use_xsave()) {
		fpu_xsave(fpu);

		/*
		 * xsave header may indicate the init state of the FP:
		 * if the FP bit is clear there is nothing further to fix up.
		 */
		if (!(fpu->state->xsave.xsave_hdr.xstate_bv & XSTATE_FP))
			return;
	} else if (use_fxsr()) {
		fpu_fxsave(fpu);
	} else {
		asm volatile("fnsave %[fx]; fwait"
			     : [fx] "=m" (fpu->state->fsave));
		return;
	}

	/* Clear pending x87 exceptions so they don't refire later. */
	if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES))
		asm volatile("fnclex");

	/*
	 * NOTE(review): this appears to be the FXSAVE_LEAK erratum
	 * workaround (FDP/FIP/FOP not updated unless an exception is
	 * pending): load a dummy value so stale instruction/data pointers
	 * can't leak between tasks — confirm against the errata document.
	 */
	alternative_input(
		ASM_NOP8 ASM_NOP2,
		"emms\n\t"		/* clear stack tags */
		"fildl %P[addr]",	/* set F?P to a defined value */
		X86_FEATURE_FXSAVE_LEAK,
		[addr] "m" (safe_address));
}
258
259static inline void __save_init_fpu(struct task_struct *tsk)
260{
261 fpu_save_init(&tsk->thread.fpu);
262 task_thread_info(tsk)->status &= ~TS_USEDFPU;
263}
264
265static inline int fpu_fxrstor_checking(struct fpu *fpu)
266{
267 return fxrstor_checking(&fpu->state->fxsave);
268}
269
/*
 * Restore @fpu's saved state into the registers, via xrstor when the
 * CPU supports XSAVE and fxrstor otherwise.  Returns 0 or an error.
 */
static inline int fpu_restore_checking(struct fpu *fpu)
{
	return use_xsave() ? fpu_xrstor_checking(fpu)
			   : fpu_fxrstor_checking(fpu);
}
277
278static inline int restore_fpu_checking(struct task_struct *tsk)
279{
280 return fpu_restore_checking(&tsk->thread.fpu);
281}
282
283
284
285
/* Save/restore the extended state to/from a user-space signal frame. */
extern int save_i387_xstate(void __user *buf);
extern int restore_i387_xstate(void __user *buf);
288
289static inline void __unlazy_fpu(struct task_struct *tsk)
290{
291 if (task_thread_info(tsk)->status & TS_USEDFPU) {
292 __save_init_fpu(tsk);
293 stts();
294 } else
295 tsk->fpu_counter = 0;
296}
297
/*
 * Throw away @tsk's live FPU state without saving it: drain delayed
 * exceptions, drop FPU ownership and set CR0.TS.  Caller must hold off
 * preemption (see clear_fpu()).
 */
static inline void __clear_fpu(struct task_struct *tsk)
{
	if (task_thread_info(tsk)->status & TS_USEDFPU) {
		/* Ignore delayed exceptions from user space */
		asm volatile("1: fwait\n"
			     "2:\n"
			     _ASM_EXTABLE(1b, 2b));
		task_thread_info(tsk)->status &= ~TS_USEDFPU;
		stts();
	}
}
309
/*
 * Begin using the FPU in kernel context.  Disables preemption (paired
 * with kernel_fpu_end()).  If the current task's FPU registers are
 * live, save them first; otherwise just clear CR0.TS so kernel FPU
 * instructions don't fault.
 */
static inline void kernel_fpu_begin(void)
{
	struct thread_info *me = current_thread_info();
	preempt_disable();
	if (me->status & TS_USEDFPU)
		__save_init_fpu(me->task);
	else
		clts();
}
319
/*
 * End a kernel_fpu_begin() section: set CR0.TS again so the next FPU
 * use traps (before preemption is possible), then re-enable preemption.
 */
static inline void kernel_fpu_end(void)
{
	stts();
	preempt_enable();
}
325
326static inline bool irq_fpu_usable(void)
327{
328 struct pt_regs *regs;
329
330 return !in_interrupt() || !(regs = get_irq_regs()) || \
331 user_mode(regs) || (read_cr0() & X86_CR0_TS);
332}
333
334
335
336
337
338
339
340
/*
 * Temporarily clear CR0.TS so atomic-context code can execute
 * FPU-touching instructions without a device-not-available fault.
 * Returns the value to hand back to irq_ts_restore().
 */
static inline int irq_ts_save(void)
{
	/*
	 * If in process context and not atomic, we can take a spurious
	 * DNA fault.  Otherwise, doing clts() in process context requires
	 * disabling preemption or some heavy lifting like
	 * kernel_fpu_begin().
	 */
	if (!in_atomic())
		return 0;

	if (read_cr0() & X86_CR0_TS) {
		clts();
		return 1;
	}

	return 0;
}
358
/* Undo irq_ts_save(): re-set CR0.TS only if it was cleared there. */
static inline void irq_ts_restore(int TS_state)
{
	if (!TS_state)
		return;

	stts();
}
364
365
366
367
/*
 * Preemption-safe: save @tsk's FPU registers and set CR0.TS so the
 * next FPU use traps.
 */
static inline void save_init_fpu(struct task_struct *tsk)
{
	preempt_disable();
	__save_init_fpu(tsk);
	stts();
	preempt_enable();
}
375
/* Preemption-safe wrapper around __unlazy_fpu(). */
static inline void unlazy_fpu(struct task_struct *tsk)
{
	preempt_disable();
	__unlazy_fpu(tsk);
	preempt_enable();
}
382
/* Preemption-safe wrapper around __clear_fpu(). */
static inline void clear_fpu(struct task_struct *tsk)
{
	preempt_disable();
	__clear_fpu(tsk);
	preempt_enable();
}
389
390
391
392
393static inline unsigned short get_fpu_cwd(struct task_struct *tsk)
394{
395 if (cpu_has_fxsr) {
396 return tsk->thread.fpu.state->fxsave.cwd;
397 } else {
398 return (unsigned short)tsk->thread.fpu.state->fsave.cwd;
399 }
400}
401
402static inline unsigned short get_fpu_swd(struct task_struct *tsk)
403{
404 if (cpu_has_fxsr) {
405 return tsk->thread.fpu.state->fxsave.swd;
406 } else {
407 return (unsigned short)tsk->thread.fpu.state->fsave.swd;
408 }
409}
410
411static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
412{
413 if (cpu_has_xmm) {
414 return tsk->thread.fpu.state->fxsave.mxcsr;
415 } else {
416 return MXCSR_DEFAULT;
417 }
418}
419
420static bool fpu_allocated(struct fpu *fpu)
421{
422 return fpu->state != NULL;
423}
424
425static inline int fpu_alloc(struct fpu *fpu)
426{
427 if (fpu_allocated(fpu))
428 return 0;
429 fpu->state = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
430 if (!fpu->state)
431 return -ENOMEM;
432 WARN_ON((unsigned long)fpu->state & 15);
433 return 0;
434}
435
436static inline void fpu_free(struct fpu *fpu)
437{
438 if (fpu->state) {
439 kmem_cache_free(task_xstate_cachep, fpu->state);
440 fpu->state = NULL;
441 }
442}
443
/*
 * Copy xstate_size bytes of saved state from @src to @dst.  Both areas
 * must already be allocated (see fpu_alloc()).
 */
static inline void fpu_copy(struct fpu *dst, struct fpu *src)
{
	memcpy(dst->state, src->state, xstate_size);
}
448
449extern void fpu_finit(struct fpu *fpu);
450
451#endif
452
453#endif
454