#ifndef _FPU_INTERNAL_H
#define _FPU_INTERNAL_H

#include <linux/kernel_stat.h>
#include <linux/regset.h>
#include <linux/slab.h>
#include <asm/asm.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/sigcontext.h>
#include <asm/user.h>
#include <asm/uaccess.h>
#include <asm/xsave.h>

extern unsigned int sig_xstate_size;
extern void fpu_init(void);

DECLARE_PER_CPU(struct task_struct *, fpu_owner_task);

extern user_regset_active_fn fpregs_active, xfpregs_active;
extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get,
			   xstateregs_get;
extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set,
			   xstateregs_set;

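/*
 * The xstate regset is active exactly when the classic FP regset is,
 * so reuse fpregs_active for it.
 */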
#define xstateregs_active fpregs_active

extern struct _fpx_sw_bytes fx_sw_reserved;
#ifdef CONFIG_IA32_EMULATION
extern unsigned int sig_xstate_ia32_size;
extern struct _fpx_sw_bytes fx_sw_reserved_ia32;
struct _fpstate_ia32;
struct _xstate_ia32;
extern int save_i387_xstate_ia32(void __user *buf);
extern int restore_i387_xstate_ia32(void __user *buf);
#endif

#ifdef CONFIG_MATH_EMULATION
extern void finit_soft_fpu(struct i387_soft_struct *soft);
#else
static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
#endif

#define X87_FSW_ES (1 << 7)	/* Exception Summary */

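/*
 * CPU feature tests selecting which save/restore mechanism to use;
 * static_cpu_has() compiles these down to a patched jump once
 * alternatives have been applied.
 */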
static __always_inline __pure bool use_xsaveopt(void)
{
	return static_cpu_has(X86_FEATURE_XSAVEOPT);
}

static __always_inline __pure bool use_xsave(void)
{
	return static_cpu_has(X86_FEATURE_XSAVE);
}

static __always_inline __pure bool use_fxsr(void)
{
	return static_cpu_has(X86_FEATURE_FXSR);
}

extern void __sanitize_i387_state(struct task_struct *);

static inline void sanitize_i387_state(struct task_struct *tsk)
{
	if (!use_xsaveopt())
		return;
	__sanitize_i387_state(tsk);
}

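/*
 * Low-level FXSAVE/FXRSTOR wrappers. The 64-bit variants carry
 * exception-table fixups so that a faulting save/restore reports
 * failure instead of oopsing.
 */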
#ifdef CONFIG_X86_64
static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
{
	int err;

#ifdef CONFIG_AS_FXSAVEQ
	asm volatile("1: fxrstorq %[fx]\n\t"
		     "2:\n"
		     ".section .fixup,\"ax\"\n"
		     "3: movl $-1,%[err]\n"
		     " jmp 2b\n"
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)
		     : [err] "=r" (err)
		     : [fx] "m" (*fx), "0" (0));
#else
	asm volatile("1: rex64/fxrstor (%[fx])\n\t"
		     "2:\n"
		     ".section .fixup,\"ax\"\n"
		     "3: movl $-1,%[err]\n"
		     " jmp 2b\n"
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)
		     : [err] "=r" (err)
		     : [fx] "R" (fx), "m" (*fx), "0" (0));
#endif
	return err;
}

static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
{
	int err;

	/*
	 * Clear the bytes of the software-reserved area that fxsave
	 * itself does not write.
	 */
	err = __clear_user(&fx->sw_reserved,
			   sizeof(struct _fpx_sw_bytes));
	if (unlikely(err))
		return -EFAULT;

#ifdef CONFIG_AS_FXSAVEQ
	asm volatile("1: fxsaveq %[fx]\n\t"
		     "2:\n"
		     ".section .fixup,\"ax\"\n"
		     "3: movl $-1,%[err]\n"
		     " jmp 2b\n"
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)
		     : [err] "=r" (err), [fx] "=m" (*fx)
		     : "0" (0));
#else
	asm volatile("1: rex64/fxsave (%[fx])\n\t"
		     "2:\n"
		     ".section .fixup,\"ax\"\n"
		     "3: movl $-1,%[err]\n"
		     " jmp 2b\n"
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)
		     : [err] "=r" (err), "=m" (*fx)
		     : [fx] "R" (fx), "0" (0));
#endif
	if (unlikely(err) &&
	    __clear_user(fx, sizeof(struct i387_fxsave_struct)))
		err = -EFAULT;

	return err;
}

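/*
 * Save the FPU registers into fpu->state->fxsave. On 64-bit the 64-bit
 * form of fxsave is needed (fxsaveq where the assembler knows it, an
 * explicit rex64 prefix otherwise) so that the full 64-bit instruction
 * and operand pointers are recorded.
 */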
static inline void fpu_fxsave(struct fpu *fpu)
{
#ifdef CONFIG_AS_FXSAVEQ
	__asm__ __volatile__("fxsaveq %0"
			     : "=m" (fpu->state->fxsave));
#else
	/*
	 * Force the address into a legacy register via the "R"
	 * constraint, so the memory operand cannot require an extra
	 * REX prefix that would interfere with the explicit rex64.
	 */
	asm volatile("rex64/fxsave (%[fx])"
		     : "=m" (fpu->state->fxsave)
		     : [fx] "R" (&fpu->state->fxsave));
#endif
}

#else	/* !CONFIG_X86_64 */

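/* Use fxrstor if the CPU supports FXSR, otherwise fall back to frstor. */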
static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
{
	/*
	 * The "nop" pads the frstor alternative out to the length of
	 * the fxrstor replacement, as the alternatives patching
	 * requires.
	 */
	alternative_input(
		"nop ; frstor %1",
		"fxrstor %1",
		X86_FEATURE_FXSR,
		"m" (*fx));

	return 0;
}

static inline void fpu_fxsave(struct fpu *fpu)
{
	asm volatile("fxsave %[fx]"
		     : [fx] "=m" (fpu->state->fxsave));
}

#endif	/* CONFIG_X86_64 */

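/*
 * Save the current FPU state. Must be called with preemption disabled.
 * Returns 1 if the register contents are still valid after the save,
 * 0 if they are not (fnsave destroys them, and pending exceptions are
 * cleared with fnclex).
 */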
static inline int fpu_save_init(struct fpu *fpu)
{
	if (use_xsave()) {
		fpu_xsave(fpu);

		/*
		 * The xsave header may indicate that the FP state is
		 * still in its init state, in which case there are no
		 * pending exceptions to worry about.
		 */
		if (!(fpu->state->xsave.xsave_hdr.xstate_bv & XSTATE_FP))
			return 1;
	} else if (use_fxsr()) {
		fpu_fxsave(fpu);
	} else {
		asm volatile("fnsave %[fx]; fwait"
			     : [fx] "=m" (fpu->state->fsave));
		return 0;
	}

	/*
	 * If exceptions are pending, clear them now so that they do
	 * not fire at some random later point.
	 */
	if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES)) {
		asm volatile("fnclex");
		return 0;
	}
	return 1;
}

static inline int __save_init_fpu(struct task_struct *tsk)
{
	return fpu_save_init(&tsk->thread.fpu);
}

static inline int fpu_fxrstor_checking(struct fpu *fpu)
{
	return fxrstor_checking(&fpu->state->fxsave);
}

static inline int fpu_restore_checking(struct fpu *fpu)
{
	if (use_xsave())
		return fpu_xrstor_checking(fpu);
	else
		return fpu_fxrstor_checking(fpu);
}

static inline int restore_fpu_checking(struct task_struct *tsk)
{
	/*
	 * AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an
	 * exception is pending. Clear the x87 state here (emms plus a
	 * dummy fildl from an already-hot variable) so that stale
	 * pointer state cannot leak from one task to the next.
	 */
	alternative_input(
		ASM_NOP8 ASM_NOP2,
		"emms\n\t"
		"fildl %P[addr]",
		X86_FEATURE_FXSAVE_LEAK,
		[addr] "m" (tsk->thread.fpu.has_fpu));

	return fpu_restore_checking(&tsk->thread.fpu);
}

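/*
 * Per-thread and per-CPU book-keeping of which task owns the FPU
 * register contents. These must be run with preemption disabled,
 * since they update the per-CPU fpu_owner_task.
 */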
static inline int __thread_has_fpu(struct task_struct *tsk)
{
	return tsk->thread.fpu.has_fpu;
}

static inline void __thread_clear_has_fpu(struct task_struct *tsk)
{
	tsk->thread.fpu.has_fpu = 0;
	this_cpu_write(fpu_owner_task, NULL);
}

static inline void __thread_set_has_fpu(struct task_struct *tsk)
{
	tsk->thread.fpu.has_fpu = 1;
	this_cpu_write(fpu_owner_task, tsk);
}
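/*
 * Encapsulate the CR0.TS handling together with the software has_fpu
 * flag: beginning FPU use clears TS, ending FPU use sets it again.
 */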
static inline void __thread_fpu_end(struct task_struct *tsk)
{
	__thread_clear_has_fpu(tsk);
	stts();
}

static inline void __thread_fpu_begin(struct task_struct *tsk)
{
	clts();
	__thread_set_has_fpu(tsk);
}
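/*
 * FPU state switching for scheduling is a two-stage process:
 *
 *  - switch_fpu_prepare() saves the old task's state and sets up
 *    CR0.TS for the new one, while still in the old task's context;
 *
 *  - switch_fpu_finish() restores the new task's register contents,
 *    if they are to be preloaded, once the switch has happened.
 */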
typedef struct { int preload; } fpu_switch_t;

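/*
 * The FPU registers still hold this task's state if it was the last
 * task to touch the FPU on this CPU and has not run anywhere else
 * since.
 */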
static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu)
{
	return new == this_cpu_read_stable(fpu_owner_task) &&
		cpu == new->thread.fpu.last_cpu;
}

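/*
 * Preload the new task's FPU state only if it has been using the FPU
 * frequently, as tracked by fpu_counter; otherwise leave CR0.TS set
 * and let the first FPU use fault the state back in.
 */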
static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct task_struct *new, int cpu)
{
	fpu_switch_t fpu;

	fpu.preload = tsk_used_math(new) && new->fpu_counter > 5;
	if (__thread_has_fpu(old)) {
		if (!__save_init_fpu(old))
			cpu = ~0;
		old->thread.fpu.last_cpu = cpu;
		old->thread.fpu.has_fpu = 0;	/* But leave fpu_owner_task! */

		/* Don't change CR0.TS if we just switch! */
		if (fpu.preload) {
			new->fpu_counter++;
			__thread_set_has_fpu(new);
			prefetch(new->thread.fpu.state);
		} else
			stts();
	} else {
		old->fpu_counter = 0;
		old->thread.fpu.last_cpu = ~0;
		if (fpu.preload) {
			new->fpu_counter++;
			if (fpu_lazy_restore(new, cpu))
				fpu.preload = 0;
			else
				prefetch(new->thread.fpu.state);
			__thread_fpu_begin(new);
		}
	}
	return fpu;
}
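/*
 * By the time this runs the context switch is done and, if the state
 * is being preloaded, the new task already owns the FPU: all that is
 * left is to restore the register contents, dropping FPU ownership
 * again if that restore fails.
 */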
static inline void switch_fpu_finish(struct task_struct *new, fpu_switch_t fpu)
{
	if (fpu.preload) {
		if (unlikely(restore_fpu_checking(new)))
			__thread_fpu_end(new);
	}
}
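/*
 * Signal frame handling: save/restore the extended FPU state to and
 * from a user-space buffer.
 */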
extern int save_i387_xstate(void __user *buf);
extern int restore_i387_xstate(void __user *buf);

static inline void __clear_fpu(struct task_struct *tsk)
{
	if (__thread_has_fpu(tsk)) {
		/* Ignore delayed exceptions from user space */
		asm volatile("1: fwait\n"
			     "2:\n"
			     _ASM_EXTABLE(1b, 2b));
		__thread_fpu_end(tsk);
	}
}
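/*
 * Grab or release the FPU for 'current' from user context; both take
 * care of preemption themselves. user_fpu_end() should only be used
 * once the register state has been saved elsewhere.
 */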
static inline void user_fpu_end(void)
{
	preempt_disable();
	__thread_fpu_end(current);
	preempt_enable();
}

static inline void user_fpu_begin(void)
{
	preempt_disable();
	if (!user_has_fpu())
		__thread_fpu_begin(current);
	preempt_enable();
}
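/*
 * These disable preemption on their own and are therefore safe to use
 * directly.
 */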
static inline void save_init_fpu(struct task_struct *tsk)
{
	WARN_ON_ONCE(!__thread_has_fpu(tsk));
	preempt_disable();
	__save_init_fpu(tsk);
	__thread_fpu_end(tsk);
	preempt_enable();
}

static inline void clear_fpu(struct task_struct *tsk)
{
	preempt_disable();
	__clear_fpu(tsk);
	preempt_enable();
}
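/*
 * i387 control/status word accessors, reading from whichever save
 * format the CPU uses.
 */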
static inline unsigned short get_fpu_cwd(struct task_struct *tsk)
{
	if (cpu_has_fxsr) {
		return tsk->thread.fpu.state->fxsave.cwd;
	} else {
		return (unsigned short)tsk->thread.fpu.state->fsave.cwd;
	}
}

static inline unsigned short get_fpu_swd(struct task_struct *tsk)
{
	if (cpu_has_fxsr) {
		return tsk->thread.fpu.state->fxsave.swd;
	} else {
		return (unsigned short)tsk->thread.fpu.state->fsave.swd;
	}
}

static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
{
	if (cpu_has_xmm) {
		return tsk->thread.fpu.state->fxsave.mxcsr;
	} else {
		return MXCSR_DEFAULT;
	}
}

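/*
 * FPU state area management: fpu->state comes from task_xstate_cachep
 * and must be 16-byte aligned for fxsave/xsave.
 */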
static inline bool fpu_allocated(struct fpu *fpu)
{
	return fpu->state != NULL;
}

static inline int fpu_alloc(struct fpu *fpu)
{
	if (fpu_allocated(fpu))
		return 0;
	fpu->state = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
	if (!fpu->state)
		return -ENOMEM;
	WARN_ON((unsigned long)fpu->state & 15);
	return 0;
}

static inline void fpu_free(struct fpu *fpu)
{
	if (fpu->state) {
		kmem_cache_free(task_xstate_cachep, fpu->state);
		fpu->state = NULL;
	}
}

static inline void fpu_copy(struct fpu *dst, struct fpu *src)
{
	memcpy(dst->state, src->state, xstate_size);
}

extern void fpu_finit(struct fpu *fpu);

#endif