/*
 * x86 FPU core state handling.
 *
 * NOTE(review): original file header comment was lost in extraction;
 * reconstructed as a minimal placeholder — restore from upstream if possible.
 */
8#include <asm/fpu/internal.h>
9#include <asm/fpu/regset.h>
10#include <asm/fpu/signal.h>
11#include <asm/fpu/types.h>
12#include <asm/traps.h>
13
14#include <linux/hardirq.h>
15#include <linux/pkeys.h>
16
17#define CREATE_TRACE_POINTS
18#include <asm/trace/fpu.h>
19
20
21
22
23
/*
 * Template for the initial (pristine) FPU state, used when clearing or
 * initializing a task's FPU context.  Mostly zeroes; the non-zero parts
 * depend on the hardware state format (see fpstate_init() below).
 */
union fpregs_state init_fpstate __read_mostly;

/*
 * Per-CPU flag: true while this CPU is inside a kernel_fpu_begin()/
 * kernel_fpu_end() section, i.e. the FPU registers are in use by
 * kernel code and must not be touched from interrupt context.
 */
static DEFINE_PER_CPU(bool, in_kernel_fpu);

/*
 * Per-CPU pointer to the FPU context whose state is currently loaded
 * in this CPU's FPU registers (NULL/stale after invalidation).
 */
DEFINE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);
43
/*
 * Mark this CPU as using the FPU from kernel context.  Warns if the
 * section is entered twice (nesting is not allowed).
 */
static void kernel_fpu_disable(void)
{
	WARN_ON_FPU(this_cpu_read(in_kernel_fpu));
	this_cpu_write(in_kernel_fpu, true);
}
49
/*
 * Mark the end of a kernel-FPU section on this CPU.  Warns if called
 * without a matching kernel_fpu_disable().
 */
static void kernel_fpu_enable(void)
{
	WARN_ON_FPU(!this_cpu_read(in_kernel_fpu));
	this_cpu_write(in_kernel_fpu, false);
}
55
/* Is this CPU currently inside a kernel-FPU section? */
static bool kernel_fpu_disabled(void)
{
	return this_cpu_read(in_kernel_fpu);
}
60
/*
 * Did the interrupt hit kernel code that was NOT using the FPU?
 * If so, interrupt context may safely use the FPU itself.
 */
static bool interrupted_kernel_fpu_idle(void)
{
	return !kernel_fpu_disabled();
}
65
66
67
68
69
70
71
72
73
74static bool interrupted_user_mode(void)
75{
76 struct pt_regs *regs = get_irq_regs();
77 return regs && user_mode(regs);
78}
79
80
81
82
83
84
85
86
87bool irq_fpu_usable(void)
88{
89 return !in_interrupt() ||
90 interrupted_user_mode() ||
91 interrupted_kernel_fpu_idle();
92}
93EXPORT_SYMBOL(irq_fpu_usable);
94
95void __kernel_fpu_begin(void)
96{
97 struct fpu *fpu = ¤t->thread.fpu;
98
99 WARN_ON_FPU(!irq_fpu_usable());
100
101 kernel_fpu_disable();
102
103 if (fpu->initialized) {
104
105
106
107
108 copy_fpregs_to_fpstate(fpu);
109 } else {
110 __cpu_invalidate_fpregs_state();
111 }
112}
113EXPORT_SYMBOL(__kernel_fpu_begin);
114
115void __kernel_fpu_end(void)
116{
117 struct fpu *fpu = ¤t->thread.fpu;
118
119 if (fpu->initialized)
120 copy_kernel_to_fpregs(&fpu->state);
121
122 kernel_fpu_enable();
123}
124EXPORT_SYMBOL(__kernel_fpu_end);
125
/*
 * Public wrapper: disable preemption, then enter a kernel-FPU section.
 * Preemption must stay off until kernel_fpu_end() — the FPU registers
 * hold kernel data that a context switch must not observe.
 */
void kernel_fpu_begin(void)
{
	preempt_disable();
	__kernel_fpu_begin();
}
EXPORT_SYMBOL_GPL(kernel_fpu_begin);
132
/*
 * Public wrapper: leave the kernel-FPU section, then re-enable
 * preemption (reverse order of kernel_fpu_begin()).
 */
void kernel_fpu_end(void)
{
	__kernel_fpu_end();
	preempt_enable();
}
EXPORT_SYMBOL_GPL(kernel_fpu_end);
139
140
141
142
143
144
145void fpu__save(struct fpu *fpu)
146{
147 WARN_ON_FPU(fpu != ¤t->thread.fpu);
148
149 preempt_disable();
150 trace_x86_fpu_before_save(fpu);
151 if (fpu->initialized) {
152 if (!copy_fpregs_to_fpstate(fpu)) {
153 copy_kernel_to_fpregs(&fpu->state);
154 }
155 }
156 trace_x86_fpu_after_save(fpu);
157 preempt_enable();
158}
159EXPORT_SYMBOL_GPL(fpu__save);
160
161
162
163
/*
 * Initialize a legacy (FNSAVE-format) FPU state image to the
 * architectural power-on defaults.  The 0xffff high halves mirror
 * how FNSAVE stores 16-bit fields in 32-bit slots.
 */
static inline void fpstate_init_fstate(struct fregs_state *fp)
{
	fp->cwd = 0xffff037fu;	/* default control word: all exceptions masked */
	fp->swd = 0xffff0000u;	/* status word: clear */
	fp->twd = 0xffffffffu;	/* tag word: all registers empty */
	fp->fos = 0xffff0000u;	/* last operand selector: clear */
}
171
/*
 * Initialize an fpregs_state to the default state for the running
 * hardware:
 *  - no FPU at all       -> software-emulation state
 *  - XSAVES supported    -> xstate header setup, then FXSR defaults
 *  - FXSR supported      -> FXSAVE-format defaults
 *  - otherwise           -> legacy FNSAVE-format defaults
 *
 * Note: the XSAVES and FXSR branches are deliberately NOT exclusive —
 * an XSAVES system also gets its embedded fxsave area initialized.
 */
void fpstate_init(union fpregs_state *state)
{
	if (!static_cpu_has(X86_FEATURE_FPU)) {
		fpstate_init_soft(&state->soft);
		return;
	}

	memset(state, 0, fpu_kernel_xstate_size);

	if (static_cpu_has(X86_FEATURE_XSAVES))
		fpstate_init_xstate(&state->xsave);
	if (static_cpu_has(X86_FEATURE_FXSR))
		fpstate_init_fxstate(&state->fxsave);
	else
		fpstate_init_fstate(&state->fsave);
}
EXPORT_SYMBOL_GPL(fpstate_init);
189
190int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
191{
192 dst_fpu->last_cpu = -1;
193
194 if (!src_fpu->initialized || !static_cpu_has(X86_FEATURE_FPU))
195 return 0;
196
197 WARN_ON_FPU(src_fpu != ¤t->thread.fpu);
198
199
200
201
202
203 memset(&dst_fpu->state.xsave, 0, fpu_kernel_xstate_size);
204
205
206
207
208
209
210
211
212 if (!copy_fpregs_to_fpstate(dst_fpu)) {
213 memcpy(&src_fpu->state, &dst_fpu->state, fpu_kernel_xstate_size);
214 copy_kernel_to_fpregs(&src_fpu->state);
215 }
216
217 trace_x86_fpu_copy_src(src_fpu);
218 trace_x86_fpu_copy_dst(dst_fpu);
219
220 return 0;
221}
222
223
224
225
226
227void fpu__initialize(struct fpu *fpu)
228{
229 WARN_ON_FPU(fpu != ¤t->thread.fpu);
230
231 if (!fpu->initialized) {
232 fpstate_init(&fpu->state);
233 trace_x86_fpu_init_state(fpu);
234
235 trace_x86_fpu_activate_state(fpu);
236
237 fpu->initialized = 1;
238 }
239}
240EXPORT_SYMBOL_GPL(fpu__initialize);
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257void fpu__prepare_read(struct fpu *fpu)
258{
259 if (fpu == ¤t->thread.fpu) {
260 fpu__save(fpu);
261 } else {
262 if (!fpu->initialized) {
263 fpstate_init(&fpu->state);
264 trace_x86_fpu_init_state(fpu);
265
266 trace_x86_fpu_activate_state(fpu);
267
268 fpu->initialized = 1;
269 }
270 }
271}
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286void fpu__prepare_write(struct fpu *fpu)
287{
288
289
290
291
292 WARN_ON_FPU(fpu == ¤t->thread.fpu);
293
294 if (fpu->initialized) {
295
296 __fpu_invalidate_fpregs_state(fpu);
297 } else {
298 fpstate_init(&fpu->state);
299 trace_x86_fpu_init_state(fpu);
300
301 trace_x86_fpu_activate_state(fpu);
302
303 fpu->initialized = 1;
304 }
305}
306
307
308
309
310
311
312
313
314
315
316
/*
 * Load the task's fpstate into the FPU registers and mark them active.
 * Used e.g. on the first FPU use after a lazy switch.
 *
 * kernel_fpu_disable()/enable() brackets the sequence so an interrupt
 * cannot start its own FPU section while the registers are half-loaded.
 */
void fpu__restore(struct fpu *fpu)
{
	fpu__initialize(fpu);

	/* Avoid __kernel_fpu_begin() right after fpregs_activate() */
	kernel_fpu_disable();
	trace_x86_fpu_before_restore(fpu);
	fpregs_activate(fpu);
	copy_kernel_to_fpregs(&fpu->state);
	trace_x86_fpu_after_restore(fpu);
	kernel_fpu_enable();
}
EXPORT_SYMBOL_GPL(fpu__restore);
330
331
332
333
334
335
336
337
338
339
340void fpu__drop(struct fpu *fpu)
341{
342 preempt_disable();
343
344 if (fpu == ¤t->thread.fpu) {
345 if (fpu->initialized) {
346
347 asm volatile("1: fwait\n"
348 "2:\n"
349 _ASM_EXTABLE(1b, 2b));
350 fpregs_deactivate(fpu);
351 }
352 }
353
354 fpu->initialized = 0;
355
356 trace_x86_fpu_dropped(fpu);
357
358 preempt_enable();
359}
360
361
362
363
364
/*
 * Load the pristine init_fpstate into the FPU registers, using the
 * best available mechanism (XSAVE > FXSR > legacy FRSTOR).  Also
 * resets PKRU to its init value on protection-keys hardware.
 */
static inline void copy_init_fpstate_to_fpregs(void)
{
	if (use_xsave())
		copy_kernel_to_xregs(&init_fpstate.xsave, -1);	/* -1: restore all components */
	else if (static_cpu_has(X86_FEATURE_FXSR))
		copy_kernel_to_fxregs(&init_fpstate.fxsave);
	else
		copy_kernel_to_fregs(&init_fpstate.fsave);

	if (boot_cpu_has(X86_FEATURE_OSPKE))
		copy_init_pkru_to_fpregs();
}
377
378
379
380
381
382
383
384void fpu__clear(struct fpu *fpu)
385{
386 WARN_ON_FPU(fpu != ¤t->thread.fpu);
387
388 fpu__drop(fpu);
389
390
391
392
393 if (static_cpu_has(X86_FEATURE_FPU)) {
394 preempt_disable();
395 fpu__initialize(fpu);
396 user_fpu_begin();
397 copy_init_fpstate_to_fpregs();
398 preempt_enable();
399 }
400}
401
402
403
404
405
/*
 * Translate an FPU trap into a SIGFPE si_code (FPE_FLT*).
 *
 * @fpu:     the faulting task's FPU context (state already saved)
 * @trap_nr: X86_TRAP_MF for x87 faults, otherwise treated as SIMD (#XF)
 *
 * Returns an FPE_FLT* code, or 0 if no unmasked exception bit could be
 * identified (spurious/ambiguous fault).
 */
int fpu__exception_code(struct fpu *fpu, int trap_nr)
{
	int err;

	if (trap_nr == X86_TRAP_MF) {
		unsigned short cwd, swd;
		/*
		 * x87 math fault: the unmasked pending exceptions are the
		 * status-word bits not masked out by the control word.
		 * Read cwd/swd from whichever save format is in use.
		 */
		if (boot_cpu_has(X86_FEATURE_FXSR)) {
			cwd = fpu->state.fxsave.cwd;
			swd = fpu->state.fxsave.swd;
		} else {
			cwd = (unsigned short)fpu->state.fsave.cwd;
			swd = (unsigned short)fpu->state.fsave.swd;
		}

		err = swd & ~cwd;
	} else {
		/*
		 * SIMD (#XF) fault: MXCSR holds exception flags in bits 0-5
		 * and the corresponding mask bits in bits 7-12, so shifting
		 * right by 7 aligns masks over flags; an unmasked pending
		 * exception is a flag whose mask bit is clear.
		 */
		unsigned short mxcsr = MXCSR_DEFAULT;

		if (boot_cpu_has(X86_FEATURE_XMM))
			mxcsr = fpu->state.fxsave.mxcsr;

		err = ~(mxcsr >> 7) & mxcsr;
	}

	if (err & 0x001) {	/* Invalid op */
		/*
		 * swd & 0x240 == 0x040: Stack Underflow
		 * swd & 0x240 == 0x240: Stack Overflow
		 * User must clear the SF bit (0x40) if set
		 */
		return FPE_FLTINV;
	} else if (err & 0x004) { /* Divide by Zero */
		return FPE_FLTDIV;
	} else if (err & 0x008) { /* Overflow */
		return FPE_FLTOVF;
	} else if (err & 0x012) { /* Denormal (0x002) or Underflow (0x010) */
		return FPE_FLTUND;
	} else if (err & 0x020) { /* Precision */
		return FPE_FLTRES;
	}

	/*
	 * No unmasked exception bit matched; report 0 so the caller can
	 * fall back to a generic code.
	 */
	return 0;
}
470