1
2
3
4
5
6
7#include <linux/compat.h>
8#include <linux/cpu.h>
9#include <linux/mman.h>
10#include <linux/pkeys.h>
11#include <linux/seq_file.h>
12#include <linux/proc_fs.h>
13
14#include <asm/fpu/api.h>
15#include <asm/fpu/internal.h>
16#include <asm/fpu/signal.h>
17#include <asm/fpu/regset.h>
18#include <asm/fpu/xstate.h>
19
20#include <asm/tlbflush.h>
21#include <asm/cpufeature.h>
22
23
24
25
26
27
/*
 * Human-readable names for the xstate components, indexed by
 * 'enum xfeature'.  The final "unknown" entry is the catch-all
 * used when an index is clamped to the end of the table.
 */
static const char *xfeature_names[] =
{
	"x87 floating point registers"	,
	"SSE registers"			,
	"AVX registers"			,
	"MPX bounds registers"		,
	"MPX CSR"			,
	"AVX-512 opmask"		,
	"AVX-512 Hi256"			,
	"AVX-512 ZMM_Hi256"		,
	"Processor Trace (unused)"	,
	"Protection Keys User registers",
	"unknown xstate feature"	,
};
42
/*
 * The CPUID feature flag corresponding to each xstate component, indexed
 * by xfeature number.  Used once at boot to clear xfeature bits whose
 * CPUID feature flag is not set (see fpu__init_system_xstate()).
 */
static short xsave_cpuid_features[] __initdata = {
	X86_FEATURE_FPU,	/* XFEATURE_FP */
	X86_FEATURE_XMM,	/* XFEATURE_SSE */
	X86_FEATURE_AVX,	/* XFEATURE_YMM */
	X86_FEATURE_MPX,	/* XFEATURE_BNDREGS */
	X86_FEATURE_MPX,	/* XFEATURE_BNDCSR */
	X86_FEATURE_AVX512F,	/* XFEATURE_OPMASK */
	X86_FEATURE_AVX512F,	/* XFEATURE_ZMM_Hi256 */
	X86_FEATURE_AVX512F,	/* XFEATURE_Hi16_ZMM */
	X86_FEATURE_INTEL_PT,	/* XFEATURE_PT_UNIMPLEMENTED_SO_FAR */
	X86_FEATURE_PKU,	/* XFEATURE_PKRU */
};
55
56
57
58
/*
 * Mask of xstate components supported by the CPU and enabled by the
 * kernel in XCR0.  Established once at boot in fpu__init_system_xstate()
 * and read-mostly afterwards.
 */
u64 xfeatures_mask __read_mostly;

/* Per-component offset/size in the standard (non-compacted) XSAVE layout: */
static unsigned int xstate_offsets[XFEATURE_MAX] = { [ 0 ... XFEATURE_MAX - 1] = -1};
static unsigned int xstate_sizes[XFEATURE_MAX] = { [ 0 ... XFEATURE_MAX - 1] = -1};
/* Per-component offset in the kernel buffer (compacted when XSAVES is used): */
static unsigned int xstate_comp_offsets[XFEATURE_MAX] = { [ 0 ... XFEATURE_MAX - 1] = -1};

/*
 * The XSAVE area of kernel can be in standard or compacted format;
 * it is always in standard format for user mode. This is the user
 * mode standard format size used for signal and ptrace frames.
 */
unsigned int fpu_user_xstate_size;
71
72
73
74
75
76
77int cpu_has_xfeatures(u64 xfeatures_needed, const char **feature_name)
78{
79 u64 xfeatures_missing = xfeatures_needed & ~xfeatures_mask;
80
81 if (unlikely(feature_name)) {
82 long xfeature_idx, max_idx;
83 u64 xfeatures_print;
84
85
86
87
88
89
90
91 if (xfeatures_missing)
92 xfeatures_print = xfeatures_missing;
93 else
94 xfeatures_print = xfeatures_needed;
95
96 xfeature_idx = fls64(xfeatures_print)-1;
97 max_idx = ARRAY_SIZE(xfeature_names)-1;
98 xfeature_idx = min(xfeature_idx, max_idx);
99
100 *feature_name = xfeature_names[xfeature_idx];
101 }
102
103 if (xfeatures_missing)
104 return 0;
105
106 return 1;
107}
108EXPORT_SYMBOL_GPL(cpu_has_xfeatures);
109
/*
 * Returns true if @xfeature_nr is a supervisor state component, i.e.
 * one that can only be managed with XSAVES/XRSTORS.
 *
 * CPUID.(EAX=0xD,ECX=xfeature_nr):ECX[0] is set for supervisor states.
 */
static bool xfeature_is_supervisor(int xfeature_nr)
{
	u32 eax, ebx, ecx, edx;

	cpuid_count(XSTATE_CPUID, xfeature_nr, &eax, &ebx, &ecx, &edx);
	return ecx & 1;
}
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
/*
 * When executing XSAVEOPT (or other optimized XSAVE instructions), if
 * a feature is in its init state, the hardware may skip writing it out
 * and leave its bit clear in header.xfeatures.  Before the in-memory
 * buffer is exposed (e.g. via ptrace), force any such skipped components
 * to their canonical init values so stale data is never observed.
 */
void fpstate_sanitize_xstate(struct fpu *fpu)
{
	struct fxregs_state *fx = &fpu->state.fxsave;
	int feature_bit;
	u64 xfeatures;

	if (!use_xsaveopt())
		return;

	xfeatures = fpu->state.xsave.header.xfeatures;

	/*
	 * None of the feature bits are in init state. So nothing else
	 * to do for us, as the memory layout is up to date.
	 */
	if ((xfeatures & xfeatures_mask) == xfeatures_mask)
		return;

	/*
	 * FP is in init state: write the canonical x87 init values.
	 */
	if (!(xfeatures & XFEATURE_MASK_FP)) {
		fx->cwd = 0x37f;	/* default control word: all exceptions masked */
		fx->swd = 0;
		fx->twd = 0;
		fx->fop = 0;
		fx->rip = 0;
		fx->rdp = 0;
		memset(&fx->st_space[0], 0, 128);	/* 8 x 16-byte st() regs */
	}

	/*
	 * SSE is in init state: clear the XMM register file.
	 */
	if (!(xfeatures & XFEATURE_MASK_SSE))
		memset(&fx->xmm_space[0], 0, 256);	/* 16 x 16-byte xmm regs */

	/*
	 * First two features are FPU and SSE, which above we handled
	 * in a special way already:
	 */
	feature_bit = 0x2;
	xfeatures = (xfeatures_mask & ~xfeatures) >> 2;

	/*
	 * Update all the remaining memory layouts according to their
	 * standard xstate layout, if their header bit is in the init
	 * state:
	 */
	while (xfeatures) {
		if (xfeatures & 0x1) {
			int offset = xstate_comp_offsets[feature_bit];
			int size = xstate_sizes[feature_bit];

			/* Overwrite the stale area with this component's init value: */
			memcpy((void *)fx + offset,
			       (void *)&init_fpstate.xsave + offset,
			       size);
		}

		xfeatures >>= 1;
		feature_bit++;
	}
}
201
202
203
204
205
/*
 * Enable the extended processor state save/restore feature.
 * Called once per CPU onlining.
 */
void fpu__init_cpu_xstate(void)
{
	if (!boot_cpu_has(X86_FEATURE_XSAVE) || !xfeatures_mask)
		return;
	/*
	 * Make it clear that XSAVES supervisor states are not yet
	 * implemented should anyone expect it to work by changing
	 * bits in XFEATURE_MASK_* macros and XCR0.
	 */
	WARN_ONCE((xfeatures_mask & XFEATURE_MASK_SUPERVISOR),
		"x86/fpu: XSAVES supervisor states are not yet implemented.\n");

	xfeatures_mask &= ~XFEATURE_MASK_SUPERVISOR;

	/* CR4.OSXSAVE must be set before XSETBV may be executed: */
	cr4_set_bits(X86_CR4_OSXSAVE);
	xsetbv(XCR_XFEATURE_ENABLED_MASK, xfeatures_mask);
}
223
224
225
226
227
228
229static int xfeature_enabled(enum xfeature xfeature)
230{
231 return !!(xfeatures_mask & (1UL << xfeature));
232}
233
234
235
236
237
/*
 * Record the offsets and sizes of various xstates contained
 * in the XSAVE state memory layout.
 */
static void __init setup_xstate_features(void)
{
	u32 eax, ebx, ecx, edx, i;
	/* start at the beginning of the "extended state" area: */
	unsigned int last_good_offset = offsetof(struct xregs_state,
						 extended_state_area);
	/*
	 * The FP xstates and SSE xstates are legacy states. They are always
	 * at fixed offsets in the xsave area, in either compacted form
	 * or standard form.
	 */
	xstate_offsets[XFEATURE_FP] = 0;
	xstate_sizes[XFEATURE_FP] = offsetof(struct fxregs_state,
					     xmm_space);

	xstate_offsets[XFEATURE_SSE] = xstate_sizes[XFEATURE_FP];
	xstate_sizes[XFEATURE_SSE] = sizeof_field(struct fxregs_state,
						  xmm_space);

	for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
		if (!xfeature_enabled(i))
			continue;

		cpuid_count(XSTATE_CPUID, i, &eax, &ebx, &ecx, &edx);

		/* CPUID.(EAX=0xD,ECX=i):EAX is the size of component 'i': */
		xstate_sizes[i] = eax;

		/*
		 * If an xfeature is supervisor state, the offset in EBX is
		 * invalid, leave it to -1.
		 */
		if (xfeature_is_supervisor(i))
			continue;

		/* CPUID.(EAX=0xD,ECX=i):EBX is the standard-format offset: */
		xstate_offsets[i] = ebx;

		/*
		 * In our xstate size checks, we assume that the highest-numbered
		 * xstate feature has the highest offset in the buffer.  Ensure
		 * it does.
		 */
		WARN_ONCE(last_good_offset > xstate_offsets[i],
			"x86/fpu: misordered xstate at %d\n", last_good_offset);

		last_good_offset = xstate_offsets[i];
	}
}
285
286static void __init print_xstate_feature(u64 xstate_mask)
287{
288 const char *feature_name;
289
290 if (cpu_has_xfeatures(xstate_mask, &feature_name))
291 pr_info("x86/fpu: Supporting XSAVE feature 0x%03Lx: '%s'\n", xstate_mask, feature_name);
292}
293
294
295
296
297static void __init print_xstate_features(void)
298{
299 print_xstate_feature(XFEATURE_MASK_FP);
300 print_xstate_feature(XFEATURE_MASK_SSE);
301 print_xstate_feature(XFEATURE_MASK_YMM);
302 print_xstate_feature(XFEATURE_MASK_BNDREGS);
303 print_xstate_feature(XFEATURE_MASK_BNDCSR);
304 print_xstate_feature(XFEATURE_MASK_OPMASK);
305 print_xstate_feature(XFEATURE_MASK_ZMM_Hi256);
306 print_xstate_feature(XFEATURE_MASK_Hi16_ZMM);
307 print_xstate_feature(XFEATURE_MASK_PKRU);
308}
309
310
311
312
313
/*
 * Sanity-check that 'nr' is a valid *extended* xstate component number.
 * FP and SSE live in the legacy fxsave area and are handled separately,
 * so they are out of range here.
 */
#define CHECK_XFEATURE(nr) do {		\
	WARN_ON(nr < FIRST_EXTENDED_XFEATURE);	\
	WARN_ON(nr >= XFEATURE_MAX);	\
} while (0)
318
319
320
321
322
323static int xfeature_is_aligned(int xfeature_nr)
324{
325 u32 eax, ebx, ecx, edx;
326
327 CHECK_XFEATURE(xfeature_nr);
328
329 if (!xfeature_enabled(xfeature_nr)) {
330 WARN_ONCE(1, "Checking alignment of disabled xfeature %d\n",
331 xfeature_nr);
332 return 0;
333 }
334
335 cpuid_count(XSTATE_CPUID, xfeature_nr, &eax, &ebx, &ecx, &edx);
336
337
338
339
340
341 return !!(ecx & 2);
342}
343
344
345
346
347
348
/*
 * This function sets up offsets of all extended states in the xsave
 * area.  It supports both the standard format and the compacted format
 * of the xsave area.
 */
static void __init setup_xstate_comp_offsets(void)
{
	unsigned int next_offset;
	int i;

	/*
	 * The FP and SSE xstates are legacy states. They are always
	 * at fixed offsets in the xsave area, in either compacted form
	 * or standard form.
	 */
	xstate_comp_offsets[XFEATURE_FP] = 0;
	xstate_comp_offsets[XFEATURE_SSE] = offsetof(struct fxregs_state,
						     xmm_space);

	if (!boot_cpu_has(X86_FEATURE_XSAVES)) {
		/* Standard format: reuse the CPUID-enumerated offsets. */
		for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
			if (xfeature_enabled(i))
				xstate_comp_offsets[i] = xstate_offsets[i];
		}
		return;
	}

	/* Compacted format: components are packed right after the header. */
	next_offset = FXSAVE_SIZE + XSAVE_HDR_SIZE;

	for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
		if (!xfeature_enabled(i))
			continue;

		/* Some components require 64-byte alignment: */
		if (xfeature_is_aligned(i))
			next_offset = ALIGN(next_offset, 64);

		xstate_comp_offsets[i] = next_offset;
		next_offset += xstate_sizes[i];
	}
}
384
385
386
387
388static void __init print_xstate_offset_size(void)
389{
390 int i;
391
392 for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
393 if (!xfeature_enabled(i))
394 continue;
395 pr_info("x86/fpu: xstate_offset[%d]: %4d, xstate_sizes[%d]: %4d\n",
396 i, xstate_comp_offsets[i], i, xstate_sizes[i]);
397 }
398}
399
400
401
402
/*
 * Setup the xstate image representing the init state.
 */
static void __init setup_init_fpu_buf(void)
{
	static int on_boot_cpu __initdata = 1;

	/* This must run exactly once, on the boot CPU: */
	WARN_ON_FPU(!on_boot_cpu);
	on_boot_cpu = 0;

	if (!boot_cpu_has(X86_FEATURE_XSAVE))
		return;

	setup_xstate_features();
	print_xstate_features();

	/* XSAVES requires a valid compacted xcomp_bv in the buffer: */
	if (boot_cpu_has(X86_FEATURE_XSAVES))
		init_fpstate.xsave.header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT |
						     xfeatures_mask;

	/*
	 * Init all the features state with header.xfeatures being 0x0:
	 */
	copy_kernel_to_xregs_booting(&init_fpstate.xsave);

	/*
	 * Dump the init state again. This is to identify the init state
	 * of any feature which is not represented by all zero's.
	 */
	copy_xregs_to_kernel_booting(&init_fpstate.xsave);
}
431
/*
 * Return the standard-format (non-compacted) offset of @xfeature_nr,
 * as enumerated by CPUID.(EAX=0xD,ECX=nr):EBX.
 */
static int xfeature_uncompacted_offset(int xfeature_nr)
{
	u32 eax, ebx, ecx, edx;

	/*
	 * Only XSAVES supports supervisor states and it uses compacted
	 * format. Checking a supervisor state's uncompacted offset is
	 * an error.
	 */
	if (XFEATURE_MASK_SUPERVISOR & BIT_ULL(xfeature_nr)) {
		WARN_ONCE(1, "No fixed offset for xstate %d\n", xfeature_nr);
		return -1;
	}

	CHECK_XFEATURE(xfeature_nr);
	cpuid_count(XSTATE_CPUID, xfeature_nr, &eax, &ebx, &ecx, &edx);
	return ebx;
}
450
451static int xfeature_size(int xfeature_nr)
452{
453 u32 eax, ebx, ecx, edx;
454
455 CHECK_XFEATURE(xfeature_nr);
456 cpuid_count(XSTATE_CPUID, xfeature_nr, &eax, &ebx, &ecx, &edx);
457 return eax;
458}
459
460
461
462
463
464
465
466
467
468
469int using_compacted_format(void)
470{
471 return boot_cpu_has(X86_FEATURE_XSAVES);
472}
473
474
475int validate_xstate_header(const struct xstate_header *hdr)
476{
477
478 if (hdr->xfeatures & (~xfeatures_mask | XFEATURE_MASK_SUPERVISOR))
479 return -EINVAL;
480
481
482 if (hdr->xcomp_bv)
483 return -EINVAL;
484
485
486
487
488
489 BUILD_BUG_ON(sizeof(hdr->reserved) != 48);
490
491
492 if (memchr_inv(hdr->reserved, 0, sizeof(hdr->reserved)))
493 return -EINVAL;
494
495 return 0;
496}
497
498static void __xstate_dump_leaves(void)
499{
500 int i;
501 u32 eax, ebx, ecx, edx;
502 static int should_dump = 1;
503
504 if (!should_dump)
505 return;
506 should_dump = 0;
507
508
509
510
511 for (i = 0; i < XFEATURE_MAX + 10; i++) {
512 cpuid_count(XSTATE_CPUID, i, &eax, &ebx, &ecx, &edx);
513 pr_warn("CPUID[%02x, %02x]: eax=%08x ebx=%08x ecx=%08x edx=%08x\n",
514 XSTATE_CPUID, i, eax, ebx, ecx, edx);
515 }
516}
517
/* Warn once about an XSAVE inconsistency and dump all CPUID 0xD leaves: */
#define XSTATE_WARN_ON(x) do {							\
	if (WARN_ONCE(x, "XSAVE consistency problem, dumping leaves")) {	\
		__xstate_dump_leaves();						\
	}									\
} while (0)

/*
 * If 'nr' matches 'nr_macro', check that the CPUID-reported size of the
 * component equals sizeof(__struct); warn and dump the leaves otherwise.
 */
#define XCHECK_SZ(sz, nr, nr_macro, __struct) do {			\
	if ((nr == nr_macro) &&						\
	    WARN_ONCE(sz != sizeof(__struct),				\
		"%s: struct is %zu bytes, cpu state %d bytes\n",	\
		__stringify(nr_macro), sizeof(__struct), sz)) {		\
		__xstate_dump_leaves();					\
	}								\
} while (0)
532
533
534
535
536
537
/*
 * We have a C struct for each 'xstate'.  We need to ensure
 * that our software representation matches what the CPU
 * tells us about the state's size.
 */
static void check_xstate_against_struct(int nr)
{
	/*
	 * Ask the CPU for the size of the state.
	 */
	int sz = xfeature_size(nr);
	/*
	 * Match each CPU state with the corresponding software
	 * structure.
	 */
	XCHECK_SZ(sz, nr, XFEATURE_YMM,       struct ymmh_struct);
	XCHECK_SZ(sz, nr, XFEATURE_BNDREGS,   struct mpx_bndreg_state);
	XCHECK_SZ(sz, nr, XFEATURE_BNDCSR,    struct mpx_bndcsr_state);
	XCHECK_SZ(sz, nr, XFEATURE_OPMASK,    struct avx_512_opmask_state);
	XCHECK_SZ(sz, nr, XFEATURE_ZMM_Hi256, struct avx_512_zmm_uppers_state);
	XCHECK_SZ(sz, nr, XFEATURE_Hi16_ZMM,  struct avx_512_hi16_state);
	XCHECK_SZ(sz, nr, XFEATURE_PKRU,      struct pkru_state);

	/*
	 * Make *SURE* to add any feature numbers in below if
	 * there are "holes" in the xsave state component
	 * numbers.
	 */
	if ((nr < XFEATURE_YMM) ||
	    (nr >= XFEATURE_MAX) ||
	    (nr == XFEATURE_PT_UNIMPLEMENTED_SO_FAR)) {
		WARN_ONCE(1, "no structure for xstate: %d\n", nr);
		XSTATE_WARN_ON(1);
	}
}
568
569
570
571
572
573
/*
 * This essentially double-checks what the cpu told us about
 * how large the XSAVE buffer needs to be.  We are recalculating
 * it to be safe.
 */
static void do_extra_xstate_size_checks(void)
{
	int paranoid_xstate_size = FXSAVE_SIZE + XSAVE_HDR_SIZE;
	int i;

	for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
		if (!xfeature_enabled(i))
			continue;

		check_xstate_against_struct(i);
		/*
		 * Supervisor state components can only be managed by
		 * XSAVES, which is compacted-format only.
		 */
		if (!using_compacted_format())
			XSTATE_WARN_ON(xfeature_is_supervisor(i));

		/* Align from the end of the previous feature */
		if (xfeature_is_aligned(i))
			paranoid_xstate_size = ALIGN(paranoid_xstate_size, 64);
		/*
		 * The offset of a given state in the non-compacted
		 * format is given to us in a CPUID leaf.  We check
		 * them for being ordered (increasing offsets) in
		 * setup_xstate_features().
		 */
		if (!using_compacted_format())
			paranoid_xstate_size = xfeature_uncompacted_offset(i);
		/*
		 * The compacted-format offset always depends on where
		 * the previous state ended.
		 */
		paranoid_xstate_size += xfeature_size(i);
	}
	XSTATE_WARN_ON(paranoid_xstate_size != fpu_kernel_xstate_size);
}
610
611
612
613
614
615
616
617
618
619
620
621
622
/*
 * Get total size of enabled xstates in XCR0/xfeatures_mask, as saved by
 * the XSAVES instruction.
 *
 * Note the SDM's wording here.  "sub-function 0" only enumerates the
 * size of the *user* states.  If we use it to size a buffer that we use
 * 'XSAVES' on, we could potentially overflow the buffer because 'XSAVES'
 * saves system states too.
 */
static unsigned int __init get_xsaves_size(void)
{
	unsigned int eax, ebx, ecx, edx;

	/*
	 * - CPUID function 0DH, sub-function 1:
	 *    EBX enumerates the size (in bytes) required by
	 *    the XSAVES instruction for an XSAVE area
	 *    containing all the state components
	 *    corresponding to bits currently set in
	 *    XCR0 | IA32_XSS.
	 */
	cpuid_count(XSTATE_CPUID, 1, &eax, &ebx, &ecx, &edx);
	return ebx;
}
637
/*
 * Get the total size of the enabled xstates in XCR0, as saved by the
 * non-supervisor XSAVE/XSAVEOPT instructions (the user-visible size).
 */
static unsigned int __init get_xsave_size(void)
{
	unsigned int eax, ebx, ecx, edx;

	/*
	 * - CPUID function 0DH, sub-function 0:
	 *    EBX enumerates the size (in bytes) required by
	 *    the XSAVE instruction for an XSAVE area
	 *    containing all the *user* state components
	 *    corresponding to bits currently set in XCR0.
	 */
	cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);
	return ebx;
}
651
652
653
654
655
656static bool is_supported_xstate_size(unsigned int test_xstate_size)
657{
658 if (test_xstate_size <= sizeof(union fpregs_state))
659 return true;
660
661 pr_warn("x86/fpu: xstate buffer too small (%zu < %d), disabling xsave\n",
662 sizeof(union fpregs_state), test_xstate_size);
663 return false;
664}
665
666static int __init init_xstate_size(void)
667{
668
669 unsigned int possible_xstate_size;
670 unsigned int xsave_size;
671
672 xsave_size = get_xsave_size();
673
674 if (boot_cpu_has(X86_FEATURE_XSAVES))
675 possible_xstate_size = get_xsaves_size();
676 else
677 possible_xstate_size = xsave_size;
678
679
680 if (!is_supported_xstate_size(possible_xstate_size))
681 return -EINVAL;
682
683
684
685
686
687 fpu_kernel_xstate_size = possible_xstate_size;
688 do_extra_xstate_size_checks();
689
690
691
692
693 fpu_user_xstate_size = xsave_size;
694 return 0;
695}
696
697
698
699
700
/*
 * We enabled the XSAVE hardware, but something went wrong and
 * we can not use it.  Disable it and fall back to legacy FPU
 * context switching.
 */
static void fpu__init_disable_system_xstate(void)
{
	xfeatures_mask = 0;
	cr4_clear_bits(X86_CR4_OSXSAVE);
	setup_clear_cpu_cap(X86_FEATURE_XSAVE);
}
707
708
709
710
711
/*
 * Enable and initialize the xsave feature.
 * Called once per system bootup.
 */
void __init fpu__init_system_xstate(void)
{
	unsigned int eax, ebx, ecx, edx;
	static int on_boot_cpu __initdata = 1;
	int err;
	int i;

	/* Boot-time only: */
	WARN_ON_FPU(!on_boot_cpu);
	on_boot_cpu = 0;

	if (!boot_cpu_has(X86_FEATURE_FPU)) {
		pr_info("x86/fpu: No FPU detected\n");
		return;
	}

	if (!boot_cpu_has(X86_FEATURE_XSAVE)) {
		pr_info("x86/fpu: x87 FPU will use %s\n",
			boot_cpu_has(X86_FEATURE_FXSR) ? "FXSAVE" : "FSAVE");
		return;
	}

	if (boot_cpu_data.cpuid_level < XSTATE_CPUID) {
		/* XSAVE present but no CPUID leaf 0xD is inconsistent: */
		WARN_ON_FPU(1);
		return;
	}

	/* Find user xstates supported by the processor (EDX:EAX of leaf 0xD): */
	cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);
	xfeatures_mask = eax + ((u64)edx << 32);

	if ((xfeatures_mask & XFEATURE_MASK_FPSSE) != XFEATURE_MASK_FPSSE) {
		/*
		 * This indicates that something really unexpected happened
		 * with the enumeration.  Disable XSAVE and try to continue
		 * booting without it.  This is too early to BUG().
		 */
		pr_err("x86/fpu: FP/SSE not present amongst the CPU's xstate features: 0x%llx.\n", xfeatures_mask);
		goto out_disable;
	}

	/*
	 * Clear XSAVE features that are disabled in the normal CPUID.
	 */
	for (i = 0; i < ARRAY_SIZE(xsave_cpuid_features); i++) {
		if (!boot_cpu_has(xsave_cpuid_features[i]))
			xfeatures_mask &= ~BIT(i);
	}

	/* Apply the kernel's policy (compile-time/boot-time restrictions): */
	xfeatures_mask &= fpu__get_supported_xfeatures_mask();

	/* Enable xstate instructions to be able to continue with init: */
	fpu__init_cpu_xstate();
	err = init_xstate_size();
	if (err)
		goto out_disable;

	/*
	 * Update info used for ptrace frames; use standard-format size and no
	 * supervisor xstates:
	 */
	update_regset_xstate_info(fpu_user_xstate_size, xfeatures_mask & ~XFEATURE_MASK_SUPERVISOR);

	fpu__init_prepare_fx_sw_frame();
	setup_init_fpu_buf();
	setup_xstate_comp_offsets();
	print_xstate_offset_size();

	pr_info("x86/fpu: Enabled xstate features 0x%llx, context size is %d bytes, using '%s' format.\n",
		xfeatures_mask,
		fpu_kernel_xstate_size,
		boot_cpu_has(X86_FEATURE_XSAVES) ? "compacted" : "standard");
	return;

out_disable:
	/* something went wrong, try to boot without any XSAVE support */
	fpu__init_disable_system_xstate();
}
788
789
790
791
/*
 * Restore minimal FPU state after suspend:
 */
void fpu__resume_cpu(void)
{
	/*
	 * Restore XCR0 on xsave capable CPUs:
	 */
	if (boot_cpu_has(X86_FEATURE_XSAVE))
		xsetbv(XCR_XFEATURE_ENABLED_MASK, xfeatures_mask);
}
800
801
802
803
804
805
/*
 * Given an xstate feature nr, calculate where in the xsave
 * buffer the state is.  Callers should ensure that the buffer
 * is valid.  Returns NULL (with a warning) for disabled features.
 */
static void *__raw_xsave_addr(struct xregs_state *xsave, int xfeature_nr)
{
	if (!xfeature_enabled(xfeature_nr)) {
		WARN_ON_FPU(1);
		return NULL;
	}

	return (void *)xsave + xstate_comp_offsets[xfeature_nr];
}
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
/*
 * Given the xsave area and a state inside, this function returns the
 * address of the state.
 *
 * This is the API that is called to get xstate address in either
 * standard format or compacted format of xsave area.
 *
 * Note that if there is no data for the field in the xsave buffer
 * this will return NULL.
 *
 * Inputs:
 *	xsave: the thread's storage area for all FPU data
 *	xfeature_nr: state which is defined in xsave.h (e.g. XFEATURE_FP,
 *	XFEATURE_SSE, etc...)
 * Output:
 *	address of the state in the xsave area, or NULL if the
 *	field is not present in the xsave buffer.
 */
void *get_xsave_addr(struct xregs_state *xsave, int xfeature_nr)
{
	/*
	 * Do we even *have* xsave state?
	 */
	if (!boot_cpu_has(X86_FEATURE_XSAVE))
		return NULL;

	/*
	 * We should not ever be requesting features that we
	 * have not enabled.
	 */
	WARN_ONCE(!(xfeatures_mask & BIT_ULL(xfeature_nr)),
		  "get of unsupported state");
	/*
	 * This assumes the last 'xsave*' instruction to
	 * have requested that 'xfeature_nr' be saved.
	 * If it did not, we might be seeing and accessing garbage.
	 *
	 * This can happen because the last 'xsave' did not
	 * request that this feature be saved (unlikely) or because
	 * the "init optimization" caused it to not be saved.
	 */
	if (!(xsave->header.xfeatures & BIT_ULL(xfeature_nr)))
		return NULL;

	return __raw_xsave_addr(xsave, xfeature_nr);
}
EXPORT_SYMBOL_GPL(get_xsave_addr);
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
/*
 * This wraps up the common operations that need to occur when retrieving
 * data from xsave state.  It first ensures that the current task's FPU
 * state is saved to its memory buffer, then calculates the address of
 * the requested field in that buffer.
 *
 * Note that this only works on the current task.
 */
const void *get_xsave_field_ptr(int xfeature_nr)
{
	struct fpu *fpu = &current->thread.fpu;

	/*
	 * fpu__save() takes the CPU's xstate registers
	 * and saves them off to the 'fpu memory buffer.
	 */
	fpu__save(fpu);

	return get_xsave_addr(&fpu->state.xsave, xfeature_nr);
}
895
896#ifdef CONFIG_ARCH_HAS_PKEYS
897
898
899
900
901
902int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
903 unsigned long init_val)
904{
905 u32 old_pkru;
906 int pkey_shift = (pkey * PKRU_BITS_PER_PKEY);
907 u32 new_pkru_bits = 0;
908
909
910
911
912
913 if (!boot_cpu_has(X86_FEATURE_OSPKE))
914 return -EINVAL;
915
916
917
918
919
920
921 WARN_ON_ONCE(pkey >= arch_max_pkey());
922
923
924 if (init_val & PKEY_DISABLE_ACCESS)
925 new_pkru_bits |= PKRU_AD_BIT;
926 if (init_val & PKEY_DISABLE_WRITE)
927 new_pkru_bits |= PKRU_WD_BIT;
928
929
930 new_pkru_bits <<= pkey_shift;
931
932
933 old_pkru = read_pkru();
934 old_pkru &= ~((PKRU_AD_BIT|PKRU_WD_BIT) << pkey_shift);
935
936
937 write_pkru(old_pkru | new_pkru_bits);
938
939 return 0;
940}
941#endif
942
943
944
945
946
947
948
949static inline bool xfeatures_mxcsr_quirk(u64 xfeatures)
950{
951 if (!(xfeatures & (XFEATURE_MASK_SSE|XFEATURE_MASK_YMM)))
952 return false;
953
954 if (xfeatures & XFEATURE_MASK_FP)
955 return false;
956
957 return true;
958}
959
960static void fill_gap(unsigned to, void **kbuf, unsigned *pos, unsigned *count)
961{
962 if (*pos < to) {
963 unsigned size = to - *pos;
964
965 if (size > *count)
966 size = *count;
967 memcpy(*kbuf, (void *)&init_fpstate.xsave + *pos, size);
968 *kbuf += size;
969 *pos += size;
970 *count -= size;
971 }
972}
973
/*
 * Copy one component of 'size' bytes at buffer position 'offset' from
 * 'from', first padding up to 'offset' with init values.  The copy is
 * clipped to the remaining space *count.
 */
static void copy_part(unsigned offset, unsigned size, void *from,
			void **kbuf, unsigned *pos, unsigned *count)
{
	/* Pad with init values up to where this component starts: */
	fill_gap(offset, kbuf, pos, count);

	if (size > *count)
		size = *count;
	if (!size)
		return;

	memcpy(*kbuf, from, size);
	*kbuf += size;
	*pos += size;
	*count -= size;
}
987
988
989
990
991
992
993
994
995int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int offset_start, unsigned int size_total)
996{
997 struct xstate_header header;
998 const unsigned off_mxcsr = offsetof(struct fxregs_state, mxcsr);
999 unsigned count = size_total;
1000 int i;
1001
1002
1003
1004
1005 if (unlikely(offset_start != 0))
1006 return -EFAULT;
1007
1008
1009
1010
1011 memset(&header, 0, sizeof(header));
1012 header.xfeatures = xsave->header.xfeatures;
1013 header.xfeatures &= ~XFEATURE_MASK_SUPERVISOR;
1014
1015 if (header.xfeatures & XFEATURE_MASK_FP)
1016 copy_part(0, off_mxcsr,
1017 &xsave->i387, &kbuf, &offset_start, &count);
1018 if (header.xfeatures & (XFEATURE_MASK_SSE | XFEATURE_MASK_YMM))
1019 copy_part(off_mxcsr, MXCSR_AND_FLAGS_SIZE,
1020 &xsave->i387.mxcsr, &kbuf, &offset_start, &count);
1021 if (header.xfeatures & XFEATURE_MASK_FP)
1022 copy_part(offsetof(struct fxregs_state, st_space), 128,
1023 &xsave->i387.st_space, &kbuf, &offset_start, &count);
1024 if (header.xfeatures & XFEATURE_MASK_SSE)
1025 copy_part(xstate_offsets[XFEATURE_MASK_SSE], 256,
1026 &xsave->i387.xmm_space, &kbuf, &offset_start, &count);
1027
1028
1029
1030 copy_part(offsetof(struct fxregs_state, sw_reserved), 48,
1031 xstate_fx_sw_bytes, &kbuf, &offset_start, &count);
1032
1033
1034
1035 copy_part(offsetof(struct xregs_state, header), sizeof(header),
1036 &header, &kbuf, &offset_start, &count);
1037
1038 for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
1039
1040
1041
1042 if ((header.xfeatures >> i) & 1) {
1043 void *src = __raw_xsave_addr(xsave, i);
1044
1045 copy_part(xstate_offsets[i], xstate_sizes[i],
1046 src, &kbuf, &offset_start, &count);
1047 }
1048
1049 }
1050 fill_gap(size_total, &kbuf, &offset_start, &count);
1051
1052 return 0;
1053}
1054
1055static inline int
1056__copy_xstate_to_user(void __user *ubuf, const void *data, unsigned int offset, unsigned int size, unsigned int size_total)
1057{
1058 if (!size)
1059 return 0;
1060
1061 if (offset < size_total) {
1062 unsigned int copy = min(size, size_total - offset);
1063
1064 if (__copy_to_user(ubuf + offset, data, copy))
1065 return -EFAULT;
1066 }
1067 return 0;
1068}
1069
1070
1071
1072
1073
1074
1075
1076int copy_xstate_to_user(void __user *ubuf, struct xregs_state *xsave, unsigned int offset_start, unsigned int size_total)
1077{
1078 unsigned int offset, size;
1079 int ret, i;
1080 struct xstate_header header;
1081
1082
1083
1084
1085 if (unlikely(offset_start != 0))
1086 return -EFAULT;
1087
1088
1089
1090
1091 memset(&header, 0, sizeof(header));
1092 header.xfeatures = xsave->header.xfeatures;
1093 header.xfeatures &= ~XFEATURE_MASK_SUPERVISOR;
1094
1095
1096
1097
1098 offset = offsetof(struct xregs_state, header);
1099 size = sizeof(header);
1100
1101 ret = __copy_xstate_to_user(ubuf, &header, offset, size, size_total);
1102 if (ret)
1103 return ret;
1104
1105 for (i = 0; i < XFEATURE_MAX; i++) {
1106
1107
1108
1109 if ((header.xfeatures >> i) & 1) {
1110 void *src = __raw_xsave_addr(xsave, i);
1111
1112 offset = xstate_offsets[i];
1113 size = xstate_sizes[i];
1114
1115
1116 if (offset + size > size_total)
1117 break;
1118
1119 ret = __copy_xstate_to_user(ubuf, src, offset, size, size_total);
1120 if (ret)
1121 return ret;
1122 }
1123
1124 }
1125
1126 if (xfeatures_mxcsr_quirk(header.xfeatures)) {
1127 offset = offsetof(struct fxregs_state, mxcsr);
1128 size = MXCSR_AND_FLAGS_SIZE;
1129 __copy_xstate_to_user(ubuf, &xsave->i387.mxcsr, offset, size, size_total);
1130 }
1131
1132
1133
1134
1135 offset = offsetof(struct fxregs_state, sw_reserved);
1136 size = sizeof(xstate_fx_sw_bytes);
1137
1138 ret = __copy_xstate_to_user(ubuf, xstate_fx_sw_bytes, offset, size, size_total);
1139 if (ret)
1140 return ret;
1141
1142 return 0;
1143}
1144
1145
1146
1147
1148
/*
 * Convert from a ptrace standard-format kernel buffer to kernel XSAVES
 * format and copy to the target thread. This is called from
 * xstateregs_set().
 */
int copy_kernel_to_xstate(struct xregs_state *xsave, const void *kbuf)
{
	unsigned int offset, size;
	int i;
	struct xstate_header hdr;

	offset = offsetof(struct xregs_state, header);
	size = sizeof(hdr);

	memcpy(&hdr, kbuf + offset, size);

	/* Reject unknown/supervisor bits, non-zero xcomp_bv or reserved bytes: */
	if (validate_xstate_header(&hdr))
		return -EINVAL;

	for (i = 0; i < XFEATURE_MAX; i++) {
		u64 mask = ((u64)1 << i);

		if (hdr.xfeatures & mask) {
			void *dst = __raw_xsave_addr(xsave, i);

			/* Source is in standard format, per CPUID offsets: */
			offset = xstate_offsets[i];
			size = xstate_sizes[i];

			memcpy(dst, kbuf + offset, size);
		}
	}

	/* MXCSR needs separate handling when SSE/YMM is set without FP: */
	if (xfeatures_mxcsr_quirk(hdr.xfeatures)) {
		offset = offsetof(struct fxregs_state, mxcsr);
		size = MXCSR_AND_FLAGS_SIZE;
		memcpy(&xsave->i387.mxcsr, kbuf + offset, size);
	}

	/*
	 * The state that came in from userspace was user-state only.
	 * Mask all the user states out of 'xfeatures':
	 */
	xsave->header.xfeatures &= XFEATURE_MASK_SUPERVISOR;

	/*
	 * Add back in the features that came in from userspace:
	 */
	xsave->header.xfeatures |= hdr.xfeatures;

	return 0;
}
1195
1196
1197
1198
1199
1200
1201
/*
 * Convert from a ptrace or sigreturn standard-format user-space buffer to
 * kernel XSAVES format and copy to the target thread. This is called from
 * xstateregs_set(), as well as potentially from the sigreturn() and
 * rt_sigreturn() system calls.
 */
int copy_user_to_xstate(struct xregs_state *xsave, const void __user *ubuf)
{
	unsigned int offset, size;
	int i;
	struct xstate_header hdr;

	offset = offsetof(struct xregs_state, header);
	size = sizeof(hdr);

	if (__copy_from_user(&hdr, ubuf + offset, size))
		return -EFAULT;

	/* Reject unknown/supervisor bits, non-zero xcomp_bv or reserved bytes: */
	if (validate_xstate_header(&hdr))
		return -EINVAL;

	for (i = 0; i < XFEATURE_MAX; i++) {
		u64 mask = ((u64)1 << i);

		if (hdr.xfeatures & mask) {
			void *dst = __raw_xsave_addr(xsave, i);

			/* Source is in standard format, per CPUID offsets: */
			offset = xstate_offsets[i];
			size = xstate_sizes[i];

			if (__copy_from_user(dst, ubuf + offset, size))
				return -EFAULT;
		}
	}

	/* MXCSR needs separate handling when SSE/YMM is set without FP: */
	if (xfeatures_mxcsr_quirk(hdr.xfeatures)) {
		offset = offsetof(struct fxregs_state, mxcsr);
		size = MXCSR_AND_FLAGS_SIZE;
		if (__copy_from_user(&xsave->i387.mxcsr, ubuf + offset, size))
			return -EFAULT;
	}

	/*
	 * The state that came in from userspace was user-state only.
	 * Mask all the user states out of 'xfeatures':
	 */
	xsave->header.xfeatures &= XFEATURE_MASK_SUPERVISOR;

	/*
	 * Add back in the features that came in from userspace:
	 */
	xsave->header.xfeatures |= hdr.xfeatures;

	return 0;
}
1251
1252#ifdef CONFIG_PROC_PID_ARCH_STATUS
1253
1254
1255
1256
1257static void avx512_status(struct seq_file *m, struct task_struct *task)
1258{
1259 unsigned long timestamp = READ_ONCE(task->thread.fpu.avx512_timestamp);
1260 long delta;
1261
1262 if (!timestamp) {
1263
1264
1265
1266 delta = -1;
1267 } else {
1268 delta = (long)(jiffies - timestamp);
1269
1270
1271
1272 if (delta < 0)
1273 delta = LONG_MAX;
1274 delta = jiffies_to_msecs(delta);
1275 }
1276
1277 seq_put_decimal_ll(m, "AVX512_elapsed_ms:\t", delta);
1278 seq_putc(m, '\n');
1279}
1280
1281
1282
1283
/*
 * Report architecture specific information for /proc/<pid>/arch_status.
 */
int proc_pid_arch_status(struct seq_file *m, struct pid_namespace *ns,
			struct pid *pid, struct task_struct *task)
{
	/*
	 * Report AVX512 state if the processor and build option supported.
	 */
	if (cpu_feature_enabled(X86_FEATURE_AVX512F))
		avx512_status(m, task);

	return 0;
}
1295#endif
1296