1
2
3
4
5
6
7
8
9#ifndef __ASM_CPUFEATURE_H
10#define __ASM_CPUFEATURE_H
11
12#include <asm/cpucaps.h>
13#include <asm/cputype.h>
14#include <asm/hwcap.h>
15#include <asm/sysreg.h>
16
17
18
19
20
21
22
23
24#define MAX_CPU_FEATURES (8 * sizeof(elf_hwcap))
25#define cpu_feature(x) ilog2(HWCAP_ ## x)
26
27#ifndef __ASSEMBLY__
28
29#include <linux/bug.h>
30#include <linux/jump_label.h>
31#include <linux/kernel.h>
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
/*
 * How a CPU feature register field is combined across CPUs to derive a
 * single system-wide "safe" value when CPUs disagree.
 */
enum ftr_type {
	FTR_EXACT,		/* Use a predefined safe value */
	FTR_LOWER_SAFE,		/* Smaller value is safer */
	FTR_HIGHER_SAFE,	/* Bigger value is safer */
};
52
53#define FTR_STRICT true
54#define FTR_NONSTRICT false
55
56#define FTR_SIGNED true
57#define FTR_UNSIGNED false
58
59#define FTR_VISIBLE true
60#define FTR_HIDDEN false
61
62#define FTR_VISIBLE_IF_IS_ENABLED(config) \
63 (IS_ENABLED(config) ? FTR_VISIBLE : FTR_HIDDEN)
64
/*
 * Describes one field of a CPU feature register: its position and width,
 * signedness, userspace visibility, whether mismatches across CPUs are
 * treated as an error (strict), and how a safe value is chosen.
 */
struct arm64_ftr_bits {
	bool		sign;		/* Value is signed ? */
	bool		visible;	/* Field visible to userspace ? */
	bool		strict;		/* CPU mismatch is taken seriously ? */
	enum ftr_type	type;		/* Policy for computing the safe value */
	u8		shift;		/* Bit position of the field */
	u8		width;		/* Field width in bits */
	s64		safe_val;	/* Safe value for FTR_EXACT features */
};
74
75
76
77
78
79
/*
 * System-wide sanitised view of one CPU feature register, built from the
 * per-field descriptions in @ftr_bits.
 */
struct arm64_ftr_reg {
	const char			*name;		/* Register name for diagnostics */
	u64				strict_mask;	/* Bits that must match on all CPUs */
	u64				user_mask;	/* Bits exposed to userspace as-is */
	u64				sys_val;	/* Sanitised system-wide value */
	u64				user_val;	/* Value reported for hidden fields */
	const struct arm64_ftr_bits	*ftr_bits;	/* Field descriptors, terminated list */
};
88
89extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227#define ARM64_CPUCAP_SCOPE_LOCAL_CPU ((u16)BIT(0))
228#define ARM64_CPUCAP_SCOPE_SYSTEM ((u16)BIT(1))
229
230
231
232
233
234#define ARM64_CPUCAP_SCOPE_BOOT_CPU ((u16)BIT(2))
235#define ARM64_CPUCAP_SCOPE_MASK \
236 (ARM64_CPUCAP_SCOPE_SYSTEM | \
237 ARM64_CPUCAP_SCOPE_LOCAL_CPU | \
238 ARM64_CPUCAP_SCOPE_BOOT_CPU)
239
240#define SCOPE_SYSTEM ARM64_CPUCAP_SCOPE_SYSTEM
241#define SCOPE_LOCAL_CPU ARM64_CPUCAP_SCOPE_LOCAL_CPU
242#define SCOPE_BOOT_CPU ARM64_CPUCAP_SCOPE_BOOT_CPU
243#define SCOPE_ALL ARM64_CPUCAP_SCOPE_MASK
244
245
246
247
248
249#define ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU ((u16)BIT(4))
250
251#define ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU ((u16)BIT(5))
252
253
254
255
256
257
258
259
260#define ARM64_CPUCAP_LOCAL_CPU_ERRATUM \
261 (ARM64_CPUCAP_SCOPE_LOCAL_CPU | ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU)
262
263
264
265
266
267
268
269#define ARM64_CPUCAP_SYSTEM_FEATURE \
270 (ARM64_CPUCAP_SCOPE_SYSTEM | ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)
271
272
273
274
275#define ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE \
276 (ARM64_CPUCAP_SCOPE_LOCAL_CPU | \
277 ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU | \
278 ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)
279
280
281
282
283
284
285#define ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE \
286 (ARM64_CPUCAP_SCOPE_LOCAL_CPU | \
287 ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU)
288
289
290
291
292
293#define ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE ARM64_CPUCAP_SCOPE_BOOT_CPU
294
/*
 * Describes a single CPU capability (a feature or an erratum workaround):
 * how it is detected and what action to take once detected.
 */
struct arm64_cpu_capabilities {
	const char *desc;	/* Human-readable description */
	u16 capability;		/* ARM64_* capability number (asm/cpucaps.h) */
	u16 type;		/* Detection scope + late-CPU policy bits */
	/* Returns true if this CPU/system has the capability at @scope */
	bool (*matches)(const struct arm64_cpu_capabilities *caps, int scope);
	/*
	 * Action to take when the capability is detected.
	 * NOTE(review): the exact calling context (which CPU, boot vs
	 * hotplug) is not visible in this header - confirm against the
	 * capability detection code before relying on it.
	 */
	void (*cpu_enable)(const struct arm64_cpu_capabilities *cap);
	union {
		/* MIDR-based matching, typically for errata */
		struct {
			struct midr_range midr_range;
			const struct arm64_midr_revidr {
				u32 midr_rv;		/* revision/variant of midr */
				u32 revidr_mask;	/* of valid fixed revidr bits */
			} * const fixed_revs;
		};

		const struct midr_range *midr_range_list;
		/* Feature-register field based matching */
		struct {
			u32 sys_reg;		/* Register to inspect */
			u8 field_pos;		/* Bit position of the field */
			u8 min_field_value;	/* Minimum value implying the cap */
			u8 hwcap_type;		/* Which hwcap set this feeds */
			bool sign;		/* Field is signed ? */
			unsigned long hwcap;	/* HWCAP bit to advertise */
		};
	};

	/*
	 * An optional list of sub-entries sharing this entry's capability
	 * number and type; the capability matches if any sub-entry's
	 * matches() returns true (see cpucap_multi_entry_cap_matches()).
	 */
	const struct arm64_cpu_capabilities *match_list;
};
339
340static inline int cpucap_default_scope(const struct arm64_cpu_capabilities *cap)
341{
342 return cap->type & ARM64_CPUCAP_SCOPE_MASK;
343}
344
345static inline bool
346cpucap_late_cpu_optional(const struct arm64_cpu_capabilities *cap)
347{
348 return !!(cap->type & ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU);
349}
350
351static inline bool
352cpucap_late_cpu_permitted(const struct arm64_cpu_capabilities *cap)
353{
354 return !!(cap->type & ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU);
355}
356
357
358
359
360
361
362static inline bool
363cpucap_multi_entry_cap_matches(const struct arm64_cpu_capabilities *entry,
364 int scope)
365{
366 const struct arm64_cpu_capabilities *caps;
367
368 for (caps = entry->match_list; caps->matches; caps++)
369 if (caps->matches(caps, scope))
370 return true;
371
372 return false;
373}
374
375
376
377
378
379static inline void
380cpucap_multi_entry_cap_cpu_enable(const struct arm64_cpu_capabilities *entry)
381{
382 const struct arm64_cpu_capabilities *caps;
383
384 for (caps = entry->match_list; caps->matches; caps++)
385 if (caps->matches(caps, SCOPE_LOCAL_CPU) &&
386 caps->cpu_enable)
387 caps->cpu_enable(caps);
388}
389
390extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
391extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
392extern struct static_key_false arm64_const_caps_ready;
393
394bool this_cpu_has_cap(unsigned int cap);
395
396static inline bool cpu_have_feature(unsigned int num)
397{
398 return elf_hwcap & (1UL << num);
399}
400
401
402static inline bool __cpus_have_const_cap(int num)
403{
404 if (num >= ARM64_NCAPS)
405 return false;
406 return static_branch_unlikely(&cpu_hwcap_keys[num]);
407}
408
409static inline bool cpus_have_cap(unsigned int num)
410{
411 if (num >= ARM64_NCAPS)
412 return false;
413 return test_bit(num, cpu_hwcaps);
414}
415
416static inline bool cpus_have_const_cap(int num)
417{
418 if (static_branch_likely(&arm64_const_caps_ready))
419 return __cpus_have_const_cap(num);
420 else
421 return cpus_have_cap(num);
422}
423
424static inline void cpus_set_cap(unsigned int num)
425{
426 if (num >= ARM64_NCAPS) {
427 pr_warn("Attempt to set an illegal CPU capability (%d >= %d)\n",
428 num, ARM64_NCAPS);
429 } else {
430 __set_bit(num, cpu_hwcaps);
431 }
432}
433
434static inline int __attribute_const__
435cpuid_feature_extract_signed_field_width(u64 features, int field, int width)
436{
437 return (s64)(features << (64 - width - field)) >> (64 - width);
438}
439
440static inline int __attribute_const__
441cpuid_feature_extract_signed_field(u64 features, int field)
442{
443 return cpuid_feature_extract_signed_field_width(features, field, 4);
444}
445
446static inline unsigned int __attribute_const__
447cpuid_feature_extract_unsigned_field_width(u64 features, int field, int width)
448{
449 return (u64)(features << (64 - width - field)) >> (64 - width);
450}
451
452static inline unsigned int __attribute_const__
453cpuid_feature_extract_unsigned_field(u64 features, int field)
454{
455 return cpuid_feature_extract_unsigned_field_width(features, field, 4);
456}
457
458static inline u64 arm64_ftr_mask(const struct arm64_ftr_bits *ftrp)
459{
460 return (u64)GENMASK(ftrp->shift + ftrp->width - 1, ftrp->shift);
461}
462
463static inline u64 arm64_ftr_reg_user_value(const struct arm64_ftr_reg *reg)
464{
465 return (reg->user_val | (reg->sys_val & reg->user_mask));
466}
467
468static inline int __attribute_const__
469cpuid_feature_extract_field_width(u64 features, int field, int width, bool sign)
470{
471 return (sign) ?
472 cpuid_feature_extract_signed_field_width(features, field, width) :
473 cpuid_feature_extract_unsigned_field_width(features, field, width);
474}
475
476static inline int __attribute_const__
477cpuid_feature_extract_field(u64 features, int field, bool sign)
478{
479 return cpuid_feature_extract_field_width(features, field, 4, sign);
480}
481
482static inline s64 arm64_ftr_value(const struct arm64_ftr_bits *ftrp, u64 val)
483{
484 return (s64)cpuid_feature_extract_field_width(val, ftrp->shift, ftrp->width, ftrp->sign);
485}
486
487static inline bool id_aa64mmfr0_mixed_endian_el0(u64 mmfr0)
488{
489 return cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_BIGENDEL_SHIFT) == 0x1 ||
490 cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_BIGENDEL0_SHIFT) == 0x1;
491}
492
493static inline bool id_aa64pfr0_32bit_el0(u64 pfr0)
494{
495 u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL0_SHIFT);
496
497 return val == ID_AA64PFR0_EL0_32BIT_64BIT;
498}
499
500static inline bool id_aa64pfr0_sve(u64 pfr0)
501{
502 u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_SVE_SHIFT);
503
504 return val > 0;
505}
506
507void __init setup_cpu_features(void);
508void check_local_cpu_capabilities(void);
509
510u64 read_sanitised_ftr_reg(u32 id);
511
512static inline bool cpu_supports_mixed_endian_el0(void)
513{
514 return id_aa64mmfr0_mixed_endian_el0(read_cpuid(ID_AA64MMFR0_EL1));
515}
516
517static inline bool system_supports_32bit_el0(void)
518{
519 return cpus_have_const_cap(ARM64_HAS_32BIT_EL0);
520}
521
522static inline bool system_supports_4kb_granule(void)
523{
524 u64 mmfr0;
525 u32 val;
526
527 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
528 val = cpuid_feature_extract_unsigned_field(mmfr0,
529 ID_AA64MMFR0_TGRAN4_SHIFT);
530
531 return val == ID_AA64MMFR0_TGRAN4_SUPPORTED;
532}
533
534static inline bool system_supports_64kb_granule(void)
535{
536 u64 mmfr0;
537 u32 val;
538
539 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
540 val = cpuid_feature_extract_unsigned_field(mmfr0,
541 ID_AA64MMFR0_TGRAN64_SHIFT);
542
543 return val == ID_AA64MMFR0_TGRAN64_SUPPORTED;
544}
545
546static inline bool system_supports_16kb_granule(void)
547{
548 u64 mmfr0;
549 u32 val;
550
551 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
552 val = cpuid_feature_extract_unsigned_field(mmfr0,
553 ID_AA64MMFR0_TGRAN16_SHIFT);
554
555 return val == ID_AA64MMFR0_TGRAN16_SUPPORTED;
556}
557
558static inline bool system_supports_mixed_endian_el0(void)
559{
560 return id_aa64mmfr0_mixed_endian_el0(read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1));
561}
562
563static inline bool system_supports_mixed_endian(void)
564{
565 u64 mmfr0;
566 u32 val;
567
568 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
569 val = cpuid_feature_extract_unsigned_field(mmfr0,
570 ID_AA64MMFR0_BIGENDEL_SHIFT);
571
572 return val == 0x1;
573}
574
575static inline bool system_supports_fpsimd(void)
576{
577 return !cpus_have_const_cap(ARM64_HAS_NO_FPSIMD);
578}
579
/*
 * Software PAN emulation via TTBR0 switching is only used when it is
 * configured in AND the hardware PAN capability is absent - hence the
 * negated cpus_have_const_cap() check.
 */
static inline bool system_uses_ttbr0_pan(void)
{
	return IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) &&
		!cpus_have_const_cap(ARM64_HAS_PAN);
}
585
586static inline bool system_supports_sve(void)
587{
588 return IS_ENABLED(CONFIG_ARM64_SVE) &&
589 cpus_have_const_cap(ARM64_SVE);
590}
591
592static inline bool system_supports_cnp(void)
593{
594 return IS_ENABLED(CONFIG_ARM64_CNP) &&
595 cpus_have_const_cap(ARM64_HAS_CNP);
596}
597
598#define ARM64_BP_HARDEN_UNKNOWN -1
599#define ARM64_BP_HARDEN_WA_NEEDED 0
600#define ARM64_BP_HARDEN_NOT_REQUIRED 1
601
602int get_spectre_v2_workaround_state(void);
603
604static inline bool system_supports_address_auth(void)
605{
606 return IS_ENABLED(CONFIG_ARM64_PTR_AUTH) &&
607 (cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH_ARCH) ||
608 cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH_IMP_DEF));
609}
610
611static inline bool system_supports_generic_auth(void)
612{
613 return IS_ENABLED(CONFIG_ARM64_PTR_AUTH) &&
614 (cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH_ARCH) ||
615 cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH_IMP_DEF));
616}
617
618#define ARM64_SSBD_UNKNOWN -1
619#define ARM64_SSBD_FORCE_DISABLE 0
620#define ARM64_SSBD_KERNEL 1
621#define ARM64_SSBD_FORCE_ENABLE 2
622#define ARM64_SSBD_MITIGATED 3
623
/*
 * Current Speculative Store Bypass Disable mitigation state
 * (one of the ARM64_SSBD_* values above); ARM64_SSBD_UNKNOWN
 * when the mitigation support is not compiled in.
 */
static inline int arm64_get_ssbd_state(void)
{
#ifdef CONFIG_ARM64_SSBD
	extern int ssbd_state;
	return ssbd_state;
#else
	return ARM64_SSBD_UNKNOWN;
#endif
}
633
634void arm64_set_ssbd_mitigation(bool state);
635
636static inline u32 id_aa64mmfr0_parange_to_phys_shift(int parange)
637{
638 switch (parange) {
639 case 0: return 32;
640 case 1: return 36;
641 case 2: return 40;
642 case 3: return 42;
643 case 4: return 44;
644 case 5: return 48;
645 case 6: return 52;
646
647
648
649
650
651
652
653 default: return CONFIG_ARM64_PA_BITS;
654 }
655}
656#endif
657
658#endif
659