1
2
3
4
5
6
7
8
9#ifndef __ASM_CPUFEATURE_H
10#define __ASM_CPUFEATURE_H
11
12#include <asm/cpucaps.h>
13#include <asm/cputype.h>
14#include <asm/hwcap.h>
15#include <asm/sysreg.h>
16
17
18
19
20
21
22
23
/*
 * elf_hwcap is used both internally in the kernel and for user space to
 * keep track of which optional features are supported by the current
 * system. So let's map feature 'x' to HWCAP_x.
 */
#define MAX_CPU_FEATURES	(8 * sizeof(elf_hwcap))
#define cpu_feature(x)		ilog2(HWCAP_ ## x)
26
27#ifndef __ASSEMBLY__
28
29#include <linux/bug.h>
30#include <linux/jump_label.h>
31#include <linux/kernel.h>
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
/*
 * CPU feature register tracking: how the per-CPU copies of a feature
 * register field are combined into a single system-wide "safe" value.
 */
enum ftr_type {
	FTR_EXACT,		/* Use a predefined safe value */
	FTR_LOWER_SAFE,		/* Smaller value is safe */
	FTR_HIGHER_SAFE,	/* Bigger value is safe */
};

/* Whether a mismatch between CPUs in this field is a SANITY violation. */
#define FTR_STRICT	true
#define FTR_NONSTRICT	false

/* Whether the field value is interpreted as signed or unsigned. */
#define FTR_SIGNED	true
#define FTR_UNSIGNED	false

/* Whether the field is exposed to userspace (e.g. via MRS emulation). */
#define FTR_VISIBLE	true
#define FTR_HIDDEN	false

#define FTR_VISIBLE_IF_IS_ENABLED(config)		\
	(IS_ENABLED(config) ? FTR_VISIBLE : FTR_HIDDEN)

/* Description of one bit-field within a CPU feature register. */
struct arm64_ftr_bits {
	bool		sign;		/* Value is signed ? */
	bool		visible;	/* Exposed to userspace ? */
	bool		strict;		/* CPU Sanity check: strict matching required ? */
	enum ftr_type	type;
	u8		shift;		/* Field's bit position in the register */
	u8		width;		/* Field width in bits */
	s64		safe_val;	/* safe value for FTR_EXACT features */
};
74
75
76
77
78
79
/*
 * System-wide, sanitised view of one CPU feature register, built from
 * the per-CPU values read at boot.
 */
struct arm64_ftr_reg {
	const char			*name;
	u64				strict_mask;	/* Bits that must match on every CPU */
	u64				user_mask;	/* Bits visible to userspace */
	u64				sys_val;	/* Sanitised system-wide value */
	u64				user_val;	/* Value used for bits hidden from userspace */
	const struct arm64_ftr_bits	*ftr_bits;
};

extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
/* Decide the capability based on each individual CPU's own state */
#define ARM64_CPUCAP_SCOPE_LOCAL_CPU		((u16)BIT(0))
/* Decide the capability from the sanitised, system-wide register values */
#define ARM64_CPUCAP_SCOPE_SYSTEM		((u16)BIT(1))
/*
 * Decide the capability from the boot CPU alone, before any secondary
 * CPU is brought online.
 */
#define ARM64_CPUCAP_SCOPE_BOOT_CPU	((u16)BIT(2))
#define ARM64_CPUCAP_SCOPE_MASK			\
	(ARM64_CPUCAP_SCOPE_SYSTEM	|	\
	 ARM64_CPUCAP_SCOPE_LOCAL_CPU	|	\
	 ARM64_CPUCAP_SCOPE_BOOT_CPU)

/* Short-hand aliases, used in capability tables and matches() callbacks */
#define SCOPE_SYSTEM				ARM64_CPUCAP_SCOPE_SYSTEM
#define SCOPE_LOCAL_CPU				ARM64_CPUCAP_SCOPE_LOCAL_CPU
#define SCOPE_BOOT_CPU				ARM64_CPUCAP_SCOPE_BOOT_CPU
#define SCOPE_ALL				ARM64_CPUCAP_SCOPE_MASK
244
245
246
247
248
/*
 * Is it permitted for a late (hotplugged) CPU to have this capability
 * when the system hasn't already enabled it?
 */
#define ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU	((u16)BIT(4))
/*
 * Is it safe for a late CPU to miss this capability when the system
 * already has it enabled?
 */
#define ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU	((u16)BIT(5))

/*
 * CPU errata workaround, detected per CPU. A late CPU that lacks the
 * erratum may still boot (OPTIONAL); a late CPU that needs the
 * workaround when the system hasn't enabled it must not come up
 * (PERMITTED bit deliberately absent).
 */
#define ARM64_CPUCAP_LOCAL_CPU_ERRATUM		\
	(ARM64_CPUCAP_SCOPE_LOCAL_CPU | ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU)
262
263
264
265
266
267
268
/*
 * CPU feature detected from the sanitised system-wide state. A late CPU
 * missing the feature is not allowed to come up; a late CPU that brings
 * the feature when the system lacks it is permitted (but unused).
 */
#define ARM64_CPUCAP_SYSTEM_FEATURE	\
	(ARM64_CPUCAP_SCOPE_SYSTEM | ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)

/*
 * CPU feature detected on one or more CPUs. All possible conflicts for
 * a late CPU are ignored (both OPTIONAL and PERMITTED are set).
 */
#define ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE		\
	(ARM64_CPUCAP_SCOPE_LOCAL_CPU		|	\
	 ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU	|	\
	 ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)

/*
 * CPU feature detected at boot time, on one or more CPUs. A late CPU is
 * not allowed to have the capability when the system doesn't have it;
 * it is OK for a late CPU to miss it. NOTE: intentionally the same flag
 * combination as ARM64_CPUCAP_LOCAL_CPU_ERRATUM — only the intent of
 * the users differs.
 */
#define ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE	\
	(ARM64_CPUCAP_SCOPE_LOCAL_CPU		|	\
	 ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU)

/*
 * CPU feature used early in boot, decided on the boot CPU alone. Every
 * secondary CPU must match the boot CPU's state (no conflict bits set).
 */
#define ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE ARM64_CPUCAP_SCOPE_BOOT_CPU
294
/*
 * Description of one CPU capability (a feature or an erratum
 * workaround): how to detect it and what to do once it is detected.
 */
struct arm64_cpu_capabilities {
	const char *desc;	/* Human-readable description */
	u16 capability;		/* ARM64_* capability number (asm/cpucaps.h) */
	u16 type;		/* ARM64_CPUCAP_* scope and conflict policy */
	bool (*matches)(const struct arm64_cpu_capabilities *caps, int scope);
	/*
	 * Enable hook, called on each CPU that has the capability, with
	 * the matching entry as its argument.
	 * NOTE(review): the calling context (e.g. whether sleeping is
	 * allowed) is defined in cpufeature.c — confirm there.
	 */
	void (*cpu_enable)(const struct arm64_cpu_capabilities *cap);
	union {
		struct {	/* MIDR-based matching (errata) */
			struct midr_range midr_range;
			const struct arm64_midr_revidr {
				u32 midr_rv;		/* revision/variant */
				u32 revidr_mask;
			} * const fixed_revs;
		};

		const struct midr_range *midr_range_list;
		struct {	/* Feature-register based matching */
			u32 sys_reg;		/* ID register encoding */
			u8 field_pos;		/* Field bit position */
			u8 min_field_value;	/* Minimum value implying presence */
			u8 hwcap_type;		/* Which hwcap set 'hwcap' belongs to */
			bool sign;		/* Field is signed ? */
			unsigned long hwcap;	/* ELF hwcap bit to advertise */
		};
	};

	/*
	 * Optional list of sub-entries sharing this entry's capability
	 * number and type; only matches()/cpu_enable() and the fields
	 * they consult are significant in the listed entries (see
	 * cpucap_multi_entry_cap_matches() below).
	 */
	const struct arm64_cpu_capabilities *match_list;
};
339
340static inline int cpucap_default_scope(const struct arm64_cpu_capabilities *cap)
341{
342 return cap->type & ARM64_CPUCAP_SCOPE_MASK;
343}
344
345static inline bool
346cpucap_late_cpu_optional(const struct arm64_cpu_capabilities *cap)
347{
348 return !!(cap->type & ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU);
349}
350
351static inline bool
352cpucap_late_cpu_permitted(const struct arm64_cpu_capabilities *cap)
353{
354 return !!(cap->type & ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU);
355}
356
357
358
359
360
361
362static inline bool
363cpucap_multi_entry_cap_matches(const struct arm64_cpu_capabilities *entry,
364 int scope)
365{
366 const struct arm64_cpu_capabilities *caps;
367
368 for (caps = entry->match_list; caps->matches; caps++)
369 if (caps->matches(caps, scope))
370 return true;
371
372 return false;
373}
374
375
376
377
378
379static inline void
380cpucap_multi_entry_cap_cpu_enable(const struct arm64_cpu_capabilities *entry)
381{
382 const struct arm64_cpu_capabilities *caps;
383
384 for (caps = entry->match_list; caps->matches; caps++)
385 if (caps->matches(caps, SCOPE_LOCAL_CPU) &&
386 caps->cpu_enable)
387 caps->cpu_enable(caps);
388}
389
/* Bitmap of detected capabilities, indexed by ARM64_* cap number. */
extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
/* One static-branch key per capability, patched once caps are finalised. */
extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
/* Set once the static keys above may be consulted instead of the bitmap. */
extern struct static_key_false arm64_const_caps_ready;

/* Iterate over every capability bit currently set in cpu_hwcaps. */
#define for_each_available_cap(cap)		\
	for_each_set_bit(cap, cpu_hwcaps, ARM64_NCAPS)

/* Check a capability on the calling CPU only, not system-wide. */
bool this_cpu_has_cap(unsigned int cap);
398
399static inline bool cpu_have_feature(unsigned int num)
400{
401 return elf_hwcap & (1UL << num);
402}
403
404
405static inline bool __cpus_have_const_cap(int num)
406{
407 if (num >= ARM64_NCAPS)
408 return false;
409 return static_branch_unlikely(&cpu_hwcap_keys[num]);
410}
411
412static inline bool cpus_have_cap(unsigned int num)
413{
414 if (num >= ARM64_NCAPS)
415 return false;
416 return test_bit(num, cpu_hwcaps);
417}
418
419static inline bool cpus_have_const_cap(int num)
420{
421 if (static_branch_likely(&arm64_const_caps_ready))
422 return __cpus_have_const_cap(num);
423 else
424 return cpus_have_cap(num);
425}
426
427static inline void cpus_set_cap(unsigned int num)
428{
429 if (num >= ARM64_NCAPS) {
430 pr_warn("Attempt to set an illegal CPU capability (%d >= %d)\n",
431 num, ARM64_NCAPS);
432 } else {
433 __set_bit(num, cpu_hwcaps);
434 }
435}
436
/*
 * Extract a signed field: shift it up against bit 63 so the arithmetic
 * right shift sign-extends it back down to bit 0.
 */
static inline int __attribute_const__
cpuid_feature_extract_signed_field_width(u64 features, int field, int width)
{
	return (s64)(features << (64 - width - field)) >> (64 - width);
}

/* As above, with the standard 4-bit ID register field width. */
static inline int __attribute_const__
cpuid_feature_extract_signed_field(u64 features, int field)
{
	return cpuid_feature_extract_signed_field_width(features, field, 4);
}

/* Extract an unsigned field; the logical right shift zero-extends. */
static inline unsigned int __attribute_const__
cpuid_feature_extract_unsigned_field_width(u64 features, int field, int width)
{
	return (u64)(features << (64 - width - field)) >> (64 - width);
}

/* As above, with the standard 4-bit ID register field width. */
static inline unsigned int __attribute_const__
cpuid_feature_extract_unsigned_field(u64 features, int field)
{
	return cpuid_feature_extract_unsigned_field_width(features, field, 4);
}

/* Mask covering the register bits described by @ftrp. */
static inline u64 arm64_ftr_mask(const struct arm64_ftr_bits *ftrp)
{
	return (u64)GENMASK(ftrp->shift + ftrp->width - 1, ftrp->shift);
}
465
466static inline u64 arm64_ftr_reg_user_value(const struct arm64_ftr_reg *reg)
467{
468 return (reg->user_val | (reg->sys_val & reg->user_mask));
469}
470
471static inline int __attribute_const__
472cpuid_feature_extract_field_width(u64 features, int field, int width, bool sign)
473{
474 return (sign) ?
475 cpuid_feature_extract_signed_field_width(features, field, width) :
476 cpuid_feature_extract_unsigned_field_width(features, field, width);
477}
478
479static inline int __attribute_const__
480cpuid_feature_extract_field(u64 features, int field, bool sign)
481{
482 return cpuid_feature_extract_field_width(features, field, 4, sign);
483}
484
485static inline s64 arm64_ftr_value(const struct arm64_ftr_bits *ftrp, u64 val)
486{
487 return (s64)cpuid_feature_extract_field_width(val, ftrp->shift, ftrp->width, ftrp->sign);
488}
489
490static inline bool id_aa64mmfr0_mixed_endian_el0(u64 mmfr0)
491{
492 return cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_BIGENDEL_SHIFT) == 0x1 ||
493 cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_BIGENDEL0_SHIFT) == 0x1;
494}
495
496static inline bool id_aa64pfr0_32bit_el0(u64 pfr0)
497{
498 u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL0_SHIFT);
499
500 return val == ID_AA64PFR0_EL0_32BIT_64BIT;
501}
502
503static inline bool id_aa64pfr0_sve(u64 pfr0)
504{
505 u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_SVE_SHIFT);
506
507 return val > 0;
508}
509
void __init setup_cpu_features(void);
void check_local_cpu_capabilities(void);

/* Return the sanitised, system-wide value of ID register @id. */
u64 read_sanitised_ftr_reg(u32 id);

/*
 * Like system_supports_mixed_endian_el0(), but based on this CPU's own
 * (unsanitised) ID_AA64MMFR0_EL1 value.
 */
static inline bool cpu_supports_mixed_endian_el0(void)
{
	return id_aa64mmfr0_mixed_endian_el0(read_cpuid(ID_AA64MMFR0_EL1));
}
519
520static inline bool system_supports_32bit_el0(void)
521{
522 return cpus_have_const_cap(ARM64_HAS_32BIT_EL0);
523}
524
525static inline bool system_supports_4kb_granule(void)
526{
527 u64 mmfr0;
528 u32 val;
529
530 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
531 val = cpuid_feature_extract_unsigned_field(mmfr0,
532 ID_AA64MMFR0_TGRAN4_SHIFT);
533
534 return val == ID_AA64MMFR0_TGRAN4_SUPPORTED;
535}
536
537static inline bool system_supports_64kb_granule(void)
538{
539 u64 mmfr0;
540 u32 val;
541
542 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
543 val = cpuid_feature_extract_unsigned_field(mmfr0,
544 ID_AA64MMFR0_TGRAN64_SHIFT);
545
546 return val == ID_AA64MMFR0_TGRAN64_SUPPORTED;
547}
548
549static inline bool system_supports_16kb_granule(void)
550{
551 u64 mmfr0;
552 u32 val;
553
554 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
555 val = cpuid_feature_extract_unsigned_field(mmfr0,
556 ID_AA64MMFR0_TGRAN16_SHIFT);
557
558 return val == ID_AA64MMFR0_TGRAN16_SUPPORTED;
559}
560
561static inline bool system_supports_mixed_endian_el0(void)
562{
563 return id_aa64mmfr0_mixed_endian_el0(read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1));
564}
565
566static inline bool system_supports_mixed_endian(void)
567{
568 u64 mmfr0;
569 u32 val;
570
571 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
572 val = cpuid_feature_extract_unsigned_field(mmfr0,
573 ID_AA64MMFR0_BIGENDEL_SHIFT);
574
575 return val == 0x1;
576}
577
578static inline bool system_supports_fpsimd(void)
579{
580 return !cpus_have_const_cap(ARM64_HAS_NO_FPSIMD);
581}
582
/*
 * Software PAN via TTBR0_EL1 switching is used only when it was built
 * in AND the CPUs do NOT all implement hardware PAN, which supersedes
 * the software emulation — hence the negated capability check.
 */
static inline bool system_uses_ttbr0_pan(void)
{
	return IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) &&
		!cpus_have_const_cap(ARM64_HAS_PAN);
}
588
589static inline bool system_supports_sve(void)
590{
591 return IS_ENABLED(CONFIG_ARM64_SVE) &&
592 cpus_have_const_cap(ARM64_SVE);
593}
594
595static inline bool system_supports_cnp(void)
596{
597 return IS_ENABLED(CONFIG_ARM64_CNP) &&
598 cpus_have_const_cap(ARM64_HAS_CNP);
599}
600
601static inline bool system_supports_address_auth(void)
602{
603 return IS_ENABLED(CONFIG_ARM64_PTR_AUTH) &&
604 (cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH_ARCH) ||
605 cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH_IMP_DEF));
606}
607
608static inline bool system_supports_generic_auth(void)
609{
610 return IS_ENABLED(CONFIG_ARM64_PTR_AUTH) &&
611 (cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH_ARCH) ||
612 cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH_IMP_DEF));
613}
614
/* Speculative Store Bypass Disable mitigation states. */
#define ARM64_SSBD_UNKNOWN		-1	/* status not determined / support not built in */
#define ARM64_SSBD_FORCE_DISABLE	0	/* mitigation forced off */
#define ARM64_SSBD_KERNEL		1	/* dynamically toggled by the kernel */
#define ARM64_SSBD_FORCE_ENABLE		2	/* mitigation forced on */
#define ARM64_SSBD_MITIGATED		3	/* presumably always-mitigated (e.g. by firmware/HW) — verify in ssbd code */

/* Current SSBD state; ARM64_SSBD_UNKNOWN when CONFIG_ARM64_SSBD is off. */
static inline int arm64_get_ssbd_state(void)
{
#ifdef CONFIG_ARM64_SSBD
	extern int ssbd_state;	/* defined in the SSBD mitigation code */
	return ssbd_state;
#else
	return ARM64_SSBD_UNKNOWN;
#endif
}

/* Toggle the SSBD mitigation; no-op stub when support is not built in. */
#ifdef CONFIG_ARM64_SSBD
void arm64_set_ssbd_mitigation(bool state);
#else
static inline void arm64_set_ssbd_mitigation(bool state) {}
#endif
636
/* Emulate an MRS instruction trapped from EL0, writing the result to x'rt'. */
extern int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);

/*
 * Decode an ID_AA64MMFR0_EL1.PARange field value into a physical
 * address width in bits.
 */
static inline u32 id_aa64mmfr0_parange_to_phys_shift(int parange)
{
	switch (parange) {
	case 0: return 32;
	case 1: return 36;
	case 2: return 40;
	case 3: return 42;
	case 4: return 44;
	case 5: return 48;
	case 6: return 52;
	/*
	 * A future PE could report a value unknown to this kernel; by
	 * the ID register scheme any new value is higher than the ones
	 * above, so clamp to the PA size the kernel was built for.
	 */
	default: return CONFIG_ARM64_PA_BITS;
	}
}
659#endif
660
661#endif
662