/*
 * CPU microcode update driver for x86.
 *
 * Loads microcode on the boot CPU and on secondary CPUs during early boot,
 * caches microcode found in the initrd or built into the kernel image, and
 * provides the late-loading interfaces: the sysfs "reload" attribute and the
 * optional legacy /dev/cpu/microcode character device.
 */
#define pr_fmt(fmt) "microcode: " fmt

#include <linux/platform_device.h>
#include <linux/syscore_ops.h>
#include <linux/miscdevice.h>
#include <linux/capability.h>
#include <linux/firmware.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/cpu.h>
#include <linux/fs.h>
#include <linux/mm.h>

#include <asm/microcode_intel.h>
#include <asm/cpu_device_id.h>
#include <asm/microcode_amd.h>
#include <asm/perf_event.h>
#include <asm/microcode.h>
#include <asm/processor.h>
#include <asm/cmdline.h>
#include <asm/setup.h>

#define DRIVER_VERSION	"2.2"

static struct microcode_ops	*microcode_ops;
static bool dis_ucode_ldr = true;

bool initrd_gone;

LIST_HEAD(microcode_cache);

/*
 * Synchronization.
 *
 * All non cpu-hotplug-callback call sites use:
 *
 * - microcode_mutex to synchronize with each other;
 * - get/put_online_cpus() to synchronize with
 *   the cpu-hotplug-callback call sites.
 *
 * We guarantee that only a single CPU is being
 * updated at any particular moment of time.
 */
static DEFINE_MUTEX(microcode_mutex);

struct ucode_cpu_info		ucode_cpu_info[NR_CPUS];

struct cpu_info_ctx {
	struct cpu_signature	*cpu_sig;
	int			err;
};

/*
 * Those patch levels cannot be updated to newer ones and thus should be final.
 */
static u32 final_levels[] = {
	0x01000098,
	0x0100009f,
	0x010000af,
	0,
};

/*
 * Check the current patch level on this CPU.
 *
 * Returns:
 *  - true: if the update should stop
 *  - false: otherwise
 */
static bool amd_check_current_patch_level(void)
{
	u32 lvl, dummy, i;
	u32 *levels;

	native_rdmsr(MSR_AMD64_PATCH_LEVEL, lvl, dummy);

	if (IS_ENABLED(CONFIG_X86_32))
		levels = (u32 *)__pa_nodebug(&final_levels);
	else
		levels = final_levels;

	for (i = 0; levels[i]; i++) {
		if (lvl == levels[i])
			return true;
	}
	return false;
}
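
/*
 * Returns true if the microcode loader should not run: CPUID is unavailable,
 * we are running under a hypervisor, an AMD CPU already carries a final patch
 * level, or "dis_ucode_ldr" was passed on the kernel command line.
 */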
static bool __init check_loader_disabled_bsp(void)
{
	static const char *__dis_opt_str = "dis_ucode_ldr";

#ifdef CONFIG_X86_32
	const char *cmdline = (const char *)__pa_nodebug(boot_command_line);
	const char *option  = (const char *)__pa_nodebug(__dis_opt_str);
	bool *res = (bool *)__pa_nodebug(&dis_ucode_ldr);

#else /* CONFIG_X86_64 */
	const char *cmdline = boot_command_line;
	const char *option  = __dis_opt_str;
	bool *res = &dis_ucode_ldr;
#endif

	if (!have_cpuid_p())
		return *res;

	/*
	 * CPUID(1).ECX[31] is set when a hypervisor is present: don't try to
	 * load microcode from inside a guest.
	 */
	if (native_cpuid_ecx(1) & BIT(31))
		return *res;

	if (x86_cpuid_vendor() == X86_VENDOR_AMD) {
		if (amd_check_current_patch_level())
			return *res;
	}

	if (cmdline_find_option_bool(cmdline, option) <= 0)
		*res = false;

	return *res;
}
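
/*
 * Microcode can also be linked into the kernel image as built-in firmware.
 * The __start/__end_builtin_fw markers delimit that table and
 * get_builtin_firmware() looks a blob up in it by name.
 */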
extern struct builtin_fw __start_builtin_fw[];
extern struct builtin_fw __end_builtin_fw[];

bool get_builtin_firmware(struct cpio_data *cd, const char *name)
{
#ifdef CONFIG_FW_LOADER
	struct builtin_fw *b_fw;

	for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++) {
		if (!strcmp(name, b_fw->name)) {
			cd->size = b_fw->size;
			cd->data = b_fw->data;
			return true;
		}
	}
#endif
	return false;
}
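
/* Load microcode on the boot CPU during early boot. */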
void __init load_ucode_bsp(void)
{
	unsigned int cpuid_1_eax;

	if (check_loader_disabled_bsp())
		return;

	cpuid_1_eax = native_cpuid_eax(1);

	switch (x86_cpuid_vendor()) {
	case X86_VENDOR_INTEL:
		if (x86_family(cpuid_1_eax) >= 6)
			load_ucode_intel_bsp();
		break;
	case X86_VENDOR_AMD:
		if (x86_family(cpuid_1_eax) >= 0x10)
			load_ucode_amd_bsp(cpuid_1_eax);
		break;
	default:
		break;
	}
}

static bool check_loader_disabled_ap(void)
{
#ifdef CONFIG_X86_32
	return *((bool *)__pa_nodebug(&dis_ucode_ldr));
#else
	return dis_ucode_ldr;
#endif
}
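
/* Load microcode on an application processor as it is brought up. */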
void load_ucode_ap(void)
{
	unsigned int cpuid_1_eax;

	if (check_loader_disabled_ap())
		return;

	cpuid_1_eax = native_cpuid_eax(1);

	switch (x86_cpuid_vendor()) {
	case X86_VENDOR_INTEL:
		if (x86_family(cpuid_1_eax) >= 6)
			load_ucode_intel_ap();
		break;
	case X86_VENDOR_AMD:
		if (x86_family(cpuid_1_eax) >= 0x10)
			load_ucode_amd_ap(cpuid_1_eax);
		break;
	default:
		break;
	}
}
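
/*
 * Stash the microcode patches from the initrd in kernel-owned storage so they
 * remain usable after the initrd memory is freed. Runs as an fs_initcall.
 */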
static int __init save_microcode_in_initrd(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;
	int ret = -EINVAL;

	switch (c->x86_vendor) {
	case X86_VENDOR_INTEL:
		if (c->x86 >= 6)
			ret = save_microcode_in_initrd_intel();
		break;
	case X86_VENDOR_AMD:
		if (c->x86 >= 0x10)
			return save_microcode_in_initrd_amd(cpuid_eax(1));
		break;
	default:
		break;
	}

	initrd_gone = true;

	return ret;
}
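
/*
 * Find the microcode cpio archive at @path inside the initrd. @use_pa makes
 * the early 32-bit callers work with physical addresses, since the kernel's
 * virtual mappings are not available to them yet.
 */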
struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa)
{
#ifdef CONFIG_BLK_DEV_INITRD
	unsigned long start = 0;
	size_t size;

#ifdef CONFIG_X86_32
	struct boot_params *params;

	if (use_pa)
		params = (struct boot_params *)__pa_nodebug(&boot_params);
	else
		params = &boot_params;

	size = params->hdr.ramdisk_size;

	/*
	 * Set start only if we have an initrd image. We cannot use
	 * initrd_start because it is not set that early yet.
	 */
	if (size)
		start = params->hdr.ramdisk_image;

#else /* CONFIG_X86_64 */
	size  = (unsigned long)boot_params.ext_ramdisk_size << 32;
	size |= boot_params.hdr.ramdisk_size;

	if (size) {
		start  = (unsigned long)boot_params.ext_ramdisk_image << 32;
		start |= boot_params.hdr.ramdisk_image;

		start += PAGE_OFFSET;
	}
#endif

	/*
	 * Fixup the start address: once initrd_start is set (later in boot),
	 * it holds the virtual address of the initrd, which may also have
	 * been relocated, so prefer it over the boot_params value.
	 *
	 * initrd_gone means the initrd has already been freed, in which case
	 * there is nothing left to search.
	 */
	if (!use_pa) {
		if (initrd_gone)
			return (struct cpio_data){ NULL, 0, "" };
		if (initrd_start)
			start = initrd_start;
	}

	return find_cpio_data(path, (void *)start, size, NULL);
#else
	return (struct cpio_data){ NULL, 0, "" };
#endif
}
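
/* Re-apply the early-loaded microcode, e.g. when resuming from suspend. */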
void reload_early_microcode(void)
{
	int vendor, family;

	vendor = x86_cpuid_vendor();
	family = x86_cpuid_family();

	switch (vendor) {
	case X86_VENDOR_INTEL:
		if (family >= 6)
			reload_ucode_intel();
		break;
	case X86_VENDOR_AMD:
		if (family >= 0x10)
			reload_ucode_amd();
		break;
	default:
		break;
	}
}
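
/*
 * Collect the signature/revision of a given CPU by running the vendor's
 * collect_cpu_info() callback on that CPU via smp_call_function_single().
 */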
static void collect_cpu_info_local(void *arg)
{
	struct cpu_info_ctx *ctx = arg;

	ctx->err = microcode_ops->collect_cpu_info(smp_processor_id(),
						   ctx->cpu_sig);
}

static int collect_cpu_info_on_target(int cpu, struct cpu_signature *cpu_sig)
{
	struct cpu_info_ctx ctx = { .cpu_sig = cpu_sig, .err = 0 };
	int ret;

	ret = smp_call_function_single(cpu, collect_cpu_info_local, &ctx, 1);
	if (!ret)
		ret = ctx.err;

	return ret;
}

static int collect_cpu_info(int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	int ret;

	memset(uci, 0, sizeof(*uci));

	ret = collect_cpu_info_on_target(cpu, &uci->cpu_sig);
	if (!ret)
		uci->valid = 1;

	return ret;
}
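
/*
 * Apply the already-loaded microcode patch on a given CPU by running the
 * vendor's apply_microcode() callback there.
 */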
struct apply_microcode_ctx {
	int err;
};

static void apply_microcode_local(void *arg)
{
	struct apply_microcode_ctx *ctx = arg;

	ctx->err = microcode_ops->apply_microcode(smp_processor_id());
}

static int apply_microcode_on_target(int cpu)
{
	struct apply_microcode_ctx ctx = { .err = 0 };
	int ret;

	ret = smp_call_function_single(cpu, apply_microcode_local, &ctx, 1);
	if (!ret)
		ret = ctx.err;

	return ret;
}
#ifdef CONFIG_MICROCODE_OLD_INTERFACE
static int do_microcode_update(const void __user *buf, size_t size)
{
	int error = 0;
	int cpu;

	for_each_online_cpu(cpu) {
		struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
		enum ucode_state ustate;

		if (!uci->valid)
			continue;

		ustate = microcode_ops->request_microcode_user(cpu, buf, size);
		if (ustate == UCODE_ERROR) {
			error = -1;
			break;
		} else if (ustate == UCODE_OK)
			apply_microcode_on_target(cpu);
	}

	return error;
}

static int microcode_open(struct inode *inode, struct file *file)
{
	return capable(CAP_SYS_RAWIO) ? nonseekable_open(inode, file) : -EPERM;
}

static ssize_t microcode_write(struct file *file, const char __user *buf,
			       size_t len, loff_t *ppos)
{
	ssize_t ret = -EINVAL;

	if ((len >> PAGE_SHIFT) > totalram_pages) {
		pr_err("too much data (max %ld pages)\n", totalram_pages);
		return ret;
	}

	get_online_cpus();
	mutex_lock(&microcode_mutex);

	if (do_microcode_update(buf, len) == 0)
		ret = (ssize_t)len;

	if (ret > 0)
		perf_check_microcode();

	mutex_unlock(&microcode_mutex);
	put_online_cpus();

	return ret;
}

static const struct file_operations microcode_fops = {
	.owner		= THIS_MODULE,
	.write		= microcode_write,
	.open		= microcode_open,
	.llseek		= no_llseek,
};

static struct miscdevice microcode_dev = {
	.minor		= MICROCODE_MINOR,
	.name		= "microcode",
	.nodename	= "cpu/microcode",
	.fops		= &microcode_fops,
};

static int __init microcode_dev_init(void)
{
	int error;

	error = misc_register(&microcode_dev);
	if (error) {
		pr_err("can't misc_register on minor=%d\n", MICROCODE_MINOR);
		return error;
	}

	return 0;
}

static void __exit microcode_dev_exit(void)
{
	misc_deregister(&microcode_dev);
}
#else
#define microcode_dev_init()	0
#define microcode_dev_exit()	do { } while (0)
#endif /* CONFIG_MICROCODE_OLD_INTERFACE */

/* Fake device for request_firmware(). */
static struct platform_device	*microcode_pdev;

static int reload_for_cpu(int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	enum ucode_state ustate;
	int err = 0;

	if (!uci->valid)
		return err;

	ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev, true);
	if (ustate == UCODE_OK)
		apply_microcode_on_target(cpu);
	else if (ustate == UCODE_ERROR)
		err = -EINVAL;

	return err;
}
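
/*
 * Writing "1" to /sys/devices/system/cpu/microcode/reload requests a late
 * microcode reload on every online CPU.
 */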
static ssize_t reload_store(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t size)
{
	unsigned long val;
	int cpu;
	ssize_t ret = 0, tmp_ret;

	ret = kstrtoul(buf, 0, &val);
	if (ret)
		return ret;

	if (val != 1)
		return size;

	get_online_cpus();
	mutex_lock(&microcode_mutex);
	for_each_online_cpu(cpu) {
		tmp_ret = reload_for_cpu(cpu);
		if (tmp_ret != 0)
			pr_warn("Error reloading microcode on CPU %d\n", cpu);

		/* Save the retval of the first encountered reload error. */
		if (!ret)
			ret = tmp_ret;
	}
	if (!ret)
		perf_check_microcode();
	mutex_unlock(&microcode_mutex);
	put_online_cpus();

	if (!ret)
		ret = size;

	return ret;
}
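
/* Per-CPU sysfs attributes: current microcode revision and processor flags. */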
static ssize_t version_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + dev->id;

	return sprintf(buf, "0x%x\n", uci->cpu_sig.rev);
}

static ssize_t pf_show(struct device *dev,
		       struct device_attribute *attr, char *buf)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + dev->id;

	return sprintf(buf, "0x%x\n", uci->cpu_sig.pf);
}

static DEVICE_ATTR(reload, 0200, NULL, reload_store);
static DEVICE_ATTR(version, 0400, version_show, NULL);
static DEVICE_ATTR(processor_flags, 0400, pf_show, NULL);

static struct attribute *mc_default_attrs[] = {
	&dev_attr_version.attr,
	&dev_attr_processor_flags.attr,
	NULL
};

static struct attribute_group mc_attr_group = {
	.attrs	= mc_default_attrs,
	.name	= "microcode",
};

static void microcode_fini_cpu(int cpu)
{
	if (microcode_ops->microcode_fini_cpu)
		microcode_ops->microcode_fini_cpu(cpu);
}

static enum ucode_state microcode_resume_cpu(int cpu)
{
	if (apply_microcode_on_target(cpu))
		return UCODE_ERROR;

	pr_debug("CPU%d updated upon resume\n", cpu);

	return UCODE_OK;
}
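
/*
 * Set up microcode state for a CPU that has no valid state yet: collect its
 * signature, then ask the vendor driver for a matching microcode image and
 * apply it. @refresh_fw is passed through to the vendor's
 * request_microcode_fw() callback.
 */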
static enum ucode_state microcode_init_cpu(int cpu, bool refresh_fw)
{
	enum ucode_state ustate;
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

	if (uci->valid)
		return UCODE_OK;

	if (collect_cpu_info(cpu))
		return UCODE_ERROR;

	/* No firmware requests before the system is up and running. */
	if (system_state != SYSTEM_RUNNING)
		return UCODE_NFOUND;

	ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev,
						     refresh_fw);

	if (ustate == UCODE_OK) {
		pr_debug("CPU%d updated upon init\n", cpu);
		apply_microcode_on_target(cpu);
	}

	return ustate;
}

static enum ucode_state microcode_update_cpu(int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

	/* Refresh the cached CPU signature/revision first. */
	collect_cpu_info(cpu);

	if (uci->valid)
		return microcode_resume_cpu(cpu);

	return microcode_init_cpu(cpu, false);
}

static int mc_device_add(struct device *dev, struct subsys_interface *sif)
{
	int err, cpu = dev->id;

	if (!cpu_online(cpu))
		return 0;

	pr_debug("CPU%d added\n", cpu);

	err = sysfs_create_group(&dev->kobj, &mc_attr_group);
	if (err)
		return err;

	if (microcode_init_cpu(cpu, true) == UCODE_ERROR)
		return -EINVAL;

	return err;
}

static void mc_device_remove(struct device *dev, struct subsys_interface *sif)
{
	int cpu = dev->id;

	if (!cpu_online(cpu))
		return;

	pr_debug("CPU%d removed\n", cpu);
	microcode_fini_cpu(cpu);
	sysfs_remove_group(&dev->kobj, &mc_attr_group);
}

static struct subsys_interface mc_cpu_interface = {
	.name			= "microcode",
	.subsys			= &cpu_subsys,
	.add_dev		= mc_device_add,
	.remove_dev		= mc_device_remove,
};

/**
 * mc_bp_resume - Re-apply microcode on the boot CPU during resume.
 */
static void mc_bp_resume(void)
{
	int cpu = smp_processor_id();
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

	if (uci->valid && uci->mc)
		microcode_ops->apply_microcode(cpu);
	else if (!uci->mc)
		reload_early_microcode();
}

static struct syscore_ops mc_syscore_ops = {
	.resume			= mc_bp_resume,
};
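
/*
 * CPU hotplug callbacks: refresh/load microcode and create or remove the
 * per-CPU sysfs group as CPUs come online and go offline.
 */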
static int mc_cpu_online(unsigned int cpu)
{
	struct device *dev;

	dev = get_cpu_device(cpu);
	microcode_update_cpu(cpu);
	pr_debug("CPU%d added\n", cpu);

	if (sysfs_create_group(&dev->kobj, &mc_attr_group))
		pr_err("Failed to create group for CPU%d\n", cpu);

	return 0;
}

static int mc_cpu_down_prep(unsigned int cpu)
{
	struct device *dev;

	dev = get_cpu_device(cpu);

	sysfs_remove_group(&dev->kobj, &mc_attr_group);
	pr_debug("CPU%d removed\n", cpu);

	return 0;
}

static struct attribute *cpu_root_microcode_attrs[] = {
	&dev_attr_reload.attr,
	NULL
};

static struct attribute_group cpu_root_microcode_group = {
	.name  = "microcode",
	.attrs = cpu_root_microcode_attrs,
};
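
/*
 * Driver initialization: pick the vendor-specific microcode_ops, register the
 * platform device used for firmware requests, the CPU subsystem interface,
 * the sysfs groups, the syscore resume hook and the CPU hotplug callbacks.
 */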
int __init microcode_init(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;
	int error;

	if (dis_ucode_ldr)
		return -EINVAL;

	if (c->x86_vendor == X86_VENDOR_INTEL)
		microcode_ops = init_intel_microcode();
	else if (c->x86_vendor == X86_VENDOR_AMD)
		microcode_ops = init_amd_microcode();
	else
		pr_err("no support for this CPU vendor\n");

	if (!microcode_ops)
		return -ENODEV;

	microcode_pdev = platform_device_register_simple("microcode", -1,
							 NULL, 0);
	if (IS_ERR(microcode_pdev))
		return PTR_ERR(microcode_pdev);

	get_online_cpus();
	mutex_lock(&microcode_mutex);

	error = subsys_interface_register(&mc_cpu_interface);
	if (!error)
		perf_check_microcode();
	mutex_unlock(&microcode_mutex);
	put_online_cpus();

	if (error)
		goto out_pdev;

	error = sysfs_create_group(&cpu_subsys.dev_root->kobj,
				   &cpu_root_microcode_group);
	if (error) {
		pr_err("Error creating microcode group!\n");
		goto out_driver;
	}

	error = microcode_dev_init();
	if (error)
		goto out_ucode_group;

	register_syscore_ops(&mc_syscore_ops);
	cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/microcode:online",
				  mc_cpu_online, mc_cpu_down_prep);

	pr_info("Microcode Update Driver: v%s.", DRIVER_VERSION);

	return 0;

 out_ucode_group:
	sysfs_remove_group(&cpu_subsys.dev_root->kobj,
			   &cpu_root_microcode_group);

 out_driver:
	get_online_cpus();
	mutex_lock(&microcode_mutex);

	subsys_interface_unregister(&mc_cpu_interface);

	mutex_unlock(&microcode_mutex);
	put_online_cpus();

 out_pdev:
	platform_device_unregister(microcode_pdev);

	return error;
}
fs_initcall(save_microcode_in_initrd);
late_initcall(microcode_init);