1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23#define pr_fmt(fmt) "microcode: " fmt
24
25#include <linux/earlycpio.h>
26#include <linux/firmware.h>
27#include <linux/uaccess.h>
28#include <linux/vmalloc.h>
29#include <linux/initrd.h>
30#include <linux/kernel.h>
31#include <linux/pci.h>
32
33#include <asm/microcode_amd.h>
34#include <asm/microcode.h>
35#include <asm/processor.h>
36#include <asm/setup.h>
37#include <asm/cpu.h>
38#include <asm/msr.h>
39
/* Equivalence table installed by install_equiv_cpu_table(), vmalloc'ed. */
static struct equiv_cpu_entry *equiv_cpu_table;

/*
 * One cached microcode patch, keyed by the equivalence ID of the CPUs it
 * applies to.  @data owns a kmalloc'ed copy of the patch body.
 */
struct ucode_patch {
	struct list_head plist;
	void *data;
	u32 patch_id;
	u16 equiv_cpu;
};

/* Cache of all suitable patches carved out of the container(s). */
static LIST_HEAD(pcache);

/*
 * Pointer into (and size of) the microcode container matching this machine,
 * located during early boot; consumed by save_microcode_in_initrd_amd().
 * ucode_builtin says whether it came from builtin firmware rather than the
 * initrd.
 */
static u8 *container;
static size_t container_size;
static bool ucode_builtin;

/* Patch level reached by a successful early update, 0 if none. */
static u32 ucode_new_rev;
/* Copy of the BSP's matching patch, reused for APs and on resume. */
static u8 amd_ucode_patch[PATCH_MAX_SIZE];
/* Equivalence ID the BSP matched during early load. */
static u16 this_equiv_id;

/* The raw microcode blob (builtin fw or initrd cpio member). */
static struct cpio_data ucode_cpio;
64
/*
 * Locate the AMD microcode blob inside the initrd cpio archive.  Returns
 * an empty cpio_data when there is no initrd support or no blob.
 */
static struct cpio_data __init find_ucode_in_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	char *path;
	void *start;
	size_t size;

	/*
	 * Well-known path of the microcode member inside the uncompressed
	 * initrd cpio archive.
	 */
	static __initdata char ucode_path[] = "kernel/x86/microcode/AuthenticAMD.bin";

#ifdef CONFIG_X86_32
	struct boot_params *p;

	/*
	 * On 32-bit this runs before paging is enabled, so every global
	 * must be accessed through its physical address; the initrd is
	 * referenced by its physical load address, too.
	 */
	p = (struct boot_params *)__pa_nodebug(&boot_params);
	path = (char *)__pa_nodebug(ucode_path);
	start = (void *)p->hdr.ramdisk_image;
	size = p->hdr.ramdisk_size;
#else
	/* 64-bit: the initrd is reachable through the direct mapping. */
	path = ucode_path;
	start = (void *)(boot_params.hdr.ramdisk_image + PAGE_OFFSET);
	size = boot_params.hdr.ramdisk_size;
#endif

	return find_cpio_data(path, start, size, NULL);
#else
	return (struct cpio_data){ NULL, 0, "" };
#endif
}
100
/*
 * Compute the size of the microcode container starting at @data: the
 * equivalence-table section plus all UCODE_UCODE_TYPE sections following
 * it, bounded by @total_size.  Returns 0 when @data does not start with a
 * valid container header.
 */
static size_t compute_container_size(u8 *data, u32 total_size)
{
	size_t size = 0;
	u32 *header = (u32 *)data;

	/* A container must open with the equivalence CPU table section. */
	if (header[0] != UCODE_MAGIC ||
	    header[1] != UCODE_EQUIV_CPU_TABLE_TYPE || /* type */
	    header[2] == 0)                            /* size */
		return size;

	size = header[2] + CONTAINER_HDR_SZ;
	total_size -= size;
	data += size;

	while (total_size) {
		u16 patch_size;

		header = (u32 *)data;

		if (header[0] != UCODE_UCODE_TYPE)
			break;

		/*
		 * Sanity-check the section's claimed patch size; anything
		 * larger than PATCH_MAX_SIZE ends the scan.
		 */
		patch_size = header[1];
		if (patch_size > PATCH_MAX_SIZE)
			break;

		size += patch_size + SECTION_HDR_SIZE;
		data += patch_size + SECTION_HDR_SIZE;
		total_size -= patch_size + SECTION_HDR_SIZE;
	}

	return size;
}
137
138
139
140
141
142
143
144
145
146
/*
 * Early load occurs before we can vmalloc().  So we look for the microcode
 * patch container in the blob, traverse the equivalence table, look for a
 * matching microcode patch, and update, all in place.  Later, when
 * vmalloc() is available, load_microcode_amd() copies the equivalence
 * table and patches into kernel heap memory.
 *
 * @save_patch: additionally stash the applied patch in amd_ucode_patch
 *              for later AP/resume use.
 */
static void apply_ucode_in_initrd(void *ucode, size_t size, bool save_patch)
{
	struct equiv_cpu_entry *eq;
	size_t *cont_sz;
	u32 *header;
	u8 *data, **cont;
	u8 (*patch)[PATCH_MAX_SIZE];
	u16 eq_id = 0;
	int offset, left;
	u32 rev, eax, ebx, ecx, edx;
	u32 *new_rev;

#ifdef CONFIG_X86_32
	/* Paging may still be off: reference globals by physical address. */
	new_rev = (u32 *)__pa_nodebug(&ucode_new_rev);
	cont_sz = (size_t *)__pa_nodebug(&container_size);
	cont = (u8 **)__pa_nodebug(&container);
	patch = (u8 (*)[PATCH_MAX_SIZE])__pa_nodebug(&amd_ucode_patch);
#else
	new_rev = &ucode_new_rev;
	cont_sz = &container_size;
	cont = &container;
	patch = &amd_ucode_patch;
#endif

	data = ucode;
	left = size;
	header = (u32 *)data;

	/* find equiv cpu table */
	if (header[0] != UCODE_MAGIC ||
	    header[1] != UCODE_EQUIV_CPU_TABLE_TYPE || /* type */
	    header[2] == 0)                            /* size */
		return;

	eax = 0x00000001;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);

	while (left > 0) {
		eq = (struct equiv_cpu_entry *)(data + CONTAINER_HDR_SZ);

		*cont = data;

		/* Advance past the container header. */
		offset = header[2] + CONTAINER_HDR_SZ;
		data += offset;
		left -= offset;

		eq_id = find_equiv_id(eq, eax);
		if (eq_id) {
			this_equiv_id = eq_id;
			*cont_sz = compute_container_size(*cont, left + offset);

			/*
			 * Truncate how much we need to iterate over in the
			 * ucode update loop below.
			 */
			left = *cont_sz - offset;
			break;
		}

		/*
		 * Support multiple container files appended together: if
		 * this one has no matching equivalence entry, fast-forward
		 * to the next container file.
		 */
		while (left > 0) {
			header = (u32 *)data;
			if (header[0] == UCODE_MAGIC &&
			    header[1] == UCODE_EQUIV_CPU_TABLE_TYPE)
				break;

			offset = header[1] + SECTION_HDR_SIZE;
			data += offset;
			left -= offset;
		}

		/* Mark where the next microcode container file starts. */
		offset = data - (u8 *)ucode;
		ucode = data;
	}

	if (!eq_id) {
		*cont = NULL;
		*cont_sz = 0;
		return;
	}

	/* early==true: may be called with paging off on 32-bit. */
	if (check_current_patch_level(&rev, true))
		return;

	while (left > 0) {
		struct microcode_amd *mc;

		header = (u32 *)data;
		if (header[0] != UCODE_UCODE_TYPE || /* type */
		    header[1] == 0)                  /* size */
			break;

		mc = (struct microcode_amd *)(data + SECTION_HDR_SIZE);

		if (eq_id == mc->hdr.processor_rev_id && rev < mc->hdr.patch_id) {

			if (!__apply_microcode_amd(mc)) {
				rev = mc->hdr.patch_id;
				*new_rev = rev;

				if (save_patch)
					memcpy(patch, mc,
					       min_t(u32, header[1], PATCH_MAX_SIZE));
			}
		}

		offset = header[1] + SECTION_HDR_SIZE;
		data += offset;
		left -= offset;
	}
}
265
266static bool __init load_builtin_amd_microcode(struct cpio_data *cp,
267 unsigned int family)
268{
269#ifdef CONFIG_X86_64
270 char fw_name[36] = "amd-ucode/microcode_amd.bin";
271
272 if (family >= 0x15)
273 snprintf(fw_name, sizeof(fw_name),
274 "amd-ucode/microcode_amd_fam%.2xh.bin", family);
275
276 return get_builtin_firmware(cp, fw_name);
277#else
278 return false;
279#endif
280}
281
/*
 * Early BSP entry point: locate the microcode blob (builtin firmware on
 * 64-bit, otherwise the initrd cpio), remember it in ucode_cpio and apply
 * a matching patch, also stashing it for later AP/resume use.
 */
void __init load_ucode_amd_bsp(unsigned int family)
{
	struct cpio_data cp;
	bool *builtin;
	void **data;
	size_t *size;

#ifdef CONFIG_X86_32
	/* Paging may still be off: go through physical addresses. */
	data = (void **)__pa_nodebug(&ucode_cpio.data);
	size = (size_t *)__pa_nodebug(&ucode_cpio.size);
	builtin = (bool *)__pa_nodebug(&ucode_builtin);
#else
	data = &ucode_cpio.data;
	size = &ucode_cpio.size;
	builtin = &ucode_builtin;
#endif

	*builtin = load_builtin_amd_microcode(&cp, family);
	if (!*builtin)
		cp = find_ucode_in_initrd();

	if (!(cp.data && cp.size))
		return;

	*data = cp.data;
	*size = cp.size;

	/* save_patch==true: keep the BSP's patch in amd_ucode_patch. */
	apply_ucode_in_initrd(cp.data, cp.size, true);
}
311
312#ifdef CONFIG_X86_32
313
314
315
316
317
318
319
/*
 * 32-bit AP early load: paging is still off, so the heap-based patch
 * cache cannot be traversed.  If the BSP already saved a verified patch
 * in amd_ucode_patch, apply that directly; otherwise walk the container
 * in place like the BSP did.
 */
void load_ucode_amd_ap(void)
{
	struct microcode_amd *mc;
	size_t *usize;
	void **ucode;

	mc = (struct microcode_amd *)__pa_nodebug(amd_ucode_patch);
	if (mc->hdr.patch_id && mc->hdr.processor_rev_id) {
		__apply_microcode_amd(mc);
		return;
	}

	/* All globals via physical addresses -- paging is off. */
	ucode = (void *)__pa_nodebug(&container);
	usize = (size_t *)__pa_nodebug(&container_size);

	if (!*ucode || !*usize)
		return;

	apply_ucode_in_initrd(*ucode, *usize, false);
}
340
341static void __init collect_cpu_sig_on_bsp(void *arg)
342{
343 unsigned int cpu = smp_processor_id();
344 struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
345
346 uci->cpu_sig.sig = cpuid_eax(0x00000001);
347}
348
349static void __init get_bsp_sig(void)
350{
351 unsigned int bsp = boot_cpu_data.cpu_index;
352 struct ucode_cpu_info *uci = ucode_cpu_info + bsp;
353
354 if (!uci->cpu_sig.sig)
355 smp_call_function_single(bsp, collect_cpu_sig_on_bsp, NULL, 1);
356}
357#else
/*
 * 64-bit AP early load: paging is on, so the saved container can be
 * walked through virtual addresses.  Fast path: when the AP has the same
 * equivalence ID as the BSP, reuse the already-verified amd_ucode_patch.
 */
void load_ucode_amd_ap(void)
{
	unsigned int cpu = smp_processor_id();
	struct equiv_cpu_entry *eq;
	struct microcode_amd *mc;
	u8 *cont = container;
	u32 rev, eax;
	u16 eq_id;

	/* Exit if called on the BSP. */
	if (!cpu)
		return;

	if (!container)
		return;

	/*
	 * 64-bit runs with paging enabled, thus early == false.
	 */
	if (check_current_patch_level(&rev, false))
		return;

	/* Add CONFIG_RANDOMIZE_MEMORY offset. */
	if (!ucode_builtin)
		cont += PAGE_OFFSET - __PAGE_OFFSET_BASE;

	eax = cpuid_eax(0x00000001);
	eq = (struct equiv_cpu_entry *)(cont + CONTAINER_HDR_SZ);

	eq_id = find_equiv_id(eq, eax);
	if (!eq_id)
		return;

	if (eq_id == this_equiv_id) {
		/* Same stepping as the BSP: reuse its saved patch. */
		mc = (struct microcode_amd *)amd_ucode_patch;

		if (mc && rev < mc->hdr.patch_id) {
			if (!__apply_microcode_amd(mc))
				ucode_new_rev = mc->hdr.patch_id;
		}

	} else {
		if (!ucode_cpio.data)
			return;

		/*
		 * AP has a different equivalence ID than the BSP -- looks
		 * like mixed-steppings silicon, so go through the whole
		 * blob anew.
		 */
		apply_ucode_in_initrd(ucode_cpio.data, ucode_cpio.size, false);
	}
}
410#endif
411
/*
 * Late-init hand-off: copy the patches out of the (soon to be freed)
 * initrd container into the kernel-heap patch cache via
 * load_microcode_amd(), then drop the container reference.
 * Returns 0 on success, -EINVAL otherwise.
 */
int __init save_microcode_in_initrd_amd(void)
{
	unsigned long cont;
	int retval = 0;
	enum ucode_state ret;
	u8 *cont_va;
	u32 eax;

	if (!container)
		return -EINVAL;

#ifdef CONFIG_X86_32
	get_bsp_sig();
	cont = (unsigned long)container;
	cont_va = __va(container);
#else
	/*
	 * We need the physical address of the container since
	 * boot_params.hdr.ramdisk_image is a physical address too.
	 */
	cont = __pa_nodebug(container);
	cont_va = container;
#endif

	/*
	 * Take into account the fact that the ramdisk might get relocated
	 * and therefore we need to recompute the container's position in
	 * virtual memory space.
	 */
	if (relocated_ramdisk)
		container = (u8 *)(__va(relocated_ramdisk) +
			     (cont - boot_params.hdr.ramdisk_image));
	else
		container = cont_va;

	/* Add CONFIG_RANDOMIZE_MEMORY offset. */
	if (!ucode_builtin)
		container += PAGE_OFFSET - __PAGE_OFFSET_BASE;

	/* Compute the family from CPUID(1): base + extended family. */
	eax = cpuid_eax(0x00000001);
	eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);

	ret = load_microcode_amd(smp_processor_id(), eax, container, container_size);
	if (ret != UCODE_OK)
		retval = -EINVAL;

	/*
	 * The initrd memory will be freed any moment now -- from here on,
	 * the heap-based patch cache is authoritative.
	 */
	container = NULL;
	container_size = 0;

	return retval;
}
467
/*
 * Re-apply the saved BSP patch, used on resume from suspend when the CPU
 * has lost its microcode state.
 */
void reload_ucode_amd(void)
{
	struct microcode_amd *mc;
	u32 rev;

	/*
	 * early == false because this path runs from suspend/resume or
	 * hotplug, i.e. with paging enabled.
	 */
	if (check_current_patch_level(&rev, false))
		return;

	mc = (struct microcode_amd *)amd_ucode_patch;

	/* Only apply if the saved patch is newer than the running level. */
	if (mc && rev < mc->hdr.patch_id) {
		if (!__apply_microcode_amd(mc)) {
			ucode_new_rev = mc->hdr.patch_id;
			pr_info("reload patch_level=0x%08x\n", ucode_new_rev);
		}
	}
}
489static u16 __find_equiv_id(unsigned int cpu)
490{
491 struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
492 return find_equiv_id(equiv_cpu_table, uci->cpu_sig.sig);
493}
494
495static u32 find_cpu_family_by_equiv_cpu(u16 equiv_cpu)
496{
497 int i = 0;
498
499 BUG_ON(!equiv_cpu_table);
500
501 while (equiv_cpu_table[i].equiv_cpu != 0) {
502 if (equiv_cpu == equiv_cpu_table[i].equiv_cpu)
503 return equiv_cpu_table[i].installed_cpu;
504 i++;
505 }
506 return 0;
507}
508
509
510
511
512static struct ucode_patch *cache_find_patch(u16 equiv_cpu)
513{
514 struct ucode_patch *p;
515
516 list_for_each_entry(p, &pcache, plist)
517 if (p->equiv_cpu == equiv_cpu)
518 return p;
519 return NULL;
520}
521
522static void update_cache(struct ucode_patch *new_patch)
523{
524 struct ucode_patch *p;
525
526 list_for_each_entry(p, &pcache, plist) {
527 if (p->equiv_cpu == new_patch->equiv_cpu) {
528 if (p->patch_id >= new_patch->patch_id)
529
530 return;
531
532 list_replace(&p->plist, &new_patch->plist);
533 kfree(p->data);
534 kfree(p);
535 return;
536 }
537 }
538
539 list_add_tail(&new_patch->plist, &pcache);
540}
541
542static void free_cache(void)
543{
544 struct ucode_patch *p, *tmp;
545
546 list_for_each_entry_safe(p, tmp, &pcache, plist) {
547 __list_del(p->plist.prev, p->plist.next);
548 kfree(p->data);
549 kfree(p);
550 }
551}
552
553static struct ucode_patch *find_patch(unsigned int cpu)
554{
555 u16 equiv_id;
556
557 equiv_id = __find_equiv_id(cpu);
558 if (!equiv_id)
559 return NULL;
560
561 return cache_find_patch(equiv_id);
562}
563
564static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
565{
566 struct cpuinfo_x86 *c = &cpu_data(cpu);
567 struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
568 struct ucode_patch *p;
569
570 csig->sig = cpuid_eax(0x00000001);
571 csig->rev = c->microcode;
572
573
574
575
576
577 p = find_patch(cpu);
578 if (p && (p->patch_id == csig->rev))
579 uci->mc = p->data;
580
581 pr_info("CPU%d: patch_level=0x%08x\n", cpu, csig->rev);
582
583 return 0;
584}
585
/*
 * Return @patch_size when it fits both the remaining buffer (@size) and
 * the per-family maximum patch size, 0 otherwise.
 */
static unsigned int verify_patch_size(u8 family, u32 patch_size,
				      unsigned int size)
{
	u32 max_size;

/* Per-family maximum microcode patch body sizes, in bytes. */
#define F1XH_MPB_MAX_SIZE 2048
#define F14H_MPB_MAX_SIZE 1824
#define F15H_MPB_MAX_SIZE 4096
#define F16H_MPB_MAX_SIZE 3458

	switch (family) {
	case 0x14:
		max_size = F14H_MPB_MAX_SIZE;
		break;
	case 0x15:
		max_size = F15H_MPB_MAX_SIZE;
		break;
	case 0x16:
		max_size = F16H_MPB_MAX_SIZE;
		break;
	default:
		max_size = F1XH_MPB_MAX_SIZE;
		break;
	}

	if (patch_size > min_t(u32, size, max_size)) {
		pr_err("patch size mismatch\n");
		return 0;
	}

	return patch_size;
}
618
619
620
621
/*
 * Patch levels from which the CPU must never be updated further; checked
 * by check_current_patch_level().  Zero-terminated.
 * NOTE(review): presumably these are final levels on certain Fam10h
 * parts -- confirm against the original upstream comment.
 */
static u32 final_levels[] = {
	0x01000098,
	0x0100009f,
	0x010000af,
	0, /* terminator */
};
628
629
630
631
632
633
634
635
636
637
638
/*
 * check_current_patch_level - read the current microcode patch revision
 * @rev:   out: the running patch level, forced to 0 when the level is
 *         "final" so callers skip the update
 * @early: true when called during early boot; on 32-bit paging may then
 *         still be off and final_levels must be accessed through its
 *         physical address
 *
 * Returns true when the running level is one of the final_levels beyond
 * which no further update must be attempted, false otherwise.
 */
bool check_current_patch_level(u32 *rev, bool early)
{
	u32 lvl, dummy, i;
	bool ret = false;
	u32 *levels;

	native_rdmsr(MSR_AMD64_PATCH_LEVEL, lvl, dummy);

	if (IS_ENABLED(CONFIG_X86_32) && early)
		levels = (u32 *)__pa_nodebug(&final_levels);
	else
		levels = final_levels;

	for (i = 0; levels[i]; i++) {
		if (lvl == levels[i]) {
			/* Report 0 so callers never try to update. */
			lvl = 0;
			ret = true;
			break;
		}
	}

	if (rev)
		*rev = lvl;

	return ret;
}
665
/*
 * Hand the patch in @mc_amd to the CPU via the patch-loader MSR and read
 * back MSR_AMD64_PATCH_LEVEL to confirm it took effect.
 * Returns 0 on success, -1 when the new level did not stick.
 */
int __apply_microcode_amd(struct microcode_amd *mc_amd)
{
	u32 rev, dummy;

	native_wrmsrl(MSR_AMD64_PATCH_LOADER, (u64)(long)&mc_amd->hdr.data_code);

	/* verify patch application was successful */
	native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
	if (rev != mc_amd->hdr.patch_id)
		return -1;

	return 0;
}
679
/*
 * Apply the cached patch for @cpu when it is newer than the running
 * level.  Must execute on @cpu itself since it touches per-CPU MSRs.
 * Returns 0 on success or benign no-op, -1 on failure.
 */
int apply_microcode_amd(int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	struct microcode_amd *mc_amd;
	struct ucode_cpu_info *uci;
	struct ucode_patch *p;
	u32 rev;

	BUG_ON(raw_smp_processor_id() != cpu);

	uci = ucode_cpu_info + cpu;

	p = find_patch(cpu);
	if (!p)
		return 0;

	mc_amd = p->data;
	uci->mc = p->data;

	if (check_current_patch_level(&rev, false))
		return -1;

	/* Nothing to do when the running level is already current. */
	if (rev >= mc_amd->hdr.patch_id) {
		c->microcode = rev;
		uci->cpu_sig.rev = rev;
		return 0;
	}

	if (__apply_microcode_amd(mc_amd)) {
		pr_err("CPU%d: update failed for patch_level=0x%08x\n",
			cpu, mc_amd->hdr.patch_id);
		return -1;
	}
	pr_info("CPU%d: new patch_level=0x%08x\n", cpu,
		mc_amd->hdr.patch_id);

	/* Keep both bookkeeping copies of the level in sync. */
	uci->cpu_sig.rev = mc_amd->hdr.patch_id;
	c->microcode = mc_amd->hdr.patch_id;

	return 0;
}
722
723static int install_equiv_cpu_table(const u8 *buf)
724{
725 unsigned int *ibuf = (unsigned int *)buf;
726 unsigned int type = ibuf[1];
727 unsigned int size = ibuf[2];
728
729 if (type != UCODE_EQUIV_CPU_TABLE_TYPE || !size) {
730 pr_err("empty section/"
731 "invalid type field in container file section header\n");
732 return -EINVAL;
733 }
734
735 equiv_cpu_table = vmalloc(size);
736 if (!equiv_cpu_table) {
737 pr_err("failed to allocate equivalent CPU table\n");
738 return -ENOMEM;
739 }
740
741 memcpy(equiv_cpu_table, buf + CONTAINER_HDR_SZ, size);
742
743
744 return size + CONTAINER_HDR_SZ;
745}
746
/* Release the vmalloc'ed equivalence table, if any (vfree(NULL) is ok). */
static void free_equiv_cpu_table(void)
{
	vfree(equiv_cpu_table);
	equiv_cpu_table = NULL;
}
752
/* Drop all driver state: the equivalence table and the patch cache. */
static void cleanup(void)
{
	free_equiv_cpu_table();
	free_cache();
}
758
759
760
761
762
763
764
765
/*
 * Verify one UCODE_UCODE_TYPE section at @fw (with @leftover container
 * bytes remaining) and, when it belongs to @family and passes all checks,
 * copy it into the patch cache.  Returns the number of bytes the section
 * occupies so the caller can advance past it; sections for other families
 * or chipset-specific patches are skipped but still consume their size.
 * Returns -EINVAL only on allocation failure.
 */
static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover)
{
	struct microcode_header_amd *mc_hdr;
	struct ucode_patch *patch;
	unsigned int patch_size, crnt_size, ret;
	u32 proc_fam;
	u16 proc_id;

	/* Section header: u32 type at +0, u32 patch size at +4. */
	patch_size = *(u32 *)(fw + 4);
	crnt_size = patch_size + SECTION_HDR_SIZE;
	mc_hdr = (struct microcode_header_amd *)(fw + SECTION_HDR_SIZE);
	proc_id = mc_hdr->processor_rev_id;

	proc_fam = find_cpu_family_by_equiv_cpu(proc_id);
	if (!proc_fam) {
		pr_err("No patch family for equiv ID: 0x%04x\n", proc_id);
		return crnt_size;
	}

	/* Check whether the patch is for the current family. */
	proc_fam = ((proc_fam >> 8) & 0xf) + ((proc_fam >> 20) & 0xff);
	if (proc_fam != family)
		return crnt_size;

	if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) {
		pr_err("Patch-ID 0x%08x: chipset-specific code unsupported.\n",
			mc_hdr->patch_id);
		return crnt_size;
	}

	ret = verify_patch_size(family, patch_size, leftover);
	if (!ret) {
		pr_err("Patch-ID 0x%08x: size mismatch.\n", mc_hdr->patch_id);
		return crnt_size;
	}

	patch = kzalloc(sizeof(*patch), GFP_KERNEL);
	if (!patch) {
		pr_err("Patch allocation failure.\n");
		return -EINVAL;
	}

	/* Own a private copy of the patch body. */
	patch->data = kmemdup(fw + SECTION_HDR_SIZE, patch_size, GFP_KERNEL);
	if (!patch->data) {
		pr_err("Patch data allocation failure.\n");
		kfree(patch);
		return -EINVAL;
	}

	INIT_LIST_HEAD(&patch->plist);
	patch->patch_id = mc_hdr->patch_id;
	patch->equiv_cpu = proc_id;

	pr_debug("%s: Added patch_id: 0x%08x, proc_id: 0x%04x\n",
		 __func__, patch->patch_id, proc_id);

	/* ... and hand it to the cache, which takes ownership. */
	update_cache(patch);

	return crnt_size;
}
827
828static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
829 size_t size)
830{
831 enum ucode_state ret = UCODE_ERROR;
832 unsigned int leftover;
833 u8 *fw = (u8 *)data;
834 int crnt_size = 0;
835 int offset;
836
837 offset = install_equiv_cpu_table(data);
838 if (offset < 0) {
839 pr_err("failed to create equivalent cpu table\n");
840 return ret;
841 }
842 fw += offset;
843 leftover = size - offset;
844
845 if (*(u32 *)fw != UCODE_UCODE_TYPE) {
846 pr_err("invalid type field in container file section header\n");
847 free_equiv_cpu_table();
848 return ret;
849 }
850
851 while (leftover) {
852 crnt_size = verify_and_add_patch(family, fw, leftover);
853 if (crnt_size < 0)
854 return ret;
855
856 fw += crnt_size;
857 leftover -= crnt_size;
858 }
859
860 return UCODE_OK;
861}
862
/*
 * (Re)build the patch cache from the container in @data/@size for @family.
 * On 32-bit, additionally snapshot the boot CPU's matching patch into
 * amd_ucode_patch for early AP load and resume.
 */
enum ucode_state load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size)
{
	enum ucode_state ret;

	/* Drop any previously installed equivalence table first. */
	free_equiv_cpu_table();

	ret = __load_microcode_amd(family, data, size);

	if (ret != UCODE_OK)
		cleanup();

#ifdef CONFIG_X86_32
	/* Save the BSP's matching patch for early AP load / resume. */
	if (cpu_data(cpu).cpu_index == boot_cpu_data.cpu_index) {
		struct ucode_patch *p = find_patch(cpu);
		if (p) {
			memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
			/* ksize() bounds the copy to the actual allocation. */
			memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data),
							       PATCH_MAX_SIZE));
		}
	}
#endif
	return ret;
}
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
/*
 * AMD microcode firmware naming convention: up to family 0x15 the patches
 * live in the legacy file
 *
 *    amd-ucode/microcode_amd.bin
 *
 * while from family 0x15 on they are in family-specific files
 *
 *    amd-ucode/microcode_amd_fam15h.bin
 *    amd-ucode/microcode_amd_fam16h.bin
 *    ...
 *
 * Fetch the right file via the firmware loader and rebuild the cache.
 */
static enum ucode_state request_microcode_amd(int cpu, struct device *device,
					      bool refresh_fw)
{
	char fw_name[36] = "amd-ucode/microcode_amd.bin";
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	enum ucode_state ret = UCODE_NFOUND;
	const struct firmware *fw;

	/* Reload the ucode container only on the boot CPU. */
	if (!refresh_fw || c->cpu_index != boot_cpu_data.cpu_index)
		return UCODE_OK;

	if (c->x86 >= 0x15)
		snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86);

	if (request_firmware_direct(&fw, (const char *)fw_name, device)) {
		pr_debug("failed to load file %s\n", fw_name);
		goto out;
	}

	ret = UCODE_ERROR;
	if (*(u32 *)fw->data != UCODE_MAGIC) {
		pr_err("invalid magic value (0x%08x)\n", *(u32 *)fw->data);
		goto fw_release;
	}

	ret = load_microcode_amd(cpu, c->x86, fw->data, fw->size);

 fw_release:
	release_firmware(fw);

 out:
	return ret;
}
939
/* Loading microcode from a user-space buffer is not supported on AMD. */
static enum ucode_state
request_microcode_user(int cpu, const void __user *buf, size_t size)
{
	return UCODE_ERROR;
}
945
/* Drop @cpu's reference to the cached patch; the cache itself stays. */
static void microcode_fini_cpu_amd(int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

	uci->mc = NULL;
}
952
/* AMD implementation of the generic microcode driver operations. */
static struct microcode_ops microcode_amd_ops = {
	.request_microcode_user           = request_microcode_user,
	.request_microcode_fw             = request_microcode_amd,
	.collect_cpu_info                 = collect_cpu_info_amd,
	.apply_microcode                  = apply_microcode_amd,
	.microcode_fini_cpu               = microcode_fini_cpu_amd,
};
960
961struct microcode_ops * __init init_amd_microcode(void)
962{
963 struct cpuinfo_x86 *c = &boot_cpu_data;
964
965 if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) {
966 pr_warn("AMD CPU family 0x%x not supported\n", c->x86);
967 return NULL;
968 }
969
970 if (ucode_new_rev)
971 pr_info_once("microcode updated early to new patch_level=0x%08x\n",
972 ucode_new_rev);
973
974 return µcode_amd_ops;
975}
976
/* Module teardown: release the equivalence table and patch cache. */
void __exit exit_amd_microcode(void)
{
	cleanup();
}
981