linux/arch/x86/kernel/cpu/microcode/amd_early.c
/*
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Jacob Shin <jacob.shin@amd.com>
 * Fixes: Borislav Petkov <bp@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/earlycpio.h>
#include <linux/initrd.h>

#include <asm/cpu.h>
#include <asm/setup.h>
#include <asm/microcode_amd.h>

/*
 * This points to the current valid container of microcode patches which we
 * will save from the initrd before jettisoning its contents.
 */
static u8 *container;
static size_t container_size;

static u32 ucode_new_rev;
u8 amd_ucode_patch[PATCH_MAX_SIZE];
static u16 this_equiv_id;

struct cpio_data ucode_cpio;

/*
 * Microcode patch container file is prepended to the initrd in cpio format.
 * See Documentation/x86/early-microcode.txt
 */
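/*
 * A composite initrd is built roughly like this (see the document above for
 * the authoritative steps; the paths below are illustrative):
 *
 *   mkdir -p initrd/kernel/x86/microcode
 *   cp microcode.bin initrd/kernel/x86/microcode/AuthenticAMD.bin
 *   (cd initrd; find . | cpio -o -H newc) > ucode.cpio
 *   cat ucode.cpio /boot/initrd.img > /boot/initrd.ucode.img
 */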
static __initdata char ucode_path[] = "kernel/x86/microcode/AuthenticAMD.bin";

static struct cpio_data __init find_ucode_in_initrd(void)
{
        long offset = 0;
        char *path;
        void *start;
        size_t size;
#ifdef CONFIG_X86_32
        struct boot_params *p;

        /*
         * On 32-bit, early load occurs before paging is turned on, so we
         * need to use physical addresses.
         */
        p       = (struct boot_params *)__pa_nodebug(&boot_params);
        path    = (char *)__pa_nodebug(ucode_path);
        start   = (void *)p->hdr.ramdisk_image;
        size    = p->hdr.ramdisk_size;
#else
        path    = ucode_path;
        start   = (void *)(boot_params.hdr.ramdisk_image + PAGE_OFFSET);
        size    = boot_params.hdr.ramdisk_size;
#endif

        return find_cpio_data(path, start, size, &offset);
}

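/*
 * A container starts with a CONTAINER_HDR_SZ header -- UCODE_MAGIC, the
 * equivalence table section type, and the table's size in bytes -- followed
 * by the equivalence cpu table itself and then a series of patch sections,
 * each introduced by a SECTION_HDR_SIZE header carrying the section type
 * (UCODE_UCODE_TYPE) and the patch size. Walk that layout and return the
 * total size of this container, so callers can tell where a subsequently
 * appended container begins.
 */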
static size_t compute_container_size(u8 *data, u32 total_size)
{
        size_t size = 0;
        u32 *header = (u32 *)data;

        if (header[0] != UCODE_MAGIC ||
            header[1] != UCODE_EQUIV_CPU_TABLE_TYPE || /* type */
            header[2] == 0)                            /* size */
                return size;

        size = header[2] + CONTAINER_HDR_SZ;
        total_size -= size;
        data += size;

        while (total_size) {
                u16 patch_size;

                header = (u32 *)data;

                if (header[0] != UCODE_UCODE_TYPE)
                        break;

                /*
                 * Sanity-check patch size.
                 */
                patch_size = header[1];
                if (patch_size > PATCH_MAX_SIZE)
                        break;

                size       += patch_size + SECTION_HDR_SIZE;
                data       += patch_size + SECTION_HDR_SIZE;
                total_size -= patch_size + SECTION_HDR_SIZE;
        }

        return size;
}

/*
 * Early load occurs before we can vmalloc(). So we look for the microcode
 * patch container file in the initrd, traverse the equivalence cpu table,
 * look for a matching microcode patch, and apply it, all in place in initrd
 * memory. When vmalloc() becomes available later -- on 64-bit during the
 * first AP load, and on 32-bit during save_microcode_in_initrd_amd() -- we
 * can call load_microcode_amd() to save the equivalence cpu table and the
 * microcode patches in kernel heap memory.
 */
static void apply_ucode_in_initrd(void *ucode, size_t size)
{
        struct equiv_cpu_entry *eq;
        size_t *cont_sz;
        u32 *header;
        u8  *data, **cont;
        u16 eq_id = 0;
        int offset, left;
        u32 rev, eax, ebx, ecx, edx;
        u32 *new_rev;

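        /*
         * On 32-bit this runs before paging is enabled, so the globals we
         * write below have to be accessed through their physical addresses.
         */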
#ifdef CONFIG_X86_32
        new_rev = (u32 *)__pa_nodebug(&ucode_new_rev);
        cont_sz = (size_t *)__pa_nodebug(&container_size);
        cont    = (u8 **)__pa_nodebug(&container);
#else
        new_rev = &ucode_new_rev;
        cont_sz = &container_size;
        cont    = &container;
#endif

        data   = ucode;
        left   = size;
        header = (u32 *)data;

        /* find equiv cpu table */
        if (header[0] != UCODE_MAGIC ||
            header[1] != UCODE_EQUIV_CPU_TABLE_TYPE || /* type */
            header[2] == 0)                            /* size */
                return;

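        /* CPUID(1).EAX is the CPU signature we match against the equivalence table. */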
        eax = 0x00000001;
        ecx = 0;
        native_cpuid(&eax, &ebx, &ecx, &edx);

        while (left > 0) {
                eq = (struct equiv_cpu_entry *)(data + CONTAINER_HDR_SZ);

                *cont = data;

                /* Advance past the container header */
                offset = header[2] + CONTAINER_HDR_SZ;
                data  += offset;
                left  -= offset;

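                /*
                 * Map the signature to the equivalence ID the patch headers
                 * are keyed by; 0 means this table has no matching entry.
                 */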
                eq_id = find_equiv_id(eq, eax);
                if (eq_id) {
                        this_equiv_id = eq_id;
                        *cont_sz = compute_container_size(*cont, left + offset);

                        /*
                         * Truncate how much we need to iterate over in the
                         * ucode update loop below.
                         */
                        left = *cont_sz - offset;
                        break;
                }

                /*
                 * Support multiple container files appended together. If this
                 * one does not have a matching equivalent cpu entry, we fast
                 * forward to the next container file.
                 */
                while (left > 0) {
                        header = (u32 *)data;
                        if (header[0] == UCODE_MAGIC &&
                            header[1] == UCODE_EQUIV_CPU_TABLE_TYPE)
                                break;

                        offset = header[1] + SECTION_HDR_SIZE;
                        data  += offset;
                        left  -= offset;
                }

                /* mark where the next microcode container file starts */
                offset    = data - (u8 *)ucode;
                ucode     = data;
        }

        if (!eq_id) {
                *cont = NULL;
                *cont_sz = 0;
                return;
        }

        /* find ucode and update if needed */

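        /* Read the currently applied patch revision (low 32 bits of MSR_AMD64_PATCH_LEVEL). */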
        native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax);

        while (left > 0) {
                struct microcode_amd *mc;

                header = (u32 *)data;
                if (header[0] != UCODE_UCODE_TYPE || /* type */
                    header[1] == 0)                  /* size */
                        break;

                mc = (struct microcode_amd *)(data + SECTION_HDR_SIZE);

                if (eq_id == mc->hdr.processor_rev_id && rev < mc->hdr.patch_id) {

                        if (!__apply_microcode_amd(mc)) {
                                rev = mc->hdr.patch_id;
                                *new_rev = rev;

                                /* save ucode patch */
                                memcpy(amd_ucode_patch, mc,
                                       min_t(u32, header[1], PATCH_MAX_SIZE));
                        }
                }

                offset  = header[1] + SECTION_HDR_SIZE;
                data   += offset;
                left   -= offset;
        }
}

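/*
 * Early load on the BSP: find the microcode container in the initrd, stash
 * its cpio location in ucode_cpio for later AP loads, and apply a matching
 * patch in place.
 */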
void __init load_ucode_amd_bsp(void)
{
        struct cpio_data cp;
        void **data;
        size_t *size;

#ifdef CONFIG_X86_32
        data =  (void **)__pa_nodebug(&ucode_cpio.data);
        size = (size_t *)__pa_nodebug(&ucode_cpio.size);
#else
        data = &ucode_cpio.data;
        size = &ucode_cpio.size;
#endif

        cp = find_ucode_in_initrd();
        if (!cp.data)
                return;

        *data = cp.data;
        *size = cp.size;

        apply_ucode_in_initrd(cp.data, cp.size);
}

#ifdef CONFIG_X86_32
/*
 * On 32-bit, since an AP's early load occurs before paging is turned on, we
 * cannot traverse cpu_equiv_table and the patch cache in kernel heap memory.
 * So during cold boot, an AP will apply_ucode_in_initrd() just like the BSP.
 * During save_microcode_in_initrd_amd(), the BSP's patch is copied to
 * amd_ucode_patch, which is used upon resume from suspend.
 */
void load_ucode_amd_ap(void)
{
        struct microcode_amd *mc;
        size_t *usize;
        void **ucode;

        mc = (struct microcode_amd *)__pa(amd_ucode_patch);
        if (mc->hdr.patch_id && mc->hdr.processor_rev_id) {
                __apply_microcode_amd(mc);
                return;
        }

        ucode = (void *)__pa_nodebug(&container);
        usize = (size_t *)__pa_nodebug(&container_size);

        if (!*ucode || !*usize)
                return;

        apply_ucode_in_initrd(*ucode, *usize);
}

static void __init collect_cpu_sig_on_bsp(void *arg)
{
        unsigned int cpu = smp_processor_id();
        struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

        uci->cpu_sig.sig = cpuid_eax(0x00000001);
}

static void __init get_bsp_sig(void)
{
        unsigned int bsp = boot_cpu_data.cpu_index;
        struct ucode_cpu_info *uci = ucode_cpu_info + bsp;

        if (!uci->cpu_sig.sig)
                smp_call_function_single(bsp, collect_cpu_sig_on_bsp, NULL, 1);
}
#else
void load_ucode_amd_ap(void)
{
        unsigned int cpu = smp_processor_id();
        struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
        struct equiv_cpu_entry *eq;
        struct microcode_amd *mc;
        u32 rev, eax;
        u16 eq_id;

        /* Exit if called on the BSP. */
        if (!cpu)
                return;

        if (!container)
                return;

        rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax);

        uci->cpu_sig.rev = rev;
        uci->cpu_sig.sig = eax;

        eax = cpuid_eax(0x00000001);
        eq  = (struct equiv_cpu_entry *)(container + CONTAINER_HDR_SZ);

        eq_id = find_equiv_id(eq, eax);
        if (!eq_id)
                return;

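        /*
         * Same equivalence ID as the BSP: the patch the BSP saved in
         * amd_ucode_patch fits this AP as well.
         */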
        if (eq_id == this_equiv_id) {
                mc = (struct microcode_amd *)amd_ucode_patch;

                if (mc && rev < mc->hdr.patch_id) {
                        if (!__apply_microcode_amd(mc))
                                ucode_new_rev = mc->hdr.patch_id;
                }

        } else {
                if (!ucode_cpio.data)
                        return;

                /*
                 * The AP has a different equivalence ID than the BSP, which
                 * looks like mixed-steppings silicon, so go through the
                 * ucode blob anew.
                 */
                apply_ucode_in_initrd(ucode_cpio.data, ucode_cpio.size);
        }
}
#endif

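/*
 * Called once kernel heap memory is usable: re-derive the container's
 * virtual address in case the ramdisk was relocated, then hand the
 * container to load_microcode_amd() so the patches outlive the initrd.
 */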
int __init save_microcode_in_initrd_amd(void)
{
        unsigned long cont;
        enum ucode_state ret;
        u32 eax;

        if (!container)
                return -EINVAL;

#ifdef CONFIG_X86_32
        get_bsp_sig();
        cont = (unsigned long)container;
#else
        /*
         * We need the physical address of the container on both 32- and
         * 64-bit, since boot_params.hdr.ramdisk_image is a physical address.
         */
        cont = __pa(container);
#endif

        /*
         * The ramdisk might have been relocated, in which case we need to
         * recompute the container's position in virtual memory space.
         */
        if (relocated_ramdisk)
                container = (u8 *)(__va(relocated_ramdisk) +
                             (cont - boot_params.hdr.ramdisk_image));

        if (ucode_new_rev)
                pr_info("microcode: updated early to new patch_level=0x%08x\n",
                        ucode_new_rev);

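        /* Assemble the CPU family: base family plus the extended family bits. */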
        eax   = cpuid_eax(0x00000001);
        eax   = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);

        ret = load_microcode_amd(eax, container, container_size);
        if (ret != UCODE_OK)
                return -EINVAL;

        /*
         * The initrd is about to be freed, so stash the patches for the
         * current family now and switch to the patch cache for cpu hotplug
         * etc. later.
         */
        container = NULL;
        container_size = 0;

        return 0;
}