linux/arch/x86/kernel/cpu/microcode/intel.c
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Intel CPU Microcode Update Driver for Linux
   4 *
   5 * Copyright (C) 2000-2006 Tigran Aivazian <aivazian.tigran@gmail.com>
   6 *               2006 Shaohua Li <shaohua.li@intel.com>
   7 *
   8 * Intel CPU microcode early update for Linux
   9 *
  10 * Copyright (C) 2012 Fenghua Yu <fenghua.yu@intel.com>
  11 *                    H Peter Anvin <hpa@zytor.com>
  12 */
  13
  14/*
  15 * This needs to be before all headers so that pr_debug in printk.h doesn't turn
  16 * printk calls into no_printk().
  17 *
  18 *#define DEBUG
  19 */
  20#define pr_fmt(fmt) "microcode: " fmt
  21
  22#include <linux/earlycpio.h>
  23#include <linux/firmware.h>
  24#include <linux/uaccess.h>
  25#include <linux/vmalloc.h>
  26#include <linux/initrd.h>
  27#include <linux/kernel.h>
  28#include <linux/slab.h>
  29#include <linux/cpu.h>
  30#include <linux/uio.h>
  31#include <linux/mm.h>
  32
  33#include <asm/microcode_intel.h>
  34#include <asm/intel-family.h>
  35#include <asm/processor.h>
  36#include <asm/tlbflush.h>
  37#include <asm/setup.h>
  38#include <asm/msr.h>
  39
  40static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin";
  41
  42/* Current microcode patch used in early patching on the APs. */
  43static struct microcode_intel *intel_ucode_patch;
  44
  45/* last level cache size per core */
  46static int llc_size_per_core;
  47
  48static inline bool cpu_signatures_match(unsigned int s1, unsigned int p1,
  49                                        unsigned int s2, unsigned int p2)
  50{
  51        if (s1 != s2)
  52                return false;
  53
  54        /* Processor flags are either both 0 ... */
  55        if (!p1 && !p2)
  56                return true;
  57
  58        /* ... or they intersect. */
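            /* e.g. pf 0x10 (platform 4 only) intersects a patch pf mask of 0x12. */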
  59        return p1 & p2;
  60}
  61
  62/*
  63 * Returns 1 if @mc carries a signature matching @csig/@cpf, 0 otherwise.
  64 */
  65static int find_matching_signature(void *mc, unsigned int csig, int cpf)
  66{
  67        struct microcode_header_intel *mc_hdr = mc;
  68        struct extended_sigtable *ext_hdr;
  69        struct extended_signature *ext_sig;
  70        int i;
  71
  72        if (cpu_signatures_match(csig, cpf, mc_hdr->sig, mc_hdr->pf))
  73                return 1;
  74
  75        /* Look for ext. headers: */
  76        if (get_totalsize(mc_hdr) <= get_datasize(mc_hdr) + MC_HEADER_SIZE)
  77                return 0;
  78
  79        ext_hdr = mc + get_datasize(mc_hdr) + MC_HEADER_SIZE;
  80        ext_sig = (void *)ext_hdr + EXT_HEADER_SIZE;
  81
  82        for (i = 0; i < ext_hdr->count; i++) {
  83                if (cpu_signatures_match(csig, cpf, ext_sig->sig, ext_sig->pf))
  84                        return 1;
  85                ext_sig++;
  86        }
  87        return 0;
  88}
  89
  90/*
  91 * Returns 1 if @mc is newer than @new_rev and its signature matches, 0 otherwise.
  92 */
  93static int has_newer_microcode(void *mc, unsigned int csig, int cpf, int new_rev)
  94{
  95        struct microcode_header_intel *mc_hdr = mc;
  96
  97        if (mc_hdr->rev <= new_rev)
  98                return 0;
  99
 100        return find_matching_signature(mc, csig, cpf);
 101}
 102
 103/*
 104 * Given a CPU signature and a microcode patch, this function checks whether
 105 * the patch matches the CPU's family and model.
 106 *
 107 * %true - if there's a match
 108 * %false - otherwise
 109 */
 110static bool microcode_matches(struct microcode_header_intel *mc_header,
 111                              unsigned long sig)
 112{
 113        unsigned long total_size = get_totalsize(mc_header);
 114        unsigned long data_size = get_datasize(mc_header);
 115        struct extended_sigtable *ext_header;
 116        unsigned int fam_ucode, model_ucode;
 117        struct extended_signature *ext_sig;
 118        unsigned int fam, model;
 119        int ext_sigcount, i;
 120
 121        fam   = x86_family(sig);
 122        model = x86_model(sig);
 123
 124        fam_ucode   = x86_family(mc_header->sig);
 125        model_ucode = x86_model(mc_header->sig);
 126
 127        if (fam == fam_ucode && model == model_ucode)
 128                return true;
 129
 130        /* Look for ext. headers: */
 131        if (total_size <= data_size + MC_HEADER_SIZE)
 132                return false;
 133
 134        ext_header   = (void *) mc_header + data_size + MC_HEADER_SIZE;
 135        ext_sig      = (void *)ext_header + EXT_HEADER_SIZE;
 136        ext_sigcount = ext_header->count;
 137
 138        for (i = 0; i < ext_sigcount; i++) {
 139                fam_ucode   = x86_family(ext_sig->sig);
 140                model_ucode = x86_model(ext_sig->sig);
 141
 142                if (fam == fam_ucode && model == model_ucode)
 143                        return true;
 144
 145                ext_sig++;
 146        }
 147        return false;
 148}
 149
 150static struct ucode_patch *memdup_patch(void *data, unsigned int size)
 151{
 152        struct ucode_patch *p;
 153
 154        p = kzalloc(sizeof(struct ucode_patch), GFP_KERNEL);
 155        if (!p)
 156                return NULL;
 157
 158        p->data = kmemdup(data, size, GFP_KERNEL);
 159        if (!p->data) {
 160                kfree(p);
 161                return NULL;
 162        }
 163
 164        return p;
 165}
 166
 167static void save_microcode_patch(void *data, unsigned int size)
 168{
 169        struct microcode_header_intel *mc_hdr, *mc_saved_hdr;
 170        struct ucode_patch *iter, *tmp, *p = NULL;
 171        bool prev_found = false;
 172        unsigned int sig, pf;
 173
 174        mc_hdr = (struct microcode_header_intel *)data;
 175
 176        list_for_each_entry_safe(iter, tmp, &microcode_cache, plist) {
 177                mc_saved_hdr = (struct microcode_header_intel *)iter->data;
 178                sig          = mc_saved_hdr->sig;
 179                pf           = mc_saved_hdr->pf;
 180
 181                if (find_matching_signature(data, sig, pf)) {
 182                        prev_found = true;
 183
 184                        if (mc_hdr->rev <= mc_saved_hdr->rev)
 185                                continue;
 186
 187                        p = memdup_patch(data, size);
 188                        if (!p)
 189                                pr_err("Error allocating buffer %p\n", data);
 190                        else {
 191                                list_replace(&iter->plist, &p->plist);
 192                                kfree(iter->data);
 193                                kfree(iter);
 194                        }
 195                }
 196        }
 197
 198        /*
 199         * No previous patch was found in the list cache; save the newly
 200         * found one.
 201         */
 202        if (!prev_found) {
 203                p = memdup_patch(data, size);
 204                if (!p)
 205                        pr_err("Error allocating buffer for %p\n", data);
 206                else
 207                        list_add_tail(&p->plist, &microcode_cache);
 208        }
 209
 210        if (!p)
 211                return;
 212
 213        /*
 214         * Save for early loading. On 32-bit, that needs to be a physical
 215         * address as the APs are running from physical addresses, before
 216         * paging has been enabled.
 217         */
 218        if (IS_ENABLED(CONFIG_X86_32))
 219                intel_ucode_patch = (struct microcode_intel *)__pa_nodebug(p->data);
 220        else
 221                intel_ucode_patch = p->data;
 222}
 223
 224static int microcode_sanity_check(void *mc, int print_err)
 225{
 226        unsigned long total_size, data_size, ext_table_size;
 227        struct microcode_header_intel *mc_header = mc;
 228        struct extended_sigtable *ext_header = NULL;
 229        u32 sum, orig_sum, ext_sigcount = 0, i;
 230        struct extended_signature *ext_sig;
 231
 232        total_size = get_totalsize(mc_header);
 233        data_size = get_datasize(mc_header);
 234
 235        if (data_size + MC_HEADER_SIZE > total_size) {
 236                if (print_err)
 237                        pr_err("Error: bad microcode data file size.\n");
 238                return -EINVAL;
 239        }
 240
 241        if (mc_header->ldrver != 1 || mc_header->hdrver != 1) {
 242                if (print_err)
 243                        pr_err("Error: invalid/unknown microcode update format.\n");
 244                return -EINVAL;
 245        }
 246
 247        ext_table_size = total_size - (MC_HEADER_SIZE + data_size);
 248        if (ext_table_size) {
 249                u32 ext_table_sum = 0;
 250                u32 *ext_tablep;
 251
 252                if ((ext_table_size < EXT_HEADER_SIZE)
 253                 || ((ext_table_size - EXT_HEADER_SIZE) % EXT_SIGNATURE_SIZE)) {
 254                        if (print_err)
 255                                pr_err("Error: truncated extended signature table.\n");
 256                        return -EINVAL;
 257                }
 258
 259                ext_header = mc + MC_HEADER_SIZE + data_size;
 260                if (ext_table_size != exttable_size(ext_header)) {
 261                        if (print_err)
 262                                pr_err("Error: extended signature table size mismatch.\n");
 263                        return -EFAULT;
 264                }
 265
 266                ext_sigcount = ext_header->count;
 267
 268                /*
 269                 * Check extended table checksum: the sum of all dwords that
 270                 * comprise a valid table must be 0.
 271                 */
 272                ext_tablep = (u32 *)ext_header;
 273
 274                i = ext_table_size / sizeof(u32);
 275                while (i--)
 276                        ext_table_sum += ext_tablep[i];
 277
 278                if (ext_table_sum) {
 279                        if (print_err)
 280                                pr_warn("Bad extended signature table checksum, aborting.\n");
 281                        return -EINVAL;
 282                }
 283        }
 284
 285        /*
 286         * Calculate the checksum of update data and header. The checksum of
 287         * valid update data and header including the extended signature table
 288         * must be 0.
 289         */
 290        orig_sum = 0;
 291        i = (MC_HEADER_SIZE + data_size) / sizeof(u32);
 292        while (i--)
 293                orig_sum += ((u32 *)mc)[i];
 294
 295        if (orig_sum) {
 296                if (print_err)
 297                        pr_err("Bad microcode data checksum, aborting.\n");
 298                return -EINVAL;
 299        }
 300
 301        if (!ext_table_size)
 302                return 0;
 303
 304        /*
 305         * Check extended signature checksum: 0 => valid.
 306         */
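            /*
             * Each extended entry carries a cksum chosen so that substituting
             * its (sig, pf, cksum) triple for the header's keeps the overall
             * image checksum at 0, hence the difference of the two triples
             * must itself be 0.
             */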
 307        for (i = 0; i < ext_sigcount; i++) {
 308                ext_sig = (void *)ext_header + EXT_HEADER_SIZE +
 309                          EXT_SIGNATURE_SIZE * i;
 310
 311                sum = (mc_header->sig + mc_header->pf + mc_header->cksum) -
 312                      (ext_sig->sig + ext_sig->pf + ext_sig->cksum);
 313                if (sum) {
 314                        if (print_err)
 315                                pr_err("Bad extended signature checksum, aborting.\n");
 316                        return -EINVAL;
 317                }
 318        }
 319        return 0;
 320}
 321
 322/*
 323 * Get microcode matching the BSP's model. Only CPUs with the same model as
 324 * the BSP are supported in the platform.
 325 */
 326static struct microcode_intel *
 327scan_microcode(void *data, size_t size, struct ucode_cpu_info *uci, bool save)
 328{
 329        struct microcode_header_intel *mc_header;
 330        struct microcode_intel *patch = NULL;
 331        unsigned int mc_size;
 332
 333        while (size) {
 334                if (size < sizeof(struct microcode_header_intel))
 335                        break;
 336
 337                mc_header = (struct microcode_header_intel *)data;
 338
 339                mc_size = get_totalsize(mc_header);
 340                if (!mc_size ||
 341                    mc_size > size ||
 342                    microcode_sanity_check(data, 0) < 0)
 343                        break;
 344
 345                size -= mc_size;
 346
 347                if (!microcode_matches(mc_header, uci->cpu_sig.sig)) {
 348                        data += mc_size;
 349                        continue;
 350                }
 351
 352                if (save) {
 353                        save_microcode_patch(data, mc_size);
 354                        goto next;
 355                }
 356
 357
 358                if (!patch) {
 359                        if (!has_newer_microcode(data,
 360                                                 uci->cpu_sig.sig,
 361                                                 uci->cpu_sig.pf,
 362                                                 uci->cpu_sig.rev))
 363                                goto next;
 364
 365                } else {
 366                        struct microcode_header_intel *phdr = &patch->hdr;
 367
 368                        if (!has_newer_microcode(data,
 369                                                 phdr->sig,
 370                                                 phdr->pf,
 371                                                 phdr->rev))
 372                                goto next;
 373                }
 374
 375                /* We have a newer patch, save it. */
 376                patch = data;
 377
 378next:
 379                data += mc_size;
 380        }
 381
 382        if (size)
 383                return NULL;
 384
 385        return patch;
 386}
 387
 388static int collect_cpu_info_early(struct ucode_cpu_info *uci)
 389{
 390        unsigned int val[2];
 391        unsigned int family, model;
 392        struct cpu_signature csig = { 0 };
 393        unsigned int eax, ebx, ecx, edx;
 394
 395        memset(uci, 0, sizeof(*uci));
 396
 397        eax = 0x00000001;
 398        ecx = 0;
 399        native_cpuid(&eax, &ebx, &ecx, &edx);
 400        csig.sig = eax;
 401
 402        family = x86_family(eax);
 403        model  = x86_model(eax);
 404
 405        if ((model >= 5) || (family > 6)) {
 406                /* get processor flags from MSR 0x17 */
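                    /*
                     * Bits 52:50 of MSR_IA32_PLATFORM_ID hold the 3-bit
                     * platform ID ((val[1] >> 18) & 7); pf is its one-hot
                     * mask, e.g. platform 4 -> pf = 0x10.
                     */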
 407                native_rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
 408                csig.pf = 1 << ((val[1] >> 18) & 7);
 409        }
 410
 411        csig.rev = intel_get_microcode_revision();
 412
 413        uci->cpu_sig = csig;
 414        uci->valid = 1;
 415
 416        return 0;
 417}
 418
 419static void show_saved_mc(void)
 420{
 421#ifdef DEBUG
 422        int i = 0, j;
 423        unsigned int sig, pf, rev, total_size, data_size, date;
 424        struct ucode_cpu_info uci;
 425        struct ucode_patch *p;
 426
 427        if (list_empty(&microcode_cache)) {
 428                pr_debug("no microcode data saved.\n");
 429                return;
 430        }
 431
 432        collect_cpu_info_early(&uci);
 433
 434        sig     = uci.cpu_sig.sig;
 435        pf      = uci.cpu_sig.pf;
 436        rev     = uci.cpu_sig.rev;
 437        pr_debug("CPU: sig=0x%x, pf=0x%x, rev=0x%x\n", sig, pf, rev);
 438
 439        list_for_each_entry(p, &microcode_cache, plist) {
 440                struct microcode_header_intel *mc_saved_header;
 441                struct extended_sigtable *ext_header;
 442                struct extended_signature *ext_sig;
 443                int ext_sigcount;
 444
 445                mc_saved_header = (struct microcode_header_intel *)p->data;
 446
 447                sig     = mc_saved_header->sig;
 448                pf      = mc_saved_header->pf;
 449                rev     = mc_saved_header->rev;
 450                date    = mc_saved_header->date;
 451
 452                total_size      = get_totalsize(mc_saved_header);
 453                data_size       = get_datasize(mc_saved_header);
 454
 455                pr_debug("mc_saved[%d]: sig=0x%x, pf=0x%x, rev=0x%x, total size=0x%x, date = %04x-%02x-%02x\n",
 456                         i++, sig, pf, rev, total_size,
 457                         date & 0xffff,
 458                         date >> 24,
 459                         (date >> 16) & 0xff);
 460
 461                /* Look for ext. headers: */
 462                if (total_size <= data_size + MC_HEADER_SIZE)
 463                        continue;
 464
 465                ext_header = (void *)mc_saved_header + data_size + MC_HEADER_SIZE;
 466                ext_sigcount = ext_header->count;
 467                ext_sig = (void *)ext_header + EXT_HEADER_SIZE;
 468
 469                for (j = 0; j < ext_sigcount; j++) {
 470                        sig = ext_sig->sig;
 471                        pf = ext_sig->pf;
 472
 473                        pr_debug("\tExtended[%d]: sig=0x%x, pf=0x%x\n",
 474                                 j, sig, pf);
 475
 476                        ext_sig++;
 477                }
 478        }
 479#endif
 480}
 481
 482/*
 483 * Save this microcode patch. It will be loaded early when a CPU is
 484 * hot-added or resumes.
 485 */
 486static void save_mc_for_early(u8 *mc, unsigned int size)
 487{
 488        /* Synchronization during CPU hotplug. */
 489        static DEFINE_MUTEX(x86_cpu_microcode_mutex);
 490
 491        mutex_lock(&x86_cpu_microcode_mutex);
 492
 493        save_microcode_patch(mc, size);
 494        show_saved_mc();
 495
 496        mutex_unlock(&x86_cpu_microcode_mutex);
 497}
 498
 499static bool load_builtin_intel_microcode(struct cpio_data *cp)
 500{
 501        unsigned int eax = 1, ebx, ecx = 0, edx;
 502        char name[30];
 503
 504        if (IS_ENABLED(CONFIG_X86_32))
 505                return false;
 506
 507        native_cpuid(&eax, &ebx, &ecx, &edx);
 508
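            /*
             * Built-in blobs are named after CPUID(1): family-model-stepping
             * in hex, e.g. family 6, model 0x8e, stepping 9 ->
             * "intel-ucode/06-8e-09".
             */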
 509        sprintf(name, "intel-ucode/%02x-%02x-%02x",
 510                      x86_family(eax), x86_model(eax), x86_stepping(eax));
 511
 512        return get_builtin_firmware(cp, name);
 513}
 514
 515/*
 516 * Print ucode update info.
 517 */
 518static void
 519print_ucode_info(struct ucode_cpu_info *uci, unsigned int date)
 520{
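            /*
             * The header date is packed as 0xMMDDYYYY: low word = year,
             * top byte = month, next byte = day; e.g. 0x04152019 prints as
             * 2019-04-15.
             */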
 521        pr_info_once("microcode updated early to revision 0x%x, date = %04x-%02x-%02x\n",
 522                     uci->cpu_sig.rev,
 523                     date & 0xffff,
 524                     date >> 24,
 525                     (date >> 16) & 0xff);
 526}
 527
 528#ifdef CONFIG_X86_32
 529
 530static int delay_ucode_info;
 531static int current_mc_date;
 532
 533/*
 534 * Print the early-updated ucode info once printk() works. This is the delayed info dump.
 535 */
 536void show_ucode_info_early(void)
 537{
 538        struct ucode_cpu_info uci;
 539
 540        if (delay_ucode_info) {
 541                collect_cpu_info_early(&uci);
 542                print_ucode_info(&uci, current_mc_date);
 543                delay_ucode_info = 0;
 544        }
 545}
 546
 547/*
 548 * At this point, we cannot call printk() yet. Delay printing microcode info in
 549 * show_ucode_info_early() until printk() works.
 550 */
 551static void print_ucode(struct ucode_cpu_info *uci)
 552{
 553        struct microcode_intel *mc;
 554        int *delay_ucode_info_p;
 555        int *current_mc_date_p;
 556
 557        mc = uci->mc;
 558        if (!mc)
 559                return;
 560
 561        delay_ucode_info_p = (int *)__pa_nodebug(&delay_ucode_info);
 562        current_mc_date_p = (int *)__pa_nodebug(&current_mc_date);
 563
 564        *delay_ucode_info_p = 1;
 565        *current_mc_date_p = mc->hdr.date;
 566}
 567#else
 568
 569static inline void print_ucode(struct ucode_cpu_info *uci)
 570{
 571        struct microcode_intel *mc;
 572
 573        mc = uci->mc;
 574        if (!mc)
 575                return;
 576
 577        print_ucode_info(uci, mc->hdr.date);
 578}
 579#endif
 580
 581static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
 582{
 583        struct microcode_intel *mc;
 584        u32 rev;
 585
 586        mc = uci->mc;
 587        if (!mc)
 588                return 0;
 589
 590        /*
 591         * Save us the MSR write below - which is a particularly expensive
 592         * operation - when the other hyperthread has updated the microcode
 593         * already.
 594         */
 595        rev = intel_get_microcode_revision();
 596        if (rev >= mc->hdr.rev) {
 597                uci->cpu_sig.rev = rev;
 598                return UCODE_OK;
 599        }
 600
 601        /*
 602         * Writeback and invalidate caches before updating microcode to avoid
 603         * internal issues depending on what the microcode is updating.
 604         */
 605        native_wbinvd();
 606
 607        /* write microcode via MSR 0x79 */
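            /*
             * MSR_IA32_UCODE_WRITE takes the address of the update payload
             * (mc->bits), which starts right after the fixed header.
             */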
 608        native_wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
 609
 610        rev = intel_get_microcode_revision();
 611        if (rev != mc->hdr.rev)
 612                return -1;
 613
 614        uci->cpu_sig.rev = rev;
 615
 616        if (early)
 617                print_ucode(uci);
 618        else
 619                print_ucode_info(uci, mc->hdr.date);
 620
 621        return 0;
 622}
 623
 624int __init save_microcode_in_initrd_intel(void)
 625{
 626        struct ucode_cpu_info uci;
 627        struct cpio_data cp;
 628
 629        /*
 630         * initrd is going away, clear patch ptr. We will scan the microcode one
 631         * last time before jettisoning and save a patch, if found. Then we will
 632         * update that pointer too, with a stable patch address to use when
 633         * resuming the cores.
 634         */
 635        intel_ucode_patch = NULL;
 636
 637        if (!load_builtin_intel_microcode(&cp))
 638                cp = find_microcode_in_initrd(ucode_path, false);
 639
 640        if (!(cp.data && cp.size))
 641                return 0;
 642
 643        collect_cpu_info_early(&uci);
 644
 645        scan_microcode(cp.data, cp.size, &uci, true);
 646
 647        show_saved_mc();
 648
 649        return 0;
 650}
 651
 652/*
 653 * Returns a pointer to the patch we found, or NULL if none matches.
 654 */
 655static struct microcode_intel *__load_ucode_intel(struct ucode_cpu_info *uci)
 656{
 657        static const char *path;
 658        struct cpio_data cp;
 659        bool use_pa;
 660
 661        if (IS_ENABLED(CONFIG_X86_32)) {
 662                path      = (const char *)__pa_nodebug(ucode_path);
 663                use_pa    = true;
 664        } else {
 665                path      = ucode_path;
 666                use_pa    = false;
 667        }
 668
 669        /* try built-in microcode first */
 670        if (!load_builtin_intel_microcode(&cp))
 671                cp = find_microcode_in_initrd(path, use_pa);
 672
 673        if (!(cp.data && cp.size))
 674                return NULL;
 675
 676        collect_cpu_info_early(uci);
 677
 678        return scan_microcode(cp.data, cp.size, uci, false);
 679}
 680
 681void __init load_ucode_intel_bsp(void)
 682{
 683        struct microcode_intel *patch;
 684        struct ucode_cpu_info uci;
 685
 686        patch = __load_ucode_intel(&uci);
 687        if (!patch)
 688                return;
 689
 690        uci.mc = patch;
 691
 692        apply_microcode_early(&uci, true);
 693}
 694
 695void load_ucode_intel_ap(void)
 696{
 697        struct microcode_intel *patch, **iup;
 698        struct ucode_cpu_info uci;
 699
 700        if (IS_ENABLED(CONFIG_X86_32))
 701                iup = (struct microcode_intel **) __pa_nodebug(&intel_ucode_patch);
 702        else
 703                iup = &intel_ucode_patch;
 704
 705reget:
 706        if (!*iup) {
 707                patch = __load_ucode_intel(&uci);
 708                if (!patch)
 709                        return;
 710
 711                *iup = patch;
 712        }
 713
 714        uci.mc = *iup;
 715
 716        if (apply_microcode_early(&uci, true)) {
 717                /* Mixed-silicon system? Try to refetch the proper patch: */
 718                *iup = NULL;
 719
 720                goto reget;
 721        }
 722}
 723
 724static struct microcode_intel *find_patch(struct ucode_cpu_info *uci)
 725{
 726        struct microcode_header_intel *phdr;
 727        struct ucode_patch *iter, *tmp;
 728
 729        list_for_each_entry_safe(iter, tmp, &microcode_cache, plist) {
 730
 731                phdr = (struct microcode_header_intel *)iter->data;
 732
 733                if (phdr->rev <= uci->cpu_sig.rev)
 734                        continue;
 735
 736                if (!find_matching_signature(phdr,
 737                                             uci->cpu_sig.sig,
 738                                             uci->cpu_sig.pf))
 739                        continue;
 740
 741                return iter->data;
 742        }
 743        return NULL;
 744}
 745
 746void reload_ucode_intel(void)
 747{
 748        struct microcode_intel *p;
 749        struct ucode_cpu_info uci;
 750
 751        collect_cpu_info_early(&uci);
 752
 753        p = find_patch(&uci);
 754        if (!p)
 755                return;
 756
 757        uci.mc = p;
 758
 759        apply_microcode_early(&uci, false);
 760}
 761
 762static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
 763{
 764        static struct cpu_signature prev;
 765        struct cpuinfo_x86 *c = &cpu_data(cpu_num);
 766        unsigned int val[2];
 767
 768        memset(csig, 0, sizeof(*csig));
 769
 770        csig->sig = cpuid_eax(0x00000001);
 771
 772        if ((c->x86_model >= 5) || (c->x86 > 6)) {
 773                /* get processor flags from MSR 0x17 */
 774                rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
 775                csig->pf = 1 << ((val[1] >> 18) & 7);
 776        }
 777
 778        csig->rev = c->microcode;
 779
 780        /* No extra locking on prev, races are harmless. */
 781        if (csig->sig != prev.sig || csig->pf != prev.pf || csig->rev != prev.rev) {
 782                pr_info("sig=0x%x, pf=0x%x, revision=0x%x\n",
 783                        csig->sig, csig->pf, csig->rev);
 784                prev = *csig;
 785        }
 786
 787        return 0;
 788}
 789
 790static enum ucode_state apply_microcode_intel(int cpu)
 791{
 792        struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
 793        struct cpuinfo_x86 *c = &cpu_data(cpu);
 794        bool bsp = c->cpu_index == boot_cpu_data.cpu_index;
 795        struct microcode_intel *mc;
 796        enum ucode_state ret;
 797        static int prev_rev;
 798        u32 rev;
 799
 800        /* We should bind the task to the CPU */
 801        if (WARN_ON(raw_smp_processor_id() != cpu))
 802                return UCODE_ERROR;
 803
 804        /* Look for a newer patch in our cache: */
 805        mc = find_patch(uci);
 806        if (!mc) {
 807                mc = uci->mc;
 808                if (!mc)
 809                        return UCODE_NFOUND;
 810        }
 811
 812        /*
 813         * Save us the MSR write below - which is a particularly expensive
 814         * operation - when the other hyperthread has updated the microcode
 815         * already.
 816         */
 817        rev = intel_get_microcode_revision();
 818        if (rev >= mc->hdr.rev) {
 819                ret = UCODE_OK;
 820                goto out;
 821        }
 822
 823        /*
 824         * Writeback and invalidate caches before updating microcode to avoid
 825         * internal issues depending on what the microcode is updating.
 826         */
 827        native_wbinvd();
 828
 829        /* write microcode via MSR 0x79 */
 830        wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
 831
 832        rev = intel_get_microcode_revision();
 833
 834        if (rev != mc->hdr.rev) {
 835                pr_err("CPU%d update to revision 0x%x failed\n",
 836                       cpu, mc->hdr.rev);
 837                return UCODE_ERROR;
 838        }
 839
 840        if (bsp && rev != prev_rev) {
 841                pr_info("updated to revision 0x%x, date = %04x-%02x-%02x\n",
 842                        rev,
 843                        mc->hdr.date & 0xffff,
 844                        mc->hdr.date >> 24,
 845                        (mc->hdr.date >> 16) & 0xff);
 846                prev_rev = rev;
 847        }
 848
 849        ret = UCODE_UPDATED;
 850
 851out:
 852        uci->cpu_sig.rev = rev;
 853        c->microcode     = rev;
 854
 855        /* Update boot_cpu_data's revision too, if we're on the BSP: */
 856        if (bsp)
 857                boot_cpu_data.microcode = rev;
 858
 859        return ret;
 860}
 861
 862static enum ucode_state generic_load_microcode(int cpu, struct iov_iter *iter)
 863{
 864        struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
 865        unsigned int curr_mc_size = 0, new_mc_size = 0;
 866        enum ucode_state ret = UCODE_OK;
 867        int new_rev = uci->cpu_sig.rev;
 868        u8 *new_mc = NULL, *mc = NULL;
 869        unsigned int csig, cpf;
 870
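            /*
             * The firmware image may contain several concatenated updates;
             * walk them all and keep the newest one matching this CPU.
             */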
 871        while (iov_iter_count(iter)) {
 872                struct microcode_header_intel mc_header;
 873                unsigned int mc_size, data_size;
 874                u8 *data;
 875
 876                if (!copy_from_iter_full(&mc_header, sizeof(mc_header), iter)) {
 877                        pr_err("error! Truncated or inaccessible header in microcode data file\n");
 878                        break;
 879                }
 880
 881                mc_size = get_totalsize(&mc_header);
 882                if (mc_size < sizeof(mc_header)) {
 883                        pr_err("error! Bad data in microcode data file (totalsize too small)\n");
 884                        break;
 885                }
 886                data_size = mc_size - sizeof(mc_header);
 887                if (data_size > iov_iter_count(iter)) {
 888                        pr_err("error! Bad data in microcode data file (truncated file?)\n");
 889                        break;
 890                }
 891
 892                /* For performance reasons, reuse mc area when possible */
 893                if (!mc || mc_size > curr_mc_size) {
 894                        vfree(mc);
 895                        mc = vmalloc(mc_size);
 896                        if (!mc)
 897                                break;
 898                        curr_mc_size = mc_size;
 899                }
 900
 901                memcpy(mc, &mc_header, sizeof(mc_header));
 902                data = mc + sizeof(mc_header);
 903                if (!copy_from_iter_full(data, data_size, iter) ||
 904                    microcode_sanity_check(mc, 1) < 0) {
 905                        break;
 906                }
 907
 908                csig = uci->cpu_sig.sig;
 909                cpf = uci->cpu_sig.pf;
 910                if (has_newer_microcode(mc, csig, cpf, new_rev)) {
 911                        vfree(new_mc);
 912                        new_rev = mc_header.rev;
 913                        new_mc  = mc;
 914                        new_mc_size = mc_size;
 915                        mc = NULL;      /* trigger new vmalloc */
 916                        ret = UCODE_NEW;
 917                }
 918        }
 919
 920        vfree(mc);
 921
 922        if (iov_iter_count(iter)) {
 923                vfree(new_mc);
 924                return UCODE_ERROR;
 925        }
 926
 927        if (!new_mc)
 928                return UCODE_NFOUND;
 929
 930        vfree(uci->mc);
 931        uci->mc = (struct microcode_intel *)new_mc;
 932
 933        /*
 934         * If early loading microcode is supported, save this mc into
 935         * permanent memory so that it will be loaded early when a CPU is
 936         * hot-added or resumes.
 937         */
 938        save_mc_for_early(new_mc, new_mc_size);
 939
 940        pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
 941                 cpu, new_rev, uci->cpu_sig.rev);
 942
 943        return ret;
 944}
 945
 946static bool is_blacklisted(unsigned int cpu)
 947{
 948        struct cpuinfo_x86 *c = &cpu_data(cpu);
 949
 950        /*
 951         * Late loading on model 79 with microcode revision less than 0x0b000021
 952         * and LLC size per core bigger than 2.5MB may result in a system hang.
 953         * This behavior is documented in item BDF90, #334165 (Intel Xeon
 954         * Processor E7-8800/4800 v4 Product Family).
 955         */
 956        if (c->x86 == 6 &&
 957            c->x86_model == INTEL_FAM6_BROADWELL_X &&
 958            c->x86_stepping == 0x01 &&
 959            llc_size_per_core > 2621440 &&
 960            c->microcode < 0x0b000021) {
 961                pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode);
 962                pr_err_once("Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
 963                return true;
 964        }
 965
 966        return false;
 967}
 968
 969static enum ucode_state request_microcode_fw(int cpu, struct device *device,
 970                                             bool refresh_fw)
 971{
 972        struct cpuinfo_x86 *c = &cpu_data(cpu);
 973        const struct firmware *firmware;
 974        struct iov_iter iter;
 975        enum ucode_state ret;
 976        struct kvec kvec;
 977        char name[30];
 978
 979        if (is_blacklisted(cpu))
 980                return UCODE_NFOUND;
 981
 982        sprintf(name, "intel-ucode/%02x-%02x-%02x",
 983                c->x86, c->x86_model, c->x86_stepping);
 984
 985        if (request_firmware_direct(&firmware, name, device)) {
 986                pr_debug("data file %s load failed\n", name);
 987                return UCODE_NFOUND;
 988        }
 989
 990        kvec.iov_base = (void *)firmware->data;
 991        kvec.iov_len = firmware->size;
 992        iov_iter_kvec(&iter, WRITE, &kvec, 1, firmware->size);
 993        ret = generic_load_microcode(cpu, &iter);
 994
 995        release_firmware(firmware);
 996
 997        return ret;
 998}
 999
1000static enum ucode_state
1001request_microcode_user(int cpu, const void __user *buf, size_t size)
1002{
1003        struct iov_iter iter;
1004        struct iovec iov;
1005
1006        if (is_blacklisted(cpu))
1007                return UCODE_NFOUND;
1008
1009        iov.iov_base = (void __user *)buf;
1010        iov.iov_len = size;
1011        iov_iter_init(&iter, WRITE, &iov, 1, size);
1012
1013        return generic_load_microcode(cpu, &iter);
1014}
1015
1016static struct microcode_ops microcode_intel_ops = {
1017        .request_microcode_user           = request_microcode_user,
1018        .request_microcode_fw             = request_microcode_fw,
1019        .collect_cpu_info                 = collect_cpu_info,
1020        .apply_microcode                  = apply_microcode_intel,
1021};
1022
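    /*
     * c->x86_cache_size is in KB; the result is bytes of LLC per physical
     * core, e.g. a 32768 KB cache shared by 8 cores gives 4194304 bytes.
     * is_blacklisted() compares this against 2.5 MB (2621440 bytes).
     */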
1023static int __init calc_llc_size_per_core(struct cpuinfo_x86 *c)
1024{
1025        u64 llc_size = c->x86_cache_size * 1024ULL;
1026
1027        do_div(llc_size, c->x86_max_cores);
1028
1029        return (int)llc_size;
1030}
1031
1032struct microcode_ops * __init init_intel_microcode(void)
1033{
1034        struct cpuinfo_x86 *c = &boot_cpu_data;
1035
1036        if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
1037            cpu_has(c, X86_FEATURE_IA64)) {
1038                pr_err("Intel CPU family 0x%x not supported\n", c->x86);
1039                return NULL;
1040        }
1041
1042        llc_size_per_core = calc_llc_size_per_core(c);
1043
1044        return &microcode_intel_ops;
1045}
1046