linux/arch/x86/kernel/cpu/amd.c
#include <linux/export.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/mm.h>

#include <linux/io.h>
#include <linux/sched.h>
#include <asm/processor.h>
#include <asm/apic.h>
#include <asm/cpu.h>
#include <asm/pci-direct.h>

#ifdef CONFIG_X86_64
# include <asm/numa_64.h>
# include <asm/mmconfig.h>
# include <asm/cacheflush.h>
#endif

#include "cpu.h"

#ifdef CONFIG_X86_32
/*
 *      B step AMD K6 before B 9730xxxx have hardware bugs that can cause
 *      misexecution of code under Linux. Owners of such processors should
 *      contact AMD for precise details and a CPU swap.
 *
 *      See     http://www.multimania.com/poulot/k6bug.html
 *      and     section 2.6.2 of "AMD-K6 Processor Revision Guide - Model 6"
 *              (Publication # 21266  Issue Date: August 1998)
 *
 *      The following test is erm.. interesting. AMD neglected to up
 *      the chip setting when fixing the bug but they also tweaked some
 *      performance at the same time..
 */

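/*
 * vide() is an empty function used only as an indirect-call target by the
 * timing loop in init_amd_k6() below.
 */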
extern void vide(void);
__asm__(".align 4\nvide: ret");

static void __cpuinit init_amd_k5(struct cpuinfo_x86 *c)
{
/*
 * General Systems BIOSen alias the cpu frequency registers
 * of the Elan at 0x000df000. Unfortunately, one of the Linux
 * drivers subsequently pokes it, and changes the CPU speed.
 * Workaround: Remove the unneeded alias.
 */
#define CBAR            (0xfffc) /* Configuration Base Address  (32-bit) */
#define CBAR_ENB        (0x80000000)
#define CBAR_KEY        (0X000000CB)
        if (c->x86_model == 9 || c->x86_model == 10) {
                if (inl(CBAR) & CBAR_ENB)
                        outl(0 | CBAR_KEY, CBAR);
        }
}


static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c)
{
        u32 l, h;
        int mbytes = num_physpages >> (20-PAGE_SHIFT);

        if (c->x86_model < 6) {
                /* Based on AMD doc 20734R - June 2000 */
                if (c->x86_model == 0) {
                        clear_cpu_cap(c, X86_FEATURE_APIC);
                        set_cpu_cap(c, X86_FEATURE_PGE);
                }
                return;
        }

        if (c->x86_model == 6 && c->x86_mask == 1) {
                const int K6_BUG_LOOP = 1000000;
                int n;
                void (*f_vide)(void);
                unsigned long d, d2;

                printk(KERN_INFO "AMD K6 stepping B detected - ");

                /*
                 * It looks like AMD fixed the 2.6.2 bug and improved indirect
                 * calls at the same time.
                 */

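                /*
                 * Time K6_BUG_LOOP indirect calls through vide() with the
                 * TSC; averaging more than 20 cycles per call is taken to
                 * mean the old, buggy stepping.
                 */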
                n = K6_BUG_LOOP;
                f_vide = vide;
                rdtscl(d);
                while (n--)
                        f_vide();
                rdtscl(d2);
                d = d2-d;

                if (d > 20*K6_BUG_LOOP)
                        printk(KERN_CONT
                                "system stability may be impaired when more than 32 MB are used.\n");
                else
                        printk(KERN_CONT "probably OK (after B9730xxxx).\n");
        }

        /* K6 with old style WHCR */
        if (c->x86_model < 8 ||
           (c->x86_model == 8 && c->x86_mask < 8)) {
                /* We can only write allocate on the low 508Mb */
                if (mbytes > 508)
                        mbytes = 508;

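                /*
                 * Old-style WHCR: the write-allocate limit is programmed in
                 * 4 MB units, and only if the low half of the MSR is still
                 * clear, i.e. the BIOS hasn't already set it up.
                 */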
                rdmsr(MSR_K6_WHCR, l, h);
                if ((l&0x0000FFFF) == 0) {
                        unsigned long flags;
                        l = (1<<0)|((mbytes/4)<<1);
                        local_irq_save(flags);
                        wbinvd();
                        wrmsr(MSR_K6_WHCR, l, h);
                        local_irq_restore(flags);
                        printk(KERN_INFO "Enabling old style K6 write allocation for %d Mb\n",
                                mbytes);
                }
                return;
        }

        if ((c->x86_model == 8 && c->x86_mask > 7) ||
             c->x86_model == 9 || c->x86_model == 13) {
                /* The more serious chips .. */

                if (mbytes > 4092)
                        mbytes = 4092;

                rdmsr(MSR_K6_WHCR, l, h);
                if ((l&0xFFFF0000) == 0) {
                        unsigned long flags;
                        l = ((mbytes>>2)<<22)|(1<<16);
                        local_irq_save(flags);
                        wbinvd();
                        wrmsr(MSR_K6_WHCR, l, h);
                        local_irq_restore(flags);
                        printk(KERN_INFO "Enabling new style K6 write allocation for %d Mb\n",
                                mbytes);
                }

                return;
        }

        if (c->x86_model == 10) {
                /* AMD Geode LX is model 10 */
                /* placeholder for any needed mods */
                return;
        }
}

static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
{
        /* Called from identify_secondary_cpu()?  The boot CPU needs no check. */
        if (!c->cpu_index)
                return;

        /*
         * Certain Athlons might work (for various values of 'work') in SMP
         * but they are not certified as MP capable.
         */
        /* Athlon 660/661 is valid. */
        if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
            (c->x86_mask == 1)))
                goto valid_k7;

        /* Duron 670 is valid */
        if ((c->x86_model == 7) && (c->x86_mask == 0))
                goto valid_k7;

        /*
         * Athlon 662, Duron 671, and Athlon >model 7 have the MP capability
         * bit. It's worth noting that the A5 stepping (662) of some
         * Athlon XPs has the MP bit set.
         * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
         * more.
         */
        if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
            ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
             (c->x86_model > 7))
                if (cpu_has_mp)
                        goto valid_k7;

        /* If we get here, this is not a certified SMP capable AMD system. */

        /*
         * Don't taint if we are running an SMP kernel on a single non-MP
         * approved Athlon
         */
        WARN_ONCE(1, "WARNING: This combination of AMD"
                " processors is not suitable for SMP.\n");
        if (!test_taint(TAINT_UNSAFE_SMP))
                add_taint(TAINT_UNSAFE_SMP);

valid_k7:
        ;
}

static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
{
        u32 l, h;

        /*
         * Bit 15 of the Athlon-specific MSR 15 needs to be 0
         * to enable SSE on Palomino/Morgan/Barton CPUs.
         * If the BIOS didn't enable it already, enable it here.
         */
        if (c->x86_model >= 6 && c->x86_model <= 10) {
                if (!cpu_has(c, X86_FEATURE_XMM)) {
                        printk(KERN_INFO "Enabling disabled K7/SSE Support.\n");
                        rdmsr(MSR_K7_HWCR, l, h);
                        l &= ~0x00008000;
                        wrmsr(MSR_K7_HWCR, l, h);
                        set_cpu_cap(c, X86_FEATURE_XMM);
                }
        }

        /*
         * It's been determined by AMD that Athlons since model 8 stepping 1
         * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx,
         * as per AMD technical note 27212 0.2
         */
        if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
                rdmsr(MSR_K7_CLK_CTL, l, h);
                if ((l & 0xfff00000) != 0x20000000) {
                        printk(KERN_INFO
                            "CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
                                        l, ((l & 0x000fffff)|0x20000000));
                        wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
                }
        }

        set_cpu_cap(c, X86_FEATURE_K7);

        amd_k7_smp_check(c);
}
#endif

#ifdef CONFIG_NUMA
/*
 * To work around a broken NUMA config.  Read the comment in
 * srat_detect_node().
 */
static int __cpuinit nearby_node(int apicid)
{
        int i, node;

        for (i = apicid - 1; i >= 0; i--) {
                node = __apicid_to_node[i];
                if (node != NUMA_NO_NODE && node_online(node))
                        return node;
        }
        for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
                node = __apicid_to_node[i];
                if (node != NUMA_NO_NODE && node_online(node))
                        return node;
        }
        return first_node(node_online_map); /* Shouldn't happen */
}
#endif

/*
 * Fixup core topology information for
 * (1) AMD multi-node processors
 *     Assumption: Number of cores in each internal node is the same.
 * (2) AMD processors supporting compute units
 */
#ifdef CONFIG_X86_HT
static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c)
{
        u32 nodes, cores_per_cu = 1;
        u8 node_id;
        int cpu = smp_processor_id();

        /* get information required for multi-node processors */
        if (cpu_has(c, X86_FEATURE_TOPOEXT)) {
                u32 eax, ebx, ecx, edx;

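                /*
                 * CPUID leaf 0x8000001e: ECX holds the node id and the number
                 * of nodes per processor, EBX the compute unit id and the
                 * number of cores per compute unit.
                 */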
                cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
                nodes = ((ecx >> 8) & 7) + 1;
                node_id = ecx & 7;

                /* get compute unit information */
                smp_num_siblings = ((ebx >> 8) & 3) + 1;
                c->compute_unit_id = ebx & 0xff;
                cores_per_cu += ((ebx >> 8) & 3);
        } else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
                u64 value;

                rdmsrl(MSR_FAM10H_NODE_ID, value);
                nodes = ((value >> 3) & 7) + 1;
                node_id = value & 7;
        } else
                return;

        /* fixup multi-node processor information */
        if (nodes > 1) {
                u32 cores_per_node;
                u32 cus_per_node;

                set_cpu_cap(c, X86_FEATURE_AMD_DCM);
                cores_per_node = c->x86_max_cores / nodes;
                cus_per_node = cores_per_node / cores_per_cu;

                /* store NodeID, use llc_shared_map to store sibling info */
                per_cpu(cpu_llc_id, cpu) = node_id;

                /* core id has to be in the [0 .. cores_per_node - 1] range */
                c->cpu_core_id %= cores_per_node;
                c->compute_unit_id %= cus_per_node;
        }
}
#endif

/*
 * On an AMD dual-core setup the lower bits of the APIC id distinguish the
 * cores.  Assumes number of cores is a power of two.
 */
static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_HT
        unsigned bits;
        int cpu = smp_processor_id();

        bits = c->x86_coreid_bits;
        /* Low order bits define the core id (index of core in socket) */
        c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
        /* Convert the initial APIC ID into the socket ID */
        c->phys_proc_id = c->initial_apicid >> bits;
        /* use socket ID also for last level cache */
        per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
        amd_get_topology(c);
#endif
}

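/*
 * Return the northbridge (node) id of @cpu; amd_detect_cmp() and
 * amd_get_topology() cache it in cpu_llc_id.
 */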
int amd_get_nb_id(int cpu)
{
        int id = 0;
#ifdef CONFIG_SMP
        id = per_cpu(cpu_llc_id, cpu);
#endif
        return id;
}
EXPORT_SYMBOL_GPL(amd_get_nb_id);

static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
        int cpu = smp_processor_id();
        int node;
        unsigned apicid = c->apicid;

        node = numa_cpu_node(cpu);
        if (node == NUMA_NO_NODE)
                node = per_cpu(cpu_llc_id, cpu);

        /*
         * On a multi-fabric platform (e.g. Numascale NumaChip) a
         * platform-specific handler needs to be called to fixup some
         * IDs of the CPU.
         */
        if (x86_cpuinit.fixup_cpu_id)
                x86_cpuinit.fixup_cpu_id(c, node);

        if (!node_online(node)) {
                /*
                 * Two possibilities here:
                 *
                 * - The CPU is missing memory and no node was created.  In
                 *   that case try picking one from a nearby CPU.
                 *
                 * - The APIC IDs differ from the HyperTransport node IDs
                 *   which the K8 northbridge parsing fills in.  Assume
                 *   they are all increased by a constant offset, but in
                 *   the same order as the HT nodeids.  If that doesn't
                 *   result in a usable node fall back to the path for the
                 *   previous case.
                 *
                 * This workaround operates directly on the mapping between
                 * APIC ID and NUMA node, assuming a certain relationship
                 * between APIC ID, HT node ID and NUMA topology.  As going
                 * through the CPU mapping may alter the outcome, directly
                 * access __apicid_to_node[].
                 */
                int ht_nodeid = c->initial_apicid;

                if (ht_nodeid >= 0 &&
                    __apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
                        node = __apicid_to_node[ht_nodeid];
                /* Pick a nearby node */
                if (!node_online(node))
                        node = nearby_node(apicid);
        }
        numa_set_node(cpu, node);
#endif
}

static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_HT
        unsigned bits, ecx;

        /* Multi core CPU? */
        if (c->extended_cpuid_level < 0x80000008)
                return;

        ecx = cpuid_ecx(0x80000008);

        c->x86_max_cores = (ecx & 0xff) + 1;

        /* CPU telling us the core id bits shift? */
        bits = (ecx >> 12) & 0xF;

        /* Otherwise recompute */
        if (bits == 0) {
                while ((1 << bits) < c->x86_max_cores)
                        bits++;
        }

        c->x86_coreid_bits = bits;
#endif
}

static void __cpuinit bsp_init_amd(struct cpuinfo_x86 *c)
{
        if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {

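                /*
                 * HWCR bit 24 (TscFreqSel): when set, the TSC counts at the
                 * P0 frequency.  Warn if firmware left it clear.
                 */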
                if (c->x86 > 0x10 ||
                    (c->x86 == 0x10 && c->x86_model >= 0x2)) {
                        u64 val;

                        rdmsrl(MSR_K7_HWCR, val);
                        if (!(val & BIT(24)))
                                printk(KERN_WARNING FW_BUG "TSC doesn't count "
                                        "with P0 frequency!\n");
                }
        }

        if (c->x86 == 0x15) {
                unsigned long upperbit;
                u32 cpuid, assoc;

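                /*
                 * CPUID 0x80000005 EDX describes the L1 instruction cache:
                 * bits 31:24 size in KB, bits 23:16 associativity, so
                 * upperbit is the size of one cache way in bytes.  The
                 * resulting va_align.mask keeps mmap()ed areas aligned to a
                 * way boundary to avoid aliasing in the L1I shared by the
                 * cores of a compute unit.
                 */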
                cpuid    = cpuid_edx(0x80000005);
                assoc    = cpuid >> 16 & 0xff;
                upperbit = ((cpuid >> 24) << 10) / assoc;

                va_align.mask     = (upperbit - 1) & PAGE_MASK;
                va_align.flags    = ALIGN_VA_32 | ALIGN_VA_64;
        }
}

static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
{
        early_init_amd_mc(c);

        /*
         * c->x86_power is 8000_0007 edx. Bit 8 means the TSC runs at a
         * constant rate with P/T states and does not stop in deep C-states.
         */
        if (c->x86_power & (1 << 8)) {
                set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
                set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
                if (!check_tsc_unstable())
                        sched_clock_stable = 1;
        }

#ifdef CONFIG_X86_64
        set_cpu_cap(c, X86_FEATURE_SYSCALL32);
#else
        /* Set MTRR capability flag if appropriate */
        if (c->x86 == 5)
                if (c->x86_model == 13 || c->x86_model == 9 ||
                    (c->x86_model == 8 && c->x86_mask >= 8))
                        set_cpu_cap(c, X86_FEATURE_K6_MTRR);
#endif
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
        /* check CPU config space for extended APIC ID */
        if (cpu_has_apic && c->x86 >= 0xf) {
                unsigned int val;
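                /* Device 24, function 0 is the CPU's on-chip northbridge. */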
                val = read_pci_config(0, 24, 0, 0x68);
                if ((val & ((1 << 17) | (1 << 18))) == ((1 << 17) | (1 << 18)))
                        set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
        }
#endif
}

static void __cpuinit init_amd(struct cpuinfo_x86 *c)
{
        u32 dummy;

#ifdef CONFIG_SMP
        unsigned long long value;

        /*
         * Disable the TLB flush filter by setting HWCR.FFDIS on K8
         * (bit 6 of MSR C001_0015).
         *
         * Erratum 63 for SH-B3 steppings
         * Erratum 122 for all steppings (F+ have it disabled by default)
         */
        if (c->x86 == 0xf) {
                rdmsrl(MSR_K7_HWCR, value);
                value |= 1 << 6;
                wrmsrl(MSR_K7_HWCR, value);
        }
#endif

        early_init_amd(c);

        /*
         * Bit 31 in the standard CPUID leaf is used for a nonstandard 3DNow ID;
         * 3DNow is identified by bit 31 in extended CPUID (1*32+31) anyway.
         */
        clear_cpu_cap(c, 0*32+31);

#ifdef CONFIG_X86_64
        /* On C+ stepping K8 rep microcode works well for copy/memset */
        if (c->x86 == 0xf) {
                u32 level;

                level = cpuid_eax(1);
                if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
                        set_cpu_cap(c, X86_FEATURE_REP_GOOD);

                /*
                 * Some BIOSes incorrectly force this feature, but only K8
                 * revision D (model = 0x14) and later actually support it.
                 * (AMD Erratum #110, docId: 25759).
                 */
                if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
                        u64 val;

                        clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
                        if (!rdmsrl_amd_safe(0xc001100d, &val)) {
                                val &= ~(1ULL << 32);
                                wrmsrl_amd_safe(0xc001100d, val);
                        }
                }

        }
        if (c->x86 >= 0x10)
                set_cpu_cap(c, X86_FEATURE_REP_GOOD);

        /* get apicid instead of initial apic id from cpuid */
        c->apicid = hard_smp_processor_id();
#else

        /*
         *      FIXME: We should handle the K5 here. Set up the write
         *      range and also turn on MSR 83 bits 4 and 31 (write alloc,
         *      no bus pipeline)
         */

        switch (c->x86) {
        case 4:
                init_amd_k5(c);
                break;
        case 5:
                init_amd_k6(c);
                break;
        case 6: /* An Athlon/Duron */
                init_amd_k7(c);
                break;
        }

        /* K6s report MCEs but don't actually have all the MSRs */
        if (c->x86 < 6)
                clear_cpu_cap(c, X86_FEATURE_MCE);
#endif

        /* Enable workaround for FXSAVE leak */
        if (c->x86 >= 6)
                set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK);

        if (!c->x86_model_id[0]) {
                switch (c->x86) {
                case 0xf:
                        /* Should distinguish models here, but this is only
                           a fallback anyway. */
                        strcpy(c->x86_model_id, "Hammer");
                        break;
                }
        }

        /* re-enable TopologyExtensions if switched off by BIOS */
        if ((c->x86 == 0x15) &&
            (c->x86_model >= 0x10) && (c->x86_model <= 0x1f) &&
            !cpu_has(c, X86_FEATURE_TOPOEXT)) {
                u64 val;

                if (!rdmsrl_amd_safe(0xc0011005, &val)) {
                        val |= 1ULL << 54;
                        wrmsrl_amd_safe(0xc0011005, val);
                        rdmsrl(0xc0011005, val);
                        if (val & (1ULL << 54)) {
                                set_cpu_cap(c, X86_FEATURE_TOPOEXT);
                                printk(KERN_INFO FW_INFO "CPU: Re-enabling "
                                  "disabled Topology Extensions Support\n");
                        }
                }
        }

        cpu_detect_cache_sizes(c);

        /* Multi core CPU? */
        if (c->extended_cpuid_level >= 0x80000008) {
                amd_detect_cmp(c);
                srat_detect_node(c);
        }

#ifdef CONFIG_X86_32
        detect_ht(c);
#endif

        if (c->extended_cpuid_level >= 0x80000006) {
                if (cpuid_edx(0x80000006) & 0xf000)
                        num_cache_leaves = 4;
                else
                        num_cache_leaves = 3;
        }

        if (c->x86 >= 0xf)
                set_cpu_cap(c, X86_FEATURE_K8);

        if (cpu_has_xmm2) {
                /* MFENCE stops RDTSC speculation */
                set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
        }

#ifdef CONFIG_X86_64
        if (c->x86 == 0x10) {
                /* do this for boot cpu */
                if (c == &boot_cpu_data)
                        check_enable_amd_mmconf_dmi();

                fam10h_check_enable_mmcfg();
        }

        if (c == &boot_cpu_data && c->x86 >= 0xf) {
                unsigned long long tseg;

                /*
                 * Split up direct mapping around the TSEG SMM area.
                 * Don't do it for gbpages because there seems very little
                 * benefit in doing so.
                 */
                if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
                        printk(KERN_DEBUG "tseg: %010llx\n", tseg);
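                        /*
                         * Only remap with 4k pages if TSEG actually lies
                         * inside the direct mapping: below max_low_pfn_mapped,
                         * or above 4 GB and below max_pfn_mapped.
                         */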
                        if ((tseg>>PMD_SHIFT) <
                                (max_low_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) ||
                                ((tseg>>PMD_SHIFT) <
                                (max_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) &&
                                (tseg>>PMD_SHIFT) >= (1ULL<<(32 - PMD_SHIFT))))
                                set_memory_4k((unsigned long)__va(tseg), 1);
                }
        }
#endif

        /*
         * Family 0x12 and above processors have an APIC timer that keeps
         * running in deep C-states.
         */
        if (c->x86 > 0x11)
                set_cpu_cap(c, X86_FEATURE_ARAT);

        /*
         * Disable GART TLB Walk Errors on Fam10h. We do this here
         * because this is always needed when GART is enabled, even in a
         * kernel which has no MCE support built in.
         */
        if (c->x86 == 0x10) {
                /*
                 * The BIOS should disable GartTlbWlk errors itself. If it
                 * doesn't, do it here, as suggested by the BKDG.
                 *
                 * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
                 */
                u64 mask;
                int err;

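                /* Bit 10 of the MC4 mask MSR masks GART table walk error reporting. */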
                err = rdmsrl_safe(MSR_AMD64_MCx_MASK(4), &mask);
                if (err == 0) {
                        mask |= (1 << 10);
                        checking_wrmsrl(MSR_AMD64_MCx_MASK(4), mask);
                }
        }

        rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
}

#ifdef CONFIG_X86_32
static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
                                                        unsigned int size)
{
        /* AMD errata T13 (order #21922) */
        if (c->x86 == 6) {
                /* Duron Rev A0 */
                if (c->x86_model == 3 && c->x86_mask == 0)
                        size = 64;
                /* Tbird rev A1/A2 */
                if (c->x86_model == 4 &&
                        (c->x86_mask == 0 || c->x86_mask == 1))
                        size = 256;
        }
        return size;
}
#endif

static const struct cpu_dev __cpuinitconst amd_cpu_dev = {
        .c_vendor       = "AMD",
        .c_ident        = { "AuthenticAMD" },
#ifdef CONFIG_X86_32
        .c_models = {
                { .vendor = X86_VENDOR_AMD, .family = 4, .model_names =
                  {
                          [3] = "486 DX/2",
                          [7] = "486 DX/2-WB",
                          [8] = "486 DX/4",
                          [9] = "486 DX/4-WB",
                          [14] = "Am5x86-WT",
                          [15] = "Am5x86-WB"
                  }
                },
        },
        .c_size_cache   = amd_size_cache,
#endif
        .c_early_init   = early_init_amd,
        .c_bsp_init     = bsp_init_amd,
        .c_init         = init_amd,
        .c_x86_vendor   = X86_VENDOR_AMD,
};

cpu_dev_register(amd_cpu_dev);

/*
 * AMD errata checking
 *
 * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or
 * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
 * have an OSVW id assigned, which it takes as first argument. Both take a
 * variable number of family-specific model-stepping ranges created by
 * AMD_MODEL_RANGE(). Each erratum also has to be declared as extern const
 * int[] in arch/x86/include/asm/processor.h.
 *
 * Example:
 *
 * const int amd_erratum_319[] =
 *      AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2),
 *                         AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0),
 *                         AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
 */

const int amd_erratum_400[] =
        AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
                            AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));
EXPORT_SYMBOL_GPL(amd_erratum_400);

const int amd_erratum_383[] =
        AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
EXPORT_SYMBOL_GPL(amd_erratum_383);

bool cpu_has_amd_erratum(const int *erratum)
{
        struct cpuinfo_x86 *cpu = __this_cpu_ptr(&cpu_info);
        int osvw_id = *erratum++;
        u32 range;
        u32 ms;

        /*
         * If called early enough that current_cpu_data hasn't been initialized
         * yet, fall back to boot_cpu_data.
         */
        if (cpu->x86 == 0)
                cpu = &boot_cpu_data;

        if (cpu->x86_vendor != X86_VENDOR_AMD)
                return false;

        if (osvw_id >= 0 && osvw_id < 65536 &&
            cpu_has(cpu, X86_FEATURE_OSVW)) {
                u64 osvw_len;

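                /*
                 * OSVW (OS Visible Workaround): the length MSR says how many
                 * OSVW ids are valid; the erratum's id then selects one bit
                 * in the status MSRs, 64 ids per MSR.
                 */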
                rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len);
                if (osvw_id < osvw_len) {
                        u64 osvw_bits;

                        rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6),
                            osvw_bits);
                        return osvw_bits & (1ULL << (osvw_id & 0x3f));
                }
        }

        /* OSVW unavailable or ID unknown, match family-model-stepping range */
        ms = (cpu->x86_model << 4) | cpu->x86_mask;
        while ((range = *erratum++))
                if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
                    (ms >= AMD_MODEL_RANGE_START(range)) &&
                    (ms <= AMD_MODEL_RANGE_END(range)))
                        return true;

        return false;
}

EXPORT_SYMBOL_GPL(cpu_has_amd_erratum);
