linux/arch/x86/kernel/cpu/centaur.c
// SPDX-License-Identifier: GPL-2.0

#include <linux/sched.h>
#include <linux/sched/clock.h>

#include <asm/cpufeature.h>
#include <asm/e820/api.h>
#include <asm/mtrr.h>
#include <asm/msr.h>

#include "cpu.h"

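/*
 * VIA PadLock ACE (Advanced Cryptography Engine) and hardware RNG unit bits.
 * The *_PRESENT/*_ENABLED flags are reported in CPUID leaf 0xC0000001 EDX;
 * ACE_FCR and RNG_ENABLE are written to the VIA MSRs to switch the units on.
 */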
#define ACE_PRESENT     (1 << 6)
#define ACE_ENABLED     (1 << 7)
#define ACE_FCR         (1 << 28)       /* MSR_VIA_FCR */

#define RNG_PRESENT     (1 << 2)
#define RNG_ENABLED     (1 << 3)
#define RNG_ENABLE      (1 << 6)        /* MSR_VIA_RNG */

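/*
 * Primary and secondary processor-based VM-execution control bits, matched
 * against the VMX capability MSRs in centaur_detect_vmx_virtcap() below.
 */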
#define X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW    0x00200000
#define X86_VMX_FEATURE_PROC_CTLS_VNMI          0x00400000
#define X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS      0x80000000
#define X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC    0x00000001
#define X86_VMX_FEATURE_PROC_CTLS2_EPT          0x00000002
#define X86_VMX_FEATURE_PROC_CTLS2_VPID         0x00000020

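/*
 * Family 6 (C3/C7/Nano) setup: enable the PadLock ACE crypto and RNG units
 * when present but disabled, export the Centaur extended feature flags, and
 * set the model-dependent capability bits (CX8, 3DNow!, REP_GOOD).
 */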
static void init_c3(struct cpuinfo_x86 *c)
{
        u32  lo, hi;

        /* Test for Centaur Extended Feature Flags presence */
        if (cpuid_eax(0xC0000000) >= 0xC0000001) {
                u32 tmp = cpuid_edx(0xC0000001);

                /* enable ACE unit, if present and disabled */
                if ((tmp & (ACE_PRESENT | ACE_ENABLED)) == ACE_PRESENT) {
                        rdmsr(MSR_VIA_FCR, lo, hi);
                        lo |= ACE_FCR;          /* enable ACE unit */
                        wrmsr(MSR_VIA_FCR, lo, hi);
                        pr_info("CPU: Enabled ACE h/w crypto\n");
                }

                /* enable RNG unit, if present and disabled */
                if ((tmp & (RNG_PRESENT | RNG_ENABLED)) == RNG_PRESENT) {
                        rdmsr(MSR_VIA_RNG, lo, hi);
                        lo |= RNG_ENABLE;       /* enable RNG unit */
                        wrmsr(MSR_VIA_RNG, lo, hi);
                        pr_info("CPU: Enabled h/w RNG\n");
                }

                /*
                 * store Centaur Extended Feature Flags as
                 * word 5 of the CPU capability bit array
                 */
                c->x86_capability[CPUID_C000_0001_EDX] = cpuid_edx(0xC0000001);
        }
#ifdef CONFIG_X86_32
        /* Cyrix III family needs CX8 & PGE explicitly enabled. */
        if (c->x86_model >= 6 && c->x86_model <= 13) {
                rdmsr(MSR_VIA_FCR, lo, hi);
                lo |= (1<<1 | 1<<7);
                wrmsr(MSR_VIA_FCR, lo, hi);
                set_cpu_cap(c, X86_FEATURE_CX8);
        }

        /* Before Nehemiah, the C3s had 3DNow! */
        if (c->x86_model >= 6 && c->x86_model < 9)
                set_cpu_cap(c, X86_FEATURE_3DNOW);
#endif
        if (c->x86 == 0x6 && c->x86_model >= 0xf) {
                c->x86_cache_alignment = c->x86_clflush_size * 2;
                set_cpu_cap(c, X86_FEATURE_REP_GOOD);
        }

        cpu_detect_cache_sizes(c);
}

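/*
 * Feature bits in MSR_IDT_FCR1, the WinChip (family 5) feature control
 * register, applied via fcr_set/fcr_clr in init_centaur() below.
 */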
enum {
                ECX8            = 1<<1,
                EIERRINT        = 1<<2,
                DPM             = 1<<3,
                DMCE            = 1<<4,
                DSTPCLK         = 1<<5,
                ELINEAR         = 1<<6,
                DSMC            = 1<<7,
                DTLOCK          = 1<<8,
                EDCTLB          = 1<<8,
                EMMX            = 1<<9,
                DPDC            = 1<<11,
                EBRPRED         = 1<<12,
                DIC             = 1<<13,
                DDC             = 1<<14,
                DNA             = 1<<15,
                ERETSTK         = 1<<16,
                E2MMX           = 1<<19,
                EAMD3D          = 1<<20,
};

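/*
 * Early fixups: MCR-based MTRR emulation on WinChip (family 5), constant TSC
 * on family 6 model 0xf and later, SYSENTER for 32-bit compat on 64-bit
 * kernels, and constant/nonstop TSC when the invariant-TSC bit (bit 8 of the
 * extended power-management leaf) is set.
 */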
static void early_init_centaur(struct cpuinfo_x86 *c)
{
        switch (c->x86) {
#ifdef CONFIG_X86_32
        case 5:
                /* Emulate MTRRs using Centaur's MCR. */
                set_cpu_cap(c, X86_FEATURE_CENTAUR_MCR);
                break;
#endif
        case 6:
                if (c->x86_model >= 0xf)
                        set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
                break;
        }
#ifdef CONFIG_X86_64
        set_cpu_cap(c, X86_FEATURE_SYSENTER32);
#endif
        if (c->x86_power & (1 << 8)) {
                set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
                set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
        }
}

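/*
 * Probe the VMX capability MSRs and set the virtualization feature flags
 * (TPR shadow, virtual NMI, FlexPriority, EPT, VPID), mirroring the Intel
 * VMX feature detection.
 */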
static void centaur_detect_vmx_virtcap(struct cpuinfo_x86 *c)
{
        u32 vmx_msr_low, vmx_msr_high, msr_ctl, msr_ctl2;

        rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, vmx_msr_low, vmx_msr_high);
        msr_ctl = vmx_msr_high | vmx_msr_low;

        if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW)
                set_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
        if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_VNMI)
                set_cpu_cap(c, X86_FEATURE_VNMI);
        if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS) {
                rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
                      vmx_msr_low, vmx_msr_high);
                msr_ctl2 = vmx_msr_high | vmx_msr_low;
                if ((msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC) &&
                    (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW))
                        set_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
                if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_EPT)
                        set_cpu_cap(c, X86_FEATURE_EPT);
                if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VPID)
                        set_cpu_cap(c, X86_FEATURE_VPID);
        }
}

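/*
 * Main c_init callback: program the WinChip FCR and pick a model name on
 * family 5, hand family 6 off to init_c3(), and detect architectural
 * perfmon and VMX virtualization capabilities.
 */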
static void init_centaur(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
        char *name;
        u32  fcr_set = 0;
        u32  fcr_clr = 0;
        u32  lo, hi, newlo;
        u32  aa, bb, cc, dd;

        /*
         * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
         * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
         */
        clear_cpu_cap(c, 0*32+31);
#endif
        early_init_centaur(c);
        init_intel_cacheinfo(c);
        detect_num_cpu_cores(c);
#ifdef CONFIG_X86_32
        detect_ht(c);
#endif

        if (c->cpuid_level > 9) {
                unsigned int eax = cpuid_eax(10);

                /*
                 * Check for version and the number of counters
                 * Version(eax[7:0]) can't be 0;
                 * Counters(eax[15:8]) should be greater than 1;
                 */
                if ((eax & 0xff) && (((eax >> 8) & 0xff) > 1))
                        set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
        }

        switch (c->x86) {
#ifdef CONFIG_X86_32
        case 5:
                switch (c->x86_model) {
                case 4:
                        name = "C6";
                        fcr_set = ECX8|DSMC|EDCTLB|EMMX|ERETSTK;
                        fcr_clr = DPDC;
                        pr_notice("Disabling bugged TSC.\n");
                        clear_cpu_cap(c, X86_FEATURE_TSC);
                        break;
                case 8:
                        switch (c->x86_stepping) {
                        default:
                                name = "2";
                                break;
                        case 7 ... 9:
                                name = "2A";
                                break;
                        case 10 ... 15:
                                name = "2B";
                                break;
                        }
                        fcr_set = ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|
                                  E2MMX|EAMD3D;
                        fcr_clr = DPDC;
                        break;
                case 9:
                        name = "3";
                        fcr_set = ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|
                                  E2MMX|EAMD3D;
                        fcr_clr = DPDC;
                        break;
                default:
                        name = "??";
                }

                rdmsr(MSR_IDT_FCR1, lo, hi);
                newlo = (lo|fcr_set) & (~fcr_clr);

                if (newlo != lo) {
                        pr_info("Centaur FCR was 0x%X now 0x%X\n",
                                lo, newlo);
                        wrmsr(MSR_IDT_FCR1, newlo, hi);
                } else {
                        pr_info("Centaur FCR is 0x%X\n", lo);
                }
                /* Emulate MTRRs using Centaur's MCR. */
                set_cpu_cap(c, X86_FEATURE_CENTAUR_MCR);
                /* Report CX8 */
                set_cpu_cap(c, X86_FEATURE_CX8);
                /* Set 3DNow! on Winchip 2 and above. */
                if (c->x86_model >= 8)
                        set_cpu_cap(c, X86_FEATURE_3DNOW);
                /* See if we can find out some more. */
                if (cpuid_eax(0x80000000) >= 0x80000005) {
                        /* Yes, we can. */
                        cpuid(0x80000005, &aa, &bb, &cc, &dd);
                        /* Add L1 data and code cache sizes. */
                        c->x86_cache_size = (cc>>24)+(dd>>24);
                }
                sprintf(c->x86_model_id, "WinChip %s", name);
                break;
#endif
        case 6:
                init_c3(c);
                break;
        }
#ifdef CONFIG_X86_64
        set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
#endif

        if (cpu_has(c, X86_FEATURE_VMX))
                centaur_detect_vmx_virtcap(c);
}

#ifdef CONFIG_X86_32
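/*
 * legacy_cache_size callback: correct the L2 cache size reported by the
 * legacy CPUID leaf for two old VIA C3 quirks.
 */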
static unsigned int
centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
        /* VIA C3 CPUs (670-68F) need further shifting. */
        if ((c->x86 == 6) && ((c->x86_model == 7) || (c->x86_model == 8)))
                size >>= 8;

        /*
         * There's also an erratum in Nehemiah stepping 1, which
         * returns '65KB' instead of '64KB'
         *  - Note, it seems this may only be in engineering samples.
         */
        if ((c->x86 == 6) && (c->x86_model == 9) &&
                                (c->x86_stepping == 1) && (size == 65))
                size -= 1;
        return size;
}
#endif

static const struct cpu_dev centaur_cpu_dev = {
        .c_vendor       = "Centaur",
        .c_ident        = { "CentaurHauls" },
        .c_early_init   = early_init_centaur,
        .c_init         = init_centaur,
#ifdef CONFIG_X86_32
        .legacy_cache_size = centaur_size_cache,
#endif
        .c_x86_vendor   = X86_VENDOR_CENTAUR,
};

cpu_dev_register(centaur_cpu_dev);