linux/arch/x86/boot/cpuflags.c
#include <linux/types.h>
#include "bitops.h"

#include <asm/processor-flags.h>
#include <asm/required-features.h>
#include <asm/msr-index.h>
#include "cpuflags.h"

struct cpu_features cpu;
u32 cpu_vendor[3];

static bool loaded_flags;

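/*
 * Probe for an x87 FPU without relying on CPUID: make sure FPU
 * instructions are not trapped (clear CR0.EM and CR0.TS), run fninit
 * and check that the status and control words read back with their
 * architectural reset values (FSW == 0, low FCW bits == 0x3f).
 */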
static int has_fpu(void)
{
        u16 fcw = -1, fsw = -1;
        unsigned long cr0;

        asm volatile("mov %%cr0,%0" : "=r" (cr0));
        if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
                cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
                asm volatile("mov %0,%%cr0" : : "r" (cr0));
        }

        asm volatile("fninit ; fnstsw %0 ; fnstcw %1"
                     : "+m" (fsw), "+m" (fcw));

        return fsw == 0 && (fcw & 0x103f) == 0x003f;
}

/*
 * For building the 16-bit code we want to explicitly specify 32-bit
 * push/pop operations, rather than just saying 'pushf' or 'popf' and
 * letting the compiler choose. But this is also included from the
 * compressed/ directory where it may be 64-bit code, and thus needs
 * to be 'pushfq' or 'popfq' in that case.
 */
#ifdef __x86_64__
#define PUSHF "pushfq"
#define POPF "popfq"
#else
#define PUSHF "pushfl"
#define POPF "popfl"
#endif

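/*
 * Test whether the bits in 'mask' can be toggled in EFLAGS: read the
 * current flags, flip the requested bits, write them back and compare
 * with what is read back afterwards. Called with X86_EFLAGS_ID to
 * find out whether the CPUID instruction is available.
 */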
int has_eflag(unsigned long mask)
{
        unsigned long f0, f1;

        asm volatile(PUSHF "    \n\t"
                     PUSHF "    \n\t"
                     "pop %0    \n\t"
                     "mov %0,%1 \n\t"
                     "xor %2,%1 \n\t"
                     "push %1   \n\t"
                     POPF "     \n\t"
                     PUSHF "    \n\t"
                     "pop %1    \n\t"
                     POPF
                     : "=&r" (f0), "=&r" (f1)
                     : "ri" (mask));

        return !!((f0^f1) & mask);
}

/* Handle x86_32 PIC using ebx. */
#if defined(__i386__) && defined(__PIC__)
# define EBX_REG "=r"
#else
# define EBX_REG "=b"
#endif

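/*
 * Issue CPUID leaf 'id'. When EBX cannot be named directly as an
 * output (the 32-bit PIC case above), the .ifnc/.endif sequences save
 * EBX before the instruction and exchange it back afterwards, leaving
 * the leaf's EBX value in the scratch register bound to %3.
 */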
static inline void cpuid(u32 id, u32 *a, u32 *b, u32 *c, u32 *d)
{
        asm volatile(".ifnc %%ebx,%3 ; movl  %%ebx,%3 ; .endif  \n\t"
                     "cpuid                                     \n\t"
                     ".ifnc %%ebx,%3 ; xchgl %%ebx,%3 ; .endif  \n\t"
                    : "=a" (*a), "=c" (*c), "=d" (*d), EBX_REG (*b)
                    : "a" (id)
        );
}

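/*
 * Fill in 'cpu' and 'cpu_vendor': probe for an FPU directly, then, if
 * the EFLAGS.ID bit is writable (i.e. CPUID exists), read the vendor
 * string plus the standard and extended feature leaves. Safe to call
 * more than once; the work is only done the first time.
 */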
void get_cpuflags(void)
{
        u32 max_intel_level, max_amd_level;
        u32 tfms;
        u32 ignored;

        if (loaded_flags)
                return;
        loaded_flags = true;

        if (has_fpu())
                set_bit(X86_FEATURE_FPU, cpu.flags);

        if (has_eflag(X86_EFLAGS_ID)) {
                cpuid(0x0, &max_intel_level, &cpu_vendor[0], &cpu_vendor[2],
                      &cpu_vendor[1]);

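                /*
                 * CPUID leaf 0x1: the EDX and ECX feature words land
                 * in cpu.flags[0] and cpu.flags[4]; EAX packs the
                 * stepping, model and family, with the extended model
                 * field folded in from family 6 onward.
                 */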
                if (max_intel_level >= 0x00000001 &&
                    max_intel_level <= 0x0000ffff) {
                        cpuid(0x1, &tfms, &ignored, &cpu.flags[4],
                              &cpu.flags[0]);
                        cpu.level = (tfms >> 8) & 15;
                        cpu.family = cpu.level;
                        cpu.model = (tfms >> 4) & 15;
                        if (cpu.level >= 6)
                                cpu.model += ((tfms >> 16) & 0xf) << 4;
                }

                cpuid(0x80000000, &max_amd_level, &ignored, &ignored,
                      &ignored);

                if (max_amd_level >= 0x80000001 &&
                    max_amd_level <= 0x8000ffff) {
                        cpuid(0x80000001, &ignored, &ignored, &cpu.flags[6],
                              &cpu.flags[1]);
                }
        }
}