#ifndef _ASM_X86_PROCESSOR_H
#define _ASM_X86_PROCESSOR_H

#include <asm/processor-flags.h>

struct task_struct;
struct mm_struct;

#include <asm/vm86.h>
#include <asm/math_emu.h>
#include <asm/segment.h>
#include <asm/types.h>
#include <asm/sigcontext.h>
#include <asm/current.h>
#include <asm/cpufeature.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>
#include <asm/percpu.h>
#include <asm/msr.h>
#include <asm/desc_defs.h>
#include <asm/nops.h>
#include <asm/special_insns.h>

#include <linux/personality.h>
#include <linux/cpumask.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/math64.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/irqflags.h>

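/*
 * x86 handles most unaligned accesses in hardware, so no extra
 * IP-header alignment is needed in network drivers.
 */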
#define NET_IP_ALIGN 0

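/* Number of hardware breakpoint registers (DR0-DR3): */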
#define HBP_NUM 4

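/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */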
static inline void *current_text_addr(void)
{
	void *pc;

	asm volatile("mov $1f, %0; 1:":"=r" (pc));

	return pc;
}

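/*
 * On vSMP, task and mm structures are aligned to internode cache
 * lines to avoid cross-node false sharing.
 */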
#ifdef CONFIG_X86_VSMP
# define ARCH_MIN_TASKALIGN (1 << INTERNODE_CACHE_SHIFT)
# define ARCH_MIN_MMSTRUCT_ALIGN (1 << INTERNODE_CACHE_SHIFT)
#else
# define ARCH_MIN_TASKALIGN 16
# define ARCH_MIN_MMSTRUCT_ALIGN 0
#endif

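/*
 * CPU type and hardware bug flags. Kept separately for each CPU.
 */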
struct cpuinfo_x86 {
	__u8 x86;		/* CPU family */
	__u8 x86_vendor;	/* CPU vendor */
	__u8 x86_model;
	__u8 x86_mask;
#ifdef CONFIG_X86_32
	char wp_works_ok;	/* It doesn't on 386's */

	/* Problems on some 486Dx4's and old 386's: */
	char hlt_works_ok;
	char hard_math;
	char rfu;
	char fdiv_bug;
	char f00f_bug;
	char coma_bug;
	char pad0;
#else
	/* Number of 4K pages in DTLB/ITLB combined (in pages): */
	int x86_tlbsize;
#endif
	__u8 x86_virt_bits;
	__u8 x86_phys_bits;
	/* CPUID returned core id bits: */
	__u8 x86_coreid_bits;
	/* Max extended CPUID function supported: */
	__u32 extended_cpuid_level;
	/* Maximum supported CPUID level, -1=no CPUID: */
	int cpuid_level;
	__u32 x86_capability[NCAPINTS];
	char x86_vendor_id[16];
	char x86_model_id[64];
	/* in KB - valid for CPUs which support this call: */
	int x86_cache_size;
	int x86_cache_alignment;	/* in bytes */
	int x86_power;
	unsigned long loops_per_jiffy;
	/* cpuid returned max cores value: */
	u16 x86_max_cores;
	u16 apicid;
	u16 initial_apicid;
	u16 x86_clflush_size;
	/* number of cores as seen by the OS: */
	u16 booted_cores;
	/* Physical processor id: */
	u16 phys_proc_id;
	/* Core id: */
	u16 cpu_core_id;
	/* Compute unit id */
	u8 compute_unit_id;
	/* Index into per_cpu list: */
	u16 cpu_index;
	u32 microcode;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));

#define X86_VENDOR_INTEL 0
#define X86_VENDOR_CYRIX 1
#define X86_VENDOR_AMD 2
#define X86_VENDOR_UMC 3
#define X86_VENDOR_CENTAUR 5
#define X86_VENDOR_TRANSMETA 7
#define X86_VENDOR_NSC 8
#define X86_VENDOR_NUM 9

#define X86_VENDOR_UNKNOWN 0xff

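/*
 * capabilities of CPUs
 */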
extern struct cpuinfo_x86 boot_cpu_data;
extern struct cpuinfo_x86 new_cpu_data;

extern struct tss_struct doublefault_tss;
extern __u32 cpu_caps_cleared[NCAPINTS];
extern __u32 cpu_caps_set[NCAPINTS];

#ifdef CONFIG_SMP
DECLARE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
#define cpu_data(cpu) per_cpu(cpu_info, cpu)
#else
#define cpu_info boot_cpu_data
#define cpu_data(cpu) boot_cpu_data
#endif

extern const struct seq_operations cpuinfo_op;

static inline int hlt_works(int cpu)
{
#ifdef CONFIG_X86_32
	return cpu_data(cpu).hlt_works_ok;
#else
	return 1;
#endif
}

#define cache_line_size() (boot_cpu_data.x86_cache_alignment)

extern void cpu_detect(struct cpuinfo_x86 *c);

extern struct pt_regs *idle_regs(struct pt_regs *);

extern void early_cpu_init(void);
extern void identify_boot_cpu(void);
extern void identify_secondary_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
void print_cpu_msr(struct cpuinfo_x86 *);
extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern unsigned short num_cache_leaves;

extern void detect_extended_topology(struct cpuinfo_x86 *c);
extern void detect_ht(struct cpuinfo_x86 *c);

static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
				unsigned int *ecx, unsigned int *edx)
{
	/* ecx is often an input as well as an output. */
	asm volatile("cpuid"
	    : "=a" (*eax),
	      "=b" (*ebx),
	      "=c" (*ecx),
	      "=d" (*edx)
	    : "0" (*eax), "2" (*ecx)
	    : "memory");
}

static inline void load_cr3(pgd_t *pgdir)
{
	write_cr3(__pa(pgdir));
}

#ifdef CONFIG_X86_32
/* This is the TSS defined by the hardware. */
struct x86_hw_tss {
	unsigned short back_link, __blh;
	unsigned long sp0;
	unsigned short ss0, __ss0h;
	unsigned long sp1;
	/* ss1 caches MSR_IA32_SYSENTER_CS: */
	unsigned short ss1, __ss1h;
	unsigned long sp2;
	unsigned short ss2, __ss2h;
	unsigned long __cr3;
	unsigned long ip;
	unsigned long flags;
	unsigned long ax;
	unsigned long cx;
	unsigned long dx;
	unsigned long bx;
	unsigned long sp;
	unsigned long bp;
	unsigned long si;
	unsigned long di;
	unsigned short es, __esh;
	unsigned short cs, __csh;
	unsigned short ss, __ssh;
	unsigned short ds, __dsh;
	unsigned short fs, __fsh;
	unsigned short gs, __gsh;
	unsigned short ldt, __ldth;
	unsigned short trace;
	unsigned short io_bitmap_base;

} __attribute__((packed));
#else
struct x86_hw_tss {
	u32 reserved1;
	u64 sp0;
	u64 sp1;
	u64 sp2;
	u64 reserved2;
	u64 ist[7];
	u32 reserved3;
	u32 reserved4;
	u16 reserved5;
	u16 io_bitmap_base;

} __attribute__((packed)) ____cacheline_aligned;
#endif

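/*
 * IO-bitmap sizes:
 */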
#define IO_BITMAP_BITS 65536
#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)
#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
#define IO_BITMAP_OFFSET offsetof(struct tss_struct, io_bitmap)
#define INVALID_IO_BITMAP_OFFSET 0x8000

struct tss_struct {
	/* The hardware state: */
	struct x86_hw_tss x86_tss;

	/*
	 * The extra 1 is there because the CPU will access an
	 * additional byte beyond the end of the IO permission
	 * bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit.
	 */
	unsigned long io_bitmap[IO_BITMAP_LONGS + 1];

	/*
	 * .. and then another 0x100 bytes for the emergency kernel stack:
	 */
	unsigned long stack[64];

} ____cacheline_aligned;

DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);

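/*
 * Save the original ist values for checking stack pointers during debugging
 */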
struct orig_ist {
	unsigned long ist[7];
};

#define MXCSR_DEFAULT 0x1f80

struct i387_fsave_struct {
	u32 cwd;	/* FPU Control Word */
	u32 swd;	/* FPU Status Word */
	u32 twd;	/* FPU Tag Word */
	u32 fip;	/* FPU IP Offset */
	u32 fcs;	/* FPU IP Selector */
	u32 foo;	/* FPU Operand Pointer Offset */
	u32 fos;	/* FPU Operand Pointer Selector */

	/* 8*10 bytes for each FP-reg = 80 bytes: */
	u32 st_space[20];

	/* Software status information [not touched by FSAVE]: */
	u32 status;
};

struct i387_fxsave_struct {
	u16 cwd;	/* Control Word */
	u16 swd;	/* Status Word */
	u16 twd;	/* Tag Word */
	u16 fop;	/* Last Instruction Opcode */
	union {
		struct {
			u64 rip;	/* Instruction Pointer */
			u64 rdp;	/* Data Pointer */
		};
		struct {
			u32 fip;	/* FPU IP Offset */
			u32 fcs;	/* FPU IP Selector */
			u32 foo;	/* FPU Operand Offset */
			u32 fos;	/* FPU Operand Selector */
		};
	};
	u32 mxcsr;		/* MXCSR Register State */
	u32 mxcsr_mask;		/* MXCSR Mask */

	/* 8*16 bytes for each FP-reg = 128 bytes: */
	u32 st_space[32];

	/* 16*16 bytes for each XMM-reg = 256 bytes: */
	u32 xmm_space[64];

	u32 padding[12];

	union {
		u32 padding1[12];
		u32 sw_reserved[12];
	};

} __attribute__((aligned(16)));

struct i387_soft_struct {
	u32 cwd;
	u32 swd;
	u32 twd;
	u32 fip;
	u32 fcs;
	u32 foo;
	u32 fos;
	/* 8*10 bytes for each FP-reg = 80 bytes: */
	u32 st_space[20];
	u8 ftop;
	u8 changed;
	u8 lookahead;
	u8 no_update;
	u8 rm;
	u8 alimit;
	struct math_emu_info *info;
	u32 entry_eip;
};

struct ymmh_struct {
	/* 16 * 16 bytes for each YMMH-reg = 256 bytes */
	u32 ymmh_space[64];
};

struct xsave_hdr_struct {
	u64 xstate_bv;
	u64 reserved1[2];
	u64 reserved2[5];
} __attribute__((packed));

struct xsave_struct {
	struct i387_fxsave_struct i387;
	struct xsave_hdr_struct xsave_hdr;
	struct ymmh_struct ymmh;
	/* new processor state extensions will go here */
} __attribute__ ((packed, aligned (64)));

union thread_xstate {
	struct i387_fsave_struct fsave;
	struct i387_fxsave_struct fxsave;
	struct i387_soft_struct soft;
	struct xsave_struct xsave;
};

struct fpu {
	unsigned int last_cpu;
	unsigned int has_fpu;
	union thread_xstate *state;
};

#ifdef CONFIG_X86_64
DECLARE_PER_CPU(struct orig_ist, orig_ist);

union irq_stack_union {
	char irq_stack[IRQ_STACK_SIZE];
	/*
	 * GCC hardcodes the stack canary as %gs:40.  Since the
	 * irq_stack is the object at %gs:0, we reserve the bottom
	 * 48 bytes of the irq stack for the canary.
	 */
	struct {
		char gs_base[40];
		unsigned long stack_canary;
	};
};

DECLARE_PER_CPU_FIRST(union irq_stack_union, irq_stack_union);
DECLARE_INIT_PER_CPU(irq_stack_union);

DECLARE_PER_CPU(char *, irq_stack_ptr);
DECLARE_PER_CPU(unsigned int, irq_count);
extern unsigned long kernel_eflags;
extern asmlinkage void ignore_sysret(void);
#else	/* X86_64 */
#ifdef CONFIG_CC_STACKPROTECTOR
/*
 * Make sure stack canary segment base is cached-aligned:
 *   "For Intel Atom processors, avoid non zero segment base address
 *    that is not aligned to cache line boundary at all cost."
 * (Optim Ref Manual Assembly/Compiler Coding Rule 15.)
 */
struct stack_canary {
	char __pad[20];		/* canary at %gs:20 */
	unsigned long canary;
};
DECLARE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
#endif
#endif	/* X86_64 */

extern unsigned int xstate_size;
extern void free_thread_xstate(struct task_struct *);
extern struct kmem_cache *task_xstate_cachep;

struct perf_event;

struct thread_struct {
	/* Cached TLS descriptors: */
	struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
	unsigned long sp0;
	unsigned long sp;
#ifdef CONFIG_X86_32
	unsigned long sysenter_cs;
#else
	unsigned long usersp;	/* Copy from PDA */
	unsigned short es;
	unsigned short ds;
	unsigned short fsindex;
	unsigned short gsindex;
#endif
#ifdef CONFIG_X86_32
	unsigned long ip;
#endif
#ifdef CONFIG_X86_64
	unsigned long fs;
#endif
	unsigned long gs;
	/* Save middle states of ptrace breakpoints */
	struct perf_event *ptrace_bps[HBP_NUM];
	/* Debug status used for traps, single steps, etc... */
	unsigned long debugreg6;
	/* Keep track of the exact dr7 value set by the user */
	unsigned long ptrace_dr7;
	/* Fault info: */
	unsigned long cr2;
	unsigned long trap_nr;
	unsigned long error_code;
	/* floating point and extended processor state */
	struct fpu fpu;
#ifdef CONFIG_X86_32
	/* Virtual 86 mode info */
	struct vm86_struct __user *vm86_info;
	unsigned long screen_bitmap;
	unsigned long v86flags;
	unsigned long v86mask;
	unsigned long saved_sp0;
	unsigned int saved_fs;
	unsigned int saved_gs;
#endif
	/* IO permissions: */
	unsigned long *io_bitmap_ptr;
	unsigned long iopl;
	/* Max allowed port in the bitmap, in bytes: */
	unsigned io_bitmap_max;
};

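/*
 * Set IOPL bits in EFLAGS from given mask
 */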
static inline void native_set_iopl_mask(unsigned mask)
{
#ifdef CONFIG_X86_32
	unsigned int reg;

	asm volatile ("pushfl;"
		      "popl %0;"
		      "andl %1, %0;"
		      "orl %2, %0;"
		      "pushl %0;"
		      "popfl"
		      : "=&r" (reg)
		      : "i" (~X86_EFLAGS_IOPL), "r" (mask));
#endif
}

static inline void
native_load_sp0(struct tss_struct *tss, struct thread_struct *thread)
{
	tss->x86_tss.sp0 = thread->sp0;
#ifdef CONFIG_X86_32
	/* Only happens when SEP is enabled, no need to test "SEP"arately: */
	if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
		tss->x86_tss.ss1 = thread->sysenter_cs;
		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
	}
#endif
}

static inline void native_swapgs(void)
{
#ifdef CONFIG_X86_64
	asm volatile("swapgs" ::: "memory");
#endif
}

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __cpuid native_cpuid
#define paravirt_enabled() 0

static inline void load_sp0(struct tss_struct *tss,
			    struct thread_struct *thread)
{
	native_load_sp0(tss, thread);
}

#define set_iopl_mask native_set_iopl_mask
#endif /* CONFIG_PARAVIRT */

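/*
 * Save the cr4 feature set we're using (ie
 * Pentium 4MB enable and PPro Global page
 * enable), so that any CPU's that boot up
 * after us can get the correct flags.
 */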
extern unsigned long mmu_cr4_features;

static inline void set_in_cr4(unsigned long mask)
{
	unsigned long cr4;

	mmu_cr4_features |= mask;
	cr4 = read_cr4();
	cr4 |= mask;
	write_cr4(cr4);
}

static inline void clear_in_cr4(unsigned long mask)
{
	unsigned long cr4;

	mmu_cr4_features &= ~mask;
	cr4 = read_cr4();
	cr4 &= ~mask;
	write_cr4(cr4);
}

typedef struct {
	unsigned long seg;
} mm_segment_t;

/*
 * create a kernel thread without removing it from tasklists
 */
extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

/* Prepare to copy thread state - unlazy all lazy state */
extern void prepare_to_copy(struct task_struct *tsk);

unsigned long get_wchan(struct task_struct *p);

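/*
 * Generic CPUID function
 * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
 * resulting in stale register contents being returned.
 */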
static inline void cpuid(unsigned int op,
			 unsigned int *eax, unsigned int *ebx,
			 unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = 0;
	__cpuid(eax, ebx, ecx, edx);
}

/* Some CPUID calls want 'count' to be placed in ecx */
static inline void cpuid_count(unsigned int op, int count,
			       unsigned int *eax, unsigned int *ebx,
			       unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = count;
	__cpuid(eax, ebx, ecx, edx);
}

/*
 * CPUID functions returning a single datum
 */
static inline unsigned int cpuid_eax(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return eax;
}

static inline unsigned int cpuid_ebx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return ebx;
}

static inline unsigned int cpuid_ecx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return ecx;
}

static inline unsigned int cpuid_edx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return edx;
}

/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
	asm volatile("rep; nop" ::: "memory");
}

static inline void cpu_relax(void)
{
	rep_nop();
}

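/* Stop speculative execution and prefetching of modified code. */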
static inline void sync_core(void)
{
	int tmp;

#if defined(CONFIG_M386) || defined(CONFIG_M486)
	if (boot_cpu_data.x86 < 5)
		/* There is no speculative execution.
		 * jmp is a barrier to prefetching. */
		asm volatile("jmp 1f\n1:\n" ::: "memory");
	else
#endif
		/* cpuid is a barrier to speculative execution.
		 * Prefetched instructions are automatically
		 * invalidated when modified. */
		asm volatile("cpuid" : "=a" (tmp) : "0" (1)
			     : "ebx", "ecx", "edx", "memory");
}

static inline void __monitor(const void *eax, unsigned long ecx,
			     unsigned long edx)
{
	/* "monitor %eax, %ecx, %edx;" */
	asm volatile(".byte 0x0f, 0x01, 0xc8;"
		     :: "a" (eax), "c" (ecx), "d"(edx));
}

static inline void __mwait(unsigned long eax, unsigned long ecx)
{
	/* "mwait %eax, %ecx;" */
	asm volatile(".byte 0x0f, 0x01, 0xc9;"
		     :: "a" (eax), "c" (ecx));
}

static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
{
	trace_hardirqs_on();
	/* "mwait %eax, %ecx;" */
	asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
		     :: "a" (eax), "c" (ecx));
}

extern void select_idle_routine(const struct cpuinfo_x86 *c);
extern void init_amd_e400_c1e_mask(void);

extern unsigned long boot_option_idle_override;
extern bool amd_e400_c1e_detected;

enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
			 IDLE_POLL, IDLE_FORCE_MWAIT};

extern void enable_sep_cpu(void);
extern int sysenter_setup(void);

extern void early_trap_init(void);

/* Defined in head.S */
extern struct desc_ptr early_gdt_descr;

extern void cpu_set_gdt(int);
extern void switch_to_new_gdt(int);
extern void load_percpu_segment(int);
extern void cpu_init(void);

static inline unsigned long get_debugctlmsr(void)
{
	unsigned long debugctlmsr = 0;

#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return 0;
#endif
	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);

	return debugctlmsr;
}

static inline void update_debugctlmsr(unsigned long debugctlmsr)
{
#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return;
#endif
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
}

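/*
 * from system description table in BIOS. Mostly for MCA use, but
 * others may find it useful:
 */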
extern unsigned int machine_id;
extern unsigned int machine_submodel_id;
extern unsigned int BIOS_revision;

/* Boot loader type from the boot loader (grub, lilo, syslinux, ...) */
extern int bootloader_type;
extern int bootloader_version;

extern char ignore_fpu_irq;

#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

#ifdef CONFIG_X86_32
# define BASE_PREFETCH ASM_NOP4
# define ARCH_HAS_PREFETCH
#else
# define BASE_PREFETCH "prefetcht0 (%1)"
#endif

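/*
 * Prefetch instructions for Pentium III (+) and AMD Athlon (+)
 *
 * It's not worth to care about 3dnow prefetches for the K6
 * because they are microcoded there and very slow.
 */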
static inline void prefetch(const void *x)
{
	alternative_input(BASE_PREFETCH,
			  "prefetchnta (%1)",
			  X86_FEATURE_XMM,
			  "r" (x));
}

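/*
 * 3dnow prefetch to get an exclusive cache line.
 * Useful for spinlocks to avoid one state transition in the
 * cache coherency protocol:
 */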
static inline void prefetchw(const void *x)
{
	alternative_input(BASE_PREFETCH,
			  "prefetchw (%1)",
			  X86_FEATURE_3DNOW,
			  "r" (x));
}

static inline void spin_lock_prefetch(const void *x)
{
	prefetchw(x);
}

#ifdef CONFIG_X86_32
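/*
 * User space process size: 3GB (default).
 */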
#define TASK_SIZE PAGE_OFFSET
#define TASK_SIZE_MAX TASK_SIZE
#define STACK_TOP TASK_SIZE
#define STACK_TOP_MAX STACK_TOP

#define INIT_THREAD { \
	.sp0 = sizeof(init_stack) + (long)&init_stack, \
	.vm86_info = NULL, \
	.sysenter_cs = __KERNEL_CS, \
	.io_bitmap_ptr = NULL, \
}

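/*
 * Note that the .io_bitmap member must be extra-big. This is because
 * the CPU will access an additional byte beyond the end of the IO
 * permission bitmap. The "+1" is there to allow for that.
 */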
#define INIT_TSS { \
	.x86_tss = { \
		.sp0 = sizeof(init_stack) + (long)&init_stack, \
		.ss0 = __KERNEL_DS, \
		.ss1 = __KERNEL_CS, \
		.io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
	}, \
	.io_bitmap = { [0 ... IO_BITMAP_LONGS] = ~0 }, \
}

extern unsigned long thread_saved_pc(struct task_struct *tsk);

#define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
#define KSTK_TOP(info) \
({ \
	unsigned long *__ptr = (unsigned long *)(info); \
	(unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
})

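/*
 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
 * This is necessary to guarantee that the entire "struct pt_regs"
 * is accessible even if the CPU haven't stored the SS/ESP registers
 * on the stack (interrupt gate does not save these registers
 * when switching to the same priv ring).
 * Therefore beware: accessing the ss/esp fields of the
 * "struct pt_regs" is possible, but they may contain the
 * wrong values.
 */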
#define task_pt_regs(task) \
({ \
	struct pt_regs *__regs__; \
	__regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
	__regs__ - 1; \
})

#define KSTK_ESP(task) (task_pt_regs(task)->sp)

#else
/*
 * User space process size. 47bits minus one guard page.
 */
#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
			  0xc0000000 : 0xFFFFe000)

#define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
		   IA32_PAGE_OFFSET : TASK_SIZE_MAX)
#define TASK_SIZE_OF(child) ((test_tsk_thread_flag(child, TIF_ADDR32)) ? \
			     IA32_PAGE_OFFSET : TASK_SIZE_MAX)

#define STACK_TOP TASK_SIZE
#define STACK_TOP_MAX TASK_SIZE_MAX

#define INIT_THREAD { \
	.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
}

#define INIT_TSS { \
	.x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
}

/*
 * Return saved PC of a blocked thread.
 * What is this good for? it will be always the scheduler or ret_from_fork.
 */
#define thread_saved_pc(t) (*(unsigned long *)((t)->thread.sp - 8))

#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.sp0 - 1)
extern unsigned long KSTK_ESP(struct task_struct *task);

/*
 * User space RSP while inside the SYSCALL fast path
 */
DECLARE_PER_CPU(unsigned long, old_rsp);

#endif /* CONFIG_X86_64 */

extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
			 unsigned long new_sp);

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))

#define KSTK_EIP(task) (task_pt_regs(task)->ip)

/* Get/set a process' ability to use the timestamp counter instruction */
#define GET_TSC_CTL(adr) get_tsc_mode((adr))
#define SET_TSC_CTL(val) set_tsc_mode((val))

extern int get_tsc_mode(unsigned long adr);
extern int set_tsc_mode(unsigned int val);

extern int amd_get_nb_id(int cpu);

struct aperfmperf {
	u64 aperf, mperf;
};

static inline void get_aperfmperf(struct aperfmperf *am)
{
	WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_APERFMPERF));

	rdmsrl(MSR_IA32_APERF, am->aperf);
	rdmsrl(MSR_IA32_MPERF, am->mperf);
}

#define APERFMPERF_SHIFT 10

static inline
unsigned long calc_aperfmperf_ratio(struct aperfmperf *old,
				    struct aperfmperf *new)
{
	u64 aperf = new->aperf - old->aperf;
	u64 mperf = new->mperf - old->mperf;
	unsigned long ratio = aperf;

	mperf >>= APERFMPERF_SHIFT;
	if (mperf)
		ratio = div64_u64(aperf, mperf);

	return ratio;
}

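/*
 * AMD errata checking
 */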
#ifdef CONFIG_CPU_SUP_AMD
extern const int amd_erratum_383[];
extern const int amd_erratum_400[];
extern bool cpu_has_amd_erratum(const int *);

#define AMD_LEGACY_ERRATUM(...) { -1, __VA_ARGS__, 0 }
#define AMD_OSVW_ERRATUM(osvw_id, ...) { osvw_id, __VA_ARGS__, 0 }
#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
	((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
#define AMD_MODEL_RANGE_FAMILY(range) (((range) >> 24) & 0xff)
#define AMD_MODEL_RANGE_START(range) (((range) >> 12) & 0xfff)
#define AMD_MODEL_RANGE_END(range) ((range) & 0xfff)

#else
#define cpu_has_amd_erratum(x) (false)
#endif

void cpu_idle_wait(void);

extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);

void default_idle(void);
bool set_pm_idle_to_default(void);

void stop_this_cpu(void *dummy);

#endif /* _ASM_X86_PROCESSOR_H */