#ifndef _ASM_X86_PROCESSOR_H
#define _ASM_X86_PROCESSOR_H

#include <asm/processor-flags.h>

struct task_struct;
struct mm_struct;
struct vm86;

#include <asm/math_emu.h>
#include <asm/segment.h>
#include <asm/types.h>
#include <uapi/asm/sigcontext.h>
#include <asm/current.h>
#include <asm/cpufeatures.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>
#include <asm/percpu.h>
#include <asm/msr.h>
#include <asm/desc_defs.h>
#include <asm/nops.h>
#include <asm/special_insns.h>
#include <asm/fpu/types.h>

#include <linux/personality.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/math64.h>
#include <linux/err.h>
#include <linux/irqflags.h>
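/*
 * We handle most unaligned accesses in hardware.  On the other hand
 * unaligned DMA can be quite expensive on some Nehalem processors.
 *
 * Based on this we disable the IP header alignment in network drivers.
 */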
#define NET_IP_ALIGN	0

/* Number of hardware breakpoint (debug) registers: */
#define HBP_NUM		4
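/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */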
static inline void *current_text_addr(void)
{
	void *pc;

	asm volatile("mov $1f, %0; 1:":"=r" (pc));

	return pc;
}
#ifdef CONFIG_X86_VSMP
# define ARCH_MIN_TASKALIGN		(1 << INTERNODE_CACHE_SHIFT)
# define ARCH_MIN_MMSTRUCT_ALIGN	(1 << INTERNODE_CACHE_SHIFT)
#else
# define ARCH_MIN_TASKALIGN		__alignof__(union fpregs_state)
# define ARCH_MIN_MMSTRUCT_ALIGN	0
#endif
enum tlb_infos {
	ENTRIES,
	NR_INFO
};

extern u16 __read_mostly tlb_lli_4k[NR_INFO];
extern u16 __read_mostly tlb_lli_2m[NR_INFO];
extern u16 __read_mostly tlb_lli_4m[NR_INFO];
extern u16 __read_mostly tlb_lld_4k[NR_INFO];
extern u16 __read_mostly tlb_lld_2m[NR_INFO];
extern u16 __read_mostly tlb_lld_4m[NR_INFO];
extern u16 __read_mostly tlb_lld_1g[NR_INFO];
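/*
 * CPU type and hardware bug flags. Kept separately for each CPU.
 * Members of this structure are referenced in head.S, so think twice
 * before touching them.
 */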
struct cpuinfo_x86 {
	__u8			x86;		/* CPU family */
	__u8			x86_vendor;	/* CPU vendor */
	__u8			x86_model;
	__u8			x86_mask;
#ifdef CONFIG_X86_32
	char			wp_works_ok;	/* It doesn't on 386's */

	/* Problems on some 486Dx4's and old 386's: */
	char			rfu;
	char			pad0;
	char			pad1;
#else
	/* Number of 4K pages in DTLB/ITLB combined (in pages): */
	int			x86_tlbsize;
#endif
	__u8			x86_virt_bits;
	__u8			x86_phys_bits;
	/* CPUID returned core id bits: */
	__u8			x86_coreid_bits;
	/* Max extended CPUID function supported: */
	__u32			extended_cpuid_level;
	/* Maximum supported CPUID level, -1=no CPUID: */
	int			cpuid_level;
	__u32			x86_capability[NCAPINTS + NBUGINTS];
	char			x86_vendor_id[16];
	char			x86_model_id[64];
	/* in KB - valid for CPUs which support this call: */
	int			x86_cache_size;
	int			x86_cache_alignment;	/* In bytes */
	/* Cache QoS architectural values: */
	int			x86_cache_max_rmid;	/* max index */
	int			x86_cache_occ_scale;	/* scale to bytes */
	int			x86_power;
	unsigned long		loops_per_jiffy;
	/* cpuid returned max cores value: */
	u16			x86_max_cores;
	u16			apicid;
	u16			initial_apicid;
	u16			x86_clflush_size;
	/* number of cores as seen by the OS: */
	u16			booted_cores;
	/* Physical processor id: */
	u16			phys_proc_id;
	/* Logical processor id: */
	u16			logical_proc_id;
	/* Core id: */
	u16			cpu_core_id;
	/* Index into per_cpu list: */
	u16			cpu_index;
	u32			microcode;
};
#define X86_VENDOR_INTEL	0
#define X86_VENDOR_CYRIX	1
#define X86_VENDOR_AMD		2
#define X86_VENDOR_UMC		3
#define X86_VENDOR_CENTAUR	5
#define X86_VENDOR_TRANSMETA	7
#define X86_VENDOR_NSC		8
#define X86_VENDOR_NUM		9

#define X86_VENDOR_UNKNOWN	0xff

extern struct cpuinfo_x86	boot_cpu_data;
extern struct cpuinfo_x86	new_cpu_data;

extern struct tss_struct	doublefault_tss;
extern __u32			cpu_caps_cleared[NCAPINTS];
extern __u32			cpu_caps_set[NCAPINTS];
#ifdef CONFIG_SMP
DECLARE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
#define cpu_data(cpu)		per_cpu(cpu_info, cpu)
#else
#define cpu_info		boot_cpu_data
#define cpu_data(cpu)		boot_cpu_data
#endif

extern const struct seq_operations cpuinfo_op;

#define cache_line_size()	(boot_cpu_data.x86_cache_alignment)
extern void cpu_detect(struct cpuinfo_x86 *c);

extern void early_cpu_init(void);
extern void identify_boot_cpu(void);
extern void identify_secondary_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
void print_cpu_msr(struct cpuinfo_x86 *);
extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern void init_amd_cacheinfo(struct cpuinfo_x86 *c);

extern void detect_extended_topology(struct cpuinfo_x86 *c);
extern void detect_ht(struct cpuinfo_x86 *c);

#ifdef CONFIG_X86_32
extern int have_cpuid_p(void);
#else
static inline int have_cpuid_p(void)
{
	return 1;
}
#endif
static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
				unsigned int *ecx, unsigned int *edx)
{
	/* ecx is often an input as well as an output. */
	asm volatile("cpuid"
	    : "=a" (*eax),
	      "=b" (*ebx),
	      "=c" (*ecx),
	      "=d" (*edx)
	    : "0" (*eax), "2" (*ecx)
	    : "memory");
}

static inline void load_cr3(pgd_t *pgdir)
{
	write_cr3(__pa(pgdir));
}
#ifdef CONFIG_X86_32
/* This is the TSS defined by the hardware. */
struct x86_hw_tss {
	unsigned short		back_link, __blh;
	unsigned long		sp0;
	unsigned short		ss0, __ss0h;
	unsigned long		sp1;

	/*
	 * We don't use ring 1, so ss1 is a convenient scratch space in
	 * the same cacheline as sp0.  We use it to cache the last value
	 * written to MSR_IA32_SYSENTER_CS, so that load_sp0() can skip
	 * the wrmsr when the value has not changed.
	 */
	unsigned short		ss1;	/* MSR_IA32_SYSENTER_CS */

	unsigned short		__ss1h;
	unsigned long		sp2;
	unsigned short		ss2, __ss2h;
	unsigned long		__cr3;
	unsigned long		ip;
	unsigned long		flags;
	unsigned long		ax;
	unsigned long		cx;
	unsigned long		dx;
	unsigned long		bx;
	unsigned long		sp;
	unsigned long		bp;
	unsigned long		si;
	unsigned long		di;
	unsigned short		es, __esh;
	unsigned short		cs, __csh;
	unsigned short		ss, __ssh;
	unsigned short		ds, __dsh;
	unsigned short		fs, __fsh;
	unsigned short		gs, __gsh;
	unsigned short		ldt, __ldth;
	unsigned short		trace;
	unsigned short		io_bitmap_base;

} __attribute__((packed));
#else
struct x86_hw_tss {
	u32			reserved1;
	u64			sp0;
	u64			sp1;
	u64			sp2;
	u64			reserved2;
	u64			ist[7];
	u32			reserved3;
	u32			reserved4;
	u16			reserved5;
	u16			io_bitmap_base;

} __attribute__((packed)) ____cacheline_aligned;
#endif
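/*
 * IO-bitmap sizes:
 */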
#define IO_BITMAP_BITS			65536
#define IO_BITMAP_BYTES			(IO_BITMAP_BITS/8)
#define IO_BITMAP_LONGS			(IO_BITMAP_BYTES/sizeof(long))
#define IO_BITMAP_OFFSET		offsetof(struct tss_struct, io_bitmap)
#define INVALID_IO_BITMAP_OFFSET	0x8000

struct tss_struct {
	/*
	 * The hardware state:
	 */
	struct x86_hw_tss	x86_tss;

	/*
	 * The extra 1 is there because the CPU will access an
	 * additional byte beyond the end of the IO permission
	 * bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit.
	 */
	unsigned long		io_bitmap[IO_BITMAP_LONGS + 1];

#ifdef CONFIG_X86_32
	/*
	 * Space for the temporary SYSENTER stack:
	 */
	unsigned long		SYSENTER_stack_canary;
	unsigned long		SYSENTER_stack[64];
#endif

} ____cacheline_aligned;
DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss);

#ifdef CONFIG_X86_32
DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack);
#endif
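/*
 * Save the original ist values for checking stack pointers during debugging
 */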
struct orig_ist {
	unsigned long		ist[7];
};

#ifdef CONFIG_X86_64
DECLARE_PER_CPU(struct orig_ist, orig_ist);

union irq_stack_union {
	char irq_stack[IRQ_STACK_SIZE];
	/*
	 * GCC hardcodes the stack canary as %gs:40.  Since the
	 * irq_stack is the object at %gs:0, we reserve the bottom
	 * 40 bytes and the top most 8 bytes for the canary.
	 */
	struct {
		char gs_base[40];
		unsigned long stack_canary;
	};
};

DECLARE_PER_CPU_FIRST(union irq_stack_union, irq_stack_union) __visible;
DECLARE_INIT_PER_CPU(irq_stack_union);

DECLARE_PER_CPU(char *, irq_stack_ptr);
DECLARE_PER_CPU(unsigned int, irq_count);
extern asmlinkage void ignore_sysret(void);
#else	/* X86_64 */
#ifdef CONFIG_CC_STACKPROTECTOR
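/*
 * Make sure stack canary segment base is cached-aligned:
 *   "For Intel Atom processors, avoid non zero segment base address
 *    that is not aligned to cache line boundary at all cost."
 * (Optim Ref Manual Assembly/Compiler Coding Rule 15.)
 */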
struct stack_canary {
	char __pad[20];		/* canary is at %gs:20 */
	unsigned long canary;
};
DECLARE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
#endif
/*
 * Per-CPU IRQ handling stacks:
 */
struct irq_stack {
	u32			stack[THREAD_SIZE/sizeof(u32)];
} __aligned(THREAD_SIZE);

DECLARE_PER_CPU(struct irq_stack *, hardirq_stack);
DECLARE_PER_CPU(struct irq_stack *, softirq_stack);
#endif	/* X86_64 */
extern unsigned int fpu_kernel_xstate_size;
extern unsigned int fpu_user_xstate_size;

struct perf_event;

typedef struct {
	unsigned long		seg;
} mm_segment_t;
struct thread_struct {
	/* Cached TLS descriptors: */
	struct desc_struct	tls_array[GDT_ENTRY_TLS_ENTRIES];
	unsigned long		sp0;
	unsigned long		sp;
#ifdef CONFIG_X86_32
	unsigned long		sysenter_cs;
#else
	unsigned short		es;
	unsigned short		ds;
	unsigned short		fsindex;
	unsigned short		gsindex;
#endif

	u32			status;		/* thread synchronous flags */

#ifdef CONFIG_X86_64
	unsigned long		fsbase;
	unsigned long		gsbase;
#else
	/*
	 * XXX: this could presumably be unsigned short.  Alternatively,
	 * 32-bit kernels could be taught to use fsindex instead.
	 */
	unsigned long		fs;
	unsigned long		gs;
#endif

	/* Save middle states of ptrace breakpoints */
	struct perf_event	*ptrace_bps[HBP_NUM];
	/* Debug status used for traps, single steps, etc... */
	unsigned long		debugreg6;
	/* Keep track of the exact dr7 value set by the user */
	unsigned long		ptrace_dr7;
	/* Fault info: */
	unsigned long		cr2;
	unsigned long		trap_nr;
	unsigned long		error_code;
#ifdef CONFIG_VM86
	/* Virtual 86 mode info */
	struct vm86		*vm86;
#endif
	/* IO permissions: */
	unsigned long		*io_bitmap_ptr;
	unsigned long		iopl;
	/* Max allowed port in the bitmap, in bytes: */
	unsigned		io_bitmap_max;

	mm_segment_t		addr_limit;

	unsigned int		sig_on_uaccess_err:1;
	unsigned int		uaccess_err:1;	/* uaccess failed */

	/* Floating point and extended processor state: */
	struct fpu		fpu;
	/*
	 * WARNING: 'fpu' is dynamically-sized.  It *MUST* be at
	 * the end.
	 */
};
/*
 * Thread-synchronous status.
 *
 * This is different from the flags in that nobody else
 * ever touches our thread-synchronous status, so we don't
 * have to worry about atomic accesses.
 */
#define TS_COMPAT		0x0002		/* 32bit syscall active (64BIT) */
/*
 * Set IOPL bits in EFLAGS from given mask
 */
static inline void native_set_iopl_mask(unsigned mask)
{
#ifdef CONFIG_X86_32
	unsigned int reg;

	asm volatile ("pushfl;"
		      "popl %0;"
		      "andl %1, %0;"
		      "orl %2, %0;"
		      "pushl %0;"
		      "popfl"
		      : "=&r" (reg)
		      : "i" (~X86_EFLAGS_IOPL), "r" (mask));
#endif
}
static inline void
native_load_sp0(struct tss_struct *tss, struct thread_struct *thread)
{
	tss->x86_tss.sp0 = thread->sp0;
#ifdef CONFIG_X86_32
	/* Only happens when SEP is enabled, no need to test "SEP"arately: */
	if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
		tss->x86_tss.ss1 = thread->sysenter_cs;
		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
	}
#endif
}
static inline void native_swapgs(void)
{
#ifdef CONFIG_X86_64
	asm volatile("swapgs" ::: "memory");
#endif
}

static inline unsigned long current_top_of_stack(void)
{
#ifdef CONFIG_X86_64
	return this_cpu_read_stable(cpu_tss.x86_tss.sp0);
#else
	/* sp0 on x86_32 is special in and around vm86 mode. */
	return this_cpu_read_stable(cpu_current_top_of_stack);
#endif
}
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __cpuid			native_cpuid

static inline void load_sp0(struct tss_struct *tss,
			    struct thread_struct *thread)
{
	native_load_sp0(tss, thread);
}

#define set_iopl_mask		native_set_iopl_mask
#endif /* CONFIG_PARAVIRT */
/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

unsigned long get_wchan(struct task_struct *p);

/*
 * Generic CPUID function
 * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
 * resulting in stale register contents being returned.
 */
static inline void cpuid(unsigned int op,
			 unsigned int *eax, unsigned int *ebx,
			 unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = 0;
	__cpuid(eax, ebx, ecx, edx);
}
/* Some CPUID calls want 'count' to be placed in ecx */
static inline void cpuid_count(unsigned int op, int count,
			       unsigned int *eax, unsigned int *ebx,
			       unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = count;
	__cpuid(eax, ebx, ecx, edx);
}
/*
 * CPUID functions returning a single datum:
 */
static inline unsigned int cpuid_eax(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return eax;
}

static inline unsigned int cpuid_ebx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return ebx;
}

static inline unsigned int cpuid_ecx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return ecx;
}

static inline unsigned int cpuid_edx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return edx;
}
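/*
 * REP NOP (PAUSE) is a good thing to insert into busy-wait loops.
 */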
static __always_inline void rep_nop(void)
{
	asm volatile("rep; nop" ::: "memory");
}

static __always_inline void cpu_relax(void)
{
	rep_nop();
}

#define cpu_relax_lowlatency()	cpu_relax()
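/* Stop speculative execution and prefetching of modified code. */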
static inline void sync_core(void)
{
	int tmp;

#ifdef CONFIG_M486
	/*
	 * Do a CPUID if available, otherwise do a jump.  The jump
	 * can conveniently enough be the jump around CPUID.
	 */
	asm volatile("cmpl %2,%1\n\t"
		     "jl 1f\n\t"
		     "cpuid\n"
		     "1:"
		     : "=a" (tmp)
		     : "rm" (boot_cpu_data.cpuid_level), "ri" (0), "0" (1)
		     : "ebx", "ecx", "edx", "memory");
#else
	/*
	 * CPUID is a barrier to speculative execution.
	 * Prefetched instructions are automatically
	 * invalidated when modified.
	 */
	asm volatile("cpuid"
		     : "=a" (tmp)
		     : "0" (1)
		     : "ebx", "ecx", "edx", "memory");
#endif
}
extern void select_idle_routine(const struct cpuinfo_x86 *c);
extern void init_amd_e400_c1e_mask(void);

extern unsigned long		boot_option_idle_override;
extern bool			amd_e400_c1e_detected;

enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
			 IDLE_POLL};

extern void enable_sep_cpu(void);
extern int sysenter_setup(void);

extern void early_trap_init(void);
void early_trap_pf_init(void);
/* Defined in head.S */
extern struct desc_ptr		early_gdt_descr;

extern void cpu_set_gdt(int);
extern void switch_to_new_gdt(int);
extern void load_percpu_segment(int);
extern void cpu_init(void);

static inline unsigned long get_debugctlmsr(void)
{
	unsigned long debugctlmsr = 0;

#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return 0;
#endif
	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);

	return debugctlmsr;
}
static inline void update_debugctlmsr(unsigned long debugctlmsr)
{
#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return;
#endif
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
}

extern void set_task_blockstep(struct task_struct *task, bool on);

/* Boot loader type from the setup header: */
extern int			bootloader_type;
extern int			bootloader_version;

extern char			ignore_fpu_irq;

#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH
#ifdef CONFIG_X86_32
# define BASE_PREFETCH		""
# define ARCH_HAS_PREFETCH
#else
# define BASE_PREFETCH		"prefetcht0 %P1"
#endif
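/*
 * Prefetch instructions for Pentium III (+) and AMD Athlon (+)
 *
 * It's not worth to care about 3dnow prefetches for the K6
 * because they are microcoded there and very slow.
 */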
static inline void prefetch(const void *x)
{
	alternative_input(BASE_PREFETCH, "prefetchnta %P1",
			  X86_FEATURE_XMM,
			  "m" (*(const char *)x));
}
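/*
 * 3dnow prefetch to get an exclusive cache line.
 * Useful for spinlocks to avoid one state transition in the
 * cache coherency protocol:
 */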
static inline void prefetchw(const void *x)
{
	alternative_input(BASE_PREFETCH, "prefetchw %P1",
			  X86_FEATURE_3DNOWPREFETCH,
			  "m" (*(const char *)x));
}

static inline void spin_lock_prefetch(const void *x)
{
	prefetchw(x);
}
#define TOP_OF_INIT_STACK ((unsigned long)&init_stack + sizeof(init_stack) - \
			   TOP_OF_KERNEL_STACK_PADDING)

#ifdef CONFIG_X86_32
/*
 * User space process size: 3GB (default).
 */
#define TASK_SIZE		PAGE_OFFSET
#define TASK_SIZE_MAX		TASK_SIZE
#define STACK_TOP		TASK_SIZE
#define STACK_TOP_MAX		STACK_TOP

#define INIT_THREAD  {							  \
	.sp0			= TOP_OF_INIT_STACK,			  \
	.sysenter_cs		= __KERNEL_CS,				  \
	.io_bitmap_ptr		= NULL,					  \
	.addr_limit		= KERNEL_DS,				  \
}
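/*
 * TOP_OF_KERNEL_STACK_PADDING reserves 8 bytes on top of the ring0 stack.
 * This is necessary to guarantee that the entire "struct pt_regs"
 * is accessible even if the CPU haven't stored the SS/ESP registers
 * on the stack (interrupt gate does not save these registers
 * when switching to the same priv ring).
 * Therefore beware: accessing the ss/esp fields of the
 * "struct pt_regs" is possible, but they may contain the
 * wrong values.
 */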
#define task_pt_regs(task) \
({									\
	unsigned long __ptr = (unsigned long)task_stack_page(task);	\
	__ptr += THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;		\
	((struct pt_regs *)__ptr) - 1;					\
})

#define KSTK_ESP(task)		(task_pt_regs(task)->sp)
#else
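/*
 * User space process size.  47 bits minus one guard page.  The guard
 * page is necessary on Intel CPUs: if a SYSCALL instruction is at the
 * highest canonical address, then that syscall will enter the kernel
 * with a non-canonical return address, and SYSRET will explode
 * dangerously.  We avoid this particular problem by preventing
 * anything from being mapped at the maximum canonical address.
 */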
#define TASK_SIZE_MAX	((1UL << 47) - PAGE_SIZE)

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define IA32_PAGE_OFFSET	((current->personality & ADDR_LIMIT_3GB) ? \
					0xc0000000 : 0xFFFFe000)

#define TASK_SIZE		(test_thread_flag(TIF_ADDR32) ? \
					IA32_PAGE_OFFSET : TASK_SIZE_MAX)
#define TASK_SIZE_OF(child)	((test_tsk_thread_flag(child, TIF_ADDR32)) ? \
					IA32_PAGE_OFFSET : TASK_SIZE_MAX)

#define STACK_TOP		TASK_SIZE
#define STACK_TOP_MAX		TASK_SIZE_MAX

#define INIT_THREAD  {						\
	.sp0			= TOP_OF_INIT_STACK,		\
	.addr_limit		= KERNEL_DS,			\
}

#define task_pt_regs(tsk)	((struct pt_regs *)(tsk)->thread.sp0 - 1)
extern unsigned long KSTK_ESP(struct task_struct *task);

#endif /* CONFIG_X86_64 */
extern unsigned long thread_saved_pc(struct task_struct *tsk);

extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
					       unsigned long new_sp);
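/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */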
#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3))

#define KSTK_EIP(task)		(task_pt_regs(task)->ip)

/* Get/set a process' ability to use the timestamp counter instruction */
#define GET_TSC_CTL(adr)	get_tsc_mode((adr))
#define SET_TSC_CTL(val)	set_tsc_mode((val))

extern int get_tsc_mode(unsigned long adr);
extern int set_tsc_mode(unsigned int val);
/* Register/unregister a process' MPX related resource */
#define MPX_ENABLE_MANAGEMENT()		mpx_enable_management()
#define MPX_DISABLE_MANAGEMENT()	mpx_disable_management()

#ifdef CONFIG_X86_INTEL_MPX
extern int mpx_enable_management(void);
extern int mpx_disable_management(void);
#else
static inline int mpx_enable_management(void)
{
	return -EINVAL;
}
static inline int mpx_disable_management(void)
{
	return -EINVAL;
}
#endif /* CONFIG_X86_INTEL_MPX */

extern u16 amd_get_nb_id(int cpu);
extern u32 amd_get_nodes_per_socket(void);

static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
{
	uint32_t base, eax, signature[3];

	for (base = 0x40000000; base < 0x40010000; base += 0x100) {
		cpuid(base, &eax, &signature[0], &signature[1], &signature[2]);

		if (!memcmp(sig, signature, 12) &&
		    (leaves == 0 || ((eax - base) >= leaves)))
			return base;
	}

	return 0;
}
extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);

void default_idle(void);
#ifdef CONFIG_XEN
bool xen_set_default_idle(void);
#else
#define xen_set_default_idle 0
#endif

void stop_this_cpu(void *dummy);
void df_debug(struct pt_regs *regs, long error_code);
#endif /* _ASM_X86_PROCESSOR_H */