#ifndef _ASM_X86_PROCESSOR_H
#define _ASM_X86_PROCESSOR_H

#include <asm/processor-flags.h>

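/* Forward declaration, a strange C thing */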
struct task_struct;
struct mm_struct;

#include <asm/vm86.h>
#include <asm/math_emu.h>
#include <asm/segment.h>
#include <asm/types.h>
#include <asm/sigcontext.h>
#include <asm/current.h>
#include <asm/cpufeature.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>
#include <asm/percpu.h>
#include <asm/msr.h>
#include <asm/desc_defs.h>
#include <asm/nops.h>
#include <asm/special_insns.h>

#include <linux/personality.h>
#include <linux/cpumask.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/math64.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/irqflags.h>

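/*
 * We handle most unaligned accesses in hardware.  On the other hand
 * unaligned DMA can be quite expensive on some Nehalem processors.
 *
 * Based on this we disable the IP header alignment in network drivers.
 */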
#define NET_IP_ALIGN	0

#define HBP_NUM 4	/* Number of hardware breakpoint (debug) registers */

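/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */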
static inline void *current_text_addr(void)
{
	void *pc;

	asm volatile("mov $1f, %0; 1:":"=r" (pc));

	return pc;
}

#ifdef CONFIG_X86_VSMP
# define ARCH_MIN_TASKALIGN		(1 << INTERNODE_CACHE_SHIFT)
# define ARCH_MIN_MMSTRUCT_ALIGN	(1 << INTERNODE_CACHE_SHIFT)
#else
# define ARCH_MIN_TASKALIGN		16
# define ARCH_MIN_MMSTRUCT_ALIGN	0
#endif

enum tlb_infos {
	ENTRIES,
	NR_INFO
};

extern u16 __read_mostly tlb_lli_4k[NR_INFO];
extern u16 __read_mostly tlb_lli_2m[NR_INFO];
extern u16 __read_mostly tlb_lli_4m[NR_INFO];
extern u16 __read_mostly tlb_lld_4k[NR_INFO];
extern u16 __read_mostly tlb_lld_2m[NR_INFO];
extern u16 __read_mostly tlb_lld_4m[NR_INFO];
extern s8  __read_mostly tlb_flushall_shift;

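/*
 *  CPU type and hardware bug flags. Kept separately for each CPU.
 *  Members of this structure are referenced in head.S, so think twice
 *  before touching them. [mj]
 */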
struct cpuinfo_x86 {
	__u8			x86;		/* CPU family */
	__u8			x86_vendor;	/* CPU vendor */
	__u8			x86_model;
	__u8			x86_mask;
#ifdef CONFIG_X86_32
	char			wp_works_ok;	/* It doesn't on 386's */

	/* Problems on some 486Dx4's and old 386's: */
	char			rfu;
	char			pad0;
	char			pad1;
#else
	/* Number of 4K pages in DTLB/ITLB combined (in pages): */
	int			x86_tlbsize;
#endif
	__u8			x86_virt_bits;
	__u8			x86_phys_bits;
	/* CPUID returned core id bits: */
	__u8			x86_coreid_bits;
	/* Max extended CPUID function supported: */
	__u32			extended_cpuid_level;
	/* Maximum supported CPUID level, -1=no CPUID: */
	int			cpuid_level;
	__u32			x86_capability[NCAPINTS + NBUGINTS];
	char			x86_vendor_id[16];
	char			x86_model_id[64];
	/* in KB - valid for CPUs which support this call: */
	int			x86_cache_size;
	int			x86_cache_alignment;	/* In bytes */
	int			x86_power;
	unsigned long		loops_per_jiffy;
	/* cpuid returned max cores value: */
	u16			x86_max_cores;
	u16			apicid;
	u16			initial_apicid;
	u16			x86_clflush_size;
	/* number of cores as seen by the OS: */
	u16			booted_cores;
	/* Physical processor id: */
	u16			phys_proc_id;
	/* Core id: */
	u16			cpu_core_id;
	/* Compute unit id */
	u8			compute_unit_id;
	/* Index into per_cpu list: */
	u16			cpu_index;
	u32			microcode;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));

#define X86_VENDOR_INTEL	0
#define X86_VENDOR_CYRIX	1
#define X86_VENDOR_AMD		2
#define X86_VENDOR_UMC		3
#define X86_VENDOR_CENTAUR	5
#define X86_VENDOR_TRANSMETA	7
#define X86_VENDOR_NSC		8
#define X86_VENDOR_NUM		9

#define X86_VENDOR_UNKNOWN	0xff

extern struct cpuinfo_x86	boot_cpu_data;
extern struct cpuinfo_x86	new_cpu_data;

extern struct tss_struct	doublefault_tss;
extern __u32			cpu_caps_cleared[NCAPINTS];
extern __u32			cpu_caps_set[NCAPINTS];

#ifdef CONFIG_SMP
DECLARE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
#define cpu_data(cpu)		per_cpu(cpu_info, cpu)
#else
#define cpu_info		boot_cpu_data
#define cpu_data(cpu)		boot_cpu_data
#endif

extern const struct seq_operations cpuinfo_op;

#define cache_line_size()	(boot_cpu_data.x86_cache_alignment)

extern void cpu_detect(struct cpuinfo_x86 *c);
extern void fpu_detect(struct cpuinfo_x86 *c);

extern void early_cpu_init(void);
extern void identify_boot_cpu(void);
extern void identify_secondary_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
void print_cpu_msr(struct cpuinfo_x86 *);
extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern void init_amd_cacheinfo(struct cpuinfo_x86 *c);

extern void detect_extended_topology(struct cpuinfo_x86 *c);
extern void detect_ht(struct cpuinfo_x86 *c);

#ifdef CONFIG_X86_32
extern int have_cpuid_p(void);
#else
static inline int have_cpuid_p(void)
{
	return 1;
}
#endif
static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
				unsigned int *ecx, unsigned int *edx)
{
	/* ecx is often an input as well as an output. */
	asm volatile("cpuid"
	    : "=a" (*eax),
	      "=b" (*ebx),
	      "=c" (*ecx),
	      "=d" (*edx)
	    : "0" (*eax), "2" (*ecx)
	    : "memory");
}

static inline void load_cr3(pgd_t *pgdir)
{
	write_cr3(__pa(pgdir));
}

#ifdef CONFIG_X86_32
/* This is the TSS defined by the hardware. */
struct x86_hw_tss {
	unsigned short		back_link, __blh;
	unsigned long		sp0;
	unsigned short		ss0, __ss0h;
	unsigned long		sp1;

	/* ss1 caches MSR_IA32_SYSENTER_CS: */
	unsigned short		ss1, __ss1h;
	unsigned long		sp2;
	unsigned short		ss2, __ss2h;
	unsigned long		__cr3;
	unsigned long		ip;
	unsigned long		flags;
	unsigned long		ax;
	unsigned long		cx;
	unsigned long		dx;
	unsigned long		bx;
	unsigned long		sp;
	unsigned long		bp;
	unsigned long		si;
	unsigned long		di;
	unsigned short		es, __esh;
	unsigned short		cs, __csh;
	unsigned short		ss, __ssh;
	unsigned short		ds, __dsh;
	unsigned short		fs, __fsh;
	unsigned short		gs, __gsh;
	unsigned short		ldt, __ldth;
	unsigned short		trace;
	unsigned short		io_bitmap_base;

} __attribute__((packed));
#else
struct x86_hw_tss {
	u32			reserved1;
	u64			sp0;
	u64			sp1;
	u64			sp2;
	u64			reserved2;
	u64			ist[7];
	u32			reserved3;
	u32			reserved4;
	u16			reserved5;
	u16			io_bitmap_base;

} __attribute__((packed)) ____cacheline_aligned;
#endif

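/*
 * IO-bitmap sizes:
 */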
#define IO_BITMAP_BITS			65536
#define IO_BITMAP_BYTES			(IO_BITMAP_BITS/8)
#define IO_BITMAP_LONGS			(IO_BITMAP_BYTES/sizeof(long))
#define IO_BITMAP_OFFSET		offsetof(struct tss_struct, io_bitmap)
#define INVALID_IO_BITMAP_OFFSET	0x8000

struct tss_struct {
	/*
	 * The hardware state:
	 */
	struct x86_hw_tss	x86_tss;

	/*
	 * The extra 1 is there because the CPU will access an
	 * additional byte beyond the end of the IO permission
	 * bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit.
	 */
	unsigned long		io_bitmap[IO_BITMAP_LONGS + 1];

	/*
	 * .. and then another 0x100 bytes for the emergency kernel stack:
	 */
	unsigned long		stack[64];

} ____cacheline_aligned;

DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);

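/*
 * Save the original ist values for checking stack pointers during debugging
 */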
struct orig_ist {
	unsigned long		ist[7];
};

#define	MXCSR_DEFAULT		0x1f80

struct i387_fsave_struct {
	u32			cwd;	/* FPU Control Word		*/
	u32			swd;	/* FPU Status Word		*/
	u32			twd;	/* FPU Tag Word			*/
	u32			fip;	/* FPU IP Offset		*/
	u32			fcs;	/* FPU IP Selector		*/
	u32			foo;	/* FPU Operand Pointer Offset	*/
	u32			fos;	/* FPU Operand Pointer Selector	*/

	/* 8*10 bytes for each FP-reg = 80 bytes: */
	u32			st_space[20];

	/* Software status information [not touched by FSAVE]: */
	u32			status;
};

struct i387_fxsave_struct {
	u16			cwd; /* Control Word			*/
	u16			swd; /* Status Word			*/
	u16			twd; /* Tag Word			*/
	u16			fop; /* Last Instruction Opcode		*/
	union {
		struct {
			u64	rip; /* Instruction Pointer		*/
			u64	rdp; /* Data Pointer			*/
		};
		struct {
			u32	fip; /* FPU IP Offset			*/
			u32	fcs; /* FPU IP Selector			*/
			u32	foo; /* FPU Operand Offset		*/
			u32	fos; /* FPU Operand Selector		*/
		};
	};
	u32			mxcsr;		/* MXCSR Register State */
	u32			mxcsr_mask;	/* MXCSR Mask		*/

	/* 8*16 bytes for each FP-reg = 128 bytes: */
	u32			st_space[32];

	/* 16*16 bytes for each XMM-reg = 256 bytes: */
	u32			xmm_space[64];

	u32			padding[12];

	union {
		u32		padding1[12];
		u32		sw_reserved[12];
	};

} __attribute__((aligned(16)));

struct i387_soft_struct {
	u32			cwd;
	u32			swd;
	u32			twd;
	u32			fip;
	u32			fcs;
	u32			foo;
	u32			fos;
	/* 8*10 bytes for each FP-reg = 80 bytes: */
	u32			st_space[20];
	u8			ftop;
	u8			changed;
	u8			lookahead;
	u8			no_update;
	u8			rm;
	u8			alimit;
	struct math_emu_info	*info;
	u32			entry_eip;
};

struct ymmh_struct {
	/* 16 * 16 bytes for each YMMH-reg = 256 bytes */
	u32 ymmh_space[64];
};

struct xsave_hdr_struct {
	u64 xstate_bv;
	u64 reserved1[2];
	u64 reserved2[5];
} __attribute__((packed));

struct xsave_struct {
	struct i387_fxsave_struct i387;
	struct xsave_hdr_struct xsave_hdr;
	struct ymmh_struct ymmh;
	/* new processor state extensions will go here */
} __attribute__ ((packed, aligned (64)));

union thread_xstate {
	struct i387_fsave_struct	fsave;
	struct i387_fxsave_struct	fxsave;
	struct i387_soft_struct		soft;
	struct xsave_struct		xsave;
};

struct fpu {
	unsigned int last_cpu;
	unsigned int has_fpu;
	union thread_xstate *state;
};

#ifdef CONFIG_X86_64
DECLARE_PER_CPU(struct orig_ist, orig_ist);

union irq_stack_union {
	char irq_stack[IRQ_STACK_SIZE];

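	/*
	 * GCC hardcodes the stack canary as %gs:40.  Since the
	 * irq_stack is the object at %gs:0, we reserve the bottom
	 * 48 bytes of the irq stack for the canary.
	 */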
	struct {
		char gs_base[40];
		unsigned long stack_canary;
	};
};

DECLARE_PER_CPU_FIRST(union irq_stack_union, irq_stack_union) __visible;
DECLARE_INIT_PER_CPU(irq_stack_union);

DECLARE_PER_CPU(char *, irq_stack_ptr);
DECLARE_PER_CPU(unsigned int, irq_count);
extern asmlinkage void ignore_sysret(void);
#else	/* !CONFIG_X86_64 */
#ifdef CONFIG_CC_STACKPROTECTOR

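/*
 * Make sure stack canary segment base is cache-line aligned:
 *   "For Intel Atom processors, avoid non zero segment base address
 *    that is not aligned to cache line boundary at all cost."
 * (Optim Ref Manual Assembly/Compiler Coding Rule 15.)
 */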
struct stack_canary {
	char __pad[20];		/* canary at %gs:20 */
	unsigned long canary;
};
DECLARE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
#endif	/* CONFIG_CC_STACKPROTECTOR */
#endif	/* CONFIG_X86_64 */

extern unsigned int xstate_size;
extern void free_thread_xstate(struct task_struct *);
extern struct kmem_cache *task_xstate_cachep;

struct perf_event;

struct thread_struct {
	/* Cached TLS descriptors: */
	struct desc_struct	tls_array[GDT_ENTRY_TLS_ENTRIES];
	unsigned long		sp0;
	unsigned long		sp;
#ifdef CONFIG_X86_32
	unsigned long		sysenter_cs;
#else
	unsigned long		usersp;	/* Copy from PDA */
	unsigned short		es;
	unsigned short		ds;
	unsigned short		fsindex;
	unsigned short		gsindex;
#endif
#ifdef CONFIG_X86_32
	unsigned long		ip;
#endif
#ifdef CONFIG_X86_64
	unsigned long		fs;
#endif
	unsigned long		gs;

	/* Save middle states of ptrace breakpoints */
	struct perf_event	*ptrace_bps[HBP_NUM];
	/* Debug status used for traps, single steps, etc... */
	unsigned long		debugreg6;
	/* Keep track of the exact dr7 value set by the user */
	unsigned long		ptrace_dr7;
	/* Fault info: */
	unsigned long		cr2;
	unsigned long		trap_nr;
	unsigned long		error_code;
	/* Floating point and extended processor state */
	struct fpu		fpu;
#ifdef CONFIG_X86_32
	/* Virtual 86 mode info */
	struct vm86_struct __user *vm86_info;
	unsigned long		screen_bitmap;
	unsigned long		v86flags;
	unsigned long		v86mask;
	unsigned long		saved_sp0;
	unsigned int		saved_fs;
	unsigned int		saved_gs;
#endif
	/* IO permissions: */
	unsigned long		*io_bitmap_ptr;
	unsigned long		iopl;
	/* Max allowed port in the bitmap, in bytes: */
	unsigned		io_bitmap_max;
};

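/*
 * Set IOPL bits in EFLAGS from given mask
 */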
static inline void native_set_iopl_mask(unsigned mask)
{
#ifdef CONFIG_X86_32
	unsigned int reg;

	asm volatile ("pushfl;"
		      "popl %0;"
		      "andl %1, %0;"
		      "orl %2, %0;"
		      "pushl %0;"
		      "popfl"
		      : "=&r" (reg)
		      : "i" (~X86_EFLAGS_IOPL), "r" (mask));
#endif
}

static inline void
native_load_sp0(struct tss_struct *tss, struct thread_struct *thread)
{
	tss->x86_tss.sp0 = thread->sp0;
#ifdef CONFIG_X86_32
	/* Only happens when SEP is enabled, no need to test "SEP"arately: */
	if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
		tss->x86_tss.ss1 = thread->sysenter_cs;
		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
	}
#endif
}

static inline void native_swapgs(void)
{
#ifdef CONFIG_X86_64
	asm volatile("swapgs" ::: "memory");
#endif
}

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __cpuid			native_cpuid
#define paravirt_enabled()	0

static inline void load_sp0(struct tss_struct *tss,
			    struct thread_struct *thread)
{
	native_load_sp0(tss, thread);
}

#define set_iopl_mask native_set_iopl_mask
#endif /* CONFIG_PARAVIRT */

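/*
 * Save the cr4 feature set we're using (ie
 * Pentium 4MB enable and PPro Global page
 * enable), so that any CPU's that boot up
 * after us can get the correct flags.
 */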
extern unsigned long		mmu_cr4_features;
extern u32			*trampoline_cr4_features;

static inline void set_in_cr4(unsigned long mask)
{
	unsigned long cr4;

	mmu_cr4_features |= mask;
	if (trampoline_cr4_features)
		*trampoline_cr4_features = mmu_cr4_features;
	cr4 = read_cr4();
	cr4 |= mask;
	write_cr4(cr4);
}

static inline void clear_in_cr4(unsigned long mask)
{
	unsigned long cr4;

	mmu_cr4_features &= ~mask;
	if (trampoline_cr4_features)
		*trampoline_cr4_features = mmu_cr4_features;
	cr4 = read_cr4();
	cr4 &= ~mask;
	write_cr4(cr4);
}

typedef struct {
	unsigned long		seg;
} mm_segment_t;

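/* Free all resources held by a thread. */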
extern void release_thread(struct task_struct *);

unsigned long get_wchan(struct task_struct *p);

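/*
 * Generic CPUID function
 * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
 * resulting in stale register contents being returned.
 */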
static inline void cpuid(unsigned int op,
			 unsigned int *eax, unsigned int *ebx,
			 unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = 0;
	__cpuid(eax, ebx, ecx, edx);
}

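/* Some CPUID calls want 'count' to be placed in ecx */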
static inline void cpuid_count(unsigned int op, int count,
			       unsigned int *eax, unsigned int *ebx,
			       unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = count;
	__cpuid(eax, ebx, ecx, edx);
}

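/*
 * CPUID functions returning a single datum
 */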
static inline unsigned int cpuid_eax(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return eax;
}

static inline unsigned int cpuid_ebx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return ebx;
}

static inline unsigned int cpuid_ecx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return ecx;
}

static inline unsigned int cpuid_edx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return edx;
}

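/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */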
static inline void rep_nop(void)
{
	asm volatile("rep; nop" ::: "memory");
}

static inline void cpu_relax(void)
{
	rep_nop();
}

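/* Stop speculative execution and prefetching of modified code. */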
static inline void sync_core(void)
{
	int tmp;

#ifdef CONFIG_M486
	/*
	 * Do a CPUID if available, otherwise do a jump.  The jump
	 * can conveniently enough be the jump around CPUID.
	 */
	asm volatile("cmpl %2,%1\n\t"
		     "jl 1f\n\t"
		     "cpuid\n"
		     "1:"
		     : "=a" (tmp)
		     : "rm" (boot_cpu_data.cpuid_level), "ri" (0), "0" (1)
		     : "ebx", "ecx", "edx", "memory");
#else
	/*
	 * CPUID is a barrier to speculative execution.
	 * Prefetched instructions are automatically
	 * invalidated when modified.
	 */
	asm volatile("cpuid"
		     : "=a" (tmp)
		     : "0" (1)
		     : "ebx", "ecx", "edx", "memory");
#endif
}

static inline void __monitor(const void *eax, unsigned long ecx,
			     unsigned long edx)
{
	/* "monitor %eax, %ecx, %edx;" */
	asm volatile(".byte 0x0f, 0x01, 0xc8;"
		     :: "a" (eax), "c" (ecx), "d"(edx));
}

static inline void __mwait(unsigned long eax, unsigned long ecx)
{
	/* "mwait %eax, %ecx;" */
	asm volatile(".byte 0x0f, 0x01, 0xc9;"
		     :: "a" (eax), "c" (ecx));
}

static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
{
	trace_hardirqs_on();
	/* "mwait %eax, %ecx;" */
	asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
		     :: "a" (eax), "c" (ecx));
}

extern void select_idle_routine(const struct cpuinfo_x86 *c);
extern void init_amd_e400_c1e_mask(void);

extern unsigned long		boot_option_idle_override;
extern bool			amd_e400_c1e_detected;

enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
			 IDLE_POLL};

extern void enable_sep_cpu(void);
extern int sysenter_setup(void);

extern void early_trap_init(void);
void early_trap_pf_init(void);

/* Defined in head.S */
extern struct desc_ptr		early_gdt_descr;

extern void cpu_set_gdt(int);
extern void switch_to_new_gdt(int);
extern void load_percpu_segment(int);
extern void cpu_init(void);

static inline unsigned long get_debugctlmsr(void)
{
	unsigned long debugctlmsr = 0;

#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return 0;
#endif
	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);

	return debugctlmsr;
}

static inline void update_debugctlmsr(unsigned long debugctlmsr)
{
#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return;
#endif
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
}

extern void set_task_blockstep(struct task_struct *task, bool on);

/*
 * from system description table in BIOS. Mostly for MCA use, but
 * others may find it useful:
 */
extern unsigned int		machine_id;
extern unsigned int		machine_submodel_id;
extern unsigned int		BIOS_revision;

/* Boot loader type from the setup header: */
extern int			bootloader_type;
extern int			bootloader_version;

extern char			ignore_fpu_irq;

#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

#ifdef CONFIG_X86_32
# define BASE_PREFETCH		ASM_NOP4
# define ARCH_HAS_PREFETCH
#else
# define BASE_PREFETCH		"prefetcht0 (%1)"
#endif

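/*
 * Prefetch instructions for Pentium III (+) and AMD Athlon (+)
 *
 * It's not worth to care about 3dnow prefetches for the K6
 * because they are microcoded there and very slow.
 */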
static inline void prefetch(const void *x)
{
	alternative_input(BASE_PREFETCH,
			  "prefetchnta (%1)",
			  X86_FEATURE_XMM,
			  "r" (x));
}

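/*
 * 3dnow prefetch to get an exclusive cache line.
 * Useful for spinlocks to avoid one state transition in the
 * cache coherency protocol:
 */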
static inline void prefetchw(const void *x)
{
	alternative_input(BASE_PREFETCH,
			  "prefetchw (%1)",
			  X86_FEATURE_3DNOW,
			  "r" (x));
}

static inline void spin_lock_prefetch(const void *x)
{
	prefetchw(x);
}

#ifdef CONFIG_X86_32
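/*
 * User space process size: 3GB (default).
 */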
#define TASK_SIZE		PAGE_OFFSET
#define TASK_SIZE_MAX		TASK_SIZE
#define STACK_TOP		TASK_SIZE
#define STACK_TOP_MAX		STACK_TOP

#define INIT_THREAD  { \
	.sp0			= sizeof(init_stack) + (long)&init_stack, \
	.vm86_info		= NULL, \
	.sysenter_cs		= __KERNEL_CS, \
	.io_bitmap_ptr		= NULL, \
}

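/*
 * Note that the .io_bitmap member must be extra-big. This is because
 * the CPU will access an additional byte beyond the end of the IO
 * permission bitmap. The extra byte must be all 1 bits, and must
 * be within the limit.
 */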
#define INIT_TSS  { \
	.x86_tss = { \
		.sp0		= sizeof(init_stack) + (long)&init_stack, \
		.ss0		= __KERNEL_DS, \
		.ss1		= __KERNEL_CS, \
		.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET, \
	}, \
	.io_bitmap		= { [0 ... IO_BITMAP_LONGS] = ~0 }, \
}

extern unsigned long thread_saved_pc(struct task_struct *tsk);

#define THREAD_SIZE_LONGS	(THREAD_SIZE/sizeof(unsigned long))
#define KSTK_TOP(info) \
({ \
	unsigned long *__ptr = (unsigned long *)(info); \
	(unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
})

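/*
 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
 * This is necessary to guarantee that the entire "struct pt_regs"
 * is accessible even if the CPU haven't stored the SS/ESP registers
 * on the stack (interrupt gate does not save these registers
 * when switching to the same priv ring).
 * Therefore beware: accessing the ss/esp fields of the
 * "struct pt_regs" is possible, but they may contain the
 * wrong values.
 */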
#define task_pt_regs(task) \
({ \
	struct pt_regs *__regs__; \
	__regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
	__regs__ - 1; \
})

#define KSTK_ESP(task)		(task_pt_regs(task)->sp)

#else
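/*
 * User space process size. 47bits minus one guard page.
 */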
#define TASK_SIZE_MAX	((1UL << 47) - PAGE_SIZE)

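/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */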
#define IA32_PAGE_OFFSET	((current->personality & ADDR_LIMIT_3GB) ? \
					0xc0000000 : 0xFFFFe000)

#define TASK_SIZE		(test_thread_flag(TIF_ADDR32) ? \
					IA32_PAGE_OFFSET : TASK_SIZE_MAX)
#define TASK_SIZE_OF(child)	((test_tsk_thread_flag(child, TIF_ADDR32)) ? \
					IA32_PAGE_OFFSET : TASK_SIZE_MAX)

#define STACK_TOP		TASK_SIZE
#define STACK_TOP_MAX		TASK_SIZE_MAX

#define INIT_THREAD  { \
	.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
}

#define INIT_TSS  { \
	.x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
}

/*
 * Return saved PC of a blocked thread.
 * What is this good for? it will be always the scheduler or ret_from_fork.
 */
#define thread_saved_pc(t)	(*(unsigned long *)((t)->thread.sp - 8))

#define task_pt_regs(tsk)	((struct pt_regs *)(tsk)->thread.sp0 - 1)
extern unsigned long KSTK_ESP(struct task_struct *task);

/*
 * User space RSP while inside the SYSCALL fast path
 */
DECLARE_PER_CPU(unsigned long, old_rsp);

#endif /* CONFIG_X86_64 */

extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
					       unsigned long new_sp);

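/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */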
#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3))

#define KSTK_EIP(task)		(task_pt_regs(task)->ip)

/* Get/set a process' ability to use the timestamp counter instruction */
#define GET_TSC_CTL(adr)	get_tsc_mode((adr))
#define SET_TSC_CTL(val)	set_tsc_mode((val))

extern int get_tsc_mode(unsigned long adr);
extern int set_tsc_mode(unsigned int val);

extern u16 amd_get_nb_id(int cpu);

static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
{
	uint32_t base, eax, signature[3];

	for (base = 0x40000000; base < 0x40010000; base += 0x100) {
		cpuid(base, &eax, &signature[0], &signature[1], &signature[2]);

		if (!memcmp(sig, signature, 12) &&
		    (leaves == 0 || ((eax - base) >= leaves)))
			return base;
	}

	return 0;
}

extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);

void default_idle(void);
#ifdef CONFIG_XEN
bool xen_set_default_idle(void);
#else
#define xen_set_default_idle 0
#endif

void stop_this_cpu(void *dummy);
void df_debug(struct pt_regs *regs, long error_code);
#endif /* _ASM_X86_PROCESSOR_H */