#ifndef _ASM_X86_PARAVIRT_TYPES_H
#define _ASM_X86_PARAVIRT_TYPES_H

/*
 * Register-clobber bitmasks.  Every paravirt patch site records which
 * registers the call there is allowed to clobber (see the "clobber"
 * operands below and struct paravirt_patch_site); the patcher uses the
 * mask when replacing the indirect call with other code.
 */
#define CLBR_NONE 0
#define CLBR_EAX  (1 << 0)
#define CLBR_ECX  (1 << 1)
#define CLBR_EDX  (1 << 2)
#define CLBR_EDI  (1 << 3)

#ifdef CONFIG_X86_32
/* 32-bit: only the four registers above are ever clobberable. */
#define CLBR_ANY  ((1 << 4) - 1)

/* eax/edx/ecx carry arguments; a 64-bit result comes back in edx:eax. */
#define CLBR_ARG_REGS	(CLBR_EAX | CLBR_EDX | CLBR_ECX)
#define CLBR_RET_REG	(CLBR_EAX | CLBR_EDX)
#define CLBR_SCRATCH	(0)
#else
/* 64-bit: the CLBR_E* names alias their 64-bit counterparts. */
#define CLBR_RAX  CLBR_EAX
#define CLBR_RCX  CLBR_ECX
#define CLBR_RDX  CLBR_EDX
#define CLBR_RDI  CLBR_EDI
#define CLBR_RSI  (1 << 4)
#define CLBR_R8   (1 << 5)
#define CLBR_R9   (1 << 6)
#define CLBR_R10  (1 << 7)
#define CLBR_R11  (1 << 8)

#define CLBR_ANY  ((1 << 9) - 1)

/* Argument registers per the SysV AMD64 convention; rax carries the
 * return value; r10/r11 are extra caller-saved scratch. */
#define CLBR_ARG_REGS	(CLBR_RDI | CLBR_RSI | CLBR_RDX | \
			 CLBR_RCX | CLBR_R8 | CLBR_R9)
#define CLBR_RET_REG	(CLBR_RAX)
#define CLBR_SCRATCH	(CLBR_R10 | CLBR_R11)

#endif

/*
 * Registers a "callee-save" paravirt function must preserve: all the
 * normally caller-saved registers (args + scratch) except the return
 * register(s).  See struct paravirt_callee_save below.
 */
#define CLBR_CALLEE_SAVE ((CLBR_ARG_REGS | CLBR_SCRATCH) & ~CLBR_RET_REG)

#ifndef __ASSEMBLY__

#include <asm/desc_defs.h>
#include <asm/kmap_types.h>
#include <asm/pgtable_types.h>

/* Opaque types: the op tables below only traffic in pointers to these. */
struct page;
struct thread_struct;
struct desc_ptr;
struct tss_struct;
struct mm_struct;
struct desc_struct;
struct task_struct;
struct cpumask;

/*
 * Wrapper type for a pointer to code that follows the "callee-save"
 * paravirt calling convention: the function may clobber only the
 * return register(s) (see CLBR_CALLEE_SAVE and __PVOP_CALLEESAVE,
 * which passes CLBR_RET_REG as the clobber mask).  The C type of the
 * wrapped function is deliberately erased so one struct fits all
 * signatures; ops of this type must be invoked via the PVOP_*CALLEE*
 * macros, never directly.
 */
struct paravirt_callee_save {
	void *func;
};


/* General identification and capability info for the paravirt backend. */
struct pv_info {
	unsigned int kernel_rpl;	/* NOTE(review): presumably the RPL used
					 * for kernel segment selectors -- confirm
					 * against the consumers in paravirt.c */
	int shared_kernel_pmd;

#ifdef CONFIG_X86_64
	u16 extra_user_64bit_cs;	/* NOTE(review): an additional user %cs
					 * value to treat as 64-bit -- verify */
#endif

	int paravirt_enabled;		/* nonzero when a paravirt backend is active */
	unsigned int features;		/* bitmask of PV_SUPPORTED_* bits below */
	const char *name;		/* backend name */
};

/* Test a PV_SUPPORTED_* bit in pv_info.features.
 * (paravirt_has_feature() is declared elsewhere.) */
#define paravirt_has(x) paravirt_has_feature(PV_SUPPORTED_##x)

#define PV_SUPPORTED_RTC (1<<0)

struct pv_init_ops {
	/*
	 * Patch may replace one of the defined code sequences with
	 * arbitrary code, avoiding the indirect call.  @type is the
	 * PARAVIRT_PATCH() index of the op being patched, @clobber the
	 * CLBR_* mask the site permits, @insnbuf/@len the buffer to
	 * write into, and @addr the site's address.  Returns the number
	 * of instruction bytes actually emitted (at most @len); the
	 * caller presumably pads the remainder -- see the
	 * paravirt_patch_* helpers declared later in this file.
	 */
	unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
			  unsigned long addr, unsigned len);
};

/*
 * Hooks bracketing a batched-update ("lazy") region; flush pushes any
 * pending batched operations out without leaving the mode (cf. the
 * paravirt_*_lazy_mmu() prototypes and enum paravirt_lazy_mode below).
 */
struct pv_lazy_ops {
	void (*enter)(void);
	void (*leave)(void);
	void (*flush)(void);
};

/* Time-related paravirt hooks. */
struct pv_time_ops {
	unsigned long long (*sched_clock)(void);	/* scheduler clock source */
	unsigned long long (*steal_clock)(int cpu);	/* NOTE(review): presumably
							 * time stolen from this
							 * vCPU by the host */
};

/* Hooks for privileged CPU instructions and state. */
struct pv_cpu_ops {
	/* Debug register access */
	unsigned long (*get_debugreg)(int regno);
	void (*set_debugreg)(int regno, unsigned long value);

	void (*clts)(void);

	/* Control register access */
	unsigned long (*read_cr0)(void);
	void (*write_cr0)(unsigned long);

	unsigned long (*read_cr4_safe)(void);	/* NOTE(review): "safe" variant --
						 * presumably tolerates CPUs
						 * lacking CR4; confirm */
	unsigned long (*read_cr4)(void);
	void (*write_cr4)(unsigned long);

#ifdef CONFIG_X86_64
	unsigned long (*read_cr8)(void);
	void (*write_cr8)(unsigned long);
#endif

	/* Segment descriptor handling */
	void (*load_tr_desc)(void);
	void (*load_gdt)(const struct desc_ptr *);
	void (*load_idt)(const struct desc_ptr *);

	void (*store_idt)(struct desc_ptr *);
	void (*set_ldt)(const void *desc, unsigned entries);
	unsigned long (*store_tr)(void);
	void (*load_tls)(struct thread_struct *t, unsigned int cpu);
#ifdef CONFIG_X86_64
	void (*load_gs_index)(unsigned int idx);
#endif
	void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum,
				const void *desc);
	void (*write_gdt_entry)(struct desc_struct *,
				int entrynum, const void *desc, int size);
	void (*write_idt_entry)(gate_desc *,
				int entrynum, const gate_desc *gate);
	void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries);
	void (*free_ldt)(struct desc_struct *ldt, unsigned entries);

	void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t);

	void (*set_iopl_mask)(unsigned mask);

	void (*wbinvd)(void);
	void (*io_delay)(void);

	/* cpuid emulation hook */
	void (*cpuid)(unsigned int *eax, unsigned int *ebx,
		      unsigned int *ecx, unsigned int *edx);

	/* MSR and performance-counter access.
	 * NOTE(review): the *err convention isn't visible in this header --
	 * confirm in the native/Xen implementations. */
	u64 (*read_msr)(unsigned int msr, int *err);
	int (*write_msr)(unsigned int msr, unsigned low, unsigned high);

	u64 (*read_pmc)(int counter);

	/* 64-bit syscall return path.
	 * NOTE(review): name suggests "swap to user gs, then sysret" --
	 * confirm against the asm implementations. */
	void (*usergs_sysret64)(void);

	/* Normal iret */
	void (*iret)(void);

	void (*swapgs)(void);

	/* Context-switch bracketing hooks (cf. pv_lazy_ops batching). */
	void (*start_context_switch)(struct task_struct *prev);
	void (*end_context_switch)(struct task_struct *next);
};

/* Interrupt-flag and halt hooks. */
struct pv_irq_ops {
	/*
	 * Get/set the interrupt-enable state.  These four are
	 * paravirt_callee_save ops, i.e. call sites only need to
	 * consider the return register(s) clobbered (CLBR_RET_REG),
	 * which keeps the inline call sites cheap.
	 * NOTE(review): presumably save_fl/restore_fl deal only in the
	 * EFLAGS.IF bit -- confirm against the implementations.
	 */
	struct paravirt_callee_save save_fl;
	struct paravirt_callee_save restore_fl;
	struct paravirt_callee_save irq_disable;
	struct paravirt_callee_save irq_enable;

	void (*safe_halt)(void);	/* NOTE(review): presumably
					 * "enable interrupts and halt" */
	void (*halt)(void);

#ifdef CONFIG_X86_64
	void (*adjust_exception_frame)(void);
#endif
};

206struct pv_mmu_ops {
207 unsigned long (*read_cr2)(void);
208 void (*write_cr2)(unsigned long);
209
210 unsigned long (*read_cr3)(void);
211 void (*write_cr3)(unsigned long);
212
213
214
215
216
217 void (*activate_mm)(struct mm_struct *prev,
218 struct mm_struct *next);
219 void (*dup_mmap)(struct mm_struct *oldmm,
220 struct mm_struct *mm);
221 void (*exit_mmap)(struct mm_struct *mm);
222
223
224
225 void (*flush_tlb_user)(void);
226 void (*flush_tlb_kernel)(void);
227 void (*flush_tlb_single)(unsigned long addr);
228 void (*flush_tlb_others)(const struct cpumask *cpus,
229 struct mm_struct *mm,
230 unsigned long start,
231 unsigned long end);
232
233
234 int (*pgd_alloc)(struct mm_struct *mm);
235 void (*pgd_free)(struct mm_struct *mm, pgd_t *pgd);
236
237
238
239
240
241 void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn);
242 void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn);
243 void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn);
244 void (*release_pte)(unsigned long pfn);
245 void (*release_pmd)(unsigned long pfn);
246 void (*release_pud)(unsigned long pfn);
247
248
249 void (*set_pte)(pte_t *ptep, pte_t pteval);
250 void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
251 pte_t *ptep, pte_t pteval);
252 void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
253 void (*set_pmd_at)(struct mm_struct *mm, unsigned long addr,
254 pmd_t *pmdp, pmd_t pmdval);
255 void (*pte_update)(struct mm_struct *mm, unsigned long addr,
256 pte_t *ptep);
257
258 pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr,
259 pte_t *ptep);
260 void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr,
261 pte_t *ptep, pte_t pte);
262
263 struct paravirt_callee_save pte_val;
264 struct paravirt_callee_save make_pte;
265
266 struct paravirt_callee_save pgd_val;
267 struct paravirt_callee_save make_pgd;
268
269#if CONFIG_PGTABLE_LEVELS >= 3
270#ifdef CONFIG_X86_PAE
271 void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
272 void (*pte_clear)(struct mm_struct *mm, unsigned long addr,
273 pte_t *ptep);
274 void (*pmd_clear)(pmd_t *pmdp);
275
276#endif
277
278 void (*set_pud)(pud_t *pudp, pud_t pudval);
279
280 struct paravirt_callee_save pmd_val;
281 struct paravirt_callee_save make_pmd;
282
283#if CONFIG_PGTABLE_LEVELS == 4
284 struct paravirt_callee_save pud_val;
285 struct paravirt_callee_save make_pud;
286
287 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
288#endif
289#endif
290
291 struct pv_lazy_ops lazy_mode;
292
293
294
295
296
297 void (*set_fixmap)(unsigned idx,
298 phys_addr_t phys, pgprot_t flags);
299};
300
struct arch_spinlock;
#ifdef CONFIG_SMP
#include <asm/spinlock_types.h>
#else
/* UP build: spinlock_types.h isn't pulled in, so supply the ticket type
 * used by the !QUEUED_SPINLOCKS branch below. */
typedef u16 __ticket_t;
#endif

struct qspinlock;

/* Spinlock hooks; layout depends on which lock implementation is built. */
struct pv_lock_ops {
#ifdef CONFIG_QUEUED_SPINLOCKS
	void (*queued_spin_lock_slowpath)(struct qspinlock *lock, u32 val);
	struct paravirt_callee_save queued_spin_unlock;	/* callee-save: keeps the
							 * unlock fast path cheap */

	void (*wait)(u8 *ptr, u8 val);	/* NOTE(review): presumably "block this
					 * vCPU while *ptr == val" -- confirm */
	void (*kick)(int cpu);		/* wake a waiting cpu */
#else /* !CONFIG_QUEUED_SPINLOCKS */
	/* Ticket-lock variants. */
	struct paravirt_callee_save lock_spinning;
	void (*unlock_kick)(struct arch_spinlock *lock, __ticket_t ticket);
#endif /* !CONFIG_QUEUED_SPINLOCKS */
};


/*
 * This aggregates all the op tables into one struct so that each op has
 * a unique number: its word offset within this template, computed by
 * PARAVIRT_PATCH().  The member order must therefore stay in sync with
 * the individual pv_*_ops instances declared below.
 */
struct paravirt_patch_template {
	struct pv_init_ops pv_init_ops;
	struct pv_time_ops pv_time_ops;
	struct pv_cpu_ops pv_cpu_ops;
	struct pv_irq_ops pv_irq_ops;
	struct pv_mmu_ops pv_mmu_ops;
	struct pv_lock_ops pv_lock_ops;
};

/* The live op tables, defined in C elsewhere. */
extern struct pv_info pv_info;
extern struct pv_init_ops pv_init_ops;
extern struct pv_time_ops pv_time_ops;
extern struct pv_cpu_ops pv_cpu_ops;
extern struct pv_irq_ops pv_irq_ops;
extern struct pv_mmu_ops pv_mmu_ops;
extern struct pv_lock_ops pv_lock_ops;

/* Unique patch-type number of an op: its pointer-sized-word index
 * within struct paravirt_patch_template. */
#define PARAVIRT_PATCH(x)					\
	(offsetof(struct paravirt_patch_template, x) / sizeof(void *))

/* Named asm input operands identifying an op: its type number and the
 * address of its slot in the op table. */
#define paravirt_type(op)				\
	[paravirt_typenum] "i" (PARAVIRT_PATCH(op)),	\
	[paravirt_opptr] "i" (&(op))
#define paravirt_clobber(clobber)		\
	[paravirt_clobber] "i" (clobber)

/*
 * Emit a patchable instruction sequence: the instructions between local
 * labels 771 and 772, plus a record (site address, type byte, length
 * byte, clobber short) appended to the .parainstructions section --
 * exactly the layout of struct paravirt_patch_site at the end of this
 * file, which the patcher iterates over.
 */
#define _paravirt_alt(insn_string, type, clobber)	\
	"771:\n\t" insn_string "\n" "772:\n"		\
	".pushsection .parainstructions,\"a\"\n"	\
	_ASM_ALIGN "\n"					\
	_ASM_PTR " 771b\n"				\
	" .byte " type "\n"				\
	" .byte 772b-771b\n"				\
	" .short " clobber "\n"				\
	".popsection\n"

/* Generate patchable code with the standard named asm operands. */
#define paravirt_alt(insn_string)					\
	_paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")

/* Emit a global asm label "<a><x>_<b>". */
#define NATIVE_LABEL(a,x,b) "\n\t.globl " a #x "_" #b "\n" a #x "_" #b ":\n\t"

/* Define start_<ops>_<name>[]/end_<ops>_<name>[] bracketing a native
 * instruction sequence, so the patcher can copy it over a call site. */
#define DEF_NATIVE(ops, name, code)					\
	__visible extern const char start_##ops##_##name[], end_##ops##_##name[];	\
	asm(NATIVE_LABEL("start_", ops, name) code NATIVE_LABEL("end_", ops, name))

/*
 * Patching helpers (implemented elsewhere).  Each writes replacement
 * instructions into @insnbuf and returns the number of bytes emitted.
 */
unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len);
unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len);
unsigned paravirt_patch_call(void *insnbuf,
			     const void *target, u16 tgt_clobbers,
			     unsigned long addr, u16 site_clobbers,
			     unsigned len);
unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
			    unsigned long addr, unsigned len);
unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
				unsigned long addr, unsigned len);

/* Copy a DEF_NATIVE()-style [start, end) sequence into @insnbuf. */
unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
			      const char *start, const char *end);

unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
		      unsigned long addr, unsigned len);

int paravirt_disable_iospace(void);

/*
 * The unpatched default for every site: an indirect call through the
 * op-table slot.  %c[paravirt_opptr] expands to the slot's address as
 * an absolute immediate (see paravirt_type()).
 */
#define PARAVIRT_CALL	"call *%c[paravirt_opptr];"


/*
 * Argument marshalling for PVOP call sites.
 *
 * The self-assignments (__eax = __eax, ...) declare the result-holding
 * locals without initializing them; NOTE(review): presumably this is
 * the usual GCC idiom to suppress "may be used uninitialized" warnings
 * for pure-output asm operands -- confirm.  __sp is bound to the stack
 * pointer and passed as "+r" at each call site so the compiler knows
 * the stack is read/written across the call.
 */
#ifdef CONFIG_X86_32
/* 32-bit: up to three register arguments in eax/edx/ecx; a 64-bit
 * result is returned in edx:eax. */
#define PVOP_VCALL_ARGS							\
	unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx;	\
	register void *__sp asm("esp")
#define PVOP_CALL_ARGS			PVOP_VCALL_ARGS

#define PVOP_CALL_ARG1(x)		"a" ((unsigned long)(x))
#define PVOP_CALL_ARG2(x)		"d" ((unsigned long)(x))
#define PVOP_CALL_ARG3(x)		"c" ((unsigned long)(x))

#define PVOP_VCALL_CLOBBERS		"=a" (__eax), "=d" (__edx),	\
					"=c" (__ecx)
#define PVOP_CALL_CLOBBERS		PVOP_VCALL_CLOBBERS

/* Callee-save calls clobber only the return registers. */
#define PVOP_VCALLEE_CLOBBERS		"=a" (__eax), "=d" (__edx)
#define PVOP_CALLEE_CLOBBERS		PVOP_VCALLEE_CLOBBERS

#define EXTRA_CLOBBERS
#define VEXTRA_CLOBBERS
#else
/* 64-bit: up to four register arguments in rdi/rsi/rdx/rcx (SysV
 * order); rax carries the return value; r8-r11 are additional
 * caller-saved registers listed via EXTRA_CLOBBERS. */
#define PVOP_VCALL_ARGS						\
	unsigned long __edi = __edi, __esi = __esi,		\
		__edx = __edx, __ecx = __ecx, __eax = __eax;	\
	register void *__sp asm("rsp")
#define PVOP_CALL_ARGS		PVOP_VCALL_ARGS

#define PVOP_CALL_ARG1(x)		"D" ((unsigned long)(x))
#define PVOP_CALL_ARG2(x)		"S" ((unsigned long)(x))
#define PVOP_CALL_ARG3(x)		"d" ((unsigned long)(x))
#define PVOP_CALL_ARG4(x)		"c" ((unsigned long)(x))

#define PVOP_VCALL_CLOBBERS	"=D" (__edi),				\
				"=S" (__esi), "=d" (__edx),		\
				"=c" (__ecx)
#define PVOP_CALL_CLOBBERS	PVOP_VCALL_CLOBBERS, "=a" (__eax)

/* Callee-save calls clobber only rax. */
#define PVOP_VCALLEE_CLOBBERS	"=a" (__eax)
#define PVOP_CALLEE_CLOBBERS	PVOP_VCALLEE_CLOBBERS

#define EXTRA_CLOBBERS	 , "r8", "r9", "r10", "r11"
#define VEXTRA_CLOBBERS	 , "rax", "r8", "r9", "r10", "r11"
#endif

#ifdef CONFIG_PARAVIRT_DEBUG
#define PVOP_TEST_NULL(op)	BUG_ON(op == NULL)
#else
#define PVOP_TEST_NULL(op)	((void)op)	/* op still evaluated */
#endif


/*
 * Core of a value-returning PVOP call: emits a patchable call site
 * (paravirt_alt(PARAVIRT_CALL)) wrapped in @pre/@post asm, with the
 * given clobber mask and constraint lists, and converts the raw
 * register result to @rettype.  A return type wider than a long is
 * reassembled from edx:eax -- that branch is 32-bit specific, but
 * harmless on 64-bit since the condition can never hold there.
 */
#define ____PVOP_CALL(rettype, op, clbr, call_clbr, extra_clbr,		\
		      pre, post, ...)					\
	({								\
		rettype __ret;						\
		PVOP_CALL_ARGS;						\
		PVOP_TEST_NULL(op);					\
		if (sizeof(rettype) > sizeof(unsigned long)) {		\
			asm volatile(pre				\
				     paravirt_alt(PARAVIRT_CALL)	\
				     post				\
				     : call_clbr, "+r" (__sp)		\
				     : paravirt_type(op),		\
				       paravirt_clobber(clbr),		\
				       ##__VA_ARGS__			\
				     : "memory", "cc" extra_clbr);	\
			__ret = (rettype)((((u64)__edx) << 32) | __eax); \
		} else {						\
			asm volatile(pre				\
				     paravirt_alt(PARAVIRT_CALL)	\
				     post				\
				     : call_clbr, "+r" (__sp)		\
				     : paravirt_type(op),		\
				       paravirt_clobber(clbr),		\
				       ##__VA_ARGS__			\
				     : "memory", "cc" extra_clbr);	\
			__ret = (rettype)__eax;				\
		}							\
		__ret;							\
	})


/* Ordinary call: any register in CLBR_ANY may be clobbered. */
#define __PVOP_CALL(rettype, op, pre, post, ...)			\
	____PVOP_CALL(rettype, op, CLBR_ANY, PVOP_CALL_CLOBBERS,	\
		      EXTRA_CLOBBERS, pre, post, ##__VA_ARGS__)

/* Callee-save call (op is a struct paravirt_callee_save): only the
 * return register(s) are clobbered, so no extra clobber list. */
#define __PVOP_CALLEESAVE(rettype, op, pre, post, ...)			\
	____PVOP_CALL(rettype, op.func, CLBR_RET_REG,			\
		      PVOP_CALLEE_CLOBBERS, ,				\
		      pre, post, ##__VA_ARGS__)


/* Core of a void PVOP call: same patchable call site as ____PVOP_CALL
 * but with no return-value handling. */
#define ____PVOP_VCALL(op, clbr, call_clbr, extra_clbr, pre, post, ...)	\
	({								\
		PVOP_VCALL_ARGS;					\
		PVOP_TEST_NULL(op);					\
		asm volatile(pre					\
			     paravirt_alt(PARAVIRT_CALL)		\
			     post					\
			     : call_clbr, "+r" (__sp)			\
			     : paravirt_type(op),			\
			       paravirt_clobber(clbr),			\
			       ##__VA_ARGS__				\
			     : "memory", "cc" extra_clbr);		\
	})

/* Ordinary void call: anything may be clobbered. */
#define __PVOP_VCALL(op, pre, post, ...)				\
	____PVOP_VCALL(op, CLBR_ANY, PVOP_VCALL_CLOBBERS,		\
		       VEXTRA_CLOBBERS,					\
		       pre, post, ##__VA_ARGS__)

/* Callee-save void call: only the return register(s) clobbered. */
#define __PVOP_VCALLEESAVE(op, pre, post, ...)				\
	____PVOP_VCALL(op.func, CLBR_RET_REG,				\
		       PVOP_VCALLEE_CLOBBERS, ,				\
		       pre, post, ##__VA_ARGS__)


/*
 * Arity-specific wrappers.  PVOP_CALLn returns a value, PVOP_VCALLn is
 * void; the CALLEE/VCALLEE variants use the callee-save convention.
 */
#define PVOP_CALL0(rettype, op)						\
	__PVOP_CALL(rettype, op, "", "")
#define PVOP_VCALL0(op)							\
	__PVOP_VCALL(op, "", "")

#define PVOP_CALLEE0(rettype, op)					\
	__PVOP_CALLEESAVE(rettype, op, "", "")
#define PVOP_VCALLEE0(op)						\
	__PVOP_VCALLEESAVE(op, "", "")


#define PVOP_CALL1(rettype, op, arg1)					\
	__PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
#define PVOP_VCALL1(op, arg1)						\
	__PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1))

#define PVOP_CALLEE1(rettype, op, arg1)					\
	__PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
#define PVOP_VCALLEE1(op, arg1)						\
	__PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1))


#define PVOP_CALL2(rettype, op, arg1, arg2)				\
	__PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1),		\
		    PVOP_CALL_ARG2(arg2))
#define PVOP_VCALL2(op, arg1, arg2)					\
	__PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1),			\
		     PVOP_CALL_ARG2(arg2))

#define PVOP_CALLEE2(rettype, op, arg1, arg2)				\
	__PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1),	\
			  PVOP_CALL_ARG2(arg2))
#define PVOP_VCALLEE2(op, arg1, arg2)					\
	__PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1),		\
			   PVOP_CALL_ARG2(arg2))


#define PVOP_CALL3(rettype, op, arg1, arg2, arg3)			\
	__PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1),		\
		    PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
#define PVOP_VCALL3(op, arg1, arg2, arg3)				\
	__PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1),			\
		     PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))

#ifdef CONFIG_X86_32
/* 32-bit passes only three args in registers: the fourth is pushed on
 * the stack before the call (pre) and discarded afterwards by bumping
 * %esp (post).  The "0"/"1"/"2" constraints tie arg1-3 to the same
 * registers as the corresponding outputs. */
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)			\
	__PVOP_CALL(rettype, op,					\
		    "push %[_arg4];", "lea 4(%%esp),%%esp;",		\
		    PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),		\
		    PVOP_CALL_ARG3(arg3), [_arg4] "mr" ((u32)(arg4)))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)				\
	__PVOP_VCALL(op,						\
		     "push %[_arg4];", "lea 4(%%esp),%%esp;",		\
		     "0" ((u32)(arg1)), "1" ((u32)(arg2)),		\
		     "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
#else
/* 64-bit: all four args fit in registers. */
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)			\
	__PVOP_CALL(rettype, op, "", "",				\
		    PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),		\
		    PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)				\
	__PVOP_VCALL(op, "", "",					\
		     PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),	\
		     PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
#endif


/* Lazy-mode state for batching updates (cf. struct pv_lazy_ops). */
enum paravirt_lazy_mode {
	PARAVIRT_LAZY_NONE,
	PARAVIRT_LAZY_MMU,
	PARAVIRT_LAZY_CPU,
};

enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
void paravirt_start_context_switch(struct task_struct *prev);
void paravirt_end_context_switch(struct task_struct *next);

void paravirt_enter_lazy_mmu(void);
void paravirt_leave_lazy_mmu(void);
void paravirt_flush_lazy_mmu(void);

/* Trivial implementations usable as op-table defaults: do nothing, or
 * return the argument unchanged. */
void _paravirt_nop(void);
u32 _paravirt_ident_32(u32);
u64 _paravirt_ident_64(u64);

#define paravirt_nop	((void *)_paravirt_nop)

/*
 * One record per patchable call site; emitted into .parainstructions
 * by _paravirt_alt() (field layout must match the .byte/.short
 * directives there).
 */
struct paravirt_patch_site {
	u8 *instr;		/* address of the call site */
	u8 instrtype;		/* PARAVIRT_PATCH() type number */
	u8 len;			/* length of the original instructions */
	u16 clobbers;		/* CLBR_* mask the site permits */
};

/* Section bounds of the .parainstructions table. */
extern struct paravirt_patch_site __parainstructions[],
	__parainstructions_end[];

#endif	/* __ASSEMBLY__ */

#endif	/* _ASM_X86_PARAVIRT_TYPES_H */
