#ifndef _ASM_X86_PARAVIRT_H
#define _ASM_X86_PARAVIRT_H
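/* Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here. */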

#ifdef CONFIG_PARAVIRT
#include <asm/pgtable_types.h>
#include <asm/asm.h>

#include <asm/paravirt_types.h>

#ifndef __ASSEMBLY__
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/cpumask.h>

static inline int paravirt_enabled(void)
{
	return pv_info.paravirt_enabled;
}

static inline void load_sp0(struct tss_struct *tss,
			    struct thread_struct *thread)
{
	PVOP_VCALL2(pv_cpu_ops.load_sp0, tss, thread);
}

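/* The paravirtualized CPUID instruction. */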
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
			   unsigned int *ecx, unsigned int *edx)
{
	PVOP_VCALL4(pv_cpu_ops.cpuid, eax, ebx, ecx, edx);
}

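/* These special macros can be used to get or set a debugging register. */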
static inline unsigned long paravirt_get_debugreg(int reg)
{
	return PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, reg);
}
#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
static inline void set_debugreg(unsigned long val, int reg)
{
	PVOP_VCALL2(pv_cpu_ops.set_debugreg, reg, val);
}

static inline void clts(void)
{
	PVOP_VCALL0(pv_cpu_ops.clts);
}

static inline unsigned long read_cr0(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0);
}

static inline void write_cr0(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr0, x);
}

static inline unsigned long read_cr2(void)
{
	return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
}

static inline void write_cr2(unsigned long x)
{
	PVOP_VCALL1(pv_mmu_ops.write_cr2, x);
}

static inline unsigned long read_cr3(void)
{
	return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr3);
}

static inline void write_cr3(unsigned long x)
{
	PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
}

static inline unsigned long read_cr4(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4);
}

static inline unsigned long read_cr4_safe(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4_safe);
}

static inline void write_cr4(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
}

#ifdef CONFIG_X86_64
static inline unsigned long read_cr8(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr8);
}

static inline void write_cr8(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr8, x);
}
#endif

static inline void arch_safe_halt(void)
{
	PVOP_VCALL0(pv_irq_ops.safe_halt);
}

static inline void halt(void)
{
	PVOP_VCALL0(pv_irq_ops.halt);
}

static inline void wbinvd(void)
{
	PVOP_VCALL0(pv_cpu_ops.wbinvd);
}

#define get_kernel_rpl() (pv_info.kernel_rpl)

static inline u64 paravirt_read_msr(unsigned msr, int *err)
{
	return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
}

static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
{
	return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
}

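/* These should all do BUG_ON(_err), but our headers are too tangled. */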
#define rdmsr(msr, val1, val2)			\
do {						\
	int _err;				\
	u64 _l = paravirt_read_msr(msr, &_err);	\
	val1 = (u32)_l;				\
	val2 = _l >> 32;			\
} while (0)

#define wrmsr(msr, val1, val2)			\
do {						\
	paravirt_write_msr(msr, val1, val2);	\
} while (0)

#define rdmsrl(msr, val)			\
do {						\
	int _err;				\
	val = paravirt_read_msr(msr, &_err);	\
} while (0)

#define wrmsrl(msr, val)	wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32)
#define wrmsr_safe(msr, a, b)	paravirt_write_msr(msr, a, b)

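/* rdmsr with exception handling */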
#define rdmsr_safe(msr, a, b)			\
({						\
	int _err;				\
	u64 _l = paravirt_read_msr(msr, &_err);	\
	(*a) = (u32)_l;				\
	(*b) = _l >> 32;			\
	_err;					\
})

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
	int err;

	*p = paravirt_read_msr(msr, &err);
	return err;
}

static inline u64 paravirt_read_tsc(void)
{
	return PVOP_CALL0(u64, pv_cpu_ops.read_tsc);
}

#define rdtscl(low)				\
do {						\
	u64 _l = paravirt_read_tsc();		\
	low = (int)_l;				\
} while (0)

#define rdtscll(val) (val = paravirt_read_tsc())

static inline unsigned long long paravirt_sched_clock(void)
{
	return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
}

struct static_key;
extern struct static_key paravirt_steal_enabled;
extern struct static_key paravirt_steal_rq_enabled;

static inline u64 paravirt_steal_clock(int cpu)
{
	return PVOP_CALL1(u64, pv_time_ops.steal_clock, cpu);
}

static inline unsigned long long paravirt_read_pmc(int counter)
{
	return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
}

#define rdpmc(counter, low, high)		\
do {						\
	u64 _l = paravirt_read_pmc(counter);	\
	low = (u32)_l;				\
	high = _l >> 32;			\
} while (0)

#define rdpmcl(counter, val) ((val) = paravirt_read_pmc(counter))

static inline unsigned long long paravirt_rdtscp(unsigned int *aux)
{
	return PVOP_CALL1(u64, pv_cpu_ops.read_tscp, aux);
}

#define rdtscp(low, high, aux)				\
do {							\
	unsigned int __aux;				\
	unsigned long long __val = paravirt_rdtscp(&__aux); \
	(low) = (u32)__val;				\
	(high) = (u32)(__val >> 32);			\
	(aux) = __aux;					\
} while (0)

#define rdtscpll(val, aux)				\
do {							\
	unsigned int __aux;				\
	val = paravirt_rdtscp(&__aux);			\
	(aux) = __aux;					\
} while (0)

static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(pv_cpu_ops.alloc_ldt, ldt, entries);
}

static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(pv_cpu_ops.free_ldt, ldt, entries);
}

static inline void load_TR_desc(void)
{
	PVOP_VCALL0(pv_cpu_ops.load_tr_desc);
}
static inline void load_gdt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.load_gdt, dtr);
}
static inline void load_idt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.load_idt, dtr);
}
static inline void set_ldt(const void *addr, unsigned entries)
{
	PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
}
static inline void store_idt(struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.store_idt, dtr);
}
static inline unsigned long paravirt_store_tr(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.store_tr);
}
#define store_tr(tr)	((tr) = paravirt_store_tr())
static inline void load_TLS(struct thread_struct *t, unsigned cpu)
{
	PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu);
}

#ifdef CONFIG_X86_64
static inline void load_gs_index(unsigned int gs)
{
	PVOP_VCALL1(pv_cpu_ops.load_gs_index, gs);
}
#endif

static inline void write_ldt_entry(struct desc_struct *dt, int entry,
				   const void *desc)
{
	PVOP_VCALL3(pv_cpu_ops.write_ldt_entry, dt, entry, desc);
}

static inline void write_gdt_entry(struct desc_struct *dt, int entry,
				   void *desc, int type)
{
	PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, desc, type);
}

static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
	PVOP_VCALL3(pv_cpu_ops.write_idt_entry, dt, entry, g);
}
static inline void set_iopl_mask(unsigned mask)
{
	PVOP_VCALL1(pv_cpu_ops.set_iopl_mask, mask);
}

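/* The paravirtualized I/O functions */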
static inline void slow_down_io(void)
{
	pv_cpu_ops.io_delay();
#ifdef REALLY_SLOW_IO
	pv_cpu_ops.io_delay();
	pv_cpu_ops.io_delay();
	pv_cpu_ops.io_delay();
#endif
}

#ifdef CONFIG_SMP
static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip,
				    unsigned long start_esp)
{
	PVOP_VCALL3(pv_apic_ops.startup_ipi_hook,
		    phys_apicid, start_eip, start_esp);
}
#endif

static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
	PVOP_VCALL2(pv_mmu_ops.activate_mm, prev, next);
}

static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
	PVOP_VCALL2(pv_mmu_ops.dup_mmap, oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
	PVOP_VCALL1(pv_mmu_ops.exit_mmap, mm);
}

static inline void __flush_tlb(void)
{
	PVOP_VCALL0(pv_mmu_ops.flush_tlb_user);
}
static inline void __flush_tlb_global(void)
{
	PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
}
static inline void __flush_tlb_single(unsigned long addr)
{
	PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
}

static inline void flush_tlb_others(const struct cpumask *cpumask,
				    struct mm_struct *mm,
				    unsigned long start,
				    unsigned long end)
{
	PVOP_VCALL4(pv_mmu_ops.flush_tlb_others, cpumask, mm, start, end);
}

static inline int paravirt_pgd_alloc(struct mm_struct *mm)
{
	return PVOP_CALL1(int, pv_mmu_ops.pgd_alloc, mm);
}

static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd);
}

static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
}
static inline void paravirt_release_pte(unsigned long pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
}

static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
}

static inline void paravirt_release_pmd(unsigned long pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
}

static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
}
static inline void paravirt_release_pud(unsigned long pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
}

static inline void pte_update(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep);
}
static inline void pmd_update(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp)
{
	PVOP_VCALL3(pv_mmu_ops.pmd_update, mm, addr, pmdp);
}

static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep)
{
	PVOP_VCALL3(pv_mmu_ops.pte_update_defer, mm, addr, ptep);
}

static inline void pmd_update_defer(struct mm_struct *mm, unsigned long addr,
				    pmd_t *pmdp)
{
	PVOP_VCALL3(pv_mmu_ops.pmd_update_defer, mm, addr, pmdp);
}

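/*
 * On 32-bit PAE, pteval_t is wider than a machine word, so page table
 * values must be split into two 32-bit halves for the pvop call;
 * otherwise they fit in a single argument register.
 */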
static inline pte_t __pte(pteval_t val)
{
	pteval_t ret;

	if (sizeof(pteval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pteval_t,
				   pv_mmu_ops.make_pte,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pteval_t,
				   pv_mmu_ops.make_pte,
				   val);

	return (pte_t) { .pte = ret };
}

static inline pteval_t pte_val(pte_t pte)
{
	pteval_t ret;

	if (sizeof(pteval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pteval_t, pv_mmu_ops.pte_val,
				   pte.pte, (u64)pte.pte >> 32);
	else
		ret = PVOP_CALLEE1(pteval_t, pv_mmu_ops.pte_val,
				   pte.pte);

	return ret;
}

static inline pgd_t __pgd(pgdval_t val)
{
	pgdval_t ret;

	if (sizeof(pgdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.make_pgd,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.make_pgd,
				   val);

	return (pgd_t) { ret };
}

static inline pgdval_t pgd_val(pgd_t pgd)
{
	pgdval_t ret;

	if (sizeof(pgdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.pgd_val,
				   pgd.pgd, (u64)pgd.pgd >> 32);
	else
		ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.pgd_val,
				   pgd.pgd);

	return ret;
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
					   pte_t *ptep)
{
	pteval_t ret;

	ret = PVOP_CALL3(pteval_t, pv_mmu_ops.ptep_modify_prot_start,
			 mm, addr, ptep);

	return (pte_t) { .pte = ret };
}

static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
					   pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		/* 5 arg words */
		pv_mmu_ops.ptep_modify_prot_commit(mm, addr, ptep, pte);
	else
		PVOP_VCALL4(pv_mmu_ops.ptep_modify_prot_commit,
			    mm, addr, ptep, pte.pte);
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pte, ptep,
			    pte.pte, (u64)pte.pte >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pte, ptep,
			    pte.pte);
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		/* 5 arg words */
		pv_mmu_ops.set_pte_at(mm, addr, ptep, pte);
	else
		PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t pmd)
{
	if (sizeof(pmdval_t) > sizeof(long))
		/* 5 arg words */
		pv_mmu_ops.set_pmd_at(mm, addr, pmdp, pmd);
	else
		PVOP_VCALL4(pv_mmu_ops.set_pmd_at, mm, addr, pmdp,
			    native_pmd_val(pmd));
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	pmdval_t val = native_pmd_val(pmd);

	if (sizeof(pmdval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp, val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
}

#if PAGETABLE_LEVELS >= 3
static inline pmd_t __pmd(pmdval_t val)
{
	pmdval_t ret;

	if (sizeof(pmdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.make_pmd,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.make_pmd,
				   val);

	return (pmd_t) { ret };
}

static inline pmdval_t pmd_val(pmd_t pmd)
{
	pmdval_t ret;

	if (sizeof(pmdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.pmd_val,
				   pmd.pmd, (u64)pmd.pmd >> 32);
	else
		ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.pmd_val,
				   pmd.pmd);

	return ret;
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	pudval_t val = native_pud_val(pud);

	if (sizeof(pudval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
			    val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
			    val);
}
#if PAGETABLE_LEVELS == 4
static inline pud_t __pud(pudval_t val)
{
	pudval_t ret;

	if (sizeof(pudval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.make_pud,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.make_pud,
				   val);

	return (pud_t) { ret };
}

static inline pudval_t pud_val(pud_t pud)
{
	pudval_t ret;

	if (sizeof(pudval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.pud_val,
				   pud.pud, (u64)pud.pud >> 32);
	else
		ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.pud_val,
				   pud.pud);

	return ret;
}

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	pgdval_t val = native_pgd_val(pgd);

	if (sizeof(pgdval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pgd, pgdp,
			    val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp,
			    val);
}

static inline void pgd_clear(pgd_t *pgdp)
{
	set_pgd(pgdp, __pgd(0));
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

#endif	/* PAGETABLE_LEVELS == 4 */

#endif	/* PAGETABLE_LEVELS >= 3 */

#ifdef CONFIG_X86_PAE
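/* Special-case pte-setting operations for PAE, which can't update a
   64-bit pte atomically */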
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
	PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep,
		    pte.pte, pte.pte >> 32);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
}
#else  /* !CONFIG_X86_PAE */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_pte(ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	set_pte_at(mm, addr, ptep, __pte(0));
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}
#endif	/* CONFIG_X86_PAE */

#define __HAVE_ARCH_START_CONTEXT_SWITCH
static inline void arch_start_context_switch(struct task_struct *prev)
{
	PVOP_VCALL1(pv_cpu_ops.start_context_switch, prev);
}

static inline void arch_end_context_switch(struct task_struct *next)
{
	PVOP_VCALL1(pv_cpu_ops.end_context_switch, next);
}

#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
	PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
	PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
}

static inline void arch_flush_lazy_mmu_mode(void)
{
	PVOP_VCALL0(pv_mmu_ops.lazy_mode.flush);
}

static inline void __set_fixmap(unsigned idx,
				phys_addr_t phys, pgprot_t flags)
{
	pv_mmu_ops.set_fixmap(idx, phys, flags);
}

#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)

static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
						   __ticket_t ticket)
{
	PVOP_VCALLEE2(pv_lock_ops.lock_spinning, lock, ticket);
}

static __always_inline void __ticket_unlock_kick(struct arch_spinlock *lock,
						 __ticket_t ticket)
{
	PVOP_VCALL2(pv_lock_ops.unlock_kick, lock, ticket);
}

#endif

#ifdef CONFIG_X86_32
#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
#define PV_RESTORE_REGS "popl %edx; popl %ecx;"

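/* save and restore all caller-save registers, except return value */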
#define PV_SAVE_ALL_CALLER_REGS		"pushl %ecx;"
#define PV_RESTORE_ALL_CALLER_REGS	"popl %ecx;"

#define PV_FLAGS_ARG "0"
#define PV_EXTRA_CLOBBERS
#define PV_VEXTRA_CLOBBERS
#else
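/* save and restore all caller-save registers, except return value */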
#define PV_SAVE_ALL_CALLER_REGS					\
	"push %rcx;"						\
	"push %rdx;"						\
	"push %rsi;"						\
	"push %rdi;"						\
	"push %r8;"						\
	"push %r9;"						\
	"push %r10;"						\
	"push %r11;"
#define PV_RESTORE_ALL_CALLER_REGS				\
	"pop %r11;"						\
	"pop %r10;"						\
	"pop %r9;"						\
	"pop %r8;"						\
	"pop %rdi;"						\
	"pop %rsi;"						\
	"pop %rdx;"						\
	"pop %rcx;"

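/*
 * For the non-CALLEE pvops on 64-bit, only %rdi (the argument
 * register) is saved and restored around the call; the remaining
 * caller-saved registers are declared as clobbers instead.
 */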
#define PV_SAVE_REGS "pushq %%rdi;"
#define PV_RESTORE_REGS "popq %%rdi;"
#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx", "rsi"
#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx", "rsi"
#define PV_FLAGS_ARG "D"
#endif

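/*
 * Generate a thunk around a function which saves all caller-save
 * registers except the return value.  From the caller's point of
 * view the wrapped function clobbers nothing, which is what the
 * PVOP_CALLEE call sites rely on.
 */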
#define PV_CALLEE_SAVE_REGS_THUNK(func)					\
	extern typeof(func) __raw_callee_save_##func;			\
									\
	asm(".pushsection .text;"					\
	    ".globl __raw_callee_save_" #func " ; "			\
	    "__raw_callee_save_" #func ": "				\
	    PV_SAVE_ALL_CALLER_REGS					\
	    "call " #func ";"						\
	    PV_RESTORE_ALL_CALLER_REGS					\
	    "ret;"							\
	    ".popsection")

/* Get a reference to a callee-save function */
#define PV_CALLEE_SAVE(func)						\
	((struct paravirt_callee_save) { __raw_callee_save_##func })

/* Promise that the called function preserves all caller-save registers */
#define __PV_IS_CALLEE_SAVE(func)			\
	((struct paravirt_callee_save) { func })

static inline notrace unsigned long arch_local_save_flags(void)
{
	return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl);
}

static inline notrace void arch_local_irq_restore(unsigned long f)
{
	PVOP_VCALLEE1(pv_irq_ops.restore_fl, f);
}

static inline notrace void arch_local_irq_disable(void)
{
	PVOP_VCALLEE0(pv_irq_ops.irq_disable);
}

static inline notrace void arch_local_irq_enable(void)
{
	PVOP_VCALLEE0(pv_irq_ops.irq_enable);
}

static inline notrace unsigned long arch_local_irq_save(void)
{
	unsigned long f;

	f = arch_local_save_flags();
	arch_local_irq_disable();
	return f;
}

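/* Make sure as little as possible of this mess escapes. */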
#undef PARAVIRT_CALL
#undef __PVOP_CALL
#undef __PVOP_VCALL
#undef PVOP_VCALL0
#undef PVOP_CALL0
#undef PVOP_VCALL1
#undef PVOP_CALL1
#undef PVOP_VCALL2
#undef PVOP_CALL2
#undef PVOP_VCALL3
#undef PVOP_CALL3
#undef PVOP_VCALL4
#undef PVOP_CALL4

extern void default_banner(void);

#else  /* __ASSEMBLY__ */

#define _PVSITE(ptype, clobbers, ops, word, algn)	\
771:;							\
	ops;						\
772:;							\
	.pushsection .parainstructions,"a";		\
	 .align	algn;					\
	 word 771b;					\
	 .byte ptype;					\
	 .byte 772b-771b;				\
	 .short clobbers;				\
	.popsection


#define COND_PUSH(set, mask, reg)			\
	.if ((~(set)) & mask); push %reg; .endif
#define COND_POP(set, mask, reg)			\
	.if ((~(set)) & mask); pop %reg; .endif

#ifdef CONFIG_X86_64

#define PV_SAVE_REGS(set)			\
	COND_PUSH(set, CLBR_RAX, rax);		\
	COND_PUSH(set, CLBR_RCX, rcx);		\
	COND_PUSH(set, CLBR_RDX, rdx);		\
	COND_PUSH(set, CLBR_RSI, rsi);		\
	COND_PUSH(set, CLBR_RDI, rdi);		\
	COND_PUSH(set, CLBR_R8, r8);		\
	COND_PUSH(set, CLBR_R9, r9);		\
	COND_PUSH(set, CLBR_R10, r10);		\
	COND_PUSH(set, CLBR_R11, r11)
#define PV_RESTORE_REGS(set)			\
	COND_POP(set, CLBR_R11, r11);		\
	COND_POP(set, CLBR_R10, r10);		\
	COND_POP(set, CLBR_R9, r9);		\
	COND_POP(set, CLBR_R8, r8);		\
	COND_POP(set, CLBR_RDI, rdi);		\
	COND_POP(set, CLBR_RSI, rsi);		\
	COND_POP(set, CLBR_RDX, rdx);		\
	COND_POP(set, CLBR_RCX, rcx);		\
	COND_POP(set, CLBR_RAX, rax)

#define PARA_PATCH(struct, off)		((PARAVIRT_PATCH_##struct + (off)) / 8)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
#define PARA_INDIRECT(addr)	*addr(%rip)
#else
#define PV_SAVE_REGS(set)			\
	COND_PUSH(set, CLBR_EAX, eax);		\
	COND_PUSH(set, CLBR_EDI, edi);		\
	COND_PUSH(set, CLBR_ECX, ecx);		\
	COND_PUSH(set, CLBR_EDX, edx)
#define PV_RESTORE_REGS(set)			\
	COND_POP(set, CLBR_EDX, edx);		\
	COND_POP(set, CLBR_ECX, ecx);		\
	COND_POP(set, CLBR_EDI, edi);		\
	COND_POP(set, CLBR_EAX, eax)

#define PARA_PATCH(struct, off)		((PARAVIRT_PATCH_##struct + (off)) / 4)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
#define PARA_INDIRECT(addr)	*%cs:addr
#endif

#define INTERRUPT_RETURN						\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE,	\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret))

#define DISABLE_INTERRUPTS(clobbers)					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable);	\
		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#define ENABLE_INTERRUPTS(clobbers)					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers,	\
		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable);	\
		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#define USERGS_SYSRET32							\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret32))

#ifdef CONFIG_X86_32
#define GET_CR0_INTO_EAX				\
	push %ecx; push %edx;				\
	call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);	\
	pop %edx; pop %ecx

#define ENABLE_INTERRUPTS_SYSEXIT					\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))

#else	/* !CONFIG_X86_32 */
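/*
 * If swapgs is used while the userspace stack is still current,
 * there's no way to call a pvop.  The PV replacement *must* be
 * inlined, or the swapgs instruction must be trapped and emulated.
 */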
#define SWAPGS_UNSAFE_STACK						\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,	\
		  swapgs)

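/*
 * Note: swapgs is very special, and in practice is either going to be
 * implemented with a single "swapgs" instruction or something very
 * special.  Either way, we don't need to save any registers for it.
 */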
#define SWAPGS								\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,	\
		  call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs)		\
		 )

#define GET_CR2_INTO_RAX				\
	call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2)

#define PARAVIRT_ADJUST_EXCEPTION_FRAME					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \
		  CLBR_NONE,						\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_adjust_exception_frame))

#define USERGS_SYSRET64							\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))

#define ENABLE_INTERRUPTS_SYSEXIT32					\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
#endif	/* CONFIG_X86_32 */

#endif /* __ASSEMBLY__ */
#else  /* CONFIG_PARAVIRT */
# define default_banner x86_init_noop
#endif /* !CONFIG_PARAVIRT */
#endif /* _ASM_X86_PARAVIRT_H */