#ifndef _ASM_X86_PARAVIRT_H
#define _ASM_X86_PARAVIRT_H
/* Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here. */

#ifdef CONFIG_PARAVIRT
#include <asm/pgtable_types.h>
#include <asm/asm.h>

#include <asm/paravirt_types.h>

#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/cpumask.h>

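/*
 * The PVOP_CALL*()/PVOP_VCALL*() macros used throughout this file are
 * defined in <asm/paravirt_types.h>.  Each expands to an indirect call
 * through the named pv_*_ops slot; the call sites are recorded in
 * .parainstructions so that apply_paravirt() can later patch them into
 * direct calls or inline native instructions.
 */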
static inline int paravirt_enabled(void)
{
	return pv_info.paravirt_enabled;
}

static inline void load_sp0(struct tss_struct *tss,
			    struct thread_struct *thread)
{
	PVOP_VCALL2(pv_cpu_ops.load_sp0, tss, thread);
}

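/* The paravirtualized CPUID instruction. */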
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
			   unsigned int *ecx, unsigned int *edx)
{
	PVOP_VCALL4(pv_cpu_ops.cpuid, eax, ebx, ecx, edx);
}

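/*
 * These special macros can be used to get or set a debugging register
 */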
static inline unsigned long paravirt_get_debugreg(int reg)
{
	return PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, reg);
}
#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
static inline void set_debugreg(unsigned long val, int reg)
{
	PVOP_VCALL2(pv_cpu_ops.set_debugreg, reg, val);
}

static inline void clts(void)
{
	PVOP_VCALL0(pv_cpu_ops.clts);
}

static inline unsigned long read_cr0(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0);
}

static inline void write_cr0(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr0, x);
}

static inline unsigned long read_cr2(void)
{
	return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
}

static inline void write_cr2(unsigned long x)
{
	PVOP_VCALL1(pv_mmu_ops.write_cr2, x);
}

static inline unsigned long read_cr3(void)
{
	return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr3);
}

static inline void write_cr3(unsigned long x)
{
	PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
}

static inline unsigned long read_cr4(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4);
}
static inline unsigned long read_cr4_safe(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4_safe);
}

static inline void write_cr4(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
}

#ifdef CONFIG_X86_64
static inline unsigned long read_cr8(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr8);
}

static inline void write_cr8(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr8, x);
}
#endif

static inline void arch_safe_halt(void)
{
	PVOP_VCALL0(pv_irq_ops.safe_halt);
}

static inline void halt(void)
{
	PVOP_VCALL0(pv_irq_ops.halt);
}

static inline void wbinvd(void)
{
	PVOP_VCALL0(pv_cpu_ops.wbinvd);
}

#define get_kernel_rpl()	(pv_info.kernel_rpl)

static inline u64 paravirt_read_msr(unsigned msr, int *err)
{
	return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
}

static inline int paravirt_rdmsr_regs(u32 *regs)
{
	return PVOP_CALL1(int, pv_cpu_ops.rdmsr_regs, regs);
}

static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
{
	return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
}

static inline int paravirt_wrmsr_regs(u32 *regs)
{
	return PVOP_CALL1(int, pv_cpu_ops.wrmsr_regs, regs);
}

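/* These should all do BUG_ON(_err), but our headers are too tangled. */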
#define rdmsr(msr, val1, val2)			\
do {						\
	int _err;				\
	u64 _l = paravirt_read_msr(msr, &_err);	\
	val1 = (u32)_l;				\
	val2 = _l >> 32;			\
} while (0)

#define wrmsr(msr, val1, val2)			\
do {						\
	paravirt_write_msr(msr, val1, val2);	\
} while (0)

#define rdmsrl(msr, val)			\
do {						\
	int _err;				\
	val = paravirt_read_msr(msr, &_err);	\
} while (0)

#define wrmsrl(msr, val)	wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32)
#define wrmsr_safe(msr, a, b)	paravirt_write_msr(msr, a, b)

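/* rdmsr with exception handling */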
#define rdmsr_safe(msr, a, b)			\
({						\
	int _err;				\
	u64 _l = paravirt_read_msr(msr, &_err);	\
	(*a) = (u32)_l;				\
	(*b) = _l >> 32;			\
	_err;					\
})

#define rdmsr_safe_regs(regs)	paravirt_rdmsr_regs(regs)
#define wrmsr_safe_regs(regs)	paravirt_wrmsr_regs(regs)

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
	int err;

	*p = paravirt_read_msr(msr, &err);
	return err;
}
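
/*
 * Example caller (sketch; MSR_EFER comes from <asm/msr-index.h>):
 *
 *	u64 efer;
 *	if (rdmsrl_safe(MSR_EFER, &efer))
 *		pr_warn("EFER read faulted\n");
 */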
static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
	u32 gprs[8] = { 0 };
	int err;

	gprs[1] = msr;			/* %ecx: the MSR to read */
	gprs[7] = 0x9c5a203a;		/* %edi: "password" AMD requires for these MSRs */

	err = paravirt_rdmsr_regs(gprs);

	*p = gprs[0] | ((u64)gprs[2] << 32);

	return err;
}

static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
{
	u32 gprs[8] = { 0 };

	gprs[0] = (u32)val;		/* %eax: low half */
	gprs[1] = msr;			/* %ecx: the MSR to write */
	gprs[2] = val >> 32;		/* %edx: high half */
	gprs[7] = 0x9c5a203a;

	return paravirt_wrmsr_regs(gprs);
}

static inline u64 paravirt_read_tsc(void)
{
	return PVOP_CALL0(u64, pv_cpu_ops.read_tsc);
}

#define rdtscl(low)				\
do {						\
	u64 _l = paravirt_read_tsc();		\
	low = (int)_l;				\
} while (0)

#define rdtscll(val) (val = paravirt_read_tsc())

static inline unsigned long long paravirt_sched_clock(void)
{
	return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
}

static inline unsigned long long paravirt_read_pmc(int counter)
{
	return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
}

#define rdpmc(counter, low, high)		\
do {						\
	u64 _l = paravirt_read_pmc(counter);	\
	low = (u32)_l;				\
	high = _l >> 32;			\
} while (0)

static inline unsigned long long paravirt_rdtscp(unsigned int *aux)
{
	return PVOP_CALL1(u64, pv_cpu_ops.read_tscp, aux);
}

#define rdtscp(low, high, aux)				\
do {							\
	unsigned int __aux;				\
	unsigned long long __val = paravirt_rdtscp(&__aux); \
	(low) = (u32)__val;				\
	(high) = (u32)(__val >> 32);			\
	(aux) = __aux;					\
} while (0)

#define rdtscpll(val, aux)			\
do {						\
	unsigned int __aux;			\
	val = paravirt_rdtscp(&__aux);		\
	(aux) = __aux;				\
} while (0)

static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(pv_cpu_ops.alloc_ldt, ldt, entries);
}

static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(pv_cpu_ops.free_ldt, ldt, entries);
}

static inline void load_TR_desc(void)
{
	PVOP_VCALL0(pv_cpu_ops.load_tr_desc);
}
static inline void load_gdt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.load_gdt, dtr);
}
static inline void load_idt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.load_idt, dtr);
}
static inline void set_ldt(const void *addr, unsigned entries)
{
	PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
}
static inline void store_gdt(struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.store_gdt, dtr);
}
static inline void store_idt(struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.store_idt, dtr);
}
static inline unsigned long paravirt_store_tr(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.store_tr);
}
#define store_tr(tr)	((tr) = paravirt_store_tr())
static inline void load_TLS(struct thread_struct *t, unsigned cpu)
{
	PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu);
}

#ifdef CONFIG_X86_64
static inline void load_gs_index(unsigned int gs)
{
	PVOP_VCALL1(pv_cpu_ops.load_gs_index, gs);
}
#endif

static inline void write_ldt_entry(struct desc_struct *dt, int entry,
				   const void *desc)
{
	PVOP_VCALL3(pv_cpu_ops.write_ldt_entry, dt, entry, desc);
}

static inline void write_gdt_entry(struct desc_struct *dt, int entry,
				   void *desc, int type)
{
	PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, desc, type);
}

static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
	PVOP_VCALL3(pv_cpu_ops.write_idt_entry, dt, entry, g);
}
static inline void set_iopl_mask(unsigned mask)
{
	PVOP_VCALL1(pv_cpu_ops.set_iopl_mask, mask);
}

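/* The paravirtualized I/O functions */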
static inline void slow_down_io(void)
{
	pv_cpu_ops.io_delay();
#ifdef REALLY_SLOW_IO
	pv_cpu_ops.io_delay();
	pv_cpu_ops.io_delay();
	pv_cpu_ops.io_delay();
#endif
}

#ifdef CONFIG_SMP
static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip,
				    unsigned long start_esp)
{
	PVOP_VCALL3(pv_apic_ops.startup_ipi_hook,
		    phys_apicid, start_eip, start_esp);
}
#endif

static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
	PVOP_VCALL2(pv_mmu_ops.activate_mm, prev, next);
}

static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
	PVOP_VCALL2(pv_mmu_ops.dup_mmap, oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
	PVOP_VCALL1(pv_mmu_ops.exit_mmap, mm);
}

static inline void __flush_tlb(void)
{
	PVOP_VCALL0(pv_mmu_ops.flush_tlb_user);
}
static inline void __flush_tlb_global(void)
{
	PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
}
static inline void __flush_tlb_single(unsigned long addr)
{
	PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
}

static inline void flush_tlb_others(const struct cpumask *cpumask,
				    struct mm_struct *mm,
				    unsigned long va)
{
	PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, cpumask, mm, va);
}

static inline int paravirt_pgd_alloc(struct mm_struct *mm)
{
	return PVOP_CALL1(int, pv_mmu_ops.pgd_alloc, mm);
}

static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd);
}

static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
}
static inline void paravirt_release_pte(unsigned long pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
}

static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
}

static inline void paravirt_release_pmd(unsigned long pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
}

static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
}
static inline void paravirt_release_pud(unsigned long pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
}

static inline void pte_update(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep);
}
static inline void pmd_update(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp)
{
	PVOP_VCALL3(pv_mmu_ops.pmd_update, mm, addr, pmdp);
}

static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep)
{
	PVOP_VCALL3(pv_mmu_ops.pte_update_defer, mm, addr, ptep);
}

static inline void pmd_update_defer(struct mm_struct *mm, unsigned long addr,
				    pmd_t *pmdp)
{
	PVOP_VCALL3(pv_mmu_ops.pmd_update_defer, mm, addr, pmdp);
}

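/*
 * The conversions below come in pairs: with 32-bit PAE page tables,
 * pteval_t (64 bits) is wider than long (32 bits), so the value must be
 * split across two 32-bit argument registers; otherwise it fits in one.
 * The sizeof() comparison is a compile-time constant, so only one of the
 * two calls is ever emitted for a given configuration.
 */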
static inline pte_t __pte(pteval_t val)
{
	pteval_t ret;

	if (sizeof(pteval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pteval_t,
				   pv_mmu_ops.make_pte,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pteval_t,
				   pv_mmu_ops.make_pte,
				   val);

	return (pte_t) { .pte = ret };
}

static inline pteval_t pte_val(pte_t pte)
{
	pteval_t ret;

	if (sizeof(pteval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pteval_t, pv_mmu_ops.pte_val,
				   pte.pte, (u64)pte.pte >> 32);
	else
		ret = PVOP_CALLEE1(pteval_t, pv_mmu_ops.pte_val,
				   pte.pte);

	return ret;
}

static inline pgd_t __pgd(pgdval_t val)
{
	pgdval_t ret;

	if (sizeof(pgdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.make_pgd,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.make_pgd,
				   val);

	return (pgd_t) { ret };
}

static inline pgdval_t pgd_val(pgd_t pgd)
{
	pgdval_t ret;

	if (sizeof(pgdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.pgd_val,
				   pgd.pgd, (u64)pgd.pgd >> 32);
	else
		ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.pgd_val,
				   pgd.pgd);

	return ret;
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
					   pte_t *ptep)
{
	pteval_t ret;

	ret = PVOP_CALL3(pteval_t, pv_mmu_ops.ptep_modify_prot_start,
			 mm, addr, ptep);

	return (pte_t) { .pte = ret };
}

static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
					   pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		/* 5 arg words */
		pv_mmu_ops.ptep_modify_prot_commit(mm, addr, ptep, pte);
	else
		PVOP_VCALL4(pv_mmu_ops.ptep_modify_prot_commit,
			    mm, addr, ptep, pte.pte);
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pte, ptep,
			    pte.pte, (u64)pte.pte >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pte, ptep,
			    pte.pte);
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		/* 5 arg words */
		pv_mmu_ops.set_pte_at(mm, addr, ptep, pte);
	else
		PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t pmd)
{
	if (sizeof(pmdval_t) > sizeof(long))
		/* 5 arg words */
		pv_mmu_ops.set_pmd_at(mm, addr, pmdp, pmd);
	else
		PVOP_VCALL4(pv_mmu_ops.set_pmd_at, mm, addr, pmdp,
			    native_pmd_val(pmd));
}
#endif	/* CONFIG_TRANSPARENT_HUGEPAGE */

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	pmdval_t val = native_pmd_val(pmd);

	if (sizeof(pmdval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp, val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
}

#if PAGETABLE_LEVELS >= 3
static inline pmd_t __pmd(pmdval_t val)
{
	pmdval_t ret;

	if (sizeof(pmdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.make_pmd,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.make_pmd,
				   val);

	return (pmd_t) { ret };
}

static inline pmdval_t pmd_val(pmd_t pmd)
{
	pmdval_t ret;

	if (sizeof(pmdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.pmd_val,
				   pmd.pmd, (u64)pmd.pmd >> 32);
	else
		ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.pmd_val,
				   pmd.pmd);

	return ret;
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	pudval_t val = native_pud_val(pud);

	if (sizeof(pudval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
			    val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
			    val);
}
#if PAGETABLE_LEVELS == 4
static inline pud_t __pud(pudval_t val)
{
	pudval_t ret;

	if (sizeof(pudval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.make_pud,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.make_pud,
				   val);

	return (pud_t) { ret };
}

static inline pudval_t pud_val(pud_t pud)
{
	pudval_t ret;

	if (sizeof(pudval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.pud_val,
				   pud.pud, (u64)pud.pud >> 32);
	else
		ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.pud_val,
				   pud.pud);

	return ret;
}

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	pgdval_t val = native_pgd_val(pgd);

	if (sizeof(pgdval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pgd, pgdp,
			    val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp,
			    val);
}

static inline void pgd_clear(pgd_t *pgdp)
{
	set_pgd(pgdp, __pgd(0));
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

#endif	/* PAGETABLE_LEVELS == 4 */

#endif	/* PAGETABLE_LEVELS >= 3 */

#ifdef CONFIG_X86_PAE
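/* Special-case pte-setting operations for PAE, which can't update a
   64-bit pte atomically */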
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
	PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep,
		    pte.pte, pte.pte >> 32);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
}
#else  /* !CONFIG_X86_PAE */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_pte(ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	set_pte_at(mm, addr, ptep, __pte(0));
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}
#endif	/* CONFIG_X86_PAE */

#define __HAVE_ARCH_START_CONTEXT_SWITCH
static inline void arch_start_context_switch(struct task_struct *prev)
{
	PVOP_VCALL1(pv_cpu_ops.start_context_switch, prev);
}

static inline void arch_end_context_switch(struct task_struct *next)
{
	PVOP_VCALL1(pv_cpu_ops.end_context_switch, next);
}

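/*
 * Lazy MMU mode lets a hypervisor batch page-table updates: between
 * enter and leave, set_pte()-style operations may be queued and only
 * flushed on leave (or via arch_flush_lazy_mmu_mode()).  A typical
 * caller looks like this (sketch):
 *
 *	arch_enter_lazy_mmu_mode();
 *	for (; addr != end; addr += PAGE_SIZE, ptep++)
 *		set_pte_at(mm, addr, ptep, pfn_pte(pfn++, prot));
 *	arch_leave_lazy_mmu_mode();
 */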
#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
	PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
	PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
}

void arch_flush_lazy_mmu_mode(void);

static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
				phys_addr_t phys, pgprot_t flags)
{
	pv_mmu_ops.set_fixmap(idx, phys, flags);
}

#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)

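/*
 * When CONFIG_PARAVIRT_SPINLOCKS is enabled, the arch_spin_*()
 * operations are routed through pv_lock_ops so a hypervisor backend
 * can replace the native implementation (e.g. to yield the vCPU
 * instead of spinning).
 */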
static inline int arch_spin_is_locked(struct arch_spinlock *lock)
{
	return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
}

static inline int arch_spin_is_contended(struct arch_spinlock *lock)
{
	return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
}
#define arch_spin_is_contended	arch_spin_is_contended

static __always_inline void arch_spin_lock(struct arch_spinlock *lock)
{
	PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
}

static __always_inline void arch_spin_lock_flags(struct arch_spinlock *lock,
						 unsigned long flags)
{
	PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags);
}

static __always_inline int arch_spin_trylock(struct arch_spinlock *lock)
{
	return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
}

static __always_inline void arch_spin_unlock(struct arch_spinlock *lock)
{
	PVOP_VCALL1(pv_lock_ops.spin_unlock, lock);
}

#endif	/* SMP && PARAVIRT_SPINLOCKS */

#ifdef CONFIG_X86_32
#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
#define PV_RESTORE_REGS "popl %edx; popl %ecx;"

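/* save and restore all caller-save registers, except return value */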
#define PV_SAVE_ALL_CALLER_REGS		"pushl %ecx;"
#define PV_RESTORE_ALL_CALLER_REGS	"popl  %ecx;"

#define PV_FLAGS_ARG "0"
#define PV_EXTRA_CLOBBERS
#define PV_VEXTRA_CLOBBERS
#else  /* CONFIG_X86_64 */

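/* save and restore all caller-save registers, except return value */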
#define PV_SAVE_ALL_CALLER_REGS					\
	"push %rcx;"						\
	"push %rdx;"						\
	"push %rsi;"						\
	"push %rdi;"						\
	"push %r8;"						\
	"push %r9;"						\
	"push %r10;"						\
	"push %r11;"
#define PV_RESTORE_ALL_CALLER_REGS				\
	"pop %r11;"						\
	"pop %r10;"						\
	"pop %r9;"						\
	"pop %r8;"						\
	"pop %rdi;"						\
	"pop %rsi;"						\
	"pop %rdx;"						\
	"pop %rcx;"

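/*
 * Saving all caller-save registers here would be too much: save only
 * the argument register and declare the rest of the caller-saved
 * registers as clobbered instead.
 */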
#define PV_SAVE_REGS "pushq %%rdi;"
#define PV_RESTORE_REGS "popq %%rdi;"
#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx", "rsi"
#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx", "rsi"
#define PV_FLAGS_ARG "D"
#endif	/* CONFIG_X86_32 */

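/*
 * Generate a thunk around a function which saves all caller-save
 * (not callee-save) registers, so that the result can be installed
 * in a pvop slot that is called with the callee-save convention:
 * the call site then doesn't have to spill any registers around the
 * call.  A backend would typically use it like this (sketch):
 *
 *	static unsigned long xen_save_fl(void);
 *	PV_CALLEE_SAVE_REGS_THUNK(xen_save_fl);
 *	...
 *	pv_irq_ops.save_fl = PV_CALLEE_SAVE(xen_save_fl);
 */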
#define PV_CALLEE_SAVE_REGS_THUNK(func)					\
	extern typeof(func) __raw_callee_save_##func;			\
	static void *__##func##__ __used = func;			\
									\
	asm(".pushsection .text;"					\
	    "__raw_callee_save_" #func ": "				\
	    PV_SAVE_ALL_CALLER_REGS					\
	    "call " #func ";"						\
	    PV_RESTORE_ALL_CALLER_REGS					\
	    "ret;"							\
	    ".popsection")

/* Get a reference to a callee-save function */
#define PV_CALLEE_SAVE(func)						\
	((struct paravirt_callee_save) { __raw_callee_save_##func })

/* Promise that "func" already uses the right calling convention */
#define __PV_IS_CALLEE_SAVE(func)			\
	((struct paravirt_callee_save) { func })

static inline notrace unsigned long arch_local_save_flags(void)
{
	return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl);
}

static inline notrace void arch_local_irq_restore(unsigned long f)
{
	PVOP_VCALLEE1(pv_irq_ops.restore_fl, f);
}

static inline notrace void arch_local_irq_disable(void)
{
	PVOP_VCALLEE0(pv_irq_ops.irq_disable);
}

static inline notrace void arch_local_irq_enable(void)
{
	PVOP_VCALLEE0(pv_irq_ops.irq_enable);
}

static inline notrace unsigned long arch_local_irq_save(void)
{
	unsigned long f;

	f = arch_local_save_flags();
	arch_local_irq_disable();
	return f;
}

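/*
 * Make sure as little as possible of this mess escapes.
 */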
#undef PARAVIRT_CALL
#undef __PVOP_CALL
#undef __PVOP_VCALL
#undef PVOP_VCALL0
#undef PVOP_CALL0
#undef PVOP_VCALL1
#undef PVOP_CALL1
#undef PVOP_VCALL2
#undef PVOP_CALL2
#undef PVOP_VCALL3
#undef PVOP_CALL3
#undef PVOP_VCALL4
#undef PVOP_CALL4

extern void default_banner(void);

#else  /* __ASSEMBLY__ */

#define _PVSITE(ptype, clobbers, ops, word, algn)	\
771:;						\
	ops;					\
772:;						\
	.pushsection .parainstructions,"a";	\
	.align algn;				\
	word 771b;				\
	.byte ptype;				\
	.byte 772b-771b;			\
	.short clobbers;			\
	.popsection

#define COND_PUSH(set, mask, reg)			\
	.if ((~(set)) & mask); push %reg; .endif
#define COND_POP(set, mask, reg)			\
	.if ((~(set)) & mask); pop %reg; .endif

#ifdef CONFIG_X86_64

#define PV_SAVE_REGS(set)			\
	COND_PUSH(set, CLBR_RAX, rax);		\
	COND_PUSH(set, CLBR_RCX, rcx);		\
	COND_PUSH(set, CLBR_RDX, rdx);		\
	COND_PUSH(set, CLBR_RSI, rsi);		\
	COND_PUSH(set, CLBR_RDI, rdi);		\
	COND_PUSH(set, CLBR_R8, r8);		\
	COND_PUSH(set, CLBR_R9, r9);		\
	COND_PUSH(set, CLBR_R10, r10);		\
	COND_PUSH(set, CLBR_R11, r11)
#define PV_RESTORE_REGS(set)			\
	COND_POP(set, CLBR_R11, r11);		\
	COND_POP(set, CLBR_R10, r10);		\
	COND_POP(set, CLBR_R9, r9);		\
	COND_POP(set, CLBR_R8, r8);		\
	COND_POP(set, CLBR_RDI, rdi);		\
	COND_POP(set, CLBR_RSI, rsi);		\
	COND_POP(set, CLBR_RDX, rdx);		\
	COND_POP(set, CLBR_RCX, rcx);		\
	COND_POP(set, CLBR_RAX, rax)

#define PARA_PATCH(struct, off)		((PARAVIRT_PATCH_##struct + (off)) / 8)
#define PARA_SITE(ptype, clobbers, ops)	_PVSITE(ptype, clobbers, ops, .quad, 8)
#define PARA_INDIRECT(addr)	*addr(%rip)
#else
#define PV_SAVE_REGS(set)			\
	COND_PUSH(set, CLBR_EAX, eax);		\
	COND_PUSH(set, CLBR_EDI, edi);		\
	COND_PUSH(set, CLBR_ECX, ecx);		\
	COND_PUSH(set, CLBR_EDX, edx)
#define PV_RESTORE_REGS(set)			\
	COND_POP(set, CLBR_EDX, edx);		\
	COND_POP(set, CLBR_ECX, ecx);		\
	COND_POP(set, CLBR_EDI, edi);		\
	COND_POP(set, CLBR_EAX, eax)

#define PARA_PATCH(struct, off)		((PARAVIRT_PATCH_##struct + (off)) / 4)
#define PARA_SITE(ptype, clobbers, ops)	_PVSITE(ptype, clobbers, ops, .long, 4)
#define PARA_INDIRECT(addr)	*%cs:addr
#endif

#define INTERRUPT_RETURN						\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE,	\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret))

#define DISABLE_INTERRUPTS(clobbers)					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers,	\
		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable);	\
		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#define ENABLE_INTERRUPTS(clobbers)					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers,	\
		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable);	\
		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#define USERGS_SYSRET32							\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret32))

#ifdef CONFIG_X86_32
#define GET_CR0_INTO_EAX				\
	push %ecx; push %edx;				\
	call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);	\
	pop %edx; pop %ecx

#define ENABLE_INTERRUPTS_SYSEXIT					\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))

#else	/* !CONFIG_X86_32 */

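/*
 * If swapgs is used while the userspace stack is still current,
 * there's no way to call a pvop.  The PV replacement *must* be
 * inlined, or the swapgs instruction must be trapped and emulated.
 */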
#define SWAPGS_UNSAFE_STACK						\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,	\
		  swapgs)

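/*
 * Note: swapgs is very special, and in practise is either going to be
 * implemented with a single "swapgs" instruction or something very
 * special.  Either way, we don't need to save any registers for
 * it.
 */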
#define SWAPGS								\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,	\
		  call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs)		\
		 )

#define GET_CR2_INTO_RCX				\
	call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2);	\
	movq %rax, %rcx;				\
	xorq %rax, %rax;

#define PARAVIRT_ADJUST_EXCEPTION_FRAME					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \
		  CLBR_NONE,						\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_adjust_exception_frame))

#define USERGS_SYSRET64							\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))

#define ENABLE_INTERRUPTS_SYSEXIT32					\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
#endif	/* CONFIG_X86_32 */

#endif /* __ASSEMBLY__ */
#else  /* CONFIG_PARAVIRT */
# define default_banner x86_init_noop
#endif /* !CONFIG_PARAVIRT */
#endif /* _ASM_X86_PARAVIRT_H */