linux/arch/x86/include/asm/paravirt.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PARAVIRT_H
#define _ASM_X86_PARAVIRT_H
/*
 * Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here.
 */

#ifdef CONFIG_PARAVIRT
#include <asm/pgtable_types.h>
#include <asm/asm.h>
#include <asm/nospec-branch.h>

#include <asm/paravirt_types.h>

#ifndef __ASSEMBLY__
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/cpumask.h>
#include <asm/frame.h>

static inline unsigned long long paravirt_sched_clock(void)
{
        return PVOP_CALL0(unsigned long long, time.sched_clock);
}
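
/*
 * Illustrative sketch (not part of the original header): PVOP_CALL0()
 * and friends, defined in <asm/paravirt_types.h>, expand to a patchable
 * indirect call through pv_ops, so the helper above behaves roughly
 * like:
 *
 *	u64 now = pv_ops.time.sched_clock();
 *
 * with the call site eligible for rewriting to a direct call once the
 * backend is known at boot.
 */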

struct static_key;
extern struct static_key paravirt_steal_enabled;
extern struct static_key paravirt_steal_rq_enabled;

__visible void __native_queued_spin_unlock(struct qspinlock *lock);
bool pv_is_native_spin_unlock(void);
__visible bool __native_vcpu_is_preempted(long cpu);
bool pv_is_native_vcpu_is_preempted(void);

static inline u64 paravirt_steal_clock(int cpu)
{
        return PVOP_CALL1(u64, time.steal_clock, cpu);
}

/* The paravirtualized I/O functions */
static inline void slow_down_io(void)
{
        pv_ops.cpu.io_delay();
#ifdef REALLY_SLOW_IO
        pv_ops.cpu.io_delay();
        pv_ops.cpu.io_delay();
        pv_ops.cpu.io_delay();
#endif
}
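
/*
 * Usage sketch (illustrative): slow_down_io() provides the trailing
 * delay used by the "pausing" port accessors, e.g.:
 *
 *	outb(val, port);
 *	slow_down_io();
 */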

static inline void __flush_tlb(void)
{
        PVOP_VCALL0(mmu.flush_tlb_user);
}

static inline void __flush_tlb_global(void)
{
        PVOP_VCALL0(mmu.flush_tlb_kernel);
}

static inline void __flush_tlb_one_user(unsigned long addr)
{
        PVOP_VCALL1(mmu.flush_tlb_one_user, addr);
}

static inline void flush_tlb_others(const struct cpumask *cpumask,
                                    const struct flush_tlb_info *info)
{
        PVOP_VCALL2(mmu.flush_tlb_others, cpumask, info);
}

static inline void paravirt_tlb_remove_table(struct mmu_gather *tlb, void *table)
{
        PVOP_VCALL2(mmu.tlb_remove_table, tlb, table);
}

static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
{
        PVOP_VCALL1(mmu.exit_mmap, mm);
}

#ifdef CONFIG_PARAVIRT_XXL
static inline void load_sp0(unsigned long sp0)
{
        PVOP_VCALL1(cpu.load_sp0, sp0);
}

/* The paravirtualized CPUID instruction. */
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
                           unsigned int *ecx, unsigned int *edx)
{
        PVOP_VCALL4(cpu.cpuid, eax, ebx, ecx, edx);
}

/*
 * These special macros can be used to get or set a debugging register
 */
static inline unsigned long paravirt_get_debugreg(int reg)
{
        return PVOP_CALL1(unsigned long, cpu.get_debugreg, reg);
}
#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
static inline void set_debugreg(unsigned long val, int reg)
{
        PVOP_VCALL2(cpu.set_debugreg, reg, val);
}
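
/*
 * Usage sketch (illustrative): callers use the same pattern as with
 * the native accessors, e.g. to read and write back DR7:
 *
 *	unsigned long dr7;
 *
 *	get_debugreg(dr7, 7);
 *	set_debugreg(dr7, 7);
 */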

static inline unsigned long read_cr0(void)
{
        return PVOP_CALL0(unsigned long, cpu.read_cr0);
}

static inline void write_cr0(unsigned long x)
{
        PVOP_VCALL1(cpu.write_cr0, x);
}

static inline unsigned long read_cr2(void)
{
        return PVOP_CALL0(unsigned long, mmu.read_cr2);
}

static inline void write_cr2(unsigned long x)
{
        PVOP_VCALL1(mmu.write_cr2, x);
}

static inline unsigned long __read_cr3(void)
{
        return PVOP_CALL0(unsigned long, mmu.read_cr3);
}

static inline void write_cr3(unsigned long x)
{
        PVOP_VCALL1(mmu.write_cr3, x);
}

static inline void __write_cr4(unsigned long x)
{
        PVOP_VCALL1(cpu.write_cr4, x);
}

#ifdef CONFIG_X86_64
static inline unsigned long read_cr8(void)
{
        return PVOP_CALL0(unsigned long, cpu.read_cr8);
}

static inline void write_cr8(unsigned long x)
{
        PVOP_VCALL1(cpu.write_cr8, x);
}
#endif

static inline void arch_safe_halt(void)
{
        PVOP_VCALL0(irq.safe_halt);
}

static inline void halt(void)
{
        PVOP_VCALL0(irq.halt);
}

static inline void wbinvd(void)
{
        PVOP_VCALL0(cpu.wbinvd);
}

#define get_kernel_rpl()  (pv_info.kernel_rpl)

static inline u64 paravirt_read_msr(unsigned msr)
{
        return PVOP_CALL1(u64, cpu.read_msr, msr);
}

static inline void paravirt_write_msr(unsigned msr,
                                      unsigned low, unsigned high)
{
        PVOP_VCALL3(cpu.write_msr, msr, low, high);
}

static inline u64 paravirt_read_msr_safe(unsigned msr, int *err)
{
        return PVOP_CALL2(u64, cpu.read_msr_safe, msr, err);
}

static inline int paravirt_write_msr_safe(unsigned msr,
                                          unsigned low, unsigned high)
{
        return PVOP_CALL3(int, cpu.write_msr_safe, msr, low, high);
}

#define rdmsr(msr, val1, val2)                  \
do {                                            \
        u64 _l = paravirt_read_msr(msr);        \
        val1 = (u32)_l;                         \
        val2 = _l >> 32;                        \
} while (0)

#define wrmsr(msr, val1, val2)                  \
do {                                            \
        paravirt_write_msr(msr, val1, val2);    \
} while (0)

#define rdmsrl(msr, val)                        \
do {                                            \
        val = paravirt_read_msr(msr);           \
} while (0)

static inline void wrmsrl(unsigned msr, u64 val)
{
        wrmsr(msr, (u32)val, (u32)(val>>32));
}

#define wrmsr_safe(msr, a, b)   paravirt_write_msr_safe(msr, a, b)

/* rdmsr with exception handling */
#define rdmsr_safe(msr, a, b)                           \
({                                                      \
        int _err;                                       \
        u64 _l = paravirt_read_msr_safe(msr, &_err);    \
        (*a) = (u32)_l;                                 \
        (*b) = _l >> 32;                                \
        _err;                                           \
})

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
        int err;

        *p = paravirt_read_msr_safe(msr, &err);
        return err;
}
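
/*
 * Usage sketch (illustrative): the wrappers above keep the native MSR
 * calling conventions, including the 0-on-success error return of the
 * _safe variants:
 *
 *	u64 misc;
 *	int err;
 *
 *	rdmsrl(MSR_IA32_MISC_ENABLE, misc);
 *	wrmsrl(MSR_IA32_MISC_ENABLE, misc);
 *	err = rdmsrl_safe(MSR_IA32_MISC_ENABLE, &misc);
 */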

static inline unsigned long long paravirt_read_pmc(int counter)
{
        return PVOP_CALL1(u64, cpu.read_pmc, counter);
}

#define rdpmc(counter, low, high)               \
do {                                            \
        u64 _l = paravirt_read_pmc(counter);    \
        low = (u32)_l;                          \
        high = _l >> 32;                        \
} while (0)

#define rdpmcl(counter, val) ((val) = paravirt_read_pmc(counter))
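
/*
 * Usage sketch (illustrative): rdpmcl() reads a previously programmed
 * performance counter, e.g.:
 *
 *	u64 count;
 *
 *	rdpmcl(counter_index, count);
 *
 * where counter_index is whatever index the PMU code programmed earlier.
 */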

static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
        PVOP_VCALL2(cpu.alloc_ldt, ldt, entries);
}

static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
        PVOP_VCALL2(cpu.free_ldt, ldt, entries);
}

static inline void load_TR_desc(void)
{
        PVOP_VCALL0(cpu.load_tr_desc);
}
static inline void load_gdt(const struct desc_ptr *dtr)
{
        PVOP_VCALL1(cpu.load_gdt, dtr);
}
static inline void load_idt(const struct desc_ptr *dtr)
{
        PVOP_VCALL1(cpu.load_idt, dtr);
}
static inline void set_ldt(const void *addr, unsigned entries)
{
        PVOP_VCALL2(cpu.set_ldt, addr, entries);
}
static inline unsigned long paravirt_store_tr(void)
{
        return PVOP_CALL0(unsigned long, cpu.store_tr);
}

#define store_tr(tr)    ((tr) = paravirt_store_tr())
static inline void load_TLS(struct thread_struct *t, unsigned cpu)
{
        PVOP_VCALL2(cpu.load_tls, t, cpu);
}

#ifdef CONFIG_X86_64
static inline void load_gs_index(unsigned int gs)
{
        PVOP_VCALL1(cpu.load_gs_index, gs);
}
#endif

static inline void write_ldt_entry(struct desc_struct *dt, int entry,
                                   const void *desc)
{
        PVOP_VCALL3(cpu.write_ldt_entry, dt, entry, desc);
}

static inline void write_gdt_entry(struct desc_struct *dt, int entry,
                                   void *desc, int type)
{
        PVOP_VCALL4(cpu.write_gdt_entry, dt, entry, desc, type);
}

static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
        PVOP_VCALL3(cpu.write_idt_entry, dt, entry, g);
}
static inline void set_iopl_mask(unsigned mask)
{
        PVOP_VCALL1(cpu.set_iopl_mask, mask);
}

static inline void paravirt_activate_mm(struct mm_struct *prev,
                                        struct mm_struct *next)
{
        PVOP_VCALL2(mmu.activate_mm, prev, next);
}

static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
                                          struct mm_struct *mm)
{
        PVOP_VCALL2(mmu.dup_mmap, oldmm, mm);
}

static inline int paravirt_pgd_alloc(struct mm_struct *mm)
{
        return PVOP_CALL1(int, mmu.pgd_alloc, mm);
}

static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
        PVOP_VCALL2(mmu.pgd_free, mm, pgd);
}

static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
        PVOP_VCALL2(mmu.alloc_pte, mm, pfn);
}
static inline void paravirt_release_pte(unsigned long pfn)
{
        PVOP_VCALL1(mmu.release_pte, pfn);
}

static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
        PVOP_VCALL2(mmu.alloc_pmd, mm, pfn);
}

static inline void paravirt_release_pmd(unsigned long pfn)
{
        PVOP_VCALL1(mmu.release_pmd, pfn);
}

static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
        PVOP_VCALL2(mmu.alloc_pud, mm, pfn);
}
static inline void paravirt_release_pud(unsigned long pfn)
{
        PVOP_VCALL1(mmu.release_pud, pfn);
}

static inline void paravirt_alloc_p4d(struct mm_struct *mm, unsigned long pfn)
{
        PVOP_VCALL2(mmu.alloc_p4d, mm, pfn);
}

static inline void paravirt_release_p4d(unsigned long pfn)
{
        PVOP_VCALL1(mmu.release_p4d, pfn);
}

static inline pte_t __pte(pteval_t val)
{
        pteval_t ret;

        if (sizeof(pteval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pteval_t, mmu.make_pte, val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pteval_t, mmu.make_pte, val);

        return (pte_t) { .pte = ret };
}
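
/*
 * Note on the sizeof() tests in the helpers above and below: on 32-bit
 * PAE kernels, pteval_t is 64 bits while long is 32 bits, so the value
 * has to be split across two 32-bit argument words; otherwise a single
 * word suffices and the compiler discards the dead branch entirely.
 */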

static inline pteval_t pte_val(pte_t pte)
{
        pteval_t ret;

        if (sizeof(pteval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pteval_t, mmu.pte_val,
                                   pte.pte, (u64)pte.pte >> 32);
        else
                ret = PVOP_CALLEE1(pteval_t, mmu.pte_val, pte.pte);

        return ret;
}

static inline pgd_t __pgd(pgdval_t val)
{
        pgdval_t ret;

        if (sizeof(pgdval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pgdval_t, mmu.make_pgd, val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pgdval_t, mmu.make_pgd, val);

        return (pgd_t) { ret };
}

static inline pgdval_t pgd_val(pgd_t pgd)
{
        pgdval_t ret;

        if (sizeof(pgdval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pgdval_t, mmu.pgd_val,
                                   pgd.pgd, (u64)pgd.pgd >> 32);
        else
                ret = PVOP_CALLEE1(pgdval_t, mmu.pgd_val, pgd.pgd);

        return ret;
}

#define  __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr,
                                           pte_t *ptep)
{
        pteval_t ret;

        ret = PVOP_CALL3(pteval_t, mmu.ptep_modify_prot_start, vma, addr, ptep);

        return (pte_t) { .pte = ret };
}

static inline void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
                                           pte_t *ptep, pte_t old_pte, pte_t pte)
{
        if (sizeof(pteval_t) > sizeof(long))
                /* 5 arg words */
                pv_ops.mmu.ptep_modify_prot_commit(vma, addr, ptep, pte);
        else
                PVOP_VCALL4(mmu.ptep_modify_prot_commit,
                            vma, addr, ptep, pte.pte);
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
        if (sizeof(pteval_t) > sizeof(long))
                PVOP_VCALL3(mmu.set_pte, ptep, pte.pte, (u64)pte.pte >> 32);
        else
                PVOP_VCALL2(mmu.set_pte, ptep, pte.pte);
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep, pte_t pte)
{
        if (sizeof(pteval_t) > sizeof(long))
                /* 5 arg words */
                pv_ops.mmu.set_pte_at(mm, addr, ptep, pte);
        else
                PVOP_VCALL4(mmu.set_pte_at, mm, addr, ptep, pte.pte);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
        pmdval_t val = native_pmd_val(pmd);

        if (sizeof(pmdval_t) > sizeof(long))
                PVOP_VCALL3(mmu.set_pmd, pmdp, val, (u64)val >> 32);
        else
                PVOP_VCALL2(mmu.set_pmd, pmdp, val);
}

#if CONFIG_PGTABLE_LEVELS >= 3
static inline pmd_t __pmd(pmdval_t val)
{
        pmdval_t ret;

        if (sizeof(pmdval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pmdval_t, mmu.make_pmd, val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pmdval_t, mmu.make_pmd, val);

        return (pmd_t) { ret };
}

static inline pmdval_t pmd_val(pmd_t pmd)
{
        pmdval_t ret;

        if (sizeof(pmdval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pmdval_t, mmu.pmd_val,
                                   pmd.pmd, (u64)pmd.pmd >> 32);
        else
                ret = PVOP_CALLEE1(pmdval_t, mmu.pmd_val, pmd.pmd);

        return ret;
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
        pudval_t val = native_pud_val(pud);

        if (sizeof(pudval_t) > sizeof(long))
                PVOP_VCALL3(mmu.set_pud, pudp, val, (u64)val >> 32);
        else
                PVOP_VCALL2(mmu.set_pud, pudp, val);
}
#if CONFIG_PGTABLE_LEVELS >= 4
static inline pud_t __pud(pudval_t val)
{
        pudval_t ret;

        ret = PVOP_CALLEE1(pudval_t, mmu.make_pud, val);

        return (pud_t) { ret };
}

static inline pudval_t pud_val(pud_t pud)
{
        return PVOP_CALLEE1(pudval_t, mmu.pud_val, pud.pud);
}

static inline void pud_clear(pud_t *pudp)
{
        set_pud(pudp, __pud(0));
}

static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
{
        p4dval_t val = native_p4d_val(p4d);

        PVOP_VCALL2(mmu.set_p4d, p4dp, val);
}

#if CONFIG_PGTABLE_LEVELS >= 5

static inline p4d_t __p4d(p4dval_t val)
{
        p4dval_t ret = PVOP_CALLEE1(p4dval_t, mmu.make_p4d, val);

        return (p4d_t) { ret };
}

static inline p4dval_t p4d_val(p4d_t p4d)
{
        return PVOP_CALLEE1(p4dval_t, mmu.p4d_val, p4d.p4d);
}

static inline void __set_pgd(pgd_t *pgdp, pgd_t pgd)
{
        PVOP_VCALL2(mmu.set_pgd, pgdp, native_pgd_val(pgd));
}

#define set_pgd(pgdp, pgdval) do {                                      \
        if (pgtable_l5_enabled())                                       \
                __set_pgd(pgdp, pgdval);                                \
        else                                                            \
                set_p4d((p4d_t *)(pgdp), (p4d_t) { (pgdval).pgd });     \
} while (0)

#define pgd_clear(pgdp) do {                                            \
        if (pgtable_l5_enabled())                                       \
                set_pgd(pgdp, __pgd(0));                                \
} while (0)
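
/*
 * Illustrative note: when a 5-level kernel runs on 4-level hardware
 * (!pgtable_l5_enabled()), the pgd is folded into the p4d, so set_pgd()
 * above falls back to set_p4d() and pgd_clear() becomes a no-op.
 */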

#endif  /* CONFIG_PGTABLE_LEVELS >= 5 */

static inline void p4d_clear(p4d_t *p4dp)
{
        set_p4d(p4dp, __p4d(0));
}

#endif  /* CONFIG_PGTABLE_LEVELS >= 4 */

#endif  /* CONFIG_PGTABLE_LEVELS >= 3 */

#ifdef CONFIG_X86_PAE
/*
 * Special-case pte-setting operations for PAE, which can't update a
 * 64-bit pte atomically.
 */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
        PVOP_VCALL3(mmu.set_pte_atomic, ptep, pte.pte, pte.pte >> 32);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
                             pte_t *ptep)
{
        PVOP_VCALL3(mmu.pte_clear, mm, addr, ptep);
}

static inline void pmd_clear(pmd_t *pmdp)
{
        PVOP_VCALL1(mmu.pmd_clear, pmdp);
}
#else  /* !CONFIG_X86_PAE */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
        set_pte(ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
                             pte_t *ptep)
{
        set_pte_at(mm, addr, ptep, __pte(0));
}

static inline void pmd_clear(pmd_t *pmdp)
{
        set_pmd(pmdp, __pmd(0));
}
#endif  /* CONFIG_X86_PAE */

#define  __HAVE_ARCH_START_CONTEXT_SWITCH
static inline void arch_start_context_switch(struct task_struct *prev)
{
        PVOP_VCALL1(cpu.start_context_switch, prev);
}

static inline void arch_end_context_switch(struct task_struct *next)
{
        PVOP_VCALL1(cpu.end_context_switch, next);
}

#define  __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
        PVOP_VCALL0(mmu.lazy_mode.enter);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
        PVOP_VCALL0(mmu.lazy_mode.leave);
}

static inline void arch_flush_lazy_mmu_mode(void)
{
        PVOP_VCALL0(mmu.lazy_mode.flush);
}
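
/*
 * Usage sketch (illustrative): the generic mm code brackets batched
 * page-table updates with these hooks so a hypervisor can queue them:
 *
 *	arch_enter_lazy_mmu_mode();
 *	for (...)
 *		set_pte_at(mm, addr, ptep, pte);
 *	arch_leave_lazy_mmu_mode();
 */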

static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
                                phys_addr_t phys, pgprot_t flags)
{
        pv_ops.mmu.set_fixmap(idx, phys, flags);
}
#endif  /* CONFIG_PARAVIRT_XXL */

#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)

static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock,
                                                         u32 val)
{
        PVOP_VCALL2(lock.queued_spin_lock_slowpath, lock, val);
}

static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock)
{
        PVOP_VCALLEE1(lock.queued_spin_unlock, lock);
}

static __always_inline void pv_wait(u8 *ptr, u8 val)
{
        PVOP_VCALL2(lock.wait, ptr, val);
}

static __always_inline void pv_kick(int cpu)
{
        PVOP_VCALL1(lock.kick, cpu);
}
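
/*
 * Usage sketch (illustrative): the pv qspinlock slow path parks a
 * waiting vCPU on these hooks instead of spinning, roughly:
 *
 *	pv_wait(&lock_byte, expected);
 *
 * until another CPU does
 *
 *	pv_kick(cpu);
 */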

static __always_inline bool pv_vcpu_is_preempted(long cpu)
{
        return PVOP_CALLEE1(bool, lock.vcpu_is_preempted, cpu);
}

void __raw_callee_save___native_queued_spin_unlock(struct qspinlock *lock);
bool __raw_callee_save___native_vcpu_is_preempted(long cpu);

#endif /* SMP && PARAVIRT_SPINLOCKS */

#ifdef CONFIG_X86_32
#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
#define PV_RESTORE_REGS "popl %edx; popl %ecx;"

/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS         "pushl %ecx;"
#define PV_RESTORE_ALL_CALLER_REGS      "popl  %ecx;"

#define PV_FLAGS_ARG "0"
#define PV_EXTRA_CLOBBERS
#define PV_VEXTRA_CLOBBERS
#else
/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS                                         \
        "push %rcx;"                                                    \
        "push %rdx;"                                                    \
        "push %rsi;"                                                    \
        "push %rdi;"                                                    \
        "push %r8;"                                                     \
        "push %r9;"                                                     \
        "push %r10;"                                                    \
        "push %r11;"
#define PV_RESTORE_ALL_CALLER_REGS                                      \
        "pop %r11;"                                                     \
        "pop %r10;"                                                     \
        "pop %r9;"                                                      \
        "pop %r8;"                                                      \
        "pop %rdi;"                                                     \
        "pop %rsi;"                                                     \
        "pop %rdx;"                                                     \
        "pop %rcx;"

/*
 * We save some registers, but not all of them; that would be too much.
 * We clobber all caller-saved registers except the argument parameter.
 */
#define PV_SAVE_REGS "pushq %%rdi;"
#define PV_RESTORE_REGS "popq %%rdi;"
#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx", "rdx", "rsi"
#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx", "rdx", "rsi"
#define PV_FLAGS_ARG "D"
#endif

/*
 * Generate a thunk around a function which saves all caller-save
 * registers except for the return value.  This allows C functions to
 * be called from assembler code where fewer than normal registers are
 * available.  It may also help code generation around calls from C
 * code if the common case doesn't use many registers.
 *
 * When a callee is wrapped in a thunk, the caller can assume that all
 * arg regs and all scratch registers are preserved across the
 * call. The return value in rax/eax will not be saved, even for void
 * functions.
 */
#define PV_THUNK_NAME(func) "__raw_callee_save_" #func
#define PV_CALLEE_SAVE_REGS_THUNK(func)                                 \
        extern typeof(func) __raw_callee_save_##func;                   \
                                                                        \
        asm(".pushsection .text;"                                       \
            ".globl " PV_THUNK_NAME(func) ";"                           \
            ".type " PV_THUNK_NAME(func) ", @function;"                 \
            PV_THUNK_NAME(func) ":"                                     \
            FRAME_BEGIN                                                 \
            PV_SAVE_ALL_CALLER_REGS                                     \
            "call " #func ";"                                           \
            PV_RESTORE_ALL_CALLER_REGS                                  \
            FRAME_END                                                   \
            "ret;"                                                      \
            ".popsection")

/* Get a reference to a callee-save function */
#define PV_CALLEE_SAVE(func)                                            \
        ((struct paravirt_callee_save) { __raw_callee_save_##func })

/* Promise that "func" already uses the right calling convention */
#define __PV_IS_CALLEE_SAVE(func)                       \
        ((struct paravirt_callee_save) { func })
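
/*
 * Usage sketch (illustrative; steal_vcpu_is_preempted is a made-up
 * name): a pv backend defines the C function, emits the thunk, and
 * installs the wrapped entry point:
 *
 *	static bool steal_vcpu_is_preempted(long cpu) { ... }
 *	PV_CALLEE_SAVE_REGS_THUNK(steal_vcpu_is_preempted);
 *
 *	pv_ops.lock.vcpu_is_preempted =
 *		PV_CALLEE_SAVE(steal_vcpu_is_preempted);
 */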

#ifdef CONFIG_PARAVIRT_XXL
static inline notrace unsigned long arch_local_save_flags(void)
{
        return PVOP_CALLEE0(unsigned long, irq.save_fl);
}

static inline notrace void arch_local_irq_restore(unsigned long f)
{
        PVOP_VCALLEE1(irq.restore_fl, f);
}

static inline notrace void arch_local_irq_disable(void)
{
        PVOP_VCALLEE0(irq.irq_disable);
}

static inline notrace void arch_local_irq_enable(void)
{
        PVOP_VCALLEE0(irq.irq_enable);
}

static inline notrace unsigned long arch_local_irq_save(void)
{
        unsigned long f;

        f = arch_local_save_flags();
        arch_local_irq_disable();
        return f;
}
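
/*
 * Usage sketch (illustrative): these implement the arch hooks behind
 * the generic IRQ-flags API, so the usual pattern works unchanged:
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	... critical section ...
 *	local_irq_restore(flags);
 */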
#endif

/* Make sure as little as possible of this mess escapes. */
#undef PARAVIRT_CALL
#undef __PVOP_CALL
#undef __PVOP_VCALL
#undef PVOP_VCALL0
#undef PVOP_CALL0
#undef PVOP_VCALL1
#undef PVOP_CALL1
#undef PVOP_VCALL2
#undef PVOP_CALL2
#undef PVOP_VCALL3
#undef PVOP_CALL3
#undef PVOP_VCALL4
#undef PVOP_CALL4

extern void default_banner(void);

#else  /* __ASSEMBLY__ */

#define _PVSITE(ptype, ops, word, algn)         \
771:;                                           \
        ops;                                    \
772:;                                           \
        .pushsection .parainstructions,"a";     \
         .align algn;                           \
         word 771b;                             \
         .byte ptype;                           \
         .byte 772b-771b;                       \
        .popsection
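
/*
 * Illustrative note: each _PVSITE expansion records the span between
 * the 771: and 772: labels in .parainstructions, together with the
 * paravirt type and the site length (cf. struct paravirt_patch_site in
 * <asm/paravirt_types.h>); apply_paravirt() walks these records at
 * boot and may patch each site in place.
 */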

#define COND_PUSH(set, mask, reg)                       \
        .if ((~(set)) & mask); push %reg; .endif
#define COND_POP(set, mask, reg)                        \
        .if ((~(set)) & mask); pop %reg; .endif

#ifdef CONFIG_X86_64

#define PV_SAVE_REGS(set)                       \
        COND_PUSH(set, CLBR_RAX, rax);          \
        COND_PUSH(set, CLBR_RCX, rcx);          \
        COND_PUSH(set, CLBR_RDX, rdx);          \
        COND_PUSH(set, CLBR_RSI, rsi);          \
        COND_PUSH(set, CLBR_RDI, rdi);          \
        COND_PUSH(set, CLBR_R8, r8);            \
        COND_PUSH(set, CLBR_R9, r9);            \
        COND_PUSH(set, CLBR_R10, r10);          \
        COND_PUSH(set, CLBR_R11, r11)
#define PV_RESTORE_REGS(set)                    \
        COND_POP(set, CLBR_R11, r11);           \
        COND_POP(set, CLBR_R10, r10);           \
        COND_POP(set, CLBR_R9, r9);             \
        COND_POP(set, CLBR_R8, r8);             \
        COND_POP(set, CLBR_RDI, rdi);           \
        COND_POP(set, CLBR_RSI, rsi);           \
        COND_POP(set, CLBR_RDX, rdx);           \
        COND_POP(set, CLBR_RCX, rcx);           \
        COND_POP(set, CLBR_RAX, rax)

#define PARA_PATCH(off)         ((off) / 8)
#define PARA_SITE(ptype, ops)   _PVSITE(ptype, ops, .quad, 8)
#define PARA_INDIRECT(addr)     *addr(%rip)
#else
#define PV_SAVE_REGS(set)                       \
        COND_PUSH(set, CLBR_EAX, eax);          \
        COND_PUSH(set, CLBR_EDI, edi);          \
        COND_PUSH(set, CLBR_ECX, ecx);          \
        COND_PUSH(set, CLBR_EDX, edx)
#define PV_RESTORE_REGS(set)                    \
        COND_POP(set, CLBR_EDX, edx);           \
        COND_POP(set, CLBR_ECX, ecx);           \
        COND_POP(set, CLBR_EDI, edi);           \
        COND_POP(set, CLBR_EAX, eax)

#define PARA_PATCH(off)         ((off) / 4)
#define PARA_SITE(ptype, ops)   _PVSITE(ptype, ops, .long, 4)
#define PARA_INDIRECT(addr)     *%cs:addr
#endif

#ifdef CONFIG_PARAVIRT_XXL
#define INTERRUPT_RETURN                                                \
        PARA_SITE(PARA_PATCH(PV_CPU_iret),                              \
                  ANNOTATE_RETPOLINE_SAFE;                              \
                  jmp PARA_INDIRECT(pv_ops+PV_CPU_iret);)

#define DISABLE_INTERRUPTS(clobbers)                                    \
        PARA_SITE(PARA_PATCH(PV_IRQ_irq_disable),                       \
                  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);            \
                  ANNOTATE_RETPOLINE_SAFE;                              \
                  call PARA_INDIRECT(pv_ops+PV_IRQ_irq_disable);        \
                  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#define ENABLE_INTERRUPTS(clobbers)                                     \
        PARA_SITE(PARA_PATCH(PV_IRQ_irq_enable),                        \
                  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);            \
                  ANNOTATE_RETPOLINE_SAFE;                              \
                  call PARA_INDIRECT(pv_ops+PV_IRQ_irq_enable);         \
                  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
#endif

#ifdef CONFIG_X86_64
#ifdef CONFIG_PARAVIRT_XXL
/*
 * If swapgs is used while the userspace stack is still current,
 * there's no way to call a pvop.  The PV replacement *must* be
 * inlined, or the swapgs instruction must be trapped and emulated.
 */
#define SWAPGS_UNSAFE_STACK                                             \
        PARA_SITE(PARA_PATCH(PV_CPU_swapgs), swapgs)

/*
 * Note: swapgs is very special, and in practice is either going to be
 * implemented with a single "swapgs" instruction or something equally
 * special.  Either way, we don't need to save any registers for
 * it.
 */
#define SWAPGS                                                          \
        PARA_SITE(PARA_PATCH(PV_CPU_swapgs),                            \
                  ANNOTATE_RETPOLINE_SAFE;                              \
                  call PARA_INDIRECT(pv_ops+PV_CPU_swapgs);             \
                 )
#endif

#define GET_CR2_INTO_RAX                                \
        ANNOTATE_RETPOLINE_SAFE;                        \
        call PARA_INDIRECT(pv_ops+PV_MMU_read_cr2);

#ifdef CONFIG_PARAVIRT_XXL
#define USERGS_SYSRET64                                                 \
        PARA_SITE(PARA_PATCH(PV_CPU_usergs_sysret64),                   \
                  ANNOTATE_RETPOLINE_SAFE;                              \
                  jmp PARA_INDIRECT(pv_ops+PV_CPU_usergs_sysret64);)

#ifdef CONFIG_DEBUG_ENTRY
#define SAVE_FLAGS(clobbers)                                        \
        PARA_SITE(PARA_PATCH(PV_IRQ_save_fl),                       \
                  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);        \
                  ANNOTATE_RETPOLINE_SAFE;                          \
                  call PARA_INDIRECT(pv_ops+PV_IRQ_save_fl);        \
                  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
#endif
#endif

#endif  /* CONFIG_X86_64 */

#endif /* __ASSEMBLY__ */
#else  /* CONFIG_PARAVIRT */
# define default_banner x86_init_noop
#endif /* !CONFIG_PARAVIRT */

#ifndef __ASSEMBLY__
#ifndef CONFIG_PARAVIRT_XXL
static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
                                          struct mm_struct *mm)
{
}
#endif

#ifndef CONFIG_PARAVIRT
static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
{
}
#endif
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_PARAVIRT_H */