linux/arch/x86/include/asm/paravirt.h
#ifndef _ASM_X86_PARAVIRT_H
#define _ASM_X86_PARAVIRT_H
/* Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here. */

#ifdef CONFIG_PARAVIRT
#include <asm/pgtable_types.h>
#include <asm/asm.h>

#include <asm/paravirt_types.h>

#ifndef __ASSEMBLY__
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/cpumask.h>
#include <asm/frame.h>

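/*
 * The PVOP_CALL*()/PVOP_VCALL*() macros (defined in paravirt_types.h)
 * expand to an indirect call through the named pv_*_ops slot and record
 * the call site in .parainstructions, so that it can later be patched
 * with the native instruction sequence or a direct call.
 */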
static inline void load_sp0(struct tss_struct *tss,
                            struct thread_struct *thread)
{
        PVOP_VCALL2(pv_cpu_ops.load_sp0, tss, thread);
}

/* The paravirtualized CPUID instruction. */
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
                           unsigned int *ecx, unsigned int *edx)
{
        PVOP_VCALL4(pv_cpu_ops.cpuid, eax, ebx, ecx, edx);
}

/*
 * These special macros can be used to get or set a debugging register
 */
static inline unsigned long paravirt_get_debugreg(int reg)
{
        return PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, reg);
}
#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
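
/*
 * Note the argument order: callers pass (value, register) to match the
 * native set_debugreg() macro, while the underlying op takes (reg, value).
 */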
static inline void set_debugreg(unsigned long val, int reg)
{
        PVOP_VCALL2(pv_cpu_ops.set_debugreg, reg, val);
}

static inline void clts(void)
{
        PVOP_VCALL0(pv_cpu_ops.clts);
}

static inline unsigned long read_cr0(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0);
}

static inline void write_cr0(unsigned long x)
{
        PVOP_VCALL1(pv_cpu_ops.write_cr0, x);
}

static inline unsigned long read_cr2(void)
{
        return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
}

static inline void write_cr2(unsigned long x)
{
        PVOP_VCALL1(pv_mmu_ops.write_cr2, x);
}

static inline unsigned long read_cr3(void)
{
        return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr3);
}

static inline void write_cr3(unsigned long x)
{
        PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
}

static inline unsigned long __read_cr4(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4);
}

static inline void __write_cr4(unsigned long x)
{
        PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
}

#ifdef CONFIG_X86_64
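/* CR8 is the 64-bit task-priority register (TPR). */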
static inline unsigned long read_cr8(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr8);
}

static inline void write_cr8(unsigned long x)
{
        PVOP_VCALL1(pv_cpu_ops.write_cr8, x);
}
#endif

static inline void arch_safe_halt(void)
{
        PVOP_VCALL0(pv_irq_ops.safe_halt);
}

static inline void halt(void)
{
        PVOP_VCALL0(pv_irq_ops.halt);
}

static inline void wbinvd(void)
{
        PVOP_VCALL0(pv_cpu_ops.wbinvd);
}

#define get_kernel_rpl()  (pv_info.kernel_rpl)

static inline u64 paravirt_read_msr(unsigned msr)
{
        return PVOP_CALL1(u64, pv_cpu_ops.read_msr, msr);
}

static inline void paravirt_write_msr(unsigned msr,
                                      unsigned low, unsigned high)
{
        return PVOP_VCALL3(pv_cpu_ops.write_msr, msr, low, high);
}

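/*
 * The _safe variants catch the #GP a non-existent MSR would raise and
 * report it through *err / the return value instead of faulting.
 */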
static inline u64 paravirt_read_msr_safe(unsigned msr, int *err)
{
        return PVOP_CALL2(u64, pv_cpu_ops.read_msr_safe, msr, err);
}

static inline int paravirt_write_msr_safe(unsigned msr,
                                          unsigned low, unsigned high)
{
        return PVOP_CALL3(int, pv_cpu_ops.write_msr_safe, msr, low, high);
}

#define rdmsr(msr, val1, val2)                  \
do {                                            \
        u64 _l = paravirt_read_msr(msr);        \
        val1 = (u32)_l;                         \
        val2 = _l >> 32;                        \
} while (0)

#define wrmsr(msr, val1, val2)                  \
do {                                            \
        paravirt_write_msr(msr, val1, val2);    \
} while (0)

#define rdmsrl(msr, val)                        \
do {                                            \
        val = paravirt_read_msr(msr);           \
} while (0)

static inline void wrmsrl(unsigned msr, u64 val)
{
        wrmsr(msr, (u32)val, (u32)(val>>32));
}
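
/*
 * Illustrative use of the 64-bit accessors (MSR_EFER and EFER_NX come
 * from <asm/msr-index.h>):
 *
 *	u64 efer;
 *
 *	rdmsrl(MSR_EFER, efer);
 *	wrmsrl(MSR_EFER, efer | EFER_NX);
 */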

#define wrmsr_safe(msr, a, b)   paravirt_write_msr_safe(msr, a, b)

/* rdmsr with exception handling */
#define rdmsr_safe(msr, a, b)                           \
({                                                      \
        int _err;                                       \
        u64 _l = paravirt_read_msr_safe(msr, &_err);    \
        (*a) = (u32)_l;                                 \
        (*b) = _l >> 32;                                \
        _err;                                           \
})

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
        int err;

        *p = paravirt_read_msr_safe(msr, &err);
        return err;
}

static inline unsigned long long paravirt_sched_clock(void)
{
        return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
}

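/*
 * Steal time: the static keys below are switched on by the guest setup
 * code when the hypervisor exposes a steal clock, enabling steal-time
 * accounting in the scheduler.
 */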
struct static_key;
extern struct static_key paravirt_steal_enabled;
extern struct static_key paravirt_steal_rq_enabled;

static inline u64 paravirt_steal_clock(int cpu)
{
        return PVOP_CALL1(u64, pv_time_ops.steal_clock, cpu);
}

static inline unsigned long long paravirt_read_pmc(int counter)
{
        return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
}

#define rdpmc(counter, low, high)               \
do {                                            \
        u64 _l = paravirt_read_pmc(counter);    \
        low = (u32)_l;                          \
        high = _l >> 32;                        \
} while (0)

#define rdpmcl(counter, val) ((val) = paravirt_read_pmc(counter))

static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
        PVOP_VCALL2(pv_cpu_ops.alloc_ldt, ldt, entries);
}

static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
        PVOP_VCALL2(pv_cpu_ops.free_ldt, ldt, entries);
}

static inline void load_TR_desc(void)
{
        PVOP_VCALL0(pv_cpu_ops.load_tr_desc);
}
static inline void load_gdt(const struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.load_gdt, dtr);
}
static inline void load_idt(const struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.load_idt, dtr);
}
static inline void set_ldt(const void *addr, unsigned entries)
{
        PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
}
static inline void store_idt(struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.store_idt, dtr);
}
static inline unsigned long paravirt_store_tr(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.store_tr);
}
#define store_tr(tr)    ((tr) = paravirt_store_tr())
static inline void load_TLS(struct thread_struct *t, unsigned cpu)
{
        PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu);
}

#ifdef CONFIG_X86_64
static inline void load_gs_index(unsigned int gs)
{
        PVOP_VCALL1(pv_cpu_ops.load_gs_index, gs);
}
#endif

static inline void write_ldt_entry(struct desc_struct *dt, int entry,
                                   const void *desc)
{
        PVOP_VCALL3(pv_cpu_ops.write_ldt_entry, dt, entry, desc);
}

static inline void write_gdt_entry(struct desc_struct *dt, int entry,
                                   void *desc, int type)
{
        PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, desc, type);
}

static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
        PVOP_VCALL3(pv_cpu_ops.write_idt_entry, dt, entry, g);
}
static inline void set_iopl_mask(unsigned mask)
{
        PVOP_VCALL1(pv_cpu_ops.set_iopl_mask, mask);
}

/* The paravirtualized I/O functions */
static inline void slow_down_io(void)
{
        pv_cpu_ops.io_delay();
#ifdef REALLY_SLOW_IO
        pv_cpu_ops.io_delay();
        pv_cpu_ops.io_delay();
        pv_cpu_ops.io_delay();
#endif
}

static inline void paravirt_activate_mm(struct mm_struct *prev,
                                        struct mm_struct *next)
{
        PVOP_VCALL2(pv_mmu_ops.activate_mm, prev, next);
}

static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
                                          struct mm_struct *mm)
{
        PVOP_VCALL2(pv_mmu_ops.dup_mmap, oldmm, mm);
}

static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
{
        PVOP_VCALL1(pv_mmu_ops.exit_mmap, mm);
}

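/*
 * TLB flushing: flush_tlb_user drops non-global translations,
 * flush_tlb_kernel also drops global ones, and flush_tlb_single
 * invalidates a single address (native: INVLPG).
 */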
static inline void __flush_tlb(void)
{
        PVOP_VCALL0(pv_mmu_ops.flush_tlb_user);
}
static inline void __flush_tlb_global(void)
{
        PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
}
static inline void __flush_tlb_single(unsigned long addr)
{
        PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
}

static inline void flush_tlb_others(const struct cpumask *cpumask,
                                    struct mm_struct *mm,
                                    unsigned long start,
                                    unsigned long end)
{
        PVOP_VCALL4(pv_mmu_ops.flush_tlb_others, cpumask, mm, start, end);
}

static inline int paravirt_pgd_alloc(struct mm_struct *mm)
{
        return PVOP_CALL1(int, pv_mmu_ops.pgd_alloc, mm);
}

static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
        PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd);
}

static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
        PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
}
static inline void paravirt_release_pte(unsigned long pfn)
{
        PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
}

static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
        PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
}

static inline void paravirt_release_pmd(unsigned long pfn)
{
        PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
}

static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
        PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
}
static inline void paravirt_release_pud(unsigned long pfn)
{
        PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
}

static inline void pte_update(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep)
{
        PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep);
}

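/*
 * Conversions between pte/pmd/pud/pgd values and their typed wrappers.
 * On 32-bit PAE the 64-bit value does not fit in one register, so it is
 * passed and returned as two 32-bit halves; the CALLEE variants use the
 * lighter callee-save calling convention so these hot helpers stay cheap.
 */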
static inline pte_t __pte(pteval_t val)
{
        pteval_t ret;

        if (sizeof(pteval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pteval_t,
                                   pv_mmu_ops.make_pte,
                                   val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pteval_t,
                                   pv_mmu_ops.make_pte,
                                   val);

        return (pte_t) { .pte = ret };
}

static inline pteval_t pte_val(pte_t pte)
{
        pteval_t ret;

        if (sizeof(pteval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pteval_t, pv_mmu_ops.pte_val,
                                   pte.pte, (u64)pte.pte >> 32);
        else
                ret = PVOP_CALLEE1(pteval_t, pv_mmu_ops.pte_val,
                                   pte.pte);

        return ret;
}

static inline pgd_t __pgd(pgdval_t val)
{
        pgdval_t ret;

        if (sizeof(pgdval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.make_pgd,
                                   val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.make_pgd,
                                   val);

        return (pgd_t) { ret };
}

static inline pgdval_t pgd_val(pgd_t pgd)
{
        pgdval_t ret;

        if (sizeof(pgdval_t) > sizeof(long))
                ret =  PVOP_CALLEE2(pgdval_t, pv_mmu_ops.pgd_val,
                                    pgd.pgd, (u64)pgd.pgd >> 32);
        else
                ret =  PVOP_CALLEE1(pgdval_t, pv_mmu_ops.pgd_val,
                                    pgd.pgd);

        return ret;
}

#define  __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
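/*
 * ptep_modify_prot_start() returns the current pte and begins a
 * read-modify-write of its protections; ptep_modify_prot_commit()
 * installs the new value.  A hypervisor can use the pair to make the
 * whole update a single batched operation.
 */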
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
                                           pte_t *ptep)
{
        pteval_t ret;

        ret = PVOP_CALL3(pteval_t, pv_mmu_ops.ptep_modify_prot_start,
                         mm, addr, ptep);

        return (pte_t) { .pte = ret };
}

static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
                                           pte_t *ptep, pte_t pte)
{
        if (sizeof(pteval_t) > sizeof(long))
                /* 5 arg words */
                pv_mmu_ops.ptep_modify_prot_commit(mm, addr, ptep, pte);
        else
                PVOP_VCALL4(pv_mmu_ops.ptep_modify_prot_commit,
                            mm, addr, ptep, pte.pte);
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
        if (sizeof(pteval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pte, ptep,
                            pte.pte, (u64)pte.pte >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pte, ptep,
                            pte.pte);
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep, pte_t pte)
{
        if (sizeof(pteval_t) > sizeof(long))
                /* 5 arg words */
                pv_mmu_ops.set_pte_at(mm, addr, ptep, pte);
        else
                PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                              pmd_t *pmdp, pmd_t pmd)
{
        if (sizeof(pmdval_t) > sizeof(long))
                /* 5 arg words */
                pv_mmu_ops.set_pmd_at(mm, addr, pmdp, pmd);
        else
                PVOP_VCALL4(pv_mmu_ops.set_pmd_at, mm, addr, pmdp,
                            native_pmd_val(pmd));
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
        pmdval_t val = native_pmd_val(pmd);

        if (sizeof(pmdval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp, val, (u64)val >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
}

#if CONFIG_PGTABLE_LEVELS >= 3
static inline pmd_t __pmd(pmdval_t val)
{
        pmdval_t ret;

        if (sizeof(pmdval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.make_pmd,
                                   val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.make_pmd,
                                   val);

        return (pmd_t) { ret };
}

static inline pmdval_t pmd_val(pmd_t pmd)
{
        pmdval_t ret;

        if (sizeof(pmdval_t) > sizeof(long))
                ret =  PVOP_CALLEE2(pmdval_t, pv_mmu_ops.pmd_val,
                                    pmd.pmd, (u64)pmd.pmd >> 32);
        else
                ret =  PVOP_CALLEE1(pmdval_t, pv_mmu_ops.pmd_val,
                                    pmd.pmd);

        return ret;
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
        pudval_t val = native_pud_val(pud);

        if (sizeof(pudval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
                            val, (u64)val >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
                            val);
}
#if CONFIG_PGTABLE_LEVELS == 4
static inline pud_t __pud(pudval_t val)
{
        pudval_t ret;

        if (sizeof(pudval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.make_pud,
                                   val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.make_pud,
                                   val);

        return (pud_t) { ret };
}

static inline pudval_t pud_val(pud_t pud)
{
        pudval_t ret;

        if (sizeof(pudval_t) > sizeof(long))
                ret =  PVOP_CALLEE2(pudval_t, pv_mmu_ops.pud_val,
                                    pud.pud, (u64)pud.pud >> 32);
        else
                ret =  PVOP_CALLEE1(pudval_t, pv_mmu_ops.pud_val,
                                    pud.pud);

        return ret;
}

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
        pgdval_t val = native_pgd_val(pgd);

        if (sizeof(pgdval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pgd, pgdp,
                            val, (u64)val >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp,
                            val);
}

static inline void pgd_clear(pgd_t *pgdp)
{
        set_pgd(pgdp, __pgd(0));
}

static inline void pud_clear(pud_t *pudp)
{
        set_pud(pudp, __pud(0));
}

#endif  /* CONFIG_PGTABLE_LEVELS == 4 */

#endif  /* CONFIG_PGTABLE_LEVELS >= 3 */

#ifdef CONFIG_X86_PAE
/* Special-case pte-setting operations for PAE, which can't update a
   64-bit pte atomically */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
        PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep,
                    pte.pte, pte.pte >> 32);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
                             pte_t *ptep)
{
        PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
}

static inline void pmd_clear(pmd_t *pmdp)
{
        PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
}
#else  /* !CONFIG_X86_PAE */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
        set_pte(ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
                             pte_t *ptep)
{
        set_pte_at(mm, addr, ptep, __pte(0));
}

static inline void pmd_clear(pmd_t *pmdp)
{
        set_pmd(pmdp, __pmd(0));
}
#endif  /* CONFIG_X86_PAE */

#define  __HAVE_ARCH_START_CONTEXT_SWITCH
static inline void arch_start_context_switch(struct task_struct *prev)
{
        PVOP_VCALL1(pv_cpu_ops.start_context_switch, prev);
}

static inline void arch_end_context_switch(struct task_struct *next)
{
        PVOP_VCALL1(pv_cpu_ops.end_context_switch, next);
}

#define  __HAVE_ARCH_ENTER_LAZY_MMU_MODE
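/*
 * Lazy MMU mode: between enter and leave the hypervisor may queue
 * page-table updates and apply them in one batch; flush pushes any
 * pending updates out early.
 */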
static inline void arch_enter_lazy_mmu_mode(void)
{
        PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
        PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
}

static inline void arch_flush_lazy_mmu_mode(void)
{
        PVOP_VCALL0(pv_mmu_ops.lazy_mode.flush);
}

static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
                                phys_addr_t phys, pgprot_t flags)
{
        pv_mmu_ops.set_fixmap(idx, phys, flags);
}

#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)

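/*
 * Paravirt qspinlocks: instead of spinning, a waiter can pv_wait() in the
 * hypervisor until a byte reaches a given value, and the lock holder
 * pv_kick()s the waiting vCPU when the lock is released.
 */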
static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock,
                                                        u32 val)
{
        PVOP_VCALL2(pv_lock_ops.queued_spin_lock_slowpath, lock, val);
}

static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock)
{
        PVOP_VCALLEE1(pv_lock_ops.queued_spin_unlock, lock);
}

static __always_inline void pv_wait(u8 *ptr, u8 val)
{
        PVOP_VCALL2(pv_lock_ops.wait, ptr, val);
}

static __always_inline void pv_kick(int cpu)
{
        PVOP_VCALL1(pv_lock_ops.kick, cpu);
}

#endif /* SMP && PARAVIRT_SPINLOCKS */

#ifdef CONFIG_X86_32
#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
#define PV_RESTORE_REGS "popl %edx; popl %ecx;"

/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS         "pushl %ecx;"
#define PV_RESTORE_ALL_CALLER_REGS      "popl  %ecx;"

#define PV_FLAGS_ARG "0"
#define PV_EXTRA_CLOBBERS
#define PV_VEXTRA_CLOBBERS
#else
/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS                                         \
        "push %rcx;"                                                    \
        "push %rdx;"                                                    \
        "push %rsi;"                                                    \
        "push %rdi;"                                                    \
        "push %r8;"                                                     \
        "push %r9;"                                                     \
        "push %r10;"                                                    \
        "push %r11;"
#define PV_RESTORE_ALL_CALLER_REGS                                      \
        "pop %r11;"                                                     \
        "pop %r10;"                                                     \
        "pop %r9;"                                                      \
        "pop %r8;"                                                      \
        "pop %rdi;"                                                     \
        "pop %rsi;"                                                     \
        "pop %rdx;"                                                     \
        "pop %rcx;"

/* We save some registers, but not all of them; saving everything would be
 * too much.  We clobber all caller-saved registers except the argument
 * register. */
#define PV_SAVE_REGS "pushq %%rdi;"
#define PV_RESTORE_REGS "popq %%rdi;"
#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx", "rsi"
#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx", "rsi"
#define PV_FLAGS_ARG "D"
#endif

/*
 * Generate a thunk around a function which saves all caller-save
 * registers except for the return value.  This allows C functions to
 * be called from assembler code where fewer than normal registers are
 * available.  It may also help code generation around calls from C
 * code if the common case doesn't use many registers.
 *
 * When a callee is wrapped in a thunk, the caller can assume that all
 * arg regs and all scratch registers are preserved across the
 * call. The return value in rax/eax will not be saved, even for void
 * functions.
 */
#define PV_THUNK_NAME(func) "__raw_callee_save_" #func
#define PV_CALLEE_SAVE_REGS_THUNK(func)                                 \
        extern typeof(func) __raw_callee_save_##func;                   \
                                                                        \
        asm(".pushsection .text;"                                       \
            ".globl " PV_THUNK_NAME(func) ";"                           \
            ".type " PV_THUNK_NAME(func) ", @function;"                 \
            PV_THUNK_NAME(func) ":"                                     \
            FRAME_BEGIN                                                 \
            PV_SAVE_ALL_CALLER_REGS                                     \
            "call " #func ";"                                           \
            PV_RESTORE_ALL_CALLER_REGS                                  \
            FRAME_END                                                   \
            "ret;"                                                      \
            ".popsection")

/* Get a reference to a callee-save function */
#define PV_CALLEE_SAVE(func)                                            \
        ((struct paravirt_callee_save) { __raw_callee_save_##func })

/* Promise that "func" already uses the right calling convention */
#define __PV_IS_CALLEE_SAVE(func)                       \
        ((struct paravirt_callee_save) { func })
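
/*
 * Illustrative use, as in the paravirt qspinlock code:
 *
 *	PV_CALLEE_SAVE_REGS_THUNK(__native_queued_spin_unlock);
 *
 *	pv_lock_ops.queued_spin_unlock =
 *		PV_CALLEE_SAVE(__native_queued_spin_unlock);
 */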

static inline notrace unsigned long arch_local_save_flags(void)
{
        return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl);
}

static inline notrace void arch_local_irq_restore(unsigned long f)
{
        PVOP_VCALLEE1(pv_irq_ops.restore_fl, f);
}

static inline notrace void arch_local_irq_disable(void)
{
        PVOP_VCALLEE0(pv_irq_ops.irq_disable);
}

static inline notrace void arch_local_irq_enable(void)
{
        PVOP_VCALLEE0(pv_irq_ops.irq_enable);
}

static inline notrace unsigned long arch_local_irq_save(void)
{
        unsigned long f;

        f = arch_local_save_flags();
        arch_local_irq_disable();
        return f;
}


/* Make sure as little as possible of this mess escapes. */
#undef PARAVIRT_CALL
#undef __PVOP_CALL
#undef __PVOP_VCALL
#undef PVOP_VCALL0
#undef PVOP_CALL0
#undef PVOP_VCALL1
#undef PVOP_CALL1
#undef PVOP_VCALL2
#undef PVOP_CALL2
#undef PVOP_VCALL3
#undef PVOP_CALL3
#undef PVOP_VCALL4
#undef PVOP_CALL4

extern void default_banner(void);

#else  /* __ASSEMBLY__ */

#define _PVSITE(ptype, clobbers, ops, word, algn)       \
771:;                                           \
        ops;                                    \
772:;                                           \
        .pushsection .parainstructions,"a";     \
         .align algn;                           \
         word 771b;                             \
         .byte ptype;                           \
         .byte 772b-771b;                       \
         .short clobbers;                       \
        .popsection
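
/*
 * Each record emitted above corresponds to struct paravirt_patch_site
 * (instruction address, op type, site length, clobber mask), which
 * apply_paravirt() walks at boot to patch every call site.
 */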


#define COND_PUSH(set, mask, reg)                       \
        .if ((~(set)) & mask); push %reg; .endif
#define COND_POP(set, mask, reg)                        \
        .if ((~(set)) & mask); pop %reg; .endif

#ifdef CONFIG_X86_64

#define PV_SAVE_REGS(set)                       \
        COND_PUSH(set, CLBR_RAX, rax);          \
        COND_PUSH(set, CLBR_RCX, rcx);          \
        COND_PUSH(set, CLBR_RDX, rdx);          \
        COND_PUSH(set, CLBR_RSI, rsi);          \
        COND_PUSH(set, CLBR_RDI, rdi);          \
        COND_PUSH(set, CLBR_R8, r8);            \
        COND_PUSH(set, CLBR_R9, r9);            \
        COND_PUSH(set, CLBR_R10, r10);          \
        COND_PUSH(set, CLBR_R11, r11)
#define PV_RESTORE_REGS(set)                    \
        COND_POP(set, CLBR_R11, r11);           \
        COND_POP(set, CLBR_R10, r10);           \
        COND_POP(set, CLBR_R9, r9);             \
        COND_POP(set, CLBR_R8, r8);             \
        COND_POP(set, CLBR_RDI, rdi);           \
        COND_POP(set, CLBR_RSI, rsi);           \
        COND_POP(set, CLBR_RDX, rdx);           \
        COND_POP(set, CLBR_RCX, rcx);           \
        COND_POP(set, CLBR_RAX, rax)

#define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 8)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
#define PARA_INDIRECT(addr)     *addr(%rip)
#else
#define PV_SAVE_REGS(set)                       \
        COND_PUSH(set, CLBR_EAX, eax);          \
        COND_PUSH(set, CLBR_EDI, edi);          \
        COND_PUSH(set, CLBR_ECX, ecx);          \
        COND_PUSH(set, CLBR_EDX, edx)
#define PV_RESTORE_REGS(set)                    \
        COND_POP(set, CLBR_EDX, edx);           \
        COND_POP(set, CLBR_ECX, ecx);           \
        COND_POP(set, CLBR_EDI, edi);           \
        COND_POP(set, CLBR_EAX, eax)

#define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 4)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
#define PARA_INDIRECT(addr)     *%cs:addr
#endif

#define INTERRUPT_RETURN                                                \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE,       \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret))

#define DISABLE_INTERRUPTS(clobbers)                                    \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
                  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);            \
                  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable);    \
                  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#define ENABLE_INTERRUPTS(clobbers)                                     \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers,  \
                  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);            \
                  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable);     \
                  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
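
/*
 * DISABLE_INTERRUPTS/ENABLE_INTERRUPTS are used from the entry assembly.
 * The clobbers argument names the registers the surrounding code allows
 * to be clobbered; PV_SAVE_REGS pushes the caller-saved registers that
 * are *not* in that set around the call.
 */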

#ifdef CONFIG_X86_32
#define GET_CR0_INTO_EAX                                \
        push %ecx; push %edx;                           \
        call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
        pop %edx; pop %ecx
#else   /* !CONFIG_X86_32 */

/*
 * If swapgs is used while the userspace stack is still current,
 * there's no way to call a pvop.  The PV replacement *must* be
 * inlined, or the swapgs instruction must be trapped and emulated.
 */
#define SWAPGS_UNSAFE_STACK                                             \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,     \
                  swapgs)

/*
 * Note: swapgs is very special, and in practice is either going to be
 * implemented with a single "swapgs" instruction or something very
 * special.  Either way, we don't need to save any registers for it.
 */
#define SWAPGS                                                          \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,     \
                  call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs)          \
                 )

#define GET_CR2_INTO_RAX                                \
        call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2)

#define PARAVIRT_ADJUST_EXCEPTION_FRAME                                 \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \
                  CLBR_NONE,                                            \
                  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_adjust_exception_frame))

#define USERGS_SYSRET64                                                 \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64),       \
                  CLBR_NONE,                                            \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))
#endif  /* CONFIG_X86_32 */

#endif /* __ASSEMBLY__ */
#else  /* CONFIG_PARAVIRT */
# define default_banner x86_init_noop
#ifndef __ASSEMBLY__
static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
                                          struct mm_struct *mm)
{
}

static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
{
}
#endif /* __ASSEMBLY__ */
#endif /* !CONFIG_PARAVIRT */
#endif /* _ASM_X86_PARAVIRT_H */