linux/arch/x86/include/asm/paravirt.h
#ifndef _ASM_X86_PARAVIRT_H
#define _ASM_X86_PARAVIRT_H
/* Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here. */

#ifdef CONFIG_PARAVIRT
#include <asm/pgtable_types.h>
#include <asm/asm.h>

#include <asm/paravirt_types.h>

#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/cpumask.h>

static inline int paravirt_enabled(void)
{
        return pv_info.paravirt_enabled;
}

static inline void load_sp0(struct tss_struct *tss,
                             struct thread_struct *thread)
{
        PVOP_VCALL2(pv_cpu_ops.load_sp0, tss, thread);
}

/* The paravirtualized CPUID instruction. */
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
                           unsigned int *ecx, unsigned int *edx)
{
        PVOP_VCALL4(pv_cpu_ops.cpuid, eax, ebx, ecx, edx);
}

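/*
 * Illustrative use (not from this file): reading CPUID leaf 0.  All
 * four registers are both inputs and outputs; eax selects the leaf:
 *
 *      unsigned int eax = 0, ebx, ecx, edx;
 *      __cpuid(&eax, &ebx, &ecx, &edx);
 *      (ebx, edx, ecx now hold the 12-byte vendor string)
 */
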
/*
 * These special macros can be used to get or set a debugging register
 */
static inline unsigned long paravirt_get_debugreg(int reg)
{
        return PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, reg);
}
#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
static inline void set_debugreg(unsigned long val, int reg)
{
        PVOP_VCALL2(pv_cpu_ops.set_debugreg, reg, val);
}

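/*
 * Illustrative round-trip through the accessors above (register
 * number 7 is just an example):
 *
 *      unsigned long dr7;
 *      get_debugreg(dr7, 7);
 *      set_debugreg(dr7, 7);
 */
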
static inline void clts(void)
{
        PVOP_VCALL0(pv_cpu_ops.clts);
}

static inline unsigned long read_cr0(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0);
}

static inline void write_cr0(unsigned long x)
{
        PVOP_VCALL1(pv_cpu_ops.write_cr0, x);
}

static inline unsigned long read_cr2(void)
{
        return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
}

static inline void write_cr2(unsigned long x)
{
        PVOP_VCALL1(pv_mmu_ops.write_cr2, x);
}

static inline unsigned long read_cr3(void)
{
        return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr3);
}

static inline void write_cr3(unsigned long x)
{
        PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
}

static inline unsigned long read_cr4(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4);
}
static inline unsigned long read_cr4_safe(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4_safe);
}

static inline void write_cr4(unsigned long x)
{
        PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
}

#ifdef CONFIG_X86_64
static inline unsigned long read_cr8(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr8);
}

static inline void write_cr8(unsigned long x)
{
        PVOP_VCALL1(pv_cpu_ops.write_cr8, x);
}
#endif

static inline void arch_safe_halt(void)
{
        PVOP_VCALL0(pv_irq_ops.safe_halt);
}

static inline void halt(void)
{
        PVOP_VCALL0(pv_irq_ops.halt);
}

static inline void wbinvd(void)
{
        PVOP_VCALL0(pv_cpu_ops.wbinvd);
}

#define get_kernel_rpl()  (pv_info.kernel_rpl)

static inline u64 paravirt_read_msr(unsigned msr, int *err)
{
        return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
}

static inline int paravirt_rdmsr_regs(u32 *regs)
{
        return PVOP_CALL1(int, pv_cpu_ops.rdmsr_regs, regs);
}

static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
{
        return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
}

static inline int paravirt_wrmsr_regs(u32 *regs)
{
        return PVOP_CALL1(int, pv_cpu_ops.wrmsr_regs, regs);
}

/* These should all do BUG_ON(_err), but our headers are too tangled. */
#define rdmsr(msr, val1, val2)                  \
do {                                            \
        int _err;                               \
        u64 _l = paravirt_read_msr(msr, &_err); \
        val1 = (u32)_l;                         \
        val2 = _l >> 32;                        \
} while (0)

#define wrmsr(msr, val1, val2)                  \
do {                                            \
        paravirt_write_msr(msr, val1, val2);    \
} while (0)

#define rdmsrl(msr, val)                        \
do {                                            \
        int _err;                               \
        val = paravirt_read_msr(msr, &_err);    \
} while (0)

#define wrmsrl(msr, val)        wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32)
#define wrmsr_safe(msr, a, b)   paravirt_write_msr(msr, a, b)

/* rdmsr with exception handling */
#define rdmsr_safe(msr, a, b)                   \
({                                              \
        int _err;                               \
        u64 _l = paravirt_read_msr(msr, &_err); \
        (*a) = (u32)_l;                         \
        (*b) = _l >> 32;                        \
        _err;                                   \
})

#define rdmsr_safe_regs(regs)   paravirt_rdmsr_regs(regs)
#define wrmsr_safe_regs(regs)   paravirt_wrmsr_regs(regs)

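/*
 * Illustrative error-checked access via the wrappers above (the MSR
 * chosen is arbitrary; rdmsr_safe() evaluates to 0 on success):
 *
 *      u32 lo, hi;
 *      if (rdmsr_safe(MSR_IA32_MISC_ENABLE, &lo, &hi) == 0)
 *              wrmsr_safe(MSR_IA32_MISC_ENABLE, lo, hi);
 */
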
static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
        int err;

        *p = paravirt_read_msr(msr, &err);
        return err;
}
static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
        u32 gprs[8] = { 0 };
        int err;

        gprs[1] = msr;
        gprs[7] = 0x9c5a203a;

        err = paravirt_rdmsr_regs(gprs);

        *p = gprs[0] | ((u64)gprs[2] << 32);

        return err;
}

static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
{
        u32 gprs[8] = { 0 };

        gprs[0] = (u32)val;
        gprs[1] = msr;
        gprs[2] = val >> 32;
        gprs[7] = 0x9c5a203a;

        return paravirt_wrmsr_regs(gprs);
}

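/*
 * Note on the gprs[] convention used above (as implemented by the
 * native {rd,wr}msr_safe_regs helpers): indices 0..7 correspond to
 * eax, ecx, edx, ebx, esp, ebp, esi and edi, so the MSR number goes
 * in gprs[1] (ecx) and the value travels in gprs[2]:gprs[0]
 * (edx:eax).  The 0x9c5a203a value placed in edi is AMD's passcode
 * for accessing certain vendor-specific MSRs.
 */
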
static inline u64 paravirt_read_tsc(void)
{
        return PVOP_CALL0(u64, pv_cpu_ops.read_tsc);
}

#define rdtscl(low)                             \
do {                                            \
        u64 _l = paravirt_read_tsc();           \
        low = (int)_l;                          \
} while (0)

#define rdtscll(val) (val = paravirt_read_tsc())

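/*
 * Illustrative cycle-count measurement with the wrappers above
 * (do_work() is a placeholder):
 *
 *      unsigned long long t0, t1;
 *      rdtscll(t0);
 *      do_work();
 *      rdtscll(t1);
 *      (t1 - t0 is the elapsed TSC tick count)
 */
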
static inline unsigned long long paravirt_sched_clock(void)
{
        return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
}

struct jump_label_key;
extern struct jump_label_key paravirt_steal_enabled;
extern struct jump_label_key paravirt_steal_rq_enabled;

static inline u64 paravirt_steal_clock(int cpu)
{
        return PVOP_CALL1(u64, pv_time_ops.steal_clock, cpu);
}

static inline unsigned long long paravirt_read_pmc(int counter)
{
        return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
}

#define rdpmc(counter, low, high)               \
do {                                            \
        u64 _l = paravirt_read_pmc(counter);    \
        low = (u32)_l;                          \
        high = _l >> 32;                        \
} while (0)

static inline unsigned long long paravirt_rdtscp(unsigned int *aux)
{
        return PVOP_CALL1(u64, pv_cpu_ops.read_tscp, aux);
}

#define rdtscp(low, high, aux)                          \
do {                                                    \
        unsigned int __aux;                             \
        unsigned long __val = paravirt_rdtscp(&__aux);  \
        (low) = (u32)__val;                             \
        (high) = (u32)(__val >> 32);                    \
        (aux) = __aux;                                  \
} while (0)

#define rdtscpll(val, aux)                              \
do {                                                    \
        unsigned int __aux;                             \
        val = paravirt_rdtscp(&__aux);                  \
        (aux) = __aux;                                  \
} while (0)

static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
        PVOP_VCALL2(pv_cpu_ops.alloc_ldt, ldt, entries);
}

static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
        PVOP_VCALL2(pv_cpu_ops.free_ldt, ldt, entries);
}

static inline void load_TR_desc(void)
{
        PVOP_VCALL0(pv_cpu_ops.load_tr_desc);
}
static inline void load_gdt(const struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.load_gdt, dtr);
}
static inline void load_idt(const struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.load_idt, dtr);
}
static inline void set_ldt(const void *addr, unsigned entries)
{
        PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
}
static inline void store_gdt(struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.store_gdt, dtr);
}
static inline void store_idt(struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.store_idt, dtr);
}
static inline unsigned long paravirt_store_tr(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.store_tr);
}
#define store_tr(tr)    ((tr) = paravirt_store_tr())
static inline void load_TLS(struct thread_struct *t, unsigned cpu)
{
        PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu);
}

#ifdef CONFIG_X86_64
static inline void load_gs_index(unsigned int gs)
{
        PVOP_VCALL1(pv_cpu_ops.load_gs_index, gs);
}
#endif

static inline void write_ldt_entry(struct desc_struct *dt, int entry,
                                   const void *desc)
{
        PVOP_VCALL3(pv_cpu_ops.write_ldt_entry, dt, entry, desc);
}

static inline void write_gdt_entry(struct desc_struct *dt, int entry,
                                   void *desc, int type)
{
        PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, desc, type);
}

static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
        PVOP_VCALL3(pv_cpu_ops.write_idt_entry, dt, entry, g);
}
static inline void set_iopl_mask(unsigned mask)
{
        PVOP_VCALL1(pv_cpu_ops.set_iopl_mask, mask);
}

/* The paravirtualized I/O functions */
static inline void slow_down_io(void)
{
        pv_cpu_ops.io_delay();
#ifdef REALLY_SLOW_IO
        pv_cpu_ops.io_delay();
        pv_cpu_ops.io_delay();
        pv_cpu_ops.io_delay();
#endif
}

#ifdef CONFIG_SMP
static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip,
                                    unsigned long start_esp)
{
        PVOP_VCALL3(pv_apic_ops.startup_ipi_hook,
                    phys_apicid, start_eip, start_esp);
}
#endif

static inline void paravirt_activate_mm(struct mm_struct *prev,
                                        struct mm_struct *next)
{
        PVOP_VCALL2(pv_mmu_ops.activate_mm, prev, next);
}

static inline void arch_dup_mmap(struct mm_struct *oldmm,
                                 struct mm_struct *mm)
{
        PVOP_VCALL2(pv_mmu_ops.dup_mmap, oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
        PVOP_VCALL1(pv_mmu_ops.exit_mmap, mm);
}

static inline void __flush_tlb(void)
{
        PVOP_VCALL0(pv_mmu_ops.flush_tlb_user);
}
static inline void __flush_tlb_global(void)
{
        PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
}
static inline void __flush_tlb_single(unsigned long addr)
{
        PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
}

static inline void flush_tlb_others(const struct cpumask *cpumask,
                                    struct mm_struct *mm,
                                    unsigned long va)
{
        PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, cpumask, mm, va);
}

static inline int paravirt_pgd_alloc(struct mm_struct *mm)
{
        return PVOP_CALL1(int, pv_mmu_ops.pgd_alloc, mm);
}

static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
        PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd);
}

static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
        PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
}
static inline void paravirt_release_pte(unsigned long pfn)
{
        PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
}

static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
        PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
}

static inline void paravirt_release_pmd(unsigned long pfn)
{
        PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
}

static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
        PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
}
static inline void paravirt_release_pud(unsigned long pfn)
{
        PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
}

static inline void pte_update(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep)
{
        PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep);
}
static inline void pmd_update(struct mm_struct *mm, unsigned long addr,
                              pmd_t *pmdp)
{
        PVOP_VCALL3(pv_mmu_ops.pmd_update, mm, addr, pmdp);
}

static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr,
                                    pte_t *ptep)
{
        PVOP_VCALL3(pv_mmu_ops.pte_update_defer, mm, addr, ptep);
}

static inline void pmd_update_defer(struct mm_struct *mm, unsigned long addr,
                                    pmd_t *pmdp)
{
        PVOP_VCALL3(pv_mmu_ops.pmd_update_defer, mm, addr, pmdp);
}

static inline pte_t __pte(pteval_t val)
{
        pteval_t ret;

        if (sizeof(pteval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pteval_t,
                                   pv_mmu_ops.make_pte,
                                   val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pteval_t,
                                   pv_mmu_ops.make_pte,
                                   val);

        return (pte_t) { .pte = ret };
}

static inline pteval_t pte_val(pte_t pte)
{
        pteval_t ret;

        if (sizeof(pteval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pteval_t, pv_mmu_ops.pte_val,
                                   pte.pte, (u64)pte.pte >> 32);
        else
                ret = PVOP_CALLEE1(pteval_t, pv_mmu_ops.pte_val,
                                   pte.pte);

        return ret;
}

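/*
 * In the pte/pgd/pmd/pud conversion helpers around this point,
 * sizeof(pteval_t) > sizeof(long) holds only for 32-bit PAE, where a
 * pte is 64 bits wide but native words are 32 bits: the value is then
 * split across two 32-bit argument registers.  On 64-bit kernels (and
 * non-PAE 32-bit) the single-argument form is used.
 */
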
static inline pgd_t __pgd(pgdval_t val)
{
        pgdval_t ret;

        if (sizeof(pgdval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.make_pgd,
                                   val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.make_pgd,
                                   val);

        return (pgd_t) { ret };
}

static inline pgdval_t pgd_val(pgd_t pgd)
{
        pgdval_t ret;

        if (sizeof(pgdval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.pgd_val,
                                   pgd.pgd, (u64)pgd.pgd >> 32);
        else
                ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.pgd_val,
                                   pgd.pgd);

        return ret;
}

#define  __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
                                           pte_t *ptep)
{
        pteval_t ret;

        ret = PVOP_CALL3(pteval_t, pv_mmu_ops.ptep_modify_prot_start,
                         mm, addr, ptep);

        return (pte_t) { .pte = ret };
}

static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
                                           pte_t *ptep, pte_t pte)
{
        if (sizeof(pteval_t) > sizeof(long))
                /* 5 arg words */
                pv_mmu_ops.ptep_modify_prot_commit(mm, addr, ptep, pte);
        else
                PVOP_VCALL4(pv_mmu_ops.ptep_modify_prot_commit,
                            mm, addr, ptep, pte.pte);
}

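/*
 * The "5 arg words" comments here and in set_pte_at()/set_pmd_at()
 * below mark fallbacks to a direct call: with a 64-bit pte on 32-bit
 * PAE the call would need five 32-bit argument words, which the
 * 4-slot PVOP_*CALL macros cannot express, so those sites go through
 * the ops pointer and are never patched.
 */
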
static inline void set_pte(pte_t *ptep, pte_t pte)
{
        if (sizeof(pteval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pte, ptep,
                            pte.pte, (u64)pte.pte >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pte, ptep,
                            pte.pte);
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep, pte_t pte)
{
        if (sizeof(pteval_t) > sizeof(long))
                /* 5 arg words */
                pv_mmu_ops.set_pte_at(mm, addr, ptep, pte);
        else
                PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                              pmd_t *pmdp, pmd_t pmd)
{
        if (sizeof(pmdval_t) > sizeof(long))
                /* 5 arg words */
                pv_mmu_ops.set_pmd_at(mm, addr, pmdp, pmd);
        else
                PVOP_VCALL4(pv_mmu_ops.set_pmd_at, mm, addr, pmdp,
                            native_pmd_val(pmd));
}
#endif

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
        pmdval_t val = native_pmd_val(pmd);

        if (sizeof(pmdval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp, val, (u64)val >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
}

#if PAGETABLE_LEVELS >= 3
static inline pmd_t __pmd(pmdval_t val)
{
        pmdval_t ret;

        if (sizeof(pmdval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.make_pmd,
                                   val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.make_pmd,
                                   val);

        return (pmd_t) { ret };
}

static inline pmdval_t pmd_val(pmd_t pmd)
{
        pmdval_t ret;

        if (sizeof(pmdval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.pmd_val,
                                   pmd.pmd, (u64)pmd.pmd >> 32);
        else
                ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.pmd_val,
                                   pmd.pmd);

        return ret;
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
        pudval_t val = native_pud_val(pud);

        if (sizeof(pudval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
                            val, (u64)val >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
                            val);
}
#if PAGETABLE_LEVELS == 4
static inline pud_t __pud(pudval_t val)
{
        pudval_t ret;

        if (sizeof(pudval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.make_pud,
                                   val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.make_pud,
                                   val);

        return (pud_t) { ret };
}

static inline pudval_t pud_val(pud_t pud)
{
        pudval_t ret;

        if (sizeof(pudval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.pud_val,
                                    pud.pud, (u64)pud.pud >> 32);
        else
                ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.pud_val,
                                    pud.pud);

        return ret;
}

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
        pgdval_t val = native_pgd_val(pgd);

        if (sizeof(pgdval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pgd, pgdp,
                            val, (u64)val >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp,
                            val);
}

static inline void pgd_clear(pgd_t *pgdp)
{
        set_pgd(pgdp, __pgd(0));
}

static inline void pud_clear(pud_t *pudp)
{
        set_pud(pudp, __pud(0));
}

#endif  /* PAGETABLE_LEVELS == 4 */

#endif  /* PAGETABLE_LEVELS >= 3 */

#ifdef CONFIG_X86_PAE
/* Special-case pte-setting operations for PAE, which can't update a
   64-bit pte atomically */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
        PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep,
                    pte.pte, pte.pte >> 32);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
                             pte_t *ptep)
{
        PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
}

static inline void pmd_clear(pmd_t *pmdp)
{
        PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
}
#else  /* !CONFIG_X86_PAE */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
        set_pte(ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
                             pte_t *ptep)
{
        set_pte_at(mm, addr, ptep, __pte(0));
}

static inline void pmd_clear(pmd_t *pmdp)
{
        set_pmd(pmdp, __pmd(0));
}
#endif  /* CONFIG_X86_PAE */

#define  __HAVE_ARCH_START_CONTEXT_SWITCH
static inline void arch_start_context_switch(struct task_struct *prev)
{
        PVOP_VCALL1(pv_cpu_ops.start_context_switch, prev);
}

static inline void arch_end_context_switch(struct task_struct *next)
{
        PVOP_VCALL1(pv_cpu_ops.end_context_switch, next);
}

#define  __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
        PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
        PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
}

void arch_flush_lazy_mmu_mode(void);

static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
                                phys_addr_t phys, pgprot_t flags)
{
        pv_mmu_ops.set_fixmap(idx, phys, flags);
}

#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)

static inline int arch_spin_is_locked(struct arch_spinlock *lock)
{
        return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
}

static inline int arch_spin_is_contended(struct arch_spinlock *lock)
{
        return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
}
#define arch_spin_is_contended  arch_spin_is_contended

static __always_inline void arch_spin_lock(struct arch_spinlock *lock)
{
        PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
}

static __always_inline void arch_spin_lock_flags(struct arch_spinlock *lock,
                                                  unsigned long flags)
{
        PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags);
}

static __always_inline int arch_spin_trylock(struct arch_spinlock *lock)
{
        return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
}

static __always_inline void arch_spin_unlock(struct arch_spinlock *lock)
{
        PVOP_VCALL1(pv_lock_ops.spin_unlock, lock);
}

#endif

#ifdef CONFIG_X86_32
#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
#define PV_RESTORE_REGS "popl %edx; popl %ecx;"

/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS         "pushl %ecx;"
#define PV_RESTORE_ALL_CALLER_REGS      "popl  %ecx;"

#define PV_FLAGS_ARG "0"
#define PV_EXTRA_CLOBBERS
#define PV_VEXTRA_CLOBBERS
#else
/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS                                         \
        "push %rcx;"                                                    \
        "push %rdx;"                                                    \
        "push %rsi;"                                                    \
        "push %rdi;"                                                    \
        "push %r8;"                                                     \
        "push %r9;"                                                     \
        "push %r10;"                                                    \
        "push %r11;"
#define PV_RESTORE_ALL_CALLER_REGS                                      \
        "pop %r11;"                                                     \
        "pop %r10;"                                                     \
        "pop %r9;"                                                      \
        "pop %r8;"                                                      \
        "pop %rdi;"                                                     \
        "pop %rsi;"                                                     \
        "pop %rdx;"                                                     \
        "pop %rcx;"

/* Saving every register here would cost too much; instead we save only
 * the argument register and declare the remaining caller-saved
 * registers as clobbered. */
#define PV_SAVE_REGS "pushq %%rdi;"
#define PV_RESTORE_REGS "popq %%rdi;"
#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx", "rdx", "rsi"
#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx", "rdx", "rsi"
#define PV_FLAGS_ARG "D"
#endif

/*
 * Generate a thunk around a function which saves all caller-save
 * registers except for the return value.  This allows C functions to
 * be called from assembler code where fewer than normal registers are
 * available.  It may also help code generation around calls from C
 * code if the common case doesn't use many registers.
 *
 * When a callee is wrapped in a thunk, the caller can assume that all
 * arg regs and all scratch registers are preserved across the
 * call. The return value in rax/eax will not be saved, even for void
 * functions.
 */
#define PV_CALLEE_SAVE_REGS_THUNK(func)                                 \
        extern typeof(func) __raw_callee_save_##func;                   \
        static void *__##func##__ __used = func;                        \
                                                                        \
        asm(".pushsection .text;"                                       \
            "__raw_callee_save_" #func ": "                             \
            PV_SAVE_ALL_CALLER_REGS                                     \
            "call " #func ";"                                           \
            PV_RESTORE_ALL_CALLER_REGS                                  \
            "ret;"                                                      \
            ".popsection")

/* Get a reference to a callee-save function */
#define PV_CALLEE_SAVE(func)                                            \
        ((struct paravirt_callee_save) { __raw_callee_save_##func })

/* Promise that "func" already uses the right calling convention */
#define __PV_IS_CALLEE_SAVE(func)                       \
        ((struct paravirt_callee_save) { func })

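/*
 * Typical pattern (a sketch; my_save_fl is an illustrative name, not
 * anything defined in this file): wrap a plain C implementation so it
 * can be installed as a callee-save pv op:
 *
 *      static unsigned long my_save_fl(void);
 *      PV_CALLEE_SAVE_REGS_THUNK(my_save_fl);
 *
 *      pv_irq_ops.save_fl = PV_CALLEE_SAVE(my_save_fl);
 */
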
static inline notrace unsigned long arch_local_save_flags(void)
{
        return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl);
}

static inline notrace void arch_local_irq_restore(unsigned long f)
{
        PVOP_VCALLEE1(pv_irq_ops.restore_fl, f);
}

static inline notrace void arch_local_irq_disable(void)
{
        PVOP_VCALLEE0(pv_irq_ops.irq_disable);
}

static inline notrace void arch_local_irq_enable(void)
{
        PVOP_VCALLEE0(pv_irq_ops.irq_enable);
}

static inline notrace unsigned long arch_local_irq_save(void)
{
        unsigned long f;

        f = arch_local_save_flags();
        arch_local_irq_disable();
        return f;
}

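/*
 * The usual pairing (illustrative):
 *
 *      unsigned long flags = arch_local_irq_save();
 *      ... critical section ...
 *      arch_local_irq_restore(flags);
 */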

/* Make sure as little as possible of this mess escapes. */
#undef PARAVIRT_CALL
#undef __PVOP_CALL
#undef __PVOP_VCALL
#undef PVOP_VCALL0
#undef PVOP_CALL0
#undef PVOP_VCALL1
#undef PVOP_CALL1
#undef PVOP_VCALL2
#undef PVOP_CALL2
#undef PVOP_VCALL3
#undef PVOP_CALL3
#undef PVOP_VCALL4
#undef PVOP_CALL4

extern void default_banner(void);

#else  /* __ASSEMBLY__ */

#define _PVSITE(ptype, clobbers, ops, word, algn)       \
771:;                                           \
        ops;                                    \
772:;                                           \
        .pushsection .parainstructions,"a";     \
         .align algn;                           \
         word 771b;                             \
         .byte ptype;                           \
         .byte 772b-771b;                       \
         .short clobbers;                       \
        .popsection

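/*
 * _PVSITE emits "ops" in place and records a patch-site entry
 * (address, op type, site length, clobber mask; see struct
 * paravirt_patch_site in paravirt_types.h) in .parainstructions,
 * which the boot-time patcher walks to replace the generic indirect
 * calls with better code where possible.
 */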

#define COND_PUSH(set, mask, reg)                       \
        .if ((~(set)) & mask); push %reg; .endif
#define COND_POP(set, mask, reg)                        \
        .if ((~(set)) & mask); pop %reg; .endif

#ifdef CONFIG_X86_64

#define PV_SAVE_REGS(set)                       \
        COND_PUSH(set, CLBR_RAX, rax);          \
        COND_PUSH(set, CLBR_RCX, rcx);          \
        COND_PUSH(set, CLBR_RDX, rdx);          \
        COND_PUSH(set, CLBR_RSI, rsi);          \
        COND_PUSH(set, CLBR_RDI, rdi);          \
        COND_PUSH(set, CLBR_R8, r8);            \
        COND_PUSH(set, CLBR_R9, r9);            \
        COND_PUSH(set, CLBR_R10, r10);          \
        COND_PUSH(set, CLBR_R11, r11)
#define PV_RESTORE_REGS(set)                    \
        COND_POP(set, CLBR_R11, r11);           \
        COND_POP(set, CLBR_R10, r10);           \
        COND_POP(set, CLBR_R9, r9);             \
        COND_POP(set, CLBR_R8, r8);             \
        COND_POP(set, CLBR_RDI, rdi);           \
        COND_POP(set, CLBR_RSI, rsi);           \
        COND_POP(set, CLBR_RDX, rdx);           \
        COND_POP(set, CLBR_RCX, rcx);           \
        COND_POP(set, CLBR_RAX, rax)

#define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 8)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
#define PARA_INDIRECT(addr)     *addr(%rip)
#else
#define PV_SAVE_REGS(set)                       \
        COND_PUSH(set, CLBR_EAX, eax);          \
        COND_PUSH(set, CLBR_EDI, edi);          \
        COND_PUSH(set, CLBR_ECX, ecx);          \
        COND_PUSH(set, CLBR_EDX, edx)
#define PV_RESTORE_REGS(set)                    \
        COND_POP(set, CLBR_EDX, edx);           \
        COND_POP(set, CLBR_ECX, ecx);           \
        COND_POP(set, CLBR_EDI, edi);           \
        COND_POP(set, CLBR_EAX, eax)

#define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 4)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
#define PARA_INDIRECT(addr)     *%cs:addr
#endif

#define INTERRUPT_RETURN                                                \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE,       \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret))

#define DISABLE_INTERRUPTS(clobbers)                                    \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
                  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);            \
                  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable);    \
                  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#define ENABLE_INTERRUPTS(clobbers)                                     \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers,  \
                  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);            \
                  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable);     \
                  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

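/*
 * DISABLE_INTERRUPTS/ENABLE_INTERRUPTS stand in for cli/sti in entry
 * assembly.  Note the inverted sense of "clobbers": COND_PUSH saves
 * only the registers *not* in the set, so the argument names the
 * registers the call site is willing to lose.
 */
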
#define USERGS_SYSRET32                                                 \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32),       \
                  CLBR_NONE,                                            \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret32))

#ifdef CONFIG_X86_32
#define GET_CR0_INTO_EAX                                \
        push %ecx; push %edx;                           \
        call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
        pop %edx; pop %ecx

#define ENABLE_INTERRUPTS_SYSEXIT                                       \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),    \
                  CLBR_NONE,                                            \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))


#else   /* !CONFIG_X86_32 */

/*
 * If swapgs is used while the userspace stack is still current,
 * there's no way to call a pvop.  The PV replacement *must* be
 * inlined, or the swapgs instruction must be trapped and emulated.
 */
#define SWAPGS_UNSAFE_STACK                                             \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,     \
                  swapgs)

/*
 * Note: swapgs is very special: in practice it is implemented either
 * as a single "swapgs" instruction or as something equally minimal.
 * Either way, we don't need to save any registers for it.
 */
#define SWAPGS                                                          \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,     \
                  call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs)          \
                 )

#define GET_CR2_INTO_RCX                                \
        call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2); \
        movq %rax, %rcx;                                \
        xorq %rax, %rax;

#define PARAVIRT_ADJUST_EXCEPTION_FRAME                                 \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \
                  CLBR_NONE,                                            \
                  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_adjust_exception_frame))

#define USERGS_SYSRET64                                                 \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64),       \
                  CLBR_NONE,                                            \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))

#define ENABLE_INTERRUPTS_SYSEXIT32                                     \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),    \
                  CLBR_NONE,                                            \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
#endif  /* CONFIG_X86_32 */

#endif /* __ASSEMBLY__ */
#else  /* CONFIG_PARAVIRT */
# define default_banner x86_init_noop
#endif /* !CONFIG_PARAVIRT */
#endif /* _ASM_X86_PARAVIRT_H */