linux/arch/x86/include/asm/paravirt.h
#ifndef _ASM_X86_PARAVIRT_H
#define _ASM_X86_PARAVIRT_H
/* Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here. */

#ifdef CONFIG_PARAVIRT
#include <asm/pgtable_types.h>
#include <asm/asm.h>

#include <asm/paravirt_types.h>

#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/cpumask.h>

static inline int paravirt_enabled(void)
{
        return pv_info.paravirt_enabled;
}

static inline void load_sp0(struct tss_struct *tss,
                             struct thread_struct *thread)
{
        PVOP_VCALL2(pv_cpu_ops.load_sp0, tss, thread);
}

/* The paravirtualized CPUID instruction. */
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
                           unsigned int *ecx, unsigned int *edx)
{
        PVOP_VCALL4(pv_cpu_ops.cpuid, eax, ebx, ecx, edx);
}

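/*
 * PVOP_CALLn()/PVOP_VCALLn() come from <asm/paravirt_types.h>.  Each
 * use emits an indirect call through the named pv_*_ops slot plus a
 * .parainstructions record, so the boot-time patcher can later replace
 * the site with native instructions or a direct call.  Sketch only
 * (the real macros also handle clobbers and return values):
 *
 *      PVOP_VCALL4(pv_cpu_ops.cpuid, eax, ebx, ecx, edx)
 *              ~> call *pv_cpu_ops.cpuid       (patchable call site)
 */
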
/*
 * These special macros can be used to get or set a debugging register
 */
static inline unsigned long paravirt_get_debugreg(int reg)
{
        return PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, reg);
}
#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
static inline void set_debugreg(unsigned long val, int reg)
{
        PVOP_VCALL2(pv_cpu_ops.set_debugreg, reg, val);
}

static inline void clts(void)
{
        PVOP_VCALL0(pv_cpu_ops.clts);
}

static inline unsigned long read_cr0(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0);
}

static inline void write_cr0(unsigned long x)
{
        PVOP_VCALL1(pv_cpu_ops.write_cr0, x);
}

static inline unsigned long read_cr2(void)
{
        return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
}

static inline void write_cr2(unsigned long x)
{
        PVOP_VCALL1(pv_mmu_ops.write_cr2, x);
}

static inline unsigned long read_cr3(void)
{
        return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr3);
}

static inline void write_cr3(unsigned long x)
{
        PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
}

static inline unsigned long read_cr4(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4);
}
static inline unsigned long read_cr4_safe(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4_safe);
}

static inline void write_cr4(unsigned long x)
{
        PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
}

#ifdef CONFIG_X86_64
static inline unsigned long read_cr8(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr8);
}

static inline void write_cr8(unsigned long x)
{
        PVOP_VCALL1(pv_cpu_ops.write_cr8, x);
}
#endif

static inline void raw_safe_halt(void)
{
        PVOP_VCALL0(pv_irq_ops.safe_halt);
}

static inline void halt(void)
{
        PVOP_VCALL0(pv_irq_ops.halt);
}

static inline void wbinvd(void)
{
        PVOP_VCALL0(pv_cpu_ops.wbinvd);
}

#define get_kernel_rpl()  (pv_info.kernel_rpl)

static inline u64 paravirt_read_msr(unsigned msr, int *err)
{
        return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
}

static inline int paravirt_rdmsr_regs(u32 *regs)
{
        return PVOP_CALL1(int, pv_cpu_ops.rdmsr_regs, regs);
}

static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
{
        return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
}

static inline int paravirt_wrmsr_regs(u32 *regs)
{
        return PVOP_CALL1(int, pv_cpu_ops.wrmsr_regs, regs);
}

/* These should all do BUG_ON(_err), but our headers are too tangled. */
#define rdmsr(msr, val1, val2)                  \
do {                                            \
        int _err;                               \
        u64 _l = paravirt_read_msr(msr, &_err); \
        val1 = (u32)_l;                         \
        val2 = _l >> 32;                        \
} while (0)

#define wrmsr(msr, val1, val2)                  \
do {                                            \
        paravirt_write_msr(msr, val1, val2);    \
} while (0)

#define rdmsrl(msr, val)                        \
do {                                            \
        int _err;                               \
        val = paravirt_read_msr(msr, &_err);    \
} while (0)

#define wrmsrl(msr, val)        wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32)
#define wrmsr_safe(msr, a, b)   paravirt_write_msr(msr, a, b)

/* rdmsr with exception handling */
#define rdmsr_safe(msr, a, b)                   \
({                                              \
        int _err;                               \
        u64 _l = paravirt_read_msr(msr, &_err); \
        (*a) = (u32)_l;                         \
        (*b) = _l >> 32;                        \
        _err;                                   \
})

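/*
 * Illustrative use of the _safe variants (not part of this header;
 * MSR_EFER comes from <asm/msr-index.h>):
 *
 *      u32 lo, hi;
 *      if (rdmsr_safe(MSR_EFER, &lo, &hi) == 0)
 *              ...the read succeeded and no #GP was taken...
 */
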
#define rdmsr_safe_regs(regs)   paravirt_rdmsr_regs(regs)
#define wrmsr_safe_regs(regs)   paravirt_wrmsr_regs(regs)

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
        int err;

        *p = paravirt_read_msr(msr, &err);
        return err;
}
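
/*
 * Register-based access for AMD-specific MSRs.  The 0x9c5a203a value
 * loaded into gprs[7] below is a magic passcode AMD requires before
 * certain MSRs may be accessed; it is treated here as an opaque
 * constant (assumption: no further documented meaning).
 */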
static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
        u32 gprs[8] = { 0 };
        int err;

        gprs[1] = msr;
        gprs[7] = 0x9c5a203a;

        err = paravirt_rdmsr_regs(gprs);

        *p = gprs[0] | ((u64)gprs[2] << 32);

        return err;
}

static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
{
        u32 gprs[8] = { 0 };

        gprs[0] = (u32)val;
        gprs[1] = msr;
        gprs[2] = val >> 32;
        gprs[7] = 0x9c5a203a;

        return paravirt_wrmsr_regs(gprs);
}

static inline u64 paravirt_read_tsc(void)
{
        return PVOP_CALL0(u64, pv_cpu_ops.read_tsc);
}

#define rdtscl(low)                             \
do {                                            \
        u64 _l = paravirt_read_tsc();           \
        low = (int)_l;                          \
} while (0)

#define rdtscll(val) (val = paravirt_read_tsc())

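/*
 * Illustrative only: timing a code section with the raw TSC (cycle
 * counts, not nanoseconds; not synchronized across CPUs):
 *
 *      u64 t0, t1;
 *      rdtscll(t0);
 *      ...code under measurement...
 *      rdtscll(t1);
 *      cycles = t1 - t0;
 */
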
static inline unsigned long long paravirt_sched_clock(void)
{
        return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
}

static inline unsigned long long paravirt_read_pmc(int counter)
{
        return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
}

#define rdpmc(counter, low, high)               \
do {                                            \
        u64 _l = paravirt_read_pmc(counter);    \
        low = (u32)_l;                          \
        high = _l >> 32;                        \
} while (0)

static inline unsigned long long paravirt_rdtscp(unsigned int *aux)
{
        return PVOP_CALL1(u64, pv_cpu_ops.read_tscp, aux);
}

#define rdtscp(low, high, aux)                          \
do {                                                    \
        unsigned int __aux;                             \
        unsigned long long __val = paravirt_rdtscp(&__aux); \
        (low) = (u32)__val;                             \
        (high) = (u32)(__val >> 32);                    \
        (aux) = __aux;                                  \
} while (0)

#define rdtscpll(val, aux)                              \
do {                                                    \
        unsigned int __aux;                             \
        val = paravirt_rdtscp(&__aux);                  \
        (aux) = __aux;                                  \
} while (0)

static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
        PVOP_VCALL2(pv_cpu_ops.alloc_ldt, ldt, entries);
}

static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
        PVOP_VCALL2(pv_cpu_ops.free_ldt, ldt, entries);
}

static inline void load_TR_desc(void)
{
        PVOP_VCALL0(pv_cpu_ops.load_tr_desc);
}
static inline void load_gdt(const struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.load_gdt, dtr);
}
static inline void load_idt(const struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.load_idt, dtr);
}
static inline void set_ldt(const void *addr, unsigned entries)
{
        PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
}
static inline void store_gdt(struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.store_gdt, dtr);
}
static inline void store_idt(struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.store_idt, dtr);
}
static inline unsigned long paravirt_store_tr(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.store_tr);
}
#define store_tr(tr)    ((tr) = paravirt_store_tr())
static inline void load_TLS(struct thread_struct *t, unsigned cpu)
{
        PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu);
}

#ifdef CONFIG_X86_64
static inline void load_gs_index(unsigned int gs)
{
        PVOP_VCALL1(pv_cpu_ops.load_gs_index, gs);
}
#endif

static inline void write_ldt_entry(struct desc_struct *dt, int entry,
                                   const void *desc)
{
        PVOP_VCALL3(pv_cpu_ops.write_ldt_entry, dt, entry, desc);
}

static inline void write_gdt_entry(struct desc_struct *dt, int entry,
                                   void *desc, int type)
{
        PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, desc, type);
}

static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
        PVOP_VCALL3(pv_cpu_ops.write_idt_entry, dt, entry, g);
}
static inline void set_iopl_mask(unsigned mask)
{
        PVOP_VCALL1(pv_cpu_ops.set_iopl_mask, mask);
}

/* The paravirtualized I/O functions */
static inline void slow_down_io(void)
{
        pv_cpu_ops.io_delay();
#ifdef REALLY_SLOW_IO
        pv_cpu_ops.io_delay();
        pv_cpu_ops.io_delay();
        pv_cpu_ops.io_delay();
#endif
}

#ifdef CONFIG_SMP
static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip,
                                    unsigned long start_esp)
{
        PVOP_VCALL3(pv_apic_ops.startup_ipi_hook,
                    phys_apicid, start_eip, start_esp);
}
#endif

static inline void paravirt_activate_mm(struct mm_struct *prev,
                                        struct mm_struct *next)
{
        PVOP_VCALL2(pv_mmu_ops.activate_mm, prev, next);
}

static inline void arch_dup_mmap(struct mm_struct *oldmm,
                                 struct mm_struct *mm)
{
        PVOP_VCALL2(pv_mmu_ops.dup_mmap, oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
        PVOP_VCALL1(pv_mmu_ops.exit_mmap, mm);
}

static inline void __flush_tlb(void)
{
        PVOP_VCALL0(pv_mmu_ops.flush_tlb_user);
}
static inline void __flush_tlb_global(void)
{
        PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
}
static inline void __flush_tlb_single(unsigned long addr)
{
        PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
}

static inline void flush_tlb_others(const struct cpumask *cpumask,
                                    struct mm_struct *mm,
                                    unsigned long va)
{
        PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, cpumask, mm, va);
}

static inline int paravirt_pgd_alloc(struct mm_struct *mm)
{
        return PVOP_CALL1(int, pv_mmu_ops.pgd_alloc, mm);
}

static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
        PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd);
}

static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
        PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
}
static inline void paravirt_release_pte(unsigned long pfn)
{
        PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
}

static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
        PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
}

static inline void paravirt_alloc_pmd_clone(unsigned long pfn, unsigned long clonepfn,
                                            unsigned long start, unsigned long count)
{
        PVOP_VCALL4(pv_mmu_ops.alloc_pmd_clone, pfn, clonepfn, start, count);
}
static inline void paravirt_release_pmd(unsigned long pfn)
{
        PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
}

static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
        PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
}
static inline void paravirt_release_pud(unsigned long pfn)
{
        PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
}

#ifdef CONFIG_HIGHPTE
static inline void *kmap_atomic_pte(struct page *page, enum km_type type)
{
        unsigned long ret;
        ret = PVOP_CALL2(unsigned long, pv_mmu_ops.kmap_atomic_pte, page, type);
        return (void *)ret;
}
#endif

static inline void pte_update(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep)
{
        PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep);
}

static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr,
                                    pte_t *ptep)
{
        PVOP_VCALL3(pv_mmu_ops.pte_update_defer, mm, addr, ptep);
}

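/*
 * Conversions between page-table values and raw words.  When an entry
 * is wider than a machine word (32-bit PAE), the value is split and
 * passed as two 32-bit arguments; otherwise it is passed as one.  The
 * sizeof() comparison is a compile-time constant, so only one branch
 * survives in the generated code.
 */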
static inline pte_t __pte(pteval_t val)
{
        pteval_t ret;

        if (sizeof(pteval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pteval_t,
                                   pv_mmu_ops.make_pte,
                                   val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pteval_t,
                                   pv_mmu_ops.make_pte,
                                   val);

        return (pte_t) { .pte = ret };
}

static inline pteval_t pte_val(pte_t pte)
{
        pteval_t ret;

        if (sizeof(pteval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pteval_t, pv_mmu_ops.pte_val,
                                   pte.pte, (u64)pte.pte >> 32);
        else
                ret = PVOP_CALLEE1(pteval_t, pv_mmu_ops.pte_val,
                                   pte.pte);

        return ret;
}

static inline pgd_t __pgd(pgdval_t val)
{
        pgdval_t ret;

        if (sizeof(pgdval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.make_pgd,
                                   val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.make_pgd,
                                   val);

        return (pgd_t) { ret };
}

static inline pgdval_t pgd_val(pgd_t pgd)
{
        pgdval_t ret;

        if (sizeof(pgdval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.pgd_val,
                                    pgd.pgd, (u64)pgd.pgd >> 32);
        else
                ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.pgd_val,
                                    pgd.pgd);

        return ret;
}

#define  __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
                                           pte_t *ptep)
{
        pteval_t ret;

        ret = PVOP_CALL3(pteval_t, pv_mmu_ops.ptep_modify_prot_start,
                         mm, addr, ptep);

        return (pte_t) { .pte = ret };
}

static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
                                           pte_t *ptep, pte_t pte)
{
        if (sizeof(pteval_t) > sizeof(long))
                /* 5 arg words */
                pv_mmu_ops.ptep_modify_prot_commit(mm, addr, ptep, pte);
        else
                PVOP_VCALL4(pv_mmu_ops.ptep_modify_prot_commit,
                            mm, addr, ptep, pte.pte);
}

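/*
 * Note: the PVOP_*CALL wrappers handle at most four argument words.
 * The "5 arg words" cases above and below therefore call through the
 * ops pointer directly, giving up site patching for those calls.
 */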
static inline void set_pte(pte_t *ptep, pte_t pte)
{
        if (sizeof(pteval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pte, ptep,
                            pte.pte, (u64)pte.pte >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pte, ptep,
                            pte.pte);
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep, pte_t pte)
{
        if (sizeof(pteval_t) > sizeof(long))
                /* 5 arg words */
                pv_mmu_ops.set_pte_at(mm, addr, ptep, pte);
        else
                PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
        pmdval_t val = native_pmd_val(pmd);

        if (sizeof(pmdval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp, val, (u64)val >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
}

#if PAGETABLE_LEVELS >= 3
static inline pmd_t __pmd(pmdval_t val)
{
        pmdval_t ret;

        if (sizeof(pmdval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.make_pmd,
                                   val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.make_pmd,
                                   val);

        return (pmd_t) { ret };
}

static inline pmdval_t pmd_val(pmd_t pmd)
{
        pmdval_t ret;

        if (sizeof(pmdval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.pmd_val,
                                    pmd.pmd, (u64)pmd.pmd >> 32);
        else
                ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.pmd_val,
                                    pmd.pmd);

        return ret;
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
        pudval_t val = native_pud_val(pud);

        if (sizeof(pudval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
                            val, (u64)val >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
                            val);
}
#if PAGETABLE_LEVELS == 4
static inline pud_t __pud(pudval_t val)
{
        pudval_t ret;

        if (sizeof(pudval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.make_pud,
                                   val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.make_pud,
                                   val);

        return (pud_t) { ret };
}

static inline pudval_t pud_val(pud_t pud)
{
        pudval_t ret;

        if (sizeof(pudval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.pud_val,
                                    pud.pud, (u64)pud.pud >> 32);
        else
                ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.pud_val,
                                    pud.pud);

        return ret;
}

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
        pgdval_t val = native_pgd_val(pgd);

        if (sizeof(pgdval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pgd, pgdp,
                            val, (u64)val >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp,
                            val);
}

static inline void pgd_clear(pgd_t *pgdp)
{
        set_pgd(pgdp, __pgd(0));
}

static inline void pud_clear(pud_t *pudp)
{
        set_pud(pudp, __pud(0));
}

#endif  /* PAGETABLE_LEVELS == 4 */

#endif  /* PAGETABLE_LEVELS >= 3 */

#ifdef CONFIG_X86_PAE
/* Special-case pte-setting operations for PAE, which can't update a
   64-bit pte atomically */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
        PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep,
                    pte.pte, pte.pte >> 32);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
                             pte_t *ptep)
{
        PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
}

static inline void pmd_clear(pmd_t *pmdp)
{
        PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
}
#else  /* !CONFIG_X86_PAE */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
        set_pte(ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
                             pte_t *ptep)
{
        set_pte_at(mm, addr, ptep, __pte(0));
}

static inline void pmd_clear(pmd_t *pmdp)
{
        set_pmd(pmdp, __pmd(0));
}
#endif  /* CONFIG_X86_PAE */

#define  __HAVE_ARCH_START_CONTEXT_SWITCH
static inline void arch_start_context_switch(struct task_struct *prev)
{
        PVOP_VCALL1(pv_cpu_ops.start_context_switch, prev);
}

static inline void arch_end_context_switch(struct task_struct *next)
{
        PVOP_VCALL1(pv_cpu_ops.end_context_switch, next);
}

#define  __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
        PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
        PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
}

void arch_flush_lazy_mmu_mode(void);

static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
                                phys_addr_t phys, pgprot_t flags)
{
        pv_mmu_ops.set_fixmap(idx, phys, flags);
}

#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)

static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
{
        return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
}

static inline int __raw_spin_is_contended(struct raw_spinlock *lock)
{
        return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
}
#define __raw_spin_is_contended __raw_spin_is_contended

static __always_inline void __raw_spin_lock(struct raw_spinlock *lock)
{
        PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
}

static __always_inline void __raw_spin_lock_flags(struct raw_spinlock *lock,
                                                  unsigned long flags)
{
        PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags);
}

static __always_inline int __raw_spin_trylock(struct raw_spinlock *lock)
{
        return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
}

static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock)
{
        PVOP_VCALL1(pv_lock_ops.spin_unlock, lock);
}

#endif
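
/*
 * With CONFIG_PARAVIRT_SPINLOCKS the core spinlock code dispatches
 * through pv_lock_ops, so a hypervisor can, for example, yield the
 * virtual CPU instead of spinning while the lock holder is not
 * running.
 */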

#ifdef CONFIG_X86_32
#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
#define PV_RESTORE_REGS "popl %edx; popl %ecx;"

/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS         "pushl %ecx;"
#define PV_RESTORE_ALL_CALLER_REGS      "popl  %ecx;"

#define PV_FLAGS_ARG "0"
#define PV_EXTRA_CLOBBERS
#define PV_VEXTRA_CLOBBERS
#else
/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS                                         \
        "push %rcx;"                                                    \
        "push %rdx;"                                                    \
        "push %rsi;"                                                    \
        "push %rdi;"                                                    \
        "push %r8;"                                                     \
        "push %r9;"                                                     \
        "push %r10;"                                                    \
        "push %r11;"
#define PV_RESTORE_ALL_CALLER_REGS                                      \
        "pop %r11;"                                                     \
        "pop %r10;"                                                     \
        "pop %r9;"                                                      \
        "pop %r8;"                                                      \
        "pop %rdi;"                                                     \
        "pop %rsi;"                                                     \
        "pop %rdx;"                                                     \
        "pop %rcx;"

/* We save some registers, but not all of them; that would be too much.
 * We clobber all caller-saved registers except the argument register. */
#define PV_SAVE_REGS "pushq %%rdi;"
#define PV_RESTORE_REGS "popq %%rdi;"
#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx", "rdx", "rsi"
#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx", "rdx", "rsi"
#define PV_FLAGS_ARG "D"
#endif

/*
 * Generate a thunk around a function which saves all caller-save
 * registers except for the return value.  This allows C functions to
 * be called from assembler code where fewer than normal registers are
 * available.  It may also help code generation around calls from C
 * code if the common case doesn't use many registers.
 *
 * When a callee is wrapped in a thunk, the caller can assume that all
 * arg regs and all scratch registers are preserved across the
 * call. The return value in rax/eax will not be saved, even for void
 * functions.
 */
#define PV_CALLEE_SAVE_REGS_THUNK(func)                                 \
        extern typeof(func) __raw_callee_save_##func;                   \
        static void *__##func##__ __used = func;                        \
                                                                        \
        asm(".pushsection .text;"                                       \
            "__raw_callee_save_" #func ": "                             \
            PV_SAVE_ALL_CALLER_REGS                                     \
            "call " #func ";"                                           \
            PV_RESTORE_ALL_CALLER_REGS                                  \
            "ret;"                                                      \
            ".popsection")

/* Get a reference to a callee-save function */
#define PV_CALLEE_SAVE(func)                                            \
        ((struct paravirt_callee_save) { __raw_callee_save_##func })

/* Promise that "func" already uses the right calling convention */
#define __PV_IS_CALLEE_SAVE(func)                       \
        ((struct paravirt_callee_save) { func })

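/*
 * Illustrative only ("my_save_fl" is hypothetical, not part of this
 * header): wrapping a C handler so it can be installed as a
 * callee-save pv op:
 *
 *      static unsigned long my_save_fl(void) { ... }
 *      PV_CALLEE_SAVE_REGS_THUNK(my_save_fl);
 *      ...
 *      pv_irq_ops.save_fl = PV_CALLEE_SAVE(my_save_fl);
 */
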
static inline unsigned long __raw_local_save_flags(void)
{
        return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl);
}

static inline void raw_local_irq_restore(unsigned long f)
{
        PVOP_VCALLEE1(pv_irq_ops.restore_fl, f);
}

static inline void raw_local_irq_disable(void)
{
        PVOP_VCALLEE0(pv_irq_ops.irq_disable);
}

static inline void raw_local_irq_enable(void)
{
        PVOP_VCALLEE0(pv_irq_ops.irq_enable);
}

static inline unsigned long __raw_local_irq_save(void)
{
        unsigned long f;

        f = __raw_local_save_flags();
        raw_local_irq_disable();
        return f;
}

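/*
 * Illustrative only: the save/disable ... restore pattern the generic
 * irqflags wrappers build from the ops above:
 *
 *      unsigned long flags = __raw_local_irq_save();
 *      ...critical section with interrupts off...
 *      raw_local_irq_restore(flags);
 */
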

/* Make sure as little as possible of this mess escapes. */
#undef PARAVIRT_CALL
#undef __PVOP_CALL
#undef __PVOP_VCALL
#undef PVOP_VCALL0
#undef PVOP_CALL0
#undef PVOP_VCALL1
#undef PVOP_CALL1
#undef PVOP_VCALL2
#undef PVOP_CALL2
#undef PVOP_VCALL3
#undef PVOP_CALL3
#undef PVOP_VCALL4
#undef PVOP_CALL4

extern void default_banner(void);

#else  /* __ASSEMBLY__ */

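/*
 * _PVSITE() emits "ops" between local labels 771/772 and records the
 * span in the .parainstructions section: site address, patch type,
 * instruction length, and clobber mask.  The boot-time patcher walks
 * these records and rewrites each site in place.
 */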
#define _PVSITE(ptype, clobbers, ops, word, algn)       \
771:;                                           \
        ops;                                    \
772:;                                           \
        .pushsection .parainstructions,"a";     \
         .align algn;                           \
         word 771b;                             \
         .byte ptype;                           \
         .byte 772b-771b;                       \
         .short clobbers;                       \
        .popsection

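/*
 * COND_PUSH/COND_POP save or restore %reg only when its bit is clear
 * in the clobber set: registers the set marks as clobbered (or as
 * preserved by the callee) need no protection; the rest must be
 * pushed and popped around the call.
 */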
#define COND_PUSH(set, mask, reg)                       \
        .if ((~(set)) & mask); push %reg; .endif
#define COND_POP(set, mask, reg)                        \
        .if ((~(set)) & mask); pop %reg; .endif

#ifdef CONFIG_X86_64

#define PV_SAVE_REGS(set)                       \
        COND_PUSH(set, CLBR_RAX, rax);          \
        COND_PUSH(set, CLBR_RCX, rcx);          \
        COND_PUSH(set, CLBR_RDX, rdx);          \
        COND_PUSH(set, CLBR_RSI, rsi);          \
        COND_PUSH(set, CLBR_RDI, rdi);          \
        COND_PUSH(set, CLBR_R8, r8);            \
        COND_PUSH(set, CLBR_R9, r9);            \
        COND_PUSH(set, CLBR_R10, r10);          \
        COND_PUSH(set, CLBR_R11, r11)
#define PV_RESTORE_REGS(set)                    \
        COND_POP(set, CLBR_R11, r11);           \
        COND_POP(set, CLBR_R10, r10);           \
        COND_POP(set, CLBR_R9, r9);             \
        COND_POP(set, CLBR_R8, r8);             \
        COND_POP(set, CLBR_RDI, rdi);           \
        COND_POP(set, CLBR_RSI, rsi);           \
        COND_POP(set, CLBR_RDX, rdx);           \
        COND_POP(set, CLBR_RCX, rcx);           \
        COND_POP(set, CLBR_RAX, rax)

#define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 8)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
#define PARA_INDIRECT(addr)     *addr(%rip)
#else
#define PV_SAVE_REGS(set)                       \
        COND_PUSH(set, CLBR_EAX, eax);          \
        COND_PUSH(set, CLBR_EDI, edi);          \
        COND_PUSH(set, CLBR_ECX, ecx);          \
        COND_PUSH(set, CLBR_EDX, edx)
#define PV_RESTORE_REGS(set)                    \
        COND_POP(set, CLBR_EDX, edx);           \
        COND_POP(set, CLBR_ECX, ecx);           \
        COND_POP(set, CLBR_EDI, edi);           \
        COND_POP(set, CLBR_EAX, eax)

#define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 4)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
#define PARA_INDIRECT(addr)     *%cs:addr
#endif

#define INTERRUPT_RETURN                                                \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE,       \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret))

#define DISABLE_INTERRUPTS(clobbers)                                    \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
                  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);            \
                  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable);    \
                  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#define ENABLE_INTERRUPTS(clobbers)                                     \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers,  \
                  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);            \
                  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable);     \
                  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

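/*
 * Sketch only: DISABLE_INTERRUPTS(CLBR_ANY) expands to a patchable
 * site that pushes the registers it must preserve, makes an indirect
 * call through pv_irq_ops.irq_disable, and pops them again; when
 * running native, the patcher typically replaces the whole span with
 * a single "cli".
 */
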
#define USERGS_SYSRET32                                                 \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32),       \
                  CLBR_NONE,                                            \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret32))

#ifdef CONFIG_X86_32
#define GET_CR0_INTO_EAX                                \
        push %ecx; push %edx;                           \
        call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
        pop %edx; pop %ecx

#define ENABLE_INTERRUPTS_SYSEXIT                                       \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),    \
                  CLBR_NONE,                                            \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))


#else   /* !CONFIG_X86_32 */

/*
 * If swapgs is used while the userspace stack is still current,
 * there's no way to call a pvop.  The PV replacement *must* be
 * inlined, or the swapgs instruction must be trapped and emulated.
 */
#define SWAPGS_UNSAFE_STACK                                             \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,     \
                  swapgs)

/*
 * Note: swapgs is very special, and in practice is either going to be
 * implemented with a single "swapgs" instruction or something very
 * special.  Either way, we don't need to save any registers for
 * it.
 */
#define SWAPGS                                                          \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,     \
                  call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs)          \
                 )

#define GET_CR2_INTO_RCX                                \
        call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2); \
        movq %rax, %rcx;                                \
        xorq %rax, %rax;

#define PARAVIRT_ADJUST_EXCEPTION_FRAME                                 \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \
                  CLBR_NONE,                                            \
                  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_adjust_exception_frame))

#define USERGS_SYSRET64                                                 \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64),       \
                  CLBR_NONE,                                            \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))

#define ENABLE_INTERRUPTS_SYSEXIT32                                     \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),    \
                  CLBR_NONE,                                            \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
#endif  /* CONFIG_X86_32 */

#endif /* __ASSEMBLY__ */
#else  /* CONFIG_PARAVIRT */
# define default_banner x86_init_noop
#endif /* !CONFIG_PARAVIRT */