linux/arch/x86/include/asm/mshyperv.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MSHYPER_H
#define _ASM_X86_MSHYPER_H

#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/nmi.h>
#include <asm/io.h>
#include <asm/hyperv-tlfs.h>
#include <asm/nospec-branch.h>

#define VP_INVAL        U32_MAX

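/*
 * Hyper-V feature and topology information reported by the hypervisor.
 * These fields are populated at boot from the Hyper-V CPUID leaves by
 * ms_hyperv_init_platform() (arch/x86/kernel/cpu/mshyperv.c).
 */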
struct ms_hyperv_info {
        u32 features;
        u32 misc_features;
        u32 hints;
        u32 nested_features;
        u32 max_vp_index;
        u32 max_lp_index;
};

extern struct ms_hyperv_info ms_hyperv;


typedef int (*hyperv_fill_flush_list_func)(
                struct hv_guest_mapping_flush_list *flush,
                void *data);

/*
 * Generate the guest ID: the Linux vendor ID in bits 63-48 (OR'ed with
 * d_info1), the kernel version in bits 47-16 and distro-specific
 * information (d_info2) in bits 15-0.
 */

static inline __u64 generate_guest_id(__u64 d_info1, __u64 kernel_version,
                                      __u64 d_info2)
{
        __u64 guest_id = 0;

        guest_id = (((__u64)HV_LINUX_VENDOR_ID) << 48);
        guest_id |= (d_info1 << 48);
        guest_id |= (kernel_version << 16);
        guest_id |= d_info2;

        return guest_id;
}
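
/*
 * Illustrative use (sketch): hyperv_init() typically reports the guest ID
 * to the hypervisor once at boot, e.g.
 *
 *      wrmsrl(HV_X64_MSR_GUEST_OS_ID,
 *             generate_guest_id(0, LINUX_VERSION_CODE, 0));
 */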


/* Free the message slot and signal end-of-message if required */
static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type)
{
        /*
         * On crash we're reading some other CPU's message page and we need
         * to be careful: this other CPU may have already cleared the header
         * and the host may have already delivered some other message there.
         * If we blindly write msg->header.message_type we're going to lose
         * it. We can still lose a message of the same type but we count on
         * the fact that there can only be one CHANNELMSG_UNLOAD_RESPONSE and
         * we don't care about other messages on crash.
         */
        if (cmpxchg(&msg->header.message_type, old_msg_type,
                    HVMSG_NONE) != old_msg_type)
                return;

        /*
         * Make sure the write to MessageType (i.e. set to HVMSG_NONE)
         * happens before we read MessagePending and write EOM. Otherwise,
         * the EOM write will not deliver any more messages since there is
         * no empty slot.
         */
        mb();

        if (msg->header.message_flags.msg_pending) {
                /*
                 * This will cause message queue rescan to possibly deliver
                 * another msg from the hypervisor
                 */
                wrmsrl(HV_X64_MSR_EOM, 0);
        }
}
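
/*
 * Illustrative use (sketch): the VMBus message handling path reads a message
 * from the per-CPU SynIC message page, handles (or copies) it, and only then
 * frees the slot:
 *
 *      struct hv_message *msg = ...;   // slot in the SynIC message page
 *      u32 msgtype = READ_ONCE(msg->header.message_type);
 *
 *      ...handle the message...
 *      vmbus_signal_eom(msg, msgtype);
 */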

#define hv_init_timer(timer, tick) \
        wrmsrl(HV_X64_MSR_STIMER0_COUNT + (2*timer), tick)
#define hv_init_timer_config(timer, val) \
        wrmsrl(HV_X64_MSR_STIMER0_CONFIG + (2*timer), val)
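/*
 * The stride of 2 above reflects the MSR layout: each synthetic timer owns
 * an adjacent CONFIG/COUNT MSR pair, so timer N's MSRs sit at
 * HV_X64_MSR_STIMER0_{CONFIG,COUNT} + 2*N.
 */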

#define hv_get_simp(val) rdmsrl(HV_X64_MSR_SIMP, val)
#define hv_set_simp(val) wrmsrl(HV_X64_MSR_SIMP, val)

#define hv_get_siefp(val) rdmsrl(HV_X64_MSR_SIEFP, val)
#define hv_set_siefp(val) wrmsrl(HV_X64_MSR_SIEFP, val)

#define hv_get_synic_state(val) rdmsrl(HV_X64_MSR_SCONTROL, val)
#define hv_set_synic_state(val) wrmsrl(HV_X64_MSR_SCONTROL, val)

#define hv_get_vp_index(index) rdmsrl(HV_X64_MSR_VP_INDEX, index)

#define hv_get_synint_state(int_num, val) \
        rdmsrl(HV_X64_MSR_SINT0 + int_num, val)
#define hv_set_synint_state(int_num, val) \
        wrmsrl(HV_X64_MSR_SINT0 + int_num, val)

#define hv_get_crash_ctl(val) \
        rdmsrl(HV_X64_MSR_CRASH_CTL, val)

void hyperv_callback_vector(void);
void hyperv_reenlightenment_vector(void);
#ifdef CONFIG_TRACING
#define trace_hyperv_callback_vector hyperv_callback_vector
#endif
void hyperv_vector_handler(struct pt_regs *regs);
void hv_setup_vmbus_irq(void (*handler)(void));
void hv_remove_vmbus_irq(void);

void hv_setup_kexec_handler(void (*handler)(void));
void hv_remove_kexec_handler(void);
void hv_setup_crash_handler(void (*handler)(struct pt_regs *regs));
void hv_remove_crash_handler(void);

/*
 * Routines for stimer0 Direct Mode handling.
 * On x86/x64, there are no percpu actions to take.
 */
void hv_stimer0_vector_handler(struct pt_regs *regs);
void hv_stimer0_callback_vector(void);
int hv_setup_stimer0_irq(int *irq, int *vector, void (*handler)(void));
void hv_remove_stimer0_irq(int irq);

static inline void hv_enable_stimer0_percpu_irq(int irq) {}
static inline void hv_disable_stimer0_percpu_irq(int irq) {}


#if IS_ENABLED(CONFIG_HYPERV)
extern struct clocksource *hyperv_cs;
extern void *hv_hypercall_pg;
extern void __percpu **hyperv_pcpu_input_arg;

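/*
 * hv_do_hypercall() issues a "slow" hypercall: the input and output
 * parameter blocks are passed by guest physical address. The calling
 * convention used below follows the Hyper-V TLFS: on x86_64 the control
 * word goes in RCX, the input GPA in RDX, the output GPA in R8, and the
 * result comes back in RAX; on 32-bit x86 the control word is in EDX:EAX,
 * the input GPA in EBX:ECX, the output GPA in EDI:ESI, and the result in
 * EDX:EAX.
 */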
static inline u64 hv_do_hypercall(u64 control, void *input, void *output)
{
        u64 input_address = input ? virt_to_phys(input) : 0;
        u64 output_address = output ? virt_to_phys(output) : 0;
        u64 hv_status;

#ifdef CONFIG_X86_64
        if (!hv_hypercall_pg)
                return U64_MAX;

        __asm__ __volatile__("mov %4, %%r8\n"
                             CALL_NOSPEC
                             : "=a" (hv_status), ASM_CALL_CONSTRAINT,
                               "+c" (control), "+d" (input_address)
                             : "r" (output_address),
                               THUNK_TARGET(hv_hypercall_pg)
                             : "cc", "memory", "r8", "r9", "r10", "r11");
#else
        u32 input_address_hi = upper_32_bits(input_address);
        u32 input_address_lo = lower_32_bits(input_address);
        u32 output_address_hi = upper_32_bits(output_address);
        u32 output_address_lo = lower_32_bits(output_address);

        if (!hv_hypercall_pg)
                return U64_MAX;

        __asm__ __volatile__(CALL_NOSPEC
                             : "=A" (hv_status),
                               "+c" (input_address_lo), ASM_CALL_CONSTRAINT
                             : "A" (control),
                               "b" (input_address_hi),
                               "D" (output_address_hi), "S" (output_address_lo),
                               THUNK_TARGET(hv_hypercall_pg)
                             : "cc", "memory");
#endif /* !x86_64 */
        return hv_status;
}
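
/*
 * Illustrative use (sketch, placeholder hypercall and struct names): callers
 * typically build the input block in the pre-allocated per-CPU page and check
 * the low 16 bits of the returned status, e.g.
 *
 *      void **input_arg = (void **)this_cpu_ptr(hyperv_pcpu_input_arg);
 *      struct hv_some_input *input = *input_arg;    // hypothetical input struct
 *
 *      ...fill in *input with interrupts disabled...
 *      status = hv_do_hypercall(HVCALL_SOME_CALL, input, NULL);
 *      if ((status & HV_HYPERCALL_RESULT_MASK) != HV_STATUS_SUCCESS)
 *              ...handle failure...
 */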

/* Fast hypercall with 8 bytes of input and no output */
static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1)
{
        u64 hv_status, control = (u64)code | HV_HYPERCALL_FAST_BIT;

#ifdef CONFIG_X86_64
        {
                __asm__ __volatile__(CALL_NOSPEC
                                     : "=a" (hv_status), ASM_CALL_CONSTRAINT,
                                       "+c" (control), "+d" (input1)
                                     : THUNK_TARGET(hv_hypercall_pg)
                                     : "cc", "r8", "r9", "r10", "r11");
        }
#else
        {
                u32 input1_hi = upper_32_bits(input1);
                u32 input1_lo = lower_32_bits(input1);

                __asm__ __volatile__ (CALL_NOSPEC
                                      : "=A" (hv_status),
                                        "+c" (input1_lo),
                                        ASM_CALL_CONSTRAINT
                                      : "A" (control),
                                        "b" (input1_hi),
                                        THUNK_TARGET(hv_hypercall_pg)
                                      : "cc", "edi", "esi");
        }
#endif
        return hv_status;
}
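
/*
 * Fast hypercalls pass their operands entirely in registers, so no per-CPU
 * input page is needed. Illustrative use (sketch, event_id is a placeholder):
 * signalling a host event can be done with
 *
 *      hv_do_fast_hypercall8(HVCALL_SIGNAL_EVENT, event_id);
 */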

/* Fast hypercall with 16 bytes of input */
static inline u64 hv_do_fast_hypercall16(u16 code, u64 input1, u64 input2)
{
        u64 hv_status, control = (u64)code | HV_HYPERCALL_FAST_BIT;

#ifdef CONFIG_X86_64
        {
                __asm__ __volatile__("mov %4, %%r8\n"
                                     CALL_NOSPEC
                                     : "=a" (hv_status), ASM_CALL_CONSTRAINT,
                                       "+c" (control), "+d" (input1)
                                     : "r" (input2),
                                       THUNK_TARGET(hv_hypercall_pg)
                                     : "cc", "r8", "r9", "r10", "r11");
        }
#else
        {
                u32 input1_hi = upper_32_bits(input1);
                u32 input1_lo = lower_32_bits(input1);
                u32 input2_hi = upper_32_bits(input2);
                u32 input2_lo = lower_32_bits(input2);

                __asm__ __volatile__ (CALL_NOSPEC
                                      : "=A" (hv_status),
                                        "+c" (input1_lo), ASM_CALL_CONSTRAINT
                                      : "A" (control), "b" (input1_hi),
                                        "D" (input2_hi), "S" (input2_lo),
                                        THUNK_TARGET(hv_hypercall_pg)
                                      : "cc");
        }
#endif
        return hv_status;
}

/*
 * Rep hypercalls. Callers of this function are expected to ensure that
 * rep_count and varhead_size comply with the Hyper-V hypercall definition.
 */
static inline u64 hv_do_rep_hypercall(u16 code, u16 rep_count, u16 varhead_size,
                                      void *input, void *output)
{
        u64 control = code;
        u64 status;
        u16 rep_comp;

        control |= (u64)varhead_size << HV_HYPERCALL_VARHEAD_OFFSET;
        control |= (u64)rep_count << HV_HYPERCALL_REP_COMP_OFFSET;

        do {
                status = hv_do_hypercall(control, input, output);
                if ((status & HV_HYPERCALL_RESULT_MASK) != HV_STATUS_SUCCESS)
                        return status;

                /* Bits 32-43 of status have 'Reps completed' data. */
                rep_comp = (status & HV_HYPERCALL_REP_COMP_MASK) >>
                        HV_HYPERCALL_REP_COMP_OFFSET;

                control &= ~HV_HYPERCALL_REP_START_MASK;
                control |= (u64)rep_comp << HV_HYPERCALL_REP_START_OFFSET;

                touch_nmi_watchdog();
        } while (rep_comp < rep_count);

        return status;
}
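
/*
 * Illustrative use (sketch): the Hyper-V TLB flush path issues a rep
 * hypercall over a list of GVAs and lets hv_do_rep_hypercall() restart the
 * call until all reps have completed, e.g.
 *
 *      status = hv_do_rep_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST,
 *                                   gva_n, 0, flush, NULL);
 */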

/*
 * The hypervisor's notion of virtual processor ID is different from
 * Linux's notion of CPU ID. This information can only be retrieved
 * in the context of the calling CPU. Set up a map for easy access
 * to this information.
 */
extern u32 *hv_vp_index;
extern u32 hv_max_vp_index;
extern struct hv_vp_assist_page **hv_vp_assist_page;

static inline struct hv_vp_assist_page *hv_get_vp_assist_page(unsigned int cpu)
{
        if (!hv_vp_assist_page)
                return NULL;

        return hv_vp_assist_page[cpu];
}

/**
 * hv_cpu_number_to_vp_number() - Map CPU to VP.
 * @cpu_number: CPU number in Linux terms
 *
 * This function returns the mapping between the Linux processor
 * number and the hypervisor's virtual processor number, useful
 * when making hypercalls that refer to specific processors.
 *
 * Return: Virtual processor number in Hyper-V terms
 */
static inline int hv_cpu_number_to_vp_number(int cpu_number)
{
        return hv_vp_index[cpu_number];
}

static inline int cpumask_to_vpset(struct hv_vpset *vpset,
                                    const struct cpumask *cpus)
{
        int cpu, vcpu, vcpu_bank, vcpu_offset, nr_bank = 1;

        /* valid_bank_mask can represent up to 64 banks */
        if (hv_max_vp_index / 64 >= 64)
                return 0;

        /*
         * Clear all banks up to the maximum possible bank, as hv_tlb_flush_ex
         * structs are not cleared between calls; we risk flushing unneeded
         * vCPUs otherwise.
         */
        for (vcpu_bank = 0; vcpu_bank <= hv_max_vp_index / 64; vcpu_bank++)
                vpset->bank_contents[vcpu_bank] = 0;

        /*
         * Some banks may end up being empty but this is acceptable.
         */
        for_each_cpu(cpu, cpus) {
                vcpu = hv_cpu_number_to_vp_number(cpu);
                if (vcpu == VP_INVAL)
                        return -1;
                vcpu_bank = vcpu / 64;
                vcpu_offset = vcpu % 64;
                __set_bit(vcpu_offset, (unsigned long *)
                          &vpset->bank_contents[vcpu_bank]);
                if (vcpu_bank >= nr_bank)
                        nr_bank = vcpu_bank + 1;
        }
        vpset->valid_bank_mask = GENMASK_ULL(nr_bank - 1, 0);
        return nr_bank;
}
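
/*
 * Illustrative use (sketch): callers that issue the "ex" flush/IPI hypercalls
 * build a sparse VP set from a cpumask and fall back to a coarser method when
 * a CPU has no valid VP index yet, e.g.
 *
 *      nr_bank = cpumask_to_vpset(&flush->hv_vp_set, cpus);
 *      if (nr_bank < 0)
 *              ...fall back (e.g. flush/notify all processors)...
 */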

void __init hyperv_init(void);
void hyperv_setup_mmu_ops(void);
void hyperv_report_panic(struct pt_regs *regs, long err);
void hyperv_report_panic_msg(phys_addr_t pa, size_t size);
bool hv_is_hyperv_initialized(void);
void hyperv_cleanup(void);

void hyperv_reenlightenment_intr(struct pt_regs *regs);
void set_hv_tscchange_cb(void (*cb)(void));
void clear_hv_tscchange_cb(void);
void hyperv_stop_tsc_emulation(void);
int hyperv_flush_guest_mapping(u64 as);
int hyperv_flush_guest_mapping_range(u64 as,
                hyperv_fill_flush_list_func fill_func, void *data);
int hyperv_fill_flush_guest_mapping_list(
                struct hv_guest_mapping_flush_list *flush,
                u64 start_gfn, u64 end_gfn);

#ifdef CONFIG_X86_64
void hv_apic_init(void);
void __init hv_init_spinlocks(void);
bool hv_vcpu_is_preempted(int vcpu);
#else
static inline void hv_apic_init(void) {}
#endif

#else /* CONFIG_HYPERV */
static inline void hyperv_init(void) {}
static inline bool hv_is_hyperv_initialized(void) { return false; }
static inline void hyperv_cleanup(void) {}
static inline void hyperv_setup_mmu_ops(void) {}
static inline void set_hv_tscchange_cb(void (*cb)(void)) {}
static inline void clear_hv_tscchange_cb(void) {}
static inline void hyperv_stop_tsc_emulation(void) {}
static inline struct hv_vp_assist_page *hv_get_vp_assist_page(unsigned int cpu)
{
        return NULL;
}
static inline int hyperv_flush_guest_mapping(u64 as) { return -1; }
static inline int hyperv_flush_guest_mapping_range(u64 as,
                hyperv_fill_flush_list_func fill_func, void *data)
{
        return -1;
}
#endif /* CONFIG_HYPERV */

#ifdef CONFIG_HYPERV_TSCPAGE
struct ms_hyperv_tsc_page *hv_get_tsc_page(void);
static inline u64 hv_read_tsc_page_tsc(const struct ms_hyperv_tsc_page *tsc_pg,
                                       u64 *cur_tsc)
{
        u64 scale, offset;
        u32 sequence;

        /*
         * The protocol for reading the Hyper-V TSC page is specified in the
         * Hypervisor Top-Level Functional Specification ver. 3.0 and above.
         * To get the reference time we must do the following:
         * - READ ReferenceTscSequence
         *   A special '0' value indicates the time source is unreliable and we
         *   need to use something else. The currently published specification
         *   versions (up to 4.0b) contain a mistake and wrongly claim '-1'
         *   instead of '0' as the special value, see commit c35b82ef0294.
         * - ReferenceTime =
         *        ((RDTSC() * ReferenceTscScale) >> 64) + ReferenceTscOffset
         * - READ ReferenceTscSequence again. If its value has changed since
         *   our first reading, we need to discard ReferenceTime and repeat
         *   the whole sequence, as the hypervisor was updating the page in
         *   between.
         */
        do {
                sequence = READ_ONCE(tsc_pg->tsc_sequence);
                if (!sequence)
                        return U64_MAX;
                /*
                 * Make sure we read sequence before we read other values from
                 * the TSC page.
                 */
                smp_rmb();

                scale = READ_ONCE(tsc_pg->tsc_scale);
                offset = READ_ONCE(tsc_pg->tsc_offset);
                *cur_tsc = rdtsc_ordered();

                /*
                 * Make sure we read sequence after we read all other values
                 * from the TSC page.
                 */
                smp_rmb();

        } while (READ_ONCE(tsc_pg->tsc_sequence) != sequence);

        return mul_u64_u64_shr(*cur_tsc, scale, 64) + offset;
}

static inline u64 hv_read_tsc_page(const struct ms_hyperv_tsc_page *tsc_pg)
{
        u64 cur_tsc;

        return hv_read_tsc_page_tsc(tsc_pg, &cur_tsc);
}
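
/*
 * Illustrative use (sketch): the Hyper-V clocksource read path uses these
 * helpers and falls back to the reference-count MSR when the TSC page is
 * flagged as unreliable, e.g.
 *
 *      u64 now = hv_read_tsc_page(hv_get_tsc_page());
 *      if (now == U64_MAX)
 *              rdmsrl(HV_X64_MSR_TIME_REF_COUNT, now);
 */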

#else
static inline struct ms_hyperv_tsc_page *hv_get_tsc_page(void)
{
        return NULL;
}

static inline u64 hv_read_tsc_page_tsc(const struct ms_hyperv_tsc_page *tsc_pg,
                                       u64 *cur_tsc)
{
        BUG();
        return U64_MAX;
}
#endif
#endif