linux/arch/x86/hyperv/hv_spinlock.c
// SPDX-License-Identifier: GPL-2.0

/*
 * Hyper-V specific spinlock code.
 *
 * Copyright (C) 2018, Intel, Inc.
 *
 * Author : Yi Sun <yi.y.sun@intel.com>
 */

#define pr_fmt(fmt) "Hyper-V: " fmt

#include <linux/spinlock.h>

#include <asm/mshyperv.h>
#include <asm/paravirt.h>
#include <asm/apic.h>

static bool __initdata hv_pvspin = true;

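/*
 * Wake up a vCPU that is waiting in hv_qlock_wait(). The IPI terminates
 * the GUEST_IDLE 'idle' state even if the target vCPU has interrupts
 * disabled.
 */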
static void hv_qlock_kick(int cpu)
{
        apic->send_IPI(cpu, X86_PLATFORM_IPI_VECTOR);
}

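/*
 * Wait callback for the PV qspinlock slow path: idle the vCPU in the
 * hypervisor while the lock byte still holds the expected value, until
 * an IPI (normally from hv_qlock_kick()) arrives.
 */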
static void hv_qlock_wait(u8 *byte, u8 val)
{
        unsigned long flags;

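        /* Do not idle from NMI context; let the caller keep spinning. */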
        if (in_nmi())
                return;

        /*
         * Reading the HV_X64_MSR_GUEST_IDLE MSR tells the hypervisor that
         * the vCPU can be put into 'idle' state. This 'idle' state is
         * terminated by an IPI, usually from hv_qlock_kick(), even if
         * interrupts are disabled on the vCPU.
         *
         * To prevent a race against the unlock path it is required to
         * disable interrupts before accessing the HV_X64_MSR_GUEST_IDLE
         * MSR. Otherwise, if the IPI from hv_qlock_kick() arrives between
         * the lock value check and the rdmsrl() then the vCPU might be put
         * into 'idle' state by the hypervisor and kept in that state for
         * an unspecified amount of time.
         */
        local_irq_save(flags);
        /*
         * Only issue the rdmsrl() when the lock state has not changed.
         */
        if (READ_ONCE(*byte) == val) {
                unsigned long msr_val;

                rdmsrl(HV_X64_MSR_GUEST_IDLE, msr_val);

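                /* The value read is irrelevant; the read itself idles the vCPU. */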
                (void)msr_val;
        }
        local_irq_restore(flags);
}

/*
 * Hyper-V does not provide a way to query whether a vCPU has been
 * preempted, so always report "not preempted".
 */
__visible bool hv_vcpu_is_preempted(int vcpu)
{
        return false;
}
PV_CALLEE_SAVE_REGS_THUNK(hv_vcpu_is_preempted);

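/*
 * Install the PV qspinlock callbacks when an APIC is present, the
 * hypervisor recommends cluster IPIs and provides the GUEST_IDLE MSR,
 * and "hv_nopvspin" was not given on the command line.
 */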
void __init hv_init_spinlocks(void)
{
        if (!hv_pvspin || !apic ||
            !(ms_hyperv.hints & HV_X64_CLUSTER_IPI_RECOMMENDED) ||
            !(ms_hyperv.features & HV_MSR_GUEST_IDLE_AVAILABLE)) {
                pr_info("PV spinlocks disabled\n");
                return;
        }
        pr_info("PV spinlocks enabled\n");

        __pv_init_lock_hash();
        pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
        pv_ops.lock.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
        pv_ops.lock.wait = hv_qlock_wait;
        pv_ops.lock.kick = hv_qlock_kick;
        pv_ops.lock.vcpu_is_preempted = PV_CALLEE_SAVE(hv_vcpu_is_preempted);
}

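/* Handle the "hv_nopvspin" early boot parameter: opt out of PV spinlocks. */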
static __init int hv_parse_nopvspin(char *arg)
{
        hv_pvspin = false;
        return 0;
}
early_param("hv_nopvspin", hv_parse_nopvspin);