root/arch/x86/hyperv/hv_spinlock.c

DEFINITIONS

This source file includes the following definitions.
  1. hv_qlock_kick
  2. hv_qlock_wait
  3. hv_vcpu_is_preempted
  4. hv_init_spinlocks
  5. hv_parse_nopvspin

// SPDX-License-Identifier: GPL-2.0

/*
 * Hyper-V specific spinlock code.
 *
 * Copyright (C) 2018, Intel, Inc.
 *
 * Author : Yi Sun <yi.y.sun@intel.com>
 */

#define pr_fmt(fmt) "Hyper-V: " fmt

#include <linux/spinlock.h>

#include <asm/mshyperv.h>
#include <asm/paravirt.h>
#include <asm/apic.h>

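/* PV spinlocks are used by default; "hv_nopvspin" on the command line clears this. */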
static bool __initdata hv_pvspin = true;

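/*
 * Kick the target vCPU out of the 'idle' state entered in hv_qlock_wait()
 * by sending it an IPI.
 */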
static void hv_qlock_kick(int cpu)
{
        apic->send_IPI(cpu, X86_PLATFORM_IPI_VECTOR);
}

static void hv_qlock_wait(u8 *byte, u8 val)
{
        unsigned long msr_val;
        unsigned long flags;

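        /*
         * Do not use the guest-idle wait from NMI context; returning early
         * is safe because the slowpath falls back to spinning on the lock.
         */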
        if (in_nmi())
                return;

        /*
         * Reading the HV_X64_MSR_GUEST_IDLE MSR tells the hypervisor that
         * the vCPU can be put into the 'idle' state. This 'idle' state is
         * terminated by an IPI, usually from hv_qlock_kick(), even if
         * interrupts are disabled on the vCPU.
         *
         * To prevent a race against the unlock path, interrupts must be
         * disabled before accessing the HV_X64_MSR_GUEST_IDLE MSR.
         * Otherwise, if the IPI from hv_qlock_kick() arrives between the
         * lock value check and the rdmsrl(), the vCPU might be put into
         * the 'idle' state by the hypervisor and kept there for an
         * unspecified amount of time.
         */
        local_irq_save(flags);
        /*
         * Only issue the rdmsrl() when the lock state has not changed.
         */
        if (READ_ONCE(*byte) == val)
                rdmsrl(HV_X64_MSR_GUEST_IDLE, msr_val);
        local_irq_restore(flags);
}

/*
 * Hyper-V does not support a preempted-vCPU query so far, so always
 * report 'not preempted'.
 */
__visible bool hv_vcpu_is_preempted(int vcpu)
{
        return false;
}
PV_CALLEE_SAVE_REGS_THUNK(hv_vcpu_is_preempted);

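/*
 * Switch to PV queued spinlocks only when the pieces they rely on are
 * present: an APIC for the kick IPI, plus the cluster-IPI recommendation
 * hint and the guest-idle MSR advertised by the hypervisor.
 */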
void __init hv_init_spinlocks(void)
{
        if (!hv_pvspin || !apic ||
            !(ms_hyperv.hints & HV_X64_CLUSTER_IPI_RECOMMENDED) ||
            !(ms_hyperv.features & HV_X64_MSR_GUEST_IDLE_AVAILABLE)) {
                pr_info("PV spinlocks disabled\n");
                return;
        }
        pr_info("PV spinlocks enabled\n");

        __pv_init_lock_hash();
        pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
        pv_ops.lock.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
        pv_ops.lock.wait = hv_qlock_wait;
        pv_ops.lock.kick = hv_qlock_kick;
        pv_ops.lock.vcpu_is_preempted = PV_CALLEE_SAVE(hv_vcpu_is_preempted);
}

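/* Handle the "hv_nopvspin" command line option: stay on native spinlocks. */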
static __init int hv_parse_nopvspin(char *arg)
{
        hv_pvspin = false;
        return 0;
}
early_param("hv_nopvspin", hv_parse_nopvspin);
