root/arch/x86/kvm/hyperv.c

DEFINITIONS

This source file includes the following definitions.
  1. synic_read_sint
  2. synic_get_sint_vector
  3. synic_has_vector_connected
  4. synic_has_vector_auto_eoi
  5. synic_update_vector
  6. synic_set_sint
  7. get_vcpu_by_vpidx
  8. synic_get
  9. kvm_hv_notify_acked_sint
  10. synic_exit
  11. synic_set_msr
  12. synic_get_msr
  13. synic_set_irq
  14. kvm_hv_synic_set_irq
  15. kvm_hv_synic_send_eoi
  16. kvm_hv_set_sint_gsi
  17. kvm_hv_irq_routing_update
  18. synic_init
  19. get_time_ref_counter
  20. stimer_mark_pending
  21. stimer_cleanup
  22. stimer_timer_callback
  23. stimer_start
  24. stimer_set_config
  25. stimer_set_count
  26. stimer_get_config
  27. stimer_get_count
  28. synic_deliver_msg
  29. stimer_send_msg
  30. stimer_notify_direct
  31. stimer_expiration
  32. kvm_hv_process_stimers
  33. kvm_hv_vcpu_uninit
  34. kvm_hv_assist_page_enabled
  35. kvm_hv_get_assist_page
  36. stimer_prepare_msg
  37. stimer_init
  38. kvm_hv_vcpu_init
  39. kvm_hv_vcpu_postcreate
  40. kvm_hv_activate_synic
  41. kvm_hv_msr_partition_wide
  42. kvm_hv_msr_get_crash_data
  43. kvm_hv_msr_get_crash_ctl
  44. kvm_hv_msr_set_crash_ctl
  45. kvm_hv_msr_set_crash_data
  46. compute_tsc_page_parameters
  47. kvm_hv_setup_tsc_page
  48. kvm_hv_set_msr_pw
  49. current_task_runtime_100ns
  50. kvm_hv_set_msr
  51. kvm_hv_get_msr_pw
  52. kvm_hv_get_msr
  53. kvm_hv_set_msr_common
  54. kvm_hv_get_msr_common
  55. sparse_set_to_vcpu_mask
  56. kvm_hv_flush_tlb
  57. kvm_send_ipi_to_many
  58. kvm_hv_send_ipi
  59. kvm_hv_hypercall_enabled
  60. kvm_hv_hypercall_set_result
  61. kvm_hv_hypercall_complete
  62. kvm_hv_hypercall_complete_userspace
  63. kvm_hvcall_signal_event
  64. kvm_hv_hypercall
  65. kvm_hv_init_vm
  66. kvm_hv_destroy_vm
  67. kvm_hv_eventfd_assign
  68. kvm_hv_eventfd_deassign
  69. kvm_vm_ioctl_hv_eventfd
  70. kvm_vcpu_ioctl_get_hv_cpuid

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /*
   3  * KVM Microsoft Hyper-V emulation
   4  *
   5  * derived from arch/x86/kvm/x86.c
   6  *
   7  * Copyright (C) 2006 Qumranet, Inc.
   8  * Copyright (C) 2008 Qumranet, Inc.
   9  * Copyright IBM Corporation, 2008
  10  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
  11  * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com>
  12  *
  13  * Authors:
  14  *   Avi Kivity   <avi@qumranet.com>
  15  *   Yaniv Kamay  <yaniv@qumranet.com>
  16  *   Amit Shah    <amit.shah@qumranet.com>
  17  *   Ben-Ami Yassour <benami@il.ibm.com>
  18  *   Andrey Smetanin <asmetanin@virtuozzo.com>
  19  */
  20 
  21 #include "x86.h"
  22 #include "lapic.h"
  23 #include "ioapic.h"
  24 #include "hyperv.h"
  25 
  26 #include <linux/cpu.h>
  27 #include <linux/kvm_host.h>
  28 #include <linux/highmem.h>
  29 #include <linux/sched/cputime.h>
  30 #include <linux/eventfd.h>
  31 
  32 #include <asm/apicdef.h>
  33 #include <trace/events/kvm.h>
  34 
  35 #include "trace.h"
  36 
  37 #define KVM_HV_MAX_SPARSE_VCPU_SET_BITS DIV_ROUND_UP(KVM_MAX_VCPUS, 64)
  38 
  39 static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
  40                                 bool vcpu_kick);
  41 
  42 static inline u64 synic_read_sint(struct kvm_vcpu_hv_synic *synic, int sint)
  43 {
  44         return atomic64_read(&synic->sint[sint]);
  45 }
  46 
  47 static inline int synic_get_sint_vector(u64 sint_value)
  48 {
  49         if (sint_value & HV_SYNIC_SINT_MASKED)
  50                 return -1;
  51         return sint_value & HV_SYNIC_SINT_VECTOR_MASK;
  52 }
  53 
  54 static bool synic_has_vector_connected(struct kvm_vcpu_hv_synic *synic,
  55                                       int vector)
  56 {
  57         int i;
  58 
  59         for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
  60                 if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
  61                         return true;
  62         }
  63         return false;
  64 }
  65 
  66 static bool synic_has_vector_auto_eoi(struct kvm_vcpu_hv_synic *synic,
  67                                      int vector)
  68 {
  69         int i;
  70         u64 sint_value;
  71 
  72         for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
  73                 sint_value = synic_read_sint(synic, i);
  74                 if (synic_get_sint_vector(sint_value) == vector &&
  75                     sint_value & HV_SYNIC_SINT_AUTO_EOI)
  76                         return true;
  77         }
  78         return false;
  79 }
  80 
  81 static void synic_update_vector(struct kvm_vcpu_hv_synic *synic,
  82                                 int vector)
  83 {
  84         if (vector < HV_SYNIC_FIRST_VALID_VECTOR)
  85                 return;
  86 
  87         if (synic_has_vector_connected(synic, vector))
  88                 __set_bit(vector, synic->vec_bitmap);
  89         else
  90                 __clear_bit(vector, synic->vec_bitmap);
  91 
  92         if (synic_has_vector_auto_eoi(synic, vector))
  93                 __set_bit(vector, synic->auto_eoi_bitmap);
  94         else
  95                 __clear_bit(vector, synic->auto_eoi_bitmap);
  96 }
  97 
  98 static int synic_set_sint(struct kvm_vcpu_hv_synic *synic, int sint,
  99                           u64 data, bool host)
 100 {
 101         int vector, old_vector;
 102         bool masked;
 103 
 104         vector = data & HV_SYNIC_SINT_VECTOR_MASK;
 105         masked = data & HV_SYNIC_SINT_MASKED;
 106 
 107         /*
 108          * Valid vectors are 16-255; however, nested Hyper-V attempts to write
 109          * the default '0x10000' value on boot, and this should not #GP. We
 110          * need to allow zero-initing the register from the host as well.
 111          */
 112         if (vector < HV_SYNIC_FIRST_VALID_VECTOR && !host && !masked)
 113                 return 1;
 114         /*
 115          * The guest may configure multiple SINTs to use the same vector, so
 116          * we maintain a bitmap of vectors handled by synic, and a
 117          * bitmap of vectors with auto-eoi behavior.  The bitmaps are
 118          * updated here, and atomically queried on fast paths.
 119          */
 120         old_vector = synic_read_sint(synic, sint) & HV_SYNIC_SINT_VECTOR_MASK;
 121 
 122         atomic64_set(&synic->sint[sint], data);
 123 
 124         synic_update_vector(synic, old_vector);
 125 
 126         synic_update_vector(synic, vector);
 127 
 128         /* Load SynIC vectors into EOI exit bitmap */
 129         kvm_make_request(KVM_REQ_SCAN_IOAPIC, synic_to_vcpu(synic));
 130         return 0;
 131 }
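
A minimal user-space sketch of the bitmap bookkeeping above, assuming
hypothetical stand-ins for the HV_SYNIC_SINT_* constants (bit 16 for
"masked", low byte for the vector): two SINTs share one vector, and the
vector stays marked as connected until its last unmasked user goes away.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NSINT 16
#define SINT_MASKED (1u << 16)         /* stand-in for HV_SYNIC_SINT_MASKED */

static uint64_t sint[NSINT];
static uint64_t vec_bitmap[4];         /* one bit per vector, 256 total */

static void update_vector(int vector)
{
        bool connected = false;
        int i;

        for (i = 0; i < NSINT; i++)
                if (!(sint[i] & SINT_MASKED) && (sint[i] & 0xff) == vector)
                        connected = true;
        if (connected)
                vec_bitmap[vector / 64] |= 1ULL << (vector % 64);
        else
                vec_bitmap[vector / 64] &= ~(1ULL << (vector % 64));
}

int main(void)
{
        sint[0] = 0x42;                /* SINT0 -> vector 0x42 */
        sint[1] = 0x42;                /* SINT1 shares vector 0x42 */
        update_vector(0x42);
        sint[0] |= SINT_MASKED;        /* mask SINT0 ... */
        update_vector(0x42);
        /* ... the vector stays connected, because SINT1 still uses it */
        printf("connected: %d\n", !!(vec_bitmap[1] & (1ULL << 2)));
        return 0;
}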
 132 
 133 static struct kvm_vcpu *get_vcpu_by_vpidx(struct kvm *kvm, u32 vpidx)
 134 {
 135         struct kvm_vcpu *vcpu = NULL;
 136         int i;
 137 
 138         if (vpidx >= KVM_MAX_VCPUS)
 139                 return NULL;
 140 
 141         vcpu = kvm_get_vcpu(kvm, vpidx);
 142         if (vcpu && vcpu_to_hv_vcpu(vcpu)->vp_index == vpidx)
 143                 return vcpu;
 144         kvm_for_each_vcpu(i, vcpu, kvm)
 145                 if (vcpu_to_hv_vcpu(vcpu)->vp_index == vpidx)
 146                         return vcpu;
 147         return NULL;
 148 }
 149 
 150 static struct kvm_vcpu_hv_synic *synic_get(struct kvm *kvm, u32 vpidx)
 151 {
 152         struct kvm_vcpu *vcpu;
 153         struct kvm_vcpu_hv_synic *synic;
 154 
 155         vcpu = get_vcpu_by_vpidx(kvm, vpidx);
 156         if (!vcpu)
 157                 return NULL;
 158         synic = vcpu_to_synic(vcpu);
 159         return (synic->active) ? synic : NULL;
 160 }
 161 
 162 static void kvm_hv_notify_acked_sint(struct kvm_vcpu *vcpu, u32 sint)
 163 {
 164         struct kvm *kvm = vcpu->kvm;
 165         struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
 166         struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
 167         struct kvm_vcpu_hv_stimer *stimer;
 168         int gsi, idx;
 169 
 170         trace_kvm_hv_notify_acked_sint(vcpu->vcpu_id, sint);
 171 
 172         /* Try to deliver pending Hyper-V SynIC timer messages */
 173         for (idx = 0; idx < ARRAY_SIZE(hv_vcpu->stimer); idx++) {
 174                 stimer = &hv_vcpu->stimer[idx];
 175                 if (stimer->msg_pending && stimer->config.enable &&
 176                     !stimer->config.direct_mode &&
 177                     stimer->config.sintx == sint)
 178                         stimer_mark_pending(stimer, false);
 179         }
 180 
 181         idx = srcu_read_lock(&kvm->irq_srcu);
 182         gsi = atomic_read(&synic->sint_to_gsi[sint]);
 183         if (gsi != -1)
 184                 kvm_notify_acked_gsi(kvm, gsi);
 185         srcu_read_unlock(&kvm->irq_srcu, idx);
 186 }
 187 
 188 static void synic_exit(struct kvm_vcpu_hv_synic *synic, u32 msr)
 189 {
 190         struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
 191         struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;
 192 
 193         hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNIC;
 194         hv_vcpu->exit.u.synic.msr = msr;
 195         hv_vcpu->exit.u.synic.control = synic->control;
 196         hv_vcpu->exit.u.synic.evt_page = synic->evt_page;
 197         hv_vcpu->exit.u.synic.msg_page = synic->msg_page;
 198 
 199         kvm_make_request(KVM_REQ_HV_EXIT, vcpu);
 200 }
 201 
 202 static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
 203                          u32 msr, u64 data, bool host)
 204 {
 205         struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
 206         int ret;
 207 
 208         if (!synic->active && !host)
 209                 return 1;
 210 
 211         trace_kvm_hv_synic_set_msr(vcpu->vcpu_id, msr, data, host);
 212 
 213         ret = 0;
 214         switch (msr) {
 215         case HV_X64_MSR_SCONTROL:
 216                 synic->control = data;
 217                 if (!host)
 218                         synic_exit(synic, msr);
 219                 break;
 220         case HV_X64_MSR_SVERSION:
 221                 if (!host) {
 222                         ret = 1;
 223                         break;
 224                 }
 225                 synic->version = data;
 226                 break;
 227         case HV_X64_MSR_SIEFP:
 228                 if ((data & HV_SYNIC_SIEFP_ENABLE) && !host &&
 229                     !synic->dont_zero_synic_pages)
 230                         if (kvm_clear_guest(vcpu->kvm,
 231                                             data & PAGE_MASK, PAGE_SIZE)) {
 232                                 ret = 1;
 233                                 break;
 234                         }
 235                 synic->evt_page = data;
 236                 if (!host)
 237                         synic_exit(synic, msr);
 238                 break;
 239         case HV_X64_MSR_SIMP:
 240                 if ((data & HV_SYNIC_SIMP_ENABLE) && !host &&
 241                     !synic->dont_zero_synic_pages)
 242                         if (kvm_clear_guest(vcpu->kvm,
 243                                             data & PAGE_MASK, PAGE_SIZE)) {
 244                                 ret = 1;
 245                                 break;
 246                         }
 247                 synic->msg_page = data;
 248                 if (!host)
 249                         synic_exit(synic, msr);
 250                 break;
 251         case HV_X64_MSR_EOM: {
 252                 int i;
 253 
 254                 for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
 255                         kvm_hv_notify_acked_sint(vcpu, i);
 256                 break;
 257         }
 258         case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
 259                 ret = synic_set_sint(synic, msr - HV_X64_MSR_SINT0, data, host);
 260                 break;
 261         default:
 262                 ret = 1;
 263                 break;
 264         }
 265         return ret;
 266 }
 267 
 268 static int synic_get_msr(struct kvm_vcpu_hv_synic *synic, u32 msr, u64 *pdata,
 269                          bool host)
 270 {
 271         int ret;
 272 
 273         if (!synic->active && !host)
 274                 return 1;
 275 
 276         ret = 0;
 277         switch (msr) {
 278         case HV_X64_MSR_SCONTROL:
 279                 *pdata = synic->control;
 280                 break;
 281         case HV_X64_MSR_SVERSION:
 282                 *pdata = synic->version;
 283                 break;
 284         case HV_X64_MSR_SIEFP:
 285                 *pdata = synic->evt_page;
 286                 break;
 287         case HV_X64_MSR_SIMP:
 288                 *pdata = synic->msg_page;
 289                 break;
 290         case HV_X64_MSR_EOM:
 291                 *pdata = 0;
 292                 break;
 293         case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
 294                 *pdata = atomic64_read(&synic->sint[msr - HV_X64_MSR_SINT0]);
 295                 break;
 296         default:
 297                 ret = 1;
 298                 break;
 299         }
 300         return ret;
 301 }
 302 
 303 static int synic_set_irq(struct kvm_vcpu_hv_synic *synic, u32 sint)
 304 {
 305         struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
 306         struct kvm_lapic_irq irq;
 307         int ret, vector;
 308 
 309         if (sint >= ARRAY_SIZE(synic->sint))
 310                 return -EINVAL;
 311 
 312         vector = synic_get_sint_vector(synic_read_sint(synic, sint));
 313         if (vector < 0)
 314                 return -ENOENT;
 315 
 316         memset(&irq, 0, sizeof(irq));
 317         irq.shorthand = APIC_DEST_SELF;
 318         irq.dest_mode = APIC_DEST_PHYSICAL;
 319         irq.delivery_mode = APIC_DM_FIXED;
 320         irq.vector = vector;
 321         irq.level = 1;
 322 
 323         ret = kvm_irq_delivery_to_apic(vcpu->kvm, vcpu->arch.apic, &irq, NULL);
 324         trace_kvm_hv_synic_set_irq(vcpu->vcpu_id, sint, irq.vector, ret);
 325         return ret;
 326 }
 327 
 328 int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vpidx, u32 sint)
 329 {
 330         struct kvm_vcpu_hv_synic *synic;
 331 
 332         synic = synic_get(kvm, vpidx);
 333         if (!synic)
 334                 return -EINVAL;
 335 
 336         return synic_set_irq(synic, sint);
 337 }
 338 
 339 void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector)
 340 {
 341         struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
 342         int i;
 343 
 344         trace_kvm_hv_synic_send_eoi(vcpu->vcpu_id, vector);
 345 
 346         for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
 347                 if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
 348                         kvm_hv_notify_acked_sint(vcpu, i);
 349 }
 350 
 351 static int kvm_hv_set_sint_gsi(struct kvm *kvm, u32 vpidx, u32 sint, int gsi)
 352 {
 353         struct kvm_vcpu_hv_synic *synic;
 354 
 355         synic = synic_get(kvm, vpidx);
 356         if (!synic)
 357                 return -EINVAL;
 358 
 359         if (sint >= ARRAY_SIZE(synic->sint_to_gsi))
 360                 return -EINVAL;
 361 
 362         atomic_set(&synic->sint_to_gsi[sint], gsi);
 363         return 0;
 364 }
 365 
 366 void kvm_hv_irq_routing_update(struct kvm *kvm)
 367 {
 368         struct kvm_irq_routing_table *irq_rt;
 369         struct kvm_kernel_irq_routing_entry *e;
 370         u32 gsi;
 371 
 372         irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu,
 373                                         lockdep_is_held(&kvm->irq_lock));
 374 
 375         for (gsi = 0; gsi < irq_rt->nr_rt_entries; gsi++) {
 376                 hlist_for_each_entry(e, &irq_rt->map[gsi], link) {
 377                         if (e->type == KVM_IRQ_ROUTING_HV_SINT)
 378                                 kvm_hv_set_sint_gsi(kvm, e->hv_sint.vcpu,
 379                                                     e->hv_sint.sint, gsi);
 380                 }
 381         }
 382 }
 383 
 384 static void synic_init(struct kvm_vcpu_hv_synic *synic)
 385 {
 386         int i;
 387 
 388         memset(synic, 0, sizeof(*synic));
 389         synic->version = HV_SYNIC_VERSION_1;
 390         for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
 391                 atomic64_set(&synic->sint[i], HV_SYNIC_SINT_MASKED);
 392                 atomic_set(&synic->sint_to_gsi[i], -1);
 393         }
 394 }
 395 
 396 static u64 get_time_ref_counter(struct kvm *kvm)
 397 {
 398         struct kvm_hv *hv = &kvm->arch.hyperv;
 399         struct kvm_vcpu *vcpu;
 400         u64 tsc;
 401 
 402         /*
 403          * If the guest has not set up the TSC page or the clock isn't
 404          * stable, fall back to get_kvmclock_ns().
 405          */
 406         if (!hv->tsc_ref.tsc_sequence)
 407                 return div_u64(get_kvmclock_ns(kvm), 100);
 408 
 409         vcpu = kvm_get_vcpu(kvm, 0);
 410         tsc = kvm_read_l1_tsc(vcpu, rdtsc());
 411         return mul_u64_u64_shr(tsc, hv->tsc_ref.tsc_scale, 64)
 412                 + hv->tsc_ref.tsc_offset;
 413 }
 414 
 415 static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
 416                                 bool vcpu_kick)
 417 {
 418         struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);
 419 
 420         set_bit(stimer->index,
 421                 vcpu_to_hv_vcpu(vcpu)->stimer_pending_bitmap);
 422         kvm_make_request(KVM_REQ_HV_STIMER, vcpu);
 423         if (vcpu_kick)
 424                 kvm_vcpu_kick(vcpu);
 425 }
 426 
 427 static void stimer_cleanup(struct kvm_vcpu_hv_stimer *stimer)
 428 {
 429         struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);
 430 
 431         trace_kvm_hv_stimer_cleanup(stimer_to_vcpu(stimer)->vcpu_id,
 432                                     stimer->index);
 433 
 434         hrtimer_cancel(&stimer->timer);
 435         clear_bit(stimer->index,
 436                   vcpu_to_hv_vcpu(vcpu)->stimer_pending_bitmap);
 437         stimer->msg_pending = false;
 438         stimer->exp_time = 0;
 439 }
 440 
 441 static enum hrtimer_restart stimer_timer_callback(struct hrtimer *timer)
 442 {
 443         struct kvm_vcpu_hv_stimer *stimer;
 444 
 445         stimer = container_of(timer, struct kvm_vcpu_hv_stimer, timer);
 446         trace_kvm_hv_stimer_callback(stimer_to_vcpu(stimer)->vcpu_id,
 447                                      stimer->index);
 448         stimer_mark_pending(stimer, true);
 449 
 450         return HRTIMER_NORESTART;
 451 }
 452 
 453 /*
 454  * stimer_start() assumptions:
 455  * a) stimer->count is not equal to 0
 456  * b) stimer->config has HV_STIMER_ENABLE flag
 457  */
 458 static int stimer_start(struct kvm_vcpu_hv_stimer *stimer)
 459 {
 460         u64 time_now;
 461         ktime_t ktime_now;
 462 
 463         time_now = get_time_ref_counter(stimer_to_vcpu(stimer)->kvm);
 464         ktime_now = ktime_get();
 465 
 466         if (stimer->config.periodic) {
 467                 if (stimer->exp_time) {
 468                         if (time_now >= stimer->exp_time) {
 469                                 u64 remainder;
 470 
 471                                 div64_u64_rem(time_now - stimer->exp_time,
 472                                               stimer->count, &remainder);
 473                                 stimer->exp_time =
 474                                         time_now + (stimer->count - remainder);
 475                         }
 476                 } else
 477                         stimer->exp_time = time_now + stimer->count;
 478 
 479                 trace_kvm_hv_stimer_start_periodic(
 480                                         stimer_to_vcpu(stimer)->vcpu_id,
 481                                         stimer->index,
 482                                         time_now, stimer->exp_time);
 483 
 484                 hrtimer_start(&stimer->timer,
 485                               ktime_add_ns(ktime_now,
 486                                            100 * (stimer->exp_time - time_now)),
 487                               HRTIMER_MODE_ABS);
 488                 return 0;
 489         }
 490         stimer->exp_time = stimer->count;
 491         if (time_now >= stimer->count) {
 492                 /*
 493                  * Expire timer according to Hypervisor Top-Level Functional
 494          * specification v4 (15.3.1):
 495                  * "If a one shot is enabled and the specified count is in
 496                  * the past, it will expire immediately."
 497                  */
 498                 stimer_mark_pending(stimer, false);
 499                 return 0;
 500         }
 501 
 502         trace_kvm_hv_stimer_start_one_shot(stimer_to_vcpu(stimer)->vcpu_id,
 503                                            stimer->index,
 504                                            time_now, stimer->count);
 505 
 506         hrtimer_start(&stimer->timer,
 507                       ktime_add_ns(ktime_now, 100 * (stimer->count - time_now)),
 508                       HRTIMER_MODE_ABS);
 509         return 0;
 510 }
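
All of the arithmetic above runs in Hyper-V reference time (100ns units);
only at the end is the delta multiplied by 100 to feed the
nanosecond-based hrtimer. A worked run of the periodic re-arm path with
made-up numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* illustrative values, all in 100ns units */
        uint64_t count = 100000;       /* 10 ms period */
        uint64_t exp_time = 5000000;   /* last programmed expiry */
        uint64_t time_now = 5230000;   /* two periods and change later */

        /* same computation as the periodic branch above */
        uint64_t remainder = (time_now - exp_time) % count;    /* 30000 */
        exp_time = time_now + (count - remainder);             /* 5300000 */

        printf("next expiry at %llu, hrtimer delta %llu ns\n",
               (unsigned long long)exp_time,
               (unsigned long long)(100 * (exp_time - time_now))); /* 7000000 */
        return 0;
}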
 511 
 512 static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config,
 513                              bool host)
 514 {
 515         union hv_stimer_config new_config = {.as_uint64 = config},
 516                 old_config = {.as_uint64 = stimer->config.as_uint64};
 517 
 518         trace_kvm_hv_stimer_set_config(stimer_to_vcpu(stimer)->vcpu_id,
 519                                        stimer->index, config, host);
 520 
 521         stimer_cleanup(stimer);
 522         if (old_config.enable &&
 523             !new_config.direct_mode && new_config.sintx == 0)
 524                 new_config.enable = 0;
 525         stimer->config.as_uint64 = new_config.as_uint64;
 526 
 527         if (stimer->config.enable)
 528                 stimer_mark_pending(stimer, false);
 529 
 530         return 0;
 531 }
 532 
 533 static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count,
 534                             bool host)
 535 {
 536         trace_kvm_hv_stimer_set_count(stimer_to_vcpu(stimer)->vcpu_id,
 537                                       stimer->index, count, host);
 538 
 539         stimer_cleanup(stimer);
 540         stimer->count = count;
 541         if (stimer->count == 0)
 542                 stimer->config.enable = 0;
 543         else if (stimer->config.auto_enable)
 544                 stimer->config.enable = 1;
 545 
 546         if (stimer->config.enable)
 547                 stimer_mark_pending(stimer, false);
 548 
 549         return 0;
 550 }
 551 
 552 static int stimer_get_config(struct kvm_vcpu_hv_stimer *stimer, u64 *pconfig)
 553 {
 554         *pconfig = stimer->config.as_uint64;
 555         return 0;
 556 }
 557 
 558 static int stimer_get_count(struct kvm_vcpu_hv_stimer *stimer, u64 *pcount)
 559 {
 560         *pcount = stimer->count;
 561         return 0;
 562 }
 563 
 564 static int synic_deliver_msg(struct kvm_vcpu_hv_synic *synic, u32 sint,
 565                              struct hv_message *src_msg, bool no_retry)
 566 {
 567         struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
 568         int msg_off = offsetof(struct hv_message_page, sint_message[sint]);
 569         gfn_t msg_page_gfn;
 570         struct hv_message_header hv_hdr;
 571         int r;
 572 
 573         if (!(synic->msg_page & HV_SYNIC_SIMP_ENABLE))
 574                 return -ENOENT;
 575 
 576         msg_page_gfn = synic->msg_page >> PAGE_SHIFT;
 577 
 578         /*
 579          * Strictly following the spec-mandated ordering would assume setting
 580          * .msg_pending before checking .message_type.  However, this function
 581          * is only called in vcpu context so the entire update is atomic from
 582          * guest POV and thus the exact order here doesn't matter.
 583          */
 584         r = kvm_vcpu_read_guest_page(vcpu, msg_page_gfn, &hv_hdr.message_type,
 585                                      msg_off + offsetof(struct hv_message,
 586                                                         header.message_type),
 587                                      sizeof(hv_hdr.message_type));
 588         if (r < 0)
 589                 return r;
 590 
 591         if (hv_hdr.message_type != HVMSG_NONE) {
 592                 if (no_retry)
 593                         return 0;
 594 
 595                 hv_hdr.message_flags.msg_pending = 1;
 596                 r = kvm_vcpu_write_guest_page(vcpu, msg_page_gfn,
 597                                               &hv_hdr.message_flags,
 598                                               msg_off +
 599                                               offsetof(struct hv_message,
 600                                                        header.message_flags),
 601                                               sizeof(hv_hdr.message_flags));
 602                 if (r < 0)
 603                         return r;
 604                 return -EAGAIN;
 605         }
 606 
 607         r = kvm_vcpu_write_guest_page(vcpu, msg_page_gfn, src_msg, msg_off,
 608                                       sizeof(src_msg->header) +
 609                                       src_msg->header.payload_size);
 610         if (r < 0)
 611                 return r;
 612 
 613         r = synic_set_irq(synic, sint);
 614         if (r < 0)
 615                 return r;
 616         if (r == 0)
 617                 return -EFAULT;
 618         return 0;
 619 }
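
The msg_off computation above works because each SINT owns one fixed-size
message slot in the message page. A small standalone check of that
layout, assuming the TLFS message size of 256 bytes (the struct here is a
raw-bytes stand-in for the real struct hv_message):

#include <assert.h>
#include <stddef.h>

#define HV_MESSAGE_SIZE 256            /* per the TLFS */

struct hv_message { unsigned char raw[HV_MESSAGE_SIZE]; };
struct hv_message_page { struct hv_message sint_message[16]; };

int main(void)
{
        /* SINT 2's slot starts 512 bytes into the message page */
        assert(offsetof(struct hv_message_page, sint_message[2]) == 512);
        return 0;
}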
 620 
 621 static int stimer_send_msg(struct kvm_vcpu_hv_stimer *stimer)
 622 {
 623         struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);
 624         struct hv_message *msg = &stimer->msg;
 625         struct hv_timer_message_payload *payload =
 626                         (struct hv_timer_message_payload *)&msg->u.payload;
 627 
 628         /*
 629          * To avoid piling up periodic ticks, don't retry message
 630          * delivery for them (within the "lazy" lost ticks policy).
 631          */
 632         bool no_retry = stimer->config.periodic;
 633 
 634         payload->expiration_time = stimer->exp_time;
 635         payload->delivery_time = get_time_ref_counter(vcpu->kvm);
 636         return synic_deliver_msg(vcpu_to_synic(vcpu),
 637                                  stimer->config.sintx, msg,
 638                                  no_retry);
 639 }
 640 
 641 static int stimer_notify_direct(struct kvm_vcpu_hv_stimer *stimer)
 642 {
 643         struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);
 644         struct kvm_lapic_irq irq = {
 645                 .delivery_mode = APIC_DM_FIXED,
 646                 .vector = stimer->config.apic_vector
 647         };
 648 
 649         if (lapic_in_kernel(vcpu))
 650                 return !kvm_apic_set_irq(vcpu, &irq, NULL);
 651         return 0;
 652 }
 653 
 654 static void stimer_expiration(struct kvm_vcpu_hv_stimer *stimer)
 655 {
 656         int r, direct = stimer->config.direct_mode;
 657 
 658         stimer->msg_pending = true;
 659         if (!direct)
 660                 r = stimer_send_msg(stimer);
 661         else
 662                 r = stimer_notify_direct(stimer);
 663         trace_kvm_hv_stimer_expiration(stimer_to_vcpu(stimer)->vcpu_id,
 664                                        stimer->index, direct, r);
 665         if (!r) {
 666                 stimer->msg_pending = false;
 667                 if (!(stimer->config.periodic))
 668                         stimer->config.enable = 0;
 669         }
 670 }
 671 
 672 void kvm_hv_process_stimers(struct kvm_vcpu *vcpu)
 673 {
 674         struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
 675         struct kvm_vcpu_hv_stimer *stimer;
 676         u64 time_now, exp_time;
 677         int i;
 678 
 679         for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
 680                 if (test_and_clear_bit(i, hv_vcpu->stimer_pending_bitmap)) {
 681                         stimer = &hv_vcpu->stimer[i];
 682                         if (stimer->config.enable) {
 683                                 exp_time = stimer->exp_time;
 684 
 685                                 if (exp_time) {
 686                                         time_now =
 687                                                 get_time_ref_counter(vcpu->kvm);
 688                                         if (time_now >= exp_time)
 689                                                 stimer_expiration(stimer);
 690                                 }
 691 
 692                                 if ((stimer->config.enable) &&
 693                                     stimer->count) {
 694                                         if (!stimer->msg_pending)
 695                                                 stimer_start(stimer);
 696                                 } else
 697                                         stimer_cleanup(stimer);
 698                         }
 699                 }
 700 }
 701 
 702 void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu)
 703 {
 704         struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
 705         int i;
 706 
 707         for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
 708                 stimer_cleanup(&hv_vcpu->stimer[i]);
 709 }
 710 
 711 bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu)
 712 {
 713         if (!(vcpu->arch.hyperv.hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE))
 714                 return false;
 715         return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
 716 }
 717 EXPORT_SYMBOL_GPL(kvm_hv_assist_page_enabled);
 718 
 719 bool kvm_hv_get_assist_page(struct kvm_vcpu *vcpu,
 720                             struct hv_vp_assist_page *assist_page)
 721 {
 722         if (!kvm_hv_assist_page_enabled(vcpu))
 723                 return false;
 724         return !kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data,
 725                                       assist_page, sizeof(*assist_page));
 726 }
 727 EXPORT_SYMBOL_GPL(kvm_hv_get_assist_page);
 728 
 729 static void stimer_prepare_msg(struct kvm_vcpu_hv_stimer *stimer)
 730 {
 731         struct hv_message *msg = &stimer->msg;
 732         struct hv_timer_message_payload *payload =
 733                         (struct hv_timer_message_payload *)&msg->u.payload;
 734 
 735         memset(&msg->header, 0, sizeof(msg->header));
 736         msg->header.message_type = HVMSG_TIMER_EXPIRED;
 737         msg->header.payload_size = sizeof(*payload);
 738 
 739         payload->timer_index = stimer->index;
 740         payload->expiration_time = 0;
 741         payload->delivery_time = 0;
 742 }
 743 
 744 static void stimer_init(struct kvm_vcpu_hv_stimer *stimer, int timer_index)
 745 {
 746         memset(stimer, 0, sizeof(*stimer));
 747         stimer->index = timer_index;
 748         hrtimer_init(&stimer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
 749         stimer->timer.function = stimer_timer_callback;
 750         stimer_prepare_msg(stimer);
 751 }
 752 
 753 void kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
 754 {
 755         struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
 756         int i;
 757 
 758         synic_init(&hv_vcpu->synic);
 759 
 760         bitmap_zero(hv_vcpu->stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
 761         for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
 762                 stimer_init(&hv_vcpu->stimer[i], i);
 763 }
 764 
 765 void kvm_hv_vcpu_postcreate(struct kvm_vcpu *vcpu)
 766 {
 767         struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
 768 
 769         hv_vcpu->vp_index = kvm_vcpu_get_idx(vcpu);
 770 }
 771 
 772 int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages)
 773 {
 774         struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
 775 
 776         /*
 777          * Hyper-V SynIC auto-EOI SINTs are not
 778          * compatible with APICv, so deactivate APICv.
 779          */
 780         kvm_vcpu_deactivate_apicv(vcpu);
 781         synic->active = true;
 782         synic->dont_zero_synic_pages = dont_zero_synic_pages;
 783         return 0;
 784 }
 785 
 786 static bool kvm_hv_msr_partition_wide(u32 msr)
 787 {
 788         bool r = false;
 789 
 790         switch (msr) {
 791         case HV_X64_MSR_GUEST_OS_ID:
 792         case HV_X64_MSR_HYPERCALL:
 793         case HV_X64_MSR_REFERENCE_TSC:
 794         case HV_X64_MSR_TIME_REF_COUNT:
 795         case HV_X64_MSR_CRASH_CTL:
 796         case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
 797         case HV_X64_MSR_RESET:
 798         case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
 799         case HV_X64_MSR_TSC_EMULATION_CONTROL:
 800         case HV_X64_MSR_TSC_EMULATION_STATUS:
 801                 r = true;
 802                 break;
 803         }
 804 
 805         return r;
 806 }
 807 
 808 static int kvm_hv_msr_get_crash_data(struct kvm_vcpu *vcpu,
 809                                      u32 index, u64 *pdata)
 810 {
 811         struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
 812         size_t size = ARRAY_SIZE(hv->hv_crash_param);
 813 
 814         if (WARN_ON_ONCE(index >= size))
 815                 return -EINVAL;
 816 
 817         *pdata = hv->hv_crash_param[array_index_nospec(index, size)];
 818         return 0;
 819 }
 820 
 821 static int kvm_hv_msr_get_crash_ctl(struct kvm_vcpu *vcpu, u64 *pdata)
 822 {
 823         struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
 824 
 825         *pdata = hv->hv_crash_ctl;
 826         return 0;
 827 }
 828 
 829 static int kvm_hv_msr_set_crash_ctl(struct kvm_vcpu *vcpu, u64 data, bool host)
 830 {
 831         struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
 832 
 833         if (host)
 834                 hv->hv_crash_ctl = data & HV_CRASH_CTL_CRASH_NOTIFY;
 835 
 836         if (!host && (data & HV_CRASH_CTL_CRASH_NOTIFY)) {
 837 
 838                 vcpu_debug(vcpu, "hv crash (0x%llx 0x%llx 0x%llx 0x%llx 0x%llx)\n",
 839                           hv->hv_crash_param[0],
 840                           hv->hv_crash_param[1],
 841                           hv->hv_crash_param[2],
 842                           hv->hv_crash_param[3],
 843                           hv->hv_crash_param[4]);
 844 
 845                 /* Send notification about crash to user space */
 846                 kvm_make_request(KVM_REQ_HV_CRASH, vcpu);
 847         }
 848 
 849         return 0;
 850 }
 851 
 852 static int kvm_hv_msr_set_crash_data(struct kvm_vcpu *vcpu,
 853                                      u32 index, u64 data)
 854 {
 855         struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
 856         size_t size = ARRAY_SIZE(hv->hv_crash_param);
 857 
 858         if (WARN_ON_ONCE(index >= size))
 859                 return -EINVAL;
 860 
 861         hv->hv_crash_param[array_index_nospec(index, size)] = data;
 862         return 0;
 863 }
 864 
 865 /*
 866  * The kvmclock and Hyper-V TSC page use similar formulas, and converting
 867  * between them is possible:
 868  *
 869  * kvmclock formula:
 870  *    nsec = (ticks - tsc_timestamp) * tsc_to_system_mul * 2^(tsc_shift-32)
 871  *           + system_time
 872  *
 873  * Hyper-V formula:
 874  *    nsec/100 = ticks * scale / 2^64 + offset
 875  *
 876  * When tsc_timestamp = system_time = 0, offset is zero in the Hyper-V formula.
 877  * By dividing the kvmclock formula by 100 and equating what's left we get:
 878  *    ticks * scale / 2^64 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100
 879  *            scale / 2^64 =         tsc_to_system_mul * 2^(tsc_shift-32) / 100
 880  *            scale        =         tsc_to_system_mul * 2^(32+tsc_shift) / 100
 881  *
 882  * Now expand the kvmclock formula and divide by 100:
 883  *    nsec = ticks * tsc_to_system_mul * 2^(tsc_shift-32)
 884  *           - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32)
 885  *           + system_time
 886  *    nsec/100 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100
 887  *               - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32) / 100
 888  *               + system_time / 100
 889  *
 890  * Replace tsc_to_system_mul * 2^(tsc_shift-32) / 100 by scale / 2^64:
 891  *    nsec/100 = ticks * scale / 2^64
 892  *               - tsc_timestamp * scale / 2^64
 893  *               + system_time / 100
 894  *
 895  * Equate with the Hyper-V formula so that ticks * scale / 2^64 cancels out:
 896  *    offset = system_time / 100 - tsc_timestamp * scale / 2^64
 897  *
 898  * These two equivalencies are implemented in this function.
 899  */
 900 static bool compute_tsc_page_parameters(struct pvclock_vcpu_time_info *hv_clock,
 901                                         HV_REFERENCE_TSC_PAGE *tsc_ref)
 902 {
 903         u64 max_mul;
 904 
 905         if (!(hv_clock->flags & PVCLOCK_TSC_STABLE_BIT))
 906                 return false;
 907 
 908         /*
 909          * check if scale would overflow, if so we use the time ref counter
 910          *    tsc_to_system_mul * 2^(tsc_shift+32) / 100 >= 2^64
 911          *    tsc_to_system_mul / 100 >= 2^(32-tsc_shift)
 912          *    tsc_to_system_mul >= 100 * 2^(32-tsc_shift)
 913          */
 914         max_mul = 100ull << (32 - hv_clock->tsc_shift);
 915         if (hv_clock->tsc_to_system_mul >= max_mul)
 916                 return false;
 917 
 918         /*
 919          * Otherwise compute the scale and offset according to the formulas
 920          * derived above.
 921          */
 922         tsc_ref->tsc_scale =
 923                 mul_u64_u32_div(1ULL << (32 + hv_clock->tsc_shift),
 924                                 hv_clock->tsc_to_system_mul,
 925                                 100);
 926 
 927         tsc_ref->tsc_offset = hv_clock->system_time;
 928         do_div(tsc_ref->tsc_offset, 100);
 929         tsc_ref->tsc_offset -=
 930                 mul_u64_u64_shr(hv_clock->tsc_timestamp, tsc_ref->tsc_scale, 64);
 931         return true;
 932 }
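
The scale formula above can be sanity-checked numerically. Assuming a
1 GHz TSC described by kvmclock as tsc_to_system_mul = 2^31 and
tsc_shift = 1 (so nsec = ticks * 2^31 * 2^(1-32) = ticks; illustrative
values, not real hardware), scale comes out close to 2^64 / 100 and one
second of ticks yields about 10^7 units of 100ns. The sketch uses the
compiler's 128-bit arithmetic in place of mul_u64_u64_shr():

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t tsc_to_system_mul = 1u << 31;
        int tsc_shift = 1;

        /* scale = tsc_to_system_mul * 2^(32 + tsc_shift) / 100 */
        uint64_t scale = (uint64_t)
                (((unsigned __int128)tsc_to_system_mul << (32 + tsc_shift)) / 100);

        uint64_t ticks = 1000000000;   /* one second at 1 GHz */
        uint64_t ref = (uint64_t)(((unsigned __int128)ticks * scale) >> 64);

        /* one second = 10^7 units of 100 ns */
        printf("ref = %llu (expect ~10000000)\n", (unsigned long long)ref);
        return 0;
}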
 933 
 934 void kvm_hv_setup_tsc_page(struct kvm *kvm,
 935                            struct pvclock_vcpu_time_info *hv_clock)
 936 {
 937         struct kvm_hv *hv = &kvm->arch.hyperv;
 938         u32 tsc_seq;
 939         u64 gfn;
 940 
 941         BUILD_BUG_ON(sizeof(tsc_seq) != sizeof(hv->tsc_ref.tsc_sequence));
 942         BUILD_BUG_ON(offsetof(HV_REFERENCE_TSC_PAGE, tsc_sequence) != 0);
 943 
 944         if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
 945                 return;
 946 
 947         mutex_lock(&kvm->arch.hyperv.hv_lock);
 948         if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
 949                 goto out_unlock;
 950 
 951         gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
 952         /*
 953          * Because the TSC parameters only vary when there is a
 954          * change in the master clock, do not bother with caching.
 955          */
 956         if (unlikely(kvm_read_guest(kvm, gfn_to_gpa(gfn),
 957                                     &tsc_seq, sizeof(tsc_seq))))
 958                 goto out_unlock;
 959 
 960         /*
 961          * While we're computing and writing the parameters, force the
 962          * guest to use the time reference count MSR.
 963          */
 964         hv->tsc_ref.tsc_sequence = 0;
 965         if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
 966                             &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
 967                 goto out_unlock;
 968 
 969         if (!compute_tsc_page_parameters(hv_clock, &hv->tsc_ref))
 970                 goto out_unlock;
 971 
 972         /* Ensure sequence is zero before writing the rest of the struct.  */
 973         smp_wmb();
 974         if (kvm_write_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref)))
 975                 goto out_unlock;
 976 
 977         /*
 978          * Now switch to the TSC page mechanism by writing the sequence.
 979          */
 980         tsc_seq++;
 981         if (tsc_seq == 0xFFFFFFFF || tsc_seq == 0)
 982                 tsc_seq = 1;
 983 
 984         /* Write the struct entirely before the non-zero sequence.  */
 985         smp_wmb();
 986 
 987         hv->tsc_ref.tsc_sequence = tsc_seq;
 988         kvm_write_guest(kvm, gfn_to_gpa(gfn),
 989                         &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence));
 990 out_unlock:
 991         mutex_unlock(&kvm->arch.hyperv.hv_lock);
 992 }
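
The zero-sequence / barrier / nonzero-sequence dance above is the writer
half of a seqcount-style protocol. For contrast, here is a sketch of the
reader half as a guest might implement it: field names follow
HV_REFERENCE_TSC_PAGE, read_tsc() and read_time_ref_count_msr() are
hypothetical stubs, and the 128-bit multiply stands in for
mul_u64_u64_shr().

#include <stdint.h>

struct tsc_page {                      /* mirrors HV_REFERENCE_TSC_PAGE */
        volatile uint32_t tsc_sequence;
        uint32_t reserved1;
        volatile uint64_t tsc_scale;
        volatile int64_t tsc_offset;
};

extern uint64_t read_tsc(void);                /* hypothetical: rdtsc wrapper */
extern uint64_t read_time_ref_count_msr(void); /* hypothetical: MSR fallback */

uint64_t read_ref_time(struct tsc_page *p)
{
        uint32_t seq;
        uint64_t tsc, scale;
        int64_t offset;

        do {
                seq = p->tsc_sequence;
                if (seq == 0)          /* host is mid-update: fall back */
                        return read_time_ref_count_msr();
                tsc = read_tsc();
                scale = p->tsc_scale;
                offset = p->tsc_offset;
        } while (p->tsc_sequence != seq); /* retry if it changed under us */

        return (uint64_t)(((unsigned __int128)tsc * scale) >> 64) + offset;
}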
 993 
 994 static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
 995                              bool host)
 996 {
 997         struct kvm *kvm = vcpu->kvm;
 998         struct kvm_hv *hv = &kvm->arch.hyperv;
 999 
1000         switch (msr) {
1001         case HV_X64_MSR_GUEST_OS_ID:
1002                 hv->hv_guest_os_id = data;
1003                 /* setting guest os id to zero disables hypercall page */
1004                 if (!hv->hv_guest_os_id)
1005                         hv->hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
1006                 break;
1007         case HV_X64_MSR_HYPERCALL: {
1008                 u64 gfn;
1009                 unsigned long addr;
1010                 u8 instructions[4];
1011 
1012                 /* if guest os id is not set, hypercall should remain disabled */
1013                 if (!hv->hv_guest_os_id)
1014                         break;
1015                 if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
1016                         hv->hv_hypercall = data;
1017                         break;
1018                 }
1019                 gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
1020                 addr = gfn_to_hva(kvm, gfn);
1021                 if (kvm_is_error_hva(addr))
1022                         return 1;
1023                 kvm_x86_ops->patch_hypercall(vcpu, instructions);
1024                 ((unsigned char *)instructions)[3] = 0xc3; /* ret */
1025                 if (__copy_to_user((void __user *)addr, instructions, 4))
1026                         return 1;
1027                 hv->hv_hypercall = data;
1028                 mark_page_dirty(kvm, gfn);
1029                 break;
1030         }
1031         case HV_X64_MSR_REFERENCE_TSC:
1032                 hv->hv_tsc_page = data;
1033                 if (hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE)
1034                         kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
1035                 break;
1036         case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
1037                 return kvm_hv_msr_set_crash_data(vcpu,
1038                                                  msr - HV_X64_MSR_CRASH_P0,
1039                                                  data);
1040         case HV_X64_MSR_CRASH_CTL:
1041                 return kvm_hv_msr_set_crash_ctl(vcpu, data, host);
1042         case HV_X64_MSR_RESET:
1043                 if (data == 1) {
1044                         vcpu_debug(vcpu, "hyper-v reset requested\n");
1045                         kvm_make_request(KVM_REQ_HV_RESET, vcpu);
1046                 }
1047                 break;
1048         case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
1049                 hv->hv_reenlightenment_control = data;
1050                 break;
1051         case HV_X64_MSR_TSC_EMULATION_CONTROL:
1052                 hv->hv_tsc_emulation_control = data;
1053                 break;
1054         case HV_X64_MSR_TSC_EMULATION_STATUS:
1055                 hv->hv_tsc_emulation_status = data;
1056                 break;
1057         case HV_X64_MSR_TIME_REF_COUNT:
1058                 /* read-only, but still ignore it if host-initiated */
1059                 if (!host)
1060                         return 1;
1061                 break;
1062         default:
1063                 vcpu_unimpl(vcpu, "Hyper-V uhandled wrmsr: 0x%x data 0x%llx\n",
1064                             msr, data);
1065                 return 1;
1066         }
1067         return 0;
1068 }
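
For the HV_X64_MSR_HYPERCALL case above, patch_hypercall() emits the
vendor-specific hypercall instruction and the code appends a ret, so the
start of the guest's hypercall page ends up looking roughly like this
(illustrative bytes: vmcall on Intel, vmmcall 0x0f 0x01 0xd9 on AMD):

static const unsigned char hypercall_page_start[4] = {
        0x0f, 0x01, 0xc1,              /* vmcall (Intel) */
        0xc3,                          /* ret */
};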
1069 
1070 /* Calculate cpu time spent by current task in 100ns units */
1071 static u64 current_task_runtime_100ns(void)
1072 {
1073         u64 utime, stime;
1074 
1075         task_cputime_adjusted(current, &utime, &stime);
1076 
1077         return div_u64(utime + stime, 100);
1078 }
1079 
1080 static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
1081 {
1082         struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;
1083 
1084         switch (msr) {
1085         case HV_X64_MSR_VP_INDEX: {
1086                 struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
1087                 int vcpu_idx = kvm_vcpu_get_idx(vcpu);
1088                 u32 new_vp_index = (u32)data;
1089 
1090                 if (!host || new_vp_index >= KVM_MAX_VCPUS)
1091                         return 1;
1092 
1093                 if (new_vp_index == hv_vcpu->vp_index)
1094                         return 0;
1095 
1096                 /*
1097                  * The VP index is initialized to vcpu_index by
1098                  * kvm_hv_vcpu_postcreate so they initially match.  Now the
1099                  * VP index is changing, adjust num_mismatched_vp_indexes if
1100                  * it now matches or no longer matches vcpu_idx.
1101                  */
1102                 if (hv_vcpu->vp_index == vcpu_idx)
1103                         atomic_inc(&hv->num_mismatched_vp_indexes);
1104                 else if (new_vp_index == vcpu_idx)
1105                         atomic_dec(&hv->num_mismatched_vp_indexes);
1106 
1107                 hv_vcpu->vp_index = new_vp_index;
1108                 break;
1109         }
1110         case HV_X64_MSR_VP_ASSIST_PAGE: {
1111                 u64 gfn;
1112                 unsigned long addr;
1113 
1114                 if (!(data & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE)) {
1115                         hv_vcpu->hv_vapic = data;
1116                         if (kvm_lapic_enable_pv_eoi(vcpu, 0, 0))
1117                                 return 1;
1118                         break;
1119                 }
1120                 gfn = data >> HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT;
1121                 addr = kvm_vcpu_gfn_to_hva(vcpu, gfn);
1122                 if (kvm_is_error_hva(addr))
1123                         return 1;
1124 
1125                 /*
1126                  * Clear apic_assist portion of struct hv_vp_assist_page
1127                  * only, there can be valuable data in the rest which needs
1128                  * to be preserved e.g. on migration.
1129                  */
1130                 if (__clear_user((void __user *)addr, sizeof(u32)))
1131                         return 1;
1132                 hv_vcpu->hv_vapic = data;
1133                 kvm_vcpu_mark_page_dirty(vcpu, gfn);
1134                 if (kvm_lapic_enable_pv_eoi(vcpu,
1135                                             gfn_to_gpa(gfn) | KVM_MSR_ENABLED,
1136                                             sizeof(struct hv_vp_assist_page)))
1137                         return 1;
1138                 break;
1139         }
1140         case HV_X64_MSR_EOI:
1141                 return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
1142         case HV_X64_MSR_ICR:
1143                 return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
1144         case HV_X64_MSR_TPR:
1145                 return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
1146         case HV_X64_MSR_VP_RUNTIME:
1147                 if (!host)
1148                         return 1;
1149                 hv_vcpu->runtime_offset = data - current_task_runtime_100ns();
1150                 break;
1151         case HV_X64_MSR_SCONTROL:
1152         case HV_X64_MSR_SVERSION:
1153         case HV_X64_MSR_SIEFP:
1154         case HV_X64_MSR_SIMP:
1155         case HV_X64_MSR_EOM:
1156         case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
1157                 return synic_set_msr(vcpu_to_synic(vcpu), msr, data, host);
1158         case HV_X64_MSR_STIMER0_CONFIG:
1159         case HV_X64_MSR_STIMER1_CONFIG:
1160         case HV_X64_MSR_STIMER2_CONFIG:
1161         case HV_X64_MSR_STIMER3_CONFIG: {
1162                 int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2;
1163 
1164                 return stimer_set_config(vcpu_to_stimer(vcpu, timer_index),
1165                                          data, host);
1166         }
1167         case HV_X64_MSR_STIMER0_COUNT:
1168         case HV_X64_MSR_STIMER1_COUNT:
1169         case HV_X64_MSR_STIMER2_COUNT:
1170         case HV_X64_MSR_STIMER3_COUNT: {
1171                 int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2;
1172 
1173                 return stimer_set_count(vcpu_to_stimer(vcpu, timer_index),
1174                                         data, host);
1175         }
1176         case HV_X64_MSR_TSC_FREQUENCY:
1177         case HV_X64_MSR_APIC_FREQUENCY:
1178                 /* read-only, but still ignore it if host-initiated */
1179                 if (!host)
1180                         return 1;
1181                 break;
1182         default:
1183                 vcpu_unimpl(vcpu, "Hyper-V uhandled wrmsr: 0x%x data 0x%llx\n",
1184                             msr, data);
1185                 return 1;
1186         }
1187 
1188         return 0;
1189 }
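
The num_mismatched_vp_indexes accounting in the HV_X64_MSR_VP_INDEX case
above can be traced with made-up values for a vCPU whose vcpu_idx is 1:

        write vp_index = 5:  old (1) == vcpu_idx           -> counter++  (0 -> 1)
        write vp_index = 7:  neither old (5) nor new match -> no change  (still 1)
        write vp_index = 1:  new == vcpu_idx               -> counter--  (1 -> 0)

While the counter is zero, sparse_set_to_vcpu_mask() below can hand the
guest's VP bitmap straight back as a vcpu-index bitmap.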
1190 
1191 static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1192 {
1193         u64 data = 0;
1194         struct kvm *kvm = vcpu->kvm;
1195         struct kvm_hv *hv = &kvm->arch.hyperv;
1196 
1197         switch (msr) {
1198         case HV_X64_MSR_GUEST_OS_ID:
1199                 data = hv->hv_guest_os_id;
1200                 break;
1201         case HV_X64_MSR_HYPERCALL:
1202                 data = hv->hv_hypercall;
1203                 break;
1204         case HV_X64_MSR_TIME_REF_COUNT:
1205                 data = get_time_ref_counter(kvm);
1206                 break;
1207         case HV_X64_MSR_REFERENCE_TSC:
1208                 data = hv->hv_tsc_page;
1209                 break;
1210         case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
1211                 return kvm_hv_msr_get_crash_data(vcpu,
1212                                                  msr - HV_X64_MSR_CRASH_P0,
1213                                                  pdata);
1214         case HV_X64_MSR_CRASH_CTL:
1215                 return kvm_hv_msr_get_crash_ctl(vcpu, pdata);
1216         case HV_X64_MSR_RESET:
1217                 data = 0;
1218                 break;
1219         case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
1220                 data = hv->hv_reenlightenment_control;
1221                 break;
1222         case HV_X64_MSR_TSC_EMULATION_CONTROL:
1223                 data = hv->hv_tsc_emulation_control;
1224                 break;
1225         case HV_X64_MSR_TSC_EMULATION_STATUS:
1226                 data = hv->hv_tsc_emulation_status;
1227                 break;
1228         default:
1229                 vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
1230                 return 1;
1231         }
1232 
1233         *pdata = data;
1234         return 0;
1235 }
1236 
1237 static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
1238                           bool host)
1239 {
1240         u64 data = 0;
1241         struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;
1242 
1243         switch (msr) {
1244         case HV_X64_MSR_VP_INDEX:
1245                 data = hv_vcpu->vp_index;
1246                 break;
1247         case HV_X64_MSR_EOI:
1248                 return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
1249         case HV_X64_MSR_ICR:
1250                 return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
1251         case HV_X64_MSR_TPR:
1252                 return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
1253         case HV_X64_MSR_VP_ASSIST_PAGE:
1254                 data = hv_vcpu->hv_vapic;
1255                 break;
1256         case HV_X64_MSR_VP_RUNTIME:
1257                 data = current_task_runtime_100ns() + hv_vcpu->runtime_offset;
1258                 break;
1259         case HV_X64_MSR_SCONTROL:
1260         case HV_X64_MSR_SVERSION:
1261         case HV_X64_MSR_SIEFP:
1262         case HV_X64_MSR_SIMP:
1263         case HV_X64_MSR_EOM:
1264         case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
1265                 return synic_get_msr(vcpu_to_synic(vcpu), msr, pdata, host);
1266         case HV_X64_MSR_STIMER0_CONFIG:
1267         case HV_X64_MSR_STIMER1_CONFIG:
1268         case HV_X64_MSR_STIMER2_CONFIG:
1269         case HV_X64_MSR_STIMER3_CONFIG: {
1270                 int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2;
1271 
1272                 return stimer_get_config(vcpu_to_stimer(vcpu, timer_index),
1273                                          pdata);
1274         }
1275         case HV_X64_MSR_STIMER0_COUNT:
1276         case HV_X64_MSR_STIMER1_COUNT:
1277         case HV_X64_MSR_STIMER2_COUNT:
1278         case HV_X64_MSR_STIMER3_COUNT: {
1279                 int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2;
1280 
1281                 return stimer_get_count(vcpu_to_stimer(vcpu, timer_index),
1282                                         pdata);
1283         }
1284         case HV_X64_MSR_TSC_FREQUENCY:
1285                 data = (u64)vcpu->arch.virtual_tsc_khz * 1000;
1286                 break;
1287         case HV_X64_MSR_APIC_FREQUENCY:
1288                 data = APIC_BUS_FREQUENCY;
1289                 break;
1290         default:
1291                 vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
1292                 return 1;
1293         }
1294         *pdata = data;
1295         return 0;
1296 }
1297 
1298 int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
1299 {
1300         if (kvm_hv_msr_partition_wide(msr)) {
1301                 int r;
1302 
1303                 mutex_lock(&vcpu->kvm->arch.hyperv.hv_lock);
1304                 r = kvm_hv_set_msr_pw(vcpu, msr, data, host);
1305                 mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock);
1306                 return r;
1307         } else
1308                 return kvm_hv_set_msr(vcpu, msr, data, host);
1309 }
1310 
1311 int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
1312 {
1313         if (kvm_hv_msr_partition_wide(msr)) {
1314                 int r;
1315 
1316                 mutex_lock(&vcpu->kvm->arch.hyperv.hv_lock);
1317                 r = kvm_hv_get_msr_pw(vcpu, msr, pdata);
1318                 mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock);
1319                 return r;
1320         } else
1321                 return kvm_hv_get_msr(vcpu, msr, pdata, host);
1322 }
1323 
1324 static __always_inline unsigned long *sparse_set_to_vcpu_mask(
1325         struct kvm *kvm, u64 *sparse_banks, u64 valid_bank_mask,
1326         u64 *vp_bitmap, unsigned long *vcpu_bitmap)
1327 {
1328         struct kvm_hv *hv = &kvm->arch.hyperv;
1329         struct kvm_vcpu *vcpu;
1330         int i, bank, sbank = 0;
1331 
1332         memset(vp_bitmap, 0,
1333                KVM_HV_MAX_SPARSE_VCPU_SET_BITS * sizeof(*vp_bitmap));
1334         for_each_set_bit(bank, (unsigned long *)&valid_bank_mask,
1335                          KVM_HV_MAX_SPARSE_VCPU_SET_BITS)
1336                 vp_bitmap[bank] = sparse_banks[sbank++];
1337 
1338         if (likely(!atomic_read(&hv->num_mismatched_vp_indexes))) {
1339                 /* for all vcpus vp_index == vcpu_idx */
1340                 return (unsigned long *)vp_bitmap;
1341         }
1342 
1343         bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
1344         kvm_for_each_vcpu(i, vcpu, kvm) {
1345                 if (test_bit(vcpu_to_hv_vcpu(vcpu)->vp_index,
1346                              (unsigned long *)vp_bitmap))
1347                         __set_bit(i, vcpu_bitmap);
1348         }
1349         return vcpu_bitmap;
1350 }
1351 
1352 static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa,
1353                             u16 rep_cnt, bool ex)
1354 {
1355         struct kvm *kvm = current_vcpu->kvm;
1356         struct kvm_vcpu_hv *hv_vcpu = &current_vcpu->arch.hyperv;
1357         struct hv_tlb_flush_ex flush_ex;
1358         struct hv_tlb_flush flush;
1359         u64 vp_bitmap[KVM_HV_MAX_SPARSE_VCPU_SET_BITS];
1360         DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
1361         unsigned long *vcpu_mask;
1362         u64 valid_bank_mask;
1363         u64 sparse_banks[64];
1364         int sparse_banks_len;
1365         bool all_cpus;
1366 
1367         if (!ex) {
1368                 if (unlikely(kvm_read_guest(kvm, ingpa, &flush, sizeof(flush))))
1369                         return HV_STATUS_INVALID_HYPERCALL_INPUT;
1370 
1371                 trace_kvm_hv_flush_tlb(flush.processor_mask,
1372                                        flush.address_space, flush.flags);
1373 
1374                 valid_bank_mask = BIT_ULL(0);
1375                 sparse_banks[0] = flush.processor_mask;
1376 
1377                 /*
1378                  * Work around a possible WS2012 bug: it sends hypercalls
1379                  * with processor_mask = 0x0 and HV_FLUSH_ALL_PROCESSORS clear,
1380                  * while still expecting us to flush something, and crashes if
1381                  * we don't. Treat processor_mask == 0 the same as
1382                  * HV_FLUSH_ALL_PROCESSORS.
1383                  */
1384                 all_cpus = (flush.flags & HV_FLUSH_ALL_PROCESSORS) ||
1385                         flush.processor_mask == 0;
1386         } else {
1387                 if (unlikely(kvm_read_guest(kvm, ingpa, &flush_ex,
1388                                             sizeof(flush_ex))))
1389                         return HV_STATUS_INVALID_HYPERCALL_INPUT;
1390 
1391                 trace_kvm_hv_flush_tlb_ex(flush_ex.hv_vp_set.valid_bank_mask,
1392                                           flush_ex.hv_vp_set.format,
1393                                           flush_ex.address_space,
1394                                           flush_ex.flags);
1395 
1396                 valid_bank_mask = flush_ex.hv_vp_set.valid_bank_mask;
1397                 all_cpus = flush_ex.hv_vp_set.format !=
1398                         HV_GENERIC_SET_SPARSE_4K;
1399 
1400                 sparse_banks_len =
1401                         bitmap_weight((unsigned long *)&valid_bank_mask, 64) *
1402                         sizeof(sparse_banks[0]);
1403 
1404                 if (!sparse_banks_len && !all_cpus)
1405                         goto ret_success;
1406 
1407                 if (!all_cpus &&
1408                     kvm_read_guest(kvm,
1409                                    ingpa + offsetof(struct hv_tlb_flush_ex,
1410                                                     hv_vp_set.bank_contents),
1411                                    sparse_banks,
1412                                    sparse_banks_len))
1413                         return HV_STATUS_INVALID_HYPERCALL_INPUT;
1414         }
1415 
1416         cpumask_clear(&hv_vcpu->tlb_flush);
1417 
1418         vcpu_mask = all_cpus ? NULL :
1419                 sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask,
1420                                         vp_bitmap, vcpu_bitmap);
1421 
1422         /*
1423          * vcpu->arch.cr3 may not be up-to-date for running vCPUs, so we can't
1424          * analyze it here; flush the TLB regardless of the address space.
1425          */
1426         kvm_make_vcpus_request_mask(kvm,
1427                                     KVM_REQ_TLB_FLUSH | KVM_REQUEST_NO_WAKEUP,
1428                                     vcpu_mask, &hv_vcpu->tlb_flush);
1429 
1430 ret_success:
1431         /* We always do a full TLB flush, so report rep_done = rep_cnt. */
1432         return (u64)HV_STATUS_SUCCESS |
1433                 ((u64)rep_cnt << HV_HYPERCALL_REP_COMP_OFFSET);
1434 }
1435 
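/*
 * Illustrative sketch (standalone C, not part of this file): the rep
 * hypercall result format produced at ret_success above. The status
 * occupies bits 15:0 and the "reps completed" count bits 43:32; the
 * handler always performs a full flush, so it reports all rep_cnt reps
 * as complete.
 */
#include <stdint.h>

#define SKETCH_REP_COMP_OFFSET 32       /* HV_HYPERCALL_REP_COMP_OFFSET */

static uint64_t hv_rep_result(uint16_t status, uint16_t reps_done)
{
        return (uint64_t)status |
               ((uint64_t)(reps_done & 0xfff) << SKETCH_REP_COMP_OFFSET);
}
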
1436 static void kvm_send_ipi_to_many(struct kvm *kvm, u32 vector,
1437                                  unsigned long *vcpu_bitmap)
1438 {
1439         struct kvm_lapic_irq irq = {
1440                 .delivery_mode = APIC_DM_FIXED,
1441                 .vector = vector
1442         };
1443         struct kvm_vcpu *vcpu;
1444         int i;
1445 
1446         kvm_for_each_vcpu(i, vcpu, kvm) {
1447                 if (vcpu_bitmap && !test_bit(i, vcpu_bitmap))
1448                         continue;
1449 
1450                 /* kvm_apic_set_irq() fails only when the APIC is disabled. */
1451                 kvm_apic_set_irq(vcpu, &irq, NULL);
1452         }
1453 }
1454 
1455 static u64 kvm_hv_send_ipi(struct kvm_vcpu *current_vcpu, u64 ingpa, u64 outgpa,
1456                            bool ex, bool fast)
1457 {
1458         struct kvm *kvm = current_vcpu->kvm;
1459         struct hv_send_ipi_ex send_ipi_ex;
1460         struct hv_send_ipi send_ipi;
1461         u64 vp_bitmap[KVM_HV_MAX_SPARSE_VCPU_SET_BITS];
1462         DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
1463         unsigned long *vcpu_mask;
1464         unsigned long valid_bank_mask;
1465         u64 sparse_banks[64];
1466         int sparse_banks_len;
1467         u32 vector;
1468         bool all_cpus;
1469 
1470         if (!ex) {
1471                 if (!fast) {
1472                         if (unlikely(kvm_read_guest(kvm, ingpa, &send_ipi,
1473                                                     sizeof(send_ipi))))
1474                                 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1475                         sparse_banks[0] = send_ipi.cpu_mask;
1476                         vector = send_ipi.vector;
1477                 } else {
1478                         /* The 'reserved' part of hv_send_ipi must be 0. */
1479                         if (unlikely(ingpa >> 32 != 0))
1480                                 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1481                         sparse_banks[0] = outgpa;
1482                         vector = (u32)ingpa;
1483                 }
1484                 all_cpus = false;
1485                 valid_bank_mask = BIT_ULL(0);
1486 
1487                 trace_kvm_hv_send_ipi(vector, sparse_banks[0]);
1488         } else {
1489                 if (unlikely(kvm_read_guest(kvm, ingpa, &send_ipi_ex,
1490                                             sizeof(send_ipi_ex))))
1491                         return HV_STATUS_INVALID_HYPERCALL_INPUT;
1492 
1493                 trace_kvm_hv_send_ipi_ex(send_ipi_ex.vector,
1494                                          send_ipi_ex.vp_set.format,
1495                                          send_ipi_ex.vp_set.valid_bank_mask);
1496 
1497                 vector = send_ipi_ex.vector;
1498                 valid_bank_mask = send_ipi_ex.vp_set.valid_bank_mask;
1499                 sparse_banks_len = bitmap_weight(&valid_bank_mask, 64) *
1500                         sizeof(sparse_banks[0]);
1501 
1502                 all_cpus = send_ipi_ex.vp_set.format == HV_GENERIC_SET_ALL;
1503 
1504                 if (!sparse_banks_len && !all_cpus)
1505                         goto ret_success;
1506 
1507                 if (!all_cpus &&
1508                     kvm_read_guest(kvm,
1509                                    ingpa + offsetof(struct hv_send_ipi_ex,
1510                                                     vp_set.bank_contents),
1511                                    sparse_banks,
1512                                    sparse_banks_len))
1513                         return HV_STATUS_INVALID_HYPERCALL_INPUT;
1514         }
1515 
1516         if ((vector < HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR))
1517                 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1518 
1519         vcpu_mask = all_cpus ? NULL :
1520                 sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask,
1521                                         vp_bitmap, vcpu_bitmap);
1522 
1523         kvm_send_ipi_to_many(kvm, vector, vcpu_mask);
1524 
1525 ret_success:
1526         return HV_STATUS_SUCCESS;
1527 }
1528 
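/*
 * Illustrative sketch (standalone C, not part of this file): the fast
 * variant of HVCALL_SEND_IPI handled in the !ex branch above passes its
 * input in the two parameter registers instead of guest memory: the
 * vector in the low 32 bits of the first parameter (upper bits
 * reserved-zero, which the handler checks) and the 64-bit CPU mask in
 * the second, which is why outgpa is used as the mask.
 */
#include <stdint.h>

struct sketch_fast_ipi {
        uint64_t param1;        /* vector in bits 31:0, bits 63:32 zero */
        uint64_t param2;        /* 64-bit CPU mask */
};

static struct sketch_fast_ipi encode_fast_ipi(uint32_t vector,
                                              uint64_t cpu_mask)
{
        struct sketch_fast_ipi p = {
                .param1 = vector,
                .param2 = cpu_mask,
        };

        return p;
}
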
1529 bool kvm_hv_hypercall_enabled(struct kvm *kvm)
1530 {
1531         return READ_ONCE(kvm->arch.hyperv.hv_hypercall) & HV_X64_MSR_HYPERCALL_ENABLE;
1532 }
1533 
1534 static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
1535 {
1536         bool longmode;
1537 
1538         longmode = is_64_bit_mode(vcpu);
1539         if (longmode)
1540                 kvm_rax_write(vcpu, result);
1541         else {
1542                 kvm_rdx_write(vcpu, result >> 32);
1543                 kvm_rax_write(vcpu, result & 0xffffffff);
1544         }
1545 }
1546 
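/*
 * Illustrative sketch (standalone C, not part of this file): the
 * guest-side inverse of the split above; a 32-bit caller reassembles
 * the 64-bit result from EDX:EAX, while a 64-bit caller reads it
 * straight from RAX.
 */
#include <stdint.h>

static uint64_t hv_result_from_edx_eax(uint32_t edx, uint32_t eax)
{
        return ((uint64_t)edx << 32) | eax;
}
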
1547 static int kvm_hv_hypercall_complete(struct kvm_vcpu *vcpu, u64 result)
1548 {
1549         kvm_hv_hypercall_set_result(vcpu, result);
1550         ++vcpu->stat.hypercalls;
1551         return kvm_skip_emulated_instruction(vcpu);
1552 }
1553 
1554 static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
1555 {
1556         return kvm_hv_hypercall_complete(vcpu, vcpu->run->hyperv.u.hcall.result);
1557 }
1558 
1559 static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, bool fast, u64 param)
1560 {
1561         struct eventfd_ctx *eventfd;
1562 
1563         if (unlikely(!fast)) {
1564                 int ret;
1565                 gpa_t gpa = param;
1566 
1567                 if ((gpa & (__alignof__(param) - 1)) ||
1568                     offset_in_page(gpa) + sizeof(param) > PAGE_SIZE)
1569                         return HV_STATUS_INVALID_ALIGNMENT;
1570 
1571                 ret = kvm_vcpu_read_guest(vcpu, gpa, &param, sizeof(param));
1572                 if (ret < 0)
1573                         return HV_STATUS_INVALID_ALIGNMENT;
1574         }
1575 
1576         /*
1577          * Per spec, bits 32-47 contain the extra "flag number".  However, we
1578          * have no use for it, and in all known use cases it is zero, so just
1579          * report lookup failure if it isn't.
1580          */
1581         if (param & 0xffff00000000ULL)
1582                 return HV_STATUS_INVALID_PORT_ID;
1583         /* remaining bits are reserved-zero */
1584         if (param & ~KVM_HYPERV_CONN_ID_MASK)
1585                 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1586 
1587         /* the eventfd is protected by vcpu->kvm->srcu, but conn_to_evt isn't */
1588         rcu_read_lock();
1589         eventfd = idr_find(&vcpu->kvm->arch.hyperv.conn_to_evt, param);
1590         rcu_read_unlock();
1591         if (!eventfd)
1592                 return HV_STATUS_INVALID_PORT_ID;
1593 
1594         eventfd_signal(eventfd, 1);
1595         return HV_STATUS_SUCCESS;
1596 }
1597 
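/*
 * Illustrative sketch (standalone C, not part of this file): the
 * HVCALL_SIGNAL_EVENT parameter layout validated above. The connection
 * ID occupies the bits covered by KVM_HYPERV_CONN_ID_MASK (0x00ffffff
 * in <linux/kvm.h>), the "flag number" occupies bits 47:32 and must be
 * zero, and all remaining bits are reserved-zero.
 */
#include <stdbool.h>
#include <stdint.h>

#define SKETCH_CONN_ID_MASK 0x00ffffffULL       /* KVM_HYPERV_CONN_ID_MASK */

static bool signal_event_param_ok(uint64_t param, uint32_t *conn_id)
{
        if (param & 0xffff00000000ULL)          /* nonzero flag number */
                return false;
        if (param & ~SKETCH_CONN_ID_MASK)       /* reserved bits set */
                return false;

        *conn_id = (uint32_t)param;
        return true;
}
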
1598 int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
1599 {
1600         u64 param, ingpa, outgpa, ret = HV_STATUS_SUCCESS;
1601         uint16_t code, rep_idx, rep_cnt;
1602         bool fast, rep;
1603 
1604         /*
1605          * Per the Hyper-V spec, a hypercall from non-zero CPL or from
1606          * real mode generates #UD.
1607          */
1608         if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
1609                 kvm_queue_exception(vcpu, UD_VECTOR);
1610                 return 1;
1611         }
1612 
1613 #ifdef CONFIG_X86_64
1614         if (is_64_bit_mode(vcpu)) {
1615                 param = kvm_rcx_read(vcpu);
1616                 ingpa = kvm_rdx_read(vcpu);
1617                 outgpa = kvm_r8_read(vcpu);
1618         } else
1619 #endif
1620         {
1621                 param = ((u64)kvm_rdx_read(vcpu) << 32) |
1622                         (kvm_rax_read(vcpu) & 0xffffffff);
1623                 ingpa = ((u64)kvm_rbx_read(vcpu) << 32) |
1624                         (kvm_rcx_read(vcpu) & 0xffffffff);
1625                 outgpa = ((u64)kvm_rdi_read(vcpu) << 32) |
1626                         (kvm_rsi_read(vcpu) & 0xffffffff);
1627         }
1628 
1629         code = param & 0xffff;
1630         fast = !!(param & HV_HYPERCALL_FAST_BIT);
1631         rep_cnt = (param >> HV_HYPERCALL_REP_COMP_OFFSET) & 0xfff;
1632         rep_idx = (param >> HV_HYPERCALL_REP_START_OFFSET) & 0xfff;
1633         rep = !!(rep_cnt || rep_idx);
1634 
1635         trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);
1636 
1637         switch (code) {
1638         case HVCALL_NOTIFY_LONG_SPIN_WAIT:
1639                 if (unlikely(rep)) {
1640                         ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
1641                         break;
1642                 }
1643                 kvm_vcpu_on_spin(vcpu, true);
1644                 break;
1645         case HVCALL_SIGNAL_EVENT:
1646                 if (unlikely(rep)) {
1647                         ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
1648                         break;
1649                 }
1650                 ret = kvm_hvcall_signal_event(vcpu, fast, ingpa);
1651                 if (ret != HV_STATUS_INVALID_PORT_ID)
1652                         break;
1653                 /* fall through - maybe userspace knows this conn_id. */
1654         case HVCALL_POST_MESSAGE:
1655                 /* don't bother userspace if it has no way to handle it */
1656                 if (unlikely(rep || !vcpu_to_synic(vcpu)->active)) {
1657                         ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
1658                         break;
1659                 }
1660                 vcpu->run->exit_reason = KVM_EXIT_HYPERV;
1661                 vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL;
1662                 vcpu->run->hyperv.u.hcall.input = param;
1663                 vcpu->run->hyperv.u.hcall.params[0] = ingpa;
1664                 vcpu->run->hyperv.u.hcall.params[1] = outgpa;
1665                 vcpu->arch.complete_userspace_io =
1666                                 kvm_hv_hypercall_complete_userspace;
1667                 return 0;
1668         case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST:
1669                 if (unlikely(fast || !rep_cnt || rep_idx)) {
1670                         ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
1671                         break;
1672                 }
1673                 ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, false);
1674                 break;
1675         case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE:
1676                 if (unlikely(fast || rep)) {
1677                         ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
1678                         break;
1679                 }
1680                 ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, false);
1681                 break;
1682         case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX:
1683                 if (unlikely(fast || !rep_cnt || rep_idx)) {
1684                         ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
1685                         break;
1686                 }
1687                 ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, true);
1688                 break;
1689         case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX:
1690                 if (unlikely(fast || rep)) {
1691                         ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
1692                         break;
1693                 }
1694                 ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, true);
1695                 break;
1696         case HVCALL_SEND_IPI:
1697                 if (unlikely(rep)) {
1698                         ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
1699                         break;
1700                 }
1701                 ret = kvm_hv_send_ipi(vcpu, ingpa, outgpa, false, fast);
1702                 break;
1703         case HVCALL_SEND_IPI_EX:
1704                 if (unlikely(fast || rep)) {
1705                         ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
1706                         break;
1707                 }
1708                 ret = kvm_hv_send_ipi(vcpu, ingpa, outgpa, true, false);
1709                 break;
1710         default:
1711                 ret = HV_STATUS_INVALID_HYPERCALL_CODE;
1712                 break;
1713         }
1714 
1715         return kvm_hv_hypercall_complete(vcpu, ret);
1716 }
1717 
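/*
 * Illustrative sketch (standalone C, not part of this file): the
 * hypercall input-value layout decoded above. The call code sits in
 * bits 15:0, the fast-call flag in bit 16, the rep count in bits 43:32
 * and the rep start index in bits 59:48 (hence the two 0xfff masks).
 */
#include <stdbool.h>
#include <stdint.h>

#define SKETCH_FAST_BIT         (1ULL << 16)    /* HV_HYPERCALL_FAST_BIT */
#define SKETCH_REP_COMP_OFFSET  32
#define SKETCH_REP_START_OFFSET 48

struct sketch_hcall {
        uint16_t code;
        bool fast;
        uint16_t rep_cnt;
        uint16_t rep_idx;
};

static struct sketch_hcall decode_hcall_input(uint64_t param)
{
        struct sketch_hcall in = {
                .code    = param & 0xffff,
                .fast    = !!(param & SKETCH_FAST_BIT),
                .rep_cnt = (param >> SKETCH_REP_COMP_OFFSET) & 0xfff,
                .rep_idx = (param >> SKETCH_REP_START_OFFSET) & 0xfff,
        };

        return in;
}
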
1718 void kvm_hv_init_vm(struct kvm *kvm)
1719 {
1720         mutex_init(&kvm->arch.hyperv.hv_lock);
1721         idr_init(&kvm->arch.hyperv.conn_to_evt);
1722 }
1723 
1724 void kvm_hv_destroy_vm(struct kvm *kvm)
1725 {
1726         struct eventfd_ctx *eventfd;
1727         int i;
1728 
1729         idr_for_each_entry(&kvm->arch.hyperv.conn_to_evt, eventfd, i)
1730                 eventfd_ctx_put(eventfd);
1731         idr_destroy(&kvm->arch.hyperv.conn_to_evt);
1732 }
1733 
1734 static int kvm_hv_eventfd_assign(struct kvm *kvm, u32 conn_id, int fd)
1735 {
1736         struct kvm_hv *hv = &kvm->arch.hyperv;
1737         struct eventfd_ctx *eventfd;
1738         int ret;
1739 
1740         eventfd = eventfd_ctx_fdget(fd);
1741         if (IS_ERR(eventfd))
1742                 return PTR_ERR(eventfd);
1743 
1744         mutex_lock(&hv->hv_lock);
1745         ret = idr_alloc(&hv->conn_to_evt, eventfd, conn_id, conn_id + 1,
1746                         GFP_KERNEL_ACCOUNT);
1747         mutex_unlock(&hv->hv_lock);
1748 
1749         if (ret >= 0)
1750                 return 0;
1751 
1752         if (ret == -ENOSPC)
1753                 ret = -EEXIST;
1754         eventfd_ctx_put(eventfd);
1755         return ret;
1756 }
1757 
1758 static int kvm_hv_eventfd_deassign(struct kvm *kvm, u32 conn_id)
1759 {
1760         struct kvm_hv *hv = &kvm->arch.hyperv;
1761         struct eventfd_ctx *eventfd;
1762 
1763         mutex_lock(&hv->hv_lock);
1764         eventfd = idr_remove(&hv->conn_to_evt, conn_id);
1765         mutex_unlock(&hv->hv_lock);
1766 
1767         if (!eventfd)
1768                 return -ENOENT;
1769 
1770         synchronize_srcu(&kvm->srcu);
1771         eventfd_ctx_put(eventfd);
1772         return 0;
1773 }
1774 
1775 int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args)
1776 {
1777         if ((args->flags & ~KVM_HYPERV_EVENTFD_DEASSIGN) ||
1778             (args->conn_id & ~KVM_HYPERV_CONN_ID_MASK))
1779                 return -EINVAL;
1780 
1781         if (args->flags == KVM_HYPERV_EVENTFD_DEASSIGN)
1782                 return kvm_hv_eventfd_deassign(kvm, args->conn_id);
1783         return kvm_hv_eventfd_assign(kvm, args->conn_id, args->fd);
1784 }
1785 
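/*
 * Illustrative sketch (standalone user-space C, not part of this file):
 * wiring a connection ID to an eventfd through the KVM_HYPERV_EVENTFD VM
 * ioctl handled above, assuming vm_fd is an open KVM VM fd. Once
 * assigned, a guest HVCALL_SIGNAL_EVENT on conn_id signals the eventfd
 * without a userspace exit.
 */
#include <stdint.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int hv_assign_event(int vm_fd, uint32_t conn_id)
{
        int efd = eventfd(0, EFD_CLOEXEC);
        struct kvm_hyperv_eventfd hvevfd = {
                .conn_id = conn_id,     /* must fit KVM_HYPERV_CONN_ID_MASK */
                .fd = efd,
                .flags = 0,
        };

        if (efd < 0)
                return -1;

        return ioctl(vm_fd, KVM_HYPERV_EVENTFD, &hvevfd);
}
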
1786 int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
1787                                 struct kvm_cpuid_entry2 __user *entries)
1788 {
1789         uint16_t evmcs_ver = 0;
1790         struct kvm_cpuid_entry2 cpuid_entries[] = {
1791                 { .function = HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS },
1792                 { .function = HYPERV_CPUID_INTERFACE },
1793                 { .function = HYPERV_CPUID_VERSION },
1794                 { .function = HYPERV_CPUID_FEATURES },
1795                 { .function = HYPERV_CPUID_ENLIGHTMENT_INFO },
1796                 { .function = HYPERV_CPUID_IMPLEMENT_LIMITS },
1797                 { .function = HYPERV_CPUID_NESTED_FEATURES },
1798         };
1799         int i, nent = ARRAY_SIZE(cpuid_entries);
1800 
1801         if (kvm_x86_ops->nested_get_evmcs_version)
1802                 evmcs_ver = kvm_x86_ops->nested_get_evmcs_version(vcpu);
1803 
1804         /* Skip NESTED_FEATURES if eVMCS is not supported */
1805         if (!evmcs_ver)
1806                 --nent;
1807 
1808         if (cpuid->nent < nent)
1809                 return -E2BIG;
1810 
1811         if (cpuid->nent > nent)
1812                 cpuid->nent = nent;
1813 
1814         for (i = 0; i < nent; i++) {
1815                 struct kvm_cpuid_entry2 *ent = &cpuid_entries[i];
1816                 u32 signature[3];
1817 
1818                 switch (ent->function) {
1819                 case HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS:
1820                         memcpy(signature, "Linux KVM Hv", 12);
1821 
1822                         ent->eax = HYPERV_CPUID_NESTED_FEATURES;
1823                         ent->ebx = signature[0];
1824                         ent->ecx = signature[1];
1825                         ent->edx = signature[2];
1826                         break;
1827 
1828                 case HYPERV_CPUID_INTERFACE:
1829                         memcpy(signature, "Hv#1\0\0\0\0\0\0\0\0", 12);
1830                         ent->eax = signature[0];
1831                         break;
1832 
1833                 case HYPERV_CPUID_VERSION:
1834                         /*
1835                          * We implement some Hyper-V 2016 functions, so
1836                          * advertise build 14393 (0x3839), version 10.0.
1837                          */
1838                         ent->eax = 0x00003839;
1839                         ent->ebx = 0x000A0000;
1840                         break;
1841 
1842                 case HYPERV_CPUID_FEATURES:
1843                         ent->eax |= HV_X64_MSR_VP_RUNTIME_AVAILABLE;
1844                         ent->eax |= HV_MSR_TIME_REF_COUNT_AVAILABLE;
1845                         ent->eax |= HV_X64_MSR_SYNIC_AVAILABLE;
1846                         ent->eax |= HV_MSR_SYNTIMER_AVAILABLE;
1847                         ent->eax |= HV_X64_MSR_APIC_ACCESS_AVAILABLE;
1848                         ent->eax |= HV_X64_MSR_HYPERCALL_AVAILABLE;
1849                         ent->eax |= HV_X64_MSR_VP_INDEX_AVAILABLE;
1850                         ent->eax |= HV_X64_MSR_RESET_AVAILABLE;
1851                         ent->eax |= HV_MSR_REFERENCE_TSC_AVAILABLE;
1852                         ent->eax |= HV_X64_ACCESS_FREQUENCY_MSRS;
1853                         ent->eax |= HV_X64_ACCESS_REENLIGHTENMENT;
1854 
1855                         ent->ebx |= HV_X64_POST_MESSAGES;
1856                         ent->ebx |= HV_X64_SIGNAL_EVENTS;
1857 
1858                         ent->edx |= HV_FEATURE_FREQUENCY_MSRS_AVAILABLE;
1859                         ent->edx |= HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE;
1860 
1861                         /*
1862                          * Direct-mode synthetic timers only make sense with
1863                          * an in-kernel LAPIC.
1864                          */
1865                         if (lapic_in_kernel(vcpu))
1866                                 ent->edx |= HV_STIMER_DIRECT_MODE_AVAILABLE;
1867 
1868                         break;
1869 
1870                 case HYPERV_CPUID_ENLIGHTMENT_INFO:
1871                         ent->eax |= HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED;
1872                         ent->eax |= HV_X64_APIC_ACCESS_RECOMMENDED;
1873                         ent->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED;
1874                         ent->eax |= HV_X64_CLUSTER_IPI_RECOMMENDED;
1875                         ent->eax |= HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED;
1876                         if (evmcs_ver)
1877                                 ent->eax |= HV_X64_ENLIGHTENED_VMCS_RECOMMENDED;
1878                         if (!cpu_smt_possible())
1879                                 ent->eax |= HV_X64_NO_NONARCH_CORESHARING;
1880                         /*
1881                          * Default number of spinlock retry attempts, matching
1882                          * Hyper-V 2016.
1883                          */
1884                         ent->ebx = 0x00000FFF;
1885 
1886                         break;
1887 
1888                 case HYPERV_CPUID_IMPLEMENT_LIMITS:
1889                         /* Maximum number of virtual processors */
1890                         ent->eax = KVM_MAX_VCPUS;
1891                         /*
1892                          * Maximum number of logical processors, matching
1893                          * Hyper-V 2016.
1894                          */
1895                         ent->ebx = 64;
1896 
1897                         break;
1898 
1899                 case HYPERV_CPUID_NESTED_FEATURES:
1900                         ent->eax = evmcs_ver;
1901 
1902                         break;
1903 
1904                 default:
1905                         break;
1906                 }
1907         }
1908 
1909         if (copy_to_user(entries, cpuid_entries,
1910                          nent * sizeof(struct kvm_cpuid_entry2)))
1911                 return -EFAULT;
1912 
1913         return 0;
1914 }
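
/*
 * Illustrative sketch (standalone user-space C, not part of this file):
 * querying the leaves filled in above through the
 * KVM_GET_SUPPORTED_HV_CPUID vCPU ioctl; seven entries is enough for
 * every leaf this handler can return.
 */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static void dump_hv_cpuid(int vcpu_fd)
{
        struct {
                struct kvm_cpuid2 hdr;
                struct kvm_cpuid_entry2 ent[7];
        } buf;
        unsigned int i;

        memset(&buf, 0, sizeof(buf));
        buf.hdr.nent = 7;

        if (ioctl(vcpu_fd, KVM_GET_SUPPORTED_HV_CPUID, &buf.hdr))
                return;

        for (i = 0; i < buf.hdr.nent; i++)
                printf("0x%08x: %08x %08x %08x %08x\n",
                       buf.ent[i].function, buf.ent[i].eax, buf.ent[i].ebx,
                       buf.ent[i].ecx, buf.ent[i].edx);
}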
