arch/mips/kvm/mips.c

DEFINITIONS

This source file includes the following definitions.
  1. kvm_guest_mode_change_trace_reg
  2. kvm_guest_mode_change_trace_unreg
  3. kvm_arch_vcpu_runnable
  4. kvm_arch_vcpu_in_kernel
  5. kvm_arch_vcpu_should_kick
  6. kvm_arch_hardware_enable
  7. kvm_arch_hardware_disable
  8. kvm_arch_hardware_setup
  9. kvm_arch_check_processor_compat
  10. kvm_arch_init_vm
  11. kvm_mips_free_vcpus
  12. kvm_mips_free_gpa_pt
  13. kvm_arch_destroy_vm
  14. kvm_arch_dev_ioctl
  15. kvm_arch_create_memslot
  16. kvm_arch_flush_shadow_all
  17. kvm_arch_flush_shadow_memslot
  18. kvm_arch_prepare_memory_region
  19. kvm_arch_commit_memory_region
  20. dump_handler
  21. kvm_arch_vcpu_create
  22. kvm_arch_vcpu_free
  23. kvm_arch_vcpu_destroy
  24. kvm_arch_vcpu_ioctl_set_guest_debug
  25. kvm_arch_vcpu_ioctl_run
  26. kvm_vcpu_ioctl_interrupt
  27. kvm_arch_vcpu_ioctl_get_mpstate
  28. kvm_arch_vcpu_ioctl_set_mpstate
  29. kvm_mips_num_regs
  30. kvm_mips_copy_reg_indices
  31. kvm_mips_get_reg
  32. kvm_mips_set_reg
  33. kvm_vcpu_ioctl_enable_cap
  34. kvm_arch_vcpu_async_ioctl
  35. kvm_arch_vcpu_ioctl
  36. kvm_vm_ioctl_get_dirty_log
  37. kvm_vm_ioctl_clear_dirty_log
  38. kvm_arch_vm_ioctl
  39. kvm_arch_init
  40. kvm_arch_exit
  41. kvm_arch_vcpu_ioctl_get_sregs
  42. kvm_arch_vcpu_ioctl_set_sregs
  43. kvm_arch_vcpu_postcreate
  44. kvm_arch_vcpu_ioctl_get_fpu
  45. kvm_arch_vcpu_ioctl_set_fpu
  46. kvm_arch_vcpu_fault
  47. kvm_vm_ioctl_check_extension
  48. kvm_cpu_has_pending_timer
  49. kvm_arch_vcpu_dump_regs
  50. kvm_arch_vcpu_ioctl_set_regs
  51. kvm_arch_vcpu_ioctl_get_regs
  52. kvm_mips_comparecount_func
  53. kvm_mips_comparecount_wakeup
  54. kvm_arch_vcpu_init
  55. kvm_arch_vcpu_uninit
  56. kvm_arch_vcpu_ioctl_translate
  57. kvm_arch_vcpu_setup
  58. kvm_mips_set_c0_status
  59. kvm_mips_handle_exit
  60. kvm_own_fpu
  61. kvm_own_msa
  62. kvm_drop_fpu
  63. kvm_lose_fpu
  64. kvm_mips_csr_die_notify
  65. kvm_mips_init
  66. kvm_mips_exit

   1 /*
   2  * This file is subject to the terms and conditions of the GNU General Public
   3  * License.  See the file "COPYING" in the main directory of this archive
   4  * for more details.
   5  *
   6  * KVM/MIPS: MIPS specific KVM APIs
   7  *
   8  * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
   9  * Authors: Sanjay Lal <sanjayl@kymasys.com>
  10  */
  11 
  12 #include <linux/bitops.h>
  13 #include <linux/errno.h>
  14 #include <linux/err.h>
  15 #include <linux/kdebug.h>
  16 #include <linux/module.h>
  17 #include <linux/uaccess.h>
  18 #include <linux/vmalloc.h>
  19 #include <linux/sched/signal.h>
  20 #include <linux/fs.h>
  21 #include <linux/memblock.h>
  22 
  23 #include <asm/fpu.h>
  24 #include <asm/page.h>
  25 #include <asm/cacheflush.h>
  26 #include <asm/mmu_context.h>
  27 #include <asm/pgalloc.h>
  28 #include <asm/pgtable.h>
  29 
  30 #include <linux/kvm_host.h>
  31 
  32 #include "interrupt.h"
  33 #include "commpage.h"
  34 
  35 #define CREATE_TRACE_POINTS
  36 #include "trace.h"
  37 
  38 #ifndef VECTORSPACING
  39 #define VECTORSPACING 0x100     /* for EI/VI mode */
  40 #endif
  41 
  42 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x)
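/*
 * Exit counters exported through the generic KVM debugfs stats code; they
 * typically show up as files under /sys/kernel/debug/kvm/. All of them are
 * accounted per VCPU (KVM_STAT_VCPU).
 */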
  43 struct kvm_stats_debugfs_item debugfs_entries[] = {
  44         { "wait",         VCPU_STAT(wait_exits),         KVM_STAT_VCPU },
  45         { "cache",        VCPU_STAT(cache_exits),        KVM_STAT_VCPU },
  46         { "signal",       VCPU_STAT(signal_exits),       KVM_STAT_VCPU },
  47         { "interrupt",    VCPU_STAT(int_exits),          KVM_STAT_VCPU },
  48         { "cop_unusable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU },
  49         { "tlbmod",       VCPU_STAT(tlbmod_exits),       KVM_STAT_VCPU },
  50         { "tlbmiss_ld",   VCPU_STAT(tlbmiss_ld_exits),   KVM_STAT_VCPU },
  51         { "tlbmiss_st",   VCPU_STAT(tlbmiss_st_exits),   KVM_STAT_VCPU },
  52         { "addrerr_st",   VCPU_STAT(addrerr_st_exits),   KVM_STAT_VCPU },
  53         { "addrerr_ld",   VCPU_STAT(addrerr_ld_exits),   KVM_STAT_VCPU },
  54         { "syscall",      VCPU_STAT(syscall_exits),      KVM_STAT_VCPU },
  55         { "resvd_inst",   VCPU_STAT(resvd_inst_exits),   KVM_STAT_VCPU },
  56         { "break_inst",   VCPU_STAT(break_inst_exits),   KVM_STAT_VCPU },
  57         { "trap_inst",    VCPU_STAT(trap_inst_exits),    KVM_STAT_VCPU },
  58         { "msa_fpe",      VCPU_STAT(msa_fpe_exits),      KVM_STAT_VCPU },
  59         { "fpe",          VCPU_STAT(fpe_exits),          KVM_STAT_VCPU },
  60         { "msa_disabled", VCPU_STAT(msa_disabled_exits), KVM_STAT_VCPU },
  61         { "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU },
  62 #ifdef CONFIG_KVM_MIPS_VZ
  63         { "vz_gpsi",      VCPU_STAT(vz_gpsi_exits),      KVM_STAT_VCPU },
  64         { "vz_gsfc",      VCPU_STAT(vz_gsfc_exits),      KVM_STAT_VCPU },
  65         { "vz_hc",        VCPU_STAT(vz_hc_exits),        KVM_STAT_VCPU },
  66         { "vz_grr",       VCPU_STAT(vz_grr_exits),       KVM_STAT_VCPU },
  67         { "vz_gva",       VCPU_STAT(vz_gva_exits),       KVM_STAT_VCPU },
  68         { "vz_ghfc",      VCPU_STAT(vz_ghfc_exits),      KVM_STAT_VCPU },
  69         { "vz_gpa",       VCPU_STAT(vz_gpa_exits),       KVM_STAT_VCPU },
  70         { "vz_resvd",     VCPU_STAT(vz_resvd_exits),     KVM_STAT_VCPU },
  71 #endif
  72         { "halt_successful_poll", VCPU_STAT(halt_successful_poll), KVM_STAT_VCPU },
  73         { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), KVM_STAT_VCPU },
  74         { "halt_poll_invalid", VCPU_STAT(halt_poll_invalid), KVM_STAT_VCPU },
  75         { "halt_wakeup",  VCPU_STAT(halt_wakeup),        KVM_STAT_VCPU },
  76         {NULL}
  77 };
  78 
  79 bool kvm_trace_guest_mode_change;
  80 
  81 int kvm_guest_mode_change_trace_reg(void)
  82 {
  83         kvm_trace_guest_mode_change = 1;
  84         return 0;
  85 }
  86 
  87 void kvm_guest_mode_change_trace_unreg(void)
  88 {
  89         kvm_trace_guest_mode_change = 0;
  90 }
  91 
  92 /*
   93  * XXXKYMA: We are simulating a processor that has the WII bit set in
  94  * Config7, so we are "runnable" if interrupts are pending
  95  */
  96 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
  97 {
  98         return !!(vcpu->arch.pending_exceptions);
  99 }
 100 
 101 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
 102 {
 103         return false;
 104 }
 105 
 106 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
 107 {
 108         return 1;
 109 }
 110 
 111 int kvm_arch_hardware_enable(void)
 112 {
 113         return kvm_mips_callbacks->hardware_enable();
 114 }
 115 
 116 void kvm_arch_hardware_disable(void)
 117 {
 118         kvm_mips_callbacks->hardware_disable();
 119 }
 120 
 121 int kvm_arch_hardware_setup(void)
 122 {
 123         return 0;
 124 }
 125 
 126 int kvm_arch_check_processor_compat(void)
 127 {
 128         return 0;
 129 }
 130 
 131 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 132 {
 133         switch (type) {
 134 #ifdef CONFIG_KVM_MIPS_VZ
 135         case KVM_VM_MIPS_VZ:
 136 #else
 137         case KVM_VM_MIPS_TE:
 138 #endif
 139                 break;
 140         default:
 141                 /* Unsupported KVM type */
 142                 return -EINVAL;
  143         }
 144 
 145         /* Allocate page table to map GPA -> RPA */
 146         kvm->arch.gpa_mm.pgd = kvm_pgd_alloc();
 147         if (!kvm->arch.gpa_mm.pgd)
 148                 return -ENOMEM;
 149 
 150         return 0;
 151 }
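
/*
 * Userspace side, for illustration only (a sketch, not part of this file;
 * kvm_fd is a placeholder for an open /dev/kvm descriptor): the VM type
 * validated above is the argument userspace passes to KVM_CREATE_VM, e.g.:
 *
 *     int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, KVM_VM_MIPS_VZ);
 */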
 152 
 153 void kvm_mips_free_vcpus(struct kvm *kvm)
 154 {
 155         unsigned int i;
 156         struct kvm_vcpu *vcpu;
 157 
 158         kvm_for_each_vcpu(i, vcpu, kvm) {
 159                 kvm_arch_vcpu_free(vcpu);
 160         }
 161 
 162         mutex_lock(&kvm->lock);
 163 
 164         for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
 165                 kvm->vcpus[i] = NULL;
 166 
 167         atomic_set(&kvm->online_vcpus, 0);
 168 
 169         mutex_unlock(&kvm->lock);
 170 }
 171 
 172 static void kvm_mips_free_gpa_pt(struct kvm *kvm)
 173 {
 174         /* It should always be safe to remove after flushing the whole range */
 175         WARN_ON(!kvm_mips_flush_gpa_pt(kvm, 0, ~0));
 176         pgd_free(NULL, kvm->arch.gpa_mm.pgd);
 177 }
 178 
 179 void kvm_arch_destroy_vm(struct kvm *kvm)
 180 {
 181         kvm_mips_free_vcpus(kvm);
 182         kvm_mips_free_gpa_pt(kvm);
 183 }
 184 
 185 long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl,
 186                         unsigned long arg)
 187 {
 188         return -ENOIOCTLCMD;
 189 }
 190 
 191 int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
 192                             unsigned long npages)
 193 {
 194         return 0;
 195 }
 196 
 197 void kvm_arch_flush_shadow_all(struct kvm *kvm)
 198 {
 199         /* Flush whole GPA */
 200         kvm_mips_flush_gpa_pt(kvm, 0, ~0);
 201 
 202         /* Let implementation do the rest */
 203         kvm_mips_callbacks->flush_shadow_all(kvm);
 204 }
 205 
 206 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
 207                                    struct kvm_memory_slot *slot)
 208 {
 209         /*
 210          * The slot has been made invalid (ready for moving or deletion), so we
 211          * need to ensure that it can no longer be accessed by any guest VCPUs.
 212          */
 213 
 214         spin_lock(&kvm->mmu_lock);
 215         /* Flush slot from GPA */
 216         kvm_mips_flush_gpa_pt(kvm, slot->base_gfn,
 217                               slot->base_gfn + slot->npages - 1);
 218         /* Let implementation do the rest */
 219         kvm_mips_callbacks->flush_shadow_memslot(kvm, slot);
 220         spin_unlock(&kvm->mmu_lock);
 221 }
 222 
 223 int kvm_arch_prepare_memory_region(struct kvm *kvm,
 224                                    struct kvm_memory_slot *memslot,
 225                                    const struct kvm_userspace_memory_region *mem,
 226                                    enum kvm_mr_change change)
 227 {
 228         return 0;
 229 }
 230 
 231 void kvm_arch_commit_memory_region(struct kvm *kvm,
 232                                    const struct kvm_userspace_memory_region *mem,
 233                                    const struct kvm_memory_slot *old,
 234                                    const struct kvm_memory_slot *new,
 235                                    enum kvm_mr_change change)
 236 {
 237         int needs_flush;
 238 
 239         kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n",
 240                   __func__, kvm, mem->slot, mem->guest_phys_addr,
 241                   mem->memory_size, mem->userspace_addr);
 242 
 243         /*
 244          * If dirty page logging is enabled, write protect all pages in the slot
 245          * ready for dirty logging.
 246          *
 247          * There is no need to do this in any of the following cases:
 248          * CREATE:      No dirty mappings will already exist.
 249          * MOVE/DELETE: The old mappings will already have been cleaned up by
 250          *              kvm_arch_flush_shadow_memslot()
 251          */
 252         if (change == KVM_MR_FLAGS_ONLY &&
 253             (!(old->flags & KVM_MEM_LOG_DIRTY_PAGES) &&
 254              new->flags & KVM_MEM_LOG_DIRTY_PAGES)) {
 255                 spin_lock(&kvm->mmu_lock);
 256                 /* Write protect GPA page table entries */
 257                 needs_flush = kvm_mips_mkclean_gpa_pt(kvm, new->base_gfn,
 258                                         new->base_gfn + new->npages - 1);
 259                 /* Let implementation do the rest */
 260                 if (needs_flush)
 261                         kvm_mips_callbacks->flush_shadow_memslot(kvm, new);
 262                 spin_unlock(&kvm->mmu_lock);
 263         }
 264 }
 265 
 266 static inline void dump_handler(const char *symbol, void *start, void *end)
 267 {
 268         u32 *p;
 269 
 270         pr_debug("LEAF(%s)\n", symbol);
 271 
 272         pr_debug("\t.set push\n");
 273         pr_debug("\t.set noreorder\n");
 274 
 275         for (p = start; p < (u32 *)end; ++p)
 276                 pr_debug("\t.word\t0x%08x\t\t# %p\n", *p, p);
 277 
 278         pr_debug("\t.set\tpop\n");
 279 
 280         pr_debug("\tEND(%s)\n", symbol);
 281 }
 282 
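/*
 * Layout of the guest exception base (gebase) as built by
 * kvm_arch_vcpu_create() below:
 *
 *   gebase + 0x000 (+ 0x080 on 64-bit VZ):  TLB/XTLB refill handler
 *   gebase + 0x180:                         general exception entry
 *   gebase + 0x200 + i * VECTORSPACING:     vectored interrupt entries, i = 0..7
 *   gebase + 0x2000:                        general exit handler, followed by
 *                                           the guest entry (vcpu_run) routine
 */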
 283 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
 284 {
 285         int err, size;
 286         void *gebase, *p, *handler, *refill_start, *refill_end;
 287         int i;
 288 
 289         struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
 290 
 291         if (!vcpu) {
 292                 err = -ENOMEM;
 293                 goto out;
 294         }
 295 
 296         err = kvm_vcpu_init(vcpu, kvm, id);
 297 
 298         if (err)
 299                 goto out_free_cpu;
 300 
 301         kvm_debug("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu);
 302 
 303         /*
 304          * Allocate space for host mode exception handlers that handle
 305          * guest mode exits
 306          */
 307         if (cpu_has_veic || cpu_has_vint)
 308                 size = 0x200 + VECTORSPACING * 64;
 309         else
 310                 size = 0x4000;
 311 
 312         gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);
 313 
 314         if (!gebase) {
 315                 err = -ENOMEM;
 316                 goto out_uninit_cpu;
 317         }
 318         kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n",
 319                   ALIGN(size, PAGE_SIZE), gebase);
 320 
 321         /*
 322          * Check new ebase actually fits in CP0_EBase. The lack of a write gate
 323          * limits us to the low 512MB of physical address space. If the memory
 324          * we allocate is out of range, just give up now.
 325          */
 326         if (!cpu_has_ebase_wg && virt_to_phys(gebase) >= 0x20000000) {
 327                 kvm_err("CP0_EBase.WG required for guest exception base %pK\n",
 328                         gebase);
 329                 err = -ENOMEM;
 330                 goto out_free_gebase;
 331         }
 332 
 333         /* Save new ebase */
 334         vcpu->arch.guest_ebase = gebase;
 335 
 336         /* Build guest exception vectors dynamically in unmapped memory */
 337         handler = gebase + 0x2000;
 338 
 339         /* TLB refill (or XTLB refill on 64-bit VZ where KX=1) */
 340         refill_start = gebase;
 341         if (IS_ENABLED(CONFIG_KVM_MIPS_VZ) && IS_ENABLED(CONFIG_64BIT))
 342                 refill_start += 0x080;
 343         refill_end = kvm_mips_build_tlb_refill_exception(refill_start, handler);
 344 
 345         /* General Exception Entry point */
 346         kvm_mips_build_exception(gebase + 0x180, handler);
 347 
 348         /* For vectored interrupts poke the exception code @ all offsets 0-7 */
 349         for (i = 0; i < 8; i++) {
 350                 kvm_debug("L1 Vectored handler @ %p\n",
 351                           gebase + 0x200 + (i * VECTORSPACING));
 352                 kvm_mips_build_exception(gebase + 0x200 + i * VECTORSPACING,
 353                                          handler);
 354         }
 355 
 356         /* General exit handler */
 357         p = handler;
 358         p = kvm_mips_build_exit(p);
 359 
 360         /* Guest entry routine */
 361         vcpu->arch.vcpu_run = p;
 362         p = kvm_mips_build_vcpu_run(p);
 363 
 364         /* Dump the generated code */
 365         pr_debug("#include <asm/asm.h>\n");
 366         pr_debug("#include <asm/regdef.h>\n");
 367         pr_debug("\n");
 368         dump_handler("kvm_vcpu_run", vcpu->arch.vcpu_run, p);
 369         dump_handler("kvm_tlb_refill", refill_start, refill_end);
 370         dump_handler("kvm_gen_exc", gebase + 0x180, gebase + 0x200);
 371         dump_handler("kvm_exit", gebase + 0x2000, vcpu->arch.vcpu_run);
 372 
 373         /* Invalidate the icache for these ranges */
 374         flush_icache_range((unsigned long)gebase,
 375                            (unsigned long)gebase + ALIGN(size, PAGE_SIZE));
 376 
 377         /*
  378          * Allocate comm page for guest kernel; a TLB entry will be reserved
  379          * for mapping GVA @ 0xFFFF8000 to this page
 380          */
 381         vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL);
 382 
 383         if (!vcpu->arch.kseg0_commpage) {
 384                 err = -ENOMEM;
 385                 goto out_free_gebase;
 386         }
 387 
 388         kvm_debug("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage);
 389         kvm_mips_commpage_init(vcpu);
 390 
 391         /* Init */
 392         vcpu->arch.last_sched_cpu = -1;
 393         vcpu->arch.last_exec_cpu = -1;
 394 
 395         return vcpu;
 396 
 397 out_free_gebase:
 398         kfree(gebase);
 399 
 400 out_uninit_cpu:
 401         kvm_vcpu_uninit(vcpu);
 402 
 403 out_free_cpu:
 404         kfree(vcpu);
 405 
 406 out:
 407         return ERR_PTR(err);
 408 }
 409 
 410 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 411 {
 412         hrtimer_cancel(&vcpu->arch.comparecount_timer);
 413 
 414         kvm_vcpu_uninit(vcpu);
 415 
 416         kvm_mips_dump_stats(vcpu);
 417 
 418         kvm_mmu_free_memory_caches(vcpu);
 419         kfree(vcpu->arch.guest_ebase);
 420         kfree(vcpu->arch.kseg0_commpage);
 421         kfree(vcpu);
 422 }
 423 
 424 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 425 {
 426         kvm_arch_vcpu_free(vcpu);
 427 }
 428 
 429 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
 430                                         struct kvm_guest_debug *dbg)
 431 {
 432         return -ENOIOCTLCMD;
 433 }
 434 
 435 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 436 {
 437         int r = -EINTR;
 438 
 439         vcpu_load(vcpu);
 440 
 441         kvm_sigset_activate(vcpu);
 442 
 443         if (vcpu->mmio_needed) {
 444                 if (!vcpu->mmio_is_write)
 445                         kvm_mips_complete_mmio_load(vcpu, run);
 446                 vcpu->mmio_needed = 0;
 447         }
 448 
 449         if (run->immediate_exit)
 450                 goto out;
 451 
 452         lose_fpu(1);
 453 
 454         local_irq_disable();
 455         guest_enter_irqoff();
 456         trace_kvm_enter(vcpu);
 457 
 458         /*
 459          * Make sure the read of VCPU requests in vcpu_run() callback is not
 460          * reordered ahead of the write to vcpu->mode, or we could miss a TLB
 461          * flush request while the requester sees the VCPU as outside of guest
 462          * mode and not needing an IPI.
 463          */
 464         smp_store_mb(vcpu->mode, IN_GUEST_MODE);
 465 
 466         r = kvm_mips_callbacks->vcpu_run(run, vcpu);
 467 
 468         trace_kvm_out(vcpu);
 469         guest_exit_irqoff();
 470         local_irq_enable();
 471 
 472 out:
 473         kvm_sigset_deactivate(vcpu);
 474 
 475         vcpu_put(vcpu);
 476         return r;
 477 }
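
/*
 * Userspace run loop, for illustration only (a rough sketch, not part of this
 * file; kvm_fd/vm_fd/vcpu_fd are placeholders for the usual KVM descriptors):
 *
 *     int vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
 *     int sz = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
 *     struct kvm_run *run = mmap(NULL, sz, PROT_READ | PROT_WRITE,
 *                                MAP_SHARED, vcpu_fd, 0);
 *     for (;;) {
 *             ioctl(vcpu_fd, KVM_RUN, 0);       // ends up in the handler above
 *             // inspect run->exit_reason, e.g. KVM_EXIT_MMIO
 *     }
 */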
 478 
 479 int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
 480                              struct kvm_mips_interrupt *irq)
 481 {
 482         int intr = (int)irq->irq;
 483         struct kvm_vcpu *dvcpu = NULL;
 484 
 485         if (intr == 3 || intr == -3 || intr == 4 || intr == -4)
 486                 kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu,
 487                           (int)intr);
 488 
 489         if (irq->cpu == -1)
 490                 dvcpu = vcpu;
 491         else
 492                 dvcpu = vcpu->kvm->vcpus[irq->cpu];
 493 
 494         if (intr == 2 || intr == 3 || intr == 4) {
 495                 kvm_mips_callbacks->queue_io_int(dvcpu, irq);
 496 
 497         } else if (intr == -2 || intr == -3 || intr == -4) {
 498                 kvm_mips_callbacks->dequeue_io_int(dvcpu, irq);
 499         } else {
 500                 kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__,
 501                         irq->cpu, irq->irq);
 502                 return -EINVAL;
 503         }
 504 
 505         dvcpu->arch.wait = 0;
 506 
 507         if (swq_has_sleeper(&dvcpu->wq))
 508                 swake_up_one(&dvcpu->wq);
 509 
 510         return 0;
 511 }
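
/*
 * Userspace side, for illustration only (a sketch, not part of this file):
 * the KVM_INTERRUPT ioctl routed here takes a struct kvm_mips_interrupt.
 * Per the code above, irq 2/3/4 raises the corresponding line, a negative
 * value clears it, and cpu == -1 targets the VCPU the ioctl was issued on:
 *
 *     struct kvm_mips_interrupt irq = { .cpu = -1, .irq = 2 };
 *     ioctl(vcpu_fd, KVM_INTERRUPT, &irq);
 */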
 512 
 513 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
 514                                     struct kvm_mp_state *mp_state)
 515 {
 516         return -ENOIOCTLCMD;
 517 }
 518 
 519 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
 520                                     struct kvm_mp_state *mp_state)
 521 {
 522         return -ENOIOCTLCMD;
 523 }
 524 
 525 static u64 kvm_mips_get_one_regs[] = {
 526         KVM_REG_MIPS_R0,
 527         KVM_REG_MIPS_R1,
 528         KVM_REG_MIPS_R2,
 529         KVM_REG_MIPS_R3,
 530         KVM_REG_MIPS_R4,
 531         KVM_REG_MIPS_R5,
 532         KVM_REG_MIPS_R6,
 533         KVM_REG_MIPS_R7,
 534         KVM_REG_MIPS_R8,
 535         KVM_REG_MIPS_R9,
 536         KVM_REG_MIPS_R10,
 537         KVM_REG_MIPS_R11,
 538         KVM_REG_MIPS_R12,
 539         KVM_REG_MIPS_R13,
 540         KVM_REG_MIPS_R14,
 541         KVM_REG_MIPS_R15,
 542         KVM_REG_MIPS_R16,
 543         KVM_REG_MIPS_R17,
 544         KVM_REG_MIPS_R18,
 545         KVM_REG_MIPS_R19,
 546         KVM_REG_MIPS_R20,
 547         KVM_REG_MIPS_R21,
 548         KVM_REG_MIPS_R22,
 549         KVM_REG_MIPS_R23,
 550         KVM_REG_MIPS_R24,
 551         KVM_REG_MIPS_R25,
 552         KVM_REG_MIPS_R26,
 553         KVM_REG_MIPS_R27,
 554         KVM_REG_MIPS_R28,
 555         KVM_REG_MIPS_R29,
 556         KVM_REG_MIPS_R30,
 557         KVM_REG_MIPS_R31,
 558 
 559 #ifndef CONFIG_CPU_MIPSR6
 560         KVM_REG_MIPS_HI,
 561         KVM_REG_MIPS_LO,
 562 #endif
 563         KVM_REG_MIPS_PC,
 564 };
 565 
 566 static u64 kvm_mips_get_one_regs_fpu[] = {
 567         KVM_REG_MIPS_FCR_IR,
 568         KVM_REG_MIPS_FCR_CSR,
 569 };
 570 
 571 static u64 kvm_mips_get_one_regs_msa[] = {
 572         KVM_REG_MIPS_MSA_IR,
 573         KVM_REG_MIPS_MSA_CSR,
 574 };
 575 
 576 static unsigned long kvm_mips_num_regs(struct kvm_vcpu *vcpu)
 577 {
 578         unsigned long ret;
 579 
 580         ret = ARRAY_SIZE(kvm_mips_get_one_regs);
 581         if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) {
 582                 ret += ARRAY_SIZE(kvm_mips_get_one_regs_fpu) + 48;
 583                 /* odd doubles */
 584                 if (boot_cpu_data.fpu_id & MIPS_FPIR_F64)
 585                         ret += 16;
 586         }
 587         if (kvm_mips_guest_can_have_msa(&vcpu->arch))
 588                 ret += ARRAY_SIZE(kvm_mips_get_one_regs_msa) + 32;
 589         ret += kvm_mips_callbacks->num_regs(vcpu);
 590 
 591         return ret;
 592 }
 593 
 594 static int kvm_mips_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
 595 {
 596         u64 index;
 597         unsigned int i;
 598 
 599         if (copy_to_user(indices, kvm_mips_get_one_regs,
 600                          sizeof(kvm_mips_get_one_regs)))
 601                 return -EFAULT;
 602         indices += ARRAY_SIZE(kvm_mips_get_one_regs);
 603 
 604         if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) {
 605                 if (copy_to_user(indices, kvm_mips_get_one_regs_fpu,
 606                                  sizeof(kvm_mips_get_one_regs_fpu)))
 607                         return -EFAULT;
 608                 indices += ARRAY_SIZE(kvm_mips_get_one_regs_fpu);
 609 
 610                 for (i = 0; i < 32; ++i) {
 611                         index = KVM_REG_MIPS_FPR_32(i);
 612                         if (copy_to_user(indices, &index, sizeof(index)))
 613                                 return -EFAULT;
 614                         ++indices;
 615 
 616                         /* skip odd doubles if no F64 */
 617                         if (i & 1 && !(boot_cpu_data.fpu_id & MIPS_FPIR_F64))
 618                                 continue;
 619 
 620                         index = KVM_REG_MIPS_FPR_64(i);
 621                         if (copy_to_user(indices, &index, sizeof(index)))
 622                                 return -EFAULT;
 623                         ++indices;
 624                 }
 625         }
 626 
 627         if (kvm_mips_guest_can_have_msa(&vcpu->arch)) {
 628                 if (copy_to_user(indices, kvm_mips_get_one_regs_msa,
 629                                  sizeof(kvm_mips_get_one_regs_msa)))
 630                         return -EFAULT;
 631                 indices += ARRAY_SIZE(kvm_mips_get_one_regs_msa);
 632 
 633                 for (i = 0; i < 32; ++i) {
 634                         index = KVM_REG_MIPS_VEC_128(i);
 635                         if (copy_to_user(indices, &index, sizeof(index)))
 636                                 return -EFAULT;
 637                         ++indices;
 638                 }
 639         }
 640 
 641         return kvm_mips_callbacks->copy_reg_indices(vcpu, indices);
 642 }
 643 
 644 static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
 645                             const struct kvm_one_reg *reg)
 646 {
 647         struct mips_coproc *cop0 = vcpu->arch.cop0;
 648         struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
 649         int ret;
 650         s64 v;
 651         s64 vs[2];
 652         unsigned int idx;
 653 
 654         switch (reg->id) {
 655         /* General purpose registers */
 656         case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31:
 657                 v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0];
 658                 break;
 659 #ifndef CONFIG_CPU_MIPSR6
 660         case KVM_REG_MIPS_HI:
 661                 v = (long)vcpu->arch.hi;
 662                 break;
 663         case KVM_REG_MIPS_LO:
 664                 v = (long)vcpu->arch.lo;
 665                 break;
 666 #endif
 667         case KVM_REG_MIPS_PC:
 668                 v = (long)vcpu->arch.pc;
 669                 break;
 670 
 671         /* Floating point registers */
 672         case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
 673                 if (!kvm_mips_guest_has_fpu(&vcpu->arch))
 674                         return -EINVAL;
 675                 idx = reg->id - KVM_REG_MIPS_FPR_32(0);
 676                 /* Odd singles in top of even double when FR=0 */
 677                 if (kvm_read_c0_guest_status(cop0) & ST0_FR)
 678                         v = get_fpr32(&fpu->fpr[idx], 0);
 679                 else
 680                         v = get_fpr32(&fpu->fpr[idx & ~1], idx & 1);
 681                 break;
 682         case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
 683                 if (!kvm_mips_guest_has_fpu(&vcpu->arch))
 684                         return -EINVAL;
 685                 idx = reg->id - KVM_REG_MIPS_FPR_64(0);
 686                 /* Can't access odd doubles in FR=0 mode */
 687                 if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
 688                         return -EINVAL;
 689                 v = get_fpr64(&fpu->fpr[idx], 0);
 690                 break;
 691         case KVM_REG_MIPS_FCR_IR:
 692                 if (!kvm_mips_guest_has_fpu(&vcpu->arch))
 693                         return -EINVAL;
 694                 v = boot_cpu_data.fpu_id;
 695                 break;
 696         case KVM_REG_MIPS_FCR_CSR:
 697                 if (!kvm_mips_guest_has_fpu(&vcpu->arch))
 698                         return -EINVAL;
 699                 v = fpu->fcr31;
 700                 break;
 701 
 702         /* MIPS SIMD Architecture (MSA) registers */
 703         case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
 704                 if (!kvm_mips_guest_has_msa(&vcpu->arch))
 705                         return -EINVAL;
 706                 /* Can't access MSA registers in FR=0 mode */
 707                 if (!(kvm_read_c0_guest_status(cop0) & ST0_FR))
 708                         return -EINVAL;
 709                 idx = reg->id - KVM_REG_MIPS_VEC_128(0);
 710 #ifdef CONFIG_CPU_LITTLE_ENDIAN
 711                 /* least significant byte first */
 712                 vs[0] = get_fpr64(&fpu->fpr[idx], 0);
 713                 vs[1] = get_fpr64(&fpu->fpr[idx], 1);
 714 #else
 715                 /* most significant byte first */
 716                 vs[0] = get_fpr64(&fpu->fpr[idx], 1);
 717                 vs[1] = get_fpr64(&fpu->fpr[idx], 0);
 718 #endif
 719                 break;
 720         case KVM_REG_MIPS_MSA_IR:
 721                 if (!kvm_mips_guest_has_msa(&vcpu->arch))
 722                         return -EINVAL;
 723                 v = boot_cpu_data.msa_id;
 724                 break;
 725         case KVM_REG_MIPS_MSA_CSR:
 726                 if (!kvm_mips_guest_has_msa(&vcpu->arch))
 727                         return -EINVAL;
 728                 v = fpu->msacsr;
 729                 break;
 730 
 731         /* registers to be handled specially */
 732         default:
 733                 ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v);
 734                 if (ret)
 735                         return ret;
 736                 break;
 737         }
 738         if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
 739                 u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;
 740 
 741                 return put_user(v, uaddr64);
 742         } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
 743                 u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
 744                 u32 v32 = (u32)v;
 745 
 746                 return put_user(v32, uaddr32);
 747         } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
 748                 void __user *uaddr = (void __user *)(long)reg->addr;
 749 
 750                 return copy_to_user(uaddr, vs, 16) ? -EFAULT : 0;
 751         } else {
 752                 return -EINVAL;
 753         }
 754 }
 755 
 756 static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
 757                             const struct kvm_one_reg *reg)
 758 {
 759         struct mips_coproc *cop0 = vcpu->arch.cop0;
 760         struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
 761         s64 v;
 762         s64 vs[2];
 763         unsigned int idx;
 764 
 765         if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
 766                 u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;
 767 
 768                 if (get_user(v, uaddr64) != 0)
 769                         return -EFAULT;
 770         } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
 771                 u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
 772                 s32 v32;
 773 
 774                 if (get_user(v32, uaddr32) != 0)
 775                         return -EFAULT;
 776                 v = (s64)v32;
 777         } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
 778                 void __user *uaddr = (void __user *)(long)reg->addr;
 779 
 780                 return copy_from_user(vs, uaddr, 16) ? -EFAULT : 0;
 781         } else {
 782                 return -EINVAL;
 783         }
 784 
 785         switch (reg->id) {
 786         /* General purpose registers */
 787         case KVM_REG_MIPS_R0:
 788                 /* Silently ignore requests to set $0 */
 789                 break;
 790         case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31:
 791                 vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v;
 792                 break;
 793 #ifndef CONFIG_CPU_MIPSR6
 794         case KVM_REG_MIPS_HI:
 795                 vcpu->arch.hi = v;
 796                 break;
 797         case KVM_REG_MIPS_LO:
 798                 vcpu->arch.lo = v;
 799                 break;
 800 #endif
 801         case KVM_REG_MIPS_PC:
 802                 vcpu->arch.pc = v;
 803                 break;
 804 
 805         /* Floating point registers */
 806         case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
 807                 if (!kvm_mips_guest_has_fpu(&vcpu->arch))
 808                         return -EINVAL;
 809                 idx = reg->id - KVM_REG_MIPS_FPR_32(0);
 810                 /* Odd singles in top of even double when FR=0 */
 811                 if (kvm_read_c0_guest_status(cop0) & ST0_FR)
 812                         set_fpr32(&fpu->fpr[idx], 0, v);
 813                 else
 814                         set_fpr32(&fpu->fpr[idx & ~1], idx & 1, v);
 815                 break;
 816         case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
 817                 if (!kvm_mips_guest_has_fpu(&vcpu->arch))
 818                         return -EINVAL;
 819                 idx = reg->id - KVM_REG_MIPS_FPR_64(0);
 820                 /* Can't access odd doubles in FR=0 mode */
 821                 if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
 822                         return -EINVAL;
 823                 set_fpr64(&fpu->fpr[idx], 0, v);
 824                 break;
 825         case KVM_REG_MIPS_FCR_IR:
 826                 if (!kvm_mips_guest_has_fpu(&vcpu->arch))
 827                         return -EINVAL;
 828                 /* Read-only */
 829                 break;
 830         case KVM_REG_MIPS_FCR_CSR:
 831                 if (!kvm_mips_guest_has_fpu(&vcpu->arch))
 832                         return -EINVAL;
 833                 fpu->fcr31 = v;
 834                 break;
 835 
 836         /* MIPS SIMD Architecture (MSA) registers */
 837         case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
 838                 if (!kvm_mips_guest_has_msa(&vcpu->arch))
 839                         return -EINVAL;
 840                 idx = reg->id - KVM_REG_MIPS_VEC_128(0);
 841 #ifdef CONFIG_CPU_LITTLE_ENDIAN
 842                 /* least significant byte first */
 843                 set_fpr64(&fpu->fpr[idx], 0, vs[0]);
 844                 set_fpr64(&fpu->fpr[idx], 1, vs[1]);
 845 #else
 846                 /* most significant byte first */
 847                 set_fpr64(&fpu->fpr[idx], 1, vs[0]);
 848                 set_fpr64(&fpu->fpr[idx], 0, vs[1]);
 849 #endif
 850                 break;
 851         case KVM_REG_MIPS_MSA_IR:
 852                 if (!kvm_mips_guest_has_msa(&vcpu->arch))
 853                         return -EINVAL;
 854                 /* Read-only */
 855                 break;
 856         case KVM_REG_MIPS_MSA_CSR:
 857                 if (!kvm_mips_guest_has_msa(&vcpu->arch))
 858                         return -EINVAL;
 859                 fpu->msacsr = v;
 860                 break;
 861 
 862         /* registers to be handled specially */
 863         default:
 864                 return kvm_mips_callbacks->set_one_reg(vcpu, reg, v);
 865         }
 866         return 0;
 867 }
 868 
 869 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
 870                                      struct kvm_enable_cap *cap)
 871 {
 872         int r = 0;
 873 
 874         if (!kvm_vm_ioctl_check_extension(vcpu->kvm, cap->cap))
 875                 return -EINVAL;
 876         if (cap->flags)
 877                 return -EINVAL;
 878         if (cap->args[0])
 879                 return -EINVAL;
 880 
 881         switch (cap->cap) {
 882         case KVM_CAP_MIPS_FPU:
 883                 vcpu->arch.fpu_enabled = true;
 884                 break;
 885         case KVM_CAP_MIPS_MSA:
 886                 vcpu->arch.msa_enabled = true;
 887                 break;
 888         default:
 889                 r = -EINVAL;
 890                 break;
 891         }
 892 
 893         return r;
 894 }
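
/*
 * Userspace side, for illustration only (a sketch, not part of this file):
 * guest FPU/MSA are opt-in per VCPU, e.g.:
 *
 *     struct kvm_enable_cap cap = { .cap = KVM_CAP_MIPS_FPU };
 *     ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);    // flags and args[0] must be 0
 */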
 895 
 896 long kvm_arch_vcpu_async_ioctl(struct file *filp, unsigned int ioctl,
 897                                unsigned long arg)
 898 {
 899         struct kvm_vcpu *vcpu = filp->private_data;
 900         void __user *argp = (void __user *)arg;
 901 
 902         if (ioctl == KVM_INTERRUPT) {
 903                 struct kvm_mips_interrupt irq;
 904 
 905                 if (copy_from_user(&irq, argp, sizeof(irq)))
 906                         return -EFAULT;
 907                 kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
 908                           irq.irq);
 909 
 910                 return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
 911         }
 912 
 913         return -ENOIOCTLCMD;
 914 }
 915 
 916 long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
 917                          unsigned long arg)
 918 {
 919         struct kvm_vcpu *vcpu = filp->private_data;
 920         void __user *argp = (void __user *)arg;
 921         long r;
 922 
 923         vcpu_load(vcpu);
 924 
 925         switch (ioctl) {
 926         case KVM_SET_ONE_REG:
 927         case KVM_GET_ONE_REG: {
 928                 struct kvm_one_reg reg;
 929 
 930                 r = -EFAULT;
 931                 if (copy_from_user(&reg, argp, sizeof(reg)))
 932                         break;
 933                 if (ioctl == KVM_SET_ONE_REG)
 934                         r = kvm_mips_set_reg(vcpu, &reg);
 935                 else
 936                         r = kvm_mips_get_reg(vcpu, &reg);
 937                 break;
 938         }
 939         case KVM_GET_REG_LIST: {
 940                 struct kvm_reg_list __user *user_list = argp;
 941                 struct kvm_reg_list reg_list;
 942                 unsigned n;
 943 
 944                 r = -EFAULT;
 945                 if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
 946                         break;
 947                 n = reg_list.n;
 948                 reg_list.n = kvm_mips_num_regs(vcpu);
 949                 if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
 950                         break;
 951                 r = -E2BIG;
 952                 if (n < reg_list.n)
 953                         break;
 954                 r = kvm_mips_copy_reg_indices(vcpu, user_list->reg);
 955                 break;
 956         }
 957         case KVM_ENABLE_CAP: {
 958                 struct kvm_enable_cap cap;
 959 
 960                 r = -EFAULT;
 961                 if (copy_from_user(&cap, argp, sizeof(cap)))
 962                         break;
 963                 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
 964                 break;
 965         }
 966         default:
 967                 r = -ENOIOCTLCMD;
 968         }
 969 
 970         vcpu_put(vcpu);
 971         return r;
 972 }
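
/*
 * Userspace ONE_REG access, for illustration only (a sketch, not part of
 * this file): KVM_GET_REG_LIST is a two-call protocol (a first call with a
 * too-small n fails with E2BIG but reports the required count), and single
 * registers are then read/written with KVM_GET_ONE_REG / KVM_SET_ONE_REG:
 *
 *     __u64 pc;
 *     struct kvm_one_reg reg = {
 *             .id   = KVM_REG_MIPS_PC,
 *             .addr = (unsigned long)&pc,
 *     };
 *     ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */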
 973 
 974 /**
 975  * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
 976  * @kvm: kvm instance
 977  * @log: slot id and address to which we copy the log
 978  *
  979  * Steps 1-4 below provide a general overview of dirty page logging. See
  980  * kvm_get_dirty_log_protect() function description for additional details.
  981  *
  982  * We call kvm_get_dirty_log_protect() to handle steps 1-3; upon return we
  983  * always flush the TLB (step 4) even if a previous step failed and the dirty
  984  * bitmap may be corrupt. Regardless of the previous outcome, the KVM logging
  985  * API does not preclude user space from reading the dirty log again later.
  986  * Flushing the TLB ensures writes will be marked dirty for the next log read.
  987  *
  988  *   1. Take a snapshot of the bit and clear it if needed.
  989  *   2. Write protect the corresponding page.
  990  *   3. Copy the snapshot to userspace.
  991  *   4. Flush TLBs if needed.
 992  */
 993 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
 994 {
 995         struct kvm_memslots *slots;
 996         struct kvm_memory_slot *memslot;
 997         bool flush = false;
 998         int r;
 999 
1000         mutex_lock(&kvm->slots_lock);
1001 
1002         r = kvm_get_dirty_log_protect(kvm, log, &flush);
1003 
1004         if (flush) {
1005                 slots = kvm_memslots(kvm);
1006                 memslot = id_to_memslot(slots, log->slot);
1007 
1008                 /* Let implementation handle TLB/GVA invalidation */
1009                 kvm_mips_callbacks->flush_shadow_memslot(kvm, memslot);
1010         }
1011 
1012         mutex_unlock(&kvm->slots_lock);
1013         return r;
1014 }
1015 
1016 int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm, struct kvm_clear_dirty_log *log)
1017 {
1018         struct kvm_memslots *slots;
1019         struct kvm_memory_slot *memslot;
1020         bool flush = false;
1021         int r;
1022 
1023         mutex_lock(&kvm->slots_lock);
1024 
1025         r = kvm_clear_dirty_log_protect(kvm, log, &flush);
1026 
1027         if (flush) {
1028                 slots = kvm_memslots(kvm);
1029                 memslot = id_to_memslot(slots, log->slot);
1030 
1031                 /* Let implementation handle TLB/GVA invalidation */
1032                 kvm_mips_callbacks->flush_shadow_memslot(kvm, memslot);
1033         }
1034 
1035         mutex_unlock(&kvm->slots_lock);
1036         return r;
1037 }
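
/*
 * Userspace dirty-logging, for illustration only (a sketch, not part of this
 * file; slot_id and bitmap are placeholders): the memslot must have been
 * registered with KVM_MEM_LOG_DIRTY_PAGES, and KVM_CLEAR_DIRTY_LOG is only
 * available once KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 is enabled on the VM:
 *
 *     struct kvm_dirty_log log = { .slot = slot_id, .dirty_bitmap = bitmap };
 *     ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
 */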
1038 
1039 long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
1040 {
1041         long r;
1042 
1043         switch (ioctl) {
1044         default:
1045                 r = -ENOIOCTLCMD;
1046         }
1047 
1048         return r;
1049 }
1050 
1051 int kvm_arch_init(void *opaque)
1052 {
1053         if (kvm_mips_callbacks) {
1054                 kvm_err("kvm: module already exists\n");
1055                 return -EEXIST;
1056         }
1057 
1058         return kvm_mips_emulation_init(&kvm_mips_callbacks);
1059 }
1060 
1061 void kvm_arch_exit(void)
1062 {
1063         kvm_mips_callbacks = NULL;
1064 }
1065 
1066 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
1067                                   struct kvm_sregs *sregs)
1068 {
1069         return -ENOIOCTLCMD;
1070 }
1071 
1072 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
1073                                   struct kvm_sregs *sregs)
1074 {
1075         return -ENOIOCTLCMD;
1076 }
1077 
1078 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
1079 {
1080 }
1081 
1082 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1083 {
1084         return -ENOIOCTLCMD;
1085 }
1086 
1087 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1088 {
1089         return -ENOIOCTLCMD;
1090 }
1091 
1092 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
1093 {
1094         return VM_FAULT_SIGBUS;
1095 }
1096 
1097 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
1098 {
1099         int r;
1100 
1101         switch (ext) {
1102         case KVM_CAP_ONE_REG:
1103         case KVM_CAP_ENABLE_CAP:
1104         case KVM_CAP_READONLY_MEM:
1105         case KVM_CAP_SYNC_MMU:
1106         case KVM_CAP_IMMEDIATE_EXIT:
1107                 r = 1;
1108                 break;
1109         case KVM_CAP_NR_VCPUS:
1110                 r = num_online_cpus();
1111                 break;
1112         case KVM_CAP_MAX_VCPUS:
1113                 r = KVM_MAX_VCPUS;
1114                 break;
1115         case KVM_CAP_MAX_VCPU_ID:
1116                 r = KVM_MAX_VCPU_ID;
1117                 break;
1118         case KVM_CAP_MIPS_FPU:
1119                 /* We don't handle systems with inconsistent cpu_has_fpu */
1120                 r = !!raw_cpu_has_fpu;
1121                 break;
1122         case KVM_CAP_MIPS_MSA:
1123                 /*
1124                  * We don't support MSA vector partitioning yet:
1125                  * 1) It would require explicit support which can't be tested
1126                  *    yet due to lack of support in current hardware.
1127                  * 2) It extends the state that would need to be saved/restored
1128                  *    by e.g. QEMU for migration.
1129                  *
1130                  * When vector partitioning hardware becomes available, support
1131                  * could be added by requiring a flag when enabling
1132                  * KVM_CAP_MIPS_MSA capability to indicate that userland knows
1133                  * to save/restore the appropriate extra state.
1134                  */
1135                 r = cpu_has_msa && !(boot_cpu_data.msa_id & MSA_IR_WRPF);
1136                 break;
1137         default:
1138                 r = kvm_mips_callbacks->check_extension(kvm, ext);
1139                 break;
1140         }
1141         return r;
1142 }
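
/*
 * Userspace capability probing, for illustration only (a sketch, not part of
 * this file):
 *
 *     int has_fpu = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_MIPS_FPU);
 *     // > 0 means guest FPU can be enabled via KVM_ENABLE_CAP on each VCPU
 */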
1143 
1144 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
1145 {
1146         return kvm_mips_pending_timer(vcpu) ||
1147                 kvm_read_c0_guest_cause(vcpu->arch.cop0) & C_TI;
1148 }
1149 
1150 int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
1151 {
1152         int i;
1153         struct mips_coproc *cop0;
1154 
1155         if (!vcpu)
1156                 return -1;
1157 
1158         kvm_debug("VCPU Register Dump:\n");
1159         kvm_debug("\tpc = 0x%08lx\n", vcpu->arch.pc);
1160         kvm_debug("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);
1161 
1162         for (i = 0; i < 32; i += 4) {
1163                 kvm_debug("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i,
1164                        vcpu->arch.gprs[i],
1165                        vcpu->arch.gprs[i + 1],
1166                        vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
1167         }
1168         kvm_debug("\thi: 0x%08lx\n", vcpu->arch.hi);
1169         kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo);
1170 
1171         cop0 = vcpu->arch.cop0;
1172         kvm_debug("\tStatus: 0x%08x, Cause: 0x%08x\n",
1173                   kvm_read_c0_guest_status(cop0),
1174                   kvm_read_c0_guest_cause(cop0));
1175 
1176         kvm_debug("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0));
1177 
1178         return 0;
1179 }
1180 
1181 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1182 {
1183         int i;
1184 
1185         vcpu_load(vcpu);
1186 
1187         for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
1188                 vcpu->arch.gprs[i] = regs->gpr[i];
1189         vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
1190         vcpu->arch.hi = regs->hi;
1191         vcpu->arch.lo = regs->lo;
1192         vcpu->arch.pc = regs->pc;
1193 
1194         vcpu_put(vcpu);
1195         return 0;
1196 }
1197 
1198 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1199 {
1200         int i;
1201 
1202         vcpu_load(vcpu);
1203 
1204         for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
1205                 regs->gpr[i] = vcpu->arch.gprs[i];
1206 
1207         regs->hi = vcpu->arch.hi;
1208         regs->lo = vcpu->arch.lo;
1209         regs->pc = vcpu->arch.pc;
1210 
1211         vcpu_put(vcpu);
1212         return 0;
1213 }
1214 
1215 static void kvm_mips_comparecount_func(unsigned long data)
1216 {
1217         struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
1218 
1219         kvm_mips_callbacks->queue_timer_int(vcpu);
1220 
1221         vcpu->arch.wait = 0;
1222         if (swq_has_sleeper(&vcpu->wq))
1223                 swake_up_one(&vcpu->wq);
1224 }
1225 
1226 /* low level hrtimer wake routine */
1227 static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
1228 {
1229         struct kvm_vcpu *vcpu;
1230 
1231         vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);
1232         kvm_mips_comparecount_func((unsigned long) vcpu);
1233         return kvm_mips_count_timeout(vcpu);
1234 }
1235 
1236 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
1237 {
1238         int err;
1239 
1240         err = kvm_mips_callbacks->vcpu_init(vcpu);
1241         if (err)
1242                 return err;
1243 
1244         hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
1245                      HRTIMER_MODE_REL);
1246         vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;
1247         return 0;
1248 }
1249 
1250 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
1251 {
1252         kvm_mips_callbacks->vcpu_uninit(vcpu);
1253 }
1254 
1255 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
1256                                   struct kvm_translation *tr)
1257 {
1258         return 0;
1259 }
1260 
1261 /* Initial guest state */
1262 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
1263 {
1264         return kvm_mips_callbacks->vcpu_setup(vcpu);
1265 }
1266 
1267 static void kvm_mips_set_c0_status(void)
1268 {
1269         u32 status = read_c0_status();
1270 
1271         if (cpu_has_dsp)
1272                 status |= (ST0_MX);
1273 
1274         write_c0_status(status);
1275         ehb();
1276 }
1277 
1278 /*
1279  * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
1280  */
1281 int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
1282 {
1283         u32 cause = vcpu->arch.host_cp0_cause;
1284         u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
1285         u32 __user *opc = (u32 __user *) vcpu->arch.pc;
1286         unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
1287         enum emulation_result er = EMULATE_DONE;
1288         u32 inst;
1289         int ret = RESUME_GUEST;
1290 
1291         vcpu->mode = OUTSIDE_GUEST_MODE;
1292 
1293         /* re-enable HTW before enabling interrupts */
1294         if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ))
1295                 htw_start();
1296 
1297         /* Set a default exit reason */
1298         run->exit_reason = KVM_EXIT_UNKNOWN;
1299         run->ready_for_interrupt_injection = 1;
1300 
1301         /*
1302          * Set the appropriate status bits based on host CPU features,
1303          * before we hit the scheduler
1304          */
1305         kvm_mips_set_c0_status();
1306 
1307         local_irq_enable();
1308 
1309         kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
1310                         cause, opc, run, vcpu);
1311         trace_kvm_exit(vcpu, exccode);
1312 
1313         if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
1314                 /*
 1315                  * Do a privilege check; if in UM, most of these exit
 1316                  * conditions end up causing an exception to be delivered
 1317                  * to the guest kernel.
1318                  */
1319                 er = kvm_mips_check_privilege(cause, opc, run, vcpu);
1320                 if (er == EMULATE_PRIV_FAIL) {
1321                         goto skip_emul;
1322                 } else if (er == EMULATE_FAIL) {
1323                         run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1324                         ret = RESUME_HOST;
1325                         goto skip_emul;
1326                 }
1327         }
1328 
1329         switch (exccode) {
1330         case EXCCODE_INT:
1331                 kvm_debug("[%d]EXCCODE_INT @ %p\n", vcpu->vcpu_id, opc);
1332 
1333                 ++vcpu->stat.int_exits;
1334 
1335                 if (need_resched())
1336                         cond_resched();
1337 
1338                 ret = RESUME_GUEST;
1339                 break;
1340 
1341         case EXCCODE_CPU:
1342                 kvm_debug("EXCCODE_CPU: @ PC: %p\n", opc);
1343 
1344                 ++vcpu->stat.cop_unusable_exits;
1345                 ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
1346                 /* XXXKYMA: Might need to return to user space */
1347                 if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN)
1348                         ret = RESUME_HOST;
1349                 break;
1350 
1351         case EXCCODE_MOD:
1352                 ++vcpu->stat.tlbmod_exits;
1353                 ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);
1354                 break;
1355 
1356         case EXCCODE_TLBS:
1357                 kvm_debug("TLB ST fault:  cause %#x, status %#x, PC: %p, BadVaddr: %#lx\n",
1358                           cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
1359                           badvaddr);
1360 
1361                 ++vcpu->stat.tlbmiss_st_exits;
1362                 ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu);
1363                 break;
1364 
1365         case EXCCODE_TLBL:
1366                 kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
1367                           cause, opc, badvaddr);
1368 
1369                 ++vcpu->stat.tlbmiss_ld_exits;
1370                 ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);
1371                 break;
1372 
1373         case EXCCODE_ADES:
1374                 ++vcpu->stat.addrerr_st_exits;
1375                 ret = kvm_mips_callbacks->handle_addr_err_st(vcpu);
1376                 break;
1377 
1378         case EXCCODE_ADEL:
1379                 ++vcpu->stat.addrerr_ld_exits;
1380                 ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu);
1381                 break;
1382 
1383         case EXCCODE_SYS:
1384                 ++vcpu->stat.syscall_exits;
1385                 ret = kvm_mips_callbacks->handle_syscall(vcpu);
1386                 break;
1387 
1388         case EXCCODE_RI:
1389                 ++vcpu->stat.resvd_inst_exits;
1390                 ret = kvm_mips_callbacks->handle_res_inst(vcpu);
1391                 break;
1392 
1393         case EXCCODE_BP:
1394                 ++vcpu->stat.break_inst_exits;
1395                 ret = kvm_mips_callbacks->handle_break(vcpu);
1396                 break;
1397 
1398         case EXCCODE_TR:
1399                 ++vcpu->stat.trap_inst_exits;
1400                 ret = kvm_mips_callbacks->handle_trap(vcpu);
1401                 break;
1402 
1403         case EXCCODE_MSAFPE:
1404                 ++vcpu->stat.msa_fpe_exits;
1405                 ret = kvm_mips_callbacks->handle_msa_fpe(vcpu);
1406                 break;
1407 
1408         case EXCCODE_FPE:
1409                 ++vcpu->stat.fpe_exits;
1410                 ret = kvm_mips_callbacks->handle_fpe(vcpu);
1411                 break;
1412 
1413         case EXCCODE_MSADIS:
1414                 ++vcpu->stat.msa_disabled_exits;
1415                 ret = kvm_mips_callbacks->handle_msa_disabled(vcpu);
1416                 break;
1417 
1418         case EXCCODE_GE:
1419                 /* defer exit accounting to handler */
1420                 ret = kvm_mips_callbacks->handle_guest_exit(vcpu);
1421                 break;
1422 
1423         default:
1424                 if (cause & CAUSEF_BD)
1425                         opc += 1;
1426                 inst = 0;
1427                 kvm_get_badinstr(opc, vcpu, &inst);
1428                 kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x  BadVaddr: %#lx Status: %#x\n",
1429                         exccode, opc, inst, badvaddr,
1430                         kvm_read_c0_guest_status(vcpu->arch.cop0));
1431                 kvm_arch_vcpu_dump_regs(vcpu);
1432                 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1433                 ret = RESUME_HOST;
1434                 break;
1435 
1436         }
1437 
1438 skip_emul:
1439         local_irq_disable();
1440 
1441         if (ret == RESUME_GUEST)
1442                 kvm_vz_acquire_htimer(vcpu);
1443 
1444         if (er == EMULATE_DONE && !(ret & RESUME_HOST))
1445                 kvm_mips_deliver_interrupts(vcpu, cause);
1446 
1447         if (!(ret & RESUME_HOST)) {
1448                 /* Only check for signals if not already exiting to userspace */
1449                 if (signal_pending(current)) {
1450                         run->exit_reason = KVM_EXIT_INTR;
1451                         ret = (-EINTR << 2) | RESUME_HOST;
1452                         ++vcpu->stat.signal_exits;
1453                         trace_kvm_exit(vcpu, KVM_TRACE_EXIT_SIGNAL);
1454                 }
1455         }
1456 
1457         if (ret == RESUME_GUEST) {
1458                 trace_kvm_reenter(vcpu);
1459 
1460                 /*
1461                  * Make sure the read of VCPU requests in vcpu_reenter()
1462                  * callback is not reordered ahead of the write to vcpu->mode,
1463                  * or we could miss a TLB flush request while the requester sees
1464                  * the VCPU as outside of guest mode and not needing an IPI.
1465                  */
1466                 smp_store_mb(vcpu->mode, IN_GUEST_MODE);
1467 
1468                 kvm_mips_callbacks->vcpu_reenter(run, vcpu);
1469 
1470                 /*
1471                  * If FPU / MSA are enabled (i.e. the guest's FPU / MSA context
1472                  * is live), restore FCR31 / MSACSR.
1473                  *
1474                  * This should be before returning to the guest exception
1475                  * vector, as it may well cause an [MSA] FP exception if there
1476                  * are pending exception bits unmasked. (see
1477                  * kvm_mips_csr_die_notifier() for how that is handled).
1478                  */
1479                 if (kvm_mips_guest_has_fpu(&vcpu->arch) &&
1480                     read_c0_status() & ST0_CU1)
1481                         __kvm_restore_fcsr(&vcpu->arch);
1482 
1483                 if (kvm_mips_guest_has_msa(&vcpu->arch) &&
1484                     read_c0_config5() & MIPS_CONF5_MSAEN)
1485                         __kvm_restore_msacsr(&vcpu->arch);
1486         }
1487 
1488         /* Disable HTW before returning to guest or host */
1489         if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ))
1490                 htw_stop();
1491 
1492         return ret;
1493 }
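/*
 * Illustrative sketch, not part of mips.c: how the return value of the exit
 * path above is packed. This assumes the RESUME_* encoding from
 * asm/kvm_host.h (RESUME_GUEST == 0, RESUME_HOST == (1 << 1)), which keeps
 * the two low bits for resume flags so a negative errno can ride in the
 * remaining bits, as in "ret = (-EINTR << 2) | RESUME_HOST" above. The helper
 * names are hypothetical, and recovering the errno with an arithmetic right
 * shift is an assumption about how a caller would consume the value.
 */
static inline int resume_pack_sketch(int err)
{
        return (err << 2) | RESUME_HOST;        /* exit to host, carry errno */
}

static inline int resume_unpack_sketch(int ret)
{
        return (ret & RESUME_HOST) ? (ret >> 2) : 0;    /* recover the errno */
}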
1494 
1495 /* Enable FPU for guest and restore context */
1496 void kvm_own_fpu(struct kvm_vcpu *vcpu)
1497 {
1498         struct mips_coproc *cop0 = vcpu->arch.cop0;
1499         unsigned int sr, cfg5;
1500 
1501         preempt_disable();
1502 
1503         sr = kvm_read_c0_guest_status(cop0);
1504 
1505         /*
1506          * If MSA state is already live, it is undefined how it interacts with
1507          * FR=0 FPU state, and we don't want to hit reserved instruction
1508          * exceptions trying to save the MSA state later when CU=1 && FR=1, so
1509          * play it safe and save it first.
1510          *
1511          * In theory we shouldn't ever hit this case since kvm_lose_fpu() should
1512          * get called when guest CU1 is set, however we can't trust the guest
1513          * not to clobber the status register directly via the commpage.
1514          */
1515         if (cpu_has_msa && sr & ST0_CU1 && !(sr & ST0_FR) &&
1516             vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
1517                 kvm_lose_fpu(vcpu);
1518 
1519         /*
1520          * Enable FPU for guest
1521          * We set FR and FRE according to guest context
1522          */
1523         change_c0_status(ST0_CU1 | ST0_FR, sr);
1524         if (cpu_has_fre) {
1525                 cfg5 = kvm_read_c0_guest_config5(cop0);
1526                 change_c0_config5(MIPS_CONF5_FRE, cfg5);
1527         }
1528         enable_fpu_hazard();
1529 
1530         /* If guest FPU state not active, restore it now */
1531         if (!(vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) {
1532                 __kvm_restore_fpu(&vcpu->arch);
1533                 vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
1534                 trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU);
1535         } else {
1536                 trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_FPU);
1537         }
1538 
1539         preempt_enable();
1540 }
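/*
 * Illustrative sketch, not part of mips.c: the typical shape of a caller of
 * kvm_own_fpu(). The real callers are the per-implementation exit handlers
 * reached via kvm_mips_callbacks; only the functions and fields already used
 * above are assumed here, and the control flow is a simplified assumption.
 */
static int cop1_unusable_sketch(struct kvm_vcpu *vcpu)
{
        /*
         * The guest has set CU1 for itself, so it is allowed to use the FPU:
         * make the guest FPU context live in hardware and re-run the
         * faulting instruction.
         */
        if (kvm_mips_guest_has_fpu(&vcpu->arch) &&
            kvm_read_c0_guest_status(vcpu->arch.cop0) & ST0_CU1) {
                kvm_own_fpu(vcpu);
                return RESUME_GUEST;
        }

        /*
         * Otherwise the exception belongs to the guest (or the instruction
         * must be emulated) rather than being a reason to grant the FPU.
         */
        return RESUME_HOST;     /* placeholder for the real fallback path */
}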
1541 
1542 #ifdef CONFIG_CPU_HAS_MSA
1543 /* Enable MSA for guest and restore context */
1544 void kvm_own_msa(struct kvm_vcpu *vcpu)
1545 {
1546         struct mips_coproc *cop0 = vcpu->arch.cop0;
1547         unsigned int sr, cfg5;
1548 
1549         preempt_disable();
1550 
1551         /*
1552          * Enable FPU if enabled in guest, since we're restoring FPU context
1553          * anyway. We set FR and FRE according to guest context.
1554          */
1555         if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
1556                 sr = kvm_read_c0_guest_status(cop0);
1557 
1558                 /*
1559                  * If FR=0 FPU state is already live, it is undefined how it
1560                  * interacts with MSA state, so play it safe and save it first.
1561                  */
1562                 if (!(sr & ST0_FR) &&
1563                     (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU |
1564                                 KVM_MIPS_AUX_MSA)) == KVM_MIPS_AUX_FPU)
1565                         kvm_lose_fpu(vcpu);
1566 
1567                 change_c0_status(ST0_CU1 | ST0_FR, sr);
1568                 if (sr & ST0_CU1 && cpu_has_fre) {
1569                         cfg5 = kvm_read_c0_guest_config5(cop0);
1570                         change_c0_config5(MIPS_CONF5_FRE, cfg5);
1571                 }
1572         }
1573 
1574         /* Enable MSA for guest */
1575         set_c0_config5(MIPS_CONF5_MSAEN);
1576         enable_fpu_hazard();
1577 
1578         switch (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA)) {
1579         case KVM_MIPS_AUX_FPU:
1580                 /*
1581                  * Guest FPU state already loaded, only restore upper MSA state
1582                  */
1583                 __kvm_restore_msa_upper(&vcpu->arch);
1584                 vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
1585                 trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_MSA);
1586                 break;
1587         case 0:
1588                 /* Neither FPU nor MSA is live yet, restore full MSA state */
1589                 __kvm_restore_msa(&vcpu->arch);
1590                 vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
1591                 if (kvm_mips_guest_has_fpu(&vcpu->arch))
1592                         vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
1593                 trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE,
1594                               KVM_TRACE_AUX_FPU_MSA);
1595                 break;
1596         default:
1597                 trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_MSA);
1598                 break;
1599         }
1600 
1601         preempt_enable();
1602 }
1603 #endif
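/*
 * Illustrative sketch, not part of mips.c: why kvm_own_msa() only restores
 * the "upper" MSA halves when FPU state is already live. The MSA vector
 * registers overlay the FP registers, so with KVM_MIPS_AUX_FPU set the low
 * 64 bits of each vector are already in hardware and only the remaining bits
 * need loading; with nothing live the whole 128-bit state is loaded. The
 * helper name is hypothetical; the restore functions are the ones used above.
 */
static inline void msa_restore_sketch(struct kvm_vcpu *vcpu, bool fpu_live)
{
        if (fpu_live)
                __kvm_restore_msa_upper(&vcpu->arch);   /* bits 127:64 only */
        else
                __kvm_restore_msa(&vcpu->arch);         /* full vector state */
}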
1604 
1605 /* Drop FPU & MSA without saving it */
1606 void kvm_drop_fpu(struct kvm_vcpu *vcpu)
1607 {
1608         preempt_disable();
1609         if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
1610                 disable_msa();
1611                 trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_MSA);
1612                 vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_MSA;
1613         }
1614         if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
1615                 clear_c0_status(ST0_CU1 | ST0_FR);
1616                 trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_FPU);
1617                 vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
1618         }
1619         preempt_enable();
1620 }
1621 
1622 /* Save and disable FPU & MSA */
1623 void kvm_lose_fpu(struct kvm_vcpu *vcpu)
1624 {
1625         /*
1626          * With T&E, FPU & MSA get disabled in root context (hardware) when they
1627          * are disabled in guest context (software), but the register state in
1628          * the hardware may still be in use.
1629          * This is why we explicitly re-enable the hardware before saving.
1630          */
1631 
1632         preempt_disable();
1633         if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
1634                 if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
1635                         set_c0_config5(MIPS_CONF5_MSAEN);
1636                         enable_fpu_hazard();
1637                 }
1638 
1639                 __kvm_save_msa(&vcpu->arch);
1640                 trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU_MSA);
1641 
1642                 /* Disable MSA & FPU */
1643                 disable_msa();
1644                 if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
1645                         clear_c0_status(ST0_CU1 | ST0_FR);
1646                         disable_fpu_hazard();
1647                 }
1648                 vcpu->arch.aux_inuse &= ~(KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA);
1649         } else if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
1650                 if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
1651                         set_c0_status(ST0_CU1);
1652                         enable_fpu_hazard();
1653                 }
1654 
1655                 __kvm_save_fpu(&vcpu->arch);
1656                 vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
1657                 trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);
1658 
1659                 /* Disable FPU */
1660                 clear_c0_status(ST0_CU1 | ST0_FR);
1661                 disable_fpu_hazard();
1662         }
1663         preempt_enable();
1664 }
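/*
 * Illustrative sketch, not part of mips.c: the difference between the two
 * teardown paths above. The decision and the call sites are hypothetical
 * summaries, not the actual callers.
 */
static void fpu_teardown_sketch(struct kvm_vcpu *vcpu, bool state_still_valid)
{
        if (state_still_valid)
                kvm_lose_fpu(vcpu);     /* save guest FPU/MSA, then disable */
        else
                kvm_drop_fpu(vcpu);     /* discard without saving, e.g. after
                                         * the guest changed a mode bit
                                         * (Status.FR / Config5.FRE) that makes
                                         * the live register contents
                                         * unpredictable */
}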
1665 
1666 /*
1667  * Step over a specific ctc1 to FCSR and a specific ctcmsa to MSACSR which are
1668  * used to restore guest FCSR/MSACSR state and may trigger a "harmless" FP/MSAFP
1669  * exception if cause bits are set in the value being written.
1670  */
1671 static int kvm_mips_csr_die_notify(struct notifier_block *self,
1672                                    unsigned long cmd, void *ptr)
1673 {
1674         struct die_args *args = (struct die_args *)ptr;
1675         struct pt_regs *regs = args->regs;
1676         unsigned long pc;
1677 
1678         /* Only interested in FPE and MSAFPE */
1679         if (cmd != DIE_FP && cmd != DIE_MSAFP)
1680                 return NOTIFY_DONE;
1681 
1682         /* Return immediately if guest context isn't active */
1683         if (!(current->flags & PF_VCPU))
1684                 return NOTIFY_DONE;
1685 
1686         /* Should never get here from user mode */
1687         BUG_ON(user_mode(regs));
1688 
1689         pc = instruction_pointer(regs);
1690         switch (cmd) {
1691         case DIE_FP:
1692                 /* match 2nd instruction in __kvm_restore_fcsr */
1693                 if (pc != (unsigned long)&__kvm_restore_fcsr + 4)
1694                         return NOTIFY_DONE;
1695                 break;
1696         case DIE_MSAFP:
1697                 /* match 2nd/3rd instruction in __kvm_restore_msacsr */
1698                 if (!cpu_has_msa ||
1699                     pc < (unsigned long)&__kvm_restore_msacsr + 4 ||
1700                     pc > (unsigned long)&__kvm_restore_msacsr + 8)
1701                         return NOTIFY_DONE;
1702                 break;
1703         }
1704 
1705         /* Move PC forward a little and continue executing */
1706         instruction_pointer(regs) += 4;
1707 
1708         return NOTIFY_STOP;
1709 }
1710 
1711 static struct notifier_block kvm_mips_csr_die_notifier = {
1712         .notifier_call = kvm_mips_csr_die_notify,
1713 };
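/*
 * Illustrative sketch, not part of mips.c: when does the FCSR restore that
 * the notifier above protects actually trap? Assuming the architectural FCSR
 * layout (the five IEEE Enable bits at 11:7, the matching Cause bits at
 * 16:12, and Cause.E at bit 17), writing the guest value raises an immediate
 * FP exception if a cause bit is set together with its enable, or if Cause.E
 * is set. That is the "harmless" exception stepped over by advancing the PC
 * past the ctc1. The helper name is hypothetical.
 */
static inline bool fcsr_restore_traps_sketch(u32 fcsr)
{
        u32 enables = (fcsr >> 7) & 0x1f;       /* IEEE Enables */
        u32 causes = (fcsr >> 12) & 0x1f;       /* IEEE Causes, same order */

        return (causes & enables) || (fcsr & BIT(17));  /* Cause.E always traps */
}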
1714 
1715 static int __init kvm_mips_init(void)
1716 {
1717         int ret;
1718 
1719         if (cpu_has_mmid) {
1720                 pr_warn("KVM does not yet support MMIDs. KVM Disabled\n");
1721                 return -EOPNOTSUPP;
1722         }
1723 
1724         ret = kvm_mips_entry_setup();
1725         if (ret)
1726                 return ret;
1727 
1728         ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
1729 
1730         if (ret)
1731                 return ret;
1732 
1733         register_die_notifier(&kvm_mips_csr_die_notifier);
1734 
1735         return 0;
1736 }
1737 
1738 static void __exit kvm_mips_exit(void)
1739 {
1740         kvm_exit();
1741 
1742         unregister_die_notifier(&kvm_mips_csr_die_notifier);
1743 }
1744 
1745 module_init(kvm_mips_init);
1746 module_exit(kvm_mips_exit);
1747 
1748 EXPORT_TRACEPOINT_SYMBOL(kvm_exit);
