Lines Matching refs:run
86 vcpu->run->exit_reason = KVM_EXIT_INTR; in kvmppc_prepare_to_enter()
270 int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu) in kvmppc_emulate_mmio() argument
286 run->exit_reason = KVM_EXIT_MMIO; in kvmppc_emulate_mmio()
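The two matches above are the entry points that hand control back to userspace in the PowerPC KVM core: kvmppc_prepare_to_enter() bails out with KVM_EXIT_INTR when a signal is pending, and kvmppc_emulate_mmio() flags an access the kernel cannot complete itself with KVM_EXIT_MMIO. Every later match reads or writes the MMIO block of the mmap'ed struct kvm_run. For reference, its layout in the KVM UAPI (<linux/kvm.h>) is shown below; the struct name here is illustrative only, in the real header this is the anonymous mmio member of the exit-data union:

    #include <linux/types.h>

    struct kvm_mmio_exit_view {     /* illustrative name only */
        __u64 phys_addr;            /* guest physical address of the access */
        __u8  data[8];              /* the data, at most 8 bytes */
        __u32 len;                  /* access width in bytes: 1, 2, 4 or 8 */
        __u8  is_write;             /* 1 for a guest store, 0 for a load */
    };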
721 struct kvm_run *run) in kvmppc_complete_mmio_load() argument
725 if (run->mmio.len > sizeof(gpr)) { in kvmppc_complete_mmio_load()
726 printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len); in kvmppc_complete_mmio_load()
731 switch (run->mmio.len) { in kvmppc_complete_mmio_load()
732 case 8: gpr = *(u64 *)run->mmio.data; break; in kvmppc_complete_mmio_load()
733 case 4: gpr = *(u32 *)run->mmio.data; break; in kvmppc_complete_mmio_load()
734 case 2: gpr = *(u16 *)run->mmio.data; break; in kvmppc_complete_mmio_load()
735 case 1: gpr = *(u8 *)run->mmio.data; break; in kvmppc_complete_mmio_load()
738 switch (run->mmio.len) { in kvmppc_complete_mmio_load()
739 case 8: gpr = swab64(*(u64 *)run->mmio.data); break; in kvmppc_complete_mmio_load()
740 case 4: gpr = swab32(*(u32 *)run->mmio.data); break; in kvmppc_complete_mmio_load()
741 case 2: gpr = swab16(*(u16 *)run->mmio.data); break; in kvmppc_complete_mmio_load()
742 case 1: gpr = *(u8 *)run->mmio.data; break; in kvmppc_complete_mmio_load()
747 switch (run->mmio.len) { in kvmppc_complete_mmio_load()
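kvmppc_complete_mmio_load() runs once the data for a guest load is sitting in run->mmio.data, either because an in-kernel device supplied it (the kvm_io_bus_read() path further down) or because userspace filled it in before the next KVM_RUN, and moves it into the target guest register. The two switch statements above are the same width dispatch with and without a byte swap, for the case where the host byte order does not match that of the emulated access; the remaining switch at line 747 is presumably the sign-extension step for sign-extending loads (only its first line is visible in this listing). A condensed userspace-style sketch of the width/endianness part, where host_swabbed stands in for whatever per-vcpu flag the kernel consults (its name is not visible here):

    #include <stdint.h>

    static uint64_t mmio_data_to_gpr(const uint8_t *data, uint32_t len,
                                     int host_swabbed)
    {
        uint64_t gpr = 0;

        if (!host_swabbed) {
            /* Data is already in the byte order the guest expects. */
            switch (len) {
            case 8: gpr = *(const uint64_t *)data; break;
            case 4: gpr = *(const uint32_t *)data; break;
            case 2: gpr = *(const uint16_t *)data; break;
            case 1: gpr = *(const uint8_t *)data;  break;
            }
        } else {
            /* Same widths, but swap to undo the host/guest mismatch. */
            switch (len) {
            case 8: gpr = __builtin_bswap64(*(const uint64_t *)data); break;
            case 4: gpr = __builtin_bswap32(*(const uint32_t *)data); break;
            case 2: gpr = __builtin_bswap16(*(const uint16_t *)data); break;
            case 1: gpr = *(const uint8_t *)data; break;  /* nothing to swap */
            }
        }
        return gpr;
    }

(The direct pointer casts mirror what the kernel does on run->mmio.data, which is suitably aligned; __builtin_bswap* plays the role of the kernel's swab64/swab32/swab16.)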
785 int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu, in kvmppc_handle_load() argument
799 if (bytes > sizeof(run->mmio.data)) { in kvmppc_handle_load()
801 run->mmio.len); in kvmppc_handle_load()
804 run->mmio.phys_addr = vcpu->arch.paddr_accessed; in kvmppc_handle_load()
805 run->mmio.len = bytes; in kvmppc_handle_load()
806 run->mmio.is_write = 0; in kvmppc_handle_load()
816 ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr, in kvmppc_handle_load()
817 bytes, &run->mmio.data); in kvmppc_handle_load()
822 kvmppc_complete_mmio_load(vcpu, run); in kvmppc_handle_load()
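kvmppc_handle_load() describes the access in run->mmio (phys_addr, len, is_write = 0) and first offers it to in-kernel device models via kvm_io_bus_read(); only when no in-kernel device claims the address is the access deferred to userspace, which further up the call chain ends in the run->exit_reason = KVM_EXIT_MMIO assignment shown at line 286. A sketch of that decision; everything that is not a kvm_run field (the enum, the function name, the in_kernel_hit parameter) is illustrative:

    #include <stdint.h>
    #include <linux/kvm.h>

    enum load_disposition { LOAD_DONE_IN_KERNEL, LOAD_NEEDS_USERSPACE };

    /* in_kernel_hit models a successful kvm_io_bus_read(), which also
     * fills run->mmio.data with the device's reply. */
    static enum load_disposition start_guest_load(struct kvm_run *run,
                                                  uint64_t paddr,
                                                  uint32_t bytes,
                                                  int in_kernel_hit)
    {
        run->mmio.phys_addr = paddr;
        run->mmio.len       = bytes;
        run->mmio.is_write  = 0;

        if (in_kernel_hit)
            /* Data is already available: the load can be completed in
             * the kernel (kvmppc_complete_mmio_load) and the guest
             * resumed without a userspace round trip. */
            return LOAD_DONE_IN_KERNEL;

        /* Otherwise the caller exits to the VMM with KVM_EXIT_MMIO. */
        return LOAD_NEEDS_USERSPACE;
    }

kvmppc_handle_loads() below is the same thing for sign-extending load instructions; it simply flags the access so the result is sign-extended when the load completes.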
832 int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu, in kvmppc_handle_loads() argument
839 r = kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian); in kvmppc_handle_loads()
844 int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu, in kvmppc_handle_store() argument
847 void *data = run->mmio.data; in kvmppc_handle_store()
858 if (bytes > sizeof(run->mmio.data)) { in kvmppc_handle_store()
860 run->mmio.len); in kvmppc_handle_store()
863 run->mmio.phys_addr = vcpu->arch.paddr_accessed; in kvmppc_handle_store()
864 run->mmio.len = bytes; in kvmppc_handle_store()
865 run->mmio.is_write = 1; in kvmppc_handle_store()
888 ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr, in kvmppc_handle_store()
889 bytes, &run->mmio.data); in kvmppc_handle_store()
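kvmppc_handle_store() is the mirror image: it copies the source register value into run->mmio.data (byte-swapping when needed) and sets is_write = 1 before offering the access to in-kernel devices with kvm_io_bus_write(). When neither the load nor the store path can finish in the kernel, the VMM sees KVM_EXIT_MMIO and services the access against its own device models. A hedged sketch of that userspace side; my_device_read() and my_device_write() are hypothetical device-model hooks, not KVM API:

    #include <stdint.h>
    #include <string.h>
    #include <linux/kvm.h>

    /* Hypothetical device-model hooks provided by the VMM. */
    uint64_t my_device_read(uint64_t addr, uint32_t len);
    void     my_device_write(uint64_t addr, const void *data, uint32_t len);

    /* Called when ioctl(vcpu_fd, KVM_RUN, 0) returned with
     * run->exit_reason == KVM_EXIT_MMIO. */
    static void handle_mmio_exit(struct kvm_run *run)
    {
        if (run->mmio.is_write) {
            /* Guest store: the kernel already placed the value in
             * run->mmio.data (see kvmppc_handle_store above). */
            my_device_write(run->mmio.phys_addr, run->mmio.data,
                            run->mmio.len);
        } else {
            /* Guest load: fill run->mmio.data; byte-order details are
             * glossed over here (compare the swab handling above). */
            uint64_t val = my_device_read(run->mmio.phys_addr,
                                          run->mmio.len);
            memcpy(run->mmio.data, &val, run->mmio.len);
        }
    }

For a store nothing more is needed; for a load, whatever is written into run->mmio.data here is exactly what kvmppc_complete_mmio_load() consumes on the next KVM_RUN, via the kvm_arch_vcpu_ioctl_run() match below.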
999 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) in kvm_arch_vcpu_ioctl_run() argument
1009 kvmppc_complete_mmio_load(vcpu, run); in kvm_arch_vcpu_ioctl_run()
1012 u64 *gprs = run->osi.gprs; in kvm_arch_vcpu_ioctl_run()
1021 kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret); in kvm_arch_vcpu_ioctl_run()
1023 kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]); in kvm_arch_vcpu_ioctl_run()
1027 kvmppc_set_epr(vcpu, run->epr.epr); in kvm_arch_vcpu_ioctl_run()
1032 r = kvmppc_vcpu_run(run, vcpu); in kvm_arch_vcpu_ioctl_run()
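kvm_arch_vcpu_ioctl_run() is the kernel's entry point for every KVM_RUN. The matches show it folding the results of the previous userspace exit back into guest state before calling kvmppc_vcpu_run(): a pending MMIO load is completed from run->mmio.data, OSI GPRs are copied back from run->osi.gprs, a PAPR hypercall's return value and output arguments are placed in r3 and r4 onward, and run->epr.epr is written to the EPR. A minimal sketch of the userspace loop that drives this, assuming vcpu_fd and the mmap'ed run area are already set up and reusing the handle_mmio_exit() sketch above:

    #include <errno.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int run_vcpu(int vcpu_fd, struct kvm_run *run)
    {
        for (;;) {
            if (ioctl(vcpu_fd, KVM_RUN, 0) < 0) {
                if (errno == EINTR)
                    /* KVM_EXIT_INTR: a signal stopped the guest (the
                     * kvmppc_prepare_to_enter() match at line 86);
                     * just re-enter. */
                    continue;
                return -1;
            }

            switch (run->exit_reason) {
            case KVM_EXIT_MMIO:
                /* Service the access, then re-enter; for a load the
                 * kernel finishes it from run->mmio.data on the way
                 * back in (kvm_arch_vcpu_ioctl_run above). */
                handle_mmio_exit(run);
                break;
            default:
                fprintf(stderr, "unhandled exit %u\n", run->exit_reason);
                return -1;
            }
        }
    }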