Lines matching refs: assigned_dev — one entry per occurrence of the identifier in the KVM PCI device-assignment code, listed as: file line number, matched source line, enclosing function.
66 *assigned_dev, int irq) in find_index_from_host_irq()
71 host_msix_entries = assigned_dev->host_msix_entries; in find_index_from_host_irq()
74 for (i = 0; i < assigned_dev->entries_nr; i++) in find_index_from_host_irq()
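The first hits fall in find_index_from_host_irq(), the helper that maps a host MSI-X interrupt number back to its slot in the device's host_msix_entries[] array. A minimal sketch of the whole helper, rebuilt around the matched lines; the index bookkeeping, the miss warning, and its wording are assumptions, not quoted source:

    static int find_index_from_host_irq(struct kvm_assigned_dev_kernel
                                        *assigned_dev, int irq)
    {
            int i, index = -1;      /* -1 = no matching entry (assumed) */
            struct msix_entry *host_msix_entries;

            host_msix_entries = assigned_dev->host_msix_entries;

            /* linear scan: the device has at most entries_nr MSI-X vectors */
            for (i = 0; i < assigned_dev->entries_nr; i++)
                    if (irq == host_msix_entries[i].vector) {
                            index = i;
                            break;
                    }
            if (index < 0)
                    printk(KERN_WARNING "Failed to find MSI-X entry!\n");

            return index;
    }

Callers (the MSI-X handlers below) treat a negative return as "not our vector" and skip injection.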
87 struct kvm_assigned_dev_kernel *assigned_dev = dev_id; in kvm_assigned_dev_intx() local
90 spin_lock(&assigned_dev->intx_lock); in kvm_assigned_dev_intx()
91 if (pci_check_and_mask_intx(assigned_dev->dev)) { in kvm_assigned_dev_intx()
92 assigned_dev->host_irq_disabled = true; in kvm_assigned_dev_intx()
96 spin_unlock(&assigned_dev->intx_lock); in kvm_assigned_dev_intx()
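Lines 87-96 are the hard-IRQ half of the INTx handler, used when the device supports PCI 2.3 INTx masking (KVM_DEV_ASSIGN_PCI_2_3). pci_check_and_mask_intx() atomically checks whether this device raised the interrupt and masks it at the device if so, which lets the host IRQ line stay enabled while the guest services the interrupt. A sketch with the return paths filled in; the IRQ_WAKE_THREAD/IRQ_NONE plumbing is an assumption:

    static irqreturn_t kvm_assigned_dev_intx(int irq, void *dev_id)
    {
            struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
            irqreturn_t ret = IRQ_WAKE_THREAD;

            spin_lock(&assigned_dev->intx_lock);
            if (pci_check_and_mask_intx(assigned_dev->dev)) {
                    /* ours: now masked at the device, remember that */
                    assigned_dev->host_irq_disabled = true;
            } else {
                    ret = IRQ_NONE;  /* shared line, someone else's interrupt */
            }
            spin_unlock(&assigned_dev->intx_lock);

            return ret;
    }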
102 kvm_assigned_dev_raise_guest_irq(struct kvm_assigned_dev_kernel *assigned_dev, in kvm_assigned_dev_raise_guest_irq() argument
105 if (unlikely(assigned_dev->irq_requested_type & in kvm_assigned_dev_raise_guest_irq()
107 spin_lock(&assigned_dev->intx_mask_lock); in kvm_assigned_dev_raise_guest_irq()
108 if (!(assigned_dev->flags & KVM_DEV_ASSIGN_MASK_INTX)) in kvm_assigned_dev_raise_guest_irq()
109 kvm_set_irq(assigned_dev->kvm, in kvm_assigned_dev_raise_guest_irq()
110 assigned_dev->irq_source_id, vector, 1, in kvm_assigned_dev_raise_guest_irq()
112 spin_unlock(&assigned_dev->intx_mask_lock); in kvm_assigned_dev_raise_guest_irq()
114 kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id, in kvm_assigned_dev_raise_guest_irq()
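kvm_assigned_dev_raise_guest_irq() is the common injection path for the threaded handlers. For INTx it must honor userspace masking: the KVM_DEV_ASSIGN_MASK_INTX check happens under intx_mask_lock so injection cannot race with the mask ioctl handled further down; MSI and MSI-X vectors are injected unconditionally. A sketch; the KVM_DEV_IRQ_GUEST_INTX constant on the unmatched line 106 and the else-branch structure are inferred:

    static void kvm_assigned_dev_raise_guest_irq(
                    struct kvm_assigned_dev_kernel *assigned_dev, int vector)
    {
            if (unlikely(assigned_dev->irq_requested_type &
                         KVM_DEV_IRQ_GUEST_INTX)) {
                    spin_lock(&assigned_dev->intx_mask_lock);
                    if (!(assigned_dev->flags & KVM_DEV_ASSIGN_MASK_INTX))
                            kvm_set_irq(assigned_dev->kvm,
                                        assigned_dev->irq_source_id, vector, 1,
                                        false);
                    spin_unlock(&assigned_dev->intx_mask_lock);
            } else {
                    kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
                                vector, 1, false);
            }
    }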
120 struct kvm_assigned_dev_kernel *assigned_dev = dev_id; in kvm_assigned_dev_thread_intx() local
122 if (!(assigned_dev->flags & KVM_DEV_ASSIGN_PCI_2_3)) { in kvm_assigned_dev_thread_intx()
123 spin_lock_irq(&assigned_dev->intx_lock); in kvm_assigned_dev_thread_intx()
125 assigned_dev->host_irq_disabled = true; in kvm_assigned_dev_thread_intx()
126 spin_unlock_irq(&assigned_dev->intx_lock); in kvm_assigned_dev_thread_intx()
129 kvm_assigned_dev_raise_guest_irq(assigned_dev, in kvm_assigned_dev_thread_intx()
130 assigned_dev->guest_irq); in kvm_assigned_dev_thread_intx()
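The threaded INTx handler. Devices without PCI 2.3 masking cannot be masked at the device, so the line itself has to stay disabled until the guest acks. The unmatched line 124, which does not mention assigned_dev, is plausibly a disable_irq_nosync(irq) call; that call and the return value are assumptions in this sketch:

    static irqreturn_t kvm_assigned_dev_thread_intx(int irq, void *dev_id)
    {
            struct kvm_assigned_dev_kernel *assigned_dev = dev_id;

            if (!(assigned_dev->flags & KVM_DEV_ASSIGN_PCI_2_3)) {
                    spin_lock_irq(&assigned_dev->intx_lock);
                    disable_irq_nosync(irq);  /* assumed: unmatched line 124 */
                    assigned_dev->host_irq_disabled = true;
                    spin_unlock_irq(&assigned_dev->intx_lock);
            }

            kvm_assigned_dev_raise_guest_irq(assigned_dev,
                                             assigned_dev->guest_irq);

            return IRQ_HANDLED;
    }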
173 struct kvm_assigned_dev_kernel *assigned_dev = dev_id; in kvm_assigned_dev_msi() local
174 int ret = kvm_set_irq_inatomic(assigned_dev->kvm, in kvm_assigned_dev_msi()
175 assigned_dev->irq_source_id, in kvm_assigned_dev_msi()
176 assigned_dev->guest_irq, 1); in kvm_assigned_dev_msi()
182 struct kvm_assigned_dev_kernel *assigned_dev = dev_id; in kvm_assigned_dev_thread_msi() local
184 kvm_assigned_dev_raise_guest_irq(assigned_dev, in kvm_assigned_dev_thread_msi()
185 assigned_dev->guest_irq); in kvm_assigned_dev_thread_msi()
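The MSI pair. The hard handler tries a lock-free injection with kvm_set_irq_inatomic(); if that cannot complete in atomic context it returns -EWOULDBLOCK and the threaded handler retries via the ordinary kvm_assigned_dev_raise_guest_irq() path. The return-value mapping is an assumption:

    static irqreturn_t kvm_assigned_dev_msi(int irq, void *dev_id)
    {
            struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
            int ret = kvm_set_irq_inatomic(assigned_dev->kvm,
                                           assigned_dev->irq_source_id,
                                           assigned_dev->guest_irq, 1);

            /* defer to the thread only when atomic injection must block */
            return unlikely(ret == -EWOULDBLOCK) ? IRQ_WAKE_THREAD : IRQ_HANDLED;
    }

    static irqreturn_t kvm_assigned_dev_thread_msi(int irq, void *dev_id)
    {
            struct kvm_assigned_dev_kernel *assigned_dev = dev_id;

            kvm_assigned_dev_raise_guest_irq(assigned_dev,
                                             assigned_dev->guest_irq);

            return IRQ_HANDLED;
    }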
192 struct kvm_assigned_dev_kernel *assigned_dev = dev_id; in kvm_assigned_dev_msix() local
193 int index = find_index_from_host_irq(assigned_dev, irq); in kvm_assigned_dev_msix()
198 vector = assigned_dev->guest_msix_entries[index].vector; in kvm_assigned_dev_msix()
199 ret = kvm_set_irq_inatomic(assigned_dev->kvm, in kvm_assigned_dev_msix()
200 assigned_dev->irq_source_id, in kvm_assigned_dev_msix()
209 struct kvm_assigned_dev_kernel *assigned_dev = dev_id; in kvm_assigned_dev_thread_msix() local
210 int index = find_index_from_host_irq(assigned_dev, irq); in kvm_assigned_dev_thread_msix()
214 vector = assigned_dev->guest_msix_entries[index].vector; in kvm_assigned_dev_thread_msix()
215 kvm_assigned_dev_raise_guest_irq(assigned_dev, vector); in kvm_assigned_dev_thread_msix()
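The MSI-X pair is the same two-stage scheme, except the host vector is first translated to the guest vector through find_index_from_host_irq() and the per-entry guest_msix_entries[] table. Sketch; the index guard and return values are inferred:

    static irqreturn_t kvm_assigned_dev_msix(int irq, void *dev_id)
    {
            struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
            int index = find_index_from_host_irq(assigned_dev, irq);
            u32 vector;
            int ret = 0;

            if (index >= 0) {
                    vector = assigned_dev->guest_msix_entries[index].vector;
                    ret = kvm_set_irq_inatomic(assigned_dev->kvm,
                                               assigned_dev->irq_source_id,
                                               vector, 1);
            }

            return unlikely(ret == -EWOULDBLOCK) ? IRQ_WAKE_THREAD : IRQ_HANDLED;
    }

    static irqreturn_t kvm_assigned_dev_thread_msix(int irq, void *dev_id)
    {
            struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
            int index = find_index_from_host_irq(assigned_dev, irq);

            if (index >= 0)
                    kvm_assigned_dev_raise_guest_irq(assigned_dev,
                            assigned_dev->guest_msix_entries[index].vector);

            return IRQ_HANDLED;
    }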
258 struct kvm_assigned_dev_kernel *assigned_dev) in deassign_guest_irq() argument
260 if (assigned_dev->ack_notifier.gsi != -1) in deassign_guest_irq()
262 &assigned_dev->ack_notifier); in deassign_guest_irq()
264 kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id, in deassign_guest_irq()
265 assigned_dev->guest_irq, 0, false); in deassign_guest_irq()
267 if (assigned_dev->irq_source_id != -1) in deassign_guest_irq()
268 kvm_free_irq_source_id(kvm, assigned_dev->irq_source_id); in deassign_guest_irq()
269 assigned_dev->irq_source_id = -1; in deassign_guest_irq()
270 assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_GUEST_MASK); in deassign_guest_irq()
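deassign_guest_irq() tears down the guest side in reverse order of setup: drop the ack notifier, lower the guest line, then release the IRQ source id. Nearly every line is visible above; only the kvm_unregister_irq_ack_notifier() call on the unmatched line 261 is inferred:

    static void deassign_guest_irq(struct kvm *kvm,
                                   struct kvm_assigned_dev_kernel *assigned_dev)
    {
            if (assigned_dev->ack_notifier.gsi != -1)
                    kvm_unregister_irq_ack_notifier(kvm,
                                                    &assigned_dev->ack_notifier);

            /* make sure the line is low before the source id goes away */
            kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
                        assigned_dev->guest_irq, 0, false);

            if (assigned_dev->irq_source_id != -1)
                    kvm_free_irq_source_id(kvm, assigned_dev->irq_source_id);
            assigned_dev->irq_source_id = -1;
            assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_GUEST_MASK);
    }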
275 struct kvm_assigned_dev_kernel *assigned_dev) in deassign_host_irq() argument
287 if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) { in deassign_host_irq()
289 for (i = 0; i < assigned_dev->entries_nr; i++) in deassign_host_irq()
290 disable_irq(assigned_dev->host_msix_entries[i].vector); in deassign_host_irq()
292 for (i = 0; i < assigned_dev->entries_nr; i++) in deassign_host_irq()
293 free_irq(assigned_dev->host_msix_entries[i].vector, in deassign_host_irq()
294 assigned_dev); in deassign_host_irq()
296 assigned_dev->entries_nr = 0; in deassign_host_irq()
297 kfree(assigned_dev->host_msix_entries); in deassign_host_irq()
298 kfree(assigned_dev->guest_msix_entries); in deassign_host_irq()
299 pci_disable_msix(assigned_dev->dev); in deassign_host_irq()
302 if ((assigned_dev->irq_requested_type & in deassign_host_irq()
304 (assigned_dev->flags & KVM_DEV_ASSIGN_PCI_2_3)) { in deassign_host_irq()
305 spin_lock_irq(&assigned_dev->intx_lock); in deassign_host_irq()
306 pci_intx(assigned_dev->dev, false); in deassign_host_irq()
307 spin_unlock_irq(&assigned_dev->intx_lock); in deassign_host_irq()
308 synchronize_irq(assigned_dev->host_irq); in deassign_host_irq()
310 disable_irq(assigned_dev->host_irq); in deassign_host_irq()
312 free_irq(assigned_dev->host_irq, assigned_dev); in deassign_host_irq()
314 if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSI) in deassign_host_irq()
315 pci_disable_msi(assigned_dev->dev); in deassign_host_irq()
318 assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_HOST_MASK); in deassign_host_irq()
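deassign_host_irq() is careful about ordering: every vector is disabled before any is freed, so a threaded handler cannot run against a half-torn-down device. For PCI 2.3 INTx the line was left enabled, so instead of disable_irq() it masks at the device (pci_intx(..., false)) and then waits out in-flight handlers with synchronize_irq(). A sketch of the branch structure; the KVM_DEV_IRQ_HOST_INTX constant and the if/else skeleton between the matched lines are inferred:

    static void deassign_host_irq(struct kvm *kvm,
                                  struct kvm_assigned_dev_kernel *assigned_dev)
    {
            if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
                    int i;

                    /* disable everything first, then free: no handler may
                     * still be running once free_irq() returns */
                    for (i = 0; i < assigned_dev->entries_nr; i++)
                            disable_irq(assigned_dev->host_msix_entries[i].vector);

                    for (i = 0; i < assigned_dev->entries_nr; i++)
                            free_irq(assigned_dev->host_msix_entries[i].vector,
                                     assigned_dev);

                    assigned_dev->entries_nr = 0;
                    kfree(assigned_dev->host_msix_entries);
                    kfree(assigned_dev->guest_msix_entries);
                    pci_disable_msix(assigned_dev->dev);
            } else {
                    /* MSI or INTx */
                    if ((assigned_dev->irq_requested_type &
                         KVM_DEV_IRQ_HOST_INTX) &&
                        (assigned_dev->flags & KVM_DEV_ASSIGN_PCI_2_3)) {
                            spin_lock_irq(&assigned_dev->intx_lock);
                            pci_intx(assigned_dev->dev, false);
                            spin_unlock_irq(&assigned_dev->intx_lock);
                            synchronize_irq(assigned_dev->host_irq);
                    } else {
                            disable_irq(assigned_dev->host_irq);
                    }

                    free_irq(assigned_dev->host_irq, assigned_dev);

                    if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSI)
                            pci_disable_msi(assigned_dev->dev);
            }

            assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_HOST_MASK);
    }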
322 struct kvm_assigned_dev_kernel *assigned_dev, in kvm_deassign_irq() argument
330 if (!assigned_dev->irq_requested_type) in kvm_deassign_irq()
337 deassign_host_irq(kvm, assigned_dev); in kvm_deassign_irq()
339 deassign_guest_irq(kvm, assigned_dev); in kvm_deassign_irq()
345 struct kvm_assigned_dev_kernel *assigned_dev) in kvm_free_assigned_irq() argument
347 kvm_deassign_irq(kvm, assigned_dev, assigned_dev->irq_requested_type); in kvm_free_assigned_irq()
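kvm_deassign_irq() splits the requested type into host and guest halves and tears each down; kvm_free_assigned_irq() is the wrapper that deasserts whatever is currently requested. The early-return guards and any mask names beyond the two visible above are assumptions:

    static int kvm_deassign_irq(struct kvm *kvm,
                                struct kvm_assigned_dev_kernel *assigned_dev,
                                unsigned long irq_requested_type)
    {
            unsigned long guest_irq_type, host_irq_type;

            if (!irqchip_in_kernel(kvm))            /* assumed guard */
                    return -EINVAL;
            if (!assigned_dev->irq_requested_type)  /* nothing to deassign */
                    return -ENXIO;

            host_irq_type = irq_requested_type & KVM_DEV_IRQ_HOST_MASK;
            guest_irq_type = irq_requested_type & KVM_DEV_IRQ_GUEST_MASK;

            if (host_irq_type)
                    deassign_host_irq(kvm, assigned_dev);
            if (guest_irq_type)
                    deassign_guest_irq(kvm, assigned_dev);

            return 0;
    }

    static void kvm_free_assigned_irq(struct kvm *kvm,
                                      struct kvm_assigned_dev_kernel *assigned_dev)
    {
            kvm_deassign_irq(kvm, assigned_dev, assigned_dev->irq_requested_type);
    }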
352 *assigned_dev) in kvm_free_assigned_device()
354 kvm_free_assigned_irq(kvm, assigned_dev); in kvm_free_assigned_device()
356 pci_reset_function(assigned_dev->dev); in kvm_free_assigned_device()
357 if (pci_load_and_free_saved_state(assigned_dev->dev, in kvm_free_assigned_device()
358 &assigned_dev->pci_saved_state)) in kvm_free_assigned_device()
360 __func__, dev_name(&assigned_dev->dev->dev)); in kvm_free_assigned_device()
362 pci_restore_state(assigned_dev->dev); in kvm_free_assigned_device()
364 pci_clear_dev_assigned(assigned_dev->dev); in kvm_free_assigned_device()
366 pci_release_regions(assigned_dev->dev); in kvm_free_assigned_device()
367 pci_disable_device(assigned_dev->dev); in kvm_free_assigned_device()
368 pci_dev_put(assigned_dev->dev); in kvm_free_assigned_device()
370 list_del(&assigned_dev->list); in kvm_free_assigned_device()
371 kfree(assigned_dev); in kvm_free_assigned_device()
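kvm_free_assigned_device() is the full unwind, in strict reverse order of assignment: free the IRQs, reset the function, restore the PCI config state saved at assignment time, clear the "assigned" marker, release the BARs, disable the device, drop the pci_dev reference, and finally unlink and free the tracking structure. Only the wording of the warning around line 359 is inferred:

    static void kvm_free_assigned_device(struct kvm *kvm,
                                         struct kvm_assigned_dev_kernel
                                         *assigned_dev)
    {
            kvm_free_assigned_irq(kvm, assigned_dev);

            pci_reset_function(assigned_dev->dev);
            if (pci_load_and_free_saved_state(assigned_dev->dev,
                                              &assigned_dev->pci_saved_state))
                    /* assumed wording of the warning */
                    printk(KERN_INFO "%s: couldn't reload %s saved state\n",
                           __func__, dev_name(&assigned_dev->dev->dev));
            else
                    pci_restore_state(assigned_dev->dev);

            pci_clear_dev_assigned(assigned_dev->dev);

            pci_release_regions(assigned_dev->dev);
            pci_disable_device(assigned_dev->dev);
            pci_dev_put(assigned_dev->dev);

            list_del(&assigned_dev->list);
            kfree(assigned_dev);
    }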
377 struct kvm_assigned_dev_kernel *assigned_dev; in kvm_free_all_assigned_devices() local
380 assigned_dev = list_entry(ptr, in kvm_free_all_assigned_devices()
384 kvm_free_assigned_device(kvm, assigned_dev); in kvm_free_all_assigned_devices()
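VM teardown walks the per-VM list of assigned devices with the _safe iterator, since each kvm_free_assigned_device() call unlinks and frees the entry being visited. The list head name kvm->arch.assigned_dev_head and any locking around the walk are assumptions:

    void kvm_free_all_assigned_devices(struct kvm *kvm)
    {
            struct list_head *ptr, *ptr2;
            struct kvm_assigned_dev_kernel *assigned_dev;

            list_for_each_safe(ptr, ptr2, &kvm->arch.assigned_dev_head) {
                    assigned_dev = list_entry(ptr,
                                              struct kvm_assigned_dev_kernel,
                                              list);

                    kvm_free_assigned_device(kvm, assigned_dev);
            }
    }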
706 struct kvm_assigned_pci_dev *assigned_dev) in kvm_vm_ioctl_assign_device() argument
712 if (!(assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU)) in kvm_vm_ioctl_assign_device()
719 assigned_dev->assigned_dev_id); in kvm_vm_ioctl_assign_device()
733 dev = pci_get_domain_bus_and_slot(assigned_dev->segnr, in kvm_vm_ioctl_assign_device()
734 assigned_dev->busnr, in kvm_vm_ioctl_assign_device()
735 assigned_dev->devfn); in kvm_vm_ioctl_assign_device()
772 assigned_dev->flags &= ~KVM_DEV_ASSIGN_PCI_2_3; in kvm_vm_ioctl_assign_device()
774 match->assigned_dev_id = assigned_dev->assigned_dev_id; in kvm_vm_ioctl_assign_device()
775 match->host_segnr = assigned_dev->segnr; in kvm_vm_ioctl_assign_device()
776 match->host_busnr = assigned_dev->busnr; in kvm_vm_ioctl_assign_device()
777 match->host_devfn = assigned_dev->devfn; in kvm_vm_ioctl_assign_device()
778 match->flags = assigned_dev->flags; in kvm_vm_ioctl_assign_device()
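kvm_vm_ioctl_assign_device() handles the assignment ioctl. The matched lines show the shape: refuse non-IOMMU assignment, look up the user-supplied assigned_dev_id to reject duplicates, locate the pci_dev by segment/bus/devfn, clear KVM_DEV_ASSIGN_PCI_2_3 when the device cannot mask INTx, then copy the identifying fields into the kernel-side match structure. A heavily condensed sketch; kvm_find_assigned_dev(), pci_intx_mask_supported(), the error handling, and everything between the matched lines are assumptions:

    static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
                                          struct kvm_assigned_pci_dev *assigned_dev)
    {
            struct kvm_assigned_dev_kernel *match;
            struct pci_dev *dev;

            /* only IOMMU-backed assignment is supported */
            if (!(assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU))
                    return -EINVAL;

            /* reject a duplicate id, then allocate the tracking struct
             * (lookup helper name assumed; real code also takes kvm->lock) */
            if (kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
                                      assigned_dev->assigned_dev_id))
                    return -EEXIST;
            match = kzalloc(sizeof(*match), GFP_KERNEL);
            if (!match)
                    return -ENOMEM;

            dev = pci_get_domain_bus_and_slot(assigned_dev->segnr,
                                              assigned_dev->busnr,
                                              assigned_dev->devfn);
            if (!dev) {
                    kfree(match);
                    return -EINVAL;
            }

            /* enable device, claim regions, reset, save config state (elided) */

            if (!pci_intx_mask_supported(dev))      /* assumed probe */
                    assigned_dev->flags &= ~KVM_DEV_ASSIGN_PCI_2_3;

            match->assigned_dev_id = assigned_dev->assigned_dev_id;
            match->host_segnr = assigned_dev->segnr;
            match->host_busnr = assigned_dev->busnr;
            match->host_devfn = assigned_dev->devfn;
            match->flags = assigned_dev->flags;
            match->dev = dev;

            list_add(&match->list, &kvm->arch.assigned_dev_head);
            return 0;
    }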
819 struct kvm_assigned_pci_dev *assigned_dev) in kvm_vm_ioctl_deassign_device() argument
827 assigned_dev->assigned_dev_id); in kvm_vm_ioctl_deassign_device()
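Deassignment is the mirror ioctl: find the tracking structure by id and run the full free path. Only the id argument is visible above; the lookup helper, the locking, and the IOMMU detach (kvm_deassign_device()) are assumptions:

    static int kvm_vm_ioctl_deassign_device(struct kvm *kvm,
                                            struct kvm_assigned_pci_dev *assigned_dev)
    {
            struct kvm_assigned_dev_kernel *match;
            int r = 0;

            mutex_lock(&kvm->lock);

            match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
                                          assigned_dev->assigned_dev_id);
            if (!match) {
                    r = -ENODEV;
                    goto out;
            }

            kvm_deassign_device(kvm, match->dev);   /* detach from IOMMU (assumed) */
            kvm_free_assigned_device(kvm, match);

    out:
            mutex_unlock(&kvm->lock);
            return r;
    }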
926 struct kvm_assigned_pci_dev *assigned_dev) in kvm_vm_ioctl_set_pci_irq_mask() argument
934 assigned_dev->assigned_dev_id); in kvm_vm_ioctl_set_pci_irq_mask()
943 match->flags |= assigned_dev->flags & KVM_DEV_ASSIGN_MASK_INTX; in kvm_vm_ioctl_set_pci_irq_mask()
946 if (assigned_dev->flags & KVM_DEV_ASSIGN_MASK_INTX) { in kvm_vm_ioctl_set_pci_irq_mask()
953 } else if (!(assigned_dev->flags & KVM_DEV_ASSIGN_PCI_2_3)) { in kvm_vm_ioctl_set_pci_irq_mask()
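kvm_vm_ioctl_set_pci_irq_mask() backs the INTx-mask ioctl. Under intx_mask_lock it copies the MASK_INTX bit from userspace; masking lowers the guest line immediately (device-level masking happens lazily on the next host interrupt), while unmasking on a non-PCI-2.3 device must re-enable the host IRQ line if a previous interrupt left it disabled. A sketch; the lookup, the locking, and the host_irq_disabled re-enable path are partly inferred:

    static int kvm_vm_ioctl_set_pci_irq_mask(struct kvm *kvm,
                                             struct kvm_assigned_pci_dev *assigned_dev)
    {
            struct kvm_assigned_dev_kernel *match;
            int r = 0;

            mutex_lock(&kvm->lock);

            match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
                                          assigned_dev->assigned_dev_id);
            if (!match) {
                    r = -ENODEV;
                    goto out;
            }

            spin_lock(&match->intx_mask_lock);

            match->flags &= ~KVM_DEV_ASSIGN_MASK_INTX;
            match->flags |= assigned_dev->flags & KVM_DEV_ASSIGN_MASK_INTX;

            if (match->irq_requested_type & KVM_DEV_IRQ_GUEST_INTX) {
                    if (assigned_dev->flags & KVM_DEV_ASSIGN_MASK_INTX) {
                            /* masked: drop the guest line now; the device
                             * itself gets masked on the next host IRQ */
                            kvm_set_irq(match->kvm, match->irq_source_id,
                                        match->guest_irq, 0, false);
                    } else if (!(assigned_dev->flags & KVM_DEV_ASSIGN_PCI_2_3)) {
                            /* unmasked without PCI 2.3 support: the host
                             * line may still be disabled from a prior IRQ */
                            spin_lock_irq(&match->intx_lock);
                            if (match->host_irq_disabled) {
                                    enable_irq(match->host_irq);
                                    match->host_irq_disabled = false;
                            }
                            spin_unlock_irq(&match->intx_lock);
                    }
            }

            spin_unlock(&match->intx_mask_lock);

    out:
            mutex_unlock(&kvm->lock);
            return r;
    }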
982 struct kvm_assigned_pci_dev assigned_dev; in kvm_vm_ioctl_assigned_device() local
985 if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev)) in kvm_vm_ioctl_assigned_device()
987 r = kvm_vm_ioctl_assign_device(kvm, &assigned_dev); in kvm_vm_ioctl_assigned_device()
1019 struct kvm_assigned_pci_dev assigned_dev; in kvm_vm_ioctl_assigned_device() local
1022 if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev)) in kvm_vm_ioctl_assigned_device()
1024 r = kvm_vm_ioctl_deassign_device(kvm, &assigned_dev); in kvm_vm_ioctl_assigned_device()
1050 struct kvm_assigned_pci_dev assigned_dev; in kvm_vm_ioctl_assigned_device() local
1053 if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev)) in kvm_vm_ioctl_assigned_device()
1055 r = kvm_vm_ioctl_set_pci_irq_mask(kvm, &assigned_dev); in kvm_vm_ioctl_assigned_device()
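The last three hits are the same pattern repeated inside the ioctl dispatcher: declare a stack copy of struct kvm_assigned_pci_dev, copy_from_user() it, then hand it to the assign, deassign, or set-mask handler. A skeleton with one case filled in; the other two cases follow the same shape, and the exact ioctl constants and -ENOTTY default are assumptions:

    long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
                                      unsigned long arg)
    {
            void __user *argp = (void __user *)arg;
            int r;

            switch (ioctl) {
            case KVM_ASSIGN_PCI_DEVICE: {
                    struct kvm_assigned_pci_dev assigned_dev;

                    r = -EFAULT;
                    if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
                            goto out;
                    r = kvm_vm_ioctl_assign_device(kvm, &assigned_dev);
                    break;
            }
            /* the deassign and set-INTx-mask cases (lines 1019-1055 above)
             * repeat the copy_from_user() + handler call pattern */
            default:
                    r = -ENOTTY;
                    break;
            }
    out:
            return r;
    }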