Searched refs:vqs (Results 1 - 29 of 29) sorted by relevance

/linux-4.1.27/drivers/vhost/
test.c
33 struct vhost_virtqueue vqs[VHOST_TEST_VQ_MAX]; member in struct:vhost_test
40 struct vhost_virtqueue *vq = &n->vqs[VHOST_TEST_VQ]; handle_vq()
106 struct vhost_virtqueue **vqs; vhost_test_open() local
110 vqs = kmalloc(VHOST_TEST_VQ_MAX * sizeof(*vqs), GFP_KERNEL); vhost_test_open()
111 if (!vqs) { vhost_test_open()
117 vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ]; vhost_test_open()
118 n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick; vhost_test_open()
119 vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX); vhost_test_open()
140 *privatep = vhost_test_stop_vq(n, n->vqs + VHOST_TEST_VQ); vhost_test_stop()
145 vhost_poll_flush(&n->vqs[index].poll); vhost_test_flush_vq()
184 if (!vhost_vq_access_ok(&n->vqs[index])) { vhost_test_run()
191 vq = n->vqs + index; vhost_test_run()
199 r = vhost_init_used(&n->vqs[index]); vhost_test_run()
252 vq = &n->vqs[VHOST_TEST_VQ]; vhost_test_set_features()
net.c
103 struct vhost_net_virtqueue vqs[VHOST_NET_VQ_MAX]; member in struct:vhost_net
163 kfree(n->vqs[i].ubuf_info); vhost_net_clear_ubuf_info()
164 n->vqs[i].ubuf_info = NULL; vhost_net_clear_ubuf_info()
177 n->vqs[i].ubuf_info = kmalloc(sizeof(*n->vqs[i].ubuf_info) * vhost_net_set_ubuf_info()
179 if (!n->vqs[i].ubuf_info) vhost_net_set_ubuf_info()
196 n->vqs[i].done_idx = 0; vhost_net_vq_reset()
197 n->vqs[i].upend_idx = 0; vhost_net_vq_reset()
198 n->vqs[i].ubufs = NULL; vhost_net_vq_reset()
199 n->vqs[i].vhost_hlen = 0; vhost_net_vq_reset()
200 n->vqs[i].sock_hlen = 0; vhost_net_vq_reset()
295 struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX]; handle_tx()
520 struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_RX]; handle_rx()
680 struct vhost_virtqueue **vqs; vhost_net_open() local
689 vqs = kmalloc(VHOST_NET_VQ_MAX * sizeof(*vqs), GFP_KERNEL); vhost_net_open()
690 if (!vqs) { vhost_net_open()
696 vqs[VHOST_NET_VQ_TX] = &n->vqs[VHOST_NET_VQ_TX].vq; vhost_net_open()
697 vqs[VHOST_NET_VQ_RX] = &n->vqs[VHOST_NET_VQ_RX].vq; vhost_net_open()
698 n->vqs[VHOST_NET_VQ_TX].vq.handle_kick = handle_tx_kick; vhost_net_open()
699 n->vqs[VHOST_NET_VQ_RX].vq.handle_kick = handle_rx_kick; vhost_net_open()
701 n->vqs[i].ubufs = NULL; vhost_net_open()
702 n->vqs[i].ubuf_info = NULL; vhost_net_open()
703 n->vqs[i].upend_idx = 0; vhost_net_open()
704 n->vqs[i].done_idx = 0; vhost_net_open()
705 n->vqs[i].vhost_hlen = 0; vhost_net_open()
706 n->vqs[i].sock_hlen = 0; vhost_net_open()
708 vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX); vhost_net_open()
723 struct vhost_poll *poll = n->poll + (nvq - n->vqs); vhost_net_disable_vq()
734 struct vhost_poll *poll = n->poll + (nvq - n->vqs); vhost_net_enable_vq()
760 *tx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_TX].vq); vhost_net_stop()
761 *rx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_RX].vq); vhost_net_stop()
767 vhost_poll_flush(&n->vqs[index].vq.poll); vhost_net_flush_vq()
774 if (n->vqs[VHOST_NET_VQ_TX].ubufs) { vhost_net_flush()
775 mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); vhost_net_flush()
777 mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); vhost_net_flush()
779 vhost_net_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs); vhost_net_flush()
780 mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); vhost_net_flush()
782 atomic_set(&n->vqs[VHOST_NET_VQ_TX].ubufs->refcount, 1); vhost_net_flush()
783 mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); vhost_net_flush()
807 kfree(n->dev.vqs); vhost_net_release()
894 vq = &n->vqs[index].vq; vhost_net_set_backend()
895 nvq = &n->vqs[index]; vhost_net_set_backend()
1021 mutex_lock(&n->vqs[i].vq.mutex); vhost_net_set_features()
1022 n->vqs[i].vq.acked_features = features; vhost_net_set_features()
1023 n->vqs[i].vhost_hlen = vhost_hlen; vhost_net_set_features()
1024 n->vqs[i].sock_hlen = sock_hlen; vhost_net_set_features()
1025 mutex_unlock(&n->vqs[i].vq.mutex); vhost_net_set_features()
vhost.c
263 /* Helper to allocate iovec buffers for all vqs. */ vhost_dev_alloc_iovecs()
270 vq = dev->vqs[i]; vhost_dev_alloc_iovecs()
282 vhost_vq_free_iovecs(dev->vqs[i]); vhost_dev_alloc_iovecs()
291 vhost_vq_free_iovecs(dev->vqs[i]); vhost_dev_free_iovecs()
295 struct vhost_virtqueue **vqs, int nvqs) vhost_dev_init()
300 dev->vqs = vqs; vhost_dev_init()
312 vq = dev->vqs[i]; vhost_dev_init()
430 dev->vqs[i]->memory = memory; vhost_dev_reset_owner()
439 if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick) { vhost_dev_stop()
440 vhost_poll_stop(&dev->vqs[i]->poll); vhost_dev_stop()
441 vhost_poll_flush(&dev->vqs[i]->poll); vhost_dev_stop()
453 if (dev->vqs[i]->error_ctx) vhost_dev_cleanup()
454 eventfd_ctx_put(dev->vqs[i]->error_ctx); vhost_dev_cleanup()
455 if (dev->vqs[i]->error) vhost_dev_cleanup()
456 fput(dev->vqs[i]->error); vhost_dev_cleanup()
457 if (dev->vqs[i]->kick) vhost_dev_cleanup()
458 fput(dev->vqs[i]->kick); vhost_dev_cleanup()
459 if (dev->vqs[i]->call_ctx) vhost_dev_cleanup()
460 eventfd_ctx_put(dev->vqs[i]->call_ctx); vhost_dev_cleanup()
461 if (dev->vqs[i]->call) vhost_dev_cleanup()
462 fput(dev->vqs[i]->call); vhost_dev_cleanup()
463 vhost_vq_reset(dev, dev->vqs[i]); vhost_dev_cleanup()
535 mutex_lock(&d->vqs[i]->mutex); memory_access_ok()
536 log = log_all || vhost_has_feature(d->vqs[i], VHOST_F_LOG_ALL); memory_access_ok()
538 if (d->vqs[i]->private_data) memory_access_ok()
539 ok = vq_memory_access_ok(d->vqs[i]->log_base, mem, log); memory_access_ok()
542 mutex_unlock(&d->vqs[i]->mutex); memory_access_ok()
625 mutex_lock(&d->vqs[i]->mutex); vhost_set_memory()
626 d->vqs[i]->memory = newmem; vhost_set_memory()
627 mutex_unlock(&d->vqs[i]->mutex); vhost_set_memory()
652 vq = d->vqs[idx]; vhost_vring_ioctl()
868 vq = d->vqs[i]; vhost_dev_ioctl()
896 mutex_lock(&d->vqs[i]->mutex); vhost_dev_ioctl()
897 d->vqs[i]->log_ctx = d->log_ctx; vhost_dev_ioctl()
898 mutex_unlock(&d->vqs[i]->mutex); vhost_dev_ioctl()
294 vhost_dev_init(struct vhost_dev *dev, struct vhost_virtqueue **vqs, int nvqs) vhost_dev_init() argument
scsi.c
207 struct vhost_scsi_virtqueue vqs[VHOST_SCSI_MAX_VQ]; member in struct:vhost_scsi
248 vq = &vs->vqs[i].vq; vhost_scsi_init_inflight()
253 idx = vs->vqs[i].inflight_idx; vhost_scsi_init_inflight()
255 old_inflight[i] = &vs->vqs[i].inflights[idx]; vhost_scsi_init_inflight()
258 vs->vqs[i].inflight_idx = idx ^ 1; vhost_scsi_init_inflight()
259 new_inflight = &vs->vqs[i].inflights[idx ^ 1]; vhost_scsi_init_inflight()
577 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; vhost_scsi_allocate_evt()
616 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; vhost_scsi_do_evt_work()
667 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; vhost_scsi_evt_work()
726 vq = q - vs->vqs; vhost_scsi_complete_cmd_work()
737 vhost_signal(&vs->dev, &vs->vqs[vq].vq); vhost_scsi_complete_cmd_work()
1315 vhost_poll_flush(&vs->vqs[index].vq.poll); vhost_scsi_flush_vq()
1371 if (!vhost_vq_access_ok(&vs->vqs[index].vq)) { vhost_scsi_set_endpoint()
1432 vq = &vs->vqs[i].vq; vhost_scsi_set_endpoint()
1473 if (!vhost_vq_access_ok(&vs->vqs[index].vq)) { vhost_scsi_clear_endpoint()
1519 vq = &vs->vqs[i].vq; vhost_scsi_clear_endpoint()
1561 vq = &vs->vqs[i].vq; vhost_scsi_set_features()
1573 struct vhost_virtqueue **vqs; vhost_scsi_open() local
1583 vqs = kmalloc(VHOST_SCSI_MAX_VQ * sizeof(*vqs), GFP_KERNEL); vhost_scsi_open()
1584 if (!vqs) vhost_scsi_open()
1593 vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq; vhost_scsi_open()
1594 vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; vhost_scsi_open()
1595 vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick; vhost_scsi_open()
1596 vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick; vhost_scsi_open()
1598 vqs[i] = &vs->vqs[i].vq; vhost_scsi_open()
1599 vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick; vhost_scsi_open()
1601 vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ); vhost_scsi_open()
1627 kfree(vs->dev.vqs); vhost_scsi_release()
1645 struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; vhost_scsi_ioctl()
1770 vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; vhost_scsi_do_plug()
vhost.h
115 struct vhost_virtqueue **vqs; member in struct:vhost_dev
124 void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, int nvqs);
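The vhost hits above (test.c, net.c, scsi.c) all follow the same open-time pattern: allocate an array of struct vhost_virtqueue pointers, point each entry at a virtqueue embedded in the driver's private structure, install the per-queue handle_kick callback, and pass the array to vhost_dev_init(). A minimal sketch of that pattern, condensed from vhost_test_open() and vhost_net_open(); the my_dev, MY_VQ_MAX and my_handle_kick names are placeholders, not kernel symbols:

#include <linux/slab.h>		/* kmalloc */
#include "vhost.h"		/* vhost_dev, vhost_virtqueue, vhost_dev_init */

#define MY_VQ_MAX 2

struct my_dev {
	struct vhost_dev dev;
	struct vhost_virtqueue vqs[MY_VQ_MAX];	/* queues embedded in the private struct */
};

static void my_handle_kick(struct vhost_work *work)
{
	/* per-queue kick handler, as handle_tx_kick()/handle_rx_kick() in net.c */
}

static int my_open_vqs(struct my_dev *n)
{
	struct vhost_virtqueue **vqs;
	int i;

	vqs = kmalloc(MY_VQ_MAX * sizeof(*vqs), GFP_KERNEL);
	if (!vqs)
		return -ENOMEM;

	for (i = 0; i < MY_VQ_MAX; i++) {
		vqs[i] = &n->vqs[i];		/* pointer array aliases the embedded queues */
		n->vqs[i].handle_kick = my_handle_kick;
	}

	/* The pointer array stays owned by the device and is freed later via
	 * kfree(n->dev.vqs), as in vhost_net_release()/vhost_scsi_release(). */
	vhost_dev_init(&n->dev, vqs, MY_VQ_MAX);
	return 0;
}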
/linux-4.1.27/drivers/virtio/
virtio_pci_common.c
252 vp_dev->vqs[index] = info; vp_setup_vq()
263 struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index]; vp_del_vq()
281 list_for_each_entry_safe(vq, n, &vdev->vqs, list) { vp_del_vqs()
282 info = vp_dev->vqs[vq->index]; vp_del_vqs()
292 kfree(vp_dev->vqs); vp_del_vqs()
293 vp_dev->vqs = NULL; vp_del_vqs()
297 struct virtqueue *vqs[], vp_try_to_find_vqs()
307 vp_dev->vqs = kmalloc(nvqs * sizeof *vp_dev->vqs, GFP_KERNEL); vp_try_to_find_vqs()
308 if (!vp_dev->vqs) vp_try_to_find_vqs()
312 /* Old style: one normal interrupt for change and all vqs. */ vp_try_to_find_vqs()
324 /* Second best: one for change, shared for all vqs. */ vp_try_to_find_vqs()
337 vqs[i] = NULL; vp_try_to_find_vqs()
345 vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i], msix_vec); vp_try_to_find_vqs()
346 if (IS_ERR(vqs[i])) { vp_try_to_find_vqs()
347 err = PTR_ERR(vqs[i]); vp_try_to_find_vqs()
362 vqs[i]); vp_try_to_find_vqs()
364 vp_del_vq(vqs[i]); vp_try_to_find_vqs()
377 struct virtqueue *vqs[], vp_find_vqs()
384 err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names, true, true); vp_find_vqs()
388 err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names, vp_find_vqs()
393 return vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names, vp_find_vqs()
413 struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index]; vp_set_vq_affinity()
296 vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], const char *names[], bool use_msix, bool per_vq_vectors) vp_try_to_find_vqs() argument
376 vp_find_vqs(struct virtio_device *vdev, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], const char *names[]) vp_find_vqs() argument
virtio_pci_common.h
87 struct virtio_pci_vq_info **vqs; member in struct:virtio_pci_device
138 struct virtqueue *vqs[],
virtio_input.c
170 struct virtqueue *vqs[2]; virtinput_init_vqs() local
176 err = vi->vdev->config->find_vqs(vi->vdev, 2, vqs, cbs, names); virtinput_init_vqs()
179 vi->evt = vqs[0]; virtinput_init_vqs()
180 vi->sts = vqs[1]; virtinput_init_vqs()
virtio_balloon.c
385 struct virtqueue *vqs[3]; init_vqs() local
395 err = vb->vdev->config->find_vqs(vb->vdev, nvqs, vqs, callbacks, names); init_vqs()
399 vb->inflate_vq = vqs[0]; init_vqs()
400 vb->deflate_vq = vqs[1]; init_vqs()
403 vb->stats_vq = vqs[2]; init_vqs()
virtio_mmio.c
352 list_for_each_entry_safe(vq, n, &vdev->vqs, list) vm_del_vqs()
482 struct virtqueue *vqs[], vm_find_vqs()
496 vqs[i] = vm_setup_vq(vdev, i, callbacks[i], names[i]); vm_find_vqs()
497 if (IS_ERR(vqs[i])) { vm_find_vqs()
499 return PTR_ERR(vqs[i]); vm_find_vqs()
481 vm_find_vqs(struct virtio_device *vdev, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], const char *names[]) vm_find_vqs() argument
virtio_pci_modern.c
424 struct virtqueue *vqs[], vp_modern_find_vqs()
430 int rc = vp_find_vqs(vdev, nvqs, vqs, callbacks, names); vp_modern_find_vqs()
438 list_for_each_entry(vq, &vdev->vqs, list) { vp_modern_find_vqs()
423 vp_modern_find_vqs(struct virtio_device *vdev, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], const char *names[]) vp_modern_find_vqs() argument
virtio.c
321 INIT_LIST_HEAD(&dev->vqs); register_virtio_device()
virtio_ring.c
739 list_add_tail(&vq->vq.list, &vdev->vqs); vring_new_virtqueue()
824 list_for_each_entry(_vq, &dev->vqs, list) { virtio_break_device()
/linux-4.1.27/drivers/block/
virtio_blk.c
49 /* num of vqs */
51 struct virtio_blk_vq *vqs; member in struct:virtio_blk
143 spin_lock_irqsave(&vblk->vqs[qid].lock, flags); virtblk_done()
146 while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) { virtblk_done()
157 spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags); virtblk_done()
212 spin_lock_irqsave(&vblk->vqs[qid].lock, flags); virtio_queue_rq()
213 err = __virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num); virtio_queue_rq()
215 virtqueue_kick(vblk->vqs[qid].vq); virtio_queue_rq()
217 spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags); virtio_queue_rq()
225 if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq)) virtio_queue_rq()
227 spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags); virtio_queue_rq()
230 virtqueue_notify(vblk->vqs[qid].vq); virtio_queue_rq()
386 struct virtqueue **vqs; init_vq() local
396 vblk->vqs = kmalloc(sizeof(*vblk->vqs) * num_vqs, GFP_KERNEL); init_vq()
397 if (!vblk->vqs) { init_vq()
410 vqs = kmalloc(sizeof(*vqs) * num_vqs, GFP_KERNEL); init_vq()
411 if (!vqs) init_vq()
416 snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%d", i); init_vq()
417 names[i] = vblk->vqs[i].name; init_vq()
421 err = vdev->config->find_vqs(vdev, num_vqs, vqs, callbacks, names); init_vq()
426 spin_lock_init(&vblk->vqs[i].lock); init_vq()
427 vblk->vqs[i].vq = vqs[i]; init_vq()
432 kfree(vqs); init_vq()
439 kfree(vblk->vqs); init_vq()
624 virtblk_queue_depth = vblk->vqs[0].vq->num_free; virtblk_probe()
787 kfree(vblk->vqs); virtblk_remove()
/linux-4.1.27/drivers/net/ethernet/intel/i40evf/
i40evf_virtchnl.c
267 struct i40e_virtchnl_queue_select vqs; i40evf_enable_queues() local
276 vqs.vsi_id = adapter->vsi_res->vsi_id; i40evf_enable_queues()
277 vqs.tx_queues = (1 << adapter->num_active_queues) - 1; i40evf_enable_queues()
278 vqs.rx_queues = vqs.tx_queues; i40evf_enable_queues()
281 (u8 *)&vqs, sizeof(vqs)); i40evf_enable_queues()
292 struct i40e_virtchnl_queue_select vqs; i40evf_disable_queues() local
301 vqs.vsi_id = adapter->vsi_res->vsi_id; i40evf_disable_queues()
302 vqs.tx_queues = (1 << adapter->num_active_queues) - 1; i40evf_disable_queues()
303 vqs.rx_queues = vqs.tx_queues; i40evf_disable_queues()
306 (u8 *)&vqs, sizeof(vqs)); i40evf_disable_queues()
631 struct i40e_virtchnl_queue_select vqs; i40evf_request_stats() local
638 vqs.vsi_id = adapter->vsi_res->vsi_id; i40evf_request_stats()
641 (u8 *)&vqs, sizeof(vqs))) i40evf_request_stats()
/linux-4.1.27/drivers/remoteproc/
remoteproc_virtio.c
129 list_for_each_entry_safe(vq, n, &vdev->vqs, list) { __rproc_virtio_del_vqs()
141 /* power down the remote processor before deleting vqs */ rproc_virtio_del_vqs()
148 struct virtqueue *vqs[], rproc_virtio_find_vqs()
156 vqs[i] = rp_find_vq(vdev, i, callbacks[i], names[i]); rproc_virtio_find_vqs()
157 if (IS_ERR(vqs[i])) { rproc_virtio_find_vqs()
158 ret = PTR_ERR(vqs[i]); rproc_virtio_find_vqs()
163 /* now that the vqs are all set, boot the remote processor */ rproc_virtio_find_vqs()
147 rproc_virtio_find_vqs(struct virtio_device *vdev, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], const char *names[]) rproc_virtio_find_vqs() argument
/linux-4.1.27/include/linux/
virtio_config.h
39 * vqs: on success, includes new virtqueues
41 * include a NULL entry for vqs that do not need a callback
43 * include a NULL entry for vqs unused by driver
71 struct virtqueue *vqs[],
169 * Driver must call this to use vqs in the probe function.
171 * Note: vqs are enabled automatically after probe returns.
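The virtio_config.h hits above describe the find_vqs() config operation: the caller passes parallel arrays of callbacks and names (with NULL entries for queues that need no callback or are unused by the driver) and, on success, gets the created virtqueues back in vqs. A minimal probe-time sketch of that call, following the two-queue usage in virtinput_init_vqs() shown earlier; the queue names and the my_recv_done callback are illustrative placeholders:

#include <linux/virtio.h>
#include <linux/virtio_config.h>

static void my_recv_done(struct virtqueue *vq)
{
	/* invoked by the transport when the device signals this queue */
}

static int my_init_vqs(struct virtio_device *vdev,
		       struct virtqueue **rx, struct virtqueue **tx)
{
	struct virtqueue *vqs[2];
	vq_callback_t *cbs[] = { my_recv_done, NULL };	/* NULL: no callback for the tx queue */
	const char *names[] = { "rx", "tx" };
	int err;

	err = vdev->config->find_vqs(vdev, 2, vqs, cbs, names);
	if (err)
		return err;

	*rx = vqs[0];
	*tx = vqs[1];
	return 0;
}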
virtio.h
92 * @vqs: the list of virtqueues for this device.
106 struct list_head vqs; member in struct:virtio_device
vringh.h
66 * include a NULL entry for vqs that do not need a callback
/linux-4.1.27/drivers/s390/kvm/
kvm_virtio.c
251 list_for_each_entry_safe(vq, n, &vdev->vqs, list) kvm_del_vqs()
256 struct virtqueue *vqs[], kvm_find_vqs()
268 vqs[i] = kvm_find_vq(vdev, i, callbacks[i], names[i]); kvm_find_vqs()
269 if (IS_ERR(vqs[i])) kvm_find_vqs()
276 return PTR_ERR(vqs[i]); kvm_find_vqs()
255 kvm_find_vqs(struct virtio_device *vdev, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], const char *names[]) kvm_find_vqs() argument
virtio_ccw.c
247 static unsigned long get_airq_indicator(struct virtqueue *vqs[], int nvqs, get_airq_indicator() argument
273 (unsigned long)vqs[j]); get_airq_indicator()
473 list_for_each_entry_safe(vq, n, &vdev->vqs, list) virtio_ccw_del_vqs()
571 struct virtqueue *vqs[], int nvqs, virtio_ccw_register_adapter_ind()
584 thinint_area->indicator = get_airq_indicator(vqs, nvqs, virtio_ccw_register_adapter_ind()
619 struct virtqueue *vqs[], virtio_ccw_find_vqs()
633 vqs[i] = virtio_ccw_setup_vq(vdev, i, callbacks[i], names[i], virtio_ccw_find_vqs()
635 if (IS_ERR(vqs[i])) { virtio_ccw_find_vqs()
636 ret = PTR_ERR(vqs[i]); virtio_ccw_find_vqs()
637 vqs[i] = NULL; virtio_ccw_find_vqs()
648 ret = virtio_ccw_register_adapter_ind(vcdev, vqs, nvqs, ccw); virtio_ccw_find_vqs()
570 virtio_ccw_register_adapter_ind(struct virtio_ccw_device *vcdev, struct virtqueue *vqs[], int nvqs, struct ccw1 *ccw) virtio_ccw_register_adapter_ind() argument
618 virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], const char *names[]) virtio_ccw_find_vqs() argument
/linux-4.1.27/tools/virtio/
virtio_test.c
38 struct vq_info vqs[1]; member in struct:vdev_info
92 struct vq_info *info = &dev->vqs[dev->nvqs]; vq_info_add()
300 run_test(&dev, &dev.vqs[0], delayed, 0x100000); main()
/linux-4.1.27/drivers/char/
virtio_console.c
200 /* The IO vqs for this port */
524 /* Device has been unplugged. vqs are already gone. */ discard_port_data()
606 /* Device has been unplugged. vqs are already gone. */ reclaim_consumed_buffers()
1877 struct virtqueue **vqs; init_vqs() local
1884 vqs = kmalloc(nr_queues * sizeof(struct virtqueue *), GFP_KERNEL); init_vqs()
1891 if (!vqs || !io_callbacks || !io_names || !portdev->in_vqs || init_vqs()
1899 * spawns a console port first and also inits the vqs for port init_vqs()
1924 err = portdev->vdev->config->find_vqs(portdev->vdev, nr_queues, vqs, init_vqs()
1931 portdev->in_vqs[0] = vqs[0]; init_vqs()
1932 portdev->out_vqs[0] = vqs[1]; init_vqs()
1935 portdev->c_ivq = vqs[j]; init_vqs()
1936 portdev->c_ovq = vqs[j + 1]; init_vqs()
1940 portdev->in_vqs[i] = vqs[j]; init_vqs()
1941 portdev->out_vqs[i] = vqs[j + 1]; init_vqs()
1946 kfree(vqs); init_vqs()
1955 kfree(vqs); init_vqs()
2046 dev_err(&vdev->dev, "Error %d initializing vqs\n", err); virtcons_probe()
2125 /* Disable interrupts for vqs */ virtcons_remove()
2143 * have to just stop using the port, as the vqs are going virtcons_remove()
/linux-4.1.27/drivers/misc/mic/card/
mic_virtio.c
221 list_for_each_entry_safe(vq, n, &vdev->vqs, list) mic_del_vqs()
312 struct virtqueue *vqs[], mic_find_vqs()
327 vqs[i] = mic_find_vq(vdev, i, callbacks[i], names[i]); mic_find_vqs()
328 if (IS_ERR(vqs[i])) { mic_find_vqs()
329 err = PTR_ERR(vqs[i]); mic_find_vqs()
380 list_for_each_entry(vq, &mvdev->vdev.vqs, list) mic_virtio_intr_handler()
311 mic_find_vqs(struct virtio_device *vdev, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], const char *names[]) mic_find_vqs() argument
/linux-4.1.27/drivers/scsi/
virtio_scsi.c
895 struct virtqueue **vqs; virtscsi_init() local
898 vqs = kmalloc(num_vqs * sizeof(struct virtqueue *), GFP_KERNEL); virtscsi_init()
902 if (!callbacks || !vqs || !names) { virtscsi_init()
917 err = vdev->config->find_vqs(vdev, num_vqs, vqs, callbacks, names); virtscsi_init()
921 virtscsi_init_vq(&vscsi->ctrl_vq, vqs[0]); virtscsi_init()
922 virtscsi_init_vq(&vscsi->event_vq, vqs[1]); virtscsi_init()
925 vqs[i]); virtscsi_init()
937 kfree(vqs); virtscsi_init()
/linux-4.1.27/include/uapi/linux/
virtio_blk.h
87 /* number of vqs, only available when VIRTIO_BLK_F_MQ is set */
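The virtio_blk.h hit is the num_queues config field, which is only valid when the device offers VIRTIO_BLK_F_MQ. A hedged sketch of reading it at probe time, assuming the virtio_cread_feature() helper from <linux/virtio_config.h> and falling back to a single queue when the feature was not negotiated; virtio_blk's own init_vq() does essentially this, though that call is not among the hits above:

#include <linux/virtio_config.h>
#include <linux/virtio_blk.h>

static unsigned short my_blk_num_vqs(struct virtio_device *vdev)
{
	unsigned short num_vqs;
	int err;

	/* reads virtio_blk_config.num_queues only if VIRTIO_BLK_F_MQ was negotiated */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_MQ,
				   struct virtio_blk_config, num_queues,
				   &num_vqs);
	if (err)
		num_vqs = 1;	/* field not available without VIRTIO_BLK_F_MQ */

	return num_vqs;
}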
/linux-4.1.27/drivers/net/ethernet/intel/i40e/
i40e_virtchnl_pf.c
1378 struct i40e_virtchnl_queue_select *vqs = i40e_vc_enable_queues_msg() local
1381 u16 vsi_id = vqs->vsi_id; i40e_vc_enable_queues_msg()
1394 if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) { i40e_vc_enable_queues_msg()
1418 struct i40e_virtchnl_queue_select *vqs = i40e_vc_disable_queues_msg() local
1428 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { i40e_vc_disable_queues_msg()
1433 if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) { i40e_vc_disable_queues_msg()
1457 struct i40e_virtchnl_queue_select *vqs = i40e_vc_get_stats_msg() local
1471 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) { i40e_vc_get_stats_msg()
/linux-4.1.27/drivers/net/
virtio_net.c
1528 struct virtqueue **vqs; virtnet_find_vqs() local
1541 vqs = kzalloc(total_vqs * sizeof(*vqs), GFP_KERNEL); virtnet_find_vqs()
1542 if (!vqs) virtnet_find_vqs()
1567 ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks, virtnet_find_vqs()
1573 vi->cvq = vqs[total_vqs - 1]; virtnet_find_vqs()
1579 vi->rq[i].vq = vqs[rxq2vq(i)]; virtnet_find_vqs()
1580 vi->sq[i].vq = vqs[txq2vq(i)]; virtnet_find_vqs()
1585 kfree(vqs); virtnet_find_vqs()
1594 kfree(vqs); virtnet_find_vqs()
/linux-4.1.27/drivers/rpmsg/
virtio_rpmsg_bus.c
949 struct virtqueue *vqs[2]; rpmsg_probe() local
968 err = vdev->config->find_vqs(vdev, 2, vqs, vq_cbs, names); rpmsg_probe()
972 vrp->rvq = vqs[0]; rpmsg_probe()
973 vrp->svq = vqs[1]; rpmsg_probe()
