Lines Matching refs: rq
105 struct receive_queue *rq; member
187 static void give_pages(struct receive_queue *rq, struct page *page) in give_pages() argument
193 end->private = (unsigned long)rq->pages; in give_pages()
194 rq->pages = page; in give_pages()
197 static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask) in get_a_page() argument
199 struct page *p = rq->pages; in get_a_page()
202 rq->pages = (struct page *)p->private; in get_a_page()
241 struct receive_queue *rq, in page_to_skb() argument
309 give_pages(rq, page); in page_to_skb()
326 struct receive_queue *rq, in receive_big() argument
331 struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE); in receive_big()
340 give_pages(rq, page); in receive_big()
346 struct receive_queue *rq, in receive_mergeable() argument
357 struct sk_buff *head_skb = page_to_skb(vi, rq, page, offset, len, in receive_mergeable()
366 ctx = (unsigned long)virtqueue_get_buf(rq->vq, &len); in receive_mergeable()
410 ewma_add(&rq->mrg_avg_pkt_len, head_skb->len); in receive_mergeable()
416 ctx = (unsigned long)virtqueue_get_buf(rq->vq, &len); in receive_mergeable()
432 static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq, in receive_buf() argument
448 give_pages(rq, buf); in receive_buf()
456 skb = receive_mergeable(dev, vi, rq, (unsigned long)buf, len); in receive_buf()
458 skb = receive_big(dev, vi, rq, buf, len); in receive_buf()
519 skb_mark_napi_id(skb, &rq->napi); in receive_buf()
529 static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq, in add_recvbuf_small() argument
543 sg_init_table(rq->sg, MAX_SKB_FRAGS + 2); in add_recvbuf_small()
544 sg_set_buf(rq->sg, hdr, vi->hdr_len); in add_recvbuf_small()
545 skb_to_sgvec(skb, rq->sg + 1, 0, skb->len); in add_recvbuf_small()
547 err = virtqueue_add_inbuf(rq->vq, rq->sg, 2, skb, gfp); in add_recvbuf_small()
554 static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq, in add_recvbuf_big() argument
561 sg_init_table(rq->sg, MAX_SKB_FRAGS + 2); in add_recvbuf_big()
565 first = get_a_page(rq, gfp); in add_recvbuf_big()
568 give_pages(rq, list); in add_recvbuf_big()
571 sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE); in add_recvbuf_big()
578 first = get_a_page(rq, gfp); in add_recvbuf_big()
580 give_pages(rq, list); in add_recvbuf_big()
587 sg_set_buf(&rq->sg[0], p, vi->hdr_len); in add_recvbuf_big()
591 sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset); in add_recvbuf_big()
595 err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2, in add_recvbuf_big()
598 give_pages(rq, first); in add_recvbuf_big()
613 static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp) in add_recvbuf_mergeable() argument
615 struct page_frag *alloc_frag = &rq->alloc_frag; in add_recvbuf_mergeable()
621 len = get_mergeable_buf_len(&rq->mrg_avg_pkt_len); in add_recvbuf_mergeable()
640 sg_init_one(rq->sg, buf, len); in add_recvbuf_mergeable()
641 err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, (void *)ctx, gfp); in add_recvbuf_mergeable()
655 static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq, in try_fill_recv() argument
664 err = add_recvbuf_mergeable(rq, gfp); in try_fill_recv()
666 err = add_recvbuf_big(vi, rq, gfp); in try_fill_recv()
668 err = add_recvbuf_small(vi, rq, gfp); in try_fill_recv()
673 } while (rq->vq->num_free); in try_fill_recv()
674 virtqueue_kick(rq->vq); in try_fill_recv()
681 struct receive_queue *rq = &vi->rq[vq2rxq(rvq)]; in skb_recv_done() local
684 if (napi_schedule_prep(&rq->napi)) { in skb_recv_done()
686 __napi_schedule(&rq->napi); in skb_recv_done()
690 static void virtnet_napi_enable(struct receive_queue *rq) in virtnet_napi_enable() argument
692 napi_enable(&rq->napi); in virtnet_napi_enable()
698 if (napi_schedule_prep(&rq->napi)) { in virtnet_napi_enable()
699 virtqueue_disable_cb(rq->vq); in virtnet_napi_enable()
701 __napi_schedule(&rq->napi); in virtnet_napi_enable()
714 struct receive_queue *rq = &vi->rq[i]; in refill_work() local
716 napi_disable(&rq->napi); in refill_work()
717 still_empty = !try_fill_recv(vi, rq, GFP_KERNEL); in refill_work()
718 virtnet_napi_enable(rq); in refill_work()
728 static int virtnet_receive(struct receive_queue *rq, int budget) in virtnet_receive() argument
730 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_receive()
735 (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) { in virtnet_receive()
736 receive_buf(vi, rq, buf, len); in virtnet_receive()
740 if (rq->vq->num_free > virtqueue_get_vring_size(rq->vq) / 2) { in virtnet_receive()
741 if (!try_fill_recv(vi, rq, GFP_ATOMIC)) in virtnet_receive()
750 struct receive_queue *rq = in virtnet_poll() local
754 received = virtnet_receive(rq, budget); in virtnet_poll()
758 r = virtqueue_enable_cb_prepare(rq->vq); in virtnet_poll()
760 if (unlikely(virtqueue_poll(rq->vq, r)) && in virtnet_poll()
762 virtqueue_disable_cb(rq->vq); in virtnet_poll()
774 struct receive_queue *rq = in virtnet_busy_poll() local
776 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_busy_poll()
785 virtqueue_disable_cb(rq->vq); in virtnet_busy_poll()
788 received += virtnet_receive(rq, budget); in virtnet_busy_poll()
790 r = virtqueue_enable_cb_prepare(rq->vq); in virtnet_busy_poll()
792 if (unlikely(virtqueue_poll(rq->vq, r)) && in virtnet_busy_poll()
794 virtqueue_disable_cb(rq->vq); in virtnet_busy_poll()
815 if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL)) in virtnet_open()
817 virtnet_napi_enable(&vi->rq[i]); in virtnet_open()
1096 napi_schedule(&vi->rq[i].napi); in virtnet_netpoll()
1145 napi_disable(&vi->rq[i].napi); in virtnet_close()
1255 virtqueue_set_affinity(vi->rq[i].vq, -1); in virtnet_clean_affinity()
1280 virtqueue_set_affinity(vi->rq[i].vq, cpu); in virtnet_set_affinity()
1315 ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq); in virtnet_get_ringparam()
1460 napi_hash_del(&vi->rq[i].napi); in virtnet_free_queues()
1461 netif_napi_del(&vi->rq[i].napi); in virtnet_free_queues()
1464 kfree(vi->rq); in virtnet_free_queues()
1473 while (vi->rq[i].pages) in free_receive_bufs()
1474 __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0); in free_receive_bufs()
1482 if (vi->rq[i].alloc_frag.page) in free_receive_page_frags()
1483 put_page(vi->rq[i].alloc_frag.page); in free_receive_page_frags()
1498 struct virtqueue *vq = vi->rq[i].vq; in free_unused_bufs()
1506 give_pages(&vi->rq[i], buf); in free_unused_bufs()
1561 sprintf(vi->rq[i].name, "input.%d", i); in virtnet_find_vqs()
1563 names[rxq2vq(i)] = vi->rq[i].name; in virtnet_find_vqs()
1579 vi->rq[i].vq = vqs[rxq2vq(i)]; in virtnet_find_vqs()
1606 vi->rq = kzalloc(sizeof(*vi->rq) * vi->max_queue_pairs, GFP_KERNEL); in virtnet_alloc_queues()
1607 if (!vi->rq) in virtnet_alloc_queues()
1612 vi->rq[i].pages = NULL; in virtnet_alloc_queues()
1613 netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll, in virtnet_alloc_queues()
1615 napi_hash_add(&vi->rq[i].napi); in virtnet_alloc_queues()
1617 sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg)); in virtnet_alloc_queues()
1618 ewma_init(&vi->rq[i].mrg_avg_pkt_len, 1, RECEIVE_AVG_WEIGHT); in virtnet_alloc_queues()
1664 avg = &vi->rq[queue_index].mrg_avg_pkt_len; in mergeable_rx_buffer_size_show()
1867 try_fill_recv(vi, &vi->rq[i], GFP_KERNEL); in virtnet_probe()
1870 if (vi->rq[i].vq->num_free == in virtnet_probe()
1871 virtqueue_get_vring_size(vi->rq[i].vq)) { in virtnet_probe()
1963 napi_disable(&vi->rq[i].napi); in virtnet_freeze()
1984 if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL)) in virtnet_restore()
1988 virtnet_napi_enable(&vi->rq[i]); in virtnet_restore()
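
Taken together, these references appear to come from the Linux virtio network driver (drivers/net/virtio_net.c). As a reading aid, the sketch below reconstructs the receive_queue layout implied by the member accesses above, and the dispatch loop of try_fill_recv() as suggested by lines 655-674. Field order, field types, the name[] size, and the vi->mergeable_rx_bufs / vi->big_packets feature tests are assumptions recalled from the driver rather than facts shown in this listing, so treat the sketch as an approximation, not the authoritative definition.

	/*
	 * Approximate shape of struct receive_queue as implied by the
	 * references above (member at line 105; pages at 193-202; sg at
	 * 543/561; alloc_frag at 615; mrg_avg_pkt_len at 410/621/1618;
	 * name at 1561). Field order and name[] length are assumptions.
	 */
	struct receive_queue {
		struct virtqueue *vq;                     /* rq->vq: the RX virtqueue */
		struct napi_struct napi;                  /* per-queue NAPI context (684-701) */
		struct page *pages;                       /* chain of spare pages (193-202) */
		struct ewma mrg_avg_pkt_len;              /* average packet length for mergeable bufs */
		struct page_frag alloc_frag;              /* frag allocator for mergeable bufs (615, 1482) */
		struct scatterlist sg[MAX_SKB_FRAGS + 2]; /* sized per sg_init_table at 543/561 */
		char name[16];                            /* "input.%d" written at 1561; size assumed */
	};

	/*
	 * Skeleton of try_fill_recv() reconstructed from lines 655-674.
	 * The listing only shows which helper is called and the loop/kick
	 * structure; the feature tests selecting the helper are assumed.
	 */
	static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
				  gfp_t gfp)
	{
		int err;
		bool oom;

		do {
			if (vi->mergeable_rx_bufs)            /* assumed feature test */
				err = add_recvbuf_mergeable(rq, gfp);
			else if (vi->big_packets)             /* assumed feature test */
				err = add_recvbuf_big(vi, rq, gfp);
			else
				err = add_recvbuf_small(vi, rq, gfp);

			oom = err == -ENOMEM;
			if (err)
				break;
		} while (rq->vq->num_free);                   /* keep posting buffers until the vring is full (673) */

		virtqueue_kick(rq->vq);                       /* notify the host once per batch (674) */
		return !oom;
	}

Note that the refill path is reached both from refill_work() with GFP_KERNEL (line 717) and from the NAPI receive path with GFP_ATOMIC (line 741), which is why try_fill_recv() takes the gfp flags as a parameter.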