Lines matching refs: vi
212 struct virtnet_info *vi = vq->vdev->priv; in skb_xmit_done() local
218 netif_wake_subqueue(vi->dev, vq2txq(vq)); in skb_xmit_done()
240 static struct sk_buff *page_to_skb(struct virtnet_info *vi, in page_to_skb() argument
253 skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN); in page_to_skb()
259 hdr_len = vi->hdr_len; in page_to_skb()
260 if (vi->mergeable_rx_bufs) in page_to_skb()
279 if (vi->mergeable_rx_bufs) { in page_to_skb()
314 static struct sk_buff *receive_small(struct virtnet_info *vi, void *buf, unsigned int len) in receive_small() argument
318 len -= vi->hdr_len; in receive_small()
325 struct virtnet_info *vi, in receive_big() argument
331 struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE); in receive_big()
345 struct virtnet_info *vi, in receive_mergeable() argument
352 u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers); in receive_mergeable()
357 struct sk_buff *head_skb = page_to_skb(vi, rq, page, offset, len, in receive_mergeable()
370 virtio16_to_cpu(vi->vdev, in receive_mergeable()
432 static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq, in receive_buf() argument
435 struct net_device *dev = vi->dev; in receive_buf()
436 struct virtnet_stats *stats = this_cpu_ptr(vi->stats); in receive_buf()
440 if (unlikely(len < vi->hdr_len + ETH_HLEN)) { in receive_buf()
443 if (vi->mergeable_rx_bufs) { in receive_buf()
447 } else if (vi->big_packets) { in receive_buf()
455 if (vi->mergeable_rx_bufs) in receive_buf()
456 skb = receive_mergeable(dev, vi, rq, (unsigned long)buf, len); in receive_buf()
457 else if (vi->big_packets) in receive_buf()
458 skb = receive_big(dev, vi, rq, buf, len); in receive_buf()
460 skb = receive_small(vi, buf, len); in receive_buf()
475 virtio16_to_cpu(vi->vdev, hdr->hdr.csum_start), in receive_buf()
476 virtio16_to_cpu(vi->vdev, hdr->hdr.csum_offset))) in receive_buf()
507 skb_shinfo(skb)->gso_size = virtio16_to_cpu(vi->vdev, in receive_buf()
529 static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq, in add_recvbuf_small() argument
536 skb = __netdev_alloc_skb_ip_align(vi->dev, GOOD_PACKET_LEN, gfp); in add_recvbuf_small()
544 sg_set_buf(rq->sg, hdr, vi->hdr_len); in add_recvbuf_small()
554 static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq, in add_recvbuf_big() argument
587 sg_set_buf(&rq->sg[0], p, vi->hdr_len); in add_recvbuf_big()
655 static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq, in try_fill_recv() argument
663 if (vi->mergeable_rx_bufs) in try_fill_recv()
665 else if (vi->big_packets) in try_fill_recv()
666 err = add_recvbuf_big(vi, rq, gfp); in try_fill_recv()
668 err = add_recvbuf_small(vi, rq, gfp); in try_fill_recv()
680 struct virtnet_info *vi = rvq->vdev->priv; in skb_recv_done() local
681 struct receive_queue *rq = &vi->rq[vq2rxq(rvq)]; in skb_recv_done()
708 struct virtnet_info *vi = in refill_work() local
713 for (i = 0; i < vi->curr_queue_pairs; i++) { in refill_work()
714 struct receive_queue *rq = &vi->rq[i]; in refill_work()
717 still_empty = !try_fill_recv(vi, rq, GFP_KERNEL); in refill_work()
724 schedule_delayed_work(&vi->refill, HZ/2); in refill_work()
730 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_receive() local
736 receive_buf(vi, rq, buf, len); in virtnet_receive()
741 if (!try_fill_recv(vi, rq, GFP_ATOMIC)) in virtnet_receive()
742 schedule_delayed_work(&vi->refill, 0); in virtnet_receive()
776 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_busy_poll() local
779 if (!(vi->status & VIRTIO_NET_S_LINK_UP)) in virtnet_busy_poll()
809 struct virtnet_info *vi = netdev_priv(dev); in virtnet_open() local
812 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_open()
813 if (i < vi->curr_queue_pairs) in virtnet_open()
815 if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL)) in virtnet_open()
816 schedule_delayed_work(&vi->refill, 0); in virtnet_open()
817 virtnet_napi_enable(&vi->rq[i]); in virtnet_open()
827 struct virtnet_info *vi = sq->vq->vdev->priv; in free_old_xmit_skbs() local
828 struct virtnet_stats *stats = this_cpu_ptr(vi->stats); in free_old_xmit_skbs()
846 struct virtnet_info *vi = sq->vq->vdev->priv; in xmit_skb() local
848 unsigned hdr_len = vi->hdr_len; in xmit_skb()
851 pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest); in xmit_skb()
853 can_push = vi->any_header_sg && in xmit_skb()
865 hdr->hdr.csum_start = cpu_to_virtio16(vi->vdev, in xmit_skb()
867 hdr->hdr.csum_offset = cpu_to_virtio16(vi->vdev, in xmit_skb()
875 hdr->hdr.hdr_len = cpu_to_virtio16(vi->vdev, skb_headlen(skb)); in xmit_skb()
876 hdr->hdr.gso_size = cpu_to_virtio16(vi->vdev, in xmit_skb()
893 if (vi->mergeable_rx_bufs) in xmit_skb()
911 struct virtnet_info *vi = netdev_priv(dev); in start_xmit() local
913 struct send_queue *sq = &vi->sq[qnum]; in start_xmit()
975 static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, in virtnet_send_command() argument
984 BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)); in virtnet_send_command()
1000 virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC); in virtnet_send_command()
1002 if (unlikely(!virtqueue_kick(vi->cvq))) in virtnet_send_command()
1008 while (!virtqueue_get_buf(vi->cvq, &tmp) && in virtnet_send_command()
1009 !virtqueue_is_broken(vi->cvq)) in virtnet_send_command()
1017 struct virtnet_info *vi = netdev_priv(dev); in virtnet_set_mac_address() local
1018 struct virtio_device *vdev = vi->vdev; in virtnet_set_mac_address()
1029 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, in virtnet_set_mac_address()
1054 struct virtnet_info *vi = netdev_priv(dev); in virtnet_stats() local
1059 struct virtnet_stats *stats = per_cpu_ptr(vi->stats, cpu); in virtnet_stats()
1092 struct virtnet_info *vi = netdev_priv(dev); in virtnet_netpoll() local
1095 for (i = 0; i < vi->curr_queue_pairs; i++) in virtnet_netpoll()
1096 napi_schedule(&vi->rq[i].napi); in virtnet_netpoll()
1100 static void virtnet_ack_link_announce(struct virtnet_info *vi) in virtnet_ack_link_announce() argument
1103 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE, in virtnet_ack_link_announce()
1105 dev_warn(&vi->dev->dev, "Failed to ack link announce.\n"); in virtnet_ack_link_announce()
1109 static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) in virtnet_set_queues() argument
1113 struct net_device *dev = vi->dev; in virtnet_set_queues()
1115 if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ)) in virtnet_set_queues()
1118 s.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs); in virtnet_set_queues()
1121 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ, in virtnet_set_queues()
1127 vi->curr_queue_pairs = queue_pairs; in virtnet_set_queues()
1130 schedule_delayed_work(&vi->refill, 0); in virtnet_set_queues()
1138 struct virtnet_info *vi = netdev_priv(dev); in virtnet_close() local
1142 cancel_delayed_work_sync(&vi->refill); in virtnet_close()
1144 for (i = 0; i < vi->max_queue_pairs; i++) in virtnet_close()
1145 napi_disable(&vi->rq[i].napi); in virtnet_close()
1152 struct virtnet_info *vi = netdev_priv(dev); in virtnet_set_rx_mode() local
1163 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX)) in virtnet_set_rx_mode()
1171 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, in virtnet_set_rx_mode()
1178 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, in virtnet_set_rx_mode()
1195 mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count); in virtnet_set_rx_mode()
1206 mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count); in virtnet_set_rx_mode()
1214 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, in virtnet_set_rx_mode()
1224 struct virtnet_info *vi = netdev_priv(dev); in virtnet_vlan_rx_add_vid() local
1229 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, in virtnet_vlan_rx_add_vid()
1238 struct virtnet_info *vi = netdev_priv(dev); in virtnet_vlan_rx_kill_vid() local
1243 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, in virtnet_vlan_rx_kill_vid()
1249 static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu) in virtnet_clean_affinity() argument
1253 if (vi->affinity_hint_set) { in virtnet_clean_affinity()
1254 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_clean_affinity()
1255 virtqueue_set_affinity(vi->rq[i].vq, -1); in virtnet_clean_affinity()
1256 virtqueue_set_affinity(vi->sq[i].vq, -1); in virtnet_clean_affinity()
1259 vi->affinity_hint_set = false; in virtnet_clean_affinity()
1263 static void virtnet_set_affinity(struct virtnet_info *vi) in virtnet_set_affinity() argument
1272 if (vi->curr_queue_pairs == 1 || in virtnet_set_affinity()
1273 vi->max_queue_pairs != num_online_cpus()) { in virtnet_set_affinity()
1274 virtnet_clean_affinity(vi, -1); in virtnet_set_affinity()
1280 virtqueue_set_affinity(vi->rq[i].vq, cpu); in virtnet_set_affinity()
1281 virtqueue_set_affinity(vi->sq[i].vq, cpu); in virtnet_set_affinity()
1282 netif_set_xps_queue(vi->dev, cpumask_of(cpu), i); in virtnet_set_affinity()
1286 vi->affinity_hint_set = true; in virtnet_set_affinity()
1292 struct virtnet_info *vi = container_of(nfb, struct virtnet_info, nb); in virtnet_cpu_callback() local
1298 virtnet_set_affinity(vi); in virtnet_cpu_callback()
1301 virtnet_clean_affinity(vi, (long)hcpu); in virtnet_cpu_callback()
1313 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_ringparam() local
1315 ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq); in virtnet_get_ringparam()
1316 ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq); in virtnet_get_ringparam()
1325 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_drvinfo() local
1326 struct virtio_device *vdev = vi->vdev; in virtnet_get_drvinfo()
1338 struct virtnet_info *vi = netdev_priv(dev); in virtnet_set_channels() local
1348 if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0) in virtnet_set_channels()
1352 err = virtnet_set_queues(vi, queue_pairs); in virtnet_set_channels()
1357 virtnet_set_affinity(vi); in virtnet_set_channels()
1367 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_channels() local
1369 channels->combined_count = vi->curr_queue_pairs; in virtnet_get_channels()
1370 channels->max_combined = vi->max_queue_pairs; in virtnet_get_channels()
1418 struct virtnet_info *vi = in virtnet_config_changed_work() local
1422 if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS, in virtnet_config_changed_work()
1427 netdev_notify_peers(vi->dev); in virtnet_config_changed_work()
1428 virtnet_ack_link_announce(vi); in virtnet_config_changed_work()
1434 if (vi->status == v) in virtnet_config_changed_work()
1437 vi->status = v; in virtnet_config_changed_work()
1439 if (vi->status & VIRTIO_NET_S_LINK_UP) { in virtnet_config_changed_work()
1440 netif_carrier_on(vi->dev); in virtnet_config_changed_work()
1441 netif_tx_wake_all_queues(vi->dev); in virtnet_config_changed_work()
1443 netif_carrier_off(vi->dev); in virtnet_config_changed_work()
1444 netif_tx_stop_all_queues(vi->dev); in virtnet_config_changed_work()
1450 struct virtnet_info *vi = vdev->priv; in virtnet_config_changed() local
1452 schedule_work(&vi->config_work); in virtnet_config_changed()
1455 static void virtnet_free_queues(struct virtnet_info *vi) in virtnet_free_queues() argument
1459 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_free_queues()
1460 napi_hash_del(&vi->rq[i].napi); in virtnet_free_queues()
1461 netif_napi_del(&vi->rq[i].napi); in virtnet_free_queues()
1464 kfree(vi->rq); in virtnet_free_queues()
1465 kfree(vi->sq); in virtnet_free_queues()
1468 static void free_receive_bufs(struct virtnet_info *vi) in free_receive_bufs() argument
1472 for (i = 0; i < vi->max_queue_pairs; i++) { in free_receive_bufs()
1473 while (vi->rq[i].pages) in free_receive_bufs()
1474 __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0); in free_receive_bufs()
1478 static void free_receive_page_frags(struct virtnet_info *vi) in free_receive_page_frags() argument
1481 for (i = 0; i < vi->max_queue_pairs; i++) in free_receive_page_frags()
1482 if (vi->rq[i].alloc_frag.page) in free_receive_page_frags()
1483 put_page(vi->rq[i].alloc_frag.page); in free_receive_page_frags()
1486 static void free_unused_bufs(struct virtnet_info *vi) in free_unused_bufs() argument
1491 for (i = 0; i < vi->max_queue_pairs; i++) { in free_unused_bufs()
1492 struct virtqueue *vq = vi->sq[i].vq; in free_unused_bufs()
1497 for (i = 0; i < vi->max_queue_pairs; i++) { in free_unused_bufs()
1498 struct virtqueue *vq = vi->rq[i].vq; in free_unused_bufs()
1501 if (vi->mergeable_rx_bufs) { in free_unused_bufs()
1505 } else if (vi->big_packets) { in free_unused_bufs()
1506 give_pages(&vi->rq[i], buf); in free_unused_bufs()
1514 static void virtnet_del_vqs(struct virtnet_info *vi) in virtnet_del_vqs() argument
1516 struct virtio_device *vdev = vi->vdev; in virtnet_del_vqs()
1518 virtnet_clean_affinity(vi, -1); in virtnet_del_vqs()
1522 virtnet_free_queues(vi); in virtnet_del_vqs()
1525 static int virtnet_find_vqs(struct virtnet_info *vi) in virtnet_find_vqs() argument
1537 total_vqs = vi->max_queue_pairs * 2 + in virtnet_find_vqs()
1538 virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ); in virtnet_find_vqs()
1552 if (vi->has_cvq) { in virtnet_find_vqs()
1558 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_find_vqs()
1561 sprintf(vi->rq[i].name, "input.%d", i); in virtnet_find_vqs()
1562 sprintf(vi->sq[i].name, "output.%d", i); in virtnet_find_vqs()
1563 names[rxq2vq(i)] = vi->rq[i].name; in virtnet_find_vqs()
1564 names[txq2vq(i)] = vi->sq[i].name; in virtnet_find_vqs()
1567 ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks, in virtnet_find_vqs()
1572 if (vi->has_cvq) { in virtnet_find_vqs()
1573 vi->cvq = vqs[total_vqs - 1]; in virtnet_find_vqs()
1574 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN)) in virtnet_find_vqs()
1575 vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; in virtnet_find_vqs()
1578 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_find_vqs()
1579 vi->rq[i].vq = vqs[rxq2vq(i)]; in virtnet_find_vqs()
1580 vi->sq[i].vq = vqs[txq2vq(i)]; in virtnet_find_vqs()
1599 static int virtnet_alloc_queues(struct virtnet_info *vi) in virtnet_alloc_queues() argument
1603 vi->sq = kzalloc(sizeof(*vi->sq) * vi->max_queue_pairs, GFP_KERNEL); in virtnet_alloc_queues()
1604 if (!vi->sq) in virtnet_alloc_queues()
1606 vi->rq = kzalloc(sizeof(*vi->rq) * vi->max_queue_pairs, GFP_KERNEL); in virtnet_alloc_queues()
1607 if (!vi->rq) in virtnet_alloc_queues()
1610 INIT_DELAYED_WORK(&vi->refill, refill_work); in virtnet_alloc_queues()
1611 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_alloc_queues()
1612 vi->rq[i].pages = NULL; in virtnet_alloc_queues()
1613 netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll, in virtnet_alloc_queues()
1615 napi_hash_add(&vi->rq[i].napi); in virtnet_alloc_queues()
1617 sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg)); in virtnet_alloc_queues()
1618 ewma_init(&vi->rq[i].mrg_avg_pkt_len, 1, RECEIVE_AVG_WEIGHT); in virtnet_alloc_queues()
1619 sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg)); in virtnet_alloc_queues()
1625 kfree(vi->sq); in virtnet_alloc_queues()
1630 static int init_vqs(struct virtnet_info *vi) in init_vqs() argument
1635 ret = virtnet_alloc_queues(vi); in init_vqs()
1639 ret = virtnet_find_vqs(vi); in init_vqs()
1644 virtnet_set_affinity(vi); in init_vqs()
1650 virtnet_free_queues(vi); in init_vqs()
1659 struct virtnet_info *vi = netdev_priv(queue->dev); in mergeable_rx_buffer_size_show() local
1663 BUG_ON(queue_index >= vi->max_queue_pairs); in mergeable_rx_buffer_size_show()
1664 avg = &vi->rq[queue_index].mrg_avg_pkt_len; in mergeable_rx_buffer_size_show()
1720 struct virtnet_info *vi; in virtnet_probe() local
1797 vi = netdev_priv(dev); in virtnet_probe()
1798 vi->dev = dev; in virtnet_probe()
1799 vi->vdev = vdev; in virtnet_probe()
1800 vdev->priv = vi; in virtnet_probe()
1801 vi->stats = alloc_percpu(struct virtnet_stats); in virtnet_probe()
1803 if (vi->stats == NULL) in virtnet_probe()
1808 virtnet_stats = per_cpu_ptr(vi->stats, i); in virtnet_probe()
1813 INIT_WORK(&vi->config_work, virtnet_config_changed_work); in virtnet_probe()
1820 vi->big_packets = true; in virtnet_probe()
1823 vi->mergeable_rx_bufs = true; in virtnet_probe()
1827 vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf); in virtnet_probe()
1829 vi->hdr_len = sizeof(struct virtio_net_hdr); in virtnet_probe()
1833 vi->any_header_sg = true; in virtnet_probe()
1836 vi->has_cvq = true; in virtnet_probe()
1838 if (vi->any_header_sg) in virtnet_probe()
1839 dev->needed_headroom = vi->hdr_len; in virtnet_probe()
1842 vi->curr_queue_pairs = 1; in virtnet_probe()
1843 vi->max_queue_pairs = max_queue_pairs; in virtnet_probe()
1846 err = init_vqs(vi); in virtnet_probe()
1851 if (vi->mergeable_rx_bufs) in virtnet_probe()
1854 netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs); in virtnet_probe()
1855 netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs); in virtnet_probe()
1866 for (i = 0; i < vi->curr_queue_pairs; i++) { in virtnet_probe()
1867 try_fill_recv(vi, &vi->rq[i], GFP_KERNEL); in virtnet_probe()
1870 if (vi->rq[i].vq->num_free == in virtnet_probe()
1871 virtqueue_get_vring_size(vi->rq[i].vq)) { in virtnet_probe()
1872 free_unused_bufs(vi); in virtnet_probe()
1878 vi->nb.notifier_call = &virtnet_cpu_callback; in virtnet_probe()
1879 err = register_hotcpu_notifier(&vi->nb); in virtnet_probe()
1887 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) { in virtnet_probe()
1889 schedule_work(&vi->config_work); in virtnet_probe()
1891 vi->status = VIRTIO_NET_S_LINK_UP; in virtnet_probe()
1901 vi->vdev->config->reset(vdev); in virtnet_probe()
1903 free_receive_bufs(vi); in virtnet_probe()
1906 cancel_delayed_work_sync(&vi->refill); in virtnet_probe()
1907 free_receive_page_frags(vi); in virtnet_probe()
1908 virtnet_del_vqs(vi); in virtnet_probe()
1910 free_percpu(vi->stats); in virtnet_probe()
1916 static void remove_vq_common(struct virtnet_info *vi) in remove_vq_common() argument
1918 vi->vdev->config->reset(vi->vdev); in remove_vq_common()
1921 free_unused_bufs(vi); in remove_vq_common()
1923 free_receive_bufs(vi); in remove_vq_common()
1925 free_receive_page_frags(vi); in remove_vq_common()
1927 virtnet_del_vqs(vi); in remove_vq_common()
1932 struct virtnet_info *vi = vdev->priv; in virtnet_remove() local
1934 unregister_hotcpu_notifier(&vi->nb); in virtnet_remove()
1937 flush_work(&vi->config_work); in virtnet_remove()
1939 unregister_netdev(vi->dev); in virtnet_remove()
1941 remove_vq_common(vi); in virtnet_remove()
1943 free_percpu(vi->stats); in virtnet_remove()
1944 free_netdev(vi->dev); in virtnet_remove()
1950 struct virtnet_info *vi = vdev->priv; in virtnet_freeze() local
1953 unregister_hotcpu_notifier(&vi->nb); in virtnet_freeze()
1956 flush_work(&vi->config_work); in virtnet_freeze()
1958 netif_device_detach(vi->dev); in virtnet_freeze()
1959 cancel_delayed_work_sync(&vi->refill); in virtnet_freeze()
1961 if (netif_running(vi->dev)) { in virtnet_freeze()
1962 for (i = 0; i < vi->max_queue_pairs; i++) in virtnet_freeze()
1963 napi_disable(&vi->rq[i].napi); in virtnet_freeze()
1966 remove_vq_common(vi); in virtnet_freeze()
1973 struct virtnet_info *vi = vdev->priv; in virtnet_restore() local
1976 err = init_vqs(vi); in virtnet_restore()
1982 if (netif_running(vi->dev)) { in virtnet_restore()
1983 for (i = 0; i < vi->curr_queue_pairs; i++) in virtnet_restore()
1984 if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL)) in virtnet_restore()
1985 schedule_delayed_work(&vi->refill, 0); in virtnet_restore()
1987 for (i = 0; i < vi->max_queue_pairs; i++) in virtnet_restore()
1988 virtnet_napi_enable(&vi->rq[i]); in virtnet_restore()
1991 netif_device_attach(vi->dev); in virtnet_restore()
1994 virtnet_set_queues(vi, vi->curr_queue_pairs); in virtnet_restore()
1997 err = register_hotcpu_notifier(&vi->nb); in virtnet_restore()