Lines matching refs: enic (identifier cross-reference over drivers/net/ethernet/cisco/enic/enic_main.c, the Cisco VIC Ethernet NIC driver; each entry shows the source line number, the matching line, and the enclosing function)

116 static void enic_init_affinity_hint(struct enic *enic)  in enic_init_affinity_hint()  argument
118 int numa_node = dev_to_node(&enic->pdev->dev); in enic_init_affinity_hint()
121 for (i = 0; i < enic->intr_count; i++) { in enic_init_affinity_hint()
122 if (enic_is_err_intr(enic, i) || enic_is_notify_intr(enic, i) || in enic_init_affinity_hint()
123 (enic->msix[i].affinity_mask && in enic_init_affinity_hint()
124 !cpumask_empty(enic->msix[i].affinity_mask))) in enic_init_affinity_hint()
126 if (zalloc_cpumask_var(&enic->msix[i].affinity_mask, in enic_init_affinity_hint()
129 enic->msix[i].affinity_mask); in enic_init_affinity_hint()
133 static void enic_free_affinity_hint(struct enic *enic) in enic_free_affinity_hint() argument
137 for (i = 0; i < enic->intr_count; i++) { in enic_free_affinity_hint()
138 if (enic_is_err_intr(enic, i) || enic_is_notify_intr(enic, i)) in enic_free_affinity_hint()
140 free_cpumask_var(enic->msix[i].affinity_mask); in enic_free_affinity_hint()
144 static void enic_set_affinity_hint(struct enic *enic) in enic_set_affinity_hint() argument
149 for (i = 0; i < enic->intr_count; i++) { in enic_set_affinity_hint()
150 if (enic_is_err_intr(enic, i) || in enic_set_affinity_hint()
151 enic_is_notify_intr(enic, i) || in enic_set_affinity_hint()
152 !enic->msix[i].affinity_mask || in enic_set_affinity_hint()
153 cpumask_empty(enic->msix[i].affinity_mask)) in enic_set_affinity_hint()
155 err = irq_set_affinity_hint(enic->msix_entry[i].vector, in enic_set_affinity_hint()
156 enic->msix[i].affinity_mask); in enic_set_affinity_hint()
158 netdev_warn(enic->netdev, "irq_set_affinity_hint failed, err %d\n", in enic_set_affinity_hint()
162 for (i = 0; i < enic->wq_count; i++) { in enic_set_affinity_hint()
163 int wq_intr = enic_msix_wq_intr(enic, i); in enic_set_affinity_hint()
165 if (enic->msix[wq_intr].affinity_mask && in enic_set_affinity_hint()
166 !cpumask_empty(enic->msix[wq_intr].affinity_mask)) in enic_set_affinity_hint()
167 netif_set_xps_queue(enic->netdev, in enic_set_affinity_hint()
168 enic->msix[wq_intr].affinity_mask, in enic_set_affinity_hint()
173 static void enic_unset_affinity_hint(struct enic *enic) in enic_unset_affinity_hint() argument
177 for (i = 0; i < enic->intr_count; i++) in enic_unset_affinity_hint()
178 irq_set_affinity_hint(enic->msix_entry[i].vector, NULL); in enic_unset_affinity_hint()
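
The three helpers above form the full affinity-hint lifecycle: enic_init_affinity_hint() allocates a cpumask per I/O vector, enic_set_affinity_hint() hands each mask to the IRQ core (and mirrors the WQ masks into XPS via netif_set_xps_queue()), and the free/unset pair undoes both. A minimal sketch of that allocate/hint/free contract using the same kernel APIs; the example_vec holder and function names are illustrative, not the driver's:

#include <linux/cpumask.h>
#include <linux/interrupt.h>

struct example_vec {
        unsigned int irq;
        cpumask_var_t mask;
};

/* Pin the hint to a CPU near the device's NUMA node. The mask must
 * outlive the hint (the IRQ core only borrows the pointer), which is
 * why enic keeps it in enic->msix[i].affinity_mask, not on the stack. */
static int example_hint_set(struct example_vec *v, int idx, int numa_node)
{
        if (!zalloc_cpumask_var(&v->mask, GFP_KERNEL))
                return -ENOMEM;
        cpumask_set_cpu(cpumask_local_spread(idx, numa_node), v->mask);
        return irq_set_affinity_hint(v->irq, v->mask);
}

static void example_hint_clear(struct example_vec *v)
{
        irq_set_affinity_hint(v->irq, NULL);    /* drop the hint first */
        free_cpumask_var(v->mask);              /* then free the storage */
}
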
181 int enic_is_dynamic(struct enic *enic) in enic_is_dynamic() argument
183 return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN; in enic_is_dynamic()
186 int enic_sriov_enabled(struct enic *enic) in enic_sriov_enabled() argument
188 return (enic->priv_flags & ENIC_SRIOV_ENABLED) ? 1 : 0; in enic_sriov_enabled()
191 static int enic_is_sriov_vf(struct enic *enic) in enic_is_sriov_vf() argument
193 return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_VF; in enic_is_sriov_vf()
196 int enic_is_valid_vf(struct enic *enic, int vf) in enic_is_valid_vf() argument
199 return vf >= 0 && vf < enic->num_vfs; in enic_is_valid_vf()
207 struct enic *enic = vnic_dev_priv(wq->vdev); in enic_free_wq_buf() local
210 pci_unmap_single(enic->pdev, buf->dma_addr, in enic_free_wq_buf()
213 pci_unmap_page(enic->pdev, buf->dma_addr, in enic_free_wq_buf()
229 struct enic *enic = vnic_dev_priv(vdev); in enic_wq_service() local
231 spin_lock(&enic->wq_lock[q_number]); in enic_wq_service()
233 vnic_wq_service(&enic->wq[q_number], cq_desc, in enic_wq_service()
237 if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number)) && in enic_wq_service()
238 vnic_wq_desc_avail(&enic->wq[q_number]) >= in enic_wq_service()
240 netif_wake_subqueue(enic->netdev, q_number); in enic_wq_service()
242 spin_unlock(&enic->wq_lock[q_number]); in enic_wq_service()
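
enic_wq_service() is the completion half of the TX stop/wake handshake: it runs under the same per-queue wq_lock as the xmit path (see enic_hard_start_xmit() below) and re-wakes a stopped subqueue once enough descriptors have been reclaimed. A generic sketch of the check; the threshold macro is illustrative, enic derives its own from the maximum descriptors a single skb can consume:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define EXAMPLE_WAKE_THRESH     (MAX_SKB_FRAGS + 1)     /* illustrative */

/* Called from the TX completion path with the queue's lock held. */
static void example_maybe_wake(struct net_device *ndev, unsigned int qidx,
                               unsigned int descs_avail)
{
        struct netdev_queue *txq = netdev_get_tx_queue(ndev, qidx);

        if (netif_tx_queue_stopped(txq) && descs_avail >= EXAMPLE_WAKE_THRESH)
                netif_wake_subqueue(ndev, qidx);
}
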
247 static bool enic_log_q_error(struct enic *enic) in enic_log_q_error() argument
253 for (i = 0; i < enic->wq_count; i++) { in enic_log_q_error()
254 error_status = vnic_wq_error_status(&enic->wq[i]); in enic_log_q_error()
257 netdev_err(enic->netdev, "WQ[%d] error_status %d\n", in enic_log_q_error()
261 for (i = 0; i < enic->rq_count; i++) { in enic_log_q_error()
262 error_status = vnic_rq_error_status(&enic->rq[i]); in enic_log_q_error()
265 netdev_err(enic->netdev, "RQ[%d] error_status %d\n", in enic_log_q_error()
272 static void enic_msglvl_check(struct enic *enic) in enic_msglvl_check() argument
274 u32 msg_enable = vnic_dev_msg_lvl(enic->vdev); in enic_msglvl_check()
276 if (msg_enable != enic->msg_enable) { in enic_msglvl_check()
277 netdev_info(enic->netdev, "msg lvl changed from 0x%x to 0x%x\n", in enic_msglvl_check()
278 enic->msg_enable, msg_enable); in enic_msglvl_check()
279 enic->msg_enable = msg_enable; in enic_msglvl_check()
283 static void enic_mtu_check(struct enic *enic) in enic_mtu_check() argument
285 u32 mtu = vnic_dev_mtu(enic->vdev); in enic_mtu_check()
286 struct net_device *netdev = enic->netdev; in enic_mtu_check()
288 if (mtu && mtu != enic->port_mtu) { in enic_mtu_check()
289 enic->port_mtu = mtu; in enic_mtu_check()
290 if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) { in enic_mtu_check()
294 schedule_work(&enic->change_mtu_work); in enic_mtu_check()
305 static void enic_link_check(struct enic *enic) in enic_link_check() argument
307 int link_status = vnic_dev_link_status(enic->vdev); in enic_link_check()
308 int carrier_ok = netif_carrier_ok(enic->netdev); in enic_link_check()
311 netdev_info(enic->netdev, "Link UP\n"); in enic_link_check()
312 netif_carrier_on(enic->netdev); in enic_link_check()
314 netdev_info(enic->netdev, "Link DOWN\n"); in enic_link_check()
315 netif_carrier_off(enic->netdev); in enic_link_check()
319 static void enic_notify_check(struct enic *enic) in enic_notify_check() argument
321 enic_msglvl_check(enic); in enic_notify_check()
322 enic_mtu_check(enic); in enic_notify_check()
323 enic_link_check(enic); in enic_notify_check()
331 struct enic *enic = netdev_priv(netdev); in enic_isr_legacy() local
337 vnic_intr_mask(&enic->intr[io_intr]); in enic_isr_legacy()
339 pba = vnic_intr_legacy_pba(enic->legacy_pba); in enic_isr_legacy()
341 vnic_intr_unmask(&enic->intr[io_intr]); in enic_isr_legacy()
346 enic_notify_check(enic); in enic_isr_legacy()
347 vnic_intr_return_all_credits(&enic->intr[notify_intr]); in enic_isr_legacy()
351 vnic_intr_return_all_credits(&enic->intr[err_intr]); in enic_isr_legacy()
352 enic_log_q_error(enic); in enic_isr_legacy()
354 schedule_work(&enic->reset); in enic_isr_legacy()
359 napi_schedule_irqoff(&enic->napi[0]); in enic_isr_legacy()
361 vnic_intr_unmask(&enic->intr[io_intr]); in enic_isr_legacy()
368 struct enic *enic = data; in enic_isr_msi() local
386 napi_schedule_irqoff(&enic->napi[0]); in enic_isr_msi()
402 struct enic *enic = data; in enic_isr_msix_err() local
403 unsigned int intr = enic_msix_err_intr(enic); in enic_isr_msix_err()
405 vnic_intr_return_all_credits(&enic->intr[intr]); in enic_isr_msix_err()
407 if (enic_log_q_error(enic)) in enic_isr_msix_err()
409 schedule_work(&enic->reset); in enic_isr_msix_err()
416 struct enic *enic = data; in enic_isr_msix_notify() local
417 unsigned int intr = enic_msix_notify_intr(enic); in enic_isr_msix_notify()
419 enic_notify_check(enic); in enic_isr_msix_notify()
420 vnic_intr_return_all_credits(&enic->intr[intr]); in enic_isr_msix_notify()
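
Between them, the MSI-X handlers split work by vector purpose: the RQ/WQ vectors only kick NAPI, while the err and notify vectors return all their interrupt credits and act inline (log-and-reset, or re-read the notify area). A sketch of the data-path half, assuming the vector was registered with a napi_struct as its cookie, as enic_request_intr() does further down:

#include <linux/interrupt.h>
#include <linux/netdevice.h>

static irqreturn_t example_isr_msix_rq(int irq, void *data)
{
        struct napi_struct *napi = data;

        /* Defer all packet work to NAPI softirq context; the poll
         * routine re-enables the interrupt when it finishes under
         * budget. */
        napi_schedule_irqoff(napi);
        return IRQ_HANDLED;
}
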
425 static int enic_queue_wq_skb_cont(struct enic *enic, struct vnic_wq *wq, in enic_queue_wq_skb_cont() argument
435 dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag, 0, in enic_queue_wq_skb_cont()
438 if (unlikely(enic_dma_map_check(enic, dma_addr))) in enic_queue_wq_skb_cont()
448 static int enic_queue_wq_skb_vlan(struct enic *enic, struct vnic_wq *wq, in enic_queue_wq_skb_vlan() argument
458 dma_addr = pci_map_single(enic->pdev, skb->data, head_len, in enic_queue_wq_skb_vlan()
460 if (unlikely(enic_dma_map_check(enic, dma_addr))) in enic_queue_wq_skb_vlan()
472 err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback); in enic_queue_wq_skb_vlan()
477 static int enic_queue_wq_skb_csum_l4(struct enic *enic, struct vnic_wq *wq, in enic_queue_wq_skb_csum_l4() argument
489 dma_addr = pci_map_single(enic->pdev, skb->data, head_len, in enic_queue_wq_skb_csum_l4()
491 if (unlikely(enic_dma_map_check(enic, dma_addr))) in enic_queue_wq_skb_csum_l4()
504 err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback); in enic_queue_wq_skb_csum_l4()
509 static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq, in enic_queue_wq_skb_tso() argument
542 dma_addr = pci_map_single(enic->pdev, skb->data + offset, len, in enic_queue_wq_skb_tso()
544 if (unlikely(enic_dma_map_check(enic, dma_addr))) in enic_queue_wq_skb_tso()
567 dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag, in enic_queue_wq_skb_tso()
570 if (unlikely(enic_dma_map_check(enic, dma_addr))) in enic_queue_wq_skb_tso()
584 static inline void enic_queue_wq_skb(struct enic *enic, in enic_queue_wq_skb() argument
597 } else if (enic->loop_enable) { in enic_queue_wq_skb()
598 vlan_tag = enic->loop_tag; in enic_queue_wq_skb()
603 err = enic_queue_wq_skb_tso(enic, wq, skb, mss, in enic_queue_wq_skb()
607 err = enic_queue_wq_skb_csum_l4(enic, wq, skb, vlan_tag_insert, in enic_queue_wq_skb()
610 err = enic_queue_wq_skb_vlan(enic, wq, skb, vlan_tag_insert, in enic_queue_wq_skb()
633 struct enic *enic = netdev_priv(netdev); in enic_hard_start_xmit() local
643 txq_map = skb_get_queue_mapping(skb) % enic->wq_count; in enic_hard_start_xmit()
644 wq = &enic->wq[txq_map]; in enic_hard_start_xmit()
659 spin_lock(&enic->wq_lock[txq_map]); in enic_hard_start_xmit()
666 spin_unlock(&enic->wq_lock[txq_map]); in enic_hard_start_xmit()
670 enic_queue_wq_skb(enic, wq, skb); in enic_hard_start_xmit()
677 spin_unlock(&enic->wq_lock[txq_map]); in enic_hard_start_xmit()
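
enic_hard_start_xmit() maps the skb's queue mapping onto a WQ, takes that queue's spinlock (shared with the completion path in enic_wq_service() above), and stops the subqueue when descriptors run low. A condensed sketch of that shape; example_tx_priv and the two descriptor helpers are hypothetical stand-ins for the vnic_wq accessors:

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

struct example_tx_priv {
        unsigned int wq_count;
        spinlock_t wq_lock[8];  /* one lock per TX queue; size illustrative */
};

static unsigned int example_desc_avail(struct example_tx_priv *p,
                                       unsigned int q);
static void example_post_skb(struct example_tx_priv *p, unsigned int q,
                             struct sk_buff *skb);

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *ndev)
{
        struct example_tx_priv *priv = netdev_priv(ndev);
        unsigned int q = skb_get_queue_mapping(skb) % priv->wq_count;

        spin_lock(&priv->wq_lock[q]);

        /* Worst case: one descriptor per page fragment plus the head. */
        if (example_desc_avail(priv, q) < skb_shinfo(skb)->nr_frags + 1) {
                netif_stop_subqueue(ndev, q);
                spin_unlock(&priv->wq_lock[q]);
                return NETDEV_TX_BUSY;  /* completion path re-wakes us */
        }

        example_post_skb(priv, q, skb); /* map buffers, write descriptors */
        spin_unlock(&priv->wq_lock[q]);
        return NETDEV_TX_OK;
}
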
686 struct enic *enic = netdev_priv(netdev); in enic_get_stats() local
690 err = enic_dev_stats_dump(enic, &stats); in enic_get_stats()
707 net_stats->rx_over_errors = enic->rq_truncated_pkts; in enic_get_stats()
708 net_stats->rx_crc_errors = enic->rq_bad_fcs; in enic_get_stats()
716 struct enic *enic = netdev_priv(netdev); in enic_mc_sync() local
718 if (enic->mc_count == ENIC_MULTICAST_PERFECT_FILTERS) { in enic_mc_sync()
727 enic_dev_add_addr(enic, mc_addr); in enic_mc_sync()
728 enic->mc_count++; in enic_mc_sync()
735 struct enic *enic = netdev_priv(netdev); in enic_mc_unsync() local
737 enic_dev_del_addr(enic, mc_addr); in enic_mc_unsync()
738 enic->mc_count--; in enic_mc_unsync()
745 struct enic *enic = netdev_priv(netdev); in enic_uc_sync() local
747 if (enic->uc_count == ENIC_UNICAST_PERFECT_FILTERS) { in enic_uc_sync()
756 enic_dev_add_addr(enic, uc_addr); in enic_uc_sync()
757 enic->uc_count++; in enic_uc_sync()
764 struct enic *enic = netdev_priv(netdev); in enic_uc_unsync() local
766 enic_dev_del_addr(enic, uc_addr); in enic_uc_unsync()
767 enic->uc_count--; in enic_uc_unsync()
772 void enic_reset_addr_lists(struct enic *enic) in enic_reset_addr_lists() argument
774 struct net_device *netdev = enic->netdev; in enic_reset_addr_lists()
779 enic->mc_count = 0; in enic_reset_addr_lists()
780 enic->uc_count = 0; in enic_reset_addr_lists()
781 enic->flags = 0; in enic_reset_addr_lists()
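
The four sync/unsync callbacks above (and the counter resets in enic_reset_addr_lists()) exist to plug into the net core's address-list synchronization helpers, which diff the netdev's unicast/multicast lists against the hardware's perfect filters. A minimal sketch of how such callbacks get wired up from ndo_set_rx_mode; enic_set_rx_mode() below does the equivalent alongside its packet-filter update:

#include <linux/netdevice.h>

static void example_set_rx_mode(struct net_device *netdev)
{
        /* The core calls sync for each newly-added address and unsync
         * for each removed one, so the driver's perfect filters track
         * the lists without a full rewrite on every change. */
        __dev_uc_sync(netdev, enic_uc_sync, enic_uc_unsync);
        __dev_mc_sync(netdev, enic_mc_sync, enic_mc_unsync);
}
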
786 struct enic *enic = netdev_priv(netdev); in enic_set_mac_addr() local
788 if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) { in enic_set_mac_addr()
803 struct enic *enic = netdev_priv(netdev); in enic_set_mac_address_dynamic() local
808 if (netif_running(enic->netdev)) { in enic_set_mac_address_dynamic()
809 err = enic_dev_del_station_addr(enic); in enic_set_mac_address_dynamic()
818 if (netif_running(enic->netdev)) { in enic_set_mac_address_dynamic()
819 err = enic_dev_add_station_addr(enic); in enic_set_mac_address_dynamic()
831 struct enic *enic = netdev_priv(netdev); in enic_set_mac_address() local
834 err = enic_dev_del_station_addr(enic); in enic_set_mac_address()
842 return enic_dev_add_station_addr(enic); in enic_set_mac_address()
848 struct enic *enic = netdev_priv(netdev); in enic_set_rx_mode() local
860 if (enic->flags != flags) { in enic_set_rx_mode()
861 enic->flags = flags; in enic_set_rx_mode()
862 enic_dev_packet_filter(enic, directed, in enic_set_rx_mode()
876 struct enic *enic = netdev_priv(netdev); in enic_tx_timeout() local
877 schedule_work(&enic->tx_hang_reset); in enic_tx_timeout()
882 struct enic *enic = netdev_priv(netdev); in enic_set_vf_mac() local
886 ENIC_PP_BY_INDEX(enic, vf, pp, &err); in enic_set_vf_mac()
898 ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic, in enic_set_vf_mac()
909 struct enic *enic = netdev_priv(netdev); in enic_set_vf_port() local
914 ENIC_PP_BY_INDEX(enic, vf, pp, &err); in enic_set_vf_port()
921 memcpy(&prev_pp, pp, sizeof(*enic->pp)); in enic_set_vf_port()
922 memset(pp, 0, sizeof(*enic->pp)); in enic_set_vf_port()
954 ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic, in enic_set_vf_port()
963 err = enic_process_set_pp_request(enic, vf, &prev_pp, &restore_pp); in enic_set_vf_port()
998 struct enic *enic = netdev_priv(netdev); in enic_get_vf_port() local
1003 ENIC_PP_BY_INDEX(enic, vf, pp, &err); in enic_get_vf_port()
1010 err = enic_process_get_pp_request(enic, vf, pp->request, &response); in enic_get_vf_port()
1032 struct enic *enic = vnic_dev_priv(rq->vdev); in enic_free_rq_buf() local
1037 pci_unmap_single(enic->pdev, buf->dma_addr, in enic_free_rq_buf()
1045 struct enic *enic = vnic_dev_priv(rq->vdev); in enic_rq_alloc_buf() local
1046 struct net_device *netdev = enic->netdev; in enic_rq_alloc_buf()
1063 dma_addr = pci_map_single(enic->pdev, skb->data, len, in enic_rq_alloc_buf()
1065 if (unlikely(enic_dma_map_check(enic, dma_addr))) { in enic_rq_alloc_buf()
1088 struct enic *enic = netdev_priv(netdev); in enic_rxcopybreak() local
1091 if (len > enic->rx_copybreak) in enic_rxcopybreak()
1096 pci_dma_sync_single_for_cpu(enic->pdev, buf->dma_addr, len, in enic_rxcopybreak()
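
enic_rxcopybreak() implements the classic copybreak optimization: for packets at or below enic->rx_copybreak it syncs the DMA buffer to the CPU, copies the payload into a small freshly-allocated skb, and leaves the large receive buffer mapped for immediate reuse, skipping an unmap/remap cycle. A sketch under the same era's PCI DMA API; the helper's name and signature are illustrative:

#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/string.h>

static bool example_copybreak(struct pci_dev *pdev, struct net_device *ndev,
                              struct sk_buff **skb, dma_addr_t dma_addr,
                              unsigned int len, unsigned int copybreak)
{
        struct sk_buff *new_skb;

        if (len > copybreak)
                return false;   /* large packet: hand up the mapped buffer */

        new_skb = netdev_alloc_skb_ip_align(ndev, len);
        if (!new_skb)
                return false;

        pci_dma_sync_single_for_cpu(pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
        memcpy(new_skb->data, (*skb)->data, len);
        *skb = new_skb;         /* original buffer stays mapped for reuse */
        return true;
}
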
1108 struct enic *enic = vnic_dev_priv(rq->vdev); in enic_rq_indicate_buf() local
1109 struct net_device *netdev = enic->netdev; in enic_rq_indicate_buf()
1111 struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)]; in enic_rq_indicate_buf()
1140 enic->rq_bad_fcs++; in enic_rq_indicate_buf()
1142 enic->rq_truncated_pkts++; in enic_rq_indicate_buf()
1145 pci_unmap_single(enic->pdev, buf->dma_addr, buf->len, in enic_rq_indicate_buf()
1160 pci_unmap_single(enic->pdev, buf->dma_addr, buf->len, in enic_rq_indicate_buf()
1189 skb_mark_napi_id(skb, &enic->napi[rq->index]); in enic_rq_indicate_buf()
1194 napi_gro_receive(&enic->napi[q_number], skb); in enic_rq_indicate_buf()
1195 if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce) in enic_rq_indicate_buf()
1203 pci_unmap_single(enic->pdev, buf->dma_addr, buf->len, in enic_rq_indicate_buf()
1213 struct enic *enic = vnic_dev_priv(vdev); in enic_rq_service() local
1215 vnic_rq_service(&enic->rq[q_number], cq_desc, in enic_rq_service()
1222 static void enic_set_int_moderation(struct enic *enic, struct vnic_rq *rq) in enic_set_int_moderation() argument
1224 unsigned int intr = enic_msix_rq_intr(enic, rq->index); in enic_set_int_moderation()
1225 struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)]; in enic_set_int_moderation()
1229 vnic_intr_coalescing_timer_set(&enic->intr[intr], timer); in enic_set_int_moderation()
1234 static void enic_calc_int_moderation(struct enic *enic, struct vnic_rq *rq) in enic_calc_int_moderation() argument
1236 struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting; in enic_calc_int_moderation()
1237 struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)]; in enic_calc_int_moderation()
1283 struct enic *enic = netdev_priv(netdev); in enic_poll() local
1284 unsigned int cq_rq = enic_cq_rq(enic, 0); in enic_poll()
1285 unsigned int cq_wq = enic_cq_wq(enic, 0); in enic_poll()
1292 wq_work_done = vnic_cq_service(&enic->cq[cq_wq], wq_work_to_do, in enic_poll()
1295 if (!enic_poll_lock_napi(&enic->rq[cq_rq])) { in enic_poll()
1297 vnic_intr_return_credits(&enic->intr[intr], in enic_poll()
1305 rq_work_done = vnic_cq_service(&enic->cq[cq_rq], in enic_poll()
1316 vnic_intr_return_credits(&enic->intr[intr], in enic_poll()
1321 err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf); in enic_poll()
1322 enic_poll_unlock_napi(&enic->rq[cq_rq], napi); in enic_poll()
1330 if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce) in enic_poll()
1334 enic_calc_int_moderation(enic, &enic->rq[0]); in enic_poll()
1343 if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce) in enic_poll()
1344 enic_set_int_moderation(enic, &enic->rq[0]); in enic_poll()
1345 vnic_intr_unmask(&enic->intr[intr]); in enic_poll()
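
enic_poll() follows the standard NAPI contract: service TX completions, service at most budget RX completions, refill the receive ring, apply adaptive coalescing if enabled, and only complete NAPI and unmask the interrupt when it finishes under budget; otherwise it stays scheduled. A skeleton of that contract with hypothetical service/refill/unmask helpers:

#include <linux/netdevice.h>

struct example_rx_priv {
        struct napi_struct napi;
        /* rings, interrupt state, ... */
};

static int example_service_rq(struct example_rx_priv *p, int budget);
static void example_refill_rq(struct example_rx_priv *p);
static void example_unmask_intr(struct example_rx_priv *p);

static int example_poll(struct napi_struct *napi, int budget)
{
        struct example_rx_priv *priv =
                container_of(napi, struct example_rx_priv, napi);
        int work_done;

        work_done = example_service_rq(priv, budget);   /* <= budget */
        example_refill_rq(priv);

        if (work_done < budget) {
                /* Ring drained: leave polling mode, re-arm the IRQ. */
                napi_complete(napi);
                example_unmask_intr(priv);
        }
        return work_done;
}
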
1352 static void enic_free_rx_cpu_rmap(struct enic *enic) in enic_free_rx_cpu_rmap() argument
1354 free_irq_cpu_rmap(enic->netdev->rx_cpu_rmap); in enic_free_rx_cpu_rmap()
1355 enic->netdev->rx_cpu_rmap = NULL; in enic_free_rx_cpu_rmap()
1358 static void enic_set_rx_cpu_rmap(struct enic *enic) in enic_set_rx_cpu_rmap() argument
1362 if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) { in enic_set_rx_cpu_rmap()
1363 enic->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(enic->rq_count); in enic_set_rx_cpu_rmap()
1364 if (unlikely(!enic->netdev->rx_cpu_rmap)) in enic_set_rx_cpu_rmap()
1366 for (i = 0; i < enic->rq_count; i++) { in enic_set_rx_cpu_rmap()
1367 res = irq_cpu_rmap_add(enic->netdev->rx_cpu_rmap, in enic_set_rx_cpu_rmap()
1368 enic->msix_entry[i].vector); in enic_set_rx_cpu_rmap()
1370 enic_free_rx_cpu_rmap(enic); in enic_set_rx_cpu_rmap()
1379 static void enic_free_rx_cpu_rmap(struct enic *enic) in enic_free_rx_cpu_rmap() argument
1383 static void enic_set_rx_cpu_rmap(struct enic *enic) in enic_set_rx_cpu_rmap() argument
1393 struct enic *enic = netdev_priv(netdev); in enic_busy_poll() local
1394 unsigned int rq = (napi - &enic->napi[0]); in enic_busy_poll()
1395 unsigned int cq = enic_cq_rq(enic, rq); in enic_busy_poll()
1396 unsigned int intr = enic_msix_rq_intr(enic, rq); in enic_busy_poll()
1400 if (!enic_poll_lock_poll(&enic->rq[rq])) in enic_busy_poll()
1402 work_done = vnic_cq_service(&enic->cq[cq], work_to_do, in enic_busy_poll()
1406 vnic_intr_return_credits(&enic->intr[intr], in enic_busy_poll()
1408 vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf); in enic_busy_poll()
1409 if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce) in enic_busy_poll()
1410 enic_calc_int_moderation(enic, &enic->rq[rq]); in enic_busy_poll()
1411 enic_poll_unlock_poll(&enic->rq[rq]); in enic_busy_poll()
1420 struct enic *enic = netdev_priv(netdev); in enic_poll_msix_wq() local
1421 unsigned int wq_index = (napi - &enic->napi[0]) - enic->rq_count; in enic_poll_msix_wq()
1422 struct vnic_wq *wq = &enic->wq[wq_index]; in enic_poll_msix_wq()
1430 cq = enic_cq_wq(enic, wq_irq); in enic_poll_msix_wq()
1431 intr = enic_msix_wq_intr(enic, wq_irq); in enic_poll_msix_wq()
1432 wq_work_done = vnic_cq_service(&enic->cq[cq], wq_work_to_do, in enic_poll_msix_wq()
1435 vnic_intr_return_credits(&enic->intr[intr], wq_work_done, in enic_poll_msix_wq()
1440 vnic_intr_unmask(&enic->intr[intr]); in enic_poll_msix_wq()
1450 struct enic *enic = netdev_priv(netdev); in enic_poll_msix_rq() local
1451 unsigned int rq = (napi - &enic->napi[0]); in enic_poll_msix_rq()
1452 unsigned int cq = enic_cq_rq(enic, rq); in enic_poll_msix_rq()
1453 unsigned int intr = enic_msix_rq_intr(enic, rq); in enic_poll_msix_rq()
1458 if (!enic_poll_lock_napi(&enic->rq[rq])) in enic_poll_msix_rq()
1464 work_done = vnic_cq_service(&enic->cq[cq], in enic_poll_msix_rq()
1473 vnic_intr_return_credits(&enic->intr[intr], in enic_poll_msix_rq()
1478 err = vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf); in enic_poll_msix_rq()
1486 if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce) in enic_poll_msix_rq()
1490 enic_calc_int_moderation(enic, &enic->rq[rq]); in enic_poll_msix_rq()
1492 enic_poll_unlock_napi(&enic->rq[rq], napi); in enic_poll_msix_rq()
1500 if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce) in enic_poll_msix_rq()
1501 enic_set_int_moderation(enic, &enic->rq[rq]); in enic_poll_msix_rq()
1502 vnic_intr_unmask(&enic->intr[intr]); in enic_poll_msix_rq()
1510 struct enic *enic = (struct enic *)data; in enic_notify_timer() local
1512 enic_notify_check(enic); in enic_notify_timer()
1514 mod_timer(&enic->notify_timer, in enic_notify_timer()
1518 static void enic_free_intr(struct enic *enic) in enic_free_intr() argument
1520 struct net_device *netdev = enic->netdev; in enic_free_intr()
1523 enic_free_rx_cpu_rmap(enic); in enic_free_intr()
1524 switch (vnic_dev_get_intr_mode(enic->vdev)) { in enic_free_intr()
1526 free_irq(enic->pdev->irq, netdev); in enic_free_intr()
1529 free_irq(enic->pdev->irq, enic); in enic_free_intr()
1532 for (i = 0; i < ARRAY_SIZE(enic->msix); i++) in enic_free_intr()
1533 if (enic->msix[i].requested) in enic_free_intr()
1534 free_irq(enic->msix_entry[i].vector, in enic_free_intr()
1535 enic->msix[i].devid); in enic_free_intr()
1542 static int enic_request_intr(struct enic *enic) in enic_request_intr() argument
1544 struct net_device *netdev = enic->netdev; in enic_request_intr()
1548 enic_set_rx_cpu_rmap(enic); in enic_request_intr()
1549 switch (vnic_dev_get_intr_mode(enic->vdev)) { in enic_request_intr()
1553 err = request_irq(enic->pdev->irq, enic_isr_legacy, in enic_request_intr()
1559 err = request_irq(enic->pdev->irq, enic_isr_msi, in enic_request_intr()
1560 0, netdev->name, enic); in enic_request_intr()
1565 for (i = 0; i < enic->rq_count; i++) { in enic_request_intr()
1566 intr = enic_msix_rq_intr(enic, i); in enic_request_intr()
1567 snprintf(enic->msix[intr].devname, in enic_request_intr()
1568 sizeof(enic->msix[intr].devname), in enic_request_intr()
1570 enic->msix[intr].isr = enic_isr_msix; in enic_request_intr()
1571 enic->msix[intr].devid = &enic->napi[i]; in enic_request_intr()
1574 for (i = 0; i < enic->wq_count; i++) { in enic_request_intr()
1575 int wq = enic_cq_wq(enic, i); in enic_request_intr()
1577 intr = enic_msix_wq_intr(enic, i); in enic_request_intr()
1578 snprintf(enic->msix[intr].devname, in enic_request_intr()
1579 sizeof(enic->msix[intr].devname), in enic_request_intr()
1581 enic->msix[intr].isr = enic_isr_msix; in enic_request_intr()
1582 enic->msix[intr].devid = &enic->napi[wq]; in enic_request_intr()
1585 intr = enic_msix_err_intr(enic); in enic_request_intr()
1586 snprintf(enic->msix[intr].devname, in enic_request_intr()
1587 sizeof(enic->msix[intr].devname), in enic_request_intr()
1589 enic->msix[intr].isr = enic_isr_msix_err; in enic_request_intr()
1590 enic->msix[intr].devid = enic; in enic_request_intr()
1592 intr = enic_msix_notify_intr(enic); in enic_request_intr()
1593 snprintf(enic->msix[intr].devname, in enic_request_intr()
1594 sizeof(enic->msix[intr].devname), in enic_request_intr()
1596 enic->msix[intr].isr = enic_isr_msix_notify; in enic_request_intr()
1597 enic->msix[intr].devid = enic; in enic_request_intr()
1599 for (i = 0; i < ARRAY_SIZE(enic->msix); i++) in enic_request_intr()
1600 enic->msix[i].requested = 0; in enic_request_intr()
1602 for (i = 0; i < enic->intr_count; i++) { in enic_request_intr()
1603 err = request_irq(enic->msix_entry[i].vector, in enic_request_intr()
1604 enic->msix[i].isr, 0, in enic_request_intr()
1605 enic->msix[i].devname, in enic_request_intr()
1606 enic->msix[i].devid); in enic_request_intr()
1608 enic_free_intr(enic); in enic_request_intr()
1611 enic->msix[i].requested = 1; in enic_request_intr()
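
enic_request_intr()'s MSI-X branch fills a table of {devname, isr, devid} triples first (one per RQ, one per WQ, plus the err and notify vectors), then requests every vector in a single loop, tearing everything down via enic_free_intr() on the first failure. The fill-then-request-with-unwind idiom in isolation; the table type is hypothetical:

#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/pci.h>

struct example_vec_tbl {
        char devname[IFNAMSIZ];
        irq_handler_t isr;
        void *devid;
};

static int example_request_all(struct msix_entry *ent,
                               struct example_vec_tbl *tbl, int nvec)
{
        int i, err;

        for (i = 0; i < nvec; i++) {
                err = request_irq(ent[i].vector, tbl[i].isr, 0,
                                  tbl[i].devname, tbl[i].devid);
                if (err)
                        goto unwind;
        }
        return 0;

unwind:
        while (i--)     /* free only the vectors actually requested */
                free_irq(ent[i].vector, tbl[i].devid);
        return err;
}
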
1623 static void enic_synchronize_irqs(struct enic *enic) in enic_synchronize_irqs() argument
1627 switch (vnic_dev_get_intr_mode(enic->vdev)) { in enic_synchronize_irqs()
1630 synchronize_irq(enic->pdev->irq); in enic_synchronize_irqs()
1633 for (i = 0; i < enic->intr_count; i++) in enic_synchronize_irqs()
1634 synchronize_irq(enic->msix_entry[i].vector); in enic_synchronize_irqs()
1641 static void enic_set_rx_coal_setting(struct enic *enic) in enic_set_rx_coal_setting() argument
1645 struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting; in enic_set_rx_coal_setting()
1651 speed = vnic_dev_port_speed(enic->vdev); in enic_set_rx_coal_setting()
1664 for (index = 0; index < enic->rq_count; index++) in enic_set_rx_coal_setting()
1665 enic->cq[index].cur_rx_coal_timeval = in enic_set_rx_coal_setting()
1666 enic->config.intr_timer_usec; in enic_set_rx_coal_setting()
1671 static int enic_dev_notify_set(struct enic *enic) in enic_dev_notify_set() argument
1675 spin_lock_bh(&enic->devcmd_lock); in enic_dev_notify_set()
1676 switch (vnic_dev_get_intr_mode(enic->vdev)) { in enic_dev_notify_set()
1678 err = vnic_dev_notify_set(enic->vdev, in enic_dev_notify_set()
1682 err = vnic_dev_notify_set(enic->vdev, in enic_dev_notify_set()
1683 enic_msix_notify_intr(enic)); in enic_dev_notify_set()
1686 err = vnic_dev_notify_set(enic->vdev, -1 /* no intr */); in enic_dev_notify_set()
1689 spin_unlock_bh(&enic->devcmd_lock); in enic_dev_notify_set()
1694 static void enic_notify_timer_start(struct enic *enic) in enic_notify_timer_start() argument
1696 switch (vnic_dev_get_intr_mode(enic->vdev)) { in enic_notify_timer_start()
1698 mod_timer(&enic->notify_timer, jiffies); in enic_notify_timer_start()
1709 struct enic *enic = netdev_priv(netdev); in enic_open() local
1713 err = enic_request_intr(enic); in enic_open()
1718 enic_init_affinity_hint(enic); in enic_open()
1719 enic_set_affinity_hint(enic); in enic_open()
1721 err = enic_dev_notify_set(enic); in enic_open()
1728 for (i = 0; i < enic->rq_count; i++) { in enic_open()
1729 vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf); in enic_open()
1731 if (vnic_rq_desc_used(&enic->rq[i]) == 0) { in enic_open()
1738 for (i = 0; i < enic->wq_count; i++) in enic_open()
1739 vnic_wq_enable(&enic->wq[i]); in enic_open()
1740 for (i = 0; i < enic->rq_count; i++) in enic_open()
1741 vnic_rq_enable(&enic->rq[i]); in enic_open()
1743 if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic)) in enic_open()
1744 enic_dev_add_station_addr(enic); in enic_open()
1750 for (i = 0; i < enic->rq_count; i++) { in enic_open()
1751 enic_busy_poll_init_lock(&enic->rq[i]); in enic_open()
1752 napi_enable(&enic->napi[i]); in enic_open()
1754 if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) in enic_open()
1755 for (i = 0; i < enic->wq_count; i++) in enic_open()
1756 napi_enable(&enic->napi[enic_cq_wq(enic, i)]); in enic_open()
1757 enic_dev_enable(enic); in enic_open()
1759 for (i = 0; i < enic->intr_count; i++) in enic_open()
1760 vnic_intr_unmask(&enic->intr[i]); in enic_open()
1762 enic_notify_timer_start(enic); in enic_open()
1763 enic_rfs_flw_tbl_init(enic); in enic_open()
1768 for (i = 0; i < enic->rq_count; i++) in enic_open()
1769 vnic_rq_clean(&enic->rq[i], enic_free_rq_buf); in enic_open()
1770 enic_dev_notify_unset(enic); in enic_open()
1772 enic_unset_affinity_hint(enic); in enic_open()
1773 enic_free_intr(enic); in enic_open()
1781 struct enic *enic = netdev_priv(netdev); in enic_stop() local
1785 for (i = 0; i < enic->intr_count; i++) { in enic_stop()
1786 vnic_intr_mask(&enic->intr[i]); in enic_stop()
1787 (void)vnic_intr_masked(&enic->intr[i]); /* flush write */ in enic_stop()
1790 enic_synchronize_irqs(enic); in enic_stop()
1792 del_timer_sync(&enic->notify_timer); in enic_stop()
1793 enic_rfs_flw_tbl_free(enic); in enic_stop()
1795 enic_dev_disable(enic); in enic_stop()
1797 for (i = 0; i < enic->rq_count; i++) { in enic_stop()
1798 napi_disable(&enic->napi[i]); in enic_stop()
1800 while (!enic_poll_lock_napi(&enic->rq[i])) in enic_stop()
1807 if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) in enic_stop()
1808 for (i = 0; i < enic->wq_count; i++) in enic_stop()
1809 napi_disable(&enic->napi[enic_cq_wq(enic, i)]); in enic_stop()
1811 if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic)) in enic_stop()
1812 enic_dev_del_station_addr(enic); in enic_stop()
1814 for (i = 0; i < enic->wq_count; i++) { in enic_stop()
1815 err = vnic_wq_disable(&enic->wq[i]); in enic_stop()
1819 for (i = 0; i < enic->rq_count; i++) { in enic_stop()
1820 err = vnic_rq_disable(&enic->rq[i]); in enic_stop()
1825 enic_dev_notify_unset(enic); in enic_stop()
1826 enic_unset_affinity_hint(enic); in enic_stop()
1827 enic_free_intr(enic); in enic_stop()
1829 for (i = 0; i < enic->wq_count; i++) in enic_stop()
1830 vnic_wq_clean(&enic->wq[i], enic_free_wq_buf); in enic_stop()
1831 for (i = 0; i < enic->rq_count; i++) in enic_stop()
1832 vnic_rq_clean(&enic->rq[i], enic_free_rq_buf); in enic_stop()
1833 for (i = 0; i < enic->cq_count; i++) in enic_stop()
1834 vnic_cq_clean(&enic->cq[i]); in enic_stop()
1835 for (i = 0; i < enic->intr_count; i++) in enic_stop()
1836 vnic_intr_clean(&enic->intr[i]); in enic_stop()
1843 struct enic *enic = netdev_priv(netdev); in enic_change_mtu() local
1849 if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) in enic_change_mtu()
1857 if (netdev->mtu > enic->port_mtu) in enic_change_mtu()
1860 netdev->mtu, enic->port_mtu); in enic_change_mtu()
1870 struct enic *enic = container_of(work, struct enic, change_mtu_work); in enic_change_mtu_work() local
1871 struct net_device *netdev = enic->netdev; in enic_change_mtu_work()
1872 int new_mtu = vnic_dev_mtu(enic->vdev); in enic_change_mtu_work()
1881 del_timer_sync(&enic->notify_timer); in enic_change_mtu_work()
1883 for (i = 0; i < enic->rq_count; i++) in enic_change_mtu_work()
1884 napi_disable(&enic->napi[i]); in enic_change_mtu_work()
1886 vnic_intr_mask(&enic->intr[0]); in enic_change_mtu_work()
1887 enic_synchronize_irqs(enic); in enic_change_mtu_work()
1888 err = vnic_rq_disable(&enic->rq[0]); in enic_change_mtu_work()
1894 vnic_rq_clean(&enic->rq[0], enic_free_rq_buf); in enic_change_mtu_work()
1895 vnic_cq_clean(&enic->cq[0]); in enic_change_mtu_work()
1896 vnic_intr_clean(&enic->intr[0]); in enic_change_mtu_work()
1900 vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf); in enic_change_mtu_work()
1902 if (vnic_rq_desc_used(&enic->rq[0]) == 0) { in enic_change_mtu_work()
1909 vnic_rq_enable(&enic->rq[0]); in enic_change_mtu_work()
1910 napi_enable(&enic->napi[0]); in enic_change_mtu_work()
1911 vnic_intr_unmask(&enic->intr[0]); in enic_change_mtu_work()
1912 enic_notify_timer_start(enic); in enic_change_mtu_work()
1922 struct enic *enic = netdev_priv(netdev); in enic_poll_controller() local
1923 struct vnic_dev *vdev = enic->vdev; in enic_poll_controller()
1928 for (i = 0; i < enic->rq_count; i++) { in enic_poll_controller()
1929 intr = enic_msix_rq_intr(enic, i); in enic_poll_controller()
1930 enic_isr_msix(enic->msix_entry[intr].vector, in enic_poll_controller()
1931 &enic->napi[i]); in enic_poll_controller()
1934 for (i = 0; i < enic->wq_count; i++) { in enic_poll_controller()
1935 intr = enic_msix_wq_intr(enic, i); in enic_poll_controller()
1936 enic_isr_msix(enic->msix_entry[intr].vector, in enic_poll_controller()
1937 &enic->napi[enic_cq_wq(enic, i)]); in enic_poll_controller()
1942 enic_isr_msi(enic->pdev->irq, enic); in enic_poll_controller()
1945 enic_isr_legacy(enic->pdev->irq, netdev); in enic_poll_controller()
1988 static int enic_dev_open(struct enic *enic) in enic_dev_open() argument
1992 err = enic_dev_wait(enic->vdev, vnic_dev_open, in enic_dev_open()
1995 dev_err(enic_get_dev(enic), "vNIC device open failed, err %d\n", in enic_dev_open()
2001 static int enic_dev_soft_reset(struct enic *enic) in enic_dev_soft_reset() argument
2005 err = enic_dev_wait(enic->vdev, vnic_dev_soft_reset, in enic_dev_soft_reset()
2008 netdev_err(enic->netdev, "vNIC soft reset failed, err %d\n", in enic_dev_soft_reset()
2014 static int enic_dev_hang_reset(struct enic *enic) in enic_dev_hang_reset() argument
2018 err = enic_dev_wait(enic->vdev, vnic_dev_hang_reset, in enic_dev_hang_reset()
2021 netdev_err(enic->netdev, "vNIC hang reset failed, err %d\n", in enic_dev_hang_reset()
2027 int __enic_set_rsskey(struct enic *enic) in __enic_set_rsskey() argument
2033 rss_key_buf_va = pci_zalloc_consistent(enic->pdev, in __enic_set_rsskey()
2042 rss_key_buf_va->key[kidx].b[bidx] = enic->rss_key[i]; in __enic_set_rsskey()
2044 spin_lock_bh(&enic->devcmd_lock); in __enic_set_rsskey()
2045 err = enic_set_rss_key(enic, in __enic_set_rsskey()
2048 spin_unlock_bh(&enic->devcmd_lock); in __enic_set_rsskey()
2050 pci_free_consistent(enic->pdev, sizeof(union vnic_rss_key), in __enic_set_rsskey()
2056 static int enic_set_rsskey(struct enic *enic) in enic_set_rsskey() argument
2058 netdev_rss_key_fill(enic->rss_key, ENIC_RSS_LEN); in enic_set_rsskey()
2060 return __enic_set_rsskey(enic); in enic_set_rsskey()
2063 static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits) in enic_set_rsscpu() argument
2070 rss_cpu_buf_va = pci_alloc_consistent(enic->pdev, in enic_set_rsscpu()
2076 (*rss_cpu_buf_va).cpu[i/4].b[i%4] = i % enic->rq_count; in enic_set_rsscpu()
2078 spin_lock_bh(&enic->devcmd_lock); in enic_set_rsscpu()
2079 err = enic_set_rss_cpu(enic, in enic_set_rsscpu()
2082 spin_unlock_bh(&enic->devcmd_lock); in enic_set_rsscpu()
2084 pci_free_consistent(enic->pdev, sizeof(union vnic_rss_cpu), in enic_set_rsscpu()
2090 static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu, in enic_set_niccfg() argument
2100 spin_lock_bh(&enic->devcmd_lock); in enic_set_niccfg()
2101 err = enic_set_nic_cfg(enic, in enic_set_niccfg()
2106 spin_unlock_bh(&enic->devcmd_lock); in enic_set_niccfg()
2111 static int enic_set_rss_nic_cfg(struct enic *enic) in enic_set_rss_nic_cfg() argument
2113 struct device *dev = enic_get_dev(enic); in enic_set_rss_nic_cfg()
2121 u8 rss_enable = ENIC_SETTING(enic, RSS) && (enic->rq_count > 1); in enic_set_rss_nic_cfg()
2124 if (!enic_set_rsskey(enic)) { in enic_set_rss_nic_cfg()
2125 if (enic_set_rsscpu(enic, rss_hash_bits)) { in enic_set_rss_nic_cfg()
2136 return enic_set_niccfg(enic, rss_default_cpu, rss_hash_type, in enic_set_rss_nic_cfg()
2142 struct enic *enic = container_of(work, struct enic, reset); in enic_reset() local
2144 if (!netif_running(enic->netdev)) in enic_reset()
2149 spin_lock(&enic->enic_api_lock); in enic_reset()
2150 enic_stop(enic->netdev); in enic_reset()
2151 enic_dev_soft_reset(enic); in enic_reset()
2152 enic_reset_addr_lists(enic); in enic_reset()
2153 enic_init_vnic_resources(enic); in enic_reset()
2154 enic_set_rss_nic_cfg(enic); in enic_reset()
2155 enic_dev_set_ig_vlan_rewrite_mode(enic); in enic_reset()
2156 enic_open(enic->netdev); in enic_reset()
2157 spin_unlock(&enic->enic_api_lock); in enic_reset()
2158 call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev); in enic_reset()
2165 struct enic *enic = container_of(work, struct enic, tx_hang_reset); in enic_tx_hang_reset() local
2169 spin_lock(&enic->enic_api_lock); in enic_tx_hang_reset()
2170 enic_dev_hang_notify(enic); in enic_tx_hang_reset()
2171 enic_stop(enic->netdev); in enic_tx_hang_reset()
2172 enic_dev_hang_reset(enic); in enic_tx_hang_reset()
2173 enic_reset_addr_lists(enic); in enic_tx_hang_reset()
2174 enic_init_vnic_resources(enic); in enic_tx_hang_reset()
2175 enic_set_rss_nic_cfg(enic); in enic_tx_hang_reset()
2176 enic_dev_set_ig_vlan_rewrite_mode(enic); in enic_tx_hang_reset()
2177 enic_open(enic->netdev); in enic_tx_hang_reset()
2178 spin_unlock(&enic->enic_api_lock); in enic_tx_hang_reset()
2179 call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev); in enic_tx_hang_reset()
2184 static int enic_set_intr_mode(struct enic *enic) in enic_set_intr_mode() argument
2186 unsigned int n = min_t(unsigned int, enic->rq_count, ENIC_RQ_MAX); in enic_set_intr_mode()
2187 unsigned int m = min_t(unsigned int, enic->wq_count, ENIC_WQ_MAX); in enic_set_intr_mode()
2200 BUG_ON(ARRAY_SIZE(enic->msix_entry) < n + m + 2); in enic_set_intr_mode()
2202 enic->msix_entry[i].entry = i; in enic_set_intr_mode()
2207 if (ENIC_SETTING(enic, RSS) && in enic_set_intr_mode()
2208 enic->config.intr_mode < 1 && in enic_set_intr_mode()
2209 enic->rq_count >= n && in enic_set_intr_mode()
2210 enic->wq_count >= m && in enic_set_intr_mode()
2211 enic->cq_count >= n + m && in enic_set_intr_mode()
2212 enic->intr_count >= n + m + 2) { in enic_set_intr_mode()
2214 if (pci_enable_msix_range(enic->pdev, enic->msix_entry, in enic_set_intr_mode()
2217 enic->rq_count = n; in enic_set_intr_mode()
2218 enic->wq_count = m; in enic_set_intr_mode()
2219 enic->cq_count = n + m; in enic_set_intr_mode()
2220 enic->intr_count = n + m + 2; in enic_set_intr_mode()
2222 vnic_dev_set_intr_mode(enic->vdev, in enic_set_intr_mode()
2229 if (enic->config.intr_mode < 1 && in enic_set_intr_mode()
2230 enic->rq_count >= 1 && in enic_set_intr_mode()
2231 enic->wq_count >= m && in enic_set_intr_mode()
2232 enic->cq_count >= 1 + m && in enic_set_intr_mode()
2233 enic->intr_count >= 1 + m + 2) { in enic_set_intr_mode()
2234 if (pci_enable_msix_range(enic->pdev, enic->msix_entry, in enic_set_intr_mode()
2237 enic->rq_count = 1; in enic_set_intr_mode()
2238 enic->wq_count = m; in enic_set_intr_mode()
2239 enic->cq_count = 1 + m; in enic_set_intr_mode()
2240 enic->intr_count = 1 + m + 2; in enic_set_intr_mode()
2242 vnic_dev_set_intr_mode(enic->vdev, in enic_set_intr_mode()
2254 if (enic->config.intr_mode < 2 && in enic_set_intr_mode()
2255 enic->rq_count >= 1 && in enic_set_intr_mode()
2256 enic->wq_count >= 1 && in enic_set_intr_mode()
2257 enic->cq_count >= 2 && in enic_set_intr_mode()
2258 enic->intr_count >= 1 && in enic_set_intr_mode()
2259 !pci_enable_msi(enic->pdev)) { in enic_set_intr_mode()
2261 enic->rq_count = 1; in enic_set_intr_mode()
2262 enic->wq_count = 1; in enic_set_intr_mode()
2263 enic->cq_count = 2; in enic_set_intr_mode()
2264 enic->intr_count = 1; in enic_set_intr_mode()
2266 vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSI); in enic_set_intr_mode()
2279 if (enic->config.intr_mode < 3 && in enic_set_intr_mode()
2280 enic->rq_count >= 1 && in enic_set_intr_mode()
2281 enic->wq_count >= 1 && in enic_set_intr_mode()
2282 enic->cq_count >= 2 && in enic_set_intr_mode()
2283 enic->intr_count >= 3) { in enic_set_intr_mode()
2285 enic->rq_count = 1; in enic_set_intr_mode()
2286 enic->wq_count = 1; in enic_set_intr_mode()
2287 enic->cq_count = 2; in enic_set_intr_mode()
2288 enic->intr_count = 3; in enic_set_intr_mode()
2290 vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_INTX); in enic_set_intr_mode()
2295 vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN); in enic_set_intr_mode()
2300 static void enic_clear_intr_mode(struct enic *enic) in enic_clear_intr_mode() argument
2302 switch (vnic_dev_get_intr_mode(enic->vdev)) { in enic_clear_intr_mode()
2304 pci_disable_msix(enic->pdev); in enic_clear_intr_mode()
2307 pci_disable_msi(enic->pdev); in enic_clear_intr_mode()
2313 vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN); in enic_clear_intr_mode()
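
enic_set_intr_mode() walks a strict preference order, MSI-X with the multi-RQ RSS layout, then a single-RQ MSI-X layout, then MSI, then legacy INTx, shrinking the advertised queue and interrupt counts to whatever the chosen mode supports; enic_clear_intr_mode() undoes the choice. The bare fallback chain looks like this (return-code convention is illustrative):

#include <linux/pci.h>

enum example_intr_mode { EXAMPLE_INTX, EXAMPLE_MSI, EXAMPLE_MSIX };

static enum example_intr_mode example_pick_intr(struct pci_dev *pdev,
                                                struct msix_entry *ent,
                                                int want)
{
        /* Ask for exactly `want` vectors; anything fewer is a failure,
         * which is why enic passes the same value as min and max. */
        if (pci_enable_msix_range(pdev, ent, want, want) > 0)
                return EXAMPLE_MSIX;
        if (!pci_enable_msi(pdev))
                return EXAMPLE_MSI;
        return EXAMPLE_INTX;    /* always available as a last resort */
}
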
2368 static void enic_dev_deinit(struct enic *enic) in enic_dev_deinit() argument
2372 for (i = 0; i < enic->rq_count; i++) { in enic_dev_deinit()
2373 napi_hash_del(&enic->napi[i]); in enic_dev_deinit()
2374 netif_napi_del(&enic->napi[i]); in enic_dev_deinit()
2376 if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) in enic_dev_deinit()
2377 for (i = 0; i < enic->wq_count; i++) in enic_dev_deinit()
2378 netif_napi_del(&enic->napi[enic_cq_wq(enic, i)]); in enic_dev_deinit()
2380 enic_free_vnic_resources(enic); in enic_dev_deinit()
2381 enic_clear_intr_mode(enic); in enic_dev_deinit()
2382 enic_free_affinity_hint(enic); in enic_dev_deinit()
2385 static void enic_kdump_kernel_config(struct enic *enic) in enic_kdump_kernel_config() argument
2388 dev_info(enic_get_dev(enic), "Running from within kdump kernel. Using minimal resources\n"); in enic_kdump_kernel_config()
2389 enic->rq_count = 1; in enic_kdump_kernel_config()
2390 enic->wq_count = 1; in enic_kdump_kernel_config()
2391 enic->config.rq_desc_count = ENIC_MIN_RQ_DESCS; in enic_kdump_kernel_config()
2392 enic->config.wq_desc_count = ENIC_MIN_WQ_DESCS; in enic_kdump_kernel_config()
2393 enic->config.mtu = min_t(u16, 1500, enic->config.mtu); in enic_kdump_kernel_config()
2397 static int enic_dev_init(struct enic *enic) in enic_dev_init() argument
2399 struct device *dev = enic_get_dev(enic); in enic_dev_init()
2400 struct net_device *netdev = enic->netdev; in enic_dev_init()
2405 err = enic_dev_intr_coal_timer_info(enic); in enic_dev_init()
2409 vnic_dev_intr_coal_timer_info_default(enic->vdev); in enic_dev_init()
2415 err = enic_get_vnic_config(enic); in enic_dev_init()
2424 enic_get_res_counts(enic); in enic_dev_init()
2428 enic_kdump_kernel_config(enic); in enic_dev_init()
2434 err = enic_set_intr_mode(enic); in enic_dev_init()
2444 err = enic_alloc_vnic_resources(enic); in enic_dev_init()
2450 enic_init_vnic_resources(enic); in enic_dev_init()
2452 err = enic_set_rss_nic_cfg(enic); in enic_dev_init()
2458 switch (vnic_dev_get_intr_mode(enic->vdev)) { in enic_dev_init()
2460 netif_napi_add(netdev, &enic->napi[0], enic_poll, 64); in enic_dev_init()
2461 napi_hash_add(&enic->napi[0]); in enic_dev_init()
2464 for (i = 0; i < enic->rq_count; i++) { in enic_dev_init()
2465 netif_napi_add(netdev, &enic->napi[i], in enic_dev_init()
2467 napi_hash_add(&enic->napi[i]); in enic_dev_init()
2469 for (i = 0; i < enic->wq_count; i++) in enic_dev_init()
2470 netif_napi_add(netdev, &enic->napi[enic_cq_wq(enic, i)], in enic_dev_init()
2478 enic_free_affinity_hint(enic); in enic_dev_init()
2479 enic_clear_intr_mode(enic); in enic_dev_init()
2480 enic_free_vnic_resources(enic); in enic_dev_init()
2485 static void enic_iounmap(struct enic *enic) in enic_iounmap() argument
2489 for (i = 0; i < ARRAY_SIZE(enic->bar); i++) in enic_iounmap()
2490 if (enic->bar[i].vaddr) in enic_iounmap()
2491 iounmap(enic->bar[i].vaddr); in enic_iounmap()
2498 struct enic *enic; in enic_probe() local
2511 netdev = alloc_etherdev_mqs(sizeof(struct enic), in enic_probe()
2520 enic = netdev_priv(netdev); in enic_probe()
2521 enic->netdev = netdev; in enic_probe()
2522 enic->pdev = pdev; in enic_probe()
2572 for (i = 0; i < ARRAY_SIZE(enic->bar); i++) { in enic_probe()
2575 enic->bar[i].len = pci_resource_len(pdev, i); in enic_probe()
2576 enic->bar[i].vaddr = pci_iomap(pdev, i, enic->bar[i].len); in enic_probe()
2577 if (!enic->bar[i].vaddr) { in enic_probe()
2582 enic->bar[i].bus_addr = pci_resource_start(pdev, i); in enic_probe()
2588 enic->vdev = vnic_dev_register(NULL, enic, pdev, enic->bar, in enic_probe()
2589 ARRAY_SIZE(enic->bar)); in enic_probe()
2590 if (!enic->vdev) { in enic_probe()
2596 err = vnic_devcmd_init(enic->vdev); in enic_probe()
2606 &enic->num_vfs); in enic_probe()
2607 if (enic->num_vfs) { in enic_probe()
2608 err = pci_enable_sriov(pdev, enic->num_vfs); in enic_probe()
2615 enic->priv_flags |= ENIC_SRIOV_ENABLED; in enic_probe()
2616 num_pps = enic->num_vfs; in enic_probe()
2622 enic->pp = kcalloc(num_pps, sizeof(*enic->pp), GFP_KERNEL); in enic_probe()
2623 if (!enic->pp) { in enic_probe()
2631 err = enic_dev_open(enic); in enic_probe()
2640 spin_lock_init(&enic->devcmd_lock); in enic_probe()
2641 spin_lock_init(&enic->enic_api_lock); in enic_probe()
2647 err = enic_dev_set_ig_vlan_rewrite_mode(enic); in enic_probe()
2669 if (!enic_is_dynamic(enic)) { in enic_probe()
2670 err = vnic_dev_init(enic->vdev, 0); in enic_probe()
2677 err = enic_dev_init(enic); in enic_probe()
2683 netif_set_real_num_tx_queues(netdev, enic->wq_count); in enic_probe()
2684 netif_set_real_num_rx_queues(netdev, enic->rq_count); in enic_probe()
2689 init_timer(&enic->notify_timer); in enic_probe()
2690 enic->notify_timer.function = enic_notify_timer; in enic_probe()
2691 enic->notify_timer.data = (unsigned long)enic; in enic_probe()
2693 enic_set_rx_coal_setting(enic); in enic_probe()
2694 INIT_WORK(&enic->reset, enic_reset); in enic_probe()
2695 INIT_WORK(&enic->tx_hang_reset, enic_tx_hang_reset); in enic_probe()
2696 INIT_WORK(&enic->change_mtu_work, enic_change_mtu_work); in enic_probe()
2698 for (i = 0; i < enic->wq_count; i++) in enic_probe()
2699 spin_lock_init(&enic->wq_lock[i]); in enic_probe()
2704 enic->port_mtu = enic->config.mtu; in enic_probe()
2705 (void)enic_change_mtu(netdev, enic->port_mtu); in enic_probe()
2707 err = enic_set_mac_addr(netdev, enic->mac_addr); in enic_probe()
2713 enic->tx_coalesce_usecs = enic->config.intr_timer_usec; in enic_probe()
2717 enic->rx_coalesce_usecs = enic->tx_coalesce_usecs; in enic_probe()
2719 if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) in enic_probe()
2728 if (ENIC_SETTING(enic, LOOP)) { in enic_probe()
2730 enic->loop_enable = 1; in enic_probe()
2731 enic->loop_tag = enic->config.loop_tag; in enic_probe()
2732 dev_info(dev, "loopback tag=0x%04x\n", enic->loop_tag); in enic_probe()
2734 if (ENIC_SETTING(enic, TXCSUM)) in enic_probe()
2736 if (ENIC_SETTING(enic, TSO)) in enic_probe()
2739 if (ENIC_SETTING(enic, RSS)) in enic_probe()
2741 if (ENIC_SETTING(enic, RXCSUM)) in enic_probe()
2760 enic->rx_copybreak = RX_COPYBREAK_DEFAULT; in enic_probe()
2765 enic_dev_deinit(enic); in enic_probe()
2767 vnic_dev_close(enic->vdev); in enic_probe()
2769 kfree(enic->pp); in enic_probe()
2772 if (enic_sriov_enabled(enic)) { in enic_probe()
2774 enic->priv_flags &= ~ENIC_SRIOV_ENABLED; in enic_probe()
2778 vnic_dev_unregister(enic->vdev); in enic_probe()
2780 enic_iounmap(enic); in enic_probe()
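
The tail of enic_probe() above (enic_dev_deinit, vnic_dev_close, kfree(enic->pp), SR-IOV disable, vnic_dev_unregister, enic_iounmap) is the standard goto-ladder unwind: each failure label releases exactly what was acquired before the failing step, in reverse order. A skeleton of the idiom with hypothetical acquire/release pairs:

#include <linux/pci.h>

static int example_map_bars(struct pci_dev *pdev);
static void example_unmap_bars(struct pci_dev *pdev);
static int example_register_vdev(struct pci_dev *pdev);
static void example_unregister_vdev(struct pci_dev *pdev);
static int example_open_dev(struct pci_dev *pdev);

static int example_probe(struct pci_dev *pdev)
{
        int err;

        err = example_map_bars(pdev);
        if (err)
                return err;             /* nothing to unwind yet */
        err = example_register_vdev(pdev);
        if (err)
                goto err_unmap;
        err = example_open_dev(pdev);
        if (err)
                goto err_unregister;
        return 0;

err_unregister:
        example_unregister_vdev(pdev);
err_unmap:
        example_unmap_bars(pdev);
        return err;
}
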
2796 struct enic *enic = netdev_priv(netdev); in enic_remove() local
2798 cancel_work_sync(&enic->reset); in enic_remove()
2799 cancel_work_sync(&enic->change_mtu_work); in enic_remove()
2801 enic_dev_deinit(enic); in enic_remove()
2802 vnic_dev_close(enic->vdev); in enic_remove()
2804 if (enic_sriov_enabled(enic)) { in enic_remove()
2806 enic->priv_flags &= ~ENIC_SRIOV_ENABLED; in enic_remove()
2809 kfree(enic->pp); in enic_remove()
2810 vnic_dev_unregister(enic->vdev); in enic_remove()
2811 enic_iounmap(enic); in enic_remove()