Lines matching refs:adapter — identifier cross-reference of struct igb_adapter usage in the igb driver. Each entry gives the source line number, the matching code line, and the enclosing function; references that are function parameters are flagged "argument" and local-variable declarations are flagged "local".

143 static void igb_set_uta(struct igb_adapter *adapter);
168 static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
180 static int igb_vf_configure(struct igb_adapter *adapter, int vf);
229 static void igb_init_dmac(struct igb_adapter *adapter, u32 pba);
362 static void igb_dump(struct igb_adapter *adapter) in igb_dump() argument
364 struct net_device *netdev = adapter->netdev; in igb_dump()
365 struct e1000_hw *hw = &adapter->hw; in igb_dump()
375 if (!netif_msg_hw(adapter)) in igb_dump()
380 dev_info(&adapter->pdev->dev, "Net device Info\n"); in igb_dump()
387 dev_info(&adapter->pdev->dev, "Register Dump\n"); in igb_dump()
398 dev_info(&adapter->pdev->dev, "TX Rings Summary\n"); in igb_dump()
400 for (n = 0; n < adapter->num_tx_queues; n++) { in igb_dump()
402 tx_ring = adapter->tx_ring[n]; in igb_dump()
413 if (!netif_msg_tx_done(adapter)) in igb_dump()
416 dev_info(&adapter->pdev->dev, "TX Rings Dump\n"); in igb_dump()
429 for (n = 0; n < adapter->num_tx_queues; n++) { in igb_dump()
430 tx_ring = adapter->tx_ring[n]; in igb_dump()
461 if (netif_msg_pktdata(adapter) && buffer_info->skb) in igb_dump()
472 dev_info(&adapter->pdev->dev, "RX Rings Summary\n"); in igb_dump()
474 for (n = 0; n < adapter->num_rx_queues; n++) { in igb_dump()
475 rx_ring = adapter->rx_ring[n]; in igb_dump()
481 if (!netif_msg_rx_status(adapter)) in igb_dump()
484 dev_info(&adapter->pdev->dev, "RX Rings Dump\n"); in igb_dump()
507 for (n = 0; n < adapter->num_rx_queues; n++) { in igb_dump()
508 rx_ring = adapter->rx_ring[n]; in igb_dump()
545 if (netif_msg_pktdata(adapter) && in igb_dump()
571 struct igb_adapter *adapter = (struct igb_adapter *)data; in igb_get_i2c_data() local
572 struct e1000_hw *hw = &adapter->hw; in igb_get_i2c_data()
587 struct igb_adapter *adapter = (struct igb_adapter *)data; in igb_set_i2c_data() local
588 struct e1000_hw *hw = &adapter->hw; in igb_set_i2c_data()
612 struct igb_adapter *adapter = (struct igb_adapter *)data; in igb_set_i2c_clk() local
613 struct e1000_hw *hw = &adapter->hw; in igb_set_i2c_clk()
635 struct igb_adapter *adapter = (struct igb_adapter *)data; in igb_get_i2c_clk() local
636 struct e1000_hw *hw = &adapter->hw; in igb_get_i2c_clk()
659 struct igb_adapter *adapter = hw->back; in igb_get_hw_dev() local
660 return adapter->netdev; in igb_get_hw_dev()
710 static void igb_cache_ring_register(struct igb_adapter *adapter) in igb_cache_ring_register() argument
713 u32 rbase_offset = adapter->vfs_allocated_count; in igb_cache_ring_register()
715 switch (adapter->hw.mac.type) { in igb_cache_ring_register()
722 if (adapter->vfs_allocated_count) { in igb_cache_ring_register()
723 for (; i < adapter->rss_queues; i++) in igb_cache_ring_register()
724 adapter->rx_ring[i]->reg_idx = rbase_offset + in igb_cache_ring_register()
736 for (; i < adapter->num_rx_queues; i++) in igb_cache_ring_register()
737 adapter->rx_ring[i]->reg_idx = rbase_offset + i; in igb_cache_ring_register()
738 for (; j < adapter->num_tx_queues; j++) in igb_cache_ring_register()
739 adapter->tx_ring[j]->reg_idx = rbase_offset + j; in igb_cache_ring_register()
795 struct igb_adapter *adapter = q_vector->adapter; in igb_assign_vector() local
796 struct e1000_hw *hw = &adapter->hw; in igb_assign_vector()
817 if (!(adapter->flags & IGB_FLAG_HAS_MSIX) && msix_vector == 0) in igb_assign_vector()
865 adapter->eims_enable_mask |= q_vector->eims_value; in igb_assign_vector()
878 static void igb_configure_msix(struct igb_adapter *adapter) in igb_configure_msix() argument
882 struct e1000_hw *hw = &adapter->hw; in igb_configure_msix()
884 adapter->eims_enable_mask = 0; in igb_configure_msix()
901 adapter->eims_other = E1000_EIMS_OTHER; in igb_configure_msix()
919 adapter->eims_other = 1 << vector; in igb_configure_msix()
929 adapter->eims_enable_mask |= adapter->eims_other; in igb_configure_msix()
931 for (i = 0; i < adapter->num_q_vectors; i++) in igb_configure_msix()
932 igb_assign_vector(adapter->q_vector[i], vector++); in igb_configure_msix()
944 static int igb_request_msix(struct igb_adapter *adapter) in igb_request_msix() argument
946 struct net_device *netdev = adapter->netdev; in igb_request_msix()
947 struct e1000_hw *hw = &adapter->hw; in igb_request_msix()
950 err = request_irq(adapter->msix_entries[vector].vector, in igb_request_msix()
951 igb_msix_other, 0, netdev->name, adapter); in igb_request_msix()
955 for (i = 0; i < adapter->num_q_vectors; i++) { in igb_request_msix()
956 struct igb_q_vector *q_vector = adapter->q_vector[i]; in igb_request_msix()
974 err = request_irq(adapter->msix_entries[vector].vector, in igb_request_msix()
981 igb_configure_msix(adapter); in igb_request_msix()
986 free_irq(adapter->msix_entries[free_vector++].vector, adapter); in igb_request_msix()
990 free_irq(adapter->msix_entries[free_vector++].vector, in igb_request_msix()
991 adapter->q_vector[i]); in igb_request_msix()
1004 static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx) in igb_free_q_vector() argument
1006 struct igb_q_vector *q_vector = adapter->q_vector[v_idx]; in igb_free_q_vector()
1008 adapter->q_vector[v_idx] = NULL; in igb_free_q_vector()
1025 static void igb_reset_q_vector(struct igb_adapter *adapter, int v_idx) in igb_reset_q_vector() argument
1027 struct igb_q_vector *q_vector = adapter->q_vector[v_idx]; in igb_reset_q_vector()
1036 adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL; in igb_reset_q_vector()
1039 adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL; in igb_reset_q_vector()
1045 static void igb_reset_interrupt_capability(struct igb_adapter *adapter) in igb_reset_interrupt_capability() argument
1047 int v_idx = adapter->num_q_vectors; in igb_reset_interrupt_capability()
1049 if (adapter->flags & IGB_FLAG_HAS_MSIX) in igb_reset_interrupt_capability()
1050 pci_disable_msix(adapter->pdev); in igb_reset_interrupt_capability()
1051 else if (adapter->flags & IGB_FLAG_HAS_MSI) in igb_reset_interrupt_capability()
1052 pci_disable_msi(adapter->pdev); in igb_reset_interrupt_capability()
1055 igb_reset_q_vector(adapter, v_idx); in igb_reset_interrupt_capability()
1066 static void igb_free_q_vectors(struct igb_adapter *adapter) in igb_free_q_vectors() argument
1068 int v_idx = adapter->num_q_vectors; in igb_free_q_vectors()
1070 adapter->num_tx_queues = 0; in igb_free_q_vectors()
1071 adapter->num_rx_queues = 0; in igb_free_q_vectors()
1072 adapter->num_q_vectors = 0; in igb_free_q_vectors()
1075 igb_reset_q_vector(adapter, v_idx); in igb_free_q_vectors()
1076 igb_free_q_vector(adapter, v_idx); in igb_free_q_vectors()
1087 static void igb_clear_interrupt_scheme(struct igb_adapter *adapter) in igb_clear_interrupt_scheme() argument
1089 igb_free_q_vectors(adapter); in igb_clear_interrupt_scheme()
1090 igb_reset_interrupt_capability(adapter); in igb_clear_interrupt_scheme()
1101 static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix) in igb_set_interrupt_capability() argument
1108 adapter->flags |= IGB_FLAG_HAS_MSIX; in igb_set_interrupt_capability()
1111 adapter->num_rx_queues = adapter->rss_queues; in igb_set_interrupt_capability()
1112 if (adapter->vfs_allocated_count) in igb_set_interrupt_capability()
1113 adapter->num_tx_queues = 1; in igb_set_interrupt_capability()
1115 adapter->num_tx_queues = adapter->rss_queues; in igb_set_interrupt_capability()
1118 numvecs = adapter->num_rx_queues; in igb_set_interrupt_capability()
1121 if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) in igb_set_interrupt_capability()
1122 numvecs += adapter->num_tx_queues; in igb_set_interrupt_capability()
1125 adapter->num_q_vectors = numvecs; in igb_set_interrupt_capability()
1130 adapter->msix_entries[i].entry = i; in igb_set_interrupt_capability()
1132 err = pci_enable_msix_range(adapter->pdev, in igb_set_interrupt_capability()
1133 adapter->msix_entries, in igb_set_interrupt_capability()
1139 igb_reset_interrupt_capability(adapter); in igb_set_interrupt_capability()
1143 adapter->flags &= ~IGB_FLAG_HAS_MSIX; in igb_set_interrupt_capability()
1146 if (adapter->vf_data) { in igb_set_interrupt_capability()
1147 struct e1000_hw *hw = &adapter->hw; in igb_set_interrupt_capability()
1149 pci_disable_sriov(adapter->pdev); in igb_set_interrupt_capability()
1152 kfree(adapter->vf_data); in igb_set_interrupt_capability()
1153 adapter->vf_data = NULL; in igb_set_interrupt_capability()
1157 dev_info(&adapter->pdev->dev, "IOV Disabled\n"); in igb_set_interrupt_capability()
1160 adapter->vfs_allocated_count = 0; in igb_set_interrupt_capability()
1161 adapter->rss_queues = 1; in igb_set_interrupt_capability()
1162 adapter->flags |= IGB_FLAG_QUEUE_PAIRS; in igb_set_interrupt_capability()
1163 adapter->num_rx_queues = 1; in igb_set_interrupt_capability()
1164 adapter->num_tx_queues = 1; in igb_set_interrupt_capability()
1165 adapter->num_q_vectors = 1; in igb_set_interrupt_capability()
1166 if (!pci_enable_msi(adapter->pdev)) in igb_set_interrupt_capability()
1167 adapter->flags |= IGB_FLAG_HAS_MSI; in igb_set_interrupt_capability()
1189 static int igb_alloc_q_vector(struct igb_adapter *adapter, in igb_alloc_q_vector() argument
1207 q_vector = adapter->q_vector[v_idx]; in igb_alloc_q_vector()
1220 netif_napi_add(adapter->netdev, &q_vector->napi, in igb_alloc_q_vector()
1224 adapter->q_vector[v_idx] = q_vector; in igb_alloc_q_vector()
1225 q_vector->adapter = adapter; in igb_alloc_q_vector()
1228 q_vector->tx.work_limit = adapter->tx_work_limit; in igb_alloc_q_vector()
1231 q_vector->itr_register = adapter->hw.hw_addr + E1000_EITR(0); in igb_alloc_q_vector()
1240 if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3) in igb_alloc_q_vector()
1241 q_vector->itr_val = adapter->rx_itr_setting; in igb_alloc_q_vector()
1244 if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3) in igb_alloc_q_vector()
1245 q_vector->itr_val = adapter->tx_itr_setting; in igb_alloc_q_vector()
1250 ring->dev = &adapter->pdev->dev; in igb_alloc_q_vector()
1251 ring->netdev = adapter->netdev; in igb_alloc_q_vector()
1260 if (adapter->hw.mac.type == e1000_82575) in igb_alloc_q_vector()
1264 ring->count = adapter->tx_ring_count; in igb_alloc_q_vector()
1271 adapter->tx_ring[txr_idx] = ring; in igb_alloc_q_vector()
1279 ring->dev = &adapter->pdev->dev; in igb_alloc_q_vector()
1280 ring->netdev = adapter->netdev; in igb_alloc_q_vector()
1289 if (adapter->hw.mac.type >= e1000_82576) in igb_alloc_q_vector()
1295 if (adapter->hw.mac.type >= e1000_i350) in igb_alloc_q_vector()
1299 ring->count = adapter->rx_ring_count; in igb_alloc_q_vector()
1305 adapter->rx_ring[rxr_idx] = ring; in igb_alloc_q_vector()
1319 static int igb_alloc_q_vectors(struct igb_adapter *adapter) in igb_alloc_q_vectors() argument
1321 int q_vectors = adapter->num_q_vectors; in igb_alloc_q_vectors()
1322 int rxr_remaining = adapter->num_rx_queues; in igb_alloc_q_vectors()
1323 int txr_remaining = adapter->num_tx_queues; in igb_alloc_q_vectors()
1329 err = igb_alloc_q_vector(adapter, q_vectors, v_idx, in igb_alloc_q_vectors()
1345 err = igb_alloc_q_vector(adapter, q_vectors, v_idx, in igb_alloc_q_vectors()
1361 adapter->num_tx_queues = 0; in igb_alloc_q_vectors()
1362 adapter->num_rx_queues = 0; in igb_alloc_q_vectors()
1363 adapter->num_q_vectors = 0; in igb_alloc_q_vectors()
1366 igb_free_q_vector(adapter, v_idx); in igb_alloc_q_vectors()
1378 static int igb_init_interrupt_scheme(struct igb_adapter *adapter, bool msix) in igb_init_interrupt_scheme() argument
1380 struct pci_dev *pdev = adapter->pdev; in igb_init_interrupt_scheme()
1383 igb_set_interrupt_capability(adapter, msix); in igb_init_interrupt_scheme()
1385 err = igb_alloc_q_vectors(adapter); in igb_init_interrupt_scheme()
1391 igb_cache_ring_register(adapter); in igb_init_interrupt_scheme()
1396 igb_reset_interrupt_capability(adapter); in igb_init_interrupt_scheme()
1407 static int igb_request_irq(struct igb_adapter *adapter) in igb_request_irq() argument
1409 struct net_device *netdev = adapter->netdev; in igb_request_irq()
1410 struct pci_dev *pdev = adapter->pdev; in igb_request_irq()
1413 if (adapter->flags & IGB_FLAG_HAS_MSIX) { in igb_request_irq()
1414 err = igb_request_msix(adapter); in igb_request_irq()
1418 igb_free_all_tx_resources(adapter); in igb_request_irq()
1419 igb_free_all_rx_resources(adapter); in igb_request_irq()
1421 igb_clear_interrupt_scheme(adapter); in igb_request_irq()
1422 err = igb_init_interrupt_scheme(adapter, false); in igb_request_irq()
1426 igb_setup_all_tx_resources(adapter); in igb_request_irq()
1427 igb_setup_all_rx_resources(adapter); in igb_request_irq()
1428 igb_configure(adapter); in igb_request_irq()
1431 igb_assign_vector(adapter->q_vector[0], 0); in igb_request_irq()
1433 if (adapter->flags & IGB_FLAG_HAS_MSI) { in igb_request_irq()
1435 netdev->name, adapter); in igb_request_irq()
1440 igb_reset_interrupt_capability(adapter); in igb_request_irq()
1441 adapter->flags &= ~IGB_FLAG_HAS_MSI; in igb_request_irq()
1445 netdev->name, adapter); in igb_request_irq()
1455 static void igb_free_irq(struct igb_adapter *adapter) in igb_free_irq() argument
1457 if (adapter->flags & IGB_FLAG_HAS_MSIX) { in igb_free_irq()
1460 free_irq(adapter->msix_entries[vector++].vector, adapter); in igb_free_irq()
1462 for (i = 0; i < adapter->num_q_vectors; i++) in igb_free_irq()
1463 free_irq(adapter->msix_entries[vector++].vector, in igb_free_irq()
1464 adapter->q_vector[i]); in igb_free_irq()
1466 free_irq(adapter->pdev->irq, adapter); in igb_free_irq()
1474 static void igb_irq_disable(struct igb_adapter *adapter) in igb_irq_disable() argument
1476 struct e1000_hw *hw = &adapter->hw; in igb_irq_disable()
1482 if (adapter->flags & IGB_FLAG_HAS_MSIX) { in igb_irq_disable()
1485 wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask); in igb_irq_disable()
1486 wr32(E1000_EIMC, adapter->eims_enable_mask); in igb_irq_disable()
1488 wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask); in igb_irq_disable()
1494 if (adapter->flags & IGB_FLAG_HAS_MSIX) { in igb_irq_disable()
1497 for (i = 0; i < adapter->num_q_vectors; i++) in igb_irq_disable()
1498 synchronize_irq(adapter->msix_entries[i].vector); in igb_irq_disable()
1500 synchronize_irq(adapter->pdev->irq); in igb_irq_disable()
1508 static void igb_irq_enable(struct igb_adapter *adapter) in igb_irq_enable() argument
1510 struct e1000_hw *hw = &adapter->hw; in igb_irq_enable()
1512 if (adapter->flags & IGB_FLAG_HAS_MSIX) { in igb_irq_enable()
1516 wr32(E1000_EIAC, regval | adapter->eims_enable_mask); in igb_irq_enable()
1518 wr32(E1000_EIAM, regval | adapter->eims_enable_mask); in igb_irq_enable()
1519 wr32(E1000_EIMS, adapter->eims_enable_mask); in igb_irq_enable()
1520 if (adapter->vfs_allocated_count) { in igb_irq_enable()
1533 static void igb_update_mng_vlan(struct igb_adapter *adapter) in igb_update_mng_vlan() argument
1535 struct e1000_hw *hw = &adapter->hw; in igb_update_mng_vlan()
1536 u16 vid = adapter->hw.mng_cookie.vlan_id; in igb_update_mng_vlan()
1537 u16 old_vid = adapter->mng_vlan_id; in igb_update_mng_vlan()
1542 adapter->mng_vlan_id = vid; in igb_update_mng_vlan()
1544 adapter->mng_vlan_id = IGB_MNG_VLAN_NONE; in igb_update_mng_vlan()
1549 !test_bit(old_vid, adapter->active_vlans)) { in igb_update_mng_vlan()
1563 static void igb_release_hw_control(struct igb_adapter *adapter) in igb_release_hw_control() argument
1565 struct e1000_hw *hw = &adapter->hw; in igb_release_hw_control()
1582 static void igb_get_hw_control(struct igb_adapter *adapter) in igb_get_hw_control() argument
1584 struct e1000_hw *hw = &adapter->hw; in igb_get_hw_control()
1597 static void igb_configure(struct igb_adapter *adapter) in igb_configure() argument
1599 struct net_device *netdev = adapter->netdev; in igb_configure()
1602 igb_get_hw_control(adapter); in igb_configure()
1605 igb_restore_vlan(adapter); in igb_configure()
1607 igb_setup_tctl(adapter); in igb_configure()
1608 igb_setup_mrqc(adapter); in igb_configure()
1609 igb_setup_rctl(adapter); in igb_configure()
1611 igb_configure_tx(adapter); in igb_configure()
1612 igb_configure_rx(adapter); in igb_configure()
1614 igb_rx_fifo_flush_82575(&adapter->hw); in igb_configure()
1620 for (i = 0; i < adapter->num_rx_queues; i++) { in igb_configure()
1621 struct igb_ring *ring = adapter->rx_ring[i]; in igb_configure()
1630 void igb_power_up_link(struct igb_adapter *adapter) in igb_power_up_link() argument
1632 igb_reset_phy(&adapter->hw); in igb_power_up_link()
1634 if (adapter->hw.phy.media_type == e1000_media_type_copper) in igb_power_up_link()
1635 igb_power_up_phy_copper(&adapter->hw); in igb_power_up_link()
1637 igb_power_up_serdes_link_82575(&adapter->hw); in igb_power_up_link()
1639 igb_setup_link(&adapter->hw); in igb_power_up_link()
1646 static void igb_power_down_link(struct igb_adapter *adapter) in igb_power_down_link() argument
1648 if (adapter->hw.phy.media_type == e1000_media_type_copper) in igb_power_down_link()
1649 igb_power_down_phy_copper_82575(&adapter->hw); in igb_power_down_link()
1651 igb_shutdown_serdes_link_82575(&adapter->hw); in igb_power_down_link()
1658 static void igb_check_swap_media(struct igb_adapter *adapter) in igb_check_swap_media() argument
1660 struct e1000_hw *hw = &adapter->hw; in igb_check_swap_media()
1676 if (adapter->copper_tries < 4) { in igb_check_swap_media()
1677 adapter->copper_tries++; in igb_check_swap_media()
1682 adapter->copper_tries = 0; in igb_check_swap_media()
1697 netdev_info(adapter->netdev, in igb_check_swap_media()
1701 adapter->flags |= IGB_FLAG_MEDIA_RESET; in igb_check_swap_media()
1702 adapter->copper_tries = 0; in igb_check_swap_media()
1706 netdev_info(adapter->netdev, in igb_check_swap_media()
1710 adapter->flags |= IGB_FLAG_MEDIA_RESET; in igb_check_swap_media()
1714 netdev_err(adapter->netdev, in igb_check_swap_media()
1725 int igb_up(struct igb_adapter *adapter) in igb_up() argument
1727 struct e1000_hw *hw = &adapter->hw; in igb_up()
1731 igb_configure(adapter); in igb_up()
1733 clear_bit(__IGB_DOWN, &adapter->state); in igb_up()
1735 for (i = 0; i < adapter->num_q_vectors; i++) in igb_up()
1736 napi_enable(&(adapter->q_vector[i]->napi)); in igb_up()
1738 if (adapter->flags & IGB_FLAG_HAS_MSIX) in igb_up()
1739 igb_configure_msix(adapter); in igb_up()
1741 igb_assign_vector(adapter->q_vector[0], 0); in igb_up()
1745 igb_irq_enable(adapter); in igb_up()
1748 if (adapter->vfs_allocated_count) { in igb_up()
1755 netif_tx_start_all_queues(adapter->netdev); in igb_up()
1759 schedule_work(&adapter->watchdog_task); in igb_up()
1761 if ((adapter->flags & IGB_FLAG_EEE) && in igb_up()
1763 adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T; in igb_up()
1768 void igb_down(struct igb_adapter *adapter) in igb_down() argument
1770 struct net_device *netdev = adapter->netdev; in igb_down()
1771 struct e1000_hw *hw = &adapter->hw; in igb_down()
1778 set_bit(__IGB_DOWN, &adapter->state); in igb_down()
1796 igb_irq_disable(adapter); in igb_down()
1798 adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE; in igb_down()
1800 for (i = 0; i < adapter->num_q_vectors; i++) { in igb_down()
1801 if (adapter->q_vector[i]) { in igb_down()
1802 napi_synchronize(&adapter->q_vector[i]->napi); in igb_down()
1803 napi_disable(&adapter->q_vector[i]->napi); in igb_down()
1807 del_timer_sync(&adapter->watchdog_timer); in igb_down()
1808 del_timer_sync(&adapter->phy_info_timer); in igb_down()
1811 spin_lock(&adapter->stats64_lock); in igb_down()
1812 igb_update_stats(adapter, &adapter->stats64); in igb_down()
1813 spin_unlock(&adapter->stats64_lock); in igb_down()
1815 adapter->link_speed = 0; in igb_down()
1816 adapter->link_duplex = 0; in igb_down()
1818 if (!pci_channel_offline(adapter->pdev)) in igb_down()
1819 igb_reset(adapter); in igb_down()
1820 igb_clean_all_tx_rings(adapter); in igb_down()
1821 igb_clean_all_rx_rings(adapter); in igb_down()
1825 igb_setup_dca(adapter); in igb_down()
1829 void igb_reinit_locked(struct igb_adapter *adapter) in igb_reinit_locked() argument
1832 while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) in igb_reinit_locked()
1834 igb_down(adapter); in igb_reinit_locked()
1835 igb_up(adapter); in igb_reinit_locked()
1836 clear_bit(__IGB_RESETTING, &adapter->state); in igb_reinit_locked()
1843 static s32 igb_enable_mas(struct igb_adapter *adapter) in igb_enable_mas() argument
1845 struct e1000_hw *hw = &adapter->hw; in igb_enable_mas()
1863 netdev_info(adapter->netdev, in igb_enable_mas()
1865 adapter->flags &= ~IGB_FLAG_MAS_ENABLE; in igb_enable_mas()
1870 void igb_reset(struct igb_adapter *adapter) in igb_reset() argument
1872 struct pci_dev *pdev = adapter->pdev; in igb_reset()
1873 struct e1000_hw *hw = &adapter->hw; in igb_reset()
1900 if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) && in igb_reset()
1920 min_tx_space = (adapter->max_frame_size + in igb_reset()
1926 min_rx_space = adapter->max_frame_size; in igb_reset()
1955 ((pba << 10) - 2 * adapter->max_frame_size)); in igb_reset()
1964 if (adapter->vfs_allocated_count) { in igb_reset()
1967 for (i = 0 ; i < adapter->vfs_allocated_count; i++) in igb_reset()
1968 adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC; in igb_reset()
1971 igb_ping_all_vfs(adapter); in igb_reset()
1982 if (adapter->flags & IGB_FLAG_MEDIA_RESET) { in igb_reset()
1984 adapter->ei.get_invariants(hw); in igb_reset()
1985 adapter->flags &= ~IGB_FLAG_MEDIA_RESET; in igb_reset()
1987 if (adapter->flags & IGB_FLAG_MAS_ENABLE) { in igb_reset()
1988 if (igb_enable_mas(adapter)) in igb_reset()
2001 igb_init_dmac(adapter, pba); in igb_reset()
2004 if (!test_bit(__IGB_DOWN, &adapter->state)) { in igb_reset()
2009 if (adapter->ets) in igb_reset()
2029 if (!netif_running(adapter->netdev)) in igb_reset()
2030 igb_power_down_link(adapter); in igb_reset()
2032 igb_update_mng_vlan(adapter); in igb_reset()
2038 igb_ptp_reset(adapter); in igb_reset()
2061 struct igb_adapter *adapter = netdev_priv(netdev); in igb_set_features() local
2072 igb_reinit_locked(adapter); in igb_set_features()
2074 igb_reset(adapter); in igb_set_features()
2109 void igb_set_fw_version(struct igb_adapter *adapter) in igb_set_fw_version() argument
2111 struct e1000_hw *hw = &adapter->hw; in igb_set_fw_version()
2120 snprintf(adapter->fw_version, in igb_set_fw_version()
2121 sizeof(adapter->fw_version), in igb_set_fw_version()
2131 snprintf(adapter->fw_version, in igb_set_fw_version()
2132 sizeof(adapter->fw_version), in igb_set_fw_version()
2138 snprintf(adapter->fw_version, in igb_set_fw_version()
2139 sizeof(adapter->fw_version), in igb_set_fw_version()
2143 snprintf(adapter->fw_version, in igb_set_fw_version()
2144 sizeof(adapter->fw_version), in igb_set_fw_version()
2157 static void igb_init_mas(struct igb_adapter *adapter) in igb_init_mas() argument
2159 struct e1000_hw *hw = &adapter->hw; in igb_init_mas()
2166 adapter->flags |= IGB_FLAG_MAS_ENABLE; in igb_init_mas()
2167 netdev_info(adapter->netdev, in igb_init_mas()
2174 adapter->flags |= IGB_FLAG_MAS_ENABLE; in igb_init_mas()
2175 netdev_info(adapter->netdev, in igb_init_mas()
2182 adapter->flags |= IGB_FLAG_MAS_ENABLE; in igb_init_mas()
2183 netdev_info(adapter->netdev, in igb_init_mas()
2190 adapter->flags |= IGB_FLAG_MAS_ENABLE; in igb_init_mas()
2191 netdev_info(adapter->netdev, in igb_init_mas()
2198 netdev_err(adapter->netdev, in igb_init_mas()
2208 static s32 igb_init_i2c(struct igb_adapter *adapter) in igb_init_i2c() argument
2213 if (adapter->hw.mac.type != e1000_i350) in igb_init_i2c()
2220 adapter->i2c_adap.owner = THIS_MODULE; in igb_init_i2c()
2221 adapter->i2c_algo = igb_i2c_algo; in igb_init_i2c()
2222 adapter->i2c_algo.data = adapter; in igb_init_i2c()
2223 adapter->i2c_adap.algo_data = &adapter->i2c_algo; in igb_init_i2c()
2224 adapter->i2c_adap.dev.parent = &adapter->pdev->dev; in igb_init_i2c()
2225 strlcpy(adapter->i2c_adap.name, "igb BB", in igb_init_i2c()
2226 sizeof(adapter->i2c_adap.name)); in igb_init_i2c()
2227 status = i2c_bit_add_bus(&adapter->i2c_adap); in igb_init_i2c()
2245 struct igb_adapter *adapter; in igb_probe() local
2300 adapter = netdev_priv(netdev); in igb_probe()
2301 adapter->netdev = netdev; in igb_probe()
2302 adapter->pdev = pdev; in igb_probe()
2303 hw = &adapter->hw; in igb_probe()
2304 hw->back = adapter; in igb_probe()
2305 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); in igb_probe()
2338 err = igb_sw_init(adapter); in igb_probe()
2398 adapter->en_mng_pt = igb_enable_mng_pass_thru(hw); in igb_probe()
2442 igb_set_fw_version(adapter); in igb_probe()
2450 setup_timer(&adapter->watchdog_timer, igb_watchdog, in igb_probe()
2451 (unsigned long) adapter); in igb_probe()
2452 setup_timer(&adapter->phy_info_timer, igb_update_phy_info, in igb_probe()
2453 (unsigned long) adapter); in igb_probe()
2455 INIT_WORK(&adapter->reset_task, igb_reset_task); in igb_probe()
2456 INIT_WORK(&adapter->watchdog_task, igb_watchdog_task); in igb_probe()
2459 adapter->fc_autoneg = true; in igb_probe()
2470 adapter->flags |= IGB_FLAG_WOL_SUPPORTED; in igb_probe()
2481 adapter->flags |= IGB_FLAG_WOL_SUPPORTED; in igb_probe()
2489 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; in igb_probe()
2498 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; in igb_probe()
2504 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; in igb_probe()
2506 adapter->flags |= IGB_FLAG_QUAD_PORT_A; in igb_probe()
2513 if (!device_can_wakeup(&adapter->pdev->dev)) in igb_probe()
2514 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; in igb_probe()
2518 if (adapter->flags & IGB_FLAG_WOL_SUPPORTED) in igb_probe()
2519 adapter->wol |= E1000_WUFC_MAG; in igb_probe()
2524 adapter->flags |= IGB_FLAG_WOL_SUPPORTED; in igb_probe()
2525 adapter->wol = 0; in igb_probe()
2528 device_set_wakeup_enable(&adapter->pdev->dev, in igb_probe()
2529 adapter->flags & IGB_FLAG_WOL_SUPPORTED); in igb_probe()
2532 igb_reset(adapter); in igb_probe()
2535 err = igb_init_i2c(adapter); in igb_probe()
2544 igb_get_hw_control(adapter); in igb_probe()
2556 adapter->flags |= IGB_FLAG_DCA_ENABLED; in igb_probe()
2558 igb_setup_dca(adapter); in igb_probe()
2572 adapter->ets = true; in igb_probe()
2574 adapter->ets = false; in igb_probe()
2575 if (igb_sysfs_init(adapter)) in igb_probe()
2579 adapter->ets = false; in igb_probe()
2583 adapter->ei = *ei; in igb_probe()
2585 igb_init_mas(adapter); in igb_probe()
2588 igb_ptp_init(adapter); in igb_probe()
2619 (adapter->flags & IGB_FLAG_HAS_MSIX) ? "MSI-X" : in igb_probe()
2620 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy", in igb_probe()
2621 adapter->num_rx_queues, adapter->num_tx_queues); in igb_probe()
2631 adapter->eee_advert = in igb_probe()
2633 adapter->flags |= IGB_FLAG_EEE; in igb_probe()
2642 adapter->eee_advert = in igb_probe()
2644 adapter->flags |= IGB_FLAG_EEE; in igb_probe()
2656 igb_release_hw_control(adapter); in igb_probe()
2657 memset(&adapter->i2c_adap, 0, sizeof(adapter->i2c_adap)); in igb_probe()
2665 igb_clear_interrupt_scheme(adapter); in igb_probe()
2682 struct igb_adapter *adapter = netdev_priv(netdev); in igb_disable_sriov() local
2683 struct e1000_hw *hw = &adapter->hw; in igb_disable_sriov()
2686 if (adapter->vf_data) { in igb_disable_sriov()
2697 kfree(adapter->vf_data); in igb_disable_sriov()
2698 adapter->vf_data = NULL; in igb_disable_sriov()
2699 adapter->vfs_allocated_count = 0; in igb_disable_sriov()
2706 adapter->flags |= IGB_FLAG_DMAC; in igb_disable_sriov()
2715 struct igb_adapter *adapter = netdev_priv(netdev); in igb_enable_sriov() local
2720 if (!(adapter->flags & IGB_FLAG_HAS_MSIX) || num_vfs > 7) { in igb_enable_sriov()
2730 adapter->vfs_allocated_count = old_vfs; in igb_enable_sriov()
2732 adapter->vfs_allocated_count = num_vfs; in igb_enable_sriov()
2734 adapter->vf_data = kcalloc(adapter->vfs_allocated_count, in igb_enable_sriov()
2738 if (!adapter->vf_data) { in igb_enable_sriov()
2739 adapter->vfs_allocated_count = 0; in igb_enable_sriov()
2748 err = pci_enable_sriov(pdev, adapter->vfs_allocated_count); in igb_enable_sriov()
2753 adapter->vfs_allocated_count); in igb_enable_sriov()
2754 for (i = 0; i < adapter->vfs_allocated_count; i++) in igb_enable_sriov()
2755 igb_vf_configure(adapter, i); in igb_enable_sriov()
2758 adapter->flags &= ~IGB_FLAG_DMAC; in igb_enable_sriov()
2762 kfree(adapter->vf_data); in igb_enable_sriov()
2763 adapter->vf_data = NULL; in igb_enable_sriov()
2764 adapter->vfs_allocated_count = 0; in igb_enable_sriov()
2774 static void igb_remove_i2c(struct igb_adapter *adapter) in igb_remove_i2c() argument
2777 i2c_del_adapter(&adapter->i2c_adap); in igb_remove_i2c()
2792 struct igb_adapter *adapter = netdev_priv(netdev); in igb_remove() local
2793 struct e1000_hw *hw = &adapter->hw; in igb_remove()
2797 igb_sysfs_exit(adapter); in igb_remove()
2799 igb_remove_i2c(adapter); in igb_remove()
2800 igb_ptp_stop(adapter); in igb_remove()
2804 set_bit(__IGB_DOWN, &adapter->state); in igb_remove()
2805 del_timer_sync(&adapter->watchdog_timer); in igb_remove()
2806 del_timer_sync(&adapter->phy_info_timer); in igb_remove()
2808 cancel_work_sync(&adapter->reset_task); in igb_remove()
2809 cancel_work_sync(&adapter->watchdog_task); in igb_remove()
2812 if (adapter->flags & IGB_FLAG_DCA_ENABLED) { in igb_remove()
2815 adapter->flags &= ~IGB_FLAG_DCA_ENABLED; in igb_remove()
2823 igb_release_hw_control(adapter); in igb_remove()
2827 igb_clear_interrupt_scheme(adapter); in igb_remove()
2839 kfree(adapter->shadow_vfta); in igb_remove()
2856 static void igb_probe_vfs(struct igb_adapter *adapter) in igb_probe_vfs() argument
2859 struct pci_dev *pdev = adapter->pdev; in igb_probe_vfs()
2860 struct e1000_hw *hw = &adapter->hw; in igb_probe_vfs()
2872 static void igb_init_queue_configuration(struct igb_adapter *adapter) in igb_init_queue_configuration() argument
2874 struct e1000_hw *hw = &adapter->hw; in igb_init_queue_configuration()
2888 if (!!adapter->vfs_allocated_count) { in igb_init_queue_configuration()
2894 if (!!adapter->vfs_allocated_count) { in igb_init_queue_configuration()
2906 adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus()); in igb_init_queue_configuration()
2908 igb_set_flag_queue_pairs(adapter, max_rss_queues); in igb_init_queue_configuration()
2911 void igb_set_flag_queue_pairs(struct igb_adapter *adapter, in igb_set_flag_queue_pairs() argument
2914 struct e1000_hw *hw = &adapter->hw; in igb_set_flag_queue_pairs()
2927 if ((adapter->rss_queues > 1) && in igb_set_flag_queue_pairs()
2928 (adapter->vfs_allocated_count > 6)) in igb_set_flag_queue_pairs()
2929 adapter->flags |= IGB_FLAG_QUEUE_PAIRS; in igb_set_flag_queue_pairs()
2939 if (adapter->rss_queues > (max_rss_queues / 2)) in igb_set_flag_queue_pairs()
2940 adapter->flags |= IGB_FLAG_QUEUE_PAIRS; in igb_set_flag_queue_pairs()
2953 static int igb_sw_init(struct igb_adapter *adapter) in igb_sw_init() argument
2955 struct e1000_hw *hw = &adapter->hw; in igb_sw_init()
2956 struct net_device *netdev = adapter->netdev; in igb_sw_init()
2957 struct pci_dev *pdev = adapter->pdev; in igb_sw_init()
2962 adapter->tx_ring_count = IGB_DEFAULT_TXD; in igb_sw_init()
2963 adapter->rx_ring_count = IGB_DEFAULT_RXD; in igb_sw_init()
2966 adapter->rx_itr_setting = IGB_DEFAULT_ITR; in igb_sw_init()
2967 adapter->tx_itr_setting = IGB_DEFAULT_ITR; in igb_sw_init()
2970 adapter->tx_work_limit = IGB_DEFAULT_TX_WORK; in igb_sw_init()
2972 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + in igb_sw_init()
2974 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; in igb_sw_init()
2976 spin_lock_init(&adapter->stats64_lock); in igb_sw_init()
2984 max_vfs = adapter->vfs_allocated_count = 7; in igb_sw_init()
2986 adapter->vfs_allocated_count = max_vfs; in igb_sw_init()
2987 if (adapter->vfs_allocated_count) in igb_sw_init()
2996 igb_init_queue_configuration(adapter); in igb_sw_init()
2999 adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32), in igb_sw_init()
3003 if (igb_init_interrupt_scheme(adapter, true)) { in igb_sw_init()
3008 igb_probe_vfs(adapter); in igb_sw_init()
3011 igb_irq_disable(adapter); in igb_sw_init()
3014 adapter->flags &= ~IGB_FLAG_DMAC; in igb_sw_init()
3016 set_bit(__IGB_DOWN, &adapter->state); in igb_sw_init()
3034 struct igb_adapter *adapter = netdev_priv(netdev); in __igb_open() local
3035 struct e1000_hw *hw = &adapter->hw; in __igb_open()
3036 struct pci_dev *pdev = adapter->pdev; in __igb_open()
3041 if (test_bit(__IGB_TESTING, &adapter->state)) { in __igb_open()
3052 err = igb_setup_all_tx_resources(adapter); in __igb_open()
3057 err = igb_setup_all_rx_resources(adapter); in __igb_open()
3061 igb_power_up_link(adapter); in __igb_open()
3068 igb_configure(adapter); in __igb_open()
3070 err = igb_request_irq(adapter); in __igb_open()
3075 err = netif_set_real_num_tx_queues(adapter->netdev, in __igb_open()
3076 adapter->num_tx_queues); in __igb_open()
3080 err = netif_set_real_num_rx_queues(adapter->netdev, in __igb_open()
3081 adapter->num_rx_queues); in __igb_open()
3086 clear_bit(__IGB_DOWN, &adapter->state); in __igb_open()
3088 for (i = 0; i < adapter->num_q_vectors; i++) in __igb_open()
3089 napi_enable(&(adapter->q_vector[i]->napi)); in __igb_open()
3094 igb_irq_enable(adapter); in __igb_open()
3097 if (adapter->vfs_allocated_count) { in __igb_open()
3111 schedule_work(&adapter->watchdog_task); in __igb_open()
3116 igb_free_irq(adapter); in __igb_open()
3118 igb_release_hw_control(adapter); in __igb_open()
3119 igb_power_down_link(adapter); in __igb_open()
3120 igb_free_all_rx_resources(adapter); in __igb_open()
3122 igb_free_all_tx_resources(adapter); in __igb_open()
3124 igb_reset(adapter); in __igb_open()
3149 struct igb_adapter *adapter = netdev_priv(netdev); in __igb_close() local
3150 struct pci_dev *pdev = adapter->pdev; in __igb_close()
3152 WARN_ON(test_bit(__IGB_RESETTING, &adapter->state)); in __igb_close()
3157 igb_down(adapter); in __igb_close()
3158 igb_free_irq(adapter); in __igb_close()
3160 igb_free_all_tx_resources(adapter); in __igb_close()
3161 igb_free_all_rx_resources(adapter); in __igb_close()
3218 static int igb_setup_all_tx_resources(struct igb_adapter *adapter) in igb_setup_all_tx_resources() argument
3220 struct pci_dev *pdev = adapter->pdev; in igb_setup_all_tx_resources()
3223 for (i = 0; i < adapter->num_tx_queues; i++) { in igb_setup_all_tx_resources()
3224 err = igb_setup_tx_resources(adapter->tx_ring[i]); in igb_setup_all_tx_resources()
3229 igb_free_tx_resources(adapter->tx_ring[i]); in igb_setup_all_tx_resources()
3241 void igb_setup_tctl(struct igb_adapter *adapter) in igb_setup_tctl() argument
3243 struct e1000_hw *hw = &adapter->hw; in igb_setup_tctl()
3270 void igb_configure_tx_ring(struct igb_adapter *adapter, in igb_configure_tx_ring() argument
3273 struct e1000_hw *hw = &adapter->hw; in igb_configure_tx_ring()
3307 static void igb_configure_tx(struct igb_adapter *adapter) in igb_configure_tx() argument
3311 for (i = 0; i < adapter->num_tx_queues; i++) in igb_configure_tx()
3312 igb_configure_tx_ring(adapter, adapter->tx_ring[i]); in igb_configure_tx()
3361 static int igb_setup_all_rx_resources(struct igb_adapter *adapter) in igb_setup_all_rx_resources() argument
3363 struct pci_dev *pdev = adapter->pdev; in igb_setup_all_rx_resources()
3366 for (i = 0; i < adapter->num_rx_queues; i++) { in igb_setup_all_rx_resources()
3367 err = igb_setup_rx_resources(adapter->rx_ring[i]); in igb_setup_all_rx_resources()
3372 igb_free_rx_resources(adapter->rx_ring[i]); in igb_setup_all_rx_resources()
3384 static void igb_setup_mrqc(struct igb_adapter *adapter) in igb_setup_mrqc() argument
3386 struct e1000_hw *hw = &adapter->hw; in igb_setup_mrqc()
3395 num_rx_queues = adapter->rss_queues; in igb_setup_mrqc()
3400 if (adapter->vfs_allocated_count) in igb_setup_mrqc()
3407 if (adapter->rss_indir_tbl_init != num_rx_queues) { in igb_setup_mrqc()
3409 adapter->rss_indir_tbl[j] = in igb_setup_mrqc()
3411 adapter->rss_indir_tbl_init = num_rx_queues; in igb_setup_mrqc()
3413 igb_write_rss_indir_tbl(adapter); in igb_setup_mrqc()
3422 if (adapter->hw.mac.type >= e1000_82576) in igb_setup_mrqc()
3438 if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP) in igb_setup_mrqc()
3440 if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP) in igb_setup_mrqc()
3447 if (adapter->vfs_allocated_count) { in igb_setup_mrqc()
3454 vtctl |= adapter->vfs_allocated_count << in igb_setup_mrqc()
3458 if (adapter->rss_queues > 1) in igb_setup_mrqc()
3466 igb_vmm_control(adapter); in igb_setup_mrqc()
3475 void igb_setup_rctl(struct igb_adapter *adapter) in igb_setup_rctl() argument
3477 struct e1000_hw *hw = &adapter->hw; in igb_setup_rctl()
3507 if (adapter->vfs_allocated_count) { in igb_setup_rctl()
3513 if (adapter->netdev->features & NETIF_F_RXALL) { in igb_setup_rctl()
3532 static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size, in igb_set_vf_rlpml() argument
3535 struct e1000_hw *hw = &adapter->hw; in igb_set_vf_rlpml()
3541 if (vfn < adapter->vfs_allocated_count && in igb_set_vf_rlpml()
3542 adapter->vf_data[vfn].vlans_enabled) in igb_set_vf_rlpml()
3559 static void igb_rlpml_set(struct igb_adapter *adapter) in igb_rlpml_set() argument
3561 u32 max_frame_size = adapter->max_frame_size; in igb_rlpml_set()
3562 struct e1000_hw *hw = &adapter->hw; in igb_rlpml_set()
3563 u16 pf_id = adapter->vfs_allocated_count; in igb_rlpml_set()
3566 igb_set_vf_rlpml(adapter, max_frame_size, pf_id); in igb_rlpml_set()
3579 static inline void igb_set_vmolr(struct igb_adapter *adapter, in igb_set_vmolr() argument
3582 struct e1000_hw *hw = &adapter->hw; in igb_set_vmolr()
3608 if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count) in igb_set_vmolr()
3613 if (vfn <= adapter->vfs_allocated_count) in igb_set_vmolr()
3626 void igb_configure_rx_ring(struct igb_adapter *adapter, in igb_configure_rx_ring() argument
3629 struct e1000_hw *hw = &adapter->hw; in igb_configure_rx_ring()
3656 if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1) in igb_configure_rx_ring()
3662 igb_set_vmolr(adapter, reg_idx & 0x7, true); in igb_configure_rx_ring()
3679 static void igb_configure_rx(struct igb_adapter *adapter) in igb_configure_rx() argument
3684 igb_set_uta(adapter); in igb_configure_rx()
3687 igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0, in igb_configure_rx()
3688 adapter->vfs_allocated_count); in igb_configure_rx()
3693 for (i = 0; i < adapter->num_rx_queues; i++) in igb_configure_rx()
3694 igb_configure_rx_ring(adapter, adapter->rx_ring[i]); in igb_configure_rx()
3726 static void igb_free_all_tx_resources(struct igb_adapter *adapter) in igb_free_all_tx_resources() argument
3730 for (i = 0; i < adapter->num_tx_queues; i++) in igb_free_all_tx_resources()
3731 if (adapter->tx_ring[i]) in igb_free_all_tx_resources()
3732 igb_free_tx_resources(adapter->tx_ring[i]); in igb_free_all_tx_resources()
3792 static void igb_clean_all_tx_rings(struct igb_adapter *adapter) in igb_clean_all_tx_rings() argument
3796 for (i = 0; i < adapter->num_tx_queues; i++) in igb_clean_all_tx_rings()
3797 if (adapter->tx_ring[i]) in igb_clean_all_tx_rings()
3798 igb_clean_tx_ring(adapter->tx_ring[i]); in igb_clean_all_tx_rings()
3830 static void igb_free_all_rx_resources(struct igb_adapter *adapter) in igb_free_all_rx_resources() argument
3834 for (i = 0; i < adapter->num_rx_queues; i++) in igb_free_all_rx_resources()
3835 if (adapter->rx_ring[i]) in igb_free_all_rx_resources()
3836 igb_free_rx_resources(adapter->rx_ring[i]); in igb_free_all_rx_resources()
3886 static void igb_clean_all_rx_rings(struct igb_adapter *adapter) in igb_clean_all_rx_rings() argument
3890 for (i = 0; i < adapter->num_rx_queues; i++) in igb_clean_all_rx_rings()
3891 if (adapter->rx_ring[i]) in igb_clean_all_rx_rings()
3892 igb_clean_rx_ring(adapter->rx_ring[i]); in igb_clean_all_rx_rings()
3904 struct igb_adapter *adapter = netdev_priv(netdev); in igb_set_mac() local
3905 struct e1000_hw *hw = &adapter->hw; in igb_set_mac()
3915 igb_rar_set_qsel(adapter, hw->mac.addr, 0, in igb_set_mac()
3916 adapter->vfs_allocated_count); in igb_set_mac()
3932 struct igb_adapter *adapter = netdev_priv(netdev); in igb_write_mc_addr_list() local
3933 struct e1000_hw *hw = &adapter->hw; in igb_write_mc_addr_list()
3941 igb_restore_vf_multicasts(adapter); in igb_write_mc_addr_list()
3971 struct igb_adapter *adapter = netdev_priv(netdev); in igb_write_uc_addr_list() local
3972 struct e1000_hw *hw = &adapter->hw; in igb_write_uc_addr_list()
3973 unsigned int vfn = adapter->vfs_allocated_count; in igb_write_uc_addr_list()
3987 igb_rar_set_qsel(adapter, ha->addr, in igb_write_uc_addr_list()
4014 struct igb_adapter *adapter = netdev_priv(netdev); in igb_set_rx_mode() local
4015 struct e1000_hw *hw = &adapter->hw; in igb_set_rx_mode()
4016 unsigned int vfn = adapter->vfs_allocated_count; in igb_set_rx_mode()
4028 if (adapter->vfs_allocated_count) in igb_set_rx_mode()
4073 igb_restore_vf_multicasts(adapter); in igb_set_rx_mode()
4076 static void igb_check_wvbr(struct igb_adapter *adapter) in igb_check_wvbr() argument
4078 struct e1000_hw *hw = &adapter->hw; in igb_check_wvbr()
4092 adapter->wvbr |= wvbr; in igb_check_wvbr()
4097 static void igb_spoof_check(struct igb_adapter *adapter) in igb_spoof_check() argument
4101 if (!adapter->wvbr) in igb_spoof_check()
4104 for (j = 0; j < adapter->vfs_allocated_count; j++) { in igb_spoof_check()
4105 if (adapter->wvbr & (1 << j) || in igb_spoof_check()
4106 adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) { in igb_spoof_check()
4107 dev_warn(&adapter->pdev->dev, in igb_spoof_check()
4109 adapter->wvbr &= in igb_spoof_check()
4121 struct igb_adapter *adapter = (struct igb_adapter *) data; in igb_update_phy_info() local
4122 igb_get_phy_info(&adapter->hw); in igb_update_phy_info()
4129 bool igb_has_link(struct igb_adapter *adapter) in igb_has_link() argument
4131 struct e1000_hw *hw = &adapter->hw; in igb_has_link()
4155 if (!netif_carrier_ok(adapter->netdev)) { in igb_has_link()
4156 adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE; in igb_has_link()
4157 } else if (!(adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)) { in igb_has_link()
4158 adapter->flags |= IGB_FLAG_NEED_LINK_UPDATE; in igb_has_link()
4159 adapter->link_check_timeout = jiffies; in igb_has_link()
4189 static void igb_check_lvmmc(struct igb_adapter *adapter) in igb_check_lvmmc() argument
4191 struct e1000_hw *hw = &adapter->hw; in igb_check_lvmmc()
4197 netdev_warn(adapter->netdev, in igb_check_lvmmc()
4210 struct igb_adapter *adapter = (struct igb_adapter *)data; in igb_watchdog() local
4212 schedule_work(&adapter->watchdog_task); in igb_watchdog()
4217 struct igb_adapter *adapter = container_of(work, in igb_watchdog_task() local
4220 struct e1000_hw *hw = &adapter->hw; in igb_watchdog_task()
4222 struct net_device *netdev = adapter->netdev; in igb_watchdog_task()
4227 link = igb_has_link(adapter); in igb_watchdog_task()
4229 if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE) { in igb_watchdog_task()
4230 if (time_after(jiffies, (adapter->link_check_timeout + HZ))) in igb_watchdog_task()
4231 adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE; in igb_watchdog_task()
4237 if (adapter->flags & IGB_FLAG_MAS_ENABLE) { in igb_watchdog_task()
4248 adapter->flags |= IGB_FLAG_MEDIA_RESET; in igb_watchdog_task()
4249 igb_reset(adapter); in igb_watchdog_task()
4258 &adapter->link_speed, in igb_watchdog_task()
4259 &adapter->link_duplex); in igb_watchdog_task()
4266 adapter->link_speed, in igb_watchdog_task()
4267 adapter->link_duplex == FULL_DUPLEX ? in igb_watchdog_task()
4275 if ((adapter->flags & IGB_FLAG_EEE) && in igb_watchdog_task()
4276 (adapter->link_duplex == HALF_DUPLEX)) { in igb_watchdog_task()
4277 dev_info(&adapter->pdev->dev, in igb_watchdog_task()
4279 adapter->hw.dev_spec._82575.eee_disable = true; in igb_watchdog_task()
4280 adapter->flags &= ~IGB_FLAG_EEE; in igb_watchdog_task()
4294 adapter->tx_timeout_factor = 1; in igb_watchdog_task()
4295 switch (adapter->link_speed) { in igb_watchdog_task()
4297 adapter->tx_timeout_factor = 14; in igb_watchdog_task()
4306 igb_ping_all_vfs(adapter); in igb_watchdog_task()
4307 igb_check_vf_rate_limit(adapter); in igb_watchdog_task()
4310 if (!test_bit(__IGB_DOWN, &adapter->state)) in igb_watchdog_task()
4311 mod_timer(&adapter->phy_info_timer, in igb_watchdog_task()
4316 adapter->link_speed = 0; in igb_watchdog_task()
4317 adapter->link_duplex = 0; in igb_watchdog_task()
4330 igb_ping_all_vfs(adapter); in igb_watchdog_task()
4333 if (!test_bit(__IGB_DOWN, &adapter->state)) in igb_watchdog_task()
4334 mod_timer(&adapter->phy_info_timer, in igb_watchdog_task()
4338 if (adapter->flags & IGB_FLAG_MAS_ENABLE) { in igb_watchdog_task()
4339 igb_check_swap_media(adapter); in igb_watchdog_task()
4340 if (adapter->flags & IGB_FLAG_MEDIA_RESET) { in igb_watchdog_task()
4341 schedule_work(&adapter->reset_task); in igb_watchdog_task()
4351 (adapter->flags & IGB_FLAG_MAS_ENABLE)) { in igb_watchdog_task()
4352 igb_check_swap_media(adapter); in igb_watchdog_task()
4353 if (adapter->flags & IGB_FLAG_MEDIA_RESET) { in igb_watchdog_task()
4354 schedule_work(&adapter->reset_task); in igb_watchdog_task()
4361 spin_lock(&adapter->stats64_lock); in igb_watchdog_task()
4362 igb_update_stats(adapter, &adapter->stats64); in igb_watchdog_task()
4363 spin_unlock(&adapter->stats64_lock); in igb_watchdog_task()
4365 for (i = 0; i < adapter->num_tx_queues; i++) { in igb_watchdog_task()
4366 struct igb_ring *tx_ring = adapter->tx_ring[i]; in igb_watchdog_task()
4374 adapter->tx_timeout_count++; in igb_watchdog_task()
4375 schedule_work(&adapter->reset_task); in igb_watchdog_task()
4386 if (adapter->flags & IGB_FLAG_HAS_MSIX) { in igb_watchdog_task()
4389 for (i = 0; i < adapter->num_q_vectors; i++) in igb_watchdog_task()
4390 eics |= adapter->q_vector[i]->eims_value; in igb_watchdog_task()
4396 igb_spoof_check(adapter); in igb_watchdog_task()
4397 igb_ptp_rx_hang(adapter); in igb_watchdog_task()
4400 if ((adapter->hw.mac.type == e1000_i350) || in igb_watchdog_task()
4401 (adapter->hw.mac.type == e1000_i354)) in igb_watchdog_task()
4402 igb_check_lvmmc(adapter); in igb_watchdog_task()
4405 if (!test_bit(__IGB_DOWN, &adapter->state)) { in igb_watchdog_task()
4406 if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE) in igb_watchdog_task()
4407 mod_timer(&adapter->watchdog_timer, in igb_watchdog_task()
4410 mod_timer(&adapter->watchdog_timer, in igb_watchdog_task()
4441 struct igb_adapter *adapter = q_vector->adapter; in igb_update_ring_itr() local
4447 if (adapter->link_speed != SPEED_1000) { in igb_update_ring_itr()
4479 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || in igb_update_ring_itr()
4480 (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) in igb_update_ring_itr()
4565 struct igb_adapter *adapter = q_vector->adapter; in igb_set_itr() local
4570 if (adapter->link_speed != SPEED_1000) { in igb_set_itr()
4583 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || in igb_set_itr()
4584 (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) in igb_set_itr()
5035 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev); in igb_xmit_frame_ring() local
5038 &adapter->state)) { in igb_xmit_frame_ring()
5042 adapter->ptp_tx_skb = skb_get(skb); in igb_xmit_frame_ring()
5043 adapter->ptp_tx_start = jiffies; in igb_xmit_frame_ring()
5044 if (adapter->hw.mac.type == e1000_82576) in igb_xmit_frame_ring()
5045 schedule_work(&adapter->ptp_tx_work); in igb_xmit_frame_ring()
5076 static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter, in igb_tx_queue_mapping() argument
5081 if (r_idx >= adapter->num_tx_queues) in igb_tx_queue_mapping()
5082 r_idx = r_idx % adapter->num_tx_queues; in igb_tx_queue_mapping()
5084 return adapter->tx_ring[r_idx]; in igb_tx_queue_mapping()
5090 struct igb_adapter *adapter = netdev_priv(netdev); in igb_xmit_frame() local
5092 if (test_bit(__IGB_DOWN, &adapter->state)) { in igb_xmit_frame()
5108 return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb)); in igb_xmit_frame()
5117 struct igb_adapter *adapter = netdev_priv(netdev); in igb_tx_timeout() local
5118 struct e1000_hw *hw = &adapter->hw; in igb_tx_timeout()
5121 adapter->tx_timeout_count++; in igb_tx_timeout()
5126 schedule_work(&adapter->reset_task); in igb_tx_timeout()
5128 (adapter->eims_enable_mask & ~adapter->eims_other)); in igb_tx_timeout()
5133 struct igb_adapter *adapter; in igb_reset_task() local
5134 adapter = container_of(work, struct igb_adapter, reset_task); in igb_reset_task()
5136 igb_dump(adapter); in igb_reset_task()
5137 netdev_err(adapter->netdev, "Reset adapter\n"); in igb_reset_task()
5138 igb_reinit_locked(adapter); in igb_reset_task()
5149 struct igb_adapter *adapter = netdev_priv(netdev); in igb_get_stats64() local
5151 spin_lock(&adapter->stats64_lock); in igb_get_stats64()
5152 igb_update_stats(adapter, &adapter->stats64); in igb_get_stats64()
5153 memcpy(stats, &adapter->stats64, sizeof(*stats)); in igb_get_stats64()
5154 spin_unlock(&adapter->stats64_lock); in igb_get_stats64()
5168 struct igb_adapter *adapter = netdev_priv(netdev); in igb_change_mtu() local
5169 struct pci_dev *pdev = adapter->pdev; in igb_change_mtu()
5187 while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) in igb_change_mtu()
5191 adapter->max_frame_size = max_frame; in igb_change_mtu()
5194 igb_down(adapter); in igb_change_mtu()
5201 igb_up(adapter); in igb_change_mtu()
5203 igb_reset(adapter); in igb_change_mtu()
5205 clear_bit(__IGB_RESETTING, &adapter->state); in igb_change_mtu()
5214 void igb_update_stats(struct igb_adapter *adapter, in igb_update_stats() argument
5217 struct e1000_hw *hw = &adapter->hw; in igb_update_stats()
5218 struct pci_dev *pdev = adapter->pdev; in igb_update_stats()
5228 if (adapter->link_speed == 0) in igb_update_stats()
5237 for (i = 0; i < adapter->num_rx_queues; i++) { in igb_update_stats()
5238 struct igb_ring *ring = adapter->rx_ring[i]; in igb_update_stats()
5262 for (i = 0; i < adapter->num_tx_queues; i++) { in igb_update_stats()
5263 struct igb_ring *ring = adapter->tx_ring[i]; in igb_update_stats()
5277 adapter->stats.crcerrs += rd32(E1000_CRCERRS); in igb_update_stats()
5278 adapter->stats.gprc += rd32(E1000_GPRC); in igb_update_stats()
5279 adapter->stats.gorc += rd32(E1000_GORCL); in igb_update_stats()
5281 adapter->stats.bprc += rd32(E1000_BPRC); in igb_update_stats()
5282 adapter->stats.mprc += rd32(E1000_MPRC); in igb_update_stats()
5283 adapter->stats.roc += rd32(E1000_ROC); in igb_update_stats()
5285 adapter->stats.prc64 += rd32(E1000_PRC64); in igb_update_stats()
5286 adapter->stats.prc127 += rd32(E1000_PRC127); in igb_update_stats()
5287 adapter->stats.prc255 += rd32(E1000_PRC255); in igb_update_stats()
5288 adapter->stats.prc511 += rd32(E1000_PRC511); in igb_update_stats()
5289 adapter->stats.prc1023 += rd32(E1000_PRC1023); in igb_update_stats()
5290 adapter->stats.prc1522 += rd32(E1000_PRC1522); in igb_update_stats()
5291 adapter->stats.symerrs += rd32(E1000_SYMERRS); in igb_update_stats()
5292 adapter->stats.sec += rd32(E1000_SEC); in igb_update_stats()
5295 adapter->stats.mpc += mpc; in igb_update_stats()
5297 adapter->stats.scc += rd32(E1000_SCC); in igb_update_stats()
5298 adapter->stats.ecol += rd32(E1000_ECOL); in igb_update_stats()
5299 adapter->stats.mcc += rd32(E1000_MCC); in igb_update_stats()
5300 adapter->stats.latecol += rd32(E1000_LATECOL); in igb_update_stats()
5301 adapter->stats.dc += rd32(E1000_DC); in igb_update_stats()
5302 adapter->stats.rlec += rd32(E1000_RLEC); in igb_update_stats()
5303 adapter->stats.xonrxc += rd32(E1000_XONRXC); in igb_update_stats()
5304 adapter->stats.xontxc += rd32(E1000_XONTXC); in igb_update_stats()
5305 adapter->stats.xoffrxc += rd32(E1000_XOFFRXC); in igb_update_stats()
5306 adapter->stats.xofftxc += rd32(E1000_XOFFTXC); in igb_update_stats()
5307 adapter->stats.fcruc += rd32(E1000_FCRUC); in igb_update_stats()
5308 adapter->stats.gptc += rd32(E1000_GPTC); in igb_update_stats()
5309 adapter->stats.gotc += rd32(E1000_GOTCL); in igb_update_stats()
5311 adapter->stats.rnbc += rd32(E1000_RNBC); in igb_update_stats()
5312 adapter->stats.ruc += rd32(E1000_RUC); in igb_update_stats()
5313 adapter->stats.rfc += rd32(E1000_RFC); in igb_update_stats()
5314 adapter->stats.rjc += rd32(E1000_RJC); in igb_update_stats()
5315 adapter->stats.tor += rd32(E1000_TORH); in igb_update_stats()
5316 adapter->stats.tot += rd32(E1000_TOTH); in igb_update_stats()
5317 adapter->stats.tpr += rd32(E1000_TPR); in igb_update_stats()
5319 adapter->stats.ptc64 += rd32(E1000_PTC64); in igb_update_stats()
5320 adapter->stats.ptc127 += rd32(E1000_PTC127); in igb_update_stats()
5321 adapter->stats.ptc255 += rd32(E1000_PTC255); in igb_update_stats()
5322 adapter->stats.ptc511 += rd32(E1000_PTC511); in igb_update_stats()
5323 adapter->stats.ptc1023 += rd32(E1000_PTC1023); in igb_update_stats()
5324 adapter->stats.ptc1522 += rd32(E1000_PTC1522); in igb_update_stats()
5326 adapter->stats.mptc += rd32(E1000_MPTC); in igb_update_stats()
5327 adapter->stats.bptc += rd32(E1000_BPTC); in igb_update_stats()
5329 adapter->stats.tpt += rd32(E1000_TPT); in igb_update_stats()
5330 adapter->stats.colc += rd32(E1000_COLC); in igb_update_stats()
5332 adapter->stats.algnerrc += rd32(E1000_ALGNERRC); in igb_update_stats()
5336 adapter->stats.rxerrc += rd32(E1000_RXERRC); in igb_update_stats()
5341 adapter->stats.tncrs += rd32(E1000_TNCRS); in igb_update_stats()
5344 adapter->stats.tsctc += rd32(E1000_TSCTC); in igb_update_stats()
5345 adapter->stats.tsctfc += rd32(E1000_TSCTFC); in igb_update_stats()
5347 adapter->stats.iac += rd32(E1000_IAC); in igb_update_stats()
5348 adapter->stats.icrxoc += rd32(E1000_ICRXOC); in igb_update_stats()
5349 adapter->stats.icrxptc += rd32(E1000_ICRXPTC); in igb_update_stats()
5350 adapter->stats.icrxatc += rd32(E1000_ICRXATC); in igb_update_stats()
5351 adapter->stats.ictxptc += rd32(E1000_ICTXPTC); in igb_update_stats()
5352 adapter->stats.ictxatc += rd32(E1000_ICTXATC); in igb_update_stats()
5353 adapter->stats.ictxqec += rd32(E1000_ICTXQEC); in igb_update_stats()
5354 adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC); in igb_update_stats()
5355 adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC); in igb_update_stats()
5358 net_stats->multicast = adapter->stats.mprc; in igb_update_stats()
5359 net_stats->collisions = adapter->stats.colc; in igb_update_stats()
5366 net_stats->rx_errors = adapter->stats.rxerrc + in igb_update_stats()
5367 adapter->stats.crcerrs + adapter->stats.algnerrc + in igb_update_stats()
5368 adapter->stats.ruc + adapter->stats.roc + in igb_update_stats()
5369 adapter->stats.cexterr; in igb_update_stats()
5370 net_stats->rx_length_errors = adapter->stats.ruc + in igb_update_stats()
5371 adapter->stats.roc; in igb_update_stats()
5372 net_stats->rx_crc_errors = adapter->stats.crcerrs; in igb_update_stats()
5373 net_stats->rx_frame_errors = adapter->stats.algnerrc; in igb_update_stats()
5374 net_stats->rx_missed_errors = adapter->stats.mpc; in igb_update_stats()
5377 net_stats->tx_errors = adapter->stats.ecol + in igb_update_stats()
5378 adapter->stats.latecol; in igb_update_stats()
5379 net_stats->tx_aborted_errors = adapter->stats.ecol; in igb_update_stats()
5380 net_stats->tx_window_errors = adapter->stats.latecol; in igb_update_stats()
5381 net_stats->tx_carrier_errors = adapter->stats.tncrs; in igb_update_stats()
5386 adapter->stats.mgptc += rd32(E1000_MGTPTC); in igb_update_stats()
5387 adapter->stats.mgprc += rd32(E1000_MGTPRC); in igb_update_stats()
5388 adapter->stats.mgpdc += rd32(E1000_MGTPDC); in igb_update_stats()
5393 adapter->stats.o2bgptc += rd32(E1000_O2BGPTC); in igb_update_stats()
5394 adapter->stats.o2bspc += rd32(E1000_O2BSPC); in igb_update_stats()
5395 adapter->stats.b2ospc += rd32(E1000_B2OSPC); in igb_update_stats()
5396 adapter->stats.b2ogprc += rd32(E1000_B2OGPRC); in igb_update_stats()
5400 static void igb_tsync_interrupt(struct igb_adapter *adapter) in igb_tsync_interrupt() argument
5402 struct e1000_hw *hw = &adapter->hw; in igb_tsync_interrupt()
5409 if (adapter->ptp_caps.pps) in igb_tsync_interrupt()
5410 ptp_clock_event(adapter->ptp_clock, &event); in igb_tsync_interrupt()
5412 dev_err(&adapter->pdev->dev, "unexpected SYS WRAP"); in igb_tsync_interrupt()
5418 schedule_work(&adapter->ptp_tx_work); in igb_tsync_interrupt()
5423 spin_lock(&adapter->tmreg_lock); in igb_tsync_interrupt()
5424 ts = timespec_add(adapter->perout[0].start, in igb_tsync_interrupt()
5425 adapter->perout[0].period); in igb_tsync_interrupt()
5431 adapter->perout[0].start = ts; in igb_tsync_interrupt()
5432 spin_unlock(&adapter->tmreg_lock); in igb_tsync_interrupt()
5437 spin_lock(&adapter->tmreg_lock); in igb_tsync_interrupt()
5438 ts = timespec_add(adapter->perout[1].start, in igb_tsync_interrupt()
5439 adapter->perout[1].period); in igb_tsync_interrupt()
5445 adapter->perout[1].start = ts; in igb_tsync_interrupt()
5446 spin_unlock(&adapter->tmreg_lock); in igb_tsync_interrupt()
5456 ptp_clock_event(adapter->ptp_clock, &event); in igb_tsync_interrupt()
5466 ptp_clock_event(adapter->ptp_clock, &event); in igb_tsync_interrupt()
5476 struct igb_adapter *adapter = data; in igb_msix_other() local
5477 struct e1000_hw *hw = &adapter->hw; in igb_msix_other()
5482 schedule_work(&adapter->reset_task); in igb_msix_other()
5486 adapter->stats.doosync++; in igb_msix_other()
5491 igb_check_wvbr(adapter); in igb_msix_other()
5496 igb_msg_task(adapter); in igb_msix_other()
5501 if (!test_bit(__IGB_DOWN, &adapter->state)) in igb_msix_other()
5502 mod_timer(&adapter->watchdog_timer, jiffies + 1); in igb_msix_other()
5506 igb_tsync_interrupt(adapter); in igb_msix_other()
5508 wr32(E1000_EIMS, adapter->eims_other); in igb_msix_other()
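igb_msix_other() is the catch-all MSI-X vector: it reads ICR once, then fans out to the reset task, the doosync counter, the VF mailbox task, the watchdog timer (only if the adapter is not down), and the timesync handler, and finally re-arms only its own EIMS bit. A compilable sketch of that dispatch shape; the bit values and handler bodies are placeholders, not the real E1000_ICR_* definitions:

/* Sketch only: cause fan-out in the "other" MSI-X vector, placeholder bits. */
#include <stdio.h>
#include <stdint.h>

#define ICR_DRSTA    (1u << 0)   /* device reset asserted */
#define ICR_DOUTSYNC (1u << 1)   /* NIC DMA out of sync */
#define ICR_VMMB     (1u << 2)   /* VF mailbox event */
#define ICR_LSC      (1u << 3)   /* link status change */
#define ICR_TS       (1u << 4)   /* timesync event */

static void schedule_reset(void)   { puts("schedule reset_task"); }
static void bump_doosync(void)     { puts("stats.doosync++"); }
static void msg_task(void)         { puts("run VF mailbox msg task"); }
static void kick_watchdog(void)    { puts("mod watchdog_timer"); }
static void tsync_interrupt(void)  { puts("handle timesync causes"); }

static void msix_other(uint32_t icr, int adapter_down)
{
	if (icr & ICR_DRSTA)
		schedule_reset();
	if (icr & ICR_DOUTSYNC)
		bump_doosync();          /* HW reports DMA out of sync */
	if (icr & ICR_VMMB)
		msg_task();              /* poll each VF's mailbox */
	if ((icr & ICR_LSC) && !adapter_down)
		kick_watchdog();         /* link change: run watchdog soon */
	if (icr & ICR_TS)
		tsync_interrupt();
	/* the real handler then re-enables only its EIMS "other" bit */
}

int main(void)
{
	msix_other(ICR_VMMB | ICR_LSC, 0);
	return 0;
}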
5515 struct igb_adapter *adapter = q_vector->adapter; in igb_write_itr() local
5524 if (adapter->hw.mac.type == e1000_82575) in igb_write_itr()
5546 static void igb_update_tx_dca(struct igb_adapter *adapter, in igb_update_tx_dca() argument
5550 struct e1000_hw *hw = &adapter->hw; in igb_update_tx_dca()
5567 static void igb_update_rx_dca(struct igb_adapter *adapter, in igb_update_rx_dca() argument
5571 struct e1000_hw *hw = &adapter->hw; in igb_update_rx_dca()
5572 u32 rxctrl = dca3_get_tag(&adapter->pdev->dev, cpu); in igb_update_rx_dca()
5589 struct igb_adapter *adapter = q_vector->adapter; in igb_update_dca() local
5596 igb_update_tx_dca(adapter, q_vector->tx.ring, cpu); in igb_update_dca()
5599 igb_update_rx_dca(adapter, q_vector->rx.ring, cpu); in igb_update_dca()
5606 static void igb_setup_dca(struct igb_adapter *adapter) in igb_setup_dca() argument
5608 struct e1000_hw *hw = &adapter->hw; in igb_setup_dca()
5611 if (!(adapter->flags & IGB_FLAG_DCA_ENABLED)) in igb_setup_dca()
5617 for (i = 0; i < adapter->num_q_vectors; i++) { in igb_setup_dca()
5618 adapter->q_vector[i]->cpu = -1; in igb_setup_dca()
5619 igb_update_dca(adapter->q_vector[i]); in igb_setup_dca()
5626 struct igb_adapter *adapter = netdev_priv(netdev); in __igb_notify_dca() local
5627 struct pci_dev *pdev = adapter->pdev; in __igb_notify_dca()
5628 struct e1000_hw *hw = &adapter->hw; in __igb_notify_dca()
5634 if (adapter->flags & IGB_FLAG_DCA_ENABLED) in __igb_notify_dca()
5637 adapter->flags |= IGB_FLAG_DCA_ENABLED; in __igb_notify_dca()
5639 igb_setup_dca(adapter); in __igb_notify_dca()
5644 if (adapter->flags & IGB_FLAG_DCA_ENABLED) { in __igb_notify_dca()
5650 adapter->flags &= ~IGB_FLAG_DCA_ENABLED; in __igb_notify_dca()
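__igb_notify_dca() above toggles IGB_FLAG_DCA_ENABLED as the DCA provider appears and disappears, and igb_setup_dca() forces every queue vector to refresh its CPU tag by resetting cpu to -1. A small sketch of that flag-and-refresh pattern with stand-in types (not the real dca.h API):

/* Sketch only: DCA provider add/remove handling with stand-in types. */
#include <stdio.h>

#define FLAG_DCA_ENABLED (1u << 0)

enum dca_event { DCA_PROVIDER_ADD, DCA_PROVIDER_REMOVE };

struct q_vector { int cpu; };

struct adapter {
	unsigned int flags;
	int num_q_vectors;
	struct q_vector qv[4];
};

/* Force every vector to re-tag on its next igb_update_dca()-style pass. */
static void setup_dca(struct adapter *a)
{
	if (!(a->flags & FLAG_DCA_ENABLED))
		return;
	for (int i = 0; i < a->num_q_vectors; i++)
		a->qv[i].cpu = -1;      /* marks the cached CPU as stale */
}

static void notify_dca(struct adapter *a, enum dca_event ev)
{
	switch (ev) {
	case DCA_PROVIDER_ADD:
		if (a->flags & FLAG_DCA_ENABLED)
			break;          /* already enabled */
		a->flags |= FLAG_DCA_ENABLED;
		setup_dca(a);
		break;
	case DCA_PROVIDER_REMOVE:
		a->flags &= ~FLAG_DCA_ENABLED;
		break;
	}
}

int main(void)
{
	struct adapter a = { 0, 4, { {2}, {3}, {0}, {1} } };

	notify_dca(&a, DCA_PROVIDER_ADD);
	printf("flags=%#x qv0.cpu=%d\n", a.flags, a.qv[0].cpu);
	return 0;
}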
5672 static int igb_vf_configure(struct igb_adapter *adapter, int vf) in igb_vf_configure() argument
5677 igb_set_vf_mac(adapter, vf, mac_addr); in igb_vf_configure()
5680 adapter->vf_data[vf].spoofchk_enabled = true; in igb_vf_configure()
5686 static void igb_ping_all_vfs(struct igb_adapter *adapter) in igb_ping_all_vfs() argument
5688 struct e1000_hw *hw = &adapter->hw; in igb_ping_all_vfs()
5692 for (i = 0 ; i < adapter->vfs_allocated_count; i++) { in igb_ping_all_vfs()
5694 if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS) in igb_ping_all_vfs()
5700 static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) in igb_set_vf_promisc() argument
5702 struct e1000_hw *hw = &adapter->hw; in igb_set_vf_promisc()
5704 struct vf_data_storage *vf_data = &adapter->vf_data[vf]; in igb_set_vf_promisc()
5739 static int igb_set_vf_multicasts(struct igb_adapter *adapter, in igb_set_vf_multicasts() argument
5744 struct vf_data_storage *vf_data = &adapter->vf_data[vf]; in igb_set_vf_multicasts()
5762 igb_set_rx_mode(adapter->netdev); in igb_set_vf_multicasts()
5767 static void igb_restore_vf_multicasts(struct igb_adapter *adapter) in igb_restore_vf_multicasts() argument
5769 struct e1000_hw *hw = &adapter->hw; in igb_restore_vf_multicasts()
5773 for (i = 0; i < adapter->vfs_allocated_count; i++) { in igb_restore_vf_multicasts()
5778 vf_data = &adapter->vf_data[i]; in igb_restore_vf_multicasts()
5792 static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf) in igb_clear_vf_vfta() argument
5794 struct e1000_hw *hw = &adapter->hw; in igb_clear_vf_vfta()
5818 adapter->vf_data[vf].vlans_enabled = 0; in igb_clear_vf_vfta()
5821 static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf) in igb_vlvf_set() argument
5823 struct e1000_hw *hw = &adapter->hw; in igb_vlvf_set()
5831 if (!adapter->vfs_allocated_count) in igb_vlvf_set()
5869 if (vf >= adapter->vfs_allocated_count) in igb_vlvf_set()
5872 if (!adapter->vf_data[vf].vlans_enabled) { in igb_vlvf_set()
5883 adapter->vf_data[vf].vlans_enabled++; in igb_vlvf_set()
5897 if (vf >= adapter->vfs_allocated_count) in igb_vlvf_set()
5900 adapter->vf_data[vf].vlans_enabled--; in igb_vlvf_set()
5901 if (!adapter->vf_data[vf].vlans_enabled) { in igb_vlvf_set()
5916 static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf) in igb_set_vmvir() argument
5918 struct e1000_hw *hw = &adapter->hw; in igb_set_vmvir()
5930 struct igb_adapter *adapter = netdev_priv(netdev); in igb_ndo_set_vf_vlan() local
5932 if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7)) in igb_ndo_set_vf_vlan()
5935 err = igb_vlvf_set(adapter, vlan, !!vlan, vf); in igb_ndo_set_vf_vlan()
5938 igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf); in igb_ndo_set_vf_vlan()
5939 igb_set_vmolr(adapter, vf, !vlan); in igb_ndo_set_vf_vlan()
5940 adapter->vf_data[vf].pf_vlan = vlan; in igb_ndo_set_vf_vlan()
5941 adapter->vf_data[vf].pf_qos = qos; in igb_ndo_set_vf_vlan()
5942 dev_info(&adapter->pdev->dev, in igb_ndo_set_vf_vlan()
5944 if (test_bit(__IGB_DOWN, &adapter->state)) { in igb_ndo_set_vf_vlan()
5945 dev_warn(&adapter->pdev->dev, in igb_ndo_set_vf_vlan()
5947 dev_warn(&adapter->pdev->dev, in igb_ndo_set_vf_vlan()
5951 igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan, in igb_ndo_set_vf_vlan()
5953 igb_set_vmvir(adapter, vlan, vf); in igb_ndo_set_vf_vlan()
5954 igb_set_vmolr(adapter, vf, true); in igb_ndo_set_vf_vlan()
5955 adapter->vf_data[vf].pf_vlan = 0; in igb_ndo_set_vf_vlan()
5956 adapter->vf_data[vf].pf_qos = 0; in igb_ndo_set_vf_vlan()
5962 static int igb_find_vlvf_entry(struct igb_adapter *adapter, int vid) in igb_find_vlvf_entry() argument
5964 struct e1000_hw *hw = &adapter->hw; in igb_find_vlvf_entry()
5982 static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) in igb_set_vf_vlan() argument
5984 struct e1000_hw *hw = &adapter->hw; in igb_set_vf_vlan()
5992 if (add && (adapter->netdev->flags & IFF_PROMISC)) in igb_set_vf_vlan()
5993 err = igb_vlvf_set(adapter, vid, add, in igb_set_vf_vlan()
5994 adapter->vfs_allocated_count); in igb_set_vf_vlan()
5998 err = igb_vlvf_set(adapter, vid, add, vf); in igb_set_vf_vlan()
6006 if (!add && (adapter->netdev->flags & IFF_PROMISC)) { in igb_set_vf_vlan()
6008 int regndx = igb_find_vlvf_entry(adapter, vid); in igb_set_vf_vlan()
6017 adapter->vfs_allocated_count); in igb_set_vf_vlan()
6023 !test_bit(vid, adapter->active_vlans) && in igb_set_vf_vlan()
6025 igb_vlvf_set(adapter, vid, add, in igb_set_vf_vlan()
6026 adapter->vfs_allocated_count); in igb_set_vf_vlan()
6033 static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf) in igb_vf_reset() argument
6036 adapter->vf_data[vf].flags &= IGB_VF_FLAG_PF_SET_MAC; in igb_vf_reset()
6037 adapter->vf_data[vf].last_nack = jiffies; in igb_vf_reset()
6040 igb_set_vmolr(adapter, vf, true); in igb_vf_reset()
6043 igb_clear_vf_vfta(adapter, vf); in igb_vf_reset()
6044 if (adapter->vf_data[vf].pf_vlan) in igb_vf_reset()
6045 igb_ndo_set_vf_vlan(adapter->netdev, vf, in igb_vf_reset()
6046 adapter->vf_data[vf].pf_vlan, in igb_vf_reset()
6047 adapter->vf_data[vf].pf_qos); in igb_vf_reset()
6049 igb_clear_vf_vfta(adapter, vf); in igb_vf_reset()
6052 adapter->vf_data[vf].num_vf_mc_hashes = 0; in igb_vf_reset()
6055 igb_set_rx_mode(adapter->netdev); in igb_vf_reset()
6058 static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf) in igb_vf_reset_event() argument
6060 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses; in igb_vf_reset_event()
6063 if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC)) in igb_vf_reset_event()
6067 igb_vf_reset(adapter, vf); in igb_vf_reset_event()
6070 static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf) in igb_vf_reset_msg() argument
6072 struct e1000_hw *hw = &adapter->hw; in igb_vf_reset_msg()
6073 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses; in igb_vf_reset_msg()
6079 igb_vf_reset(adapter, vf); in igb_vf_reset_msg()
6082 igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf); in igb_vf_reset_msg()
6090 adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS; in igb_vf_reset_msg()
6102 static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf) in igb_set_vf_mac_addr() argument
6111 err = igb_set_vf_mac(adapter, vf, addr); in igb_set_vf_mac_addr()
6116 static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf) in igb_rcv_ack_from_vf() argument
6118 struct e1000_hw *hw = &adapter->hw; in igb_rcv_ack_from_vf()
6119 struct vf_data_storage *vf_data = &adapter->vf_data[vf]; in igb_rcv_ack_from_vf()
6130 static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf) in igb_rcv_msg_from_vf() argument
6132 struct pci_dev *pdev = adapter->pdev; in igb_rcv_msg_from_vf()
6134 struct e1000_hw *hw = &adapter->hw; in igb_rcv_msg_from_vf()
6135 struct vf_data_storage *vf_data = &adapter->vf_data[vf]; in igb_rcv_msg_from_vf()
6157 igb_vf_reset_msg(adapter, vf); in igb_rcv_msg_from_vf()
6172 retval = igb_set_vf_mac_addr(adapter, msgbuf, vf); in igb_rcv_msg_from_vf()
6179 retval = igb_set_vf_promisc(adapter, msgbuf, vf); in igb_rcv_msg_from_vf()
6182 retval = igb_set_vf_multicasts(adapter, msgbuf, vf); in igb_rcv_msg_from_vf()
6185 retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf); in igb_rcv_msg_from_vf()
6194 retval = igb_set_vf_vlan(adapter, msgbuf, vf); in igb_rcv_msg_from_vf()
6213 static void igb_msg_task(struct igb_adapter *adapter) in igb_msg_task() argument
6215 struct e1000_hw *hw = &adapter->hw; in igb_msg_task()
6218 for (vf = 0; vf < adapter->vfs_allocated_count; vf++) { in igb_msg_task()
6221 igb_vf_reset_event(adapter, vf); in igb_msg_task()
6225 igb_rcv_msg_from_vf(adapter, vf); in igb_msg_task()
6229 igb_rcv_ack_from_vf(adapter, vf); in igb_msg_task()
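igb_msg_task() walks every allocated VF and asks the mailbox which event is pending: a reset request, a message to receive, or an ack; igb_rcv_msg_from_vf() then dispatches on the message ID (set MAC, promisc, multicast, RLPML, VLAN). A compilable sketch of that per-VF poll-and-dispatch loop, with hypothetical helpers in place of the hw->mbx.ops callbacks:

/* Sketch only: PF-side mailbox polling, with stand-ins for the mbx ops. */
#include <stdio.h>
#include <stdbool.h>

#define NUM_VFS 3

/* Hypothetical predicates standing in for the mailbox check_for_* ops. */
static bool check_for_rst(int vf) { return vf == 0; }
static bool check_for_msg(int vf) { return vf == 1; }
static bool check_for_ack(int vf) { return vf == 2; }

static void vf_reset_event(int vf)  { printf("VF %d: reset event\n", vf); }
static void rcv_msg_from_vf(int vf) { printf("VF %d: dispatch message\n", vf); }
static void rcv_ack_from_vf(int vf) { printf("VF %d: handle ack\n", vf); }

static void msg_task(void)
{
	for (int vf = 0; vf < NUM_VFS; vf++) {
		/* pending reset request from this VF? */
		if (check_for_rst(vf))
			vf_reset_event(vf);

		/* pending message to receive and dispatch? */
		if (check_for_msg(vf))
			rcv_msg_from_vf(vf);

		/* pending ack to process? */
		if (check_for_ack(vf))
			rcv_ack_from_vf(vf);
	}
}

int main(void)
{
	msg_task();
	return 0;
}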
6243 static void igb_set_uta(struct igb_adapter *adapter) in igb_set_uta() argument
6245 struct e1000_hw *hw = &adapter->hw; in igb_set_uta()
6253 if (!adapter->vfs_allocated_count) in igb_set_uta()
6267 struct igb_adapter *adapter = data; in igb_intr_msi() local
6268 struct igb_q_vector *q_vector = adapter->q_vector[0]; in igb_intr_msi()
6269 struct e1000_hw *hw = &adapter->hw; in igb_intr_msi()
6276 schedule_work(&adapter->reset_task); in igb_intr_msi()
6280 adapter->stats.doosync++; in igb_intr_msi()
6285 if (!test_bit(__IGB_DOWN, &adapter->state)) in igb_intr_msi()
6286 mod_timer(&adapter->watchdog_timer, jiffies + 1); in igb_intr_msi()
6290 igb_tsync_interrupt(adapter); in igb_intr_msi()
6304 struct igb_adapter *adapter = data; in igb_intr() local
6305 struct igb_q_vector *q_vector = adapter->q_vector[0]; in igb_intr()
6306 struct e1000_hw *hw = &adapter->hw; in igb_intr()
6321 schedule_work(&adapter->reset_task); in igb_intr()
6325 adapter->stats.doosync++; in igb_intr()
6331 if (!test_bit(__IGB_DOWN, &adapter->state)) in igb_intr()
6332 mod_timer(&adapter->watchdog_timer, jiffies + 1); in igb_intr()
6336 igb_tsync_interrupt(adapter); in igb_intr()
6345 struct igb_adapter *adapter = q_vector->adapter; in igb_ring_irq_enable() local
6346 struct e1000_hw *hw = &adapter->hw; in igb_ring_irq_enable()
6348 if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) || in igb_ring_irq_enable()
6349 (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) { in igb_ring_irq_enable()
6350 if ((adapter->num_q_vectors == 1) && !adapter->vf_data) in igb_ring_irq_enable()
6356 if (!test_bit(__IGB_DOWN, &adapter->state)) { in igb_ring_irq_enable()
6357 if (adapter->flags & IGB_FLAG_HAS_MSIX) in igb_ring_irq_enable()
6360 igb_irq_enable(adapter); in igb_ring_irq_enable()
6377 if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED) in igb_poll()
6405 struct igb_adapter *adapter = q_vector->adapter; in igb_clean_tx_irq() local
6413 if (test_bit(__IGB_DOWN, &adapter->state)) in igb_clean_tx_irq()
6504 struct e1000_hw *hw = &adapter->hw; in igb_clean_tx_irq()
6512 (adapter->tx_timeout_factor * HZ)) && in igb_clean_tx_irq()
6555 !(test_bit(__IGB_DOWN, &adapter->state))) { in igb_clean_tx_irq()
7142 struct igb_adapter *adapter = netdev_priv(netdev); in igb_mii_ioctl() local
7145 if (adapter->hw.phy.media_type != e1000_media_type_copper) in igb_mii_ioctl()
7150 data->phy_id = adapter->hw.phy.addr; in igb_mii_ioctl()
7153 if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F, in igb_mii_ioctl()
7188 struct igb_adapter *adapter = hw->back; in igb_read_pci_cfg() local
7190 pci_read_config_word(adapter->pdev, reg, value); in igb_read_pci_cfg()
7195 struct igb_adapter *adapter = hw->back; in igb_write_pci_cfg() local
7197 pci_write_config_word(adapter->pdev, reg, *value); in igb_write_pci_cfg()
7202 struct igb_adapter *adapter = hw->back; in igb_read_pcie_cap_reg() local
7204 if (pcie_capability_read_word(adapter->pdev, reg, value)) in igb_read_pcie_cap_reg()
7212 struct igb_adapter *adapter = hw->back; in igb_write_pcie_cap_reg() local
7214 if (pcie_capability_write_word(adapter->pdev, reg, *value)) in igb_write_pcie_cap_reg()
7222 struct igb_adapter *adapter = netdev_priv(netdev); in igb_vlan_mode() local
7223 struct e1000_hw *hw = &adapter->hw; in igb_vlan_mode()
7244 igb_rlpml_set(adapter); in igb_vlan_mode()
7250 struct igb_adapter *adapter = netdev_priv(netdev); in igb_vlan_rx_add_vid() local
7251 struct e1000_hw *hw = &adapter->hw; in igb_vlan_rx_add_vid()
7252 int pf_id = adapter->vfs_allocated_count; in igb_vlan_rx_add_vid()
7255 igb_vlvf_set(adapter, vid, true, pf_id); in igb_vlan_rx_add_vid()
7260 set_bit(vid, adapter->active_vlans); in igb_vlan_rx_add_vid()
7268 struct igb_adapter *adapter = netdev_priv(netdev); in igb_vlan_rx_kill_vid() local
7269 struct e1000_hw *hw = &adapter->hw; in igb_vlan_rx_kill_vid()
7270 int pf_id = adapter->vfs_allocated_count; in igb_vlan_rx_kill_vid()
7274 err = igb_vlvf_set(adapter, vid, false, pf_id); in igb_vlan_rx_kill_vid()
7280 clear_bit(vid, adapter->active_vlans); in igb_vlan_rx_kill_vid()
7285 static void igb_restore_vlan(struct igb_adapter *adapter) in igb_restore_vlan() argument
7289 igb_vlan_mode(adapter->netdev, adapter->netdev->features); in igb_restore_vlan()
7291 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) in igb_restore_vlan()
7292 igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); in igb_restore_vlan()
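igb_restore_vlan() replays the software VLAN state after a reset: it re-applies the current offload mode and then re-adds every VID recorded in the active_vlans bitmap. A userspace sketch of that bitmap walk, using a plain array in place of the kernel's for_each_set_bit() over the 4096-bit map:

/* Sketch only: replaying VLAN filters from a VID bitmap after reset. */
#include <stdio.h>
#include <stdbool.h>

#define VLAN_N_VID 4096

static bool active_vlans[VLAN_N_VID];   /* stand-in for the driver's bitmap */

static void vlan_rx_add_vid(unsigned int vid)
{
	/* the driver programs the VLAN filter tables (VLVF/VFTA) here */
	printf("re-add VID %u\n", vid);
}

static void restore_vlan(void)
{
	for (unsigned int vid = 0; vid < VLAN_N_VID; vid++)
		if (active_vlans[vid])
			vlan_rx_add_vid(vid);
}

int main(void)
{
	active_vlans[100] = true;
	active_vlans[200] = true;
	restore_vlan();
	return 0;
}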
7295 int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx) in igb_set_spd_dplx() argument
7297 struct pci_dev *pdev = adapter->pdev; in igb_set_spd_dplx()
7298 struct e1000_mac_info *mac = &adapter->hw.mac; in igb_set_spd_dplx()
7311 if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) { in igb_set_spd_dplx()
7337 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; in igb_set_spd_dplx()
7345 adapter->hw.phy.mdix = AUTO_ALL_MODES; in igb_set_spd_dplx()
7358 struct igb_adapter *adapter = netdev_priv(netdev); in __igb_shutdown() local
7359 struct e1000_hw *hw = &adapter->hw; in __igb_shutdown()
7361 u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol; in __igb_shutdown()
7371 igb_clear_interrupt_scheme(adapter); in __igb_shutdown()
7384 igb_setup_rctl(adapter); in __igb_shutdown()
7412 *enable_wake = wufc || adapter->en_mng_pt; in __igb_shutdown()
7414 igb_power_down_link(adapter); in __igb_shutdown()
7416 igb_power_up_link(adapter); in __igb_shutdown()
7421 igb_release_hw_control(adapter); in __igb_shutdown()
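__igb_shutdown() above picks the wake-up filter set from the configured WoL bits (runtime suspend arms only link-change wake), and reports wake as needed when any filter or the manageability engine requires the link kept alive. A sketch of just that decision, with a placeholder value for the link-change filter bit:

/* Sketch only: the wake decision in __igb_shutdown(), placeholder WUFC bit. */
#include <stdio.h>
#include <stdbool.h>

#define WUFC_LNKC (1u << 0)    /* placeholder for the link-change wake filter */

static bool shutdown_wake(unsigned int wol, bool runtime, bool en_mng_pt)
{
	/* runtime suspend arms only link-change wake; system suspend uses WoL */
	unsigned int wufc = runtime ? WUFC_LNKC : wol;

	/* wake stays enabled if any filter is armed or manageability is on */
	return wufc || en_mng_pt;
}

int main(void)
{
	printf("wake=%d\n", shutdown_wake(0, true, false));
	return 0;
}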
7455 struct igb_adapter *adapter = netdev_priv(netdev); in igb_resume() local
7456 struct e1000_hw *hw = &adapter->hw; in igb_resume()
7476 if (igb_init_interrupt_scheme(adapter, true)) { in igb_resume()
7481 igb_reset(adapter); in igb_resume()
7486 igb_get_hw_control(adapter); in igb_resume()
7506 struct igb_adapter *adapter = netdev_priv(netdev); in igb_runtime_idle() local
7508 if (!igb_has_link(adapter)) in igb_runtime_idle()
7556 struct igb_adapter *adapter = netdev_priv(netdev); in igb_sriov_reinit() local
7557 struct pci_dev *pdev = adapter->pdev; in igb_sriov_reinit()
7564 igb_reset(adapter); in igb_sriov_reinit()
7566 igb_clear_interrupt_scheme(adapter); in igb_sriov_reinit()
7568 igb_init_queue_configuration(adapter); in igb_sriov_reinit()
7570 if (igb_init_interrupt_scheme(adapter, true)) { in igb_sriov_reinit()
7627 struct igb_adapter *adapter = netdev_priv(netdev); in igb_netpoll() local
7628 struct e1000_hw *hw = &adapter->hw; in igb_netpoll()
7632 for (i = 0; i < adapter->num_q_vectors; i++) { in igb_netpoll()
7633 q_vector = adapter->q_vector[i]; in igb_netpoll()
7634 if (adapter->flags & IGB_FLAG_HAS_MSIX) in igb_netpoll()
7637 igb_irq_disable(adapter); in igb_netpoll()
7655 struct igb_adapter *adapter = netdev_priv(netdev); in igb_io_error_detected() local
7663 igb_down(adapter); in igb_io_error_detected()
7680 struct igb_adapter *adapter = netdev_priv(netdev); in igb_io_slot_reset() local
7681 struct e1000_hw *hw = &adapter->hw; in igb_io_slot_reset()
7697 igb_reset(adapter); in igb_io_slot_reset()
7724 struct igb_adapter *adapter = netdev_priv(netdev); in igb_io_resume() local
7727 if (igb_up(adapter)) { in igb_io_resume()
7738 igb_get_hw_control(adapter); in igb_io_resume()
7741 static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index, in igb_rar_set_qsel() argument
7745 struct e1000_hw *hw = &adapter->hw; in igb_rar_set_qsel()
7768 static int igb_set_vf_mac(struct igb_adapter *adapter, in igb_set_vf_mac() argument
7771 struct e1000_hw *hw = &adapter->hw; in igb_set_vf_mac()
7777 memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN); in igb_set_vf_mac()
7779 igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf); in igb_set_vf_mac()
7786 struct igb_adapter *adapter = netdev_priv(netdev); in igb_ndo_set_vf_mac() local
7787 if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count)) in igb_ndo_set_vf_mac()
7789 adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC; in igb_ndo_set_vf_mac()
7790 dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf); in igb_ndo_set_vf_mac()
7791 dev_info(&adapter->pdev->dev, in igb_ndo_set_vf_mac()
7793 if (test_bit(__IGB_DOWN, &adapter->state)) { in igb_ndo_set_vf_mac()
7794 dev_warn(&adapter->pdev->dev, in igb_ndo_set_vf_mac()
7796 dev_warn(&adapter->pdev->dev, in igb_ndo_set_vf_mac()
7799 return igb_set_vf_mac(adapter, vf, mac); in igb_ndo_set_vf_mac()
7843 static void igb_check_vf_rate_limit(struct igb_adapter *adapter) in igb_check_vf_rate_limit() argument
7849 if ((adapter->vf_rate_link_speed == 0) || in igb_check_vf_rate_limit()
7850 (adapter->hw.mac.type != e1000_82576)) in igb_check_vf_rate_limit()
7853 actual_link_speed = igb_link_mbps(adapter->link_speed); in igb_check_vf_rate_limit()
7854 if (actual_link_speed != adapter->vf_rate_link_speed) { in igb_check_vf_rate_limit()
7856 adapter->vf_rate_link_speed = 0; in igb_check_vf_rate_limit()
7857 dev_info(&adapter->pdev->dev, in igb_check_vf_rate_limit()
7861 for (i = 0; i < adapter->vfs_allocated_count; i++) { in igb_check_vf_rate_limit()
7863 adapter->vf_data[i].tx_rate = 0; in igb_check_vf_rate_limit()
7865 igb_set_vf_rate_limit(&adapter->hw, i, in igb_check_vf_rate_limit()
7866 adapter->vf_data[i].tx_rate, in igb_check_vf_rate_limit()
7874 struct igb_adapter *adapter = netdev_priv(netdev); in igb_ndo_set_vf_bw() local
7875 struct e1000_hw *hw = &adapter->hw; in igb_ndo_set_vf_bw()
7884 actual_link_speed = igb_link_mbps(adapter->link_speed); in igb_ndo_set_vf_bw()
7885 if ((vf >= adapter->vfs_allocated_count) || in igb_ndo_set_vf_bw()
7891 adapter->vf_rate_link_speed = actual_link_speed; in igb_ndo_set_vf_bw()
7892 adapter->vf_data[vf].tx_rate = (u16)max_tx_rate; in igb_ndo_set_vf_bw()
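igb_ndo_set_vf_bw() records a per-VF rate only when the link is up and the request makes sense against the actual link speed, and igb_check_vf_rate_limit() later clears all limits if that speed changes. A sketch of the accept-and-record path; the exact set of rejected cases is partly assumed, and set_vf_rate_limit() is a hypothetical stand-in for the register programming:

/* Sketch only: validating a per-VF TX rate against the current link speed. */
#include <stdio.h>

struct vf { int tx_rate; };

struct adapter {
	int link_speed;            /* Mbps, 0 when the link is down */
	int vf_rate_link_speed;    /* speed the stored limits were set at */
	int num_vfs;
	struct vf vfs[8];
};

/* Hypothetical stand-in for the rate-limit register writes. */
static void set_vf_rate_limit(int vf, int rate)
{
	printf("VF %d: limit %d Mbps\n", vf, rate);
}

static int ndo_set_vf_bw(struct adapter *a, int vf, int max_tx_rate)
{
	if (vf >= a->num_vfs ||
	    a->link_speed == 0 ||            /* link must be up */
	    max_tx_rate > a->link_speed)     /* cannot exceed link speed */
		return -1;

	a->vf_rate_link_speed = a->link_speed;
	a->vfs[vf].tx_rate = max_tx_rate;
	set_vf_rate_limit(vf, max_tx_rate);
	return 0;
}

int main(void)
{
	struct adapter a = { .link_speed = 1000, .num_vfs = 2 };

	ndo_set_vf_bw(&a, 0, 500);
	return 0;
}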
7901 struct igb_adapter *adapter = netdev_priv(netdev); in igb_ndo_set_vf_spoofchk() local
7902 struct e1000_hw *hw = &adapter->hw; in igb_ndo_set_vf_spoofchk()
7905 if (!adapter->vfs_allocated_count) in igb_ndo_set_vf_spoofchk()
7908 if (vf >= adapter->vfs_allocated_count) in igb_ndo_set_vf_spoofchk()
7921 adapter->vf_data[vf].spoofchk_enabled = setting; in igb_ndo_set_vf_spoofchk()
7928 struct igb_adapter *adapter = netdev_priv(netdev); in igb_ndo_get_vf_config() local
7929 if (vf >= adapter->vfs_allocated_count) in igb_ndo_get_vf_config()
7932 memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN); in igb_ndo_get_vf_config()
7933 ivi->max_tx_rate = adapter->vf_data[vf].tx_rate; in igb_ndo_get_vf_config()
7935 ivi->vlan = adapter->vf_data[vf].pf_vlan; in igb_ndo_get_vf_config()
7936 ivi->qos = adapter->vf_data[vf].pf_qos; in igb_ndo_get_vf_config()
7937 ivi->spoofchk = adapter->vf_data[vf].spoofchk_enabled; in igb_ndo_get_vf_config()
7941 static void igb_vmm_control(struct igb_adapter *adapter) in igb_vmm_control() argument
7943 struct e1000_hw *hw = &adapter->hw; in igb_vmm_control()
7971 if (adapter->vfs_allocated_count) { in igb_vmm_control()
7975 adapter->vfs_allocated_count); in igb_vmm_control()
7982 static void igb_init_dmac(struct igb_adapter *adapter, u32 pba) in igb_init_dmac() argument
7984 struct e1000_hw *hw = &adapter->hw; in igb_init_dmac()
7989 if (adapter->flags & IGB_FLAG_DMAC) { in igb_init_dmac()
7999 hwm = 64 * pba - adapter->max_frame_size / 16; in igb_init_dmac()
8011 dmac_thr = pba - adapter->max_frame_size / 512; in igb_init_dmac()
8044 (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6); in igb_init_dmac()
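The igb_init_dmac() fragments above show the watermark arithmetic derived from the packet buffer allocation: the high water mark is 64 * pba minus max_frame_size / 16, and the DMA-coalescing threshold is pba minus max_frame_size / 512. A small standalone sketch of just that arithmetic, with an example pba value chosen for illustration:

/* Sketch only: the watermark arithmetic shown above, pba in KB. */
#include <stdio.h>

int main(void)
{
	unsigned int pba = 34;             /* example packet buffer size, KB */
	unsigned int max_frame_size = 1522;
	unsigned int hwm, dmac_thr;

	hwm = 64 * pba - max_frame_size / 16;      /* high water mark */
	dmac_thr = pba - max_frame_size / 512;     /* DMA coalescing threshold */

	printf("hwm=%u dmac_thr=%u\n", hwm, dmac_thr);
	return 0;
}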
8074 struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw); in igb_read_i2c_byte() local
8075 struct i2c_client *this_client = adapter->i2c_client; in igb_read_i2c_byte()
8111 struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw); in igb_write_i2c_byte() local
8112 struct i2c_client *this_client = adapter->i2c_client; in igb_write_i2c_byte()
8131 int igb_reinit_queues(struct igb_adapter *adapter) in igb_reinit_queues() argument
8133 struct net_device *netdev = adapter->netdev; in igb_reinit_queues()
8134 struct pci_dev *pdev = adapter->pdev; in igb_reinit_queues()
8140 igb_reset_interrupt_capability(adapter); in igb_reinit_queues()
8142 if (igb_init_interrupt_scheme(adapter, true)) { in igb_reinit_queues()