Lines matching refs: bp. Cross-reference of every line in drivers/net/ethernet/broadcom/bnxt/bnxt.c that references the identifier bp (the per-device struct bnxt * context). Each entry gives the source line number, the matching source line, and the enclosing function; "argument" and "local" mark the lines where bp enters scope.
133 static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr) in bnxt_tx_avail() argument
138 return bp->tx_ring_size - in bnxt_tx_avail()
139 ((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask); in bnxt_tx_avail()
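The availability math above (source lines 133-139) works because the ring size is a power of two: the unsigned difference tx_prod - tx_cons, masked with tx_ring_mask, counts in-flight descriptors correctly even after the 16-bit indices wrap. A minimal standalone illustration of just that arithmetic, with made-up ring dimensions (nothing here is driver API):

#include <assert.h>
#include <stdint.h>

/* Model of bnxt_tx_avail(): ring_mask == ring_size - 1, ring_size a power
 * of two, producer/consumer indices are free-running 16-bit counters. */
static uint32_t tx_avail(uint32_t ring_size, uint32_t ring_mask,
                         uint16_t prod, uint16_t cons)
{
        return ring_size - (((uint32_t)prod - cons) & ring_mask);
}

int main(void)
{
        /* Producer wrapped past 0xffff; 10 descriptors are in flight. */
        assert(tx_avail(512, 511, 5, 65531) == 502);
        return 0;
}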
166 struct bnxt *bp = netdev_priv(dev); in bnxt_start_xmit() local
175 struct pci_dev *pdev = bp->pdev; in bnxt_start_xmit()
181 if (unlikely(i >= bp->tx_nr_rings)) { in bnxt_start_xmit()
186 bnapi = bp->bnapi[i]; in bnxt_start_xmit()
191 free_size = bnxt_tx_avail(bp, txr); in bnxt_start_xmit()
221 if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) { in bnxt_start_xmit()
380 if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) { in bnxt_start_xmit()
389 if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh) in bnxt_start_xmit()
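Source lines 380-389 above are the transmit-side half of the standard stop/wake protocol: stop the queue when a worst-case packet (MAX_SKB_FRAGS fragments plus a head descriptor) might not fit, then immediately re-check in case a completion raced with the stop. A hedged reconstruction of that idiom, not a verbatim quote of the driver:

        if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
                netif_tx_stop_queue(txq);

                /* The stop must be visible before re-reading the indices,
                 * because the completion path checks queue_stopped only
                 * after it advances tx_cons. */
                smp_mb();

                if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
                        netif_tx_wake_queue(txq);
        }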
418 static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts) in bnxt_tx_int() argument
422 struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, index); in bnxt_tx_int()
424 struct pci_dev *pdev = bp->pdev; in bnxt_tx_int()
475 (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) { in bnxt_tx_int()
478 bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh && in bnxt_tx_int()
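Lines 475-478 are the completion-side half: bnxt_tx_int() wakes a stopped queue only after re-testing availability under the tx queue lock, so it cannot wake a queue the xmit path is about to stop again. A sketch of the shape; the dev_state guard is an assumption based on the driver's close path, not something visible in this listing:

        if (unlikely(netif_tx_queue_stopped(txq)) &&
            bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh) {
                __netif_tx_lock(txq, smp_processor_id());
                if (netif_tx_queue_stopped(txq) &&
                    bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
                    txr->dev_state != BNXT_DEV_STATE_CLOSING)  /* assumed */
                        netif_tx_wake_queue(txq);
                __netif_tx_unlock(txq);
        }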
485 static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping, in __bnxt_alloc_rx_data() argument
489 struct pci_dev *pdev = bp->pdev; in __bnxt_alloc_rx_data()
491 data = kmalloc(bp->rx_buf_size, gfp); in __bnxt_alloc_rx_data()
496 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE); in __bnxt_alloc_rx_data()
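__bnxt_alloc_rx_data() (lines 485-496) pairs a plain kmalloc() with a streaming DMA mapping of only the usable region: bp->rx_buf_size includes tail room for skb_shared_info, while bp->rx_buf_use_size is what the NIC may write. A plausible completion of the fragment, assuming the driver's BNXT_RX_DMA_OFFSET head-padding constant:

static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
                                       gfp_t gfp)
{
        u8 *data;
        struct pci_dev *pdev = bp->pdev;

        data = kmalloc(bp->rx_buf_size, gfp);
        if (!data)
                return NULL;

        /* Map past the head padding; only rx_buf_use_size is DMA-visible. */
        *mapping = dma_map_single(&pdev->dev, data + BNXT_RX_DMA_OFFSET,
                                  bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
        if (dma_mapping_error(&pdev->dev, *mapping)) {
                kfree(data);
                return NULL;
        }
        return data;
}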
505 static inline int bnxt_alloc_rx_data(struct bnxt *bp, in bnxt_alloc_rx_data() argument
514 data = __bnxt_alloc_rx_data(bp, &mapping, gfp); in bnxt_alloc_rx_data()
557 static inline int bnxt_alloc_rx_page(struct bnxt *bp, in bnxt_alloc_rx_page() argument
564 struct pci_dev *pdev = bp->pdev; in bnxt_alloc_rx_page()
597 struct bnxt *bp = bnapi->bp; in bnxt_reuse_rx_agg_bufs() local
645 static struct sk_buff *bnxt_rx_skb(struct bnxt *bp, in bnxt_rx_skb() argument
653 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC); in bnxt_rx_skb()
660 dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size, in bnxt_rx_skb()
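bnxt_rx_skb() (lines 645-660) shows the zero-copy refill pattern: first place a replacement buffer at the producer slot, and only then unmap the filled buffer and hand it to the stack with build_skb(); if the refill fails, the filled buffer is recycled and the packet dropped. A hedged reconstruction; bnxt_reuse_rx_data() and BNXT_RX_OFFSET are assumed driver helpers not present in this listing:

static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
                                   struct bnxt_rx_ring_info *rxr, u16 cons,
                                   u16 prod, u8 *data, dma_addr_t dma_addr,
                                   unsigned int len)
{
        struct sk_buff *skb;

        /* Refill first: never leave a ring slot without a buffer. */
        if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC)) {
                bnxt_reuse_rx_data(rxr, cons, data);    /* assumed helper */
                return NULL;                            /* drop the packet */
        }

        dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
                         PCI_DMA_FROMDEVICE);
        skb = build_skb(data, 0);
        if (!skb) {
                kfree(data);
                return NULL;
        }
        skb_reserve(skb, BNXT_RX_OFFSET);               /* assumed constant */
        skb_put(skb, len);
        return skb;
}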
672 static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi, in bnxt_rx_pages() argument
676 struct pci_dev *pdev = bp->pdev; in bnxt_rx_pages()
707 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) { in bnxt_rx_pages()
741 static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, in bnxt_agg_bufs_valid() argument
758 struct bnxt *bp = bnapi->bp; in bnxt_copy_skb() local
759 struct pci_dev *pdev = bp->pdev; in bnxt_copy_skb()
767 bp->rx_copy_thresh, PCI_DMA_FROMDEVICE); in bnxt_copy_skb()
772 bp->rx_copy_thresh, in bnxt_copy_skb()
779 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, in bnxt_tpa_start() argument
825 if (netif_msg_rx_err(bp)) in bnxt_tpa_start()
826 netdev_warn(bp->dev, "TPA packet without valid hash\n"); in bnxt_tpa_start()
840 static void bnxt_abort_tpa(struct bnxt *bp, struct bnxt_napi *bnapi, in bnxt_abort_tpa() argument
924 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, in bnxt_tpa_end() argument
951 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons)) in bnxt_tpa_end()
959 bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs); in bnxt_tpa_end()
960 netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n", in bnxt_tpa_end()
965 if (len <= bp->rx_copy_thresh) { in bnxt_tpa_end()
968 bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs); in bnxt_tpa_end()
975 new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC); in bnxt_tpa_end()
977 bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs); in bnxt_tpa_end()
985 dma_unmap_single(&bp->pdev->dev, mapping, bp->rx_buf_use_size, in bnxt_tpa_end()
990 bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs); in bnxt_tpa_end()
998 skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs); in bnxt_tpa_end()
1004 skb->protocol = eth_type_trans(skb, bp->dev); in bnxt_tpa_end()
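bnxt_tpa_end() (lines 924-1004) chooses between two delivery strategies at bp->rx_copy_thresh: short aggregations are copied into a fresh skb so the original buffer never leaves the ring, while long ones swap a newly allocated buffer into the TPA slot and wrap the old one with build_skb(). A condensed sketch of that branch; the tpa_info bookkeeping is assumed, and the abort calls mirror lines 959-990:

        if (len <= bp->rx_copy_thresh) {
                /* Small: memcpy into a fresh skb; ring buffer untouched. */
                skb = bnxt_copy_skb(bnapi, data, len, mapping);
                if (!skb) {
                        bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
                        return NULL;
                }
        } else {
                u8 *new_data;
                dma_addr_t new_mapping;

                new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
                if (!new_data) {
                        bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
                        return NULL;
                }
                tpa_info->data = new_data;      /* assumed bookkeeping */
                tpa_info->mapping = new_mapping;

                dma_unmap_single(&bp->pdev->dev, mapping, bp->rx_buf_use_size,
                                 PCI_DMA_FROMDEVICE);
                skb = build_skb(data, 0);       /* stack takes old buffer */
                if (!skb) {
                        kfree(data);
                        bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
                        return NULL;
                }
        }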
1044 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, in bnxt_rx_pkt() argument
1049 struct net_device *dev = bp->dev; in bnxt_rx_pkt()
1077 bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp, in bnxt_rx_pkt()
1083 skb = bnxt_tpa_end(bp, bnapi, &tmp_raw_cons, in bnxt_rx_pkt()
1113 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons)) in bnxt_rx_pkt()
1133 if (len <= bp->rx_copy_thresh) { in bnxt_rx_pkt()
1141 skb = bnxt_rx_skb(bp, rxr, cons, prod, data, dma_addr, len); in bnxt_rx_pkt()
1149 skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs); in bnxt_rx_pkt()
1211 static int bnxt_async_event_process(struct bnxt *bp, in bnxt_async_event_process() argument
1219 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event); in bnxt_async_event_process()
1220 schedule_work(&bp->sp_task); in bnxt_async_event_process()
1223 netdev_err(bp->dev, "unhandled ASYNC event (id 0x%x)\n", in bnxt_async_event_process()
1230 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp) in bnxt_hwrm_handler() argument
1240 if (seq_id == bp->hwrm_intr_seq_id) in bnxt_hwrm_handler()
1241 bp->hwrm_intr_seq_id = HWRM_SEQ_ID_INVALID; in bnxt_hwrm_handler()
1243 netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id); in bnxt_hwrm_handler()
1249 if ((vf_id < bp->pf.first_vf_id) || in bnxt_hwrm_handler()
1250 (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) { in bnxt_hwrm_handler()
1251 netdev_err(bp->dev, "Msg contains invalid VF id %x\n", in bnxt_hwrm_handler()
1256 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap); in bnxt_hwrm_handler()
1257 set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event); in bnxt_hwrm_handler()
1258 schedule_work(&bp->sp_task); in bnxt_hwrm_handler()
1262 bnxt_async_event_process(bp, in bnxt_hwrm_handler()
1275 struct bnxt *bp = bnapi->bp; in bnxt_msix() local
1284 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr) in bnxt_has_work() argument
1298 struct bnxt *bp = bnapi->bp; in bnxt_inta() local
1305 if (!bnxt_has_work(bp, cpr)) { in bnxt_inta()
1306 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS); in bnxt_inta()
1316 if (unlikely(atomic_read(&bp->intr_sem) != 0)) in bnxt_inta()
1323 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) in bnxt_poll_work() argument
1346 if (unlikely(tx_pkts > bp->tx_wake_thresh)) in bnxt_poll_work()
1349 rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &agg_event); in bnxt_poll_work()
1361 bnxt_hwrm_handler(bp, txcmp); in bnxt_poll_work()
1377 bnxt_tx_int(bp, bnapi, tx_pkts); in bnxt_poll_work()
1397 struct bnxt *bp = bnapi->bp; in bnxt_poll() local
1405 work_done += bnxt_poll_work(bp, bnapi, budget - work_done); in bnxt_poll()
1410 if (!bnxt_has_work(bp, cpr)) { in bnxt_poll()
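bnxt_poll() (lines 1397-1415) is a conventional NAPI loop: do up to budget packets of work, and only when no work remains complete NAPI and re-arm the completion-ring interrupt through the doorbell. A sketch under the old napi_complete() API, assuming the driver's BNXT_CP_DB_REARM doorbell macro:

static int bnxt_poll(struct napi_struct *napi, int budget)
{
        struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
        struct bnxt *bp = bnapi->bp;
        struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
        int work_done = 0;

        while (1) {
                work_done += bnxt_poll_work(bp, bnapi, budget - work_done);
                if (work_done >= budget)
                        break;          /* stay scheduled; more work likely */
                if (!bnxt_has_work(bp, cpr)) {
                        napi_complete(napi);
                        BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
                        break;
                }
        }
        return work_done;
}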
1425 struct bnxt *bp = bnapi->bp; in bnxt_busy_poll() local
1429 if (atomic_read(&bp->intr_sem) != 0) in bnxt_busy_poll()
1435 rx_work = bnxt_poll_work(bp, bnapi, budget); in bnxt_busy_poll()
1444 static void bnxt_free_tx_skbs(struct bnxt *bp) in bnxt_free_tx_skbs() argument
1447 struct pci_dev *pdev = bp->pdev; in bnxt_free_tx_skbs()
1449 if (!bp->bnapi) in bnxt_free_tx_skbs()
1452 max_idx = bp->tx_nr_pages * TX_DESC_CNT; in bnxt_free_tx_skbs()
1453 for (i = 0; i < bp->tx_nr_rings; i++) { in bnxt_free_tx_skbs()
1454 struct bnxt_napi *bnapi = bp->bnapi[i]; in bnxt_free_tx_skbs()
1498 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i)); in bnxt_free_tx_skbs()
1502 static void bnxt_free_rx_skbs(struct bnxt *bp) in bnxt_free_rx_skbs() argument
1505 struct pci_dev *pdev = bp->pdev; in bnxt_free_rx_skbs()
1507 if (!bp->bnapi) in bnxt_free_rx_skbs()
1510 max_idx = bp->rx_nr_pages * RX_DESC_CNT; in bnxt_free_rx_skbs()
1511 max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT; in bnxt_free_rx_skbs()
1512 for (i = 0; i < bp->rx_nr_rings; i++) { in bnxt_free_rx_skbs()
1513 struct bnxt_napi *bnapi = bp->bnapi[i]; in bnxt_free_rx_skbs()
1534 bp->rx_buf_use_size, in bnxt_free_rx_skbs()
1552 bp->rx_buf_use_size, in bnxt_free_rx_skbs()
1580 static void bnxt_free_skbs(struct bnxt *bp) in bnxt_free_skbs() argument
1582 bnxt_free_tx_skbs(bp); in bnxt_free_skbs()
1583 bnxt_free_rx_skbs(bp); in bnxt_free_skbs()
1586 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_struct *ring) in bnxt_free_ring() argument
1588 struct pci_dev *pdev = bp->pdev; in bnxt_free_ring()
1611 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_struct *ring) in bnxt_alloc_ring() argument
1614 struct pci_dev *pdev = bp->pdev; in bnxt_alloc_ring()
1645 static void bnxt_free_rx_rings(struct bnxt *bp) in bnxt_free_rx_rings() argument
1649 if (!bp->bnapi) in bnxt_free_rx_rings()
1652 for (i = 0; i < bp->rx_nr_rings; i++) { in bnxt_free_rx_rings()
1653 struct bnxt_napi *bnapi = bp->bnapi[i]; in bnxt_free_rx_rings()
1669 bnxt_free_ring(bp, ring); in bnxt_free_rx_rings()
1672 bnxt_free_ring(bp, ring); in bnxt_free_rx_rings()
1676 static int bnxt_alloc_rx_rings(struct bnxt *bp) in bnxt_alloc_rx_rings() argument
1680 if (bp->flags & BNXT_FLAG_AGG_RINGS) in bnxt_alloc_rx_rings()
1683 if (bp->flags & BNXT_FLAG_TPA) in bnxt_alloc_rx_rings()
1686 for (i = 0; i < bp->rx_nr_rings; i++) { in bnxt_alloc_rx_rings()
1687 struct bnxt_napi *bnapi = bp->bnapi[i]; in bnxt_alloc_rx_rings()
1697 rc = bnxt_alloc_ring(bp, ring); in bnxt_alloc_rx_rings()
1705 rc = bnxt_alloc_ring(bp, ring); in bnxt_alloc_rx_rings()
1709 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1; in bnxt_alloc_rx_rings()
1727 static void bnxt_free_tx_rings(struct bnxt *bp) in bnxt_free_tx_rings() argument
1730 struct pci_dev *pdev = bp->pdev; in bnxt_free_tx_rings()
1732 if (!bp->bnapi) in bnxt_free_tx_rings()
1735 for (i = 0; i < bp->tx_nr_rings; i++) { in bnxt_free_tx_rings()
1736 struct bnxt_napi *bnapi = bp->bnapi[i]; in bnxt_free_tx_rings()
1746 dma_free_coherent(&pdev->dev, bp->tx_push_size, in bnxt_free_tx_rings()
1753 bnxt_free_ring(bp, ring); in bnxt_free_tx_rings()
1757 static int bnxt_alloc_tx_rings(struct bnxt *bp) in bnxt_alloc_tx_rings() argument
1760 struct pci_dev *pdev = bp->pdev; in bnxt_alloc_tx_rings()
1762 bp->tx_push_size = 0; in bnxt_alloc_tx_rings()
1763 if (bp->tx_push_thresh) { in bnxt_alloc_tx_rings()
1767 bp->tx_push_thresh); in bnxt_alloc_tx_rings()
1771 bp->tx_push_thresh = 0; in bnxt_alloc_tx_rings()
1774 bp->tx_push_size = push_size; in bnxt_alloc_tx_rings()
1777 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) { in bnxt_alloc_tx_rings()
1778 struct bnxt_napi *bnapi = bp->bnapi[i]; in bnxt_alloc_tx_rings()
1788 rc = bnxt_alloc_ring(bp, ring); in bnxt_alloc_tx_rings()
1792 if (bp->tx_push_size) { in bnxt_alloc_tx_rings()
1800 bp->tx_push_size, in bnxt_alloc_tx_rings()
1815 ring->queue_id = bp->q_info[j].queue_id; in bnxt_alloc_tx_rings()
1816 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1)) in bnxt_alloc_tx_rings()
1822 static void bnxt_free_cp_rings(struct bnxt *bp) in bnxt_free_cp_rings() argument
1826 if (!bp->bnapi) in bnxt_free_cp_rings()
1829 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_free_cp_rings()
1830 struct bnxt_napi *bnapi = bp->bnapi[i]; in bnxt_free_cp_rings()
1840 bnxt_free_ring(bp, ring); in bnxt_free_cp_rings()
1844 static int bnxt_alloc_cp_rings(struct bnxt *bp) in bnxt_alloc_cp_rings() argument
1848 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_alloc_cp_rings()
1849 struct bnxt_napi *bnapi = bp->bnapi[i]; in bnxt_alloc_cp_rings()
1859 rc = bnxt_alloc_ring(bp, ring); in bnxt_alloc_cp_rings()
1866 static void bnxt_init_ring_struct(struct bnxt *bp) in bnxt_init_ring_struct() argument
1870 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_init_ring_struct()
1871 struct bnxt_napi *bnapi = bp->bnapi[i]; in bnxt_init_ring_struct()
1882 ring->nr_pages = bp->cp_nr_pages; in bnxt_init_ring_struct()
1890 ring->nr_pages = bp->rx_nr_pages; in bnxt_init_ring_struct()
1894 ring->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages; in bnxt_init_ring_struct()
1898 ring->nr_pages = bp->rx_agg_nr_pages; in bnxt_init_ring_struct()
1902 ring->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages; in bnxt_init_ring_struct()
1907 ring->nr_pages = bp->tx_nr_pages; in bnxt_init_ring_struct()
1911 ring->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages; in bnxt_init_ring_struct()
1938 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr) in bnxt_init_one_rx_ring() argument
1940 struct net_device *dev = bp->dev; in bnxt_init_one_rx_ring()
1941 struct bnxt_napi *bnapi = bp->bnapi[ring_nr]; in bnxt_init_one_rx_ring()
1950 type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) | in bnxt_init_one_rx_ring()
1961 for (i = 0; i < bp->rx_ring_size; i++) { in bnxt_init_one_rx_ring()
1962 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) { in bnxt_init_one_rx_ring()
1964 ring_nr, i, bp->rx_ring_size); in bnxt_init_one_rx_ring()
1972 if (!(bp->flags & BNXT_FLAG_AGG_RINGS)) in bnxt_init_one_rx_ring()
1983 for (i = 0; i < bp->rx_agg_ring_size; i++) { in bnxt_init_one_rx_ring()
1984 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) { in bnxt_init_one_rx_ring()
1986 ring_nr, i, bp->rx_ring_size); in bnxt_init_one_rx_ring()
1994 if (bp->flags & BNXT_FLAG_TPA) { in bnxt_init_one_rx_ring()
2000 data = __bnxt_alloc_rx_data(bp, &mapping, in bnxt_init_one_rx_ring()
2009 netdev_err(bp->dev, "No resource allocated for LRO/GRO\n"); in bnxt_init_one_rx_ring()
2017 static int bnxt_init_rx_rings(struct bnxt *bp) in bnxt_init_rx_rings() argument
2021 for (i = 0; i < bp->rx_nr_rings; i++) { in bnxt_init_rx_rings()
2022 rc = bnxt_init_one_rx_ring(bp, i); in bnxt_init_rx_rings()
2030 static int bnxt_init_tx_rings(struct bnxt *bp) in bnxt_init_tx_rings() argument
2034 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2, in bnxt_init_tx_rings()
2037 for (i = 0; i < bp->tx_nr_rings; i++) { in bnxt_init_tx_rings()
2038 struct bnxt_napi *bnapi = bp->bnapi[i]; in bnxt_init_tx_rings()
2048 static void bnxt_free_ring_grps(struct bnxt *bp) in bnxt_free_ring_grps() argument
2050 kfree(bp->grp_info); in bnxt_free_ring_grps()
2051 bp->grp_info = NULL; in bnxt_free_ring_grps()
2054 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init) in bnxt_init_ring_grps() argument
2059 bp->grp_info = kcalloc(bp->cp_nr_rings, in bnxt_init_ring_grps()
2062 if (!bp->grp_info) in bnxt_init_ring_grps()
2065 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_init_ring_grps()
2067 bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID; in bnxt_init_ring_grps()
2068 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID; in bnxt_init_ring_grps()
2069 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID; in bnxt_init_ring_grps()
2070 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID; in bnxt_init_ring_grps()
2071 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID; in bnxt_init_ring_grps()
2076 static void bnxt_free_vnics(struct bnxt *bp) in bnxt_free_vnics() argument
2078 kfree(bp->vnic_info); in bnxt_free_vnics()
2079 bp->vnic_info = NULL; in bnxt_free_vnics()
2080 bp->nr_vnics = 0; in bnxt_free_vnics()
2083 static int bnxt_alloc_vnics(struct bnxt *bp) in bnxt_alloc_vnics() argument
2088 if (bp->flags & BNXT_FLAG_RFS) in bnxt_alloc_vnics()
2089 num_vnics += bp->rx_nr_rings; in bnxt_alloc_vnics()
2092 bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info), in bnxt_alloc_vnics()
2094 if (!bp->vnic_info) in bnxt_alloc_vnics()
2097 bp->nr_vnics = num_vnics; in bnxt_alloc_vnics()
2101 static void bnxt_init_vnics(struct bnxt *bp) in bnxt_init_vnics() argument
2105 for (i = 0; i < bp->nr_vnics; i++) { in bnxt_init_vnics()
2106 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; in bnxt_init_vnics()
2112 if (bp->vnic_info[i].rss_hash_key) { in bnxt_init_vnics()
2118 bp->vnic_info[0].rss_hash_key, in bnxt_init_vnics()
2141 static void bnxt_set_tpa_flags(struct bnxt *bp) in bnxt_set_tpa_flags() argument
2143 bp->flags &= ~BNXT_FLAG_TPA; in bnxt_set_tpa_flags()
2144 if (bp->dev->features & NETIF_F_LRO) in bnxt_set_tpa_flags()
2145 bp->flags |= BNXT_FLAG_LRO; in bnxt_set_tpa_flags()
2146 if ((bp->dev->features & NETIF_F_GRO) && (bp->pdev->revision > 0)) in bnxt_set_tpa_flags()
2147 bp->flags |= BNXT_FLAG_GRO; in bnxt_set_tpa_flags()
2153 void bnxt_set_ring_params(struct bnxt *bp) in bnxt_set_ring_params() argument
2159 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8); in bnxt_set_ring_params()
2164 bp->rx_copy_thresh = BNXT_RX_COPY_THRESH; in bnxt_set_ring_params()
2165 ring_size = bp->rx_ring_size; in bnxt_set_ring_params()
2166 bp->rx_agg_ring_size = 0; in bnxt_set_ring_params()
2167 bp->rx_agg_nr_pages = 0; in bnxt_set_ring_params()
2169 if (bp->flags & BNXT_FLAG_TPA) in bnxt_set_ring_params()
2172 bp->flags &= ~BNXT_FLAG_JUMBO; in bnxt_set_ring_params()
2176 bp->flags |= BNXT_FLAG_JUMBO; in bnxt_set_ring_params()
2177 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT; in bnxt_set_ring_params()
2184 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size, in bnxt_set_ring_params()
2186 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) { in bnxt_set_ring_params()
2189 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES; in bnxt_set_ring_params()
2191 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n", in bnxt_set_ring_params()
2194 bp->rx_agg_ring_size = agg_ring_size; in bnxt_set_ring_params()
2195 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1; in bnxt_set_ring_params()
2201 bp->rx_buf_use_size = rx_size; in bnxt_set_ring_params()
2202 bp->rx_buf_size = rx_space; in bnxt_set_ring_params()
2204 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT); in bnxt_set_ring_params()
2205 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1; in bnxt_set_ring_params()
2207 ring_size = bp->tx_ring_size; in bnxt_set_ring_params()
2208 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT); in bnxt_set_ring_params()
2209 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1; in bnxt_set_ring_params()
2211 ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size; in bnxt_set_ring_params()
2212 bp->cp_ring_size = ring_size; in bnxt_set_ring_params()
2214 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT); in bnxt_set_ring_params()
2215 if (bp->cp_nr_pages > MAX_CP_PAGES) { in bnxt_set_ring_params()
2216 bp->cp_nr_pages = MAX_CP_PAGES; in bnxt_set_ring_params()
2217 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1; in bnxt_set_ring_params()
2218 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n", in bnxt_set_ring_params()
2219 ring_size, bp->cp_ring_size); in bnxt_set_ring_params()
2221 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT; in bnxt_set_ring_params()
2222 bp->cp_ring_mask = bp->cp_bit - 1; in bnxt_set_ring_params()
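All the mask assignments in bnxt_set_ring_params() (lines 2195, 2205, 2209, 2222) depend on nr_pages * DESC_CNT being a power of two, so that an AND with the mask replaces a modulo when indexing. A standalone model of the page/mask derivation with an illustrative descriptor count; the real values come from the 4K ring page size:

#include <assert.h>
#include <stdint.h>

#define RX_DESC_CNT 128u        /* illustrative; real value depends on the
                                 * ring page and descriptor sizes */

/* Hypothetical model of bnxt_calc_nr_ring_pages(): enough whole pages for
 * ring_size descriptors (the driver may round further so that
 * pages * DESC_CNT stays a power of two). */
static uint32_t calc_nr_ring_pages(uint32_t ring_size, uint32_t desc_per_pg)
{
        return (ring_size + desc_per_pg - 1) / desc_per_pg;
}

int main(void)
{
        uint32_t pages = calc_nr_ring_pages(511, RX_DESC_CNT);  /* 4 pages */
        uint32_t mask  = pages * RX_DESC_CNT - 1;               /* 0x1ff   */

        assert(pages == 4 && mask == 511);
        return 0;
}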
2225 static void bnxt_free_vnic_attributes(struct bnxt *bp) in bnxt_free_vnic_attributes() argument
2229 struct pci_dev *pdev = bp->pdev; in bnxt_free_vnic_attributes()
2231 if (!bp->vnic_info) in bnxt_free_vnic_attributes()
2234 for (i = 0; i < bp->nr_vnics; i++) { in bnxt_free_vnic_attributes()
2235 vnic = &bp->vnic_info[i]; in bnxt_free_vnic_attributes()
2261 static int bnxt_alloc_vnic_attributes(struct bnxt *bp) in bnxt_alloc_vnic_attributes() argument
2265 struct pci_dev *pdev = bp->pdev; in bnxt_alloc_vnic_attributes()
2268 for (i = 0; i < bp->nr_vnics; i++) { in bnxt_alloc_vnic_attributes()
2269 vnic = &bp->vnic_info[i]; in bnxt_alloc_vnic_attributes()
2297 max_rings = bp->rx_nr_rings; in bnxt_alloc_vnic_attributes()
2327 static void bnxt_free_hwrm_resources(struct bnxt *bp) in bnxt_free_hwrm_resources() argument
2329 struct pci_dev *pdev = bp->pdev; in bnxt_free_hwrm_resources()
2331 dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr, in bnxt_free_hwrm_resources()
2332 bp->hwrm_cmd_resp_dma_addr); in bnxt_free_hwrm_resources()
2334 bp->hwrm_cmd_resp_addr = NULL; in bnxt_free_hwrm_resources()
2335 if (bp->hwrm_dbg_resp_addr) { in bnxt_free_hwrm_resources()
2337 bp->hwrm_dbg_resp_addr, in bnxt_free_hwrm_resources()
2338 bp->hwrm_dbg_resp_dma_addr); in bnxt_free_hwrm_resources()
2340 bp->hwrm_dbg_resp_addr = NULL; in bnxt_free_hwrm_resources()
2344 static int bnxt_alloc_hwrm_resources(struct bnxt *bp) in bnxt_alloc_hwrm_resources() argument
2346 struct pci_dev *pdev = bp->pdev; in bnxt_alloc_hwrm_resources()
2348 bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, in bnxt_alloc_hwrm_resources()
2349 &bp->hwrm_cmd_resp_dma_addr, in bnxt_alloc_hwrm_resources()
2351 if (!bp->hwrm_cmd_resp_addr) in bnxt_alloc_hwrm_resources()
2353 bp->hwrm_dbg_resp_addr = dma_alloc_coherent(&pdev->dev, in bnxt_alloc_hwrm_resources()
2355 &bp->hwrm_dbg_resp_dma_addr, in bnxt_alloc_hwrm_resources()
2357 if (!bp->hwrm_dbg_resp_addr) in bnxt_alloc_hwrm_resources()
2358 netdev_warn(bp->dev, "fail to alloc debug register dma mem\n"); in bnxt_alloc_hwrm_resources()
2363 static void bnxt_free_stats(struct bnxt *bp) in bnxt_free_stats() argument
2366 struct pci_dev *pdev = bp->pdev; in bnxt_free_stats()
2368 if (!bp->bnapi) in bnxt_free_stats()
2373 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_free_stats()
2374 struct bnxt_napi *bnapi = bp->bnapi[i]; in bnxt_free_stats()
2385 static int bnxt_alloc_stats(struct bnxt *bp) in bnxt_alloc_stats() argument
2388 struct pci_dev *pdev = bp->pdev; in bnxt_alloc_stats()
2392 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_alloc_stats()
2393 struct bnxt_napi *bnapi = bp->bnapi[i]; in bnxt_alloc_stats()
2407 static void bnxt_clear_ring_indices(struct bnxt *bp) in bnxt_clear_ring_indices() argument
2411 if (!bp->bnapi) in bnxt_clear_ring_indices()
2414 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_clear_ring_indices()
2415 struct bnxt_napi *bnapi = bp->bnapi[i]; in bnxt_clear_ring_indices()
2437 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit) in bnxt_free_ntp_fltrs() argument
2450 head = &bp->ntp_fltr_hash_tbl[i]; in bnxt_free_ntp_fltrs()
2457 kfree(bp->ntp_fltr_bmap); in bnxt_free_ntp_fltrs()
2458 bp->ntp_fltr_bmap = NULL; in bnxt_free_ntp_fltrs()
2460 bp->ntp_fltr_count = 0; in bnxt_free_ntp_fltrs()
2464 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp) in bnxt_alloc_ntp_fltrs() argument
2469 if (!(bp->flags & BNXT_FLAG_RFS)) in bnxt_alloc_ntp_fltrs()
2473 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]); in bnxt_alloc_ntp_fltrs()
2475 bp->ntp_fltr_count = 0; in bnxt_alloc_ntp_fltrs()
2476 bp->ntp_fltr_bmap = kzalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR), in bnxt_alloc_ntp_fltrs()
2479 if (!bp->ntp_fltr_bmap) in bnxt_alloc_ntp_fltrs()
2488 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init) in bnxt_free_mem() argument
2490 bnxt_free_vnic_attributes(bp); in bnxt_free_mem()
2491 bnxt_free_tx_rings(bp); in bnxt_free_mem()
2492 bnxt_free_rx_rings(bp); in bnxt_free_mem()
2493 bnxt_free_cp_rings(bp); in bnxt_free_mem()
2494 bnxt_free_ntp_fltrs(bp, irq_re_init); in bnxt_free_mem()
2496 bnxt_free_stats(bp); in bnxt_free_mem()
2497 bnxt_free_ring_grps(bp); in bnxt_free_mem()
2498 bnxt_free_vnics(bp); in bnxt_free_mem()
2499 kfree(bp->bnapi); in bnxt_free_mem()
2500 bp->bnapi = NULL; in bnxt_free_mem()
2502 bnxt_clear_ring_indices(bp); in bnxt_free_mem()
2506 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init) in bnxt_alloc_mem() argument
2516 bp->cp_nr_rings); in bnxt_alloc_mem()
2518 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL); in bnxt_alloc_mem()
2522 bp->bnapi = bnapi; in bnxt_alloc_mem()
2524 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) { in bnxt_alloc_mem()
2525 bp->bnapi[i] = bnapi; in bnxt_alloc_mem()
2526 bp->bnapi[i]->index = i; in bnxt_alloc_mem()
2527 bp->bnapi[i]->bp = bp; in bnxt_alloc_mem()
2530 rc = bnxt_alloc_stats(bp); in bnxt_alloc_mem()
2534 rc = bnxt_alloc_ntp_fltrs(bp); in bnxt_alloc_mem()
2538 rc = bnxt_alloc_vnics(bp); in bnxt_alloc_mem()
2543 bnxt_init_ring_struct(bp); in bnxt_alloc_mem()
2545 rc = bnxt_alloc_rx_rings(bp); in bnxt_alloc_mem()
2549 rc = bnxt_alloc_tx_rings(bp); in bnxt_alloc_mem()
2553 rc = bnxt_alloc_cp_rings(bp); in bnxt_alloc_mem()
2557 bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG | in bnxt_alloc_mem()
2559 rc = bnxt_alloc_vnic_attributes(bp); in bnxt_alloc_mem()
2565 bnxt_free_mem(bp, true); in bnxt_alloc_mem()
2569 void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type, in bnxt_hwrm_cmd_hdr_init() argument
2577 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr); in bnxt_hwrm_cmd_hdr_init()
2580 int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) in _hwrm_send_message() argument
2587 struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr; in _hwrm_send_message()
2589 req->target_id_seq_id |= cpu_to_le32(bp->hwrm_cmd_seq++); in _hwrm_send_message()
2597 __iowrite32_copy(bp->bar0, data, msg_len / 4); in _hwrm_send_message()
2601 bp->hwrm_intr_seq_id = le32_to_cpu(req->target_id_seq_id) & in _hwrm_send_message()
2605 writel(1, bp->bar0 + 0x100); in _hwrm_send_message()
2610 while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID && in _hwrm_send_message()
2615 if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) { in _hwrm_send_message()
2616 netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n", in _hwrm_send_message()
2622 resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET; in _hwrm_send_message()
2632 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n", in _hwrm_send_message()
2639 valid = bp->hwrm_cmd_resp_addr + len - 4; in _hwrm_send_message()
2647 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n", in _hwrm_send_message()
2656 netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n", in _hwrm_send_message()
2664 int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) in hwrm_send_message() argument
2668 mutex_lock(&bp->hwrm_cmd_lock); in hwrm_send_message()
2669 rc = _hwrm_send_message(bp, msg, msg_len, timeout); in hwrm_send_message()
2670 mutex_unlock(&bp->hwrm_cmd_lock); in hwrm_send_message()
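The pair above completes the two-tier messaging API: _hwrm_send_message() (lines 2580-2656) assumes the caller already holds hwrm_cmd_lock, because every command shares the single response page at bp->hwrm_cmd_resp_addr, while hwrm_send_message() wraps it in the lock for callers that ignore the response. Sketch callers in each style, modeled on the helpers later in this listing (not verbatim driver code; the response field name is assumed):

/* Fire-and-forget: let the wrapper take the lock (cf. line 3684). */
static int example_reset(struct bnxt *bp)
{
        struct hwrm_func_reset_input req = {0};

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
        return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
}

/* Response-parsing: hold the lock across the send AND the read of *resp,
 * since the next command overwrites the shared buffer (cf. 3039-3054). */
static int example_ctx_alloc(struct bnxt *bp, u16 vnic_id)
{
        struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
        struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
                                                bp->hwrm_cmd_resp_addr;
        int rc;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC,
                               -1, -1);
        mutex_lock(&bp->hwrm_cmd_lock);
        rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (!rc)
                bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx =
                        le16_to_cpu(resp->rss_cos_lb_ctx_id); /* assumed name */
        mutex_unlock(&bp->hwrm_cmd_lock);
        return rc;
}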
2674 static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp) in bnxt_hwrm_func_drv_rgtr() argument
2679 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1); in bnxt_hwrm_func_drv_rgtr()
2695 if (BNXT_PF(bp)) { in bnxt_hwrm_func_drv_rgtr()
2710 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); in bnxt_hwrm_func_drv_rgtr()
2713 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type) in bnxt_hwrm_tunnel_dst_port_free() argument
2718 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1); in bnxt_hwrm_tunnel_dst_port_free()
2723 req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id; in bnxt_hwrm_tunnel_dst_port_free()
2726 req.tunnel_dst_port_id = bp->nge_fw_dst_port_id; in bnxt_hwrm_tunnel_dst_port_free()
2732 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); in bnxt_hwrm_tunnel_dst_port_free()
2734 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n", in bnxt_hwrm_tunnel_dst_port_free()
2739 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port, in bnxt_hwrm_tunnel_dst_port_alloc() argument
2744 struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr; in bnxt_hwrm_tunnel_dst_port_alloc()
2746 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1); in bnxt_hwrm_tunnel_dst_port_alloc()
2751 mutex_lock(&bp->hwrm_cmd_lock); in bnxt_hwrm_tunnel_dst_port_alloc()
2752 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); in bnxt_hwrm_tunnel_dst_port_alloc()
2754 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n", in bnxt_hwrm_tunnel_dst_port_alloc()
2760 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id; in bnxt_hwrm_tunnel_dst_port_alloc()
2763 bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id; in bnxt_hwrm_tunnel_dst_port_alloc()
2765 mutex_unlock(&bp->hwrm_cmd_lock); in bnxt_hwrm_tunnel_dst_port_alloc()
2769 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id) in bnxt_hwrm_cfa_l2_set_rx_mask() argument
2772 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; in bnxt_hwrm_cfa_l2_set_rx_mask()
2774 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1); in bnxt_hwrm_cfa_l2_set_rx_mask()
2780 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); in bnxt_hwrm_cfa_l2_set_rx_mask()
2784 static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp, in bnxt_hwrm_cfa_ntuple_filter_free() argument
2789 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1); in bnxt_hwrm_cfa_ntuple_filter_free()
2791 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); in bnxt_hwrm_cfa_ntuple_filter_free()
2810 static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, in bnxt_hwrm_cfa_ntuple_filter_alloc() argument
2816 bp->hwrm_cmd_resp_addr; in bnxt_hwrm_cfa_ntuple_filter_alloc()
2818 struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1]; in bnxt_hwrm_cfa_ntuple_filter_alloc()
2820 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1); in bnxt_hwrm_cfa_ntuple_filter_alloc()
2821 req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[0]; in bnxt_hwrm_cfa_ntuple_filter_alloc()
2841 mutex_lock(&bp->hwrm_cmd_lock); in bnxt_hwrm_cfa_ntuple_filter_alloc()
2842 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); in bnxt_hwrm_cfa_ntuple_filter_alloc()
2845 mutex_unlock(&bp->hwrm_cmd_lock); in bnxt_hwrm_cfa_ntuple_filter_alloc()
2850 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx, in bnxt_hwrm_set_vnic_filter() argument
2855 struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr; in bnxt_hwrm_set_vnic_filter()
2857 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1); in bnxt_hwrm_set_vnic_filter()
2860 req.dst_vnic_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id); in bnxt_hwrm_set_vnic_filter()
2873 mutex_lock(&bp->hwrm_cmd_lock); in bnxt_hwrm_set_vnic_filter()
2874 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); in bnxt_hwrm_set_vnic_filter()
2876 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] = in bnxt_hwrm_set_vnic_filter()
2878 mutex_unlock(&bp->hwrm_cmd_lock); in bnxt_hwrm_set_vnic_filter()
2882 static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp) in bnxt_hwrm_clear_vnic_filter() argument
2888 mutex_lock(&bp->hwrm_cmd_lock); in bnxt_hwrm_clear_vnic_filter()
2890 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; in bnxt_hwrm_clear_vnic_filter()
2895 bnxt_hwrm_cmd_hdr_init(bp, &req, in bnxt_hwrm_clear_vnic_filter()
2900 rc = _hwrm_send_message(bp, &req, sizeof(req), in bnxt_hwrm_clear_vnic_filter()
2905 mutex_unlock(&bp->hwrm_cmd_lock); in bnxt_hwrm_clear_vnic_filter()
2910 static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags) in bnxt_hwrm_vnic_set_tpa() argument
2912 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; in bnxt_hwrm_vnic_set_tpa()
2915 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1); in bnxt_hwrm_vnic_set_tpa()
2918 u16 mss = bp->dev->mtu - 40; in bnxt_hwrm_vnic_set_tpa()
2954 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); in bnxt_hwrm_vnic_set_tpa()
2957 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss) in bnxt_hwrm_vnic_set_rss() argument
2960 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; in bnxt_hwrm_vnic_set_rss()
2966 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1); in bnxt_hwrm_vnic_set_rss()
2976 max_rings = bp->rx_nr_rings; in bnxt_hwrm_vnic_set_rss()
2992 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); in bnxt_hwrm_vnic_set_rss()
2995 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id) in bnxt_hwrm_vnic_set_hds() argument
2997 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; in bnxt_hwrm_vnic_set_hds()
3000 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1); in bnxt_hwrm_vnic_set_hds()
3008 req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh); in bnxt_hwrm_vnic_set_hds()
3009 req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh); in bnxt_hwrm_vnic_set_hds()
3011 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); in bnxt_hwrm_vnic_set_hds()
3014 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id) in bnxt_hwrm_vnic_ctx_free_one() argument
3018 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1); in bnxt_hwrm_vnic_ctx_free_one()
3020 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx); in bnxt_hwrm_vnic_ctx_free_one()
3022 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); in bnxt_hwrm_vnic_ctx_free_one()
3023 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx = INVALID_HW_RING_ID; in bnxt_hwrm_vnic_ctx_free_one()
3026 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp) in bnxt_hwrm_vnic_ctx_free() argument
3030 for (i = 0; i < bp->nr_vnics; i++) { in bnxt_hwrm_vnic_ctx_free()
3031 struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; in bnxt_hwrm_vnic_ctx_free()
3034 bnxt_hwrm_vnic_ctx_free_one(bp, i); in bnxt_hwrm_vnic_ctx_free()
3036 bp->rsscos_nr_ctxs = 0; in bnxt_hwrm_vnic_ctx_free()
3039 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id) in bnxt_hwrm_vnic_ctx_alloc() argument
3044 bp->hwrm_cmd_resp_addr; in bnxt_hwrm_vnic_ctx_alloc()
3046 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1, in bnxt_hwrm_vnic_ctx_alloc()
3049 mutex_lock(&bp->hwrm_cmd_lock); in bnxt_hwrm_vnic_ctx_alloc()
3050 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); in bnxt_hwrm_vnic_ctx_alloc()
3052 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx = in bnxt_hwrm_vnic_ctx_alloc()
3054 mutex_unlock(&bp->hwrm_cmd_lock); in bnxt_hwrm_vnic_ctx_alloc()
3059 static int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id) in bnxt_hwrm_vnic_cfg() argument
3062 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; in bnxt_hwrm_vnic_cfg()
3065 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1); in bnxt_hwrm_vnic_cfg()
3077 req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id); in bnxt_hwrm_vnic_cfg()
3080 req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + in bnxt_hwrm_vnic_cfg()
3083 if (bp->flags & BNXT_FLAG_STRIP_VLAN) in bnxt_hwrm_vnic_cfg()
3086 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); in bnxt_hwrm_vnic_cfg()
3089 static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id) in bnxt_hwrm_vnic_free_one() argument
3093 if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) { in bnxt_hwrm_vnic_free_one()
3096 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1); in bnxt_hwrm_vnic_free_one()
3098 cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id); in bnxt_hwrm_vnic_free_one()
3100 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); in bnxt_hwrm_vnic_free_one()
3103 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID; in bnxt_hwrm_vnic_free_one()
3108 static void bnxt_hwrm_vnic_free(struct bnxt *bp) in bnxt_hwrm_vnic_free() argument
3112 for (i = 0; i < bp->nr_vnics; i++) in bnxt_hwrm_vnic_free()
3113 bnxt_hwrm_vnic_free_one(bp, i); in bnxt_hwrm_vnic_free()
3116 static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id, u16 start_grp_id, in bnxt_hwrm_vnic_alloc() argument
3121 struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr; in bnxt_hwrm_vnic_alloc()
3125 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID) { in bnxt_hwrm_vnic_alloc()
3126 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n", in bnxt_hwrm_vnic_alloc()
3130 bp->vnic_info[vnic_id].fw_grp_ids[j] = in bnxt_hwrm_vnic_alloc()
3131 bp->grp_info[i].fw_grp_id; in bnxt_hwrm_vnic_alloc()
3134 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx = INVALID_HW_RING_ID; in bnxt_hwrm_vnic_alloc()
3138 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1); in bnxt_hwrm_vnic_alloc()
3140 mutex_lock(&bp->hwrm_cmd_lock); in bnxt_hwrm_vnic_alloc()
3141 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); in bnxt_hwrm_vnic_alloc()
3143 bp->vnic_info[vnic_id].fw_vnic_id = le32_to_cpu(resp->vnic_id); in bnxt_hwrm_vnic_alloc()
3144 mutex_unlock(&bp->hwrm_cmd_lock); in bnxt_hwrm_vnic_alloc()
3148 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp) in bnxt_hwrm_ring_grp_alloc() argument
3153 mutex_lock(&bp->hwrm_cmd_lock); in bnxt_hwrm_ring_grp_alloc()
3154 for (i = 0; i < bp->rx_nr_rings; i++) { in bnxt_hwrm_ring_grp_alloc()
3157 bp->hwrm_cmd_resp_addr; in bnxt_hwrm_ring_grp_alloc()
3159 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1); in bnxt_hwrm_ring_grp_alloc()
3161 req.cr = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id); in bnxt_hwrm_ring_grp_alloc()
3162 req.rr = cpu_to_le16(bp->grp_info[i].rx_fw_ring_id); in bnxt_hwrm_ring_grp_alloc()
3163 req.ar = cpu_to_le16(bp->grp_info[i].agg_fw_ring_id); in bnxt_hwrm_ring_grp_alloc()
3164 req.sc = cpu_to_le16(bp->grp_info[i].fw_stats_ctx); in bnxt_hwrm_ring_grp_alloc()
3166 rc = _hwrm_send_message(bp, &req, sizeof(req), in bnxt_hwrm_ring_grp_alloc()
3171 bp->grp_info[i].fw_grp_id = le32_to_cpu(resp->ring_group_id); in bnxt_hwrm_ring_grp_alloc()
3173 mutex_unlock(&bp->hwrm_cmd_lock); in bnxt_hwrm_ring_grp_alloc()
3177 static int bnxt_hwrm_ring_grp_free(struct bnxt *bp) in bnxt_hwrm_ring_grp_free() argument
3183 if (!bp->grp_info) in bnxt_hwrm_ring_grp_free()
3186 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1); in bnxt_hwrm_ring_grp_free()
3188 mutex_lock(&bp->hwrm_cmd_lock); in bnxt_hwrm_ring_grp_free()
3189 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_hwrm_ring_grp_free()
3190 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID) in bnxt_hwrm_ring_grp_free()
3193 cpu_to_le32(bp->grp_info[i].fw_grp_id); in bnxt_hwrm_ring_grp_free()
3195 rc = _hwrm_send_message(bp, &req, sizeof(req), in bnxt_hwrm_ring_grp_free()
3199 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID; in bnxt_hwrm_ring_grp_free()
3201 mutex_unlock(&bp->hwrm_cmd_lock); in bnxt_hwrm_ring_grp_free()
3205 static int hwrm_ring_alloc_send_msg(struct bnxt *bp, in hwrm_ring_alloc_send_msg() argument
3212 struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr; in hwrm_ring_alloc_send_msg()
3215 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1); in hwrm_ring_alloc_send_msg()
3235 cpu_to_le16(bp->grp_info[map_index].cp_fw_ring_id); in hwrm_ring_alloc_send_msg()
3236 req.length = cpu_to_le32(bp->tx_ring_mask + 1); in hwrm_ring_alloc_send_msg()
3242 req.length = cpu_to_le32(bp->rx_ring_mask + 1); in hwrm_ring_alloc_send_msg()
3246 req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1); in hwrm_ring_alloc_send_msg()
3250 req.length = cpu_to_le32(bp->cp_ring_mask + 1); in hwrm_ring_alloc_send_msg()
3251 if (bp->flags & BNXT_FLAG_USING_MSIX) in hwrm_ring_alloc_send_msg()
3255 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n", in hwrm_ring_alloc_send_msg()
3260 mutex_lock(&bp->hwrm_cmd_lock); in hwrm_ring_alloc_send_msg()
3261 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); in hwrm_ring_alloc_send_msg()
3264 mutex_unlock(&bp->hwrm_cmd_lock); in hwrm_ring_alloc_send_msg()
3269 netdev_err(bp->dev, "hwrm_ring_alloc cp failed. rc:%x err:%x\n", in hwrm_ring_alloc_send_msg()
3274 netdev_err(bp->dev, "hwrm_ring_alloc rx failed. rc:%x err:%x\n", in hwrm_ring_alloc_send_msg()
3279 netdev_err(bp->dev, "hwrm_ring_alloc tx failed. rc:%x err:%x\n", in hwrm_ring_alloc_send_msg()
3284 netdev_err(bp->dev, "Invalid ring\n"); in hwrm_ring_alloc_send_msg()
3292 static int bnxt_hwrm_ring_alloc(struct bnxt *bp) in bnxt_hwrm_ring_alloc() argument
3296 if (bp->cp_nr_rings) { in bnxt_hwrm_ring_alloc()
3297 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_hwrm_ring_alloc()
3298 struct bnxt_napi *bnapi = bp->bnapi[i]; in bnxt_hwrm_ring_alloc()
3302 rc = hwrm_ring_alloc_send_msg(bp, ring, in bnxt_hwrm_ring_alloc()
3307 cpr->cp_doorbell = bp->bar1 + i * 0x80; in bnxt_hwrm_ring_alloc()
3309 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id; in bnxt_hwrm_ring_alloc()
3313 if (bp->tx_nr_rings) { in bnxt_hwrm_ring_alloc()
3314 for (i = 0; i < bp->tx_nr_rings; i++) { in bnxt_hwrm_ring_alloc()
3315 struct bnxt_napi *bnapi = bp->bnapi[i]; in bnxt_hwrm_ring_alloc()
3318 u16 fw_stats_ctx = bp->grp_info[i].fw_stats_ctx; in bnxt_hwrm_ring_alloc()
3320 rc = hwrm_ring_alloc_send_msg(bp, ring, in bnxt_hwrm_ring_alloc()
3325 txr->tx_doorbell = bp->bar1 + i * 0x80; in bnxt_hwrm_ring_alloc()
3329 if (bp->rx_nr_rings) { in bnxt_hwrm_ring_alloc()
3330 for (i = 0; i < bp->rx_nr_rings; i++) { in bnxt_hwrm_ring_alloc()
3331 struct bnxt_napi *bnapi = bp->bnapi[i]; in bnxt_hwrm_ring_alloc()
3335 rc = hwrm_ring_alloc_send_msg(bp, ring, in bnxt_hwrm_ring_alloc()
3340 rxr->rx_doorbell = bp->bar1 + i * 0x80; in bnxt_hwrm_ring_alloc()
3342 bp->grp_info[i].rx_fw_ring_id = ring->fw_ring_id; in bnxt_hwrm_ring_alloc()
3346 if (bp->flags & BNXT_FLAG_AGG_RINGS) { in bnxt_hwrm_ring_alloc()
3347 for (i = 0; i < bp->rx_nr_rings; i++) { in bnxt_hwrm_ring_alloc()
3348 struct bnxt_napi *bnapi = bp->bnapi[i]; in bnxt_hwrm_ring_alloc()
3353 rc = hwrm_ring_alloc_send_msg(bp, ring, in bnxt_hwrm_ring_alloc()
3355 bp->rx_nr_rings + i, in bnxt_hwrm_ring_alloc()
3361 bp->bar1 + (bp->rx_nr_rings + i) * 0x80; in bnxt_hwrm_ring_alloc()
3364 bp->grp_info[i].agg_fw_ring_id = ring->fw_ring_id; in bnxt_hwrm_ring_alloc()
3371 static int hwrm_ring_free_send_msg(struct bnxt *bp, in hwrm_ring_free_send_msg() argument
3377 struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr; in hwrm_ring_free_send_msg()
3380 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, -1, -1); in hwrm_ring_free_send_msg()
3384 mutex_lock(&bp->hwrm_cmd_lock); in hwrm_ring_free_send_msg()
3385 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); in hwrm_ring_free_send_msg()
3387 mutex_unlock(&bp->hwrm_cmd_lock); in hwrm_ring_free_send_msg()
3392 netdev_err(bp->dev, "hwrm_ring_free cp failed. rc:%d\n", in hwrm_ring_free_send_msg()
3396 netdev_err(bp->dev, "hwrm_ring_free rx failed. rc:%d\n", in hwrm_ring_free_send_msg()
3400 netdev_err(bp->dev, "hwrm_ring_free tx failed. rc:%d\n", in hwrm_ring_free_send_msg()
3404 netdev_err(bp->dev, "Invalid ring\n"); in hwrm_ring_free_send_msg()
3411 static int bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) in bnxt_hwrm_ring_free() argument
3415 if (!bp->bnapi) in bnxt_hwrm_ring_free()
3418 if (bp->tx_nr_rings) { in bnxt_hwrm_ring_free()
3419 for (i = 0; i < bp->tx_nr_rings; i++) { in bnxt_hwrm_ring_free()
3420 struct bnxt_napi *bnapi = bp->bnapi[i]; in bnxt_hwrm_ring_free()
3423 u32 cmpl_ring_id = bp->grp_info[i].cp_fw_ring_id; in bnxt_hwrm_ring_free()
3427 bp, ring, in bnxt_hwrm_ring_free()
3436 if (bp->rx_nr_rings) { in bnxt_hwrm_ring_free()
3437 for (i = 0; i < bp->rx_nr_rings; i++) { in bnxt_hwrm_ring_free()
3438 struct bnxt_napi *bnapi = bp->bnapi[i]; in bnxt_hwrm_ring_free()
3441 u32 cmpl_ring_id = bp->grp_info[i].cp_fw_ring_id; in bnxt_hwrm_ring_free()
3445 bp, ring, in bnxt_hwrm_ring_free()
3450 bp->grp_info[i].rx_fw_ring_id = in bnxt_hwrm_ring_free()
3456 if (bp->rx_agg_nr_pages) { in bnxt_hwrm_ring_free()
3457 for (i = 0; i < bp->rx_nr_rings; i++) { in bnxt_hwrm_ring_free()
3458 struct bnxt_napi *bnapi = bp->bnapi[i]; in bnxt_hwrm_ring_free()
3462 u32 cmpl_ring_id = bp->grp_info[i].cp_fw_ring_id; in bnxt_hwrm_ring_free()
3466 bp, ring, in bnxt_hwrm_ring_free()
3471 bp->grp_info[i].agg_fw_ring_id = in bnxt_hwrm_ring_free()
3477 if (bp->cp_nr_rings) { in bnxt_hwrm_ring_free()
3478 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_hwrm_ring_free()
3479 struct bnxt_napi *bnapi = bp->bnapi[i]; in bnxt_hwrm_ring_free()
3485 bp, ring, in bnxt_hwrm_ring_free()
3489 bp->grp_info[i].cp_fw_ring_id = in bnxt_hwrm_ring_free()
3498 int bnxt_hwrm_set_coal(struct bnxt *bp) in bnxt_hwrm_set_coal() argument
3506 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, in bnxt_hwrm_set_coal()
3510 max_buf = min_t(u16, bp->coal_bufs / 4, 2); in bnxt_hwrm_set_coal()
3513 max_buf_irq = clamp_t(u16, bp->coal_bufs_irq, 1, 63); in bnxt_hwrm_set_coal()
3514 buf_tmr = max_t(u16, bp->coal_ticks / 4, 1); in bnxt_hwrm_set_coal()
3515 buf_tmr_irq = max_t(u16, bp->coal_ticks_irq, 1); in bnxt_hwrm_set_coal()
3522 if (BNXT_COAL_TIMER_TO_USEC(bp->coal_ticks) < 25) in bnxt_hwrm_set_coal()
3531 req.int_lat_tmr_max = cpu_to_le16(bp->coal_ticks); in bnxt_hwrm_set_coal()
3532 req.num_cmpl_aggr_int = cpu_to_le16(bp->coal_bufs); in bnxt_hwrm_set_coal()
3534 mutex_lock(&bp->hwrm_cmd_lock); in bnxt_hwrm_set_coal()
3535 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_hwrm_set_coal()
3536 req.ring_id = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id); in bnxt_hwrm_set_coal()
3538 rc = _hwrm_send_message(bp, &req, sizeof(req), in bnxt_hwrm_set_coal()
3543 mutex_unlock(&bp->hwrm_cmd_lock); in bnxt_hwrm_set_coal()
3547 static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp) in bnxt_hwrm_stat_ctx_free() argument
3552 if (!bp->bnapi) in bnxt_hwrm_stat_ctx_free()
3555 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1); in bnxt_hwrm_stat_ctx_free()
3557 mutex_lock(&bp->hwrm_cmd_lock); in bnxt_hwrm_stat_ctx_free()
3558 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_hwrm_stat_ctx_free()
3559 struct bnxt_napi *bnapi = bp->bnapi[i]; in bnxt_hwrm_stat_ctx_free()
3565 rc = _hwrm_send_message(bp, &req, sizeof(req), in bnxt_hwrm_stat_ctx_free()
3573 mutex_unlock(&bp->hwrm_cmd_lock); in bnxt_hwrm_stat_ctx_free()
3577 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp) in bnxt_hwrm_stat_ctx_alloc() argument
3581 struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr; in bnxt_hwrm_stat_ctx_alloc()
3583 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1); in bnxt_hwrm_stat_ctx_alloc()
3587 mutex_lock(&bp->hwrm_cmd_lock); in bnxt_hwrm_stat_ctx_alloc()
3588 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_hwrm_stat_ctx_alloc()
3589 struct bnxt_napi *bnapi = bp->bnapi[i]; in bnxt_hwrm_stat_ctx_alloc()
3594 rc = _hwrm_send_message(bp, &req, sizeof(req), in bnxt_hwrm_stat_ctx_alloc()
3601 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id; in bnxt_hwrm_stat_ctx_alloc()
3603 mutex_unlock(&bp->hwrm_cmd_lock); in bnxt_hwrm_stat_ctx_alloc()
3607 static int bnxt_hwrm_func_qcaps(struct bnxt *bp) in bnxt_hwrm_func_qcaps() argument
3611 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr; in bnxt_hwrm_func_qcaps()
3613 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1); in bnxt_hwrm_func_qcaps()
3616 mutex_lock(&bp->hwrm_cmd_lock); in bnxt_hwrm_func_qcaps()
3617 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); in bnxt_hwrm_func_qcaps()
3621 if (BNXT_PF(bp)) { in bnxt_hwrm_func_qcaps()
3622 struct bnxt_pf_info *pf = &bp->pf; in bnxt_hwrm_func_qcaps()
3627 memcpy(bp->dev->dev_addr, pf->mac_addr, ETH_ALEN); in bnxt_hwrm_func_qcaps()
3647 struct bnxt_vf_info *vf = &bp->vf; in bnxt_hwrm_func_qcaps()
3653 memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN); in bnxt_hwrm_func_qcaps()
3655 random_ether_addr(bp->dev->dev_addr); in bnxt_hwrm_func_qcaps()
3667 bp->tx_push_thresh = 0; in bnxt_hwrm_func_qcaps()
3670 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH; in bnxt_hwrm_func_qcaps()
3673 mutex_unlock(&bp->hwrm_cmd_lock); in bnxt_hwrm_func_qcaps()
3677 static int bnxt_hwrm_func_reset(struct bnxt *bp) in bnxt_hwrm_func_reset() argument
3681 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1); in bnxt_hwrm_func_reset()
3684 return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT); in bnxt_hwrm_func_reset()
3687 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp) in bnxt_hwrm_queue_qportcfg() argument
3691 struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr; in bnxt_hwrm_queue_qportcfg()
3694 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1); in bnxt_hwrm_queue_qportcfg()
3696 mutex_lock(&bp->hwrm_cmd_lock); in bnxt_hwrm_queue_qportcfg()
3697 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); in bnxt_hwrm_queue_qportcfg()
3705 bp->max_tc = resp->max_configurable_queues; in bnxt_hwrm_queue_qportcfg()
3706 if (bp->max_tc > BNXT_MAX_QUEUE) in bnxt_hwrm_queue_qportcfg()
3707 bp->max_tc = BNXT_MAX_QUEUE; in bnxt_hwrm_queue_qportcfg()
3710 for (i = 0; i < bp->max_tc; i++) { in bnxt_hwrm_queue_qportcfg()
3711 bp->q_info[i].queue_id = *qptr++; in bnxt_hwrm_queue_qportcfg()
3712 bp->q_info[i].queue_profile = *qptr++; in bnxt_hwrm_queue_qportcfg()
3716 mutex_unlock(&bp->hwrm_cmd_lock); in bnxt_hwrm_queue_qportcfg()
3720 static int bnxt_hwrm_ver_get(struct bnxt *bp) in bnxt_hwrm_ver_get() argument
3724 struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr; in bnxt_hwrm_ver_get()
3726 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1); in bnxt_hwrm_ver_get()
3730 mutex_lock(&bp->hwrm_cmd_lock); in bnxt_hwrm_ver_get()
3731 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); in bnxt_hwrm_ver_get()
3735 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output)); in bnxt_hwrm_ver_get()
3740 netdev_warn(bp->dev, "HWRM interface %d.%d.%d does not match driver interface %d.%d.%d.\n", in bnxt_hwrm_ver_get()
3744 netdev_warn(bp->dev, "Please update driver or firmware with matching interface versions.\n"); in bnxt_hwrm_ver_get()
3746 snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "bc %d.%d.%d rm %d.%d.%d", in bnxt_hwrm_ver_get()
3751 mutex_unlock(&bp->hwrm_cmd_lock); in bnxt_hwrm_ver_get()
3755 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp) in bnxt_hwrm_free_tunnel_ports() argument
3757 if (bp->vxlan_port_cnt) { in bnxt_hwrm_free_tunnel_ports()
3759 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); in bnxt_hwrm_free_tunnel_ports()
3761 bp->vxlan_port_cnt = 0; in bnxt_hwrm_free_tunnel_ports()
3762 if (bp->nge_port_cnt) { in bnxt_hwrm_free_tunnel_ports()
3764 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); in bnxt_hwrm_free_tunnel_ports()
3766 bp->nge_port_cnt = 0; in bnxt_hwrm_free_tunnel_ports()
3769 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa) in bnxt_set_tpa() argument
3775 tpa_flags = bp->flags & BNXT_FLAG_TPA; in bnxt_set_tpa()
3776 for (i = 0; i < bp->nr_vnics; i++) { in bnxt_set_tpa()
3777 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags); in bnxt_set_tpa()
3779 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n", in bnxt_set_tpa()
3787 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp) in bnxt_hwrm_clear_vnic_rss() argument
3791 for (i = 0; i < bp->nr_vnics; i++) in bnxt_hwrm_clear_vnic_rss()
3792 bnxt_hwrm_vnic_set_rss(bp, i, false); in bnxt_hwrm_clear_vnic_rss()
3795 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path, in bnxt_hwrm_resource_free() argument
3798 if (bp->vnic_info) { in bnxt_hwrm_resource_free()
3799 bnxt_hwrm_clear_vnic_filter(bp); in bnxt_hwrm_resource_free()
3801 bnxt_hwrm_clear_vnic_rss(bp); in bnxt_hwrm_resource_free()
3802 bnxt_hwrm_vnic_ctx_free(bp); in bnxt_hwrm_resource_free()
3804 if (bp->flags & BNXT_FLAG_TPA) in bnxt_hwrm_resource_free()
3805 bnxt_set_tpa(bp, false); in bnxt_hwrm_resource_free()
3806 bnxt_hwrm_vnic_free(bp); in bnxt_hwrm_resource_free()
3808 bnxt_hwrm_ring_free(bp, close_path); in bnxt_hwrm_resource_free()
3809 bnxt_hwrm_ring_grp_free(bp); in bnxt_hwrm_resource_free()
3811 bnxt_hwrm_stat_ctx_free(bp); in bnxt_hwrm_resource_free()
3812 bnxt_hwrm_free_tunnel_ports(bp); in bnxt_hwrm_resource_free()
3816 static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) in bnxt_setup_vnic() argument
3821 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id); in bnxt_setup_vnic()
3823 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", in bnxt_setup_vnic()
3827 bp->rsscos_nr_ctxs++; in bnxt_setup_vnic()
3830 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id); in bnxt_setup_vnic()
3832 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n", in bnxt_setup_vnic()
3838 rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true); in bnxt_setup_vnic()
3840 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n", in bnxt_setup_vnic()
3845 if (bp->flags & BNXT_FLAG_AGG_RINGS) { in bnxt_setup_vnic()
3846 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id); in bnxt_setup_vnic()
3848 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n", in bnxt_setup_vnic()
3857 static int bnxt_alloc_rfs_vnics(struct bnxt *bp) in bnxt_alloc_rfs_vnics() argument
3862 for (i = 0; i < bp->rx_nr_rings; i++) { in bnxt_alloc_rfs_vnics()
3866 if (vnic_id >= bp->nr_vnics) in bnxt_alloc_rfs_vnics()
3869 bp->vnic_info[vnic_id].flags |= BNXT_VNIC_RFS_FLAG; in bnxt_alloc_rfs_vnics()
3870 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, ring_id + 1); in bnxt_alloc_rfs_vnics()
3872 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", in bnxt_alloc_rfs_vnics()
3876 rc = bnxt_setup_vnic(bp, vnic_id); in bnxt_alloc_rfs_vnics()
3888 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) in bnxt_init_chip() argument
3893 rc = bnxt_hwrm_stat_ctx_alloc(bp); in bnxt_init_chip()
3895 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n", in bnxt_init_chip()
3901 rc = bnxt_hwrm_ring_alloc(bp); in bnxt_init_chip()
3903 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc); in bnxt_init_chip()
3907 rc = bnxt_hwrm_ring_grp_alloc(bp); in bnxt_init_chip()
3909 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc); in bnxt_init_chip()
3914 rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, bp->rx_nr_rings); in bnxt_init_chip()
3916 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc); in bnxt_init_chip()
3920 rc = bnxt_setup_vnic(bp, 0); in bnxt_init_chip()
3924 if (bp->flags & BNXT_FLAG_RFS) { in bnxt_init_chip()
3925 rc = bnxt_alloc_rfs_vnics(bp); in bnxt_init_chip()
3930 if (bp->flags & BNXT_FLAG_TPA) { in bnxt_init_chip()
3931 rc = bnxt_set_tpa(bp, true); in bnxt_init_chip()
3936 if (BNXT_VF(bp)) in bnxt_init_chip()
3937 bnxt_update_vf_mac(bp); in bnxt_init_chip()
3940 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr); in bnxt_init_chip()
3942 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc); in bnxt_init_chip()
3945 bp->vnic_info[0].uc_filter_count = 1; in bnxt_init_chip()
3947 bp->vnic_info[0].rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_UNICAST | in bnxt_init_chip()
3950 if ((bp->dev->flags & IFF_PROMISC) && BNXT_PF(bp)) in bnxt_init_chip()
3951 bp->vnic_info[0].rx_mask |= in bnxt_init_chip()
3954 rc = bnxt_cfg_rx_mode(bp); in bnxt_init_chip()
3958 rc = bnxt_hwrm_set_coal(bp); in bnxt_init_chip()
3960 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n", in bnxt_init_chip()
3966 bnxt_hwrm_resource_free(bp, 0, true); in bnxt_init_chip()
3971 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init) in bnxt_shutdown_nic() argument
3973 bnxt_hwrm_resource_free(bp, 1, irq_re_init); in bnxt_shutdown_nic()
3977 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init) in bnxt_init_nic() argument
3979 bnxt_init_rx_rings(bp); in bnxt_init_nic()
3980 bnxt_init_tx_rings(bp); in bnxt_init_nic()
3981 bnxt_init_ring_grps(bp, irq_re_init); in bnxt_init_nic()
3982 bnxt_init_vnics(bp); in bnxt_init_nic()
3984 return bnxt_init_chip(bp, irq_re_init); in bnxt_init_nic()
3987 static void bnxt_disable_int(struct bnxt *bp) in bnxt_disable_int() argument
3991 if (!bp->bnapi) in bnxt_disable_int()
3994 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_disable_int()
3995 struct bnxt_napi *bnapi = bp->bnapi[i]; in bnxt_disable_int()
4002 static void bnxt_enable_int(struct bnxt *bp) in bnxt_enable_int() argument
4006 atomic_set(&bp->intr_sem, 0); in bnxt_enable_int()
4007 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_enable_int()
4008 struct bnxt_napi *bnapi = bp->bnapi[i]; in bnxt_enable_int()
4015 static int bnxt_set_real_num_queues(struct bnxt *bp) in bnxt_set_real_num_queues() argument
4018 struct net_device *dev = bp->dev; in bnxt_set_real_num_queues()
4020 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings); in bnxt_set_real_num_queues()
4024 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings); in bnxt_set_real_num_queues()
4029 if (bp->rx_nr_rings) in bnxt_set_real_num_queues()
4030 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings); in bnxt_set_real_num_queues()
4038 static int bnxt_setup_msix(struct bnxt *bp) in bnxt_setup_msix() argument
4041 struct net_device *dev = bp->dev; in bnxt_setup_msix()
4043 const int len = sizeof(bp->irq_tbl[0].name); in bnxt_setup_msix()
4045 bp->flags &= ~BNXT_FLAG_USING_MSIX; in bnxt_setup_msix()
4046 total_vecs = bp->cp_nr_rings; in bnxt_setup_msix()
4057 total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, 1, total_vecs); in bnxt_setup_msix()
4063 bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL); in bnxt_setup_msix()
4064 if (bp->irq_tbl) { in bnxt_setup_msix()
4068 bp->rx_nr_rings = min_t(int, total_vecs, bp->rx_nr_rings); in bnxt_setup_msix()
4069 bp->tx_nr_rings = min_t(int, total_vecs, bp->tx_nr_rings); in bnxt_setup_msix()
4070 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; in bnxt_setup_msix()
4073 bp->tx_nr_rings_per_tc = bp->tx_nr_rings / tcs; in bnxt_setup_msix()
4074 if (bp->tx_nr_rings_per_tc == 0) { in bnxt_setup_msix()
4076 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; in bnxt_setup_msix()
4080 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs; in bnxt_setup_msix()
4082 count = bp->tx_nr_rings_per_tc; in bnxt_setup_msix()
4088 bp->cp_nr_rings = max_t(int, bp->rx_nr_rings, bp->tx_nr_rings); in bnxt_setup_msix()
4090 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_setup_msix()
4091 bp->irq_tbl[i].vector = msix_ent[i].vector; in bnxt_setup_msix()
4092 snprintf(bp->irq_tbl[i].name, len, in bnxt_setup_msix()
4094 bp->irq_tbl[i].handler = bnxt_msix; in bnxt_setup_msix()
4096 rc = bnxt_set_real_num_queues(bp); in bnxt_setup_msix()
4103 bp->flags |= BNXT_FLAG_USING_MSIX; in bnxt_setup_msix()
4108 netdev_err(bp->dev, "bnxt_setup_msix err: %x\n", rc); in bnxt_setup_msix()
4109 pci_disable_msix(bp->pdev); in bnxt_setup_msix()
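bnxt_setup_msix() requests one vector per completion ring, accepts whatever pci_enable_msix_range() actually grants, and then clamps the RX/TX ring counts, and the per-traffic-class TX split, to fit. A compilable sketch of that sizing arithmetic, with a fake vector grant standing in for the PCI call:

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

/* Stand-in for pci_enable_msix_range(): the platform may grant fewer
 * vectors than requested; pretend it grants five. */
static int fake_enable_msix_range(int min_vecs, int max_vecs)
{
	int granted = 5;

	return granted < min_vecs ? -1 : MIN(granted, max_vecs);
}

int main(void)
{
	int rx = 8, tx = 8, tcs = 2;
	int total = fake_enable_msix_range(1, MAX(rx, tx));

	if (total < 0)
		return 1;	/* the real driver falls back to INTA */

	/* Clamp ring counts to the vectors granted, then redistribute
	 * TX rings across traffic classes. */
	rx = MIN(total, rx);
	tx = MIN(total, tx);
	int per_tc = tx / tcs;
	if (per_tc == 0)
		per_tc = tx;	/* too few vectors: give up on TCs */
	else
		tx = per_tc * tcs;
	printf("rx=%d tx=%d per_tc=%d cp=%d\n", rx, tx, per_tc, MAX(rx, tx));
	return 0;
}

The per_tc == 0 branch mirrors the fallback visible in the listing, where the driver abandons the traffic-class split when the granted vectors cannot cover even one TX ring per class.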
4114 static int bnxt_setup_inta(struct bnxt *bp) in bnxt_setup_inta() argument
4117 const int len = sizeof(bp->irq_tbl[0].name); in bnxt_setup_inta()
4119 if (netdev_get_num_tc(bp->dev)) in bnxt_setup_inta()
4120 netdev_reset_tc(bp->dev); in bnxt_setup_inta()
4122 bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL); in bnxt_setup_inta()
4123 if (!bp->irq_tbl) { in bnxt_setup_inta()
4127 bp->rx_nr_rings = 1; in bnxt_setup_inta()
4128 bp->tx_nr_rings = 1; in bnxt_setup_inta()
4129 bp->cp_nr_rings = 1; in bnxt_setup_inta()
4130 bp->tx_nr_rings_per_tc = bp->tx_nr_rings; in bnxt_setup_inta()
4131 bp->irq_tbl[0].vector = bp->pdev->irq; in bnxt_setup_inta()
4132 snprintf(bp->irq_tbl[0].name, len, in bnxt_setup_inta()
4133 "%s-%s-%d", bp->dev->name, "TxRx", 0); in bnxt_setup_inta()
4134 bp->irq_tbl[0].handler = bnxt_inta; in bnxt_setup_inta()
4135 rc = bnxt_set_real_num_queues(bp); in bnxt_setup_inta()
4139 static int bnxt_setup_int_mode(struct bnxt *bp) in bnxt_setup_int_mode() argument
4143 if (bp->flags & BNXT_FLAG_MSIX_CAP) in bnxt_setup_int_mode()
4144 rc = bnxt_setup_msix(bp); in bnxt_setup_int_mode()
4146 if (!(bp->flags & BNXT_FLAG_USING_MSIX)) { in bnxt_setup_int_mode()
4148 rc = bnxt_setup_inta(bp); in bnxt_setup_int_mode()
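bnxt_setup_int_mode() is a two-step fallback: try MSI-X when the capability flag is set, and drop to legacy INTA whenever BNXT_FLAG_USING_MSIX never materialized. A sketch of that shape (flag bit values hypothetical):

#include <stdio.h>

#define FLAG_MSIX_CAP   0x1u
#define FLAG_USING_MSIX 0x2u

/* Pretend MSI-X setup fails, so FLAG_USING_MSIX is never set. */
static int setup_msix(unsigned int *flags) { (void)flags; return -1; }
static int setup_inta(unsigned int *flags) { (void)flags; return 0; }

static int setup_int_mode(unsigned int *flags)
{
	int rc = 0;

	if (*flags & FLAG_MSIX_CAP)
		rc = setup_msix(flags);
	if (!(*flags & FLAG_USING_MSIX))
		rc = setup_inta(flags);	/* legacy interrupt fallback */
	return rc;
}

int main(void)
{
	unsigned int flags = FLAG_MSIX_CAP;

	return setup_int_mode(&flags);
}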
4153 static void bnxt_free_irq(struct bnxt *bp) in bnxt_free_irq() argument
4159 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap); in bnxt_free_irq()
4160 bp->dev->rx_cpu_rmap = NULL; in bnxt_free_irq()
4162 if (!bp->irq_tbl) in bnxt_free_irq()
4165 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_free_irq()
4166 irq = &bp->irq_tbl[i]; in bnxt_free_irq()
4168 free_irq(irq->vector, bp->bnapi[i]); in bnxt_free_irq()
4171 if (bp->flags & BNXT_FLAG_USING_MSIX) in bnxt_free_irq()
4172 pci_disable_msix(bp->pdev); in bnxt_free_irq()
4173 kfree(bp->irq_tbl); in bnxt_free_irq()
4174 bp->irq_tbl = NULL; in bnxt_free_irq()
4177 static int bnxt_request_irq(struct bnxt *bp) in bnxt_request_irq() argument
4182 struct cpu_rmap *rmap = bp->dev->rx_cpu_rmap; in bnxt_request_irq()
4185 if (!(bp->flags & BNXT_FLAG_USING_MSIX)) in bnxt_request_irq()
4188 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_request_irq()
4189 struct bnxt_irq *irq = &bp->irq_tbl[i]; in bnxt_request_irq()
4191 if (rmap && (i < bp->rx_nr_rings)) { in bnxt_request_irq()
4194 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n", in bnxt_request_irq()
4199 bp->bnapi[i]); in bnxt_request_irq()
4208 static void bnxt_del_napi(struct bnxt *bp) in bnxt_del_napi() argument
4212 if (!bp->bnapi) in bnxt_del_napi()
4215 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_del_napi()
4216 struct bnxt_napi *bnapi = bp->bnapi[i]; in bnxt_del_napi()
4223 static void bnxt_init_napi(struct bnxt *bp) in bnxt_init_napi() argument
4228 if (bp->flags & BNXT_FLAG_USING_MSIX) { in bnxt_init_napi()
4229 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_init_napi()
4230 bnapi = bp->bnapi[i]; in bnxt_init_napi()
4231 netif_napi_add(bp->dev, &bnapi->napi, in bnxt_init_napi()
4236 bnapi = bp->bnapi[0]; in bnxt_init_napi()
4237 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64); in bnxt_init_napi()
4242 static void bnxt_disable_napi(struct bnxt *bp) in bnxt_disable_napi() argument
4246 if (!bp->bnapi) in bnxt_disable_napi()
4249 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_disable_napi()
4250 napi_disable(&bp->bnapi[i]->napi); in bnxt_disable_napi()
4251 bnxt_disable_poll(bp->bnapi[i]); in bnxt_disable_napi()
4255 static void bnxt_enable_napi(struct bnxt *bp) in bnxt_enable_napi() argument
4259 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_enable_napi()
4260 bnxt_enable_poll(bp->bnapi[i]); in bnxt_enable_napi()
4261 napi_enable(&bp->bnapi[i]->napi); in bnxt_enable_napi()
4265 static void bnxt_tx_disable(struct bnxt *bp) in bnxt_tx_disable() argument
4272 if (bp->bnapi) { in bnxt_tx_disable()
4273 for (i = 0; i < bp->tx_nr_rings; i++) { in bnxt_tx_disable()
4274 bnapi = bp->bnapi[i]; in bnxt_tx_disable()
4276 txq = netdev_get_tx_queue(bp->dev, i); in bnxt_tx_disable()
4283 netif_tx_disable(bp->dev); in bnxt_tx_disable()
4284 netif_carrier_off(bp->dev); in bnxt_tx_disable()
4287 static void bnxt_tx_enable(struct bnxt *bp) in bnxt_tx_enable() argument
4294 for (i = 0; i < bp->tx_nr_rings; i++) { in bnxt_tx_enable()
4295 bnapi = bp->bnapi[i]; in bnxt_tx_enable()
4297 txq = netdev_get_tx_queue(bp->dev, i); in bnxt_tx_enable()
4300 netif_tx_wake_all_queues(bp->dev); in bnxt_tx_enable()
4301 if (bp->link_info.link_up) in bnxt_tx_enable()
4302 netif_carrier_on(bp->dev); in bnxt_tx_enable()
4305 static void bnxt_report_link(struct bnxt *bp) in bnxt_report_link() argument
4307 if (bp->link_info.link_up) { in bnxt_report_link()
4312 netif_carrier_on(bp->dev); in bnxt_report_link()
4313 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL) in bnxt_report_link()
4317 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH) in bnxt_report_link()
4319 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX) in bnxt_report_link()
4321 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX) in bnxt_report_link()
4325 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed); in bnxt_report_link()
4326 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n", in bnxt_report_link()
4329 netif_carrier_off(bp->dev); in bnxt_report_link()
4330 netdev_err(bp->dev, "NIC Link is Down\n"); in bnxt_report_link()
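bnxt_report_link() translates the link-info pause bits into the "Flow control" string of the log line above. A self-contained sketch of that mapping, with hypothetical stand-ins for the BNXT_LINK_PAUSE_* values:

#include <stdio.h>

/* Hypothetical stand-ins for the BNXT_LINK_PAUSE_* values. */
enum pause {
	PAUSE_NONE = 0,
	PAUSE_TX   = 1,
	PAUSE_RX   = 2,
	PAUSE_BOTH = PAUSE_TX | PAUSE_RX,
};

static const char *flow_ctrl_str(enum pause p)
{
	switch (p) {
	case PAUSE_BOTH: return "both";
	case PAUSE_TX:   return "tx";
	case PAUSE_RX:   return "rx";
	default:         return "none";
	}
}

int main(void)
{
	printf("NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
	       10000, "full", flow_ctrl_str(PAUSE_BOTH));
	return 0;
}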
4334 static int bnxt_update_link(struct bnxt *bp, bool chng_link_state) in bnxt_update_link() argument
4337 struct bnxt_link_info *link_info = &bp->link_info; in bnxt_update_link()
4339 struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr; in bnxt_update_link()
4342 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1); in bnxt_update_link()
4344 mutex_lock(&bp->hwrm_cmd_lock); in bnxt_update_link()
4345 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); in bnxt_update_link()
4347 mutex_unlock(&bp->hwrm_cmd_lock); in bnxt_update_link()
4382 bnxt_report_link(bp); in bnxt_update_link()
4387 mutex_unlock(&bp->hwrm_cmd_lock); in bnxt_update_link()
4392 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req) in bnxt_hwrm_set_pause_common() argument
4394 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) { in bnxt_hwrm_set_pause_common()
4395 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX) in bnxt_hwrm_set_pause_common()
4397 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX) in bnxt_hwrm_set_pause_common()
4402 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX) in bnxt_hwrm_set_pause_common()
4404 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX) in bnxt_hwrm_set_pause_common()
4411 static void bnxt_hwrm_set_link_common(struct bnxt *bp, in bnxt_hwrm_set_link_common() argument
4414 u8 autoneg = bp->link_info.autoneg; in bnxt_hwrm_set_link_common()
4415 u16 fw_link_speed = bp->link_info.req_link_speed; in bnxt_hwrm_set_link_common()
4416 u32 advertising = bp->link_info.advertising; in bnxt_hwrm_set_link_common()
4441 int bnxt_hwrm_set_pause(struct bnxt *bp) in bnxt_hwrm_set_pause() argument
4446 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); in bnxt_hwrm_set_pause()
4447 bnxt_hwrm_set_pause_common(bp, &req); in bnxt_hwrm_set_pause()
4449 if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) || in bnxt_hwrm_set_pause()
4450 bp->link_info.force_link_chng) in bnxt_hwrm_set_pause()
4451 bnxt_hwrm_set_link_common(bp, &req); in bnxt_hwrm_set_pause()
4453 mutex_lock(&bp->hwrm_cmd_lock); in bnxt_hwrm_set_pause()
4454 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); in bnxt_hwrm_set_pause()
4455 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) { in bnxt_hwrm_set_pause()
4460 bp->link_info.pause = in bnxt_hwrm_set_pause()
4461 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl; in bnxt_hwrm_set_pause()
4462 bp->link_info.auto_pause_setting = 0; in bnxt_hwrm_set_pause()
4463 if (!bp->link_info.force_link_chng) in bnxt_hwrm_set_pause()
4464 bnxt_report_link(bp); in bnxt_hwrm_set_pause()
4466 bp->link_info.force_link_chng = false; in bnxt_hwrm_set_pause()
4467 mutex_unlock(&bp->hwrm_cmd_lock); in bnxt_hwrm_set_pause()
4471 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause) in bnxt_hwrm_set_link_setting() argument
4475 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); in bnxt_hwrm_set_link_setting()
4477 bnxt_hwrm_set_pause_common(bp, &req); in bnxt_hwrm_set_link_setting()
4479 bnxt_hwrm_set_link_common(bp, &req); in bnxt_hwrm_set_link_setting()
4480 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); in bnxt_hwrm_set_link_setting()
4483 static int bnxt_update_phy_setting(struct bnxt *bp) in bnxt_update_phy_setting() argument
4488 struct bnxt_link_info *link_info = &bp->link_info; in bnxt_update_phy_setting()
4490 rc = bnxt_update_link(bp, true); in bnxt_update_phy_setting()
4492 netdev_err(bp->dev, "failed to update link (rc: %x)\n", in bnxt_update_phy_setting()
4519 rc = bnxt_hwrm_set_link_setting(bp, update_pause); in bnxt_update_phy_setting()
4521 rc = bnxt_hwrm_set_pause(bp); in bnxt_update_phy_setting()
4523 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n", in bnxt_update_phy_setting()
4536 static void bnxt_preset_reg_win(struct bnxt *bp) in bnxt_preset_reg_win() argument
4538 if (BNXT_PF(bp)) { in bnxt_preset_reg_win()
4541 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12); in bnxt_preset_reg_win()
4545 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) in __bnxt_open_nic() argument
4549 bnxt_preset_reg_win(bp); in __bnxt_open_nic()
4550 netif_carrier_off(bp->dev); in __bnxt_open_nic()
4552 rc = bnxt_setup_int_mode(bp); in __bnxt_open_nic()
4554 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n", in __bnxt_open_nic()
4559 if ((bp->flags & BNXT_FLAG_RFS) && in __bnxt_open_nic()
4560 !(bp->flags & BNXT_FLAG_USING_MSIX)) { in __bnxt_open_nic()
4562 bp->dev->hw_features &= ~NETIF_F_NTUPLE; in __bnxt_open_nic()
4563 bp->flags &= ~BNXT_FLAG_RFS; in __bnxt_open_nic()
4566 rc = bnxt_alloc_mem(bp, irq_re_init); in __bnxt_open_nic()
4568 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc); in __bnxt_open_nic()
4573 bnxt_init_napi(bp); in __bnxt_open_nic()
4574 rc = bnxt_request_irq(bp); in __bnxt_open_nic()
4576 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc); in __bnxt_open_nic()
4581 bnxt_enable_napi(bp); in __bnxt_open_nic()
4583 rc = bnxt_init_nic(bp, irq_re_init); in __bnxt_open_nic()
4585 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); in __bnxt_open_nic()
4590 rc = bnxt_update_phy_setting(bp); in __bnxt_open_nic()
4597 vxlan_get_rx_port(bp->dev); in __bnxt_open_nic()
4600 bp, htons(0x17c1), in __bnxt_open_nic()
4602 bp->nge_port_cnt = 1; in __bnxt_open_nic()
4605 set_bit(BNXT_STATE_OPEN, &bp->state); in __bnxt_open_nic()
4606 bnxt_enable_int(bp); in __bnxt_open_nic()
4608 bnxt_tx_enable(bp); in __bnxt_open_nic()
4609 mod_timer(&bp->timer, jiffies + bp->current_interval); in __bnxt_open_nic()
4614 bnxt_disable_napi(bp); in __bnxt_open_nic()
4615 bnxt_del_napi(bp); in __bnxt_open_nic()
4618 bnxt_free_skbs(bp); in __bnxt_open_nic()
4619 bnxt_free_irq(bp); in __bnxt_open_nic()
4620 bnxt_free_mem(bp, true); in __bnxt_open_nic()
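__bnxt_open_nic() uses the kernel's goto-unwind idiom: resources are acquired in a fixed order, and the error labels at the bottom release them in reverse. A minimal userspace illustration of the same shape (names illustrative, not the driver's):

#include <stdio.h>
#include <stdlib.h>

static int open_nic(void)
{
	void *mem, *irq;

	mem = malloc(64);	/* stands in for bnxt_alloc_mem() */
	if (!mem)
		goto err;
	irq = malloc(64);	/* stands in for bnxt_request_irq() */
	if (!irq)
		goto err_free_mem;

	puts("device open");	/* resources stay held while open */
	return 0;

err_free_mem:
	free(mem);
err:
	return -1;
}

int main(void)
{
	return open_nic() ? 1 : 0;
}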
4625 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) in bnxt_open_nic() argument
4629 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init); in bnxt_open_nic()
4631 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc); in bnxt_open_nic()
4632 dev_close(bp->dev); in bnxt_open_nic()
4639 struct bnxt *bp = netdev_priv(dev); in bnxt_open() local
4642 rc = bnxt_hwrm_func_reset(bp); in bnxt_open()
4644 netdev_err(bp->dev, "hwrm chip reset failure rc: %x\n", in bnxt_open()
4649 return __bnxt_open_nic(bp, true, true); in bnxt_open()
4652 static void bnxt_disable_int_sync(struct bnxt *bp) in bnxt_disable_int_sync() argument
4656 atomic_inc(&bp->intr_sem); in bnxt_disable_int_sync()
4657 if (!netif_running(bp->dev)) in bnxt_disable_int_sync()
4660 bnxt_disable_int(bp); in bnxt_disable_int_sync()
4661 for (i = 0; i < bp->cp_nr_rings; i++) in bnxt_disable_int_sync()
4662 synchronize_irq(bp->irq_tbl[i].vector); in bnxt_disable_int_sync()
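bnxt_disable_int_sync() bumps bp->intr_sem before masking and synchronizing each vector; paths such as the timer check the counter and stand down while it is nonzero, and bnxt_enable_int() resets it to zero. A C11-atomics sketch of that gate:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int intr_sem;

static void disable_int_sync(void)
{
	/* atomic_inc(&bp->intr_sem): tell fast paths to stand down.
	 * The real driver then masks and synchronize_irq()s each vector. */
	atomic_fetch_add(&intr_sem, 1);
}

static void enable_int(void)
{
	atomic_store(&intr_sem, 0);	/* atomic_set(&bp->intr_sem, 0) */
}

static void periodic_work(void)
{
	if (atomic_load(&intr_sem) != 0)
		return;			/* gated off: do nothing */
	puts("servicing");
}

int main(void)
{
	disable_int_sync();
	periodic_work();	/* prints nothing */
	enable_int();
	periodic_work();	/* prints "servicing" */
	return 0;
}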
4665 int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) in bnxt_close_nic() argument
4670 if (bp->sriov_cfg) { in bnxt_close_nic()
4671 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait, in bnxt_close_nic()
4672 !bp->sriov_cfg, in bnxt_close_nic()
4675 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n"); in bnxt_close_nic()
4679 bnxt_tx_disable(bp); in bnxt_close_nic()
4681 clear_bit(BNXT_STATE_OPEN, &bp->state); in bnxt_close_nic()
4683 while (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state)) in bnxt_close_nic()
4687 bnxt_shutdown_nic(bp, irq_re_init); in bnxt_close_nic()
4691 bnxt_disable_napi(bp); in bnxt_close_nic()
4692 bnxt_disable_int_sync(bp); in bnxt_close_nic()
4693 del_timer_sync(&bp->timer); in bnxt_close_nic()
4694 bnxt_free_skbs(bp); in bnxt_close_nic()
4697 bnxt_free_irq(bp); in bnxt_close_nic()
4698 bnxt_del_napi(bp); in bnxt_close_nic()
4700 bnxt_free_mem(bp, irq_re_init); in bnxt_close_nic()
4706 struct bnxt *bp = netdev_priv(dev); in bnxt_close() local
4708 bnxt_close_nic(bp, true, true); in bnxt_close()
4742 struct bnxt *bp = netdev_priv(dev); in bnxt_get_stats64() local
4746 if (!bp->bnapi) in bnxt_get_stats64()
4750 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_get_stats64()
4751 struct bnxt_napi *bnapi = bp->bnapi[i]; in bnxt_get_stats64()
4784 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask) in bnxt_mc_list_updated() argument
4786 struct net_device *dev = bp->dev; in bnxt_mc_list_updated()
4787 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; in bnxt_mc_list_updated()
4818 static bool bnxt_uc_list_updated(struct bnxt *bp) in bnxt_uc_list_updated() argument
4820 struct net_device *dev = bp->dev; in bnxt_uc_list_updated()
4821 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; in bnxt_uc_list_updated()
4839 struct bnxt *bp = netdev_priv(dev); in bnxt_set_rx_mode() local
4840 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; in bnxt_set_rx_mode()
4853 if ((dev->flags & IFF_PROMISC) && BNXT_PF(bp)) in bnxt_set_rx_mode()
4856 uc_update = bnxt_uc_list_updated(bp); in bnxt_set_rx_mode()
4862 mc_update = bnxt_mc_list_updated(bp, &mask); in bnxt_set_rx_mode()
4868 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event); in bnxt_set_rx_mode()
4869 schedule_work(&bp->sp_task); in bnxt_set_rx_mode()
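bnxt_set_rx_mode() runs in a context where the sleeping HWRM calls are off-limits, so it only records an event bit and schedules sp_task, which performs the real reconfiguration later in process context. A single-threaded sketch of that set-bit/dispatch split (bit values hypothetical):

#include <stdio.h>

/* Hypothetical event bits standing in for BNXT_*_SP_EVENT. */
#define RX_MASK_EVENT 0x1ul
#define RESET_EVENT   0x2ul

static unsigned long sp_event;

static void set_rx_mode(void)
{
	sp_event |= RX_MASK_EVENT;	/* set_bit() + schedule_work() */
}

static void sp_task(void)
{
	if (sp_event & RX_MASK_EVENT) {		/* test_and_clear_bit() */
		sp_event &= ~RX_MASK_EVENT;
		puts("cfg_rx_mode in process context");
	}
	if (sp_event & RESET_EVENT) {
		sp_event &= ~RESET_EVENT;
		puts("reset task");
	}
}

int main(void)
{
	set_rx_mode();
	sp_task();
	return 0;
}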
4873 static int bnxt_cfg_rx_mode(struct bnxt *bp) in bnxt_cfg_rx_mode() argument
4875 struct net_device *dev = bp->dev; in bnxt_cfg_rx_mode()
4876 struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; in bnxt_cfg_rx_mode()
4882 uc_update = bnxt_uc_list_updated(bp); in bnxt_cfg_rx_mode()
4888 mutex_lock(&bp->hwrm_cmd_lock); in bnxt_cfg_rx_mode()
4892 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1, in bnxt_cfg_rx_mode()
4897 rc = _hwrm_send_message(bp, &req, sizeof(req), in bnxt_cfg_rx_mode()
4900 mutex_unlock(&bp->hwrm_cmd_lock); in bnxt_cfg_rx_mode()
4917 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off); in bnxt_cfg_rx_mode()
4919 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", in bnxt_cfg_rx_mode()
4927 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); in bnxt_cfg_rx_mode()
4929 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n", in bnxt_cfg_rx_mode()
4943 struct bnxt *bp = netdev_priv(dev); in bnxt_set_features() local
4944 u32 flags = bp->flags; in bnxt_set_features()
4951 if ((features & NETIF_F_GRO) && (bp->pdev->revision > 0)) in bnxt_set_features()
4962 changes = flags ^ bp->flags; in bnxt_set_features()
4965 if ((bp->flags & BNXT_FLAG_TPA) == 0 || in bnxt_set_features()
4973 if (flags != bp->flags) { in bnxt_set_features()
4974 u32 old_flags = bp->flags; in bnxt_set_features()
4976 bp->flags = flags; in bnxt_set_features()
4980 bnxt_set_ring_params(bp); in bnxt_set_features()
4985 bnxt_close_nic(bp, false, false); in bnxt_set_features()
4987 bnxt_set_ring_params(bp); in bnxt_set_features()
4989 return bnxt_open_nic(bp, false, false); in bnxt_set_features()
4992 rc = bnxt_set_tpa(bp, in bnxt_set_features()
4996 bp->flags = old_flags; in bnxt_set_features()
5002 static void bnxt_dbg_dump_states(struct bnxt *bp) in bnxt_dbg_dump_states() argument
5010 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_dbg_dump_states()
5011 bnapi = bp->bnapi[i]; in bnxt_dbg_dump_states()
5015 if (netif_msg_drv(bp)) { in bnxt_dbg_dump_states()
5016 netdev_info(bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n", in bnxt_dbg_dump_states()
5019 netdev_info(bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n", in bnxt_dbg_dump_states()
5024 netdev_info(bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n", in bnxt_dbg_dump_states()
5031 static void bnxt_reset_task(struct bnxt *bp) in bnxt_reset_task() argument
5033 bnxt_dbg_dump_states(bp); in bnxt_reset_task()
5034 if (netif_running(bp->dev)) { in bnxt_reset_task()
5035 bnxt_close_nic(bp, false, false); in bnxt_reset_task()
5036 bnxt_open_nic(bp, false, false); in bnxt_reset_task()
5042 struct bnxt *bp = netdev_priv(dev); in bnxt_tx_timeout() local
5044 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n"); in bnxt_tx_timeout()
5045 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event); in bnxt_tx_timeout()
5046 schedule_work(&bp->sp_task); in bnxt_tx_timeout()
5052 struct bnxt *bp = netdev_priv(dev); in bnxt_poll_controller() local
5055 for (i = 0; i < bp->cp_nr_rings; i++) { in bnxt_poll_controller()
5056 struct bnxt_irq *irq = &bp->irq_tbl[i]; in bnxt_poll_controller()
5059 irq->handler(irq->vector, bp->bnapi[i]); in bnxt_poll_controller()
5067 struct bnxt *bp = (struct bnxt *)data; in bnxt_timer() local
5068 struct net_device *dev = bp->dev; in bnxt_timer()
5073 if (atomic_read(&bp->intr_sem) != 0) in bnxt_timer()
5077 mod_timer(&bp->timer, jiffies + bp->current_interval); in bnxt_timer()
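bnxt_timer() is self-re-arming: the handler does its periodic work (skipped while interrupts are gated via intr_sem) and then mod_timer()s itself one interval ahead. A simulated-clock sketch of that re-arm pattern:

#include <stdio.h>

static unsigned long ticks, expires;
static int intr_sem;

static void timer_fn(void)
{
	if (intr_sem == 0)	/* skip the work while gated */
		printf("periodic work at tick %lu\n", ticks);
	expires = ticks + 3;	/* mod_timer(&bp->timer, jiffies + interval) */
}

int main(void)
{
	expires = 3;
	for (ticks = 0; ticks < 10; ticks++)
		if (ticks == expires)
			timer_fn();
	return 0;
}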
5084 struct bnxt *bp = container_of(work, struct bnxt, sp_task); in bnxt_sp_task() local
5087 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); in bnxt_sp_task()
5089 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { in bnxt_sp_task()
5090 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); in bnxt_sp_task()
5094 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event)) in bnxt_sp_task()
5095 bnxt_cfg_rx_mode(bp); in bnxt_sp_task()
5097 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event)) in bnxt_sp_task()
5098 bnxt_cfg_ntp_filters(bp); in bnxt_sp_task()
5099 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) { in bnxt_sp_task()
5100 rc = bnxt_update_link(bp, true); in bnxt_sp_task()
5102 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n", in bnxt_sp_task()
5105 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event)) in bnxt_sp_task()
5106 bnxt_hwrm_exec_fwd_req(bp); in bnxt_sp_task()
5107 if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) { in bnxt_sp_task()
5109 bp, bp->vxlan_port, in bnxt_sp_task()
5112 if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) { in bnxt_sp_task()
5114 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); in bnxt_sp_task()
5116 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) { in bnxt_sp_task()
5120 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); in bnxt_sp_task()
5122 bnxt_reset_task(bp); in bnxt_sp_task()
5123 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); in bnxt_sp_task()
5128 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); in bnxt_sp_task()
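Note the flag dance around the reset event: bnxt_close_nic() spins until BNXT_STATE_IN_SP_TASK clears (see the while loop in the bnxt_close_nic() lines above), and bnxt_reset_task() itself calls bnxt_close_nic(), so sp_task appears to drop the flag before resetting and retake it afterwards to avoid deadlocking against itself. A sketch of that ordering:

#include <stdbool.h>
#include <stdio.h>

static bool in_sp_task;

static void close_nic(void)
{
	while (in_sp_task)
		;	/* would spin forever if sp_task kept the flag */
	puts("closed");
}

static void reset_task(void)
{
	close_nic();
	puts("reopened");
}

static void sp_task(void)
{
	in_sp_task = true;
	/* ... handle the other events ... */
	in_sp_task = false;	/* drop the flag before resetting */
	reset_task();
	in_sp_task = true;	/* retake it for the rest of the task */
	in_sp_task = false;
}

int main(void)
{
	sp_task();
	return 0;
}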
5134 struct bnxt *bp = netdev_priv(dev); in bnxt_init_board() local
5166 bp->dev = dev; in bnxt_init_board()
5167 bp->pdev = pdev; in bnxt_init_board()
5169 bp->bar0 = pci_ioremap_bar(pdev, 0); in bnxt_init_board()
5170 if (!bp->bar0) { in bnxt_init_board()
5176 bp->bar1 = pci_ioremap_bar(pdev, 2); in bnxt_init_board()
5177 if (!bp->bar1) { in bnxt_init_board()
5183 bp->bar2 = pci_ioremap_bar(pdev, 4); in bnxt_init_board()
5184 if (!bp->bar2) { in bnxt_init_board()
5190 INIT_WORK(&bp->sp_task, bnxt_sp_task); in bnxt_init_board()
5192 spin_lock_init(&bp->ntp_fltr_lock); in bnxt_init_board()
5194 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE; in bnxt_init_board()
5195 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE; in bnxt_init_board()
5197 bp->coal_ticks = BNXT_USEC_TO_COAL_TIMER(4); in bnxt_init_board()
5198 bp->coal_bufs = 20; in bnxt_init_board()
5199 bp->coal_ticks_irq = BNXT_USEC_TO_COAL_TIMER(1); in bnxt_init_board()
5200 bp->coal_bufs_irq = 2; in bnxt_init_board()
5202 init_timer(&bp->timer); in bnxt_init_board()
5203 bp->timer.data = (unsigned long)bp; in bnxt_init_board()
5204 bp->timer.function = bnxt_timer; in bnxt_init_board()
5205 bp->current_interval = BNXT_TIMER_INTERVAL; in bnxt_init_board()
5207 clear_bit(BNXT_STATE_OPEN, &bp->state); in bnxt_init_board()
5212 if (bp->bar2) { in bnxt_init_board()
5213 pci_iounmap(pdev, bp->bar2); in bnxt_init_board()
5214 bp->bar2 = NULL; in bnxt_init_board()
5217 if (bp->bar1) { in bnxt_init_board()
5218 pci_iounmap(pdev, bp->bar1); in bnxt_init_board()
5219 bp->bar1 = NULL; in bnxt_init_board()
5222 if (bp->bar0) { in bnxt_init_board()
5223 pci_iounmap(pdev, bp->bar0); in bnxt_init_board()
5224 bp->bar0 = NULL; in bnxt_init_board()
5240 struct bnxt *bp = netdev_priv(dev); in bnxt_change_mac_addr() local
5247 if (BNXT_VF(bp) && is_valid_ether_addr(bp->vf.mac_addr)) in bnxt_change_mac_addr()
5256 bnxt_close_nic(bp, false, false); in bnxt_change_mac_addr()
5257 rc = bnxt_open_nic(bp, false, false); in bnxt_change_mac_addr()
5266 struct bnxt *bp = netdev_priv(dev); in bnxt_change_mtu() local
5272 bnxt_close_nic(bp, false, false); in bnxt_change_mtu()
5275 bnxt_set_ring_params(bp); in bnxt_change_mtu()
5278 return bnxt_open_nic(bp, false, false); in bnxt_change_mtu()
5285 struct bnxt *bp = netdev_priv(dev); in bnxt_setup_tc() local
5287 if (tc > bp->max_tc) { in bnxt_setup_tc()
5289 tc, bp->max_tc); in bnxt_setup_tc()
5299 bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings); in bnxt_setup_tc()
5300 if (bp->tx_nr_rings_per_tc * tc > max_tx_rings) in bnxt_setup_tc()
5305 if (netif_running(bp->dev)) in bnxt_setup_tc()
5306 bnxt_close_nic(bp, true, false); in bnxt_setup_tc()
5309 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc; in bnxt_setup_tc()
5312 bp->tx_nr_rings = bp->tx_nr_rings_per_tc; in bnxt_setup_tc()
5315 bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings); in bnxt_setup_tc()
5316 bp->num_stat_ctxs = bp->cp_nr_rings; in bnxt_setup_tc()
5318 if (netif_running(bp->dev)) in bnxt_setup_tc()
5319 return bnxt_open_nic(bp, true, false); in bnxt_setup_tc()
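bnxt_setup_tc() rescales the ring counts from the per-traffic-class quota: TX rings become tx_nr_rings_per_tc * tc, completion rings track the larger of RX and TX, and each completion ring gets one stat context. The arithmetic, as a compilable sketch:

#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	int per_tc = 4, rx = 4, tc = 2;
	int tx = tc ? per_tc * tc : per_tc;
	int cp = MAX(tx, rx);	/* one completion ring per busiest side */

	printf("tx=%d rx=%d cp=%d stat_ctxs=%d\n", tx, rx, cp, cp);
	return 0;
}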
5345 struct bnxt *bp = netdev_priv(dev); in bnxt_rx_flow_steer() local
5375 head = &bp->ntp_fltr_hash_tbl[idx]; in bnxt_rx_flow_steer()
5386 spin_lock_bh(&bp->ntp_fltr_lock); in bnxt_rx_flow_steer()
5387 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, in bnxt_rx_flow_steer()
5390 spin_unlock_bh(&bp->ntp_fltr_lock); in bnxt_rx_flow_steer()
5399 bp->ntp_fltr_count++; in bnxt_rx_flow_steer()
5400 spin_unlock_bh(&bp->ntp_fltr_lock); in bnxt_rx_flow_steer()
5402 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event); in bnxt_rx_flow_steer()
5403 schedule_work(&bp->sp_task); in bnxt_rx_flow_steer()
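bnxt_rx_flow_steer() hands each new flow a software filter ID from a bitmap guarded by ntp_fltr_lock; bnxt_cfg_ntp_filters() later returns expired IDs to the bitmap. A userspace model using a mutex and a find-first-zero scan in place of bitmap_find_free_region() (sizes hypothetical; build with -pthread):

#include <pthread.h>
#include <stdio.h>

static unsigned long long fltr_bmap;	/* one word instead of a real bitmap */
static int fltr_count;
static pthread_mutex_t fltr_lock = PTHREAD_MUTEX_INITIALIZER;

static int alloc_fltr_id(void)
{
	int id = -1;

	pthread_mutex_lock(&fltr_lock);
	for (int i = 0; i < 64; i++) {
		if (!(fltr_bmap & (1ull << i))) {
			fltr_bmap |= 1ull << i;
			fltr_count++;
			id = i;
			break;
		}
	}
	pthread_mutex_unlock(&fltr_lock);
	return id;	/* -1: table full, drop the flow */
}

static void free_fltr_id(int id)
{
	pthread_mutex_lock(&fltr_lock);
	fltr_bmap &= ~(1ull << id);	/* clear_bit(fltr->sw_id, ...) */
	fltr_count--;
	pthread_mutex_unlock(&fltr_lock);
}

int main(void)
{
	int a = alloc_fltr_id(), b = alloc_fltr_id();

	printf("ids %d %d, count %d\n", a, b, fltr_count);
	free_fltr_id(a);
	printf("count %d\n", fltr_count);
	return 0;
}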
5412 static void bnxt_cfg_ntp_filters(struct bnxt *bp) in bnxt_cfg_ntp_filters() argument
5422 head = &bp->ntp_fltr_hash_tbl[i]; in bnxt_cfg_ntp_filters()
5427 if (rps_may_expire_flow(bp->dev, fltr->rxq, in bnxt_cfg_ntp_filters()
5430 bnxt_hwrm_cfa_ntuple_filter_free(bp, in bnxt_cfg_ntp_filters()
5435 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp, in bnxt_cfg_ntp_filters()
5444 spin_lock_bh(&bp->ntp_fltr_lock); in bnxt_cfg_ntp_filters()
5446 bp->ntp_fltr_count--; in bnxt_cfg_ntp_filters()
5447 spin_unlock_bh(&bp->ntp_fltr_lock); in bnxt_cfg_ntp_filters()
5449 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap); in bnxt_cfg_ntp_filters()
5458 static void bnxt_cfg_ntp_filters(struct bnxt *bp) in bnxt_cfg_ntp_filters() argument
5467 struct bnxt *bp = netdev_priv(dev); in bnxt_add_vxlan_port() local
5475 if (bp->vxlan_port_cnt && bp->vxlan_port != port) in bnxt_add_vxlan_port()
5478 bp->vxlan_port_cnt++; in bnxt_add_vxlan_port()
5479 if (bp->vxlan_port_cnt == 1) { in bnxt_add_vxlan_port()
5480 bp->vxlan_port = port; in bnxt_add_vxlan_port()
5481 set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event); in bnxt_add_vxlan_port()
5482 schedule_work(&bp->sp_task); in bnxt_add_vxlan_port()
5489 struct bnxt *bp = netdev_priv(dev); in bnxt_del_vxlan_port() local
5497 if (bp->vxlan_port_cnt && bp->vxlan_port == port) { in bnxt_del_vxlan_port()
5498 bp->vxlan_port_cnt--; in bnxt_del_vxlan_port()
5500 if (bp->vxlan_port_cnt == 0) { in bnxt_del_vxlan_port()
5501 set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event); in bnxt_del_vxlan_port()
5502 schedule_work(&bp->sp_task); in bnxt_del_vxlan_port()
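The VXLAN offload port is reference-counted: only the first add actually programs the firmware (deferred to sp_task), a second distinct port is refused, and only the last delete frees the firmware entry. A sketch of that bookkeeping:

#include <stdio.h>

static unsigned short vxlan_port;
static int vxlan_port_cnt;

static void add_port(unsigned short port)
{
	if (vxlan_port_cnt && vxlan_port != port)
		return;		/* one offloaded port at a time */
	if (++vxlan_port_cnt == 1) {
		vxlan_port = port;
		printf("program fw for port %u\n", port);  /* via sp_task */
	}
}

static void del_port(unsigned short port)
{
	if (!vxlan_port_cnt || vxlan_port != port)
		return;
	if (--vxlan_port_cnt == 0)
		printf("free fw entry for port %u\n", port);
}

int main(void)
{
	add_port(4789);
	add_port(4789);		/* second add only bumps the count */
	del_port(4789);
	del_port(4789);		/* last ref: firmware entry freed */
	return 0;
}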
5545 struct bnxt *bp = netdev_priv(dev); in bnxt_remove_one() local
5547 if (BNXT_PF(bp)) in bnxt_remove_one()
5548 bnxt_sriov_disable(bp); in bnxt_remove_one()
5551 cancel_work_sync(&bp->sp_task); in bnxt_remove_one()
5552 bp->sp_event = 0; in bnxt_remove_one()
5554 bnxt_free_hwrm_resources(bp); in bnxt_remove_one()
5555 pci_iounmap(pdev, bp->bar2); in bnxt_remove_one()
5556 pci_iounmap(pdev, bp->bar1); in bnxt_remove_one()
5557 pci_iounmap(pdev, bp->bar0); in bnxt_remove_one()
5564 static int bnxt_probe_phy(struct bnxt *bp) in bnxt_probe_phy() argument
5567 struct bnxt_link_info *link_info = &bp->link_info; in bnxt_probe_phy()
5570 rc = bnxt_update_link(bp, false); in bnxt_probe_phy()
5572 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n", in bnxt_probe_phy()
5598 strcat(bp->fw_ver_str, phy_ver); in bnxt_probe_phy()
5613 void bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx) in bnxt_get_max_rings() argument
5617 if (BNXT_PF(bp)) { in bnxt_get_max_rings()
5618 *max_tx = bp->pf.max_pf_tx_rings; in bnxt_get_max_rings()
5619 *max_rx = bp->pf.max_pf_rx_rings; in bnxt_get_max_rings()
5620 max_rings = min_t(int, bp->pf.max_irqs, bp->pf.max_cp_rings); in bnxt_get_max_rings()
5621 max_rings = min_t(int, max_rings, bp->pf.max_stat_ctxs); in bnxt_get_max_rings()
5624 *max_tx = bp->vf.max_tx_rings; in bnxt_get_max_rings()
5625 *max_rx = bp->vf.max_rx_rings; in bnxt_get_max_rings()
5626 max_rings = min_t(int, bp->vf.max_irqs, bp->vf.max_cp_rings); in bnxt_get_max_rings()
5627 max_rings = min_t(int, max_rings, bp->vf.max_stat_ctxs); in bnxt_get_max_rings()
5630 if (bp->flags & BNXT_FLAG_AGG_RINGS) in bnxt_get_max_rings()
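bnxt_get_max_rings() caps the usable RX/TX ring counts by whichever per-function resource runs out first: interrupts, completion rings, or stat contexts. A sketch of the clamping (the BNXT_FLAG_AGG_RINGS adjustment at the end of the listing is elided here):

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	int max_tx = 16, max_rx = 16;
	int max_irqs = 12, max_cp = 10, max_stat = 8;

	/* The binding constraint is the scarcest resource. */
	int max_rings = MIN(MIN(max_irqs, max_cp), max_stat);

	max_tx = MIN(max_tx, max_rings);
	max_rx = MIN(max_rx, max_rings);
	printf("max_tx=%d max_rx=%d\n", max_tx, max_rx);
	return 0;
}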
5641 struct bnxt *bp; in bnxt_init_one() local
5648 dev = alloc_etherdev_mq(sizeof(*bp), max_irqs); in bnxt_init_one()
5652 bp = netdev_priv(dev); in bnxt_init_one()
5655 bp->flags |= BNXT_FLAG_VF; in bnxt_init_one()
5658 bp->flags |= BNXT_FLAG_MSIX_CAP; in bnxt_init_one()
5659 if (BNXT_PF(bp)) in bnxt_init_one()
5660 bp->flags |= BNXT_FLAG_RFS; in bnxt_init_one()
5680 if (bp->flags & BNXT_FLAG_RFS) in bnxt_init_one()
5695 init_waitqueue_head(&bp->sriov_cfg_wait); in bnxt_init_one()
5697 rc = bnxt_alloc_hwrm_resources(bp); in bnxt_init_one()
5701 mutex_init(&bp->hwrm_cmd_lock); in bnxt_init_one()
5702 bnxt_hwrm_ver_get(bp); in bnxt_init_one()
5704 rc = bnxt_hwrm_func_drv_rgtr(bp); in bnxt_init_one()
5709 rc = bnxt_hwrm_func_qcaps(bp); in bnxt_init_one()
5711 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n", in bnxt_init_one()
5717 rc = bnxt_hwrm_queue_qportcfg(bp); in bnxt_init_one()
5719 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n", in bnxt_init_one()
5725 bnxt_set_tpa_flags(bp); in bnxt_init_one()
5726 bnxt_set_ring_params(bp); in bnxt_init_one()
5728 if (BNXT_PF(bp)) in bnxt_init_one()
5729 bp->pf.max_irqs = max_irqs; in bnxt_init_one()
5732 bp->vf.max_irqs = max_irqs; in bnxt_init_one()
5734 bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings); in bnxt_init_one()
5735 bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings); in bnxt_init_one()
5736 bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings); in bnxt_init_one()
5737 bp->tx_nr_rings = bp->tx_nr_rings_per_tc; in bnxt_init_one()
5738 bp->cp_nr_rings = max_t(int, bp->rx_nr_rings, bp->tx_nr_rings); in bnxt_init_one()
5739 bp->num_stat_ctxs = bp->cp_nr_rings; in bnxt_init_one()
5742 bp->flags |= BNXT_FLAG_STRIP_VLAN; in bnxt_init_one()
5744 rc = bnxt_probe_phy(bp); in bnxt_init_one()
5759 pci_iounmap(pdev, bp->bar0); in bnxt_init_one()