Lines matching refs: txq (Chelsio cxgb4vf SGE driver, drivers/net/ethernet/chelsio/cxgb4vf/sge.c)
1135 static void txq_stop(struct sge_eth_txq *txq) in txq_stop() argument
1137 netif_tx_stop_queue(txq->txq); in txq_stop()
1138 txq->q.stops++; in txq_stop()
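
The two matches above are the whole of txq_stop(): it parks the kernel netdev TX queue that backs this Ethernet TX queue and bumps the per-queue stop counter. Reconstructed from lines 1135-1138 as listed:

/* Stop the netdev TX queue behind this SGE Ethernet TX queue and
 * count the stop event (txq->q.stops is a driver statistic).
 */
static void txq_stop(struct sge_eth_txq *txq)
{
	netif_tx_stop_queue(txq->txq);
	txq->q.stops++;
}
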
1166 struct sge_eth_txq *txq; in t4vf_eth_xmit() local
1193 txq = &adapter->sge.ethtxq[pi->first_qset + qidx]; in t4vf_eth_xmit()
1199 reclaim_completed_tx(adapter, &txq->q, true); in t4vf_eth_xmit()
1208 credits = txq_avail(&txq->q) - ndesc; in t4vf_eth_xmit()
1217 txq_stop(txq); in t4vf_eth_xmit()
1231 txq->mapping_err++; in t4vf_eth_xmit()
1246 txq_stop(txq); in t4vf_eth_xmit()
1257 wr = (void *)&txq->q.desc[txq->q.pidx]; in t4vf_eth_xmit()
1307 txq->tso++; in t4vf_eth_xmit()
1308 txq->tx_cso += ssi->gso_segs; in t4vf_eth_xmit()
1324 txq->tx_cso++; in t4vf_eth_xmit()
1334 txq->vlan_ins++; in t4vf_eth_xmit()
1349 T4_TRACE5(adapter->tb[txq->q.cntxt_id & 7], in t4vf_eth_xmit()
1351 ndesc, credits, txq->q.pidx, skb->len, ssi->nr_frags); in t4vf_eth_xmit()
1363 inline_tx_skb(skb, &txq->q, cpl + 1); in t4vf_eth_xmit()
1404 struct sge_txq *tq = &txq->q; in t4vf_eth_xmit()
1433 txq_advance(&txq->q, ndesc); in t4vf_eth_xmit()
1435 ring_tx_db(adapter, &txq->q, ndesc); in t4vf_eth_xmit()
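
The t4vf_eth_xmit() matches above trace the transmit fast path: reclaim descriptors the hardware has completed, check that enough descriptor credits remain (stopping the queue via txq_stop() if not), build the work request at the producer index while bumping the TSO/checksum/VLAN offload counters, then advance the producer index and ring the doorbell. The condensed sketch below is based only on these matches; work-request construction, DMA mapping (txq->mapping_err), the trace call and the second txq_stop() at line 1246 (a later low-credits check) are omitted, and calc_tx_descs() is an assumed stand-in for the real descriptor-count helper.

/* Condensed sketch of the TX-queue handling in t4vf_eth_xmit(). */
static netdev_tx_t eth_xmit_sketch(struct sk_buff *skb, struct adapter *adapter,
				   struct sge_eth_txq *txq)
{
	unsigned int ndesc;
	int credits;
	void *wr;

	/* Free descriptors the hardware has already consumed (line 1199). */
	reclaim_completed_tx(adapter, &txq->q, true);

	/* Reserve room for this packet; stop the queue if credits run out
	 * (lines 1208, 1217).
	 */
	ndesc = calc_tx_descs(skb);		/* assumed helper, not in the listing */
	credits = txq_avail(&txq->q) - ndesc;
	if (unlikely(credits < 0)) {
		txq_stop(txq);
		return NETDEV_TX_BUSY;
	}

	/* The work request is written at the current producer index
	 * (line 1257); offload counters such as txq->tso, txq->tx_cso and
	 * txq->vlan_ins are updated while it is filled in.
	 */
	wr = (void *)&txq->q.desc[txq->q.pidx];
	(void)wr;				/* WR construction elided */

	/* Publish the new producer index and ring the doorbell
	 * (lines 1433, 1435).
	 */
	txq_advance(&txq->q, ndesc);
	ring_tx_db(adapter, &txq->q, ndesc);
	return NETDEV_TX_OK;
}
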
2105 struct sge_eth_txq *txq = &s->ethtxq[i]; in sge_tx_timer_cb() local
2107 if (reclaimable(&txq->q) && __netif_tx_trylock(txq->txq)) { in sge_tx_timer_cb()
2108 int avail = reclaimable(&txq->q); in sge_tx_timer_cb()
2113 free_tx_desc(adapter, &txq->q, avail, true); in sge_tx_timer_cb()
2114 txq->q.in_use -= avail; in sge_tx_timer_cb()
2115 __netif_tx_unlock(txq->txq); in sge_tx_timer_cb()
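
In the TX timer callback the driver reclaims completed descriptors opportunistically: it only proceeds if there is something to reclaim and the netdev TX lock can be taken with a trylock, so the timer never blocks the transmit path. Per-queue body reconstructed from lines 2105-2115 above (lines 2109-2112 are not part of this listing and are left out):

/* Opportunistic TX-descriptor reclaim for one Ethernet TX queue. */
static void reclaim_eth_txq_sketch(struct adapter *adapter,
				   struct sge_eth_txq *txq)
{
	if (reclaimable(&txq->q) && __netif_tx_trylock(txq->txq)) {
		int avail = reclaimable(&txq->q);

		/* Free the completed descriptors and their buffers, then
		 * shrink the in-use count to match.
		 */
		free_tx_desc(adapter, &txq->q, avail, true);
		txq->q.in_use -= avail;
		__netif_tx_unlock(txq->txq);
	}
}
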
2360 int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq, in t4vf_sge_alloc_eth_txq() argument
2373 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); in t4vf_sge_alloc_eth_txq()
2379 txq->q.desc = alloc_ring(adapter->pdev_dev, txq->q.size, in t4vf_sge_alloc_eth_txq()
2382 &txq->q.phys_addr, &txq->q.sdesc, s->stat_len); in t4vf_sge_alloc_eth_txq()
2383 if (!txq->q.desc) in t4vf_sge_alloc_eth_txq()
2413 cmd.eqaddr = cpu_to_be64(txq->q.phys_addr); in t4vf_sge_alloc_eth_txq()
2425 kfree(txq->q.sdesc); in t4vf_sge_alloc_eth_txq()
2426 txq->q.sdesc = NULL; in t4vf_sge_alloc_eth_txq()
2429 txq->q.desc, txq->q.phys_addr); in t4vf_sge_alloc_eth_txq()
2430 txq->q.desc = NULL; in t4vf_sge_alloc_eth_txq()
2434 txq->q.in_use = 0; in t4vf_sge_alloc_eth_txq()
2435 txq->q.cidx = 0; in t4vf_sge_alloc_eth_txq()
2436 txq->q.pidx = 0; in t4vf_sge_alloc_eth_txq()
2437 txq->q.stat = (void *)&txq->q.desc[txq->q.size]; in t4vf_sge_alloc_eth_txq()
2438 txq->q.cntxt_id = FW_EQ_ETH_CMD_EQID_G(be32_to_cpu(rpl.eqid_pkd)); in t4vf_sge_alloc_eth_txq()
2439 txq->q.bar2_addr = bar2_address(adapter, in t4vf_sge_alloc_eth_txq()
2440 txq->q.cntxt_id, in t4vf_sge_alloc_eth_txq()
2442 &txq->q.bar2_qid); in t4vf_sge_alloc_eth_txq()
2443 txq->q.abs_id = in t4vf_sge_alloc_eth_txq()
2445 txq->txq = devq; in t4vf_sge_alloc_eth_txq()
2446 txq->tso = 0; in t4vf_sge_alloc_eth_txq()
2447 txq->tx_cso = 0; in t4vf_sge_alloc_eth_txq()
2448 txq->vlan_ins = 0; in t4vf_sge_alloc_eth_txq()
2449 txq->q.stops = 0; in t4vf_sge_alloc_eth_txq()
2450 txq->q.restarts = 0; in t4vf_sge_alloc_eth_txq()
2451 txq->mapping_err = 0; in t4vf_sge_alloc_eth_txq()
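
t4vf_sge_alloc_eth_txq() sizes the ring including the status page (s->stat_len), allocates the descriptor ring and software descriptor state with alloc_ring(), asks the firmware to create the egress queue (unwinding the allocation at lines 2425-2430 if that fails), and only then initialises the software queue state. The initialisation reconstructed from lines 2434-2451 is sketched below; it assumes the firmware reply is a struct fw_eq_eth_cmd (suggested by the cmd/rpl field accesses in the listing), and the partly elided bar2_address() and abs_id assignments are not reproduced.

/* Software state of a freshly created Ethernet TX queue. */
static void init_eth_txq_state_sketch(struct sge_eth_txq *txq,
				      const struct fw_eq_eth_cmd *rpl,
				      struct netdev_queue *devq)
{
	txq->q.in_use = 0;
	txq->q.cidx = 0;				/* consumer index */
	txq->q.pidx = 0;				/* producer index */
	txq->q.stat = (void *)&txq->q.desc[txq->q.size]; /* status page after the ring */
	txq->q.cntxt_id = FW_EQ_ETH_CMD_EQID_G(be32_to_cpu(rpl->eqid_pkd));
	txq->txq = devq;				/* backing netdev TX queue */
	txq->tso = 0;					/* offload counters */
	txq->tx_cso = 0;
	txq->vlan_ins = 0;
	txq->q.stops = 0;				/* flow-control counters */
	txq->q.restarts = 0;
	txq->mapping_err = 0;				/* DMA mapping failures */
}
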
2512 struct sge_eth_txq *txq = s->ethtxq; in t4vf_free_sge_resources() local
2517 for (qs = 0; qs < adapter->sge.ethqsets; qs++, rxq++, txq++) { in t4vf_free_sge_resources()
2520 if (txq->q.desc) { in t4vf_free_sge_resources()
2521 t4vf_eth_eq_free(adapter, txq->q.cntxt_id); in t4vf_free_sge_resources()
2522 free_tx_desc(adapter, &txq->q, txq->q.in_use, true); in t4vf_free_sge_resources()
2523 kfree(txq->q.sdesc); in t4vf_free_sge_resources()
2524 free_txq(adapter, &txq->q); in t4vf_free_sge_resources()
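
Teardown in t4vf_free_sge_resources() mirrors the allocation in reverse: release the hardware egress queue first, then free any skbs still attached to unreclaimed descriptors, then the software descriptor array, and finally the descriptor ring itself. Per-queue body reconstructed from lines 2520-2524 above:

/* Free one Ethernet TX queue, if it was ever allocated. */
static void free_eth_txq_sketch(struct adapter *adapter,
				struct sge_eth_txq *txq)
{
	if (txq->q.desc) {
		t4vf_eth_eq_free(adapter, txq->q.cntxt_id);	/* hardware EQ */
		free_tx_desc(adapter, &txq->q, txq->q.in_use, true); /* pending skbs */
		kfree(txq->q.sdesc);				/* software descriptors */
		free_txq(adapter, &txq->q);			/* DMA descriptor ring */
	}
}
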