Lines Matching refs:q (identifier cross-reference: each entry gives the source line number, the matching line of code, and the containing function; the trailing argument/local tags are the indexer's annotations)

217 static inline unsigned int txq_avail(const struct sge_txq *q)  in txq_avail()  argument
219 return q->size - 1 - q->in_use; in txq_avail()
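
txq_avail() above reports the free space in a TX descriptor ring as size - 1 - in_use; one slot is deliberately kept unused so a completely full ring can be told apart from an empty one. A minimal standalone sketch of the same rule, using a hypothetical toy_ring type rather than the driver's struct sge_txq:

#include <assert.h>

/* Hypothetical stand-in for a descriptor ring; not the driver's struct. */
struct toy_ring {
	unsigned int size;   /* total descriptor slots             */
	unsigned int in_use; /* slots currently handed to hardware */
};

/* Free slots; one slot stays unused so "full" and "empty" differ. */
static inline unsigned int toy_ring_avail(const struct toy_ring *r)
{
	return r->size - 1 - r->in_use;
}

int main(void)
{
	struct toy_ring r = { .size = 1024, .in_use = 1000 };

	assert(toy_ring_avail(&r) == 23);
	return 0;
}
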
312 const struct ulptx_sgl *sgl, const struct sge_txq *q) in unmap_sgl() argument
331 if (likely((u8 *)(p + 1) <= (u8 *)q->stat)) { in unmap_sgl()
337 } else if ((u8 *)p == (u8 *)q->stat) { in unmap_sgl()
338 p = (const struct ulptx_sge_pair *)q->desc; in unmap_sgl()
340 } else if ((u8 *)p + 8 == (u8 *)q->stat) { in unmap_sgl()
341 const __be64 *addr = (const __be64 *)q->desc; in unmap_sgl()
349 const __be64 *addr = (const __be64 *)q->desc; in unmap_sgl()
361 if ((u8 *)p == (u8 *)q->stat) in unmap_sgl()
362 p = (const struct ulptx_sge_pair *)q->desc; in unmap_sgl()
363 addr = (u8 *)p + 16 <= (u8 *)q->stat ? p->addr[0] : in unmap_sgl()
364 *(const __be64 *)q->desc; in unmap_sgl()
380 static void free_tx_desc(struct adapter *adap, struct sge_txq *q, in free_tx_desc() argument
384 unsigned int cidx = q->cidx; in free_tx_desc()
387 d = &q->sdesc[cidx]; in free_tx_desc()
391 unmap_sgl(dev, d->skb, d->sgl, q); in free_tx_desc()
396 if (++cidx == q->size) { in free_tx_desc()
398 d = q->sdesc; in free_tx_desc()
401 q->cidx = cidx; in free_tx_desc()
407 static inline int reclaimable(const struct sge_txq *q) in reclaimable() argument
409 int hw_cidx = ntohs(q->stat->cidx); in reclaimable()
410 hw_cidx -= q->cidx; in reclaimable()
411 return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx; in reclaimable()
424 static inline void reclaim_completed_tx(struct adapter *adap, struct sge_txq *q, in reclaim_completed_tx() argument
427 int avail = reclaimable(q); in reclaim_completed_tx()
437 free_tx_desc(adap, q, avail, unmap); in reclaim_completed_tx()
438 q->in_use -= avail; in reclaim_completed_tx()
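
reclaimable() measures how far the hardware consumer index (read from the status page via ntohs(q->stat->cidx)) has run ahead of the software cidx, wrapping modulo the ring size; reclaim_completed_tx() then frees that many descriptors, and the driver bounds how much it frees per call so the queue is not held up too long (the cap itself is not among the lines matched above). A hedged sketch of the distance computation and an invented TOY_MAX_RECLAIM cap:

/* Hypothetical sketch of "how many descriptors has hardware finished?". */
#define TOY_MAX_RECLAIM 16 /* invented per-call cap, analogous to the driver's batch limit */

static int toy_reclaimable(unsigned int hw_cidx, unsigned int sw_cidx,
			   unsigned int size)
{
	int delta = (int)hw_cidx - (int)sw_cidx;

	/* hw_cidx may already have wrapped past the end of the ring. */
	return delta < 0 ? delta + (int)size : delta;
}

static int toy_reclaim_batch(unsigned int hw_cidx, unsigned int sw_cidx,
			     unsigned int size)
{
	int avail = toy_reclaimable(hw_cidx, sw_cidx, size);

	/* Reclaim in bounded batches so a single call never runs too long. */
	return avail > TOY_MAX_RECLAIM ? TOY_MAX_RECLAIM : avail;
}

int main(void)
{
	/* hw index wrapped: hw=3, sw=1020 on a 1024-entry ring -> 7 done. */
	if (toy_reclaimable(3, 1020, 1024) != 7)
		return 1;
	/* a large backlog is still reclaimed only TOY_MAX_RECLAIM at a time. */
	return toy_reclaim_batch(100, 0, 1024) == TOY_MAX_RECLAIM ? 0 : 1;
}
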
482 static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n) in free_rx_bufs() argument
485 struct rx_sw_desc *d = &q->sdesc[q->cidx]; in free_rx_bufs()
493 if (++q->cidx == q->size) in free_rx_bufs()
494 q->cidx = 0; in free_rx_bufs()
495 q->avail--; in free_rx_bufs()
510 static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q) in unmap_rx_buf() argument
512 struct rx_sw_desc *d = &q->sdesc[q->cidx]; in unmap_rx_buf()
518 if (++q->cidx == q->size) in unmap_rx_buf()
519 q->cidx = 0; in unmap_rx_buf()
520 q->avail--; in unmap_rx_buf()
523 static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q) in ring_fl_db() argument
525 if (q->pend_cred >= 8) { in ring_fl_db()
529 val |= PIDX_V(q->pend_cred / 8); in ring_fl_db()
531 val |= PIDX_T5_V(q->pend_cred / 8); in ring_fl_db()
542 if (unlikely(q->bar2_addr == NULL)) { in ring_fl_db()
544 val | QID_V(q->cntxt_id)); in ring_fl_db()
546 writel(val | QID_V(q->bar2_qid), in ring_fl_db()
547 q->bar2_addr + SGE_UDB_KDOORBELL); in ring_fl_db()
554 q->pend_cred &= 7; in ring_fl_db()
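
ring_fl_db() only tells hardware about new free-list buffers in whole groups of 8: the PIDX increment written to the doorbell is pend_cred / 8 (PIDX_V vs. PIDX_T5_V depending on chip generation), and pend_cred &= 7 keeps the remainder for a later call. A small sketch of that batching, with a made-up toy_doorbell() callback in place of the real writel()/BAR2 plumbing:

#include <stdio.h>

/* Hypothetical free-list credit state; the real fields live in struct sge_fl. */
struct toy_fl {
	unsigned int pend_cred; /* buffers added since the last doorbell */
};

/* Stand-in for the hardware doorbell write. */
static void toy_doorbell(unsigned int pidx_increment)
{
	printf("doorbell: advance PIDX by %u groups of 8 buffers\n",
	       pidx_increment);
}

static void toy_ring_fl_db(struct toy_fl *fl)
{
	if (fl->pend_cred >= 8) {
		toy_doorbell(fl->pend_cred / 8); /* whole groups only  */
		fl->pend_cred &= 7;              /* keep the remainder */
	}
}

int main(void)
{
	struct toy_fl fl = { .pend_cred = 21 };

	toy_ring_fl_db(&fl);            /* rings for 2 groups, keeps 5 */
	return fl.pend_cred == 5 ? 0 : 1;
}
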
579 static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n, in refill_fl() argument
585 unsigned int cred = q->avail; in refill_fl()
586 __be64 *d = &q->desc[q->pidx]; in refill_fl()
587 struct rx_sw_desc *sd = &q->sdesc[q->pidx]; in refill_fl()
591 if (test_bit(q->cntxt_id - adap->sge.egr_start, adap->sge.blocked_fl)) in refill_fl()
607 q->large_alloc_failed++; in refill_fl()
624 q->avail++; in refill_fl()
625 if (++q->pidx == q->size) { in refill_fl()
626 q->pidx = 0; in refill_fl()
627 sd = q->sdesc; in refill_fl()
628 d = q->desc; in refill_fl()
637 q->alloc_failed++; in refill_fl()
652 q->avail++; in refill_fl()
653 if (++q->pidx == q->size) { in refill_fl()
654 q->pidx = 0; in refill_fl()
655 sd = q->sdesc; in refill_fl()
656 d = q->desc; in refill_fl()
660 out: cred = q->avail - cred; in refill_fl()
661 q->pend_cred += cred; in refill_fl()
662 ring_fl_db(adap, q); in refill_fl()
664 if (unlikely(fl_starving(adap, q))) { in refill_fl()
666 set_bit(q->cntxt_id - adap->sge.egr_start, in refill_fl()
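
refill_fl() posts page buffers to the free list: each successful allocation bumps avail and the producer index pidx (wrapping to 0 at size), and at the out: label the number of buffers actually added is folded into pend_cred before ring_fl_db() is called. A hedged sketch of the producer-side loop, with a stub allocator in place of the real page and DMA-mapping work:

#include <stdbool.h>

/* Hypothetical free-list producer state (a stand-in for struct sge_fl). */
struct toy_fl {
	unsigned int size;      /* ring entries               */
	unsigned int pidx;      /* next slot to fill          */
	unsigned int avail;     /* buffers currently posted   */
	unsigned int pend_cred; /* credits not yet doorbelled */
};

/* Stub for "allocate a buffer and write its DMA address into the ring". */
static bool toy_alloc_buf(struct toy_fl *fl, unsigned int slot)
{
	(void)fl; (void)slot;
	return true; /* pretend allocation always succeeds */
}

static void toy_refill_fl(struct toy_fl *fl, int n)
{
	unsigned int cred = fl->avail; /* remember the starting fill level */

	while (n--) {
		if (!toy_alloc_buf(fl, fl->pidx))
			break;                 /* allocator failed; stop early */
		fl->avail++;
		if (++fl->pidx == fl->size)    /* wrap the producer index */
			fl->pidx = 0;
	}

	/* Only the buffers actually added become new doorbell credits. */
	fl->pend_cred += fl->avail - cred;
}

int main(void)
{
	struct toy_fl fl = { .size = 8, .pidx = 6, .avail = 4, .pend_cred = 0 };

	toy_refill_fl(&fl, 3);
	return (fl.pidx == 1 && fl.pend_cred == 3) ? 0 : 1;
}
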
850 static void write_sgl(const struct sk_buff *skb, struct sge_txq *q, in write_sgl() argument
879 to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge; in write_sgl()
892 if (unlikely((u8 *)end > (u8 *)q->stat)) { in write_sgl()
893 unsigned int part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1; in write_sgl()
897 part1 = (u8 *)end - (u8 *)q->stat; in write_sgl()
898 memcpy(q->desc, (u8 *)buf + part0, part1); in write_sgl()
899 end = (void *)q->desc + part1; in write_sgl()
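
write_sgl() builds the scatter/gather list directly in the ring when it fits, but when the list would run past the status page at q->stat it is staged in a bounce buffer and copied in two pieces: part0 up to q->stat and part1 from the start of q->desc. A hedged sketch of that split copy over a plain byte ring, with no ULPTX structures involved:

#include <string.h>

/* Copy "len" bytes into a ring of "ring_size" bytes starting at "offset",
 * wrapping to the beginning when the data would run past the end.
 * This mirrors the part0/part1 split in write_sgl(), on plain bytes.
 */
static void toy_copy_wrapped(unsigned char *ring, size_t ring_size,
			     size_t offset, const unsigned char *src,
			     size_t len)
{
	size_t part0 = ring_size - offset; /* room before the wrap point */

	if (len <= part0) {
		memcpy(ring + offset, src, len);        /* fits in one piece */
	} else {
		memcpy(ring + offset, src, part0);      /* tail of the ring  */
		memcpy(ring, src + part0, len - part0); /* continue at start */
	}
}

int main(void)
{
	unsigned char ring[16] = { 0 };
	const unsigned char msg[] = "abcdef";

	toy_copy_wrapped(ring, sizeof(ring), 13, msg, 6);
	/* 'a','b','c' land in slots 13..15, 'd','e','f' in slots 0..2. */
	return (ring[15] == 'c' && ring[0] == 'd') ? 0 : 1;
}
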
929 static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n) in ring_tx_db() argument
939 if (unlikely(q->bar2_addr == NULL)) { in ring_tx_db()
946 spin_lock_irqsave(&q->db_lock, flags); in ring_tx_db()
947 if (!q->db_disabled) in ring_tx_db()
949 QID_V(q->cntxt_id) | val); in ring_tx_db()
951 q->db_pidx_inc += n; in ring_tx_db()
952 q->db_pidx = q->pidx; in ring_tx_db()
953 spin_unlock_irqrestore(&q->db_lock, flags); in ring_tx_db()
969 if (n == 1 && q->bar2_qid == 0) { in ring_tx_db()
970 int index = (q->pidx in ring_tx_db()
971 ? (q->pidx - 1) in ring_tx_db()
972 : (q->size - 1)); in ring_tx_db()
973 u64 *wr = (u64 *)&q->desc[index]; in ring_tx_db()
976 (q->bar2_addr + SGE_UDB_WCDOORBELL), in ring_tx_db()
979 writel(val | QID_V(q->bar2_qid), in ring_tx_db()
980 q->bar2_addr + SGE_UDB_KDOORBELL); in ring_tx_db()
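
ring_tx_db() prefers the queue's BAR2 user doorbell and falls back to the kernel doorbell register when no BAR2 mapping exists; when exactly one descriptor was written and bar2_qid is 0, it can push that descriptor through the write-combined doorbell, which needs the index of the descriptor just produced, i.e. the slot before the current pidx with wrap-around. A tiny sketch of that index computation (hypothetical names, no MMIO):

/* Index of the most recently written descriptor, given the producer index
 * that now points one past it; wraps to the last slot when pidx is 0.
 */
static unsigned int toy_last_written(unsigned int pidx, unsigned int size)
{
	return pidx ? pidx - 1 : size - 1;
}

int main(void)
{
	return (toy_last_written(0, 1024) == 1023 &&
		toy_last_written(5, 1024) == 4) ? 0 : 1;
}
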
1008 static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q, in inline_tx_skb() argument
1012 int left = (void *)q->stat - pos; in inline_tx_skb()
1022 skb_copy_bits(skb, left, q->desc, skb->len - left); in inline_tx_skb()
1023 pos = (void *)q->desc + (skb->len - left); in inline_tx_skb()
1085 static void eth_txq_stop(struct sge_eth_txq *q) in eth_txq_stop() argument
1087 netif_tx_stop_queue(q->txq); in eth_txq_stop()
1088 q->q.stops++; in eth_txq_stop()
1091 static inline void txq_advance(struct sge_txq *q, unsigned int n) in txq_advance() argument
1093 q->in_use += n; in txq_advance()
1094 q->pidx += n; in txq_advance()
1095 if (q->pidx >= q->size) in txq_advance()
1096 q->pidx -= q->size; in txq_advance()
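
txq_advance() commits n freshly written descriptors: it grows in_use, then moves pidx forward and wraps it with a single subtraction rather than a modulo, which is safe because callers never advance by more than the ring size. A sketch under that assumption:

/* Hypothetical TX ring producer state. */
struct toy_txq {
	unsigned int size;
	unsigned int pidx;
	unsigned int in_use;
};

/* Advance by n <= size descriptors; one subtraction is enough to wrap. */
static void toy_txq_advance(struct toy_txq *q, unsigned int n)
{
	q->in_use += n;
	q->pidx += n;
	if (q->pidx >= q->size)
		q->pidx -= q->size;
}

int main(void)
{
	struct toy_txq q = { .size = 1024, .pidx = 1022, .in_use = 10 };

	toy_txq_advance(&q, 4);
	return (q.pidx == 2 && q.in_use == 14) ? 0 : 1;
}
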
1145 struct sge_eth_txq *q; in t4_eth_xmit() local
1176 q = &adap->sge.ethtxq[qidx + pi->first_qset]; in t4_eth_xmit()
1178 reclaim_completed_tx(adap, &q->q, true); in t4_eth_xmit()
1189 credits = txq_avail(&q->q) - ndesc; in t4_eth_xmit()
1192 eth_txq_stop(q); in t4_eth_xmit()
1204 q->mapping_err++; in t4_eth_xmit()
1210 eth_txq_stop(q); in t4_eth_xmit()
1214 wr = (void *)&q->q.desc[q->q.pidx]; in t4_eth_xmit()
1253 q->tso++; in t4_eth_xmit()
1254 q->tx_cso += ssi->gso_segs; in t4_eth_xmit()
1263 q->tx_cso++; in t4_eth_xmit()
1268 q->vlan_ins++; in t4_eth_xmit()
1281 ctrl0 |= TXPKT_OVLAN_IDX_V(q->dcb_prio); in t4_eth_xmit()
1283 ctrl0 |= TXPKT_T5_OVLAN_IDX_V(q->dcb_prio); in t4_eth_xmit()
1291 inline_tx_skb(skb, &q->q, cpl + 1); in t4_eth_xmit()
1296 write_sgl(skb, &q->q, (struct ulptx_sgl *)(cpl + 1), end, 0, in t4_eth_xmit()
1300 last_desc = q->q.pidx + ndesc - 1; in t4_eth_xmit()
1301 if (last_desc >= q->q.size) in t4_eth_xmit()
1302 last_desc -= q->q.size; in t4_eth_xmit()
1303 q->q.sdesc[last_desc].skb = skb; in t4_eth_xmit()
1304 q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)(cpl + 1); in t4_eth_xmit()
1307 txq_advance(&q->q, ndesc); in t4_eth_xmit()
1309 ring_tx_db(adap, &q->q, ndesc); in t4_eth_xmit()
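
t4_eth_xmit() is the ethernet transmit path: it reclaims completed work first, checks that enough descriptor credits remain for this packet, stops the queue (eth_txq_stop()) when they do not or when the remainder runs low, then builds the work request, records the skb and SGL in the last descriptor for later unmapping, advances the ring and rings the doorbell. A hedged sketch of just the credit/backpressure decision, with an invented TOY_STOP_THRES standing in for the driver's own stop threshold:

#include <stdbool.h>

#define TOY_STOP_THRES 4 /* invented; the driver has its own ETH TXQ threshold */

/* Decide whether a packet needing "ndesc" descriptors may be sent, and
 * whether the software queue should be stopped afterwards to apply
 * backpressure before the ring runs completely dry.
 */
static bool toy_tx_admit(unsigned int avail, unsigned int ndesc,
			 bool *stop_queue)
{
	int credits = (int)avail - (int)ndesc; /* what would remain */

	if (credits < 0) {
		*stop_queue = true;  /* cannot send now; stop and retry later */
		return false;
	}
	*stop_queue = (credits < TOY_STOP_THRES); /* send, but maybe stop */
	return true;
}

int main(void)
{
	bool stop = false;

	if (!toy_tx_admit(10, 3, &stop) || stop)  /* plenty of room  */
		return 1;
	if (!toy_tx_admit(5, 3, &stop) || !stop)  /* send, then stop */
		return 1;
	if (toy_tx_admit(2, 3, &stop) || !stop)   /* refuse outright */
		return 1;
	return 0;
}
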
1321 static inline void reclaim_completed_tx_imm(struct sge_txq *q) in reclaim_completed_tx_imm() argument
1323 int hw_cidx = ntohs(q->stat->cidx); in reclaim_completed_tx_imm()
1324 int reclaim = hw_cidx - q->cidx; in reclaim_completed_tx_imm()
1327 reclaim += q->size; in reclaim_completed_tx_imm()
1329 q->in_use -= reclaim; in reclaim_completed_tx_imm()
1330 q->cidx = hw_cidx; in reclaim_completed_tx_imm()
1354 static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr) in ctrlq_check_stop() argument
1356 reclaim_completed_tx_imm(&q->q); in ctrlq_check_stop()
1357 if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) { in ctrlq_check_stop()
1359 q->q.stops++; in ctrlq_check_stop()
1360 q->full = 1; in ctrlq_check_stop()
1372 static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb) in ctrl_xmit() argument
1384 spin_lock(&q->sendq.lock); in ctrl_xmit()
1386 if (unlikely(q->full)) { in ctrl_xmit()
1388 __skb_queue_tail(&q->sendq, skb); in ctrl_xmit()
1389 spin_unlock(&q->sendq.lock); in ctrl_xmit()
1393 wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx]; in ctrl_xmit()
1394 inline_tx_skb(skb, &q->q, wr); in ctrl_xmit()
1396 txq_advance(&q->q, ndesc); in ctrl_xmit()
1397 if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) in ctrl_xmit()
1398 ctrlq_check_stop(q, wr); in ctrl_xmit()
1400 ring_tx_db(q->adap, &q->q, ndesc); in ctrl_xmit()
1401 spin_unlock(&q->sendq.lock); in ctrl_xmit()
1417 struct sge_ctrl_txq *q = (struct sge_ctrl_txq *)data; in restart_ctrlq() local
1419 spin_lock(&q->sendq.lock); in restart_ctrlq()
1420 reclaim_completed_tx_imm(&q->q); in restart_ctrlq()
1421 BUG_ON(txq_avail(&q->q) < TXQ_STOP_THRES); /* q should be empty */ in restart_ctrlq()
1423 while ((skb = __skb_dequeue(&q->sendq)) != NULL) { in restart_ctrlq()
1431 wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx]; in restart_ctrlq()
1432 txq_advance(&q->q, ndesc); in restart_ctrlq()
1433 spin_unlock(&q->sendq.lock); in restart_ctrlq()
1435 inline_tx_skb(skb, &q->q, wr); in restart_ctrlq()
1438 if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) { in restart_ctrlq()
1439 unsigned long old = q->q.stops; in restart_ctrlq()
1441 ctrlq_check_stop(q, wr); in restart_ctrlq()
1442 if (q->q.stops != old) { /* suspended anew */ in restart_ctrlq()
1443 spin_lock(&q->sendq.lock); in restart_ctrlq()
1448 ring_tx_db(q->adap, &q->q, written); in restart_ctrlq()
1451 spin_lock(&q->sendq.lock); in restart_ctrlq()
1453 q->full = 0; in restart_ctrlq()
1455 ring_tx_db(q->adap, &q->q, written); in restart_ctrlq()
1456 spin_unlock(&q->sendq.lock); in restart_ctrlq()
1519 static void txq_stop_maperr(struct sge_ofld_txq *q) in txq_stop_maperr() argument
1521 q->mapping_err++; in txq_stop_maperr()
1522 q->q.stops++; in txq_stop_maperr()
1523 set_bit(q->q.cntxt_id - q->adap->sge.egr_start, in txq_stop_maperr()
1524 q->adap->sge.txq_maperr); in txq_stop_maperr()
1535 static void ofldtxq_stop(struct sge_ofld_txq *q, struct sk_buff *skb) in ofldtxq_stop() argument
1540 q->q.stops++; in ofldtxq_stop()
1541 q->full = 1; in ofldtxq_stop()
1551 static void service_ofldq(struct sge_ofld_txq *q) in service_ofldq() argument
1559 while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) { in service_ofldq()
1564 spin_unlock(&q->sendq.lock); in service_ofldq()
1566 reclaim_completed_tx(q->adap, &q->q, false); in service_ofldq()
1570 credits = txq_avail(&q->q) - ndesc; in service_ofldq()
1573 ofldtxq_stop(q, skb); in service_ofldq()
1575 pos = (u64 *)&q->q.desc[q->q.pidx]; in service_ofldq()
1577 inline_tx_skb(skb, &q->q, pos); in service_ofldq()
1578 else if (map_skb(q->adap->pdev_dev, skb, in service_ofldq()
1580 txq_stop_maperr(q); in service_ofldq()
1581 spin_lock(&q->sendq.lock); in service_ofldq()
1587 write_sgl(skb, &q->q, (void *)pos + hdr_len, in service_ofldq()
1591 skb->dev = q->adap->port[0]; in service_ofldq()
1594 last_desc = q->q.pidx + ndesc - 1; in service_ofldq()
1595 if (last_desc >= q->q.size) in service_ofldq()
1596 last_desc -= q->q.size; in service_ofldq()
1597 q->q.sdesc[last_desc].skb = skb; in service_ofldq()
1600 txq_advance(&q->q, ndesc); in service_ofldq()
1603 ring_tx_db(q->adap, &q->q, written); in service_ofldq()
1607 spin_lock(&q->sendq.lock); in service_ofldq()
1608 __skb_unlink(skb, &q->sendq); in service_ofldq()
1613 ring_tx_db(q->adap, &q->q, written); in service_ofldq()
1623 static int ofld_xmit(struct sge_ofld_txq *q, struct sk_buff *skb) in ofld_xmit() argument
1626 spin_lock(&q->sendq.lock); in ofld_xmit()
1627 __skb_queue_tail(&q->sendq, skb); in ofld_xmit()
1628 if (q->sendq.qlen == 1) in ofld_xmit()
1629 service_ofldq(q); in ofld_xmit()
1630 spin_unlock(&q->sendq.lock); in ofld_xmit()
1642 struct sge_ofld_txq *q = (struct sge_ofld_txq *)data; in restart_ofldq() local
1644 spin_lock(&q->sendq.lock); in restart_ofldq()
1645 q->full = 0; /* the queue actually is completely empty now */ in restart_ofldq()
1646 service_ofldq(q); in restart_ofldq()
1647 spin_unlock(&q->sendq.lock); in restart_ofldq()
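
ctrl_xmit(), ofld_xmit() and their restart handlers share a pattern: senders append to a software sendq under its lock, only the sender that made the queue non-empty (qlen == 1 after the append) drains it via service_ofldq(), and when the hardware ring fills, full is set until a completion re-arms the drain. A hedged, single-threaded sketch of that queue/drain structure, using a plain ring of integers instead of sk_buffs; the driver's sendq lock and concurrency are only noted in comments:

#include <stdio.h>

/* Minimal FIFO of integers standing in for the skb sendq. */
#define TOY_QLEN 8

struct toy_ofldq {
	int items[TOY_QLEN];
	unsigned int head, tail, qlen;
	int full; /* set when the hardware ring had no room */
};

static void toy_service(struct toy_ofldq *q)
{
	/* Drain everything queued so far; in the driver this runs with the
	 * sendq lock dropped around the actual descriptor writes.
	 */
	while (q->qlen && !q->full) {
		printf("sending item %d\n", q->items[q->head]);
		q->head = (q->head + 1) % TOY_QLEN;
		q->qlen--;
	}
}

static void toy_xmit(struct toy_ofldq *q, int item)
{
	/* In the driver this append happens under q->sendq.lock. */
	q->items[q->tail] = item;
	q->tail = (q->tail + 1) % TOY_QLEN;
	q->qlen++;

	/* Only the sender that made the queue non-empty starts draining;
	 * anyone who appended behind an in-progress drain just returns.
	 */
	if (q->qlen == 1)
		toy_service(q);
}

int main(void)
{
	struct toy_ofldq q = { .head = 0, .tail = 0, .qlen = 0, .full = 0 };

	toy_xmit(&q, 1);
	toy_xmit(&q, 2);
	return 0;
}
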
1897 int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, in t4_ethrx_handler() argument
1903 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq); in t4_ethrx_handler()
1904 struct sge *s = &q->adap->sge; in t4_ethrx_handler()
1905 int cpl_trace_pkt = is_t4(q->adap->params.chip) ? in t4_ethrx_handler()
1910 return handle_trace_pkt(q->adap, si); in t4_ethrx_handler()
1914 (q->netdev->features & NETIF_F_RXCSUM); in t4_ethrx_handler()
1916 !(cxgb_poll_busy_polling(q)) && in t4_ethrx_handler()
1917 (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) { in t4_ethrx_handler()
1930 skb->protocol = eth_type_trans(skb, q->netdev); in t4_ethrx_handler()
1931 skb_record_rx_queue(skb, q->idx); in t4_ethrx_handler()
1940 cxgb4_sgetim_to_hwtstamp(q->adap, skb_hwtstamps(skb), in t4_ethrx_handler()
1974 skb_mark_napi_id(skb, &q->napi); in t4_ethrx_handler()
1994 static void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q, in restore_rx_bufs() argument
2000 if (q->cidx == 0) in restore_rx_bufs()
2001 q->cidx = q->size - 1; in restore_rx_bufs()
2003 q->cidx--; in restore_rx_bufs()
2004 d = &q->sdesc[q->cidx]; in restore_rx_bufs()
2007 q->avail++; in restore_rx_bufs()
2020 const struct sge_rspq *q) in is_new_response() argument
2022 return (r->type_gen >> RSPD_GEN_S) == q->gen; in is_new_response()
2031 static inline void rspq_next(struct sge_rspq *q) in rspq_next() argument
2033 q->cur_desc = (void *)q->cur_desc + q->iqe_len; in rspq_next()
2034 if (unlikely(++q->cidx == q->size)) { in rspq_next()
2035 q->cidx = 0; in rspq_next()
2036 q->gen ^= 1; in rspq_next()
2037 q->cur_desc = q->desc; in rspq_next()
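
The response (ingress) queues use a generation bit rather than a hardware-written index: is_new_response() accepts an entry only when its generation matches q->gen, and rspq_next() flips q->gen every time cidx wraps back to the start of the ring. A sketch of that handshake over a toy descriptor array (hypothetical types, not the driver's rsp_ctrl layout):

/* Hypothetical response-queue entry and consumer state. */
struct toy_rsp {
	unsigned int gen; /* generation bit written by the producer */
	int data;
};

struct toy_rspq {
	struct toy_rsp *desc;
	unsigned int size, cidx, gen;
};

/* An entry is new only if its generation matches the queue's current one. */
static int toy_is_new(const struct toy_rspq *q)
{
	return q->desc[q->cidx].gen == q->gen;
}

/* Consume one entry; flip the expected generation each time we wrap. */
static void toy_rspq_next(struct toy_rspq *q)
{
	if (++q->cidx == q->size) {
		q->cidx = 0;
		q->gen ^= 1;
	}
}

int main(void)
{
	struct toy_rsp ring[4] = {
		{ .gen = 1, .data = 10 }, { .gen = 1, .data = 11 },
		{ .gen = 0, .data = 0 },  { .gen = 0, .data = 0 },
	};
	struct toy_rspq q = { .desc = ring, .size = 4, .cidx = 0, .gen = 1 };
	int seen = 0;

	while (toy_is_new(&q)) { /* stops at the first stale entry */
		seen++;
		toy_rspq_next(&q);
	}
	return seen == 2 ? 0 : 1;
}
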
2054 static int process_responses(struct sge_rspq *q, int budget) in process_responses() argument
2059 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq); in process_responses()
2060 struct adapter *adapter = q->adap; in process_responses()
2064 rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc)); in process_responses()
2065 if (!is_new_response(rc, q)) in process_responses()
2077 if (likely(q->offset > 0)) { in process_responses()
2078 free_rx_bufs(q->adap, &rxq->fl, 1); in process_responses()
2079 q->offset = 0; in process_responses()
2090 fp->offset = q->offset; in process_responses()
2095 unmap_rx_buf(q->adap, &rxq->fl); in process_responses()
2104 dma_sync_single_for_cpu(q->adap->pdev_dev, in process_responses()
2113 ret = q->handler(q, q->cur_desc, &si); in process_responses()
2115 q->offset += ALIGN(fp->size, s->fl_align); in process_responses()
2119 ret = q->handler(q, q->cur_desc, NULL); in process_responses()
2121 ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN); in process_responses()
2126 q->next_intr_params = QINTR_TIMER_IDX_V(NOMEM_TMR_IDX); in process_responses()
2130 rspq_next(q); in process_responses()
2134 if (q->offset >= 0 && rxq->fl.size - rxq->fl.avail >= 16) in process_responses()
2135 __refill_fl(q->adap, &rxq->fl); in process_responses()
2142 struct sge_rspq *q = container_of(napi, struct sge_rspq, napi); in cxgb_busy_poll() local
2146 if (!cxgb_poll_lock_poll(q)) in cxgb_busy_poll()
2149 work_done = process_responses(q, 4); in cxgb_busy_poll()
2151 q->next_intr_params = params; in cxgb_busy_poll()
2157 if (unlikely(!q->bar2_addr)) in cxgb_busy_poll()
2158 t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A), in cxgb_busy_poll()
2159 val | INGRESSQID_V((u32)q->cntxt_id)); in cxgb_busy_poll()
2161 writel(val | INGRESSQID_V(q->bar2_qid), in cxgb_busy_poll()
2162 q->bar2_addr + SGE_UDB_GTS); in cxgb_busy_poll()
2166 cxgb_poll_unlock_poll(q); in cxgb_busy_poll()
2185 struct sge_rspq *q = container_of(napi, struct sge_rspq, napi); in napi_rx_handler() local
2189 if (!cxgb_poll_lock_napi(q)) in napi_rx_handler()
2192 work_done = process_responses(q, budget); in napi_rx_handler()
2197 timer_index = QINTR_TIMER_IDX_G(q->next_intr_params); in napi_rx_handler()
2199 if (q->adaptive_rx) { in napi_rx_handler()
2207 q->next_intr_params = in napi_rx_handler()
2210 params = q->next_intr_params; in napi_rx_handler()
2212 params = q->next_intr_params; in napi_rx_handler()
2213 q->next_intr_params = q->intr_params; in napi_rx_handler()
2223 if (unlikely(q->bar2_addr == NULL)) { in napi_rx_handler()
2224 t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A), in napi_rx_handler()
2225 val | INGRESSQID_V((u32)q->cntxt_id)); in napi_rx_handler()
2227 writel(val | INGRESSQID_V(q->bar2_qid), in napi_rx_handler()
2228 q->bar2_addr + SGE_UDB_GTS); in napi_rx_handler()
2231 cxgb_poll_unlock_napi(q); in napi_rx_handler()
2240 struct sge_rspq *q = cookie; in t4_sge_intr_msix() local
2242 napi_schedule(&q->napi); in t4_sge_intr_msix()
2254 struct sge_rspq *q = &adap->sge.intrq; in process_intrq() local
2259 rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc)); in process_intrq()
2260 if (!is_new_response(rc, q)) in process_intrq()
2271 rspq_next(q); in process_intrq()
2274 val = CIDXINC_V(credits) | SEINTARM_V(q->intr_params); in process_intrq()
2279 if (unlikely(q->bar2_addr == NULL)) { in process_intrq()
2281 val | INGRESSQID_V(q->cntxt_id)); in process_intrq()
2283 writel(val | INGRESSQID_V(q->bar2_qid), in process_intrq()
2284 q->bar2_addr + SGE_UDB_GTS); in process_intrq()
2393 struct sge_eth_txq *q = &s->ethtxq[i]; in sge_tx_timer_cb() local
2395 if (q->q.in_use && in sge_tx_timer_cb()
2396 time_after_eq(jiffies, q->txq->trans_start + HZ / 100) && in sge_tx_timer_cb()
2397 __netif_tx_trylock(q->txq)) { in sge_tx_timer_cb()
2398 int avail = reclaimable(&q->q); in sge_tx_timer_cb()
2404 free_tx_desc(adap, &q->q, avail, true); in sge_tx_timer_cb()
2405 q->q.in_use -= avail; in sge_tx_timer_cb()
2408 __netif_tx_unlock(q->txq); in sge_tx_timer_cb()
2622 static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id) in init_txq() argument
2624 q->cntxt_id = id; in init_txq()
2625 q->bar2_addr = bar2_address(adap, in init_txq()
2626 q->cntxt_id, in init_txq()
2628 &q->bar2_qid); in init_txq()
2629 q->in_use = 0; in init_txq()
2630 q->cidx = q->pidx = 0; in init_txq()
2631 q->stops = q->restarts = 0; in init_txq()
2632 q->stat = (void *)&q->desc[q->size]; in init_txq()
2633 spin_lock_init(&q->db_lock); in init_txq()
2634 adap->sge.egr_map[id - adap->sge.egr_start] = q; in init_txq()
2647 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); in t4_sge_alloc_eth_txq()
2649 txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size, in t4_sge_alloc_eth_txq()
2651 &txq->q.phys_addr, &txq->q.sdesc, s->stat_len, in t4_sge_alloc_eth_txq()
2653 if (!txq->q.desc) in t4_sge_alloc_eth_txq()
2674 c.eqaddr = cpu_to_be64(txq->q.phys_addr); in t4_sge_alloc_eth_txq()
2678 kfree(txq->q.sdesc); in t4_sge_alloc_eth_txq()
2679 txq->q.sdesc = NULL; in t4_sge_alloc_eth_txq()
2682 txq->q.desc, txq->q.phys_addr); in t4_sge_alloc_eth_txq()
2683 txq->q.desc = NULL; in t4_sge_alloc_eth_txq()
2687 init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_G(ntohl(c.eqid_pkd))); in t4_sge_alloc_eth_txq()
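
The TX queue allocators size the ring as q.size descriptors plus a trailing status page (s->stat_len bytes, expressed as stat_len / sizeof(struct tx_desc) extra entries), and init_txq() then points q->stat just past the last real descriptor; that is why so many routines above compare pointers against q->stat to detect the wrap point. A hedged sketch of that layout arithmetic with invented sizes, using calloc in place of the DMA-coherent ring allocation:

#include <stdlib.h>

/* Invented descriptor and status-page sizes, for illustration only. */
struct toy_desc { unsigned char bytes[64]; };
#define TOY_STAT_LEN 128 /* status page appended after the descriptors */

struct toy_txq {
	struct toy_desc *desc;
	void *stat;       /* points just past the last real descriptor */
	unsigned int size;
};

static int toy_alloc_txq(struct toy_txq *q, unsigned int size)
{
	/* Ring memory = descriptors + trailing status page. */
	unsigned int nentries = size + TOY_STAT_LEN / sizeof(struct toy_desc);

	q->desc = calloc(nentries, sizeof(struct toy_desc));
	if (!q->desc)
		return -1;
	q->size = size;
	q->stat = &q->desc[size]; /* wrap-detection sentinel used by TX paths */
	return 0;
}

int main(void)
{
	struct toy_txq q;

	if (toy_alloc_txq(&q, 1024))
		return 1;
	free(q.desc);
	return 0;
}
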
2704 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); in t4_sge_alloc_ctrl_txq()
2706 txq->q.desc = alloc_ring(adap->pdev_dev, nentries, in t4_sge_alloc_ctrl_txq()
2707 sizeof(struct tx_desc), 0, &txq->q.phys_addr, in t4_sge_alloc_ctrl_txq()
2709 if (!txq->q.desc) in t4_sge_alloc_ctrl_txq()
2729 c.eqaddr = cpu_to_be64(txq->q.phys_addr); in t4_sge_alloc_ctrl_txq()
2735 txq->q.desc, txq->q.phys_addr); in t4_sge_alloc_ctrl_txq()
2736 txq->q.desc = NULL; in t4_sge_alloc_ctrl_txq()
2740 init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_G(ntohl(c.cmpliqid_eqid))); in t4_sge_alloc_ctrl_txq()
2757 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); in t4_sge_alloc_ofld_txq()
2759 txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size, in t4_sge_alloc_ofld_txq()
2761 &txq->q.phys_addr, &txq->q.sdesc, s->stat_len, in t4_sge_alloc_ofld_txq()
2763 if (!txq->q.desc) in t4_sge_alloc_ofld_txq()
2782 c.eqaddr = cpu_to_be64(txq->q.phys_addr); in t4_sge_alloc_ofld_txq()
2786 kfree(txq->q.sdesc); in t4_sge_alloc_ofld_txq()
2787 txq->q.sdesc = NULL; in t4_sge_alloc_ofld_txq()
2790 txq->q.desc, txq->q.phys_addr); in t4_sge_alloc_ofld_txq()
2791 txq->q.desc = NULL; in t4_sge_alloc_ofld_txq()
2795 init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_G(ntohl(c.eqid_pkd))); in t4_sge_alloc_ofld_txq()
2804 static void free_txq(struct adapter *adap, struct sge_txq *q) in free_txq() argument
2809 q->size * sizeof(struct tx_desc) + s->stat_len, in free_txq()
2810 q->desc, q->phys_addr); in free_txq()
2811 q->cntxt_id = 0; in free_txq()
2812 q->sdesc = NULL; in free_txq()
2813 q->desc = NULL; in free_txq()
2852 void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q) in t4_free_ofld_rxqs() argument
2854 for ( ; n; n--, q++) in t4_free_ofld_rxqs()
2855 if (q->rspq.desc) in t4_free_ofld_rxqs()
2856 free_rspq_fl(adap, &q->rspq, in t4_free_ofld_rxqs()
2857 q->fl.size ? &q->fl : NULL); in t4_free_ofld_rxqs()
2877 if (etq->q.desc) { in t4_free_sge_resources()
2879 etq->q.cntxt_id); in t4_free_sge_resources()
2880 free_tx_desc(adap, &etq->q, etq->q.in_use, true); in t4_free_sge_resources()
2881 kfree(etq->q.sdesc); in t4_free_sge_resources()
2882 free_txq(adap, &etq->q); in t4_free_sge_resources()
2893 struct sge_ofld_txq *q = &adap->sge.ofldtxq[i]; in t4_free_sge_resources() local
2895 if (q->q.desc) { in t4_free_sge_resources()
2896 tasklet_kill(&q->qresume_tsk); in t4_free_sge_resources()
2898 q->q.cntxt_id); in t4_free_sge_resources()
2899 free_tx_desc(adap, &q->q, q->q.in_use, false); in t4_free_sge_resources()
2900 kfree(q->q.sdesc); in t4_free_sge_resources()
2901 __skb_queue_purge(&q->sendq); in t4_free_sge_resources()
2902 free_txq(adap, &q->q); in t4_free_sge_resources()
2910 if (cq->q.desc) { in t4_free_sge_resources()
2913 cq->q.cntxt_id); in t4_free_sge_resources()
2915 free_txq(adap, &cq->q); in t4_free_sge_resources()
2959 struct sge_ofld_txq *q = &s->ofldtxq[i]; in t4_sge_stop() local
2961 if (q->q.desc) in t4_sge_stop()
2962 tasklet_kill(&q->qresume_tsk); in t4_sge_stop()
2967 if (cq->q.desc) in t4_sge_stop()