Lines matching refs:txq

213 #define IS_TSO_HEADER(txq, addr) \ argument
214 ((addr >= txq->tso_hdrs_dma) && \
215 (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
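
The IS_TSO_HEADER() check above is a half-open range test: a buffer address belongs to the TSO header block exactly when it lies in [tso_hdrs_dma, tso_hdrs_dma + tx_ring_size * TSO_HEADER_SIZE). A minimal user-space sketch of the same test; the struct, its field widths, and the TSO_HEADER_SIZE value are stand-ins, not the driver's definitions:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define TSO_HEADER_SIZE 128   /* placeholder; the driver defines its own */

    struct txq_stub {             /* hypothetical stand-in for fec_enet_priv_tx_q */
        uint64_t tso_hdrs_dma;    /* DMA base of the TSO header block */
        unsigned int tx_ring_size;
    };

    /* Same range test as IS_TSO_HEADER(): half-open interval check. */
    static bool is_tso_header(const struct txq_stub *q, uint64_t addr)
    {
        return addr >= q->tso_hdrs_dma &&
               addr < q->tso_hdrs_dma + (uint64_t)q->tx_ring_size * TSO_HEADER_SIZE;
    }

    int main(void)
    {
        struct txq_stub q = { .tso_hdrs_dma = 0x1000, .tx_ring_size = 512 };
        printf("%d %d\n", is_tso_header(&q, 0x1000),            /* 1: first header */
                          is_tso_header(&q, 0x1000 + 512 * 128)); /* 0: one past end */
        return 0;
    }

fec_enet_tx_queue() applies this test at line 1225 to skip dma_unmap for completions whose buffer lives in the header block.
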
226 struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue_id]; in fec_enet_get_nextdesc() local
232 if (bdp >= txq->tx_bd_base) { in fec_enet_get_nextdesc()
233 base = txq->tx_bd_base; in fec_enet_get_nextdesc()
234 ring_size = txq->tx_ring_size; in fec_enet_get_nextdesc()
235 ex_base = (struct bufdesc_ex *)txq->tx_bd_base; in fec_enet_get_nextdesc()
257 struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue_id]; in fec_enet_get_prevdesc() local
263 if (bdp >= txq->tx_bd_base) { in fec_enet_get_prevdesc()
264 base = txq->tx_bd_base; in fec_enet_get_prevdesc()
265 ring_size = txq->tx_ring_size; in fec_enet_get_prevdesc()
266 ex_base = (struct bufdesc_ex *)txq->tx_bd_base; in fec_enet_get_prevdesc()
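
fec_enet_get_nextdesc() and fec_enet_get_prevdesc() step a descriptor pointer through the ring, using the extended-descriptor element size when bufdesc_ex is active and wrapping at the ring boundaries. The same wraparound arithmetic, reduced to indices (names are illustrative):

    #include <stdio.h>

    /* Hypothetical ring walk: next/prev index with wraparound, mirroring
     * fec_enet_get_nextdesc()/fec_enet_get_prevdesc() but on indices. */
    static unsigned int ring_next(unsigned int i, unsigned int ring_size)
    {
        return (i + 1) % ring_size;
    }

    static unsigned int ring_prev(unsigned int i, unsigned int ring_size)
    {
        return i ? i - 1 : ring_size - 1;
    }

    int main(void)
    {
        unsigned int ring_size = 4;
        printf("next of 3 is %u, prev of 0 is %u\n",
               ring_next(3, ring_size), ring_prev(0, ring_size));
        return 0;
    }
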
287 struct fec_enet_priv_tx_q *txq) in fec_enet_get_free_txdesc_num() argument
291 entries = ((const char *)txq->dirty_tx - in fec_enet_get_free_txdesc_num()
292 (const char *)txq->cur_tx) / fep->bufdesc_size - 1; in fec_enet_get_free_txdesc_num()
294 return entries > 0 ? entries : entries + txq->tx_ring_size; in fec_enet_get_free_txdesc_num()
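
fec_enet_get_free_txdesc_num() derives the free-slot count from the distance between dirty_tx (cleanup position) and cur_tx (producer position), minus one reserved slot so a full ring stays distinguishable from an empty one; a non-positive result means the producer has wrapped past the consumer, so tx_ring_size is added back. An index-based sketch with invented positions:

    #include <stdio.h>

    /* Free-entry computation mirroring fec_enet_get_free_txdesc_num():
     * the producer (cur) chases the consumer (dirty); one slot is kept
     * unused so "full" and "empty" do not look the same. */
    static int free_txdesc(int dirty, int cur, int ring_size)
    {
        int entries = dirty - cur - 1;
        return entries > 0 ? entries : entries + ring_size;
    }

    int main(void)
    {
        printf("%d\n", free_txdesc(5, 5, 8));  /* empty ring: 7 usable slots */
        printf("%d\n", free_txdesc(2, 6, 8));  /* wrapped case: 3 free */
        return 0;
    }
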
320 struct fec_enet_priv_tx_q *txq; in fec_dump() local
326 txq = fep->tx_queue[0]; in fec_dump()
327 bdp = txq->tx_bd_base; in fec_dump()
332 bdp == txq->cur_tx ? 'S' : ' ', in fec_dump()
333 bdp == txq->dirty_tx ? 'H' : ' ', in fec_dump()
335 txq->tx_skbuff[index]); in fec_dump()
338 } while (bdp != txq->tx_bd_base); in fec_dump()
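
fec_dump() makes one pass over the ring, tagging the producer position (cur_tx, printed as 'S') and the cleanup position (dirty_tx, printed as 'H'). A toy version of the same walk, with invented positions:

    #include <stdio.h>

    int main(void)
    {
        unsigned int ring_size = 4, cur = 2, dirty = 0;

        /* One pass over the ring, tagging producer and cleanup positions
         * the way fec_dump() tags cur_tx ('S') and dirty_tx ('H'). */
        for (unsigned int i = 0; i < ring_size; i++)
            printf("%3u %c%c\n", i,
                   i == cur ? 'S' : ' ',
                   i == dirty ? 'H' : ' ');
        return 0;
    }
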
364 fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq, in fec_enet_txq_submit_frag_skb() argument
369 struct bufdesc *bdp = txq->cur_tx; in fec_enet_txq_submit_frag_skb()
414 index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); in fec_enet_txq_submit_frag_skb()
417 memcpy(txq->tx_bounce[index], bufaddr, frag_len); in fec_enet_txq_submit_frag_skb()
418 bufaddr = txq->tx_bounce[index]; in fec_enet_txq_submit_frag_skb()
438 txq->cur_tx = bdp; in fec_enet_txq_submit_frag_skb()
443 bdp = txq->cur_tx; in fec_enet_txq_submit_frag_skb()
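
fec_enet_txq_submit_frag_skb() falls back to a preallocated per-index bounce buffer when the fragment's address is unsuitable for the controller's DMA (the copy at lines 417-418), and transmits from the copy instead. A sketch of that fallback; the 4-byte alignment rule and the buffer size here are hypothetical, not the driver's actual constraint:

    #include <stdalign.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define BOUNCE_SIZE 2048   /* placeholder for FEC_ENET_TX_FRSIZE */

    /* Bounce-buffer fallback: if the payload pointer fails a (hypothetical)
     * alignment rule, copy it into a per-descriptor bounce buffer and DMA
     * from the copy, as fec_enet_txq_submit_frag_skb() does. */
    static const void *maybe_bounce(const void *buf, size_t len,
                                    uint8_t bounce[BOUNCE_SIZE])
    {
        if (((uintptr_t)buf & 0x3) == 0)
            return buf;               /* aligned: transmit in place */
        memcpy(bounce, buf, len);     /* unaligned: copy, then transmit copy */
        return bounce;
    }

    int main(void)
    {
        static uint8_t bounce[BOUNCE_SIZE];
        alignas(4) static uint8_t data[16];
        const void *src = maybe_bounce(data + 1, 8, bounce); /* off by one byte */
        printf("bounced: %d\n", src == bounce);              /* prints 1 */
        return 0;
    }
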
452 static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, in fec_enet_txq_submit_skb() argument
468 entries_free = fec_enet_get_free_txdesc_num(fep, txq); in fec_enet_txq_submit_skb()
483 bdp = txq->cur_tx; in fec_enet_txq_submit_skb()
492 index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); in fec_enet_txq_submit_skb()
495 memcpy(txq->tx_bounce[index], skb->data, buflen); in fec_enet_txq_submit_skb()
496 bufaddr = txq->tx_bounce[index]; in fec_enet_txq_submit_skb()
512 ret = fec_enet_txq_submit_frag_skb(txq, skb, ndev); in fec_enet_txq_submit_skb()
543 last_bdp = txq->cur_tx; in fec_enet_txq_submit_skb()
544 index = fec_enet_get_bd_index(txq->tx_bd_base, last_bdp, fep); in fec_enet_txq_submit_skb()
546 txq->tx_skbuff[index] = skb; in fec_enet_txq_submit_skb()
562 txq->cur_tx = bdp; in fec_enet_txq_submit_skb()
571 fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb, in fec_enet_txq_put_data_tso() argument
590 memcpy(txq->tx_bounce[index], data, size); in fec_enet_txq_put_data_tso()
591 data = txq->tx_bounce[index]; in fec_enet_txq_put_data_tso()
632 fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq, in fec_enet_txq_put_hdr_tso() argument
649 bufaddr = txq->tso_hdrs + index * TSO_HEADER_SIZE; in fec_enet_txq_put_hdr_tso()
650 dmabuf = txq->tso_hdrs_dma + index * TSO_HEADER_SIZE; in fec_enet_txq_put_hdr_tso()
653 memcpy(txq->tx_bounce[index], skb->data, hdr_len); in fec_enet_txq_put_hdr_tso()
654 bufaddr = txq->tx_bounce[index]; in fec_enet_txq_put_hdr_tso()
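
fec_enet_txq_put_hdr_tso() gives every ring index its own fixed TSO_HEADER_SIZE slice of the coherent header block, so the CPU pointer and the DMA address are computed with the same offset (lines 649-650). A sketch of the offset arithmetic with invented base addresses:

    #include <stdint.h>
    #include <stdio.h>

    #define TSO_HEADER_SIZE 128   /* placeholder for the driver's value */

    int main(void)
    {
        /* CPU and DMA views of the header block share one offset per
         * index, as in fec_enet_txq_put_hdr_tso(). Bases are invented. */
        uint64_t tso_hdrs = 0x100000;       /* CPU base, shown numerically */
        uint64_t tso_hdrs_dma = 0x8000000;  /* DMA base */
        unsigned int index = 3;

        printf("cpu=%#llx dma=%#llx\n",
               (unsigned long long)(tso_hdrs + index * TSO_HEADER_SIZE),
               (unsigned long long)(tso_hdrs_dma + index * TSO_HEADER_SIZE));
        return 0;
    }
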
686 static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq, in fec_enet_txq_submit_tso() argument
693 struct bufdesc *bdp = txq->cur_tx; in fec_enet_txq_submit_tso()
699 if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(fep, txq)) { in fec_enet_txq_submit_tso()
719 index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); in fec_enet_txq_submit_tso()
724 hdr = txq->tso_hdrs + index * TSO_HEADER_SIZE; in fec_enet_txq_submit_tso()
726 ret = fec_enet_txq_put_hdr_tso(txq, skb, ndev, bdp, index); in fec_enet_txq_submit_tso()
735 index = fec_enet_get_bd_index(txq->tx_bd_base, in fec_enet_txq_submit_tso()
737 ret = fec_enet_txq_put_data_tso(txq, skb, ndev, in fec_enet_txq_submit_tso()
753 txq->tx_skbuff[index] = skb; in fec_enet_txq_submit_tso()
756 txq->cur_tx = bdp; in fec_enet_txq_submit_tso()
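
fec_enet_txq_submit_tso() first checks (line 699) that the descriptors the skb will consume are all available; otherwise the packet is not queued. A minimal sketch of that admission test, assuming tso_count_descs() has already produced the estimate:

    #include <stdbool.h>
    #include <stdio.h>

    /* Admission test mirroring the check at line 699: the TSO skb proceeds
     * only if the descriptors it needs are strictly fewer than the free
     * ones; otherwise it is rejected up front. */
    static bool tso_fits(int needed_descs, int free_descs)
    {
        return needed_descs < free_descs;
    }

    int main(void)
    {
        printf("%d %d\n", tso_fits(12, 16), tso_fits(16, 16)); /* 1 0 */
        return 0;
    }
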
779 struct fec_enet_priv_tx_q *txq; in fec_enet_start_xmit() local
784 txq = fep->tx_queue[queue]; in fec_enet_start_xmit()
788 ret = fec_enet_txq_submit_tso(txq, skb, ndev); in fec_enet_start_xmit()
790 ret = fec_enet_txq_submit_skb(txq, skb, ndev); in fec_enet_start_xmit()
794 entries_free = fec_enet_get_free_txdesc_num(fep, txq); in fec_enet_start_xmit()
795 if (entries_free <= txq->tx_stop_threshold) in fec_enet_start_xmit()
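
At the end of fec_enet_start_xmit(), the netdev queue is stopped once the free-descriptor count falls to tx_stop_threshold or below (lines 794-795), so a worst-case skb can never run the ring dry mid-submission. A flow-control sketch with illustrative numbers:

    #include <stdbool.h>
    #include <stdio.h>

    /* Stop the queue when free descriptors drop to the stop threshold,
     * as at the end of fec_enet_start_xmit(). Values are illustrative;
     * the driver derives its thresholds in fec_enet_alloc_queue(). */
    int main(void)
    {
        int tx_stop_threshold = 21;   /* stand-in for FEC_MAX_SKB_DESCS */
        bool queue_stopped = false;

        for (int entries_free = 23; entries_free >= 20; entries_free--) {
            if (entries_free <= tx_stop_threshold)
                queue_stopped = true;
            printf("free=%d stopped=%d\n", entries_free, queue_stopped);
        }
        return 0;
    }
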
806 struct fec_enet_priv_tx_q *txq; in fec_enet_bd_init() local
836 txq = fep->tx_queue[q]; in fec_enet_bd_init()
837 bdp = txq->tx_bd_base; in fec_enet_bd_init()
838 txq->cur_tx = bdp; in fec_enet_bd_init()
840 for (i = 0; i < txq->tx_ring_size; i++) { in fec_enet_bd_init()
843 if (txq->tx_skbuff[i]) { in fec_enet_bd_init()
844 dev_kfree_skb_any(txq->tx_skbuff[i]); in fec_enet_bd_init()
845 txq->tx_skbuff[i] = NULL; in fec_enet_bd_init()
854 txq->dirty_tx = bdp; in fec_enet_bd_init()
870 struct fec_enet_priv_tx_q *txq; in fec_enet_enable_ring() local
886 txq = fep->tx_queue[i]; in fec_enet_enable_ring()
887 writel(txq->bd_dma, fep->hwp + FEC_X_DES_START(i)); in fec_enet_enable_ring()
899 struct fec_enet_priv_tx_q *txq; in fec_enet_reset_skb() local
903 txq = fep->tx_queue[i]; in fec_enet_reset_skb()
905 for (j = 0; j < txq->tx_ring_size; j++) { in fec_enet_reset_skb()
906 if (txq->tx_skbuff[j]) { in fec_enet_reset_skb()
907 dev_kfree_skb_any(txq->tx_skbuff[j]); in fec_enet_reset_skb()
908 txq->tx_skbuff[j] = NULL; in fec_enet_reset_skb()
1198 struct fec_enet_priv_tx_q *txq; in fec_enet_tx_queue() local
1207 txq = fep->tx_queue[queue_id]; in fec_enet_tx_queue()
1210 bdp = txq->dirty_tx; in fec_enet_tx_queue()
1218 if (bdp == txq->cur_tx) in fec_enet_tx_queue()
1221 index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); in fec_enet_tx_queue()
1223 skb = txq->tx_skbuff[index]; in fec_enet_tx_queue()
1224 txq->tx_skbuff[index] = NULL; in fec_enet_tx_queue()
1225 if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr)) in fec_enet_tx_queue()
1272 txq->dirty_tx = bdp; in fec_enet_tx_queue()
1280 entries_free = fec_enet_get_free_txdesc_num(fep, txq); in fec_enet_tx_queue()
1281 if (entries_free >= txq->tx_wake_threshold) in fec_enet_tx_queue()
1287 if (bdp != txq->cur_tx && in fec_enet_tx_queue()
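
fec_enet_tx_queue() is the completion side: it walks from dirty_tx toward cur_tx (stopping at line 1218), releases each slot's skb, skips dma_unmap for TSO header slots (line 1225), and wakes the queue once tx_wake_threshold slots are free (lines 1280-1281). A reduced, index-based sketch of that loop; the per-descriptor hardware-ready check is omitted:

    #include <stdio.h>

    /* Completion-side sketch of fec_enet_tx_queue(): advance the cleanup
     * index from dirty toward cur, releasing one slot per step, then wake
     * the queue once enough slots are free. */
    int main(void)
    {
        unsigned int ring_size = 8, dirty = 1, cur = 5;
        int tx_wake_threshold = 3, freed = 0;

        while (dirty != cur) {
            /* driver: dma_unmap (unless IS_TSO_HEADER), then free the skb */
            dirty = (dirty + 1) % ring_size;
            freed++;
        }
        if (freed >= tx_wake_threshold)
            printf("wake queue after freeing %d slots\n", freed);
        return 0;
    }
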
2564 struct fec_enet_priv_tx_q *txq; in fec_enet_free_buffers() local
2586 txq = fep->tx_queue[q]; in fec_enet_free_buffers()
2587 bdp = txq->tx_bd_base; in fec_enet_free_buffers()
2588 for (i = 0; i < txq->tx_ring_size; i++) { in fec_enet_free_buffers()
2589 kfree(txq->tx_bounce[i]); in fec_enet_free_buffers()
2590 txq->tx_bounce[i] = NULL; in fec_enet_free_buffers()
2591 skb = txq->tx_skbuff[i]; in fec_enet_free_buffers()
2592 txq->tx_skbuff[i] = NULL; in fec_enet_free_buffers()
2602 struct fec_enet_priv_tx_q *txq; in fec_enet_free_queue() local
2606 txq = fep->tx_queue[i]; in fec_enet_free_queue()
2608 txq->tx_ring_size * TSO_HEADER_SIZE, in fec_enet_free_queue()
2609 txq->tso_hdrs, in fec_enet_free_queue()
2610 txq->tso_hdrs_dma); in fec_enet_free_queue()
2624 struct fec_enet_priv_tx_q *txq; in fec_enet_alloc_queue() local
2627 txq = kzalloc(sizeof(*txq), GFP_KERNEL); in fec_enet_alloc_queue()
2628 if (!txq) { in fec_enet_alloc_queue()
2633 fep->tx_queue[i] = txq; in fec_enet_alloc_queue()
2634 txq->tx_ring_size = TX_RING_SIZE; in fec_enet_alloc_queue()
2637 txq->tx_stop_threshold = FEC_MAX_SKB_DESCS; in fec_enet_alloc_queue()
2638 txq->tx_wake_threshold = in fec_enet_alloc_queue()
2639 (txq->tx_ring_size - txq->tx_stop_threshold) / 2; in fec_enet_alloc_queue()
2641 txq->tso_hdrs = dma_alloc_coherent(NULL, in fec_enet_alloc_queue()
2642 txq->tx_ring_size * TSO_HEADER_SIZE, in fec_enet_alloc_queue()
2643 &txq->tso_hdrs_dma, in fec_enet_alloc_queue()
2645 if (!txq->tso_hdrs) { in fec_enet_alloc_queue()
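
fec_enet_alloc_queue() sizes the flow-control window: the stop threshold reserves room for a worst-case skb (FEC_MAX_SKB_DESCS), and the wake threshold sits halfway into the remaining ring, giving hysteresis between stop and wake. The coherent TSO header block allocated at line 2641 is released by the matching dma_free_coherent() in fec_enet_free_queue() (lines 2608-2610). A sketch of the threshold math; both constants below are placeholders for the driver's values:

    #include <stdio.h>

    int main(void)
    {
        /* Threshold derivation as in fec_enet_alloc_queue(): stop while a
         * worst-case skb might not fit, wake at the halfway point of the
         * remainder. TX_RING_SIZE/FEC_MAX_SKB_DESCS values are stand-ins. */
        int tx_ring_size = 512;        /* placeholder for TX_RING_SIZE */
        int fec_max_skb_descs = 21;    /* placeholder for FEC_MAX_SKB_DESCS */

        int tx_stop_threshold = fec_max_skb_descs;
        int tx_wake_threshold = (tx_ring_size - tx_stop_threshold) / 2;

        printf("stop=%d wake=%d\n", tx_stop_threshold, tx_wake_threshold);
        return 0;
    }
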
2717 struct fec_enet_priv_tx_q *txq; in fec_enet_alloc_txq_buffers() local
2719 txq = fep->tx_queue[queue]; in fec_enet_alloc_txq_buffers()
2720 bdp = txq->tx_bd_base; in fec_enet_alloc_txq_buffers()
2721 for (i = 0; i < txq->tx_ring_size; i++) { in fec_enet_alloc_txq_buffers()
2722 txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL); in fec_enet_alloc_txq_buffers()
2723 if (!txq->tx_bounce[i]) in fec_enet_alloc_txq_buffers()
3010 struct fec_enet_priv_tx_q *txq; in fec_enet_init() local
3065 txq = fep->tx_queue[i]; in fec_enet_init()
3066 txq->index = i; in fec_enet_init()
3067 txq->tx_bd_base = (struct bufdesc *)cbd_base; in fec_enet_init()
3068 txq->bd_dma = bd_dma; in fec_enet_init()
3070 bd_dma += sizeof(struct bufdesc_ex) * txq->tx_ring_size; in fec_enet_init()
3072 (((struct bufdesc_ex *)cbd_base) + txq->tx_ring_size); in fec_enet_init()
3074 bd_dma += sizeof(struct bufdesc) * txq->tx_ring_size; in fec_enet_init()
3075 cbd_base += txq->tx_ring_size; in fec_enet_init()
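
fec_enet_init() carves all queues' rings out of a single coherent descriptor block: each queue records the current CPU pointer (tx_bd_base) and DMA cursor (bd_dma), then both advance by tx_ring_size descriptors, sized by struct bufdesc_ex when extended descriptors are enabled and struct bufdesc otherwise. A carving sketch with invented sizes and addresses:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Carving sketch mirroring fec_enet_init(): each queue takes a
         * ring_size-sized slice of one descriptor block, and the CPU and
         * DMA cursors advance in lockstep. Sizes/addresses are invented. */
        unsigned int num_queues = 3, ring_size = 512;
        uint64_t desc_size = 8;            /* stand-in for sizeof(struct bufdesc) */
        uint64_t bd_dma = 0x80000000ull;   /* illustrative DMA base */
        uint64_t cbd_off = 0;              /* CPU offset into the block */

        for (unsigned int q = 0; q < num_queues; q++) {
            printf("txq%u: cpu_off=%#llx dma=%#llx\n", q,
                   (unsigned long long)cbd_off, (unsigned long long)bd_dma);
            bd_dma  += desc_size * ring_size;
            cbd_off += desc_size * ring_size;
        }
        return 0;
    }
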