Searched refs:rxq (Results 1 - 91 of 91) sorted by relevance

/linux-4.1.27/drivers/net/wireless/iwlwifi/pcie/
rx.c
77 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
78 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
79 * to replenish the iwl->rxq->rx_free.
81 * iwl->rxq is replenished and the READ INDEX is updated (updating the
84 * detached from the iwl->rxq. The driver 'processed' index is updated.
85 * + The Host/Firmware iwl->rxq is replenished at irq thread time from the
86 * rx_free list. If there are no allocated buffers in iwl->rxq->rx_free,
114 static int iwl_rxq_space(const struct iwl_rxq *rxq) iwl_rxq_space() argument
125 return (rxq->read - rxq->write - 1) & (RX_QUEUE_SIZE - 1); iwl_rxq_space()
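The calculation in iwl_rxq_space() above is the standard power-of-two ring idiom: one slot is deliberately left unused so a full ring can be distinguished from an empty one, and unsigned wrap-around keeps the subtraction valid after the indices wrap. A minimal standalone sketch of the same arithmetic (illustrative constants, not driver code):

#include <stdio.h>

#define RX_QUEUE_SIZE 256                    /* assumed power of two */
#define RX_QUEUE_MASK (RX_QUEUE_SIZE - 1)

static unsigned int ring_space(unsigned int read, unsigned int write)
{
        /* same formula as iwl_rxq_space(); unsigned wrap makes it safe
         * even when the raw counters have rolled over */
        return (read - write - 1) & RX_QUEUE_MASK;
}

int main(void)
{
        printf("%u\n", ring_space(0, 0));   /* 255: one slot always stays free */
        printf("%u\n", ring_space(10, 9));  /* 0: writer sits right behind reader */
        return 0;
}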
152 struct iwl_rxq *rxq = &trans_pcie->rxq; iwl_pcie_rxq_inc_wr_ptr() local
155 lockdep_assert_held(&rxq->lock); iwl_pcie_rxq_inc_wr_ptr()
171 rxq->need_update = true; iwl_pcie_rxq_inc_wr_ptr()
176 rxq->write_actual = round_down(rxq->write, 8); iwl_pcie_rxq_inc_wr_ptr()
177 iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual); iwl_pcie_rxq_inc_wr_ptr()
183 struct iwl_rxq *rxq = &trans_pcie->rxq; iwl_pcie_rxq_check_wrptr() local
185 spin_lock(&rxq->lock); iwl_pcie_rxq_check_wrptr()
187 if (!rxq->need_update) iwl_pcie_rxq_check_wrptr()
191 rxq->need_update = false; iwl_pcie_rxq_check_wrptr()
194 spin_unlock(&rxq->lock); iwl_pcie_rxq_check_wrptr()
211 struct iwl_rxq *rxq = &trans_pcie->rxq; iwl_pcie_rxq_restock() local
225 spin_lock(&rxq->lock); iwl_pcie_rxq_restock()
226 while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) { iwl_pcie_rxq_restock()
228 rxb = rxq->queue[rxq->write]; iwl_pcie_rxq_restock()
232 rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer, iwl_pcie_rxq_restock()
237 rxq->bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma); iwl_pcie_rxq_restock()
238 rxq->queue[rxq->write] = rxb; iwl_pcie_rxq_restock()
239 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; iwl_pcie_rxq_restock()
240 rxq->free_count--; iwl_pcie_rxq_restock()
242 spin_unlock(&rxq->lock); iwl_pcie_rxq_restock()
245 if (rxq->free_count <= RX_LOW_WATERMARK) iwl_pcie_rxq_restock()
250 if (rxq->write_actual != (rxq->write & ~0x7)) { iwl_pcie_rxq_restock()
251 spin_lock(&rxq->lock); iwl_pcie_rxq_restock()
253 spin_unlock(&rxq->lock); iwl_pcie_rxq_restock()
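The rounding visible in iwl_pcie_rxq_inc_wr_ptr() and iwl_pcie_rxq_restock() above exists because the device write pointer is only advanced in batches of eight buffer descriptors: write_actual shadows the last value written to FH_RSCSR_CHNL0_WPTR, and the hardware is only poked again once rxq->write crosses another multiple of 8. A hedged sketch of that check (helper name is invented for illustration):

/* non-zero when the hardware write pointer needs refreshing;
 * write_actual mirrors the value last handed to the device */
static int wptr_needs_update(unsigned int write, unsigned int write_actual)
{
        return write_actual != (write & ~0x7u);  /* i.e. round_down(write, 8) */
}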
269 struct iwl_rxq *rxq = &trans_pcie->rxq; iwl_pcie_rxq_alloc_rbs() local
275 spin_lock(&rxq->lock); iwl_pcie_rxq_alloc_rbs()
276 if (list_empty(&rxq->rx_used)) { iwl_pcie_rxq_alloc_rbs()
277 spin_unlock(&rxq->lock); iwl_pcie_rxq_alloc_rbs()
280 spin_unlock(&rxq->lock); iwl_pcie_rxq_alloc_rbs()
282 if (rxq->free_count > RX_LOW_WATERMARK) iwl_pcie_rxq_alloc_rbs()
296 if ((rxq->free_count <= RX_LOW_WATERMARK) && iwl_pcie_rxq_alloc_rbs()
302 rxq->free_count); iwl_pcie_rxq_alloc_rbs()
309 spin_lock(&rxq->lock); iwl_pcie_rxq_alloc_rbs()
311 if (list_empty(&rxq->rx_used)) { iwl_pcie_rxq_alloc_rbs()
312 spin_unlock(&rxq->lock); iwl_pcie_rxq_alloc_rbs()
316 rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer, iwl_pcie_rxq_alloc_rbs()
319 spin_unlock(&rxq->lock); iwl_pcie_rxq_alloc_rbs()
330 spin_lock(&rxq->lock); iwl_pcie_rxq_alloc_rbs()
331 list_add(&rxb->list, &rxq->rx_used); iwl_pcie_rxq_alloc_rbs()
332 spin_unlock(&rxq->lock); iwl_pcie_rxq_alloc_rbs()
341 spin_lock(&rxq->lock); iwl_pcie_rxq_alloc_rbs()
343 list_add_tail(&rxb->list, &rxq->rx_free); iwl_pcie_rxq_alloc_rbs()
344 rxq->free_count++; iwl_pcie_rxq_alloc_rbs()
346 spin_unlock(&rxq->lock); iwl_pcie_rxq_alloc_rbs()
353 struct iwl_rxq *rxq = &trans_pcie->rxq; iwl_pcie_rxq_free_rbs() local
356 lockdep_assert_held(&rxq->lock); iwl_pcie_rxq_free_rbs()
359 if (!rxq->pool[i].page) iwl_pcie_rxq_free_rbs()
361 dma_unmap_page(trans->dev, rxq->pool[i].page_dma, iwl_pcie_rxq_free_rbs()
364 __free_pages(rxq->pool[i].page, trans_pcie->rx_page_order); iwl_pcie_rxq_free_rbs()
365 rxq->pool[i].page = NULL; iwl_pcie_rxq_free_rbs()
395 struct iwl_rxq *rxq = &trans_pcie->rxq; iwl_pcie_rx_alloc() local
398 memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq)); iwl_pcie_rx_alloc()
400 spin_lock_init(&rxq->lock); iwl_pcie_rx_alloc()
402 if (WARN_ON(rxq->bd || rxq->rb_stts)) iwl_pcie_rx_alloc()
406 rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE, iwl_pcie_rx_alloc()
407 &rxq->bd_dma, GFP_KERNEL); iwl_pcie_rx_alloc()
408 if (!rxq->bd) iwl_pcie_rx_alloc()
412 rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts), iwl_pcie_rx_alloc()
413 &rxq->rb_stts_dma, GFP_KERNEL); iwl_pcie_rx_alloc()
414 if (!rxq->rb_stts) iwl_pcie_rx_alloc()
421 rxq->bd, rxq->bd_dma); iwl_pcie_rx_alloc()
422 rxq->bd_dma = 0; iwl_pcie_rx_alloc()
423 rxq->bd = NULL; iwl_pcie_rx_alloc()
428 static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq) iwl_pcie_rx_hw_init() argument
451 (u32)(rxq->bd_dma >> 8)); iwl_pcie_rx_hw_init()
455 rxq->rb_stts_dma >> 4); iwl_pcie_rx_hw_init()
481 static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq) iwl_pcie_rx_init_rxb_lists() argument
485 lockdep_assert_held(&rxq->lock); iwl_pcie_rx_init_rxb_lists()
487 INIT_LIST_HEAD(&rxq->rx_free); iwl_pcie_rx_init_rxb_lists()
488 INIT_LIST_HEAD(&rxq->rx_used); iwl_pcie_rx_init_rxb_lists()
489 rxq->free_count = 0; iwl_pcie_rx_init_rxb_lists()
492 list_add(&rxq->pool[i].list, &rxq->rx_used); iwl_pcie_rx_init_rxb_lists()
498 struct iwl_rxq *rxq = &trans_pcie->rxq; iwl_pcie_rx_init() local
501 if (!rxq->bd) { iwl_pcie_rx_init()
507 spin_lock(&rxq->lock); iwl_pcie_rx_init()
513 iwl_pcie_rx_init_rxb_lists(rxq); iwl_pcie_rx_init()
516 rxq->queue[i] = NULL; iwl_pcie_rx_init()
520 rxq->read = rxq->write = 0; iwl_pcie_rx_init()
521 rxq->write_actual = 0; iwl_pcie_rx_init()
522 memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts)); iwl_pcie_rx_init()
523 spin_unlock(&rxq->lock); iwl_pcie_rx_init()
527 iwl_pcie_rx_hw_init(trans, rxq); iwl_pcie_rx_init()
529 spin_lock(&rxq->lock); iwl_pcie_rx_init()
531 spin_unlock(&rxq->lock); iwl_pcie_rx_init()
539 struct iwl_rxq *rxq = &trans_pcie->rxq; iwl_pcie_rx_free() local
541 /*if rxq->bd is NULL, it means that nothing has been allocated, iwl_pcie_rx_free()
543 if (!rxq->bd) { iwl_pcie_rx_free()
550 spin_lock(&rxq->lock); iwl_pcie_rx_free()
552 spin_unlock(&rxq->lock); iwl_pcie_rx_free()
555 rxq->bd, rxq->bd_dma); iwl_pcie_rx_free()
556 rxq->bd_dma = 0; iwl_pcie_rx_free()
557 rxq->bd = NULL; iwl_pcie_rx_free()
559 if (rxq->rb_stts) iwl_pcie_rx_free()
562 rxq->rb_stts, rxq->rb_stts_dma); iwl_pcie_rx_free()
564 IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n"); iwl_pcie_rx_free()
565 rxq->rb_stts_dma = 0; iwl_pcie_rx_free()
566 rxq->rb_stts = NULL; iwl_pcie_rx_free()
573 struct iwl_rxq *rxq = &trans_pcie->rxq; iwl_pcie_rx_handle_rb() local
691 list_add_tail(&rxb->list, &rxq->rx_used); iwl_pcie_rx_handle_rb()
693 list_add_tail(&rxb->list, &rxq->rx_free); iwl_pcie_rx_handle_rb()
694 rxq->free_count++; iwl_pcie_rx_handle_rb()
697 list_add_tail(&rxb->list, &rxq->rx_used); iwl_pcie_rx_handle_rb()
706 struct iwl_rxq *rxq = &trans_pcie->rxq; iwl_pcie_rx_handle() local
713 spin_lock(&rxq->lock); iwl_pcie_rx_handle()
716 r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF; iwl_pcie_rx_handle()
717 i = rxq->read; iwl_pcie_rx_handle()
724 total_empty = r - rxq->write_actual; iwl_pcie_rx_handle()
734 rxb = rxq->queue[i]; iwl_pcie_rx_handle()
735 rxq->queue[i] = NULL; iwl_pcie_rx_handle()
747 rxq->read = i; iwl_pcie_rx_handle()
748 spin_unlock(&rxq->lock); iwl_pcie_rx_handle()
757 rxq->read = i; iwl_pcie_rx_handle()
758 spin_unlock(&rxq->lock); iwl_pcie_rx_handle()
trans.c
1849 struct iwl_rxq *rxq = &trans_pcie->rxq; iwl_dbgfs_rx_queue_read() local
1855 rxq->read); iwl_dbgfs_rx_queue_read()
1857 rxq->write); iwl_dbgfs_rx_queue_read()
1859 rxq->write_actual); iwl_dbgfs_rx_queue_read()
1861 rxq->need_update); iwl_dbgfs_rx_queue_read()
1863 rxq->free_count); iwl_dbgfs_rx_queue_read()
1864 if (rxq->rb_stts) { iwl_dbgfs_rx_queue_read()
1866 le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF); iwl_dbgfs_rx_queue_read()
internal.h
252 * @rxq: all the RX queue data
275 struct iwl_rxq rxq; member in struct:iwl_trans_pcie
/linux-4.1.27/drivers/net/ethernet/marvell/
mvneta.c
576 struct mvneta_rx_queue *rxq, mvneta_rxq_non_occup_desc_add()
583 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), mvneta_rxq_non_occup_desc_add()
589 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), mvneta_rxq_non_occup_desc_add()
595 struct mvneta_rx_queue *rxq) mvneta_rxq_busy_desc_num_get()
599 val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id)); mvneta_rxq_busy_desc_num_get()
607 struct mvneta_rx_queue *rxq, mvneta_rxq_desc_num_update()
615 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val); mvneta_rxq_desc_num_update()
635 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val); mvneta_rxq_desc_num_update()
641 mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq) mvneta_rxq_next_desc_get() argument
643 int rx_desc = rxq->next_desc_to_proc; mvneta_rxq_next_desc_get()
645 rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc); mvneta_rxq_next_desc_get()
646 prefetch(rxq->descs + rxq->next_desc_to_proc); mvneta_rxq_next_desc_get()
647 return rxq->descs + rx_desc; mvneta_rxq_next_desc_get()
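mvneta_rxq_next_desc_get() above follows a common descriptor-ring pattern: return the current descriptor, advance the software index with wrap-around, and prefetch the next entry so it is already in cache when the caller loops. A generic sketch with made-up names (MVNETA_QUEUE_NEXT_DESC itself does not appear in these results):

struct rx_desc {
        unsigned int status;                 /* placeholder payload */
};

struct rx_desc_ring {
        struct rx_desc *descs;               /* descriptor array */
        int next_desc_to_proc;               /* software index */
        int last_desc;                       /* ring size - 1 */
};

static struct rx_desc *ring_next_desc(struct rx_desc_ring *r)
{
        int cur = r->next_desc_to_proc;

        /* wrap back to slot 0 after the last descriptor */
        r->next_desc_to_proc = (cur == r->last_desc) ? 0 : cur + 1;
        __builtin_prefetch(r->descs + r->next_desc_to_proc);
        return r->descs + cur;
}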
665 struct mvneta_rx_queue *rxq, mvneta_rxq_offset_set()
670 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); mvneta_rxq_offset_set()
675 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); mvneta_rxq_offset_set()
716 /* Set rxq buf size */ mvneta_rxq_buf_size_set()
718 struct mvneta_rx_queue *rxq, mvneta_rxq_buf_size_set()
723 val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id)); mvneta_rxq_buf_size_set()
728 mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val); mvneta_rxq_buf_size_set()
733 struct mvneta_rx_queue *rxq) mvneta_rxq_bm_disable()
737 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); mvneta_rxq_bm_disable()
739 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); mvneta_rxq_bm_disable()
761 struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; mvneta_port_up() local
762 if (rxq->descs != NULL) mvneta_port_up()
1129 struct mvneta_rx_queue *rxq, u32 value) mvneta_rx_pkts_coal_set()
1131 mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id), mvneta_rx_pkts_coal_set()
1133 rxq->pkts_coal = value; mvneta_rx_pkts_coal_set()
1140 struct mvneta_rx_queue *rxq, u32 value) mvneta_rx_time_coal_set()
1148 mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val); mvneta_rx_time_coal_set()
1149 rxq->time_coal = value; mvneta_rx_time_coal_set()
1442 struct mvneta_rx_queue *rxq) mvneta_rxq_drop_pkts()
1446 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq); mvneta_rxq_drop_pkts()
1447 for (i = 0; i < rxq->size; i++) { mvneta_rxq_drop_pkts()
1448 struct mvneta_rx_desc *rx_desc = rxq->descs + i; mvneta_rxq_drop_pkts()
1457 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done); mvneta_rxq_drop_pkts()
1462 struct mvneta_rx_queue *rxq) mvneta_rx()
1470 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq); mvneta_rx()
1480 struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq); mvneta_rx()
1551 rxq->missed++; mvneta_rx()
1565 /* Update rxq management counters */ mvneta_rx()
1566 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_filled); mvneta_rx()
2142 struct mvneta_rx_queue *rxq; mvneta_poll() local
2144 rxq = mvneta_rx_policy(pp, cause_rx_tx); mvneta_poll()
2145 if (!rxq) mvneta_poll()
2149 count = mvneta_rx(pp, budget, rxq); mvneta_poll()
2159 cause_rx_tx &= ~((1 << rxq->id) << 8); mvneta_poll()
2182 /* Handle rxq fill: allocates rxq skbs; called when initializing a port */ mvneta_rxq_fill()
2183 static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, mvneta_rxq_fill() argument
2189 memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc)); mvneta_rxq_fill()
2190 if (mvneta_rx_refill(pp, rxq->descs + i) != 0) { mvneta_rxq_fill()
2191 netdev_err(pp->dev, "%s:rxq %d, %d of %d buffs filled\n", mvneta_rxq_fill()
2192 __func__, rxq->id, i, num); mvneta_rxq_fill()
2200 mvneta_rxq_non_occup_desc_add(pp, rxq, i); mvneta_rxq_fill()
2228 struct mvneta_rx_queue *rxq) mvneta_rxq_init()
2231 rxq->size = pp->rx_ring_size; mvneta_rxq_init()
2234 rxq->descs = dma_alloc_coherent(pp->dev->dev.parent, mvneta_rxq_init()
2235 rxq->size * MVNETA_DESC_ALIGNED_SIZE, mvneta_rxq_init()
2236 &rxq->descs_phys, GFP_KERNEL); mvneta_rxq_init()
2237 if (rxq->descs == NULL) mvneta_rxq_init()
2240 BUG_ON(rxq->descs != mvneta_rxq_init()
2241 PTR_ALIGN(rxq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE)); mvneta_rxq_init()
2243 rxq->last_desc = rxq->size - 1; mvneta_rxq_init()
2246 mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys); mvneta_rxq_init()
2247 mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size); mvneta_rxq_init()
2250 mvneta_rxq_offset_set(pp, rxq, NET_SKB_PAD); mvneta_rxq_init()
2253 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal); mvneta_rxq_init()
2254 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal); mvneta_rxq_init()
2257 mvneta_rxq_buf_size_set(pp, rxq, MVNETA_RX_BUF_SIZE(pp->pkt_size)); mvneta_rxq_init()
2258 mvneta_rxq_bm_disable(pp, rxq); mvneta_rxq_init()
2259 mvneta_rxq_fill(pp, rxq, rxq->size); mvneta_rxq_init()
2266 struct mvneta_rx_queue *rxq) mvneta_rxq_deinit()
2268 mvneta_rxq_drop_pkts(pp, rxq); mvneta_rxq_deinit()
2270 if (rxq->descs) mvneta_rxq_deinit()
2272 rxq->size * MVNETA_DESC_ALIGNED_SIZE, mvneta_rxq_deinit()
2273 rxq->descs, mvneta_rxq_deinit()
2274 rxq->descs_phys); mvneta_rxq_deinit()
2276 rxq->descs = NULL; mvneta_rxq_deinit()
2277 rxq->last_desc = 0; mvneta_rxq_deinit()
2278 rxq->next_desc_to_proc = 0; mvneta_rxq_deinit()
2279 rxq->descs_phys = 0; mvneta_rxq_deinit()
2397 netdev_err(pp->dev, "%s: can't create rxq=%d\n", mvneta_setup_rxqs()
2797 struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; mvneta_ethtool_set_coalesce() local
2798 rxq->time_coal = c->rx_coalesce_usecs; mvneta_ethtool_set_coalesce()
2799 rxq->pkts_coal = c->rx_max_coalesced_frames; mvneta_ethtool_set_coalesce()
2800 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal); mvneta_ethtool_set_coalesce()
2801 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal); mvneta_ethtool_set_coalesce()
2932 struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; mvneta_init() local
2933 rxq->id = queue; mvneta_init()
2934 rxq->size = pp->rx_ring_size; mvneta_init()
2935 rxq->pkts_coal = MVNETA_RX_COAL_PKTS; mvneta_init()
2936 rxq->time_coal = MVNETA_RX_COAL_USEC; mvneta_init()
575 mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, int ndescs) mvneta_rxq_non_occup_desc_add() argument
594 mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp, struct mvneta_rx_queue *rxq) mvneta_rxq_busy_desc_num_get() argument
606 mvneta_rxq_desc_num_update(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, int rx_done, int rx_filled) mvneta_rxq_desc_num_update() argument
664 mvneta_rxq_offset_set(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, int offset) mvneta_rxq_offset_set() argument
717 mvneta_rxq_buf_size_set(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, int buf_size) mvneta_rxq_buf_size_set() argument
732 mvneta_rxq_bm_disable(struct mvneta_port *pp, struct mvneta_rx_queue *rxq) mvneta_rxq_bm_disable() argument
1128 mvneta_rx_pkts_coal_set(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, u32 value) mvneta_rx_pkts_coal_set() argument
1139 mvneta_rx_time_coal_set(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, u32 value) mvneta_rx_time_coal_set() argument
1441 mvneta_rxq_drop_pkts(struct mvneta_port *pp, struct mvneta_rx_queue *rxq) mvneta_rxq_drop_pkts() argument
1461 mvneta_rx(struct mvneta_port *pp, int rx_todo, struct mvneta_rx_queue *rxq) mvneta_rx() argument
2227 mvneta_rxq_init(struct mvneta_port *pp, struct mvneta_rx_queue *rxq) mvneta_rxq_init() argument
2265 mvneta_rxq_deinit(struct mvneta_port *pp, struct mvneta_rx_queue *rxq) mvneta_rxq_deinit() argument
mv643xx_eth.c
414 struct rx_queue rxq[8]; member in struct:mv643xx_eth_private
455 /* rxq/txq helper functions *************************************************/ rxq_to_mp()
456 static struct mv643xx_eth_private *rxq_to_mp(struct rx_queue *rxq) rxq_to_mp() argument
458 return container_of(rxq, struct mv643xx_eth_private, rxq[rxq->index]); rxq_to_mp()
466 static void rxq_enable(struct rx_queue *rxq) rxq_enable() argument
468 struct mv643xx_eth_private *mp = rxq_to_mp(rxq); rxq_enable()
469 wrlp(mp, RXQ_COMMAND, 1 << rxq->index); rxq_enable()
472 static void rxq_disable(struct rx_queue *rxq) rxq_disable() argument
474 struct mv643xx_eth_private *mp = rxq_to_mp(rxq); rxq_disable()
475 u8 mask = 1 << rxq->index; rxq_disable()
521 static int rxq_process(struct rx_queue *rxq, int budget) rxq_process() argument
523 struct mv643xx_eth_private *mp = rxq_to_mp(rxq); rxq_process()
528 while (rx < budget && rxq->rx_desc_count) { rxq_process()
534 rx_desc = &rxq->rx_desc_area[rxq->rx_curr_desc]; rxq_process()
541 skb = rxq->rx_skb[rxq->rx_curr_desc]; rxq_process()
542 rxq->rx_skb[rxq->rx_curr_desc] = NULL; rxq_process()
544 rxq->rx_curr_desc++; rxq_process()
545 if (rxq->rx_curr_desc == rxq->rx_ring_size) rxq_process()
546 rxq->rx_curr_desc = 0; rxq_process()
550 rxq->rx_desc_count--; rxq_process()
553 mp->work_rx_refill |= 1 << rxq->index; rxq_process()
608 mp->work_rx &= ~(1 << rxq->index); rxq_process()
613 static int rxq_refill(struct rx_queue *rxq, int budget) rxq_refill() argument
615 struct mv643xx_eth_private *mp = rxq_to_mp(rxq); rxq_refill()
619 while (refilled < budget && rxq->rx_desc_count < rxq->rx_ring_size) { rxq_refill()
636 rxq->rx_desc_count++; rxq_refill()
638 rx = rxq->rx_used_desc++; rxq_refill()
639 if (rxq->rx_used_desc == rxq->rx_ring_size) rxq_refill()
640 rxq->rx_used_desc = 0; rxq_refill()
642 rx_desc = rxq->rx_desc_area + rx; rxq_refill()
649 rxq->rx_skb[rx] = skb; rxq_refill()
663 mp->work_rx_refill &= ~(1 << rxq->index); rxq_refill()
1920 struct rx_queue *rxq = mp->rxq + index; rxq_init() local
1925 rxq->index = index; rxq_init()
1927 rxq->rx_ring_size = mp->rx_ring_size; rxq_init()
1929 rxq->rx_desc_count = 0; rxq_init()
1930 rxq->rx_curr_desc = 0; rxq_init()
1931 rxq->rx_used_desc = 0; rxq_init()
1933 size = rxq->rx_ring_size * sizeof(struct rx_desc); rxq_init()
1936 rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr, rxq_init()
1938 rxq->rx_desc_dma = mp->rx_desc_sram_addr; rxq_init()
1940 rxq->rx_desc_area = dma_alloc_coherent(mp->dev->dev.parent, rxq_init()
1941 size, &rxq->rx_desc_dma, rxq_init()
1945 if (rxq->rx_desc_area == NULL) { rxq_init()
1950 memset(rxq->rx_desc_area, 0, size); rxq_init()
1952 rxq->rx_desc_area_size = size; rxq_init()
1953 rxq->rx_skb = kcalloc(rxq->rx_ring_size, sizeof(*rxq->rx_skb), rxq_init()
1955 if (rxq->rx_skb == NULL) rxq_init()
1958 rx_desc = rxq->rx_desc_area; rxq_init()
1959 for (i = 0; i < rxq->rx_ring_size; i++) { rxq_init()
1963 if (nexti == rxq->rx_ring_size) rxq_init()
1966 rx_desc[i].next_desc_ptr = rxq->rx_desc_dma + rxq_init()
1975 iounmap(rxq->rx_desc_area); rxq_init()
1978 rxq->rx_desc_area, rxq_init()
1979 rxq->rx_desc_dma); rxq_init()
1985 static void rxq_deinit(struct rx_queue *rxq) rxq_deinit() argument
1987 struct mv643xx_eth_private *mp = rxq_to_mp(rxq); rxq_deinit()
1990 rxq_disable(rxq); rxq_deinit()
1992 for (i = 0; i < rxq->rx_ring_size; i++) { rxq_deinit()
1993 if (rxq->rx_skb[i]) { rxq_deinit()
1994 dev_kfree_skb(rxq->rx_skb[i]); rxq_deinit()
1995 rxq->rx_desc_count--; rxq_deinit()
1999 if (rxq->rx_desc_count) { rxq_deinit()
2001 rxq->rx_desc_count); rxq_deinit()
2004 if (rxq->index == 0 && rxq_deinit()
2005 rxq->rx_desc_area_size <= mp->rx_desc_sram_size) rxq_deinit()
2006 iounmap(rxq->rx_desc_area); rxq_deinit()
2008 dma_free_coherent(mp->dev->dev.parent, rxq->rx_desc_area_size, rxq_deinit()
2009 rxq->rx_desc_area, rxq->rx_desc_dma); rxq_deinit()
2011 kfree(rxq->rx_skb); rxq_deinit()
2273 work_done += rxq_process(mp->rxq + queue, work_tbd); mv643xx_eth_poll()
2275 work_done += rxq_refill(mp->rxq + queue, work_tbd); mv643xx_eth_poll()
2361 struct rx_queue *rxq = mp->rxq + i; port_start() local
2364 addr = (u32)rxq->rx_desc_dma; port_start()
2365 addr += rxq->rx_curr_desc * sizeof(struct rx_desc); port_start()
2368 rxq_enable(rxq); port_start()
2427 rxq_deinit(mp->rxq + i); mv643xx_eth_open()
2431 rxq_refill(mp->rxq + i, INT_MAX); mv643xx_eth_open()
2461 rxq_deinit(mp->rxq + i); mv643xx_eth_open()
2474 rxq_disable(mp->rxq + i); port_reset()
2518 rxq_deinit(mp->rxq + i); mv643xx_eth_stop()
mvpp2.c
46 #define MVPP2_RXQ_CONFIG_REG(rxq) (0x800 + 4 * (rxq))
102 #define MVPP2_RXQ_STATUS_UPDATE_REG(rxq) (0x3000 + 4 * (rxq))
105 #define MVPP2_RXQ_STATUS_REG(rxq) (0x3400 + 4 * (rxq))
154 #define MVPP2_ISR_RX_THRESHOLD_REG(rxq) (0x5200 + 4 * (rxq))
155 #define MVPP2_ISR_RXQ_GROUP_REG(rxq) (0x5400 + 4 * (rxq))
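The mvpp2.c macros above all encode the same register layout: each RX queue owns one 32-bit register inside a per-block array, so an address is a fixed base plus 4 bytes per queue index. A worked example using the status-register macro quoted above:

#define MVPP2_RXQ_STATUS_REG(rxq)       (0x3400 + 4 * (rxq))

/* MVPP2_RXQ_STATUS_REG(0) == 0x3400
 * MVPP2_RXQ_STATUS_REG(1) == 0x3404
 * MVPP2_RXQ_STATUS_REG(7) == 0x341c */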
3488 /* Attach long pool to rxq */ mvpp2_rxq_long_pool_set()
3506 /* Attach short pool to rxq */ mvpp2_rxq_short_pool_set()
3707 int rxq; mvpp2_swf_bm_pool_init() local
3721 for (rxq = 0; rxq < rxq_number; rxq++) mvpp2_swf_bm_pool_init()
3722 mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id); mvpp2_swf_bm_pool_init()
3737 for (rxq = 0; rxq < rxq_number; rxq++) mvpp2_swf_bm_pool_init()
3738 mvpp2_rxq_short_pool_set(port, rxq, mvpp2_swf_bm_pool_init()
4090 mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq) mvpp2_rxq_next_desc_get() argument
4092 int rx_desc = rxq->next_desc_to_proc; mvpp2_rxq_next_desc_get()
4094 rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc); mvpp2_rxq_next_desc_get()
4095 prefetch(rxq->descs + rxq->next_desc_to_proc); mvpp2_rxq_next_desc_get()
4096 return rxq->descs + rx_desc; mvpp2_rxq_next_desc_get()
4362 struct mvpp2_rx_queue *rxq, u32 pkts) mvpp2_rx_pkts_coal_set()
4367 mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id); mvpp2_rx_pkts_coal_set()
4370 rxq->pkts_coal = pkts; mvpp2_rx_pkts_coal_set()
4375 struct mvpp2_rx_queue *rxq, u32 usec) mvpp2_rx_time_coal_set()
4380 mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val); mvpp2_rx_time_coal_set()
4382 rxq->time_coal = usec; mvpp2_rx_time_coal_set()
4499 struct mvpp2_rx_queue *rxq) mvpp2_rxq_init()
4502 rxq->size = port->rx_ring_size; mvpp2_rxq_init()
4505 rxq->descs = dma_alloc_coherent(port->dev->dev.parent, mvpp2_rxq_init()
4506 rxq->size * MVPP2_DESC_ALIGNED_SIZE, mvpp2_rxq_init()
4507 &rxq->descs_phys, GFP_KERNEL); mvpp2_rxq_init()
4508 if (!rxq->descs) mvpp2_rxq_init()
4511 BUG_ON(rxq->descs != mvpp2_rxq_init()
4512 PTR_ALIGN(rxq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE)); mvpp2_rxq_init()
4514 rxq->last_desc = rxq->size - 1; mvpp2_rxq_init()
4517 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); mvpp2_rxq_init()
4520 mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id); mvpp2_rxq_init()
4521 mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, rxq->descs_phys); mvpp2_rxq_init()
4522 mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, rxq->size); mvpp2_rxq_init()
4526 mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD); mvpp2_rxq_init()
4529 mvpp2_rx_pkts_coal_set(port, rxq, rxq->pkts_coal); mvpp2_rxq_init()
4530 mvpp2_rx_time_coal_set(port, rxq, rxq->time_coal); mvpp2_rxq_init()
4533 mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size); mvpp2_rxq_init()
4540 struct mvpp2_rx_queue *rxq) mvpp2_rxq_drop_pkts()
4544 rx_received = mvpp2_rxq_received(port, rxq->id); mvpp2_rxq_drop_pkts()
4549 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq); mvpp2_rxq_drop_pkts()
4555 mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received); mvpp2_rxq_drop_pkts()
4560 struct mvpp2_rx_queue *rxq) mvpp2_rxq_deinit()
4562 mvpp2_rxq_drop_pkts(port, rxq); mvpp2_rxq_deinit()
4564 if (rxq->descs) mvpp2_rxq_deinit()
4566 rxq->size * MVPP2_DESC_ALIGNED_SIZE, mvpp2_rxq_deinit()
4567 rxq->descs, mvpp2_rxq_deinit()
4568 rxq->descs_phys); mvpp2_rxq_deinit()
4570 rxq->descs = NULL; mvpp2_rxq_deinit()
4571 rxq->last_desc = 0; mvpp2_rxq_deinit()
4572 rxq->next_desc_to_proc = 0; mvpp2_rxq_deinit()
4573 rxq->descs_phys = 0; mvpp2_rxq_deinit()
4578 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); mvpp2_rxq_deinit()
4579 mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id); mvpp2_rxq_deinit()
5024 struct mvpp2_rx_queue *rxq) mvpp2_rx()
5032 rx_received = mvpp2_rxq_received(port, rxq->id); mvpp2_rx()
5038 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq); mvpp2_rx()
5101 mvpp2_rxq_status_update(port, rxq->id, rx_todo, rx_filled); mvpp2_rx()
5326 struct mvpp2_rx_queue *rxq; mvpp2_poll() local
5328 rxq = mvpp2_get_rx_queue(port, cause_rx); mvpp2_poll()
5329 if (!rxq) mvpp2_poll()
5332 count = mvpp2_rx(port, budget, rxq); mvpp2_poll()
5340 cause_rx &= ~(1 << rxq->logic_rxq); mvpp2_poll()
5773 struct mvpp2_rx_queue *rxq = port->rxqs[queue]; mvpp2_ethtool_set_coalesce() local
5775 rxq->time_coal = c->rx_coalesce_usecs; mvpp2_ethtool_set_coalesce()
5776 rxq->pkts_coal = c->rx_max_coalesced_frames; mvpp2_ethtool_set_coalesce()
5777 mvpp2_rx_pkts_coal_set(port, rxq, rxq->pkts_coal); mvpp2_ethtool_set_coalesce()
5778 mvpp2_rx_time_coal_set(port, rxq, rxq->time_coal); mvpp2_ethtool_set_coalesce()
5976 struct mvpp2_rx_queue *rxq; local
5979 rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
5980 if (!rxq)
5983 rxq->id = port->first_rxq + queue;
5984 rxq->port = port->id;
5985 rxq->logic_rxq = queue;
5987 port->rxqs[queue] = rxq;
5995 struct mvpp2_rx_queue *rxq = port->rxqs[queue]; local
5997 rxq->size = port->rx_ring_size;
5998 rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
5999 rxq->time_coal = MVPP2_RX_COAL_USEC;
4361 mvpp2_rx_pkts_coal_set(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq, u32 pkts) mvpp2_rx_pkts_coal_set() argument
4374 mvpp2_rx_time_coal_set(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq, u32 usec) mvpp2_rx_time_coal_set() argument
4498 mvpp2_rxq_init(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq) mvpp2_rxq_init() argument
4539 mvpp2_rxq_drop_pkts(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq) mvpp2_rxq_drop_pkts() argument
4559 mvpp2_rxq_deinit(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq) mvpp2_rxq_deinit() argument
5023 mvpp2_rx(struct mvpp2_port *port, int rx_todo, struct mvpp2_rx_queue *rxq) mvpp2_rx() argument
sky2.c
1339 unsigned rxq = rxqaddr[sky2->port]; sky2_rx_stop() local
1343 sky2_write8(hw, RB_ADDR(rxq, RB_CTRL), RB_DIS_OP_MD); sky2_rx_stop()
1346 if (sky2_read8(hw, RB_ADDR(rxq, Q_RSL)) sky2_rx_stop()
1347 == sky2_read8(hw, RB_ADDR(rxq, Q_RL))) sky2_rx_stop()
1352 sky2_write32(hw, Q_ADDR(rxq, Q_CSR), BMU_RST_SET | BMU_FIFO_RST); sky2_rx_stop()
1355 sky2_write32(hw, Y2_QADDR(rxq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET); sky2_rx_stop()
1493 static inline void sky2_rx_update(struct sky2_port *sky2, unsigned rxq) sky2_rx_update() argument
1495 sky2_put_idx(sky2->hw, rxq, sky2->rx_put); sky2_rx_update()
1535 unsigned rxq = rxqaddr[sky2->port]; sky2_rx_start() local
1539 sky2_qset(hw, rxq); sky2_rx_start()
1543 sky2_write32(hw, Q_ADDR(rxq, Q_WM), BMU_WM_PEX); sky2_rx_start()
1549 sky2_write32(hw, Q_ADDR(rxq, Q_TEST), F_M_RX_RAM_DIS); sky2_rx_start()
1551 sky2_prefetch_init(hw, rxq, sky2->rx_le_map, RX_LE_SIZE - 1); sky2_rx_start()
1580 sky2_rx_update(sky2, rxq); sky2_rx_start()
2959 unsigned rxq = rxqaddr[port]; sky2_rx_hung() local
2962 u8 fifo_rp = sky2_read8(hw, Q_ADDR(rxq, Q_RP)); sky2_rx_hung()
2963 u8 fifo_lev = sky2_read8(hw, Q_ADDR(rxq, Q_RL)); sky2_rx_hung()
2975 fifo_rp, sky2_read8(hw, Q_ADDR(rxq, Q_WP))); sky2_rx_hung()
/linux-4.1.27/drivers/net/ethernet/brocade/bna/
bnad_ethtool.c
650 sprintf(string, "rxq%d_packets", q_num); bnad_get_strings()
652 sprintf(string, "rxq%d_bytes", q_num); bnad_get_strings()
654 sprintf(string, "rxq%d_packets_with_error", bnad_get_strings()
657 sprintf(string, "rxq%d_allocbuf_failed", q_num); bnad_get_strings()
659 sprintf(string, "rxq%d_producer_index", q_num); bnad_get_strings()
661 sprintf(string, "rxq%d_consumer_index", q_num); bnad_get_strings()
668 rcb[1]->rxq) { bnad_get_strings()
669 sprintf(string, "rxq%d_packets", q_num); bnad_get_strings()
671 sprintf(string, "rxq%d_bytes", q_num); bnad_get_strings()
674 "rxq%d_packets_with_error", q_num); bnad_get_strings()
676 sprintf(string, "rxq%d_allocbuf_failed", bnad_get_strings()
679 sprintf(string, "rxq%d_producer_index", bnad_get_strings()
682 sprintf(string, "rxq%d_consumer_index", bnad_get_strings()
750 bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1]->rxq) bnad_get_stats_count_locked()
775 bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0]->rxq) { bnad_per_q_stats_fill()
801 rcb[0]->rxq) { bnad_per_q_stats_fill()
804 buf[bi++] = rcb->rxq->rx_packets; bnad_per_q_stats_fill()
805 buf[bi++] = rcb->rxq->rx_bytes; bnad_per_q_stats_fill()
806 buf[bi++] = rcb->rxq-> bnad_per_q_stats_fill()
808 buf[bi++] = rcb->rxq-> bnad_per_q_stats_fill()
815 rcb[1]->rxq) { bnad_per_q_stats_fill()
818 buf[bi++] = rcb->rxq->rx_packets; bnad_per_q_stats_fill()
819 buf[bi++] = rcb->rxq->rx_bytes; bnad_per_q_stats_fill()
820 buf[bi++] = rcb->rxq-> bnad_per_q_stats_fill()
822 buf[bi++] = rcb->rxq-> bnad_per_q_stats_fill()
bna_tx_rx.c
1959 struct bna_rxq *rxq = NULL; bna_rxq_get() local
1964 rxq = (struct bna_rxq *)qe; bna_rxq_get()
1965 bfa_q_qe_init(&rxq->qe); bna_rxq_get()
1967 return rxq; bna_rxq_get()
1971 bna_rxq_put(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq) bna_rxq_put() argument
1973 bfa_q_qe_init(&rxq->qe); bna_rxq_put()
1974 list_add_tail(&rxq->qe, &rx_mod->rxq_free_q); bna_rxq_put()
2058 rxp->rxq.single.only = q0; bna_rxp_add_rxqs()
2059 rxp->rxq.single.reserved = NULL; bna_rxp_add_rxqs()
2062 rxp->rxq.slr.large = q0; bna_rxp_add_rxqs()
2063 rxp->rxq.slr.small = q1; bna_rxp_add_rxqs()
2066 rxp->rxq.hds.data = q0; bna_rxp_add_rxqs()
2067 rxp->rxq.hds.hdr = q1; bna_rxp_add_rxqs()
2075 bna_rxq_qpt_setup(struct bna_rxq *rxq, bna_rxq_qpt_setup() argument
2088 rxq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb; bna_rxq_qpt_setup()
2089 rxq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb; bna_rxq_qpt_setup()
2090 rxq->qpt.kv_qpt_ptr = qpt_mem->kva; bna_rxq_qpt_setup()
2091 rxq->qpt.page_count = page_count; bna_rxq_qpt_setup()
2092 rxq->qpt.page_size = page_size; bna_rxq_qpt_setup()
2094 rxq->rcb->sw_qpt = (void **) swqpt_mem->kva; bna_rxq_qpt_setup()
2095 rxq->rcb->sw_q = page_mem->kva; bna_rxq_qpt_setup()
2100 for (i = 0; i < rxq->qpt.page_count; i++) { bna_rxq_qpt_setup()
2101 rxq->rcb->sw_qpt[i] = kva; bna_rxq_qpt_setup()
2105 ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].lsb = bna_rxq_qpt_setup()
2107 ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].msb = bna_rxq_qpt_setup()
2269 rx_mod->rxq = (struct bna_rxq *) bna_rx_mod_init()
2306 rxq_ptr = &rx_mod->rxq[index]; bna_rx_mod_init()
2643 q0->rcb->rxq = q0; bna_rx_create()
2668 q1->rcb->rxq = q1; bna_rx_create()
2762 rxp->rxq.slr.large = NULL; bna_rx_destroy()
2763 rxp->rxq.slr.small = NULL; bna_rx_destroy()
bna.h
310 (q0) = rxp->rxq.single.only; \
314 (q0) = rxp->rxq.slr.large; \
315 (q1) = rxp->rxq.slr.small; \
318 (q0) = rxp->rxq.hds.data; \
319 (q1) = rxp->rxq.hds.hdr; \
bnad.c
291 order = get_order(rcb->rxq->buffer_size); bnad_rxq_alloc_init()
297 unmap_q->map_size = rcb->rxq->buffer_size; bnad_rxq_alloc_init()
299 if (rcb->rxq->multi_buffer) { bnad_rxq_alloc_init()
301 unmap_q->map_size = rcb->rxq->buffer_size; bnad_rxq_alloc_init()
306 (rcb->rxq->buffer_size > 2048) ? bnad_rxq_alloc_init()
396 rcb->rxq->rxbuf_alloc_failed++; bnad_rxq_refill_page()
441 buff_sz = rcb->rxq->buffer_size; bnad_rxq_refill_skb()
453 rcb->rxq->rxbuf_alloc_failed++; bnad_rxq_refill_skb()
685 rcb->rxq->rx_packets_with_error++; bnad_cq_process()
695 rcb->rxq->rx_packets++; bnad_cq_process()
696 rcb->rxq->rx_bytes += totlen; bnad_cq_process()
2079 * small packets may get routed to the same rxq. bnad_init_rx_config()
2409 rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets; bnad_netdev_qstats_fill()
2411 rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes; bnad_netdev_qstats_fill()
2414 rcb[1]->rxq) { bnad_netdev_qstats_fill()
2417 ccb->rcb[1]->rxq->rx_packets; bnad_netdev_qstats_fill()
2420 ccb->rcb[1]->rxq->rx_bytes; bnad_netdev_qstats_fill()
bna_types.h
574 struct bna_rxq *rxq; member in struct:bna_rcb
711 union bna_rxq_u rxq; member in struct:bna_rxp
858 struct bna_rxq *rxq; /* BFI_MAX_RXQ entries */ member in struct:bna_rx_mod
/linux-4.1.27/drivers/net/ethernet/atheros/alx/
main.c
75 struct alx_rx_queue *rxq = &alx->rxq; alx_refill_rx_ring() local
81 next = cur = rxq->write_idx; alx_refill_rx_ring()
84 cur_buf = &rxq->bufs[cur]; alx_refill_rx_ring()
86 while (!cur_buf->skb && next != rxq->read_idx) { alx_refill_rx_ring()
87 struct alx_rfd *rfd = &rxq->rfd[cur]; alx_refill_rx_ring()
116 cur_buf = &rxq->bufs[cur]; alx_refill_rx_ring()
123 rxq->write_idx = cur; alx_refill_rx_ring()
189 struct alx_rx_queue *rxq = &alx->rxq; alx_clean_rx_irq() local
197 rrd = &rxq->rrd[rxq->rrd_read_idx]; alx_clean_rx_irq()
203 RRD_SI) != rxq->read_idx || alx_clean_rx_irq()
210 rxb = &rxq->bufs[rxq->read_idx]; alx_clean_rx_irq()
250 if (++rxq->read_idx == alx->rx_ringsz) alx_clean_rx_irq()
251 rxq->read_idx = 0; alx_clean_rx_irq()
252 if (++rxq->rrd_read_idx == alx->rx_ringsz) alx_clean_rx_irq()
253 rxq->rrd_read_idx = 0; alx_clean_rx_irq()
366 alx->rxq.read_idx = 0; alx_init_ring_ptrs()
367 alx->rxq.write_idx = 0; alx_init_ring_ptrs()
368 alx->rxq.rrd_read_idx = 0; alx_init_ring_ptrs()
370 alx_write_mem32(hw, ALX_RRD_ADDR_LO, alx->rxq.rrd_dma); alx_init_ring_ptrs()
372 alx_write_mem32(hw, ALX_RFD_ADDR_LO, alx->rxq.rfd_dma); alx_init_ring_ptrs()
407 struct alx_rx_queue *rxq = &alx->rxq; alx_free_rxring_buf() local
411 if (rxq == NULL) alx_free_rxring_buf()
415 cur_buf = rxq->bufs + i; alx_free_rxring_buf()
428 rxq->write_idx = 0; alx_free_rxring_buf()
429 rxq->read_idx = 0; alx_free_rxring_buf()
430 rxq->rrd_read_idx = 0; alx_free_rxring_buf()
518 alx->rxq.bufs = kcalloc(alx->rx_ringsz, alx_alloc_descriptors()
521 if (!alx->rxq.bufs) alx_alloc_descriptors()
546 alx->rxq.rrd = alx_alloc_descriptors()
549 alx->rxq.rrd_dma = alx->descmem.dma + alx_alloc_descriptors()
555 alx->rxq.rfd = alx_alloc_descriptors()
559 alx->rxq.rfd_dma = alx->descmem.dma + alx_alloc_descriptors()
566 kfree(alx->rxq.bufs); alx_alloc_descriptors()
594 kfree(alx->rxq.bufs); alx_free_rings()
alx.h
101 struct alx_rx_queue rxq; member in struct:alx_priv
hw.c
378 u32 rxq, txq, val; alx_stop_mac() local
381 rxq = alx_read_mem32(hw, ALX_RXQ0); alx_stop_mac()
382 alx_write_mem32(hw, ALX_RXQ0, rxq & ~ALX_RXQ0_EN); alx_stop_mac()
609 u32 mac, txq, rxq; alx_start_mac() local
611 rxq = alx_read_mem32(hw, ALX_RXQ0); alx_start_mac()
612 alx_write_mem32(hw, ALX_RXQ0, rxq | ALX_RXQ0_EN); alx_start_mac()
991 /* rxq, flow control */ alx_configure_basic()
/linux-4.1.27/drivers/atm/
ambassador.c
688 amb_rxq * rxq = &dev->rxq[pool]; rx_give() local
693 spin_lock_irqsave (&rxq->lock, flags); rx_give()
695 if (rxq->pending < rxq->maximum) { rx_give()
696 PRINTD (DBG_RX, "RX in slot %p", rxq->in.ptr); rx_give()
698 *rxq->in.ptr = *rx; rx_give()
699 rxq->pending++; rx_give()
700 rxq->in.ptr = NEXTQ (rxq->in.ptr, rxq->in.start, rxq->in.limit); rx_give()
702 wr_mem (dev, offsetof(amb_mem, mb.adapter.rx_address[pool]), virt_to_bus (rxq->in.ptr)); rx_give()
704 spin_unlock_irqrestore (&rxq->lock, flags); rx_give()
707 spin_unlock_irqrestore (&rxq->lock, flags); rx_give()
713 amb_rxq * rxq = &dev->rxq[pool]; rx_take() local
718 spin_lock_irqsave (&rxq->lock, flags); rx_take()
720 if (rxq->pending && (rxq->out.ptr->status || rxq->out.ptr->length)) { rx_take()
722 rx_complete (dev, rxq->out.ptr); rx_take()
724 rxq->out.ptr->status = 0; rx_take()
725 rxq->out.ptr->length = 0; rx_take()
727 rxq->pending--; rx_take()
728 rxq->out.ptr = NEXTQ (rxq->out.ptr, rxq->out.start, rxq->out.limit); rx_take()
730 if (rxq->pending < rxq->low) rx_take()
731 rxq->low = rxq->pending; rx_take()
732 spin_unlock_irqrestore (&rxq->lock, flags); rx_take()
735 if (!rxq->pending && rxq->buffers_wanted) rx_take()
736 rxq->emptied++; rx_take()
737 spin_unlock_irqrestore (&rxq->lock, flags); rx_take()
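rx_give() and rx_take() above drive a producer/consumer descriptor queue through raw pointers: in is where the host posts free buffers, out is where completed receives are collected, and NEXTQ (defined elsewhere, not among these matches) wraps a pointer from the end of the array back to its start. A rough, self-contained sketch of that shape, with made-up names:

struct ptr_ring {
        int *start, *limit;                  /* backing storage: [start, limit) */
        int *in, *out;                       /* producer / consumer positions */
        unsigned int pending;                /* descriptors currently queued */
        unsigned int maximum;                /* capacity */
};

/* advance one slot, wrapping from the end of the array back to the start */
static int *ring_advance(int *p, int *start, int *limit)
{
        return (p + 1 == limit) ? start : p + 1;
}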
746 amb_rxq * rxq = &dev->rxq[pool]; drain_rx_pool() local
756 if (rxq->pending > rxq->buffers_wanted) { drain_rx_pool()
763 while (rxq->pending > rxq->buffers_wanted) drain_rx_pool()
784 amb_rxq * rxq; fill_rx_pool() local
791 rxq = &dev->rxq[pool]; fill_rx_pool()
792 while (rxq->pending < rxq->maximum && rxq->pending < rxq->buffers_wanted) { fill_rx_pool()
794 struct sk_buff * skb = alloc_skb (rxq->buffer_size, priority); fill_rx_pool()
1107 if ((unsigned int) rxtp->max_sdu <= dev->rxq[pool].buffer_size) { amb_open()
1109 pool, rxtp->max_sdu, dev->rxq[pool].buffer_size); amb_open()
1193 if (!dev->rxq[pool].buffers_wanted) amb_open()
1194 dev->rxq[pool].buffers_wanted = rx_lats; amb_open()
1195 dev->rxq[pool].buffers_wanted += 1; amb_open()
1289 dev->rxq[pool].buffers_wanted -= 1; amb_close()
1290 if (dev->rxq[pool].buffers_wanted == rx_lats) { amb_close()
1291 dev->rxq[pool].buffers_wanted = 0; amb_close()
1458 amb_rxq * r = &dev->rxq[pool]; amb_proc_read()
1469 amb_rxq * r = &dev->rxq[pool]; amb_proc_read()
1590 amb_rxq * rxq = &dev->rxq[pool]; create_queues() local
1592 rxq->buffer_size = rx_buffer_sizes[pool]; create_queues()
1593 rxq->buffers_wanted = 0; create_queues()
1595 rxq->pending = 0; create_queues()
1596 rxq->low = rxs[pool] - 1; create_queues()
1597 rxq->emptied = 0; create_queues()
1598 rxq->maximum = rxs[pool] - 1; create_queues()
1600 rxq->in.start = in; create_queues()
1601 rxq->in.ptr = in; create_queues()
1602 rxq->in.limit = in + rxs[pool]; create_queues()
1604 memory = rxq->in.limit; create_queues()
1607 rxq->out.start = out; create_queues()
1608 rxq->out.ptr = out; create_queues()
1609 rxq->out.limit = out + rxs[pool]; create_queues()
1611 memory = rxq->out.limit; create_queues()
1629 // includes txq.in, txq.out, rxq[].in and rxq[].out destroy_queues()
2003 a.rec_struct[pool].buffer_start = bus_addr (dev->rxq[pool].in.start); amb_talk()
2004 a.rec_struct[pool].buffer_end = bus_addr (dev->rxq[pool].in.limit); amb_talk()
2005 a.rec_struct[pool].rx_start = bus_addr (dev->rxq[pool].out.start); amb_talk()
2006 a.rec_struct[pool].rx_end = bus_addr (dev->rxq[pool].out.limit); amb_talk()
2007 a.rec_struct[pool].buffer_size = cpu_to_be32 (dev->rxq[pool].buffer_size); amb_talk()
2170 spin_lock_init (&dev->rxq[pool].lock); setup_dev()
fore200e.c
1180 struct host_rxq* rxq = &fore200e->host_rxq; fore200e_rx_irq() local
1187 entry = &rxq->host_entry[ rxq->head ]; fore200e_rx_irq()
1218 FORE200E_NEXT_ENTRY(rxq->head, QUEUE_SIZE_RX); fore200e_rx_irq()
2209 struct host_rxq* rxq = &fore200e->host_rxq; fore200e_init_rx_queue() local
2217 &rxq->status, fore200e_init_rx_queue()
2226 &rxq->rpd, fore200e_init_rx_queue()
2231 fore200e->bus->dma_chunk_free(fore200e, &rxq->status); fore200e_init_rx_queue()
2241 rxq->host_entry[ i ].status = fore200e_init_rx_queue()
2242 FORE200E_INDEX(rxq->status.align_addr, enum status, i); fore200e_init_rx_queue()
2243 rxq->host_entry[ i ].rpd = fore200e_init_rx_queue()
2244 FORE200E_INDEX(rxq->rpd.align_addr, struct rpd, i); fore200e_init_rx_queue()
2245 rxq->host_entry[ i ].rpd_dma = fore200e_init_rx_queue()
2246 FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i); fore200e_init_rx_queue()
2247 rxq->host_entry[ i ].cp_entry = &cp_entry[ i ]; fore200e_init_rx_queue()
2249 *rxq->host_entry[ i ].status = STATUS_FREE; fore200e_init_rx_queue()
2251 fore200e->bus->write(FORE200E_DMA_INDEX(rxq->status.dma_addr, enum status, i), fore200e_init_rx_queue()
2254 fore200e->bus->write(FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i), fore200e_init_rx_queue()
2259 rxq->head = 0; fore200e_init_rx_queue()
ambassador.h
635 amb_rxq rxq[NUM_RX_POOLS]; member in struct:amb_dev
/linux-4.1.27/drivers/net/wireless/iwlegacy/
3945-mac.c
900 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
901 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
902 * to replenish the iwl->rxq->rx_free.
904 * iwl->rxq is replenished and the READ IDX is updated (updating the
907 * detached from the iwl->rxq. The driver 'processed' idx is updated.
908 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
909 * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
956 struct il_rx_queue *rxq = &il->rxq; il3945_rx_queue_restock() local
962 spin_lock_irqsave(&rxq->lock, flags); il3945_rx_queue_restock()
963 write = rxq->write & ~0x7; il3945_rx_queue_restock()
964 while (il_rx_queue_space(rxq) > 0 && rxq->free_count) { il3945_rx_queue_restock()
966 element = rxq->rx_free.next; il3945_rx_queue_restock()
971 rxq->bd[rxq->write] = il3945_rx_queue_restock()
973 rxq->queue[rxq->write] = rxb; il3945_rx_queue_restock()
974 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; il3945_rx_queue_restock()
975 rxq->free_count--; il3945_rx_queue_restock()
977 spin_unlock_irqrestore(&rxq->lock, flags); il3945_rx_queue_restock()
980 if (rxq->free_count <= RX_LOW_WATERMARK) il3945_rx_queue_restock()
985 if (rxq->write_actual != (rxq->write & ~0x7) || il3945_rx_queue_restock()
986 abs(rxq->write - rxq->read) > 7) { il3945_rx_queue_restock()
987 spin_lock_irqsave(&rxq->lock, flags); il3945_rx_queue_restock()
988 rxq->need_update = 1; il3945_rx_queue_restock()
989 spin_unlock_irqrestore(&rxq->lock, flags); il3945_rx_queue_restock()
990 il_rx_queue_update_write_ptr(il, rxq); il3945_rx_queue_restock()
1005 struct il_rx_queue *rxq = &il->rxq; il3945_rx_allocate() local
1014 spin_lock_irqsave(&rxq->lock, flags); il3945_rx_allocate()
1015 if (list_empty(&rxq->rx_used)) { il3945_rx_allocate()
1016 spin_unlock_irqrestore(&rxq->lock, flags); il3945_rx_allocate()
1019 spin_unlock_irqrestore(&rxq->lock, flags); il3945_rx_allocate()
1021 if (rxq->free_count > RX_LOW_WATERMARK) il3945_rx_allocate()
1032 if (rxq->free_count <= RX_LOW_WATERMARK && il3945_rx_allocate()
1036 priority, rxq->free_count); il3945_rx_allocate()
1054 spin_lock_irqsave(&rxq->lock, flags); il3945_rx_allocate()
1056 if (list_empty(&rxq->rx_used)) { il3945_rx_allocate()
1057 spin_unlock_irqrestore(&rxq->lock, flags); il3945_rx_allocate()
1065 element = rxq->rx_used.next; il3945_rx_allocate()
1071 list_add_tail(&rxb->list, &rxq->rx_free); il3945_rx_allocate()
1072 rxq->free_count++; il3945_rx_allocate()
1075 spin_unlock_irqrestore(&rxq->lock, flags); il3945_rx_allocate()
1080 il3945_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq) il3945_rx_queue_reset() argument
1084 spin_lock_irqsave(&rxq->lock, flags); il3945_rx_queue_reset()
1085 INIT_LIST_HEAD(&rxq->rx_free); il3945_rx_queue_reset()
1086 INIT_LIST_HEAD(&rxq->rx_used); il3945_rx_queue_reset()
1091 if (rxq->pool[i].page != NULL) { il3945_rx_queue_reset()
1092 pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma, il3945_rx_queue_reset()
1095 __il_free_pages(il, rxq->pool[i].page); il3945_rx_queue_reset()
1096 rxq->pool[i].page = NULL; il3945_rx_queue_reset()
1098 list_add_tail(&rxq->pool[i].list, &rxq->rx_used); il3945_rx_queue_reset()
1103 rxq->read = rxq->write = 0; il3945_rx_queue_reset()
1104 rxq->write_actual = 0; il3945_rx_queue_reset()
1105 rxq->free_count = 0; il3945_rx_queue_reset()
1106 spin_unlock_irqrestore(&rxq->lock, flags); il3945_rx_queue_reset()
1136 il3945_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq) il3945_rx_queue_free() argument
1140 if (rxq->pool[i].page != NULL) { il3945_rx_queue_free()
1141 pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma, il3945_rx_queue_free()
1144 __il_free_pages(il, rxq->pool[i].page); il3945_rx_queue_free()
1145 rxq->pool[i].page = NULL; il3945_rx_queue_free()
1149 dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd, il3945_rx_queue_free()
1150 rxq->bd_dma); il3945_rx_queue_free()
1152 rxq->rb_stts, rxq->rb_stts_dma); il3945_rx_queue_free()
1153 rxq->bd = NULL; il3945_rx_queue_free()
1154 rxq->rb_stts = NULL; il3945_rx_queue_free()
1207 struct il_rx_queue *rxq = &il->rxq; il3945_rx_handle() local
1217 r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF; il3945_rx_handle()
1218 i = rxq->read; il3945_rx_handle()
1221 total_empty = r - rxq->write_actual; il3945_rx_handle()
1234 rxb = rxq->queue[i]; il3945_rx_handle()
1241 rxq->queue[i] = NULL; il3945_rx_handle()
1287 spin_lock_irqsave(&rxq->lock, flags); il3945_rx_handle()
1297 list_add_tail(&rxb->list, &rxq->rx_used); il3945_rx_handle()
1299 list_add_tail(&rxb->list, &rxq->rx_free); il3945_rx_handle()
1300 rxq->free_count++; il3945_rx_handle()
1303 list_add_tail(&rxb->list, &rxq->rx_used); il3945_rx_handle()
1305 spin_unlock_irqrestore(&rxq->lock, flags); il3945_rx_handle()
1313 rxq->read = i; il3945_rx_handle()
1321 rxq->read = i; il3945_rx_handle()
1490 il_rx_queue_update_write_ptr(il, &il->rxq); il3945_irq_tasklet()
3861 if (il->rxq.bd) il3945_pci_remove()
3862 il3945_rx_queue_free(il, &il->rxq); il3945_pci_remove()
4965-mac.c
100 il4965_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq) il4965_rx_queue_reset() argument
104 spin_lock_irqsave(&rxq->lock, flags); il4965_rx_queue_reset()
105 INIT_LIST_HEAD(&rxq->rx_free); il4965_rx_queue_reset()
106 INIT_LIST_HEAD(&rxq->rx_used); il4965_rx_queue_reset()
111 if (rxq->pool[i].page != NULL) { il4965_rx_queue_reset()
112 pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma, il4965_rx_queue_reset()
115 __il_free_pages(il, rxq->pool[i].page); il4965_rx_queue_reset()
116 rxq->pool[i].page = NULL; il4965_rx_queue_reset()
118 list_add_tail(&rxq->pool[i].list, &rxq->rx_used); il4965_rx_queue_reset()
122 rxq->queue[i] = NULL; il4965_rx_queue_reset()
126 rxq->read = rxq->write = 0; il4965_rx_queue_reset()
127 rxq->write_actual = 0; il4965_rx_queue_reset()
128 rxq->free_count = 0; il4965_rx_queue_reset()
129 spin_unlock_irqrestore(&rxq->lock, flags); il4965_rx_queue_reset()
133 il4965_rx_init(struct il_priv *il, struct il_rx_queue *rxq) il4965_rx_init() argument
151 il_wr(il, FH49_RSCSR_CHNL0_RBDCB_BASE_REG, (u32) (rxq->bd_dma >> 8)); il4965_rx_init()
154 il_wr(il, FH49_RSCSR_CHNL0_STTS_WPTR_REG, rxq->rb_stts_dma >> 4); il4965_rx_init()
198 struct il_rx_queue *rxq = &il->rxq; il4965_hw_nic_init() local
211 if (!rxq->bd) { il4965_hw_nic_init()
218 il4965_rx_queue_reset(il, rxq); il4965_hw_nic_init()
222 il4965_rx_init(il, rxq); il4965_hw_nic_init()
226 rxq->need_update = 1; il4965_hw_nic_init()
227 il_rx_queue_update_write_ptr(il, rxq); il4965_hw_nic_init()
267 struct il_rx_queue *rxq = &il->rxq; il4965_rx_queue_restock() local
272 spin_lock_irqsave(&rxq->lock, flags); il4965_rx_queue_restock()
273 while (il_rx_queue_space(rxq) > 0 && rxq->free_count) { il4965_rx_queue_restock()
275 rxb = rxq->queue[rxq->write]; il4965_rx_queue_restock()
279 element = rxq->rx_free.next; il4965_rx_queue_restock()
284 rxq->bd[rxq->write] = il4965_rx_queue_restock()
286 rxq->queue[rxq->write] = rxb; il4965_rx_queue_restock()
287 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; il4965_rx_queue_restock()
288 rxq->free_count--; il4965_rx_queue_restock()
290 spin_unlock_irqrestore(&rxq->lock, flags); il4965_rx_queue_restock()
293 if (rxq->free_count <= RX_LOW_WATERMARK) il4965_rx_queue_restock()
298 if (rxq->write_actual != (rxq->write & ~0x7)) { il4965_rx_queue_restock()
299 spin_lock_irqsave(&rxq->lock, flags); il4965_rx_queue_restock()
300 rxq->need_update = 1; il4965_rx_queue_restock()
301 spin_unlock_irqrestore(&rxq->lock, flags); il4965_rx_queue_restock()
302 il_rx_queue_update_write_ptr(il, rxq); il4965_rx_queue_restock()
317 struct il_rx_queue *rxq = &il->rxq; il4965_rx_allocate() local
326 spin_lock_irqsave(&rxq->lock, flags); il4965_rx_allocate()
327 if (list_empty(&rxq->rx_used)) { il4965_rx_allocate()
328 spin_unlock_irqrestore(&rxq->lock, flags); il4965_rx_allocate()
331 spin_unlock_irqrestore(&rxq->lock, flags); il4965_rx_allocate()
333 if (rxq->free_count > RX_LOW_WATERMARK) il4965_rx_allocate()
346 if (rxq->free_count <= RX_LOW_WATERMARK && il4965_rx_allocate()
352 rxq->free_count); il4965_rx_allocate()
369 spin_lock_irqsave(&rxq->lock, flags); il4965_rx_allocate()
371 if (list_empty(&rxq->rx_used)) { il4965_rx_allocate()
372 spin_unlock_irqrestore(&rxq->lock, flags); il4965_rx_allocate()
380 element = rxq->rx_used.next; il4965_rx_allocate()
388 list_add_tail(&rxb->list, &rxq->rx_free); il4965_rx_allocate()
389 rxq->free_count++; il4965_rx_allocate()
392 spin_unlock_irqrestore(&rxq->lock, flags); il4965_rx_allocate()
422 il4965_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq) il4965_rx_queue_free() argument
426 if (rxq->pool[i].page != NULL) { il4965_rx_queue_free()
427 pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma, il4965_rx_queue_free()
430 __il_free_pages(il, rxq->pool[i].page); il4965_rx_queue_free()
431 rxq->pool[i].page = NULL; il4965_rx_queue_free()
435 dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd, il4965_rx_queue_free()
436 rxq->bd_dma); il4965_rx_queue_free()
438 rxq->rb_stts, rxq->rb_stts_dma); il4965_rx_queue_free()
439 rxq->bd = NULL; il4965_rx_queue_free()
440 rxq->rb_stts = NULL; il4965_rx_queue_free()
4231 struct il_rx_queue *rxq = &il->rxq; il4965_rx_handle() local
4241 r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF; il4965_rx_handle()
4242 i = rxq->read; il4965_rx_handle()
4249 total_empty = r - rxq->write_actual; il4965_rx_handle()
4259 rxb = rxq->queue[i]; il4965_rx_handle()
4266 rxq->queue[i] = NULL; il4965_rx_handle()
4312 spin_lock_irqsave(&rxq->lock, flags); il4965_rx_handle()
4323 list_add_tail(&rxb->list, &rxq->rx_used); il4965_rx_handle()
4325 list_add_tail(&rxb->list, &rxq->rx_free); il4965_rx_handle()
4326 rxq->free_count++; il4965_rx_handle()
4329 list_add_tail(&rxb->list, &rxq->rx_used); il4965_rx_handle()
4331 spin_unlock_irqrestore(&rxq->lock, flags); il4965_rx_handle()
4339 rxq->read = i; il4965_rx_handle()
4347 rxq->read = i; il4965_rx_handle()
4492 il_rx_queue_update_write_ptr(il, &il->rxq); il4965_irq_tasklet()
6754 if (il->rxq.bd) il4965_pci_remove()
6755 il4965_rx_queue_free(il, &il->rxq); il4965_pci_remove()
debug.c
899 struct il_rx_queue *rxq = &il->rxq; il_dbgfs_rx_queue_read() local
904 pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n", rxq->read); il_dbgfs_rx_queue_read()
905 pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n", rxq->write); il_dbgfs_rx_queue_read()
908 rxq->free_count); il_dbgfs_rx_queue_read()
909 if (rxq->rb_stts) { il_dbgfs_rx_queue_read()
912 le16_to_cpu(rxq->rb_stts-> il_dbgfs_rx_queue_read()
common.c
2516 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
2517 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
2518 * to replenish the iwl->rxq->rx_free.
2520 * iwl->rxq is replenished and the READ IDX is updated (updating the
2523 * detached from the iwl->rxq. The driver 'processed' idx is updated.
2524 * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
2525 * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
2614 struct il_rx_queue *rxq = &il->rxq; il_rx_queue_alloc() local
2618 spin_lock_init(&rxq->lock); il_rx_queue_alloc()
2619 INIT_LIST_HEAD(&rxq->rx_free); il_rx_queue_alloc()
2620 INIT_LIST_HEAD(&rxq->rx_used); il_rx_queue_alloc()
2623 rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma, il_rx_queue_alloc()
2625 if (!rxq->bd) il_rx_queue_alloc()
2628 rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct il_rb_status), il_rx_queue_alloc()
2629 &rxq->rb_stts_dma, GFP_KERNEL); il_rx_queue_alloc()
2630 if (!rxq->rb_stts) il_rx_queue_alloc()
2635 list_add_tail(&rxq->pool[i].list, &rxq->rx_used); il_rx_queue_alloc()
2639 rxq->read = rxq->write = 0; il_rx_queue_alloc()
2640 rxq->write_actual = 0; il_rx_queue_alloc()
2641 rxq->free_count = 0; il_rx_queue_alloc()
2642 rxq->need_update = 0; il_rx_queue_alloc()
2646 dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd, il_rx_queue_alloc()
2647 rxq->bd_dma); il_rx_queue_alloc()
3945.c
793 il3945_rx_init(struct il_priv *il, struct il_rx_queue *rxq) il3945_rx_init() argument
795 il_wr(il, FH39_RCSR_RBD_BASE(0), rxq->bd_dma); il3945_rx_init()
796 il_wr(il, FH39_RCSR_RPTR_ADDR(0), rxq->rb_stts_dma); il3945_rx_init()
972 struct il_rx_queue *rxq = &il->rxq; il3945_hw_nic_init() local
982 if (!rxq->bd) { il3945_hw_nic_init()
989 il3945_rx_queue_reset(il, rxq); il3945_hw_nic_init()
993 il3945_rx_init(il, rxq); il3945_hw_nic_init()
996 rxq->need_update = 1; il3945_hw_nic_init()
997 il_rx_queue_update_write_ptr(il, rxq); il3945_hw_nic_init()
1000 il_wr(il, FH39_RCSR_WPTR(0), rxq->write & ~7); il3945_hw_nic_init()
4965.h
58 void il4965_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq);
59 int il4965_rx_init(struct il_priv *il, struct il_rx_queue *rxq);
69 void il4965_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq);
3945.h
194 void il3945_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq);
common.h
1270 struct il_rx_queue rxq; member in struct:il_priv
/linux-4.1.27/drivers/net/ethernet/chelsio/cxgb4vf/
cxgb4vf_main.c
324 int rxq, msi, err; request_msix_queue_irqs() local
338 for_each_ethrxq(s, rxq) { for_each_ethrxq()
342 &s->ethrxq[rxq].rspq); for_each_ethrxq()
350 while (--rxq >= 0)
351 free_irq(adapter->msix_info[--msi].vec, &s->ethrxq[rxq].rspq);
362 int rxq, msi; free_msix_queue_irqs() local
366 for_each_ethrxq(s, rxq) free_msix_queue_irqs()
368 &s->ethrxq[rxq].rspq); free_msix_queue_irqs()
393 int rxq; enable_rx() local
396 for_each_ethrxq(s, rxq) enable_rx()
397 qenable(&s->ethrxq[rxq].rspq); enable_rx()
418 int rxq; quiesce_rx() local
420 for_each_ethrxq(s, rxq) quiesce_rx()
421 napi_disable(&s->ethrxq[rxq].rspq.napi); quiesce_rx()
573 struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset]; for_each_port() local
577 for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) { for_each_port()
578 err = t4vf_sge_alloc_rxq(adapter, &rxq->rspq, false, for_each_port()
580 &rxq->fl, t4vf_ethrx_handler); for_each_port()
590 rxq->rspq.idx = qs; for_each_port()
591 memset(&rxq->stats, 0, sizeof(rxq->stats)); for_each_port()
604 struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset]; for_each_port() local
608 for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) { for_each_port()
609 IQ_MAP(s, rxq->rspq.abs_id) = &rxq->rspq; for_each_port()
621 rxq->fl.abs_id = rxq->fl.cntxt_id + s->egr_base; for_each_port()
622 EQ_MAP(s, rxq->fl.abs_id) = &rxq->fl; for_each_port()
646 struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset]; for_each_port() local
651 rss[qs] = rxq[qs].rspq.abs_id; for_each_port()
678 rxq[0].rspq.abs_id; for_each_port()
1208 struct sge_eth_rxq *rxq; cxgb4vf_poll_controller() local
1211 rxq = &adapter->sge.ethrxq[pi->first_qset]; cxgb4vf_poll_controller()
1213 t4vf_sge_intr_msix(0, &rxq->rspq); cxgb4vf_poll_controller()
1214 rxq++; cxgb4vf_poll_controller()
1586 const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset]; collect_sge_port_stats() local
1590 for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) { collect_sge_port_stats()
1593 stats->rx_csum += rxq->stats.rx_cso; collect_sge_port_stats()
1594 stats->vlan_ex += rxq->stats.vlan_ex; collect_sge_port_stats()
1596 stats->lro_pkts += rxq->stats.lro_pkts; collect_sge_port_stats()
1597 stats->lro_merged += rxq->stats.lro_merged; collect_sge_port_stats()
1750 #define R(s, v) S3("u", s, rxq[qs].v) sge_qinfo_show()
1753 const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[r * QPL]; sge_qinfo_show() local
1759 (rxq[qs].rspq.netdev sge_qinfo_show()
1760 ? rxq[qs].rspq.netdev->name sge_qinfo_show()
1763 (rxq[qs].rspq.netdev sge_qinfo_show()
1765 netdev_priv(rxq[qs].rspq.netdev))->port_id sge_qinfo_show()
1775 S3("u", "Intr delay:", qtimer_val(adapter, &rxq[qs].rspq)); sge_qinfo_show()
1777 adapter->sge.counter_val[rxq[qs].rspq.pktcnt_idx]); sge_qinfo_show()
1906 #define R3(fmt, s, v) S3(fmt, s, rxq[qs].v) sge_qstats_show()
1910 const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[r * QPL]; sge_qstats_show() local
1916 (rxq[qs].rspq.netdev sge_qstats_show()
1917 ? rxq[qs].rspq.netdev->name sge_qstats_show()
2471 struct sge_eth_rxq *rxq = &s->ethrxq[qs]; local
2474 init_rspq(&rxq->rspq, 0, 0, 1024, iqe_size);
2475 rxq->fl.size = 72;
sge.c
1544 * @rxq: ingress RX Ethernet Queue
1551 static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl, do_gro() argument
1554 struct adapter *adapter = rxq->rspq.adapter; do_gro()
1559 skb = napi_get_frags(&rxq->rspq.napi); do_gro()
1562 rxq->stats.rx_drops++; do_gro()
1571 skb_record_rx_queue(skb, rxq->rspq.idx); do_gro()
1576 rxq->stats.vlan_ex++; do_gro()
1578 ret = napi_gro_frags(&rxq->rspq.napi); do_gro()
1581 rxq->stats.lro_pkts++; do_gro()
1583 rxq->stats.lro_merged++; do_gro()
1584 rxq->stats.pkts++; do_gro()
1585 rxq->stats.rx_cso++; do_gro()
1603 struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq); t4vf_ethrx_handler() local
1614 do_gro(rxq, gl, pkt); t4vf_ethrx_handler()
1624 rxq->stats.rx_drops++; t4vf_ethrx_handler()
1630 rxq->stats.pkts++; t4vf_ethrx_handler()
1641 rxq->stats.rx_cso++; t4vf_ethrx_handler()
1646 rxq->stats.vlan_ex++; t4vf_ethrx_handler()
1737 struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq); process_responses() local
1774 free_rx_bufs(rspq->adapter, &rxq->fl, process_responses()
1787 BUG_ON(rxq->fl.avail == 0); process_responses()
1788 sdesc = &rxq->fl.sdesc[rxq->fl.cidx]; process_responses()
1796 unmap_rx_buf(rspq->adapter, &rxq->fl); process_responses()
1820 restore_rx_bufs(&gl, &rxq->fl, frag); process_responses()
1850 rxq->fl.size - rxq->fl.avail >= 2*FL_PER_EQ_UNIT) process_responses()
1851 __refill_fl(rspq->adapter, &rxq->fl); process_responses()
2068 struct sge_eth_rxq *rxq; sge_rx_timer_cb() local
2070 rxq = container_of(fl, struct sge_eth_rxq, fl); sge_rx_timer_cb()
2071 if (napi_reschedule(&rxq->rspq.napi)) sge_rx_timer_cb()
2168 * @rspq: pointer to the new rxq's Response Queue to be filled in
2172 * @fl: pointer to the new rxq's Free List to be filled in
2511 struct sge_eth_rxq *rxq = s->ethrxq; t4vf_free_sge_resources() local
2517 for (qs = 0; qs < adapter->sge.ethqsets; qs++, rxq++, txq++) { t4vf_free_sge_resources()
2518 if (rxq->rspq.desc) t4vf_free_sge_resources()
2519 free_rspq_fl(adapter, &rxq->rspq, &rxq->fl); t4vf_free_sge_resources()
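The do_gro() lines above sketch the standard napi_gro_frags() receive path: borrow the per-NAPI frag skb, attach the received page fragments, record the RX queue, and let GRO decide whether the packet merges into an existing flow while the per-queue counters are bumped. A minimal illustration of that pattern follows; struct my_rxq, struct my_pkt_gl and the stats fields are made-up stand-ins rather than the cxgb4vf types, and only napi_get_frags(), skb_fill_page_desc(), skb_record_rx_queue() and napi_gro_frags() are real kernel APIs.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct my_pkt_frag { struct page *page; int offset, size; };
struct my_pkt_gl { struct my_pkt_frag frags[MAX_SKB_FRAGS]; int nfrags; unsigned int tot_len; };
struct my_rxq { struct napi_struct napi; u16 idx; unsigned long pkts, rx_drops, lro_merged; };

static void my_do_gro(struct my_rxq *rxq, const struct my_pkt_gl *gl)
{
	struct sk_buff *skb = napi_get_frags(&rxq->napi);	/* per-NAPI frag skb */
	int i;

	if (unlikely(!skb)) {
		rxq->rx_drops++;		/* no skb available: count and drop */
		return;
	}

	for (i = 0; i < gl->nfrags; i++)	/* attach the RX buffer pages */
		skb_fill_page_desc(skb, i, gl->frags[i].page,
				   gl->frags[i].offset, gl->frags[i].size);
	skb->len = gl->tot_len;
	skb->data_len = gl->tot_len;
	skb->truesize += gl->tot_len;

	skb->ip_summed = CHECKSUM_UNNECESSARY;	/* hardware validated the checksum */
	skb_record_rx_queue(skb, rxq->idx);

	if (napi_gro_frags(&rxq->napi) == GRO_MERGED_FREE)
		rxq->lro_merged++;		/* merged into an existing GRO flow */
	rxq->pkts++;
}

The handler never frees the skb itself: napi_gro_frags() owns it from that point on, which is why the drivers above only update their counters afterwards.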
H A Dadapter.h346 * Macro to iterate across Queue Sets ("rxq" is a historic misnomer).
/linux-4.1.27/drivers/net/ethernet/broadcom/bnx2x/
H A Dbnx2x_vfpf.c618 req->rxq.rcq_addr = fp->rx_comp_mapping; bnx2x_vfpf_setup_q()
619 req->rxq.rcq_np_addr = fp->rx_comp_mapping + BCM_PAGE_SIZE; bnx2x_vfpf_setup_q()
620 req->rxq.rxq_addr = fp->rx_desc_mapping; bnx2x_vfpf_setup_q()
621 req->rxq.sge_addr = fp->rx_sge_mapping; bnx2x_vfpf_setup_q()
622 req->rxq.vf_sb = fp_idx; bnx2x_vfpf_setup_q()
623 req->rxq.sb_index = HC_INDEX_ETH_RX_CQ_CONS; bnx2x_vfpf_setup_q()
624 req->rxq.hc_rate = bp->rx_ticks ? 1000000/bp->rx_ticks : 0; bnx2x_vfpf_setup_q()
625 req->rxq.mtu = bp->dev->mtu; bnx2x_vfpf_setup_q()
626 req->rxq.buf_sz = fp->rx_buf_size; bnx2x_vfpf_setup_q()
627 req->rxq.sge_buf_sz = BCM_PAGE_SIZE * PAGES_PER_SGE; bnx2x_vfpf_setup_q()
628 req->rxq.tpa_agg_sz = tpa_agg_size; bnx2x_vfpf_setup_q()
629 req->rxq.max_sge_pkt = SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT; bnx2x_vfpf_setup_q()
630 req->rxq.max_sge_pkt = ((req->rxq.max_sge_pkt + PAGES_PER_SGE - 1) & bnx2x_vfpf_setup_q()
632 req->rxq.flags = flags; bnx2x_vfpf_setup_q()
633 req->rxq.drop_flags = 0; bnx2x_vfpf_setup_q()
634 req->rxq.cache_line_log = BNX2X_RX_ALIGN_SHIFT; bnx2x_vfpf_setup_q()
635 req->rxq.stat_id = -1; /* No stats at the moment */ bnx2x_vfpf_setup_q()
1507 q->sb_idx = setup_q->rxq.vf_sb; bnx2x_vf_mbx_setup_q()
1510 init_p->rx.hc_rate = setup_q->rxq.hc_rate; bnx2x_vf_mbx_setup_q()
1511 init_p->rx.sb_cq_index = setup_q->rxq.sb_index; bnx2x_vf_mbx_setup_q()
1512 bnx2x_vf_mbx_set_q_flags(bp, setup_q->rxq.flags, bnx2x_vf_mbx_setup_q()
1516 bnx2x_vf_mbx_set_q_flags(bp, setup_q->rxq.flags, bnx2x_vf_mbx_setup_q()
1520 setup_p->gen_params.mtu = setup_q->rxq.mtu; bnx2x_vf_mbx_setup_q()
1523 rxq_params->drop_flags = setup_q->rxq.drop_flags; bnx2x_vf_mbx_setup_q()
1524 rxq_params->dscr_map = setup_q->rxq.rxq_addr; bnx2x_vf_mbx_setup_q()
1525 rxq_params->sge_map = setup_q->rxq.sge_addr; bnx2x_vf_mbx_setup_q()
1526 rxq_params->rcq_map = setup_q->rxq.rcq_addr; bnx2x_vf_mbx_setup_q()
1527 rxq_params->rcq_np_map = setup_q->rxq.rcq_np_addr; bnx2x_vf_mbx_setup_q()
1528 rxq_params->buf_sz = setup_q->rxq.buf_sz; bnx2x_vf_mbx_setup_q()
1529 rxq_params->tpa_agg_sz = setup_q->rxq.tpa_agg_sz; bnx2x_vf_mbx_setup_q()
1530 rxq_params->max_sges_pkt = setup_q->rxq.max_sge_pkt; bnx2x_vf_mbx_setup_q()
1531 rxq_params->sge_buf_sz = setup_q->rxq.sge_buf_sz; bnx2x_vf_mbx_setup_q()
1533 setup_q->rxq.cache_line_log; bnx2x_vf_mbx_setup_q()
1534 rxq_params->sb_cq_index = setup_q->rxq.sb_index; bnx2x_vf_mbx_setup_q()
H A Dbnx2x_vfpf.h262 } rxq; member in struct:vfpf_setup_q_tlv
H A Dbnx2x_sriov.c149 DP(BNX2X_MSG_IOV, "VF[%d] Q_SETUP: rxq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, mtu=%d, buf-size=%d\n" bnx2x_vfop_qctor_dump_rx()
1825 /* extract vf and rxq index from vf_cid - relies on the following: bnx2x_iov_eq_sp_event()
1954 struct bnx2x_vf_queue *rxq = vfq_get(vf, j); for_each_vfq() local
1960 if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) == for_each_vfq()
1966 cur_query_entry->index = vfq_stat_id(vf, rxq); for_each_vfq()
H A Dbnx2x_sriov.h193 /* leading rss client id ~~ the client id of the first rxq, must be
H A Dbnx2x_main.c3217 /* rxq setup */ bnx2x_pf_rx_q_prep()
/linux-4.1.27/drivers/net/ethernet/samsung/sxgbe/
H A Dsxgbe_platform.c138 priv->rxq[i]->irq_no = irq_of_parse_and_map(node, chan++); sxgbe_platform_probe()
139 if (priv->rxq[i]->irq_no <= 0) { sxgbe_platform_probe()
159 irq_dispose_mapping(priv->rxq[i]->irq_no); sxgbe_platform_probe()
H A Dsxgbe_main.c330 priv->hw->desc->init_rx_desc(&priv->rxq[j]->dma_rx[i], sxgbe_clear_descriptors()
588 priv->rxq[queue_num], rx_rsize); SXGBE_FOR_EACH_QUEUE()
597 priv->rxq[queue_num]->priv_ptr = priv; SXGBE_FOR_EACH_QUEUE()
611 free_rx_ring(priv->device, priv->rxq[queue_num], rx_rsize);
663 free_rx_ring(priv->device, priv->rxq[queue_num], rx_rsize); SXGBE_FOR_EACH_QUEUE()
686 priv->rxq[queue_num] = devm_kmalloc(priv->device, SXGBE_FOR_EACH_QUEUE()
688 if (!priv->rxq[queue_num]) SXGBE_FOR_EACH_QUEUE()
973 (priv->rxq[queue_num])->dma_rx_phy, sxgbe_init_dma_engine()
1148 (priv->rxq[queue_num])->irq_no, SXGBE_FOR_EACH_QUEUE()
1150 dev->name, priv->rxq[queue_num]); SXGBE_FOR_EACH_QUEUE()
1457 for (; priv->rxq[qnum]->cur_rx - priv->rxq[qnum]->dirty_rx > 0; sxgbe_rx_refill()
1458 priv->rxq[qnum]->dirty_rx++) { sxgbe_rx_refill()
1459 unsigned int entry = priv->rxq[qnum]->dirty_rx % rxsize; sxgbe_rx_refill()
1462 p = priv->rxq[qnum]->dma_rx + entry; sxgbe_rx_refill()
1464 if (likely(priv->rxq[qnum]->rx_skbuff[entry] == NULL)) { sxgbe_rx_refill()
1472 priv->rxq[qnum]->rx_skbuff[entry] = skb; sxgbe_rx_refill()
1473 priv->rxq[qnum]->rx_skbuff_dma[entry] = sxgbe_rx_refill()
1478 priv->rxq[qnum]->rx_skbuff_dma[entry]; sxgbe_rx_refill()
1501 unsigned int entry = priv->rxq[qnum]->cur_rx; sxgbe_rx()
1512 p = priv->rxq[qnum]->dma_rx + entry; sxgbe_rx()
1519 next_entry = (++priv->rxq[qnum]->cur_rx) % rxsize; sxgbe_rx()
1520 prefetch(priv->rxq[qnum]->dma_rx + next_entry); sxgbe_rx()
1535 skb = priv->rxq[qnum]->rx_skbuff[entry]; sxgbe_rx()
1541 priv->rxq[qnum]->rx_skbuff[entry] = NULL; sxgbe_rx()
1682 struct sxgbe_rx_queue *rxq = (struct sxgbe_rx_queue *)dev_id; sxgbe_rx_interrupt() local
1683 struct sxgbe_priv_data *priv = rxq->priv_ptr; sxgbe_rx_interrupt()
1686 status = priv->hw->dma->rx_dma_int_status(priv->ioaddr, rxq->queue_no, sxgbe_rx_interrupt()
1690 priv->hw->dma->disable_dma_irq(priv->ioaddr, rxq->queue_no); sxgbe_rx_interrupt()
1701 rxq->queue_no, priv->rx_tc); sxgbe_rx_interrupt()
H A Dsxgbe_common.h459 struct sxgbe_rx_queue *rxq[SXGBE_RX_QUEUES]; member in struct:sxgbe_priv_data
/linux-4.1.27/arch/arm/mach-ixp4xx/
H A Dvulcan-setup.c126 .rxq = 3,
131 .rxq = 4,
H A Dfsg-setup.c130 .rxq = 3,
134 .rxq = 4,
H A Dixdp425-setup.c185 .rxq = 3,
189 .rxq = 4,
H A Domixp-setup.c177 .rxq = 3,
181 .rxq = 4,
H A Dgoramo_mlr.c284 .rxq = 3,
288 .rxq = 4,
H A Dnas100d-setup.c163 .rxq = 3,
H A Dnslu2-setup.c175 .rxq = 3,
/linux-4.1.27/drivers/net/ethernet/freescale/
H A Dfec_main.c227 struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue_id]; fec_enet_get_nextdesc() local
237 base = rxq->rx_bd_base; fec_enet_get_nextdesc()
238 ring_size = rxq->rx_ring_size; fec_enet_get_nextdesc()
239 ex_base = (struct bufdesc_ex *)rxq->rx_bd_base; fec_enet_get_nextdesc()
258 struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue_id]; fec_enet_get_prevdesc() local
268 base = rxq->rx_bd_base; fec_enet_get_prevdesc()
269 ring_size = rxq->rx_ring_size; fec_enet_get_prevdesc()
270 ex_base = (struct bufdesc_ex *)rxq->rx_bd_base; fec_enet_get_prevdesc()
807 struct fec_enet_priv_rx_q *rxq; fec_enet_bd_init() local
814 rxq = fep->rx_queue[q]; fec_enet_bd_init()
815 bdp = rxq->rx_bd_base; fec_enet_bd_init()
817 for (i = 0; i < rxq->rx_ring_size; i++) { fec_enet_bd_init()
831 rxq->cur_rx = rxq->rx_bd_base; fec_enet_bd_init()
871 struct fec_enet_priv_rx_q *rxq; fec_enet_enable_ring() local
875 rxq = fep->rx_queue[i]; fec_enet_enable_ring()
876 writel(rxq->bd_dma, fep->hwp + FEC_R_DES_START(i)); fec_enet_enable_ring()
1361 struct fec_enet_priv_rx_q *rxq; fec_enet_rx_queue() local
1380 rxq = fep->rx_queue[queue_id]; fec_enet_rx_queue()
1385 bdp = rxq->cur_rx; fec_enet_rx_queue()
1432 index = fec_enet_get_bd_index(rxq->rx_bd_base, bdp, fep); fec_enet_rx_queue()
1433 skb = rxq->rx_skbuff[index]; fec_enet_rx_queue()
1508 rxq->rx_skbuff[index] = skb_new; fec_enet_rx_queue()
1537 rxq->cur_rx = bdp; fec_enet_rx_queue()
2565 struct fec_enet_priv_rx_q *rxq; fec_enet_free_buffers() local
2569 rxq = fep->rx_queue[q]; fec_enet_free_buffers()
2570 bdp = rxq->rx_bd_base; fec_enet_free_buffers()
2571 for (i = 0; i < rxq->rx_ring_size; i++) { fec_enet_free_buffers()
2572 skb = rxq->rx_skbuff[i]; fec_enet_free_buffers()
2573 rxq->rx_skbuff[i] = NULL; fec_enet_free_buffers()
2676 struct fec_enet_priv_rx_q *rxq; fec_enet_alloc_rxq_buffers() local
2678 rxq = fep->rx_queue[queue]; fec_enet_alloc_rxq_buffers()
2679 bdp = rxq->rx_bd_base; fec_enet_alloc_rxq_buffers()
2680 for (i = 0; i < rxq->rx_ring_size; i++) { fec_enet_alloc_rxq_buffers()
2690 rxq->rx_skbuff[i] = skb; fec_enet_alloc_rxq_buffers()
3011 struct fec_enet_priv_rx_q *rxq; fec_enet_init() local
3050 rxq = fep->rx_queue[i]; fec_enet_init()
3051 rxq->index = i; fec_enet_init()
3052 rxq->rx_bd_base = (struct bufdesc *)cbd_base; fec_enet_init()
3053 rxq->bd_dma = bd_dma; fec_enet_init()
3055 bd_dma += sizeof(struct bufdesc_ex) * rxq->rx_ring_size; fec_enet_init()
3057 (((struct bufdesc_ex *)cbd_base) + rxq->rx_ring_size); fec_enet_init()
3059 bd_dma += sizeof(struct bufdesc) * rxq->rx_ring_size; fec_enet_init()
3060 cbd_base += rxq->rx_ring_size; fec_enet_init()
H A Dgianfar.c815 num_rx_qs = num_grps; /* one rxq per int group */ gfar_of_init()
/linux-4.1.27/drivers/net/ethernet/xscale/
H A Dixp4xx_eth.c678 qmgr_disable_irq(port->plat->rxq); eth_rx_irq()
686 unsigned int rxq = port->plat->rxq, rxfreeq = RXFREE_QUEUE(port->id); eth_poll() local
702 if ((n = queue_get_desc(rxq, port, 0)) < 0) { eth_poll()
708 qmgr_enable_irq(rxq); eth_poll()
709 if (!qmgr_stat_below_low_watermark(rxq) && eth_poll()
716 qmgr_disable_irq(rxq); eth_poll()
1074 err = qmgr_request_queue(port->plat->rxq, RX_DESCS, 0, 0, request_queues()
1103 qmgr_release_queue(port->plat->rxq); request_queues()
1114 qmgr_release_queue(port->plat->rxq); release_queues()
1227 msg.byte5 = port->plat->rxq | 0x80; eth_open()
1228 msg.byte7 = port->plat->rxq << 4; eth_open()
1294 qmgr_set_irq(port->plat->rxq, QUEUE_IRQ_SRC_NOT_EMPTY, eth_open()
1315 qmgr_disable_irq(port->plat->rxq); eth_close()
1331 while (queue_get_desc(port->plat->rxq, port, 0) >= 0) eth_close()
/linux-4.1.27/drivers/net/wireless/
H A Dmwl8k.c247 struct mwl8k_rx_queue rxq[MWL8K_RX_QUEUES]; member in struct:mwl8k_priv
1152 struct mwl8k_rx_queue *rxq = priv->rxq + index; mwl8k_rxq_init() local
1156 rxq->rxd_count = 0; mwl8k_rxq_init()
1157 rxq->head = 0; mwl8k_rxq_init()
1158 rxq->tail = 0; mwl8k_rxq_init()
1162 rxq->rxd = pci_zalloc_consistent(priv->pdev, size, &rxq->rxd_dma); mwl8k_rxq_init()
1163 if (rxq->rxd == NULL) { mwl8k_rxq_init()
1168 rxq->buf = kcalloc(MWL8K_RX_DESCS, sizeof(*rxq->buf), GFP_KERNEL); mwl8k_rxq_init()
1169 if (rxq->buf == NULL) { mwl8k_rxq_init()
1170 pci_free_consistent(priv->pdev, size, rxq->rxd, rxq->rxd_dma); mwl8k_rxq_init()
1181 rxd = rxq->rxd + (i * priv->rxd_ops->rxd_size); mwl8k_rxq_init()
1186 next_dma_addr = rxq->rxd_dma + (nexti * desc_size); mwl8k_rxq_init()
1197 struct mwl8k_rx_queue *rxq = priv->rxq + index; rxq_refill() local
1201 while (rxq->rxd_count < MWL8K_RX_DESCS && limit--) { rxq_refill()
1214 rxq->rxd_count++; rxq_refill()
1215 rx = rxq->tail++; rxq_refill()
1216 if (rxq->tail == MWL8K_RX_DESCS) rxq_refill()
1217 rxq->tail = 0; rxq_refill()
1218 rxq->buf[rx].skb = skb; rxq_refill()
1219 dma_unmap_addr_set(&rxq->buf[rx], dma, addr); rxq_refill()
1221 rxd = rxq->rxd + (rx * priv->rxd_ops->rxd_size); rxq_refill()
1234 struct mwl8k_rx_queue *rxq = priv->rxq + index; mwl8k_rxq_deinit() local
1237 if (rxq->rxd == NULL) mwl8k_rxq_deinit()
1241 if (rxq->buf[i].skb != NULL) { mwl8k_rxq_deinit()
1243 dma_unmap_addr(&rxq->buf[i], dma), mwl8k_rxq_deinit()
1245 dma_unmap_addr_set(&rxq->buf[i], dma, 0); mwl8k_rxq_deinit()
1247 kfree_skb(rxq->buf[i].skb); mwl8k_rxq_deinit()
1248 rxq->buf[i].skb = NULL; mwl8k_rxq_deinit()
1252 kfree(rxq->buf); mwl8k_rxq_deinit()
1253 rxq->buf = NULL; mwl8k_rxq_deinit()
1257 rxq->rxd, rxq->rxd_dma); mwl8k_rxq_deinit()
1258 rxq->rxd = NULL; mwl8k_rxq_deinit()
1311 struct mwl8k_rx_queue *rxq = priv->rxq + index; rxq_process() local
1315 while (rxq->rxd_count && limit--) { rxq_process()
1323 skb = rxq->buf[rxq->head].skb; rxq_process()
1327 rxd = rxq->rxd + (rxq->head * priv->rxd_ops->rxd_size); rxq_process()
1334 rxq->buf[rxq->head].skb = NULL; rxq_process()
1337 dma_unmap_addr(&rxq->buf[rxq->head], dma), rxq_process()
1339 dma_unmap_addr_set(&rxq->buf[rxq->head], dma, 0); rxq_process()
1341 rxq->head++; rxq_process()
1342 if (rxq->head == MWL8K_RX_DESCS) rxq_process()
1343 rxq->head = 0; rxq_process()
1345 rxq->rxd_count--; rxq_process()
2457 cmd->rx_queue_ptr = cpu_to_le32(priv->rxq[0].rxd_dma); mwl8k_cmd_get_hw_spec_sta()
2554 iowrite32(priv->rxq[0].rxd_dma, priv->sram + off); mwl8k_cmd_get_hw_spec_ap()
2557 iowrite32(priv->rxq[0].rxd_dma, priv->sram + off); mwl8k_cmd_get_hw_spec_ap()
2623 cmd->rx_queue_ptr = cpu_to_le32(priv->rxq[0].rxd_dma); mwl8k_cmd_set_hw_spec()
/linux-4.1.27/drivers/net/ethernet/chelsio/cxgb4/
H A Dsge.c1796 static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl, do_gro() argument
1799 struct adapter *adapter = rxq->rspq.adap; do_gro()
1804 skb = napi_get_frags(&rxq->rspq.napi); do_gro()
1807 rxq->stats.rx_drops++; do_gro()
1816 skb_record_rx_queue(skb, rxq->rspq.idx); do_gro()
1817 skb_mark_napi_id(skb, &rxq->rspq.napi); do_gro()
1818 if (rxq->rspq.netdev->features & NETIF_F_RXHASH) do_gro()
1824 rxq->stats.vlan_ex++; do_gro()
1826 ret = napi_gro_frags(&rxq->rspq.napi); do_gro()
1828 rxq->stats.lro_pkts++; do_gro()
1830 rxq->stats.lro_merged++; do_gro()
1831 rxq->stats.pkts++; do_gro()
1832 rxq->stats.rx_cso++; do_gro()
1849 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq); t4_ethrx_handler() local
1866 do_gro(rxq, si, pkt); t4_ethrx_handler()
1873 rxq->stats.rx_drops++; t4_ethrx_handler()
1884 rxq->stats.pkts++; t4_ethrx_handler()
1889 rxq->stats.rx_cso++; t4_ethrx_handler()
1894 rxq->stats.rx_cso++; t4_ethrx_handler()
1917 rxq->stats.vlan_ex++; t4_ethrx_handler()
2004 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq); process_responses() local
2023 free_rx_bufs(q->adap, &rxq->fl, 1); process_responses()
2032 rsd = &rxq->fl.sdesc[rxq->fl.cidx]; process_responses()
2040 unmap_rx_buf(q->adap, &rxq->fl); process_responses()
2060 restore_rx_bufs(&si, &rxq->fl, frags); process_responses()
2077 if (q->offset >= 0 && rxq->fl.size - rxq->fl.avail >= 16) process_responses()
2078 __refill_fl(q->adap, &rxq->fl); process_responses()
2288 struct sge_eth_rxq *rxq; sge_rx_timer_cb() local
2296 rxq = container_of(fl, struct sge_eth_rxq, fl); sge_rx_timer_cb()
2297 if (napi_reschedule(&rxq->rspq.napi)) sge_rx_timer_cb()
H A Dcxgb4_main.c683 struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq); uldrx_handler() local
692 rxq->stats.nomem++; uldrx_handler()
696 rxq->stats.imm++; uldrx_handler()
698 rxq->stats.an++; uldrx_handler()
700 rxq->stats.pkts++; uldrx_handler()
H A Dcxgb4.h1271 unsigned int rxqi, unsigned int rxq, unsigned int tc,
H A Dt4_hw.c4447 * @rxq: the max number of interruptless ingress queues
4461 unsigned int rxqi, unsigned int rxq, unsigned int tc, t4_cfg_pfvf()
4473 FW_PFVF_CMD_NIQ_V(rxq)); t4_cfg_pfvf()
4459 t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl, unsigned int rxqi, unsigned int rxq, unsigned int tc, unsigned int vi, unsigned int cmask, unsigned int pmask, unsigned int nexact, unsigned int rcaps, unsigned int wxcaps) t4_cfg_pfvf() argument
/linux-4.1.27/drivers/net/ethernet/ibm/
H A Dibmveth.h50 #define h_register_logical_lan(ua, buflst, rxq, fltlst, mac) \
51 plpar_hcall_norets(H_REGISTER_LOGICAL_LAN, ua, buflst, rxq, fltlst, mac)
H A Dibmveth.c105 /* simple methods of getting data from the current rxq entry */ ibmveth_rxq_flags()
621 netdev_err(netdev, "buffer TCE:0x%llx filter TCE:0x%llx rxq " ibmveth_open()
/linux-4.1.27/drivers/mailbox/
H A Domap-mailbox.c114 struct omap_mbox_queue *rxq; member in struct:omap_mbox
336 struct omap_mbox_queue *mq = mbox->rxq; __mbox_rx_interrupt()
356 schedule_work(&mbox->rxq->work); __mbox_rx_interrupt()
411 mbox->rxq = mq; omap_mbox_startup()
426 mbox_queue_free(mbox->rxq); omap_mbox_startup()
434 flush_work(&mbox->rxq->work); omap_mbox_fini()
435 mbox_queue_free(mbox->rxq); omap_mbox_fini()
/linux-4.1.27/drivers/net/ethernet/intel/i40evf/
H A Di40evf_virtchnl.c243 vqpi->rxq.vsi_id = vqci->vsi_id; i40evf_configure_queues()
244 vqpi->rxq.queue_id = i; i40evf_configure_queues()
245 vqpi->rxq.ring_len = adapter->rx_rings[i]->count; i40evf_configure_queues()
246 vqpi->rxq.dma_ring_addr = adapter->rx_rings[i]->dma; i40evf_configure_queues()
247 vqpi->rxq.max_pkt_size = adapter->netdev->mtu i40evf_configure_queues()
249 vqpi->rxq.databuffer_size = adapter->rx_rings[i]->rx_buf_len; i40evf_configure_queues()
H A Di40e_virtchnl.h208 struct i40e_virtchnl_rxq_info rxq; member in struct:i40e_virtchnl_queue_pair_info
/linux-4.1.27/drivers/net/usb/
H A Dusbnet.c492 spin_lock_irqsave (&dev->rxq.lock, lockflags); rx_submit()
518 __usbnet_queue_skb(&dev->rxq, skb, rx_start); rx_submit()
524 spin_unlock_irqrestore (&dev->rxq.lock, lockflags); rx_submit()
641 state = defer_bh(dev, skb, &dev->rxq, state); rx_complete()
745 (void) unlink_urbs (dev, &dev->rxq); usbnet_unlink_rx_urbs()
763 unlink_urbs(dev, &dev->rxq); usbnet_terminate_urbs()
766 while (!skb_queue_empty(&dev->rxq) usbnet_terminate_urbs()
1039 unlink_urbs(dev, &dev->rxq); __handle_link_change()
1105 unlink_urbs (dev, &dev->rxq); usbnet_deferred_kevent()
1433 for (i = 0; i < 10 && dev->rxq.qlen < RX_QLEN(dev); i++) { rx_alloc_submit()
1483 if (dev->txq.qlen + dev->rxq.qlen + dev->done.qlen == 0) usbnet_bh()
1492 int temp = dev->rxq.qlen; usbnet_bh()
1497 if (temp != dev->rxq.qlen) usbnet_bh()
1500 temp, dev->rxq.qlen); usbnet_bh()
1501 if (dev->rxq.qlen < RX_QLEN(dev)) usbnet_bh()
1625 skb_queue_head_init (&dev->rxq); usbnet_probe()
/linux-4.1.27/drivers/net/
H A Dmacvtap.c214 * Select a queue based on the rxq of the device on which this packet
231 __u32 rxq; macvtap_get_queue() local
237 rxq = skb_get_hash(skb); macvtap_get_queue()
238 if (rxq) { macvtap_get_queue()
239 tap = rcu_dereference(vlan->taps[rxq % numvtaps]); macvtap_get_queue()
244 rxq = skb_get_rx_queue(skb); macvtap_get_queue()
246 while (unlikely(rxq >= numvtaps)) macvtap_get_queue()
247 rxq -= numvtaps; macvtap_get_queue()
249 tap = rcu_dereference(vlan->taps[rxq]); macvtap_get_queue()
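macvtap_get_queue() above prefers the packet's flow hash to pick one of the numvtaps queues and only falls back to the recorded RX queue, folding that index into range by repeated subtraction rather than a modulo. A hedged, self-contained restatement of that policy; pick_queue() and its parameters are illustrative names, not the macvtap API.

#include <linux/types.h>

static u32 pick_queue(u32 flow_hash, u32 recorded_rxq, u32 numqueues)
{
	u32 idx;

	if (!numqueues)
		return 0;			/* caller treats this as "no queue" */

	if (flow_hash)				/* best case: steer by flow hash */
		return flow_hash % numqueues;

	idx = recorded_rxq;			/* otherwise reuse the device rxq */
	while (idx >= numqueues)		/* fold into range without a divide */
		idx -= numqueues;
	return idx;
}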
H A Dxen-netfront.c877 struct sk_buff_head *rxq) handle_incoming_queue()
883 while ((skb = __skb_dequeue(rxq)) != NULL) { handle_incoming_queue()
922 struct sk_buff_head rxq; xennet_poll() local
929 skb_queue_head_init(&rxq); xennet_poll()
982 __skb_queue_tail(&rxq, skb); xennet_poll()
990 work_done -= handle_incoming_queue(queue, &rxq); xennet_poll()
876 handle_incoming_queue(struct netfront_queue *queue, struct sk_buff_head *rxq) handle_incoming_queue() argument
H A Dtun.c380 * we do not check the rxq no. because some cards (e.g. 82599) choose
381 * the rxq based on the txq where the last packet of the flow comes. As
383 * different rxq no. here. If we could not get rxhash, then we would
384 * hope the rxq no. may help here.
H A Dvirtio_net.c173 static int rxq2vq(int rxq) rxq2vq() argument
175 return rxq * 2; rxq2vq()
/linux-4.1.27/arch/arm/mach-ixp4xx/include/mach/
H A Dplatform.h101 u8 rxq; /* configurable, currently 0 - 31 only */ member in struct:eth_plat_info
/linux-4.1.27/drivers/net/wireless/ipw2x00/
H A Dipw2200.c3451 struct ipw_rx_queue *rxq) ipw_rx_queue_reset()
3456 spin_lock_irqsave(&rxq->lock, flags); ipw_rx_queue_reset()
3458 INIT_LIST_HEAD(&rxq->rx_free); ipw_rx_queue_reset()
3459 INIT_LIST_HEAD(&rxq->rx_used); ipw_rx_queue_reset()
3465 if (rxq->pool[i].skb != NULL) { ipw_rx_queue_reset()
3466 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr, ipw_rx_queue_reset()
3468 dev_kfree_skb(rxq->pool[i].skb); ipw_rx_queue_reset()
3469 rxq->pool[i].skb = NULL; ipw_rx_queue_reset()
3471 list_add_tail(&rxq->pool[i].list, &rxq->rx_used); ipw_rx_queue_reset()
3476 rxq->read = rxq->write = 0; ipw_rx_queue_reset()
3477 rxq->free_count = 0; ipw_rx_queue_reset()
3478 spin_unlock_irqrestore(&rxq->lock, flags); ipw_rx_queue_reset()
3545 if (!priv->rxq) ipw_load()
3546 priv->rxq = ipw_rx_queue_alloc(priv); ipw_load()
3548 ipw_rx_queue_reset(priv, priv->rxq); ipw_load()
3549 if (!priv->rxq) { ipw_load()
3668 ipw_write32(priv, IPW_RX_READ_INDEX, priv->rxq->read); ipw_load()
3679 if (priv->rxq) { ipw_load()
3680 ipw_rx_queue_free(priv, priv->rxq); ipw_load()
3681 priv->rxq = NULL; ipw_load()
5119 * + A list of pre-allocated SKBs is stored in ipw->rxq->rx_free. When
5120 * ipw->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
5121 * to replenish the ipw->rxq->rx_free.
5123 * ipw->rxq is replenished and the READ INDEX is updated (updating the
5126 * detached from the ipw->rxq. The driver 'processed' index is updated.
5127 * + The Host/Firmware ipw->rxq is replenished at tasklet time from the rx_free
5128 * list. If there are no allocated buffers in ipw->rxq->rx_free, the READ
5164 struct ipw_rx_queue *rxq = priv->rxq; ipw_rx_queue_restock() local
5170 spin_lock_irqsave(&rxq->lock, flags); ipw_rx_queue_restock()
5171 write = rxq->write; ipw_rx_queue_restock()
5172 while ((ipw_rx_queue_space(rxq) > 0) && (rxq->free_count)) { ipw_rx_queue_restock()
5173 element = rxq->rx_free.next; ipw_rx_queue_restock()
5177 ipw_write32(priv, IPW_RFDS_TABLE_LOWER + rxq->write * RFD_SIZE, ipw_rx_queue_restock()
5179 rxq->queue[rxq->write] = rxb; ipw_rx_queue_restock()
5180 rxq->write = (rxq->write + 1) % RX_QUEUE_SIZE; ipw_rx_queue_restock()
5181 rxq->free_count--; ipw_rx_queue_restock()
5183 spin_unlock_irqrestore(&rxq->lock, flags); ipw_rx_queue_restock()
5187 if (rxq->free_count <= RX_LOW_WATERMARK) ipw_rx_queue_restock()
5191 if (write != rxq->write) ipw_rx_queue_restock()
5192 ipw_write32(priv, IPW_RX_WRITE_INDEX, rxq->write); ipw_rx_queue_restock()
5204 struct ipw_rx_queue *rxq = priv->rxq; ipw_rx_queue_replenish() local
5209 spin_lock_irqsave(&rxq->lock, flags); ipw_rx_queue_replenish()
5210 while (!list_empty(&rxq->rx_used)) { ipw_rx_queue_replenish()
5211 element = rxq->rx_used.next; ipw_rx_queue_replenish()
5228 list_add_tail(&rxb->list, &rxq->rx_free); ipw_rx_queue_replenish()
5229 rxq->free_count++; ipw_rx_queue_replenish()
5231 spin_unlock_irqrestore(&rxq->lock, flags); ipw_rx_queue_replenish()
5250 static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq) ipw_rx_queue_free() argument
5254 if (!rxq) ipw_rx_queue_free()
5258 if (rxq->pool[i].skb != NULL) { ipw_rx_queue_free()
5259 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr, ipw_rx_queue_free()
5261 dev_kfree_skb(rxq->pool[i].skb); ipw_rx_queue_free()
5265 kfree(rxq); ipw_rx_queue_free()
5270 struct ipw_rx_queue *rxq; ipw_rx_queue_alloc() local
5273 rxq = kzalloc(sizeof(*rxq), GFP_KERNEL); ipw_rx_queue_alloc()
5274 if (unlikely(!rxq)) { ipw_rx_queue_alloc()
5278 spin_lock_init(&rxq->lock); ipw_rx_queue_alloc()
5279 INIT_LIST_HEAD(&rxq->rx_free); ipw_rx_queue_alloc()
5280 INIT_LIST_HEAD(&rxq->rx_used); ipw_rx_queue_alloc()
5284 list_add_tail(&rxq->pool[i].list, &rxq->rx_used); ipw_rx_queue_alloc()
5288 rxq->read = rxq->write = 0; ipw_rx_queue_alloc()
5289 rxq->free_count = 0; ipw_rx_queue_alloc()
5291 return rxq; ipw_rx_queue_alloc()
8286 i = priv->rxq->read; ipw_rx()
8288 if (ipw_rx_queue_space (priv->rxq) > (RX_QUEUE_SIZE / 2)) ipw_rx()
8292 rxb = priv->rxq->queue[i]; ipw_rx()
8297 priv->rxq->queue[i] = NULL; ipw_rx()
8455 list_add_tail(&rxb->list, &priv->rxq->rx_used); ipw_rx()
8462 priv->rxq->read = i; ipw_rx()
8468 priv->rxq->read = i; ipw_rx()
11818 if (priv->rxq) { ipw_pci_remove()
11819 ipw_rx_queue_free(priv, priv->rxq); ipw_pci_remove()
11820 priv->rxq = NULL; ipw_pci_remove()
3450 ipw_rx_queue_reset(struct ipw_priv *priv, struct ipw_rx_queue *rxq) ipw_rx_queue_reset() argument
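The ipw2200.c comment block quoted above (its lines 5119-5128) describes the two-list scheme these RX paths share: rx_used holds buffer headers that still need an skb, rx_free holds DMA-mapped buffers ready for the hardware, and restock moves them into the circular RFD table while advancing the WRITE index. A condensed sketch of that replenish/restock pair follows; struct rx_queue, alloc_rx_skb(), give_buffer_to_hw() and rx_queue_space() are simplified stand-ins, not the ipw2200 symbols.

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#define RX_QUEUE_SIZE 32

struct rx_buf { struct list_head list; /* skb, dma_addr, ... */ };

struct rx_queue {
	spinlock_t lock;
	struct list_head rx_free;		/* mapped buffers ready for the HW */
	struct list_head rx_used;		/* headers waiting for a new skb */
	struct rx_buf *queue[RX_QUEUE_SIZE];	/* buffers currently owned by HW */
	u32 read, write, free_count;
};

int alloc_rx_skb(struct rx_buf *rxb);				/* hypothetical: alloc + DMA-map an skb */
void give_buffer_to_hw(struct rx_queue *q, struct rx_buf *rxb);	/* hypothetical: write the RFD entry */
int rx_queue_space(const struct rx_queue *q);			/* hypothetical: free RFD slots */

/* move rx_used entries back to rx_free once they have an skb again */
static void rx_queue_replenish(struct rx_queue *q)
{
	struct rx_buf *rxb;
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	while (!list_empty(&q->rx_used)) {
		rxb = list_first_entry(&q->rx_used, struct rx_buf, list);
		if (alloc_rx_skb(rxb))		/* returns 0 on success */
			break;			/* out of memory: retry later from a work item */
		list_move_tail(&rxb->list, &q->rx_free);
		q->free_count++;
	}
	spin_unlock_irqrestore(&q->lock, flags);
}

/* hand rx_free buffers to the hardware ring and advance the WRITE index */
static void rx_queue_restock(struct rx_queue *q)
{
	struct rx_buf *rxb;
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	while (rx_queue_space(q) > 0 && q->free_count) {
		rxb = list_first_entry(&q->rx_free, struct rx_buf, list);
		list_del(&rxb->list);
		give_buffer_to_hw(q, rxb);
		q->queue[q->write] = rxb;
		q->write = (q->write + 1) % RX_QUEUE_SIZE;
		q->free_count--;
	}
	spin_unlock_irqrestore(&q->lock, flags);
	/* the device's WRITE index register is updated after the loop */
}

When free_count drops to or below the low-water mark the drivers above schedule the replenish from a work item rather than doing GFP_ATOMIC allocations inline, which is the allocation-priority split the quoted comments refer to.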
H A Dipw2100.c2675 struct ipw2100_bd_queue *rxq = &priv->rx_queue; __ipw2100_rx_process() local
2688 if (r >= rxq->entries) { __ipw2100_rx_process()
2693 i = (rxq->next + 1) % rxq->entries; __ipw2100_rx_process()
2697 r, rxq->next, i); */ __ipw2100_rx_process()
2765 rxq->drv[i].status.info.field = 0; __ipw2100_rx_process()
2767 i = (i + 1) % rxq->entries; __ipw2100_rx_process()
2772 rxq->next = (i ? i : rxq->entries) - 1; __ipw2100_rx_process()
2775 IPW_MEM_HOST_SHARED_RX_WRITE_INDEX, rxq->next); __ipw2100_rx_process()
H A Dipw2200.h1205 struct ipw_rx_queue *rxq; member in struct:ipw_priv
/linux-4.1.27/drivers/net/ethernet/sfc/
H A Dvfdi.h151 * @u.mac_filter.rxq: Insert MAC filter at VF local address/VLAN targeting
198 u32 rxq; member in struct:vfdi_req::__anon7324::__anon7328
H A Dptp.c221 * @rxq: Receive queue (awaiting timestamps)
272 struct sk_buff_head rxq; member in struct:efx_ptp_data
998 while ((skb = skb_dequeue(&ptp->rxq))) { efx_ptp_process_events()
1013 skb_queue_head(&ptp->rxq, skb); efx_ptp_process_events()
1129 efx_ptp_deliver_rx_queue(&efx->ptp_data->rxq); efx_ptp_stop()
1226 skb_queue_head_init(&ptp->rxq); efx_ptp_probe()
1315 skb_queue_purge(&efx->ptp_data->rxq); efx_ptp_remove()
1439 skb_queue_tail(&ptp->rxq, skb); efx_ptp_rx()
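ptp.c above uses its rxq as a holding area: received PTP event frames are queued until their hardware timestamps arrive, then matched and delivered, and anything still pending is purged on teardown. A minimal sketch of that deferred-delivery pattern; match_timestamp() and deliver() are hypothetical placeholders for the sfc-specific matching and the netif_receive_skb() hand-off.

#include <linux/skbuff.h>
#include <linux/types.h>

bool match_timestamp(struct sk_buff *skb);	/* hypothetical: has the timestamp arrived? */
void deliver(struct sk_buff *skb);		/* hypothetical: pass the skb up the stack */

/* park an event frame until its timestamp shows up */
static void ptp_rx(struct sk_buff_head *rxq, struct sk_buff *skb)
{
	skb_queue_tail(rxq, skb);
}

/* deliver everything that is ready; requeue the first skb that is not */
static void ptp_process(struct sk_buff_head *rxq)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(rxq)) != NULL) {
		if (!match_timestamp(skb)) {
			skb_queue_head(rxq, skb);	/* still waiting: put it back */
			break;
		}
		deliver(skb);
	}
}

/* on remove, drop whatever is still pending */
static void ptp_teardown(struct sk_buff_head *rxq)
{
	skb_queue_purge(rxq);
}

The queue itself only needs skb_queue_head_init() at probe time, which is what the efx_ptp_probe() hit above does.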
H A Dsiena_sriov.c586 "ERROR: Invalid INIT_RXQ from %s: rxq %d evq %d " efx_vfdi_init_rxq()
765 unsigned vf_rxq = req->u.mac_filter.rxq; efx_vfdi_insert_filter()
771 "ERROR: Invalid INSERT_FILTER from %s: rxq %d " efx_vfdi_insert_filter()
H A Dethtool.c241 #define EFX_RX_QUEUE_NAME(_rx_queue) "rxq%d", _rx_queue->queue
H A Dfarch.c2530 "%s: filter type %d index %d rxq %u set", efx_farch_filter_insert()
/linux-4.1.27/drivers/net/wan/
H A Dixp4xx_hss.c658 unsigned int rxq = queue_ids[port->id].rx; hss_hdlc_poll() local
675 if ((n = queue_get_desc(rxq, port, 0)) < 0) { hss_hdlc_poll()
681 qmgr_enable_irq(rxq); hss_hdlc_poll()
682 if (!qmgr_stat_empty(rxq) && hss_hdlc_poll()
689 qmgr_disable_irq(rxq); hss_hdlc_poll()
/linux-4.1.27/drivers/net/ethernet/broadcom/
H A Dbcmsysport.c1665 u32 txq, rxq; bcm_sysport_probe() local
1674 if (of_property_read_u32(dn, "systemport,num-rxq", &rxq)) bcm_sysport_probe()
1675 rxq = 1; bcm_sysport_probe()
1677 dev = alloc_etherdev_mqs(sizeof(*priv), txq, rxq); bcm_sysport_probe()
1764 priv->base, priv->irq0, priv->irq1, txq, rxq); bcm_sysport_probe()
/linux-4.1.27/drivers/net/ethernet/emulex/benet/
H A Dbe_main.c1748 struct be_queue_info *rxq = &rxo->q; get_rx_page_info() local
1749 u16 frag_idx = rxq->tail; get_rx_page_info()
1765 queue_tail_inc(rxq); get_rx_page_info()
1766 atomic_dec(&rxq->used); get_rx_page_info()
2060 struct be_queue_info *rxq = &rxo->q; be_post_rx_frags() local
2067 page_info = &rxo->page_info_tbl[rxq->head]; be_post_rx_frags()
2092 rxd = queue_head_node(rxq); be_post_rx_frags()
2108 queue_head_inc(rxq); be_post_rx_frags()
2109 page_info = &rxo->page_info_tbl[rxq->head]; be_post_rx_frags()
2121 atomic_add(posted, &rxq->used); be_post_rx_frags()
2126 be_rxq_notify(adapter, rxq->id, notify); be_post_rx_frags()
2129 } else if (atomic_read(&rxq->used) == 0) { be_post_rx_frags()
2221 struct be_queue_info *rxq = &rxo->q; be_rx_cq_clean() local
2258 while (atomic_read(&rxq->used) > 0) { be_rx_cq_clean()
2263 BUG_ON(atomic_read(&rxq->used)); be_rx_cq_clean()
2264 rxq->tail = 0; be_rx_cq_clean()
2265 rxq->head = 0; be_rx_cq_clean()
H A Dbe_cmds.c1311 struct be_queue_info *rxq, u16 cq_id, u16 frag_size, be_cmd_rxq_create()
1316 struct be_dma_mem *q_mem = &rxq->dma_mem; be_cmd_rxq_create()
1343 rxq->id = le16_to_cpu(resp->id); be_cmd_rxq_create()
1344 rxq->created = true; be_cmd_rxq_create()
1310 be_cmd_rxq_create(struct be_adapter *adapter, struct be_queue_info *rxq, u16 cq_id, u16 frag_size, u32 if_id, u32 rss, u8 *rss_id) be_cmd_rxq_create() argument
H A Dbe_ethtool.c440 sprintf(data, "rxq%d: %s", i, be_get_stat_strings()
H A Dbe_cmds.h2255 int be_cmd_rxq_create(struct be_adapter *adapter, struct be_queue_info *rxq,
/linux-4.1.27/include/linux/usb/
H A Dusbnet.h57 struct sk_buff_head rxq; member in struct:usbnet
/linux-4.1.27/net/core/
H A Dnet-sysfs.c1321 int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0; register_queue_kobjects() local
1335 rxq = real_rx; register_queue_kobjects()
1346 net_rx_queue_update_kobjects(dev, rxq, 0); register_queue_kobjects()
H A Ddev.c2164 * @rxq: Actual number of RX queues
2171 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq) netif_set_real_num_rx_queues() argument
2175 if (rxq < 1 || rxq > dev->num_rx_queues) netif_set_real_num_rx_queues()
2182 rxq); netif_set_real_num_rx_queues()
2187 dev->real_num_rx_queues = rxq; netif_set_real_num_rx_queues()
3529 static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq) ing_filter() argument
3545 q = rcu_dereference(rxq->qdisc); ing_filter()
3560 struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue); handle_ing() local
3562 if (!rxq || rcu_access_pointer(rxq->qdisc) == &noop_qdisc) handle_ing()
3570 switch (ing_filter(skb, rxq)) { handle_ing()
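dev.c above documents netif_set_real_num_rx_queues() as the way a driver changes the number of RX queues visible to the stack at runtime; it rejects values outside 1..num_rx_queues and keeps the sysfs rx-queue objects in sync. A hedged example of how a multiqueue driver might use it together with its TX counterpart, for instance when the channel count changes via ethtool; only the two netif_* calls are real APIs, the surrounding function is illustrative.

#include <linux/netdevice.h>

/* nqueues must stay between 1 and the maximum passed to
 * alloc_etherdev_mqs() at probe time. */
static int my_set_channels(struct net_device *dev, unsigned int nqueues)
{
	int err;

	err = netif_set_real_num_tx_queues(dev, nqueues);
	if (err)
		return err;

	return netif_set_real_num_rx_queues(dev, nqueues);
}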
/linux-4.1.27/drivers/net/wireless/ath/ath6kl/
H A Dhtc_mbox.c1913 struct list_head *rxq, ath6kl_htc_rx_bundle()
1922 n_scat_pkt = get_queue_depth(rxq); ath6kl_htc_rx_bundle()
1925 if ((get_queue_depth(rxq) - n_scat_pkt) > 0) { ath6kl_htc_rx_bundle()
1938 __func__, get_queue_depth(rxq), n_scat_pkt); ath6kl_htc_rx_bundle()
1945 get_queue_depth(rxq), n_scat_pkt); ath6kl_htc_rx_bundle()
1955 packet = list_first_entry(rxq, struct htc_packet, list); ath6kl_htc_rx_bundle()
1962 list_add(&packet->list, rxq); ath6kl_htc_rx_bundle()
1912 ath6kl_htc_rx_bundle(struct htc_target *target, struct list_head *rxq, struct list_head *sync_compq, int *n_pkt_fetched, bool part_bundle) ath6kl_htc_rx_bundle() argument
/linux-4.1.27/drivers/infiniband/hw/cxgb3/
H A Diwch_cm.c107 static struct sk_buff_head rxq; variable in typeref:struct:sk_buff_head
2187 while ((skb = skb_dequeue(&rxq))) { process_work()
2218 skb_queue_tail(&rxq, skb); sched()
2259 skb_queue_head_init(&rxq); iwch_cm_init()
/linux-4.1.27/drivers/net/ethernet/intel/i40e/
H A Di40e_virtchnl.h208 struct i40e_virtchnl_rxq_info rxq; member in struct:i40e_virtchnl_queue_pair_info
H A Di40e_virtchnl_pf.c1281 (qpi->rxq.vsi_id != vsi_id) || i40e_vc_config_queues_msg()
1282 (qpi->rxq.queue_id != vsi_queue_id) || i40e_vc_config_queues_msg()
1289 &qpi->rxq) || i40e_vc_config_queues_msg()
/linux-4.1.27/drivers/net/xen-netback/
H A Dnetback.c502 struct sk_buff_head rxq; xenvif_rx_action() local
514 skb_queue_head_init(&rxq); xenvif_rx_action()
527 __skb_queue_tail(&rxq, skb); xenvif_rx_action()
538 while ((skb = __skb_dequeue(&rxq)) != NULL) { xenvif_rx_action()
/linux-4.1.27/drivers/net/ethernet/atheros/atl1c/
H A Datl1c_main.c1198 u32 mac, txq, rxq; atl1c_start_mac() local
1205 AT_READ_REG(hw, REG_RXQ_CTRL, &rxq); atl1c_start_mac()
1209 rxq |= RXQ_CTRL_EN; atl1c_start_mac()
1223 AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq); atl1c_start_mac()
/linux-4.1.27/drivers/infiniband/hw/cxgb4/
H A Dcm.c142 static struct sk_buff_head rxq; variable in typeref:struct:sk_buff_head
3876 while ((skb = skb_dequeue(&rxq))) { process_work()
3925 skb_queue_tail(&rxq, skb); sched()
4039 skb_queue_head_init(&rxq); c4iw_cm_init()
/linux-4.1.27/drivers/usb/gadget/function/
H A Du_ether.c418 /* fill unused rxq slots with some skb */ rx_fill()
/linux-4.1.27/drivers/scsi/cxgbi/cxgb4i/
H A Dcxgb4i.c1751 "t4 0x%p, rxq id #%d: %u.\n", t4_uld_add()
/linux-4.1.27/include/linux/
H A Dnetdevice.h2867 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
2870 unsigned int rxq) netif_set_real_num_rx_queues()
2869 netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq) netif_set_real_num_rx_queues() argument
/linux-4.1.27/drivers/net/ethernet/chelsio/cxgb3/
H A Dsge.c375 * @rxq: the SGE free list to clean up
