Lines Matching refs:rxo
546 struct be_rx_obj *rxo, u32 erx_stat) in populate_erx_stats() argument
549 rx_stats(rxo)->rx_drops_no_frags = erx_stat; in populate_erx_stats()
554 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags, in populate_erx_stats()
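
The populate_erx_stats() hits above show the per-queue rx_drops_no_frags counter being taken either straight from the ERX statistics block or folded in via accumulate_16bit_val(). Below is a small user-space sketch of that kind of wrap-aware accumulation of a free-running 16-bit hardware counter; the function and constants here are illustrative guesses at the pattern, not the driver's exact helper.

#include <stdint.h>
#include <stdio.h>

/*
 * Sketch: keep the last 16-bit hardware reading in the low half of the
 * accumulator and count wrap-arounds in the high half.  Illustrative
 * only; not necessarily the driver's accumulate_16bit_val().
 */
static void accumulate_16bit(uint32_t *acc, uint16_t hw_val)
{
    uint32_t lo = *acc & 0xffffu;        /* last hardware reading */
    uint32_t hi = *acc & 0xffff0000u;    /* wrap count << 16 */

    if (hw_val < lo)                     /* counter wrapped since last read */
        hi += 0x10000u;

    *acc = hi | hw_val;
}

int main(void)
{
    uint32_t acc = 0;
    uint16_t samples[] = { 100, 60000, 5, 70 };  /* wraps between 60000 and 5 */

    for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
        accumulate_16bit(&acc, samples[i]);

    printf("accumulated = %u\n", (unsigned)acc); /* one wrap + 70 = 65606 */
    return 0;
}
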
561 struct be_rx_obj *rxo; in be_parse_stats() local
577 for_all_rx_queues(adapter, rxo, i) { in be_parse_stats()
578 erx_stat = erx->rx_drops_no_fragments[rxo->q.id]; in be_parse_stats()
579 populate_erx_stats(adapter, rxo, erx_stat); in be_parse_stats()
589 struct be_rx_obj *rxo; in be_get_stats64() local
595 for_all_rx_queues(adapter, rxo, i) { in be_get_stats64()
596 const struct be_rx_stats *rx_stats = rx_stats(rxo); in be_get_stats64()
600 pkts = rx_stats(rxo)->rx_pkts; in be_get_stats64()
601 bytes = rx_stats(rxo)->rx_bytes; in be_get_stats64()
605 stats->multicast += rx_stats(rxo)->rx_mcast_pkts; in be_get_stats64()
606 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs + in be_get_stats64()
607 rx_stats(rxo)->rx_drops_no_frags; in be_get_stats64()
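
be_get_stats64() walks every RX object and folds its per-queue software counters into the device-wide totals (packets, bytes, multicast, drops). A simplified user-space sketch of that aggregation follows; the struct and field names are stand-ins for the driver's be_rx_stats, and the u64_stats retry loop the driver wraps around each read is reduced to a comment.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for one queue's software counters (cf. struct be_rx_stats). */
struct rx_stats {
    uint64_t rx_pkts;
    uint64_t rx_bytes;
    uint64_t rx_mcast_pkts;
    uint64_t rx_drops_no_skbs;
    uint64_t rx_drops_no_frags;
};

struct net_stats {
    uint64_t rx_packets, rx_bytes, multicast, rx_dropped;
};

/* Fold every queue's counters into the device-wide totals. */
static void get_stats64(struct net_stats *out, const struct rx_stats *q, int nq)
{
    for (int i = 0; i < nq; i++) {
        /* In the driver each block is sampled inside a
         * u64_stats_fetch_begin_irq()/retry_irq() loop so the 64-bit
         * counters are read consistently; omitted in this sketch. */
        out->rx_packets += q[i].rx_pkts;
        out->rx_bytes   += q[i].rx_bytes;
        out->multicast  += q[i].rx_mcast_pkts;
        out->rx_dropped += q[i].rx_drops_no_skbs + q[i].rx_drops_no_frags;
    }
}

int main(void)
{
    struct rx_stats q[2] = {
        { .rx_pkts = 10, .rx_bytes = 1500, .rx_drops_no_frags = 1 },
        { .rx_pkts = 4,  .rx_bytes = 600,  .rx_mcast_pkts = 2 },
    };
    struct net_stats s = { 0 };

    get_stats64(&s, q, 2);
    printf("pkts=%llu bytes=%llu mcast=%llu dropped=%llu\n",
           (unsigned long long)s.rx_packets, (unsigned long long)s.rx_bytes,
           (unsigned long long)s.multicast, (unsigned long long)s.rx_dropped);
    return 0;
}
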
1659 struct be_rx_obj *rxo; in be_eqd_update() local
1674 rxo = &adapter->rx_obj[eqo->idx]; in be_eqd_update()
1676 start = u64_stats_fetch_begin_irq(&rxo->stats.sync); in be_eqd_update()
1677 rx_pkts = rxo->stats.rx_pkts; in be_eqd_update()
1678 } while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start)); in be_eqd_update()
1719 static void be_rx_stats_update(struct be_rx_obj *rxo, in be_rx_stats_update() argument
1722 struct be_rx_stats *stats = rx_stats(rxo); in be_rx_stats_update()
1744 static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo) in get_rx_page_info() argument
1746 struct be_adapter *adapter = rxo->adapter; in get_rx_page_info()
1748 struct be_queue_info *rxq = &rxo->q; in get_rx_page_info()
1751 rx_page_info = &rxo->page_info_tbl[frag_idx]; in get_rx_page_info()
1771 static void be_rx_compl_discard(struct be_rx_obj *rxo, in be_rx_compl_discard() argument
1778 page_info = get_rx_page_info(rxo); in be_rx_compl_discard()
1788 static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb, in skb_fill_rx_data() argument
1796 page_info = get_rx_page_info(rxo); in skb_fill_rx_data()
1833 page_info = get_rx_page_info(rxo); in skb_fill_rx_data()
1860 static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi, in be_rx_compl_process() argument
1863 struct be_adapter *adapter = rxo->adapter; in be_rx_compl_process()
1869 rx_stats(rxo)->rx_drops_no_skbs++; in be_rx_compl_process()
1870 be_rx_compl_discard(rxo, rxcp); in be_rx_compl_process()
1874 skb_fill_rx_data(rxo, skb, rxcp); in be_rx_compl_process()
1882 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]); in be_rx_compl_process()
1896 static void be_rx_compl_process_gro(struct be_rx_obj *rxo, in be_rx_compl_process_gro() argument
1900 struct be_adapter *adapter = rxo->adapter; in be_rx_compl_process_gro()
1908 be_rx_compl_discard(rxo, rxcp); in be_rx_compl_process_gro()
1914 page_info = get_rx_page_info(rxo); in be_rx_compl_process_gro()
1940 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]); in be_rx_compl_process_gro()
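
Both completion paths, be_rx_compl_process() and be_rx_compl_process_gro(), tag the skb with the queue it arrived on using pointer arithmetic on the rx_obj array, as the skb_record_rx_queue() hits show. A trivial sketch of that index computation, with stand-in types:

#include <stdio.h>

/* Minimal stand-ins; the driver's be_adapter/be_rx_obj carry far more state. */
struct rx_obj { int id; };
struct adapter { struct rx_obj rx_obj[8]; };

/*
 * The RX queue index is simply the rxo's offset within the adapter's
 * rx_obj[] array, i.e. rxo - &adapter->rx_obj[0].
 */
static unsigned int rxo_index(const struct adapter *a, const struct rx_obj *rxo)
{
    return (unsigned int)(rxo - &a->rx_obj[0]);
}

int main(void)
{
    struct adapter a = { 0 };

    printf("queue index = %u\n", rxo_index(&a, &a.rx_obj[3]));  /* prints 3 */
    return 0;
}
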
1998 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo) in be_rx_compl_get() argument
2000 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq); in be_rx_compl_get()
2001 struct be_rx_compl_info *rxcp = &rxo->rxcp; in be_rx_compl_get()
2002 struct be_adapter *adapter = rxo->adapter; in be_rx_compl_get()
2039 queue_tail_inc(&rxo->cq); in be_rx_compl_get()
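
be_rx_compl_get() consumes one entry from the completion ring: peek at the tail slot, bail out if its valid bit is clear, otherwise parse it, reset it so it is not mistaken for a fresh completion on the next lap, and advance the tail. A minimal stand-alone ring sketch of that shape; the types and the valid-bit encoding are assumptions, not the driver's be_eth_rx_compl layout.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified completion-queue ring; not the driver's be_queue_info layout. */
struct cq_entry { uint32_t flags; uint16_t num_rcvd; };

struct cq_ring {
    struct cq_entry *entries;
    unsigned int len;       /* number of entries in the ring */
    unsigned int tail;      /* next entry to consume */
};

/* Peek at the entry the tail points to, as queue_tail_node() does. */
static struct cq_entry *cq_tail_node(struct cq_ring *cq)
{
    return &cq->entries[cq->tail];
}

/* Advance the tail with wrap-around, as queue_tail_inc() does. */
static void cq_tail_inc(struct cq_ring *cq)
{
    if (++cq->tail == cq->len)
        cq->tail = 0;
}

/*
 * Consume one completion if the valid bit is set, then clear the entry so a
 * stale completion is never re-read on the next pass around the ring.
 */
static bool cq_get(struct cq_ring *cq, struct cq_entry *out)
{
    struct cq_entry *e = cq_tail_node(cq);

    if (!(e->flags & 1))        /* "valid" bit not set: nothing new */
        return false;

    *out = *e;
    memset(e, 0, sizeof(*e));   /* the driver similarly resets the valid word */
    cq_tail_inc(cq);
    return true;
}

int main(void)
{
    struct cq_entry slots[4] = {{ .flags = 1, .num_rcvd = 2 }};
    struct cq_ring cq = { .entries = slots, .len = 4, .tail = 0 };
    struct cq_entry e;

    while (cq_get(&cq, &e))
        printf("completion: num_rcvd=%u\n", (unsigned)e.num_rcvd);
    return 0;
}
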
2056 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed) in be_post_rx_frags() argument
2058 struct be_adapter *adapter = rxo->adapter; in be_post_rx_frags()
2060 struct be_queue_info *rxq = &rxo->q; in be_post_rx_frags()
2067 page_info = &rxo->page_info_tbl[rxq->head]; in be_post_rx_frags()
2072 rx_stats(rxo)->rx_post_fail++; in be_post_rx_frags()
2109 page_info = &rxo->page_info_tbl[rxq->head]; in be_post_rx_frags()
2122 if (rxo->rx_post_starved) in be_post_rx_frags()
2123 rxo->rx_post_starved = false; in be_post_rx_frags()
2131 rxo->rx_post_starved = true; in be_post_rx_frags()
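
be_post_rx_frags() replenishes the RX ring, counting allocation failures in rx_post_fail and raising rx_post_starved only when the ring runs completely dry, so a later, more patient context (the be_worker() hits at the end of this listing) can retry with GFP_KERNEL. A simplified sketch of that refill/starvation pattern, with made-up field names:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified RX object; only the fields the refill pattern needs. */
struct rx_obj {
    int posted;             /* fragments currently posted to the ring */
    int post_fail;          /* allocation failures, like rx_post_fail */
    bool post_starved;      /* set when the ring is left completely empty */
};

/* Stand-in for page allocation; may fail under memory pressure. */
static bool alloc_frag(void) { return rand() % 8 != 0; }

/*
 * Post up to frags_needed receive fragments.  A failed allocation bumps the
 * failure counter and stops the refill; if the ring is still empty afterwards,
 * the starved flag asks the periodic worker to retry when memory frees up.
 */
static void post_rx_frags(struct rx_obj *rxo, int frags_needed)
{
    int posted = 0;

    for (int i = 0; i < frags_needed; i++) {
        if (!alloc_frag()) {
            rxo->post_fail++;
            break;
        }
        posted++;
    }

    rxo->posted += posted;
    if (posted)
        rxo->post_starved = false;
    else if (rxo->posted == 0)
        rxo->post_starved = true;
}

int main(void)
{
    struct rx_obj rxo = { 0 };

    post_rx_frags(&rxo, 64);
    printf("posted=%d failures=%d starved=%d\n",
           rxo.posted, rxo.post_fail, (int)rxo.post_starved);
    return 0;
}
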
2218 static void be_rx_cq_clean(struct be_rx_obj *rxo) in be_rx_cq_clean() argument
2221 struct be_queue_info *rxq = &rxo->q; in be_rx_cq_clean()
2222 struct be_queue_info *rx_cq = &rxo->cq; in be_rx_cq_clean()
2224 struct be_adapter *adapter = rxo->adapter; in be_rx_cq_clean()
2234 rxcp = be_rx_compl_get(rxo); in be_rx_cq_clean()
2247 be_rx_compl_discard(rxo, rxcp); in be_rx_cq_clean()
2259 page_info = get_rx_page_info(rxo); in be_rx_cq_clean()
2503 struct be_rx_obj *rxo; in be_rx_cqs_destroy() local
2506 for_all_rx_queues(adapter, rxo, i) { in be_rx_cqs_destroy()
2507 q = &rxo->cq; in be_rx_cqs_destroy()
2517 struct be_rx_obj *rxo; in be_rx_cqs_create() local
2536 for_all_rx_queues(adapter, rxo, i) { in be_rx_cqs_create()
2537 rxo->adapter = adapter; in be_rx_cqs_create()
2538 cq = &rxo->cq; in be_rx_cqs_create()
2544 u64_stats_init(&rxo->stats.sync); in be_rx_cqs_create()
2602 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi, in be_process_rx() argument
2605 struct be_adapter *adapter = rxo->adapter; in be_process_rx()
2606 struct be_queue_info *rx_cq = &rxo->cq; in be_process_rx()
2612 rxcp = be_rx_compl_get(rxo); in be_process_rx()
2622 be_rx_compl_discard(rxo, rxcp); in be_process_rx()
2631 be_rx_compl_discard(rxo, rxcp); in be_process_rx()
2637 be_rx_compl_process_gro(rxo, napi, rxcp); in be_process_rx()
2639 be_rx_compl_process(rxo, napi, rxcp); in be_process_rx()
2643 be_rx_stats_update(rxo, rxcp); in be_process_rx()
2652 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM && in be_process_rx()
2653 !rxo->rx_post_starved) in be_process_rx()
2654 be_post_rx_frags(rxo, GFP_ATOMIC, in be_process_rx()
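
be_process_rx() is the budget-bounded poll loop: pull completions until the budget is spent or the CQ is empty, hand each frame to the stack (GRO or plain), update the per-queue stats, and top the ring back up once the number of posted fragments drops below RX_FRAGS_REFILL_WM and the queue is not already marked starved. A compact user-space sketch of that control flow, with stand-in types and an assumed watermark value:

#include <stdbool.h>
#include <stdio.h>

#define RX_FRAGS_REFILL_WM  10  /* illustrative value, not the driver's */

struct rx_queue {
    int used;               /* posted-but-unconsumed receive fragments */
    bool post_starved;
    int pending_compl;      /* completions waiting (stands in for the CQ) */
};

static bool get_completion(struct rx_queue *q)
{
    if (q->pending_compl == 0)
        return false;
    q->pending_compl--;
    q->used--;              /* each completion consumes a posted fragment */
    return true;
}

static void post_frags(struct rx_queue *q, int n) { q->used += n; }

/* Budget-bounded RX processing, shaped like be_process_rx() above. */
static int process_rx(struct rx_queue *q, int budget)
{
    int work;

    for (work = 0; work < budget; work++) {
        if (!get_completion(q))
            break;
        /* ...hand the frame to the stack (GRO or plain), update stats... */
    }

    /* Refill once the queue runs low, unless it is already starved. */
    if (q->used < RX_FRAGS_REFILL_WM && !q->post_starved)
        post_frags(q, 64);

    return work;
}

int main(void)
{
    struct rx_queue q = { .used = 12, .pending_compl = 20 };
    int work = process_rx(&q, 8);

    printf("did %d units of work, used=%d\n", work, q.used);
    return 0;
}
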
2839 struct be_rx_obj *rxo; in be_poll() local
2852 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) { in be_poll()
2853 work = be_process_rx(rxo, napi, budget, NAPI_POLLING); in be_poll()
2879 struct be_rx_obj *rxo; in be_busy_poll() local
2885 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) { in be_busy_poll()
2886 work = be_process_rx(rxo, napi, 4, BUSY_POLLING); in be_busy_poll()
3112 struct be_rx_obj *rxo; in be_rx_qs_destroy() local
3115 for_all_rx_queues(adapter, rxo, i) { in be_rx_qs_destroy()
3116 q = &rxo->q; in be_rx_qs_destroy()
3119 be_rx_cq_clean(rxo); in be_rx_qs_destroy()
3175 struct be_rx_obj *rxo; in be_rx_qs_create() local
3178 for_all_rx_queues(adapter, rxo, i) { in be_rx_qs_create()
3179 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN, in be_rx_qs_create()
3186 rxo = default_rxo(adapter); in be_rx_qs_create()
3187 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, in be_rx_qs_create()
3189 false, &rxo->rss_id); in be_rx_qs_create()
3194 for_all_rss_queues(adapter, rxo, i) { in be_rx_qs_create()
3195 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, in be_rx_qs_create()
3197 true, &rxo->rss_id); in be_rx_qs_create()
3204 for_all_rss_queues(adapter, rxo, i) { in be_rx_qs_create()
3207 rss->rsstable[j + i] = rxo->rss_id; in be_rx_qs_create()
3233 for_all_rx_queues(adapter, rxo, i) in be_rx_qs_create()
3234 be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST); in be_rx_qs_create()
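
The rsstable[j + i] assignment above fills the RSS indirection table round-robin across the RSS queues, so hash buckets are spread evenly over the rings. A stand-alone sketch of that fill; the table length and the use of the loop index in place of rxo->rss_id are assumptions for illustration.

#include <stdio.h>

#define RSS_INDIR_TABLE_LEN 128   /* assumed length for this sketch */

/*
 * Fill an RSS indirection table round-robin over the RSS-capable queues,
 * the way the nested loops around the rsstable[j + i] hit above do.
 */
static void fill_rss_table(unsigned char *table, int table_len, int num_rss_qs)
{
    for (int j = 0; j < table_len; j += num_rss_qs)
        for (int i = 0; i < num_rss_qs && j + i < table_len; i++)
            table[j + i] = (unsigned char)i;   /* stands in for rxo->rss_id */
}

int main(void)
{
    unsigned char table[RSS_INDIR_TABLE_LEN];

    fill_rss_table(table, RSS_INDIR_TABLE_LEN, 3);
    printf("entry[0..5] = %d %d %d %d %d %d\n",
           table[0], table[1], table[2], table[3], table[4], table[5]);
    /* prints 0 1 2 0 1 2: buckets alternate across the RSS queues */
    return 0;
}
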
3242 struct be_rx_obj *rxo; in be_open() local
3255 for_all_rx_queues(adapter, rxo, i) in be_open()
3256 be_cq_notify(adapter, rxo->cq.id, true, 0); in be_open()
5152 struct be_rx_obj *rxo; in be_worker() local
5177 for_all_rx_queues(adapter, rxo, i) { in be_worker()
5181 if (rxo->rx_post_starved) in be_worker()
5182 be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST); in be_worker()