Lines Matching refs:lbq_desc

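All of these hits fall in qlge_main.c of the QLogic qlge 10GbE driver; lbq_desc is always a struct bq_desc, the software descriptor for one entry of a receive ring's large buffer queue (lbq). For orientation, a sketch of that descriptor and its page-chunk payload as they appear in the driver's qlge.h; the exact layout varies across kernel versions:

	/* Sketch of the types behind these hits, after qlge.h.
	 * Field layout may differ between kernel versions. */
	struct page_chunk {
		struct page *page;	/* master page the chunk was carved from */
		char *va;		/* virtual address of this chunk */
		u64 map;		/* DMA mapping of the master page */
		unsigned int offset;	/* chunk offset within the master page */
		unsigned int last_flag;	/* set on the last chunk of the page */
	};

	struct bq_desc {
		union {
			struct page_chunk pg_chunk;	/* large buffer queue entry */
			struct sk_buff *skb;		/* small buffer queue entry */
		} p;
		__le64 *addr;		/* slot in the hardware buffer queue */
		u32 index;
		DEFINE_DMA_UNMAP_ADDR(mapaddr);
		DEFINE_DMA_UNMAP_LEN(maplen);
	};

A large buffer is not a whole page but a chunk carved out of a shared "master" page, which is why the hits below juggle page references, chunk offsets, and a last_flag marking the final chunk of a page.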
1033 struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx]; in ql_get_curr_lbuf() local
1038 return lbq_desc; in ql_get_curr_lbuf()
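
Hits 1033 and 1038 bracket ql_get_curr_lbuf(), which pops the descriptor at lbq_curr_idx. A reconstruction consistent with those fragments; the index-wrap and free-count bookkeeping in between is from the same driver and may differ slightly by version:

	/* Get the next large buffer descriptor; wraps lbq_curr_idx and
	 * counts the slot as free for the refill path (ql_update_lbq). */
	static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
	{
		struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];

		rx_ring->lbq_curr_idx++;
		if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
			rx_ring->lbq_curr_idx = 0;
		rx_ring->lbq_free_cnt++;
		return lbq_desc;
	}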
1044 struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring); in ql_get_curr_lchunk() local
1047 dma_unmap_addr(lbq_desc, mapaddr), in ql_get_curr_lchunk()
1054 if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size) in ql_get_curr_lchunk()
1057 lbq_desc->p.pg_chunk.map, in ql_get_curr_lchunk()
1060 return lbq_desc; in ql_get_curr_lchunk()
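
The 1044-1060 hits are ql_get_curr_lchunk(): it syncs the chunk's DMA buffer for CPU access and, when this chunk is the last one in its master page, unmaps the whole page. A sketch assembled from the fragments; ql_lbq_block_size() is the driver's master-page size helper, and the pci_dma_* calls reflect the older DMA API this driver used:

	static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
						  struct rx_ring *rx_ring)
	{
		struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);

		/* Make the received data visible to the CPU. */
		pci_dma_sync_single_for_cpu(qdev->pdev,
					    dma_unmap_addr(lbq_desc, mapaddr),
					    rx_ring->lbq_buf_size,
					    PCI_DMA_FROMDEVICE);

		/* Last chunk of the master page: drop the page mapping. */
		if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
		    == ql_lbq_block_size(qdev))
			pci_unmap_page(qdev->pdev,
				       lbq_desc->p.pg_chunk.map,
				       ql_lbq_block_size(qdev),
				       PCI_DMA_FROMDEVICE);
		return lbq_desc;
	}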
1091 struct bq_desc *lbq_desc) in ql_get_next_chunk() argument
1122 lbq_desc->p.pg_chunk = rx_ring->pg_chunk; in ql_get_next_chunk()
1130 lbq_desc->p.pg_chunk.last_flag = 1; in ql_get_next_chunk()
1134 lbq_desc->p.pg_chunk.last_flag = 0; in ql_get_next_chunk()
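
Hits 1091-1134 are inside ql_get_next_chunk(), the function that carves lbq_buf_size-sized chunks from a master page: line 1122 copies the ring's current chunk state into the descriptor, and lines 1130/1134 set or clear last_flag depending on whether the page is now exhausted. A condensed sketch with mapping-error handling trimmed:

	static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
				     struct bq_desc *lbq_desc)
	{
		/* Start a fresh master page when the previous one is used up. */
		if (!rx_ring->pg_chunk.page) {
			rx_ring->pg_chunk.page = alloc_pages(GFP_ATOMIC | __GFP_COMP,
							     qdev->lbq_buf_order);
			if (unlikely(!rx_ring->pg_chunk.page))
				return -ENOMEM;
			rx_ring->pg_chunk.offset = 0;
			rx_ring->pg_chunk.map = pci_map_page(qdev->pdev,
							     rx_ring->pg_chunk.page, 0,
							     ql_lbq_block_size(qdev),
							     PCI_DMA_FROMDEVICE);
			rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
		}

		/* Hand the current chunk to this descriptor (line 1122). */
		lbq_desc->p.pg_chunk = rx_ring->pg_chunk;

		/* Advance the master chunk; mark the last chunk of the page. */
		rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
		if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
			rx_ring->pg_chunk.page = NULL;
			lbq_desc->p.pg_chunk.last_flag = 1;	/* line 1130 */
		} else {
			rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
			get_page(rx_ring->pg_chunk.page);	/* one ref per chunk */
			lbq_desc->p.pg_chunk.last_flag = 0;	/* line 1134 */
		}
		return 0;
	}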
1143 struct bq_desc *lbq_desc; in ql_update_lbq() local
1152 lbq_desc = &rx_ring->lbq[clean_idx]; in ql_update_lbq()
1153 if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) { in ql_update_lbq()
1161 map = lbq_desc->p.pg_chunk.map + in ql_update_lbq()
1162 lbq_desc->p.pg_chunk.offset; in ql_update_lbq()
1163 dma_unmap_addr_set(lbq_desc, mapaddr, map); in ql_update_lbq()
1164 dma_unmap_len_set(lbq_desc, maplen, in ql_update_lbq()
1166 *lbq_desc->addr = cpu_to_le64(map); in ql_update_lbq()
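
Lines 1143-1166 are the refill path, ql_update_lbq(). For each clean slot it takes a fresh chunk, records the DMA address (master-page mapping plus chunk offset) for the eventual unmap, and writes that same address through lbq_desc->addr into the hardware buffer queue. A condensed sketch of the per-descriptor step, with the batching and doorbell write that surround it omitted:

	/* Inner refill step, per descriptor (cf. lines 1152-1166). */
	lbq_desc = &rx_ring->lbq[clean_idx];
	if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
		rx_ring->lbq_clean_idx = clean_idx;
		return;				/* out of pages; retry later */
	}

	map = lbq_desc->p.pg_chunk.map + lbq_desc->p.pg_chunk.offset;
	dma_unmap_addr_set(lbq_desc, mapaddr, map);
	dma_unmap_len_set(lbq_desc, maplen, rx_ring->lbq_buf_size);
	*lbq_desc->addr = cpu_to_le64(map);	/* hardware-visible address */

	pci_dma_sync_single_for_device(qdev->pdev, map,
				       rx_ring->lbq_buf_size,
				       PCI_DMA_FROMDEVICE);
	if (++clean_idx == rx_ring->lbq_len)
		clean_idx = 0;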
1499 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); in ql_process_mac_rx_gro_page() local
1505 put_page(lbq_desc->p.pg_chunk.page); in ql_process_mac_rx_gro_page()
1515 put_page(lbq_desc->p.pg_chunk.page); in ql_process_mac_rx_gro_page()
1518 prefetch(lbq_desc->p.pg_chunk.va); in ql_process_mac_rx_gro_page()
1520 lbq_desc->p.pg_chunk.page, in ql_process_mac_rx_gro_page()
1521 lbq_desc->p.pg_chunk.offset, in ql_process_mac_rx_gro_page()
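
In the GRO receive path (hits 1499-1521) the chunk is attached to a napi_get_frags() skb as a page fragment; the error paths drop the page reference that ql_get_next_chunk() took. A sketch of the happy path around those lines, where length is the frame length from the completion entry:

	struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
	struct sk_buff *skb = napi_get_frags(napi);

	if (!skb) {
		put_page(lbq_desc->p.pg_chunk.page);	/* drop chunk ref */
		return;
	}
	prefetch(lbq_desc->p.pg_chunk.va);
	__skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
			     lbq_desc->p.pg_chunk.page,
			     lbq_desc->p.pg_chunk.offset, length);
	skb->len += length;
	skb->data_len += length;
	skb->truesize += length;
	skb_shinfo(skb)->nr_frags++;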
1548 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); in ql_process_mac_rx_page() local
1555 put_page(lbq_desc->p.pg_chunk.page); in ql_process_mac_rx_page()
1559 addr = lbq_desc->p.pg_chunk.va; in ql_process_mac_rx_page()
1584 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page, in ql_process_mac_rx_page()
1585 lbq_desc->p.pg_chunk.offset + hlen, in ql_process_mac_rx_page()
1628 put_page(lbq_desc->p.pg_chunk.page); in ql_process_mac_rx_page()
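
The non-GRO page path (1548-1628) splits the frame: the header is copied out of the chunk's virtual address into the skb's linear area, and the payload stays in the page, attached as frag 0 at offset + hlen (lines 1584-1585). A condensed sketch, where hlen stands for the header length the driver computes:

	struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
	void *addr = lbq_desc->p.pg_chunk.va;	/* line 1559 */

	/* Header goes into the linear area... */
	memcpy(skb_put(skb, hlen), addr, hlen);
	/* ...payload stays in the page chunk (lines 1584-1585). */
	skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
			   lbq_desc->p.pg_chunk.offset + hlen,
			   length - hlen);
	skb->len += length - hlen;
	skb->data_len += length - hlen;
	skb->truesize += length - hlen;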
1753 struct bq_desc *lbq_desc; in ql_build_rx_skb() local
1845 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); in ql_build_rx_skb()
1848 lbq_desc->p.pg_chunk.offset, length); in ql_build_rx_skb()
1849 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page, in ql_build_rx_skb()
1850 lbq_desc->p.pg_chunk.offset, in ql_build_rx_skb()
1861 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); in ql_build_rx_skb()
1869 dma_unmap_addr(lbq_desc, in ql_build_rx_skb()
1871 dma_unmap_len(lbq_desc, maplen), in ql_build_rx_skb()
1878 lbq_desc->p.pg_chunk.page, in ql_build_rx_skb()
1879 lbq_desc->p.pg_chunk.offset, in ql_build_rx_skb()
1886 lbq_desc->p.pg_chunk.va, in ql_build_rx_skb()
1926 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); in ql_build_rx_skb()
1934 lbq_desc->p.pg_chunk.page, in ql_build_rx_skb()
1935 lbq_desc->p.pg_chunk.offset, in ql_build_rx_skb()
1943 ql_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va, in ql_build_rx_skb()
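
ql_build_rx_skb() (hits 1753-1943) handles the combined header/data cases, including jumbo frames that span several chunks; in that branch (lines 1926-1935) successive chunks are appended as frags until the frame length is consumed. A condensed sketch of that loop, with i counting the frags attached so far:

	/* Jumbo frame: keep pulling chunks until 'length' is consumed. */
	do {
		lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
		size = min(length, rx_ring->lbq_buf_size);

		skb_fill_page_desc(skb, i,
				   lbq_desc->p.pg_chunk.page,
				   lbq_desc->p.pg_chunk.offset, size);
		skb->len += size;
		skb->data_len += size;
		skb->truesize += size;
		length -= size;
		i++;
	} while (length > 0);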
2821 struct bq_desc *lbq_desc; in ql_free_lbq_buffers() local
2828 lbq_desc = &rx_ring->lbq[curr_idx]; in ql_free_lbq_buffers()
2830 if (lbq_desc->p.pg_chunk.last_flag) { in ql_free_lbq_buffers()
2832 lbq_desc->p.pg_chunk.map, in ql_free_lbq_buffers()
2835 lbq_desc->p.pg_chunk.last_flag = 0; in ql_free_lbq_buffers()
2838 put_page(lbq_desc->p.pg_chunk.page); in ql_free_lbq_buffers()
2839 lbq_desc->p.pg_chunk.page = NULL; in ql_free_lbq_buffers()
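
Teardown (hits 2821-2839) is ql_free_lbq_buffers(): it walks from lbq_curr_idx to lbq_clean_idx, unmaps the master page at its last_flag chunk, and drops each chunk's page reference. A sketch consistent with the fragments; the trailing cleanup of a partially used master page is omitted:

	static void ql_free_lbq_buffers(struct ql_adapter *qdev,
					struct rx_ring *rx_ring)
	{
		struct bq_desc *lbq_desc;
		u32 curr_idx = rx_ring->lbq_curr_idx;
		u32 clean_idx = rx_ring->lbq_clean_idx;

		while (curr_idx != clean_idx) {
			lbq_desc = &rx_ring->lbq[curr_idx];

			/* Unmap the master page once, at its last chunk. */
			if (lbq_desc->p.pg_chunk.last_flag) {
				pci_unmap_page(qdev->pdev,
					       lbq_desc->p.pg_chunk.map,
					       ql_lbq_block_size(qdev),
					       PCI_DMA_FROMDEVICE);
				lbq_desc->p.pg_chunk.last_flag = 0;
			}

			put_page(lbq_desc->p.pg_chunk.page);	/* per-chunk ref */
			lbq_desc->p.pg_chunk.page = NULL;

			if (++curr_idx == rx_ring->lbq_len)
				curr_idx = 0;
		}
	}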
2909 struct bq_desc *lbq_desc; in ql_init_lbq_ring() local
2914 lbq_desc = &rx_ring->lbq[i]; in ql_init_lbq_ring()
2915 memset(lbq_desc, 0, sizeof(*lbq_desc)); in ql_init_lbq_ring()
2916 lbq_desc->index = i; in ql_init_lbq_ring()
2917 lbq_desc->addr = bq; in ql_init_lbq_ring()
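
Finally, hits 2909-2917 are ql_init_lbq_ring(), which zeroes every descriptor and aims its addr at the matching __le64 slot of the hardware-visible queue (the slot the refill path writes through at line 1166). A sketch; lbq_base as the queue's base pointer follows the driver's naming:

	static void ql_init_lbq_ring(struct ql_adapter *qdev,
				     struct rx_ring *rx_ring)
	{
		struct bq_desc *lbq_desc;
		__le64 *bq = rx_ring->lbq_base;	/* hardware-visible queue */
		int i;

		for (i = 0; i < rx_ring->lbq_len; i++) {
			lbq_desc = &rx_ring->lbq[i];
			memset(lbq_desc, 0, sizeof(*lbq_desc));
			lbq_desc->index = i;
			lbq_desc->addr = bq;	/* slot this descriptor fills */
			bq++;
		}
	}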