Lines matching refs: q_depth
121 u16 q_depth; member
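Context: two q_depth fields appear in this listing. The u16 at line 121 is the per-queue depth, a member of struct nvme_queue in this driver; the dev->q_depth references below (lines 1473, 2342, 2443, 2663, 2717, 2726) are a device-wide default, declared elsewhere, that seeds each queue. A minimal sketch of the queue struct, keeping only the fields this listing touches (field names beyond those visible in the listing, such as sq_tail and cq_head, are quoted from memory of this source tree and should be treated as assumptions):

    struct nvme_queue {
            struct device *q_dmadev;                /* dma_free_coherent, lines 1394/1397 */
            struct nvme_command *sq_cmds;           /* submission ring */
            volatile struct nvme_completion *cqes;  /* completion ring, line 1573 */
            u16 q_depth;                            /* entries per ring, line 121 */
            u16 sq_tail;                            /* wraps at q_depth, line 402 */
            u16 cq_head;                            /* wraps at q_depth, line 961 */
            /* ... remaining members elided ... */
    };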
373 if (tag >= nvmeq->q_depth) { in nvme_finish_cmd()
402 if (++tail == nvmeq->q_depth) in __nvme_submit_cmd()
961 if (++head == nvmeq->q_depth) { in __nvme_process_cq()
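Lines 373, 402, and 961 show q_depth doing double duty as a bounds check and a ring modulus: a completion's command tag must index inside the ring, and the SQ tail and CQ head advance modulo q_depth. A hedged sketch of the wrap pattern (the helper name is ours; the driver open-codes this at both sites):

    /* Advance a ring index, wrapping at the depth.  In the completion
     * path (line 961) the real driver also flips its expected phase
     * bit on wrap, so entries left over from the previous pass are
     * not mistaken for new completions. */
    static inline u16 ring_next(u16 idx, u16 q_depth)
    {
            if (++idx == q_depth)
                    idx = 0;
            return idx;
    }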
1163 c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1); in adapter_alloc_cq()
1184 c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1); in adapter_alloc_sq()
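The q_depth - 1 at lines 1163 and 1184 is not an off-by-one: the NVMe spec encodes queue sizes 0's-based, so a field value of n means n + 1 entries. For example:

    /* A 1024-entry queue is created with qsize = 1023.  CAP.MQES
     * (line 2717) uses the same 0's-based convention, which is why
     * the driver adds 1 back when reading it. */
    c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);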
1394 dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth), in nvme_free_queue()
1397 dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth), in nvme_free_queue()
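The frees at lines 1394/1397 size the DMA regions from q_depth. In this source tree the size macros multiply the depth by the per-entry size (quoted from memory, so treat as an assumption):

    #define SQ_SIZE(depth)  ((depth) * sizeof(struct nvme_command))
    #define CQ_SIZE(depth)  ((depth) * sizeof(struct nvme_completion))

Freeing must therefore use the same q_depth the rings were allocated with, which is one reason the value is stored on the queue at line 1534 rather than re-derived later.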
1473 int q_depth = dev->q_depth; in nvme_cmb_qdepth() local
1474 unsigned q_size_aligned = roundup(q_depth * entry_size, dev->page_size); in nvme_cmb_qdepth()
1479 q_depth = div_u64(mem_per_q, entry_size); in nvme_cmb_qdepth()
1486 if (q_depth < 64) in nvme_cmb_qdepth()
1490 return q_depth; in nvme_cmb_qdepth()
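Lines 1473-1490 are nvme_cmb_qdepth(): if the page-aligned submission queues do not all fit in the controller memory buffer, the depth is shrunk to what the per-queue share allows, and below a floor of 64 entries the CMB is abandoned in favor of host memory at the original depth. A hedged reconstruction of the logic (the function and parameter names here are ours):

    static int cmb_qdepth_sketch(int q_depth, int nr_io_queues,
                                 u64 cmb_size, unsigned entry_size,
                                 unsigned page_size)
    {
            unsigned q_size_aligned = roundup(q_depth * entry_size, page_size);

            if (q_size_aligned * nr_io_queues > cmb_size) {
                    /* Split the CMB evenly, round down to a page, and
                     * see how many entries the per-queue share holds. */
                    u64 mem_per_q = div_u64(cmb_size, nr_io_queues);

                    mem_per_q = round_down(mem_per_q, page_size);
                    q_depth = div_u64(mem_per_q, entry_size);
                    if (q_depth < 64)
                            return -ENOMEM; /* fall back to host memory */
            }
            return q_depth;
    }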
1534 nvmeq->q_depth = depth; in nvme_alloc_queue()
1573 memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth)); in nvme_init_queue()
1773 aqa = nvmeq->q_depth - 1; in nvme_configure_admin_queue()
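Line 1534 records the depth when the queue is allocated; line 1573 zeroes the whole completion ring at init so stale phase bits from a previous incarnation of the queue cannot look like fresh completions. Line 1773 programs the admin queue depth into the AQA register, which packs two 0's-based sizes: ASQS in bits 11:0 and ACQS in bits 27:16. The driver uses one depth for both admin rings, hence the mirrored halfwords (the bar accessor below is how this driver generation writes the register, stated as an assumption):

    u32 aqa = nvmeq->q_depth - 1;   /* 0's-based, like qsize above */
    aqa |= aqa << 16;               /* same size for ASQS and ACQS */
    writel(aqa, &dev->bar->aqa);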
2342 if (!nvme_alloc_queue(dev, i, dev->q_depth)) in nvme_create_io_queues()
2443 dev->q_depth = result; in nvme_setup_io_queues()
2663 min_t(int, dev->q_depth, BLK_MQ_MAX_DEPTH) - 1; in nvme_dev_add()
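Lines 2342 and 2443 propagate the device-wide depth into each I/O queue. The blk-mq tag depth at line 2663 is min(q_depth, BLK_MQ_MAX_DEPTH) - 1: with head == tail meaning empty, a ring can only distinguish full from empty by leaving one slot unused, so at most q_depth - 1 commands may be outstanding and the extra slot is never handed out as a tag. For illustration (the tagset field follows this driver generation):

    /* q_depth = 1024 -> 1023 usable tags per hardware queue. */
    dev->tagset.queue_depth =
            min_t(int, dev->q_depth, BLK_MQ_MAX_DEPTH) - 1;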
2717 dev->q_depth = min_t(int, NVME_CAP_MQES(cap) + 1, NVME_Q_DEPTH); in nvme_dev_map()
2726 dev->q_depth = 2; in nvme_dev_map()
2729 dev->q_depth); in nvme_dev_map()
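Line 2717 negotiates the depth at probe time: CAP.MQES (bits 15:0 of the controller's CAP register) is 0's-based, so the usable depth is MQES + 1, capped by the driver's own NVME_Q_DEPTH constant (1024 in kernels of this era, stated as an assumption). The fallback at line 2726 pins q_depth to 2 as a device-specific workaround, announced by the warning whose argument shows at line 2729; with one slot reserved for the full/empty distinction, that leaves a single command in flight. For example:

    /* A controller advertising MQES = 0x1ff supports 512-entry
     * queues: min(0x1ff + 1, NVME_Q_DEPTH) = 512. */
    dev->q_depth = min_t(int, NVME_CAP_MQES(cap) + 1, NVME_Q_DEPTH);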