Lines Matching refs:nvmeq
83 static int nvme_process_cq(struct nvme_queue *nvmeq);
146 struct nvme_queue *nvmeq; member
183 struct nvme_queue *nvmeq = dev->queues[0]; in nvme_admin_init_hctx() local
185 WARN_ON(nvmeq->hctx); in nvme_admin_init_hctx()
186 nvmeq->hctx = hctx; in nvme_admin_init_hctx()
187 hctx->driver_data = nvmeq; in nvme_admin_init_hctx()
197 struct nvme_queue *nvmeq = dev->queues[0]; in nvme_admin_init_request() local
199 BUG_ON(!nvmeq); in nvme_admin_init_request()
200 cmd->nvmeq = nvmeq; in nvme_admin_init_request()
206 struct nvme_queue *nvmeq = hctx->driver_data; in nvme_exit_hctx() local
208 nvmeq->hctx = NULL; in nvme_exit_hctx()
215 struct nvme_queue *nvmeq = dev->queues[ in nvme_init_hctx() local
218 if (!nvmeq->hctx) in nvme_init_hctx()
219 nvmeq->hctx = hctx; in nvme_init_hctx()
223 WARN_ON(nvmeq->hctx->tags != hctx->tags); in nvme_init_hctx()
225 hctx->driver_data = nvmeq; in nvme_init_hctx()
235 struct nvme_queue *nvmeq = dev->queues[hctx_idx + 1]; in nvme_init_request() local
237 BUG_ON(!nvmeq); in nvme_init_request()
238 cmd->nvmeq = nvmeq; in nvme_init_request()
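
A short sketch of the request-init hook the two fragments above come from, with the context lines not shown in this listing filled in as assumptions: each blk-mq request owns a small nvme_cmd_info PDU, and the hook caches the hardware queue pointer there so submission, completion and timeout handling can reach the queue without a lookup.

    /* Sketch of nvme_init_request(): pin each request's PDU to its hw queue.
     * The hook signature follows the blk-mq init_request callback of this
     * kernel generation; names not present in the listing are assumptions.
     */
    static int nvme_init_request(void *data, struct request *req,
                    unsigned int hctx_idx, unsigned int rq_idx,
                    unsigned int numa_node)
    {
            struct nvme_dev *dev = data;
            struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req);
            struct nvme_queue *nvmeq = dev->queues[hctx_idx + 1]; /* queue 0 is the admin queue */

            BUG_ON(!nvmeq);
            cmd->nvmeq = nvmeq;
            return 0;
    }
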
270 static void special_completion(struct nvme_queue *nvmeq, void *ctx, in special_completion() argument
276 dev_warn(nvmeq->q_dmadev, in special_completion()
282 dev_warn(nvmeq->q_dmadev, in special_completion()
287 dev_warn(nvmeq->q_dmadev, "Unknown special completion %p\n", ctx); in special_completion()
302 static void async_req_completion(struct nvme_queue *nvmeq, void *ctx, in async_req_completion() argument
309 ++nvmeq->dev->event_limit; in async_req_completion()
311 dev_warn(nvmeq->q_dmadev, in async_req_completion()
315 static void abort_completion(struct nvme_queue *nvmeq, void *ctx, in abort_completion() argument
323 blk_mq_free_hctx_request(nvmeq->hctx, req); in abort_completion()
325 dev_warn(nvmeq->q_dmadev, "Abort status:%x result:%x", status, result); in abort_completion()
326 ++nvmeq->dev->abort_limit; in abort_completion()
329 static void async_completion(struct nvme_queue *nvmeq, void *ctx, in async_completion() argument
336 blk_mq_free_hctx_request(nvmeq->hctx, cmdinfo->req); in async_completion()
339 static inline struct nvme_cmd_info *get_cmd_from_tag(struct nvme_queue *nvmeq, in get_cmd_from_tag() argument
342 struct blk_mq_hw_ctx *hctx = nvmeq->hctx; in get_cmd_from_tag()
351 static void *nvme_finish_cmd(struct nvme_queue *nvmeq, int tag, in nvme_finish_cmd() argument
354 struct nvme_cmd_info *cmd = get_cmd_from_tag(nvmeq, tag); in nvme_finish_cmd()
356 if (tag >= nvmeq->q_depth) { in nvme_finish_cmd()
375 static int __nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd) in __nvme_submit_cmd() argument
377 u16 tail = nvmeq->sq_tail; in __nvme_submit_cmd()
379 memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd)); in __nvme_submit_cmd()
380 if (++tail == nvmeq->q_depth) in __nvme_submit_cmd()
382 writel(tail, nvmeq->q_db); in __nvme_submit_cmd()
383 nvmeq->sq_tail = tail; in __nvme_submit_cmd()
388 static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd) in nvme_submit_cmd() argument
392 spin_lock_irqsave(&nvmeq->q_lock, flags); in nvme_submit_cmd()
393 ret = __nvme_submit_cmd(nvmeq, cmd); in nvme_submit_cmd()
394 spin_unlock_irqrestore(&nvmeq->q_lock, flags); in nvme_submit_cmd()
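
The two submit helpers referenced above are the whole submission side of an NVMe queue: copy the 64-byte command into the slot at sq_tail, advance the tail with wrap-around, and write the new tail to the doorbell. A minimal sketch reconstructed from the fragments in this listing; unlisted context lines are assumptions.

    /* Sketch: inner helper, called with nvmeq->q_lock already held. */
    static int __nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
    {
            u16 tail = nvmeq->sq_tail;

            memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd)); /* 64-byte SQ entry */
            if (++tail == nvmeq->q_depth)
                    tail = 0;                                 /* wrap the ring */
            writel(tail, nvmeq->q_db);                        /* ring the SQ tail doorbell */
            nvmeq->sq_tail = tail;
            return 0;
    }

    /* Sketch: IRQ-safe wrapper used from contexts that don't hold q_lock. */
    static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
    {
            unsigned long flags;
            int ret;

            spin_lock_irqsave(&nvmeq->q_lock, flags);
            ret = __nvme_submit_cmd(nvmeq, cmd);
            spin_unlock_irqrestore(&nvmeq->q_lock, flags);
            return ret;
    }
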
587 static void req_completion(struct nvme_queue *nvmeq, void *ctx, in req_completion() argument
615 dev_warn(&nvmeq->dev->pci_dev->dev, in req_completion()
620 dma_unmap_sg(&nvmeq->dev->pci_dev->dev, iod->sg, iod->nents, in req_completion()
625 dma_unmap_sg(&nvmeq->dev->pci_dev->dev, iod->meta_sg, 1, in req_completion()
629 nvme_free_iod(nvmeq->dev, iod); in req_completion()
720 static void nvme_submit_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns, in nvme_submit_discard() argument
725 struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail]; in nvme_submit_discard()
739 if (++nvmeq->sq_tail == nvmeq->q_depth) in nvme_submit_discard()
740 nvmeq->sq_tail = 0; in nvme_submit_discard()
741 writel(nvmeq->sq_tail, nvmeq->q_db); in nvme_submit_discard()
744 static void nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns, in nvme_submit_flush() argument
747 struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail]; in nvme_submit_flush()
754 if (++nvmeq->sq_tail == nvmeq->q_depth) in nvme_submit_flush()
755 nvmeq->sq_tail = 0; in nvme_submit_flush()
756 writel(nvmeq->sq_tail, nvmeq->q_db); in nvme_submit_flush()
759 static int nvme_submit_iod(struct nvme_queue *nvmeq, struct nvme_iod *iod, in nvme_submit_iod() argument
775 cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail]; in nvme_submit_iod()
806 if (++nvmeq->sq_tail == nvmeq->q_depth) in nvme_submit_iod()
807 nvmeq->sq_tail = 0; in nvme_submit_iod()
808 writel(nvmeq->sq_tail, nvmeq->q_db); in nvme_submit_iod()
817 struct nvme_queue *nvmeq = hctx->driver_data; in nvme_queue_rq() local
847 range = dma_pool_alloc(nvmeq->dev->prp_small_pool, in nvme_queue_rq()
862 if (!dma_map_sg(nvmeq->q_dmadev, iod->sg, iod->nents, dma_dir)) in nvme_queue_rq()
866 nvme_setup_prps(nvmeq->dev, iod, blk_rq_bytes(req), GFP_ATOMIC)) { in nvme_queue_rq()
867 dma_unmap_sg(&nvmeq->dev->pci_dev->dev, iod->sg, in nvme_queue_rq()
883 if (!dma_map_sg(nvmeq->q_dmadev, iod->meta_sg, 1, dma_dir)) in nvme_queue_rq()
889 spin_lock_irq(&nvmeq->q_lock); in nvme_queue_rq()
891 nvme_submit_discard(nvmeq, ns, req, iod); in nvme_queue_rq()
893 nvme_submit_flush(nvmeq, ns, req->tag); in nvme_queue_rq()
895 nvme_submit_iod(nvmeq, iod, ns); in nvme_queue_rq()
897 nvme_process_cq(nvmeq); in nvme_queue_rq()
898 spin_unlock_irq(&nvmeq->q_lock); in nvme_queue_rq()
902 nvme_free_iod(nvmeq->dev, iod); in nvme_queue_rq()
905 nvme_free_iod(nvmeq->dev, iod); in nvme_queue_rq()
909 static int nvme_process_cq(struct nvme_queue *nvmeq) in nvme_process_cq() argument
913 head = nvmeq->cq_head; in nvme_process_cq()
914 phase = nvmeq->cq_phase; in nvme_process_cq()
919 struct nvme_completion cqe = nvmeq->cqes[head]; in nvme_process_cq()
922 nvmeq->sq_head = le16_to_cpu(cqe.sq_head); in nvme_process_cq()
923 if (++head == nvmeq->q_depth) { in nvme_process_cq()
927 ctx = nvme_finish_cmd(nvmeq, cqe.command_id, &fn); in nvme_process_cq()
928 fn(nvmeq, ctx, &cqe); in nvme_process_cq()
937 if (head == nvmeq->cq_head && phase == nvmeq->cq_phase) in nvme_process_cq()
940 writel(head, nvmeq->q_db + nvmeq->dev->db_stride); in nvme_process_cq()
941 nvmeq->cq_head = head; in nvme_process_cq()
942 nvmeq->cq_phase = phase; in nvme_process_cq()
944 nvmeq->cqe_seen = 1; in nvme_process_cq()
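
nvme_process_cq() is the matching consumer. NVMe completion entries carry a phase bit that the controller inverts on every wrap of the queue, so the driver keeps consuming entries for as long as the entry's phase bit equals its expected cq_phase, then publishes the new head through the CQ doorbell (one db_stride past the SQ doorbell). A condensed sketch assembled from the fragments above; the unlisted lines are assumptions.

    /* Sketch of the phase-bit completion loop; the per-command callback
     * plumbing comes from the fragments above, the rest is assumed context.
     */
    static int nvme_process_cq(struct nvme_queue *nvmeq)
    {
            u16 head = nvmeq->cq_head;
            u16 phase = nvmeq->cq_phase;

            for (;;) {
                    void *ctx;
                    nvme_completion_fn fn;
                    struct nvme_completion cqe = nvmeq->cqes[head];

                    /* Entry is valid only while its phase bit matches ours. */
                    if ((le16_to_cpu(cqe.status) & 1) != phase)
                            break;

                    nvmeq->sq_head = le16_to_cpu(cqe.sq_head);
                    if (++head == nvmeq->q_depth) {
                            head = 0;
                            phase = !phase;   /* controller flips the bit on wrap */
                    }
                    ctx = nvme_finish_cmd(nvmeq, cqe.command_id, &fn);
                    fn(nvmeq, ctx, &cqe);
            }

            /* Nothing consumed: skip the doorbell write so shared IRQs stay cheap. */
            if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
                    return 0;

            writel(head, nvmeq->q_db + nvmeq->dev->db_stride); /* CQ head doorbell */
            nvmeq->cq_head = head;
            nvmeq->cq_phase = phase;
            nvmeq->cqe_seen = 1;
            return 1;
    }
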
960 struct nvme_queue *nvmeq = data; in nvme_irq() local
961 spin_lock(&nvmeq->q_lock); in nvme_irq()
962 nvme_process_cq(nvmeq); in nvme_irq()
963 result = nvmeq->cqe_seen ? IRQ_HANDLED : IRQ_NONE; in nvme_irq()
964 nvmeq->cqe_seen = 0; in nvme_irq()
965 spin_unlock(&nvmeq->q_lock); in nvme_irq()
971 struct nvme_queue *nvmeq = data; in nvme_irq_check() local
972 struct nvme_completion cqe = nvmeq->cqes[nvmeq->cq_head]; in nvme_irq_check()
973 if ((le16_to_cpu(cqe.status) & 1) != nvmeq->cq_phase) in nvme_irq_check()
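
The interrupt pair above supports optional threaded IRQ handling: nvme_irq() drains the ring under q_lock and reports IRQ_HANDLED only if cqe_seen was set, while nvme_irq_check() runs in hard-IRQ context and merely peeks at the phase bit of the entry at cq_head. A sketch of the check; unlisted context lines are assumptions.

    /* Sketch: hard-IRQ quick check used when threaded interrupts are enabled. */
    static irqreturn_t nvme_irq_check(int irq, void *data)
    {
            struct nvme_queue *nvmeq = data;
            struct nvme_completion cqe = nvmeq->cqes[nvmeq->cq_head];

            if ((le16_to_cpu(cqe.status) & 1) != nvmeq->cq_phase)
                    return IRQ_NONE;        /* no new entry: not ours, or spurious */
            return IRQ_WAKE_THREAD;         /* let nvme_irq() drain under q_lock */
    }
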
984 static void sync_completion(struct nvme_queue *nvmeq, void *ctx, in sync_completion() argument
1002 struct nvme_queue *nvmeq = cmd_rq->nvmeq; in nvme_submit_sync_cmd() local
1012 nvme_submit_cmd(nvmeq, cmd); in nvme_submit_sync_cmd()
1022 struct nvme_queue *nvmeq = dev->queues[0]; in nvme_submit_async_admin_req() local
1039 blk_mq_free_hctx_request(nvmeq->hctx, req); in nvme_submit_async_admin_req()
1040 return __nvme_submit_cmd(nvmeq, &c); in nvme_submit_async_admin_req()
1047 struct nvme_queue *nvmeq = dev->queues[0]; in nvme_submit_admin_async_cmd() local
1063 return nvme_submit_cmd(nvmeq, cmd); in nvme_submit_admin_async_cmd()
1113 struct nvme_queue *nvmeq) in adapter_alloc_cq() argument
1120 c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr); in adapter_alloc_cq()
1122 c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1); in adapter_alloc_cq()
1124 c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector); in adapter_alloc_cq()
1130 struct nvme_queue *nvmeq) in adapter_alloc_sq() argument
1137 c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr); in adapter_alloc_sq()
1139 c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1); in adapter_alloc_sq()
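
adapter_alloc_cq()/adapter_alloc_sq() wrap Create I/O Queue admin commands around the DMA buffers allocated earlier; NVMe encodes queue sizes 0's-based, hence the q_depth - 1. A hedged reconstruction of the CQ variant; the flag constants and the admin-submit helper at the end are assumptions based on the nvme.h of this era.

    /* Hedged sketch of adapter_alloc_cq(): tell the controller where the CQ
     * lives; flag names and the admin-submit helper are assumptions.
     */
    static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
                    struct nvme_queue *nvmeq)
    {
            struct nvme_command c;
            int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;

            memset(&c, 0, sizeof(c));
            c.create_cq.opcode = nvme_admin_create_cq;
            c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);   /* contiguous CQ buffer */
            c.create_cq.cqid = cpu_to_le16(qid);
            c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);  /* 0's-based size */
            c.create_cq.cq_flags = cpu_to_le16(flags);
            c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);

            return nvme_submit_admin_cmd(dev, &c, NULL);          /* assumed helper */
    }
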
1207 struct nvme_queue *nvmeq = cmd_rq->nvmeq; in nvme_abort_req() local
1208 struct nvme_dev *dev = nvmeq->dev; in nvme_abort_req()
1213 if (!nvmeq->qid || cmd_rq->aborted) { in nvme_abort_req()
1222 req->tag, nvmeq->qid); in nvme_abort_req()
1244 cmd.abort.sqid = cpu_to_le16(nvmeq->qid); in nvme_abort_req()
1250 dev_warn(nvmeq->q_dmadev, "Aborting I/O %d QID %d\n", req->tag, in nvme_abort_req()
1251 nvmeq->qid); in nvme_abort_req()
1253 dev_warn(nvmeq->q_dmadev, in nvme_abort_req()
1255 req->tag, nvmeq->qid); in nvme_abort_req()
1263 struct nvme_queue *nvmeq = data; in nvme_cancel_queue_ios() local
1283 dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d QID %d\n", in nvme_cancel_queue_ios()
1284 req->tag, nvmeq->qid); in nvme_cancel_queue_ios()
1286 fn(nvmeq, ctx, &cqe); in nvme_cancel_queue_ios()
1292 struct nvme_queue *nvmeq = cmd->nvmeq; in nvme_timeout() local
1294 dev_warn(nvmeq->q_dmadev, "Timeout I/O %d QID %d\n", req->tag, in nvme_timeout()
1295 nvmeq->qid); in nvme_timeout()
1296 spin_lock_irq(&nvmeq->q_lock); in nvme_timeout()
1298 spin_unlock_irq(&nvmeq->q_lock); in nvme_timeout()
1308 static void nvme_free_queue(struct nvme_queue *nvmeq) in nvme_free_queue() argument
1310 dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth), in nvme_free_queue()
1311 (void *)nvmeq->cqes, nvmeq->cq_dma_addr); in nvme_free_queue()
1312 dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth), in nvme_free_queue()
1313 nvmeq->sq_cmds, nvmeq->sq_dma_addr); in nvme_free_queue()
1314 kfree(nvmeq); in nvme_free_queue()
1322 struct nvme_queue *nvmeq = dev->queues[i]; in nvme_free_queues() local
1325 nvme_free_queue(nvmeq); in nvme_free_queues()
1333 static int nvme_suspend_queue(struct nvme_queue *nvmeq) in nvme_suspend_queue() argument
1337 spin_lock_irq(&nvmeq->q_lock); in nvme_suspend_queue()
1338 if (nvmeq->cq_vector == -1) { in nvme_suspend_queue()
1339 spin_unlock_irq(&nvmeq->q_lock); in nvme_suspend_queue()
1342 vector = nvmeq->dev->entry[nvmeq->cq_vector].vector; in nvme_suspend_queue()
1343 nvmeq->dev->online_queues--; in nvme_suspend_queue()
1344 nvmeq->cq_vector = -1; in nvme_suspend_queue()
1345 spin_unlock_irq(&nvmeq->q_lock); in nvme_suspend_queue()
1347 if (!nvmeq->qid && nvmeq->dev->admin_q) in nvme_suspend_queue()
1348 blk_mq_freeze_queue_start(nvmeq->dev->admin_q); in nvme_suspend_queue()
1351 free_irq(vector, nvmeq); in nvme_suspend_queue()
1356 static void nvme_clear_queue(struct nvme_queue *nvmeq) in nvme_clear_queue() argument
1358 struct blk_mq_hw_ctx *hctx = nvmeq->hctx; in nvme_clear_queue()
1360 spin_lock_irq(&nvmeq->q_lock); in nvme_clear_queue()
1362 blk_mq_tag_busy_iter(hctx, nvme_cancel_queue_ios, nvmeq); in nvme_clear_queue()
1363 spin_unlock_irq(&nvmeq->q_lock); in nvme_clear_queue()
1368 struct nvme_queue *nvmeq = dev->queues[qid]; in nvme_disable_queue() local
1370 if (!nvmeq) in nvme_disable_queue()
1372 if (nvme_suspend_queue(nvmeq)) in nvme_disable_queue()
1382 spin_lock_irq(&nvmeq->q_lock); in nvme_disable_queue()
1383 nvme_process_cq(nvmeq); in nvme_disable_queue()
1384 spin_unlock_irq(&nvmeq->q_lock); in nvme_disable_queue()
1391 struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq), GFP_KERNEL); in nvme_alloc_queue() local
1392 if (!nvmeq) in nvme_alloc_queue()
1395 nvmeq->cqes = dma_zalloc_coherent(dmadev, CQ_SIZE(depth), in nvme_alloc_queue()
1396 &nvmeq->cq_dma_addr, GFP_KERNEL); in nvme_alloc_queue()
1397 if (!nvmeq->cqes) in nvme_alloc_queue()
1400 nvmeq->sq_cmds = dma_alloc_coherent(dmadev, SQ_SIZE(depth), in nvme_alloc_queue()
1401 &nvmeq->sq_dma_addr, GFP_KERNEL); in nvme_alloc_queue()
1402 if (!nvmeq->sq_cmds) in nvme_alloc_queue()
1405 nvmeq->q_dmadev = dmadev; in nvme_alloc_queue()
1406 nvmeq->dev = dev; in nvme_alloc_queue()
1407 snprintf(nvmeq->irqname, sizeof(nvmeq->irqname), "nvme%dq%d", in nvme_alloc_queue()
1409 spin_lock_init(&nvmeq->q_lock); in nvme_alloc_queue()
1410 nvmeq->cq_head = 0; in nvme_alloc_queue()
1411 nvmeq->cq_phase = 1; in nvme_alloc_queue()
1412 nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; in nvme_alloc_queue()
1413 nvmeq->q_depth = depth; in nvme_alloc_queue()
1414 nvmeq->qid = qid; in nvme_alloc_queue()
1416 dev->queues[qid] = nvmeq; in nvme_alloc_queue()
1418 return nvmeq; in nvme_alloc_queue()
1421 dma_free_coherent(dmadev, CQ_SIZE(depth), (void *)nvmeq->cqes, in nvme_alloc_queue()
1422 nvmeq->cq_dma_addr); in nvme_alloc_queue()
1424 kfree(nvmeq); in nvme_alloc_queue()
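
nvme_alloc_queue() allocates both rings from coherent DMA memory and computes the queue's doorbell pointer from the controller's doorbell stride (each queue owns an SQ-tail and a CQ-head doorbell, hence qid * 2 * db_stride). A condensed sketch built from the fragments above; the irqname setup and queue counting are left out and the unlisted lines are assumptions.

    /* Condensed sketch of nvme_alloc_queue(): coherent rings plus doorbell
     * bookkeeping; irqname/queue_count handling omitted.
     */
    static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
                    int depth)
    {
            struct device *dmadev = &dev->pci_dev->dev;
            struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq), GFP_KERNEL);

            if (!nvmeq)
                    return NULL;

            nvmeq->cqes = dma_zalloc_coherent(dmadev, CQ_SIZE(depth),
                                    &nvmeq->cq_dma_addr, GFP_KERNEL);
            if (!nvmeq->cqes)
                    goto free_nvmeq;

            nvmeq->sq_cmds = dma_alloc_coherent(dmadev, SQ_SIZE(depth),
                                    &nvmeq->sq_dma_addr, GFP_KERNEL);
            if (!nvmeq->sq_cmds)
                    goto free_cqdma;

            nvmeq->q_dmadev = dmadev;
            nvmeq->dev = dev;
            spin_lock_init(&nvmeq->q_lock);
            nvmeq->cq_head = 0;
            nvmeq->cq_phase = 1;    /* first lap expects phase bit 1 */
            nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; /* SQ tail doorbell */
            nvmeq->q_depth = depth;
            nvmeq->qid = qid;
            dev->queues[qid] = nvmeq;
            return nvmeq;

     free_cqdma:
            dma_free_coherent(dmadev, CQ_SIZE(depth), (void *)nvmeq->cqes,
                            nvmeq->cq_dma_addr);
     free_nvmeq:
            kfree(nvmeq);
            return NULL;
    }
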
1428 static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq, in queue_request_irq() argument
1432 return request_threaded_irq(dev->entry[nvmeq->cq_vector].vector, in queue_request_irq()
1434 name, nvmeq); in queue_request_irq()
1435 return request_irq(dev->entry[nvmeq->cq_vector].vector, nvme_irq, in queue_request_irq()
1436 IRQF_SHARED, name, nvmeq); in queue_request_irq()
1439 static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid) in nvme_init_queue() argument
1441 struct nvme_dev *dev = nvmeq->dev; in nvme_init_queue()
1443 spin_lock_irq(&nvmeq->q_lock); in nvme_init_queue()
1444 nvmeq->sq_tail = 0; in nvme_init_queue()
1445 nvmeq->cq_head = 0; in nvme_init_queue()
1446 nvmeq->cq_phase = 1; in nvme_init_queue()
1447 nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; in nvme_init_queue()
1448 memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth)); in nvme_init_queue()
1450 spin_unlock_irq(&nvmeq->q_lock); in nvme_init_queue()
1453 static int nvme_create_queue(struct nvme_queue *nvmeq, int qid) in nvme_create_queue() argument
1455 struct nvme_dev *dev = nvmeq->dev; in nvme_create_queue()
1458 nvmeq->cq_vector = qid - 1; in nvme_create_queue()
1459 result = adapter_alloc_cq(dev, qid, nvmeq); in nvme_create_queue()
1463 result = adapter_alloc_sq(dev, qid, nvmeq); in nvme_create_queue()
1467 result = queue_request_irq(dev, nvmeq, nvmeq->irqname); in nvme_create_queue()
1471 nvme_init_queue(nvmeq, qid); in nvme_create_queue()
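
nvme_create_queue() then stitches the pieces together in the order the spec requires: the completion queue must exist before the submission queue that will post to it, and the interrupt is requested only once both are live. A sketch of that ordering; the release_sq/release_cq unwind path and its adapter_delete_*() helpers are assumptions based on the usual pattern.

    /* Sketch of nvme_create_queue(): CQ first, then SQ, then its interrupt. */
    static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
    {
            struct nvme_dev *dev = nvmeq->dev;
            int result;

            nvmeq->cq_vector = qid - 1;     /* the admin queue owns vector 0 */
            result = adapter_alloc_cq(dev, qid, nvmeq);
            if (result < 0)
                    return result;

            result = adapter_alloc_sq(dev, qid, nvmeq);
            if (result < 0)
                    goto release_cq;

            result = queue_request_irq(dev, nvmeq, nvmeq->irqname);
            if (result < 0)
                    goto release_sq;

            nvme_init_queue(nvmeq, qid);
            return result;

     release_sq:
            adapter_delete_sq(dev, qid);    /* assumed unwind helper */
     release_cq:
            adapter_delete_cq(dev, qid);    /* assumed unwind helper */
            return result;
    }
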
1613 struct nvme_queue *nvmeq; in nvme_configure_admin_queue() local
1637 nvmeq = dev->queues[0]; in nvme_configure_admin_queue()
1638 if (!nvmeq) { in nvme_configure_admin_queue()
1639 nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH); in nvme_configure_admin_queue()
1640 if (!nvmeq) in nvme_configure_admin_queue()
1644 aqa = nvmeq->q_depth - 1; in nvme_configure_admin_queue()
1655 writeq(nvmeq->sq_dma_addr, &dev->bar->asq); in nvme_configure_admin_queue()
1656 writeq(nvmeq->cq_dma_addr, &dev->bar->acq); in nvme_configure_admin_queue()
1662 nvmeq->cq_vector = 0; in nvme_configure_admin_queue()
1663 result = queue_request_irq(dev, nvmeq, nvmeq->irqname); in nvme_configure_admin_queue()
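
The admin queue has no Create Queue command; its geometry and ring addresses are programmed directly into BAR registers before the controller is enabled. AQA packs both 0's-based depths (ASQS in the low half, ACQS in the high half), and ASQ/ACQ take the ring DMA addresses. A small sketch of just that register programming, pulled out into a hypothetical helper for readability; the real driver does this inline in nvme_configure_admin_queue().

    /* Sketch: admin-queue register programming; the wrapper is hypothetical. */
    static void nvme_program_admin_queue_regs(struct nvme_dev *dev,
                    struct nvme_queue *nvmeq)
    {
            u32 aqa = nvmeq->q_depth - 1;   /* 0's-based queue size */

            aqa |= aqa << 16;               /* ACQS in bits 27:16, ASQS in bits 11:00 */
            writel(aqa, &dev->bar->aqa);
            writeq(nvmeq->sq_dma_addr, &dev->bar->asq);  /* admin SQ base */
            writeq(nvmeq->cq_dma_addr, &dev->bar->acq);  /* admin CQ base */
    }
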
2087 struct nvme_queue *nvmeq = dev->queues[i]; in nvme_kthread() local
2088 if (!nvmeq) in nvme_kthread()
2090 spin_lock_irq(&nvmeq->q_lock); in nvme_kthread()
2091 nvme_process_cq(nvmeq); in nvme_kthread()
2098 spin_unlock_irq(&nvmeq->q_lock); in nvme_kthread()
2474 static void nvme_del_queue_end(struct nvme_queue *nvmeq) in nvme_del_queue_end() argument
2476 struct nvme_delq_ctx *dq = nvmeq->cmdinfo.ctx; in nvme_del_queue_end()
2480 static int adapter_async_del_queue(struct nvme_queue *nvmeq, u8 opcode, in adapter_async_del_queue() argument
2487 c.delete_queue.qid = cpu_to_le16(nvmeq->qid); in adapter_async_del_queue()
2489 init_kthread_work(&nvmeq->cmdinfo.work, fn); in adapter_async_del_queue()
2490 return nvme_submit_admin_async_cmd(nvmeq->dev, &c, &nvmeq->cmdinfo, in adapter_async_del_queue()
2496 struct nvme_queue *nvmeq = container_of(work, struct nvme_queue, in nvme_del_cq_work_handler() local
2498 nvme_del_queue_end(nvmeq); in nvme_del_cq_work_handler()
2501 static int nvme_delete_cq(struct nvme_queue *nvmeq) in nvme_delete_cq() argument
2503 return adapter_async_del_queue(nvmeq, nvme_admin_delete_cq, in nvme_delete_cq()
2509 struct nvme_queue *nvmeq = container_of(work, struct nvme_queue, in nvme_del_sq_work_handler() local
2511 int status = nvmeq->cmdinfo.status; in nvme_del_sq_work_handler()
2514 status = nvme_delete_cq(nvmeq); in nvme_del_sq_work_handler()
2516 nvme_del_queue_end(nvmeq); in nvme_del_sq_work_handler()
2519 static int nvme_delete_sq(struct nvme_queue *nvmeq) in nvme_delete_sq() argument
2521 return adapter_async_del_queue(nvmeq, nvme_admin_delete_sq, in nvme_delete_sq()
2527 struct nvme_queue *nvmeq = container_of(work, struct nvme_queue, in nvme_del_queue_start() local
2529 if (nvme_delete_sq(nvmeq)) in nvme_del_queue_start()
2530 nvme_del_queue_end(nvmeq); in nvme_del_queue_start()
2553 struct nvme_queue *nvmeq = dev->queues[i]; in nvme_disable_io_queues() local
2555 if (nvme_suspend_queue(nvmeq)) in nvme_disable_io_queues()
2557 nvmeq->cmdinfo.ctx = nvme_get_dq(&dq); in nvme_disable_io_queues()
2558 nvmeq->cmdinfo.worker = dq.worker; in nvme_disable_io_queues()
2559 init_kthread_work(&nvmeq->cmdinfo.work, nvme_del_queue_start); in nvme_disable_io_queues()
2560 queue_kthread_work(dq.worker, &nvmeq->cmdinfo.work); in nvme_disable_io_queues()
2627 struct nvme_queue *nvmeq = dev->queues[i]; in nvme_dev_shutdown() local
2628 nvme_suspend_queue(nvmeq); in nvme_dev_shutdown()
2802 struct nvme_queue *nvmeq; in nvme_set_irq_hints() local
2806 nvmeq = dev->queues[i]; in nvme_set_irq_hints()
2808 if (!nvmeq->hctx) in nvme_set_irq_hints()
2811 irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector, in nvme_set_irq_hints()
2812 nvmeq->hctx->cpumask); in nvme_set_irq_hints()
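
Lastly, nvme_set_irq_hints() walks the online queues and hints each queue's interrupt vector toward the CPUs of its blk-mq hardware context, so completions tend to fire on the CPUs that submitted the I/O. A sketch; the loop bounds and surrounding context are assumptions.

    /* Sketch of nvme_set_irq_hints(); loop bounds are an assumption. */
    static void nvme_set_irq_hints(struct nvme_dev *dev)
    {
            struct nvme_queue *nvmeq;
            int i;

            for (i = 0; i < dev->online_queues; i++) {
                    nvmeq = dev->queues[i];
                    if (!nvmeq->hctx)
                            continue;       /* queue never got a hw context */

                    irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector,
                                    nvmeq->hctx->cpumask);
            }
    }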