Lines matching refs: iod
159 struct nvme_iod iod[0]; member
262 static void *iod_get_private(struct nvme_iod *iod) in iod_get_private() argument
264 return (void *) (iod->private & ~0x1UL); in iod_get_private()
270 static bool iod_should_kfree(struct nvme_iod *iod) in iod_should_kfree() argument
272 return (iod->private & NVME_INT_MASK) == 0; in iod_should_kfree()
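
The three hits above show how an iod is tied back to its request: iod->private stores the request pointer with its low bit doubling as a flag. A minimal reconstruction of the two accessors from the matched lines; NVME_INT_MASK is assumed to be the mask covering that flag bit, and the meaning of a set bit (iod embedded in the per-command area, so never kfree()d) is inferred from the allocation lines further down.

    /* iod->private = request pointer, low bit = "embedded, do not kfree" flag
     * (reconstructed from lines 262-272; NVME_INT_MASK is assumed). */
    static void *iod_get_private(struct nvme_iod *iod)
    {
        return (void *) (iod->private & ~0x1UL);    /* strip the tag bit */
    }

    /* Only an untagged iod came from kmalloc() and may be kfree()d. */
    static bool iod_should_kfree(struct nvme_iod *iod)
    {
        return (iod->private & NVME_INT_MASK) == 0;
    }

Packing the flag into the pointer's spare low bit saves a separate field in a structure that exists for every in-flight request.
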
416 static __le64 **iod_list(struct nvme_iod *iod) in iod_list() argument
418 return ((void *)iod) + iod->offset; in iod_list()
421 static inline void iod_init(struct nvme_iod *iod, unsigned nbytes, in iod_init() argument
424 iod->private = private; in iod_init()
425 iod->offset = offsetof(struct nvme_iod, sg[nseg]); in iod_init()
426 iod->npages = -1; in iod_init()
427 iod->length = nbytes; in iod_init()
428 iod->nents = 0; in iod_init()
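
Lines 416-428 locate and initialise the rest of the iod. The sketch below fills in the part of the iod_init() signature that the match truncates; the nseg and private parameters are inferred from the body, and the layout comment assumes the __le64 * array sits in the same allocation right after the scatterlist entries.

    /* The PRP-list pointer array lives in the same allocation as the iod,
     * iod->offset bytes from its start, i.e. just past sg[nseg]. */
    static __le64 **iod_list(struct nvme_iod *iod)
    {
        return ((void *)iod) + iod->offset;
    }

    static inline void iod_init(struct nvme_iod *iod, unsigned nbytes,
                                unsigned nseg, unsigned long private)
    {
        iod->private = private;                             /* tagged request pointer */
        iod->offset = offsetof(struct nvme_iod, sg[nseg]);  /* end of the sg array */
        iod->npages = -1;                                   /* no PRP-list pages yet */
        iod->length = nbytes;
        iod->nents = 0;
    }
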
435 struct nvme_iod *iod = kmalloc(sizeof(struct nvme_iod) + in __nvme_alloc_iod() local
439 if (iod) in __nvme_alloc_iod()
440 iod_init(iod, bytes, nseg, priv); in __nvme_alloc_iod()
442 return iod; in __nvme_alloc_iod()
450 struct nvme_iod *iod; in nvme_alloc_iod() local
456 iod = cmd->iod; in nvme_alloc_iod()
457 iod_init(iod, size, rq->nr_phys_segments, in nvme_alloc_iod()
459 return iod; in nvme_alloc_iod()
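
Putting lines 435-459 together with the iod[0] member at line 159: small requests reuse the iod preallocated inside the per-command data, larger ones get a fresh kmalloc(). A hedged sketch of both paths; the containing struct name nvme_cmd_info, the nvme_npages() helper, the NVME_INT_PAGES/NVME_INT_BYTES() thresholds and the blk_mq_rq_to_pdu() lookup are assumptions not visible in the matched lines, and the discard-sized special case is omitted.

    /* Heap path: one kmalloc covers the header, the PRP-list pointer array
     * and the scatterlist (size terms assumed). */
    static struct nvme_iod *__nvme_alloc_iod(unsigned nseg, unsigned bytes,
                                             struct nvme_dev *dev, gfp_t gfp,
                                             unsigned long priv)
    {
        struct nvme_iod *iod = kmalloc(sizeof(struct nvme_iod) +
                                       sizeof(__le64 *) * nvme_npages(bytes, dev) +
                                       sizeof(struct scatterlist) * nseg, gfp);
        if (iod)
            iod_init(iod, bytes, nseg, priv);
        return iod;
    }

    static struct nvme_iod *nvme_alloc_iod(struct request *rq,
                                           struct nvme_dev *dev, gfp_t gfp)
    {
        unsigned size = blk_rq_bytes(rq);

        if (rq->nr_phys_segments <= NVME_INT_PAGES &&
            size <= NVME_INT_BYTES(dev)) {
            /* Small request: reuse the iod embedded in the command pdu and
             * tag the request pointer so it is never kfree()d. */
            struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(rq);
            struct nvme_iod *iod = cmd->iod;

            iod_init(iod, size, rq->nr_phys_segments,
                     (unsigned long) rq | NVME_INT_MASK);
            return iod;
        }
        return __nvme_alloc_iod(rq->nr_phys_segments, size, dev, gfp,
                                (unsigned long) rq);
    }
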
466 static void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod) in nvme_free_iod() argument
470 __le64 **list = iod_list(iod); in nvme_free_iod()
471 dma_addr_t prp_dma = iod->first_dma; in nvme_free_iod()
473 if (iod->npages == 0) in nvme_free_iod()
475 for (i = 0; i < iod->npages; i++) { in nvme_free_iod()
482 if (iod_should_kfree(iod)) in nvme_free_iod()
483 kfree(iod); in nvme_free_iod()
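
nvme_free_iod() undoes both allocations. A sketch assuming the usual two DMA pools (dev->prp_small_pool, dev->prp_page_pool) and a dev->page_size field; reading the next page's DMA address back out of the last list entry with le64_to_cpu() is also an assumption, since only the loop head appears in the match.

    static void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod)
    {
        const int last_prp = dev->page_size / 8 - 1;    /* last slot of a list page */
        __le64 **list = iod_list(iod);
        dma_addr_t prp_dma = iod->first_dma;
        int i;

        /* npages == 0: a single short list from the small DMA pool. */
        if (iod->npages == 0)
            dma_pool_free(dev->prp_small_pool, list[0], prp_dma);
        /* npages > 0: full list pages; each one's last slot holds the DMA
         * address of the next, so follow the chain while freeing. */
        for (i = 0; i < iod->npages; i++) {
            __le64 *prp_list = list[i];
            dma_addr_t next_prp_dma = le64_to_cpu(prp_list[last_prp]);

            dma_pool_free(dev->prp_page_pool, prp_list, prp_dma);
            prp_dma = next_prp_dma;
        }

        /* Only kmalloc()ed iods are freed; the per-command inline one is not. */
        if (iod_should_kfree(iod))
            kfree(iod);
    }
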
592 struct nvme_iod *iod = ctx; in req_completion() local
593 struct request *req = iod_get_private(iod); in req_completion()
634 if (iod->nents) { in req_completion()
635 dma_unmap_sg(nvmeq->dev->dev, iod->sg, iod->nents, in req_completion()
640 dma_unmap_sg(nvmeq->dev->dev, iod->meta_sg, 1, in req_completion()
644 nvme_free_iod(nvmeq->dev, iod); in req_completion()
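
On completion the same iod drives the teardown. A sketch of the order implied by lines 592-644; the status handling in between and the final blk-mq completion call are elided, and rq_data_dir()/blk_integrity_rq() are assumed to be the checks behind lines 634 and 640.

    static void req_completion_sketch(struct nvme_queue *nvmeq, void *ctx,
                                      struct nvme_completion *cqe)
    {
        struct nvme_iod *iod = ctx;                     /* stored via nvme_set_info() */
        struct request *req = iod_get_private(iod);     /* untag the request pointer */
        enum dma_data_direction dma_dir =
            rq_data_dir(req) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;

        if (iod->nents) {
            /* Data (and metadata) pages were DMA-mapped at submit time. */
            dma_unmap_sg(nvmeq->dev->dev, iod->sg, iod->nents, dma_dir);
            if (blk_integrity_rq(req))
                dma_unmap_sg(nvmeq->dev->dev, iod->meta_sg, 1, dma_dir);
        }
        nvme_free_iod(nvmeq->dev, iod);     /* PRP pages back to their pools */
        /* ...then the request is completed back to the block layer. */
    }
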
651 static int nvme_setup_prps(struct nvme_dev *dev, struct nvme_iod *iod, in nvme_setup_prps() argument
656 struct scatterlist *sg = iod->sg; in nvme_setup_prps()
662 __le64 **list = iod_list(iod); in nvme_setup_prps()
680 iod->first_dma = dma_addr; in nvme_setup_prps()
687 iod->npages = 0; in nvme_setup_prps()
690 iod->npages = 1; in nvme_setup_prps()
695 iod->first_dma = dma_addr; in nvme_setup_prps()
696 iod->npages = -1; in nvme_setup_prps()
700 iod->first_dma = prp_dma; in nvme_setup_prps()
708 list[iod->npages++] = prp_list; in nvme_setup_prps()
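
nvme_setup_prps() decides what prp2 will be. A simplified sketch, assuming the first scatterlist element is page aligned (the real function also handles an offset into the first page); the dma_pool names, dev->page_size and the 256-byte small-list threshold are assumptions.

    static int nvme_setup_prps_sketch(struct nvme_dev *dev, struct nvme_iod *iod,
                                      int total_len, gfp_t gfp)
    {
        struct scatterlist *sg = iod->sg;
        u32 page_size = dev->page_size;
        u64 dma_addr = sg_dma_address(sg);
        int dma_len = sg_dma_len(sg);
        int length = total_len - page_size;     /* prp1 covers the first page */
        __le64 **list = iod_list(iod);
        struct dma_pool *pool;
        __le64 *prp_list;
        dma_addr_t prp_dma;
        int i = 0;

        if (length <= 0)                        /* everything fits behind prp1 */
            return total_len;

        dma_len -= page_size;
        if (dma_len) {
            dma_addr += page_size;
        } else {
            sg = sg_next(sg);
            dma_addr = sg_dma_address(sg);
            dma_len = sg_dma_len(sg);
        }

        if (length <= page_size) {              /* prp2 = second data page (line 680) */
            iod->first_dma = dma_addr;
            return total_len;
        }

        /* prp2 must point at a list: short lists come from a 256-byte pool
         * (npages == 0), longer ones from full pages (npages counts them). */
        if (DIV_ROUND_UP(length, page_size) <= 256 / 8) {
            pool = dev->prp_small_pool;
            iod->npages = 0;
        } else {
            pool = dev->prp_page_pool;
            iod->npages = 1;
        }

        prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
        if (!prp_list) {
            /* No list page: report a shorter transfer, prp2 = second page
             * (lines 695-696). */
            iod->first_dma = dma_addr;
            iod->npages = -1;
            return (total_len - length) + page_size;
        }
        list[0] = prp_list;
        iod->first_dma = prp_dma;               /* prp2 = DMA address of the list */

        for (;;) {
            if (i == page_size >> 3) {
                /* List page full: chain a new one and record it in iod_list()
                 * so nvme_free_iod() can return it later (line 708). */
                __le64 *old = prp_list;

                prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
                if (!prp_list)
                    return total_len - length;
                list[iod->npages++] = prp_list;
                prp_list[0] = old[i - 1];
                old[i - 1] = cpu_to_le64(prp_dma);
                i = 1;
            }
            prp_list[i++] = cpu_to_le64(dma_addr);
            dma_addr += page_size;
            dma_len -= page_size;
            length -= page_size;
            if (length <= 0)
                break;
            if (dma_len <= 0) {
                sg = sg_next(sg);
                dma_addr = sg_dma_address(sg);
                dma_len = sg_dma_len(sg);
            }
        }

        return total_len;
    }
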
731 struct nvme_iod *iod) in nvme_submit_priv() argument
738 cmnd.rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg)); in nvme_submit_priv()
739 cmnd.rw.prp2 = cpu_to_le64(iod->first_dma); in nvme_submit_priv()
751 struct request *req, struct nvme_iod *iod) in nvme_submit_discard() argument
754 (struct nvme_dsm_range *)iod_list(iod)[0]; in nvme_submit_discard()
765 cmnd.dsm.prp1 = cpu_to_le64(iod->first_dma); in nvme_submit_discard()
785 static int nvme_submit_iod(struct nvme_queue *nvmeq, struct nvme_iod *iod, in nvme_submit_iod() argument
788 struct request *req = iod_get_private(iod); in nvme_submit_iod()
805 cmnd.rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg)); in nvme_submit_iod()
806 cmnd.rw.prp2 = cpu_to_le64(iod->first_dma); in nvme_submit_iod()
825 cpu_to_le64(sg_dma_address(iod->meta_sg)); in nvme_submit_iod()
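
Lines 731-825 show where those values end up in the submitted command: prp1 is always the first data page, prp2 is whatever nvme_setup_prps() left in first_dma, and the discard payload travels through prp1 as the DMA address of the DSM range stored at iod_list(iod)[0]. A small hypothetical helper (not in the driver) gathering the read/write assignments; the left-hand side of line 825 is assumed to be the rw.metadata field.

    static void nvme_fill_data_ptrs_sketch(struct nvme_command *cmnd,
                                           struct request *req,
                                           struct nvme_iod *iod)
    {
        /* prp1: first data page. */
        cmnd->rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
        /* prp2: unused, the second data page, or a PRP-list page, whichever
         * nvme_setup_prps() recorded in first_dma. */
        cmnd->rw.prp2 = cpu_to_le64(iod->first_dma);
        /* Metadata, if the request carries an integrity payload. */
        if (blk_integrity_rq(req))
            cmnd->rw.metadata = cpu_to_le64(sg_dma_address(iod->meta_sg));
    }
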
849 struct nvme_iod *iod; in nvme_queue_rq() local
865 iod = nvme_alloc_iod(req, dev, GFP_ATOMIC); in nvme_queue_rq()
866 if (!iod) in nvme_queue_rq()
877 &iod->first_dma); in nvme_queue_rq()
880 iod_list(iod)[0] = (__le64 *)range; in nvme_queue_rq()
881 iod->npages = 0; in nvme_queue_rq()
885 sg_init_table(iod->sg, req->nr_phys_segments); in nvme_queue_rq()
886 iod->nents = blk_rq_map_sg(req->q, req, iod->sg); in nvme_queue_rq()
887 if (!iod->nents) in nvme_queue_rq()
890 if (!dma_map_sg(nvmeq->q_dmadev, iod->sg, iod->nents, dma_dir)) in nvme_queue_rq()
894 nvme_setup_prps(dev, iod, blk_rq_bytes(req), GFP_ATOMIC)) { in nvme_queue_rq()
895 dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir); in nvme_queue_rq()
900 dma_unmap_sg(dev->dev, iod->sg, iod->nents, in nvme_queue_rq()
905 sg_init_table(iod->meta_sg, 1); in nvme_queue_rq()
907 req->q, req->bio, iod->meta_sg) != 1) { in nvme_queue_rq()
908 dma_unmap_sg(dev->dev, iod->sg, iod->nents, in nvme_queue_rq()
916 if (!dma_map_sg(nvmeq->q_dmadev, iod->meta_sg, 1, dma_dir)) { in nvme_queue_rq()
917 dma_unmap_sg(dev->dev, iod->sg, iod->nents, in nvme_queue_rq()
924 nvme_set_info(cmd, iod, req_completion); in nvme_queue_rq()
927 nvme_submit_priv(nvmeq, req, iod); in nvme_queue_rq()
929 nvme_submit_discard(nvmeq, ns, req, iod); in nvme_queue_rq()
933 nvme_submit_iod(nvmeq, iod, ns); in nvme_queue_rq()
940 nvme_free_iod(dev, iod); in nvme_queue_rq()
943 nvme_free_iod(dev, iod); in nvme_queue_rq()
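
nvme_queue_rq() strings all of the above together. A condensed sketch of the flow in lines 849-943; the flush and driver-private command details are elided, error handling is collapsed into two labels, blk_rq_map_integrity_sg() is assumed to be the call behind line 907, and the BLK_MQ_RQ_QUEUE_* return codes follow this driver generation's blk-mq conventions.

    static int nvme_queue_rq_sketch(struct nvme_queue *nvmeq, struct nvme_ns *ns,
                                    struct nvme_dev *dev, struct request *req,
                                    struct nvme_cmd_info *cmd)
    {
        enum dma_data_direction dma_dir =
            rq_data_dir(req) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
        struct nvme_iod *iod;

        iod = nvme_alloc_iod(req, dev, GFP_ATOMIC);
        if (!iod)
            return BLK_MQ_RQ_QUEUE_BUSY;

        if (req->cmd_flags & REQ_DISCARD) {
            /* Discard payload: one DSM range from the small DMA pool,
             * remembered in iod_list()[0] so nvme_free_iod() releases it. */
            void *range = dma_pool_alloc(dev->prp_small_pool, GFP_ATOMIC,
                                         &iod->first_dma);
            if (!range)
                goto free_iod;
            iod_list(iod)[0] = (__le64 *)range;
            iod->npages = 0;
        } else if (req->nr_phys_segments) {
            /* Map the data and build the PRP entries. */
            sg_init_table(iod->sg, req->nr_phys_segments);
            iod->nents = blk_rq_map_sg(req->q, req, iod->sg);
            if (!iod->nents)
                goto free_iod;
            if (!dma_map_sg(nvmeq->q_dmadev, iod->sg, iod->nents, dma_dir))
                goto free_iod;
            if (blk_rq_bytes(req) !=
                nvme_setup_prps(dev, iod, blk_rq_bytes(req), GFP_ATOMIC))
                goto unmap;
            if (blk_integrity_rq(req)) {
                /* Metadata gets its own single-entry scatterlist. */
                sg_init_table(iod->meta_sg, 1);
                if (blk_rq_map_integrity_sg(req->q, req->bio,
                                            iod->meta_sg) != 1)
                    goto unmap;
                if (!dma_map_sg(nvmeq->q_dmadev, iod->meta_sg, 1, dma_dir))
                    goto unmap;
            }
        }

        /* Remember the iod and its completion handler, then build and post
         * the right command for this request type. */
        nvme_set_info(cmd, iod, req_completion);
        if (req->cmd_type == REQ_TYPE_DRV_PRIV)
            nvme_submit_priv(nvmeq, req, iod);
        else if (req->cmd_flags & REQ_DISCARD)
            nvme_submit_discard(nvmeq, ns, req, iod);
        else
            nvme_submit_iod(nvmeq, iod, ns);
        return BLK_MQ_RQ_QUEUE_OK;

    unmap:
        dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
    free_iod:
        nvme_free_iod(dev, iod);
        return BLK_MQ_RQ_QUEUE_BUSY;
    }
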