Lines Matching refs:iod

147 	struct nvme_iod iod[0];  member
251 static void *iod_get_private(struct nvme_iod *iod) in iod_get_private() argument
253 return (void *) (iod->private & ~0x1UL); in iod_get_private()
259 static bool iod_should_kfree(struct nvme_iod *iod) in iod_should_kfree() argument
261 return (iod->private & NVME_INT_MASK) == 0; in iod_should_kfree()
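The private field above does double duty: it holds the owning request pointer, and its low bit (NVME_INT_MASK) records whether the iod was carved out of the preallocated per-command buffer (the iod[0] member at line 147) rather than kmalloc'd, so nvme_free_iod() knows whether kfree() is safe. A minimal userspace sketch of the same pointer-tagging trick, with hypothetical names:

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	#define INT_MASK ((uintptr_t)1)  /* low bit marks an inline (not malloc'd) object */

	struct obj { uintptr_t private; };

	/* Store a pointer plus a one-bit "inline" flag in one word.  Works
	 * because heap pointers are at least word aligned, so bit 0 of a
	 * valid pointer is always zero. */
	static void obj_init(struct obj *o, void *owner, int is_inline)
	{
		o->private = (uintptr_t)owner | (is_inline ? INT_MASK : 0);
	}

	static void *obj_get_private(struct obj *o)
	{
		return (void *)(o->private & ~INT_MASK);
	}

	static int obj_should_free(struct obj *o)
	{
		return (o->private & INT_MASK) == 0;
	}

	int main(void)
	{
		int owner;
		struct obj o;

		obj_init(&o, &owner, 1);
		assert(obj_get_private(&o) == &owner);
		assert(!obj_should_free(&o));
		printf("inline object, skip free\n");
		return 0;
	}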
398 static __le64 **iod_list(struct nvme_iod *iod) in iod_list() argument
400 return ((void *)iod) + iod->offset; in iod_list()
403 static inline void iod_init(struct nvme_iod *iod, unsigned nbytes, in iod_init() argument
406 iod->private = private; in iod_init()
407 iod->offset = offsetof(struct nvme_iod, sg[nseg]); in iod_init()
408 iod->npages = -1; in iod_init()
409 iod->length = nbytes; in iod_init()
410 iod->nents = 0; in iod_init()
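iod_list() works because the iod is one contiguous allocation: the fixed header, then nseg scatterlist entries, then the per-request array of PRP-list page pointers. iod_init() records where that pointer array begins in iod->offset via the offsetof(..., sg[nseg]) idiom at line 407. A compilable sketch of the layout trick (struct names here are illustrative, not the kernel's):

	#include <stddef.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct fake_sg { void *page; unsigned len; };

	struct fake_iod {
		unsigned offset;          /* byte offset of the pointer array */
		struct fake_sg sg[];      /* nseg entries; pointer array follows */
	};

	/* Equivalent of iod_list(): the pointer array sits right after sg[nseg]. */
	static void **iod_ptr_list(struct fake_iod *iod)
	{
		return (void **)((char *)iod + iod->offset);
	}

	int main(void)
	{
		unsigned nseg = 4, nptrs = 2;
		/* offsetof with a runtime index is a GCC/Clang extension the kernel relies on */
		size_t hdr = offsetof(struct fake_iod, sg[nseg]);
		struct fake_iod *iod = malloc(hdr + nptrs * sizeof(void *));

		if (!iod)
			return 1;
		iod->offset = hdr;
		iod_ptr_list(iod)[0] = NULL;      /* slot for the first PRP-list page */
		printf("pointer array starts at byte %zu\n", hdr);
		free(iod);
		return 0;
	}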
417 struct nvme_iod *iod = kmalloc(sizeof(struct nvme_iod) + in __nvme_alloc_iod() local
421 if (iod) in __nvme_alloc_iod()
422 iod_init(iod, bytes, nseg, priv); in __nvme_alloc_iod()
424 return iod; in __nvme_alloc_iod()
432 struct nvme_iod *iod; in nvme_alloc_iod() local
438 iod = cmd->iod; in nvme_alloc_iod()
439 iod_init(iod, size, rq->nr_phys_segments, in nvme_alloc_iod()
441 return iod; in nvme_alloc_iod()
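__nvme_alloc_iod() sizes that single allocation as header + one __le64 * slot per possible PRP-list page + one scatterlist entry per segment; nvme_alloc_iod() first tries the inline iod embedded in the command (line 147) and only falls back to kmalloc for large requests. The list-page count is the interesting arithmetic; the sketch below mirrors the logic of the kernel's nvme_npages() helper under an assumed 4KiB device page:

	#include <stdio.h>

	#define PAGE_SIZE 4096UL

	/* Worst-case PRP-list pages for an nbytes transfer: one 8-byte PRP
	 * entry per data page (plus one for a non-aligned start), packed
	 * into list pages whose last slot is reserved for chaining.
	 * Mirrors nvme_npages(); assumes a 4KiB device page. */
	static unsigned long nprp_pages(unsigned long nbytes)
	{
		unsigned long nprps = (nbytes + PAGE_SIZE - 1) / PAGE_SIZE + 1;

		return (8 * nprps + PAGE_SIZE - 9) / (PAGE_SIZE - 8);
	}

	int main(void)
	{
		printf("4KiB   -> %lu list page(s)\n", nprp_pages(4096));
		printf("128KiB -> %lu list page(s)\n", nprp_pages(128 * 1024));
		printf("2MiB   -> %lu list page(s)\n", nprp_pages(2 * 1024 * 1024));
		return 0;
	}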
448 void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod) in nvme_free_iod() argument
452 __le64 **list = iod_list(iod); in nvme_free_iod()
453 dma_addr_t prp_dma = iod->first_dma; in nvme_free_iod()
455 if (iod->npages == 0) in nvme_free_iod()
457 for (i = 0; i < iod->npages; i++) { in nvme_free_iod()
464 if (iod_should_kfree(iod)) in nvme_free_iod()
465 kfree(iod); in nvme_free_iod()
590 struct nvme_iod *iod = ctx; in req_completion() local
591 struct request *req = iod_get_private(iod); in req_completion()
619 if (iod->nents) { in req_completion()
620 dma_unmap_sg(&nvmeq->dev->pci_dev->dev, iod->sg, iod->nents, in req_completion()
625 dma_unmap_sg(&nvmeq->dev->pci_dev->dev, iod->meta_sg, 1, in req_completion()
629 nvme_free_iod(nvmeq->dev, iod); in req_completion()
636 int nvme_setup_prps(struct nvme_dev *dev, struct nvme_iod *iod, int total_len, in nvme_setup_prps() argument
641 struct scatterlist *sg = iod->sg; in nvme_setup_prps()
647 __le64 **list = iod_list(iod); in nvme_setup_prps()
665 iod->first_dma = dma_addr; in nvme_setup_prps()
672 iod->npages = 0; in nvme_setup_prps()
675 iod->npages = 1; in nvme_setup_prps()
680 iod->first_dma = dma_addr; in nvme_setup_prps()
681 iod->npages = -1; in nvme_setup_prps()
685 iod->first_dma = prp_dma; in nvme_setup_prps()
693 list[iod->npages++] = prp_list; in nvme_setup_prps()
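nvme_setup_prps() implements the NVMe PRP contract: prp1 always points at the first data page; if the transfer ends in that page, prp2 is not needed; if it fits in two pages, first_dma is simply the second page's address (line 665, npages stays -1, nothing to free); otherwise first_dma points at a list of page pointers allocated from a dma_pool, where the last slot of each full list page chains to the next one (a 256-byte small pool serves short lists, npages = 0 there and 1+ for full pages; lines 680-681 are the allocation-failure fallback, which returns a partial length). A self-contained sketch of such a chained list using plain calloc and fake bus addresses; note the kernel additionally shuffles the last data entry into the new page when it chains:

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define PAGE_SIZE 4096UL
	#define ENTRIES   (PAGE_SIZE / sizeof(uint64_t))  /* 512 slots per list page */

	/* Stand-in for a bus address: just the pointer value. */
	static uint64_t fake_dma(const void *p) { return (uint64_t)(uintptr_t)p; }

	int main(void)
	{
		unsigned ndata = 1000;                /* data pages to describe */
		unsigned per = ENTRIES - 1;           /* last slot chains onward */
		unsigned nlists = (ndata + per - 1) / per;
		uint64_t **list = calloc(nlists, sizeof(*list));
		unsigned i, l, slot = 0;

		if (!list)
			return 1;
		for (l = 0; l < nlists; l++)
			if (!(list[l] = calloc(ENTRIES, sizeof(uint64_t))))
				return 1;

		for (i = 0, l = 0; i < ndata; i++) {
			if (slot == ENTRIES - 1) {        /* list page full: chain it */
				list[l][slot] = fake_dma(list[l + 1]);
				l++;
				slot = 0;
			}
			list[l][slot++] = 0x1000ULL * (i + 1);  /* fake data-page address */
		}
		printf("%u data pages -> %u chained PRP list page(s)\n", ndata, nlists);

		for (l = 0; l < nlists; l++)          /* nvme_free_iod() walks npages the same way */
			free(list[l]);
		free(list);
		return 0;
	}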
721 struct request *req, struct nvme_iod *iod) in nvme_submit_discard() argument
724 (struct nvme_dsm_range *)iod_list(iod)[0]; in nvme_submit_discard()
735 cmnd->dsm.prp1 = cpu_to_le64(iod->first_dma); in nvme_submit_discard()
759 static int nvme_submit_iod(struct nvme_queue *nvmeq, struct nvme_iod *iod, in nvme_submit_iod() argument
762 struct request *req = iod_get_private(iod); in nvme_submit_iod()
781 cmnd->rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg)); in nvme_submit_iod()
782 cmnd->rw.prp2 = cpu_to_le64(iod->first_dma); in nvme_submit_iod()
787 cmnd->rw.metadata = cpu_to_le64(sg_dma_address(iod->meta_sg)); in nvme_submit_iod()
820 struct nvme_iod *iod; in nvme_queue_rq() local
836 iod = nvme_alloc_iod(req, ns->dev, GFP_ATOMIC); in nvme_queue_rq()
837 if (!iod) in nvme_queue_rq()
849 &iod->first_dma); in nvme_queue_rq()
852 iod_list(iod)[0] = (__le64 *)range; in nvme_queue_rq()
853 iod->npages = 0; in nvme_queue_rq()
857 sg_init_table(iod->sg, req->nr_phys_segments); in nvme_queue_rq()
858 iod->nents = blk_rq_map_sg(req->q, req, iod->sg); in nvme_queue_rq()
859 if (!iod->nents) in nvme_queue_rq()
862 if (!dma_map_sg(nvmeq->q_dmadev, iod->sg, iod->nents, dma_dir)) in nvme_queue_rq()
866 nvme_setup_prps(nvmeq->dev, iod, blk_rq_bytes(req), GFP_ATOMIC)) { in nvme_queue_rq()
867 dma_unmap_sg(&nvmeq->dev->pci_dev->dev, iod->sg, in nvme_queue_rq()
868 iod->nents, dma_dir); in nvme_queue_rq()
875 sg_init_table(iod->meta_sg, 1); in nvme_queue_rq()
877 req->q, req->bio, iod->meta_sg) != 1) in nvme_queue_rq()
883 if (!dma_map_sg(nvmeq->q_dmadev, iod->meta_sg, 1, dma_dir)) in nvme_queue_rq()
888 nvme_set_info(cmd, iod, req_completion); in nvme_queue_rq()
891 nvme_submit_discard(nvmeq, ns, req, iod); in nvme_queue_rq()
895 nvme_submit_iod(nvmeq, iod, ns); in nvme_queue_rq()
902 nvme_free_iod(nvmeq->dev, iod); in nvme_queue_rq()
905 nvme_free_iod(nvmeq->dev, iod); in nvme_queue_rq()
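nvme_queue_rq() ties the pieces together: allocate the iod, map the request (or carve a DSM range out of the small pool for discards), build the PRPs, then hand the iod to the completion path via nvme_set_info(). The two nvme_free_iod() calls at 902 and 905 sit under separate unwind labels: transient failures such as dma_map_sg() failing or a short return from nvme_setup_prps() requeue as busy, while blk_rq_map_sg() returning 0 errors the request out. A skeleton of that unwinding idiom, with hypothetical stand-ins for the mapping steps:

	#include <stdio.h>
	#include <stdlib.h>

	enum { QUEUE_OK, QUEUE_BUSY, QUEUE_ERROR };

	static int queue_rq(int map_sg_empty, int dma_map_failed)
	{
		void *iod = malloc(64);               /* nvme_alloc_iod() */
		if (!iod)
			return QUEUE_BUSY;

		if (map_sg_empty)                     /* blk_rq_map_sg() returned 0 */
			goto error_cmd;
		if (dma_map_failed)                   /* dma_map_sg() failed: transient */
			goto retry_cmd;

		/* success: the real driver keeps the iod until req_completion() */
		free(iod);
		return QUEUE_OK;

	 retry_cmd:
		free(iod);                            /* nvme_free_iod() */
		return QUEUE_BUSY;
	 error_cmd:
		free(iod);                            /* nvme_free_iod() */
		return QUEUE_ERROR;
	}

	int main(void)
	{
		printf("%d %d %d\n", queue_rq(0, 0), queue_rq(1, 0), queue_rq(0, 1));
		return 0;
	}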
1680 struct nvme_iod *iod; in nvme_map_user_pages() local
1701 iod = __nvme_alloc_iod(count, length, dev, 0, GFP_KERNEL); in nvme_map_user_pages()
1702 if (!iod) in nvme_map_user_pages()
1705 sg = iod->sg; in nvme_map_user_pages()
1715 iod->nents = count; in nvme_map_user_pages()
1723 return iod; in nvme_map_user_pages()
1726 kfree(iod); in nvme_map_user_pages()
1735 struct nvme_iod *iod) in nvme_unmap_user_pages() argument
1739 dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents, in nvme_unmap_user_pages()
1742 for (i = 0; i < iod->nents; i++) in nvme_unmap_user_pages()
1743 put_page(sg_page(&iod->sg[i])); in nvme_unmap_user_pages()
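nvme_map_user_pages() pins the user buffer, loads the pinned pages into the iod's scatterlist, and DMA-maps them; nvme_unmap_user_pages() undoes both steps, dropping each page reference with put_page(). One subtlety worth a sketch: the buffer need not be page aligned, so the page count must include the offset into the first page:

	#include <stdio.h>

	#define PAGE_SIZE 4096UL

	/* Pages pinned for a user buffer at addr/len: the offset within the
	 * first page counts toward the span, as in nvme_map_user_pages(). */
	static unsigned long pages_spanned(unsigned long addr, unsigned long len)
	{
		unsigned long offset = addr & (PAGE_SIZE - 1);

		return (offset + len + PAGE_SIZE - 1) / PAGE_SIZE;
	}

	int main(void)
	{
		/* 8KiB starting 100 bytes into a page spans 3 pages, not 2. */
		printf("%lu\n", pages_spanned(0x1000 + 100, 8192));
		return 0;
	}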
1753 struct nvme_iod *iod; in nvme_submit_io() local
1778 iod = nvme_map_user_pages(dev, write, io.addr, length); in nvme_submit_io()
1784 if (IS_ERR(iod)) in nvme_submit_io()
1785 return PTR_ERR(iod); in nvme_submit_io()
1787 prp_len = nvme_setup_prps(dev, iod, length, GFP_KERNEL); in nvme_submit_io()
1819 c.rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg)); in nvme_submit_io()
1820 c.rw.prp2 = cpu_to_le64(iod->first_dma); in nvme_submit_io()
1824 nvme_unmap_user_pages(dev, write, iod); in nvme_submit_io()
1825 nvme_free_iod(dev, iod); in nvme_submit_io()
1842 struct nvme_iod *uninitialized_var(iod); in nvme_user_cmd()
1865 iod = nvme_map_user_pages(dev, cmd.opcode & 1, cmd.addr, in nvme_user_cmd()
1867 if (IS_ERR(iod)) in nvme_user_cmd()
1868 return PTR_ERR(iod); in nvme_user_cmd()
1869 length = nvme_setup_prps(dev, iod, length, GFP_KERNEL); in nvme_user_cmd()
1870 c.common.prp1 = cpu_to_le64(sg_dma_address(iod->sg)); in nvme_user_cmd()
1871 c.common.prp2 = cpu_to_le64(iod->first_dma); in nvme_user_cmd()
1895 nvme_unmap_user_pages(dev, cmd.opcode & 1, iod); in nvme_user_cmd()
1896 nvme_free_iod(dev, iod); in nvme_user_cmd()
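Both ioctl paths (nvme_submit_io() and nvme_user_cmd()) end the same way: map the user buffer, run nvme_setup_prps(), point prp1 at the first data page and prp2 at iod->first_dma, submit, then unmap and free in that order. A trivial sketch of the final fill step; the real command fields are __le64 and go through cpu_to_le64():

	#include <stdint.h>
	#include <stdio.h>

	/* Minimal stand-in for the PRP fields of struct nvme_command. */
	struct cmd { uint64_t prp1, prp2; };

	static void fill_prps(struct cmd *c, uint64_t first_sg_dma, uint64_t first_dma)
	{
		c->prp1 = first_sg_dma;   /* sg_dma_address(iod->sg) */
		c->prp2 = first_dma;      /* iod->first_dma: 2nd page or PRP list */
	}

	int main(void)
	{
		struct cmd c;

		fill_prps(&c, 0x1000, 0x2000);
		printf("prp1=%#llx prp2=%#llx\n",
		       (unsigned long long)c.prp1, (unsigned long long)c.prp2);
		return 0;
	}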