Searched refs:sg (Results 1 - 200 of 800) sorted by relevance


/linux-4.1.27/tools/virtio/linux/
scatterlist.h
13 #define sg_is_chain(sg) ((sg)->page_link & 0x01)
14 #define sg_is_last(sg) ((sg)->page_link & 0x02)
15 #define sg_chain_ptr(sg) \
16 ((struct scatterlist *) ((sg)->page_link & ~0x03))
20 * @sg: SG entry
24 * Assign page to sg entry. Also see sg_set_page(), the most commonly used
28 static inline void sg_assign_page(struct scatterlist *sg, struct page *page) sg_assign_page() argument
30 unsigned long page_link = sg->page_link & 0x3; sg_assign_page()
38 BUG_ON(sg->sg_magic != SG_MAGIC); sg_assign_page()
39 BUG_ON(sg_is_chain(sg)); sg_assign_page()
41 sg->page_link = page_link | (unsigned long) page; sg_assign_page()
45 * sg_set_page - Set sg entry to point at given page
46 * @sg: SG entry
52 * Use this function to set an sg entry pointing at a page, never assign
53 * the page directly. We encode sg table information in the lower bits
55 * to an sg entry.
58 static inline void sg_set_page(struct scatterlist *sg, struct page *page, sg_set_page() argument
61 sg_assign_page(sg, page); sg_set_page()
62 sg->offset = offset; sg_set_page()
63 sg->length = len; sg_set_page()
66 static inline struct page *sg_page(struct scatterlist *sg) sg_page() argument
69 BUG_ON(sg->sg_magic != SG_MAGIC); sg_page()
70 BUG_ON(sg_is_chain(sg)); sg_page()
72 return (struct page *)((sg)->page_link & ~0x3); sg_page()
76 * Loop over each sg element, following the pointer to a new list if necessary
78 #define for_each_sg(sglist, sg, nr, __i) \
79 for (__i = 0, sg = (sglist); __i < (nr); __i++, sg = sg_next(sg))
109 * @sg: SG entryScatterlist
112 * Marks the passed in sg entry as the termination point for the sg
116 static inline void sg_mark_end(struct scatterlist *sg) sg_mark_end() argument
119 BUG_ON(sg->sg_magic != SG_MAGIC); sg_mark_end()
124 sg->page_link |= 0x02; sg_mark_end()
125 sg->page_link &= ~0x01; sg_mark_end()
130 * @sg: SG entryScatterlist
136 static inline void sg_unmark_end(struct scatterlist *sg) sg_unmark_end() argument
139 BUG_ON(sg->sg_magic != SG_MAGIC); sg_unmark_end()
141 sg->page_link &= ~0x02; sg_unmark_end()
144 static inline struct scatterlist *sg_next(struct scatterlist *sg) sg_next() argument
147 BUG_ON(sg->sg_magic != SG_MAGIC); sg_next()
149 if (sg_is_last(sg)) sg_next()
152 sg++; sg_next()
153 if (unlikely(sg_is_chain(sg))) sg_next()
154 sg = sg_chain_ptr(sg); sg_next()
156 return sg; sg_next()
172 static inline dma_addr_t sg_phys(struct scatterlist *sg) sg_phys() argument
174 return page_to_phys(sg_page(sg)) + sg->offset; sg_phys()
177 static inline void sg_set_buf(struct scatterlist *sg, const void *buf, sg_set_buf() argument
180 sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf)); sg_set_buf()
183 static inline void sg_init_one(struct scatterlist *sg, sg_init_one() argument
186 sg_init_table(sg, 1); sg_init_one()
187 sg_set_buf(sg, buf, buflen); sg_init_one()
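As a rough illustration of the calling pattern the comments above describe (a minimal, editor-added sketch with made-up names, assuming kmalloc'd lowmem buffers; not part of the search results):

	#include <linux/printk.h>
	#include <linux/scatterlist.h>

	/* Illustrative only: describe three kmalloc'd buffers in one sg list. */
	static void example_fill_sg(struct scatterlist sgl[3],
				    void *buf0, void *buf1, void *buf2,
				    unsigned int len)
	{
		struct scatterlist *sg;
		int i;

		sg_init_table(sgl, 3);		/* zeroes entries, marks sgl[2] as the end */
		sg_set_buf(&sgl[0], buf0, len);	/* never poke page_link directly */
		sg_set_buf(&sgl[1], buf1, len);
		sg_set_buf(&sgl[2], buf2, len);

		for_each_sg(sgl, sg, 3, i)	/* sg_next() follows any chaining */
			pr_debug("entry %d: offset %u length %u\n",
				 i, sg->offset, sg->length);
	}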
virtio.h
38 struct scatterlist sg[], unsigned int num,
43 struct scatterlist sg[], unsigned int num,
/linux-4.1.27/include/asm-generic/
scatterlist.h
22 * You should only work with the number of sg entries pci_map_sg
23 * returns, or alternatively stop on the first sg_dma_len(sg) which
26 #define sg_dma_address(sg) ((sg)->dma_address)
29 #define sg_dma_len(sg) ((sg)->dma_length)
31 #define sg_dma_len(sg) ((sg)->length)
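To illustrate the rule in the comment above (only the count returned by the mapping call is meaningful, and entries may be coalesced), here is a minimal, hedged sketch; the device pointer, list and direction are assumed to come from the caller:

	#include <linux/dma-mapping.h>
	#include <linux/printk.h>
	#include <linux/scatterlist.h>

	static int example_map_and_walk(struct device *dev,
					struct scatterlist *sgl, int nents)
	{
		struct scatterlist *sg;
		int mapped, i;

		mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
		if (!mapped)
			return -ENOMEM;

		/* Walk only the returned count; use the dma_* accessors. */
		for_each_sg(sgl, sg, mapped, i)
			pr_debug("seg %d: dma %pad len %u\n",
				 i, &sg->dma_address, sg_dma_len(sg));

		/* Unmapping still takes the original nents. */
		dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
		return 0;
	}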
pci-dma-compat.h
65 pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, pci_map_sg() argument
68 return dma_map_sg(hwdev == NULL ? NULL : &hwdev->dev, sg, nents, (enum dma_data_direction)direction); pci_map_sg()
72 pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, pci_unmap_sg() argument
75 dma_unmap_sg(hwdev == NULL ? NULL : &hwdev->dev, sg, nents, (enum dma_data_direction)direction); pci_unmap_sg()
93 pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, pci_dma_sync_sg_for_cpu() argument
96 dma_sync_sg_for_cpu(hwdev == NULL ? NULL : &hwdev->dev, sg, nelems, (enum dma_data_direction)direction); pci_dma_sync_sg_for_cpu()
100 pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg, pci_dma_sync_sg_for_device() argument
103 dma_sync_sg_for_device(hwdev == NULL ? NULL : &hwdev->dev, sg, nelems, (enum dma_data_direction)direction); pci_dma_sync_sg_for_device()
dma-mapping-common.h
46 static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, dma_map_sg_attrs() argument
54 for_each_sg(sg, s, nents, i) dma_map_sg_attrs()
57 ents = ops->map_sg(dev, sg, nents, dir, attrs); dma_map_sg_attrs()
59 debug_dma_map_sg(dev, sg, nents, ents, dir); dma_map_sg_attrs()
64 static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
71 debug_dma_unmap_sg(dev, sg, nents, dir);
73 ops->unmap_sg(dev, sg, nents, dir, attrs);
155 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, dma_sync_sg_for_cpu() argument
162 ops->sync_sg_for_cpu(dev, sg, nelems, dir); dma_sync_sg_for_cpu()
163 debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir); dma_sync_sg_for_cpu()
167 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, dma_sync_sg_for_device() argument
174 ops->sync_sg_for_device(dev, sg, nelems, dir); dma_sync_sg_for_device()
175 debug_dma_sync_sg_for_device(dev, sg, nelems, dir); dma_sync_sg_for_device()
dma-mapping-broken.h
47 dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
51 dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
72 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
/linux-4.1.27/drivers/crypto/caam/
sg_sw_sec4.h
33 sg_to_sec4_sg(struct scatterlist *sg, int sg_count, sg_to_sec4_sg() argument
37 dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg), sg_to_sec4_sg()
38 sg_dma_len(sg), offset); sg_to_sec4_sg()
40 sg = sg_next(sg); sg_to_sec4_sg()
50 static inline void sg_to_sec4_sg_last(struct scatterlist *sg, int sg_count, sg_to_sec4_sg_last() argument
54 sec4_sg_ptr = sg_to_sec4_sg(sg, sg_count, sec4_sg_ptr, offset); sg_to_sec4_sg_last()
62 struct scatterlist *sg = sg_list; __sg_count() local
67 nbytes -= sg->length; __sg_count()
68 if (!sg_is_last(sg) && (sg + 1)->length == 0) __sg_count()
70 sg = sg_next(sg); __sg_count()
88 static int dma_map_sg_chained(struct device *dev, struct scatterlist *sg, dma_map_sg_chained() argument
95 dma_map_sg(dev, sg, 1, dir); dma_map_sg_chained()
96 sg = sg_next(sg); dma_map_sg_chained()
99 dma_map_sg(dev, sg, nents, dir); dma_map_sg_chained()
104 static int dma_unmap_sg_chained(struct device *dev, struct scatterlist *sg, dma_unmap_sg_chained() argument
111 dma_unmap_sg(dev, sg, 1, dir); dma_unmap_sg_chained()
112 sg = sg_next(sg); dma_unmap_sg_chained()
115 dma_unmap_sg(dev, sg, nents, dir); dma_unmap_sg_chained()
/linux-4.1.27/drivers/crypto/qce/
dma.c
57 int qce_mapsg(struct device *dev, struct scatterlist *sg, int nents, qce_mapsg() argument
63 while (sg) { qce_mapsg()
64 err = dma_map_sg(dev, sg, 1, dir); qce_mapsg()
67 sg = sg_next(sg); qce_mapsg()
70 err = dma_map_sg(dev, sg, nents, dir); qce_mapsg()
78 void qce_unmapsg(struct device *dev, struct scatterlist *sg, int nents, qce_unmapsg() argument
82 while (sg) { qce_unmapsg()
83 dma_unmap_sg(dev, sg, 1, dir); qce_unmapsg()
84 sg = sg_next(sg); qce_unmapsg()
87 dma_unmap_sg(dev, sg, nents, dir); qce_unmapsg()
92 struct scatterlist *sg = sglist; qce_countsg() local
98 while (nbytes > 0 && sg) { qce_countsg()
100 nbytes -= sg->length; qce_countsg()
101 if (!sg_is_last(sg) && (sg + 1)->length == 0 && chained) qce_countsg()
103 sg = sg_next(sg); qce_countsg()
112 struct scatterlist *sg = sgt->sgl, *sg_last = NULL; qce_sgtable_add() local
114 while (sg) { qce_sgtable_add()
115 if (!sg_page(sg)) qce_sgtable_add()
117 sg = sg_next(sg); qce_sgtable_add()
120 if (!sg) qce_sgtable_add()
123 while (new_sgl && sg) { qce_sgtable_add()
124 sg_set_page(sg, sg_page(new_sgl), new_sgl->length, qce_sgtable_add()
126 sg_last = sg; qce_sgtable_add()
127 sg = sg_next(sg); qce_sgtable_add()
134 static int qce_dma_prep_sg(struct dma_chan *chan, struct scatterlist *sg, qce_dma_prep_sg() argument
142 if (!sg || !nents) qce_dma_prep_sg()
145 desc = dmaengine_prep_slave_sg(chan, sg, nents, dir, flags); qce_dma_prep_sg()
cipher.h
38 * @dst_tbl: destination sg table
39 * @dst_sg: destination sg pointer table beginning
40 * @src_tbl: source sg table
41 * @src_sg: source sg pointer table beginning;
sha.h
37 * @src_orig: original request sg list
45 * @sg: used to chain sg lists
64 struct scatterlist sg[2]; member in struct:qce_sha_reqctx
sha.c
239 struct scatterlist *sg_last, *sg; qce_ahash_update() local
281 sg = sg_last = req->src; qce_ahash_update()
283 while (len < nbytes && sg) { qce_ahash_update()
284 if (len + sg_dma_len(sg) > nbytes) qce_ahash_update()
286 len += sg_dma_len(sg); qce_ahash_update()
287 sg_last = sg; qce_ahash_update()
288 sg = sg_next(sg); qce_ahash_update()
297 sg_init_table(rctx->sg, 2); qce_ahash_update()
298 sg_set_buf(rctx->sg, rctx->tmpbuf, rctx->buflen); qce_ahash_update()
299 scatterwalk_sg_chain(rctx->sg, 2, req->src); qce_ahash_update()
300 req->src = rctx->sg; qce_ahash_update()
324 sg_init_one(rctx->sg, rctx->tmpbuf, rctx->buflen); qce_ahash_final()
326 req->src = rctx->sg; qce_ahash_final()
374 struct scatterlist sg; qce_ahash_hmac_setkey() local
419 sg_init_one(&sg, buf, keylen); qce_ahash_hmac_setkey()
420 ahash_request_set_crypt(req, &sg, ctx->authkey, keylen); qce_ahash_hmac_setkey()
dma.h
53 void qce_unmapsg(struct device *dev, struct scatterlist *sg, int nents,
55 int qce_mapsg(struct device *dev, struct scatterlist *sg, int nents,
ablkcipher.c
70 struct scatterlist *sg; qce_ablkcipher_async_req_handle() local
104 sg = qce_sgtable_add(&rctx->dst_tbl, req->dst); qce_ablkcipher_async_req_handle()
105 if (IS_ERR(sg)) { qce_ablkcipher_async_req_handle()
106 ret = PTR_ERR(sg); qce_ablkcipher_async_req_handle()
110 sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg); qce_ablkcipher_async_req_handle()
111 if (IS_ERR(sg)) { qce_ablkcipher_async_req_handle()
112 ret = PTR_ERR(sg); qce_ablkcipher_async_req_handle()
116 sg_mark_end(sg); qce_ablkcipher_async_req_handle()
/linux-4.1.27/crypto/
scatterwalk.c
33 void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg) scatterwalk_start() argument
35 walk->sg = sg; scatterwalk_start()
37 BUG_ON(!sg->length); scatterwalk_start()
39 walk->offset = sg->offset; scatterwalk_start()
56 page = sg_page(walk->sg) + ((walk->offset - 1) >> PAGE_SHIFT); scatterwalk_pagedone()
64 if (walk->offset >= walk->sg->offset + walk->sg->length) scatterwalk_pagedone()
65 scatterwalk_start(walk, sg_next(walk->sg)); scatterwalk_pagedone()
103 void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg, scatterwalk_map_and_copy() argument
113 scatterwalk_start(&walk, sg); scatterwalk_map_and_copy()
115 if (start < offset + sg->length) scatterwalk_map_and_copy()
118 offset += sg->length; scatterwalk_map_and_copy()
119 sg = sg_next(sg); scatterwalk_map_and_copy()
128 int scatterwalk_bytes_sglen(struct scatterlist *sg, int num_bytes) scatterwalk_bytes_sglen() argument
133 if (num_bytes < sg->length) scatterwalk_bytes_sglen()
137 offset += sg->length; scatterwalk_bytes_sglen()
139 sg = sg_next(sg); scatterwalk_bytes_sglen()
142 if (unlikely(!sg && (num_bytes < offset))) scatterwalk_bytes_sglen()
144 } while (sg && (num_bytes > offset)); scatterwalk_bytes_sglen()
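scatterwalk_map_and_copy() above is how the authenc code further down reads and writes a small region of a scatterlist at a byte offset (e.g. the ICV). A hedged, editor-added sketch of both directions; the offset and length are placeholders:

	#include <crypto/scatterwalk.h>

	static void example_copy_tag(struct scatterlist *sg, unsigned int cryptlen,
				     u8 *tag, unsigned int taglen)
	{
		/* Non-zero last argument: copy from 'tag' into the scatterlist. */
		scatterwalk_map_and_copy(tag, sg, cryptlen, taglen, 1);

		/* Zero last argument: copy from the scatterlist back into 'tag'. */
		scatterwalk_map_and_copy(tag, sg, cryptlen, taglen, 0);
	}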
algif_skcipher.c
31 struct scatterlist sg[0]; member in struct:skcipher_sg_list
87 struct scatterlist *sg; skcipher_free_async_sgls() local
97 for_each_sg(sgl, sg, n, i) skcipher_free_async_sgls()
98 put_page(sg_page(sg)); skcipher_free_async_sgls()
136 struct scatterlist *sg = NULL; skcipher_alloc_sgl() local
140 sg = sgl->sg; skcipher_alloc_sgl()
142 if (!sg || sgl->cur >= MAX_SGL_ENTS) { skcipher_alloc_sgl()
144 sizeof(sgl->sg[0]) * (MAX_SGL_ENTS + 1), skcipher_alloc_sgl()
149 sg_init_table(sgl->sg, MAX_SGL_ENTS + 1); skcipher_alloc_sgl()
152 if (sg) skcipher_alloc_sgl()
153 scatterwalk_sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg); skcipher_alloc_sgl()
166 struct scatterlist *sg; skcipher_pull_sgl() local
172 sg = sgl->sg; skcipher_pull_sgl()
175 int plen = min_t(int, used, sg[i].length); skcipher_pull_sgl()
177 if (!sg_page(sg + i)) skcipher_pull_sgl()
180 sg[i].length -= plen; skcipher_pull_sgl()
181 sg[i].offset += plen; skcipher_pull_sgl()
186 if (sg[i].length) skcipher_pull_sgl()
189 put_page(sg_page(sg + i)); skcipher_pull_sgl()
190 sg_assign_page(sg + i, NULL); skcipher_pull_sgl()
195 sizeof(*sgl) + sizeof(sgl->sg[0]) * skcipher_pull_sgl()
354 struct scatterlist *sg; skcipher_sendmsg() local
361 sg = sgl->sg + sgl->cur - 1; skcipher_sendmsg()
363 PAGE_SIZE - sg->offset - sg->length); skcipher_sendmsg()
365 err = memcpy_from_msg(page_address(sg_page(sg)) + skcipher_sendmsg()
366 sg->offset + sg->length, skcipher_sendmsg()
371 sg->length += len; skcipher_sendmsg()
372 ctx->merge = (sg->offset + sg->length) & skcipher_sendmsg()
394 sg = sgl->sg; skcipher_sendmsg()
396 sg_unmark_end(sg + sgl->cur - 1); skcipher_sendmsg()
401 sg_assign_page(sg + i, alloc_page(GFP_KERNEL)); skcipher_sendmsg()
403 if (!sg_page(sg + i)) skcipher_sendmsg()
406 err = memcpy_from_msg(page_address(sg_page(sg + i)), skcipher_sendmsg()
409 __free_page(sg_page(sg + i)); skcipher_sendmsg()
410 sg_assign_page(sg + i, NULL); skcipher_sendmsg()
414 sg[i].length = plen; skcipher_sendmsg()
423 sg_mark_end(sg + sgl->cur - 1); skcipher_sendmsg()
472 sg_unmark_end(sgl->sg + sgl->cur - 1); skcipher_sendpage()
474 sg_mark_end(sgl->sg + sgl->cur); skcipher_sendpage()
476 sg_set_page(sgl->sg + sgl->cur, page, size, offset); skcipher_sendpage()
493 struct scatterlist *sg; skcipher_all_sg_nents() local
497 sg = sgl->sg; skcipher_all_sg_nents()
499 while (!sg->length) skcipher_all_sg_nents()
500 sg++; skcipher_all_sg_nents()
502 nents += sg_nents(sg); skcipher_all_sg_nents()
514 struct scatterlist *sg; skcipher_recvmsg_async() local
533 sreq->tsg = kcalloc(tx_nents, sizeof(*sg), GFP_KERNEL); skcipher_recvmsg_async()
555 sg = sgl->sg; skcipher_recvmsg_async()
557 while (!sg->length) skcipher_recvmsg_async()
558 sg++; skcipher_recvmsg_async()
562 used = min_t(unsigned long, used, sg->length); skcipher_recvmsg_async()
586 sg_set_page(sreq->tsg + txbufs++, sg_page(sg), sg->length, skcipher_recvmsg_async()
587 sg->offset); skcipher_recvmsg_async()
617 ablkcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg, skcipher_recvmsg_async()
644 struct scatterlist *sg; skcipher_recvmsg_sync() local
653 sg = sgl->sg; skcipher_recvmsg_sync()
655 while (!sg->length) skcipher_recvmsg_sync()
656 sg++; skcipher_recvmsg_sync()
678 ablkcipher_request_set_crypt(&ctx->req, sg, skcipher_recvmsg_sync()
679 ctx->rsgl.sg, used, skcipher_recvmsg_sync()
algif_aead.c
28 struct scatterlist sg[ALG_MAX_PAGES]; member in struct:aead_sg_list
82 struct scatterlist *sg = sgl->sg; aead_put_sgl() local
86 if (!sg_page(sg + i)) aead_put_sgl()
89 put_page(sg_page(sg + i)); aead_put_sgl()
90 sg_assign_page(sg + i, NULL); aead_put_sgl()
215 struct scatterlist *sg = NULL; aead_sendmsg() local
219 sg = sgl->sg + sgl->cur - 1; aead_sendmsg()
221 PAGE_SIZE - sg->offset - sg->length); aead_sendmsg()
222 err = memcpy_from_msg(page_address(sg_page(sg)) + aead_sendmsg()
223 sg->offset + sg->length, aead_sendmsg()
228 sg->length += len; aead_sendmsg()
229 ctx->merge = (sg->offset + sg->length) & aead_sendmsg()
256 sg = sgl->sg + sgl->cur; aead_sendmsg()
259 sg_assign_page(sg, alloc_page(GFP_KERNEL)); aead_sendmsg()
261 if (!sg_page(sg)) aead_sendmsg()
264 err = memcpy_from_msg(page_address(sg_page(sg)), aead_sendmsg()
267 __free_page(sg_page(sg)); aead_sendmsg()
268 sg_assign_page(sg, NULL); aead_sendmsg()
272 sg->offset = 0; aead_sendmsg()
273 sg->length = plen; aead_sendmsg()
330 sg_set_page(sgl->sg + sgl->cur, page, size, offset); aead_sendpage()
358 struct scatterlist *sg = NULL; aead_recvmsg() local
459 * scatterlist. When this loop finishes, sg points to the start of the aead_recvmsg()
463 sg = sgl->sg + i; aead_recvmsg()
464 if (sg->length <= assoclen) { aead_recvmsg()
466 sg_set_page(assoc + i, sg_page(sg), aead_recvmsg()
467 sg->length, sg->offset); aead_recvmsg()
468 assoclen -= sg->length; aead_recvmsg()
482 sg_set_page(assoc + i, sg_page(sg), aead_recvmsg()
483 assoclen, sg->offset); aead_recvmsg()
486 sg->length -= assoclen; aead_recvmsg()
487 sg->offset += assoclen; aead_recvmsg()
493 aead_request_set_crypt(&ctx->aead_req, sg, ctx->rsgl[0].sg, used, aead_recvmsg()
624 sg_init_table(ctx->tsgl.sg, ALG_MAX_PAGES); aead_accept_parent()
tcrypt.c
76 struct scatterlist *sg, int blen, int secs) test_cipher_jiffies()
85 ret = crypto_blkcipher_encrypt(desc, sg, sg, blen); test_cipher_jiffies()
87 ret = crypto_blkcipher_decrypt(desc, sg, sg, blen); test_cipher_jiffies()
99 struct scatterlist *sg, int blen) test_cipher_cycles()
110 ret = crypto_blkcipher_encrypt(desc, sg, sg, blen); test_cipher_cycles()
112 ret = crypto_blkcipher_decrypt(desc, sg, sg, blen); test_cipher_cycles()
124 ret = crypto_blkcipher_encrypt(desc, sg, sg, blen); test_cipher_cycles()
126 ret = crypto_blkcipher_decrypt(desc, sg, sg, blen); test_cipher_cycles()
247 static void sg_init_aead(struct scatterlist *sg, char *xbuf[XBUFSIZE], sg_init_aead() argument
260 sg_init_table(sg, np); sg_init_aead()
263 sg_set_buf(&sg[k], xbuf[k], PAGE_SIZE); sg_init_aead()
265 sg_set_buf(&sg[k], xbuf[k], rem); sg_init_aead()
278 struct scatterlist *sg; test_aead_speed() local
311 sg = kmalloc(sizeof(*sg) * 8 * 3, GFP_KERNEL); test_aead_speed()
312 if (!sg) test_aead_speed()
314 asg = &sg[8]; test_aead_speed()
377 sg_init_aead(&sg[0], xbuf, test_aead_speed()
383 aead_request_set_crypt(req, sg, sgout, *b_size, iv); test_aead_speed()
407 kfree(sg); test_aead_speed()
454 struct scatterlist sg[TVMEMSIZE]; test_cipher_speed() local
484 sg_init_table(sg, TVMEMSIZE); test_cipher_speed()
485 sg_set_buf(sg, tvmem[0] + *keysize, test_cipher_speed()
488 sg_set_buf(sg + j, tvmem[j], PAGE_SIZE); test_cipher_speed()
499 ret = test_cipher_jiffies(&desc, enc, sg, test_cipher_speed()
502 ret = test_cipher_cycles(&desc, enc, sg, test_cipher_speed()
520 struct scatterlist *sg, int blen, test_hash_jiffies_digest()
529 ret = crypto_hash_digest(desc, sg, blen, out); test_hash_jiffies_digest()
540 static int test_hash_jiffies(struct hash_desc *desc, struct scatterlist *sg, test_hash_jiffies() argument
548 return test_hash_jiffies_digest(desc, sg, blen, out, secs); test_hash_jiffies()
556 ret = crypto_hash_update(desc, sg, plen); test_hash_jiffies()
573 struct scatterlist *sg, int blen, char *out) test_hash_cycles_digest()
583 ret = crypto_hash_digest(desc, sg, blen, out); test_hash_cycles_digest()
594 ret = crypto_hash_digest(desc, sg, blen, out); test_hash_cycles_digest()
615 static int test_hash_cycles(struct hash_desc *desc, struct scatterlist *sg, test_hash_cycles() argument
623 return test_hash_cycles_digest(desc, sg, blen, out); test_hash_cycles()
633 ret = crypto_hash_update(desc, sg, plen); test_hash_cycles()
652 ret = crypto_hash_update(desc, sg, plen); test_hash_cycles()
677 static void test_hash_sg_init(struct scatterlist *sg) test_hash_sg_init() argument
681 sg_init_table(sg, TVMEMSIZE); test_hash_sg_init()
683 sg_set_buf(sg + i, tvmem[i], PAGE_SIZE); test_hash_sg_init()
691 struct scatterlist sg[TVMEMSIZE]; test_hash_speed() local
718 test_hash_sg_init(sg); test_hash_speed()
735 ret = test_hash_jiffies(&desc, sg, speed[i].blen, test_hash_speed()
738 ret = test_hash_cycles(&desc, sg, speed[i].blen, test_hash_speed()
929 struct scatterlist sg[TVMEMSIZE]; test_ahash_speed() local
952 test_hash_sg_init(sg); test_ahash_speed()
974 ahash_request_set_crypt(req, sg, output, speed[i].plen); test_ahash_speed()
1126 struct scatterlist sg[TVMEMSIZE]; test_acipher_speed() local
1159 sg_init_table(sg, DIV_ROUND_UP(k, PAGE_SIZE)); test_acipher_speed()
1162 sg_set_buf(sg, tvmem[0] + *keysize, test_acipher_speed()
1167 sg_set_buf(sg + j, tvmem[j], PAGE_SIZE); test_acipher_speed()
1172 sg_set_buf(sg + j, tvmem[j], k); test_acipher_speed()
1175 sg_set_buf(sg, tvmem[0] + *keysize, *b_size); test_acipher_speed()
1182 ablkcipher_request_set_crypt(req, sg, sg, *b_size, iv); test_acipher_speed()
75 test_cipher_jiffies(struct blkcipher_desc *desc, int enc, struct scatterlist *sg, int blen, int secs) test_cipher_jiffies() argument
98 test_cipher_cycles(struct blkcipher_desc *desc, int enc, struct scatterlist *sg, int blen) test_cipher_cycles() argument
519 test_hash_jiffies_digest(struct hash_desc *desc, struct scatterlist *sg, int blen, char *out, int secs) test_hash_jiffies_digest() argument
572 test_hash_cycles_digest(struct hash_desc *desc, struct scatterlist *sg, int blen, char *out) test_hash_cycles_digest() argument
ahash.c
69 struct scatterlist *sg; hash_walk_new_entry() local
71 sg = walk->sg; hash_walk_new_entry()
72 walk->offset = sg->offset; hash_walk_new_entry()
73 walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT); hash_walk_new_entry()
75 walk->entrylen = sg->length; hash_walk_new_entry()
125 walk->sg = sg_next(walk->sg); crypto_hash_walk_done()
142 walk->sg = req->src; crypto_hash_walk_first()
160 walk->sg = req->src; crypto_ahash_walk_first()
172 struct scatterlist *sg, unsigned int len) crypto_hash_walk_first_compat()
182 walk->sg = sg; crypto_hash_walk_first_compat()
170 crypto_hash_walk_first_compat(struct hash_desc *hdesc, struct crypto_hash_walk *walk, struct scatterlist *sg, unsigned int len) crypto_hash_walk_first_compat() argument
authencesn.c
43 struct scatterlist *sg; member in struct:authenc_esn_request_ctx
108 ahash_request_set_crypt(ahreq, areq_ctx->sg, ahreq->result, authenc_esn_geniv_ahash_update_done()
128 scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg, authenc_esn_geniv_ahash_update_done()
158 scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg, authenc_esn_geniv_ahash_update_done2()
179 scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg, authenc_esn_geniv_ahash_done()
204 ahash_request_set_crypt(ahreq, areq_ctx->sg, ahreq->result, authenc_esn_verify_ahash_update_done()
229 scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, authenc_esn_verify_ahash_update_done()
278 scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, authenc_esn_verify_ahash_update_done2()
318 scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, authenc_esn_verify_ahash_done()
366 ahash_request_set_crypt(ahreq, areq_ctx->sg, hash, areq_ctx->cryptlen); crypto_authenc_esn_ahash()
436 areq_ctx->sg = dst; crypto_authenc_esn_genicv()
551 scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, crypto_authenc_esn_verify()
604 areq_ctx->sg = src; crypto_authenc_esn_iverify()
authenc.c
41 struct scatterlist *sg; member in struct:authenc_request_ctx
134 ahash_request_set_crypt(ahreq, areq_ctx->sg, ahreq->result, authenc_geniv_ahash_update_done()
144 scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg, authenc_geniv_ahash_update_done()
163 scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg, authenc_geniv_ahash_done()
187 ahash_request_set_crypt(ahreq, areq_ctx->sg, ahreq->result, authenc_verify_ahash_update_done()
200 scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, authenc_verify_ahash_update_done()
239 scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, authenc_verify_ahash_done()
286 ahash_request_set_crypt(ahreq, areq_ctx->sg, hash, crypto_authenc_ahash_fb()
312 ahash_request_set_crypt(ahreq, areq_ctx->sg, hash, crypto_authenc_ahash()
361 areq_ctx->sg = dst; crypto_authenc_genicv()
476 scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen, crypto_authenc_verify()
516 areq_ctx->sg = src; crypto_authenc_iverify()
/linux-4.1.27/include/linux/
scatterlist.h
23 * information about the sg table as well. The two lower bits are reserved
26 * If bit 0 is set, then the page_link contains a pointer to the next sg
27 * table list. Otherwise the next entry is at sg + 1.
29 * If bit 1 is set, then this sg entry is the last element in a list.
39 * a valid sg entry, or whether it points to the start of a new scatterlist.
42 #define sg_is_chain(sg) ((sg)->page_link & 0x01)
43 #define sg_is_last(sg) ((sg)->page_link & 0x02)
44 #define sg_chain_ptr(sg) \
45 ((struct scatterlist *) ((sg)->page_link & ~0x03))
49 * @sg: SG entry
53 * Assign page to sg entry. Also see sg_set_page(), the most commonly used
57 static inline void sg_assign_page(struct scatterlist *sg, struct page *page) sg_assign_page() argument
59 unsigned long page_link = sg->page_link & 0x3; sg_assign_page()
67 BUG_ON(sg->sg_magic != SG_MAGIC); sg_assign_page()
68 BUG_ON(sg_is_chain(sg)); sg_assign_page()
70 sg->page_link = page_link | (unsigned long) page; sg_assign_page()
74 * sg_set_page - Set sg entry to point at given page
75 * @sg: SG entry
81 * Use this function to set an sg entry pointing at a page, never assign
82 * the page directly. We encode sg table information in the lower bits
84 * to an sg entry.
87 static inline void sg_set_page(struct scatterlist *sg, struct page *page, sg_set_page() argument
90 sg_assign_page(sg, page); sg_set_page()
91 sg->offset = offset; sg_set_page()
92 sg->length = len; sg_set_page()
95 static inline struct page *sg_page(struct scatterlist *sg) sg_page() argument
98 BUG_ON(sg->sg_magic != SG_MAGIC); sg_page()
99 BUG_ON(sg_is_chain(sg)); sg_page()
101 return (struct page *)((sg)->page_link & ~0x3); sg_page()
105 * sg_set_buf - Set sg entry to point at given data
106 * @sg: SG entry
111 static inline void sg_set_buf(struct scatterlist *sg, const void *buf, sg_set_buf() argument
117 sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf)); sg_set_buf()
121 * Loop over each sg element, following the pointer to a new list if necessary
123 #define for_each_sg(sglist, sg, nr, __i) \
124 for (__i = 0, sg = (sglist); __i < (nr); __i++, sg = sg_next(sg))
158 * @sg: SG entryScatterlist
161 * Marks the passed in sg entry as the termination point for the sg
165 static inline void sg_mark_end(struct scatterlist *sg) sg_mark_end() argument
168 BUG_ON(sg->sg_magic != SG_MAGIC); sg_mark_end()
173 sg->page_link |= 0x02; sg_mark_end()
174 sg->page_link &= ~0x01; sg_mark_end()
179 * @sg: SG entryScatterlist
185 static inline void sg_unmark_end(struct scatterlist *sg) sg_unmark_end() argument
188 BUG_ON(sg->sg_magic != SG_MAGIC); sg_unmark_end()
190 sg->page_link &= ~0x02; sg_unmark_end()
194 * sg_phys - Return physical address of an sg entry
195 * @sg: SG entry
198 * This calls page_to_phys() on the page in this sg entry, and adds the
199 * sg offset. The caller must know that it is legal to call page_to_phys()
200 * on the sg page.
203 static inline dma_addr_t sg_phys(struct scatterlist *sg) sg_phys() argument
205 return page_to_phys(sg_page(sg)) + sg->offset; sg_phys()
209 * sg_virt - Return virtual address of an sg entry
210 * @sg: SG entry
213 * This calls page_address() on the page in this sg entry, and adds the
214 * sg offset. The caller must know that the sg page has a valid virtual
218 static inline void *sg_virt(struct scatterlist *sg) sg_virt() argument
220 return page_address(sg_page(sg)) + sg->offset; sg_virt()
223 int sg_nents(struct scatterlist *sg);
259 * sg page iterator
261 * Iterates over sg entries page-by-page. On each successful iteration,
263 * to get the current page and its dma address. @piter->sg will point to the
264 * sg holding this page and @piter->sg_pgoffset to the page's page offset
265 * within the sg. The iteration will stop either when a maximum number of sg
266 * entries was reached or a terminating sg (sg_last(sg) == true) was reached.
269 struct scatterlist *sg; /* sg holding the page */ member in struct:sg_page_iter
270 unsigned int sg_pgoffset; /* page offset within the sg */
273 unsigned int __nents; /* remaining sg entries */
288 return nth_page(sg_page(piter->sg), piter->sg_pgoffset); sg_page_iter_page()
298 return sg_dma_address(piter->sg) + (piter->sg_pgoffset << PAGE_SHIFT); sg_page_iter_dma_address()
302 * for_each_sg_page - iterate over the pages of the given sg list
304 * @piter: page iterator to hold current page, sg, sg_pgoffset
305 * @nents: maximum number of sg entries to iterate over
313 * Mapping sg iterator
315 * Iterates over sg entries mapping page-by-page. On each successful
dma-debug.h
47 extern void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
79 struct scatterlist *sg,
83 struct scatterlist *sg,
123 static inline void debug_dma_map_sg(struct device *dev, struct scatterlist *sg, debug_dma_map_sg() argument
173 struct scatterlist *sg, debug_dma_sync_sg_for_cpu()
179 struct scatterlist *sg, debug_dma_sync_sg_for_device()
172 debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, int direction) debug_dma_sync_sg_for_cpu() argument
178 debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, int direction) debug_dma_sync_sg_for_device() argument
swiotlb.h
74 swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
78 swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
95 swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
103 swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
cb710.h
144 * 32-bit PIO mapping sg iterator
166 * @miter: sg mapping iter
188 * @miter: sg mapping iter
/linux-4.1.27/arch/c6x/kernel/
dma.c
67 struct scatterlist *sg; dma_map_sg() local
70 for_each_sg(sglist, sg, nents, i) dma_map_sg()
71 sg->dma_address = dma_map_single(dev, sg_virt(sg), sg->length, dma_map_sg()
84 struct scatterlist *sg; dma_unmap_sg() local
87 for_each_sg(sglist, sg, nents, i) dma_unmap_sg()
88 dma_unmap_single(dev, sg_dma_address(sg), sg->length, dir); dma_unmap_sg()
117 struct scatterlist *sg; dma_sync_sg_for_cpu() local
120 for_each_sg(sglist, sg, nents, i) dma_sync_sg_for_cpu()
121 dma_sync_single_for_cpu(dev, sg_dma_address(sg), dma_sync_sg_for_cpu()
122 sg->length, dir); dma_sync_sg_for_cpu()
132 struct scatterlist *sg; dma_sync_sg_for_device() local
135 for_each_sg(sglist, sg, nents, i) dma_sync_sg_for_device()
136 dma_sync_single_for_device(dev, sg_dma_address(sg), dma_sync_sg_for_device()
137 sg->length, dir); dma_sync_sg_for_device()
/linux-4.1.27/samples/kfifo/
dma-example.c
28 struct scatterlist sg[10]; example_init() local
64 sg_init_table(sg, ARRAY_SIZE(sg)); example_init()
65 nents = kfifo_dma_in_prepare(&fifo, sg, ARRAY_SIZE(sg), FIFO_SIZE); example_init()
77 "sg[%d] -> " example_init()
79 i, sg[i].page_link, sg[i].offset, sg[i].length); example_init()
81 if (sg_is_last(&sg[i])) example_init()
95 nents = kfifo_dma_out_prepare(&fifo, sg, ARRAY_SIZE(sg), 8); example_init()
106 "sg[%d] -> " example_init()
108 i, sg[i].page_link, sg[i].offset, sg[i].length); example_init()
110 if (sg_is_last(&sg[i])) example_init()
/linux-4.1.27/arch/arm/mach-ks8695/
Makefile
15 obj-$(CONFIG_MACH_LITE300) += board-sg.o
16 obj-$(CONFIG_MACH_SG310) += board-sg.o
17 obj-$(CONFIG_MACH_SE4200) += board-sg.o
/linux-4.1.27/arch/nios2/mm/
dma-mapping.c
59 int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, dma_map_sg() argument
66 for_each_sg(sg, sg, nents, i) { for_each_sg()
69 addr = sg_virt(sg); for_each_sg()
71 __dma_sync_for_device(addr, sg->length, direction); for_each_sg()
72 sg->dma_address = sg_phys(sg); for_each_sg()
104 void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, dma_unmap_sg() argument
115 for_each_sg(sg, sg, nhwentries, i) { for_each_sg()
116 addr = sg_virt(sg); for_each_sg()
118 __dma_sync_for_cpu(addr, sg->length, direction); for_each_sg()
161 void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, dma_sync_sg_for_cpu() argument
169 for_each_sg(sg, sg, nelems, i) dma_sync_sg_for_cpu()
170 __dma_sync_for_cpu(sg_virt(sg), sg->length, direction); dma_sync_sg_for_cpu()
174 void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, dma_sync_sg_for_device() argument
182 for_each_sg(sg, sg, nelems, i) dma_sync_sg_for_device()
183 __dma_sync_for_device(sg_virt(sg), sg->length, direction); dma_sync_sg_for_device()
/linux-4.1.27/arch/metag/include/asm/
dma-mapping.h
51 struct scatterlist *sg; dma_map_sg() local
57 for_each_sg(sglist, sg, nents, i) { for_each_sg()
58 BUG_ON(!sg_page(sg)); for_each_sg()
60 sg->dma_address = sg_phys(sg); for_each_sg()
61 dma_sync_for_device(sg_virt(sg), sg->length, direction); for_each_sg()
90 struct scatterlist *sg; dma_unmap_sg() local
96 for_each_sg(sglist, sg, nhwentries, i) { for_each_sg()
97 BUG_ON(!sg_page(sg)); for_each_sg()
99 sg->dma_address = sg_phys(sg); for_each_sg()
100 dma_sync_for_cpu(sg_virt(sg), sg->length, direction); for_each_sg()
137 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, dma_sync_sg_for_cpu() argument
141 for (i = 0; i < nelems; i++, sg++) dma_sync_sg_for_cpu()
142 dma_sync_for_cpu(sg_virt(sg), sg->length, direction); dma_sync_sg_for_cpu()
146 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, dma_sync_sg_for_device() argument
150 for (i = 0; i < nelems; i++, sg++) dma_sync_sg_for_device()
151 dma_sync_for_device(sg_virt(sg), sg->length, direction); dma_sync_sg_for_device()
/linux-4.1.27/lib/
scatterlist.c
17 * @sg: The current sg entry
20 * Usually the next entry will be @sg@ + 1, but if this sg element is part
25 struct scatterlist *sg_next(struct scatterlist *sg) sg_next() argument
28 BUG_ON(sg->sg_magic != SG_MAGIC); sg_next()
30 if (sg_is_last(sg)) sg_next()
33 sg++; sg_next()
34 if (unlikely(sg_is_chain(sg))) sg_next()
35 sg = sg_chain_ptr(sg); sg_next()
37 return sg; sg_next()
43 * @sg: The scatterlist
46 * Allows to know how many entries are in sg, taking into acount
50 int sg_nents(struct scatterlist *sg) sg_nents() argument
53 for (nents = 0; sg; sg = sg_next(sg)) sg_nents()
79 struct scatterlist *sg, *ret = NULL; sg_last()
82 for_each_sg(sgl, sg, nents, i) sg_last()
83 ret = sg; sg_last()
100 * If this is part of a chained sg table, sg_mark_end() should be
119 * sg_init_one - Initialize a single entry sg list
120 * @sg: SG entry
125 void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen) sg_init_one() argument
127 sg_init_table(sg, 1); sg_init_one()
128 sg_set_buf(sg, buf, buflen); sg_init_one()
155 static void sg_kfree(struct scatterlist *sg, unsigned int nents) sg_kfree() argument
158 kmemleak_free(sg); sg_kfree()
159 free_page((unsigned long) sg); sg_kfree()
161 kfree(sg); sg_kfree()
165 * __sg_free_table - Free a previously mapped sg table
166 * @table: The sg table header to use
172 * Free an sg table previously allocated and setup with
192 * then assign 'next' to the sg table after the current one. __sg_free_table()
218 * sg_free_table - Free a previously allocated sg table
219 * @table: The mapped sg table header
229 * __sg_alloc_table - Allocate and initialize an sg table with given allocator
230 * @table: The sg table header to use
231 * @nents: Number of entries in sg list
251 struct scatterlist *sg, *prv; __sg_alloc_table() local
277 sg = first_chunk; __sg_alloc_table()
280 sg = alloc_fn(alloc_size, gfp_mask); __sg_alloc_table()
282 if (unlikely(!sg)) { __sg_alloc_table()
295 sg_init_table(sg, alloc_size); __sg_alloc_table()
299 * If this is the first mapping, assign the sg table header. __sg_alloc_table()
303 sg_chain(prv, max_ents, sg); __sg_alloc_table()
305 table->sgl = sg; __sg_alloc_table()
311 sg_mark_end(&sg[sg_size - 1]); __sg_alloc_table()
313 prv = sg; __sg_alloc_table()
321 * sg_alloc_table - Allocate and initialize an sg table
322 * @table: The sg table header to use
323 * @nents: Number of entries in sg list
327 * Allocate and initialize an sg table. If @nents@ is larger than
328 * SG_MAX_SINGLE_ALLOC a chained sg table will be setup.
345 * sg_alloc_table_from_pages - Allocate and initialize an sg table from
347 * @sgt: The sg table header to use
355 * Allocate and initialize an sg table from a list of pages. Contiguous
358 * specified by the page array. The returned sg table is released by
415 piter->sg = sglist; __sg_page_iter_start()
420 static int sg_page_count(struct scatterlist *sg) sg_page_count() argument
422 return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT; sg_page_count()
427 if (!piter->__nents || !piter->sg) __sg_page_iter_next()
433 while (piter->sg_pgoffset >= sg_page_count(piter->sg)) { __sg_page_iter_next()
434 piter->sg_pgoffset -= sg_page_count(piter->sg); __sg_page_iter_next()
435 piter->sg = sg_next(piter->sg); __sg_page_iter_next()
436 if (!--piter->__nents || !piter->sg) __sg_page_iter_next()
445 * sg_miter_start - start mapping iteration over a sg list
446 * @miter: sg mapping iter to be started
447 * @sgl: sg list to iterate over
448 * @nents: number of sg entries
470 struct scatterlist *sg; sg_miter_get_next_page() local
476 sg = miter->piter.sg; sg_miter_get_next_page()
479 miter->__offset = pgoffset ? 0 : sg->offset; sg_miter_get_next_page()
480 miter->__remaining = sg->offset + sg->length - sg_miter_get_next_page()
491 * @miter: sg mapping iter to be skipped
504 * true if @miter contains the valid mapping. false if end of sg
529 * @miter: sg mapping iter to proceed
541 * true if @miter contains the next mapping. false if end of sg
569 * @miter: sg mapping iter to be stopped
615 * @to_buffer: transfer direction (true == from an sg list to a
616 * buffer, false == from a buffer to an sg list
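The mapping iterator shown above (sg_miter_start/next/stop) hides kmap and page-boundary handling from the caller. A hedged, editor-added sketch that zeroes an already populated list, along the lines of the ion_heap user further down (process context assumed, so no SG_MITER_ATOMIC):

	#include <linux/scatterlist.h>
	#include <linux/string.h>

	static void example_sg_zero(struct scatterlist *sgl, unsigned int nents)
	{
		struct sg_mapping_iter miter;

		sg_miter_start(&miter, sgl, nents, SG_MITER_TO_SG);
		while (sg_miter_next(&miter))		/* maps one chunk at a time */
			memset(miter.addr, 0, miter.length);
		sg_miter_stop(&miter);			/* flushes and unmaps the last chunk */
	}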
swiotlb.c
875 struct scatterlist *sg; swiotlb_map_sg_attrs() local
880 for_each_sg(sgl, sg, nelems, i) { for_each_sg()
881 phys_addr_t paddr = sg_phys(sg); for_each_sg()
885 !dma_capable(hwdev, dev_addr, sg->length)) { for_each_sg()
886 phys_addr_t map = map_single(hwdev, sg_phys(sg), for_each_sg()
887 sg->length, dir); for_each_sg()
891 swiotlb_full(hwdev, sg->length, dir, 0); for_each_sg()
897 sg->dma_address = phys_to_dma(hwdev, map); for_each_sg()
899 sg->dma_address = dev_addr; for_each_sg()
900 sg_dma_len(sg) = sg->length; for_each_sg()
922 struct scatterlist *sg; swiotlb_unmap_sg_attrs() local
927 for_each_sg(sgl, sg, nelems, i) swiotlb_unmap_sg_attrs()
928 unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir); swiotlb_unmap_sg_attrs()
953 struct scatterlist *sg; swiotlb_sync_sg() local
956 for_each_sg(sgl, sg, nelems, i) swiotlb_sync_sg()
957 swiotlb_sync_single(hwdev, sg->dma_address, swiotlb_sync_sg()
958 sg_dma_len(sg), dir, target); swiotlb_sync_sg()
962 swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg, swiotlb_sync_sg_for_cpu() argument
965 swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU); swiotlb_sync_sg_for_cpu()
970 swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, swiotlb_sync_sg_for_device() argument
973 swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE); swiotlb_sync_sg_for_device()
/linux-4.1.27/include/crypto/
scatterwalk.h
37 struct scatterlist *sg, scatterwalk_crypto_chain()
41 head->length += sg->length; scatterwalk_crypto_chain()
42 sg = sg_next(sg); scatterwalk_crypto_chain()
45 if (sg) scatterwalk_crypto_chain()
46 scatterwalk_sg_chain(head, num, sg); scatterwalk_crypto_chain()
54 return !(((sg_page(walk_in->sg) - sg_page(walk_out->sg)) << PAGE_SHIFT) + scatterwalk_samebuf()
60 unsigned int len = walk->sg->offset + walk->sg->length - walk->offset; scatterwalk_pagelen()
86 return sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT); scatterwalk_page()
94 void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg);
100 void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
103 int scatterwalk_bytes_sglen(struct scatterlist *sg, int num_bytes);
36 scatterwalk_crypto_chain(struct scatterlist *head, struct scatterlist *sg, int chain, int num) scatterwalk_crypto_chain() argument
/linux-4.1.27/drivers/gpu/drm/msm/
msm_iommu.c
51 struct scatterlist *sg; msm_iommu_map() local
59 for_each_sg(sgt->sgl, sg, sgt->nents, i) { msm_iommu_map()
60 u32 pa = sg_phys(sg) - sg->offset; msm_iommu_map()
61 size_t bytes = sg->length + sg->offset; msm_iommu_map()
77 for_each_sg(sgt->sgl, sg, i, j) { msm_iommu_map()
78 size_t bytes = sg->length + sg->offset; msm_iommu_map()
90 struct scatterlist *sg; msm_iommu_unmap() local
94 for_each_sg(sgt->sgl, sg, sgt->nents, i) { msm_iommu_unmap()
95 size_t bytes = sg->length + sg->offset; msm_iommu_unmap()
msm_gem_prime.c
54 struct dma_buf_attachment *attach, struct sg_table *sg) msm_gem_prime_import_sg_table()
56 return msm_gem_import(dev, attach->dmabuf->size, sg); msm_gem_prime_import_sg_table()
53 msm_gem_prime_import_sg_table(struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sg) msm_gem_prime_import_sg_table() argument
/linux-4.1.27/arch/blackfin/kernel/
dma-mapping.c
119 struct scatterlist *sg; dma_map_sg() local
122 for_each_sg(sg_list, sg, nents, i) { for_each_sg()
123 sg->dma_address = (dma_addr_t) sg_virt(sg); for_each_sg()
124 __dma_sync(sg_dma_address(sg), sg_dma_len(sg), direction); for_each_sg()
134 struct scatterlist *sg; dma_sync_sg_for_device() local
137 for_each_sg(sg_list, sg, nelems, i) { for_each_sg()
138 sg->dma_address = (dma_addr_t) sg_virt(sg); for_each_sg()
139 __dma_sync(sg_dma_address(sg), sg_dma_len(sg), direction); for_each_sg()
/linux-4.1.27/arch/tile/kernel/
pci-dma.c
207 struct scatterlist *sg; tile_dma_map_sg() local
214 for_each_sg(sglist, sg, nents, i) { for_each_sg()
215 sg->dma_address = sg_phys(sg); for_each_sg()
216 __dma_prep_pa_range(sg->dma_address, sg->length, direction); for_each_sg()
218 sg->dma_length = sg->length; for_each_sg()
229 struct scatterlist *sg; tile_dma_unmap_sg() local
233 for_each_sg(sglist, sg, nents, i) { for_each_sg()
234 sg->dma_address = sg_phys(sg); for_each_sg()
235 __dma_complete_pa_range(sg->dma_address, sg->length, for_each_sg()
284 struct scatterlist *sg; tile_dma_sync_sg_for_cpu() local
290 for_each_sg(sglist, sg, nelems, i) { for_each_sg()
291 dma_sync_single_for_cpu(dev, sg->dma_address, for_each_sg()
292 sg_dma_len(sg), direction); for_each_sg()
300 struct scatterlist *sg; tile_dma_sync_sg_for_device() local
306 for_each_sg(sglist, sg, nelems, i) { for_each_sg()
307 dma_sync_single_for_device(dev, sg->dma_address, for_each_sg()
308 sg_dma_len(sg), direction); for_each_sg()
380 struct scatterlist *sg; tile_pci_dma_map_sg() local
387 for_each_sg(sglist, sg, nents, i) { for_each_sg()
388 sg->dma_address = sg_phys(sg); for_each_sg()
389 __dma_prep_pa_range(sg->dma_address, sg->length, direction); for_each_sg()
391 sg->dma_address = sg->dma_address + get_dma_offset(dev); for_each_sg()
393 sg->dma_length = sg->length; for_each_sg()
405 struct scatterlist *sg; tile_pci_dma_unmap_sg() local
409 for_each_sg(sglist, sg, nents, i) { for_each_sg()
410 sg->dma_address = sg_phys(sg); for_each_sg()
411 __dma_complete_pa_range(sg->dma_address, sg->length, for_each_sg()
470 struct scatterlist *sg; tile_pci_dma_sync_sg_for_cpu() local
476 for_each_sg(sglist, sg, nelems, i) { for_each_sg()
477 dma_sync_single_for_cpu(dev, sg->dma_address, for_each_sg()
478 sg_dma_len(sg), direction); for_each_sg()
487 struct scatterlist *sg; tile_pci_dma_sync_sg_for_device() local
493 for_each_sg(sglist, sg, nelems, i) { for_each_sg()
494 dma_sync_single_for_device(dev, sg->dma_address, for_each_sg()
495 sg_dma_len(sg), direction); for_each_sg()
/linux-4.1.27/drivers/scsi/
scsi_lib_dma.c
16 * scsi_dma_map - perform DMA mapping against command's sg lists
19 * Returns the number of sg lists actually used, zero if the sg lists
39 * scsi_dma_unmap - unmap command's sg lists mapped by scsi_dma_map
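As the comment above says, a SCSI LLD may only use the segment count scsi_dma_map() returns. A minimal, hedged, editor-added sketch of the usual pattern (the per-segment hardware programming is only indicated by a comment):

	#include <scsi/scsi_cmnd.h>

	static int example_build_sg_list(struct scsi_cmnd *cmd)
	{
		struct scatterlist *sg;
		int nseg, i;

		nseg = scsi_dma_map(cmd);
		if (nseg < 0)
			return nseg;		/* mapping failed */

		scsi_for_each_sg(cmd, sg, nseg, i) {
			/* program sg_dma_address(sg) / sg_dma_len(sg) into the HBA */
		}

		scsi_dma_unmap(cmd);		/* normally deferred to command completion */
		return 0;
	}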
libiscsi_tcp.c
89 * @sg: scatterlist
90 * @offset: byte offset into that sg entry
93 * data is copied to the indicated sg entry, at the given
98 struct scatterlist *sg, unsigned int offset) iscsi_tcp_segment_init_sg()
100 segment->sg = sg; iscsi_tcp_segment_init_sg()
102 segment->size = min(sg->length - offset, iscsi_tcp_segment_init_sg()
118 struct scatterlist *sg; iscsi_tcp_segment_map() local
120 if (segment->data != NULL || !segment->sg) iscsi_tcp_segment_map()
123 sg = segment->sg; iscsi_tcp_segment_map()
125 BUG_ON(sg->length == 0); iscsi_tcp_segment_map()
133 if (page_count(sg_page(sg)) >= 1 && !recv) iscsi_tcp_segment_map()
138 segment->sg_mapped = kmap_atomic(sg_page(sg)); iscsi_tcp_segment_map()
142 segment->sg_mapped = kmap(sg_page(sg)); iscsi_tcp_segment_map()
145 segment->data = segment->sg_mapped + sg->offset + segment->sg_offset; iscsi_tcp_segment_map()
154 kunmap(sg_page(segment->sg)); iscsi_tcp_segment_unmap()
172 segment->sg = NULL; iscsi_tcp_segment_splice_digest()
196 struct scatterlist sg; iscsi_tcp_segment_done() local
210 sg_init_table(&sg, 1); iscsi_tcp_segment_done()
211 sg_set_page(&sg, sg_page(segment->sg), copied, iscsi_tcp_segment_done()
213 segment->sg->offset); iscsi_tcp_segment_done()
215 sg_init_one(&sg, segment->data + segment->copied, iscsi_tcp_segment_done()
217 crypto_hash_update(segment->hash, &sg, copied); iscsi_tcp_segment_done()
238 iscsi_tcp_segment_init_sg(segment, sg_next(segment->sg), iscsi_tcp_segment_done()
316 struct scatterlist sg; iscsi_tcp_dgst_header() local
318 sg_init_one(&sg, hdr, hdrlen); iscsi_tcp_dgst_header()
319 crypto_hash_digest(hash, &sg, hdrlen, digest); iscsi_tcp_dgst_header()
373 struct scatterlist *sg; iscsi_segment_seek_sg() local
377 for_each_sg(sg_list, sg, sg_count, i) { for_each_sg()
378 if (offset < sg->length) { for_each_sg()
379 iscsi_tcp_segment_init_sg(segment, sg, offset); for_each_sg()
382 offset -= sg->length; for_each_sg()
97 iscsi_tcp_segment_init_sg(struct iscsi_segment *segment, struct scatterlist *sg, unsigned int offset) iscsi_tcp_segment_init_sg() argument
jazz_esp.c
47 static int jazz_esp_map_sg(struct esp *esp, struct scatterlist *sg, jazz_esp_map_sg() argument
50 return dma_map_sg(esp->dev, sg, num_sg, dir); jazz_esp_map_sg()
59 static void jazz_esp_unmap_sg(struct esp *esp, struct scatterlist *sg, jazz_esp_unmap_sg() argument
62 dma_unmap_sg(esp->dev, sg, num_sg, dir); jazz_esp_unmap_sg()
/linux-4.1.27/arch/xtensa/include/asm/
dma-mapping.h
55 dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, dma_map_sg() argument
62 for (i = 0; i < nents; i++, sg++ ) { dma_map_sg()
63 BUG_ON(!sg_page(sg)); dma_map_sg()
65 sg->dma_address = sg_phys(sg); dma_map_sg()
66 consistent_sync(sg_virt(sg), sg->length, direction); dma_map_sg()
89 dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, dma_unmap_sg() argument
127 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, dma_sync_sg_for_cpu() argument
131 for (i = 0; i < nelems; i++, sg++) dma_sync_sg_for_cpu()
132 consistent_sync(sg_virt(sg), sg->length, dir); dma_sync_sg_for_cpu()
136 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, dma_sync_sg_for_device() argument
140 for (i = 0; i < nelems; i++, sg++) dma_sync_sg_for_device()
141 consistent_sync(sg_virt(sg), sg->length, dir); dma_sync_sg_for_device()
/linux-4.1.27/drivers/gpu/drm/nouveau/
nouveau_sgdma.c
32 if (ttm->sg) { nv04_sgdma_bind()
33 node->sg = ttm->sg; nv04_sgdma_bind()
36 node->sg = NULL; nv04_sgdma_bind()
67 if (ttm->sg) { nv50_sgdma_bind()
68 node->sg = ttm->sg; nv50_sgdma_bind()
71 node->sg = NULL; nv50_sgdma_bind()
/linux-4.1.27/net/mac80211/
aes_gmac.c
27 struct scatterlist sg[3], ct[1]; ieee80211_aes_gmac() local
40 sg_init_table(sg, 3); ieee80211_aes_gmac()
41 sg_set_buf(&sg[0], aad, AAD_LEN); ieee80211_aes_gmac()
42 sg_set_buf(&sg[1], data, data_len - GMAC_MIC_LEN); ieee80211_aes_gmac()
43 sg_set_buf(&sg[2], zero, GMAC_MIC_LEN); ieee80211_aes_gmac()
53 aead_request_set_assoc(aead_req, sg, AAD_LEN + data_len); ieee80211_aes_gmac()
/linux-4.1.27/arch/microblaze/kernel/
dma.c
58 struct scatterlist *sg; dma_direct_map_sg() local
62 for_each_sg(sgl, sg, nents, i) { for_each_sg()
63 sg->dma_address = sg_phys(sg); for_each_sg()
64 __dma_sync(page_to_phys(sg_page(sg)) + sg->offset, for_each_sg()
65 sg->length, direction); for_each_sg()
134 struct scatterlist *sg; dma_direct_sync_sg_for_cpu() local
139 for_each_sg(sgl, sg, nents, i) dma_direct_sync_sg_for_cpu()
140 __dma_sync(sg->dma_address, sg->length, direction); dma_direct_sync_sg_for_cpu()
148 struct scatterlist *sg; dma_direct_sync_sg_for_device() local
153 for_each_sg(sgl, sg, nents, i) dma_direct_sync_sg_for_device()
154 __dma_sync(sg->dma_address, sg->length, direction); dma_direct_sync_sg_for_device()
/linux-4.1.27/arch/m68k/kernel/
dma.c
123 void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents, dma_sync_sg_for_device() argument
128 for (i = 0; i < nents; sg++, i++) dma_sync_sg_for_device()
129 dma_sync_single_for_device(dev, sg->dma_address, sg->length, dir); dma_sync_sg_for_device()
154 int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, dma_map_sg() argument
159 for (i = 0; i < nents; sg++, i++) { dma_map_sg()
160 sg->dma_address = sg_phys(sg); dma_map_sg()
161 dma_sync_single_for_device(dev, sg->dma_address, sg->length, dir); dma_map_sg()
/linux-4.1.27/drivers/staging/android/ion/
ion_chunk_heap.c
44 struct scatterlist *sg; ion_chunk_heap_allocate() local
67 sg = table->sgl; ion_chunk_heap_allocate()
73 sg_set_page(sg, pfn_to_page(PFN_DOWN(paddr)), ion_chunk_heap_allocate()
75 sg = sg_next(sg); ion_chunk_heap_allocate()
82 sg = table->sgl; ion_chunk_heap_allocate()
84 gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)), ion_chunk_heap_allocate()
85 sg->length); ion_chunk_heap_allocate()
86 sg = sg_next(sg); ion_chunk_heap_allocate()
99 struct scatterlist *sg; ion_chunk_heap_free() local
111 for_each_sg(table->sgl, sg, table->nents, i) { ion_chunk_heap_free()
112 gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)), ion_chunk_heap_free()
113 sg->length); ion_chunk_heap_free()
ion_heap.c
31 struct scatterlist *sg; ion_heap_map_kernel() local
48 for_each_sg(table->sgl, sg, table->nents, i) { ion_heap_map_kernel()
49 int npages_this_entry = PAGE_ALIGN(sg->length) / PAGE_SIZE; ion_heap_map_kernel()
50 struct page *page = sg_page(sg); ion_heap_map_kernel()
77 struct scatterlist *sg; ion_heap_map_user() local
81 for_each_sg(table->sgl, sg, table->nents, i) { ion_heap_map_user()
82 struct page *page = sg_page(sg); ion_heap_map_user()
84 unsigned long len = sg->length; ion_heap_map_user()
86 if (offset >= sg->length) { ion_heap_map_user()
87 offset -= sg->length; ion_heap_map_user()
91 len = sg->length - offset; ion_heap_map_user()
156 struct scatterlist sg; ion_heap_pages_zero() local
158 sg_init_table(&sg, 1); ion_heap_pages_zero()
159 sg_set_page(&sg, page, size, 0); ion_heap_pages_zero()
160 return ion_heap_sglist_zero(&sg, 1, pgprot); ion_heap_pages_zero()
ion_system_heap.c
129 struct scatterlist *sg; ion_system_heap_allocate() local
160 sg = table->sgl; ion_system_heap_allocate()
162 sg_set_page(sg, page, PAGE_SIZE << compound_order(page), 0); ion_system_heap_allocate()
163 sg = sg_next(sg); ion_system_heap_allocate()
185 struct scatterlist *sg; ion_system_heap_free() local
193 for_each_sg(table->sgl, sg, table->nents, i) ion_system_heap_free()
194 free_buffer_page(sys_heap, buffer, sg_page(sg)); ion_system_heap_free()
/linux-4.1.27/drivers/gpu/drm/omapdrm/
omap_gem_dmabuf.c
29 struct sg_table *sg; omap_gem_map_dma_buf() local
33 sg = kzalloc(sizeof(*sg), GFP_KERNEL); omap_gem_map_dma_buf()
34 if (!sg) omap_gem_map_dma_buf()
44 ret = sg_alloc_table(sg, 1, GFP_KERNEL); omap_gem_map_dma_buf()
48 sg_init_table(sg->sgl, 1); omap_gem_map_dma_buf()
49 sg_dma_len(sg->sgl) = obj->size; omap_gem_map_dma_buf()
50 sg_set_page(sg->sgl, pfn_to_page(PFN_DOWN(paddr)), obj->size, 0); omap_gem_map_dma_buf()
51 sg_dma_address(sg->sgl) = paddr; omap_gem_map_dma_buf()
56 return sg; omap_gem_map_dma_buf()
58 kfree(sg); omap_gem_map_dma_buf()
63 struct sg_table *sg, enum dma_data_direction dir) omap_gem_unmap_dma_buf()
67 sg_free_table(sg); omap_gem_unmap_dma_buf()
68 kfree(sg); omap_gem_unmap_dma_buf()
62 omap_gem_unmap_dma_buf(struct dma_buf_attachment *attachment, struct sg_table *sg, enum dma_data_direction dir) omap_gem_unmap_dma_buf() argument
/linux-4.1.27/arch/sparc/mm/
iommu.c
238 static void iommu_get_scsi_sgl_gflush(struct device *dev, struct scatterlist *sg, int sz) iommu_get_scsi_sgl_gflush() argument
245 n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT; iommu_get_scsi_sgl_gflush()
246 sg->dma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset; iommu_get_scsi_sgl_gflush()
247 sg->dma_length = sg->length; iommu_get_scsi_sgl_gflush()
248 sg = sg_next(sg); iommu_get_scsi_sgl_gflush()
252 static void iommu_get_scsi_sgl_pflush(struct device *dev, struct scatterlist *sg, int sz) iommu_get_scsi_sgl_pflush() argument
260 n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT; iommu_get_scsi_sgl_pflush()
267 if ((page = (unsigned long) page_address(sg_page(sg))) != 0) { iommu_get_scsi_sgl_pflush()
277 sg->dma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset; iommu_get_scsi_sgl_pflush()
278 sg->dma_length = sg->length; iommu_get_scsi_sgl_pflush()
279 sg = sg_next(sg); iommu_get_scsi_sgl_pflush()
309 static void iommu_release_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz) iommu_release_scsi_sgl() argument
316 n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT; iommu_release_scsi_sgl()
317 iommu_release_one(dev, sg->dma_address & PAGE_MASK, n); iommu_release_scsi_sgl()
318 sg->dma_address = 0x21212121; iommu_release_scsi_sgl()
319 sg = sg_next(sg); iommu_release_scsi_sgl()
io-unit.c
153 static void iounit_get_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz) iounit_get_scsi_sgl() argument
158 /* FIXME: Cache some resolved pages - often several sg entries are to the same page */ iounit_get_scsi_sgl()
162 sg->dma_address = iounit_get_area(iounit, (unsigned long) sg_virt(sg), sg->length); iounit_get_scsi_sgl()
163 sg->dma_length = sg->length; iounit_get_scsi_sgl()
164 sg = sg_next(sg); iounit_get_scsi_sgl()
183 static void iounit_release_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz) iounit_release_scsi_sgl() argument
192 len = ((sg->dma_address & ~PAGE_MASK) + sg->length + (PAGE_SIZE-1)) >> PAGE_SHIFT; iounit_release_scsi_sgl()
193 vaddr = (sg->dma_address - IOUNIT_DMA_BASE) >> PAGE_SHIFT; iounit_release_scsi_sgl()
197 sg = sg_next(sg); iounit_release_scsi_sgl()
/linux-4.1.27/arch/mips/mm/
dma-default.c
219 * A single sg entry may refer to multiple physically contiguous
265 static int mips_dma_map_sg(struct device *dev, struct scatterlist *sg, mips_dma_map_sg() argument
270 for (i = 0; i < nents; i++, sg++) { mips_dma_map_sg()
272 __dma_sync(sg_page(sg), sg->offset, sg->length, mips_dma_map_sg()
275 sg->dma_length = sg->length; mips_dma_map_sg()
277 sg->dma_address = plat_map_dma_mem_page(dev, sg_page(sg)) + mips_dma_map_sg()
278 sg->offset; mips_dma_map_sg()
294 static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sg, mips_dma_unmap_sg() argument
300 for (i = 0; i < nhwentries; i++, sg++) { mips_dma_unmap_sg()
303 __dma_sync(sg_page(sg), sg->offset, sg->length, mips_dma_unmap_sg()
305 plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction); mips_dma_unmap_sg()
327 struct scatterlist *sg, int nelems, enum dma_data_direction direction) mips_dma_sync_sg_for_cpu()
332 for (i = 0; i < nelems; i++, sg++) mips_dma_sync_sg_for_cpu()
333 __dma_sync(sg_page(sg), sg->offset, sg->length, mips_dma_sync_sg_for_cpu()
339 struct scatterlist *sg, int nelems, enum dma_data_direction direction) mips_dma_sync_sg_for_device()
344 for (i = 0; i < nelems; i++, sg++) mips_dma_sync_sg_for_device()
345 __dma_sync(sg_page(sg), sg->offset, sg->length, mips_dma_sync_sg_for_device()
326 mips_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction direction) mips_dma_sync_sg_for_cpu() argument
338 mips_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction direction) mips_dma_sync_sg_for_device() argument
/linux-4.1.27/drivers/mmc/host/
tmio_mmc_dma.c
49 struct scatterlist *sg = host->sg_ptr, *sg_tmp; tmio_mmc_start_dma_rx() local
57 for_each_sg(sg, sg_tmp, host->sg_len, i) { tmio_mmc_start_dma_rx()
66 if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE || tmio_mmc_start_dma_rx()
72 if (sg->length < TMIO_MMC_MIN_DMA_LEN) { tmio_mmc_start_dma_rx()
79 /* The only sg element can be unaligned, use our bounce buffer then */ tmio_mmc_start_dma_rx()
81 sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length); tmio_mmc_start_dma_rx()
83 sg = host->sg_ptr; tmio_mmc_start_dma_rx()
86 ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE); tmio_mmc_start_dma_rx()
88 desc = dmaengine_prep_slave_sg(chan, sg, ret, tmio_mmc_start_dma_rx()
119 dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__, tmio_mmc_start_dma_rx()
125 struct scatterlist *sg = host->sg_ptr, *sg_tmp; tmio_mmc_start_dma_tx() local
133 for_each_sg(sg, sg_tmp, host->sg_len, i) { tmio_mmc_start_dma_tx()
142 if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE || tmio_mmc_start_dma_tx()
148 if (sg->length < TMIO_MMC_MIN_DMA_LEN) { tmio_mmc_start_dma_tx()
155 /* The only sg element can be unaligned, use our bounce buffer then */ tmio_mmc_start_dma_tx()
158 void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags); tmio_mmc_start_dma_tx()
159 sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length); tmio_mmc_start_dma_tx()
161 tmio_mmc_kunmap_atomic(sg, &flags, sg_vaddr); tmio_mmc_start_dma_tx()
163 sg = host->sg_ptr; tmio_mmc_start_dma_tx()
166 ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE); tmio_mmc_start_dma_tx()
168 desc = dmaengine_prep_slave_sg(chan, sg, ret, tmio_mmc_start_dma_tx()
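The tmio_mmc_start_dma_rx()/_tx() hits show the decision a host driver typically makes before DMA: walk the request's scatterlist, and if a segment is misaligned or too short, either fall back to PIO or substitute a single bounce segment built with sg_init_one() before calling dma_map_sg() and dmaengine_prep_slave_sg(). A condensed sketch of the receive side; MIN_DMA_LEN, the bounce buffer, and the 4-byte alignment requirement are illustrative, not the driver's exact constraints.

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>

#define MIN_DMA_LEN 8   /* illustrative, in the spirit of TMIO_MMC_MIN_DMA_LEN */

static struct dma_async_tx_descriptor *
sketch_prep_rx(struct dma_chan *chan, struct scatterlist *sgl,
               unsigned int sg_len, struct scatterlist *bounce_sg,
               void *bounce_buf)
{
        struct scatterlist *sg = sgl, *tmp;
        bool aligned = true;
        unsigned int i;
        int nents;

        for_each_sg(sgl, tmp, sg_len, i)
                if (!IS_ALIGNED(tmp->offset, 4) || !IS_ALIGNED(tmp->length, 4))
                        aligned = false;

        if (sg->length < MIN_DMA_LEN)
                return NULL;            /* too small, let PIO handle it */

        if (!aligned) {
                if (sg_len > 1)
                        return NULL;    /* cannot bounce multiple segments */
                /* Substitute one aligned segment over the bounce buffer. */
                sg_init_one(bounce_sg, bounce_buf, sg->length);
                sg = bounce_sg;
                sg_len = 1;
        }

        nents = dma_map_sg(chan->device->dev, sg, sg_len, DMA_FROM_DEVICE);
        if (nents <= 0)
                return NULL;

        return dmaengine_prep_slave_sg(chan, sg, nents, DMA_DEV_TO_MEM,
                                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}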
usdhi6rol0.c
169 size_t offset; /* offset within a page, including sg->offset */
180 struct scatterlist *sg; /* current SG segment */ member in struct:usdhi6_host
311 struct scatterlist *sg) usdhi6_blk_bounce()
318 data->blksz, data->blocks, sg->offset); usdhi6_blk_bounce()
345 host->sg = data->sg; usdhi6_sg_prep()
347 host->offset = host->sg->offset; usdhi6_sg_prep()
354 struct scatterlist *sg = data->sg_len > 1 ? host->sg : data->sg; usdhi6_sg_map() local
355 size_t head = PAGE_SIZE - sg->offset; usdhi6_sg_map()
359 if (WARN(sg_dma_len(sg) % data->blksz, usdhi6_sg_map()
361 sg_dma_len(sg), data->blksz)) usdhi6_sg_map()
364 host->pg.page = sg_page(sg); usdhi6_sg_map()
366 host->offset = sg->offset; usdhi6_sg_map()
379 usdhi6_blk_bounce(host, sg); usdhi6_sg_map()
385 sg->offset, host->mrq->cmd->opcode, host->mrq); usdhi6_sg_map()
398 struct scatterlist *sg = data->sg_len > 1 ? usdhi6_sg_unmap() local
399 host->sg : data->sg; usdhi6_sg_unmap()
414 if (!force && sg_dma_len(sg) + sg->offset > usdhi6_sg_unmap()
460 total = host->sg->offset + sg_dma_len(host->sg); usdhi6_sg_advance()
469 usdhi6_blk_bounce(host, host->sg); usdhi6_sg_advance()
483 struct scatterlist *next = sg_next(host->sg); usdhi6_sg_advance()
489 host->sg = next; usdhi6_sg_advance()
502 host->pg.page = nth_page(sg_page(host->sg), host->page_idx); usdhi6_sg_advance()
539 dma_unmap_sg(host->chan_rx->device->dev, data->sg, usdhi6_dma_stop_unmap()
542 dma_unmap_sg(host->chan_tx->device->dev, data->sg, usdhi6_dma_stop_unmap()
566 struct scatterlist *sg = data->sg; usdhi6_dma_setup() local
583 ret = dma_map_sg(chan->device->dev, sg, data->sg_len, data_dir); usdhi6_dma_setup()
586 desc = dmaengine_prep_slave_sg(chan, sg, ret, dir, usdhi6_dma_setup()
1042 data->sg->offset % 4)) usdhi6_rq_start()
1045 data->blksz, data->blocks, data->sg->offset); usdhi6_rq_start()
1060 data->sg->offset, mrq->stop ? " + stop" : ""); usdhi6_rq_start()
1091 host->sg = NULL; usdhi6_request()
1672 sg_dma_len(host->sg), host->sg->offset); usdhi6_timeout_work()
310 usdhi6_blk_bounce(struct usdhi6_host *host, struct scatterlist *sg) usdhi6_blk_bounce() argument
mxcmmc.c
291 struct scatterlist *sg; mxcmci_swap_buffers() local
294 for_each_sg(data->sg, sg, data->sg_len, i) mxcmci_swap_buffers()
295 buffer_swap32(sg_virt(sg), sg->length); mxcmci_swap_buffers()
306 struct scatterlist *sg; mxcmci_setup_data() local
323 for_each_sg(data->sg, sg, data->sg_len, i) { mxcmci_setup_data()
324 if (sg->offset & 3 || sg->length & 3 || sg->length < 512) { mxcmci_setup_data()
340 nents = dma_map_sg(host->dma->device->dev, data->sg, mxcmci_setup_data()
346 data->sg, data->sg_len, slave_dirn, mxcmci_setup_data()
350 dma_unmap_sg(host->dma->device->dev, data->sg, data->sg_len, mxcmci_setup_data()
460 dma_unmap_sg(host->dma->device->dev, data->sg, data->sg_len, mxcmci_finish_data()
618 struct scatterlist *sg; mxcmci_transfer_data() local
625 for_each_sg(data->sg, sg, data->sg_len, i) { mxcmci_transfer_data()
626 stat = mxcmci_pull(host, sg_virt(sg), sg->length); mxcmci_transfer_data()
629 host->datasize += sg->length; mxcmci_transfer_data()
632 for_each_sg(data->sg, sg, data->sg_len, i) { mxcmci_transfer_data()
633 stat = mxcmci_push(host, sg_virt(sg), sg->length); mxcmci_transfer_data()
636 host->datasize += sg->length; mxcmci_transfer_data()
atmel-mci.c
104 * @sg: Scatterlist entry currently being processed by PIO or PDC code.
185 struct scatterlist *sg; member in struct:atmel_mci
734 * Update host->data_size and host->sg.
759 buf_size = sg_dma_len(host->sg); atmci_pdc_set_single_buf()
760 atmci_writel(host, pointer_reg, sg_dma_address(host->sg)); atmci_pdc_set_single_buf()
775 atmci_writel(host, counter_reg, sg_dma_len(host->sg) / 4); atmci_pdc_set_single_buf()
776 host->data_size -= sg_dma_len(host->sg); atmci_pdc_set_single_buf()
778 host->sg = sg_next(host->sg); atmci_pdc_set_single_buf()
795 * Unmap sg lists, called when transfer is finished.
803 data->sg, data->sg_len, atmci_pdc_cleanup()
825 sg_copy_from_buffer(host->data->sg, host->data->sg_len, atmci_pdc_complete()
842 data->sg, data->sg_len, atmci_dma_cleanup()
907 host->sg = data->sg; atmci_prepare_data()
937 * necessary before the High Speed MCI version. It also maps sg and configures atmci_prepare_data_pdc()
951 host->sg = data->sg; atmci_prepare_data_pdc()
973 sg_len = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len, dir); atmci_prepare_data_pdc()
977 sg_copy_to_buffer(host->data->sg, host->data->sg_len, atmci_prepare_data_pdc()
996 struct scatterlist *sg; atmci_prepare_data_dma() local
1007 host->sg = NULL; atmci_prepare_data_dma()
1022 for_each_sg(data->sg, sg, data->sg_len, i) { atmci_prepare_data_dma()
1023 if (sg->offset & 3 || sg->length & 3) atmci_prepare_data_dma()
1049 sglen = dma_map_sg(chan->device->dev, data->sg, atmci_prepare_data_dma()
1054 data->sg, sglen, slave_dirn, atmci_prepare_data_dma()
1065 dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, direction); atmci_prepare_data_dma()
1856 struct scatterlist *sg = host->sg; atmci_read_data_pio() local
1857 void *buf = sg_virt(sg); atmci_read_data_pio()
1866 if (likely(offset + 4 <= sg->length)) { atmci_read_data_pio()
1872 if (offset == sg->length) { atmci_read_data_pio()
1873 flush_dcache_page(sg_page(sg)); atmci_read_data_pio()
1874 host->sg = sg = sg_next(sg); atmci_read_data_pio()
1876 if (!sg || !host->sg_len) atmci_read_data_pio()
1880 buf = sg_virt(sg); atmci_read_data_pio()
1883 unsigned int remaining = sg->length - offset; atmci_read_data_pio()
1887 flush_dcache_page(sg_page(sg)); atmci_read_data_pio()
1888 host->sg = sg = sg_next(sg); atmci_read_data_pio()
1890 if (!sg || !host->sg_len) atmci_read_data_pio()
1894 buf = sg_virt(sg); atmci_read_data_pio()
1924 struct scatterlist *sg = host->sg; atmci_write_data_pio() local
1925 void *buf = sg_virt(sg); atmci_write_data_pio()
1933 if (likely(offset + 4 <= sg->length)) { atmci_write_data_pio()
1939 if (offset == sg->length) { atmci_write_data_pio()
1940 host->sg = sg = sg_next(sg); atmci_write_data_pio()
1942 if (!sg || !host->sg_len) atmci_write_data_pio()
1946 buf = sg_virt(sg); atmci_write_data_pio()
1949 unsigned int remaining = sg->length - offset; atmci_write_data_pio()
1955 host->sg = sg = sg_next(sg); atmci_write_data_pio()
1957 if (!sg || !host->sg_len) { atmci_write_data_pio()
1963 buf = sg_virt(sg); atmci_write_data_pio()
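atmci_read_data_pio()/atmci_write_data_pio() above both walk the scatterlist one 32-bit word at a time, switching buffers with sg_next() whenever an element is exhausted. A stripped-down sketch of such a drain loop, assuming every element's length is a multiple of 4 and the pages are directly mappable; read_fifo_word() is an invented stand-in for the MMIO FIFO read.

#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

static u32 read_fifo_word(void)
{
        return 0;       /* stands in for an MMIO read of the data FIFO */
}

static void sketch_read_pio(struct scatterlist *sg, size_t bytes)
{
        void *buf = sg_virt(sg);
        unsigned int offset = 0;

        while (sg && bytes >= 4) {
                u32 value = read_fifo_word();

                memcpy(buf + offset, &value, 4);
                offset += 4;
                bytes -= 4;

                if (offset == sg->length) {
                        /* element drained: flush it and move to the next one */
                        flush_dcache_page(sg_page(sg));
                        sg = sg_next(sg);
                        if (!sg)
                                break;
                        buf = sg_virt(sg);
                        offset = 0;
                }
        }
}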
tifm_sd.c
170 struct scatterlist *sg = r_data->sg; tifm_sd_transfer_data() local
178 cnt = sg[host->sg_pos].length - host->block_pos; tifm_sd_transfer_data()
191 cnt = sg[host->sg_pos].length; tifm_sd_transfer_data()
193 off = sg[host->sg_pos].offset + host->block_pos; tifm_sd_transfer_data()
195 pg = nth_page(sg_page(&sg[host->sg_pos]), off >> PAGE_SHIFT); tifm_sd_transfer_data()
226 struct scatterlist *sg = r_data->sg; tifm_sd_bounce_block() local
234 cnt = sg[host->sg_pos].length - host->block_pos; tifm_sd_bounce_block()
240 cnt = sg[host->sg_pos].length; tifm_sd_bounce_block()
242 off = sg[host->sg_pos].offset + host->block_pos; tifm_sd_bounce_block()
244 pg = nth_page(sg_page(&sg[host->sg_pos]), off >> PAGE_SHIFT); tifm_sd_bounce_block()
268 struct scatterlist *sg = NULL; tifm_sd_set_dma_data() local
283 dma_len = sg_dma_len(&r_data->sg[host->sg_pos]) - host->block_pos; tifm_sd_set_dma_data()
289 dma_len = sg_dma_len(&r_data->sg[host->sg_pos]); tifm_sd_set_dma_data()
303 sg = &r_data->sg[host->sg_pos]; tifm_sd_set_dma_data()
312 sg = &host->bounce_buf; tifm_sd_set_dma_data()
319 writel(sg_dma_address(sg) + dma_off, sock->addr + SOCK_DMA_ADDRESS); tifm_sd_set_dma_data()
680 host->sg_len = tifm_map_sg(sock, r_data->sg, tifm_sd_request()
767 tifm_unmap_sg(sock, r_data->sg, r_data->sg_len, tifm_sd_end_cmd()
tmio_mmc.h
118 static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg, tmio_mmc_kmap_atomic() argument
122 return kmap_atomic(sg_page(sg)) + sg->offset; tmio_mmc_kmap_atomic()
125 static inline void tmio_mmc_kunmap_atomic(struct scatterlist *sg, tmio_mmc_kunmap_atomic() argument
128 kunmap_atomic(virt - sg->offset); tmio_mmc_kunmap_atomic()
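These two helpers are the usual recipe for touching an sg element's payload from the CPU when its page may live in highmem: kmap_atomic() the page, add sg->offset, and hand the page-aligned address back to kunmap_atomic(). A small usage sketch, assuming (as the tmio helpers do) that the element does not cross a page boundary.

#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

static void sketch_zero_sg_element(struct scatterlist *sg)
{
        /* Map the backing page, then step to the element's data. */
        void *vaddr = kmap_atomic(sg_page(sg)) + sg->offset;

        memset(vaddr, 0, sg->length);

        /* kunmap_atomic() expects the address kmap_atomic() returned. */
        kunmap_atomic(vaddr - sg->offset);
}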
/linux-4.1.27/drivers/scsi/arm/
scsi.h
23 * copy_SCp_to_sg() Assumes contiguous allocation at @sg of at-most @max
25 * (possibly chained) sg-list
27 static inline int copy_SCp_to_sg(struct scatterlist *sg, struct scsi_pointer *SCp, int max) copy_SCp_to_sg() argument
36 sg_set_buf(sg, SCp->ptr, SCp->this_residual); copy_SCp_to_sg()
43 *(++sg) = *src_sg; copy_SCp_to_sg()
44 sg_mark_end(sg); copy_SCp_to_sg()
100 struct scatterlist *sg; init_SCp() local
103 scsi_for_each_sg(SCpnt, sg, sg_count, i) init_SCp()
104 len += sg->length; init_SCp()
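The init_SCp() hit sums sg->length over the command's table with scsi_for_each_sg(). The same idiom as a self-contained helper; scsi_sg_count() is the standard accessor for the mapped segment count.

#include <linux/scatterlist.h>
#include <scsi/scsi_cmnd.h>

static unsigned int sketch_scsi_data_len(struct scsi_cmnd *cmd)
{
        struct scatterlist *sg;
        unsigned int len = 0;
        int i;

        scsi_for_each_sg(cmd, sg, scsi_sg_count(cmd), i)
                len += sg->length;

        return len;
}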
/linux-4.1.27/drivers/media/pci/tw68/
tw68-risc.c
46 struct scatterlist *sg; tw68_risc_field() local
62 sg = sglist; tw68_risc_field()
65 while (offset && offset >= sg_dma_len(sg)) { tw68_risc_field()
66 offset -= sg_dma_len(sg); tw68_risc_field()
67 sg = sg_next(sg); tw68_risc_field()
69 if (bpl <= sg_dma_len(sg) - offset) { tw68_risc_field()
73 *(rp++) = cpu_to_le32(sg_dma_address(sg) + offset); tw68_risc_field()
84 done = (sg_dma_len(sg) - offset); tw68_risc_field()
88 *(rp++) = cpu_to_le32(sg_dma_address(sg) + offset); tw68_risc_field()
90 sg = sg_next(sg); tw68_risc_field()
92 while (todo > sg_dma_len(sg)) { tw68_risc_field()
95 sg_dma_len(sg)); tw68_risc_field()
96 *(rp++) = cpu_to_le32(sg_dma_address(sg)); tw68_risc_field()
97 todo -= sg_dma_len(sg); tw68_risc_field()
98 sg = sg_next(sg); tw68_risc_field()
99 done += sg_dma_len(sg); tw68_risc_field()
106 *(rp++) = cpu_to_le32(sg_dma_address(sg)); tw68_risc_field()
/linux-4.1.27/drivers/net/wireless/orinoco/
mic.c
51 struct scatterlist sg[2]; orinoco_mic() local
68 sg_init_table(sg, 2); orinoco_mic()
69 sg_set_buf(&sg[0], hdr, sizeof(hdr)); orinoco_mic()
70 sg_set_buf(&sg[1], data, data_len); orinoco_mic()
77 return crypto_hash_digest(&desc, sg, data_len + sizeof(hdr), orinoco_mic()
/linux-4.1.27/arch/sh/kernel/
dma-nommu.c
26 static int nommu_map_sg(struct device *dev, struct scatterlist *sg, nommu_map_sg() argument
33 WARN_ON(nents == 0 || sg[0].length == 0); nommu_map_sg()
35 for_each_sg(sg, s, nents, i) { for_each_sg()
54 static void nommu_sync_sg(struct device *dev, struct scatterlist *sg, nommu_sync_sg() argument
60 for_each_sg(sg, s, nelems, i) nommu_sync_sg()
/linux-4.1.27/arch/arc/include/asm/
dma-mapping.h
122 dma_map_sg(struct device *dev, struct scatterlist *sg, dma_map_sg() argument
128 for_each_sg(sg, s, nents, i) dma_map_sg()
136 dma_unmap_sg(struct device *dev, struct scatterlist *sg, dma_unmap_sg() argument
142 for_each_sg(sg, s, nents, i) dma_unmap_sg()
181 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, dma_sync_sg_for_cpu() argument
186 for (i = 0; i < nelems; i++, sg++) dma_sync_sg_for_cpu()
187 _dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir); dma_sync_sg_for_cpu()
191 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, dma_sync_sg_for_device() argument
196 for (i = 0; i < nelems; i++, sg++) dma_sync_sg_for_device()
197 _dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir); dma_sync_sg_for_device()
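Seen from a driver, the contract behind these inlines is: call dma_map_sg(), program the device only from sg_dma_address()/sg_dma_len() for the number of entries the call returned (which may be fewer than nents if an IOMMU merged some), and finish with dma_unmap_sg() on the original nents. A hedged consumer-side sketch; program_descriptor() is an invented stand-in for whatever the hardware actually takes.

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

/* Illustrative stand-in for writing one hardware descriptor. */
static void program_descriptor(dma_addr_t addr, unsigned int len)
{
}

static int sketch_submit(struct device *dev, struct scatterlist *sgl,
                         int nents, enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i, mapped;

        mapped = dma_map_sg(dev, sgl, nents, dir);
        if (mapped <= 0)
                return -ENOMEM;

        /* Only the returned count is valid; entries may have been merged. */
        for_each_sg(sgl, sg, mapped, i)
                program_descriptor(sg_dma_address(sg), sg_dma_len(sg));

        /* ... start the transfer and wait for completion ... */

        /* Unmap with the original nents, not the mapped count. */
        dma_unmap_sg(dev, sgl, nents, dir);
        return 0;
}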
/linux-4.1.27/drivers/infiniband/ulp/iser/
iser_memory.c
45 struct scatterlist *sg; iser_free_bounce_sg() local
48 for_each_sg(data->sg, sg, data->size, count) iser_free_bounce_sg()
49 __free_page(sg_page(sg)); iser_free_bounce_sg()
51 kfree(data->sg); iser_free_bounce_sg()
53 data->sg = data->orig_sg; iser_free_bounce_sg()
62 struct scatterlist *sg; iser_alloc_bounce_sg() local
67 sg = kcalloc(nents, sizeof(*sg), GFP_ATOMIC); iser_alloc_bounce_sg()
68 if (!sg) iser_alloc_bounce_sg()
71 sg_init_table(sg, nents); iser_alloc_bounce_sg()
79 sg_set_page(&sg[i], page, page_len, 0); iser_alloc_bounce_sg()
84 data->orig_sg = data->sg; iser_alloc_bounce_sg()
86 data->sg = sg; iser_alloc_bounce_sg()
93 __free_page(sg_page(&sg[i - 1])); iser_alloc_bounce_sg()
94 kfree(sg); iser_alloc_bounce_sg()
102 struct scatterlist *osg, *bsg = data->sg; iser_copy_bounce()
195 data->dma_nents = ib_dma_map_sg(dev, data->sg, data->size, iser_start_rdma_unaligned_sg()
221 ib_dma_unmap_sg(dev, data->sg, data->size, iser_finalize_rdma_unaligned_sg()
250 struct scatterlist *sg, *sgl = data->sg; iser_sg_to_page_vec() local
261 for_each_sg(sgl, sg, data->dma_nents, i) { iser_sg_to_page_vec()
262 start_addr = ib_sg_dma_address(ibdev, sg); iser_sg_to_page_vec()
265 dma_len = ib_sg_dma_len(ibdev, sg); iser_sg_to_page_vec()
302 struct scatterlist *sg, *sgl, *next_sg = NULL; iser_data_buf_aligned_len() local
309 sgl = data->sg; iser_data_buf_aligned_len()
312 for_each_sg(sgl, sg, data->dma_nents, i) { iser_data_buf_aligned_len()
316 next_sg = sg_next(sg); iser_data_buf_aligned_len()
320 end_addr = start_addr + ib_sg_dma_len(ibdev, sg); iser_data_buf_aligned_len()
333 iser_dbg("Found %d aligned entries out of %d in sg:0x%p\n", iser_data_buf_aligned_len()
341 struct scatterlist *sg; iser_data_buf_dump() local
344 for_each_sg(data->sg, sg, data->dma_nents, i) iser_data_buf_dump()
345 iser_dbg("sg[%d] dma_addr:0x%lX page:0x%p " iser_data_buf_dump()
347 i, (unsigned long)ib_sg_dma_address(ibdev, sg), iser_data_buf_dump()
348 sg_page(sg), sg->offset, iser_data_buf_dump()
349 sg->length, ib_sg_dma_len(ibdev, sg)); iser_data_buf_dump()
372 data->dma_nents = ib_dma_map_sg(dev, data->sg, data->size, dma_dir); iser_dma_map_task_data()
387 ib_dma_unmap_sg(dev, data->sg, data->size, dir); iser_dma_unmap_task_data()
394 struct scatterlist *sg = mem->sg; iser_reg_dma() local
398 reg->sge.addr = ib_sg_dma_address(device->ib_device, &sg[0]); iser_reg_dma()
399 reg->sge.length = ib_sg_dma_len(device->ib_device, &sg[0]); iser_reg_dma()
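iser_alloc_bounce_sg() above is the common way to build a fresh scatterlist over newly allocated pages: kcalloc() the entries, sg_init_table(), then alloc_page() + sg_set_page() per entry, unwinding on failure. A condensed sketch, assuming the caller passes nents = DIV_ROUND_UP(total, PAGE_SIZE).

#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static struct scatterlist *sketch_alloc_bounce_sg(unsigned int nents,
                                                  size_t total)
{
        struct scatterlist *sg;
        unsigned int i;

        sg = kcalloc(nents, sizeof(*sg), GFP_KERNEL);
        if (!sg)
                return NULL;

        sg_init_table(sg, nents);

        for (i = 0; i < nents; i++) {
                size_t len = min_t(size_t, total, PAGE_SIZE);
                struct page *page = alloc_page(GFP_KERNEL);

                if (!page)
                        goto err;
                sg_set_page(&sg[i], page, len, 0);
                total -= len;
        }
        return sg;

err:
        while (i--)
                __free_page(sg_page(&sg[i]));
        kfree(sg);
        return NULL;
}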
/linux-4.1.27/net/rxrpc/
rxkad.c
117 struct scatterlist sg[2]; rxkad_prime_packet_security() local
140 sg_init_one(&sg[0], &tmpbuf, sizeof(tmpbuf)); rxkad_prime_packet_security()
141 sg_init_one(&sg[1], &tmpbuf, sizeof(tmpbuf)); rxkad_prime_packet_security()
142 crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf)); rxkad_prime_packet_security()
161 struct scatterlist sg[2]; rxkad_secure_packet_auth() local
184 sg_init_one(&sg[0], &tmpbuf, sizeof(tmpbuf)); rxkad_secure_packet_auth()
185 sg_init_one(&sg[1], &tmpbuf, sizeof(tmpbuf)); rxkad_secure_packet_auth()
186 crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf)); rxkad_secure_packet_auth()
208 struct scatterlist sg[16]; rxkad_secure_packet_encrypt() local
230 sg_init_one(&sg[0], sechdr, sizeof(rxkhdr)); rxkad_secure_packet_encrypt()
231 sg_init_one(&sg[1], &rxkhdr, sizeof(rxkhdr)); rxkad_secure_packet_encrypt()
232 crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(rxkhdr)); rxkad_secure_packet_encrypt()
242 sg_init_table(sg, nsg); rxkad_secure_packet_encrypt()
243 skb_to_sgvec(skb, sg, 0, len); rxkad_secure_packet_encrypt()
244 crypto_blkcipher_encrypt_iv(&desc, sg, sg, len); rxkad_secure_packet_encrypt()
261 struct scatterlist sg[2]; rxkad_secure_packet() local
294 sg_init_one(&sg[0], &tmpbuf, sizeof(tmpbuf)); rxkad_secure_packet()
295 sg_init_one(&sg[1], &tmpbuf, sizeof(tmpbuf)); rxkad_secure_packet()
296 crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf)); rxkad_secure_packet()
335 struct scatterlist sg[16]; rxkad_verify_packet_auth() local
350 sg_init_table(sg, nsg); rxkad_verify_packet_auth()
351 skb_to_sgvec(skb, sg, 0, 8); rxkad_verify_packet_auth()
359 crypto_blkcipher_decrypt_iv(&desc, sg, sg, 8); rxkad_verify_packet_auth()
410 struct scatterlist _sg[4], *sg; rxkad_verify_packet_encrypt() local
425 sg = _sg; rxkad_verify_packet_encrypt()
427 sg = kmalloc(sizeof(*sg) * nsg, GFP_NOIO); rxkad_verify_packet_encrypt()
428 if (!sg) rxkad_verify_packet_encrypt()
432 sg_init_table(sg, nsg); rxkad_verify_packet_encrypt()
433 skb_to_sgvec(skb, sg, 0, skb->len); rxkad_verify_packet_encrypt()
442 crypto_blkcipher_decrypt_iv(&desc, sg, sg, skb->len); rxkad_verify_packet_encrypt()
443 if (sg != _sg) rxkad_verify_packet_encrypt()
444 kfree(sg); rxkad_verify_packet_encrypt()
493 struct scatterlist sg[2]; rxkad_verify_packet() local
529 sg_init_one(&sg[0], &tmpbuf, sizeof(tmpbuf)); rxkad_verify_packet()
530 sg_init_one(&sg[1], &tmpbuf, sizeof(tmpbuf)); rxkad_verify_packet()
531 crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf)); rxkad_verify_packet()
694 static void rxkad_sg_set_buf2(struct scatterlist sg[2], rxkad_sg_set_buf2() argument
699 sg_init_table(sg, 2); rxkad_sg_set_buf2()
701 sg_set_buf(&sg[0], buf, buflen); rxkad_sg_set_buf2()
702 if (sg[0].offset + buflen > PAGE_SIZE) { rxkad_sg_set_buf2()
704 sg[0].length = PAGE_SIZE - sg[0].offset; rxkad_sg_set_buf2()
705 sg_set_buf(&sg[1], buf + sg[0].length, buflen - sg[0].length); rxkad_sg_set_buf2()
709 sg_mark_end(&sg[nsg - 1]); rxkad_sg_set_buf2()
711 ASSERTCMP(sg[0].length + sg[1].length, ==, buflen); rxkad_sg_set_buf2()
723 struct scatterlist sg[2]; rxkad_encrypt_response() local
731 rxkad_sg_set_buf2(sg, &resp->encrypted, sizeof(resp->encrypted)); rxkad_encrypt_response()
732 crypto_blkcipher_encrypt_iv(&desc, sg, sg, sizeof(resp->encrypted)); rxkad_encrypt_response()
827 struct scatterlist sg[1]; rxkad_decrypt_ticket() local
860 sg_init_one(&sg[0], ticket, ticket_len); rxkad_decrypt_ticket()
861 crypto_blkcipher_decrypt_iv(&desc, sg, sg, ticket_len); rxkad_decrypt_ticket()
970 struct scatterlist sg[2]; rxkad_decrypt_response() local
988 rxkad_sg_set_buf2(sg, &resp->encrypted, sizeof(resp->encrypted)); rxkad_decrypt_response()
989 crypto_blkcipher_decrypt_iv(&desc, sg, sg, sizeof(resp->encrypted)); rxkad_decrypt_response()
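rxkad_verify_packet_encrypt() (the lines around 410-444 above) keeps a small scatterlist array on the stack and only falls back to kmalloc() when the skb needs more entries, freeing it only in that case. That stack-or-heap pattern in isolation; the entry count taken from nr_frags is illustrative, the real code derives it from the skb layout.

#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <linux/skbuff.h>
#include <linux/slab.h>

static int sketch_skb_to_sg(struct sk_buff *skb)
{
        struct scatterlist _sg[4], *sg = _sg;
        int nsg = skb_shinfo(skb)->nr_frags + 1;        /* illustrative */

        if (nsg > ARRAY_SIZE(_sg)) {
                /* Too many fragments for the stack array: go to the heap. */
                sg = kmalloc_array(nsg, sizeof(*sg), GFP_NOIO);
                if (!sg)
                        return -ENOMEM;
        }

        sg_init_table(sg, nsg);
        skb_to_sgvec(skb, sg, 0, skb->len);

        /* ... hand the entries to the cipher ... */

        if (sg != _sg)
                kfree(sg);
        return 0;
}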
/linux-4.1.27/drivers/mmc/card/
queue.c
147 struct scatterlist *sg; mmc_alloc_sg() local
149 sg = kmalloc(sizeof(struct scatterlist)*sg_len, GFP_KERNEL); mmc_alloc_sg()
150 if (!sg) mmc_alloc_sg()
154 sg_init_table(sg, sg_len); mmc_alloc_sg()
157 return sg; mmc_alloc_sg()
253 mqrq_cur->sg = mmc_alloc_sg(1, &ret); mmc_init_queue()
262 mqrq_prev->sg = mmc_alloc_sg(1, &ret); mmc_init_queue()
281 mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret); mmc_init_queue()
286 mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret); mmc_init_queue()
309 kfree(mqrq_cur->sg); mmc_init_queue()
310 mqrq_cur->sg = NULL; mmc_init_queue()
314 kfree(mqrq_prev->sg); mmc_init_queue()
315 mqrq_prev->sg = NULL; mmc_init_queue()
345 kfree(mqrq_cur->sg); mmc_cleanup_queue()
346 mqrq_cur->sg = NULL; mmc_cleanup_queue()
354 kfree(mqrq_prev->sg); mmc_cleanup_queue()
355 mqrq_prev->sg = NULL; mmc_cleanup_queue()
453 struct scatterlist *sg, mmc_queue_packed_map_sg()
456 struct scatterlist *__sg = sg; mmc_queue_packed_map_sg()
479 __sg = sg + (sg_len - 1); mmc_queue_packed_map_sg()
482 sg_mark_end(sg + (sg_len - 1)); mmc_queue_packed_map_sg()
487 * Prepare the sg list(s) to be handed off to the host driver mmc_queue_map_sg()
493 struct scatterlist *sg; mmc_queue_map_sg() local
502 mqrq->sg, cmd_type); mmc_queue_map_sg()
504 return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg); mmc_queue_map_sg()
518 for_each_sg(mqrq->bounce_sg, sg, sg_len, i) mmc_queue_map_sg()
519 buflen += sg->length; mmc_queue_map_sg()
521 sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen); mmc_queue_map_sg()
539 mqrq->bounce_buf, mqrq->sg[0].length); mmc_queue_bounce_pre()
555 mqrq->bounce_buf, mqrq->sg[0].length); mmc_queue_bounce_post()
451 mmc_queue_packed_map_sg(struct mmc_queue *mq, struct mmc_packed *packed, struct scatterlist *sg, enum mmc_packed_type cmd_type) mmc_queue_packed_map_sg() argument
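mmc_alloc_sg() above is the minimal allocate-and-init helper (an allocation of sg_len entries followed by sg_init_table()), and the bounce hits show why sg_copy_to_buffer()/sg_copy_from_buffer() exist: the whole list is squashed into one linear buffer around the transfer. A brief sketch of both; kmalloc_array() is used here in place of the open-coded multiply.

#include <linux/scatterlist.h>
#include <linux/slab.h>

static struct scatterlist *sketch_alloc_sg(unsigned int sg_len)
{
        struct scatterlist *sg;

        sg = kmalloc_array(sg_len, sizeof(*sg), GFP_KERNEL);
        if (sg)
                sg_init_table(sg, sg_len);
        return sg;
}

/* Before a write, gather the scattered payload into the bounce buffer. */
static size_t sketch_bounce_pre(struct scatterlist *sg, unsigned int sg_len,
                                void *bounce_buf, size_t bufsz)
{
        return sg_copy_to_buffer(sg, sg_len, bounce_buf, bufsz);
}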
mmc_test.c
68 * @max_segs: maximum segments allowed by driver in scatterlist @sg
70 * @blocks: number of (512 byte) blocks currently mapped by @sg
71 * @sg_len: length of currently mapped scatterlist @sg
73 * @sg: scatterlist
84 struct scatterlist *sg; member in struct:mmc_test_area
191 struct mmc_request *mrq, struct scatterlist *sg, unsigned sg_len, mmc_test_prepare_mrq()
221 mrq->data->sg = sg; mmc_test_prepare_mrq()
278 struct scatterlist sg; mmc_test_buffer_transfer() local
284 sg_init_one(&sg, buffer, blksz); mmc_test_buffer_transfer()
286 mmc_test_prepare_mrq(test, &mrq, &sg, 1, addr, 1, blksz, write); mmc_test_buffer_transfer()
399 struct scatterlist *sg = NULL; mmc_test_map_sg() local
418 if (sg) mmc_test_map_sg()
419 sg = sg_next(sg); mmc_test_map_sg()
421 sg = sglist; mmc_test_map_sg()
422 if (!sg) mmc_test_map_sg()
424 sg_set_page(sg, mem->arr[i].page, len, 0); mmc_test_map_sg()
435 if (sg) mmc_test_map_sg()
436 sg_mark_end(sg); mmc_test_map_sg()
452 struct scatterlist *sg = NULL; mmc_test_map_sg_max_scatter() local
473 if (sg) mmc_test_map_sg_max_scatter()
474 sg = sg_next(sg); mmc_test_map_sg_max_scatter()
476 sg = sglist; mmc_test_map_sg_max_scatter()
477 if (!sg) mmc_test_map_sg_max_scatter()
479 sg_set_page(sg, virt_to_page(addr), len, 0); mmc_test_map_sg_max_scatter()
487 if (sg) mmc_test_map_sg_max_scatter()
488 sg_mark_end(sg); mmc_test_map_sg_max_scatter()
781 struct scatterlist *sg, unsigned sg_len, mmc_test_nonblock_transfer()
814 mmc_test_prepare_mrq(test, cur_areq->mrq, sg, sg_len, dev_addr, mmc_test_nonblock_transfer()
846 struct scatterlist *sg, unsigned sg_len, unsigned dev_addr, mmc_test_simple_transfer()
858 mmc_test_prepare_mrq(test, &mrq, sg, sg_len, dev_addr, mmc_test_simple_transfer()
879 struct scatterlist sg; mmc_test_broken_transfer() local
885 sg_init_one(&sg, test->buffer, blocks * blksz); mmc_test_broken_transfer()
887 mmc_test_prepare_mrq(test, &mrq, &sg, 1, 0, blocks, blksz, write); mmc_test_broken_transfer()
903 struct scatterlist *sg, unsigned sg_len, unsigned dev_addr, mmc_test_transfer()
916 sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE); mmc_test_transfer()
923 ret = mmc_test_simple_transfer(test, sg, sg_len, dev_addr, mmc_test_transfer()
963 sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE); mmc_test_transfer()
989 struct scatterlist sg; mmc_test_basic_write() local
995 sg_init_one(&sg, test->buffer, 512); mmc_test_basic_write()
997 ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1); mmc_test_basic_write()
1007 struct scatterlist sg; mmc_test_basic_read() local
1013 sg_init_one(&sg, test->buffer, 512); mmc_test_basic_read()
1015 ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 0); mmc_test_basic_read()
1025 struct scatterlist sg; mmc_test_verify_write() local
1027 sg_init_one(&sg, test->buffer, 512); mmc_test_verify_write()
1029 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1); mmc_test_verify_write()
1039 struct scatterlist sg; mmc_test_verify_read() local
1041 sg_init_one(&sg, test->buffer, 512); mmc_test_verify_read()
1043 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0); mmc_test_verify_read()
1054 struct scatterlist sg; mmc_test_multi_write() local
1067 sg_init_one(&sg, test->buffer, size); mmc_test_multi_write()
1069 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1); mmc_test_multi_write()
1080 struct scatterlist sg; mmc_test_multi_read() local
1093 sg_init_one(&sg, test->buffer, size); mmc_test_multi_read()
1095 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0); mmc_test_multi_read()
1105 struct scatterlist sg; mmc_test_pow2_write() local
1111 sg_init_one(&sg, test->buffer, i); mmc_test_pow2_write()
1112 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1); mmc_test_pow2_write()
1123 struct scatterlist sg; mmc_test_pow2_read() local
1129 sg_init_one(&sg, test->buffer, i); mmc_test_pow2_read()
1130 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0); mmc_test_pow2_read()
1141 struct scatterlist sg; mmc_test_weird_write() local
1147 sg_init_one(&sg, test->buffer, i); mmc_test_weird_write()
1148 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1); mmc_test_weird_write()
1159 struct scatterlist sg; mmc_test_weird_read() local
1165 sg_init_one(&sg, test->buffer, i); mmc_test_weird_read()
1166 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0); mmc_test_weird_read()
1177 struct scatterlist sg; mmc_test_align_write() local
1180 sg_init_one(&sg, test->buffer + i, 512); mmc_test_align_write()
1181 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1); mmc_test_align_write()
1192 struct scatterlist sg; mmc_test_align_read() local
1195 sg_init_one(&sg, test->buffer + i, 512); mmc_test_align_read()
1196 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0); mmc_test_align_read()
1208 struct scatterlist sg; mmc_test_align_multi_write() local
1222 sg_init_one(&sg, test->buffer + i, size); mmc_test_align_multi_write()
1223 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1); mmc_test_align_multi_write()
1235 struct scatterlist sg; mmc_test_align_multi_read() local
1249 sg_init_one(&sg, test->buffer + i, size); mmc_test_align_multi_read()
1250 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0); mmc_test_align_multi_read()
1329 struct scatterlist sg; mmc_test_write_high() local
1331 sg_init_table(&sg, 1); mmc_test_write_high()
1332 sg_set_page(&sg, test->highmem, 512, 0); mmc_test_write_high()
1334 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1); mmc_test_write_high()
1344 struct scatterlist sg; mmc_test_read_high() local
1346 sg_init_table(&sg, 1); mmc_test_read_high()
1347 sg_set_page(&sg, test->highmem, 512, 0); mmc_test_read_high()
1349 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0); mmc_test_read_high()
1360 struct scatterlist sg; mmc_test_multi_write_high() local
1373 sg_init_table(&sg, 1); mmc_test_multi_write_high()
1374 sg_set_page(&sg, test->highmem, size, 0); mmc_test_multi_write_high()
1376 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1); mmc_test_multi_write_high()
1387 struct scatterlist sg; mmc_test_multi_read_high() local
1400 sg_init_table(&sg, 1); mmc_test_multi_read_high()
1401 sg_set_page(&sg, test->highmem, size, 0); mmc_test_multi_read_high()
1403 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0); mmc_test_multi_read_high()
1433 err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg, mmc_test_area_map()
1437 err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs, mmc_test_area_map()
1441 pr_info("%s: Failed to map sg list\n", mmc_test_area_map()
1454 return mmc_test_simple_transfer(test, t->sg, t->sg_len, dev_addr, mmc_test_area_transfer()
1494 ret = mmc_test_nonblock_transfer(test, t->sg, t->sg_len, mmc_test_area_io_seq()
1553 kfree(t->sg); mmc_test_area_cleanup()
1607 t->sg = kmalloc(sizeof(struct scatterlist) * t->max_segs, GFP_KERNEL); mmc_test_area_init()
1608 if (!t->sg) { mmc_test_area_init()
2260 * Multiple blocking write 1 to 512 sg elements
2279 * Multiple non-blocking write 1 to 512 sg elements
2298 * Multiple blocking read 1 to 512 sg elements
2317 * Multiple non-blocking read 1 to 512 sg elements
2652 .name = "Write performance blocking req 1 to 512 sg elems",
2659 .name = "Write performance non-blocking req 1 to 512 sg elems",
2666 .name = "Read performance blocking req 1 to 512 sg elems",
2673 .name = "Read performance non-blocking req 1 to 512 sg elems",
190 mmc_test_prepare_mrq(struct mmc_test_card *test, struct mmc_request *mrq, struct scatterlist *sg, unsigned sg_len, unsigned dev_addr, unsigned blocks, unsigned blksz, int write) mmc_test_prepare_mrq() argument
780 mmc_test_nonblock_transfer(struct mmc_test_card *test, struct scatterlist *sg, unsigned sg_len, unsigned dev_addr, unsigned blocks, unsigned blksz, int write, int count) mmc_test_nonblock_transfer() argument
845 mmc_test_simple_transfer(struct mmc_test_card *test, struct scatterlist *sg, unsigned sg_len, unsigned dev_addr, unsigned blocks, unsigned blksz, int write) mmc_test_simple_transfer() argument
902 mmc_test_transfer(struct mmc_test_card *test, struct scatterlist *sg, unsigned sg_len, unsigned dev_addr, unsigned blocks, unsigned blksz, int write) mmc_test_transfer() argument
/linux-4.1.27/drivers/scsi/aacraid/
commctrl.c
560 if (user_srbcmd->sg.count > ARRAY_SIZE(sg_list)) { aac_send_raw_srb()
561 dprintk((KERN_DEBUG"aacraid: too many sg entries %d\n", aac_send_raw_srb()
562 le32_to_cpu(srbcmd->sg.count))); aac_send_raw_srb()
567 ((user_srbcmd->sg.count & 0xff) * sizeof(struct sgentry)); aac_send_raw_srb()
568 actual_fibsize64 = actual_fibsize + (user_srbcmd->sg.count & 0xff) * aac_send_raw_srb()
574 "user_srbcmd->sg.count=%d aac_srb=%lu sgentry=%lu;%lu " aac_send_raw_srb()
576 actual_fibsize, actual_fibsize64, user_srbcmd->sg.count, aac_send_raw_srb()
582 if ((data_dir == DMA_NONE) && user_srbcmd->sg.count) { aac_send_raw_srb()
589 struct user_sgmap64* upsg = (struct user_sgmap64*)&user_srbcmd->sg; aac_send_raw_srb()
590 struct sgmap64* psg = (struct sgmap64*)&srbcmd->sg; aac_send_raw_srb()
600 if (upsg->sg[i].count > aac_send_raw_srb()
609 p = kmalloc(upsg->sg[i].count,GFP_KERNEL|__GFP_DMA); aac_send_raw_srb()
612 upsg->sg[i].count,i,upsg->count)); aac_send_raw_srb()
616 addr = (u64)upsg->sg[i].addr[0]; aac_send_raw_srb()
617 addr += ((u64)upsg->sg[i].addr[1]) << 32; aac_send_raw_srb()
623 if(copy_from_user(p,sg_user[i],upsg->sg[i].count)){ aac_send_raw_srb()
624 dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n")); aac_send_raw_srb()
629 addr = pci_map_single(dev->pdev, p, upsg->sg[i].count, data_dir); aac_send_raw_srb()
631 psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff); aac_send_raw_srb()
632 psg->sg[i].addr[1] = cpu_to_le32(addr>>32); aac_send_raw_srb()
633 byte_count += upsg->sg[i].count; aac_send_raw_srb()
634 psg->sg[i].count = cpu_to_le32(upsg->sg[i].count); aac_send_raw_srb()
652 if (usg->sg[i].count > aac_send_raw_srb()
662 p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA); aac_send_raw_srb()
665 usg->sg[i].count,i,usg->count)); aac_send_raw_srb()
670 sg_user[i] = (void __user *)(uintptr_t)usg->sg[i].addr; aac_send_raw_srb()
675 if(copy_from_user(p,sg_user[i],upsg->sg[i].count)){ aac_send_raw_srb()
677 dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n")); aac_send_raw_srb()
682 addr = pci_map_single(dev->pdev, p, usg->sg[i].count, data_dir); aac_send_raw_srb()
684 psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff); aac_send_raw_srb()
685 psg->sg[i].addr[1] = cpu_to_le32(addr>>32); aac_send_raw_srb()
686 byte_count += usg->sg[i].count; aac_send_raw_srb()
687 psg->sg[i].count = cpu_to_le32(usg->sg[i].count); aac_send_raw_srb()
692 if (user_srbcmd->sg.count) aac_send_raw_srb()
698 struct user_sgmap* upsg = &user_srbcmd->sg; aac_send_raw_srb()
699 struct sgmap* psg = &srbcmd->sg; aac_send_raw_srb()
706 if (usg->sg[i].count > aac_send_raw_srb()
715 p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA); aac_send_raw_srb()
718 usg->sg[i].count,i,usg->count)); aac_send_raw_srb()
722 addr = (u64)usg->sg[i].addr[0]; aac_send_raw_srb()
723 addr += ((u64)usg->sg[i].addr[1]) << 32; aac_send_raw_srb()
729 if(copy_from_user(p,sg_user[i],usg->sg[i].count)){ aac_send_raw_srb()
730 dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n")); aac_send_raw_srb()
735 addr = pci_map_single(dev->pdev, p, usg->sg[i].count, data_dir); aac_send_raw_srb()
737 psg->sg[i].addr = cpu_to_le32(addr & 0xffffffff); aac_send_raw_srb()
738 byte_count += usg->sg[i].count; aac_send_raw_srb()
739 psg->sg[i].count = cpu_to_le32(usg->sg[i].count); aac_send_raw_srb()
745 if (upsg->sg[i].count > aac_send_raw_srb()
753 p = kmalloc(upsg->sg[i].count, GFP_KERNEL); aac_send_raw_srb()
756 upsg->sg[i].count, i, upsg->count)); aac_send_raw_srb()
760 sg_user[i] = (void __user *)(uintptr_t)upsg->sg[i].addr; aac_send_raw_srb()
766 upsg->sg[i].count)) { aac_send_raw_srb()
767 dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n")); aac_send_raw_srb()
773 upsg->sg[i].count, data_dir); aac_send_raw_srb()
775 psg->sg[i].addr = cpu_to_le32(addr); aac_send_raw_srb()
776 byte_count += upsg->sg[i].count; aac_send_raw_srb()
777 psg->sg[i].count = cpu_to_le32(upsg->sg[i].count); aac_send_raw_srb()
781 if (user_srbcmd->sg.count) aac_send_raw_srb()
802 ? ((struct sgmap64*)&srbcmd->sg)->sg[i].count aac_send_raw_srb()
803 : srbcmd->sg.sg[i].count); aac_send_raw_srb()
805 dprintk((KERN_DEBUG"aacraid: Could not copy sg data to user\n")); aac_send_raw_srb()
aachba.c
1183 ret = aac_build_sgraw(cmd, &readcmd->sg); aac_read_raw_io()
1188 ((le32_to_cpu(readcmd->sg.count)-1) * sizeof(struct sgentryraw)); aac_read_raw_io()
1219 ret = aac_build_sg64(cmd, &readcmd->sg); aac_read_block64()
1223 ((le32_to_cpu(readcmd->sg.count) - 1) * aac_read_block64()
1254 ret = aac_build_sg(cmd, &readcmd->sg); aac_read_block()
1258 ((le32_to_cpu(readcmd->sg.count) - 1) * aac_read_block()
1315 ret = aac_build_sgraw(cmd, &writecmd->sg); aac_write_raw_io()
1320 ((le32_to_cpu(writecmd->sg.count)-1) * sizeof (struct sgentryraw)); aac_write_raw_io()
1351 ret = aac_build_sg64(cmd, &writecmd->sg); aac_write_block64()
1355 ((le32_to_cpu(writecmd->sg.count) - 1) * aac_write_block64()
1385 writecmd->sg.count = cpu_to_le32(1); aac_write_block()
1388 ret = aac_build_sg(cmd, &writecmd->sg); aac_write_block()
1392 ((le32_to_cpu(writecmd->sg.count) - 1) * aac_write_block()
1454 ret = aac_build_sg64(cmd, (struct sgmap64 *) &srbcmd->sg); aac_scsi_64()
1465 ((le32_to_cpu(srbcmd->sg.count) & 0xff) * aac_scsi_64()
1485 ret = aac_build_sg(cmd, (struct sgmap *)&srbcmd->sg); aac_scsi_32()
1496 (((le32_to_cpu(srbcmd->sg.count) & 0xff) - 1) * aac_scsi_32()
1764 * Worst case size that could cause sg overflow when aac_get_adapter_info()
2993 * Calculate resid for sg aac_srb_callback()
3204 psg->sg[0].addr = 0; aac_build_sg()
3205 psg->sg[0].count = 0; aac_build_sg()
3211 struct scatterlist *sg; aac_build_sg() local
3216 scsi_for_each_sg(scsicmd, sg, nseg, i) { scsi_for_each_sg()
3217 psg->sg[i].addr = cpu_to_le32(sg_dma_address(sg)); scsi_for_each_sg()
3218 psg->sg[i].count = cpu_to_le32(sg_dma_len(sg)); scsi_for_each_sg()
3219 byte_count += sg_dma_len(sg); scsi_for_each_sg()
3223 u32 temp = le32_to_cpu(psg->sg[i-1].count) -
3225 psg->sg[i-1].count = cpu_to_le32(temp);
3248 psg->sg[0].addr[0] = 0; aac_build_sg64()
3249 psg->sg[0].addr[1] = 0; aac_build_sg64()
3250 psg->sg[0].count = 0; aac_build_sg64()
3256 struct scatterlist *sg; aac_build_sg64() local
3259 scsi_for_each_sg(scsicmd, sg, nseg, i) { scsi_for_each_sg()
3260 int count = sg_dma_len(sg); scsi_for_each_sg()
3261 addr = sg_dma_address(sg); scsi_for_each_sg()
3262 psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff); scsi_for_each_sg()
3263 psg->sg[i].addr[1] = cpu_to_le32(addr>>32); scsi_for_each_sg()
3264 psg->sg[i].count = cpu_to_le32(count); scsi_for_each_sg()
3270 u32 temp = le32_to_cpu(psg->sg[i-1].count) -
3272 psg->sg[i-1].count = cpu_to_le32(temp);
3291 psg->sg[0].next = 0; aac_build_sgraw()
3292 psg->sg[0].prev = 0; aac_build_sgraw()
3293 psg->sg[0].addr[0] = 0; aac_build_sgraw()
3294 psg->sg[0].addr[1] = 0; aac_build_sgraw()
3295 psg->sg[0].count = 0; aac_build_sgraw()
3296 psg->sg[0].flags = 0; aac_build_sgraw()
3302 struct scatterlist *sg; aac_build_sgraw() local
3305 scsi_for_each_sg(scsicmd, sg, nseg, i) { scsi_for_each_sg()
3306 int count = sg_dma_len(sg); scsi_for_each_sg()
3307 u64 addr = sg_dma_address(sg); scsi_for_each_sg()
3308 psg->sg[i].next = 0; scsi_for_each_sg()
3309 psg->sg[i].prev = 0; scsi_for_each_sg()
3310 psg->sg[i].addr[1] = cpu_to_le32((u32)(addr>>32)); scsi_for_each_sg()
3311 psg->sg[i].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff)); scsi_for_each_sg()
3312 psg->sg[i].count = cpu_to_le32(count); scsi_for_each_sg()
3313 psg->sg[i].flags = 0; scsi_for_each_sg()
3319 u32 temp = le32_to_cpu(psg->sg[i-1].count) -
3321 psg->sg[i-1].count = cpu_to_le32(temp);
3343 struct scatterlist *sg; aac_build_sgraw2() local
3347 scsi_for_each_sg(scsicmd, sg, nseg, i) { scsi_for_each_sg()
3348 int count = sg_dma_len(sg); scsi_for_each_sg()
3349 u64 addr = sg_dma_address(sg); scsi_for_each_sg()
3381 /* not conformable: evaluate required sg elements */
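aac_build_sg64() above is the canonical translate-the-mapped-table loop: map the command's data, then copy sg_dma_address()/sg_dma_len() for each entry into the controller's own descriptor layout. A hedged sketch against an invented struct hw_sge; real firmware formats differ.

#include <asm/byteorder.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>
#include <scsi/scsi_cmnd.h>

/* Illustrative firmware descriptor; every controller defines its own. */
struct hw_sge {
        __le64 addr;
        __le32 len;
} __packed;

static int sketch_build_hw_sg(struct scsi_cmnd *cmd, struct hw_sge *sge,
                              int max_sge)
{
        struct scatterlist *sg;
        int i, nseg;

        nseg = scsi_dma_map(cmd);
        if (nseg <= 0)
                return nseg;            /* no data, or mapping failed */
        if (nseg > max_sge) {
                scsi_dma_unmap(cmd);
                return -EINVAL;
        }

        scsi_for_each_sg(cmd, sg, nseg, i) {
                sge[i].addr = cpu_to_le64(sg_dma_address(sg));
                sge[i].len = cpu_to_le32(sg_dma_len(sg));
        }
        return nseg;
}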
/linux-4.1.27/drivers/target/tcm_fc/
tfc_io.c
66 struct scatterlist *sg = NULL; ft_queue_data_in() local
100 sg = se_cmd->t_data_sg; ft_queue_data_in()
101 mem_len = sg->length; ft_queue_data_in()
102 mem_off = sg->offset; ft_queue_data_in()
103 page = sg_page(sg); ft_queue_data_in()
118 sg = sg_next(sg); ft_queue_data_in()
119 mem_len = min((size_t)sg->length, remaining); ft_queue_data_in()
120 mem_off = sg->offset; ft_queue_data_in()
121 page = sg_page(sg); ft_queue_data_in()
222 struct scatterlist *sg = NULL; ft_recv_write_data() local
251 pr_err("%s: xid 0x%x, f_ctl 0x%x, cmd->sg %p, " ft_recv_write_data()
297 sg = se_cmd->t_data_sg; ft_recv_write_data()
298 mem_len = sg->length; ft_recv_write_data()
299 mem_off = sg->offset; ft_recv_write_data()
300 page = sg_page(sg); ft_recv_write_data()
305 sg = sg_next(sg); ft_recv_write_data()
306 mem_len = sg->length; ft_recv_write_data()
307 mem_off = sg->offset; ft_recv_write_data()
308 page = sg_page(sg); ft_recv_write_data()
/linux-4.1.27/drivers/crypto/
bfin_crc.c
61 struct dma_desc_array *sg_cpu; /* virt addr of sg dma descriptors */
62 dma_addr_t sg_dma; /* phy addr of sg dma descriptors */
64 dma_addr_t sg_mid_dma; /* phy addr of sg mid buffer */
83 struct scatterlist *sg; /* sg list head for this update*/ member in struct:bfin_crypto_crc_reqctx
84 struct scatterlist bufsl[2]; /* chained sg list */
105 struct scatterlist *sg = sg_list; sg_count() local
111 while (!sg_is_last(sg)) { sg_count()
113 sg = sg_next(sg); sg_count()
125 struct scatterlist *sg = NULL; sg_get() local
128 for_each_sg(sg_list, sg, nents, i) sg_get()
132 return sg; sg_get()
164 dev_dbg(ctx->crc->dev, "init: requested sg list is too big > %d\n", bfin_crypto_crc_init()
187 struct scatterlist *sg; bfin_crypto_crc_config_dma() local
196 dma_map_sg(crc->dev, ctx->sg, ctx->sg_nents, DMA_TO_DEVICE); bfin_crypto_crc_config_dma()
198 for_each_sg(ctx->sg, sg, ctx->sg_nents, j) { bfin_crypto_crc_config_dma()
199 dma_addr = sg_dma_address(sg); bfin_crypto_crc_config_dma()
200 /* deduce extra bytes in last sg */ bfin_crypto_crc_config_dma()
201 if (sg_is_last(sg)) bfin_crypto_crc_config_dma()
202 dma_count = sg_dma_len(sg) - ctx->bufnext_len; bfin_crypto_crc_config_dma()
204 dma_count = sg_dma_len(sg); bfin_crypto_crc_config_dma()
208 bytes in current sg buffer. Move addr of current bfin_crypto_crc_config_dma()
209 sg and deduce the length of current sg. bfin_crypto_crc_config_dma()
212 sg_virt(sg), bfin_crypto_crc_config_dma()
234 /* chop current sg dma len to multiple of 32 bits */ bfin_crypto_crc_config_dma()
265 (u8*)sg_virt(sg) + (dma_count << 2), bfin_crypto_crc_config_dma()
304 struct scatterlist *sg; bfin_crypto_crc_handle_queue() local
333 ctx->sg = NULL; bfin_crypto_crc_handle_queue()
375 ctx->sg = ctx->bufsl; bfin_crypto_crc_handle_queue()
377 ctx->sg = req->src; bfin_crypto_crc_handle_queue()
380 nsg = ctx->sg_nents = sg_count(ctx->sg); bfin_crypto_crc_handle_queue()
390 sg = sg_get(ctx->sg, nsg, i); bfin_crypto_crc_handle_queue()
391 j = min(nextlen, sg_dma_len(sg)); bfin_crypto_crc_handle_queue()
393 sg_virt(sg) + sg_dma_len(sg) - j, j); bfin_crypto_crc_handle_queue()
394 if (j == sg_dma_len(sg)) bfin_crypto_crc_handle_queue()
654 * need at most CRC_MAX_DMA_DESC sg + CRC_MAX_DMA_DESC middle + bfin_crypto_crc_probe()
atmel-sha.c
92 struct scatterlist *sg; member in struct:atmel_sha_reqctx
93 unsigned int offset; /* offset in current sg */
163 count = min(ctx->sg->length - ctx->offset, ctx->total); atmel_sha_append_sg()
169 * because the sg length is 0. In the latter case, atmel_sha_append_sg()
170 * check if there is another sg in the list, a 0 length atmel_sha_append_sg()
171 * sg doesn't necessarily mean the end of the sg list. atmel_sha_append_sg()
173 if ((ctx->sg->length == 0) && !sg_is_last(ctx->sg)) { atmel_sha_append_sg()
174 ctx->sg = sg_next(ctx->sg); atmel_sha_append_sg()
181 scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, ctx->sg, atmel_sha_append_sg()
188 if (ctx->offset == ctx->sg->length) { atmel_sha_append_sg()
189 ctx->sg = sg_next(ctx->sg); atmel_sha_append_sg()
190 if (ctx->sg) atmel_sha_append_sg()
430 struct scatterlist sg[2]; atmel_sha_xmit_dma() local
441 sg_init_table(sg, 2); atmel_sha_xmit_dma()
442 sg_dma_address(&sg[0]) = dma_addr1; atmel_sha_xmit_dma()
443 sg_dma_len(&sg[0]) = length1; atmel_sha_xmit_dma()
444 sg_dma_address(&sg[1]) = dma_addr2; atmel_sha_xmit_dma()
445 sg_dma_len(&sg[1]) = length2; atmel_sha_xmit_dma()
446 in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 2, atmel_sha_xmit_dma()
449 sg_init_table(sg, 1); atmel_sha_xmit_dma()
450 sg_dma_address(&sg[0]) = dma_addr1; atmel_sha_xmit_dma()
451 sg_dma_len(&sg[0]) = length1; atmel_sha_xmit_dma()
452 in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 1, atmel_sha_xmit_dma()
551 struct scatterlist *sg; atmel_sha_update_dma_start() local
563 sg = ctx->sg; atmel_sha_update_dma_start()
565 if (!IS_ALIGNED(sg->offset, sizeof(u32))) atmel_sha_update_dma_start()
568 if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, ctx->block_size)) atmel_sha_update_dma_start()
572 length = min(ctx->total, sg->length); atmel_sha_update_dma_start()
574 if (sg_is_last(sg)) { atmel_sha_update_dma_start()
576 /* not last sg must be ctx->block_size aligned */ atmel_sha_update_dma_start()
594 sg = ctx->sg; atmel_sha_update_dma_start()
614 ctx->sg = sg; atmel_sha_update_dma_start()
615 if (!dma_map_sg(dd->dev, ctx->sg, 1, atmel_sha_update_dma_start()
625 return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg), atmel_sha_update_dma_start()
630 if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) { atmel_sha_update_dma_start()
638 return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg), length, 0, atmel_sha_update_dma_start()
647 dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE); atmel_sha_update_dma_stop()
648 if (ctx->sg->length == ctx->offset) { atmel_sha_update_dma_stop()
649 ctx->sg = sg_next(ctx->sg); atmel_sha_update_dma_stop()
650 if (ctx->sg) atmel_sha_update_dma_stop()
909 ctx->sg = req->src; atmel_sha_update()
img-hash.c
93 /* sg root */
96 struct scatterlist *sg; member in struct:img_hash_request_ctx
217 if (ctx->sg) img_hash_dma_callback()
221 static int img_hash_xmit_dma(struct img_hash_dev *hdev, struct scatterlist *sg) img_hash_xmit_dma() argument
226 ctx->dma_ct = dma_map_sg(hdev->dev, sg, 1, DMA_MEM_TO_DEV); img_hash_xmit_dma()
228 dev_err(hdev->dev, "Invalid DMA sg\n"); img_hash_xmit_dma()
234 sg, img_hash_xmit_dma()
241 dma_unmap_sg(hdev->dev, sg, 1, DMA_MEM_TO_DEV); img_hash_xmit_dma()
256 ctx->bufcnt = sg_copy_to_buffer(hdev->req->src, sg_nents(ctx->sg), img_hash_write_via_cpu()
364 if (!ctx->sg) img_hash_dma_task()
367 addr = sg_virt(ctx->sg); img_hash_dma_task()
368 nbytes = ctx->sg->length - ctx->offset; img_hash_dma_task()
402 ctx->sg = sg_next(ctx->sg); img_hash_dma_task()
403 while (ctx->sg && (ctx->bufcnt < 4)) { img_hash_dma_task()
404 len = ctx->sg->length; img_hash_dma_task()
411 if (tbc >= ctx->sg->length) { img_hash_dma_task()
412 ctx->sg = sg_next(ctx->sg); img_hash_dma_task()
424 ctx->sg = sg_next(ctx->sg); img_hash_dma_task()
433 dma_unmap_sg(hdev->dev, ctx->sg, ctx->dma_ct, DMA_TO_DEVICE); img_hash_write_via_dma_stop()
637 ctx->sg = req->src; img_hash_digest()
639 ctx->nents = sg_nents(ctx->sg); img_hash_digest()
s5p-sss.c
230 static void s5p_set_dma_indata(struct s5p_aes_dev *dev, struct scatterlist *sg) s5p_set_dma_indata() argument
232 SSS_WRITE(dev, FCBRDMAS, sg_dma_address(sg)); s5p_set_dma_indata()
233 SSS_WRITE(dev, FCBRDMAL, sg_dma_len(sg)); s5p_set_dma_indata()
236 static void s5p_set_dma_outdata(struct s5p_aes_dev *dev, struct scatterlist *sg) s5p_set_dma_outdata() argument
238 SSS_WRITE(dev, FCBTDMAS, sg_dma_address(sg)); s5p_set_dma_outdata()
239 SSS_WRITE(dev, FCBTDMAL, sg_dma_len(sg)); s5p_set_dma_outdata()
259 static int s5p_set_outdata(struct s5p_aes_dev *dev, struct scatterlist *sg) s5p_set_outdata() argument
263 if (!IS_ALIGNED(sg_dma_len(sg), AES_BLOCK_SIZE)) { s5p_set_outdata()
267 if (!sg_dma_len(sg)) { s5p_set_outdata()
272 err = dma_map_sg(dev->dev, sg, 1, DMA_FROM_DEVICE); s5p_set_outdata()
278 dev->sg_dst = sg; s5p_set_outdata()
285 static int s5p_set_indata(struct s5p_aes_dev *dev, struct scatterlist *sg) s5p_set_indata() argument
289 if (!IS_ALIGNED(sg_dma_len(sg), AES_BLOCK_SIZE)) { s5p_set_indata()
293 if (!sg_dma_len(sg)) { s5p_set_indata()
298 err = dma_map_sg(dev->dev, sg, 1, DMA_TO_DEVICE); s5p_set_indata()
304 dev->sg_src = sg; s5p_set_indata()
sahara.c
277 static int sahara_sg_length(struct scatterlist *sg, sahara_sg_length() argument
285 sg_list = sg; sahara_sg_length()
479 struct scatterlist *sg; sahara_hw_descriptor_create() local
516 dev_err(dev->device, "couldn't map in sg\n"); sahara_hw_descriptor_create()
522 dev_err(dev->device, "couldn't map out sg\n"); sahara_hw_descriptor_create()
528 sg = dev->in_sg; sahara_hw_descriptor_create()
530 dev->hw_link[i]->len = sg->length; sahara_hw_descriptor_create()
531 dev->hw_link[i]->p = sg->dma_address; sahara_hw_descriptor_create()
536 sg = sg_next(sg); sahara_hw_descriptor_create()
542 sg = dev->out_sg; sahara_hw_descriptor_create()
544 dev->hw_link[j]->len = sg->length; sahara_hw_descriptor_create()
545 dev->hw_link[j]->p = sg->dma_address; sahara_hw_descriptor_create()
550 sg = sg_next(sg); sahara_hw_descriptor_create()
815 struct scatterlist *sg; sahara_sha_hw_links_create() local
830 sg = dev->in_sg; sahara_sha_hw_links_create()
831 while (sg) { sahara_sha_hw_links_create()
832 ret = dma_map_sg(dev->device, sg, 1, sahara_sha_hw_links_create()
837 dev->hw_link[i]->len = sg->length; sahara_sha_hw_links_create()
838 dev->hw_link[i]->p = sg->dma_address; sahara_sha_hw_links_create()
840 sg = sg_next(sg); sahara_sha_hw_links_create()
845 sg = dev->in_sg; sahara_sha_hw_links_create()
852 dev->hw_link[i]->len = sg->length; sahara_sha_hw_links_create()
853 dev->hw_link[i]->p = sg->dma_address; sahara_sha_hw_links_create()
858 sg = sg_next(sg); sahara_sha_hw_links_create()
938 static int sahara_walk_and_recalc(struct scatterlist *sg, unsigned int nbytes) sahara_walk_and_recalc() argument
940 if (!sg || !sg->length) sahara_walk_and_recalc()
943 while (nbytes && sg) { sahara_walk_and_recalc()
944 if (nbytes <= sg->length) { sahara_walk_and_recalc()
945 sg->length = nbytes; sahara_walk_and_recalc()
946 sg_mark_end(sg); sahara_walk_and_recalc()
949 nbytes -= sg->length; sahara_walk_and_recalc()
950 sg = sg_next(sg); sahara_walk_and_recalc()
1036 struct scatterlist *sg; sahara_sha_unmap_sg() local
1039 sg = dev->in_sg; sahara_sha_unmap_sg()
1040 while (sg) { sahara_sha_unmap_sg()
1041 dma_unmap_sg(dev->device, sg, 1, DMA_TO_DEVICE); sahara_sha_unmap_sg()
1042 sg = sg_next(sg); sahara_sha_unmap_sg()
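sahara_walk_and_recalc() above trims a scatterlist so it describes exactly nbytes: shorten the element where the count runs out and sg_mark_end() it so later walkers stop there. The same logic isolated as a sketch; note it modifies the list in place, so it only suits lists the caller owns.

#include <linux/scatterlist.h>
#include <linux/types.h>

/* Trim @sg in place so the list covers exactly @nbytes of data. */
static bool sketch_trim_sg(struct scatterlist *sg, unsigned int nbytes)
{
        while (sg && nbytes) {
                if (nbytes <= sg->length) {
                        sg->length = nbytes;
                        sg_mark_end(sg);
                        return true;
                }
                nbytes -= sg->length;
                sg = sg_next(sg);
        }
        return nbytes == 0;
}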
omap-sham.c
152 struct scatterlist *sg; member in struct:omap_sham_reqctx
154 unsigned int offset; /* offset in current sg */
585 ctx->sgl.page_link = ctx->sg->page_link; omap_sham_xmit_dma()
586 ctx->sgl.offset = ctx->sg->offset; omap_sham_xmit_dma()
588 sg_dma_address(&ctx->sgl) = sg_dma_address(ctx->sg); omap_sham_xmit_dma()
641 while (ctx->sg) { omap_sham_append_sg()
642 vaddr = kmap_atomic(sg_page(ctx->sg)); omap_sham_append_sg()
643 vaddr += ctx->sg->offset; omap_sham_append_sg()
647 ctx->sg->length - ctx->offset); omap_sham_append_sg()
655 if (ctx->offset == ctx->sg->length) { omap_sham_append_sg()
656 ctx->sg = sg_next(ctx->sg); omap_sham_append_sg()
657 if (ctx->sg) omap_sham_append_sg()
713 #define SG_AA(sg) (IS_ALIGNED(sg->offset, sizeof(u32)))
715 #define SG_SA(sg, bs) (IS_ALIGNED(sg->length, bs))
721 struct scatterlist *sg; omap_sham_update_dma_start() local
731 * Don't use the sg interface when the transfer size is less omap_sham_update_dma_start()
742 sg = ctx->sg; omap_sham_update_dma_start()
745 if (!SG_AA(sg)) omap_sham_update_dma_start()
748 if (!sg_is_last(sg) && !SG_SA(sg, bs)) omap_sham_update_dma_start()
752 length = min(ctx->total, sg->length); omap_sham_update_dma_start()
754 if (sg_is_last(sg)) { omap_sham_update_dma_start()
756 /* not last sg must be BLOCK_SIZE aligned */ omap_sham_update_dma_start()
765 if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) { omap_sham_update_dma_start()
777 ret = omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, final, 1); omap_sham_update_dma_start()
779 dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE); omap_sham_update_dma_start()
815 dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE); omap_sham_update_dma_stop()
816 if (ctx->sg->length == ctx->offset) { omap_sham_update_dma_stop()
817 ctx->sg = sg_next(ctx->sg); omap_sham_update_dma_stop()
818 if (ctx->sg) omap_sham_update_dma_stop()
1088 ctx->sg = req->src; omap_sham_update()
/linux-4.1.27/drivers/s390/scsi/
zfcp_qdio.h
172 * zfcp_qdio_sg_one_sbal - check if one sbale is enough for sg data
173 * @sg: The scatterlist where to check the data size
179 int zfcp_qdio_sg_one_sbale(struct scatterlist *sg) zfcp_qdio_sg_one_sbale() argument
181 return sg_is_last(sg) && sg->length <= ZFCP_QDIO_SBALE_LEN; zfcp_qdio_sg_one_sbale()
229 * @sg: pointer to struct scatterlist
232 unsigned int zfcp_qdio_sbale_count(struct scatterlist *sg) zfcp_qdio_sbale_count() argument
236 for (; sg; sg = sg_next(sg)) zfcp_qdio_sbale_count()
244 * @sg: pointer to struct scatterlist
247 unsigned int zfcp_qdio_real_bytes(struct scatterlist *sg) zfcp_qdio_real_bytes() argument
251 for (; sg; sg = sg_next(sg)) zfcp_qdio_real_bytes()
252 real_bytes += sg->length; zfcp_qdio_real_bytes()
zfcp_aux.c
543 * @sg: pointer to scatterlist
547 void zfcp_sg_free_table(struct scatterlist *sg, int count) zfcp_sg_free_table() argument
551 for (i = 0; i < count; i++, sg++) zfcp_sg_free_table()
552 if (sg) zfcp_sg_free_table()
553 free_page((unsigned long) sg_virt(sg)); zfcp_sg_free_table()
560 * @sg: pointer to struct scatterlist
566 int zfcp_sg_setup_table(struct scatterlist *sg, int count) zfcp_sg_setup_table() argument
571 sg_init_table(sg, count); zfcp_sg_setup_table()
572 for (i = 0; i < count; i++, sg++) { zfcp_sg_setup_table()
575 zfcp_sg_free_table(sg, i); zfcp_sg_setup_table()
578 sg_set_buf(sg, addr, PAGE_SIZE); zfcp_sg_setup_table()
/linux-4.1.27/arch/arm/mach-rpc/
dma.c
56 static void iomd_get_next_sg(struct scatterlist *sg, struct iomd_dma *idma) iomd_get_next_sg() argument
60 if (idma->dma.sg) { iomd_get_next_sg()
61 sg->dma_address = idma->dma_addr; iomd_get_next_sg()
62 offset = sg->dma_address & ~PAGE_MASK; iomd_get_next_sg()
72 sg->length = end - TRANSFER_SIZE; iomd_get_next_sg()
79 idma->dma.sg = sg_next(idma->dma.sg); iomd_get_next_sg()
80 idma->dma_addr = idma->dma.sg->dma_address; iomd_get_next_sg()
81 idma->dma_len = idma->dma.sg->length; iomd_get_next_sg()
84 idma->dma.sg = NULL; iomd_get_next_sg()
90 sg->dma_address = 0; iomd_get_next_sg()
91 sg->length = 0; iomd_get_next_sg()
94 sg->length |= flags; iomd_get_next_sg()
167 if (!idma->dma.sg) { iomd_enable_dma()
168 idma->dma.sg = &idma->dma.buf; iomd_enable_dma()
268 if (fdma->dma.sg) floppy_enable_dma()
/linux-4.1.27/net/sunrpc/auth_gss/
gss_krb5_crypto.c
61 struct scatterlist sg[1]; krb5_encrypt() local
78 sg_init_one(sg, out, length); krb5_encrypt()
80 ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, length); krb5_encrypt()
95 struct scatterlist sg[1]; krb5_decrypt() local
111 sg_init_one(sg, out, length); krb5_decrypt()
113 ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, length); krb5_decrypt()
120 checksummer(struct scatterlist *sg, void *data) checksummer() argument
124 return crypto_hash_update(desc, sg, sg->length); checksummer()
156 struct scatterlist sg[1]; make_checksum_hmac_md5() local
194 sg_init_one(sg, rc4salt, 4); make_checksum_hmac_md5()
195 err = crypto_hash_update(&desc, sg, 4); make_checksum_hmac_md5()
199 sg_init_one(sg, header, hdrlen); make_checksum_hmac_md5()
200 err = crypto_hash_update(&desc, sg, hdrlen); make_checksum_hmac_md5()
221 sg_init_one(sg, checksumdata, crypto_hash_digestsize(md5)); make_checksum_hmac_md5()
222 err = crypto_hash_digest(&desc, sg, crypto_hash_digestsize(md5), make_checksum_hmac_md5()
246 struct scatterlist sg[1]; make_checksum() local
279 sg_init_one(sg, header, hdrlen); make_checksum()
280 err = crypto_hash_update(&desc, sg, hdrlen); make_checksum()
327 struct scatterlist sg[1]; make_checksum_v2() local
362 sg_init_one(sg, header, hdrlen); make_checksum_v2()
363 err = crypto_hash_update(&desc, sg, hdrlen); make_checksum_v2()
401 encryptor(struct scatterlist *sg, void *data) encryptor() argument
406 int thislen = desc->fraglen + sg->length; encryptor()
420 in_page = sg_page(sg); encryptor()
422 sg_set_page(&desc->infrags[desc->fragno], in_page, sg->length, encryptor()
423 sg->offset); encryptor()
424 sg_set_page(&desc->outfrags[desc->fragno], sg_page(sg), sg->length, encryptor()
425 sg->offset); encryptor()
427 desc->fraglen += sg->length; encryptor()
428 desc->pos += sg->length; encryptor()
448 sg_set_page(&desc->outfrags[0], sg_page(sg), fraglen, encryptor()
449 sg->offset + sg->length - fraglen); encryptor()
496 decryptor(struct scatterlist *sg, void *data) decryptor() argument
499 int thislen = desc->fraglen + sg->length; decryptor()
505 sg_set_page(&desc->frags[desc->fragno], sg_page(sg), sg->length, decryptor()
506 sg->offset); decryptor()
508 desc->fraglen += sg->length; decryptor()
526 sg_set_page(&desc->frags[0], sg_page(sg), fraglen, decryptor()
527 sg->offset + sg->length - fraglen); decryptor()
601 struct scatterlist sg[1]; gss_krb5_cts_crypt() local
626 sg_init_one(sg, data, len); gss_krb5_cts_crypt()
629 ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, len); gss_krb5_cts_crypt()
631 ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, len); gss_krb5_cts_crypt()
858 struct scatterlist sg[1]; krb5_rc4_setup_seq_key() local
884 sg_init_table(sg, 1); krb5_rc4_setup_seq_key()
885 sg_set_buf(sg, &zeroconstant, 4); krb5_rc4_setup_seq_key()
887 err = crypto_hash_digest(&desc, sg, 4, Kseq); krb5_rc4_setup_seq_key()
896 sg_set_buf(sg, cksum, 8); krb5_rc4_setup_seq_key()
898 err = crypto_hash_digest(&desc, sg, 8, Kseq); krb5_rc4_setup_seq_key()
924 struct scatterlist sg[1]; krb5_rc4_setup_enc_key() local
954 sg_init_table(sg, 1); krb5_rc4_setup_enc_key()
955 sg_set_buf(sg, zeroconstant, 4); krb5_rc4_setup_enc_key()
957 err = crypto_hash_digest(&desc, sg, 4, Kcrypt); krb5_rc4_setup_enc_key()
971 sg_set_buf(sg, seqnumarray, 4); krb5_rc4_setup_enc_key()
973 err = crypto_hash_digest(&desc, sg, 4, Kcrypt); krb5_rc4_setup_enc_key()
/linux-4.1.27/arch/alpha/kernel/
pci-noop.c
145 struct scatterlist *sg; alpha_noop_map_sg() local
147 for_each_sg(sgl, sg, nents, i) { for_each_sg()
150 BUG_ON(!sg_page(sg)); for_each_sg()
151 va = sg_virt(sg); for_each_sg()
152 sg_dma_address(sg) = (dma_addr_t)virt_to_phys(va); for_each_sg()
153 sg_dma_len(sg) = sg->length; for_each_sg()
pci_iommu.c
290 assume it doesn't support sg mapping, and, since we tried to pci_map_single_1()
293 printk_once(KERN_WARNING "pci_map_single: no HW sg\n"); pci_map_single_1()
320 DBGA2("pci_map_single: [%p,%zx] np %ld -> sg %llx from %pf\n", pci_map_single_1()
426 DBGA2("pci_unmap_single: sg [%llx,%zx] np %ld from %pf\n", alpha_pci_unmap_page()
452 with vmalloc and sg if we can't find contiguous memory. */ alpha_pci_alloc_coherent()
505 sg_classify(struct device *dev, struct scatterlist *sg, struct scatterlist *end, sg_classify()
513 leader = sg; sg_classify()
518 /* we will not merge sg without device. */ sg_classify()
520 for (++sg; sg < end; ++sg) { sg_classify()
522 addr = SG_ENT_PHYS_ADDRESS(sg); sg_classify()
523 len = sg->length; sg_classify()
529 sg->dma_address = -1; sg_classify()
532 sg->dma_address = -2; sg_classify()
539 leader = sg; sg_classify()
561 struct scatterlist *sg; sg_fill()
612 DBGA(" sg_fill: [%p,%lx] -> sg %llx np %ld\n", sg_fill()
618 sg = leader; sg_fill()
621 struct scatterlist *last_sg = sg; sg_fill()
624 size = sg->length; sg_fill()
625 paddr = SG_ENT_PHYS_ADDRESS(sg); sg_fill()
627 while (sg+1 < end && (int) sg[1].dma_address == -1) { sg_fill()
628 size += sg[1].length; sg_fill()
629 sg++; sg_fill()
642 while (++last_sg <= sg) { sg_fill()
648 } while (++sg < end && (int) sg->dma_address < 0); sg_fill()
653 static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg, alpha_pci_map_sg()
670 sg->dma_length = sg->length; alpha_pci_map_sg()
671 sg->dma_address alpha_pci_map_sg()
672 = pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg), alpha_pci_map_sg()
673 sg->length, dac_allowed); alpha_pci_map_sg()
674 return sg->dma_address != 0; alpha_pci_map_sg()
677 start = sg; alpha_pci_map_sg()
678 end = sg + nents; alpha_pci_map_sg()
681 sg_classify(dev, sg, end, alpha_mv.mv_pci_tbi != 0); alpha_pci_map_sg()
698 for (out = sg; sg < end; ++sg) { alpha_pci_map_sg()
699 if ((int) sg->dma_address < 0) alpha_pci_map_sg()
701 if (sg_fill(dev, sg, end, out, arena, max_dma, dac_allowed) < 0) alpha_pci_map_sg()
731 static void alpha_pci_unmap_sg(struct device *dev, struct scatterlist *sg, alpha_pci_unmap_sg()
758 for (end = sg + nents; sg < end; ++sg) { alpha_pci_unmap_sg()
764 addr = sg->dma_address; alpha_pci_unmap_sg()
765 size = sg->dma_length; alpha_pci_unmap_sg()
772 sg - end + nents, addr, size); alpha_pci_unmap_sg()
780 sg - end + nents, addr, size); alpha_pci_unmap_sg()
784 DBGA(" (%ld) sg [%llx,%zx]\n", alpha_pci_unmap_sg()
785 sg - end + nents, addr, size); alpha_pci_unmap_sg()
804 DBGA("pci_unmap_sg: %ld entries\n", nents - (end - sg));
504 sg_classify(struct device *dev, struct scatterlist *sg, struct scatterlist *end, int virt_ok) sg_classify() argument
560 struct scatterlist *sg; sg_fill() local
652 alpha_pci_map_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, struct dma_attrs *attrs) alpha_pci_map_sg() argument
730 alpha_pci_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, struct dma_attrs *attrs) alpha_pci_unmap_sg() argument
/linux-4.1.27/drivers/gpu/drm/udl/
H A Dudl_dmabuf.c101 obj->sg = drm_prime_pages_to_sg(obj->pages, page_count); udl_map_dma_buf()
102 if (IS_ERR(obj->sg)) { udl_map_dma_buf()
104 return ERR_CAST(obj->sg); udl_map_dma_buf()
109 ret = sg_alloc_table(sgt, obj->sg->orig_nents, GFP_KERNEL); udl_map_dma_buf()
117 rd = obj->sg->sgl; udl_map_dma_buf()
217 struct sg_table *sg, udl_prime_create()
230 obj->sg = sg; udl_prime_create()
237 drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages); udl_prime_create()
247 struct sg_table *sg; udl_gem_prime_import() local
261 sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL); udl_gem_prime_import()
262 if (IS_ERR(sg)) { udl_gem_prime_import()
263 ret = PTR_ERR(sg); udl_gem_prime_import()
267 ret = udl_prime_create(dev, dma_buf->size, sg, &uobj); udl_gem_prime_import()
277 dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL); udl_gem_prime_import()
215 udl_prime_create(struct drm_device *dev, size_t size, struct sg_table *sg, struct udl_gem_object **obj_p) udl_prime_create() argument
/linux-4.1.27/arch/powerpc/kernel/
H A Ddma.c132 struct scatterlist *sg; dma_direct_map_sg() local
135 for_each_sg(sgl, sg, nents, i) { for_each_sg()
136 sg->dma_address = sg_phys(sg) + get_dma_offset(dev); for_each_sg()
137 sg->dma_length = sg->length; for_each_sg()
138 __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction); for_each_sg()
144 static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg, dma_direct_unmap_sg() argument
199 struct scatterlist *sg; dma_direct_sync_sg() local
202 for_each_sg(sgl, sg, nents, i) dma_direct_sync_sg()
203 __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction); dma_direct_sync_sg()
/linux-4.1.27/block/
H A Dblk-merge.c161 struct scatterlist **sg, int *nsegs, int *cluster) __blk_segment_map_sg()
166 if (*sg && *cluster) { __blk_segment_map_sg()
167 if ((*sg)->length + nbytes > queue_max_segment_size(q)) __blk_segment_map_sg()
175 (*sg)->length += nbytes; __blk_segment_map_sg()
178 if (!*sg) __blk_segment_map_sg()
179 *sg = sglist; __blk_segment_map_sg()
184 * prematurely unless it fully inits the sg __blk_segment_map_sg()
191 sg_unmark_end(*sg); __blk_segment_map_sg()
192 *sg = sg_next(*sg); __blk_segment_map_sg()
195 sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset); __blk_segment_map_sg()
203 struct scatterlist **sg) __blk_bios_map_sg()
229 *sg = sglist; __blk_bios_map_sg()
231 sg_set_page(*sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset); __blk_bios_map_sg()
237 __blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg, __blk_bios_map_sg()
244 * map a request to scatterlist, return number of sg entries setup. Caller
245 * must make sure sg can hold rq->nr_phys_segments entries
250 struct scatterlist *sg = NULL; blk_rq_map_sg() local
254 nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg); blk_rq_map_sg()
261 sg->length += pad_len; blk_rq_map_sg()
269 sg->page_link &= ~0x02; blk_rq_map_sg()
270 sg = sg_next(sg); blk_rq_map_sg()
271 sg_set_page(sg, virt_to_page(q->dma_drain_buffer), blk_rq_map_sg()
279 if (sg) blk_rq_map_sg()
280 sg_mark_end(sg); blk_rq_map_sg()
159 __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec, struct scatterlist *sglist, struct bio_vec *bvprv, struct scatterlist **sg, int *nsegs, int *cluster) __blk_segment_map_sg() argument
201 __blk_bios_map_sg(struct request_queue *q, struct bio *bio, struct scatterlist *sglist, struct scatterlist **sg) __blk_bios_map_sg() argument
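
The comment at blk-merge.c:244-245 spells out the caller contract for blk_rq_map_sg(): the scatterlist must hold rq->nr_phys_segments entries, and only the returned count is valid. A hedged sketch of a block driver honouring that contract (names are illustrative, not from the tree):

    #include <linux/blkdev.h>
    #include <linux/scatterlist.h>

    static int sketch_map_request(struct request_queue *q, struct request *rq,
                                  struct scatterlist *sgl)
    {
            struct scatterlist *sg;
            int nsegs, i;

            sg_init_table(sgl, queue_max_segments(q));
            nsegs = blk_rq_map_sg(q, rq, sgl);       /* entries actually filled */

            for_each_sg(sgl, sg, nsegs, i)
                    pr_debug("seg %d: len %u off %u\n", i, sg->length, sg->offset);

            return nsegs;
    }
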
H A Dblk-integrity.c93 struct scatterlist *sg = NULL; blk_rq_map_integrity_sg() local
107 if (sg->length + iv.bv_len > queue_max_segment_size(q)) bio_for_each_integrity_vec()
110 sg->length += iv.bv_len; bio_for_each_integrity_vec()
113 if (!sg) bio_for_each_integrity_vec()
114 sg = sglist; bio_for_each_integrity_vec()
116 sg_unmark_end(sg); bio_for_each_integrity_vec()
117 sg = sg_next(sg); bio_for_each_integrity_vec()
120 sg_set_page(sg, iv.bv_page, iv.bv_len, iv.bv_offset); bio_for_each_integrity_vec()
128 if (sg)
129 sg_mark_end(sg);
/linux-4.1.27/net/rds/
H A Dmessage.c55 * This relies on dma_map_sg() not touching sg[].page during merging.
218 * RDS ops use this to grab SG entries from the rm's sg pool.
271 struct scatterlist *sg; rds_message_copy_from_user() local
279 sg = rm->data.op_sg; rds_message_copy_from_user()
280 sg_off = 0; /* Dear gcc, sg->page will be null from kzalloc. */ rds_message_copy_from_user()
283 if (!sg_page(sg)) { rds_message_copy_from_user()
284 ret = rds_page_remainder_alloc(sg, iov_iter_count(from), rds_message_copy_from_user()
293 sg->length - sg_off); rds_message_copy_from_user()
296 nbytes = copy_page_from_iter(sg_page(sg), sg->offset + sg_off, rds_message_copy_from_user()
303 if (sg_off == sg->length) rds_message_copy_from_user()
304 sg++; rds_message_copy_from_user()
313 struct scatterlist *sg; rds_message_inc_copy_to_user() local
323 sg = rm->data.op_sg; rds_message_inc_copy_to_user()
329 sg->length - vec_off); rds_message_inc_copy_to_user()
333 ret = copy_page_to_iter(sg_page(sg), sg->offset + vec_off, rds_message_inc_copy_to_user()
341 if (vec_off == sg->length) { rds_message_inc_copy_to_user()
343 sg++; rds_message_inc_copy_to_user()
H A Dtcp_send.c81 unsigned int hdr_off, unsigned int sg, unsigned int off) rds_tcp_xmit()
119 while (sg < rm->data.op_nents) { rds_tcp_xmit()
121 sg_page(&rm->data.op_sg[sg]), rds_tcp_xmit()
122 rm->data.op_sg[sg].offset + off, rds_tcp_xmit()
123 rm->data.op_sg[sg].length - off, rds_tcp_xmit()
125 rdsdebug("tcp sendpage %p:%u:%u ret %d\n", (void *)sg_page(&rm->data.op_sg[sg]), rds_tcp_xmit()
126 rm->data.op_sg[sg].offset + off, rm->data.op_sg[sg].length - off, rds_tcp_xmit()
133 if (off == rm->data.op_sg[sg].length) { rds_tcp_xmit()
135 sg++; rds_tcp_xmit()
80 rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm, unsigned int hdr_off, unsigned int sg, unsigned int off) rds_tcp_xmit() argument
H A Diw_rdma.c83 struct scatterlist *sg, unsigned int nents);
251 static void rds_iw_set_scatterlist(struct rds_iw_scatterlist *sg, rds_iw_set_scatterlist() argument
254 sg->list = list; rds_iw_set_scatterlist()
255 sg->len = sg_len; rds_iw_set_scatterlist()
256 sg->dma_len = 0; rds_iw_set_scatterlist()
257 sg->dma_npages = 0; rds_iw_set_scatterlist()
258 sg->bytes = 0; rds_iw_set_scatterlist()
262 struct rds_iw_scatterlist *sg) rds_iw_map_scatterlist()
268 WARN_ON(sg->dma_len); rds_iw_map_scatterlist()
270 sg->dma_len = ib_dma_map_sg(dev, sg->list, sg->len, DMA_BIDIRECTIONAL); rds_iw_map_scatterlist()
271 if (unlikely(!sg->dma_len)) { rds_iw_map_scatterlist()
276 sg->bytes = 0; rds_iw_map_scatterlist()
277 sg->dma_npages = 0; rds_iw_map_scatterlist()
280 for (i = 0; i < sg->dma_len; ++i) { rds_iw_map_scatterlist()
281 unsigned int dma_len = ib_sg_dma_len(dev, &sg->list[i]); rds_iw_map_scatterlist()
282 u64 dma_addr = ib_sg_dma_address(dev, &sg->list[i]); rds_iw_map_scatterlist()
285 sg->bytes += dma_len; rds_iw_map_scatterlist()
294 if (i < sg->dma_len - 1) rds_iw_map_scatterlist()
299 sg->dma_npages += (end_addr - dma_addr) >> PAGE_SHIFT; rds_iw_map_scatterlist()
303 if (sg->dma_npages > fastreg_message_size) rds_iw_map_scatterlist()
306 dma_pages = kmalloc(sizeof(u64) * sg->dma_npages, GFP_ATOMIC); rds_iw_map_scatterlist()
312 for (i = j = 0; i < sg->dma_len; ++i) { rds_iw_map_scatterlist()
313 unsigned int dma_len = ib_sg_dma_len(dev, &sg->list[i]); rds_iw_map_scatterlist()
314 u64 dma_addr = ib_sg_dma_address(dev, &sg->list[i]); rds_iw_map_scatterlist()
321 BUG_ON(j > sg->dma_npages); rds_iw_map_scatterlist()
327 ib_dma_unmap_sg(rds_iwdev->dev, sg->list, sg->len, DMA_BIDIRECTIONAL); rds_iw_map_scatterlist()
328 sg->dma_len = 0; rds_iw_map_scatterlist()
591 void *rds_iw_get_mr(struct scatterlist *sg, unsigned long nents, rds_iw_get_mr() argument
625 ret = rds_iw_map_fastreg(rds_iwdev->mr_pool, ibmr, sg, nents); rds_iw_get_mr()
703 * in the sg list is added to the fast reg page list and placed rds_iw_rdma_build_fastreg()
760 struct scatterlist *sg, rds_iw_map_fastreg()
768 rds_iw_set_scatterlist(&mapping->m_sg, sg, sg_len); rds_iw_map_fastreg()
261 rds_iw_map_scatterlist(struct rds_iw_device *rds_iwdev, struct rds_iw_scatterlist *sg) rds_iw_map_scatterlist() argument
758 rds_iw_map_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr, struct scatterlist *sg, unsigned int sg_len) rds_iw_map_fastreg() argument
H A Dib.h238 struct scatterlist *sg, unsigned int sg_dma_len, int direction) rds_ib_dma_sync_sg_for_cpu()
244 ib_sg_dma_address(dev, &sg[i]), rds_ib_dma_sync_sg_for_cpu()
245 ib_sg_dma_len(dev, &sg[i]), rds_ib_dma_sync_sg_for_cpu()
252 struct scatterlist *sg, unsigned int sg_dma_len, int direction) rds_ib_dma_sync_sg_for_device()
258 ib_sg_dma_address(dev, &sg[i]), rds_ib_dma_sync_sg_for_device()
259 ib_sg_dma_len(dev, &sg[i]), rds_ib_dma_sync_sg_for_device()
305 void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
345 unsigned int hdr_off, unsigned int sg, unsigned int off);
237 rds_ib_dma_sync_sg_for_cpu(struct ib_device *dev, struct scatterlist *sg, unsigned int sg_dma_len, int direction) rds_ib_dma_sync_sg_for_cpu() argument
251 rds_ib_dma_sync_sg_for_device(struct ib_device *dev, struct scatterlist *sg, unsigned int sg_dma_len, int direction) rds_ib_dma_sync_sg_for_device() argument
H A Diw.h238 struct scatterlist *sg, unsigned int sg_dma_len, int direction) rds_iw_dma_sync_sg_for_cpu()
244 ib_sg_dma_address(dev, &sg[i]), rds_iw_dma_sync_sg_for_cpu()
245 ib_sg_dma_len(dev, &sg[i]), rds_iw_dma_sync_sg_for_cpu()
252 struct scatterlist *sg, unsigned int sg_dma_len, int direction) rds_iw_dma_sync_sg_for_device()
258 ib_sg_dma_address(dev, &sg[i]), rds_iw_dma_sync_sg_for_device()
259 ib_sg_dma_len(dev, &sg[i]), rds_iw_dma_sync_sg_for_device()
315 void *rds_iw_get_mr(struct scatterlist *sg, unsigned long nents,
353 unsigned int hdr_off, unsigned int sg, unsigned int off);
237 rds_iw_dma_sync_sg_for_cpu(struct ib_device *dev, struct scatterlist *sg, unsigned int sg_dma_len, int direction) rds_iw_dma_sync_sg_for_cpu() argument
251 rds_iw_dma_sync_sg_for_device(struct ib_device *dev, struct scatterlist *sg, unsigned int sg_dma_len, int direction) rds_iw_dma_sync_sg_for_device() argument
H A Dib_rdma.c58 struct scatterlist *sg; member in struct:rds_ib_mr
368 struct scatterlist *sg, unsigned int nents) rds_ib_map_fmr()
371 struct scatterlist *scat = sg; rds_ib_map_fmr()
379 sg_dma_len = ib_dma_map_sg(dev, sg, nents, rds_ib_map_fmr()
437 ibmr->sg = scat; rds_ib_map_fmr()
458 ib_dma_sync_sg_for_cpu(rds_ibdev->dev, ibmr->sg, rds_ib_sync_mr()
462 ib_dma_sync_sg_for_device(rds_ibdev->dev, ibmr->sg, rds_ib_sync_mr()
474 ibmr->sg, ibmr->sg_len, __rds_ib_teardown_mr()
484 struct page *page = sg_page(&ibmr->sg[i]); __rds_ib_teardown_mr()
492 kfree(ibmr->sg); __rds_ib_teardown_mr()
494 ibmr->sg = NULL; __rds_ib_teardown_mr()
743 void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents, rds_ib_get_mr() argument
767 ret = rds_ib_map_fmr(rds_ibdev, ibmr, sg, nents); rds_ib_get_mr()
367 rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibmr, struct scatterlist *sg, unsigned int nents) rds_ib_map_fmr() argument
H A Drdma.c179 struct scatterlist *sg; __rds_rdma_map() local
233 * pointers to the mr's sg array. We check to see if we've mapped __rds_rdma_map()
235 * to the sg array so that we can have one page ref cleanup path. __rds_rdma_map()
246 sg = kcalloc(nents, sizeof(*sg), GFP_KERNEL); __rds_rdma_map()
247 if (!sg) { __rds_rdma_map()
252 sg_init_table(sg, nents); __rds_rdma_map()
256 sg_set_page(&sg[i], pages[i], PAGE_SIZE, 0); __rds_rdma_map()
264 trans_private = rs->rs_transport->get_mr(sg, nents, rs, __rds_rdma_map()
269 put_page(sg_page(&sg[i])); __rds_rdma_map()
270 kfree(sg); __rds_rdma_map()
669 struct scatterlist *sg; rds_cmsg_rdma_args() local
671 sg = &op->op_sg[op->op_nents + j]; rds_cmsg_rdma_args()
672 sg_set_page(sg, pages[j], rds_cmsg_rdma_args()
676 rdsdebug("RDS: sg->offset %x sg->len %x iov->addr %llx iov->bytes %llu\n", rds_cmsg_rdma_args()
677 sg->offset, sg->length, iov->addr, iov->bytes); rds_cmsg_rdma_args()
679 iov->addr += sg->length; rds_cmsg_rdma_args()
680 iov->bytes -= sg->length; rds_cmsg_rdma_args()
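
The __rds_rdma_map() and rds_cmsg_rdma_args() hits show the standard way to describe an array of pinned pages: initialise the table once, then point one entry at each page. A condensed sketch of just that step, assuming whole pages at offset 0:

    #include <linux/mm.h>
    #include <linux/scatterlist.h>

    static void sketch_sg_from_pages(struct scatterlist *sg,
                                     struct page **pages, unsigned int nents)
    {
            unsigned int i;

            sg_init_table(sg, nents);                /* zeroes entries, marks the last */
            for (i = 0; i < nents; i++)
                    sg_set_page(&sg[i], pages[i], PAGE_SIZE, 0);
    }
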
/linux-4.1.27/drivers/dma/ipu/
H A Dipu_idmac.c777 struct idmac_tx_desc *desc, struct scatterlist *sg, int buf_idx) ipu_submit_buffer()
791 ipu_update_channel_buffer(ichan, buf_idx, sg_dma_address(sg)); ipu_submit_buffer()
794 dev_dbg(dev, "Updated sg %p on channel 0x%x buffer %d\n", ipu_submit_buffer()
795 sg, chan_id, buf_idx); ipu_submit_buffer()
804 struct scatterlist *sg; ipu_submit_channel_buffers() local
807 for (i = 0, sg = desc->sg; i < 2 && sg; i++) { ipu_submit_channel_buffers()
808 if (!ichan->sg[i]) { ipu_submit_channel_buffers()
809 ichan->sg[i] = sg; ipu_submit_channel_buffers()
811 ret = ipu_submit_buffer(ichan, desc, sg, i); ipu_submit_channel_buffers()
815 sg = sg_next(sg); ipu_submit_channel_buffers()
847 * Initial buffer assignment - the first two sg-entries from idmac_tx_submit()
850 dma_addr_t dma_1 = sg_is_last(desc->sg) ? 0 : idmac_tx_submit()
851 sg_dma_address(&desc->sg[1]); idmac_tx_submit()
853 WARN_ON(ichan->sg[0] || ichan->sg[1]); idmac_tx_submit()
861 sg_dma_address(&desc->sg[0]), idmac_tx_submit()
867 dev_dbg(dev, "Submitting sg %p\n", &desc->sg[0]); idmac_tx_submit()
875 /* submit_buffers() atomically verifies and fills empty sg slots */ idmac_tx_submit()
1126 struct idmac_tx_desc **desc, struct scatterlist *sg) idmac_sg_next()
1128 struct scatterlist *sgnew = sg ? sg_next(sg) : NULL; idmac_sg_next()
1131 /* next sg-element in this list */ idmac_sg_next()
1140 return (*desc)->sg; idmac_sg_next()
1147 * not last sg              next not last sg
1148 * not last sg              next last sg
1149 * last sg                  first sg from next descriptor
1150 * last sg                  NULL
1160 struct scatterlist **sg, *sgnext, *sgnew = NULL; idmac_interrupt() local
1185 * ichan->sg[0] = ichan->sg[1] = NULL; idmac_interrupt()
1188 * sg elements. idmac_interrupt()
1210 ichan->sg[ichan->active_buffer] = NULL; idmac_interrupt()
1224 sg = &ichan->sg[ichan->active_buffer]; idmac_interrupt()
1225 sgnext = ichan->sg[!ichan->active_buffer]; idmac_interrupt()
1227 if (!*sg) { idmac_interrupt()
1236 irq, (u64)sg_dma_address(*sg), idmac_interrupt()
1241 sgnew = idmac_sg_next(ichan, &descnew, *sg); idmac_interrupt()
1246 * if sgnext == NULL sg must be the last element in a scatterlist and idmac_interrupt()
1250 if (!WARN_ON(sg_next(*sg))) idmac_interrupt()
1252 ichan->sg[!ichan->active_buffer] = sgnew; idmac_interrupt()
1265 /* Calculate and submit the next sg element */ idmac_interrupt()
1268 if (unlikely(!sg_next(*sg)) || !sgnext) { idmac_interrupt()
1277 *sg = sgnew; idmac_interrupt()
1315 struct scatterlist *sg; ipu_gc_tasklet() local
1323 for_each_sg(desc->sg, sg, desc->sg_len, k) { ipu_gc_tasklet()
1324 if (ichan->sg[0] == sg) ipu_gc_tasklet()
1325 ichan->sg[0] = NULL; ipu_gc_tasklet()
1326 else if (ichan->sg[1] == sg) ipu_gc_tasklet()
1327 ichan->sg[1] = NULL; ipu_gc_tasklet()
1367 desc->sg = sgl; idmac_prep_slave_sg()
1418 ichan->sg[0] = NULL; idmac_pause()
1419 ichan->sg[1] = NULL; idmac_pause()
1457 ichan->sg[0] = NULL; __idmac_terminate_all()
1458 ichan->sg[1] = NULL; __idmac_terminate_all()
776 ipu_submit_buffer(struct idmac_channel *ichan, struct idmac_tx_desc *desc, struct scatterlist *sg, int buf_idx) ipu_submit_buffer() argument
1125 idmac_sg_next(struct idmac_channel *ichan, struct idmac_tx_desc **desc, struct scatterlist *sg) idmac_sg_next() argument
/linux-4.1.27/drivers/media/v4l2-core/
H A DMakefile26 obj-$(CONFIG_VIDEOBUF_DMA_SG) += videobuf-dma-sg.o
35 obj-$(CONFIG_VIDEOBUF2_DMA_SG) += videobuf2-dma-sg.o
/linux-4.1.27/drivers/infiniband/hw/ipath/
H A Dipath_dma.c104 struct scatterlist *sg; ipath_map_sg() local
111 for_each_sg(sgl, sg, nents, i) { for_each_sg()
112 addr = (u64) page_address(sg_page(sg)); for_each_sg()
118 sg->dma_address = addr + sg->offset; for_each_sg()
120 sg->dma_length = sg->length; for_each_sg()
127 struct scatterlist *sg, int nents, ipath_unmap_sg()
126 ipath_unmap_sg(struct ib_device *dev, struct scatterlist *sg, int nents, enum dma_data_direction direction) ipath_unmap_sg() argument
/linux-4.1.27/drivers/infiniband/hw/qib/
H A Dqib_dma.c97 struct scatterlist *sg; qib_map_sg() local
104 for_each_sg(sgl, sg, nents, i) { for_each_sg()
105 addr = (u64) page_address(sg_page(sg)); for_each_sg()
111 sg->dma_address = addr + sg->offset; for_each_sg()
113 sg->dma_length = sg->length; for_each_sg()
120 struct scatterlist *sg, int nents, qib_unmap_sg()
119 qib_unmap_sg(struct ib_device *dev, struct scatterlist *sg, int nents, enum dma_data_direction direction) qib_unmap_sg() argument
/linux-4.1.27/arch/sparc/kernel/
H A Diommu_common.h42 struct scatterlist *sg) is_span_boundary()
45 int nr = iommu_num_pages(paddr, outs->dma_length + sg->length, is_span_boundary()
38 is_span_boundary(unsigned long entry, unsigned long shift, unsigned long boundary_size, struct scatterlist *outs, struct scatterlist *sg) is_span_boundary() argument
H A Dioport.c379 static int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n, sbus_map_sg() argument
382 mmu_get_scsi_sgl(dev, sg, n); sbus_map_sg()
386 static void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n, sbus_unmap_sg() argument
389 mmu_release_scsi_sgl(dev, sg, n); sbus_unmap_sg()
392 static void sbus_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, sbus_sync_sg_for_cpu() argument
398 static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg, sbus_sync_sg_for_device() argument
553 struct scatterlist *sg; pci32_map_sg() local
557 for_each_sg(sgl, sg, nents, n) { for_each_sg()
558 sg->dma_address = sg_phys(sg); for_each_sg()
559 sg->dma_length = sg->length; for_each_sg()
572 struct scatterlist *sg; pci32_unmap_sg() local
576 for_each_sg(sgl, sg, nents, n) { for_each_sg()
577 dma_make_coherent(sg_phys(sg), PAGE_ALIGN(sg->length)); for_each_sg()
617 struct scatterlist *sg; pci32_sync_sg_for_cpu() local
621 for_each_sg(sgl, sg, nents, n) { for_each_sg()
622 dma_make_coherent(sg_phys(sg), PAGE_ALIGN(sg->length)); for_each_sg()
630 struct scatterlist *sg; pci32_sync_sg_for_device() local
634 for_each_sg(sgl, sg, nents, n) { for_each_sg()
635 dma_make_coherent(sg_phys(sg), PAGE_ALIGN(sg->length)); for_each_sg()
H A Diommu.c590 static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg) fetch_sg_ctx() argument
599 bus_addr = sg->dma_address & IO_PAGE_MASK; fetch_sg_ctx()
613 struct scatterlist *sg; dma_4u_unmap_sg() local
626 sg = sglist; dma_4u_unmap_sg()
628 dma_addr_t dma_handle = sg->dma_address; dma_4u_unmap_sg()
629 unsigned int len = sg->dma_length; dma_4u_unmap_sg()
652 sg = sg_next(sg); dma_4u_unmap_sg()
705 struct scatterlist *sg, *sgprv; dma_4u_sync_sg_for_cpu() local
731 for_each_sg(sglist, sg, nelems, i) { for_each_sg()
732 if (sg->dma_length == 0) for_each_sg()
734 sgprv = sg; for_each_sg()
/linux-4.1.27/arch/mn10300/include/asm/
H A Ddma-mapping.h54 struct scatterlist *sg; dma_map_sg() local
60 for_each_sg(sglist, sg, nents, i) { for_each_sg()
61 BUG_ON(!sg_page(sg)); for_each_sg()
63 sg->dma_address = sg_phys(sg); for_each_sg()
71 void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, dma_unmap_sg() argument
123 void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, dma_sync_sg_for_cpu() argument
129 void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, dma_sync_sg_for_device() argument
/linux-4.1.27/arch/x86/kernel/
H A Dpci-nommu.c56 static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg, nommu_map_sg() argument
63 WARN_ON(nents == 0 || sg[0].length == 0); nommu_map_sg()
65 for_each_sg(sg, s, nents, i) { for_each_sg()
85 struct scatterlist *sg, int nelems, nommu_sync_sg_for_device()
84 nommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction dir) nommu_sync_sg_for_device() argument
H A Damd_gart_64.c288 static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, gart_unmap_sg() argument
294 for_each_sg(sg, s, nents, i) { for_each_sg()
302 static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg, dma_map_sg_nonforce() argument
312 for_each_sg(sg, s, nents, i) { for_each_sg()
319 gart_unmap_sg(dev, sg, i, dir, NULL); for_each_sg()
321 sg[0].dma_length = 0; for_each_sg()
389 static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, gart_map_sg() argument
406 start_sg = sg; gart_map_sg()
407 sgmap = sg; gart_map_sg()
412 for_each_sg(sg, s, nents, i) { for_each_sg()
460 gart_unmap_sg(dev, sg, out, dir, NULL);
464 out = dma_map_sg_nonforce(dev, sg, nents, dir);
472 for_each_sg(sg, s, nents, i)
/linux-4.1.27/include/linux/platform_data/
H A Dmmc-davinci.h26 /* Number of sg segments */
H A Ddma-ste-dma40.h184 struct scatterlist sg; stedma40_slave_mem() local
185 sg_init_table(&sg, 1); stedma40_slave_mem()
186 sg.dma_address = addr; stedma40_slave_mem()
187 sg.length = size; stedma40_slave_mem()
189 return dmaengine_prep_slave_sg(chan, &sg, 1, direction, flags); stedma40_slave_mem()
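
stedma40_slave_mem() above wraps a single address in a one-entry scatterlist before handing it to the dmaengine core. For the general case, a hedged sketch of a client submitting a full sg list, assuming the channel and mapping are already set up (names are illustrative):

    #include <linux/dmaengine.h>
    #include <linux/scatterlist.h>

    static int sketch_submit_sg(struct dma_chan *chan, struct scatterlist *sgl,
                                unsigned int nents)
    {
            struct dma_async_tx_descriptor *desc;

            desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
                                           DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
            if (!desc)
                    return -EINVAL;

            dmaengine_submit(desc);                  /* queue the descriptor */
            dma_async_issue_pending(chan);           /* start the channel */
            return 0;
    }
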
/linux-4.1.27/arch/arm64/mm/
H A Ddma-mapping.c233 struct scatterlist *sg; __swiotlb_map_sg_attrs() local
238 for_each_sg(sgl, sg, ret, i) __swiotlb_map_sg_attrs()
239 __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)), __swiotlb_map_sg_attrs()
240 sg->length, dir); __swiotlb_map_sg_attrs()
250 struct scatterlist *sg; __swiotlb_unmap_sg_attrs() local
254 for_each_sg(sgl, sg, nelems, i) __swiotlb_unmap_sg_attrs()
255 __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)), __swiotlb_unmap_sg_attrs()
256 sg->length, dir); __swiotlb_unmap_sg_attrs()
282 struct scatterlist *sg; __swiotlb_sync_sg_for_cpu() local
286 for_each_sg(sgl, sg, nelems, i) __swiotlb_sync_sg_for_cpu()
287 __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)), __swiotlb_sync_sg_for_cpu()
288 sg->length, dir); __swiotlb_sync_sg_for_cpu()
296 struct scatterlist *sg; __swiotlb_sync_sg_for_device() local
301 for_each_sg(sgl, sg, nelems, i) __swiotlb_sync_sg_for_device()
302 __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)), __swiotlb_sync_sg_for_device()
303 sg->length, dir); __swiotlb_sync_sg_for_device()
/linux-4.1.27/drivers/mmc/core/
H A Dsd_ops.c254 struct scatterlist sg; mmc_app_send_scr() local
284 data.sg = &sg; mmc_app_send_scr()
287 sg_init_one(&sg, data_buf, 8); mmc_app_send_scr()
313 struct scatterlist sg; mmc_sd_switch() local
335 data.sg = &sg; mmc_sd_switch()
338 sg_init_one(&sg, resp, 64); mmc_sd_switch()
358 struct scatterlist sg; mmc_app_sd_status() local
380 data.sg = &sg; mmc_app_sd_status()
383 sg_init_one(&sg, ssr, 64); mmc_app_sd_status()
H A Dsdio_ops.c127 struct scatterlist sg, *sg_ptr; mmc_io_rw_extended() local
165 data.sg = sgtable.sgl; mmc_io_rw_extended()
168 for_each_sg(data.sg, sg_ptr, data.sg_len, i) { for_each_sg()
175 data.sg = &sg;
178 sg_init_one(&sg, buf, left_size);
H A Dmmc_ops.c296 struct scatterlist sg; mmc_send_cxd_data() local
314 data.sg = &sg; mmc_send_cxd_data()
317 sg_init_one(&sg, buf, len); mmc_send_cxd_data()
585 struct scatterlist sg; mmc_send_tuning() local
624 data.sg = &sg; mmc_send_tuning()
626 sg_init_one(&sg, data_buf, size); mmc_send_tuning()
656 struct scatterlist sg; mmc_send_bus_test() local
703 data.sg = &sg; mmc_send_bus_test()
706 sg_init_one(&sg, data_buf, len); mmc_send_bus_test()
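
Every mmc core hit above follows the same one-buffer recipe: sg_init_one() describes the buffer and struct mmc_data points at it. A hedged sketch of that recipe with hypothetical names, assuming a read transfer; the buffer and scatterlist must stay valid until the request completes, which is why the callers above keep them on the stack of the waiting function:

    #include <linux/mmc/core.h>
    #include <linux/scatterlist.h>

    static void sketch_fill_mmc_data(struct mmc_data *data, struct scatterlist *sg,
                                     void *buf, unsigned int len)
    {
            sg_init_one(sg, buf, len);               /* one entry, marked as list end */
            data->sg = sg;
            data->sg_len = 1;
            data->blksz = len;
            data->blocks = 1;
            data->flags = MMC_DATA_READ;             /* assumption: device-to-host */
    }
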
/linux-4.1.27/arch/parisc/include/asm/
H A Ddma-mapping.h16 int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction direction);
17 void (*unmap_sg)(struct device *dev, struct scatterlist *sg, int nhwents, enum dma_data_direction direction);
20 void (*dma_sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction direction);
21 void (*dma_sync_sg_for_device)(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction direction);
95 dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, dma_map_sg() argument
98 return hppa_dma_ops->map_sg(dev, sg, nents, direction); dma_map_sg()
102 dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, dma_unmap_sg() argument
105 hppa_dma_ops->unmap_sg(dev, sg, nhwentries, direction); dma_unmap_sg()
158 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, dma_sync_sg_for_cpu() argument
162 hppa_dma_ops->dma_sync_sg_for_cpu(dev, sg, nelems, direction); dma_sync_sg_for_cpu()
166 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, dma_sync_sg_for_device() argument
170 hppa_dma_ops->dma_sync_sg_for_device(dev, sg, nelems, direction); dma_sync_sg_for_device()
/linux-4.1.27/arch/ia64/sn/pci/
H A Dpci_dma.c21 #define SG_ENT_VIRT_ADDRESS(sg) (sg_virt((sg)))
234 * @sg: scatterlist to unmap
248 struct scatterlist *sg; sn_dma_unmap_sg() local
252 for_each_sg(sgl, sg, nhwentries, i) { for_each_sg()
253 provider->dma_unmap(pdev, sg->dma_address, dir); for_each_sg()
254 sg->dma_address = (dma_addr_t) NULL; for_each_sg()
255 sg->dma_length = 0; for_each_sg()
262 * @sg: scatterlist to map
272 * Maps each entry of @sg for DMA.
279 struct scatterlist *saved_sg = sgl, *sg; sn_dma_map_sg() local
292 for_each_sg(sgl, sg, nhwentries, i) { for_each_sg()
294 phys_addr = SG_ENT_PHYS_ADDRESS(sg); for_each_sg()
298 sg->length, for_each_sg()
302 sg->length, for_each_sg()
305 sg->dma_address = dma_addr; for_each_sg()
306 if (!sg->dma_address) { for_each_sg()
317 sg->dma_length = sg->length; for_each_sg()
336 static void sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, sn_dma_sync_sg_for_cpu() argument
342 static void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, sn_dma_sync_sg_for_device() argument
/linux-4.1.27/arch/cris/include/asm/
H A Ddma-mapping.h56 dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, dma_map_sg() argument
59 printk("Map sg\n"); dma_map_sg()
80 dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, dma_unmap_sg() argument
113 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, dma_sync_sg_for_cpu() argument
119 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, dma_sync_sg_for_device() argument
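
The parisc and cris wrappers above all reduce to the same driver-visible contract: program the hardware with only the count that dma_map_sg() returns, and unmap with the original nents. A minimal sketch of a caller honouring that contract (device, direction and names are illustrative):

    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>

    static int sketch_dma_roundtrip(struct device *dev, struct scatterlist *sgl,
                                    int nents)
    {
            struct scatterlist *sg;
            int mapped, i;

            mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
            if (!mapped)
                    return -ENOMEM;

            for_each_sg(sgl, sg, mapped, i) {
                    /* hypothetical hardware programming step */
                    pr_debug("desc %d: len %u\n", i, sg_dma_len(sg));
            }

            dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);    /* original nents */
            return 0;
    }
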
/linux-4.1.27/drivers/gpu/drm/i915/
H A Di915_gem_dmabuf.c53 /* Copy sg so that we make an independent mapping */ i915_gem_map_dma_buf()
93 struct sg_table *sg, i915_gem_unmap_dma_buf()
100 dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir); i915_gem_unmap_dma_buf()
101 sg_free_table(sg); i915_gem_unmap_dma_buf()
102 kfree(sg); i915_gem_unmap_dma_buf()
252 struct sg_table *sg; i915_gem_object_get_pages_dmabuf() local
254 sg = dma_buf_map_attachment(obj->base.import_attach, DMA_BIDIRECTIONAL); i915_gem_object_get_pages_dmabuf()
255 if (IS_ERR(sg)) i915_gem_object_get_pages_dmabuf()
256 return PTR_ERR(sg); i915_gem_object_get_pages_dmabuf()
258 obj->pages = sg; i915_gem_object_get_pages_dmabuf()
92 i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment, struct sg_table *sg, enum dma_data_direction dir) i915_gem_unmap_dma_buf() argument
/linux-4.1.27/drivers/gpu/drm/
H A Ddrm_scatter.c70 if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg && drm_legacy_sg_cleanup()
72 drm_sg_cleanup(dev->sg); drm_legacy_sg_cleanup()
73 dev->sg = NULL; drm_legacy_sg_cleanup()
97 if (dev->sg) drm_legacy_sg_alloc()
149 dev->sg = entry; drm_legacy_sg_alloc()
210 entry = dev->sg; drm_legacy_sg_free()
211 dev->sg = NULL; drm_legacy_sg_free()
H A Ddrm_prime.c668 * drm_prime_pages_to_sg - converts a page array into an sg list
672 * This helper creates an sg table object from a set of pages
678 struct sg_table *sg = NULL; drm_prime_pages_to_sg() local
681 sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL); drm_prime_pages_to_sg()
682 if (!sg) { drm_prime_pages_to_sg()
687 ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0, drm_prime_pages_to_sg()
692 return sg; drm_prime_pages_to_sg()
694 kfree(sg); drm_prime_pages_to_sg()
700 * drm_prime_sg_to_page_addr_arrays - convert an sg table into a page array
706 * Exports an sg table into an array of pages and addresses. This is currently
713 struct scatterlist *sg; drm_prime_sg_to_page_addr_arrays() local
720 for_each_sg(sgt->sgl, sg, sgt->nents, count) { drm_prime_sg_to_page_addr_arrays()
721 len = sg->length; drm_prime_sg_to_page_addr_arrays()
722 page = sg_page(sg); drm_prime_sg_to_page_addr_arrays()
723 addr = sg_dma_address(sg); drm_prime_sg_to_page_addr_arrays()
745 * @sg: the sg-table which was pinned at import time
750 void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg) drm_prime_gem_destroy() argument
755 if (sg) drm_prime_gem_destroy()
756 dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL); drm_prime_gem_destroy()
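
drm_prime_pages_to_sg() above builds its table with sg_alloc_table_from_pages(), which also merges physically adjacent pages into fewer entries. A hedged sketch of that building block in isolation, with error handling trimmed to the essentials:

    #include <linux/err.h>
    #include <linux/mm.h>
    #include <linux/scatterlist.h>
    #include <linux/slab.h>

    static struct sg_table *sketch_pages_to_sgt(struct page **pages,
                                                unsigned int nr_pages)
    {
            struct sg_table *sgt;
            int ret;

            sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
            if (!sgt)
                    return ERR_PTR(-ENOMEM);

            ret = sg_alloc_table_from_pages(sgt, pages, nr_pages, 0,
                                            (unsigned long)nr_pages << PAGE_SHIFT,
                                            GFP_KERNEL);
            if (ret) {
                    kfree(sgt);
                    return ERR_PTR(ret);
            }
            return sgt;              /* caller frees with sg_free_table() + kfree() */
    }
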
/linux-4.1.27/drivers/dma/sh/
H A Dusb-dmac.c46 * @sg_allocated_len: length of allocated sg
47 * @sg_len: length of sg
48 * @sg_index: index of sg
52 * @sg: information for the transfer
63 struct usb_dmac_sg sg[0]; member in struct:usb_dmac_desc
200 struct usb_dmac_sg *sg = desc->sg + index; usb_dmac_chan_start_sg() local
206 dst_addr = sg->mem_addr; usb_dmac_chan_start_sg()
208 src_addr = sg->mem_addr; usb_dmac_chan_start_sg()
211 "chan%u: queue sg %p: %u@%pad -> %pad\n", usb_dmac_chan_start_sg()
212 chan->index, sg, sg->size, &src_addr, &dst_addr); usb_dmac_chan_start_sg()
217 DIV_ROUND_UP(sg->size, USB_DMAC_XFER_SIZE)); usb_dmac_chan_start_sg()
218 usb_dmac_chan_write(chan, USB_DMATEND, usb_dmac_calc_tend(sg->size)); usb_dmac_chan_start_sg()
272 desc = kzalloc(sizeof(*desc) + sg_len * sizeof(desc->sg[0]), gfp); usb_dmac_desc_alloc()
425 struct scatterlist *sg; usb_dmac_prep_slave_sg() local
440 for_each_sg(sgl, sg, sg_len, i) { for_each_sg()
441 desc->sg[i].mem_addr = sg_dma_address(sg); for_each_sg()
442 desc->sg[i].size = sg_dma_len(sg); for_each_sg()
474 struct usb_dmac_sg *sg = desc->sg + sg_index; usb_dmac_get_current_residue() local
475 u32 mem_addr = sg->mem_addr & 0xffffffff; usb_dmac_get_current_residue()
476 unsigned int residue = sg->size; usb_dmac_get_current_residue()
523 residue += desc->sg[i].size; usb_dmac_chan_get_residue()
525 /* Add the residue for the current sg */ usb_dmac_chan_get_residue()
/linux-4.1.27/net/9p/
H A Dtrans_virtio.c69 * @sg: scatter gather list which is used to pack a request (protected?)
92 struct scatterlist sg[VIRTQUEUE_NUM]; member in struct:virtio_chan
173 * @sg: scatter/gather list to pack into
185 static int pack_sg_list(struct scatterlist *sg, int start, pack_sg_list() argument
197 sg_unmark_end(&sg[index]); pack_sg_list()
198 sg_set_buf(&sg[index++], data, s); pack_sg_list()
203 sg_mark_end(&sg[index - 1]); pack_sg_list()
216 * @sg: scatter/gather list to pack into
218 * @pdata: a list of pages to add into sg.
224 pack_sg_list_p(struct scatterlist *sg, int start, int limit, pack_sg_list_p() argument
241 sg_unmark_end(&sg[index]); pack_sg_list_p()
242 sg_set_page(&sg[index++], pdata[i++], s, data_off); pack_sg_list_p()
249 sg_mark_end(&sg[index - 1]); pack_sg_list_p()
277 out = pack_sg_list(chan->sg, 0, p9_virtio_request()
280 sgs[out_sgs++] = chan->sg; p9_virtio_request()
282 in = pack_sg_list(chan->sg, out, p9_virtio_request()
285 sgs[out_sgs + in_sgs++] = chan->sg + out; p9_virtio_request()
441 out = pack_sg_list(chan->sg, 0, p9_virtio_zc_request()
445 sgs[out_sgs++] = chan->sg; p9_virtio_zc_request()
448 sgs[out_sgs++] = chan->sg + out; p9_virtio_zc_request()
449 out += pack_sg_list_p(chan->sg, out, VIRTQUEUE_NUM, p9_virtio_zc_request()
460 in = pack_sg_list(chan->sg, out, p9_virtio_zc_request()
463 sgs[out_sgs + in_sgs++] = chan->sg + out; p9_virtio_zc_request()
466 sgs[out_sgs + in_sgs++] = chan->sg + out + in; p9_virtio_zc_request()
467 in += pack_sg_list_p(chan->sg, out + in, VIRTQUEUE_NUM, p9_virtio_zc_request()
575 sg_init_table(chan->sg, VIRTQUEUE_NUM); p9_virtio_probe()
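
pack_sg_list() above is the manual way to feed a linear buffer into a pre-initialised sg array: unmark the entry, set it, and re-mark the end when done. A trimmed sketch of the same loop, assuming the array was sg_init_table()'d beforehand as in p9_virtio_probe():

    #include <linux/kernel.h>
    #include <linux/mm.h>
    #include <linux/scatterlist.h>

    static int sketch_pack_buf(struct scatterlist *sg, int limit,
                               char *data, int count)
    {
            int index = 0;

            while (count) {
                    int s = min_t(int, count, PAGE_SIZE - offset_in_page(data));

                    if (index >= limit)
                            return -ENOSPC;          /* out of sg slots */
                    sg_unmark_end(&sg[index]);
                    sg_set_buf(&sg[index++], data, s);
                    data += s;
                    count -= s;
            }
            if (index)
                    sg_mark_end(&sg[index - 1]);     /* terminate the list here */
            return index;
    }
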
/linux-4.1.27/drivers/media/pci/cx25821/
H A Dcx25821-core.c1011 struct scatterlist *sg; cx25821_risc_field() local
1025 sg = sglist; cx25821_risc_field()
1027 while (offset && offset >= sg_dma_len(sg)) { cx25821_risc_field()
1028 offset -= sg_dma_len(sg); cx25821_risc_field()
1029 sg = sg_next(sg); cx25821_risc_field()
1031 if (bpl <= sg_dma_len(sg) - offset) { cx25821_risc_field()
1035 *(rp++) = cpu_to_le32(sg_dma_address(sg) + offset); cx25821_risc_field()
1042 (sg_dma_len(sg) - offset)); cx25821_risc_field()
1043 *(rp++) = cpu_to_le32(sg_dma_address(sg) + offset); cx25821_risc_field()
1045 todo -= (sg_dma_len(sg) - offset); cx25821_risc_field()
1047 sg = sg_next(sg); cx25821_risc_field()
1048 while (todo > sg_dma_len(sg)) { cx25821_risc_field()
1050 sg_dma_len(sg)); cx25821_risc_field()
1051 *(rp++) = cpu_to_le32(sg_dma_address(sg)); cx25821_risc_field()
1053 todo -= sg_dma_len(sg); cx25821_risc_field()
1054 sg = sg_next(sg); cx25821_risc_field()
1057 *(rp++) = cpu_to_le32(sg_dma_address(sg)); cx25821_risc_field()
1122 struct scatterlist *sg; cx25821_risc_field_audio() local
1130 sg = sglist; cx25821_risc_field_audio()
1132 while (offset && offset >= sg_dma_len(sg)) { cx25821_risc_field_audio()
1133 offset -= sg_dma_len(sg); cx25821_risc_field_audio()
1134 sg = sg_next(sg); cx25821_risc_field_audio()
1142 if (bpl <= sg_dma_len(sg) - offset) { cx25821_risc_field_audio()
1146 *(rp++) = cpu_to_le32(sg_dma_address(sg) + offset); cx25821_risc_field_audio()
1153 (sg_dma_len(sg) - offset)); cx25821_risc_field_audio()
1154 *(rp++) = cpu_to_le32(sg_dma_address(sg) + offset); cx25821_risc_field_audio()
1156 todo -= (sg_dma_len(sg) - offset); cx25821_risc_field_audio()
1158 sg = sg_next(sg); cx25821_risc_field_audio()
1159 while (todo > sg_dma_len(sg)) { cx25821_risc_field_audio()
1161 sg_dma_len(sg)); cx25821_risc_field_audio()
1162 *(rp++) = cpu_to_le32(sg_dma_address(sg)); cx25821_risc_field_audio()
1164 todo -= sg_dma_len(sg); cx25821_risc_field_audio()
1165 sg = sg_next(sg); cx25821_risc_field_audio()
1168 *(rp++) = cpu_to_le32(sg_dma_address(sg)); cx25821_risc_field_audio()
/linux-4.1.27/drivers/scsi/libfc/
H A Dfc_libfc.c105 * @sg: pointer to the pointer of the SG list.
112 struct scatterlist *sg, fc_copy_buffer_to_sglist()
119 while (remaining > 0 && sg) { fc_copy_buffer_to_sglist()
123 if (*offset >= sg->length) { fc_copy_buffer_to_sglist()
131 *offset -= sg->length; fc_copy_buffer_to_sglist()
132 sg = sg_next(sg); fc_copy_buffer_to_sglist()
135 sg_bytes = min(remaining, sg->length - *offset); fc_copy_buffer_to_sglist()
141 off = *offset + sg->offset; fc_copy_buffer_to_sglist()
144 page_addr = kmap_atomic(sg_page(sg) + (off >> PAGE_SHIFT)); fc_copy_buffer_to_sglist()
111 fc_copy_buffer_to_sglist(void *buf, size_t len, struct scatterlist *sg, u32 *nents, size_t *offset, u32 *crc) fc_copy_buffer_to_sglist() argument
/linux-4.1.27/drivers/usb/storage/
H A Dprotocol.c139 struct scatterlist *sg = *sgptr; usb_stor_access_xfer_buf() local
143 if (sg) usb_stor_access_xfer_buf()
144 nents = sg_nents(sg); usb_stor_access_xfer_buf()
146 sg = scsi_sglist(srb); usb_stor_access_xfer_buf()
148 sg_miter_start(&miter, sg, nents, dir == FROM_XFER_BUF ? usb_stor_access_xfer_buf()
163 if (*offset + len < miter.piter.sg->length) { usb_stor_access_xfer_buf()
165 *sgptr = miter.piter.sg; usb_stor_access_xfer_buf()
168 *sgptr = sg_next(miter.piter.sg); usb_stor_access_xfer_buf()
185 struct scatterlist *sg = NULL; usb_stor_set_xfer_buf() local
188 buflen = usb_stor_access_xfer_buf(buffer, buflen, srb, &sg, &offset, usb_stor_set_xfer_buf()
/linux-4.1.27/drivers/target/iscsi/
H A Discsi_target_auth.c190 struct scatterlist sg; chap_server_compute_md5() local
263 sg_init_one(&sg, &chap->id, 1); chap_server_compute_md5()
264 ret = crypto_hash_update(&desc, &sg, 1); chap_server_compute_md5()
271 sg_init_one(&sg, &auth->password, strlen(auth->password)); chap_server_compute_md5()
272 ret = crypto_hash_update(&desc, &sg, strlen(auth->password)); chap_server_compute_md5()
279 sg_init_one(&sg, chap->challenge, CHAP_CHALLENGE_LENGTH); chap_server_compute_md5()
280 ret = crypto_hash_update(&desc, &sg, CHAP_CHALLENGE_LENGTH); chap_server_compute_md5()
392 sg_init_one(&sg, &id_as_uchar, 1); chap_server_compute_md5()
393 ret = crypto_hash_update(&desc, &sg, 1); chap_server_compute_md5()
400 sg_init_one(&sg, auth->password_mutual, chap_server_compute_md5()
402 ret = crypto_hash_update(&desc, &sg, strlen(auth->password_mutual)); chap_server_compute_md5()
412 sg_init_one(&sg, challenge_binhex, challenge_len); chap_server_compute_md5()
413 ret = crypto_hash_update(&desc, &sg, challenge_len); chap_server_compute_md5()
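
The CHAP code above uses the legacy crypto_hash interface of this kernel, where every piece of input is handed over as a scatterlist. A hedged sketch of the 4.1-era pattern for a single buffer, using a one-shot digest instead of the incremental updates shown:

    #include <linux/crypto.h>
    #include <linux/err.h>
    #include <linux/scatterlist.h>

    static int sketch_md5(const void *buf, unsigned int len, u8 *digest)
    {
            struct crypto_hash *tfm;
            struct hash_desc desc;
            struct scatterlist sg;
            int ret;

            tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            desc.tfm = tfm;
            desc.flags = 0;
            sg_init_one(&sg, buf, len);

            ret = crypto_hash_digest(&desc, &sg, len, digest);   /* init+update+final */
            crypto_free_hash(tfm);
            return ret;
    }
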
/linux-4.1.27/drivers/dma/hsu/
H A Dhsu.c82 hsu_chan_writel(hsuc, HSU_CH_DxSAR(i), desc->sg[i].addr); hsu_dma_chan_start()
83 hsu_chan_writel(hsuc, HSU_CH_DxTSR(i), desc->sg[i].len); hsu_dma_chan_start()
205 desc->sg = kcalloc(nents, sizeof(*desc->sg), GFP_NOWAIT); hsu_dma_alloc_desc()
206 if (!desc->sg) { hsu_dma_alloc_desc()
218 kfree(desc->sg); hsu_dma_desc_free()
229 struct scatterlist *sg; hsu_dma_prep_slave_sg() local
236 for_each_sg(sgl, sg, sg_len, i) { for_each_sg()
237 desc->sg[i].addr = sg_dma_address(sg); for_each_sg()
238 desc->sg[i].len = sg_dma_len(sg); for_each_sg()
266 bytes += desc->sg[i].len; hsu_dma_desc_size()
/linux-4.1.27/drivers/target/
H A Dtarget_core_iblock.c461 struct scatterlist *sg; iblock_execute_write_same() local
472 sg = &cmd->t_data_sg[0]; iblock_execute_write_same()
475 sg->length != cmd->se_dev->dev_attrib.block_size) { iblock_execute_write_same()
477 " block_size: %u\n", cmd->t_data_nents, sg->length, iblock_execute_write_same()
497 while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset) iblock_execute_write_same()
498 != sg->length) { iblock_execute_write_same()
509 block_lba += sg->length >> IBLOCK_LBA_SHIFT; iblock_execute_write_same()
636 struct scatterlist *sg; iblock_alloc_bip() local
658 for_each_sg(cmd->t_prot_sg, sg, cmd->t_prot_nents, i) { iblock_alloc_bip()
660 rc = bio_integrity_add_page(bio, sg_page(sg), sg->length, iblock_alloc_bip()
661 sg->offset); iblock_alloc_bip()
662 if (rc != sg->length) { iblock_alloc_bip()
668 sg_page(sg), sg->length, sg->offset); iblock_alloc_bip()
682 struct scatterlist *sg; iblock_execute_rw() local
750 for_each_sg(sgl, sg, sgl_nents, i) { for_each_sg()
756 while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset) for_each_sg()
757 != sg->length) { for_each_sg()
773 block_lba += sg->length >> IBLOCK_LBA_SHIFT; for_each_sg()
H A Dtarget_core_rd.c86 struct scatterlist *sg; rd_release_sgl_table() local
90 sg = sg_table[i].sg_table; rd_release_sgl_table()
94 pg = sg_page(&sg[j]); rd_release_sgl_table()
100 kfree(sg); rd_release_sgl_table()
138 struct scatterlist *sg; rd_allocate_sgl_table() local
157 sg = kcalloc(sg_per_table + chain_entry, sizeof(*sg), rd_allocate_sgl_table()
159 if (!sg) { rd_allocate_sgl_table()
165 sg_init_table(sg, sg_per_table + chain_entry); rd_allocate_sgl_table()
171 max_sg_per_table + 1, sg); rd_allocate_sgl_table()
176 sg_table[i].sg_table = sg; rd_allocate_sgl_table()
189 sg_assign_page(&sg[j], pg); rd_allocate_sgl_table()
190 sg[j].length = PAGE_SIZE; rd_allocate_sgl_table()
279 * tot sg needed = rd_page_count * (PGSZ/block_size) * rd_build_prot_space()
576 /* since we increment, the first sg entry is correct */ rd_execute_rw()
H A Dtarget_core_sbc.c328 struct scatterlist *sg; xdreadwrite_callback() local
363 for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) { xdreadwrite_callback()
364 addr = kmap_atomic(sg_page(sg)); xdreadwrite_callback()
370 for (i = 0; i < sg->length; i++) xdreadwrite_callback()
371 *(addr + sg->offset + i) ^= *(buf + offset + i); xdreadwrite_callback()
373 offset += sg->length; xdreadwrite_callback()
419 struct scatterlist *write_sg = NULL, *sg; compare_and_write_callback() local
460 pr_err("Unable to allocate compare_and_write sg\n"); compare_and_write_callback()
478 for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, i) { compare_and_write_callback()
479 addr = (unsigned char *)kmap_atomic(sg_page(sg)); compare_and_write_callback()
485 len = min(sg->length, compare_len); compare_and_write_callback()
512 m.piter.sg->offset + block_size); compare_and_write_callback()
516 m.piter.sg->offset); compare_and_write_callback()
1276 struct scatterlist *sg, int sg_off) sbc_dif_copy_prot()
1284 if (!sg) sbc_dif_copy_prot()
1295 len = min(psg_len, sg->length - offset); sbc_dif_copy_prot()
1296 addr = kmap_atomic(sg_page(sg)) + sg->offset + offset; sbc_dif_copy_prot()
1308 if (offset >= sg->length) { sbc_dif_copy_prot()
1309 sg = sg_next(sg); sbc_dif_copy_prot()
1320 unsigned int ei_lba, struct scatterlist *sg, int sg_off) sbc_dif_verify_write()
1367 if (!sg) sbc_dif_verify_write()
1370 sbc_dif_copy_prot(cmd, sectors, false, sg, sg_off); sbc_dif_verify_write()
1378 unsigned int ei_lba, struct scatterlist *sg, int sg_off) __sbc_dif_verify_read()
1382 struct scatterlist *dsg, *psg = sg; __sbc_dif_verify_read()
1390 paddr = kmap_atomic(sg_page(psg)) + sg->offset; __sbc_dif_verify_read()
1447 unsigned int ei_lba, struct scatterlist *sg, int sg_off) sbc_dif_verify_read()
1451 rc = __sbc_dif_verify_read(cmd, start, sectors, ei_lba, sg, sg_off); sbc_dif_verify_read()
1455 sbc_dif_copy_prot(cmd, sectors, true, sg, sg_off); sbc_dif_verify_read()
1275 sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read, struct scatterlist *sg, int sg_off) sbc_dif_copy_prot() argument
1319 sbc_dif_verify_write(struct se_cmd *cmd, sector_t start, unsigned int sectors, unsigned int ei_lba, struct scatterlist *sg, int sg_off) sbc_dif_verify_write() argument
1377 __sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors, unsigned int ei_lba, struct scatterlist *sg, int sg_off) __sbc_dif_verify_read() argument
1446 sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors, unsigned int ei_lba, struct scatterlist *sg, int sg_off) sbc_dif_verify_read() argument
/linux-4.1.27/drivers/xen/
H A Dswiotlb-xen.c550 struct scatterlist *sg; xen_swiotlb_map_sg_attrs() local
555 for_each_sg(sgl, sg, nelems, i) { for_each_sg()
556 phys_addr_t paddr = sg_phys(sg); for_each_sg()
561 !dma_capable(hwdev, dev_addr, sg->length) || for_each_sg()
562 range_straddles_page_boundary(paddr, sg->length)) { for_each_sg()
565 sg_phys(sg), for_each_sg()
566 sg->length, for_each_sg()
580 sg->length, for_each_sg()
583 sg->dma_address = xen_phys_to_bus(map); for_each_sg()
591 sg->length, for_each_sg()
594 sg->dma_address = dev_addr; for_each_sg()
596 sg_dma_len(sg) = sg->length; for_each_sg()
611 struct scatterlist *sg; xen_swiotlb_unmap_sg_attrs() local
616 for_each_sg(sgl, sg, nelems, i) xen_swiotlb_unmap_sg_attrs()
617 xen_unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir, attrs); xen_swiotlb_unmap_sg_attrs()
634 struct scatterlist *sg; xen_swiotlb_sync_sg() local
637 for_each_sg(sgl, sg, nelems, i) xen_swiotlb_sync_sg()
638 xen_swiotlb_sync_single(hwdev, sg->dma_address, xen_swiotlb_sync_sg()
639 sg_dma_len(sg), dir, target); xen_swiotlb_sync_sg()
643 xen_swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg, xen_swiotlb_sync_sg_for_cpu() argument
646 xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU); xen_swiotlb_sync_sg_for_cpu()
651 xen_swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, xen_swiotlb_sync_sg_for_device() argument
654 xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE); xen_swiotlb_sync_sg_for_device()
/linux-4.1.27/drivers/spi/
H A Dspi-topcliff-pch.c931 struct scatterlist *sg; pch_spi_handle_dma() local
1018 sg = dma->sg_rx_p; pch_spi_handle_dma()
1019 for (i = 0; i < num; i++, sg++) { pch_spi_handle_dma()
1021 sg->offset = size * i; pch_spi_handle_dma()
1022 sg->offset = sg->offset * (*bpw / 8); pch_spi_handle_dma()
1023 sg_set_page(sg, virt_to_page(dma->rx_buf_virt), rem, pch_spi_handle_dma()
1024 sg->offset); pch_spi_handle_dma()
1025 sg_dma_len(sg) = rem; pch_spi_handle_dma()
1027 sg->offset = size * (i - 1) + rem; pch_spi_handle_dma()
1028 sg->offset = sg->offset * (*bpw / 8); pch_spi_handle_dma()
1029 sg_set_page(sg, virt_to_page(dma->rx_buf_virt), size, pch_spi_handle_dma()
1030 sg->offset); pch_spi_handle_dma()
1031 sg_dma_len(sg) = size; pch_spi_handle_dma()
1033 sg->offset = size * i; pch_spi_handle_dma()
1034 sg->offset = sg->offset * (*bpw / 8); pch_spi_handle_dma()
1035 sg_set_page(sg, virt_to_page(dma->rx_buf_virt), size, pch_spi_handle_dma()
1036 sg->offset); pch_spi_handle_dma()
1037 sg_dma_len(sg) = size; pch_spi_handle_dma()
1039 sg_dma_address(sg) = dma->rx_buf_dma + sg->offset; pch_spi_handle_dma()
1041 sg = dma->sg_rx_p; pch_spi_handle_dma()
1042 desc_rx = dmaengine_prep_slave_sg(dma->chan_rx, sg, pch_spi_handle_dma()
1050 dma_sync_sg_for_device(&data->master->dev, sg, num, DMA_FROM_DEVICE); pch_spi_handle_dma()
1078 sg = dma->sg_tx_p; pch_spi_handle_dma()
1079 for (i = 0; i < num; i++, sg++) { pch_spi_handle_dma()
1081 sg->offset = 0; pch_spi_handle_dma()
1082 sg_set_page(sg, virt_to_page(dma->tx_buf_virt), size + head, pch_spi_handle_dma()
1083 sg->offset); pch_spi_handle_dma()
1084 sg_dma_len(sg) = size + head; pch_spi_handle_dma()
1086 sg->offset = head + size * i; pch_spi_handle_dma()
1087 sg->offset = sg->offset * (*bpw / 8); pch_spi_handle_dma()
1088 sg_set_page(sg, virt_to_page(dma->tx_buf_virt), rem, pch_spi_handle_dma()
1089 sg->offset); pch_spi_handle_dma()
1090 sg_dma_len(sg) = rem; pch_spi_handle_dma()
1092 sg->offset = head + size * i; pch_spi_handle_dma()
1093 sg->offset = sg->offset * (*bpw / 8); pch_spi_handle_dma()
1094 sg_set_page(sg, virt_to_page(dma->tx_buf_virt), size, pch_spi_handle_dma()
1095 sg->offset); pch_spi_handle_dma()
1096 sg_dma_len(sg) = size; pch_spi_handle_dma()
1098 sg_dma_address(sg) = dma->tx_buf_dma + sg->offset; pch_spi_handle_dma()
1100 sg = dma->sg_tx_p; pch_spi_handle_dma()
1102 sg, num, DMA_MEM_TO_DEV, pch_spi_handle_dma()
1109 dma_sync_sg_for_device(&data->master->dev, sg, num, DMA_TO_DEVICE); pch_spi_handle_dma()
/linux-4.1.27/drivers/crypto/nx/
H A Dnx.c97 struct nx_sg *sg; nx_build_sg_list() local
121 for (sg = sg_head; sg_len < *len; sg++) { nx_build_sg_list()
124 sg->addr = sg_addr; nx_build_sg_list()
128 next_page = (sg->addr & PAGE_MASK) + PAGE_SIZE; nx_build_sg_list()
129 sg->len = min_t(u64, sg_addr, next_page) - sg->addr; nx_build_sg_list()
130 sg_len += sg->len; nx_build_sg_list()
139 if ((sg - sg_head) == sgmax) { nx_build_sg_list()
142 sg++; nx_build_sg_list()
149 return sg; nx_build_sg_list()
192 scatterwalk_start(&walk, sg_next(walk.sg)); nx_walk_and_build()
212 * trim_sg_list - ensure the sg list stays within the given bound.
213 * @sg: sg list head
214 * @end: sg list end
218 static long int trim_sg_list(struct nx_sg *sg, trim_sg_list() argument
227 while (delta && end > sg) { trim_sg_list()
244 oplen = (sg - end) * sizeof(struct nx_sg); trim_sg_list()
246 data_back = (abs(oplen) / AES_BLOCK_SIZE) * sg->len; trim_sg_list()
358 "ibm,max-sg-len property\n", __func__); nx_of_update_sglen()
359 dev_dbg(dev, "%s: ibm,max-sg-len is %d bytes " nx_of_update_sglen()
479 p = of_find_property(base_node, "ibm,max-sg-len", NULL); nx_of_init()
481 dev_info(dev, "%s: property 'ibm,max-sg-len' not found\n", nx_of_init()
/linux-4.1.27/drivers/crypto/ccp/
H A Dccp-crypto-main.c303 struct scatterlist *sg, *sg_last = NULL; ccp_crypto_sg_table_add() local
305 for (sg = table->sgl; sg; sg = sg_next(sg)) ccp_crypto_sg_table_add()
306 if (!sg_page(sg)) ccp_crypto_sg_table_add()
308 BUG_ON(!sg); ccp_crypto_sg_table_add()
310 for (; sg && sg_add; sg = sg_next(sg), sg_add = sg_next(sg_add)) { ccp_crypto_sg_table_add()
311 sg_set_page(sg, sg_page(sg_add), sg_add->length, ccp_crypto_sg_table_add()
313 sg_last = sg; ccp_crypto_sg_table_add()
H A Dccp-ops.c54 struct scatterlist *sg; member in struct:ccp_sg_workarea
490 struct scatterlist *sg, u64 len, ccp_init_sg_workarea()
495 wa->sg = sg; ccp_init_sg_workarea()
496 if (!sg) ccp_init_sg_workarea()
499 wa->nents = sg_nents(sg); ccp_init_sg_workarea()
500 wa->length = sg->length; ccp_init_sg_workarea()
510 wa->dma_sg = sg; ccp_init_sg_workarea()
513 wa->dma_count = dma_map_sg(dev, sg, wa->nents, dma_dir); ccp_init_sg_workarea()
524 if (!wa->sg) ccp_update_sg_workarea()
529 if (wa->sg_used == wa->sg->length) { ccp_update_sg_workarea()
530 wa->sg = sg_next(wa->sg); ccp_update_sg_workarea()
594 struct scatterlist *sg, unsigned int sg_offset, ccp_set_dm_area()
599 scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len, ccp_set_dm_area()
604 struct scatterlist *sg, unsigned int sg_offset, ccp_get_dm_area()
609 scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len, ccp_get_dm_area()
614 struct scatterlist *sg, ccp_reverse_set_dm_area()
630 scatterwalk_map_and_copy(buffer, sg, sg_offset, ksb_len, 0); ccp_reverse_set_dm_area()
647 struct scatterlist *sg, ccp_reverse_get_dm_area()
662 scatterwalk_map_and_copy(buffer, sg, sg_offset, ksb_len, 1); ccp_reverse_get_dm_area()
676 struct scatterlist *sg, u64 sg_len, ccp_init_data()
684 ret = ccp_init_sg_workarea(&data->sg_wa, cmd_q->ccp->dev, sg, sg_len, ccp_init_data()
711 if (!sg_wa->sg) ccp_queue_buf()
719 scatterwalk_map_and_copy(dm_wa->address, sg_wa->sg, sg_wa->sg_used, ccp_queue_buf()
725 nbytes = min(sg_wa->sg->length - sg_wa->sg_used, ccp_queue_buf()
757 sg_src_len = sg_dma_len(src->sg_wa.sg) - src->sg_wa.sg_used; ccp_prepare_data()
761 sg_dst_len = sg_dma_len(dst->sg_wa.sg) - dst->sg_wa.sg_used; ccp_prepare_data()
769 * or the smaller of available sg room remaining for the source or ccp_prepare_data()
778 /* Not enough data in the sg element, so it ccp_prepare_data()
788 /* Enough data in the sg element, but we need to ccp_prepare_data()
791 op->src.u.dma.address = sg_dma_address(src->sg_wa.sg); ccp_prepare_data()
800 /* Not enough room in the sg element or we're on the ccp_prepare_data()
809 /* Enough room in the sg element, but we need to ccp_prepare_data()
812 op->dst.u.dma.address = sg_dma_address(dst->sg_wa.sg); ccp_prepare_data()
1499 struct scatterlist sg; ccp_run_sha_cmd() local
1531 sg_init_one(&sg, hmac_buf, block_size + digest_size); ccp_run_sha_cmd()
1541 hmac_cmd.u.sha.src = &sg; ccp_run_sha_cmd()
1753 if (!dst.sg_wa.sg || ccp_run_passthru_cmd()
1754 (dst.sg_wa.sg->length < src.sg_wa.sg->length)) { ccp_run_passthru_cmd()
1765 op.src.u.dma.address = sg_dma_address(src.sg_wa.sg); ccp_run_passthru_cmd()
1767 op.src.u.dma.length = sg_dma_len(src.sg_wa.sg); ccp_run_passthru_cmd()
1770 op.dst.u.dma.address = sg_dma_address(dst.sg_wa.sg); ccp_run_passthru_cmd()
1780 dst.sg_wa.sg_used += src.sg_wa.sg->length; ccp_run_passthru_cmd()
1781 if (dst.sg_wa.sg_used == dst.sg_wa.sg->length) { ccp_run_passthru_cmd()
1782 dst.sg_wa.sg = sg_next(dst.sg_wa.sg); ccp_run_passthru_cmd()
1785 src.sg_wa.sg = sg_next(src.sg_wa.sg); ccp_run_passthru_cmd()
489 ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev, struct scatterlist *sg, u64 len, enum dma_data_direction dma_dir) ccp_init_sg_workarea() argument
593 ccp_set_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset, struct scatterlist *sg, unsigned int sg_offset, unsigned int len) ccp_set_dm_area() argument
603 ccp_get_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset, struct scatterlist *sg, unsigned int sg_offset, unsigned int len) ccp_get_dm_area() argument
613 ccp_reverse_set_dm_area(struct ccp_dm_workarea *wa, struct scatterlist *sg, unsigned int len, unsigned int se_len, bool sign_extend) ccp_reverse_set_dm_area() argument
646 ccp_reverse_get_dm_area(struct ccp_dm_workarea *wa, struct scatterlist *sg, unsigned int len) ccp_reverse_get_dm_area() argument
675 ccp_init_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q, struct scatterlist *sg, u64 sg_len, unsigned int dm_len, enum dma_data_direction dir) ccp_init_data() argument
H A Dccp-crypto-aes-cmac.c64 struct scatterlist *sg, *cmac_key_sg = NULL; ccp_do_cmac_update() local
117 sg = NULL; ccp_do_cmac_update()
120 sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->buf_sg); ccp_do_cmac_update()
124 sg = ccp_crypto_sg_table_add(&rctx->data_sg, req->src); ccp_do_cmac_update()
134 sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->pad_sg); ccp_do_cmac_update()
136 if (sg) { ccp_do_cmac_update()
137 sg_mark_end(sg); ccp_do_cmac_update()
138 sg = rctx->data_sg.sgl; ccp_do_cmac_update()
156 rctx->cmd.u.aes.src = sg; ccp_do_cmac_update()
H A Dccp-crypto-sha.c63 struct scatterlist *sg; ccp_do_sha_update() local
96 sg = NULL; ccp_do_sha_update()
109 sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->buf_sg); ccp_do_sha_update()
110 sg = ccp_crypto_sg_table_add(&rctx->data_sg, req->src); ccp_do_sha_update()
111 sg_mark_end(sg); ccp_do_sha_update()
113 sg = rctx->data_sg.sgl; ccp_do_sha_update()
117 sg = &rctx->buf_sg; ccp_do_sha_update()
119 sg = req->src; ccp_do_sha_update()
130 rctx->cmd.u.sha.src = sg; ccp_do_sha_update()
/linux-4.1.27/drivers/misc/cb710/
H A Dsgbuf2.c77 * cb710_sg_dwiter_read_next_block() - get next 32-bit word from sg buffer
78 * @miter: sg mapping iterator used for reading
121 * cb710_sg_dwiter_write_next_block() - write next 32-bit word to sg buffer
122 * @miter: sg mapping iterator used for writing
/linux-4.1.27/drivers/infiniband/hw/mthca/
H A Dmthca_wqe.h117 struct ib_sge *sg) mthca_set_data_seg()
119 dseg->byte_count = cpu_to_be32(sg->length); mthca_set_data_seg()
120 dseg->lkey = cpu_to_be32(sg->lkey); mthca_set_data_seg()
121 dseg->addr = cpu_to_be64(sg->addr); mthca_set_data_seg()
116 mthca_set_data_seg(struct mthca_data_seg *dseg, struct ib_sge *sg) mthca_set_data_seg() argument
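
mthca_set_data_seg() converts one ib_sge into the hardware descriptor layout; upper layers go the other way and build ib_sge entries from a DMA-mapped scatterlist, as the rds hits earlier do with the ib_sg_dma_* accessors. A hedged sketch of that direction (lkey and names are placeholders):

    #include <linux/scatterlist.h>
    #include <rdma/ib_verbs.h>

    static int sketch_sg_to_sge(struct ib_device *ibdev, struct scatterlist *sgl,
                                int mapped, struct ib_sge *sge, u32 lkey)
    {
            struct scatterlist *sg;
            int i;

            for_each_sg(sgl, sg, mapped, i) {
                    sge[i].addr   = ib_sg_dma_address(ibdev, sg);
                    sge[i].length = ib_sg_dma_len(ibdev, sg);
                    sge[i].lkey   = lkey;            /* from the registered MR */
            }
            return mapped;
    }
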
/linux-4.1.27/drivers/dma/
H A Dcoh901318_lli.c239 struct scatterlist *sg; coh901318_lli_fill_sg() local
258 for_each_sg(sgl, sg, nents, i) { for_each_sg()
259 if (sg_is_chain(sg)) { for_each_sg()
260 /* sg continues to the next sg-element don't for_each_sg()
262 * sg-element in the chain for_each_sg()
273 src = sg_dma_address(sg); for_each_sg()
276 dst = sg_dma_address(sg); for_each_sg()
278 bytes_to_transfer = sg_dma_len(sg); for_each_sg()
H A Ddma-jz4740.c112 struct jz4740_dma_sg sg[]; member in struct:jz4740_dma_desc
294 struct jz4740_dma_sg *sg; jz4740_dma_start_transfer() local
310 sg = &chan->desc->sg[chan->next_sg]; jz4740_dma_start_transfer()
313 src_addr = sg->addr; jz4740_dma_start_transfer()
317 dst_addr = sg->addr; jz4740_dma_start_transfer()
322 sg->len >> chan->transfer_shift); jz4740_dma_start_transfer()
396 struct scatterlist *sg; jz4740_dma_prep_slave_sg() local
403 for_each_sg(sgl, sg, sg_len, i) { for_each_sg()
404 desc->sg[i].addr = sg_dma_address(sg); for_each_sg()
405 desc->sg[i].len = sg_dma_len(sg); for_each_sg()
434 desc->sg[i].addr = buf_addr; jz4740_dma_prep_dma_cyclic()
435 desc->sg[i].len = period_len; jz4740_dma_prep_dma_cyclic()
456 residue += desc->sg[i].len; jz4740_dma_desc_residue()
H A Dsa11x0-dma.c85 struct sa11x0_dma_sg sg[0]; member in struct:sa11x0_dma_desc
166 struct sa11x0_dma_sg *sg; sa11x0_dma_start_sg() local
202 sg = &txd->sg[p->sg_load++]; sa11x0_dma_start_sg()
216 writel_relaxed(sg->addr, base + dbsx); sa11x0_dma_start_sg()
217 writel_relaxed(sg->len, base + dbtx); sa11x0_dma_start_sg()
222 'A' + (dbsx == DMA_DBSB), sg->addr, sa11x0_dma_start_sg()
223 'A' + (dbtx == DMA_DBTB), sg->len); sa11x0_dma_start_sg()
470 i, txd->sg[i].addr, txd->sg[i].len); sa11x0_dma_tx_status()
471 if (addr >= txd->sg[i].addr && sa11x0_dma_tx_status()
472 addr < txd->sg[i].addr + txd->sg[i].len) { sa11x0_dma_tx_status()
475 len = txd->sg[i].len - sa11x0_dma_tx_status()
476 (addr - txd->sg[i].addr); sa11x0_dma_tx_status()
486 i, txd->sg[i].addr, txd->sg[i].len); sa11x0_dma_tx_status()
487 bytes += txd->sg[i].len; sa11x0_dma_tx_status()
527 struct dma_chan *chan, struct scatterlist *sg, unsigned int sglen, sa11x0_dma_prep_slave_sg()
547 for_each_sg(sg, sgent, sglen, i) { for_each_sg()
560 txd = kzalloc(sizeof(*txd) + j * sizeof(txd->sg[0]), GFP_ATOMIC);
567 for_each_sg(sg, sgent, sglen, i) { for_each_sg()
589 txd->sg[j].addr = addr; for_each_sg()
590 txd->sg[j].len = tlen; for_each_sg()
630 txd = kzalloc(sizeof(*txd) + sglen * sizeof(txd->sg[0]), GFP_ATOMIC); sa11x0_dma_prep_dma_cyclic()
647 txd->sg[k].addr = addr; sa11x0_dma_prep_dma_cyclic()
648 txd->sg[k].len = tlen; sa11x0_dma_prep_dma_cyclic()
526 sa11x0_dma_prep_slave_sg( struct dma_chan *chan, struct scatterlist *sg, unsigned int sglen, enum dma_transfer_direction dir, unsigned long flags, void *context) sa11x0_dma_prep_slave_sg() argument
H A Dimx-dma.c142 /* For slave sg and cyclic */
143 struct scatterlist *sg; member in struct:imxdma_desc
286 struct scatterlist *sg = d->sg; imxdma_sg_next() local
289 now = min(d->len, sg_dma_len(sg)); imxdma_sg_next()
294 imx_dmav1_writel(imxdma, sg->dma_address, imxdma_sg_next()
297 imx_dmav1_writel(imxdma, sg->dma_address, imxdma_sg_next()
329 d->sg && imxdma_hw_chain(imxdmac)) { imxdma_enable_hw()
330 d->sg = sg_next(d->sg); imxdma_enable_hw()
331 if (d->sg) { imxdma_enable_hw()
448 if (desc->sg) { dma_irq_handle_channel()
450 desc->sg = sg_next(desc->sg); dma_irq_handle_channel()
452 if (desc->sg) { dma_irq_handle_channel()
577 /* Cyclic transfer is the same as slave_sg with special sg configuration. */ imxdma_xfer_desc()
587 "%s channel: %d sg=%p sgcount=%d total length=%zu dev_addr=0x%08llx (dev2mem)\n", imxdma_xfer_desc()
589 d->sg, d->sgcount, d->len, imxdma_xfer_desc()
598 "%s channel: %d sg=%p sgcount=%d total length=%zu dev_addr=0x%08llx (mem2dev)\n", imxdma_xfer_desc()
600 d->sg, d->sgcount, d->len, imxdma_xfer_desc()
814 struct scatterlist *sg; imxdma_prep_slave_sg() local
824 for_each_sg(sgl, sg, sg_len, i) { for_each_sg()
825 dma_length += sg_dma_len(sg); for_each_sg()
844 desc->sg = sgl;
903 desc->sg = imxdmac->sg_list; imxdma_prep_dma_cyclic()
H A Dtimb_dma.c158 struct scatterlist *sg, bool last) td_fill_desc()
160 if (sg_dma_len(sg) > USHRT_MAX) { td_fill_desc()
161 dev_err(chan2dev(&td_chan->chan), "Too big sg element\n"); td_fill_desc()
166 if (sg_dma_len(sg) % sizeof(u32)) { td_fill_desc()
168 sg_dma_len(sg)); td_fill_desc()
173 dma_desc, (unsigned long long)sg_dma_address(sg)); td_fill_desc()
175 dma_desc[7] = (sg_dma_address(sg) >> 24) & 0xff; td_fill_desc()
176 dma_desc[6] = (sg_dma_address(sg) >> 16) & 0xff; td_fill_desc()
177 dma_desc[5] = (sg_dma_address(sg) >> 8) & 0xff; td_fill_desc()
178 dma_desc[4] = (sg_dma_address(sg) >> 0) & 0xff; td_fill_desc()
180 dma_desc[3] = (sg_dma_len(sg) >> 8) & 0xff; td_fill_desc()
181 dma_desc[2] = (sg_dma_len(sg) >> 0) & 0xff; td_fill_desc()
516 struct scatterlist *sg; td_prep_slave_sg() local
540 for_each_sg(sgl, sg, sg_len, i) { for_each_sg()
547 err = td_fill_desc(td_chan, td_desc->desc_list + desc_usage, sg, for_each_sg()
157 td_fill_desc(struct timb_dma_chan *td_chan, u8 *dma_desc, struct scatterlist *sg, bool last) td_fill_desc() argument
H A Dmoxart-dma.c130 struct moxart_sg sg[0]; member in struct:moxart_desc
311 d = kzalloc(sizeof(*d) + sg_len * sizeof(d->sg[0]), GFP_ATOMIC); moxart_prep_slave_sg()
320 d->sg[i].addr = sg_dma_address(sgent); for_each_sg()
321 d->sg[i].len = sg_dma_len(sgent); for_each_sg()
406 struct moxart_sg *sg = ch->desc->sg + idx; moxart_dma_start_sg() local
409 moxart_dma_set_params(ch, sg->addr, d->dev_addr); moxart_dma_start_sg()
411 moxart_dma_set_params(ch, d->dev_addr, sg->addr); moxart_dma_start_sg()
413 moxart_set_transfer_params(ch, sg->len); moxart_dma_start_sg()
456 size += d->sg[i].len; moxart_dma_desc_size()
H A Domap-dma.c75 struct omap_sg sg[0]; member in struct:omap_desc
362 struct omap_sg *sg = d->sg + idx; omap_dma_start_sg() local
375 omap_dma_chan_write(c, cxsa, sg->addr); omap_dma_start_sg()
378 omap_dma_chan_write(c, CEN, sg->en); omap_dma_start_sg()
379 omap_dma_chan_write(c, CFN, sg->fn); omap_dma_start_sg()
594 static size_t omap_dma_sg_size(struct omap_sg *sg) omap_dma_sg_size() argument
596 return sg->en * sg->fn; omap_dma_sg_size()
605 size += omap_dma_sg_size(&d->sg[i]); omap_dma_desc_size()
616 size_t this_size = omap_dma_sg_size(&d->sg[i]) * es_size; omap_dma_desc_size_pos()
620 else if (addr >= d->sg[i].addr && omap_dma_desc_size_pos()
621 addr < d->sg[i].addr + this_size) omap_dma_desc_size_pos()
622 size += d->sg[i].addr + this_size - addr; omap_dma_desc_size_pos()
798 d = kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0]), GFP_ATOMIC); omap_dma_prep_slave_sg()
843 d->sg[j].addr = sg_dma_address(sgent); for_each_sg()
844 d->sg[j].en = en; for_each_sg()
845 d->sg[j].fn = sg_dma_len(sgent) / frame_bytes; for_each_sg()
895 d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC); omap_dma_prep_dma_cyclic()
903 d->sg[0].addr = buf_addr; omap_dma_prep_dma_cyclic()
904 d->sg[0].en = period_len / es_bytes[es]; omap_dma_prep_dma_cyclic()
905 d->sg[0].fn = buf_len / period_len; omap_dma_prep_dma_cyclic()
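The sa11x0, moxart and omap descriptors above all end in a flexible array with one entry per sg element, sized at allocation time. A generic sketch of that allocation pattern (struct and function names are hypothetical):

#include <linux/dma-mapping.h>
#include <linux/slab.h>

struct example_seg {
        dma_addr_t addr;
        u32 len;
};

struct example_desc {
        unsigned int nsegs;
        struct example_seg seg[];       /* one entry per sg element */
};

static struct example_desc *example_alloc_desc(unsigned int nsegs)
{
        struct example_desc *d;

        /* a single allocation covers the header plus nsegs trailing entries */
        d = kzalloc(sizeof(*d) + nsegs * sizeof(d->seg[0]), GFP_ATOMIC);
        if (d)
                d->nsegs = nsegs;

        return d;
}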
H A Dmxs-dma.c507 struct scatterlist *sg; mxs_dma_prep_slave_sg() local
518 "maximum number of sg exceeded: %d > %d\n", mxs_dma_prep_slave_sg()
527 * If the sg is prepared with append flag set, the sg mxs_dma_prep_slave_sg()
528 * will be appended to the last prepared sg. mxs_dma_prep_slave_sg()
558 for_each_sg(sgl, sg, sg_len, i) { for_each_sg()
559 if (sg_dma_len(sg) > MAX_XFER_BYTES) { for_each_sg()
560 dev_err(mxs_dma->dma_device.dev, "maximum bytes for sg entry exceeded: %d > %d\n", for_each_sg()
561 sg_dma_len(sg), MAX_XFER_BYTES); for_each_sg()
568 ccw->bufaddr = sg->dma_address; for_each_sg()
569 ccw->xfer_bytes = sg_dma_len(sg); for_each_sg()
616 "maximum number of sg exceeded: %d > %d\n", mxs_dma_prep_dma_cyclic()
839 /* mxs_dma gets 65535 bytes maximum sg size */ mxs_dma_probe()
H A Dcoh901318.h123 * @sg: scatter gather list
124 * @nents: number of entries in sg
136 struct scatterlist *sg, unsigned int nents,
/linux-4.1.27/include/media/
H A Dvideobuf2-dma-sg.h2 * videobuf2-dma-sg.h - DMA scatter/gather memory allocator for videobuf2
/linux-4.1.27/arch/mips/loongson/common/
H A Ddma-swiotlb.c68 static int loongson_dma_map_sg(struct device *dev, struct scatterlist *sg, loongson_dma_map_sg() argument
72 int r = swiotlb_map_sg_attrs(dev, sg, nents, dir, NULL); loongson_dma_map_sg()
87 struct scatterlist *sg, int nents, loongson_dma_sync_sg_for_device()
90 swiotlb_sync_sg_for_device(dev, sg, nents, dir); loongson_dma_sync_sg_for_device()
86 loongson_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir) loongson_dma_sync_sg_for_device() argument
/linux-4.1.27/arch/frv/include/asm/
H A Ddma-mapping.h34 extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
38 void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, dma_unmap_sg() argument
85 void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, dma_sync_sg_for_cpu() argument
91 void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, dma_sync_sg_for_device() argument
/linux-4.1.27/arch/frv/mb93090-mb00/
H A Dpci-dma-nommu.c122 int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, dma_map_sg() argument
128 frv_cache_wback_inv(sg_dma_address(&sg[i]), dma_map_sg()
129 sg_dma_address(&sg[i]) + sg_dma_len(&sg[i])); dma_map_sg()
H A Dpci-dma.c53 int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, dma_map_sg() argument
65 vaddr = kmap_atomic_primary(sg_page(&sg[i])); dma_map_sg()
/linux-4.1.27/drivers/rapidio/devices/
H A Dtsi721_dma.c338 struct scatterlist *sg, u32 sys_size) tsi721_desc_fill_init()
355 (u64)sg_dma_address(sg) & 0xffffffff); tsi721_desc_fill_init()
356 bd_ptr->t1.bufptr_hi = cpu_to_le32((u64)sg_dma_address(sg) >> 32); tsi721_desc_fill_init()
422 struct scatterlist *sg; tsi721_submit_sg() local
457 for_each_sg(desc->sg, sg, desc->sg_len, i) { tsi721_submit_sg()
459 dev_dbg(dchan->device->dev, "sg%d/%d addr: 0x%llx len: %d\n", tsi721_submit_sg()
461 (unsigned long long)sg_dma_address(sg), sg_dma_len(sg)); tsi721_submit_sg()
463 if (sg_dma_len(sg) > TSI721_BDMA_MAX_BCOUNT) { tsi721_submit_sg()
471 * If this sg entry forms contiguous block with previous one, tsi721_submit_sg()
474 if (next_addr == sg_dma_address(sg) && tsi721_submit_sg()
475 bcount + sg_dma_len(sg) <= TSI721_BDMA_MAX_BCOUNT) { tsi721_submit_sg()
477 bcount += sg_dma_len(sg); tsi721_submit_sg()
493 desc->sg = sg; tsi721_submit_sg()
499 err = tsi721_desc_fill_init(desc, bd_ptr, sg, sys_size); tsi721_submit_sg()
509 next_addr = sg_dma_address(sg); tsi721_submit_sg()
510 bcount = sg_dma_len(sg); tsi721_submit_sg()
520 if (sg_is_last(sg)) { tsi721_submit_sg()
526 rio_addr += sg_dma_len(sg); tsi721_submit_sg()
527 next_addr += sg_dma_len(sg); tsi721_submit_sg()
806 desc->sg = sgl; tsi721_prep_rio_sg()
336 tsi721_desc_fill_init(struct tsi721_tx_desc *desc, struct tsi721_dma_desc *bd_ptr, struct scatterlist *sg, u32 sys_size) tsi721_desc_fill_init() argument
/linux-4.1.27/drivers/usb/host/whci/
H A Dqset.c266 struct scatterlist *sg; qset_copy_bounce_to_sg() local
273 sg = std->bounce_sg; qset_copy_bounce_to_sg()
279 len = min(sg->length - offset, remaining); qset_copy_bounce_to_sg()
280 memcpy(sg_virt(sg) + offset, bounce, len); qset_copy_bounce_to_sg()
286 if (offset >= sg->length) { qset_copy_bounce_to_sg()
287 sg = sg_next(sg); qset_copy_bounce_to_sg()
439 struct scatterlist *sg; qset_add_urb_sg() local
450 for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) { qset_add_urb_sg()
460 dma_addr = sg_dma_address(sg); qset_add_urb_sg()
461 dma_remaining = min_t(size_t, sg_dma_len(sg), remaining); qset_add_urb_sg()
474 * sg list cannot be mapped onto multiple qset_add_urb_sg()
548 * qset_add_urb_sg_linearize - add an urb with sg list, copying the data
550 * If the URB contains an sg list whose elements cannot be directly
563 struct scatterlist *sg; qset_add_urb_sg_linearize() local
571 for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) { qset_add_urb_sg_linearize()
580 sg_remaining = min_t(size_t, remaining, sg->length); qset_add_urb_sg_linearize()
581 orig = sg_virt(sg); qset_add_urb_sg_linearize()
591 std->bounce_sg = sg; qset_add_urb_sg_linearize()
592 std->bounce_offset = orig - sg_virt(sg); qset_add_urb_sg_linearize()
/linux-4.1.27/drivers/net/ethernet/micrel/
H A Dks8842.c146 struct scatterlist sg; member in struct:ks8842_tx_dma_ctl
154 struct scatterlist sg; member in struct:ks8842_rx_dma_ctl
443 sg_dma_len(&ctl->sg) = skb->len + sizeof(u32); ks8842_tx_frame_dma()
454 sg_dma_address(&ctl->sg), 0, sg_dma_len(&ctl->sg), ks8842_tx_frame_dma()
458 if (sg_dma_len(&ctl->sg) % 4) ks8842_tx_frame_dma()
459 sg_dma_len(&ctl->sg) += 4 - sg_dma_len(&ctl->sg) % 4; ks8842_tx_frame_dma()
462 &ctl->sg, 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT); ks8842_tx_frame_dma()
556 struct scatterlist *sg = &ctl->sg; __ks8842_start_new_rx_dma() local
561 sg_init_table(sg, 1); __ks8842_start_new_rx_dma()
562 sg_dma_address(sg) = dma_map_single(adapter->dev, __ks8842_start_new_rx_dma()
564 err = dma_mapping_error(adapter->dev, sg_dma_address(sg)); __ks8842_start_new_rx_dma()
566 sg_dma_address(sg) = 0; __ks8842_start_new_rx_dma()
570 sg_dma_len(sg) = DMA_BUFFER_SIZE; __ks8842_start_new_rx_dma()
573 sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT); __ks8842_start_new_rx_dma()
583 sg_dma_address(sg) = 0; __ks8842_start_new_rx_dma()
589 if (sg_dma_address(sg)) __ks8842_start_new_rx_dma()
590 dma_unmap_single(adapter->dev, sg_dma_address(sg), __ks8842_start_new_rx_dma()
592 sg_dma_address(sg) = 0; __ks8842_start_new_rx_dma()
608 dma_addr_t addr = sg_dma_address(&ctl->sg); ks8842_rx_frame_dma_tasklet()
884 if (sg_dma_address(&rx_ctl->sg)) ks8842_stop_dma()
885 dma_unmap_single(adapter->dev, sg_dma_address(&rx_ctl->sg), ks8842_stop_dma()
887 sg_dma_address(&rx_ctl->sg) = 0; ks8842_stop_dma()
910 if (sg_dma_address(&tx_ctl->sg)) ks8842_dealloc_dma_bufs()
911 dma_unmap_single(adapter->dev, sg_dma_address(&tx_ctl->sg), ks8842_dealloc_dma_bufs()
913 sg_dma_address(&tx_ctl->sg) = 0; ks8842_dealloc_dma_bufs()
937 sg_init_table(&tx_ctl->sg, 1); ks8842_alloc_dma_bufs()
953 sg_dma_address(&tx_ctl->sg) = dma_map_single(adapter->dev, ks8842_alloc_dma_bufs()
956 sg_dma_address(&tx_ctl->sg)); ks8842_alloc_dma_bufs()
958 sg_dma_address(&tx_ctl->sg) = 0; ks8842_alloc_dma_bufs()
/linux-4.1.27/drivers/media/pci/ivtv/
H A Divtv-udma.c77 struct scatterlist *sg; ivtv_udma_fill_sg_array() local
79 for (i = 0, sg = dma->SGlist; i < dma->SG_length; i++, sg = sg_next(sg)) { ivtv_udma_fill_sg_array()
80 dma->SGarray[i].size = cpu_to_le32(sg_dma_len(sg)); ivtv_udma_fill_sg_array()
81 dma->SGarray[i].src = cpu_to_le32(sg_dma_address(sg)); ivtv_udma_fill_sg_array()
83 buffer_offset += sg_dma_len(sg); ivtv_udma_fill_sg_array()
85 split -= sg_dma_len(sg); ivtv_udma_fill_sg_array()
/linux-4.1.27/drivers/staging/i2o/
H A Dmemory.c115 * @sg: SG list to be mapped
127 int i2o_dma_map_sg(struct i2o_controller *c, struct scatterlist *sg, i2o_dma_map_sg() argument
144 sg_count = dma_map_sg(&c->pdev->dev, sg, sg_count, direction); i2o_dma_map_sg()
158 *mptr++ = cpu_to_le32(sg_flags | sg_dma_len(sg)); i2o_dma_map_sg()
159 *mptr++ = cpu_to_le32(i2o_dma_low(sg_dma_address(sg))); i2o_dma_map_sg()
162 *mptr++ = cpu_to_le32(i2o_dma_high(sg_dma_address(sg))); i2o_dma_map_sg()
164 sg = sg_next(sg); i2o_dma_map_sg()
H A Di2o_config.c604 struct sg_simple_element *sg; i2o_cfg_passthru32() local
611 sg = (struct sg_simple_element *)((&msg->u.head[0]) + i2o_cfg_passthru32()
626 if (!(sg[i].flag_count & 0x10000000 i2o_cfg_passthru32()
630 c->name, i, sg[i].flag_count); i2o_cfg_passthru32()
634 sg_size = sg[i].flag_count & 0xffffff; i2o_cfg_passthru32()
646 if (sg[i]. i2o_cfg_passthru32()
651 (void __user *)(unsigned long)sg[i]. i2o_cfg_passthru32()
661 sg[i].addr_bus = (u32) p->phys; i2o_cfg_passthru32()
677 struct sg_simple_element *sg; i2o_cfg_passthru32() local
680 // re-acquire the original message to handle correctly the sg copy operation i2o_cfg_passthru32()
703 sg = (struct sg_simple_element *)(rmsg + sg_offset); i2o_cfg_passthru32()
707 (sg[j]. i2o_cfg_passthru32()
709 sg_size = sg[j].flag_count & 0xffffff; i2o_cfg_passthru32()
712 ((void __user *)(u64) sg[j].addr_bus, i2o_cfg_passthru32()
717 sg[j].addr_bus); i2o_cfg_passthru32()
846 struct sg_simple_element *sg; i2o_cfg_passthru() local
854 sg = (struct sg_simple_element *)((&msg->u.head[0]) + i2o_cfg_passthru()
868 if (!(sg[i].flag_count & 0x10000000 i2o_cfg_passthru()
872 c->name, i, sg[i].flag_count); i2o_cfg_passthru()
876 sg_size = sg[i].flag_count & 0xffffff; i2o_cfg_passthru()
888 if (sg[i]. i2o_cfg_passthru()
892 (p->virt, (void __user *)sg[i].addr_bus, i2o_cfg_passthru()
901 sg[i].addr_bus = p->phys; i2o_cfg_passthru()
917 struct sg_simple_element *sg; i2o_cfg_passthru() local
920 // re-acquire the original message to handle correctly the sg copy operation i2o_cfg_passthru()
943 sg = (struct sg_simple_element *)(rmsg + sg_offset); i2o_cfg_passthru()
947 (sg[j]. i2o_cfg_passthru()
949 sg_size = sg[j].flag_count & 0xffffff; i2o_cfg_passthru()
952 ((void __user *)sg[j].addr_bus, sg_list[j].virt, i2o_cfg_passthru()
957 sg[j].addr_bus); i2o_cfg_passthru()
/linux-4.1.27/drivers/staging/rtl8192e/
H A Drtllib_crypt_wep.c105 struct scatterlist sg; prism2_wep_encrypt() local
152 sg_init_one(&sg, pos, len+4); prism2_wep_encrypt()
154 return crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4); prism2_wep_encrypt()
179 struct scatterlist sg; prism2_wep_decrypt() local
201 sg_init_one(&sg, pos, plen+4); prism2_wep_decrypt()
203 if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4)) prism2_wep_decrypt()
H A Drtllib_crypt_tkip.c310 struct scatterlist sg; rtllib_tkip_encrypt() local
359 sg_init_one(&sg, pos, len+4); rtllib_tkip_encrypt()
363 ret = crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4); rtllib_tkip_encrypt()
393 struct scatterlist sg; rtllib_tkip_decrypt() local
453 sg_init_one(&sg, pos, plen+4); rtllib_tkip_decrypt()
456 if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4)) { rtllib_tkip_decrypt()
509 struct scatterlist sg[2]; michael_mic() local
515 sg_init_table(sg, 2); michael_mic()
516 sg_set_buf(&sg[0], hdr, 16); michael_mic()
517 sg_set_buf(&sg[1], data, data_len); michael_mic()
524 return crypto_hash_digest(&desc, sg, data_len + 16, mic); michael_mic()
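The michael_mic() helpers above all hash a 16-byte pseudo-header plus the payload through a two-entry sg table. A condensed sketch of that pattern using the crypto_hash API present in this kernel (key setup omitted, the example_* name is illustrative):

#include <linux/crypto.h>
#include <linux/scatterlist.h>

/* Sketch: digest a 16-byte header followed by data_len payload bytes. */
static int example_michael_mic(struct crypto_hash *tfm, u8 *hdr,
                               u8 *data, size_t data_len, u8 *mic)
{
        struct hash_desc desc = { .tfm = tfm };
        struct scatterlist sg[2];

        sg_init_table(sg, 2);
        sg_set_buf(&sg[0], hdr, 16);
        sg_set_buf(&sg[1], data, data_len);

        return crypto_hash_digest(&desc, sg, data_len + 16, mic);
}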
/linux-4.1.27/drivers/staging/rtl8192u/ieee80211/
H A Dieee80211_crypt_wep.c111 struct scatterlist sg; prism2_wep_encrypt() local
157 sg_init_one(&sg, pos, len+4); prism2_wep_encrypt()
159 return crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4); prism2_wep_encrypt()
183 struct scatterlist sg; prism2_wep_decrypt() local
207 sg_init_one(&sg, pos, plen+4); prism2_wep_decrypt()
209 if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4)) prism2_wep_decrypt()
H A Dieee80211_crypt_tkip.c315 struct scatterlist sg; ieee80211_tkip_encrypt() local
369 sg_init_one(&sg, pos, len+4); ieee80211_tkip_encrypt()
370 ret= crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4); ieee80211_tkip_encrypt()
399 struct scatterlist sg; ieee80211_tkip_decrypt() local
455 sg_init_one(&sg, pos, plen+4); ieee80211_tkip_decrypt()
457 if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4)) { ieee80211_tkip_decrypt()
505 struct scatterlist sg[2]; michael_mic() local
512 sg_init_table(sg, 2); michael_mic()
513 sg_set_buf(&sg[0], hdr, 16); michael_mic()
514 sg_set_buf(&sg[1], data, data_len); michael_mic()
521 return crypto_hash_digest(&desc, sg, data_len + 16, mic); michael_mic()
/linux-4.1.27/drivers/infiniband/hw/mlx5/
H A Dmem.c57 struct scatterlist *sg; mlx5_ib_cont_pages() local
78 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { mlx5_ib_cont_pages()
79 len = sg_dma_len(sg) >> page_shift; mlx5_ib_cont_pages()
80 pfn = sg_dma_address(sg) >> page_shift; mlx5_ib_cont_pages()
161 struct scatterlist *sg; __mlx5_ib_populate_pas() local
180 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { __mlx5_ib_populate_pas()
181 len = sg_dma_len(sg) >> umem_page_shift; __mlx5_ib_populate_pas()
182 base = sg_dma_address(sg); __mlx5_ib_populate_pas()
/linux-4.1.27/include/uapi/linux/
H A Dbsg.h14 * sg.h sg_io_hdr also has bits defined for its flags member. These

15 * two flag values (0x10 and 0x20) have the same meaning in sg.h . For
/linux-4.1.27/include/xen/
H A Dswiotlb-xen.h42 xen_swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
50 xen_swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
/linux-4.1.27/net/wireless/
H A Dlib80211_crypt_wep.c139 struct scatterlist sg; lib80211_wep_encrypt() local
169 sg_init_one(&sg, pos, len + 4); lib80211_wep_encrypt()
170 return crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4); lib80211_wep_encrypt()
187 struct scatterlist sg; lib80211_wep_decrypt() local
209 sg_init_one(&sg, pos, plen + 4); lib80211_wep_decrypt()
210 if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4)) lib80211_wep_decrypt()
H A Dlib80211_crypt_tkip.c360 struct scatterlist sg; lib80211_tkip_encrypt() local
386 sg_init_one(&sg, pos, len + 4); lib80211_tkip_encrypt()
387 return crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4); lib80211_tkip_encrypt()
414 struct scatterlist sg; lib80211_tkip_decrypt() local
469 sg_init_one(&sg, pos, plen + 4); lib80211_tkip_decrypt()
470 if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4)) { lib80211_tkip_decrypt()
512 struct scatterlist sg[2]; michael_mic() local
518 sg_init_table(sg, 2); michael_mic()
519 sg_set_buf(&sg[0], hdr, 16); michael_mic()
520 sg_set_buf(&sg[1], data, data_len); michael_mic()
527 return crypto_hash_digest(&desc, sg, data_len + 16, mic); michael_mic()
/linux-4.1.27/arch/avr32/include/asm/
H A Ddma-mapping.h192 * @sg: list of buffers
212 dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, dma_map_sg() argument
220 sg[i].dma_address = page_to_bus(sg_page(&sg[i])) + sg[i].offset; dma_map_sg()
221 virt = sg_virt(&sg[i]); dma_map_sg()
222 dma_cache_sync(dev, virt, sg[i].length, direction); dma_map_sg()
231 * @sg: list of buffers
240 dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, dma_unmap_sg() argument
302 * @sg: list of buffers
313 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, dma_sync_sg_for_cpu() argument
324 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, dma_sync_sg_for_device() argument
330 dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, direction); dma_sync_sg_for_device()
/linux-4.1.27/net/ipv4/
H A Dah4.c154 struct scatterlist *sg; ah_output() local
185 sg = ah_req_sg(ahash, req); ah_output()
186 seqhisg = sg + nfrags; ah_output()
222 sg_init_table(sg, nfrags + sglists); ah_output()
223 skb_to_sgvec_nomark(skb, sg, 0, skb->len); ah_output()
226 /* Attach seqhi sg right after packet payload */ ah_output()
230 ahash_request_set_crypt(req, sg, icv, skb->len + seqhi_len); ah_output()
307 struct scatterlist *sg; ah_input() local
370 sg = ah_req_sg(ahash, req); ah_input()
371 seqhisg = sg + nfrags; ah_input()
390 sg_init_table(sg, nfrags + sglists); ah_input()
391 skb_to_sgvec_nomark(skb, sg, 0, skb->len); ah_input()
394 /* Attach seqhi sg right after packet payload */ ah_input()
398 ahash_request_set_crypt(req, sg, icv, skb->len + seqhi_len); ah_input()
H A Desp4.c122 struct scatterlist *sg; esp_output() local
182 sg = asg + sglists; esp_output()
241 sg_init_table(sg, nfrags); esp_output()
242 skb_to_sgvec(skb, sg, esp_output()
256 aead_givcrypt_set_crypt(req, sg, sg, clen, iv); esp_output()
386 struct scatterlist *sg; esp_input() local
422 sg = asg + sglists; esp_input()
431 sg_init_table(sg, nfrags); esp_input()
432 skb_to_sgvec(skb, sg, sizeof(*esph) + crypto_aead_ivsize(aead), elen); esp_input()
444 aead_request_set_crypt(req, sg, sg, elen, iv); esp_input()
/linux-4.1.27/drivers/infiniband/core/
H A Dumem.c49 struct scatterlist *sg; __ib_umem_release() local
58 for_each_sg(umem->sg_head.sgl, sg, umem->npages, i) { __ib_umem_release()
60 page = sg_page(sg); __ib_umem_release()
96 struct scatterlist *sg, *sg_list_start; ib_umem_get() local
203 for_each_sg(sg_list_start, sg, ret, i) { for_each_sg()
207 sg_set_page(sg, page_list[i], PAGE_SIZE, 0); for_each_sg()
211 sg_list_start = sg;
318 struct scatterlist *sg; ib_umem_page_count() local
326 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) ib_umem_page_count()
327 n += sg_dma_len(sg) >> shift; ib_umem_page_count()
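ib_umem_page_count() above just sums sg_dma_len() in page-shift units over the mapped entries. The same walk as a stand-alone sketch (hypothetical helper name):

#include <linux/scatterlist.h>

/* Sketch: number of (1 << page_shift)-sized pages covered by a mapped table. */
static int example_count_pages(struct sg_table *sgt, unsigned int page_shift)
{
        struct scatterlist *sg;
        int i, n = 0;

        for_each_sg(sgt->sgl, sg, sgt->nents, i)
                n += sg_dma_len(sg) >> page_shift;

        return n;
}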
/linux-4.1.27/drivers/memstick/core/
H A Dms_block.c94 * Compares section of 'sg' starting from offset 'offset' and with length 'len'
98 static int msb_sg_compare_to_buffer(struct scatterlist *sg, msb_sg_compare_to_buffer() argument
104 sg_miter_start(&miter, sg, sg_nents(sg), msb_sg_compare_to_buffer()
344 struct scatterlist sg[2]; h_msb_read_page() local
434 sg_init_table(sg, ARRAY_SIZE(sg)); h_msb_read_page()
435 msb_sg_copy(msb->current_sg, sg, ARRAY_SIZE(sg), h_msb_read_page()
439 memstick_init_req_sg(mrq, MS_TPC_READ_LONG_DATA, sg); h_msb_read_page()
480 struct scatterlist sg[2]; h_msb_write_block() local
559 sg_init_table(sg, ARRAY_SIZE(sg)); h_msb_write_block()
561 if (msb_sg_copy(msb->current_sg, sg, ARRAY_SIZE(sg), h_msb_write_block()
566 memstick_init_req_sg(mrq, MS_TPC_WRITE_LONG_DATA, sg); h_msb_write_block()
860 struct scatterlist *sg, int offset) msb_read_page()
872 sg_miter_start(&miter, sg, sg_nents(sg), msb_read_page()
911 msb->current_sg = sg; msb_read_page()
979 struct scatterlist sg; msb_verify_block() local
982 sg_init_one(&sg, msb->block_buffer, msb->block_size); msb_verify_block()
987 NULL, &sg, page * msb->page_size); msb_verify_block()
1001 u16 pba, u32 lba, struct scatterlist *sg, int offset) msb_write_block()
1004 BUG_ON(sg->length < msb->page_size); msb_write_block()
1045 msb->current_sg = sg; msb_write_block()
1059 error = msb_verify_block(msb, pba, sg, offset); msb_write_block()
1116 struct scatterlist *sg, int offset) msb_update_block()
1141 error = msb_write_block(msb, new_pba, lba, sg, offset); msb_update_block()
1193 struct scatterlist sg; msb_read_boot_blocks() local
1216 sg_init_one(&sg, page, sizeof(*page)); msb_read_boot_blocks()
1217 if (msb_read_page(msb, pba, 0, &extra, &sg, 0)) { msb_read_boot_blocks()
1255 struct scatterlist sg; msb_read_bad_block_table() local
1289 sg_init_one(&sg, buffer, size_to_read); msb_read_bad_block_table()
1292 error = msb_read_page(msb, pba, page, NULL, &sg, offset); msb_read_bad_block_table()
1531 struct scatterlist sg; msb_cache_flush() local
1548 sg_init_one(&sg, msb->cache , msb->block_size); msb_cache_flush()
1560 error = msb_read_page(msb, pba, page, &extra, &sg, offset); msb_cache_flush()
1581 error = msb_update_block(msb, msb->cache_block_lba, &sg, 0); msb_cache_flush()
1603 int page, bool add_to_cache_only, struct scatterlist *sg, int offset) msb_cache_write()
1634 msb_sg_copy(sg, sg_tmp, ARRAY_SIZE(sg_tmp), offset, msb->page_size); msb_cache_write()
1644 int page, struct scatterlist *sg, int offset) msb_cache_read()
1657 msb_sg_copy(sg, sg_tmp, ARRAY_SIZE(sg_tmp), msb_cache_read()
1666 error = msb_read_page(msb, pba, page, NULL, sg, offset); msb_cache_read()
1670 msb_cache_write(msb, lba, page, true, sg, offset); msb_cache_read()
1806 int page, struct scatterlist *sg, size_t len, int *sucessfuly_written) msb_do_write_request()
1819 error = msb_update_block(msb, lba, sg, offset); msb_do_write_request()
1829 error = msb_cache_write(msb, lba, page, false, sg, offset); msb_do_write_request()
1846 int page, struct scatterlist *sg, int len, int *sucessfuly_read) msb_do_read_request()
1854 error = msb_cache_read(msb, lba, page, sg, offset); msb_do_read_request()
1876 struct scatterlist *sg = msb->prealloc_sg; msb_io_work() local
1907 blk_rq_map_sg(msb->queue, msb->req, sg); msb_io_work()
1915 error = msb_do_read_request(msb, lba, page, sg, msb_io_work()
1918 error = msb_do_write_request(msb, lba, page, sg, msb_io_work()
858 msb_read_page(struct msb_data *msb, u16 pba, u8 page, struct ms_extra_data_register *extra, struct scatterlist *sg, int offset) msb_read_page() argument
1000 msb_write_block(struct msb_data *msb, u16 pba, u32 lba, struct scatterlist *sg, int offset) msb_write_block() argument
1115 msb_update_block(struct msb_data *msb, u16 lba, struct scatterlist *sg, int offset) msb_update_block() argument
1602 msb_cache_write(struct msb_data *msb, int lba, int page, bool add_to_cache_only, struct scatterlist *sg, int offset) msb_cache_write() argument
1643 msb_cache_read(struct msb_data *msb, int lba, int page, struct scatterlist *sg, int offset) msb_cache_read() argument
1805 msb_do_write_request(struct msb_data *msb, int lba, int page, struct scatterlist *sg, size_t len, int *sucessfuly_written) msb_do_write_request() argument
1845 msb_do_read_request(struct msb_data *msb, int lba, int page, struct scatterlist *sg, int len, int *sucessfuly_read) msb_do_read_request() argument
/linux-4.1.27/drivers/net/
H A Dvirtio_net.c71 struct scatterlist sg[MAX_SKB_FRAGS + 2]; member in struct:send_queue
94 struct scatterlist sg[MAX_SKB_FRAGS + 2]; member in struct:receive_queue
148 * hdr is in a separate sg buffer, and data sg buffer shares same page
149 * with this header sg. This padding makes next sg 16 byte aligned
543 sg_init_table(rq->sg, MAX_SKB_FRAGS + 2); add_recvbuf_small()
544 sg_set_buf(rq->sg, hdr, vi->hdr_len); add_recvbuf_small()
545 skb_to_sgvec(skb, rq->sg + 1, 0, skb->len); add_recvbuf_small()
547 err = virtqueue_add_inbuf(rq->vq, rq->sg, 2, skb, gfp); add_recvbuf_small()
561 sg_init_table(rq->sg, MAX_SKB_FRAGS + 2); add_recvbuf_big()
563 /* page in rq->sg[MAX_SKB_FRAGS + 1] is list tail */ add_recvbuf_big()
571 sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE); add_recvbuf_big()
573 /* chain new page in list head to match sg */ add_recvbuf_big()
585 /* rq->sg[0], rq->sg[1] share the same page */ add_recvbuf_big()
586 /* a separated rq->sg[0] for header - required in case !any_header_sg */ add_recvbuf_big()
587 sg_set_buf(&rq->sg[0], p, vi->hdr_len); add_recvbuf_big()
589 /* rq->sg[1] for data packet, from offset */ add_recvbuf_big()
591 sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset); add_recvbuf_big()
595 err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2, add_recvbuf_big()
640 sg_init_one(rq->sg, buf, len); add_recvbuf_mergeable()
641 err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, (void *)ctx, gfp); add_recvbuf_mergeable()
896 sg_init_table(sq->sg, MAX_SKB_FRAGS + 2); xmit_skb()
899 num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len); xmit_skb()
903 sg_set_buf(sq->sg, hdr, hdr_len); xmit_skb()
904 num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1; xmit_skb()
906 return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC); xmit_skb()
1021 struct scatterlist sg; virtnet_set_mac_address() local
1028 sg_init_one(&sg, addr->sa_data, dev->addr_len); virtnet_set_mac_address()
1030 VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) { virtnet_set_mac_address()
1111 struct scatterlist sg; virtnet_set_queues() local
1119 sg_init_one(&sg, &s, sizeof(s)); virtnet_set_queues()
1122 VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) { virtnet_set_queues()
1153 struct scatterlist sg[2]; virtnet_set_rx_mode() local
1169 sg_init_one(sg, &promisc, sizeof(promisc)); virtnet_set_rx_mode()
1172 VIRTIO_NET_CTRL_RX_PROMISC, sg)) virtnet_set_rx_mode()
1176 sg_init_one(sg, &allmulti, sizeof(allmulti)); virtnet_set_rx_mode()
1179 VIRTIO_NET_CTRL_RX_ALLMULTI, sg)) virtnet_set_rx_mode()
1192 sg_init_table(sg, 2); virtnet_set_rx_mode()
1200 sg_set_buf(&sg[0], mac_data, virtnet_set_rx_mode()
1211 sg_set_buf(&sg[1], mac_data, virtnet_set_rx_mode()
1215 VIRTIO_NET_CTRL_MAC_TABLE_SET, sg)) virtnet_set_rx_mode()
1225 struct scatterlist sg; virtnet_vlan_rx_add_vid() local
1227 sg_init_one(&sg, &vid, sizeof(vid)); virtnet_vlan_rx_add_vid()
1230 VIRTIO_NET_CTRL_VLAN_ADD, &sg)) virtnet_vlan_rx_add_vid()
1239 struct scatterlist sg; virtnet_vlan_rx_kill_vid() local
1241 sg_init_one(&sg, &vid, sizeof(vid)); virtnet_vlan_rx_kill_vid()
1244 VIRTIO_NET_CTRL_VLAN_DEL, &sg)) virtnet_vlan_rx_kill_vid()
1617 sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg)); virtnet_alloc_queues()
1619 sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg)); virtnet_alloc_queues()
/linux-4.1.27/drivers/staging/rts5208/
H A Drtsx_transport.c69 struct scatterlist *sg = rtsx_stor_access_xfer_buf() local
79 struct page *page = sg_page(sg) + rtsx_stor_access_xfer_buf()
80 ((sg->offset + *offset) >> PAGE_SHIFT); rtsx_stor_access_xfer_buf()
82 (sg->offset + *offset) & (PAGE_SIZE-1); rtsx_stor_access_xfer_buf()
83 unsigned int sglen = sg->length - *offset; rtsx_stor_access_xfer_buf()
95 ++sg; rtsx_stor_access_xfer_buf()
324 struct scatterlist *sg, int num_sg, unsigned int *index, rtsx_transfer_sglist_adma_partial()
337 if ((sg == NULL) || (num_sg <= 0) || !offset || !index) rtsx_transfer_sglist_adma_partial()
366 sg_cnt = dma_map_sg(&(rtsx->pci->dev), sg, num_sg, dma_dir); rtsx_transfer_sglist_adma_partial()
369 sg_ptr = sg; rtsx_transfer_sglist_adma_partial()
371 /* Usually the next entry will be @sg@ + 1, but if this sg element rtsx_transfer_sglist_adma_partial()
374 * the proper sg rtsx_transfer_sglist_adma_partial()
479 dma_unmap_sg(&(rtsx->pci->dev), sg, num_sg, dma_dir); rtsx_transfer_sglist_adma_partial()
488 struct scatterlist *sg, int num_sg, rtsx_transfer_sglist_adma()
499 if ((sg == NULL) || (num_sg <= 0)) rtsx_transfer_sglist_adma()
528 buf_cnt = dma_map_sg(&(rtsx->pci->dev), sg, num_sg, dma_dir); rtsx_transfer_sglist_adma()
530 sg_ptr = sg; rtsx_transfer_sglist_adma()
626 dma_unmap_sg(&(rtsx->pci->dev), sg, num_sg, dma_dir); rtsx_transfer_sglist_adma()
323 rtsx_transfer_sglist_adma_partial(struct rtsx_chip *chip, u8 card, struct scatterlist *sg, int num_sg, unsigned int *index, unsigned int *offset, int size, enum dma_data_direction dma_dir, int timeout) rtsx_transfer_sglist_adma_partial() argument
487 rtsx_transfer_sglist_adma(struct rtsx_chip *chip, u8 card, struct scatterlist *sg, int num_sg, enum dma_data_direction dma_dir, int timeout) rtsx_transfer_sglist_adma() argument
/linux-4.1.27/tools/virtio/
H A Dvringh_test.c298 /* We pass sg[]s pointing into here, but we need RINGSIZE+1 */ parallel_test()
326 struct scatterlist sg[4]; parallel_test() local
350 /* Nasty three-element sg list. */ parallel_test()
351 sg_init_table(sg, num_sg = 3); parallel_test()
352 sg_set_buf(&sg[0], (void *)dbuf, 1); parallel_test()
353 sg_set_buf(&sg[1], (void *)dbuf + 1, 2); parallel_test()
354 sg_set_buf(&sg[2], (void *)dbuf + 3, 1); parallel_test()
357 sg_init_table(sg, num_sg = 2); parallel_test()
358 sg_set_buf(&sg[0], (void *)dbuf, 1); parallel_test()
359 sg_set_buf(&sg[1], (void *)dbuf + 1, 3); parallel_test()
362 sg_init_table(sg, num_sg = 1); parallel_test()
363 sg_set_buf(&sg[0], (void *)dbuf, 4); parallel_test()
366 sg_init_table(sg, num_sg = 4); parallel_test()
367 sg_set_buf(&sg[0], (void *)dbuf, 1); parallel_test()
368 sg_set_buf(&sg[1], (void *)dbuf + 1, 1); parallel_test()
369 sg_set_buf(&sg[2], (void *)dbuf + 2, 1); parallel_test()
370 sg_set_buf(&sg[3], (void *)dbuf + 3, 1); parallel_test()
378 err = virtqueue_add_outbuf(vq, sg, num_sg, dbuf, parallel_test()
381 err = virtqueue_add_inbuf(vq, sg, num_sg, parallel_test()
/linux-4.1.27/include/scsi/
H A Dsg.h12 * Original driver (sg.h):
23 * http://sg.danny.cz/sg [alternatively check the MAINTAINERS file]
24 * The documentation for the sg version 3 driver can be found at:
25 * http://sg.danny.cz/sg/p/sg_v3_ho.html
28 * For utility and test programs see: http://sg.danny.cz/sg/sg3_utils.html
89 /* defaults:: for sg driver: Q_AT_HEAD; for block layer: Q_AT_TAIL */
199 /* Defaults, commented if they differ from original sg driver */
256 Try using: "# cat /proc/scsi/sg/debug" instead in the v3 driver */
263 /* Defaults, commented if they differ from original sg driver */
/linux-4.1.27/net/ipv6/
H A Desp6.c150 struct scatterlist *sg; esp6_output() local
209 sg = asg + sglists; esp6_output()
233 sg_init_table(sg, nfrags); esp6_output()
234 skb_to_sgvec(skb, sg, esp6_output()
248 aead_givcrypt_set_crypt(req, sg, sg, clen, iv); esp6_output()
336 struct scatterlist *sg; esp6_input() local
376 sg = asg + sglists; esp6_input()
385 sg_init_table(sg, nfrags); esp6_input()
386 skb_to_sgvec(skb, sg, sizeof(*esph) + crypto_aead_ivsize(aead), elen); esp6_input()
398 aead_request_set_crypt(req, sg, sg, elen, iv); esp6_input()
H A Dah6.c343 struct scatterlist *sg; ah6_output() local
380 sg = ah_req_sg(ahash, req); ah6_output()
381 seqhisg = sg + nfrags; ah6_output()
425 sg_init_table(sg, nfrags + sglists); ah6_output()
426 skb_to_sgvec_nomark(skb, sg, 0, skb->len); ah6_output()
429 /* Attach seqhi sg right after packet payload */ ah6_output()
433 ahash_request_set_crypt(req, sg, icv, skb->len + seqhi_len); ah6_output()
524 struct scatterlist *sg; ah6_input() local
587 sg = ah_req_sg(ahash, req); ah6_input()
588 seqhisg = sg + nfrags; ah6_input()
603 sg_init_table(sg, nfrags + sglists); ah6_input()
604 skb_to_sgvec_nomark(skb, sg, 0, skb->len); ah6_input()
607 /* Attach seqhi sg right after packet payload */ ah6_input()
612 ahash_request_set_crypt(req, sg, icv, skb->len + seqhi_len); ah6_input()
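The ah4/esp4/ah6/esp6 paths above all convert the skb into an sg list before handing it to the crypto layer. The core of that conversion, reduced to a sketch (nfrags is assumed to be the value skb_cow_data() reported, the helper name is illustrative):

#include <linux/scatterlist.h>
#include <linux/skbuff.h>

/* Sketch: build an sg list covering the whole skb payload. */
static int example_skb_to_sg(struct sk_buff *skb, struct scatterlist *sg,
                             int nfrags)
{
        sg_init_table(sg, nfrags);

        /* returns the number of sg entries actually filled in */
        return skb_to_sgvec(skb, sg, 0, skb->len);
}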
/linux-4.1.27/sound/soc/sh/
H A Dsiu_pcm.c124 struct scatterlist sg; siu_pcm_wr_set() local
127 sg_init_table(&sg, 1); siu_pcm_wr_set()
128 sg_set_page(&sg, pfn_to_page(PFN_DOWN(buff)), siu_pcm_wr_set()
130 sg_dma_len(&sg) = size; siu_pcm_wr_set()
131 sg_dma_address(&sg) = buff; siu_pcm_wr_set()
134 &sg, 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); siu_pcm_wr_set()
172 struct scatterlist sg; siu_pcm_rd_set() local
177 sg_init_table(&sg, 1); siu_pcm_rd_set()
178 sg_set_page(&sg, pfn_to_page(PFN_DOWN(buff)), siu_pcm_rd_set()
180 sg_dma_len(&sg) = size; siu_pcm_rd_set()
181 sg_dma_address(&sg) = buff; siu_pcm_rd_set()
184 &sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); siu_pcm_rd_set()
/linux-4.1.27/include/linux/dma/
H A Dipu-dma.h149 struct scatterlist *sg; /* scatterlist for this */ member in struct:idmac_tx_desc
163 struct scatterlist *sg[2]; /* scatterlist elements in buffer-0 and -1 */ member in struct:idmac_channel
166 spinlock_t lock; /* protects sg[0,1], queue */
/linux-4.1.27/drivers/crypto/amcc/
H A Dcrypto4xx_core.c60 /* setup pe dma, include reset sg, pdr and pe, then release reset */ crypto4xx_hw_init()
71 /* un reset pe,sg and pdr */ crypto4xx_hw_init()
112 /* un reset pe,sg and pdr */ crypto4xx_hw_init()
551 struct scatterlist *sg; crypto4xx_copy_pkt_to_dst() local
558 sg = &dst[i]; crypto4xx_copy_pkt_to_dst()
559 sg_len = sg->length; crypto4xx_copy_pkt_to_dst()
560 addr = dma_map_page(dev->core_dev->device, sg_page(sg), crypto4xx_copy_pkt_to_dst()
561 sg->offset, sg->length, DMA_TO_DEVICE); crypto4xx_copy_pkt_to_dst()
564 len = (nbytes <= sg->length) ? nbytes : sg->length; crypto4xx_copy_pkt_to_dst()
574 len = (sg->length < len) ? sg->length : len; crypto4xx_copy_pkt_to_dst()
749 struct scatterlist *sg = sg_list; get_sg_count() local
754 if (sg->length > nbytes) get_sg_count()
756 nbytes -= sg->length; get_sg_count()
757 sg = sg_next(sg); get_sg_count()
789 struct scatterlist *sg; crypto4xx_build_pd() local
896 /* walk the sg, and setup gather array */ crypto4xx_build_pd()
898 sg = &src[idx]; crypto4xx_build_pd()
899 addr = dma_map_page(dev->core_dev->device, sg_page(sg), crypto4xx_build_pd()
900 sg->offset, sg->length, DMA_TO_DEVICE); crypto4xx_build_pd()
902 gd->ctl_len.len = sg->length; crypto4xx_build_pd()
905 if (sg->length >= nbytes) crypto4xx_build_pd()
907 nbytes -= sg->length; crypto4xx_build_pd()
/linux-4.1.27/drivers/block/
H A Dcpqarray.c945 c->req.sg[i].size = tmp_sg[i].length; do_ida_request()
946 c->req.sg[i].addr = (__u32) pci_map_page(h->pci_dev, do_ida_request()
1017 printk(KERN_WARNING "Invalid request on ida/c%dd%d = (cmd=%x sect=%d cnt=%d sg=%d ret=%x)\n", complete_command()
1031 pci_unmap_page(hba[cmd->ctlr]->pci_dev, cmd->req.sg[i].addr, complete_command()
1032 cmd->req.sg[i].size, ddir); complete_command()
1257 p = memdup_user(io->sg[0].addr, io->sg[0].size); ida_ctlr_ioctl()
1266 c->req.sg[0].size = io->sg[0].size; ida_ctlr_ioctl()
1267 c->req.sg[0].addr = pci_map_single(h->pci_dev, p, ida_ctlr_ioctl()
1268 c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL); ida_ctlr_ioctl()
1274 p = kmalloc(io->sg[0].size, GFP_KERNEL); ida_ctlr_ioctl()
1282 c->req.sg[0].size = io->sg[0].size; ida_ctlr_ioctl()
1283 c->req.sg[0].addr = pci_map_single(h->pci_dev, p, ida_ctlr_ioctl()
1284 c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL); ida_ctlr_ioctl()
1292 p = memdup_user(io->sg[0].addr, io->sg[0].size); ida_ctlr_ioctl()
1298 c->req.sg[0].size = io->sg[0].size; ida_ctlr_ioctl()
1299 c->req.sg[0].addr = pci_map_single(h->pci_dev, p, ida_ctlr_ioctl()
1300 c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL); ida_ctlr_ioctl()
1304 c->req.sg[0].size = sizeof(io->c); ida_ctlr_ioctl()
1305 c->req.sg[0].addr = pci_map_single(h->pci_dev,&io->c, ida_ctlr_ioctl()
1306 c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL); ida_ctlr_ioctl()
1322 pci_unmap_single(h->pci_dev, c->req.sg[0].addr, c->req.sg[0].size, ida_ctlr_ioctl()
1334 if (copy_to_user(io->sg[0].addr, p, io->sg[0].size)) { ida_ctlr_ioctl()
1439 c->req.sg[0].size = 512; sendcmd()
1441 c->req.sg[0].size = size; sendcmd()
1446 c->req.sg[0].addr = (__u32) pci_map_single(info_p->pci_dev, sendcmd()
1447 buff, c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL); sendcmd()
1471 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr, sendcmd()
1472 c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL); sendcmd()
H A Dida_ioctl.h58 /* currently, sg_cnt is assumed to be 1: only the 0th element of sg is used */
62 } sg[SG_MAX]; member in struct:__anon3601
H A Dxen-blkfront.c79 struct scatterlist *sg; member in struct:blk_shadow
405 struct scatterlist *sg; blkif_queue_request() local
454 nseg = blk_rq_map_sg(req->q, req, info->shadow[id].sg); blkif_queue_request()
497 for_each_sg(info->shadow[id].sg, sg, nseg, i) { blkif_queue_request()
498 fsect = sg->offset >> 9; blkif_queue_request()
499 lsect = fsect + (sg->length >> 9) - 1; blkif_queue_request()
525 gnt_list_entry = get_grant(&gref_head, page_to_pfn(sg_page(sg)), info); blkif_queue_request()
534 BUG_ON(sg->offset + sg->length > PAGE_SIZE); blkif_queue_request()
537 bvec_data = kmap_atomic(sg_page(sg)); blkif_queue_request()
541 * range sg->offset..sg->offset+sg->length. blkif_queue_request()
548 memcpy(shared_data + sg->offset, blkif_queue_request()
549 bvec_data + sg->offset, blkif_queue_request()
550 sg->length); blkif_queue_request()
1024 kfree(info->shadow[i].sg); blkif_free()
1025 info->shadow[i].sg = NULL; blkif_free()
1052 struct scatterlist *sg; blkif_completion() local
1067 for_each_sg(s->sg, sg, nseg, i) { blkif_completion()
1068 BUG_ON(sg->offset + sg->length > PAGE_SIZE); blkif_completion()
1071 bvec_data = kmap_atomic(sg_page(sg)); blkif_completion()
1072 memcpy(bvec_data + sg->offset, blkif_completion()
1073 shared_data + sg->offset, blkif_completion()
1074 sg->length); blkif_completion()
1727 info->shadow[i].sg = kzalloc(sizeof(info->shadow[i].sg[0]) * segs, GFP_NOIO); blkfront_setup_indirect()
1734 (info->shadow[i].sg == NULL) || blkfront_setup_indirect()
1738 sg_init_table(info->shadow[i].sg, segs); blkfront_setup_indirect()
1748 kfree(info->shadow[i].sg); blkfront_setup_indirect()
1749 info->shadow[i].sg = NULL; blkfront_setup_indirect()
/linux-4.1.27/arch/powerpc/platforms/powernv/
H A Dopal.c885 /* Convert a region of vmalloc memory to an opal sg list */ opal_vmalloc_to_sg_list()
889 struct opal_sg_list *sg, *first = NULL; opal_vmalloc_to_sg_list() local
892 sg = kzalloc(PAGE_SIZE, GFP_KERNEL); opal_vmalloc_to_sg_list()
893 if (!sg) opal_vmalloc_to_sg_list()
896 first = sg; opal_vmalloc_to_sg_list()
902 sg->entry[i].data = cpu_to_be64(data); opal_vmalloc_to_sg_list()
903 sg->entry[i].length = cpu_to_be64(length); opal_vmalloc_to_sg_list()
913 sg->length = cpu_to_be64( opal_vmalloc_to_sg_list()
916 sg->next = cpu_to_be64(__pa(next)); opal_vmalloc_to_sg_list()
917 sg = next; opal_vmalloc_to_sg_list()
924 sg->length = cpu_to_be64(i * sizeof(struct opal_sg_entry) + 16); opal_vmalloc_to_sg_list()
934 void opal_free_sg_list(struct opal_sg_list *sg) opal_free_sg_list() argument
936 while (sg) { opal_free_sg_list()
937 uint64_t next = be64_to_cpu(sg->next); opal_free_sg_list()
939 kfree(sg); opal_free_sg_list()
942 sg = __va(next); opal_free_sg_list()
944 sg = NULL; opal_free_sg_list()
/linux-4.1.27/drivers/virtio/
H A Dvirtio_ring.c132 struct scatterlist *sg; virtqueue_add() local
207 for (sg = sgs[n]; sg; sg = sg_next(sg)) { virtqueue_add()
209 desc[i].addr = cpu_to_virtio64(_vq->vdev, sg_phys(sg)); virtqueue_add()
210 desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length); virtqueue_add()
216 for (sg = sgs[n]; sg; sg = sg_next(sg)) { virtqueue_add()
218 desc[i].addr = cpu_to_virtio64(_vq->vdev, sg_phys(sg)); virtqueue_add()
219 desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length); virtqueue_add()
283 struct scatterlist *sg; virtqueue_add_sgs() local
284 for (sg = sgs[i]; sg; sg = sg_next(sg)) virtqueue_add_sgs()
294 * @sg: scatterlist (must be well-formed and terminated!)
295 * @num: the number of entries in @sg readable by other side
305 struct scatterlist *sg, unsigned int num, virtqueue_add_outbuf()
309 return virtqueue_add(vq, &sg, num, 1, 0, data, gfp); virtqueue_add_outbuf()
316 * @sg: scatterlist (must be well-formed and terminated!)
317 * @num: the number of entries in @sg writable by other side
327 struct scatterlist *sg, unsigned int num, virtqueue_add_inbuf()
331 return virtqueue_add(vq, &sg, num, 0, 1, data, gfp); virtqueue_add_inbuf()
304 virtqueue_add_outbuf(struct virtqueue *vq, struct scatterlist *sg, unsigned int num, void *data, gfp_t gfp) virtqueue_add_outbuf() argument
326 virtqueue_add_inbuf(struct virtqueue *vq, struct scatterlist *sg, unsigned int num, void *data, gfp_t gfp) virtqueue_add_inbuf() argument
/linux-4.1.27/arch/nios2/include/asm/
H A Ddma-mapping.h78 extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
84 extern void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
96 extern void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
98 extern void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
/linux-4.1.27/arch/blackfin/include/asm/
H A Ddma-mapping.h99 extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
103 dma_unmap_sg(struct device *dev, struct scatterlist *sg, dma_unmap_sg() argument
140 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents, dma_sync_sg_for_cpu() argument
147 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
/linux-4.1.27/sound/core/
H A Dsgbuf.c141 * compute the max chunk size with continuous pages on sg-buffer
146 struct snd_sg_buf *sg = dmab->private_data; snd_sgbuf_get_chunk_size() local
152 pg = sg->table[start].addr >> PAGE_SHIFT; snd_sgbuf_get_chunk_size()
158 if ((sg->table[start].addr >> PAGE_SHIFT) != pg) snd_sgbuf_get_chunk_size()
/linux-4.1.27/arch/arm/mm/
H A Ddma-mapping.c793 * A single sg entry may refer to multiple physically contiguous dma_cache_maint_page()
888 * @sg: list of buffers
901 int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, arm_dma_map_sg() argument
908 for_each_sg(sg, s, nents, i) { for_each_sg()
920 for_each_sg(sg, s, i, j)
928 * @sg: list of buffers
935 void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, arm_dma_unmap_sg() argument
943 for_each_sg(sg, s, nents, i) arm_dma_unmap_sg()
950 * @sg: list of buffers
954 void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, arm_dma_sync_sg_for_cpu() argument
961 for_each_sg(sg, s, nents, i) arm_dma_sync_sg_for_cpu()
969 * @sg: list of buffers
973 void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, arm_dma_sync_sg_for_device() argument
980 for_each_sg(sg, s, nents, i) arm_dma_sync_sg_for_device()
1492 static int __map_sg_chunk(struct device *dev, struct scatterlist *sg, __map_sg_chunk() argument
1511 for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) { __map_sg_chunk()
1536 static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents, __iommu_map_sg() argument
1540 struct scatterlist *s = sg, *dma = sg, *start = sg; __iommu_map_sg()
1577 for_each_sg(sg, s, count, i) __iommu_map_sg()
1585 * @sg: list of buffers
1594 int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg, arm_coherent_iommu_map_sg() argument
1597 return __iommu_map_sg(dev, sg, nents, dir, attrs, true); arm_coherent_iommu_map_sg()
1603 * @sg: list of buffers
1612 int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, arm_iommu_map_sg() argument
1615 return __iommu_map_sg(dev, sg, nents, dir, attrs, false); arm_iommu_map_sg()
1618 static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg, __iommu_unmap_sg() argument
1625 for_each_sg(sg, s, nents, i) { for_each_sg()
1639 * @sg: list of buffers
1646 void arm_coherent_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, arm_coherent_iommu_unmap_sg() argument
1649 __iommu_unmap_sg(dev, sg, nents, dir, attrs, true); arm_coherent_iommu_unmap_sg()
1655 * @sg: list of buffers
1662 void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, arm_iommu_unmap_sg() argument
1665 __iommu_unmap_sg(dev, sg, nents, dir, attrs, false); arm_iommu_unmap_sg()
1671 * @sg: list of buffers
1675 void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, arm_iommu_sync_sg_for_cpu() argument
1681 for_each_sg(sg, s, nents, i) arm_iommu_sync_sg_for_cpu()
1689 * @sg: list of buffers
1693 void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg, arm_iommu_sync_sg_for_device() argument
1699 for_each_sg(sg, s, nents, i) arm_iommu_sync_sg_for_device()
/linux-4.1.27/drivers/ata/
H A Dpata_pxa.c60 static void pxa_load_dmac(struct scatterlist *sg, struct ata_queued_cmd *qc) pxa_load_dmac() argument
67 cpu_addr = sg_dma_address(sg); pxa_load_dmac()
68 cpu_len = sg_dma_len(sg); pxa_load_dmac()
109 struct scatterlist *sg; pxa_qc_prep() local
119 for_each_sg(qc->sg, sg, qc->n_elem, si) pxa_qc_prep()
120 pxa_load_dmac(sg, qc); pxa_qc_prep()
/linux-4.1.27/drivers/media/pci/bt8xx/
H A Dbttv-risc.c54 struct scatterlist *sg; bttv_risc_packed() local
80 sg = sglist; bttv_risc_packed()
85 while (offset && offset >= sg_dma_len(sg)) { bttv_risc_packed()
86 offset -= sg_dma_len(sg); bttv_risc_packed()
87 sg = sg_next(sg); bttv_risc_packed()
89 if (bpl <= sg_dma_len(sg)-offset) { bttv_risc_packed()
93 *(rp++)=cpu_to_le32(sg_dma_address(sg)+offset); bttv_risc_packed()
99 (sg_dma_len(sg)-offset)); bttv_risc_packed()
100 *(rp++)=cpu_to_le32(sg_dma_address(sg)+offset); bttv_risc_packed()
101 todo -= (sg_dma_len(sg)-offset); bttv_risc_packed()
103 sg = sg_next(sg); bttv_risc_packed()
104 while (todo > sg_dma_len(sg)) { bttv_risc_packed()
106 sg_dma_len(sg)); bttv_risc_packed()
107 *(rp++)=cpu_to_le32(sg_dma_address(sg)); bttv_risc_packed()
108 todo -= sg_dma_len(sg); bttv_risc_packed()
109 sg = sg_next(sg); bttv_risc_packed()
113 *(rp++)=cpu_to_le32(sg_dma_address(sg)); bttv_risc_packed()
187 /* go to next sg entry if needed */ bttv_risc_planar()
/linux-4.1.27/drivers/scsi/qla2xxx/
H A Dqla_iocb.c200 struct scatterlist *sg; qla2x00_build_scsi_iocbs_32() local
223 scsi_for_each_sg(cmd, sg, tot_dsds, i) { scsi_for_each_sg()
237 *cur_dsd++ = cpu_to_le32(sg_dma_address(sg)); scsi_for_each_sg()
238 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg)); scsi_for_each_sg()
258 struct scatterlist *sg; qla2x00_build_scsi_iocbs_64() local
281 scsi_for_each_sg(cmd, sg, tot_dsds, i) { scsi_for_each_sg()
296 sle_dma = sg_dma_address(sg); scsi_for_each_sg()
299 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg)); scsi_for_each_sg()
364 /* Map the sg table so we have an accurate count of sg entries needed */ qla2x00_start_scsi()
711 struct scatterlist *sg; qla24xx_build_scsi_iocbs() local
749 scsi_for_each_sg(cmd, sg, tot_dsds, i) { scsi_for_each_sg()
764 sle_dma = sg_dma_address(sg); scsi_for_each_sg()
767 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg)); scsi_for_each_sg()
880 struct scatterlist *sg; qla24xx_get_one_block_sg() local
887 sg = sgx->cur_sg; qla24xx_get_one_block_sg()
890 sg_dma_addr = sg_dma_address(sg); qla24xx_get_one_block_sg()
891 sg_len = sg_dma_len(sg); qla24xx_get_one_block_sg()
909 sg = sg_next(sg); qla24xx_get_one_block_sg()
911 sgx->cur_sg = sg; qla24xx_get_one_block_sg()
953 sgx.cur_sg = tc->sg; qla24xx_walk_and_build_sglist_no_difb()
1045 struct scatterlist *sg, *sgl; qla24xx_walk_and_build_sglist() local
1057 sgl = tc->sg; qla24xx_walk_and_build_sglist()
1065 for_each_sg(sgl, sg, tot_dsds, i) { for_each_sg()
1112 sle_dma = sg_dma_address(sg); for_each_sg()
1116 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg)); for_each_sg()
1135 struct scatterlist *sg, *sgl; qla24xx_walk_and_build_prot_sglist() local
1157 for_each_sg(sgl, sg, tot_dsds, i) { for_each_sg()
1204 sle_dma = sg_dma_address(sg); for_each_sg()
1208 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg)); for_each_sg()
1492 /* Map the sg table so we have an accurate count of sg entries needed */ qla24xx_start_scsi()
1656 /* Map the sg table so we have an accurate count of sg entries needed */ qla24xx_dif_start_scsi()
1704 /* Total Data and protection sg segment(s) */ qla24xx_dif_start_scsi()
2087 struct scatterlist *sg; qla2x00_ct_iocb() local
2131 for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) { qla2x00_ct_iocb()
2149 sle_dma = sg_dma_address(sg); qla2x00_ct_iocb()
2152 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg)); qla2x00_ct_iocb()
2166 struct scatterlist *sg; qla24xx_ct_iocb() local
2206 for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) { qla24xx_ct_iocb()
2224 sle_dma = sg_dma_address(sg); qla24xx_ct_iocb()
2227 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg)); qla24xx_ct_iocb()
2300 /* Map the sg table so we have an accurate count of sg entries needed */ qla82xx_start_scsi()
2678 struct scatterlist *sg; qla25xx_build_bidir_iocb() local
2716 for_each_sg(bsg_job->request_payload.sg_list, sg, qla25xx_build_bidir_iocb()
2731 sle_dma = sg_dma_address(sg); qla25xx_build_bidir_iocb()
2734 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg)); qla25xx_build_bidir_iocb()
2742 for_each_sg(bsg_job->reply_payload.sg_list, sg, qla25xx_build_bidir_iocb()
2757 sle_dma = sg_dma_address(sg); qla25xx_build_bidir_iocb()
2760 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg)); qla25xx_build_bidir_iocb()
/linux-4.1.27/drivers/ide/
H A Dau1xxx-ide.c219 struct scatterlist *sg; auide_build_dmatable() local
227 sg = hwif->sg_table; auide_build_dmatable()
228 while (i && sg_dma_len(sg)) { auide_build_dmatable()
232 cur_addr = sg_dma_address(sg); auide_build_dmatable()
233 cur_len = sg_dma_len(sg); auide_build_dmatable()
253 sg_phys(sg), tc, flags)) { auide_build_dmatable()
259 sg_phys(sg), tc, flags)) { auide_build_dmatable()
268 sg = sg_next(sg); auide_build_dmatable()
/linux-4.1.27/drivers/memstick/host/
H A Dtifm_ms.c193 length = host->req->sg.length - host->block_pos; tifm_ms_transfer_data()
194 off = host->req->sg.offset + host->block_pos; tifm_ms_transfer_data()
206 pg = nth_page(sg_page(&host->req->sg), tifm_ms_transfer_data()
271 data_len = host->req->sg.length; tifm_ms_issue_cmd()
285 if (1 != tifm_map_sg(sock, &host->req->sg, 1, tifm_ms_issue_cmd()
292 data_len = sg_dma_len(&host->req->sg); tifm_ms_issue_cmd()
305 writel(sg_dma_address(&host->req->sg), tifm_ms_issue_cmd()
356 tifm_unmap_sg(sock, &host->req->sg, 1, tifm_ms_complete_cmd()
613 tifm_unmap_sg(sock, &host->req->sg, 1, tifm_ms_remove()
/linux-4.1.27/drivers/media/pci/cx88/
H A Dcx88-core.c81 struct scatterlist *sg; cx88_risc_field() local
94 sg = sglist; cx88_risc_field()
96 while (offset && offset >= sg_dma_len(sg)) { cx88_risc_field()
97 offset -= sg_dma_len(sg); cx88_risc_field()
98 sg = sg_next(sg); cx88_risc_field()
104 if (bpl <= sg_dma_len(sg)-offset) { cx88_risc_field()
107 *(rp++)=cpu_to_le32(sg_dma_address(sg)+offset); cx88_risc_field()
113 (sg_dma_len(sg)-offset)); cx88_risc_field()
114 *(rp++)=cpu_to_le32(sg_dma_address(sg)+offset); cx88_risc_field()
115 todo -= (sg_dma_len(sg)-offset); cx88_risc_field()
117 sg = sg_next(sg); cx88_risc_field()
118 while (todo > sg_dma_len(sg)) { cx88_risc_field()
120 sg_dma_len(sg)); cx88_risc_field()
121 *(rp++)=cpu_to_le32(sg_dma_address(sg)); cx88_risc_field()
122 todo -= sg_dma_len(sg); cx88_risc_field()
123 sg = sg_next(sg); cx88_risc_field()
126 *(rp++)=cpu_to_le32(sg_dma_address(sg)); cx88_risc_field()
/linux-4.1.27/arch/arm/kernel/
H A Ddma.c124 void set_dma_sg (unsigned int chan, struct scatterlist *sg, int nr_sg) set_dma_sg() argument
131 dma->sg = sg; set_dma_sg()
148 dma->sg = NULL; __set_dma_addr()
165 dma->sg = NULL; set_dma_count()
/linux-4.1.27/net/ipx/
H A Dipx_route.c261 struct sockaddr_ipx *sg, *st; ipxrtr_ioctl() local
267 sg = (struct sockaddr_ipx *)&rt.rt_gateway; ipxrtr_ioctl()
272 sg->sipx_family != AF_IPX || ipxrtr_ioctl()
283 f.ipx_router_network = sg->sipx_network; ipxrtr_ioctl()
284 memcpy(f.ipx_router_node, sg->sipx_node, IPX_NODE_LEN); ipxrtr_ioctl()
/linux-4.1.27/drivers/crypto/ux500/hash/
H A Dhash_core.c153 static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg, hash_set_dma_transfer() argument
166 sg->length = ALIGN(sg->length, HASH_DMA_ALIGN_SIZE); hash_set_dma_transfer()
169 ctx->device->dma.sg = sg; hash_set_dma_transfer()
171 ctx->device->dma.sg, ctx->device->dma.nents, hash_set_dma_transfer()
175 dev_err(ctx->device->dev, "%s: Could not map the sg list (TO_DEVICE)\n", hash_set_dma_transfer()
183 ctx->device->dma.sg, ctx->device->dma.sg_len, hash_set_dma_transfer()
206 dma_unmap_sg(chan->device->dev, ctx->device->dma.sg, hash_dma_done()
211 struct scatterlist *sg, int len) hash_dma_write()
213 int error = hash_set_dma_transfer(ctx, sg, len, DMA_TO_DEVICE); hash_dma_write()
506 * hash_get_nents - Return number of entries (nents) in scatterlist (sg).
508 * @sg: Scatterlist.
510 * @aligned: True if sg data aligned to work in DMA mode.
513 static int hash_get_nents(struct scatterlist *sg, int size, bool *aligned) hash_get_nents() argument
518 while (size > 0 && sg) { hash_get_nents()
520 size -= sg->length; hash_get_nents()
523 if ((aligned && !IS_ALIGNED(sg->offset, HASH_DMA_ALIGN_SIZE)) || hash_get_nents()
524 (!IS_ALIGNED(sg->length, HASH_DMA_ALIGN_SIZE) && size > 0)) hash_get_nents()
527 sg = sg_next(sg); hash_get_nents()
540 * hash_dma_valid_data - checks for dma valid sg data.
541 * @sg: Scatterlist.
544 * NOTE! This function checks for dma valid sg data, since dma
547 static bool hash_dma_valid_data(struct scatterlist *sg, int datasize) hash_dma_valid_data() argument
552 if (hash_get_nents(sg, datasize, &aligned) < 1) hash_dma_valid_data()
210 hash_dma_write(struct hash_ctx *ctx, struct scatterlist *sg, int len) hash_dma_write() argument
/linux-4.1.27/drivers/tty/serial/
H A Dpch_uart.c798 struct scatterlist *sg = priv->sg_tx_p; pch_dma_tx_complete() local
801 for (i = 0; i < priv->nent; i++, sg++) { pch_dma_tx_complete()
802 xmit->tail += sg_dma_len(sg); pch_dma_tx_complete()
803 port->icount.tx += sg_dma_len(sg); pch_dma_tx_complete()
807 dma_unmap_sg(port->dev, sg, priv->nent, DMA_TO_DEVICE); pch_dma_tx_complete()
869 struct scatterlist *sg; dma_handle_rx() local
872 sg = &priv->sg_rx; dma_handle_rx()
876 sg_dma_len(sg) = priv->trigger_level; dma_handle_rx()
879 sg_dma_len(sg), (unsigned long)priv->rx_buf_virt & dma_handle_rx()
882 sg_dma_address(sg) = priv->rx_buf_dma; dma_handle_rx()
885 sg, 1, DMA_DEV_TO_MEM, dma_handle_rx()
949 struct scatterlist *sg; dma_handle_tx() local
1017 sg = priv->sg_tx_p; dma_handle_tx()
1019 for (i = 0; i < num; i++, sg++) { dma_handle_tx()
1021 sg_set_page(sg, virt_to_page(xmit->buf), dma_handle_tx()
1024 sg_set_page(sg, virt_to_page(xmit->buf), dma_handle_tx()
1028 sg = priv->sg_tx_p; dma_handle_tx()
1029 nent = dma_map_sg(port->dev, sg, num, DMA_TO_DEVICE); dma_handle_tx()
1036 for (i = 0; i < nent; i++, sg++) { dma_handle_tx()
1037 sg->offset = (xmit->tail & (UART_XMIT_SIZE - 1)) + dma_handle_tx()
1039 sg_dma_address(sg) = (sg_dma_address(sg) & dma_handle_tx()
1040 ~(UART_XMIT_SIZE - 1)) + sg->offset; dma_handle_tx()
1042 sg_dma_len(sg) = rem; dma_handle_tx()
1044 sg_dma_len(sg) = size; dma_handle_tx()
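
pch_uart builds its TX scatterlist by hand over the circular xmit buffer and then patches offsets and DMA addresses after dma_map_sg(). The more common pattern, shown here as a hedged sketch (build_tx_sg() is an illustrative name, not the driver's), is to describe each contiguous piece with sg_set_buf() and use the mapping as returned:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int build_tx_sg(struct device *dev, struct scatterlist *sgl,
		       const void *head, size_t head_len,
		       const void *tail, size_t tail_len)
{
	int num = tail_len ? 2 : 1;	/* a circular buffer wraps into at most two pieces */

	sg_init_table(sgl, num);
	sg_set_buf(&sgl[0], head, head_len);
	if (tail_len)
		sg_set_buf(&sgl[1], tail, tail_len);

	/* Returns the number of DMA segments actually produced (an IOMMU may
	 * merge entries); 0 means the mapping failed. */
	return dma_map_sg(dev, sgl, num, DMA_TO_DEVICE);
}

The completion path then calls dma_unmap_sg() with the same entry count that was passed in, exactly as pch_dma_tx_complete() does above.
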
/linux-4.1.27/drivers/gpu/drm/radeon/
H A Dradeon_ttm.c529 /* prepare the sg table with the user pages */ radeon_ttm_tt_pin_userptr()
568 r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0, radeon_ttm_tt_pin_userptr()
575 nents = dma_map_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction); radeon_ttm_tt_pin_userptr()
576 if (nents != ttm->sg->nents) radeon_ttm_tt_pin_userptr()
579 drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, radeon_ttm_tt_pin_userptr()
585 kfree(ttm->sg); radeon_ttm_tt_pin_userptr()
603 if (!ttm->sg->sgl) radeon_ttm_tt_unpin_userptr()
606 /* free the sg table and pages again */ radeon_ttm_tt_unpin_userptr()
607 dma_unmap_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction); radeon_ttm_tt_unpin_userptr()
609 for_each_sg_page(ttm->sg->sgl, &sg_iter, ttm->sg->nents, 0) { radeon_ttm_tt_unpin_userptr()
618 sg_free_table(ttm->sg); radeon_ttm_tt_unpin_userptr()
724 ttm->sg = kcalloc(1, sizeof(struct sg_table), GFP_KERNEL); radeon_ttm_tt_populate()
725 if (!ttm->sg) radeon_ttm_tt_populate()
733 if (slave && ttm->sg) { radeon_ttm_tt_populate()
734 drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, radeon_ttm_tt_populate()
783 kfree(ttm->sg); radeon_ttm_tt_unpopulate()
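
radeon_ttm_tt_pin_userptr()/unpin_userptr() above are a compact example of the sg_table life cycle for pinned user pages: allocate the table from a page array, DMA-map it, and later unmap and free it. A reduced sketch of that round trip; map_user_pages()/unmap_user_pages() are illustrative names and error handling is kept minimal:

#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static struct sg_table *map_user_pages(struct device *dev, struct page **pages,
				       unsigned long nr_pages,
				       enum dma_data_direction dir)
{
	struct sg_table *sgt;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	/* Physically adjacent pages are merged into fewer, larger entries. */
	if (sg_alloc_table_from_pages(sgt, pages, nr_pages, 0,
				      nr_pages << PAGE_SHIFT, GFP_KERNEL))
		goto free_sgt;

	if (dma_map_sg(dev, sgt->sgl, sgt->nents, dir) == 0)
		goto free_table;

	return sgt;

free_table:
	sg_free_table(sgt);
free_sgt:
	kfree(sgt);
	return NULL;
}

static void unmap_user_pages(struct device *dev, struct sg_table *sgt,
			     enum dma_data_direction dir)
{
	/* Unmap with the original nents, then release the table itself. */
	dma_unmap_sg(dev, sgt->sgl, sgt->nents, dir);
	sg_free_table(sgt);
	kfree(sgt);
}
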
/linux-4.1.27/drivers/char/hw_random/
H A Dvirtio-rng.c56 struct scatterlist sg; register_buffer() local
58 sg_init_one(&sg, buf, size); register_buffer()
61 virtqueue_add_inbuf(vi->vq, &sg, 1, buf, GFP_KERNEL); register_buffer()
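
register_buffer() in virtio-rng is about the smallest useful scatterlist: one entry wrapped around one buffer, queued as device-writable space. A sketch of the same pattern (post_inbuf() is an illustrative name):

#include <linux/gfp.h>
#include <linux/scatterlist.h>
#include <linux/virtio.h>

static int post_inbuf(struct virtqueue *vq, void *buf, unsigned int len)
{
	struct scatterlist sg;

	sg_init_one(&sg, buf, len);

	/* `buf` doubles as the token that virtqueue_get_buf() hands back
	 * once the device has filled the buffer. */
	return virtqueue_add_inbuf(vq, &sg, 1, buf, GFP_KERNEL);
}

A caller would normally follow a successful add with virtqueue_kick() so the device notices the new buffer.
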
/linux-4.1.27/drivers/hsi/controllers/
H A Domap_ssi.h119 * @sg: Pointer to the current sg entry being served
123 struct scatterlist *sg; member in struct:gdd_trn
/linux-4.1.27/arch/hexagon/kernel/
H A Ddma.c117 static int hexagon_map_sg(struct device *hwdev, struct scatterlist *sg, hexagon_map_sg() argument
124 WARN_ON(nents == 0 || sg[0].length == 0); hexagon_map_sg()
126 for_each_sg(sg, s, nents, i) { for_each_sg()
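
hexagon_map_sg() has the usual shape of an architecture map_sg hook: iterate with for_each_sg(), fill in the DMA address and length per entry, and perform whatever cache maintenance the CPU needs. A hedged sketch with the architecture-specific part left as a comment:

#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>

static int example_map_sg(struct device *dev, struct scatterlist *sgl,
			  int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sgl[0].length == 0);

	for_each_sg(sgl, s, nents, i) {
		s->dma_address = sg_phys(s);
		sg_dma_len(s) = s->length;

		/* A real implementation would flush or invalidate the CPU
		 * caches for this region here, depending on `dir`. */
	}

	return nents;
}
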
/linux-4.1.27/drivers/staging/rtl8192e/rtl8192e/
H A Drtl_crypto.h165 struct scatterlist *sg, unsigned int nsg);
167 void (*dit_digest)(struct crypto_tfm *tfm, struct scatterlist *sg,
277 struct scatterlist *sg, crypto_digest_update()
281 tfm->crt_digest.dit_update(tfm, sg, nsg); crypto_digest_update()
291 struct scatterlist *sg, crypto_digest_digest()
295 tfm->crt_digest.dit_digest(tfm, sg, nsg, out); crypto_digest_digest()
276 crypto_digest_update(struct crypto_tfm *tfm, struct scatterlist *sg, unsigned int nsg) crypto_digest_update() argument
290 crypto_digest_digest(struct crypto_tfm *tfm, struct scatterlist *sg, unsigned int nsg, u8 *out) crypto_digest_digest() argument
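
rtl_crypto.h is a staging-driver copy of an older digest interface that still takes scatterlists directly. Going only by the signatures visible above, a flat buffer would be hashed roughly like this (digest_buffer() is an illustrative helper and assumes that header is included):

#include <linux/scatterlist.h>
#include <linux/types.h>

/* Assumes the staging driver's rtl_crypto.h (above) provides
 * struct crypto_tfm and crypto_digest_digest(). */
static void digest_buffer(struct crypto_tfm *tfm,
			  const void *data, unsigned int len, u8 *out)
{
	struct scatterlist sg;

	sg_init_one(&sg, data, len);

	/* init + update + final over the single-entry list in one call. */
	crypto_digest_digest(tfm, &sg, 1, out);
}
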
