/*
 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>

struct vb2_dc_conf {
	struct device		*dev;
};

struct vb2_dc_buf {
	struct device			*dev;
	void				*vaddr;
	unsigned long			size;
	dma_addr_t			dma_addr;
	enum dma_data_direction		dma_dir;
	struct sg_table			*dma_sgt;

	/* MMAP related */
	struct vb2_vmarea_handler	handler;
	atomic_t			refcount;
	struct sg_table			*sgt_base;

	/* USERPTR related */
	struct vm_area_struct		*vma;

	/* DMABUF related */
	struct dma_buf_attachment	*db_attach;
};

/*********************************************/
/*        scatterlist table functions        */
/*********************************************/

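/* Call @cb on every page backing the scatterlist, walking each entry page by page. */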
static void vb2_dc_sgt_foreach_page(struct sg_table *sgt,
	void (*cb)(struct page *pg))
{
	struct scatterlist *s;
	unsigned int i;

	for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
		struct page *page = sg_page(s);
		unsigned int n_pages = PAGE_ALIGN(s->offset + s->length)
			>> PAGE_SHIFT;
		unsigned int j;

		for (j = 0; j < n_pages; ++j, ++page)
			cb(page);
	}
}

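/*
 * Return the size of the DMA-contiguous prefix of a mapped scatterlist,
 * i.e. how many bytes are usable as a single contiguous chunk.
 */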
static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
	struct scatterlist *s;
	dma_addr_t expected = sg_dma_address(sgt->sgl);
	unsigned int i;
	unsigned long size = 0;

	for_each_sg(sgt->sgl, s, sgt->nents, i) {
		if (sg_dma_address(s) != expected)
			break;
		expected = sg_dma_address(s) + sg_dma_len(s);
		size += sg_dma_len(s);
	}
	return size;
}

/*********************************************/
/*         callbacks for all buffers         */
/*********************************************/

static void *vb2_dc_cookie(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return &buf->dma_addr;
}

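/* Kernel virtual address of the buffer; DMABUF imports are vmap'ed lazily. */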
static void *vb2_dc_vaddr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!buf->vaddr && buf->db_attach)
		buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);

	return buf->vaddr;
}

static unsigned int vb2_dc_num_users(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return atomic_read(&buf->refcount);
}

static void vb2_dc_prepare(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}

static void vb2_dc_finish(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}

/*********************************************/
/*        callbacks for MMAP buffers         */
/*********************************************/

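/*
 * Drop one reference to an MMAP buffer; when the last reference goes away,
 * release the exporter sg table, the coherent memory and the device.
 */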
static void vb2_dc_put(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!atomic_dec_and_test(&buf->refcount))
		return;

	if (buf->sgt_base) {
		sg_free_table(buf->sgt_base);
		kfree(buf->sgt_base);
	}
	dma_free_coherent(buf->dev, buf->size, buf->vaddr, buf->dma_addr);
	put_device(buf->dev);
	kfree(buf);
}

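/*
 * Allocate a physically contiguous buffer with dma_alloc_coherent() and
 * take a reference on the device for the lifetime of the buffer.
 */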
static void *vb2_dc_alloc(void *alloc_ctx, unsigned long size,
			  enum dma_data_direction dma_dir, gfp_t gfp_flags)
{
	struct vb2_dc_conf *conf = alloc_ctx;
	struct device *dev = conf->dev;
	struct vb2_dc_buf *buf;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->vaddr = dma_alloc_coherent(dev, size, &buf->dma_addr,
						GFP_KERNEL | gfp_flags);
	if (!buf->vaddr) {
		dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	/* Prevent the device from being released while the buffer is used */
	buf->dev = get_device(dev);
	buf->size = size;
	buf->dma_dir = dma_dir;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dc_put;
	buf->handler.arg = buf;

	atomic_inc(&buf->refcount);

	return buf;
}

static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		printk(KERN_ERR "No buffer to map\n");
		return -EINVAL;
	}

	/*
	 * dma_mmap_* uses vm_pgoff as the in-buffer offset, but we want to
	 * map the whole buffer.
	 */
	vma->vm_pgoff = 0;

	ret = dma_mmap_coherent(buf->dev, vma, buf->vaddr,
		buf->dma_addr, buf->size);

	if (ret) {
		pr_err("Remapping memory failed, error: %d\n", ret);
		return ret;
	}

	vma->vm_flags		|= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_private_data	= &buf->handler;
	vma->vm_ops		= &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %ld\n",
		__func__, (unsigned long)buf->dma_addr, vma->vm_start,
		buf->size);

	return 0;
}

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

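/*
 * Per-attachment state: a private copy of the exporter's scatterlist and
 * the direction it is currently mapped in (DMA_NONE while unmapped).
 */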
struct vb2_dc_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_dc_attachment *attach;
	unsigned int i;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt;
	struct vb2_dc_buf *buf = dbuf->priv;
	int ret;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	/* Copy the buf->sgt_base scatter list to the attachment, as we can't
	 * map the same scatter list to multiple attachments at the same time.
	 */
	ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return -ENOMEM;
	}

	rd = buf->sgt_base->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;

	return 0;
}

static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

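/*
 * Map the attachment's private scatterlist copy for the importing device,
 * reusing a previous mapping when the requested direction has not changed.
 */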
static struct sg_table *vb2_dc_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;
	int ret;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
		attach->dma_dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dma_dir);
	if (ret <= 0) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	mutex_unlock(lock);

	return sgt;
}

static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_dc_get_dmabuf */
	vb2_dc_put(dbuf->priv);
}

static void *vb2_dc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
	struct vb2_dc_buf *buf = dbuf->priv;

	return buf->vaddr + pgnum * PAGE_SIZE;
}

static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
	struct vb2_dc_buf *buf = dbuf->priv;

	return buf->vaddr;
}

static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_dc_mmap(dbuf->priv, vma);
}

static struct dma_buf_ops vb2_dc_dmabuf_ops = {
	.attach = vb2_dc_dmabuf_ops_attach,
	.detach = vb2_dc_dmabuf_ops_detach,
	.map_dma_buf = vb2_dc_dmabuf_ops_map,
	.unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
	.kmap = vb2_dc_dmabuf_ops_kmap,
	.kmap_atomic = vb2_dc_dmabuf_ops_kmap,
	.vmap = vb2_dc_dmabuf_ops_vmap,
	.mmap = vb2_dc_dmabuf_ops_mmap,
	.release = vb2_dc_dmabuf_ops_release,
};

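/*
 * Build an sg table describing the coherent buffer via dma_get_sgtable();
 * the result is cached in buf->sgt_base and reused for every export.
 */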
static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
{
	int ret;
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		dev_err(buf->dev, "failed to alloc sg table\n");
		return NULL;
	}

	ret = dma_get_sgtable(buf->dev, sgt, buf->vaddr, buf->dma_addr,
		buf->size);
	if (ret < 0) {
		dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
		kfree(sgt);
		return NULL;
	}

	return sgt;
}

static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_dc_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (!buf->sgt_base)
		buf->sgt_base = vb2_dc_get_base_sgt(buf);

	if (WARN_ON(!buf->sgt_base))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	atomic_inc(&buf->refcount);

	return dbuf;
}

/*********************************************/
/*       callbacks for USERPTR buffers       */
/*********************************************/

static inline int vma_is_io(struct vm_area_struct *vma)
{
	return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
}

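/*
 * For VM_IO/VM_PFNMAP vmas there are no struct pages to pin; check that the
 * range maps physically contiguous pfns and return the first one in @res.
 */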
static int vb2_dc_get_user_pfn(unsigned long start, int n_pages,
	struct vm_area_struct *vma, unsigned long *res)
{
	unsigned long pfn, start_pfn, prev_pfn;
	unsigned int i;
	int ret;

	if (!vma_is_io(vma))
		return -EFAULT;

	ret = follow_pfn(vma, start, &pfn);
	if (ret)
		return ret;

	start_pfn = pfn;
	start += PAGE_SIZE;

	for (i = 1; i < n_pages; ++i, start += PAGE_SIZE) {
		prev_pfn = pfn;
		ret = follow_pfn(vma, start, &pfn);

		if (ret) {
			pr_err("no page for address %lu\n", start);
			return ret;
		}
		if (pfn != prev_pfn + 1)
			return -EINVAL;
	}

	*res = start_pfn;
	return 0;
}

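/*
 * Fill @pages for the user range: translate pfns for VM_IO/VM_PFNMAP vmas,
 * otherwise pin the pages with get_user_pages().
 */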
static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
	int n_pages, struct vm_area_struct *vma,
	enum dma_data_direction dma_dir)
{
	if (vma_is_io(vma)) {
		unsigned int i;

		for (i = 0; i < n_pages; ++i, start += PAGE_SIZE) {
			unsigned long pfn;
			int ret = follow_pfn(vma, start, &pfn);

			if (ret) {
				pr_err("no page for address %lu\n", start);
				return ret;
			}

			/* only use the pfn once follow_pfn() has filled it in */
			if (!pfn_valid(pfn))
				return -EINVAL;

			pages[i] = pfn_to_page(pfn);
		}
	} else {
		int n;

		n = get_user_pages(current, current->mm, start & PAGE_MASK,
			n_pages, dma_dir == DMA_FROM_DEVICE, 1, pages, NULL);
		/* negative error means that no page was pinned */
		n = max(n, 0);
		if (n != n_pages) {
			pr_err("got only %d of %d user pages\n", n, n_pages);
			while (n)
				put_page(pages[--n]);
			return -EFAULT;
		}
	}

	return 0;
}

static void vb2_dc_put_dirty_page(struct page *page)
{
	set_page_dirty_lock(page);
	put_page(page);
}

static void vb2_dc_put_userptr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (sgt) {
		DEFINE_DMA_ATTRS(attrs);

		dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
		/*
		 * No need to sync to CPU, it's already synced to the CPU
		 * since the finish() memop will have been called before this.
		 */
		dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				   buf->dma_dir, &attrs);
		if (!vma_is_io(buf->vma))
			vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page);

		sg_free_table(sgt);
		kfree(sgt);
	}
	vb2_put_vma(buf->vma);
	kfree(buf);
}

/*
 * For some kinds of reserved memory there might be no struct page available,
 * so all that can be done to support such 'pages' is to try to convert the
 * pfn to a dma address or, as a last resort, just assume that
 * dma address == physical address (as was assumed in earlier versions
 * of videobuf2-dma-contig).
 */

#ifdef __arch_pfn_to_dma
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__arch_pfn_to_dma(dev, pfn);
}
#elif defined(__pfn_to_bus)
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__pfn_to_bus(pfn);
}
#elif defined(__pfn_to_phys)
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__pfn_to_phys(pfn);
}
#else
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	/* really, we cannot do anything better at this point */
	return (dma_addr_t)(pfn) << PAGE_SHIFT;
}
#endif

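/*
 * Pin a userspace range, build an sg table for it and map it for DMA.
 * For VM_IO/VM_PFNMAP vmas fall back to translating the (physically
 * contiguous) pfn range into a dma address directly.
 */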
static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_dc_conf *conf = alloc_ctx;
	struct vb2_dc_buf *buf;
	unsigned long start;
	unsigned long end;
	unsigned long offset;
	struct page **pages;
	int n_pages;
	int ret = 0;
	struct vm_area_struct *vma;
	struct sg_table *sgt;
	unsigned long contig_size;
	unsigned long dma_align = dma_get_cache_alignment();
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);

	/* Only cache aligned DMA transfers are reliable */
	if (!IS_ALIGNED(vaddr | size, dma_align)) {
		pr_debug("user data must be aligned to %lu bytes\n", dma_align);
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		pr_debug("size is zero\n");
		return ERR_PTR(-EINVAL);
	}

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = conf->dev;
	buf->dma_dir = dma_dir;

	start = vaddr & PAGE_MASK;
	offset = vaddr & ~PAGE_MASK;
	end = PAGE_ALIGN(vaddr + size);
	n_pages = (end - start) >> PAGE_SHIFT;

	pages = kmalloc(n_pages * sizeof(pages[0]), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		pr_err("failed to allocate pages table\n");
		goto fail_buf;
	}

	/* current->mm->mmap_sem is taken by videobuf2 core */
	vma = find_vma(current->mm, vaddr);
	if (!vma) {
		pr_err("no vma for address %lu\n", vaddr);
		ret = -EFAULT;
		goto fail_pages;
	}

	if (vma->vm_end < vaddr + size) {
		pr_err("vma at %lu is too small for %lu bytes\n", vaddr, size);
		ret = -EFAULT;
		goto fail_pages;
	}

	buf->vma = vb2_get_vma(vma);
	if (!buf->vma) {
		pr_err("failed to copy vma\n");
		ret = -ENOMEM;
		goto fail_pages;
	}

	/* extract page list from userspace mapping */
	ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, dma_dir);
	if (ret) {
		unsigned long pfn;
		if (vb2_dc_get_user_pfn(start, n_pages, vma, &pfn) == 0) {
			buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, pfn);
			buf->size = size;
			kfree(pages);
			return buf;
		}

		pr_err("failed to get user pages\n");
		goto fail_vma;
	}

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		pr_err("failed to allocate sg table\n");
		ret = -ENOMEM;
		goto fail_get_user_pages;
	}

	ret = sg_alloc_table_from_pages(sgt, pages, n_pages,
		offset, size, GFP_KERNEL);
	if (ret) {
		pr_err("failed to initialize sg table\n");
		goto fail_sgt;
	}

	/* pages are no longer needed */
	kfree(pages);
	pages = NULL;

	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				      buf->dma_dir, &attrs);
	if (sgt->nents <= 0) {
		pr_err("failed to map scatterlist\n");
		ret = -EIO;
		goto fail_sgt_init;
	}

	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < size) {
		pr_err("contiguous mapping is too small %lu/%lu\n",
			contig_size, size);
		ret = -EFAULT;
		goto fail_map_sg;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->size = size;
	buf->dma_sgt = sgt;

	return buf;

fail_map_sg:
	dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
			   buf->dma_dir, &attrs);

fail_sgt_init:
	if (!vma_is_io(buf->vma))
		vb2_dc_sgt_foreach_page(sgt, put_page);
	sg_free_table(sgt);

fail_sgt:
	kfree(sgt);

fail_get_user_pages:
	if (pages && !vma_is_io(buf->vma))
		while (n_pages)
			put_page(pages[--n_pages]);

fail_vma:
	vb2_put_vma(buf->vma);

fail_pages:
	kfree(pages); /* kfree is NULL-proof */

fail_buf:
	kfree(buf);

	return ERR_PTR(ret);
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

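/*
 * Map the attached dmabuf and make sure the resulting scatterlist is
 * DMA-contiguous and large enough for the buffer.
 */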
static int vb2_dc_map_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt;
	unsigned long contig_size;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non-attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
	if (IS_ERR(sgt)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	/* check that the dmabuf is big enough to hold a contiguous chunk */
	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < buf->size) {
		pr_err("contiguous chunk is too small %lu/%lu bytes\n",
			contig_size, buf->size);
		dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
		return -EFAULT;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;
	buf->vaddr = NULL;

	return 0;
}

static void vb2_dc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to unpin a non-attached buffer\n");
		return;
	}

	if (WARN_ON(!sgt)) {
		pr_err("dmabuf buffer is already unpinned\n");
		return;
	}

	if (buf->vaddr) {
		dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
		buf->vaddr = NULL;
	}
	dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

	buf->dma_addr = 0;
	buf->dma_sgt = NULL;
}

static void vb2_dc_detach_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;

	/* if vb2 works correctly you should never detach a mapped buffer */
	if (WARN_ON(buf->dma_addr))
		vb2_dc_unmap_dmabuf(buf);

	/* detach this attachment */
	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	kfree(buf);
}

static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_dc_conf *conf = alloc_ctx;
	struct vb2_dc_buf *buf;
	struct dma_buf_attachment *dba;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = conf->dev;
	/* create attachment for the dmabuf with the user device */
	dba = dma_buf_attach(dbuf, buf->dev);
	if (IS_ERR(dba)) {
		pr_err("failed to attach dmabuf\n");
		kfree(buf);
		return dba;
	}

	buf->dma_dir = dma_dir;
	buf->size = size;
	buf->db_attach = dba;

	return buf;
}

/*********************************************/
/*       DMA CONTIG exported functions       */
/*********************************************/

const struct vb2_mem_ops vb2_dma_contig_memops = {
	.alloc		= vb2_dc_alloc,
	.put		= vb2_dc_put,
	.get_dmabuf	= vb2_dc_get_dmabuf,
	.cookie		= vb2_dc_cookie,
	.vaddr		= vb2_dc_vaddr,
	.mmap		= vb2_dc_mmap,
	.get_userptr	= vb2_dc_get_userptr,
	.put_userptr	= vb2_dc_put_userptr,
	.prepare	= vb2_dc_prepare,
	.finish		= vb2_dc_finish,
	.map_dmabuf	= vb2_dc_map_dmabuf,
	.unmap_dmabuf	= vb2_dc_unmap_dmabuf,
	.attach_dmabuf	= vb2_dc_attach_dmabuf,
	.detach_dmabuf	= vb2_dc_detach_dmabuf,
	.num_users	= vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);

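/*
 * Allocate an allocation context bound to @dev; the returned pointer is
 * what drivers hand to vb2 as the allocation context for this allocator.
 */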
void *vb2_dma_contig_init_ctx(struct device *dev)
{
	struct vb2_dc_conf *conf;

	conf = kzalloc(sizeof *conf, GFP_KERNEL);
	if (!conf)
		return ERR_PTR(-ENOMEM);

	conf->dev = dev;

	return conf;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_init_ctx);

void vb2_dma_contig_cleanup_ctx(void *alloc_ctx)
{
	if (!IS_ERR_OR_NULL(alloc_ctx))
		kfree(alloc_ctx);
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_cleanup_ctx);

MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");