/*
    User DMA

    Copyright (C) 2003-2004  Kevin Thayer <nufan_wfk at yahoo.com>
    Copyright (C) 2004  Chris Kennedy <c@groovy.org>
    Copyright (C) 2005-2007  Hans Verkuil <hverkuil@xs4all.nl>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include "ivtv-driver.h"
#include "ivtv-udma.h"

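/*
 * Translate a user-space buffer (start address + size) into the page
 * bookkeeping used for DMA: the page-aligned start address, the byte
 * offset into the first page, the number of bytes used in the last page
 * (tail) and the total number of pages spanned.
 */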
void ivtv_udma_get_page_info(struct ivtv_dma_page_info *dma_page, unsigned long first, unsigned long size)
{
	dma_page->uaddr = first & PAGE_MASK;
	dma_page->offset = first & ~PAGE_MASK;
	dma_page->tail = 1 + ((first+size-1) & ~PAGE_MASK);
	dma_page->first = (first & PAGE_MASK) >> PAGE_SHIFT;
	dma_page->last = ((first+size-1) & PAGE_MASK) >> PAGE_SHIFT;
	dma_page->page_count = dma_page->last - dma_page->first + 1;
	if (dma_page->page_count == 1)
		dma_page->tail -= dma_page->offset;
}

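/*
 * Fill scatterlist entries for the pages described by @dma_page, starting
 * at index @map_offset into dma->map.  Pages that live in highmem are
 * copied through lowmem bounce pages.  Returns the index after the last
 * entry filled, or a negative value on failure.
 */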
int ivtv_udma_fill_sg_list(struct ivtv_user_dma *dma, struct ivtv_dma_page_info *dma_page, int map_offset)
{
	int i, offset;
	unsigned long flags;

	if (map_offset < 0)
		return map_offset;

	offset = dma_page->offset;

	/* Fill the SG list with the new values */
	for (i = 0; i < dma_page->page_count; i++) {
		unsigned int len = (i == dma_page->page_count - 1) ?
			dma_page->tail : PAGE_SIZE - offset;

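		/*
		 * Highmem pages have no permanent kernel mapping, so copy
		 * the data into a lowmem bounce page and let the scatterlist
		 * entry point at the bounce page instead of the user page.
		 */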
		if (PageHighMem(dma->map[map_offset])) {
			void *src;

			if (dma->bouncemap[map_offset] == NULL)
				dma->bouncemap[map_offset] = alloc_page(GFP_KERNEL);
			if (dma->bouncemap[map_offset] == NULL)
				return -1;
			local_irq_save(flags);
			src = kmap_atomic(dma->map[map_offset]) + offset;
			memcpy(page_address(dma->bouncemap[map_offset]) + offset, src, len);
			kunmap_atomic(src);
			local_irq_restore(flags);
			sg_set_page(&dma->SGlist[map_offset], dma->bouncemap[map_offset], len, offset);
		}
		else {
			sg_set_page(&dma->SGlist[map_offset], dma->map[map_offset], len, offset);
		}
		offset = 0;
		map_offset++;
	}
	return map_offset;
}

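/*
 * Copy the PCI-mapped scatterlist into the SGarray handed to the card:
 * each element gets the bus address as source, the current card buffer
 * offset as destination and the segment length as size.  Callers that
 * write to two separate card buffers pass the size of the first one in
 * @split so the destination switches to @buffer_offset_2 at that point;
 * a single-buffer transfer passes -1 so the switch never triggers.
 */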
void ivtv_udma_fill_sg_array(struct ivtv_user_dma *dma, u32 buffer_offset, u32 buffer_offset_2, u32 split)
{
	int i;
	struct scatterlist *sg;

	for (i = 0, sg = dma->SGlist; i < dma->SG_length; i++, sg = sg_next(sg)) {
		dma->SGarray[i].size = cpu_to_le32(sg_dma_len(sg));
		dma->SGarray[i].src = cpu_to_le32(sg_dma_address(sg));
		dma->SGarray[i].dst = cpu_to_le32(buffer_offset);
		buffer_offset += sg_dma_len(sg);

		split -= sg_dma_len(sg);
		if (split == 0)
			buffer_offset = buffer_offset_2;
	}
}

/* User DMA Buffers */
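/*
 * Map the SGarray descriptor buffer once (guarded by SG_handle) so that
 * its bus address can later be written to IVTV_REG_DECDMAADDR by
 * ivtv_udma_start().
 */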
void ivtv_udma_alloc(struct ivtv *itv)
{
	if (itv->udma.SG_handle == 0) {
		/* Map DMA Page Array Buffer */
		itv->udma.SG_handle = pci_map_single(itv->pdev, itv->udma.SGarray,
			   sizeof(itv->udma.SGarray), PCI_DMA_TODEVICE);
		ivtv_udma_sync_for_cpu(itv);
	}
}

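/*
 * Set up a DMA transfer from a user-space buffer to card memory at
 * ivtv_dest_addr: pin the user pages, build and PCI-map the scatterlist
 * (bouncing highmem pages) and fill the SGarray descriptors.  Returns
 * the number of pinned pages on success or a negative error code.
 * The transfer itself is kicked off via ivtv_udma_prepare(), and the
 * mapping is torn down with ivtv_udma_unmap() once the DMA has completed.
 */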
int ivtv_udma_setup(struct ivtv *itv, unsigned long ivtv_dest_addr,
		       void __user *userbuf, int size_in_bytes)
{
	struct ivtv_dma_page_info user_dma;
	struct ivtv_user_dma *dma = &itv->udma;
	int i, err;

	IVTV_DEBUG_DMA("ivtv_udma_setup, dst: 0x%08x\n", (unsigned int)ivtv_dest_addr);

	/* Still in USE */
	if (dma->SG_length || dma->page_count) {
		IVTV_DEBUG_WARN("ivtv_udma_setup: SG_length %d page_count %d still full?\n",
			   dma->SG_length, dma->page_count);
		return -EBUSY;
	}

	ivtv_udma_get_page_info(&user_dma, (unsigned long)userbuf, size_in_bytes);

	if (user_dma.page_count <= 0) {
		IVTV_DEBUG_WARN("ivtv_udma_setup: Error %d page_count from %d bytes %d offset\n",
			   user_dma.page_count, size_in_bytes, user_dma.offset);
		return -EINVAL;
	}

	/* Get user pages for DMA Xfer */
	err = get_user_pages_unlocked(current, current->mm,
			user_dma.uaddr, user_dma.page_count, 0, 1, dma->map);

	if (user_dma.page_count != err) {
		IVTV_DEBUG_WARN("failed to map user pages, returned %d instead of %d\n",
			   err, user_dma.page_count);
		if (err >= 0) {
			for (i = 0; i < err; i++)
				put_page(dma->map[i]);
			return -EINVAL;
		}
		return err;
	}

	dma->page_count = user_dma.page_count;

	/* Fill SG List with new values */
	if (ivtv_udma_fill_sg_list(dma, &user_dma, 0) < 0) {
		for (i = 0; i < dma->page_count; i++) {
			put_page(dma->map[i]);
		}
		dma->page_count = 0;
		return -ENOMEM;
	}

	/* Map SG List */
	dma->SG_length = pci_map_sg(itv->pdev, dma->SGlist, dma->page_count, PCI_DMA_TODEVICE);
	if (!dma->SG_length) {
		IVTV_DEBUG_WARN("%s: DMA map error, SG_length is 0\n", __func__);
		for (i = 0; i < dma->page_count; i++)
			put_page(dma->map[i]);
		dma->page_count = 0;
		return -EINVAL;
	}

	/* Fill SG Array with new values */
	ivtv_udma_fill_sg_array(dma, ivtv_dest_addr, 0, -1);

	/* Tag SG Array with Interrupt Bit */
	dma->SGarray[dma->SG_length - 1].size |= cpu_to_le32(0x80000000);

	ivtv_udma_sync_for_device(itv);
	return dma->page_count;
}

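/*
 * Undo ivtv_udma_setup(): unmap the scatterlist and release the pinned
 * user pages.
 */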
void ivtv_udma_unmap(struct ivtv *itv)
{
	struct ivtv_user_dma *dma = &itv->udma;
	int i;

	IVTV_DEBUG_INFO("ivtv_unmap_user_dma\n");

	/* Nothing to free */
	if (dma->page_count == 0)
		return;

	/* Unmap Scatterlist */
	if (dma->SG_length) {
		pci_unmap_sg(itv->pdev, dma->SGlist, dma->page_count, PCI_DMA_TODEVICE);
		dma->SG_length = 0;
	}
	/* sync DMA */
	ivtv_udma_sync_for_cpu(itv);

	/* Release User Pages */
	for (i = 0; i < dma->page_count; i++) {
		put_page(dma->map[i]);
	}
	dma->page_count = 0;
}

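/*
 * Free all user DMA resources: unmap the SGarray descriptor buffer and
 * any scatterlist that is still mapped, and free the bounce pages.
 */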
void ivtv_udma_free(struct ivtv *itv)
{
	int i;

	/* Unmap SG Array */
	if (itv->udma.SG_handle) {
		pci_unmap_single(itv->pdev, itv->udma.SG_handle,
			 sizeof(itv->udma.SGarray), PCI_DMA_TODEVICE);
	}

	/* Unmap Scatterlist */
	if (itv->udma.SG_length) {
		pci_unmap_sg(itv->pdev, itv->udma.SGlist, itv->udma.page_count, PCI_DMA_TODEVICE);
	}

	for (i = 0; i < IVTV_DMA_SG_OSD_ENT; i++) {
		if (itv->udma.bouncemap[i])
			__free_page(itv->udma.bouncemap[i]);
	}
}

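/*
 * Point the card at the mapped SGarray (IVTV_REG_DECDMAADDR), kick off
 * the transfer and mark the UDMA as in progress.  ivtv_udma_prepare()
 * calls this with itv->dma_reg_lock held.
 */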
void ivtv_udma_start(struct ivtv *itv)
{
	IVTV_DEBUG_DMA("start UDMA\n");
	write_reg(itv->udma.SG_handle, IVTV_REG_DECDMAADDR);
	write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x01, IVTV_REG_DMAXFER);
	set_bit(IVTV_F_I_DMA, &itv->i_flags);
	set_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags);
}

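/*
 * Start the user DMA right away if no DMA is currently in progress,
 * otherwise mark it pending so ivtv_udma_start() can be called once the
 * current transfer finishes.
 */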
void ivtv_udma_prepare(struct ivtv *itv)
{
	unsigned long flags;

	spin_lock_irqsave(&itv->dma_reg_lock, flags);
	if (!test_bit(IVTV_F_I_DMA, &itv->i_flags))
		ivtv_udma_start(itv);
	else
		set_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags);
	spin_unlock_irqrestore(&itv->dma_reg_lock, flags);
}