/*
 * VPDMA helper library
 *
 * Copyright (c) 2013 Texas Instruments Inc.
 *
 * David Griego, <dagriego@biglakesoftware.com>
 * Dale Farnsworth, <dale@farnsworth.org>
 * Archit Taneja, <archit@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/firmware.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/videodev2.h>

#include "vpdma.h"
#include "vpdma_priv.h"

#define VPDMA_FIRMWARE	"vpdma-1b8.bin"

const struct vpdma_data_format vpdma_yuv_fmts[] = {
	[VPDMA_DATA_FMT_Y444] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_Y444,
		.depth		= 8,
	},
	[VPDMA_DATA_FMT_Y422] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_Y422,
		.depth		= 8,
	},
	[VPDMA_DATA_FMT_Y420] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_Y420,
		.depth		= 8,
	},
	[VPDMA_DATA_FMT_C444] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_C444,
		.depth		= 8,
	},
	[VPDMA_DATA_FMT_C422] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_C422,
		.depth		= 8,
	},
	[VPDMA_DATA_FMT_C420] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_C420,
		.depth		= 4,
	},
	[VPDMA_DATA_FMT_YC422] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_YC422,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_YC444] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_YC444,
		.depth		= 24,
	},
	[VPDMA_DATA_FMT_CY422] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_CY422,
		.depth		= 16,
	},
};

const struct vpdma_data_format vpdma_rgb_fmts[] = {
	[VPDMA_DATA_FMT_RGB565] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_RGB16_565,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_ARGB16_1555] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_ARGB_1555,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_ARGB16] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_ARGB_4444,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_RGBA16_5551] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_RGBA_5551,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_RGBA16] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_RGBA_4444,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_ARGB24] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_ARGB24_6666,
		.depth		= 24,
	},
	[VPDMA_DATA_FMT_RGB24] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_RGB24_888,
		.depth		= 24,
	},
	[VPDMA_DATA_FMT_ARGB32] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_ARGB32_8888,
		.depth		= 32,
	},
	[VPDMA_DATA_FMT_RGBA24] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_RGBA24_6666,
		.depth		= 24,
	},
	[VPDMA_DATA_FMT_RGBA32] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_RGBA32_8888,
		.depth		= 32,
	},
	[VPDMA_DATA_FMT_BGR565] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_BGR16_565,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_ABGR16_1555] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_ABGR_1555,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_ABGR16] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_ABGR_4444,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_BGRA16_5551] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_BGRA_5551,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_BGRA16] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_BGRA_4444,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_ABGR24] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_ABGR24_6666,
		.depth		= 24,
	},
	[VPDMA_DATA_FMT_BGR24] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_BGR24_888,
		.depth		= 24,
	},
	[VPDMA_DATA_FMT_ABGR32] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_ABGR32_8888,
		.depth		= 32,
	},
	[VPDMA_DATA_FMT_BGRA24] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_BGRA24_6666,
		.depth		= 24,
	},
	[VPDMA_DATA_FMT_BGRA32] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_BGRA32_8888,
		.depth		= 32,
	},
};

const struct vpdma_data_format vpdma_misc_fmts[] = {
	[VPDMA_DATA_FMT_MV] = {
		.type		= VPDMA_DATA_FMT_TYPE_MISC,
		.data_type	= DATA_TYPE_MV,
		.depth		= 4,
	},
};
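
/*
 * Example (illustrative, not used in this file): a driver picks a table
 * entry by its enum index and derives the line stride from 'depth', the
 * effective bits per pixel of the plane ('width' is an assumed variable):
 *
 *	const struct vpdma_data_format *fmt =
 *			&vpdma_yuv_fmts[VPDMA_DATA_FMT_YC422];
 *	unsigned int stride = ALIGN((fmt->depth * width) >> 3,
 *			VPDMA_STRIDE_ALIGN);
 */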

struct vpdma_channel_info {
	int num;		/* VPDMA channel number */
	int cstat_offset;	/* client CSTAT register offset */
};

static const struct vpdma_channel_info chan_info[] = {
	[VPE_CHAN_LUMA1_IN] = {
		.num		= VPE_CHAN_NUM_LUMA1_IN,
		.cstat_offset	= VPDMA_DEI_LUMA1_CSTAT,
	},
	[VPE_CHAN_CHROMA1_IN] = {
		.num		= VPE_CHAN_NUM_CHROMA1_IN,
		.cstat_offset	= VPDMA_DEI_CHROMA1_CSTAT,
	},
	[VPE_CHAN_LUMA2_IN] = {
		.num		= VPE_CHAN_NUM_LUMA2_IN,
		.cstat_offset	= VPDMA_DEI_LUMA2_CSTAT,
	},
	[VPE_CHAN_CHROMA2_IN] = {
		.num		= VPE_CHAN_NUM_CHROMA2_IN,
		.cstat_offset	= VPDMA_DEI_CHROMA2_CSTAT,
	},
	[VPE_CHAN_LUMA3_IN] = {
		.num		= VPE_CHAN_NUM_LUMA3_IN,
		.cstat_offset	= VPDMA_DEI_LUMA3_CSTAT,
	},
	[VPE_CHAN_CHROMA3_IN] = {
		.num		= VPE_CHAN_NUM_CHROMA3_IN,
		.cstat_offset	= VPDMA_DEI_CHROMA3_CSTAT,
	},
	[VPE_CHAN_MV_IN] = {
		.num		= VPE_CHAN_NUM_MV_IN,
		.cstat_offset	= VPDMA_DEI_MV_IN_CSTAT,
	},
	[VPE_CHAN_MV_OUT] = {
		.num		= VPE_CHAN_NUM_MV_OUT,
		.cstat_offset	= VPDMA_DEI_MV_OUT_CSTAT,
	},
	[VPE_CHAN_LUMA_OUT] = {
		.num		= VPE_CHAN_NUM_LUMA_OUT,
		.cstat_offset	= VPDMA_VIP_UP_Y_CSTAT,
	},
	[VPE_CHAN_CHROMA_OUT] = {
		.num		= VPE_CHAN_NUM_CHROMA_OUT,
		.cstat_offset	= VPDMA_VIP_UP_UV_CSTAT,
	},
	[VPE_CHAN_RGB_OUT] = {
		.num		= VPE_CHAN_NUM_RGB_OUT,
		.cstat_offset	= VPDMA_VIP_UP_Y_CSTAT,
	},
};

static u32 read_reg(struct vpdma_data *vpdma, int offset)
{
	return ioread32(vpdma->base + offset);
}

static void write_reg(struct vpdma_data *vpdma, int offset, u32 value)
{
	iowrite32(value, vpdma->base + offset);
}

static int read_field_reg(struct vpdma_data *vpdma, int offset,
		u32 mask, int shift)
{
	return (read_reg(vpdma, offset) & (mask << shift)) >> shift;
}

static void write_field_reg(struct vpdma_data *vpdma, int offset, u32 field,
		u32 mask, int shift)
{
	u32 val = read_reg(vpdma, offset);

	val &= ~(mask << shift);
	val |= (field & mask) << shift;

	write_reg(vpdma, offset, val);
}

void vpdma_dump_regs(struct vpdma_data *vpdma)
{
	struct device *dev = &vpdma->pdev->dev;

#define DUMPREG(r) dev_dbg(dev, "%-35s %08x\n", #r, read_reg(vpdma, VPDMA_##r))

	dev_dbg(dev, "VPDMA Registers:\n");

	DUMPREG(PID);
	DUMPREG(LIST_ADDR);
	DUMPREG(LIST_ATTR);
	DUMPREG(LIST_STAT_SYNC);
	DUMPREG(BG_RGB);
	DUMPREG(BG_YUV);
	DUMPREG(SETUP);
	DUMPREG(MAX_SIZE1);
	DUMPREG(MAX_SIZE2);
	DUMPREG(MAX_SIZE3);

	/*
	 * dump the channel interrupt registers only for group0 and group3,
	 * since the VPE channels fall within these two groups
	 */
	DUMPREG(INT_CHAN_STAT(0));
	DUMPREG(INT_CHAN_MASK(0));
	DUMPREG(INT_CHAN_STAT(3));
	DUMPREG(INT_CHAN_MASK(3));
	DUMPREG(INT_CLIENT0_STAT);
	DUMPREG(INT_CLIENT0_MASK);
	DUMPREG(INT_CLIENT1_STAT);
	DUMPREG(INT_CLIENT1_MASK);
	DUMPREG(INT_LIST0_STAT);
	DUMPREG(INT_LIST0_MASK);

	/*
	 * these registers are specific to VPE clients; this function could be
	 * extended to dump either VPE or VIP client registers, depending on
	 * who is using it
	 */
	DUMPREG(DEI_CHROMA1_CSTAT);
	DUMPREG(DEI_LUMA1_CSTAT);
	DUMPREG(DEI_CHROMA2_CSTAT);
	DUMPREG(DEI_LUMA2_CSTAT);
	DUMPREG(DEI_CHROMA3_CSTAT);
	DUMPREG(DEI_LUMA3_CSTAT);
	DUMPREG(DEI_MV_IN_CSTAT);
	DUMPREG(DEI_MV_OUT_CSTAT);
	DUMPREG(VIP_UP_Y_CSTAT);
	DUMPREG(VIP_UP_UV_CSTAT);
	DUMPREG(VPI_CTL_CSTAT);
}

/*
 * Allocate a DMA buffer
 */
int vpdma_alloc_desc_buf(struct vpdma_buf *buf, size_t size)
{
	buf->size = size;
	buf->mapped = false;
	buf->addr = kzalloc(size, GFP_KERNEL);
	if (!buf->addr)
		return -ENOMEM;

	WARN_ON(((unsigned long)buf->addr & VPDMA_DESC_ALIGN) != 0);

	return 0;
}

void vpdma_free_desc_buf(struct vpdma_buf *buf)
{
	WARN_ON(buf->mapped);
	kfree(buf->addr);
	buf->addr = NULL;
	buf->size = 0;
}

/*
 * map a descriptor/payload DMA buffer, enabling DMA access
 */
int vpdma_map_desc_buf(struct vpdma_data *vpdma, struct vpdma_buf *buf)
{
	struct device *dev = &vpdma->pdev->dev;

	WARN_ON(buf->mapped);
	buf->dma_addr = dma_map_single(dev, buf->addr, buf->size,
				DMA_TO_DEVICE);
	if (dma_mapping_error(dev, buf->dma_addr)) {
		dev_err(dev, "failed to map buffer\n");
		return -EINVAL;
	}

	buf->mapped = true;

	return 0;
}

/*
 * unmap a descriptor/payload DMA buffer, disabling DMA access and
 * allowing the main processor to access the data
 */
void vpdma_unmap_desc_buf(struct vpdma_data *vpdma, struct vpdma_buf *buf)
{
	struct device *dev = &vpdma->pdev->dev;

	if (buf->mapped)
		dma_unmap_single(dev, buf->dma_addr, buf->size, DMA_TO_DEVICE);

	buf->mapped = false;
}
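
/*
 * Example (illustrative sketch, not part of the driver): the intended
 * lifecycle of a descriptor/payload buffer; error handling and the buffer
 * size (SZ_4K) are assumptions:
 *
 *	struct vpdma_buf buf;
 *
 *	vpdma_alloc_desc_buf(&buf, SZ_4K);
 *	(CPU fills buf.addr with descriptors or payload data)
 *	vpdma_map_desc_buf(vpdma, &buf);
 *	(submit to VPDMA, wait for the transfer to complete)
 *	vpdma_unmap_desc_buf(vpdma, &buf);
 *	(the CPU may safely access buf.addr again)
 *	vpdma_free_desc_buf(&buf);
 */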

/*
 * create a descriptor list; the user of this list will append configuration,
 * control and data descriptors to it, then submit it to VPDMA. VPDMA's list
 * parser will go through each descriptor and perform the required DMA
 * operations
 */
int vpdma_create_desc_list(struct vpdma_desc_list *list, size_t size, int type)
{
	int r;

	r = vpdma_alloc_desc_buf(&list->buf, size);
	if (r)
		return r;

	list->next = list->buf.addr;

	list->type = type;

	return 0;
}

/*
 * once a descriptor list is parsed by VPDMA, we reset the list by emptying
 * it, to allow new descriptors to be added to the list.
 */
void vpdma_reset_desc_list(struct vpdma_desc_list *list)
{
	list->next = list->buf.addr;
}

/*
 * free the buffer allocated for the VPDMA descriptor list; this should be
 * called when the user doesn't want to use VPDMA any more.
 */
void vpdma_free_desc_list(struct vpdma_desc_list *list)
{
	vpdma_free_desc_buf(&list->buf);

	list->next = NULL;
}

static bool vpdma_list_busy(struct vpdma_data *vpdma, int list_num)
{
	return read_reg(vpdma, VPDMA_LIST_STAT_SYNC) & BIT(list_num + 16);
}

/*
 * submit a list of DMA descriptors to the VPE VPDMA, without waiting for
 * completion
 */
int vpdma_submit_descs(struct vpdma_data *vpdma, struct vpdma_desc_list *list)
{
	/* we always use the first list */
	int list_num = 0;
	int list_size;

	if (vpdma_list_busy(vpdma, list_num))
		return -EBUSY;

	/* the hardware expects the list size in 16-byte units */
	list_size = (list->next - list->buf.addr) >> 4;

	write_reg(vpdma, VPDMA_LIST_ADDR, (u32) list->buf.dma_addr);

	write_reg(vpdma, VPDMA_LIST_ATTR,
			(list_num << VPDMA_LIST_NUM_SHFT) |
			(list->type << VPDMA_LIST_TYPE_SHFT) |
			list_size);

	return 0;
}
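
/*
 * Example (illustrative sketch of the intended call sequence; the list size
 * and list type are assumptions):
 *
 *	struct vpdma_desc_list list;
 *
 *	vpdma_create_desc_list(&list, SZ_4K, VPDMA_LIST_TYPE_NORMAL);
 *	(append descriptors with the vpdma_add_*() helpers below)
 *	vpdma_map_desc_buf(vpdma, &list.buf);
 *	vpdma_submit_descs(vpdma, &list);
 *	(once the list completes:)
 *	vpdma_unmap_desc_buf(vpdma, &list.buf);
 *	vpdma_reset_desc_list(&list);
 */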

static void dump_cfd(struct vpdma_cfd *cfd)
{
	int class;

	class = cfd_get_class(cfd);

	pr_debug("config descriptor of payload class: %s\n",
		class == CFD_CLS_BLOCK ? "simple block" :
		"address data block");

	if (class == CFD_CLS_BLOCK) {
		pr_debug("word0: dst_addr_offset = 0x%08x\n",
			cfd->dest_addr_offset);
		pr_debug("word1: num_data_wrds = %d\n", cfd->block_len);
	}

	pr_debug("word2: payload_addr = 0x%08x\n", cfd->payload_addr);

	pr_debug("word3: pkt_type = %d, direct = %d, class = %d, dest = %d, "
		"payload_len = %d\n", cfd_get_pkt_type(cfd),
		cfd_get_direct(cfd), class, cfd_get_dest(cfd),
		cfd_get_payload_len(cfd));
}

/*
 * append a configuration descriptor to the given descriptor list, where the
 * payload is in the form of a simple data block specified in the descriptor
 * header. This is used to upload scaler coefficients to the scaler module
 */
void vpdma_add_cfd_block(struct vpdma_desc_list *list, int client,
		struct vpdma_buf *blk, u32 dest_offset)
{
	struct vpdma_cfd *cfd;
	int len = blk->size;

	WARN_ON(blk->dma_addr & VPDMA_DESC_ALIGN);

	cfd = list->next;
	WARN_ON((void *)(cfd + 1) > (list->buf.addr + list->buf.size));

	cfd->dest_addr_offset = dest_offset;
	cfd->block_len = len;
	cfd->payload_addr = (u32) blk->dma_addr;
	cfd->ctl_payload_len = cfd_pkt_payload_len(CFD_INDIRECT, CFD_CLS_BLOCK,
				client, len >> 4);

	list->next = cfd + 1;

	dump_cfd(cfd);
}
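
/*
 * Example (illustrative; 'coeff_buf', 'client' and 'dest_offset' are assumed
 * to come from the calling driver): uploading a pre-filled block payload,
 * e.g. scaler coefficients, into a client's address space:
 *
 *	vpdma_map_desc_buf(vpdma, &coeff_buf);
 *	vpdma_add_cfd_block(&list, client, &coeff_buf, dest_offset);
 */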

/*
 * append a configuration descriptor to the given descriptor list, where the
 * payload is in the address data block format. This is used to configure a
 * discontiguous set of MMRs
 */
void vpdma_add_cfd_adb(struct vpdma_desc_list *list, int client,
		struct vpdma_buf *adb)
{
	struct vpdma_cfd *cfd;
	unsigned int len = adb->size;

	WARN_ON(len & VPDMA_ADB_SIZE_ALIGN);
	WARN_ON(adb->dma_addr & VPDMA_DESC_ALIGN);

	cfd = list->next;
	WARN_ON((void *)(cfd + 1) > (list->buf.addr + list->buf.size));

	cfd->w0 = 0;
	cfd->w1 = 0;
	cfd->payload_addr = (u32) adb->dma_addr;
	cfd->ctl_payload_len = cfd_pkt_payload_len(CFD_INDIRECT, CFD_CLS_ADB,
				client, len >> 4);

	list->next = cfd + 1;

	dump_cfd(cfd);
}
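
/*
 * Example (illustrative; 'mmr_adb' is an assumed vpdma_buf pre-filled with
 * ADB address/data entries, and CFD_MMR_CLIENT is assumed to name the MMR
 * client): configuring a scattered set of module registers in one descriptor:
 *
 *	vpdma_map_desc_buf(vpdma, &mmr_adb);
 *	vpdma_add_cfd_adb(&list, CFD_MMR_CLIENT, &mmr_adb);
 */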

/*
 * the control descriptor format changes based on the type of control
 * descriptor; we only use 'sync on channel' control descriptors for now,
 * so assume it's of that type
 */
static void dump_ctd(struct vpdma_ctd *ctd)
{
	pr_debug("control descriptor\n");

	pr_debug("word3: pkt_type = %d, source = %d, ctl_type = %d\n",
		ctd_get_pkt_type(ctd), ctd_get_source(ctd), ctd_get_ctl(ctd));
}

/*
 * append a 'sync on channel' type control descriptor to the given descriptor
 * list; this descriptor stalls the VPDMA list until the DMA on the specified
 * channel has completed
 */
void vpdma_add_sync_on_channel_ctd(struct vpdma_desc_list *list,
		enum vpdma_channel chan)
{
	struct vpdma_ctd *ctd;

	ctd = list->next;
	WARN_ON((void *)(ctd + 1) > (list->buf.addr + list->buf.size));

	ctd->w0 = 0;
	ctd->w1 = 0;
	ctd->w2 = 0;
	ctd->type_source_ctl = ctd_type_source_ctl(chan_info[chan].num,
				CTD_TYPE_SYNC_ON_CHANNEL);

	list->next = ctd + 1;

	dump_ctd(ctd);
}
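
/*
 * Example (illustrative): stall list processing until the write-out on the
 * luma output channel has finished, so that descriptors appended after this
 * point are only parsed once that DMA is done:
 *
 *	vpdma_add_sync_on_channel_ctd(&list, VPE_CHAN_LUMA_OUT);
 */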

static void dump_dtd(struct vpdma_dtd *dtd)
{
	int dir, chan;

	dir = dtd_get_dir(dtd);
	chan = dtd_get_chan(dtd);

	pr_debug("%s data transfer descriptor for channel %d\n",
		dir == DTD_DIR_OUT ? "outbound" : "inbound", chan);

	pr_debug("word0: data_type = %d, notify = %d, field = %d, 1D = %d, "
		"even_ln_skp = %d, odd_ln_skp = %d, line_stride = %d\n",
		dtd_get_data_type(dtd), dtd_get_notify(dtd), dtd_get_field(dtd),
		dtd_get_1d(dtd), dtd_get_even_line_skip(dtd),
		dtd_get_odd_line_skip(dtd), dtd_get_line_stride(dtd));

	if (dir == DTD_DIR_IN)
		pr_debug("word1: line_length = %d, xfer_height = %d\n",
			dtd_get_line_length(dtd), dtd_get_xfer_height(dtd));

	pr_debug("word2: start_addr = %pad\n", &dtd->start_addr);

	pr_debug("word3: pkt_type = %d, mode = %d, dir = %d, chan = %d, "
		"pri = %d, next_chan = %d\n", dtd_get_pkt_type(dtd),
		dtd_get_mode(dtd), dir, chan, dtd_get_priority(dtd),
		dtd_get_next_chan(dtd));

	if (dir == DTD_DIR_IN)
		pr_debug("word4: frame_width = %d, frame_height = %d\n",
			dtd_get_frame_width(dtd), dtd_get_frame_height(dtd));
	else
		pr_debug("word4: desc_write_addr = 0x%08x, write_desc = %d, "
			"drp_data = %d, use_desc_reg = %d\n",
			dtd_get_desc_write_addr(dtd), dtd_get_write_desc(dtd),
			dtd_get_drop_data(dtd), dtd_get_use_desc(dtd));

	if (dir == DTD_DIR_IN)
		pr_debug("word5: hor_start = %d, ver_start = %d\n",
			dtd_get_h_start(dtd), dtd_get_v_start(dtd));
	else
		pr_debug("word5: max_width %d, max_height %d\n",
			dtd_get_max_width(dtd), dtd_get_max_height(dtd));

	pr_debug("word6: client specific attr0 = 0x%08x\n", dtd->client_attr0);
	pr_debug("word7: client specific attr1 = 0x%08x\n", dtd->client_attr1);
}

/*
 * append an outbound data transfer descriptor to the given descriptor list;
 * this sets up a 'client to memory' VPDMA transfer for the given VPDMA channel
 *
 * @list: vpdma desc list to which we add this descriptor
 * @width: width of the image in pixels in memory
 * @c_rect: compose params of the output image
 * @fmt: vpdma data format of the buffer
 * @dma_addr: dma address as seen by VPDMA
 * @chan: VPDMA channel
 * @flags: VPDMA flags to configure some descriptor fields
 */
void vpdma_add_out_dtd(struct vpdma_desc_list *list, int width,
		const struct v4l2_rect *c_rect,
		const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
		enum vpdma_channel chan, u32 flags)
{
	int priority = 0;
	int field = 0;
	int notify = 1;
	int channel, next_chan;
	struct v4l2_rect rect = *c_rect;
	int depth = fmt->depth;
	int stride;
	struct vpdma_dtd *dtd;

	channel = next_chan = chan_info[chan].num;

	if (fmt->type == VPDMA_DATA_FMT_TYPE_YUV &&
			fmt->data_type == DATA_TYPE_C420) {
		/*
		 * the C420 chroma plane is vertically subsampled; halve the
		 * rect and use 8 bpp for the interleaved CbCr line
		 */
		rect.height >>= 1;
		rect.top >>= 1;
		depth = 8;
	}

	stride = ALIGN((depth * width) >> 3, VPDMA_STRIDE_ALIGN);

	/* point the DMA address at the top-left pixel of the compose rect */
	dma_addr += rect.top * stride + (rect.left * depth >> 3);

	dtd = list->next;
	WARN_ON((void *)(dtd + 1) > (list->buf.addr + list->buf.size));

	dtd->type_ctl_stride = dtd_type_ctl_stride(fmt->data_type,
					notify,
					field,
					!!(flags & VPDMA_DATA_FRAME_1D),
					!!(flags & VPDMA_DATA_EVEN_LINE_SKIP),
					!!(flags & VPDMA_DATA_ODD_LINE_SKIP),
					stride);
	dtd->w1 = 0;
	dtd->start_addr = (u32) dma_addr;
	dtd->pkt_ctl = dtd_pkt_ctl(!!(flags & VPDMA_DATA_MODE_TILED),
				DTD_DIR_OUT, channel, priority, next_chan);
	dtd->desc_write_addr = dtd_desc_write_addr(0, 0, 0, 0);
	dtd->max_width_height = dtd_max_width_height(MAX_OUT_WIDTH_1920,
					MAX_OUT_HEIGHT_1080);
	dtd->client_attr0 = 0;
	dtd->client_attr1 = 0;

	list->next = dtd + 1;

	dump_dtd(dtd);
}
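
/*
 * Example (illustrative; the geometry, format and 'buf_dma_addr' are
 * assumptions): write the luma plane of a 1920x1080 buffer out to memory
 * through the luma output channel:
 *
 *	struct v4l2_rect compose = { 0, 0, 1920, 1080 };
 *
 *	vpdma_add_out_dtd(&list, 1920, &compose,
 *		&vpdma_yuv_fmts[VPDMA_DATA_FMT_Y420], buf_dma_addr,
 *		VPE_CHAN_LUMA_OUT, 0);
 */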

/*
 * append an inbound data transfer descriptor to the given descriptor list;
 * this sets up a 'memory to client' VPDMA transfer for the given VPDMA channel
 *
 * @list: vpdma desc list to which we add this descriptor
 * @width: width of the image in pixels in memory (not the cropped width)
 * @c_rect: crop params of the input image
 * @fmt: vpdma data format of the buffer
 * @dma_addr: dma address as seen by VPDMA
 * @chan: VPDMA channel
 * @field: top or bottom field info of the input image
 * @flags: VPDMA flags to configure some descriptor fields
 * @frame_width/height: the complete width/height of the image presented to the
 *			client (this makes sense when multiple channels are
 *			connected to the same client, forming a larger frame)
 * @start_h, @start_v: position where the given channel starts providing pixel
 *			data to the client (makes sense when multiple channels
 *			contribute to the client)
 */
void vpdma_add_in_dtd(struct vpdma_desc_list *list, int width,
		const struct v4l2_rect *c_rect,
		const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
		enum vpdma_channel chan, int field, u32 flags, int frame_width,
		int frame_height, int start_h, int start_v)
{
	int priority = 0;
	int notify = 1;
	int depth = fmt->depth;
	int channel, next_chan;
	struct v4l2_rect rect = *c_rect;
	int stride;
	struct vpdma_dtd *dtd;

	channel = next_chan = chan_info[chan].num;

	if (fmt->type == VPDMA_DATA_FMT_TYPE_YUV &&
			fmt->data_type == DATA_TYPE_C420) {
		/*
		 * the C420 chroma plane is vertically subsampled; halve the
		 * rect and use 8 bpp for the interleaved CbCr line
		 */
		rect.height >>= 1;
		rect.top >>= 1;
		depth = 8;
	}

	stride = ALIGN((depth * width) >> 3, VPDMA_STRIDE_ALIGN);

	/* point the DMA address at the top-left pixel of the crop rect */
	dma_addr += rect.top * stride + (rect.left * depth >> 3);

	dtd = list->next;
	WARN_ON((void *)(dtd + 1) > (list->buf.addr + list->buf.size));

	dtd->type_ctl_stride = dtd_type_ctl_stride(fmt->data_type,
					notify,
					field,
					!!(flags & VPDMA_DATA_FRAME_1D),
					!!(flags & VPDMA_DATA_EVEN_LINE_SKIP),
					!!(flags & VPDMA_DATA_ODD_LINE_SKIP),
					stride);

	dtd->xfer_length_height = dtd_xfer_length_height(rect.width,
					rect.height);
	dtd->start_addr = (u32) dma_addr;
	dtd->pkt_ctl = dtd_pkt_ctl(!!(flags & VPDMA_DATA_MODE_TILED),
				DTD_DIR_IN, channel, priority, next_chan);
	dtd->frame_width_height = dtd_frame_width_height(frame_width,
					frame_height);
	dtd->start_h_v = dtd_start_h_v(start_h, start_v);
	dtd->client_attr0 = 0;
	dtd->client_attr1 = 0;

	list->next = dtd + 1;

	dump_dtd(dtd);
}
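
/*
 * Example (illustrative; geometry, format and 'buf_dma_addr' are
 * assumptions): feed the same 1920x1080 luma plane to the client as a
 * progressive (field = 0) input which covers the entire client frame:
 *
 *	struct v4l2_rect crop = { 0, 0, 1920, 1080 };
 *
 *	vpdma_add_in_dtd(&list, 1920, &crop,
 *		&vpdma_yuv_fmts[VPDMA_DATA_FMT_Y420], buf_dma_addr,
 *		VPE_CHAN_LUMA1_IN, 0, 0, 1920, 1080, 0, 0);
 */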

/* set or clear the mask for list complete interrupt */
void vpdma_enable_list_complete_irq(struct vpdma_data *vpdma, int list_num,
		bool enable)
{
	u32 val;

	val = read_reg(vpdma, VPDMA_INT_LIST0_MASK);
	if (enable)
		val |= (1 << (list_num * 2));
	else
		val &= ~(1 << (list_num * 2));
	write_reg(vpdma, VPDMA_INT_LIST0_MASK, val);
}
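
/*
 * Example (illustrative sketch of a driver's interrupt path, assuming list 0
 * was submitted and its interrupt was enabled with
 * vpdma_enable_list_complete_irq(vpdma, 0, true)):
 *
 *	(in the irq handler, after identifying a list complete interrupt:)
 *	vpdma_clear_list_stat(vpdma);
 *	vpdma_unmap_desc_buf(vpdma, &list.buf);
 *	vpdma_reset_desc_list(&list);
 */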

/* clear previously occurred list interrupts in the LIST_STAT register */
void vpdma_clear_list_stat(struct vpdma_data *vpdma)
{
	write_reg(vpdma, VPDMA_INT_LIST0_STAT,
		read_reg(vpdma, VPDMA_INT_LIST0_STAT));
}

/*
 * configures the output mode of the line buffer for the given client; the
 * line buffer content can either be mirrored (each line repeated twice) or
 * passed to the client as is
 */
void vpdma_set_line_mode(struct vpdma_data *vpdma, int line_mode,
		enum vpdma_channel chan)
{
	int client_cstat = chan_info[chan].cstat_offset;

	write_field_reg(vpdma, client_cstat, line_mode,
		VPDMA_CSTAT_LINE_MODE_MASK, VPDMA_CSTAT_LINE_MODE_SHIFT);
}

/*
 * configures the event which should trigger VPDMA transfer for the given
 * client
 */
void vpdma_set_frame_start_event(struct vpdma_data *vpdma,
		enum vpdma_frame_start_event fs_event,
		enum vpdma_channel chan)
{
	int client_cstat = chan_info[chan].cstat_offset;

	write_field_reg(vpdma, client_cstat, fs_event,
		VPDMA_CSTAT_FRAME_START_MASK, VPDMA_CSTAT_FRAME_START_SHIFT);
}
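
/*
 * Example (illustrative; VPDMA_FSEVENT_CHANNEL_ACTIVE is assumed to be one of
 * the enum vpdma_frame_start_event values): start the transfer on an input
 * channel as soon as its descriptor becomes active:
 *
 *	vpdma_set_frame_start_event(vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
 *		VPE_CHAN_LUMA1_IN);
 */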

static void vpdma_firmware_cb(const struct firmware *f, void *context)
{
	struct vpdma_data *vpdma = context;
	struct vpdma_buf fw_dma_buf;
	int i, r;

	dev_dbg(&vpdma->pdev->dev, "firmware callback\n");

	if (!f || !f->data) {
		dev_err(&vpdma->pdev->dev, "couldn't get firmware\n");
		return;
	}

	/* already initialized */
	if (read_field_reg(vpdma, VPDMA_LIST_ATTR, VPDMA_LIST_RDY_MASK,
			VPDMA_LIST_RDY_SHFT)) {
		vpdma->cb(vpdma->pdev);
		return;
	}

	r = vpdma_alloc_desc_buf(&fw_dma_buf, f->size);
	if (r) {
		dev_err(&vpdma->pdev->dev,
			"failed to allocate dma buffer for firmware\n");
		goto rel_fw;
	}

	memcpy(fw_dma_buf.addr, f->data, f->size);

	r = vpdma_map_desc_buf(vpdma, &fw_dma_buf);
	if (r)
		goto free_desc;

	write_reg(vpdma, VPDMA_LIST_ADDR, (u32) fw_dma_buf.dma_addr);

	for (i = 0; i < 100; i++) {		/* max 1 second */
		msleep_interruptible(10);

		if (read_field_reg(vpdma, VPDMA_LIST_ATTR, VPDMA_LIST_RDY_MASK,
				VPDMA_LIST_RDY_SHFT))
			break;
	}

	if (i == 100) {
		dev_err(&vpdma->pdev->dev, "firmware upload failed\n");
		goto unmap_buf;
	}

	vpdma->cb(vpdma->pdev);

unmap_buf:
	vpdma_unmap_desc_buf(vpdma, &fw_dma_buf);
free_desc:
	vpdma_free_desc_buf(&fw_dma_buf);
rel_fw:
	release_firmware(f);
}

static int vpdma_load_firmware(struct vpdma_data *vpdma)
{
	int r;
	struct device *dev = &vpdma->pdev->dev;

	r = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
		VPDMA_FIRMWARE, dev, GFP_KERNEL, vpdma,
		vpdma_firmware_cb);
	if (r) {
		dev_err(dev, "firmware not available %s\n", VPDMA_FIRMWARE);
		return r;
	}

	dev_info(dev, "loading firmware %s\n", VPDMA_FIRMWARE);

	return 0;
}
873
874struct vpdma_data *vpdma_create(struct platform_device *pdev,
875		void (*cb)(struct platform_device *pdev))
876{
877	struct resource *res;
878	struct vpdma_data *vpdma;
879	int r;
880
881	dev_dbg(&pdev->dev, "vpdma_create\n");
882
883	vpdma = devm_kzalloc(&pdev->dev, sizeof(*vpdma), GFP_KERNEL);
884	if (!vpdma) {
885		dev_err(&pdev->dev, "couldn't alloc vpdma_dev\n");
886		return ERR_PTR(-ENOMEM);
887	}
888
889	vpdma->pdev = pdev;
890	vpdma->cb = cb;
891
892	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpdma");
893	if (res == NULL) {
894		dev_err(&pdev->dev, "missing platform resources data\n");
895		return ERR_PTR(-ENODEV);
896	}
897
898	vpdma->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
899	if (!vpdma->base) {
900		dev_err(&pdev->dev, "failed to ioremap\n");
901		return ERR_PTR(-ENOMEM);
902	}
903
904	r = vpdma_load_firmware(vpdma);
905	if (r) {
906		pr_err("failed to load firmware %s\n", VPDMA_FIRMWARE);
907		return ERR_PTR(r);
908	}
909
910	return vpdma;
911}
912MODULE_FIRMWARE(VPDMA_FIRMWARE);
913