/*
 * Samsung TV Mixer driver
 *
 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
 *
 * Tomasz Stanislawski, <t.stanislaws@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 2 of the License,
 * or (at your option) any later version.
 */

#define pr_fmt(fmt) "s5p-tv (mixer): " fmt

#include "mixer.h"

#include <media/v4l2-ioctl.h>
#include <linux/videodev2.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/timer.h>
#include <media/videobuf2-dma-contig.h>

static int find_reg_callback(struct device *dev, void *p)
{
	struct v4l2_subdev **sd = p;

	*sd = dev_get_drvdata(dev);
	/* non-zero value stops iteration */
	return 1;
}

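/*
 * Find an output subdev by the name of the platform driver that provides it.
 * driver_for_each_device() stops as soon as the callback returns non-zero,
 * so the drvdata of the first device bound to that driver is used as the
 * subdev.  NULL is returned on any failure and the caller simply skips the
 * corresponding output.
 */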
static struct v4l2_subdev *find_and_register_subdev(
	struct mxr_device *mdev, char *module_name)
{
	struct device_driver *drv;
	struct v4l2_subdev *sd = NULL;
	int ret;

	/* TODO: add waiting until probe is finished */
	drv = driver_find(module_name, &platform_bus_type);
	if (!drv) {
		mxr_warn(mdev, "module %s is missing\n", module_name);
		return NULL;
	}
	/* driver refcnt is increased, it is safe to iterate over devices */
	ret = driver_for_each_device(drv, NULL, &sd, find_reg_callback);
	/* ret == 0 means that find_reg_callback was never executed */
	if (sd == NULL) {
		mxr_warn(mdev, "module %s provides no subdev!\n", module_name);
		goto done;
	}
	/* v4l2_device_register_subdev detects if sd is NULL */
	ret = v4l2_device_register_subdev(&mdev->v4l2_dev, sd);
	if (ret) {
		mxr_warn(mdev, "failed to register subdev %s\n", sd->name);
		sd = NULL;
	}

done:
	return sd;
}

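/*
 * Set up the V4L2 side of the mixer: register the v4l2_device, create the
 * vb2 dma-contig allocator context and register an mxr_output for every
 * entry in output_conf.  Outputs whose subdev cannot be found are skipped;
 * the function fails only if no output could be registered at all, and on
 * error it unwinds in reverse order (outputs, allocator, v4l2_device).
 */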
int mxr_acquire_video(struct mxr_device *mdev,
		      struct mxr_output_conf *output_conf, int output_count)
{
	struct device *dev = mdev->dev;
	struct v4l2_device *v4l2_dev = &mdev->v4l2_dev;
	int i;
	int ret = 0;
	struct v4l2_subdev *sd;

	strlcpy(v4l2_dev->name, dev_name(mdev->dev), sizeof(v4l2_dev->name));
	/* prepare context for V4L2 device */
	ret = v4l2_device_register(dev, v4l2_dev);
	if (ret) {
		mxr_err(mdev, "could not register v4l2 device.\n");
		goto fail;
	}

	mdev->alloc_ctx = vb2_dma_contig_init_ctx(mdev->dev);
	if (IS_ERR(mdev->alloc_ctx)) {
		mxr_err(mdev, "could not acquire vb2 allocator\n");
		ret = PTR_ERR(mdev->alloc_ctx);
		goto fail_v4l2_dev;
	}

	/* registering outputs */
	mdev->output_cnt = 0;
	for (i = 0; i < output_count; ++i) {
		struct mxr_output_conf *conf = &output_conf[i];
		struct mxr_output *out;

		sd = find_and_register_subdev(mdev, conf->module_name);
		/* a missing subdev is not fatal, try the next output */
		if (sd == NULL)
			continue;
		out = kzalloc(sizeof(*out), GFP_KERNEL);
		if (out == NULL) {
			mxr_err(mdev, "no memory for '%s'\n",
				conf->output_name);
			ret = -ENOMEM;
			/* registered subdevs are removed in fail_v4l2_dev */
			goto fail_output;
		}
		strlcpy(out->name, conf->output_name, sizeof(out->name));
		out->sd = sd;
		out->cookie = conf->cookie;
		mdev->output[mdev->output_cnt++] = out;
		mxr_info(mdev, "added output '%s' from module '%s'\n",
			conf->output_name, conf->module_name);
		/* checking if maximum number of outputs is reached */
		if (mdev->output_cnt >= MXR_MAX_OUTPUTS)
			break;
	}

	if (mdev->output_cnt == 0) {
		mxr_err(mdev, "failed to register any output\n");
		ret = -ENODEV;
		/* skipping fail_output because there is nothing to free */
		goto fail_vb2_allocator;
	}

	return 0;

fail_output:
	/* kfree is NULL-safe */
	for (i = 0; i < mdev->output_cnt; ++i)
		kfree(mdev->output[i]);
	memset(mdev->output, 0, sizeof(mdev->output));

fail_vb2_allocator:
	/* freeing allocator context */
	vb2_dma_contig_cleanup_ctx(mdev->alloc_ctx);

fail_v4l2_dev:
	/* NOTE: this automatically unregisters all subdevs */
	v4l2_device_unregister(v4l2_dev);

fail:
	return ret;
}

void mxr_release_video(struct mxr_device *mdev)
{
	int i;

	/* kfree is NULL-safe */
	for (i = 0; i < mdev->output_cnt; ++i)
		kfree(mdev->output[i]);

	vb2_dma_contig_cleanup_ctx(mdev->alloc_ctx);
	v4l2_device_unregister(&mdev->v4l2_dev);
}

static int mxr_querycap(struct file *file, void *priv,
	struct v4l2_capability *cap)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);

	strlcpy(cap->driver, MXR_DRIVER_NAME, sizeof(cap->driver));
	strlcpy(cap->card, layer->vfd.name, sizeof(cap->card));
	sprintf(cap->bus_info, "%d", layer->idx);
	cap->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_OUTPUT_MPLANE;
	cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;

	return 0;
}

static void mxr_geometry_dump(struct mxr_device *mdev, struct mxr_geometry *geo)
{
	mxr_dbg(mdev, "src.full_size = (%u, %u)\n",
		geo->src.full_width, geo->src.full_height);
	mxr_dbg(mdev, "src.size = (%u, %u)\n",
		geo->src.width, geo->src.height);
	mxr_dbg(mdev, "src.offset = (%u, %u)\n",
		geo->src.x_offset, geo->src.y_offset);
	mxr_dbg(mdev, "dst.full_size = (%u, %u)\n",
		geo->dst.full_width, geo->dst.full_height);
	mxr_dbg(mdev, "dst.size = (%u, %u)\n",
		geo->dst.width, geo->dst.height);
	mxr_dbg(mdev, "dst.offset = (%u, %u)\n",
		geo->dst.x_offset, geo->dst.y_offset);
	mxr_dbg(mdev, "ratio = (%u, %u)\n",
		geo->x_ratio, geo->y_ratio);
}

static void mxr_layer_default_geo(struct mxr_layer *layer)
{
	struct mxr_device *mdev = layer->mdev;
	struct v4l2_mbus_framefmt mbus_fmt;

	memset(&layer->geo, 0, sizeof(layer->geo));

	mxr_get_mbus_fmt(mdev, &mbus_fmt);

	layer->geo.dst.full_width = mbus_fmt.width;
	layer->geo.dst.full_height = mbus_fmt.height;
	layer->geo.dst.width = layer->geo.dst.full_width;
	layer->geo.dst.height = layer->geo.dst.full_height;
	layer->geo.dst.field = mbus_fmt.field;

	layer->geo.src.full_width = mbus_fmt.width;
	layer->geo.src.full_height = mbus_fmt.height;
	layer->geo.src.width = layer->geo.src.full_width;
	layer->geo.src.height = layer->geo.src.full_height;

	mxr_geometry_dump(mdev, &layer->geo);
	layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
	mxr_geometry_dump(mdev, &layer->geo);
}

static void mxr_layer_update_output(struct mxr_layer *layer)
{
	struct mxr_device *mdev = layer->mdev;
	struct v4l2_mbus_framefmt mbus_fmt;

	mxr_get_mbus_fmt(mdev, &mbus_fmt);
	/* checking if update is needed */
	if (layer->geo.dst.full_width == mbus_fmt.width &&
		layer->geo.dst.full_height == mbus_fmt.height)
		return;

	layer->geo.dst.full_width = mbus_fmt.width;
	layer->geo.dst.full_height = mbus_fmt.height;
	layer->geo.dst.field = mbus_fmt.field;
	layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);

	mxr_geometry_dump(mdev, &layer->geo);
}

static const struct mxr_format *find_format_by_fourcc(
	struct mxr_layer *layer, unsigned long fourcc);
static const struct mxr_format *find_format_by_index(
	struct mxr_layer *layer, unsigned long index);

static int mxr_enum_fmt(struct file *file, void *priv,
	struct v4l2_fmtdesc *f)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	const struct mxr_format *fmt;

	mxr_dbg(mdev, "%s\n", __func__);
	fmt = find_format_by_index(layer, f->index);
	if (fmt == NULL)
		return -EINVAL;

	strlcpy(f->description, fmt->name, sizeof(f->description));
	f->pixelformat = fmt->fourcc;

	return 0;
}

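/*
 * Plane sizes are computed in whole hardware blocks: the image dimensions
 * are divided by the block dimensions (rounding up) and multiplied by the
 * block size in bytes.  Purely as an illustration, a hypothetical 16x16
 * block of 256 bytes covering a 720x576 image gives divup(720, 16) = 45 by
 * divup(576, 16) = 36 blocks, i.e. 45 * 36 * 256 = 414720 bytes.
 */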
static unsigned int divup(unsigned int dividend, unsigned int divisor)
{
	return (dividend + divisor - 1) / divisor;
}

unsigned long mxr_get_plane_size(const struct mxr_block *blk,
	unsigned int width, unsigned int height)
{
	unsigned int bl_width = divup(width, blk->width);
	unsigned int bl_height = divup(height, blk->height);

	return bl_width * bl_height * blk->size;
}

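/*
 * Fill the v4l2_plane_pix_format array for a format.  A format may have more
 * colour planes than memory planes (subframes); plane2subframe maps each
 * colour plane to its subframe, sizes of planes sharing a subframe are
 * accumulated into one sizeimage and the largest bytesperline wins.
 */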
static void mxr_mplane_fill(struct v4l2_plane_pix_format *planes,
	const struct mxr_format *fmt, u32 width, u32 height)
{
	int i;

	/* checking if nothing to fill */
	if (!planes)
		return;

	memset(planes, 0, sizeof(*planes) * fmt->num_subframes);
	for (i = 0; i < fmt->num_planes; ++i) {
		struct v4l2_plane_pix_format *plane = planes
			+ fmt->plane2subframe[i];
		const struct mxr_block *blk = &fmt->plane[i];
		u32 bl_width = divup(width, blk->width);
		u32 bl_height = divup(height, blk->height);
		u32 sizeimage = bl_width * bl_height * blk->size;
		u32 bytesperline = bl_width * blk->size / blk->height;

		plane->sizeimage += sizeimage;
		plane->bytesperline = max(plane->bytesperline, bytesperline);
	}
}

static int mxr_g_fmt(struct file *file, void *priv,
			     struct v4l2_format *f)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);

	pix->width = layer->geo.src.full_width;
	pix->height = layer->geo.src.full_height;
	pix->field = V4L2_FIELD_NONE;
	pix->pixelformat = layer->fmt->fourcc;
	pix->colorspace = layer->fmt->colorspace;
	mxr_mplane_fill(pix->plane_fmt, layer->fmt, pix->width, pix->height);

	return 0;
}

static int mxr_s_fmt(struct file *file, void *priv,
	struct v4l2_format *f)
{
	struct mxr_layer *layer = video_drvdata(file);
	const struct mxr_format *fmt;
	struct v4l2_pix_format_mplane *pix;
	struct mxr_device *mdev = layer->mdev;
	struct mxr_geometry *geo = &layer->geo;

	mxr_dbg(mdev, "%s:%d\n", __func__, __LINE__);

	pix = &f->fmt.pix_mp;
	fmt = find_format_by_fourcc(layer, pix->pixelformat);
	if (fmt == NULL) {
		mxr_warn(mdev, "unrecognized fourcc: %08x\n",
			pix->pixelformat);
		return -EINVAL;
	}
	layer->fmt = fmt;
	/* set source size to highest accepted value */
	geo->src.full_width = max(geo->dst.full_width, pix->width);
	geo->src.full_height = max(geo->dst.full_height, pix->height);
	layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
	mxr_geometry_dump(mdev, &layer->geo);
	/* set cropping to total visible screen */
	geo->src.width = pix->width;
	geo->src.height = pix->height;
	geo->src.x_offset = 0;
	geo->src.y_offset = 0;
	/* ensure consistency of geometry */
	layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
	mxr_geometry_dump(mdev, &layer->geo);
	/* set full size to lowest possible value */
	geo->src.full_width = 0;
	geo->src.full_height = 0;
	layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
	mxr_geometry_dump(mdev, &layer->geo);

	/* returning results */
	mxr_g_fmt(file, priv, f);

	return 0;
}

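/*
 * Selection targets map onto the layer geometry as follows: CROP targets
 * operate on the source buffer (geo->src), COMPOSE targets on the
 * destination area on the TV screen (geo->dst).  The *_DEFAULT and *_BOUNDS
 * targets always report the corresponding full size at offset (0, 0).
 */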
static int mxr_g_selection(struct file *file, void *fh,
	struct v4l2_selection *s)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_geometry *geo = &layer->geo;

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);

	if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
		s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
		return -EINVAL;

	switch (s->target) {
	case V4L2_SEL_TGT_CROP:
		s->r.left = geo->src.x_offset;
		s->r.top = geo->src.y_offset;
		s->r.width = geo->src.width;
		s->r.height = geo->src.height;
		break;
	case V4L2_SEL_TGT_CROP_DEFAULT:
	case V4L2_SEL_TGT_CROP_BOUNDS:
		s->r.left = 0;
		s->r.top = 0;
		s->r.width = geo->src.full_width;
		s->r.height = geo->src.full_height;
		break;
	case V4L2_SEL_TGT_COMPOSE:
	case V4L2_SEL_TGT_COMPOSE_PADDED:
		s->r.left = geo->dst.x_offset;
		s->r.top = geo->dst.y_offset;
		s->r.width = geo->dst.width;
		s->r.height = geo->dst.height;
		break;
	case V4L2_SEL_TGT_COMPOSE_DEFAULT:
	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
		s->r.left = 0;
		s->r.top = 0;
		s->r.width = geo->dst.full_width;
		s->r.height = geo->dst.full_height;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/* returns 1 if rectangle 'a' is inside 'b' */
static int mxr_is_rect_inside(struct v4l2_rect *a, struct v4l2_rect *b)
{
	if (a->left < b->left)
		return 0;
	if (a->top < b->top)
		return 0;
	if (a->left + a->width > b->left + b->width)
		return 0;
	if (a->top + a->height > b->top + b->height)
		return 0;
	return 1;
}

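/*
 * S_SELECTION uses a try/verify/rollback scheme: the requested rectangle is
 * written into the geometry, fix_geometry() adjusts it to what the hardware
 * supports, and the adjusted rectangle is then checked against the
 * V4L2_SEL_FLAG_LE/GE constraints.  If they cannot be met, the previous
 * geometry is restored and -ERANGE is returned.
 */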
static int mxr_s_selection(struct file *file, void *fh,
	struct v4l2_selection *s)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_geometry *geo = &layer->geo;
	struct mxr_crop *target = NULL;
	enum mxr_geometry_stage stage;
	struct mxr_geometry tmp;
	struct v4l2_rect res;

	memset(&res, 0, sizeof(res));

	mxr_dbg(layer->mdev, "%s: rect: %dx%d@%d,%d\n", __func__,
		s->r.width, s->r.height, s->r.left, s->r.top);

	if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
		s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
		return -EINVAL;

	switch (s->target) {
	/* ignore read-only targets */
	case V4L2_SEL_TGT_CROP_DEFAULT:
	case V4L2_SEL_TGT_CROP_BOUNDS:
		res.width = geo->src.full_width;
		res.height = geo->src.full_height;
		break;

	/* ignore read-only targets */
	case V4L2_SEL_TGT_COMPOSE_DEFAULT:
	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
		res.width = geo->dst.full_width;
		res.height = geo->dst.full_height;
		break;

	case V4L2_SEL_TGT_CROP:
		target = &geo->src;
		stage = MXR_GEOMETRY_CROP;
		break;
	case V4L2_SEL_TGT_COMPOSE:
	case V4L2_SEL_TGT_COMPOSE_PADDED:
		target = &geo->dst;
		stage = MXR_GEOMETRY_COMPOSE;
		break;
	default:
		return -EINVAL;
	}
	/* apply change and update geometry if needed */
	if (target) {
		/* back up current geometry in case the setup fails */
		memcpy(&tmp, geo, sizeof(tmp));

		/* apply requested selection */
		target->x_offset = s->r.left;
		target->y_offset = s->r.top;
		target->width = s->r.width;
		target->height = s->r.height;

		layer->ops.fix_geometry(layer, stage, s->flags);

		/* retrieve the updated selection rectangle */
		res.left = target->x_offset;
		res.top = target->y_offset;
		res.width = target->width;
		res.height = target->height;

		mxr_geometry_dump(layer->mdev, &layer->geo);
	}

	/* checking if the rectangle satisfies constraints */
	if ((s->flags & V4L2_SEL_FLAG_LE) && !mxr_is_rect_inside(&res, &s->r))
		goto fail;
	if ((s->flags & V4L2_SEL_FLAG_GE) && !mxr_is_rect_inside(&s->r, &res))
		goto fail;

	/* return result rectangle */
	s->r = res;

	return 0;
fail:
	/* restore old geometry, which is not touched if target is NULL */
	if (target)
		memcpy(geo, &tmp, sizeof(tmp));
	return -ERANGE;
}

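/*
 * The DV timings and analog TV standard ioctls below simply forward the
 * request to the currently selected output subdev.  mdev->mutex guards
 * against the output being changed while the subdev call is in flight, and
 * changing timings or standard is refused with -EBUSY while any stream still
 * depends on the current output configuration.
 */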
static int mxr_enum_dv_timings(struct file *file, void *fh,
	struct v4l2_enum_dv_timings *timings)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret;

	timings->pad = 0;

	/* lock protects from changing sd_out */
	mutex_lock(&mdev->mutex);
	ret = v4l2_subdev_call(to_outsd(mdev), pad, enum_dv_timings, timings);
	mutex_unlock(&mdev->mutex);

	return ret ? -EINVAL : 0;
}

static int mxr_s_dv_timings(struct file *file, void *fh,
	struct v4l2_dv_timings *timings)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret;

	/* lock protects from changing sd_out */
	mutex_lock(&mdev->mutex);

	/* timings change cannot be done while there is an entity
	 * dependent on output configuration
	 */
	if (mdev->n_output > 0) {
		mutex_unlock(&mdev->mutex);
		return -EBUSY;
	}

	ret = v4l2_subdev_call(to_outsd(mdev), video, s_dv_timings, timings);

	mutex_unlock(&mdev->mutex);

	mxr_layer_update_output(layer);

	/* any failure should return EINVAL according to V4L2 doc */
	return ret ? -EINVAL : 0;
}

static int mxr_g_dv_timings(struct file *file, void *fh,
	struct v4l2_dv_timings *timings)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret;

	/* lock protects from changing sd_out */
	mutex_lock(&mdev->mutex);
	ret = v4l2_subdev_call(to_outsd(mdev), video, g_dv_timings, timings);
	mutex_unlock(&mdev->mutex);

	return ret ? -EINVAL : 0;
}

static int mxr_dv_timings_cap(struct file *file, void *fh,
	struct v4l2_dv_timings_cap *cap)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret;

	cap->pad = 0;

	/* lock protects from changing sd_out */
	mutex_lock(&mdev->mutex);
	ret = v4l2_subdev_call(to_outsd(mdev), pad, dv_timings_cap, cap);
	mutex_unlock(&mdev->mutex);

	return ret ? -EINVAL : 0;
}

static int mxr_s_std(struct file *file, void *fh, v4l2_std_id norm)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret;

	/* lock protects from changing sd_out */
	mutex_lock(&mdev->mutex);

	/* standard change cannot be done while there is an entity
	 * dependent on output configuration
	 */
	if (mdev->n_output > 0) {
		mutex_unlock(&mdev->mutex);
		return -EBUSY;
	}

	ret = v4l2_subdev_call(to_outsd(mdev), video, s_std_output, norm);

	mutex_unlock(&mdev->mutex);

	mxr_layer_update_output(layer);

	return ret ? -EINVAL : 0;
}

static int mxr_g_std(struct file *file, void *fh, v4l2_std_id *norm)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret;

	/* lock protects from changing sd_out */
	mutex_lock(&mdev->mutex);
	ret = v4l2_subdev_call(to_outsd(mdev), video, g_std_output, norm);
	mutex_unlock(&mdev->mutex);

	return ret ? -EINVAL : 0;
}

static int mxr_enum_output(struct file *file, void *fh, struct v4l2_output *a)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	struct mxr_output *out;
	struct v4l2_subdev *sd;

	if (a->index >= mdev->output_cnt)
		return -EINVAL;
	out = mdev->output[a->index];
	BUG_ON(out == NULL);
	sd = out->sd;
	strlcpy(a->name, out->name, sizeof(a->name));

	/* try to obtain supported TV norms */
	v4l2_subdev_call(sd, video, g_tvnorms_output, &a->std);
	a->capabilities = 0;
	if (sd->ops->video && sd->ops->video->s_dv_timings)
		a->capabilities |= V4L2_OUT_CAP_DV_TIMINGS;
	if (sd->ops->video && sd->ops->video->s_std_output)
		a->capabilities |= V4L2_OUT_CAP_STD;
	a->type = V4L2_OUTPUT_TYPE_ANALOG;

	return 0;
}

static int mxr_s_output(struct file *file, void *fh, unsigned int i)
{
	struct video_device *vfd = video_devdata(file);
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;

	if (i >= mdev->output_cnt || mdev->output[i] == NULL)
		return -EINVAL;

	mutex_lock(&mdev->mutex);
	if (mdev->n_output > 0) {
		mutex_unlock(&mdev->mutex);
		return -EBUSY;
	}
	mdev->current_output = i;
	vfd->tvnorms = 0;
	v4l2_subdev_call(to_outsd(mdev), video, g_tvnorms_output,
		&vfd->tvnorms);
	mutex_unlock(&mdev->mutex);

	/* update the layer's geometry */
	mxr_layer_update_output(layer);

	mxr_dbg(mdev, "tvnorms = %08llx\n", vfd->tvnorms);

	return 0;
}

static int mxr_g_output(struct file *file, void *fh, unsigned int *p)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;

	mutex_lock(&mdev->mutex);
	*p = mdev->current_output;
	mutex_unlock(&mdev->mutex);

	return 0;
}

static int mxr_reqbufs(struct file *file, void *priv,
			  struct v4l2_requestbuffers *p)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	return vb2_reqbufs(&layer->vb_queue, p);
}

static int mxr_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	return vb2_querybuf(&layer->vb_queue, p);
}

static int mxr_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d(%d)\n", __func__, __LINE__, p->index);
	return vb2_qbuf(&layer->vb_queue, p);
}

static int mxr_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	return vb2_dqbuf(&layer->vb_queue, p, file->f_flags & O_NONBLOCK);
}

static int mxr_expbuf(struct file *file, void *priv,
	struct v4l2_exportbuffer *eb)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	return vb2_expbuf(&layer->vb_queue, eb);
}

static int mxr_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	return vb2_streamon(&layer->vb_queue, i);
}

static int mxr_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	return vb2_streamoff(&layer->vb_queue, i);
}

static const struct v4l2_ioctl_ops mxr_ioctl_ops = {
	.vidioc_querycap = mxr_querycap,
	/* format handling */
	.vidioc_enum_fmt_vid_out_mplane = mxr_enum_fmt,
	.vidioc_s_fmt_vid_out_mplane = mxr_s_fmt,
	.vidioc_g_fmt_vid_out_mplane = mxr_g_fmt,
	/* buffer control */
	.vidioc_reqbufs = mxr_reqbufs,
	.vidioc_querybuf = mxr_querybuf,
	.vidioc_qbuf = mxr_qbuf,
	.vidioc_dqbuf = mxr_dqbuf,
	.vidioc_expbuf = mxr_expbuf,
	/* streaming control */
	.vidioc_streamon = mxr_streamon,
	.vidioc_streamoff = mxr_streamoff,
	/* DV timings functions */
	.vidioc_enum_dv_timings = mxr_enum_dv_timings,
	.vidioc_s_dv_timings = mxr_s_dv_timings,
	.vidioc_g_dv_timings = mxr_g_dv_timings,
	.vidioc_dv_timings_cap = mxr_dv_timings_cap,
	/* analog TV standard functions */
	.vidioc_s_std = mxr_s_std,
	.vidioc_g_std = mxr_g_std,
	/* output handling */
	.vidioc_enum_output = mxr_enum_output,
	.vidioc_s_output = mxr_s_output,
	.vidioc_g_output = mxr_g_output,
	/* selection ioctls */
	.vidioc_g_selection = mxr_g_selection,
	.vidioc_s_selection = mxr_s_selection,
};

static int mxr_video_open(struct file *file)
{
	struct mxr_layer *layer = video_drvdata(file);
	struct mxr_device *mdev = layer->mdev;
	int ret = 0;

	mxr_dbg(mdev, "%s:%d\n", __func__, __LINE__);
	if (mutex_lock_interruptible(&layer->mutex))
		return -ERESTARTSYS;
	/* ensure that the device probe is finished */
	wait_for_device_probe();
	/* creating context for file descriptor */
	ret = v4l2_fh_open(file);
	if (ret) {
		mxr_err(mdev, "v4l2_fh_open failed\n");
		goto unlock;
	}

	/* leaving if layer is already initialized */
	if (!v4l2_fh_is_singular_file(file))
		goto unlock;

	/* FIXME: should power be enabled on open? */
	ret = mxr_power_get(mdev);
	if (ret) {
		mxr_err(mdev, "power on failed\n");
		goto fail_fh_open;
	}

	ret = vb2_queue_init(&layer->vb_queue);
	if (ret != 0) {
		mxr_err(mdev, "failed to initialize vb2 queue\n");
		goto fail_power;
	}
	/* set default format, first on the list */
	layer->fmt = layer->fmt_array[0];
	/* setup default geometry */
	mxr_layer_default_geo(layer);
	mutex_unlock(&layer->mutex);

	return 0;

fail_power:
	mxr_power_put(mdev);

fail_fh_open:
	v4l2_fh_release(file);

unlock:
	mutex_unlock(&layer->mutex);

	return ret;
}

static unsigned int
mxr_video_poll(struct file *file, struct poll_table_struct *wait)
{
	struct mxr_layer *layer = video_drvdata(file);
	unsigned int res;

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);

	mutex_lock(&layer->mutex);
	res = vb2_poll(&layer->vb_queue, file, wait);
	mutex_unlock(&layer->mutex);
	return res;
}

static int mxr_video_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct mxr_layer *layer = video_drvdata(file);
	int ret;

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);

	if (mutex_lock_interruptible(&layer->mutex))
		return -ERESTARTSYS;
	ret = vb2_mmap(&layer->vb_queue, vma);
	mutex_unlock(&layer->mutex);
	return ret;
}

static int mxr_video_release(struct file *file)
{
	struct mxr_layer *layer = video_drvdata(file);

	mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
	mutex_lock(&layer->mutex);
	if (v4l2_fh_is_singular_file(file)) {
		vb2_queue_release(&layer->vb_queue);
		mxr_power_put(layer->mdev);
	}
	v4l2_fh_release(file);
	mutex_unlock(&layer->mutex);
	return 0;
}

static const struct v4l2_file_operations mxr_fops = {
	.owner = THIS_MODULE,
	.open = mxr_video_open,
	.poll = mxr_video_poll,
	.mmap = mxr_video_mmap,
	.release = mxr_video_release,
	.unlocked_ioctl = video_ioctl2,
};

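/*
 * vb2 queue operations.  queue_setup() tells videobuf2 how many planes each
 * buffer has and how large they must be, based on the currently selected
 * format and the full source size; all planes are backed by the single
 * dma-contig allocator context created in mxr_acquire_video().
 */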
static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *pfmt,
	unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[],
	void *alloc_ctxs[])
{
	struct mxr_layer *layer = vb2_get_drv_priv(vq);
	const struct mxr_format *fmt = layer->fmt;
	int i;
	struct mxr_device *mdev = layer->mdev;
	struct v4l2_plane_pix_format planes[3];

	mxr_dbg(mdev, "%s\n", __func__);
	/* checking if format was configured */
	if (fmt == NULL)
		return -EINVAL;
	mxr_dbg(mdev, "fmt = %s\n", fmt->name);
	mxr_mplane_fill(planes, fmt, layer->geo.src.full_width,
		layer->geo.src.full_height);

	*nplanes = fmt->num_subframes;
	for (i = 0; i < fmt->num_subframes; ++i) {
		alloc_ctxs[i] = layer->mdev->alloc_ctx;
		sizes[i] = planes[i].sizeimage;
		mxr_dbg(mdev, "size[%d] = %08x\n", i, sizes[i]);
	}

	if (*nbuffers == 0)
		*nbuffers = 1;

	return 0;
}

static void buf_queue(struct vb2_buffer *vb)
{
	struct mxr_buffer *buffer = container_of(vb, struct mxr_buffer, vb);
	struct mxr_layer *layer = vb2_get_drv_priv(vb->vb2_queue);
	struct mxr_device *mdev = layer->mdev;
	unsigned long flags;

	spin_lock_irqsave(&layer->enq_slock, flags);
	list_add_tail(&buffer->list, &layer->enq_list);
	spin_unlock_irqrestore(&layer->enq_slock, flags);

	mxr_dbg(mdev, "queuing buffer\n");
}

static int start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct mxr_layer *layer = vb2_get_drv_priv(vq);
	struct mxr_device *mdev = layer->mdev;
	unsigned long flags;

	mxr_dbg(mdev, "%s\n", __func__);

	/* block any changes in output configuration */
	mxr_output_get(mdev);

	mxr_layer_update_output(layer);
	layer->ops.format_set(layer);
	/* enabling layer in hardware */
	spin_lock_irqsave(&layer->enq_slock, flags);
	layer->state = MXR_LAYER_STREAMING;
	spin_unlock_irqrestore(&layer->enq_slock, flags);

	layer->ops.stream_set(layer, MXR_ENABLE);
	mxr_streamer_get(mdev);

	return 0;
}

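/*
 * Watchdog armed while a stream is being stopped: if the buffers still held
 * in update_buf/shadow_buf (presumably the ones programmed into the
 * hardware) are never reported as finished, they are forcibly returned to
 * videobuf2 in the ERROR state so that vb2_wait_for_all_buffers() in
 * stop_streaming() cannot block forever.
 */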
static void mxr_watchdog(unsigned long arg)
{
	struct mxr_layer *layer = (struct mxr_layer *) arg;
	struct mxr_device *mdev = layer->mdev;
	unsigned long flags;

	mxr_err(mdev, "watchdog fired for layer %s\n", layer->vfd.name);

	spin_lock_irqsave(&layer->enq_slock, flags);

	if (layer->update_buf == layer->shadow_buf)
		layer->update_buf = NULL;
	if (layer->update_buf) {
		vb2_buffer_done(&layer->update_buf->vb, VB2_BUF_STATE_ERROR);
		layer->update_buf = NULL;
	}
	if (layer->shadow_buf) {
		vb2_buffer_done(&layer->shadow_buf->vb, VB2_BUF_STATE_ERROR);
		layer->shadow_buf = NULL;
	}
	spin_unlock_irqrestore(&layer->enq_slock, flags);
}

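/*
 * Stopping a stream: mark the layer as finishing, return all still-queued
 * buffers with the ERROR status, then wait (with the watchdog above as a
 * 1 second safety net) for the remaining buffers before disabling the layer
 * and dropping the streamer and output references.
 */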
static void stop_streaming(struct vb2_queue *vq)
{
	struct mxr_layer *layer = vb2_get_drv_priv(vq);
	struct mxr_device *mdev = layer->mdev;
	unsigned long flags;
	struct timer_list watchdog;
	struct mxr_buffer *buf, *buf_tmp;

	mxr_dbg(mdev, "%s\n", __func__);

	spin_lock_irqsave(&layer->enq_slock, flags);

	/* reset list */
	layer->state = MXR_LAYER_STREAMING_FINISH;

	/* set all buffers to the done state */
	list_for_each_entry_safe(buf, buf_tmp, &layer->enq_list, list) {
		list_del(&buf->list);
		vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
	}

	spin_unlock_irqrestore(&layer->enq_slock, flags);

	/* give 1 second for the last buffers to complete */
	setup_timer_on_stack(&watchdog, mxr_watchdog,
		(unsigned long)layer);
	mod_timer(&watchdog, jiffies + msecs_to_jiffies(1000));

	/* wait until all buffers have reached the done state */
	vb2_wait_for_all_buffers(vq);

	/* stop the timer once all synchronization is done */
	del_timer_sync(&watchdog);
	destroy_timer_on_stack(&watchdog);

	/* stopping hardware */
	spin_lock_irqsave(&layer->enq_slock, flags);
	layer->state = MXR_LAYER_IDLE;
	spin_unlock_irqrestore(&layer->enq_slock, flags);

	/* disabling layer in hardware */
	layer->ops.stream_set(layer, MXR_DISABLE);
	/* remove one streamer */
	mxr_streamer_put(mdev);
	/* allow changes in output configuration */
	mxr_output_put(mdev);
}

static struct vb2_ops mxr_video_qops = {
	.queue_setup = queue_setup,
	.buf_queue = buf_queue,
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
	.start_streaming = start_streaming,
	.stop_streaming = stop_streaming,
};

/* FIXME: try to move this function into mxr_base_layer_create */
int mxr_base_layer_register(struct mxr_layer *layer)
{
	struct mxr_device *mdev = layer->mdev;
	int ret;

	ret = video_register_device(&layer->vfd, VFL_TYPE_GRABBER, -1);
	if (ret)
		mxr_err(mdev, "failed to register video device\n");
	else
		mxr_info(mdev, "registered layer %s as /dev/video%d\n",
			layer->vfd.name, layer->vfd.num);
	return ret;
}

void mxr_base_layer_unregister(struct mxr_layer *layer)
{
	video_unregister_device(&layer->vfd);
}

void mxr_layer_release(struct mxr_layer *layer)
{
	if (layer->ops.release)
		layer->ops.release(layer);
}

void mxr_base_layer_release(struct mxr_layer *layer)
{
	kfree(layer);
}

static void mxr_vfd_release(struct video_device *vdev)
{
	pr_info("video device release\n");
}

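/*
 * Allocate and initialise a generic layer: a video_device in output
 * direction served by mxr_fops/mxr_ioctl_ops, and a multiplanar output vb2
 * queue supporting MMAP, USERPTR and DMABUF with dma-contig memops and at
 * least one buffer queued before streaming may start.  The caller registers
 * the resulting video device with mxr_base_layer_register().
 */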
struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
	int idx, char *name, struct mxr_layer_ops *ops)
{
	struct mxr_layer *layer;

	layer = kzalloc(sizeof(*layer), GFP_KERNEL);
	if (layer == NULL) {
		mxr_err(mdev, "not enough memory for layer.\n");
		goto fail;
	}

	layer->mdev = mdev;
	layer->idx = idx;
	layer->ops = *ops;

	spin_lock_init(&layer->enq_slock);
	INIT_LIST_HEAD(&layer->enq_list);
	mutex_init(&layer->mutex);

	layer->vfd = (struct video_device) {
		.minor = -1,
		.release = mxr_vfd_release,
		.fops = &mxr_fops,
		.vfl_dir = VFL_DIR_TX,
		.ioctl_ops = &mxr_ioctl_ops,
	};
	strlcpy(layer->vfd.name, name, sizeof(layer->vfd.name));

	video_set_drvdata(&layer->vfd, layer);
	layer->vfd.lock = &layer->mutex;
	layer->vfd.v4l2_dev = &mdev->v4l2_dev;

	layer->vb_queue = (struct vb2_queue) {
		.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
		.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF,
		.drv_priv = layer,
		.buf_struct_size = sizeof(struct mxr_buffer),
		.ops = &mxr_video_qops,
		.min_buffers_needed = 1,
		.mem_ops = &vb2_dma_contig_memops,
		.lock = &layer->mutex,
	};

	return layer;

fail:
	return NULL;
}

static const struct mxr_format *find_format_by_fourcc(
	struct mxr_layer *layer, unsigned long fourcc)
{
	int i;

	for (i = 0; i < layer->fmt_array_size; ++i)
		if (layer->fmt_array[i]->fourcc == fourcc)
			return layer->fmt_array[i];
	return NULL;
}

static const struct mxr_format *find_format_by_index(
	struct mxr_layer *layer, unsigned long index)
{
	if (index >= layer->fmt_array_size)
		return NULL;
	return layer->fmt_array[index];
}