/*
 * Xilinx Video DMA
 *
 * Copyright (C) 2013-2015 Ideas on Board
 * Copyright (C) 2013-2015 Xilinx, Inc.
 *
 * Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
 *           Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/dma/xilinx_dma.h>
#include <linux/lcm.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/slab.h>

#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>

#include "xilinx-dma.h"
#include "xilinx-vip.h"
#include "xilinx-vipp.h"

#define XVIP_DMA_DEF_FORMAT		V4L2_PIX_FMT_YUYV
#define XVIP_DMA_DEF_WIDTH		1920
#define XVIP_DMA_DEF_HEIGHT		1080

/* Minimum and maximum widths are expressed in bytes */
#define XVIP_DMA_MIN_WIDTH		1U
#define XVIP_DMA_MAX_WIDTH		65535U
#define XVIP_DMA_MIN_HEIGHT		1U
#define XVIP_DMA_MAX_HEIGHT		8191U

/* -----------------------------------------------------------------------------
 * Helper functions
 */

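/*
 * Return the V4L2 subdev connected to the @local pad, or NULL if the remote
 * entity isn't a V4L2 subdev. If @pad is not NULL, store the remote pad index
 * in it.
 */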
static struct v4l2_subdev *
xvip_dma_remote_subdev(struct media_pad *local, u32 *pad)
{
	struct media_pad *remote;

	remote = media_entity_remote_pad(local);
	if (remote == NULL ||
	    media_entity_type(remote->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
		return NULL;

	if (pad)
		*pad = remote->index;

	return media_entity_to_v4l2_subdev(remote->entity);
}

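/*
 * Verify that the active format on the connected subdev pad matches the format
 * currently configured on the DMA video node. Return 0 when the formats match,
 * -EPIPE when the video node isn't connected to a subdev, or a negative error
 * code otherwise.
 */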
static int xvip_dma_verify_format(struct xvip_dma *dma)
{
	struct v4l2_subdev_format fmt;
	struct v4l2_subdev *subdev;
	int ret;

	subdev = xvip_dma_remote_subdev(&dma->pad, &fmt.pad);
	if (subdev == NULL)
		return -EPIPE;

	fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
	if (ret < 0)
		return ret == -ENOIOCTLCMD ? -EINVAL : ret;

	if (dma->fmtinfo->code != fmt.format.code ||
	    dma->format.height != fmt.format.height ||
	    dma->format.width != fmt.format.width ||
	    dma->format.colorspace != fmt.format.colorspace)
		return -EINVAL;

	return 0;
}

/* -----------------------------------------------------------------------------
 * Pipeline Stream Management
 */

/**
 * xvip_pipeline_start_stop - Start or stop streaming on a pipeline
 * @pipe: The pipeline
 * @start: Start (when true) or stop (when false) the pipeline
 *
 * Walk the entity chain starting at the pipeline output video node and start
 * or stop all of the entities in it.
 *
 * Return: 0 if successful, or the return value of the failed video::s_stream
 * operation otherwise.
 */
static int xvip_pipeline_start_stop(struct xvip_pipeline *pipe, bool start)
{
	struct xvip_dma *dma = pipe->output;
	struct media_entity *entity;
	struct media_pad *pad;
	struct v4l2_subdev *subdev;
	int ret;

	entity = &dma->video.entity;
	while (1) {
		pad = &entity->pads[0];
		if (!(pad->flags & MEDIA_PAD_FL_SINK))
			break;

		pad = media_entity_remote_pad(pad);
		if (pad == NULL ||
		    media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
			break;

		entity = pad->entity;
		subdev = media_entity_to_v4l2_subdev(entity);

		ret = v4l2_subdev_call(subdev, video, s_stream, start);
		if (start && ret < 0 && ret != -ENOIOCTLCMD)
			return ret;
	}

	return 0;
}

/**
 * xvip_pipeline_set_stream - Enable/disable streaming on a pipeline
 * @pipe: The pipeline
 * @on: Turn the stream on when true or off when false
 *
 * The pipeline is shared between all DMA engines connected to its input and
 * output. While the stream state of the DMA engines can be controlled
 * independently, pipelines have a shared stream state that enables or disables
 * all entities in the pipeline. For this reason the pipeline uses a streaming
 * counter that tracks the number of DMA engines that have requested the stream
 * to be enabled.
 *
 * When called with the @on argument set to true, this function will increment
 * the pipeline streaming count. If the streaming count reaches the number of
 * DMA engines in the pipeline it will enable all entities that belong to the
 * pipeline.
 *
 * Similarly, when called with the @on argument set to false, this function will
 * decrement the pipeline streaming count and disable all entities in the
 * pipeline when the streaming count reaches zero.
 *
 * Return: 0 if successful, or the return value of the failed video::s_stream
 * operation otherwise. Stopping the pipeline never fails. The pipeline state is
 * not updated when the operation fails.
 */
static int xvip_pipeline_set_stream(struct xvip_pipeline *pipe, bool on)
{
	int ret = 0;

	mutex_lock(&pipe->lock);

	if (on) {
		if (pipe->stream_count == pipe->num_dmas - 1) {
			ret = xvip_pipeline_start_stop(pipe, true);
			if (ret < 0)
				goto done;
		}
		pipe->stream_count++;
	} else {
		if (--pipe->stream_count == 0)
			xvip_pipeline_start_stop(pipe, false);
	}

done:
	mutex_unlock(&pipe->lock);
	return ret;
}

static int xvip_pipeline_validate(struct xvip_pipeline *pipe,
				  struct xvip_dma *start)
{
	struct media_entity_graph graph;
	struct media_entity *entity = &start->video.entity;
	struct media_device *mdev = entity->parent;
	unsigned int num_inputs = 0;
	unsigned int num_outputs = 0;

	mutex_lock(&mdev->graph_mutex);

	/* Walk the graph to locate the video nodes. */
	media_entity_graph_walk_start(&graph, entity);

	while ((entity = media_entity_graph_walk_next(&graph))) {
		struct xvip_dma *dma;

		if (entity->type != MEDIA_ENT_T_DEVNODE_V4L)
			continue;

		dma = to_xvip_dma(media_entity_to_video_device(entity));

		if (dma->pad.flags & MEDIA_PAD_FL_SINK) {
			pipe->output = dma;
			num_outputs++;
		} else {
			num_inputs++;
		}
	}

	mutex_unlock(&mdev->graph_mutex);

	/* We need exactly one output and zero or one input. */
	if (num_outputs != 1 || num_inputs > 1)
		return -EPIPE;

	pipe->num_dmas = num_inputs + num_outputs;

	return 0;
}

static void __xvip_pipeline_cleanup(struct xvip_pipeline *pipe)
{
	pipe->num_dmas = 0;
	pipe->output = NULL;
}

/**
 * xvip_pipeline_cleanup - Cleanup the pipeline after streaming
 * @pipe: the pipeline
 *
 * Decrease the pipeline use count and clean it up if we were the last user.
 */
static void xvip_pipeline_cleanup(struct xvip_pipeline *pipe)
{
	mutex_lock(&pipe->lock);

	/* If we're the last user clean up the pipeline. */
	if (--pipe->use_count == 0)
		__xvip_pipeline_cleanup(pipe);

	mutex_unlock(&pipe->lock);
}

/**
 * xvip_pipeline_prepare - Prepare the pipeline for streaming
 * @pipe: the pipeline
 * @dma: DMA engine at one end of the pipeline
 *
 * Validate the pipeline if no user exists yet, otherwise just increase the use
 * count.
 *
 * Return: 0 if successful or -EPIPE if the pipeline is not valid.
 */
static int xvip_pipeline_prepare(struct xvip_pipeline *pipe,
				 struct xvip_dma *dma)
{
	int ret;

	mutex_lock(&pipe->lock);

	/* If we're the first user validate and initialize the pipeline. */
	if (pipe->use_count == 0) {
		ret = xvip_pipeline_validate(pipe, dma);
		if (ret < 0) {
			__xvip_pipeline_cleanup(pipe);
			goto done;
		}
	}

	pipe->use_count++;
	ret = 0;

done:
	mutex_unlock(&pipe->lock);
	return ret;
}

/* -----------------------------------------------------------------------------
 * videobuf2 queue operations
 */

/**
 * struct xvip_dma_buffer - Video DMA buffer
 * @buf: vb2 buffer base object
 * @queue: buffer list entry in the DMA engine queued buffers list
 * @dma: DMA channel that uses the buffer
 */
struct xvip_dma_buffer {
	struct vb2_v4l2_buffer buf;
	struct list_head queue;
	struct xvip_dma *dma;
};

#define to_xvip_dma_buffer(vb)	container_of(vb, struct xvip_dma_buffer, buf)

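/*
 * DMA transfer completion callback. Remove the buffer from the DMA engine
 * queued buffers list, fill in the buffer metadata (field, sequence number,
 * timestamp and payload size) and hand the buffer back to videobuf2 as done.
 */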
static void xvip_dma_complete(void *param)
{
	struct xvip_dma_buffer *buf = param;
	struct xvip_dma *dma = buf->dma;

	spin_lock(&dma->queued_lock);
	list_del(&buf->queue);
	spin_unlock(&dma->queued_lock);

	buf->buf.field = V4L2_FIELD_NONE;
	buf->buf.sequence = dma->sequence++;
	v4l2_get_timestamp(&buf->buf.timestamp);
	vb2_set_plane_payload(&buf->buf.vb2_buf, 0, dma->format.sizeimage);
	vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);
}

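/*
 * vb2 queue setup operation. The driver uses a single contiguous plane whose
 * size is taken from the requested format when one is provided, or from the
 * currently configured format otherwise.
 */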
static int
xvip_dma_queue_setup(struct vb2_queue *vq, const void *parg,
		     unsigned int *nbuffers, unsigned int *nplanes,
		     unsigned int sizes[], void *alloc_ctxs[])
{
	const struct v4l2_format *fmt = parg;
	struct xvip_dma *dma = vb2_get_drv_priv(vq);

	/* Make sure the image size is large enough. */
	if (fmt && fmt->fmt.pix.sizeimage < dma->format.sizeimage)
		return -EINVAL;

	*nplanes = 1;

	sizes[0] = fmt ? fmt->fmt.pix.sizeimage : dma->format.sizeimage;
	alloc_ctxs[0] = dma->alloc_ctx;

	return 0;
}

static int xvip_dma_buffer_prepare(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct xvip_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
	struct xvip_dma_buffer *buf = to_xvip_dma_buffer(vbuf);

	buf->dma = dma;

	return 0;
}

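/*
 * vb2 buffer queue operation. Prepare an interleaved DMA transfer matching the
 * configured format (dma->format.height lines of dma->sgl[0].size bytes with
 * the stride taken from bytesperline), with the direction selected from the
 * queue type, then add the buffer to the queued buffers list and submit the
 * descriptor to the DMA engine.
 */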
static void xvip_dma_buffer_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct xvip_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
	struct xvip_dma_buffer *buf = to_xvip_dma_buffer(vbuf);
	struct dma_async_tx_descriptor *desc;
	dma_addr_t addr = vb2_dma_contig_plane_dma_addr(vb, 0);
	u32 flags;

	if (dma->queue.type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
		flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
		dma->xt.dir = DMA_DEV_TO_MEM;
		dma->xt.src_sgl = false;
		dma->xt.dst_sgl = true;
		dma->xt.dst_start = addr;
	} else {
		flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
		dma->xt.dir = DMA_MEM_TO_DEV;
		dma->xt.src_sgl = true;
		dma->xt.dst_sgl = false;
		dma->xt.src_start = addr;
	}

	dma->xt.frame_size = 1;
	dma->sgl[0].size = dma->format.width * dma->fmtinfo->bpp;
	dma->sgl[0].icg = dma->format.bytesperline - dma->sgl[0].size;
	dma->xt.numf = dma->format.height;

	desc = dmaengine_prep_interleaved_dma(dma->dma, &dma->xt, flags);
	if (!desc) {
		dev_err(dma->xdev->dev, "Failed to prepare DMA transfer\n");
		vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR);
		return;
	}
	desc->callback = xvip_dma_complete;
	desc->callback_param = buf;

	spin_lock_irq(&dma->queued_lock);
	list_add_tail(&buf->queue, &dma->queued_bufs);
	spin_unlock_irq(&dma->queued_lock);

	dmaengine_submit(desc);

	if (vb2_is_streaming(&dma->queue))
		dma_async_issue_pending(dma->dma);
}

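/*
 * vb2 start_streaming operation. Mark the pipeline as streaming, verify the
 * format against the connected subdev, prepare the pipeline, start the DMA
 * engine and finally enable the entities in the pipeline. On error all queued
 * buffers are returned to videobuf2 in the QUEUED state.
 */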
static int xvip_dma_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct xvip_dma *dma = vb2_get_drv_priv(vq);
	struct xvip_dma_buffer *buf, *nbuf;
	struct xvip_pipeline *pipe;
	int ret;

	dma->sequence = 0;

	/*
	 * Start streaming on the pipeline. No link touching an entity in the
	 * pipeline can be activated or deactivated once streaming is started.
	 *
	 * Use the pipeline object embedded in the first DMA object that starts
	 * streaming.
	 */
	pipe = dma->video.entity.pipe
	     ? to_xvip_pipeline(&dma->video.entity) : &dma->pipe;

	ret = media_entity_pipeline_start(&dma->video.entity, &pipe->pipe);
	if (ret < 0)
		goto error;

	/* Verify that the configured format matches the output of the
	 * connected subdev.
	 */
	ret = xvip_dma_verify_format(dma);
	if (ret < 0)
		goto error_stop;

	ret = xvip_pipeline_prepare(pipe, dma);
	if (ret < 0)
		goto error_stop;

	/* Start the DMA engine. This must be done before starting the blocks
	 * in the pipeline to avoid DMA synchronization issues.
	 */
	dma_async_issue_pending(dma->dma);

	/* Start the pipeline. */
	xvip_pipeline_set_stream(pipe, true);

	return 0;

error_stop:
	media_entity_pipeline_stop(&dma->video.entity);

error:
	/* Give back all queued buffers to videobuf2. */
	spin_lock_irq(&dma->queued_lock);
	list_for_each_entry_safe(buf, nbuf, &dma->queued_bufs, queue) {
		vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_QUEUED);
		list_del(&buf->queue);
	}
	spin_unlock_irq(&dma->queued_lock);

	return ret;
}

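/*
 * vb2 stop_streaming operation. Stop the pipeline entities, terminate the DMA
 * engine, clean up the pipeline and return all queued buffers to videobuf2 in
 * the ERROR state.
 */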
static void xvip_dma_stop_streaming(struct vb2_queue *vq)
{
	struct xvip_dma *dma = vb2_get_drv_priv(vq);
	struct xvip_pipeline *pipe = to_xvip_pipeline(&dma->video.entity);
	struct xvip_dma_buffer *buf, *nbuf;

	/* Stop the pipeline. */
	xvip_pipeline_set_stream(pipe, false);

	/* Stop and reset the DMA engine. */
	dmaengine_terminate_all(dma->dma);

	/* Cleanup the pipeline and mark it as being stopped. */
	xvip_pipeline_cleanup(pipe);
	media_entity_pipeline_stop(&dma->video.entity);

	/* Give back all queued buffers to videobuf2. */
	spin_lock_irq(&dma->queued_lock);
	list_for_each_entry_safe(buf, nbuf, &dma->queued_bufs, queue) {
		vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR);
		list_del(&buf->queue);
	}
	spin_unlock_irq(&dma->queued_lock);
}

static struct vb2_ops xvip_dma_queue_qops = {
	.queue_setup = xvip_dma_queue_setup,
	.buf_prepare = xvip_dma_buffer_prepare,
	.buf_queue = xvip_dma_buffer_queue,
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
	.start_streaming = xvip_dma_start_streaming,
	.stop_streaming = xvip_dma_stop_streaming,
};

/* -----------------------------------------------------------------------------
 * V4L2 ioctls
 */

static int
xvip_dma_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
{
	struct v4l2_fh *vfh = file->private_data;
	struct xvip_dma *dma = to_xvip_dma(vfh->vdev);

	cap->capabilities = V4L2_CAP_DEVICE_CAPS | V4L2_CAP_STREAMING
			  | dma->xdev->v4l2_caps;

	if (dma->queue.type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
		cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
	else
		cap->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;

	strlcpy(cap->driver, "xilinx-vipp", sizeof(cap->driver));
	strlcpy(cap->card, dma->video.name, sizeof(cap->card));
	snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s:%u",
		 dma->xdev->dev->of_node->name, dma->port);

	return 0;
}

/* FIXME: Without this callback function, some applications are not configured
 * with the correct formats, which results in frames in the wrong format.
 * Whether this callback is required is not clearly defined, so it should be
 * clarified through the mailing list.
 */
static int
xvip_dma_enum_format(struct file *file, void *fh, struct v4l2_fmtdesc *f)
{
	struct v4l2_fh *vfh = file->private_data;
	struct xvip_dma *dma = to_xvip_dma(vfh->vdev);

	if (f->index > 0)
		return -EINVAL;

	f->pixelformat = dma->format.pixelformat;
	strlcpy(f->description, dma->fmtinfo->description,
		sizeof(f->description));

	return 0;
}

static int
xvip_dma_get_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct v4l2_fh *vfh = file->private_data;
	struct xvip_dma *dma = to_xvip_dma(vfh->vdev);

	format->fmt.pix = dma->format;

	return 0;
}

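/*
 * Adjust the requested pixel format to something the hardware can handle: fall
 * back to the default pixel format when the requested one isn't supported, and
 * clamp the width, height and bytes per line to the DMA engine alignment and
 * size constraints. When @fmtinfo isn't NULL the selected format information
 * is returned through it.
 */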
static void
__xvip_dma_try_format(struct xvip_dma *dma, struct v4l2_pix_format *pix,
		      const struct xvip_video_format **fmtinfo)
{
	const struct xvip_video_format *info;
	unsigned int min_width;
	unsigned int max_width;
	unsigned int min_bpl;
	unsigned int max_bpl;
	unsigned int width;
	unsigned int align;
	unsigned int bpl;

	/* Retrieve format information and select the default format if the
	 * requested format isn't supported.
	 */
	info = xvip_get_format_by_fourcc(pix->pixelformat);
	if (IS_ERR(info))
		info = xvip_get_format_by_fourcc(XVIP_DMA_DEF_FORMAT);

	pix->pixelformat = info->fourcc;
	pix->field = V4L2_FIELD_NONE;

	/* The transfer alignment requirements are expressed in bytes. Compute
	 * the minimum and maximum values, clamp the requested width and convert
	 * it back to pixels.
	 */
	align = lcm(dma->align, info->bpp);
	min_width = roundup(XVIP_DMA_MIN_WIDTH, align);
	max_width = rounddown(XVIP_DMA_MAX_WIDTH, align);
	width = rounddown(pix->width * info->bpp, align);

	pix->width = clamp(width, min_width, max_width) / info->bpp;
	pix->height = clamp(pix->height, XVIP_DMA_MIN_HEIGHT,
			    XVIP_DMA_MAX_HEIGHT);

	/* Clamp the requested bytes per line value. The value must be aligned
	 * to the DMA engine requirements, large enough to store a full line of
	 * pixels and within the maximum supported line size.
	 */
	min_bpl = pix->width * info->bpp;
	max_bpl = rounddown(XVIP_DMA_MAX_WIDTH, dma->align);
	bpl = rounddown(pix->bytesperline, dma->align);

	pix->bytesperline = clamp(bpl, min_bpl, max_bpl);
	pix->sizeimage = pix->bytesperline * pix->height;

	if (fmtinfo)
		*fmtinfo = info;
}

static int
xvip_dma_try_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct v4l2_fh *vfh = file->private_data;
	struct xvip_dma *dma = to_xvip_dma(vfh->vdev);

	__xvip_dma_try_format(dma, &format->fmt.pix, NULL);
	return 0;
}

static int
xvip_dma_set_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct v4l2_fh *vfh = file->private_data;
	struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
	const struct xvip_video_format *info;

	__xvip_dma_try_format(dma, &format->fmt.pix, &info);

	if (vb2_is_busy(&dma->queue))
		return -EBUSY;

	dma->format = format->fmt.pix;
	dma->fmtinfo = info;

	return 0;
}

static const struct v4l2_ioctl_ops xvip_dma_ioctl_ops = {
	.vidioc_querycap = xvip_dma_querycap,
	.vidioc_enum_fmt_vid_cap = xvip_dma_enum_format,
	.vidioc_g_fmt_vid_cap = xvip_dma_get_format,
	.vidioc_g_fmt_vid_out = xvip_dma_get_format,
	.vidioc_s_fmt_vid_cap = xvip_dma_set_format,
	.vidioc_s_fmt_vid_out = xvip_dma_set_format,
	.vidioc_try_fmt_vid_cap = xvip_dma_try_format,
	.vidioc_try_fmt_vid_out = xvip_dma_try_format,
	.vidioc_reqbufs = vb2_ioctl_reqbufs,
	.vidioc_querybuf = vb2_ioctl_querybuf,
	.vidioc_qbuf = vb2_ioctl_qbuf,
	.vidioc_dqbuf = vb2_ioctl_dqbuf,
	.vidioc_create_bufs = vb2_ioctl_create_bufs,
	.vidioc_expbuf = vb2_ioctl_expbuf,
	.vidioc_streamon = vb2_ioctl_streamon,
	.vidioc_streamoff = vb2_ioctl_streamoff,
};

/* -----------------------------------------------------------------------------
 * V4L2 file operations
 */

static const struct v4l2_file_operations xvip_dma_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = video_ioctl2,
	.open = v4l2_fh_open,
	.release = vb2_fop_release,
	.poll = vb2_fop_poll,
	.mmap = vb2_fop_mmap,
};

/* -----------------------------------------------------------------------------
 * Xilinx Video DMA Core
 */

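/**
 * xvip_dma_init - Initialize a video DMA channel
 * @xdev: Composite video device the DMA channel belongs to
 * @dma: Video DMA channel to initialize
 * @type: Buffer type, either video capture or video output
 * @port: Port number, used to look up the DMA channel and name the video node
 *
 * Initialize the media entity, video device, videobuf2 queue and DMA channel
 * for the @dma video node and register the video device.
 *
 * Return: 0 on success or a negative error code on failure.
 */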
int xvip_dma_init(struct xvip_composite_device *xdev, struct xvip_dma *dma,
		  enum v4l2_buf_type type, unsigned int port)
{
	char name[16];
	int ret;

	dma->xdev = xdev;
	dma->port = port;
	mutex_init(&dma->lock);
	mutex_init(&dma->pipe.lock);
	INIT_LIST_HEAD(&dma->queued_bufs);
	spin_lock_init(&dma->queued_lock);

	dma->fmtinfo = xvip_get_format_by_fourcc(XVIP_DMA_DEF_FORMAT);
	dma->format.pixelformat = dma->fmtinfo->fourcc;
	dma->format.colorspace = V4L2_COLORSPACE_SRGB;
	dma->format.field = V4L2_FIELD_NONE;
	dma->format.width = XVIP_DMA_DEF_WIDTH;
	dma->format.height = XVIP_DMA_DEF_HEIGHT;
	dma->format.bytesperline = dma->format.width * dma->fmtinfo->bpp;
	dma->format.sizeimage = dma->format.bytesperline * dma->format.height;

	/* Initialize the media entity... */
	dma->pad.flags = type == V4L2_BUF_TYPE_VIDEO_CAPTURE
		       ? MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;

	ret = media_entity_init(&dma->video.entity, 1, &dma->pad, 0);
	if (ret < 0)
		goto error;

	/* ... and the video node... */
	dma->video.fops = &xvip_dma_fops;
	dma->video.v4l2_dev = &xdev->v4l2_dev;
	dma->video.queue = &dma->queue;
	snprintf(dma->video.name, sizeof(dma->video.name), "%s %s %u",
		 xdev->dev->of_node->name,
		 type == V4L2_BUF_TYPE_VIDEO_CAPTURE ? "output" : "input",
		 port);
	dma->video.vfl_type = VFL_TYPE_GRABBER;
	dma->video.vfl_dir = type == V4L2_BUF_TYPE_VIDEO_CAPTURE
			   ? VFL_DIR_RX : VFL_DIR_TX;
	dma->video.release = video_device_release_empty;
	dma->video.ioctl_ops = &xvip_dma_ioctl_ops;
	dma->video.lock = &dma->lock;

	video_set_drvdata(&dma->video, dma);

	/* ... and the buffers queue... */
	dma->alloc_ctx = vb2_dma_contig_init_ctx(dma->xdev->dev);
	if (IS_ERR(dma->alloc_ctx)) {
		ret = PTR_ERR(dma->alloc_ctx);
		goto error;
	}

	/* Don't enable VB2_READ and VB2_WRITE, as using the read() and write()
	 * V4L2 APIs would be inefficient. Testing on the command line with a
	 * 'cat /dev/video?' thus won't be possible, but given that the driver
	 * anyway requires a test tool to set up the pipeline before any video
	 * stream can be started, requiring a specific V4L2 test tool as well
	 * instead of 'cat' isn't really a drawback.
	 */
	dma->queue.type = type;
	dma->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
	dma->queue.lock = &dma->lock;
	dma->queue.drv_priv = dma;
	dma->queue.buf_struct_size = sizeof(struct xvip_dma_buffer);
	dma->queue.ops = &xvip_dma_queue_qops;
	dma->queue.mem_ops = &vb2_dma_contig_memops;
	dma->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC
				   | V4L2_BUF_FLAG_TSTAMP_SRC_EOF;
	ret = vb2_queue_init(&dma->queue);
	if (ret < 0) {
		dev_err(dma->xdev->dev, "failed to initialize VB2 queue\n");
		goto error;
	}

	/* ... and the DMA channel. */
	snprintf(name, sizeof(name), "port%u", port);
	dma->dma = dma_request_slave_channel(dma->xdev->dev, name);
	if (dma->dma == NULL) {
		dev_err(dma->xdev->dev, "no VDMA channel found\n");
		ret = -ENODEV;
		goto error;
	}

	dma->align = 1 << dma->dma->device->copy_align;

	ret = video_register_device(&dma->video, VFL_TYPE_GRABBER, -1);
	if (ret < 0) {
		dev_err(dma->xdev->dev, "failed to register video device\n");
		goto error;
	}

	return 0;

error:
	xvip_dma_cleanup(dma);
	return ret;
}

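/*
 * Undo everything done by xvip_dma_init(): unregister the video device,
 * release the DMA channel and the vb2 allocation context, clean up the media
 * entity and destroy the mutexes. Safe to call on a partially initialized
 * channel, as done from the xvip_dma_init() error path.
 */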
void xvip_dma_cleanup(struct xvip_dma *dma)
{
	if (video_is_registered(&dma->video))
		video_unregister_device(&dma->video);

	if (dma->dma)
		dma_release_channel(dma->dma);

	if (!IS_ERR_OR_NULL(dma->alloc_ctx))
		vb2_dma_contig_cleanup_ctx(dma->alloc_ctx);

	media_entity_cleanup(&dma->video.entity);

	mutex_destroy(&dma->lock);
	mutex_destroy(&dma->pipe.lock);
}