/*
 * ispvideo.c
 *
 * TI OMAP3 ISP - Generic video node
 *
 * Copyright (C) 2009-2010 Nokia Corporation
 *
 * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *	     Sakari Ailus <sakari.ailus@iki.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <asm/cacheflush.h>
#include <linux/clk.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-dma-contig.h>

#include "ispvideo.h"
#include "isp.h"


/* -----------------------------------------------------------------------------
 * Helper functions
 */

/*
 * NOTE: When adding new media bus codes, always remember to add
 * corresponding in-memory formats to the table below!!!
 */
static struct isp_format_info formats[] = {
	{ MEDIA_BUS_FMT_Y8_1X8, MEDIA_BUS_FMT_Y8_1X8,
	  MEDIA_BUS_FMT_Y8_1X8, MEDIA_BUS_FMT_Y8_1X8,
	  V4L2_PIX_FMT_GREY, 8, 1, },
	{ MEDIA_BUS_FMT_Y10_1X10, MEDIA_BUS_FMT_Y10_1X10,
	  MEDIA_BUS_FMT_Y10_1X10, MEDIA_BUS_FMT_Y8_1X8,
	  V4L2_PIX_FMT_Y10, 10, 2, },
	{ MEDIA_BUS_FMT_Y12_1X12, MEDIA_BUS_FMT_Y10_1X10,
	  MEDIA_BUS_FMT_Y12_1X12, MEDIA_BUS_FMT_Y8_1X8,
	  V4L2_PIX_FMT_Y12, 12, 2, },
	{ MEDIA_BUS_FMT_SBGGR8_1X8, MEDIA_BUS_FMT_SBGGR8_1X8,
	  MEDIA_BUS_FMT_SBGGR8_1X8, MEDIA_BUS_FMT_SBGGR8_1X8,
	  V4L2_PIX_FMT_SBGGR8, 8, 1, },
	{ MEDIA_BUS_FMT_SGBRG8_1X8, MEDIA_BUS_FMT_SGBRG8_1X8,
	  MEDIA_BUS_FMT_SGBRG8_1X8, MEDIA_BUS_FMT_SGBRG8_1X8,
	  V4L2_PIX_FMT_SGBRG8, 8, 1, },
	{ MEDIA_BUS_FMT_SGRBG8_1X8, MEDIA_BUS_FMT_SGRBG8_1X8,
	  MEDIA_BUS_FMT_SGRBG8_1X8, MEDIA_BUS_FMT_SGRBG8_1X8,
	  V4L2_PIX_FMT_SGRBG8, 8, 1, },
	{ MEDIA_BUS_FMT_SRGGB8_1X8, MEDIA_BUS_FMT_SRGGB8_1X8,
	  MEDIA_BUS_FMT_SRGGB8_1X8, MEDIA_BUS_FMT_SRGGB8_1X8,
	  V4L2_PIX_FMT_SRGGB8, 8, 1, },
	{ MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8, MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8,
	  MEDIA_BUS_FMT_SBGGR10_1X10, 0,
	  V4L2_PIX_FMT_SBGGR10DPCM8, 8, 1, },
	{ MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8, MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8,
	  MEDIA_BUS_FMT_SGBRG10_1X10, 0,
	  V4L2_PIX_FMT_SGBRG10DPCM8, 8, 1, },
	{ MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8, MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8,
	  MEDIA_BUS_FMT_SGRBG10_1X10, 0,
	  V4L2_PIX_FMT_SGRBG10DPCM8, 8, 1, },
	{ MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8, MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8,
	  MEDIA_BUS_FMT_SRGGB10_1X10, 0,
	  V4L2_PIX_FMT_SRGGB10DPCM8, 8, 1, },
	{ MEDIA_BUS_FMT_SBGGR10_1X10, MEDIA_BUS_FMT_SBGGR10_1X10,
	  MEDIA_BUS_FMT_SBGGR10_1X10, MEDIA_BUS_FMT_SBGGR8_1X8,
	  V4L2_PIX_FMT_SBGGR10, 10, 2, },
	{ MEDIA_BUS_FMT_SGBRG10_1X10, MEDIA_BUS_FMT_SGBRG10_1X10,
	  MEDIA_BUS_FMT_SGBRG10_1X10, MEDIA_BUS_FMT_SGBRG8_1X8,
	  V4L2_PIX_FMT_SGBRG10, 10, 2, },
	{ MEDIA_BUS_FMT_SGRBG10_1X10, MEDIA_BUS_FMT_SGRBG10_1X10,
	  MEDIA_BUS_FMT_SGRBG10_1X10, MEDIA_BUS_FMT_SGRBG8_1X8,
	  V4L2_PIX_FMT_SGRBG10, 10, 2, },
	{ MEDIA_BUS_FMT_SRGGB10_1X10, MEDIA_BUS_FMT_SRGGB10_1X10,
	  MEDIA_BUS_FMT_SRGGB10_1X10, MEDIA_BUS_FMT_SRGGB8_1X8,
	  V4L2_PIX_FMT_SRGGB10, 10, 2, },
	{ MEDIA_BUS_FMT_SBGGR12_1X12, MEDIA_BUS_FMT_SBGGR10_1X10,
	  MEDIA_BUS_FMT_SBGGR12_1X12, MEDIA_BUS_FMT_SBGGR8_1X8,
	  V4L2_PIX_FMT_SBGGR12, 12, 2, },
	{ MEDIA_BUS_FMT_SGBRG12_1X12, MEDIA_BUS_FMT_SGBRG10_1X10,
	  MEDIA_BUS_FMT_SGBRG12_1X12, MEDIA_BUS_FMT_SGBRG8_1X8,
	  V4L2_PIX_FMT_SGBRG12, 12, 2, },
	{ MEDIA_BUS_FMT_SGRBG12_1X12, MEDIA_BUS_FMT_SGRBG10_1X10,
	  MEDIA_BUS_FMT_SGRBG12_1X12, MEDIA_BUS_FMT_SGRBG8_1X8,
	  V4L2_PIX_FMT_SGRBG12, 12, 2, },
	{ MEDIA_BUS_FMT_SRGGB12_1X12, MEDIA_BUS_FMT_SRGGB10_1X10,
	  MEDIA_BUS_FMT_SRGGB12_1X12, MEDIA_BUS_FMT_SRGGB8_1X8,
	  V4L2_PIX_FMT_SRGGB12, 12, 2, },
	{ MEDIA_BUS_FMT_UYVY8_1X16, MEDIA_BUS_FMT_UYVY8_1X16,
	  MEDIA_BUS_FMT_UYVY8_1X16, 0,
	  V4L2_PIX_FMT_UYVY, 16, 2, },
	{ MEDIA_BUS_FMT_YUYV8_1X16, MEDIA_BUS_FMT_YUYV8_1X16,
	  MEDIA_BUS_FMT_YUYV8_1X16, 0,
	  V4L2_PIX_FMT_YUYV, 16, 2, },
	{ MEDIA_BUS_FMT_UYVY8_2X8, MEDIA_BUS_FMT_UYVY8_2X8,
	  MEDIA_BUS_FMT_UYVY8_2X8, 0,
	  V4L2_PIX_FMT_UYVY, 8, 2, },
	{ MEDIA_BUS_FMT_YUYV8_2X8, MEDIA_BUS_FMT_YUYV8_2X8,
	  MEDIA_BUS_FMT_YUYV8_2X8, 0,
	  V4L2_PIX_FMT_YUYV, 8, 2, },
	/* Empty entry to catch the unsupported pixel code (0) used by the CCDC
	 * module and avoid NULL pointer dereferences.
	 */
	{ 0, }
};

const struct isp_format_info *omap3isp_video_format_info(u32 code)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(formats); ++i) {
		if (formats[i].code == code)
			return &formats[i];
	}

	return NULL;
}

/*
 * isp_video_mbus_to_pix - Convert v4l2_mbus_framefmt to v4l2_pix_format
 * @video: ISP video instance
 * @mbus: v4l2_mbus_framefmt format (input)
 * @pix: v4l2_pix_format format (output)
 *
 * Fill the output pix structure with information from the input mbus format.
 * The bytesperline and sizeimage fields are computed from the requested bytes
 * per line value in the pix format and information from the video instance.
 *
 * Return the number of padding bytes at end of line.
 */
static unsigned int isp_video_mbus_to_pix(const struct isp_video *video,
					  const struct v4l2_mbus_framefmt *mbus,
					  struct v4l2_pix_format *pix)
{
	unsigned int bpl = pix->bytesperline;
	unsigned int min_bpl;
	unsigned int i;

	memset(pix, 0, sizeof(*pix));
	pix->width = mbus->width;
	pix->height = mbus->height;

	for (i = 0; i < ARRAY_SIZE(formats); ++i) {
		if (formats[i].code == mbus->code)
			break;
	}

	if (WARN_ON(i == ARRAY_SIZE(formats)))
		return 0;

	min_bpl = pix->width * formats[i].bpp;

	/* Clamp the requested bytes per line value. If the maximum bytes per
	 * line value is zero, the module doesn't support user configurable line
	 * sizes. Override the requested value with the minimum in that case.
	 */
	if (video->bpl_max)
		bpl = clamp(bpl, min_bpl, video->bpl_max);
	else
		bpl = min_bpl;

	if (!video->bpl_zero_padding || bpl != min_bpl)
		bpl = ALIGN(bpl, video->bpl_alignment);

	pix->pixelformat = formats[i].pixelformat;
	pix->bytesperline = bpl;
	pix->sizeimage = pix->bytesperline * pix->height;
	pix->colorspace = mbus->colorspace;
	pix->field = mbus->field;

	return bpl - min_bpl;
}
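
/*
 * Worked example (illustrative only, added by the editor): for a 1280x720
 * frame in MEDIA_BUS_FMT_SGRBG10_1X10 the table above gives bpp = 2, so
 * min_bpl = 1280 * 2 = 2560 bytes. A requested bytesperline of 3000 is
 * clamped to [2560, bpl_max] (or forced to 2560 when bpl_max is zero), the
 * result is aligned to bpl_alignment unless the node supports zero padding
 * and the value already equals min_bpl, and sizeimage becomes
 * bytesperline * 720.
 */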

/*
 * isp_video_pix_to_mbus - Convert v4l2_pix_format to v4l2_mbus_framefmt
 * @pix: v4l2_pix_format format (input)
 * @mbus: v4l2_mbus_framefmt format (output)
 *
 * Fill the output mbus structure with the width, height, colorspace, field
 * and the media bus code matching the pix format's pixel format. An
 * unsupported pixel format selects the last (catch-all) entry of the formats
 * table.
 */
static void isp_video_pix_to_mbus(const struct v4l2_pix_format *pix,
				  struct v4l2_mbus_framefmt *mbus)
{
	unsigned int i;

	memset(mbus, 0, sizeof(*mbus));
	mbus->width = pix->width;
	mbus->height = pix->height;

	/* Skip the last format in the loop so that it will be selected if no
	 * match is found.
	 */
	for (i = 0; i < ARRAY_SIZE(formats) - 1; ++i) {
		if (formats[i].pixelformat == pix->pixelformat)
			break;
	}

	mbus->code = formats[i].code;
	mbus->colorspace = pix->colorspace;
	mbus->field = pix->field;
}

static struct v4l2_subdev *
isp_video_remote_subdev(struct isp_video *video, u32 *pad)
{
	struct media_pad *remote;

	remote = media_entity_remote_pad(&video->pad);

	if (remote == NULL ||
	    media_entity_type(remote->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
		return NULL;

	if (pad)
		*pad = remote->index;

	return media_entity_to_v4l2_subdev(remote->entity);
}

/* Walk the media graph to fill the pipeline's entity mask and to locate the
 * ISP video instance at the far end of the pipeline.
 */
static int isp_video_get_graph_data(struct isp_video *video,
				    struct isp_pipeline *pipe)
{
	struct media_entity_graph graph;
	struct media_entity *entity = &video->video.entity;
	struct media_device *mdev = entity->parent;
	struct isp_video *far_end = NULL;

	mutex_lock(&mdev->graph_mutex);
	media_entity_graph_walk_start(&graph, entity);

	while ((entity = media_entity_graph_walk_next(&graph))) {
		struct isp_video *__video;

		pipe->entities |= 1 << entity->id;

		if (far_end != NULL)
			continue;

		if (entity == &video->video.entity)
			continue;

		if (media_entity_type(entity) != MEDIA_ENT_T_DEVNODE)
			continue;

		__video = to_isp_video(media_entity_to_video_device(entity));
		if (__video->type != video->type)
			far_end = __video;
	}

	mutex_unlock(&mdev->graph_mutex);

	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
		pipe->input = far_end;
		pipe->output = video;
	} else {
		if (far_end == NULL)
			return -EPIPE;

		pipe->input = video;
		pipe->output = far_end;
	}

	return 0;
}

static int
__isp_video_get_format(struct isp_video *video, struct v4l2_format *format)
{
	struct v4l2_subdev_format fmt;
	struct v4l2_subdev *subdev;
	u32 pad;
	int ret;

	subdev = isp_video_remote_subdev(video, &pad);
	if (subdev == NULL)
		return -EINVAL;

	fmt.pad = pad;
	fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;

	mutex_lock(&video->mutex);
	ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
	mutex_unlock(&video->mutex);

	if (ret)
		return ret;

	format->type = video->type;
	return isp_video_mbus_to_pix(video, &fmt.format, &format->fmt.pix);
}

static int
isp_video_check_format(struct isp_video *video, struct isp_video_fh *vfh)
{
	struct v4l2_format format;
	int ret;

	memcpy(&format, &vfh->format, sizeof(format));
	ret = __isp_video_get_format(video, &format);
	if (ret < 0)
		return ret;

	if (vfh->format.fmt.pix.pixelformat != format.fmt.pix.pixelformat ||
	    vfh->format.fmt.pix.height != format.fmt.pix.height ||
	    vfh->format.fmt.pix.width != format.fmt.pix.width ||
	    vfh->format.fmt.pix.bytesperline != format.fmt.pix.bytesperline ||
	    vfh->format.fmt.pix.sizeimage != format.fmt.pix.sizeimage ||
	    vfh->format.fmt.pix.field != format.fmt.pix.field)
		return -EINVAL;

	return 0;
}

/* -----------------------------------------------------------------------------
 * Video queue operations
 */

static int isp_video_queue_setup(struct vb2_queue *queue,
				 const struct v4l2_format *fmt,
				 unsigned int *count, unsigned int *num_planes,
				 unsigned int sizes[], void *alloc_ctxs[])
{
	struct isp_video_fh *vfh = vb2_get_drv_priv(queue);
	struct isp_video *video = vfh->video;

	*num_planes = 1;

	sizes[0] = vfh->format.fmt.pix.sizeimage;
	if (sizes[0] == 0)
		return -EINVAL;

	alloc_ctxs[0] = video->alloc_ctx;

	*count = min(*count, video->capture_mem / PAGE_ALIGN(sizes[0]));

	return 0;
}

static int isp_video_buffer_prepare(struct vb2_buffer *buf)
{
	struct isp_video_fh *vfh = vb2_get_drv_priv(buf->vb2_queue);
	struct isp_buffer *buffer = to_isp_buffer(buf);
	struct isp_video *video = vfh->video;
	dma_addr_t addr;

	/* Refuse to prepare the buffer if the video node has registered an
	 * error. We don't need to take any lock here as the operation is
	 * inherently racy. The authoritative check will be performed in the
	 * queue handler, which can't return an error, this check is just a best
	 * effort to notify userspace as early as possible.
	 */
	if (unlikely(video->error))
		return -EIO;

	addr = vb2_dma_contig_plane_dma_addr(buf, 0);
	if (!IS_ALIGNED(addr, 32)) {
		dev_dbg(video->isp->dev,
			"Buffer address must be aligned to a 32-byte boundary.\n");
		return -EINVAL;
	}

	vb2_set_plane_payload(&buffer->vb, 0, vfh->format.fmt.pix.sizeimage);
	buffer->dma = addr;

	return 0;
}

/*
 * isp_video_buffer_queue - Add buffer to streaming queue
 * @buf: Video buffer
 *
 * In memory-to-memory mode, start streaming on the pipeline if buffers are
 * queued on both the input and the output, if the pipeline isn't already busy.
 * If the pipeline is busy, it will be restarted in the output module interrupt
 * handler.
 */
static void isp_video_buffer_queue(struct vb2_buffer *buf)
{
	struct isp_video_fh *vfh = vb2_get_drv_priv(buf->vb2_queue);
	struct isp_buffer *buffer = to_isp_buffer(buf);
	struct isp_video *video = vfh->video;
	struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
	enum isp_pipeline_state state;
	unsigned long flags;
	unsigned int empty;
	unsigned int start;

	spin_lock_irqsave(&video->irqlock, flags);

	if (unlikely(video->error)) {
		vb2_buffer_done(&buffer->vb, VB2_BUF_STATE_ERROR);
		spin_unlock_irqrestore(&video->irqlock, flags);
		return;
	}

	empty = list_empty(&video->dmaqueue);
	list_add_tail(&buffer->irqlist, &video->dmaqueue);

	spin_unlock_irqrestore(&video->irqlock, flags);

	if (empty) {
		if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
			state = ISP_PIPELINE_QUEUE_OUTPUT;
		else
			state = ISP_PIPELINE_QUEUE_INPUT;

		spin_lock_irqsave(&pipe->lock, flags);
		pipe->state |= state;
		video->ops->queue(video, buffer);
		video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_QUEUED;

		start = isp_pipeline_ready(pipe);
		if (start)
			pipe->state |= ISP_PIPELINE_STREAM;
		spin_unlock_irqrestore(&pipe->lock, flags);

		if (start)
			omap3isp_pipeline_set_stream(pipe,
						ISP_PIPELINE_STREAM_SINGLESHOT);
	}
}

static const struct vb2_ops isp_video_queue_ops = {
	.queue_setup = isp_video_queue_setup,
	.buf_prepare = isp_video_buffer_prepare,
	.buf_queue = isp_video_buffer_queue,
};
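
/*
 * Illustrative sketch (added by the editor, not part of this driver): the
 * isp_video_operations .queue callback invoked from isp_video_buffer_queue()
 * above typically just programs the buffer's DMA address into the
 * submodule's output address register. The foo_* names are placeholders.
 *
 *	static int foo_video_queue(struct isp_video *video,
 *				   struct isp_buffer *buffer)
 *	{
 *		foo_set_outaddr(to_foo_device(video), buffer->dma);
 *		return 0;
 *	}
 *
 * In isp_video_buffer_queue() the callback runs under the pipeline spinlock
 * with interrupts disabled, so it must not sleep.
 */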

/*
 * omap3isp_video_buffer_next - Complete the current buffer and return the next
 * @video: ISP video object
 *
 * Remove the current video buffer from the DMA queue and fill its timestamp and
 * field count before handing it back to videobuf2.
 *
 * For capture video nodes the buffer state is set to VB2_BUF_STATE_DONE if no
 * error has been flagged in the pipeline, or to VB2_BUF_STATE_ERROR otherwise.
 * For video output nodes the buffer state is always set to VB2_BUF_STATE_DONE.
 *
 * The DMA queue is expected to contain at least one buffer.
 *
 * Return a pointer to the next buffer in the DMA queue, or NULL if the queue is
 * empty.
 */
struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video)
{
	struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
	enum isp_pipeline_state state;
	struct isp_buffer *buf;
	unsigned long flags;

	spin_lock_irqsave(&video->irqlock, flags);
	if (WARN_ON(list_empty(&video->dmaqueue))) {
		spin_unlock_irqrestore(&video->irqlock, flags);
		return NULL;
	}

	buf = list_first_entry(&video->dmaqueue, struct isp_buffer,
			       irqlist);
	list_del(&buf->irqlist);
	spin_unlock_irqrestore(&video->irqlock, flags);

	v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);

	/* Do frame number propagation only if this is the output video node.
	 * Frame number either comes from the CSI receivers or it gets
	 * incremented here if H3A is not active.
	 * Note: There is no guarantee that the output buffer will finish
	 * first, so the input number might lag behind by 1 in some cases.
	 */
	if (video == pipe->output && !pipe->do_propagation)
		buf->vb.v4l2_buf.sequence =
			atomic_inc_return(&pipe->frame_number);
	else
		buf->vb.v4l2_buf.sequence = atomic_read(&pipe->frame_number);

	if (pipe->field != V4L2_FIELD_NONE)
		buf->vb.v4l2_buf.sequence /= 2;

	buf->vb.v4l2_buf.field = pipe->field;

	/* Report pipeline errors to userspace on the capture device side. */
	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->error) {
		state = VB2_BUF_STATE_ERROR;
		pipe->error = false;
	} else {
		state = VB2_BUF_STATE_DONE;
	}

	vb2_buffer_done(&buf->vb, state);

	spin_lock_irqsave(&video->irqlock, flags);

	if (list_empty(&video->dmaqueue)) {
		spin_unlock_irqrestore(&video->irqlock, flags);

		if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
			state = ISP_PIPELINE_QUEUE_OUTPUT
			      | ISP_PIPELINE_STREAM;
		else
			state = ISP_PIPELINE_QUEUE_INPUT
			      | ISP_PIPELINE_STREAM;

		spin_lock_irqsave(&pipe->lock, flags);
		pipe->state &= ~state;
		if (video->pipe.stream_state == ISP_PIPELINE_STREAM_CONTINUOUS)
			video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
		spin_unlock_irqrestore(&pipe->lock, flags);
		return NULL;
	}

	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->input != NULL) {
		spin_lock(&pipe->lock);
		pipe->state &= ~ISP_PIPELINE_STREAM;
		spin_unlock(&pipe->lock);
	}

	buf = list_first_entry(&video->dmaqueue, struct isp_buffer,
			       irqlist);

	spin_unlock_irqrestore(&video->irqlock, flags);

	return buf;
}
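
/*
 * Illustrative sketch (added by the editor, not part of this driver):
 * submodule interrupt handlers are the expected callers of
 * omap3isp_video_buffer_next(). On a frame-done interrupt they complete the
 * current buffer and, if another one is queued, program its DMA address.
 * The foo_* names are placeholders.
 *
 *	buffer = omap3isp_video_buffer_next(&foo->video_out);
 *	if (buffer != NULL)
 *		foo_set_outaddr(foo, buffer->dma);
 *
 * When NULL is returned the queue has underrun and the hardware is left idle
 * until a new buffer is queued.
 */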

/*
 * omap3isp_video_cancel_stream - Cancel stream on a video node
 * @video: ISP video object
 *
 * Cancelling a stream marks all buffers on the video node as erroneous and
 * makes sure no new buffer can be queued.
 */
void omap3isp_video_cancel_stream(struct isp_video *video)
{
	unsigned long flags;

	spin_lock_irqsave(&video->irqlock, flags);

	while (!list_empty(&video->dmaqueue)) {
		struct isp_buffer *buf;

		buf = list_first_entry(&video->dmaqueue,
				       struct isp_buffer, irqlist);
		list_del(&buf->irqlist);
		vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
	}

	video->error = true;

	spin_unlock_irqrestore(&video->irqlock, flags);
}

/*
 * omap3isp_video_resume - Perform resume operation on the buffers
 * @video: ISP video object
 * @continuous: Pipeline is in single shot mode if 0 or continuous mode otherwise
 *
 * This function is intended to be used in suspend/resume scenarios. It
 * requests the video queue layer to discard buffers marked as DONE if it's in
 * continuous mode and requests the ISP modules to queue the ACTIVE buffer
 * again if there is one.
 */
void omap3isp_video_resume(struct isp_video *video, int continuous)
{
	struct isp_buffer *buf = NULL;

	if (continuous && video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
		mutex_lock(&video->queue_lock);
		vb2_discard_done(video->queue);
		mutex_unlock(&video->queue_lock);
	}

	if (!list_empty(&video->dmaqueue)) {
		buf = list_first_entry(&video->dmaqueue,
				       struct isp_buffer, irqlist);
		video->ops->queue(video, buf);
		video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_QUEUED;
	} else {
		if (continuous)
			video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
	}
}

/* -----------------------------------------------------------------------------
 * V4L2 ioctls
 */

static int
isp_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
{
	struct isp_video *video = video_drvdata(file);

	strlcpy(cap->driver, ISP_VIDEO_DRIVER_NAME, sizeof(cap->driver));
	strlcpy(cap->card, video->video.name, sizeof(cap->card));
	strlcpy(cap->bus_info, "media", sizeof(cap->bus_info));

	cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT
		| V4L2_CAP_STREAMING | V4L2_CAP_DEVICE_CAPS;

	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
		cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
	else
		cap->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;

	return 0;
}

static int
isp_video_get_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);

	if (format->type != video->type)
		return -EINVAL;

	mutex_lock(&video->mutex);
	*format = vfh->format;
	mutex_unlock(&video->mutex);

	return 0;
}

static int
isp_video_set_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);
	struct v4l2_mbus_framefmt fmt;

	if (format->type != video->type)
		return -EINVAL;

	/* Replace unsupported field orders with sane defaults. */
	switch (format->fmt.pix.field) {
	case V4L2_FIELD_NONE:
		/* Progressive is supported everywhere. */
		break;
	case V4L2_FIELD_ALTERNATE:
		/* ALTERNATE is not supported on output nodes. */
		if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
			format->fmt.pix.field = V4L2_FIELD_NONE;
		break;
	case V4L2_FIELD_INTERLACED:
		/* The ISP has no concept of video standard, select the
		 * top-bottom order when the unqualified interlaced order is
		 * requested.
		 */
		format->fmt.pix.field = V4L2_FIELD_INTERLACED_TB;
		/* Fall-through */
	case V4L2_FIELD_INTERLACED_TB:
	case V4L2_FIELD_INTERLACED_BT:
		/* Interlaced orders are only supported at the CCDC output. */
		if (video != &video->isp->isp_ccdc.video_out)
			format->fmt.pix.field = V4L2_FIELD_NONE;
		break;
	case V4L2_FIELD_TOP:
	case V4L2_FIELD_BOTTOM:
	case V4L2_FIELD_SEQ_TB:
	case V4L2_FIELD_SEQ_BT:
	default:
		/* All other field orders are currently unsupported, default to
		 * progressive.
		 */
		format->fmt.pix.field = V4L2_FIELD_NONE;
		break;
	}

	/* Fill the bytesperline and sizeimage fields by converting to media bus
	 * format and back to pixel format.
	 */
	isp_video_pix_to_mbus(&format->fmt.pix, &fmt);
	isp_video_mbus_to_pix(video, &fmt, &format->fmt.pix);

	mutex_lock(&video->mutex);
	vfh->format = *format;
	mutex_unlock(&video->mutex);

	return 0;
}

static int
isp_video_try_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct isp_video *video = video_drvdata(file);
	struct v4l2_subdev_format fmt;
	struct v4l2_subdev *subdev;
	u32 pad;
	int ret;

	if (format->type != video->type)
		return -EINVAL;

	subdev = isp_video_remote_subdev(video, &pad);
	if (subdev == NULL)
		return -EINVAL;

	isp_video_pix_to_mbus(&format->fmt.pix, &fmt.format);

	fmt.pad = pad;
	fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
	if (ret)
		return ret == -ENOIOCTLCMD ? -ENOTTY : ret;

	isp_video_mbus_to_pix(video, &fmt.format, &format->fmt.pix);
	return 0;
}

static int
isp_video_cropcap(struct file *file, void *fh, struct v4l2_cropcap *cropcap)
{
	struct isp_video *video = video_drvdata(file);
	struct v4l2_subdev *subdev;
	int ret;

	subdev = isp_video_remote_subdev(video, NULL);
	if (subdev == NULL)
		return -EINVAL;

	mutex_lock(&video->mutex);
	ret = v4l2_subdev_call(subdev, video, cropcap, cropcap);
	mutex_unlock(&video->mutex);

	return ret == -ENOIOCTLCMD ? -ENOTTY : ret;
}

static int
isp_video_get_crop(struct file *file, void *fh, struct v4l2_crop *crop)
{
	struct isp_video *video = video_drvdata(file);
	struct v4l2_subdev_format format;
	struct v4l2_subdev *subdev;
	u32 pad;
	int ret;

	subdev = isp_video_remote_subdev(video, &pad);
	if (subdev == NULL)
		return -EINVAL;

	/* Try the get crop operation first and fallback to get format if not
	 * implemented.
	 */
	ret = v4l2_subdev_call(subdev, video, g_crop, crop);
	if (ret != -ENOIOCTLCMD)
		return ret;

	format.pad = pad;
	format.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &format);
	if (ret < 0)
		return ret == -ENOIOCTLCMD ? -ENOTTY : ret;

	crop->c.left = 0;
	crop->c.top = 0;
	crop->c.width = format.format.width;
	crop->c.height = format.format.height;

	return 0;
}

static int
isp_video_set_crop(struct file *file, void *fh, const struct v4l2_crop *crop)
{
	struct isp_video *video = video_drvdata(file);
	struct v4l2_subdev *subdev;
	int ret;

	subdev = isp_video_remote_subdev(video, NULL);
	if (subdev == NULL)
		return -EINVAL;

	mutex_lock(&video->mutex);
	ret = v4l2_subdev_call(subdev, video, s_crop, crop);
	mutex_unlock(&video->mutex);

	return ret == -ENOIOCTLCMD ? -ENOTTY : ret;
}

static int
isp_video_get_param(struct file *file, void *fh, struct v4l2_streamparm *a)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);

	if (video->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
	    video->type != a->type)
		return -EINVAL;

	memset(a, 0, sizeof(*a));
	a->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
	a->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
	a->parm.output.timeperframe = vfh->timeperframe;

	return 0;
}

static int
isp_video_set_param(struct file *file, void *fh, struct v4l2_streamparm *a)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);

	if (video->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
	    video->type != a->type)
		return -EINVAL;

	if (a->parm.output.timeperframe.denominator == 0)
		a->parm.output.timeperframe.denominator = 1;

	vfh->timeperframe = a->parm.output.timeperframe;

	return 0;
}

static int
isp_video_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *rb)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);
	int ret;

	mutex_lock(&video->queue_lock);
	ret = vb2_reqbufs(&vfh->queue, rb);
	mutex_unlock(&video->queue_lock);

	return ret;
}

static int
isp_video_querybuf(struct file *file, void *fh, struct v4l2_buffer *b)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);
	int ret;

	mutex_lock(&video->queue_lock);
	ret = vb2_querybuf(&vfh->queue, b);
	mutex_unlock(&video->queue_lock);

	return ret;
}

static int
isp_video_qbuf(struct file *file, void *fh, struct v4l2_buffer *b)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);
	int ret;

	mutex_lock(&video->queue_lock);
	ret = vb2_qbuf(&vfh->queue, b);
	mutex_unlock(&video->queue_lock);

	return ret;
}

static int
isp_video_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);
	int ret;

	mutex_lock(&video->queue_lock);
	ret = vb2_dqbuf(&vfh->queue, b, file->f_flags & O_NONBLOCK);
	mutex_unlock(&video->queue_lock);

	return ret;
}

static int isp_video_check_external_subdevs(struct isp_video *video,
					    struct isp_pipeline *pipe)
{
	struct isp_device *isp = video->isp;
	struct media_entity *ents[] = {
		&isp->isp_csi2a.subdev.entity,
		&isp->isp_csi2c.subdev.entity,
		&isp->isp_ccp2.subdev.entity,
		&isp->isp_ccdc.subdev.entity
	};
	struct media_pad *source_pad;
	struct media_entity *source = NULL;
	struct media_entity *sink;
	struct v4l2_subdev_format fmt;
	struct v4l2_ext_controls ctrls;
	struct v4l2_ext_control ctrl;
	unsigned int i;
	int ret;

	/* Memory-to-memory pipelines have no external subdev. */
	if (pipe->input != NULL)
		return 0;

	for (i = 0; i < ARRAY_SIZE(ents); i++) {
		/* Is the entity part of the pipeline? */
		if (!(pipe->entities & (1 << ents[i]->id)))
			continue;

		/* ISP entities always have their sink pad at index 0. Find
		 * the source.
		 */
		source_pad = media_entity_remote_pad(&ents[i]->pads[0]);
		if (source_pad == NULL)
			continue;

		source = source_pad->entity;
		sink = ents[i];
		break;
	}

	if (!source) {
		dev_warn(isp->dev, "can't find source, failing now\n");
		return -EINVAL;
	}

	if (media_entity_type(source) != MEDIA_ENT_T_V4L2_SUBDEV)
		return 0;

	pipe->external = media_entity_to_v4l2_subdev(source);

	fmt.pad = source_pad->index;
	fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	ret = v4l2_subdev_call(media_entity_to_v4l2_subdev(sink),
			       pad, get_fmt, NULL, &fmt);
	if (unlikely(ret < 0)) {
		dev_warn(isp->dev, "get_fmt on pipeline sink pad failed (%d)\n",
			 ret);
		return ret;
	}

	pipe->external_width =
		omap3isp_video_format_info(fmt.format.code)->width;

	memset(&ctrls, 0, sizeof(ctrls));
	memset(&ctrl, 0, sizeof(ctrl));

	ctrl.id = V4L2_CID_PIXEL_RATE;

	ctrls.count = 1;
	ctrls.controls = &ctrl;

	ret = v4l2_g_ext_ctrls(pipe->external->ctrl_handler, &ctrls);
	if (ret < 0) {
		dev_warn(isp->dev, "no pixel rate control in subdev %s\n",
			 pipe->external->name);
		return ret;
	}

	pipe->external_rate = ctrl.value64;

	if (pipe->entities & (1 << isp->isp_ccdc.subdev.entity.id)) {
		unsigned int rate = UINT_MAX;
		/*
		 * Check that the maximum allowed CCDC pixel rate isn't
		 * exceeded by the pixel rate.
		 */
		omap3isp_ccdc_max_rate(&isp->isp_ccdc, &rate);
		if (pipe->external_rate > rate)
			return -ENOSPC;
	}

	return 0;
}

/*
 * Stream management
 *
 * Every ISP pipeline has a single input and a single output. The input can be
 * either a sensor or a video node. The output is always a video node.
 *
 * As every pipeline has an output video node, the ISP video object at the
 * pipeline output stores the pipeline state. It tracks the streaming state of
 * both the input and output, as well as the availability of buffers.
 *
 * In sensor-to-memory mode, frames are always available at the pipeline input.
 * Starting the sensor usually requires I2C transfers and must be done in
 * interruptible context. The pipeline is started and stopped synchronously
 * to the stream on/off commands. All modules in the pipeline will get their
 * subdev set stream handler called. The module at the end of the pipeline must
 * delay starting the hardware until buffers are available at its output.
 *
 * In memory-to-memory mode, starting/stopping the stream requires
 * synchronization between the input and output. ISP modules can't be stopped
 * in the middle of a frame, and at least some of the modules seem to become
 * busy as soon as they're started, even if they don't receive a frame start
 * event. For that reason frames need to be processed in single-shot mode. The
 * driver needs to wait until a frame is completely processed and written to
 * memory before restarting the pipeline for the next frame. Pipelined
 * processing might be possible but requires more testing.
 *
 * Stream start must be delayed until buffers are available at both the input
 * and output. The pipeline must be started in the videobuf2 queue callback
 * with the buffers queue spinlock held. The modules' subdev set stream
 * operation must not sleep.
 */
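
/*
 * Illustrative note (added by the editor, not from the original source): seen
 * from userspace, a capture node in such a pipeline is typically driven with
 * the standard V4L2 sequence of VIDIOC_S_FMT, VIDIOC_REQBUFS, VIDIOC_QBUF,
 * VIDIOC_STREAMON, a VIDIOC_DQBUF/VIDIOC_QBUF loop and finally
 * VIDIOC_STREAMOFF, with the media controller links configured beforehand
 * and left untouched while streaming.
 */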

static int
isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);
	enum isp_pipeline_state state;
	struct isp_pipeline *pipe;
	unsigned long flags;
	int ret;

	if (type != video->type)
		return -EINVAL;

	mutex_lock(&video->stream_lock);

	/* Start streaming on the pipeline. No link touching an entity in the
	 * pipeline can be activated or deactivated once streaming is started.
	 */
	pipe = video->video.entity.pipe
	     ? to_isp_pipeline(&video->video.entity) : &video->pipe;

	pipe->entities = 0;

	if (video->isp->pdata && video->isp->pdata->set_constraints)
		video->isp->pdata->set_constraints(video->isp, true);
	pipe->l3_ick = clk_get_rate(video->isp->clock[ISP_CLK_L3_ICK]);
	pipe->max_rate = pipe->l3_ick;

	ret = media_entity_pipeline_start(&video->video.entity, &pipe->pipe);
	if (ret < 0)
		goto err_pipeline_start;

	/* Verify that the currently configured format matches the output of
	 * the connected subdev.
	 */
	ret = isp_video_check_format(video, vfh);
	if (ret < 0)
		goto err_check_format;

	video->bpl_padding = ret;
	video->bpl_value = vfh->format.fmt.pix.bytesperline;

	ret = isp_video_get_graph_data(video, pipe);
	if (ret < 0)
		goto err_check_format;

	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
		state = ISP_PIPELINE_STREAM_OUTPUT | ISP_PIPELINE_IDLE_OUTPUT;
	else
		state = ISP_PIPELINE_STREAM_INPUT | ISP_PIPELINE_IDLE_INPUT;

	ret = isp_video_check_external_subdevs(video, pipe);
	if (ret < 0)
		goto err_check_format;

	pipe->error = false;

	spin_lock_irqsave(&pipe->lock, flags);
	pipe->state &= ~ISP_PIPELINE_STREAM;
	pipe->state |= state;
	spin_unlock_irqrestore(&pipe->lock, flags);

	/* Set the maximum time per frame as the value requested by userspace.
	 * This is a soft limit that can be overridden if the hardware doesn't
	 * support the requested limit.
	 */
	if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
		pipe->max_timeperframe = vfh->timeperframe;

	video->queue = &vfh->queue;
	INIT_LIST_HEAD(&video->dmaqueue);
	atomic_set(&pipe->frame_number, -1);
	pipe->field = vfh->format.fmt.pix.field;

	mutex_lock(&video->queue_lock);
	ret = vb2_streamon(&vfh->queue, type);
	mutex_unlock(&video->queue_lock);
	if (ret < 0)
		goto err_check_format;

	/* In sensor-to-memory mode, the stream can be started synchronously
	 * to the stream on command. In memory-to-memory mode, it will be
	 * started when buffers are queued on both the input and output.
	 */
	if (pipe->input == NULL) {
		ret = omap3isp_pipeline_set_stream(pipe,
					ISP_PIPELINE_STREAM_CONTINUOUS);
		if (ret < 0)
			goto err_set_stream;
		spin_lock_irqsave(&video->irqlock, flags);
		if (list_empty(&video->dmaqueue))
			video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
		spin_unlock_irqrestore(&video->irqlock, flags);
	}

	mutex_unlock(&video->stream_lock);
	return 0;

err_set_stream:
	mutex_lock(&video->queue_lock);
	vb2_streamoff(&vfh->queue, type);
	mutex_unlock(&video->queue_lock);
err_check_format:
	media_entity_pipeline_stop(&video->video.entity);
err_pipeline_start:
	if (video->isp->pdata && video->isp->pdata->set_constraints)
		video->isp->pdata->set_constraints(video->isp, false);
	/* The DMA queue must be emptied here, otherwise CCDC interrupts that
	 * will get triggered the next time the CCDC is powered up will try to
	 * access buffers that might have been freed but still present in the
	 * DMA queue. This can easily get triggered if the above
	 * omap3isp_pipeline_set_stream() call fails on a system with a
	 * free-running sensor.
	 */
	INIT_LIST_HEAD(&video->dmaqueue);
	video->queue = NULL;

	mutex_unlock(&video->stream_lock);
	return ret;
}

static int
isp_video_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);
	struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
	enum isp_pipeline_state state;
	unsigned int streaming;
	unsigned long flags;

	if (type != video->type)
		return -EINVAL;

	mutex_lock(&video->stream_lock);

	/* Nothing to do if the video node isn't streaming. */
	mutex_lock(&video->queue_lock);
	streaming = vb2_is_streaming(&vfh->queue);
	mutex_unlock(&video->queue_lock);

	if (!streaming)
		goto done;

	/* Update the pipeline state. */
	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
		state = ISP_PIPELINE_STREAM_OUTPUT
		      | ISP_PIPELINE_QUEUE_OUTPUT;
	else
		state = ISP_PIPELINE_STREAM_INPUT
		      | ISP_PIPELINE_QUEUE_INPUT;

	spin_lock_irqsave(&pipe->lock, flags);
	pipe->state &= ~state;
	spin_unlock_irqrestore(&pipe->lock, flags);

	/* Stop the stream. */
	omap3isp_pipeline_set_stream(pipe, ISP_PIPELINE_STREAM_STOPPED);
	omap3isp_video_cancel_stream(video);

	mutex_lock(&video->queue_lock);
	vb2_streamoff(&vfh->queue, type);
	mutex_unlock(&video->queue_lock);
	video->queue = NULL;
	video->error = false;

	if (video->isp->pdata && video->isp->pdata->set_constraints)
		video->isp->pdata->set_constraints(video->isp, false);
	media_entity_pipeline_stop(&video->video.entity);

done:
	mutex_unlock(&video->stream_lock);
	return 0;
}

static int
isp_video_enum_input(struct file *file, void *fh, struct v4l2_input *input)
{
	if (input->index > 0)
		return -EINVAL;

	strlcpy(input->name, "camera", sizeof(input->name));
	input->type = V4L2_INPUT_TYPE_CAMERA;

	return 0;
}

static int
isp_video_g_input(struct file *file, void *fh, unsigned int *input)
{
	*input = 0;

	return 0;
}

static int
isp_video_s_input(struct file *file, void *fh, unsigned int input)
{
	return input == 0 ? 0 : -EINVAL;
}

static const struct v4l2_ioctl_ops isp_video_ioctl_ops = {
	.vidioc_querycap		= isp_video_querycap,
	.vidioc_g_fmt_vid_cap		= isp_video_get_format,
	.vidioc_s_fmt_vid_cap		= isp_video_set_format,
	.vidioc_try_fmt_vid_cap		= isp_video_try_format,
	.vidioc_g_fmt_vid_out		= isp_video_get_format,
	.vidioc_s_fmt_vid_out		= isp_video_set_format,
	.vidioc_try_fmt_vid_out		= isp_video_try_format,
	.vidioc_cropcap			= isp_video_cropcap,
	.vidioc_g_crop			= isp_video_get_crop,
	.vidioc_s_crop			= isp_video_set_crop,
	.vidioc_g_parm			= isp_video_get_param,
	.vidioc_s_parm			= isp_video_set_param,
	.vidioc_reqbufs			= isp_video_reqbufs,
	.vidioc_querybuf		= isp_video_querybuf,
	.vidioc_qbuf			= isp_video_qbuf,
	.vidioc_dqbuf			= isp_video_dqbuf,
	.vidioc_streamon		= isp_video_streamon,
	.vidioc_streamoff		= isp_video_streamoff,
	.vidioc_enum_input		= isp_video_enum_input,
	.vidioc_g_input			= isp_video_g_input,
	.vidioc_s_input			= isp_video_s_input,
};

/* -----------------------------------------------------------------------------
 * V4L2 file operations
 */

static int isp_video_open(struct file *file)
{
	struct isp_video *video = video_drvdata(file);
	struct isp_video_fh *handle;
	struct vb2_queue *queue;
	int ret = 0;

	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (handle == NULL)
		return -ENOMEM;

	v4l2_fh_init(&handle->vfh, &video->video);
	v4l2_fh_add(&handle->vfh);

	/* If this is the first user, initialise the pipeline. */
	if (omap3isp_get(video->isp) == NULL) {
		ret = -EBUSY;
		goto done;
	}

	ret = omap3isp_pipeline_pm_use(&video->video.entity, 1);
	if (ret < 0) {
		omap3isp_put(video->isp);
		goto done;
	}

	queue = &handle->queue;
	queue->type = video->type;
	queue->io_modes = VB2_MMAP | VB2_USERPTR;
	queue->drv_priv = handle;
	queue->ops = &isp_video_queue_ops;
	queue->mem_ops = &vb2_dma_contig_memops;
	queue->buf_struct_size = sizeof(struct isp_buffer);
	queue->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;

	ret = vb2_queue_init(&handle->queue);
	if (ret < 0) {
		omap3isp_put(video->isp);
		goto done;
	}

	memset(&handle->format, 0, sizeof(handle->format));
	handle->format.type = video->type;
	handle->timeperframe.denominator = 1;

	handle->video = video;
	file->private_data = &handle->vfh;

done:
	if (ret < 0) {
		v4l2_fh_del(&handle->vfh);
		kfree(handle);
	}

	return ret;
}

static int isp_video_release(struct file *file)
{
	struct isp_video *video = video_drvdata(file);
	struct v4l2_fh *vfh = file->private_data;
	struct isp_video_fh *handle = to_isp_video_fh(vfh);

	/* Disable streaming and free the buffers queue resources. */
	isp_video_streamoff(file, vfh, video->type);

	mutex_lock(&video->queue_lock);
	vb2_queue_release(&handle->queue);
	mutex_unlock(&video->queue_lock);

	omap3isp_pipeline_pm_use(&video->video.entity, 0);

	/* Release the file handle. */
	v4l2_fh_del(vfh);
	kfree(handle);
	file->private_data = NULL;

	omap3isp_put(video->isp);

	return 0;
}

static unsigned int isp_video_poll(struct file *file, poll_table *wait)
{
	struct isp_video_fh *vfh = to_isp_video_fh(file->private_data);
	struct isp_video *video = video_drvdata(file);
	int ret;

	mutex_lock(&video->queue_lock);
	ret = vb2_poll(&vfh->queue, file, wait);
	mutex_unlock(&video->queue_lock);

	return ret;
}

static int isp_video_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct isp_video_fh *vfh = to_isp_video_fh(file->private_data);

	return vb2_mmap(&vfh->queue, vma);
}

static struct v4l2_file_operations isp_video_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = video_ioctl2,
	.open = isp_video_open,
	.release = isp_video_release,
	.poll = isp_video_poll,
	.mmap = isp_video_mmap,
};

/* -----------------------------------------------------------------------------
 * ISP video core
 */

static const struct isp_video_operations isp_video_dummy_ops = {
};

int omap3isp_video_init(struct isp_video *video, const char *name)
{
	const char *direction;
	int ret;

	switch (video->type) {
	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
		direction = "output";
		video->pad.flags = MEDIA_PAD_FL_SINK
				   | MEDIA_PAD_FL_MUST_CONNECT;
		break;
	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
		direction = "input";
		video->pad.flags = MEDIA_PAD_FL_SOURCE
				   | MEDIA_PAD_FL_MUST_CONNECT;
		video->video.vfl_dir = VFL_DIR_TX;
		break;

	default:
		return -EINVAL;
	}

	video->alloc_ctx = vb2_dma_contig_init_ctx(video->isp->dev);
	if (IS_ERR(video->alloc_ctx))
		return PTR_ERR(video->alloc_ctx);

	ret = media_entity_init(&video->video.entity, 1, &video->pad, 0);
	if (ret < 0) {
		vb2_dma_contig_cleanup_ctx(video->alloc_ctx);
		return ret;
	}

	mutex_init(&video->mutex);
	atomic_set(&video->active, 0);

	spin_lock_init(&video->pipe.lock);
	mutex_init(&video->stream_lock);
	mutex_init(&video->queue_lock);
	spin_lock_init(&video->irqlock);

	/* Initialize the video device. */
	if (video->ops == NULL)
		video->ops = &isp_video_dummy_ops;

	video->video.fops = &isp_video_fops;
	snprintf(video->video.name, sizeof(video->video.name),
		 "OMAP3 ISP %s %s", name, direction);
	video->video.vfl_type = VFL_TYPE_GRABBER;
	video->video.release = video_device_release_empty;
	video->video.ioctl_ops = &isp_video_ioctl_ops;
	video->pipe.stream_state = ISP_PIPELINE_STREAM_STOPPED;

	video_set_drvdata(&video->video, video);

	return 0;
}

void omap3isp_video_cleanup(struct isp_video *video)
{
	vb2_dma_contig_cleanup_ctx(video->alloc_ctx);
	media_entity_cleanup(&video->video.entity);
	mutex_destroy(&video->queue_lock);
	mutex_destroy(&video->stream_lock);
	mutex_destroy(&video->mutex);
}

int omap3isp_video_register(struct isp_video *video, struct v4l2_device *vdev)
{
	int ret;

	video->video.v4l2_dev = vdev;

	ret = video_register_device(&video->video, VFL_TYPE_GRABBER, -1);
	if (ret < 0)
		dev_err(video->isp->dev,
			"%s: could not register video device (%d)\n",
			__func__, ret);

	return ret;
}

void omap3isp_video_unregister(struct isp_video *video)
{
	if (video_is_registered(&video->video))
		video_unregister_device(&video->video);
}