// SPDX-License-Identifier: GPL-2.0
/*
 * Xilinx Video DMA
 *
 * Copyright (C) 2013-2015 Ideas on Board
 * Copyright (C) 2013-2015 Xilinx, Inc.
 *
 * Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
 *           Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 */

#include <linux/dma/xilinx_dma.h>
#include <linux/lcm.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/slab.h>

#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>

#include "xilinx-dma.h"
#include "xilinx-vip.h"
#include "xilinx-vipp.h"

#define XVIP_DMA_DEF_WIDTH		1920
#define XVIP_DMA_DEF_HEIGHT		1080

/* Minimum and maximum widths are expressed in bytes */
#define XVIP_DMA_MIN_WIDTH		1U
#define XVIP_DMA_MAX_WIDTH		65535U
#define XVIP_DMA_MIN_HEIGHT		1U
#define XVIP_DMA_MAX_HEIGHT		8191U

/* -----------------------------------------------------------------------------
 * Helper functions
 */

static struct v4l2_subdev *
xvip_dma_remote_subdev(struct media_pad *local, u32 *pad)
{
	struct media_pad *remote;

	remote = media_pad_remote_pad_first(local);
	if (!remote || !is_media_entity_v4l2_subdev(remote->entity))
		return NULL;

	if (pad)
		*pad = remote->index;

	return media_entity_to_v4l2_subdev(remote->entity);
}

static int xvip_dma_verify_format(struct xvip_dma *dma)
{
	struct v4l2_subdev_format fmt = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
	};
	struct v4l2_subdev *subdev;
	int ret;

	subdev = xvip_dma_remote_subdev(&dma->pad, &fmt.pad);
	if (subdev == NULL)
		return -EPIPE;

	ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
	if (ret < 0)
		return ret == -ENOIOCTLCMD ? -EINVAL : ret;

	if (dma->fmtinfo->code != fmt.format.code ||
	    dma->format.height != fmt.format.height ||
	    dma->format.width != fmt.format.width ||
	    dma->format.colorspace != fmt.format.colorspace)
		return -EINVAL;

	return 0;
}

/* -----------------------------------------------------------------------------
 * Pipeline Stream Management
 */

/**
 * xvip_pipeline_start_stop - Start or stop streaming on a pipeline
 * @pipe: The pipeline
 * @start: Start (when true) or stop (when false) the pipeline
 *
 * Walk the entities chain starting at the pipeline output video node and start
 * or stop all of them.
 *
 * Return: 0 if successful, or the return value of the failed video::s_stream
 * operation otherwise.
 */
static int xvip_pipeline_start_stop(struct xvip_pipeline *pipe, bool start)
{
	struct xvip_dma *dma = pipe->output;
	struct media_entity *entity;
	struct media_pad *pad;
	struct v4l2_subdev *subdev;
	int ret;

	entity = &dma->video.entity;
	while (1) {
		pad = &entity->pads[0];
		if (!(pad->flags & MEDIA_PAD_FL_SINK))
			break;

		pad = media_pad_remote_pad_first(pad);
		if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
			break;

		entity = pad->entity;
		subdev = media_entity_to_v4l2_subdev(entity);

		ret = v4l2_subdev_call(subdev, video, s_stream, start);
		if (start && ret < 0 && ret != -ENOIOCTLCMD)
			return ret;
	}

	return 0;
}

/**
 * xvip_pipeline_set_stream - Enable/disable streaming on a pipeline
 * @pipe: The pipeline
 * @on: Turn the stream on when true or off when false
 *
 * The pipeline is shared between all DMA engines connected at its input and
 * output. While the stream state of DMA engines can be controlled
 * independently, pipelines have a shared stream state that enables or disables
 * all entities in the pipeline. For this reason the pipeline uses a streaming
 * counter that tracks the number of DMA engines that have requested the stream
 * to be enabled.
 *
 * When called with the @on argument set to true, this function will increment
 * the pipeline streaming count. If the streaming count reaches the number of
 * DMA engines in the pipeline it will enable all entities that belong to the
 * pipeline.
 *
 * Similarly, when called with the @on argument set to false, this function will
 * decrement the pipeline streaming count and disable all entities in the
 * pipeline when the streaming count reaches zero.
 *
 * Return: 0 if successful, or the return value of the failed video::s_stream
 * operation otherwise. Stopping the pipeline never fails. The pipeline state is
 * not updated when the operation fails.
 */
static int xvip_pipeline_set_stream(struct xvip_pipeline *pipe, bool on)
{
	int ret = 0;

	mutex_lock(&pipe->lock);

	if (on) {
		if (pipe->stream_count == pipe->num_dmas - 1) {
			ret = xvip_pipeline_start_stop(pipe, true);
			if (ret < 0)
				goto done;
		}
		pipe->stream_count++;
	} else {
		if (--pipe->stream_count == 0)
			xvip_pipeline_start_stop(pipe, false);
	}

done:
	mutex_unlock(&pipe->lock);
	return ret;
}

static int xvip_pipeline_validate(struct xvip_pipeline *pipe,
				  struct xvip_dma *start)
{
	struct media_pipeline_pad_iter iter;
	unsigned int num_inputs = 0;
	unsigned int num_outputs = 0;
	struct media_pad *pad;

	/* Locate the video nodes in the pipeline. */
	media_pipeline_for_each_pad(&pipe->pipe, &iter, pad) {
		struct xvip_dma *dma;

		if (pad->entity->function != MEDIA_ENT_F_IO_V4L)
			continue;

		dma = to_xvip_dma(media_entity_to_video_device(pad->entity));

		if (dma->pad.flags & MEDIA_PAD_FL_SINK) {
			pipe->output = dma;
			num_outputs++;
		} else {
			num_inputs++;
		}
	}

	/* We need exactly one output and zero or one input. */
	if (num_outputs != 1 || num_inputs > 1)
		return -EPIPE;

	pipe->num_dmas = num_inputs + num_outputs;

	return 0;
}

static void __xvip_pipeline_cleanup(struct xvip_pipeline *pipe)
{
	pipe->num_dmas = 0;
	pipe->output = NULL;
}

/**
 * xvip_pipeline_cleanup - Cleanup the pipeline after streaming
 * @pipe: the pipeline
 *
 * Decrease the pipeline use count and clean it up if we were the last user.
 */
static void xvip_pipeline_cleanup(struct xvip_pipeline *pipe)
{
	mutex_lock(&pipe->lock);

	/* If we're the last user clean up the pipeline. */
	if (--pipe->use_count == 0)
		__xvip_pipeline_cleanup(pipe);

	mutex_unlock(&pipe->lock);
}

/**
 * xvip_pipeline_prepare - Prepare the pipeline for streaming
 * @pipe: the pipeline
 * @dma: DMA engine at one end of the pipeline
 *
 * Validate the pipeline if no user exists yet, otherwise just increase the use
 * count.
 *
 * Return: 0 if successful or -EPIPE if the pipeline is not valid.
 */
static int xvip_pipeline_prepare(struct xvip_pipeline *pipe,
				 struct xvip_dma *dma)
{
	int ret;

	mutex_lock(&pipe->lock);

	/* If we're the first user validate and initialize the pipeline. */
	if (pipe->use_count == 0) {
		ret = xvip_pipeline_validate(pipe, dma);
		if (ret < 0) {
			__xvip_pipeline_cleanup(pipe);
			goto done;
		}
	}

	pipe->use_count++;
	ret = 0;

done:
	mutex_unlock(&pipe->lock);
	return ret;
}

/* -----------------------------------------------------------------------------
 * videobuf2 queue operations
 */

/**
 * struct xvip_dma_buffer - Video DMA buffer
 * @buf: vb2 buffer base object
 * @queue: buffer list entry in the DMA engine queued buffers list
 * @dma: DMA channel that uses the buffer
 */
struct xvip_dma_buffer {
	struct vb2_v4l2_buffer buf;
	struct list_head queue;
	struct xvip_dma *dma;
};

#define to_xvip_dma_buffer(vb)	container_of(vb, struct xvip_dma_buffer, buf)

static void xvip_dma_complete(void *param)
{
	struct xvip_dma_buffer *buf = param;
	struct xvip_dma *dma = buf->dma;

	spin_lock(&dma->queued_lock);
	list_del(&buf->queue);
	spin_unlock(&dma->queued_lock);

	buf->buf.field = V4L2_FIELD_NONE;
	buf->buf.sequence = dma->sequence++;
	buf->buf.vb2_buf.timestamp = ktime_get_ns();
	vb2_set_plane_payload(&buf->buf.vb2_buf, 0, dma->format.sizeimage);
	vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);
}

static int
xvip_dma_queue_setup(struct vb2_queue *vq,
		     unsigned int *nbuffers, unsigned int *nplanes,
		     unsigned int sizes[], struct device *alloc_devs[])
{
	struct xvip_dma *dma = vb2_get_drv_priv(vq);

	/* Make sure the image size is large enough. */
	if (*nplanes)
		return sizes[0] < dma->format.sizeimage ? -EINVAL : 0;

	*nplanes = 1;
	sizes[0] = dma->format.sizeimage;

	return 0;
}

static int xvip_dma_buffer_prepare(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct xvip_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
	struct xvip_dma_buffer *buf = to_xvip_dma_buffer(vbuf);

	buf->dma = dma;

	return 0;
}

static void xvip_dma_buffer_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct xvip_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
	struct xvip_dma_buffer *buf = to_xvip_dma_buffer(vbuf);
	struct dma_async_tx_descriptor *desc;
	dma_addr_t addr = vb2_dma_contig_plane_dma_addr(vb, 0);
	u32 flags;

	if (dma->queue.type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
		flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
		dma->xt.dir = DMA_DEV_TO_MEM;
		dma->xt.src_sgl = false;
		dma->xt.dst_sgl = true;
		dma->xt.dst_start = addr;
	} else {
		flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
		dma->xt.dir = DMA_MEM_TO_DEV;
		dma->xt.src_sgl = true;
		dma->xt.dst_sgl = false;
		dma->xt.src_start = addr;
	}

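	/*
	 * Describe the frame as a single-chunk interleaved template: each of
	 * the numf lines carries width * bpp bytes of payload, and the
	 * inter-chunk gap (icg) covers any padding at the end of the line.
	 * Illustrative values only: a 1920-pixel wide line at 2 bytes per
	 * pixel with bytesperline == 4096 gives size == 3840 and icg == 256.
	 */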
	dma->xt.frame_size = 1;
	dma->sgl[0].size = dma->format.width * dma->fmtinfo->bpp;
	dma->sgl[0].icg = dma->format.bytesperline - dma->sgl[0].size;
	dma->xt.numf = dma->format.height;

	desc = dmaengine_prep_interleaved_dma(dma->dma, &dma->xt, flags);
	if (!desc) {
		dev_err(dma->xdev->dev, "Failed to prepare DMA transfer\n");
		vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR);
		return;
	}
	desc->callback = xvip_dma_complete;
	desc->callback_param = buf;

	spin_lock_irq(&dma->queued_lock);
	list_add_tail(&buf->queue, &dma->queued_bufs);
	spin_unlock_irq(&dma->queued_lock);

	dmaengine_submit(desc);

	if (vb2_is_streaming(&dma->queue))
		dma_async_issue_pending(dma->dma);
}

static int xvip_dma_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct xvip_dma *dma = vb2_get_drv_priv(vq);
	struct xvip_dma_buffer *buf, *nbuf;
	struct xvip_pipeline *pipe;
	int ret;

	dma->sequence = 0;

	/*
	 * Start streaming on the pipeline. No link touching an entity in the
	 * pipeline can be activated or deactivated once streaming is started.
	 *
	 * Use the pipeline object embedded in the first DMA object that starts
	 * streaming.
	 */
	pipe = to_xvip_pipeline(&dma->video) ? : &dma->pipe;

	ret = video_device_pipeline_start(&dma->video, &pipe->pipe);
	if (ret < 0)
		goto error;

	/* Verify that the configured format matches the output of the
	 * connected subdev.
	 */
	ret = xvip_dma_verify_format(dma);
	if (ret < 0)
		goto error_stop;

	ret = xvip_pipeline_prepare(pipe, dma);
	if (ret < 0)
		goto error_stop;

	/* Start the DMA engine. This must be done before starting the blocks
	 * in the pipeline to avoid DMA synchronization issues.
	 */
	dma_async_issue_pending(dma->dma);

	/* Start the pipeline. */
	xvip_pipeline_set_stream(pipe, true);

	return 0;

error_stop:
	video_device_pipeline_stop(&dma->video);

error:
	/* Give back all queued buffers to videobuf2. */
	spin_lock_irq(&dma->queued_lock);
	list_for_each_entry_safe(buf, nbuf, &dma->queued_bufs, queue) {
		vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_QUEUED);
		list_del(&buf->queue);
	}
	spin_unlock_irq(&dma->queued_lock);

	return ret;
}

static void xvip_dma_stop_streaming(struct vb2_queue *vq)
{
	struct xvip_dma *dma = vb2_get_drv_priv(vq);
	struct xvip_pipeline *pipe = to_xvip_pipeline(&dma->video);
	struct xvip_dma_buffer *buf, *nbuf;

	/* Stop the pipeline. */
	xvip_pipeline_set_stream(pipe, false);

	/* Stop and reset the DMA engine. */
	dmaengine_terminate_all(dma->dma);

	/* Cleanup the pipeline and mark it as being stopped. */
	xvip_pipeline_cleanup(pipe);
	video_device_pipeline_stop(&dma->video);

	/* Give back all queued buffers to videobuf2. */
	spin_lock_irq(&dma->queued_lock);
	list_for_each_entry_safe(buf, nbuf, &dma->queued_bufs, queue) {
		vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR);
		list_del(&buf->queue);
	}
	spin_unlock_irq(&dma->queued_lock);
}

static const struct vb2_ops xvip_dma_queue_qops = {
	.queue_setup = xvip_dma_queue_setup,
	.buf_prepare = xvip_dma_buffer_prepare,
	.buf_queue = xvip_dma_buffer_queue,
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
	.start_streaming = xvip_dma_start_streaming,
	.stop_streaming = xvip_dma_stop_streaming,
};

/* -----------------------------------------------------------------------------
 * V4L2 ioctls
 */

static int
xvip_dma_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
{
	struct v4l2_fh *vfh = file->private_data;
	struct xvip_dma *dma = to_xvip_dma(vfh->vdev);

	cap->capabilities = dma->xdev->v4l2_caps | V4L2_CAP_STREAMING |
			    V4L2_CAP_DEVICE_CAPS;

	strscpy(cap->driver, "xilinx-vipp", sizeof(cap->driver));
	strscpy(cap->card, dma->video.name, sizeof(cap->card));
	snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%pOFn:%u",
		 dma->xdev->dev->of_node, dma->port);

	return 0;
}

/* FIXME: Without this callback some applications are not configured with the
 * correct format, which results in frames being captured in the wrong format.
 * Whether this callback should be mandatory is not clearly defined, so it
 * should be clarified on the mailing list.
 */
static int
xvip_dma_enum_format(struct file *file, void *fh, struct v4l2_fmtdesc *f)
{
	struct v4l2_fh *vfh = file->private_data;
	struct xvip_dma *dma = to_xvip_dma(vfh->vdev);

	if (f->index > 0)
		return -EINVAL;

	f->pixelformat = dma->format.pixelformat;

	return 0;
}

static int
xvip_dma_get_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct v4l2_fh *vfh = file->private_data;
	struct xvip_dma *dma = to_xvip_dma(vfh->vdev);

	format->fmt.pix = dma->format;

	return 0;
}

static void
__xvip_dma_try_format(struct xvip_dma *dma, struct v4l2_pix_format *pix,
		      const struct xvip_video_format **fmtinfo)
{
	const struct xvip_video_format *info;
	unsigned int min_width;
	unsigned int max_width;
	unsigned int min_bpl;
	unsigned int max_bpl;
	unsigned int width;
	unsigned int align;
	unsigned int bpl;

	/* Retrieve format information and select the default format if the
	 * requested format isn't supported.
	 */
	info = xvip_get_format_by_fourcc(pix->pixelformat);

	pix->pixelformat = info->fourcc;
	pix->field = V4L2_FIELD_NONE;

	/* The transfer alignment requirements are expressed in bytes. Compute
	 * the minimum and maximum values, clamp the requested width and convert
	 * it back to pixels.
	 */
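	/*
	 * Worked example with assumed values: dma->align == 8 and a format
	 * with 2 bytes per pixel give align == lcm(8, 2) == 8. A requested
	 * width of 1921 pixels is 3842 bytes, rounded down to 3840 bytes and
	 * converted back to 1920 pixels below.
	 */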
	align = lcm(dma->align, info->bpp);
	min_width = roundup(XVIP_DMA_MIN_WIDTH, align);
	max_width = rounddown(XVIP_DMA_MAX_WIDTH, align);
	width = rounddown(pix->width * info->bpp, align);

	pix->width = clamp(width, min_width, max_width) / info->bpp;
	pix->height = clamp(pix->height, XVIP_DMA_MIN_HEIGHT,
			    XVIP_DMA_MAX_HEIGHT);

	/* Clamp the requested bytes per line value. If the maximum bytes per
	 * line value is zero, the module doesn't support user configurable line
	 * sizes. Override the requested value with the minimum in that case.
	 */
	min_bpl = pix->width * info->bpp;
	max_bpl = rounddown(XVIP_DMA_MAX_WIDTH, dma->align);
	bpl = rounddown(pix->bytesperline, dma->align);

	pix->bytesperline = clamp(bpl, min_bpl, max_bpl);
	pix->sizeimage = pix->bytesperline * pix->height;

	if (fmtinfo)
		*fmtinfo = info;
}

static int
xvip_dma_try_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct v4l2_fh *vfh = file->private_data;
	struct xvip_dma *dma = to_xvip_dma(vfh->vdev);

	__xvip_dma_try_format(dma, &format->fmt.pix, NULL);
	return 0;
}

static int
xvip_dma_set_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct v4l2_fh *vfh = file->private_data;
	struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
	const struct xvip_video_format *info;

	__xvip_dma_try_format(dma, &format->fmt.pix, &info);

	if (vb2_is_busy(&dma->queue))
		return -EBUSY;

	dma->format = format->fmt.pix;
	dma->fmtinfo = info;

	return 0;
}

static const struct v4l2_ioctl_ops xvip_dma_ioctl_ops = {
	.vidioc_querycap = xvip_dma_querycap,
	.vidioc_enum_fmt_vid_cap = xvip_dma_enum_format,
	.vidioc_g_fmt_vid_cap = xvip_dma_get_format,
	.vidioc_g_fmt_vid_out = xvip_dma_get_format,
	.vidioc_s_fmt_vid_cap = xvip_dma_set_format,
	.vidioc_s_fmt_vid_out = xvip_dma_set_format,
	.vidioc_try_fmt_vid_cap = xvip_dma_try_format,
	.vidioc_try_fmt_vid_out = xvip_dma_try_format,
	.vidioc_reqbufs = vb2_ioctl_reqbufs,
	.vidioc_querybuf = vb2_ioctl_querybuf,
	.vidioc_qbuf = vb2_ioctl_qbuf,
	.vidioc_dqbuf = vb2_ioctl_dqbuf,
	.vidioc_create_bufs = vb2_ioctl_create_bufs,
	.vidioc_expbuf = vb2_ioctl_expbuf,
	.vidioc_streamon = vb2_ioctl_streamon,
	.vidioc_streamoff = vb2_ioctl_streamoff,
};

/* -----------------------------------------------------------------------------
 * V4L2 file operations
 */

static const struct v4l2_file_operations xvip_dma_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = video_ioctl2,
	.open = v4l2_fh_open,
	.release = vb2_fop_release,
	.poll = vb2_fop_poll,
	.mmap = vb2_fop_mmap,
};

/* -----------------------------------------------------------------------------
 * Xilinx Video DMA Core
 */

int xvip_dma_init(struct xvip_composite_device *xdev, struct xvip_dma *dma,
		  enum v4l2_buf_type type, unsigned int port)
{
	char name[16];
	int ret;

	dma->xdev = xdev;
	dma->port = port;
	mutex_init(&dma->lock);
	mutex_init(&dma->pipe.lock);
	INIT_LIST_HEAD(&dma->queued_bufs);
	spin_lock_init(&dma->queued_lock);

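	/*
	 * Default to YUYV at 1920x1080. Assuming the YUYV format entry
	 * reports 2 bytes per pixel, this yields 3840 bytes per line and a
	 * 4147200 byte image.
	 */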
	dma->fmtinfo = xvip_get_format_by_fourcc(V4L2_PIX_FMT_YUYV);
	dma->format.pixelformat = dma->fmtinfo->fourcc;
	dma->format.colorspace = V4L2_COLORSPACE_SRGB;
	dma->format.field = V4L2_FIELD_NONE;
	dma->format.width = XVIP_DMA_DEF_WIDTH;
	dma->format.height = XVIP_DMA_DEF_HEIGHT;
	dma->format.bytesperline = dma->format.width * dma->fmtinfo->bpp;
	dma->format.sizeimage = dma->format.bytesperline * dma->format.height;

	/* Initialize the media entity... */
	dma->pad.flags = type == V4L2_BUF_TYPE_VIDEO_CAPTURE
		       ? MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;

	ret = media_entity_pads_init(&dma->video.entity, 1, &dma->pad);
	if (ret < 0)
		goto error;

	/* ... and the video node... */
	dma->video.fops = &xvip_dma_fops;
	dma->video.v4l2_dev = &xdev->v4l2_dev;
	dma->video.queue = &dma->queue;
	snprintf(dma->video.name, sizeof(dma->video.name), "%pOFn %s %u",
		 xdev->dev->of_node,
		 type == V4L2_BUF_TYPE_VIDEO_CAPTURE ? "output" : "input",
		 port);
	dma->video.vfl_type = VFL_TYPE_VIDEO;
	dma->video.vfl_dir = type == V4L2_BUF_TYPE_VIDEO_CAPTURE
			   ? VFL_DIR_RX : VFL_DIR_TX;
	dma->video.release = video_device_release_empty;
	dma->video.ioctl_ops = &xvip_dma_ioctl_ops;
	dma->video.lock = &dma->lock;
	dma->video.device_caps = V4L2_CAP_STREAMING;
	if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
		dma->video.device_caps |= V4L2_CAP_VIDEO_CAPTURE;
	else
		dma->video.device_caps |= V4L2_CAP_VIDEO_OUTPUT;

	video_set_drvdata(&dma->video, dma);

	/* ... and the buffers queue... */
	/* Don't enable VB2_READ and VB2_WRITE, as using the read() and write()
	 * V4L2 APIs would be inefficient. Testing on the command line with a
	 * 'cat /dev/video?' thus won't be possible, but given that the driver
	 * anyway requires a test tool to set up the pipeline before any video
	 * stream can be started, requiring a specific V4L2 test tool as well
	 * instead of 'cat' isn't really a drawback.
	 */
	dma->queue.type = type;
	dma->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
	dma->queue.lock = &dma->lock;
	dma->queue.drv_priv = dma;
	dma->queue.buf_struct_size = sizeof(struct xvip_dma_buffer);
	dma->queue.ops = &xvip_dma_queue_qops;
	dma->queue.mem_ops = &vb2_dma_contig_memops;
	dma->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC
				   | V4L2_BUF_FLAG_TSTAMP_SRC_EOF;
	dma->queue.dev = dma->xdev->dev;
	ret = vb2_queue_init(&dma->queue);
	if (ret < 0) {
		dev_err(dma->xdev->dev, "failed to initialize VB2 queue\n");
		goto error;
	}

	/* ... and the DMA channel. */
	snprintf(name, sizeof(name), "port%u", port);
	dma->dma = dma_request_chan(dma->xdev->dev, name);
	if (IS_ERR(dma->dma)) {
		ret = dev_err_probe(dma->xdev->dev, PTR_ERR(dma->dma),
				    "no VDMA channel found\n");
		goto error;
	}

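	/*
	 * copy_align is expressed as a power-of-two exponent (see enum
	 * dmaengine_alignment), so convert it to an alignment in bytes, e.g.
	 * a copy_align of 3 means 8-byte aligned transfers.
	 */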
	dma->align = 1 << dma->dma->device->copy_align;

	ret = video_register_device(&dma->video, VFL_TYPE_VIDEO, -1);
	if (ret < 0) {
		dev_err(dma->xdev->dev, "failed to register video device\n");
		goto error;
	}

	return 0;

error:
	xvip_dma_cleanup(dma);
	return ret;
}

void xvip_dma_cleanup(struct xvip_dma *dma)
{
	if (video_is_registered(&dma->video))
		video_unregister_device(&dma->video);

	if (!IS_ERR_OR_NULL(dma->dma))
		dma_release_channel(dma->dma);

	media_entity_cleanup(&dma->video.entity);

	mutex_destroy(&dma->lock);
	mutex_destroy(&dma->pipe.lock);
}
