1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Copyright (c) 2011 - 2012 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com
5 *
6 * Samsung EXYNOS5 SoC series G-Scaler driver
7 */
8
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include <media/v4l2-ioctl.h>

#include "gsc-core.h"
26
27static int gsc_m2m_ctx_stop_req(struct gsc_ctx *ctx)
28{
29 struct gsc_ctx *curr_ctx;
30 struct gsc_dev *gsc = ctx->gsc_dev;
31 int ret;
32
33 curr_ctx = v4l2_m2m_get_curr_priv(m2m_dev: gsc->m2m.m2m_dev);
34 if (!gsc_m2m_pending(gsc) || (curr_ctx != ctx))
35 return 0;
36
37 gsc_ctx_state_lock_set(GSC_CTX_STOP_REQ, ctx);
38 ret = wait_event_timeout(gsc->irq_queue,
39 !gsc_ctx_state_is_set(GSC_CTX_STOP_REQ, ctx),
40 GSC_SHUTDOWN_TIMEOUT);
41
42 return ret == 0 ? -ETIMEDOUT : ret;
43}
44
45static void __gsc_m2m_job_abort(struct gsc_ctx *ctx)
46{
47 int ret;
48
49 ret = gsc_m2m_ctx_stop_req(ctx);
50 if ((ret == -ETIMEDOUT) || (ctx->state & GSC_CTX_ABORT)) {
51 gsc_ctx_state_lock_clear(GSC_CTX_STOP_REQ | GSC_CTX_ABORT, ctx);
52 gsc_m2m_job_finish(ctx, vb_state: VB2_BUF_STATE_ERROR);
53 }
54}
55
56static int gsc_m2m_start_streaming(struct vb2_queue *q, unsigned int count)
57{
58 struct gsc_ctx *ctx = q->drv_priv;
59
60 return pm_runtime_resume_and_get(dev: &ctx->gsc_dev->pdev->dev);
61}
62
63static void __gsc_m2m_cleanup_queue(struct gsc_ctx *ctx)
64{
65 struct vb2_v4l2_buffer *src_vb, *dst_vb;
66
67 while (v4l2_m2m_num_src_bufs_ready(m2m_ctx: ctx->m2m_ctx) > 0) {
68 src_vb = v4l2_m2m_src_buf_remove(m2m_ctx: ctx->m2m_ctx);
69 v4l2_m2m_buf_done(buf: src_vb, state: VB2_BUF_STATE_ERROR);
70 }
71
72 while (v4l2_m2m_num_dst_bufs_ready(m2m_ctx: ctx->m2m_ctx) > 0) {
73 dst_vb = v4l2_m2m_dst_buf_remove(m2m_ctx: ctx->m2m_ctx);
74 v4l2_m2m_buf_done(buf: dst_vb, state: VB2_BUF_STATE_ERROR);
75 }
76}
77
/*
 * vb2 stop_streaming: abort any in-flight job, return all queued
 * buffers with an error state, then drop the runtime-PM reference
 * taken in gsc_m2m_start_streaming().
 */
static void gsc_m2m_stop_streaming(struct vb2_queue *q)
{
	struct gsc_ctx *ctx = q->drv_priv;

	__gsc_m2m_job_abort(ctx);

	__gsc_m2m_cleanup_queue(ctx);

	pm_runtime_put(&ctx->gsc_dev->pdev->dev);
}
88
89void gsc_m2m_job_finish(struct gsc_ctx *ctx, int vb_state)
90{
91 struct vb2_v4l2_buffer *src_vb, *dst_vb;
92
93 if (!ctx || !ctx->m2m_ctx)
94 return;
95
96 src_vb = v4l2_m2m_src_buf_remove(m2m_ctx: ctx->m2m_ctx);
97 dst_vb = v4l2_m2m_dst_buf_remove(m2m_ctx: ctx->m2m_ctx);
98
99 if (src_vb && dst_vb) {
100 dst_vb->vb2_buf.timestamp = src_vb->vb2_buf.timestamp;
101 dst_vb->timecode = src_vb->timecode;
102 dst_vb->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
103 dst_vb->flags |=
104 src_vb->flags
105 & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
106
107 v4l2_m2m_buf_done(buf: src_vb, state: vb_state);
108 v4l2_m2m_buf_done(buf: dst_vb, state: vb_state);
109
110 v4l2_m2m_job_finish(m2m_dev: ctx->gsc_dev->m2m.m2m_dev,
111 m2m_ctx: ctx->m2m_ctx);
112 }
113}
114
/* v4l2-m2m job_abort callback: delegate to the internal abort helper. */
static void gsc_m2m_job_abort(void *priv)
{
	struct gsc_ctx *ctx = priv;

	__gsc_m2m_job_abort(ctx);
}
119
120static int gsc_get_bufs(struct gsc_ctx *ctx)
121{
122 struct gsc_frame *s_frame, *d_frame;
123 struct vb2_v4l2_buffer *src_vb, *dst_vb;
124 int ret;
125
126 s_frame = &ctx->s_frame;
127 d_frame = &ctx->d_frame;
128
129 src_vb = v4l2_m2m_next_src_buf(m2m_ctx: ctx->m2m_ctx);
130 ret = gsc_prepare_addr(ctx, vb: &src_vb->vb2_buf, frame: s_frame, addr: &s_frame->addr);
131 if (ret)
132 return ret;
133
134 dst_vb = v4l2_m2m_next_dst_buf(m2m_ctx: ctx->m2m_ctx);
135 ret = gsc_prepare_addr(ctx, vb: &dst_vb->vb2_buf, frame: d_frame, addr: &d_frame->addr);
136 if (ret)
137 return ret;
138
139 dst_vb->vb2_buf.timestamp = src_vb->vb2_buf.timestamp;
140
141 return 0;
142}
143
/*
 * v4l2-m2m device_run callback: program the G-Scaler for one
 * transaction using the next ready source/destination buffer pair.
 * The whole body runs under gsc->slock with IRQs disabled, so it
 * must not sleep.
 */
static void gsc_m2m_device_run(void *priv)
{
	struct gsc_ctx *ctx = priv;
	struct gsc_dev *gsc;
	unsigned long flags;
	int ret;
	bool is_set = false;

	if (WARN(!ctx, "null hardware context\n"))
		return;

	gsc = ctx->gsc_dev;
	spin_lock_irqsave(&gsc->slock, flags);

	set_bit(ST_M2M_PEND, &gsc->state);

	/* Reconfigure hardware if the context has changed. */
	if (gsc->m2m.ctx != ctx) {
		pr_debug("gsc->m2m.ctx = 0x%p, current_ctx = 0x%p",
			 gsc->m2m.ctx, ctx);
		ctx->state |= GSC_PARAMS;
		gsc->m2m.ctx = ctx;
	}

	/*
	 * Honour a pending stop request: mark the context aborted and
	 * wake the waiter in gsc_m2m_ctx_stop_req() instead of running.
	 */
	is_set = ctx->state & GSC_CTX_STOP_REQ;
	if (is_set) {
		ctx->state &= ~GSC_CTX_STOP_REQ;
		ctx->state |= GSC_CTX_ABORT;
		wake_up(&gsc->irq_queue);
		goto put_device;
	}

	ret = gsc_get_bufs(ctx);
	if (ret) {
		pr_err("Wrong address");
		goto put_device;
	}

	gsc_set_prefbuf(gsc, &ctx->s_frame);
	gsc_hw_set_input_addr(gsc, &ctx->s_frame.addr, GSC_M2M_BUF_NUM);
	gsc_hw_set_output_addr(gsc, &ctx->d_frame.addr, GSC_M2M_BUF_NUM);

	/* Full reprogramming is only needed when parameters changed. */
	if (ctx->state & GSC_PARAMS) {
		gsc_hw_set_input_buf_masking(gsc, GSC_M2M_BUF_NUM, false);
		gsc_hw_set_output_buf_masking(gsc, GSC_M2M_BUF_NUM, false);
		gsc_hw_set_frm_done_irq_mask(gsc, false);
		gsc_hw_set_gsc_irq_enable(gsc, true);

		if (gsc_set_scaler_info(ctx)) {
			pr_err("Scaler setup error");
			goto put_device;
		}

		gsc_hw_set_input_path(ctx);
		gsc_hw_set_in_size(ctx);
		gsc_hw_set_in_image_format(ctx);

		gsc_hw_set_output_path(ctx);
		gsc_hw_set_out_size(ctx);
		gsc_hw_set_out_image_format(ctx);

		gsc_hw_set_prescaler(ctx);
		gsc_hw_set_mainscaler(ctx);
		gsc_hw_set_rotation(ctx);
		gsc_hw_set_global_alpha(ctx);
	}

	/* update shadow registers */
	gsc_hw_set_sfr_update(ctx);

	ctx->state &= ~GSC_PARAMS;
	/* Kick off the hardware transaction. */
	gsc_hw_enable_control(gsc, true);

	spin_unlock_irqrestore(&gsc->slock, flags);
	return;

put_device:
	ctx->state &= ~GSC_PARAMS;
	spin_unlock_irqrestore(&gsc->slock, flags);
}
224
225static int gsc_m2m_queue_setup(struct vb2_queue *vq,
226 unsigned int *num_buffers, unsigned int *num_planes,
227 unsigned int sizes[], struct device *alloc_devs[])
228{
229 struct gsc_ctx *ctx = vb2_get_drv_priv(q: vq);
230 struct gsc_frame *frame;
231 int i;
232
233 frame = ctx_get_frame(ctx, type: vq->type);
234 if (IS_ERR(ptr: frame))
235 return PTR_ERR(ptr: frame);
236
237 if (!frame->fmt)
238 return -EINVAL;
239
240 *num_planes = frame->fmt->num_planes;
241 for (i = 0; i < frame->fmt->num_planes; i++)
242 sizes[i] = frame->payload[i];
243 return 0;
244}
245
246static int gsc_m2m_buf_prepare(struct vb2_buffer *vb)
247{
248 struct gsc_ctx *ctx = vb2_get_drv_priv(q: vb->vb2_queue);
249 struct gsc_frame *frame;
250 int i;
251
252 frame = ctx_get_frame(ctx, type: vb->vb2_queue->type);
253 if (IS_ERR(ptr: frame))
254 return PTR_ERR(ptr: frame);
255
256 if (V4L2_TYPE_IS_CAPTURE(vb->vb2_queue->type)) {
257 for (i = 0; i < frame->fmt->num_planes; i++)
258 vb2_set_plane_payload(vb, plane_no: i, size: frame->payload[i]);
259 }
260
261 return 0;
262}
263
264static void gsc_m2m_buf_queue(struct vb2_buffer *vb)
265{
266 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
267 struct gsc_ctx *ctx = vb2_get_drv_priv(q: vb->vb2_queue);
268
269 pr_debug("ctx: %p, ctx->state: 0x%x", ctx, ctx->state);
270
271 if (ctx->m2m_ctx)
272 v4l2_m2m_buf_queue(m2m_ctx: ctx->m2m_ctx, vbuf);
273}
274
/* videobuf2 queue operations shared by the source and destination queues. */
static const struct vb2_ops gsc_m2m_qops = {
	.queue_setup	 = gsc_m2m_queue_setup,
	.buf_prepare	 = gsc_m2m_buf_prepare,
	.buf_queue	 = gsc_m2m_buf_queue,
	.wait_prepare	 = vb2_ops_wait_prepare,
	.wait_finish	 = vb2_ops_wait_finish,
	.stop_streaming	 = gsc_m2m_stop_streaming,
	.start_streaming = gsc_m2m_start_streaming,
};
284
/*
 * VIDIOC_QUERYCAP: fill in driver and card names; device capabilities
 * come from vdev->device_caps set at registration time.
 */
static int gsc_m2m_querycap(struct file *file, void *fh,
			   struct v4l2_capability *cap)
{
	strscpy(cap->driver, GSC_MODULE_NAME, sizeof(cap->driver));
	strscpy(cap->card, GSC_MODULE_NAME " gscaler", sizeof(cap->card));
	return 0;
}
292
/* VIDIOC_ENUM_FMT: delegate to the core format enumerator. */
static int gsc_m2m_enum_fmt(struct file *file, void *priv,
			    struct v4l2_fmtdesc *f)
{
	return gsc_enum_fmt(f);
}
298
/* VIDIOC_G_FMT: report the current multiplanar format for this queue. */
static int gsc_m2m_g_fmt_mplane(struct file *file, void *fh,
				struct v4l2_format *f)
{
	return gsc_g_fmt_mplane(fh_to_ctx(fh), f);
}
306
/* VIDIOC_TRY_FMT: validate/adjust a multiplanar format without applying. */
static int gsc_m2m_try_fmt_mplane(struct file *file, void *fh,
				  struct v4l2_format *f)
{
	return gsc_try_fmt_mplane(fh_to_ctx(fh), f);
}
314
315static int gsc_m2m_s_fmt_mplane(struct file *file, void *fh,
316 struct v4l2_format *f)
317{
318 struct gsc_ctx *ctx = fh_to_ctx(fh);
319 struct vb2_queue *vq;
320 struct gsc_frame *frame;
321 struct v4l2_pix_format_mplane *pix;
322 int i, ret = 0;
323
324 ret = gsc_m2m_try_fmt_mplane(file, fh, f);
325 if (ret)
326 return ret;
327
328 vq = v4l2_m2m_get_vq(m2m_ctx: ctx->m2m_ctx, type: f->type);
329
330 if (vb2_is_streaming(q: vq)) {
331 pr_err("queue (%d) busy", f->type);
332 return -EBUSY;
333 }
334
335 if (V4L2_TYPE_IS_OUTPUT(f->type))
336 frame = &ctx->s_frame;
337 else
338 frame = &ctx->d_frame;
339
340 pix = &f->fmt.pix_mp;
341 frame->fmt = find_fmt(pixelformat: &pix->pixelformat, NULL, index: 0);
342 frame->colorspace = pix->colorspace;
343 if (!frame->fmt)
344 return -EINVAL;
345
346 for (i = 0; i < frame->fmt->num_planes; i++)
347 frame->payload[i] = pix->plane_fmt[i].sizeimage;
348
349 gsc_set_frame_size(frame, width: pix->width, height: pix->height);
350
351 if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
352 gsc_ctx_state_lock_set(GSC_PARAMS | GSC_DST_FMT, ctx);
353 else
354 gsc_ctx_state_lock_set(GSC_PARAMS | GSC_SRC_FMT, ctx);
355
356 pr_debug("f_w: %d, f_h: %d", frame->f_width, frame->f_height);
357
358 return 0;
359}
360
361static int gsc_m2m_reqbufs(struct file *file, void *fh,
362 struct v4l2_requestbuffers *reqbufs)
363{
364 struct gsc_ctx *ctx = fh_to_ctx(fh);
365 struct gsc_dev *gsc = ctx->gsc_dev;
366 u32 max_cnt;
367
368 max_cnt = (reqbufs->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) ?
369 gsc->variant->in_buf_cnt : gsc->variant->out_buf_cnt;
370 if (reqbufs->count > max_cnt)
371 return -EINVAL;
372
373 return v4l2_m2m_reqbufs(file, m2m_ctx: ctx->m2m_ctx, reqbufs);
374}
375
376static int gsc_m2m_expbuf(struct file *file, void *fh,
377 struct v4l2_exportbuffer *eb)
378{
379 struct gsc_ctx *ctx = fh_to_ctx(fh);
380 return v4l2_m2m_expbuf(file, m2m_ctx: ctx->m2m_ctx, eb);
381}
382
383static int gsc_m2m_querybuf(struct file *file, void *fh,
384 struct v4l2_buffer *buf)
385{
386 struct gsc_ctx *ctx = fh_to_ctx(fh);
387 return v4l2_m2m_querybuf(file, m2m_ctx: ctx->m2m_ctx, buf);
388}
389
390static int gsc_m2m_qbuf(struct file *file, void *fh,
391 struct v4l2_buffer *buf)
392{
393 struct gsc_ctx *ctx = fh_to_ctx(fh);
394 return v4l2_m2m_qbuf(file, m2m_ctx: ctx->m2m_ctx, buf);
395}
396
397static int gsc_m2m_dqbuf(struct file *file, void *fh,
398 struct v4l2_buffer *buf)
399{
400 struct gsc_ctx *ctx = fh_to_ctx(fh);
401 return v4l2_m2m_dqbuf(file, m2m_ctx: ctx->m2m_ctx, buf);
402}
403
404static int gsc_m2m_streamon(struct file *file, void *fh,
405 enum v4l2_buf_type type)
406{
407 struct gsc_ctx *ctx = fh_to_ctx(fh);
408
409 /* The source and target color format need to be set */
410 if (V4L2_TYPE_IS_OUTPUT(type)) {
411 if (!gsc_ctx_state_is_set(GSC_SRC_FMT, ctx))
412 return -EINVAL;
413 } else if (!gsc_ctx_state_is_set(GSC_DST_FMT, ctx)) {
414 return -EINVAL;
415 }
416
417 return v4l2_m2m_streamon(file, m2m_ctx: ctx->m2m_ctx, type);
418}
419
420static int gsc_m2m_streamoff(struct file *file, void *fh,
421 enum v4l2_buf_type type)
422{
423 struct gsc_ctx *ctx = fh_to_ctx(fh);
424 return v4l2_m2m_streamoff(file, m2m_ctx: ctx->m2m_ctx, type);
425}
426
427/* Return 1 if rectangle a is enclosed in rectangle b, or 0 otherwise. */
428static int is_rectangle_enclosed(struct v4l2_rect *a, struct v4l2_rect *b)
429{
430 if (a->left < b->left || a->top < b->top)
431 return 0;
432
433 if (a->left + a->width > b->left + b->width)
434 return 0;
435
436 if (a->top + a->height > b->top + b->height)
437 return 0;
438
439 return 1;
440}
441
442static int gsc_m2m_g_selection(struct file *file, void *fh,
443 struct v4l2_selection *s)
444{
445 struct gsc_frame *frame;
446 struct gsc_ctx *ctx = fh_to_ctx(fh);
447
448 if ((s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
449 (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT))
450 return -EINVAL;
451
452 frame = ctx_get_frame(ctx, type: s->type);
453 if (IS_ERR(ptr: frame))
454 return PTR_ERR(ptr: frame);
455
456 switch (s->target) {
457 case V4L2_SEL_TGT_COMPOSE_DEFAULT:
458 case V4L2_SEL_TGT_COMPOSE_BOUNDS:
459 case V4L2_SEL_TGT_CROP_BOUNDS:
460 case V4L2_SEL_TGT_CROP_DEFAULT:
461 s->r.left = 0;
462 s->r.top = 0;
463 s->r.width = frame->f_width;
464 s->r.height = frame->f_height;
465 return 0;
466
467 case V4L2_SEL_TGT_COMPOSE:
468 case V4L2_SEL_TGT_CROP:
469 s->r.left = frame->crop.left;
470 s->r.top = frame->crop.top;
471 s->r.width = frame->crop.width;
472 s->r.height = frame->crop.height;
473 return 0;
474 }
475
476 return -EINVAL;
477}
478
/*
 * VIDIOC_S_SELECTION: apply a crop/compose rectangle after letting the
 * core adjust it to hardware limits and verifying the resulting
 * scaling ratio is achievable.
 */
static int gsc_m2m_s_selection(struct file *file, void *fh,
			       struct v4l2_selection *s)
{
	struct gsc_frame *frame;
	struct gsc_ctx *ctx = fh_to_ctx(fh);
	struct gsc_variant *variant = ctx->gsc_dev->variant;
	struct v4l2_selection sel = *s;
	int ret;

	if ((s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
	    (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT))
		return -EINVAL;

	/* Adjust the requested rectangle to hardware constraints. */
	ret = gsc_try_selection(ctx, &sel);
	if (ret)
		return ret;

	/* Honour userspace's LE/GE size-constraint flags. */
	if (s->flags & V4L2_SEL_FLAG_LE &&
	    !is_rectangle_enclosed(&sel.r, &s->r))
		return -ERANGE;

	if (s->flags & V4L2_SEL_FLAG_GE &&
	    !is_rectangle_enclosed(&s->r, &sel.r))
		return -ERANGE;

	s->r = sel.r;

	/* Compose targets act on the source frame, crop on the destination. */
	switch (s->target) {
	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
	case V4L2_SEL_TGT_COMPOSE_DEFAULT:
	case V4L2_SEL_TGT_COMPOSE:
		frame = &ctx->s_frame;
		break;

	case V4L2_SEL_TGT_CROP_BOUNDS:
	case V4L2_SEL_TGT_CROP:
	case V4L2_SEL_TGT_CROP_DEFAULT:
		frame = &ctx->d_frame;
		break;

	default:
		return -EINVAL;
	}

	/* Check to see if scaling ratio is within supported range */
	if (gsc_ctx_state_is_set(GSC_DST_FMT | GSC_SRC_FMT, ctx)) {
		if (s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
			ret = gsc_check_scaler_ratio(variant, sel.r.width,
				sel.r.height, ctx->d_frame.crop.width,
				ctx->d_frame.crop.height,
				ctx->gsc_ctrls.rotate->val, ctx->out_path);
		} else {
			ret = gsc_check_scaler_ratio(variant,
				ctx->s_frame.crop.width,
				ctx->s_frame.crop.height, sel.r.width,
				sel.r.height, ctx->gsc_ctrls.rotate->val,
				ctx->out_path);
		}

		if (ret) {
			pr_err("Out of scaler range");
			return -EINVAL;
		}
	}

	frame->crop = sel.r;

	/* Force hardware reprogramming on the next device_run. */
	gsc_ctx_state_lock_set(GSC_PARAMS, ctx);
	return 0;
}
549
/* V4L2 ioctl handlers for the mem2mem video node. */
static const struct v4l2_ioctl_ops gsc_m2m_ioctl_ops = {
	.vidioc_querycap		= gsc_m2m_querycap,
	.vidioc_enum_fmt_vid_cap	= gsc_m2m_enum_fmt,
	.vidioc_enum_fmt_vid_out	= gsc_m2m_enum_fmt,
	.vidioc_g_fmt_vid_cap_mplane	= gsc_m2m_g_fmt_mplane,
	.vidioc_g_fmt_vid_out_mplane	= gsc_m2m_g_fmt_mplane,
	.vidioc_try_fmt_vid_cap_mplane	= gsc_m2m_try_fmt_mplane,
	.vidioc_try_fmt_vid_out_mplane	= gsc_m2m_try_fmt_mplane,
	.vidioc_s_fmt_vid_cap_mplane	= gsc_m2m_s_fmt_mplane,
	.vidioc_s_fmt_vid_out_mplane	= gsc_m2m_s_fmt_mplane,
	.vidioc_reqbufs			= gsc_m2m_reqbufs,
	.vidioc_expbuf			= gsc_m2m_expbuf,
	.vidioc_querybuf		= gsc_m2m_querybuf,
	.vidioc_qbuf			= gsc_m2m_qbuf,
	.vidioc_dqbuf			= gsc_m2m_dqbuf,
	.vidioc_streamon		= gsc_m2m_streamon,
	.vidioc_streamoff		= gsc_m2m_streamoff,
	.vidioc_g_selection		= gsc_m2m_g_selection,
	.vidioc_s_selection		= gsc_m2m_s_selection
};
570
571static int queue_init(void *priv, struct vb2_queue *src_vq,
572 struct vb2_queue *dst_vq)
573{
574 struct gsc_ctx *ctx = priv;
575 int ret;
576
577 memset(src_vq, 0, sizeof(*src_vq));
578 src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
579 src_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
580 src_vq->drv_priv = ctx;
581 src_vq->ops = &gsc_m2m_qops;
582 src_vq->mem_ops = &vb2_dma_contig_memops;
583 src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
584 src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
585 src_vq->lock = &ctx->gsc_dev->lock;
586 src_vq->dev = &ctx->gsc_dev->pdev->dev;
587
588 ret = vb2_queue_init(q: src_vq);
589 if (ret)
590 return ret;
591
592 memset(dst_vq, 0, sizeof(*dst_vq));
593 dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
594 dst_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
595 dst_vq->drv_priv = ctx;
596 dst_vq->ops = &gsc_m2m_qops;
597 dst_vq->mem_ops = &vb2_dma_contig_memops;
598 dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
599 dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
600 dst_vq->lock = &ctx->gsc_dev->lock;
601 dst_vq->dev = &ctx->gsc_dev->pdev->dev;
602
603 return vb2_queue_init(q: dst_vq);
604}
605
/*
 * Open a new file handle: allocate a per-file context, create its
 * control handler, set DMA-to-DMA defaults and create the mem2mem
 * context. Cleanup on failure unwinds in reverse order via gotos.
 */
static int gsc_m2m_open(struct file *file)
{
	struct gsc_dev *gsc = video_drvdata(file);
	struct gsc_ctx *ctx = NULL;
	int ret;

	pr_debug("pid: %d, state: 0x%lx", task_pid_nr(current), gsc->state);

	if (mutex_lock_interruptible(&gsc->lock))
		return -ERESTARTSYS;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		ret = -ENOMEM;
		goto unlock;
	}

	v4l2_fh_init(&ctx->fh, gsc->m2m.vfd);
	ret = gsc_ctrls_create(ctx);
	if (ret)
		goto error_fh;

	/* Use separate control handler per file handle */
	ctx->fh.ctrl_handler = &ctx->ctrl_handler;
	file->private_data = &ctx->fh;
	v4l2_fh_add(&ctx->fh);

	ctx->gsc_dev = gsc;
	/* Default color format */
	ctx->s_frame.fmt = get_format(0);
	ctx->d_frame.fmt = get_format(0);
	/* Setup the device context for mem2mem mode. */
	ctx->state = GSC_CTX_M2M;
	ctx->flags = 0;
	ctx->in_path = GSC_DMA;
	ctx->out_path = GSC_DMA;

	ctx->m2m_ctx = v4l2_m2m_ctx_init(gsc->m2m.m2m_dev, ctx, queue_init);
	if (IS_ERR(ctx->m2m_ctx)) {
		pr_err("Failed to initialize m2m context");
		ret = PTR_ERR(ctx->m2m_ctx);
		goto error_ctrls;
	}

	/* First open sets the device-wide "m2m in use" flag. */
	if (gsc->m2m.refcnt++ == 0)
		set_bit(ST_M2M_OPEN, &gsc->state);

	pr_debug("gsc m2m driver is opened, ctx(0x%p)", ctx);

	mutex_unlock(&gsc->lock);
	return 0;

error_ctrls:
	gsc_ctrls_delete(ctx);
	v4l2_fh_del(&ctx->fh);
error_fh:
	v4l2_fh_exit(&ctx->fh);
	kfree(ctx);
unlock:
	mutex_unlock(&gsc->lock);
	return ret;
}
668
/*
 * Release a file handle: tear down the m2m context, controls and file
 * handle in reverse order of creation, then free the context. The last
 * close clears the device-wide "m2m in use" flag.
 */
static int gsc_m2m_release(struct file *file)
{
	struct gsc_ctx *ctx = fh_to_ctx(file->private_data);
	struct gsc_dev *gsc = ctx->gsc_dev;

	pr_debug("pid: %d, state: 0x%lx, refcnt= %d",
		 task_pid_nr(current), gsc->state, gsc->m2m.refcnt);

	mutex_lock(&gsc->lock);

	v4l2_m2m_ctx_release(ctx->m2m_ctx);
	gsc_ctrls_delete(ctx);
	v4l2_fh_del(&ctx->fh);
	v4l2_fh_exit(&ctx->fh);

	if (--gsc->m2m.refcnt <= 0)
		clear_bit(ST_M2M_OPEN, &gsc->state);
	kfree(ctx);

	mutex_unlock(&gsc->lock);
	return 0;
}
691
692static __poll_t gsc_m2m_poll(struct file *file,
693 struct poll_table_struct *wait)
694{
695 struct gsc_ctx *ctx = fh_to_ctx(file->private_data);
696 struct gsc_dev *gsc = ctx->gsc_dev;
697 __poll_t ret;
698
699 if (mutex_lock_interruptible(&gsc->lock))
700 return EPOLLERR;
701
702 ret = v4l2_m2m_poll(file, m2m_ctx: ctx->m2m_ctx, wait);
703 mutex_unlock(lock: &gsc->lock);
704
705 return ret;
706}
707
708static int gsc_m2m_mmap(struct file *file, struct vm_area_struct *vma)
709{
710 struct gsc_ctx *ctx = fh_to_ctx(file->private_data);
711 struct gsc_dev *gsc = ctx->gsc_dev;
712 int ret;
713
714 if (mutex_lock_interruptible(&gsc->lock))
715 return -ERESTARTSYS;
716
717 ret = v4l2_m2m_mmap(file, m2m_ctx: ctx->m2m_ctx, vma);
718 mutex_unlock(lock: &gsc->lock);
719
720 return ret;
721}
722
/* File operations for the mem2mem video device node. */
static const struct v4l2_file_operations gsc_m2m_fops = {
	.owner		= THIS_MODULE,
	.open		= gsc_m2m_open,
	.release	= gsc_m2m_release,
	.poll		= gsc_m2m_poll,
	.unlocked_ioctl	= video_ioctl2,
	.mmap		= gsc_m2m_mmap,
};
731
/* v4l2-m2m framework callbacks (job scheduling). */
static const struct v4l2_m2m_ops gsc_m2m_ops = {
	.device_run	= gsc_m2m_device_run,
	.job_abort	= gsc_m2m_job_abort,
};
736
/*
 * Create the v4l2-m2m device and register the mem2mem video node.
 * Returns 0 on success or a negative errno; on video-node registration
 * failure the m2m device is released again.
 */
int gsc_register_m2m_device(struct gsc_dev *gsc)
{
	struct platform_device *pdev;
	int ret;

	if (!gsc)
		return -ENODEV;

	pdev = gsc->pdev;

	gsc->vdev.fops		= &gsc_m2m_fops;
	gsc->vdev.ioctl_ops	= &gsc_m2m_ioctl_ops;
	gsc->vdev.release	= video_device_release_empty;
	gsc->vdev.lock		= &gsc->lock;
	gsc->vdev.vfl_dir	= VFL_DIR_M2M;
	gsc->vdev.v4l2_dev	= &gsc->v4l2_dev;
	gsc->vdev.device_caps	= V4L2_CAP_STREAMING |
				  V4L2_CAP_VIDEO_M2M_MPLANE;
	snprintf(gsc->vdev.name, sizeof(gsc->vdev.name), "%s.%d:m2m",
					GSC_MODULE_NAME, gsc->id);

	video_set_drvdata(&gsc->vdev, gsc);

	gsc->m2m.vfd = &gsc->vdev;
	gsc->m2m.m2m_dev = v4l2_m2m_init(&gsc_m2m_ops);
	if (IS_ERR(gsc->m2m.m2m_dev)) {
		dev_err(&pdev->dev, "failed to initialize v4l2-m2m device\n");
		return PTR_ERR(gsc->m2m.m2m_dev);
	}

	ret = video_register_device(&gsc->vdev, VFL_TYPE_VIDEO, -1);
	if (ret) {
		dev_err(&pdev->dev,
			"%s(): failed to register video device\n", __func__);
		goto err_m2m_release;
	}

	pr_debug("gsc m2m driver registered as /dev/video%d", gsc->vdev.num);
	return 0;

err_m2m_release:
	v4l2_m2m_release(gsc->m2m.m2m_dev);

	return ret;
}
782
783void gsc_unregister_m2m_device(struct gsc_dev *gsc)
784{
785 if (gsc) {
786 v4l2_m2m_release(m2m_dev: gsc->m2m.m2m_dev);
787 video_unregister_device(vdev: &gsc->vdev);
788 }
789}
790

/* source: linux/drivers/media/platform/samsung/exynos-gsc/gsc-m2m.c */