1 | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | /* |
3 | * Memory-to-memory device framework for Video for Linux 2 and vb2. |
4 | * |
5 | * Helper functions for devices that use vb2 buffers for both their |
6 | * source and destination. |
7 | * |
8 | * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd. |
9 | * Pawel Osciak, <pawel@osciak.com> |
10 | * Marek Szyprowski, <m.szyprowski@samsung.com> |
11 | */ |
12 | #include <linux/module.h> |
13 | #include <linux/sched.h> |
14 | #include <linux/slab.h> |
15 | |
16 | #include <media/media-device.h> |
17 | #include <media/videobuf2-v4l2.h> |
18 | #include <media/v4l2-mem2mem.h> |
19 | #include <media/v4l2-dev.h> |
20 | #include <media/v4l2-device.h> |
21 | #include <media/v4l2-fh.h> |
22 | #include <media/v4l2-event.h> |
23 | |
MODULE_DESCRIPTION("Mem to mem device framework for vb2");
MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>");
MODULE_LICENSE("GPL");
27 | |
28 | static bool debug; |
29 | module_param(debug, bool, 0644); |
30 | |
31 | #define dprintk(fmt, arg...) \ |
32 | do { \ |
33 | if (debug) \ |
34 | printk(KERN_DEBUG "%s: " fmt, __func__, ## arg);\ |
35 | } while (0) |
36 | |
37 | |
38 | /* Instance is already queued on the job_queue */ |
39 | #define TRANS_QUEUED (1 << 0) |
40 | /* Instance is currently running in hardware */ |
41 | #define TRANS_RUNNING (1 << 1) |
42 | /* Instance is currently aborting */ |
43 | #define TRANS_ABORT (1 << 2) |
44 | |
45 | |
46 | /* The job queue is not running new jobs */ |
47 | #define QUEUE_PAUSED (1 << 0) |
48 | |
49 | |
50 | /* Offset base for buffers on the destination queue - used to distinguish |
51 | * between source and destination buffers when mmapping - they receive the same |
52 | * offsets but for different queues */ |
53 | #define DST_QUEUE_OFF_BASE (1 << 30) |
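
/*
 * Worked example (informational): with DST_QUEUE_OFF_BASE == 1 << 30
 * (0x40000000), a userspace mmap() offset of 0x40001000 selects the
 * CAPTURE queue and is translated back to offset 0x1000 within that queue,
 * while an offset of 0x1000 selects the OUTPUT queue unchanged; see
 * v4l2_m2m_adjust_mem_offset() and v4l2_m2m_mmap() below.
 */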
54 | |
55 | enum v4l2_m2m_entity_type { |
56 | MEM2MEM_ENT_TYPE_SOURCE, |
57 | MEM2MEM_ENT_TYPE_SINK, |
58 | MEM2MEM_ENT_TYPE_PROC |
59 | }; |
60 | |
61 | static const char * const m2m_entity_name[] = { |
	"source",
	"sink",
	"proc"
65 | }; |
66 | |
67 | /** |
68 | * struct v4l2_m2m_dev - per-device context |
69 | * @source: &struct media_entity pointer with the source entity |
70 | * Used only when the M2M device is registered via |
71 | * v4l2_m2m_register_media_controller(). |
72 | * @source_pad: &struct media_pad with the source pad. |
73 | * Used only when the M2M device is registered via |
74 | * v4l2_m2m_register_media_controller(). |
75 | * @sink: &struct media_entity pointer with the sink entity |
76 | * Used only when the M2M device is registered via |
77 | * v4l2_m2m_register_media_controller(). |
78 | * @sink_pad: &struct media_pad with the sink pad. |
79 | * Used only when the M2M device is registered via |
80 | * v4l2_m2m_register_media_controller(). |
81 | * @proc: &struct media_entity pointer with the M2M device itself. |
82 | * @proc_pads: &struct media_pad with the @proc pads. |
83 | * Used only when the M2M device is registered via |
 *		v4l2_m2m_register_media_controller().
 * @intf_devnode: &struct media_intf devnode pointer with the interface
 *		which controls the M2M device.
87 | * @curr_ctx: currently running instance |
88 | * @job_queue: instances queued to run |
89 | * @job_spinlock: protects job_queue |
90 | * @job_work: worker to run queued jobs. |
91 | * @job_queue_flags: flags of the queue status, %QUEUE_PAUSED. |
92 | * @m2m_ops: driver callbacks |
93 | */ |
94 | struct v4l2_m2m_dev { |
95 | struct v4l2_m2m_ctx *curr_ctx; |
96 | #ifdef CONFIG_MEDIA_CONTROLLER |
97 | struct media_entity *source; |
98 | struct media_pad source_pad; |
99 | struct media_entity sink; |
100 | struct media_pad sink_pad; |
101 | struct media_entity proc; |
102 | struct media_pad proc_pads[2]; |
103 | struct media_intf_devnode *intf_devnode; |
104 | #endif |
105 | |
106 | struct list_head job_queue; |
107 | spinlock_t job_spinlock; |
108 | struct work_struct job_work; |
109 | unsigned long job_queue_flags; |
110 | |
111 | const struct v4l2_m2m_ops *m2m_ops; |
112 | }; |
113 | |
114 | static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx, |
115 | enum v4l2_buf_type type) |
116 | { |
117 | if (V4L2_TYPE_IS_OUTPUT(type)) |
118 | return &m2m_ctx->out_q_ctx; |
119 | else |
120 | return &m2m_ctx->cap_q_ctx; |
121 | } |
122 | |
123 | struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx, |
124 | enum v4l2_buf_type type) |
125 | { |
126 | struct v4l2_m2m_queue_ctx *q_ctx; |
127 | |
128 | q_ctx = get_queue_ctx(m2m_ctx, type); |
129 | if (!q_ctx) |
130 | return NULL; |
131 | |
132 | return &q_ctx->q; |
133 | } |
134 | EXPORT_SYMBOL(v4l2_m2m_get_vq); |
135 | |
136 | struct vb2_v4l2_buffer *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx) |
137 | { |
138 | struct v4l2_m2m_buffer *b; |
139 | unsigned long flags; |
140 | |
141 | spin_lock_irqsave(&q_ctx->rdy_spinlock, flags); |
142 | |
	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}

	b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
150 | return &b->vb; |
151 | } |
152 | EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf); |
153 | |
154 | struct vb2_v4l2_buffer *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx) |
155 | { |
156 | struct v4l2_m2m_buffer *b; |
157 | unsigned long flags; |
158 | |
159 | spin_lock_irqsave(&q_ctx->rdy_spinlock, flags); |
160 | |
	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}

	b = list_last_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
168 | return &b->vb; |
169 | } |
170 | EXPORT_SYMBOL_GPL(v4l2_m2m_last_buf); |
171 | |
172 | struct vb2_v4l2_buffer *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx) |
173 | { |
174 | struct v4l2_m2m_buffer *b; |
175 | unsigned long flags; |
176 | |
177 | spin_lock_irqsave(&q_ctx->rdy_spinlock, flags); |
	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}
	b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	list_del(&b->list);
	q_ctx->num_rdy--;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
186 | |
187 | return &b->vb; |
188 | } |
189 | EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove); |
190 | |
191 | void v4l2_m2m_buf_remove_by_buf(struct v4l2_m2m_queue_ctx *q_ctx, |
192 | struct vb2_v4l2_buffer *vbuf) |
193 | { |
194 | struct v4l2_m2m_buffer *b; |
195 | unsigned long flags; |
196 | |
197 | spin_lock_irqsave(&q_ctx->rdy_spinlock, flags); |
198 | b = container_of(vbuf, struct v4l2_m2m_buffer, vb); |
	list_del(&b->list);
	q_ctx->num_rdy--;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
202 | } |
203 | EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_buf); |
204 | |
205 | struct vb2_v4l2_buffer * |
v4l2_m2m_buf_remove_by_idx(struct v4l2_m2m_queue_ctx *q_ctx, unsigned int idx)
{
209 | struct v4l2_m2m_buffer *b, *tmp; |
210 | struct vb2_v4l2_buffer *ret = NULL; |
211 | unsigned long flags; |
212 | |
213 | spin_lock_irqsave(&q_ctx->rdy_spinlock, flags); |
214 | list_for_each_entry_safe(b, tmp, &q_ctx->rdy_queue, list) { |
215 | if (b->vb.vb2_buf.index == idx) { |
			list_del(&b->list);
			q_ctx->num_rdy--;
			ret = &b->vb;
			break;
		}
	}
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
223 | |
224 | return ret; |
225 | } |
226 | EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_idx); |
227 | |
228 | /* |
229 | * Scheduling handlers |
230 | */ |
231 | |
232 | void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev) |
233 | { |
234 | unsigned long flags; |
235 | void *ret = NULL; |
236 | |
237 | spin_lock_irqsave(&m2m_dev->job_spinlock, flags); |
238 | if (m2m_dev->curr_ctx) |
239 | ret = m2m_dev->curr_ctx->priv; |
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
241 | |
242 | return ret; |
243 | } |
244 | EXPORT_SYMBOL(v4l2_m2m_get_curr_priv); |
245 | |
246 | /** |
247 | * v4l2_m2m_try_run() - select next job to perform and run it if possible |
248 | * @m2m_dev: per-device context |
249 | * |
250 | * Get next transaction (if present) from the waiting jobs list and run it. |
251 | * |
252 | * Note that this function can run on a given v4l2_m2m_ctx context, |
253 | * but call .device_run for another context. |
254 | */ |
255 | static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev) |
256 | { |
257 | unsigned long flags; |
258 | |
	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (NULL != m2m_dev->curr_ctx) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Another instance is running, won't run now\n");
		return;
	}

	if (list_empty(&m2m_dev->job_queue)) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("No job pending\n");
		return;
	}

	if (m2m_dev->job_queue_flags & QUEUE_PAUSED) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Running new jobs is paused\n");
		return;
	}

	m2m_dev->curr_ctx = list_first_entry(&m2m_dev->job_queue,
					     struct v4l2_m2m_ctx, queue);
	m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	dprintk("Running job on m2m_ctx: %p\n", m2m_dev->curr_ctx);
	m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
285 | } |
286 | |
287 | /* |
288 | * __v4l2_m2m_try_queue() - queue a job |
289 | * @m2m_dev: m2m device |
290 | * @m2m_ctx: m2m context |
291 | * |
292 | * Check if this context is ready to queue a job. |
293 | * |
294 | * This function can run in interrupt context. |
295 | */ |
296 | static void __v4l2_m2m_try_queue(struct v4l2_m2m_dev *m2m_dev, |
297 | struct v4l2_m2m_ctx *m2m_ctx) |
298 | { |
299 | unsigned long flags_job; |
300 | struct vb2_v4l2_buffer *dst, *src; |
301 | |
	dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);

	if (!m2m_ctx->out_q_ctx.q.streaming ||
	    (!m2m_ctx->cap_q_ctx.q.streaming && !m2m_ctx->ignore_cap_streaming)) {
		if (!m2m_ctx->ignore_cap_streaming)
			dprintk("Streaming needs to be on for both queues\n");
		else
			dprintk("Streaming needs to be on for the OUTPUT queue\n");
		return;
311 | } |
312 | |
313 | spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job); |
314 | |
315 | /* If the context is aborted then don't schedule it */ |
316 | if (m2m_ctx->job_flags & TRANS_ABORT) { |
		dprintk("Aborted context\n");
318 | goto job_unlock; |
319 | } |
320 | |
321 | if (m2m_ctx->job_flags & TRANS_QUEUED) { |
		dprintk("On job queue already\n");
323 | goto job_unlock; |
324 | } |
325 | |
326 | src = v4l2_m2m_next_src_buf(m2m_ctx); |
327 | dst = v4l2_m2m_next_dst_buf(m2m_ctx); |
328 | if (!src && !m2m_ctx->out_q_ctx.buffered) { |
		dprintk("No input buffers available\n");
330 | goto job_unlock; |
331 | } |
332 | if (!dst && !m2m_ctx->cap_q_ctx.buffered) { |
		dprintk("No output buffers available\n");
334 | goto job_unlock; |
335 | } |
336 | |
337 | m2m_ctx->new_frame = true; |
338 | |
339 | if (src && dst && dst->is_held && |
340 | dst->vb2_buf.copied_timestamp && |
341 | dst->vb2_buf.timestamp != src->vb2_buf.timestamp) { |
		dprintk("Timestamp mismatch, returning held capture buffer\n");
		dst->is_held = false;
		v4l2_m2m_dst_buf_remove(m2m_ctx);
		v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
		dst = v4l2_m2m_next_dst_buf(m2m_ctx);

		if (!dst && !m2m_ctx->cap_q_ctx.buffered) {
			dprintk("No output buffers available after returning held buffer\n");
350 | goto job_unlock; |
351 | } |
352 | } |
353 | |
354 | if (src && dst && (m2m_ctx->out_q_ctx.q.subsystem_flags & |
355 | VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF)) |
356 | m2m_ctx->new_frame = !dst->vb2_buf.copied_timestamp || |
357 | dst->vb2_buf.timestamp != src->vb2_buf.timestamp; |
358 | |
359 | if (m2m_ctx->has_stopped) { |
		dprintk("Device has stopped\n");
361 | goto job_unlock; |
362 | } |
363 | |
364 | if (m2m_dev->m2m_ops->job_ready |
365 | && (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) { |
		dprintk("Driver not ready\n");
367 | goto job_unlock; |
368 | } |
369 | |
	list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
371 | m2m_ctx->job_flags |= TRANS_QUEUED; |
372 | |
373 | job_unlock: |
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
375 | } |
376 | |
377 | /** |
378 | * v4l2_m2m_try_schedule() - schedule and possibly run a job for any context |
379 | * @m2m_ctx: m2m context |
380 | * |
381 | * Check if this context is ready to queue a job. If suitable, |
382 | * run the next queued job on the mem2mem device. |
383 | * |
384 | * This function shouldn't run in interrupt context. |
385 | * |
386 | * Note that v4l2_m2m_try_schedule() can schedule one job for this context, |
387 | * and then run another job for another context. |
388 | */ |
389 | void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx) |
390 | { |
391 | struct v4l2_m2m_dev *m2m_dev = m2m_ctx->m2m_dev; |
392 | |
393 | __v4l2_m2m_try_queue(m2m_dev, m2m_ctx); |
394 | v4l2_m2m_try_run(m2m_dev); |
395 | } |
396 | EXPORT_SYMBOL_GPL(v4l2_m2m_try_schedule); |
397 | |
398 | /** |
399 | * v4l2_m2m_device_run_work() - run pending jobs for the context |
400 | * @work: Work structure used for scheduling the execution of this function. |
401 | */ |
402 | static void v4l2_m2m_device_run_work(struct work_struct *work) |
403 | { |
404 | struct v4l2_m2m_dev *m2m_dev = |
405 | container_of(work, struct v4l2_m2m_dev, job_work); |
406 | |
407 | v4l2_m2m_try_run(m2m_dev); |
408 | } |
409 | |
410 | /** |
411 | * v4l2_m2m_cancel_job() - cancel pending jobs for the context |
412 | * @m2m_ctx: m2m context with jobs to be canceled |
413 | * |
414 | * In case of streamoff or release called on any context, |
415 | * 1] If the context is currently running, then abort job will be called |
416 | * 2] If the context is queued, then the context will be removed from |
417 | * the job_queue |
418 | */ |
419 | static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx) |
420 | { |
421 | struct v4l2_m2m_dev *m2m_dev; |
422 | unsigned long flags; |
423 | |
424 | m2m_dev = m2m_ctx->m2m_dev; |
425 | spin_lock_irqsave(&m2m_dev->job_spinlock, flags); |
426 | |
427 | m2m_ctx->job_flags |= TRANS_ABORT; |
428 | if (m2m_ctx->job_flags & TRANS_RUNNING) { |
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		if (m2m_dev->m2m_ops->job_abort)
			m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
		dprintk("m2m_ctx %p running, will wait to complete\n", m2m_ctx);
		wait_event(m2m_ctx->finished,
			   !(m2m_ctx->job_flags & TRANS_RUNNING));
	} else if (m2m_ctx->job_flags & TRANS_QUEUED) {
		list_del(&m2m_ctx->queue);
		m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("m2m_ctx: %p had been on queue and was removed\n",
			m2m_ctx);
	} else {
		/* Do nothing, was not on queue/running */
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
444 | } |
445 | } |
446 | |
447 | /* |
448 | * Schedule the next job, called from v4l2_m2m_job_finish() or |
449 | * v4l2_m2m_buf_done_and_job_finish(). |
450 | */ |
451 | static void v4l2_m2m_schedule_next_job(struct v4l2_m2m_dev *m2m_dev, |
452 | struct v4l2_m2m_ctx *m2m_ctx) |
453 | { |
454 | /* |
455 | * This instance might have more buffers ready, but since we do not |
456 | * allow more than one job on the job_queue per instance, each has |
457 | * to be scheduled separately after the previous one finishes. |
458 | */ |
459 | __v4l2_m2m_try_queue(m2m_dev, m2m_ctx); |
460 | |
461 | /* |
462 | * We might be running in atomic context, |
463 | * but the job must be run in non-atomic context. |
464 | */ |
	schedule_work(&m2m_dev->job_work);
466 | } |
467 | |
468 | /* |
469 | * Assumes job_spinlock is held, called from v4l2_m2m_job_finish() or |
470 | * v4l2_m2m_buf_done_and_job_finish(). |
471 | */ |
472 | static bool _v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev, |
473 | struct v4l2_m2m_ctx *m2m_ctx) |
474 | { |
475 | if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) { |
		dprintk("Called by an instance not currently running\n");
477 | return false; |
478 | } |
479 | |
	list_del(&m2m_dev->curr_ctx->queue);
481 | m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING); |
482 | wake_up(&m2m_dev->curr_ctx->finished); |
483 | m2m_dev->curr_ctx = NULL; |
484 | return true; |
485 | } |
486 | |
487 | void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev, |
488 | struct v4l2_m2m_ctx *m2m_ctx) |
489 | { |
490 | unsigned long flags; |
491 | bool schedule_next; |
492 | |
493 | /* |
494 | * This function should not be used for drivers that support |
495 | * holding capture buffers. Those should use |
496 | * v4l2_m2m_buf_done_and_job_finish() instead. |
497 | */ |
498 | WARN_ON(m2m_ctx->out_q_ctx.q.subsystem_flags & |
499 | VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF); |
500 | spin_lock_irqsave(&m2m_dev->job_spinlock, flags); |
501 | schedule_next = _v4l2_m2m_job_finish(m2m_dev, m2m_ctx); |
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
503 | |
504 | if (schedule_next) |
505 | v4l2_m2m_schedule_next_job(m2m_dev, m2m_ctx); |
506 | } |
507 | EXPORT_SYMBOL(v4l2_m2m_job_finish); |
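
/*
 * Usage sketch (hypothetical driver, for illustration only - the foo_*
 * names are not part of this framework): a typical completion interrupt
 * handler removes the processed buffers, marks them done and then calls
 * v4l2_m2m_job_finish() so the next job can be scheduled:
 *
 *	static irqreturn_t foo_irq(int irq, void *priv)
 *	{
 *		struct foo_dev *dev = priv;
 *		struct foo_ctx *ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
 *		struct vb2_v4l2_buffer *src, *dst;
 *
 *		src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
 *		dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
 *		v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
 *		v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
 *		v4l2_m2m_job_finish(dev->m2m_dev, ctx->fh.m2m_ctx);
 *		return IRQ_HANDLED;
 *	}
 */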
508 | |
509 | void v4l2_m2m_buf_done_and_job_finish(struct v4l2_m2m_dev *m2m_dev, |
510 | struct v4l2_m2m_ctx *m2m_ctx, |
511 | enum vb2_buffer_state state) |
512 | { |
513 | struct vb2_v4l2_buffer *src_buf, *dst_buf; |
514 | bool schedule_next = false; |
515 | unsigned long flags; |
516 | |
517 | spin_lock_irqsave(&m2m_dev->job_spinlock, flags); |
518 | src_buf = v4l2_m2m_src_buf_remove(m2m_ctx); |
519 | dst_buf = v4l2_m2m_next_dst_buf(m2m_ctx); |
520 | |
521 | if (WARN_ON(!src_buf || !dst_buf)) |
522 | goto unlock; |
523 | dst_buf->is_held = src_buf->flags & V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF; |
524 | if (!dst_buf->is_held) { |
525 | v4l2_m2m_dst_buf_remove(m2m_ctx); |
		v4l2_m2m_buf_done(dst_buf, state);
527 | } |
528 | /* |
529 | * If the request API is being used, returning the OUTPUT |
530 | * (src) buffer will wake-up any process waiting on the |
531 | * request file descriptor. |
532 | * |
533 | * Therefore, return the CAPTURE (dst) buffer first, |
534 | * to avoid signalling the request file descriptor |
535 | * before the CAPTURE buffer is done. |
536 | */ |
	v4l2_m2m_buf_done(src_buf, state);
538 | schedule_next = _v4l2_m2m_job_finish(m2m_dev, m2m_ctx); |
539 | unlock: |
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
541 | |
542 | if (schedule_next) |
543 | v4l2_m2m_schedule_next_job(m2m_dev, m2m_ctx); |
544 | } |
545 | EXPORT_SYMBOL(v4l2_m2m_buf_done_and_job_finish); |
546 | |
547 | void v4l2_m2m_suspend(struct v4l2_m2m_dev *m2m_dev) |
548 | { |
549 | unsigned long flags; |
550 | struct v4l2_m2m_ctx *curr_ctx; |
551 | |
552 | spin_lock_irqsave(&m2m_dev->job_spinlock, flags); |
553 | m2m_dev->job_queue_flags |= QUEUE_PAUSED; |
554 | curr_ctx = m2m_dev->curr_ctx; |
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
556 | |
557 | if (curr_ctx) |
558 | wait_event(curr_ctx->finished, |
559 | !(curr_ctx->job_flags & TRANS_RUNNING)); |
560 | } |
561 | EXPORT_SYMBOL(v4l2_m2m_suspend); |
562 | |
563 | void v4l2_m2m_resume(struct v4l2_m2m_dev *m2m_dev) |
564 | { |
565 | unsigned long flags; |
566 | |
567 | spin_lock_irqsave(&m2m_dev->job_spinlock, flags); |
568 | m2m_dev->job_queue_flags &= ~QUEUE_PAUSED; |
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
570 | |
571 | v4l2_m2m_try_run(m2m_dev); |
572 | } |
573 | EXPORT_SYMBOL(v4l2_m2m_resume); |
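
/*
 * Usage sketch (hypothetical, not part of this file): v4l2_m2m_suspend()
 * and v4l2_m2m_resume() pair naturally with a driver's PM callbacks:
 *
 *	static int foo_pm_suspend(struct device *dev)
 *	{
 *		struct foo_dev *foo = dev_get_drvdata(dev);
 *
 *		v4l2_m2m_suspend(foo->m2m_dev);
 *		return 0;
 *	}
 *
 *	static int foo_pm_resume(struct device *dev)
 *	{
 *		struct foo_dev *foo = dev_get_drvdata(dev);
 *
 *		v4l2_m2m_resume(foo->m2m_dev);
 *		return 0;
 *	}
 */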
574 | |
575 | int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
576 | struct v4l2_requestbuffers *reqbufs) |
577 | { |
578 | struct vb2_queue *vq; |
579 | int ret; |
580 | |
581 | vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type); |
	ret = vb2_reqbufs(vq, reqbufs);
	/* If count == 0, then the owner has released all buffers and they
	   are no longer the owner of the queue. Otherwise we have an owner. */
585 | if (ret == 0) |
586 | vq->owner = reqbufs->count ? file->private_data : NULL; |
587 | |
588 | return ret; |
589 | } |
590 | EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs); |
591 | |
592 | static void v4l2_m2m_adjust_mem_offset(struct vb2_queue *vq, |
593 | struct v4l2_buffer *buf) |
594 | { |
595 | /* Adjust MMAP memory offsets for the CAPTURE queue */ |
596 | if (buf->memory == V4L2_MEMORY_MMAP && V4L2_TYPE_IS_CAPTURE(vq->type)) { |
597 | if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) { |
598 | unsigned int i; |
599 | |
600 | for (i = 0; i < buf->length; ++i) |
601 | buf->m.planes[i].m.mem_offset |
602 | += DST_QUEUE_OFF_BASE; |
603 | } else { |
604 | buf->m.offset += DST_QUEUE_OFF_BASE; |
605 | } |
606 | } |
607 | } |
608 | |
609 | int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
610 | struct v4l2_buffer *buf) |
611 | { |
612 | struct vb2_queue *vq; |
613 | int ret; |
614 | |
615 | vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); |
	ret = vb2_querybuf(vq, buf);
617 | if (ret) |
618 | return ret; |
619 | |
620 | /* Adjust MMAP memory offsets for the CAPTURE queue */ |
621 | v4l2_m2m_adjust_mem_offset(vq, buf); |
622 | |
623 | return 0; |
624 | } |
625 | EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf); |
626 | |
627 | /* |
628 | * This will add the LAST flag and mark the buffer management |
629 | * state as stopped. |
630 | * This is called when the last capture buffer must be flagged as LAST |
631 | * in draining mode from the encoder/decoder driver buf_queue() callback |
632 | * or from v4l2_update_last_buf_state() when a capture buffer is available. |
633 | */ |
634 | void v4l2_m2m_last_buffer_done(struct v4l2_m2m_ctx *m2m_ctx, |
635 | struct vb2_v4l2_buffer *vbuf) |
636 | { |
637 | vbuf->flags |= V4L2_BUF_FLAG_LAST; |
	vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_DONE);
639 | |
640 | v4l2_m2m_mark_stopped(m2m_ctx); |
641 | } |
642 | EXPORT_SYMBOL_GPL(v4l2_m2m_last_buffer_done); |
643 | |
644 | /* When stop command is issued, update buffer management state */ |
645 | static int v4l2_update_last_buf_state(struct v4l2_m2m_ctx *m2m_ctx) |
646 | { |
647 | struct vb2_v4l2_buffer *next_dst_buf; |
648 | |
649 | if (m2m_ctx->is_draining) |
650 | return -EBUSY; |
651 | |
652 | if (m2m_ctx->has_stopped) |
653 | return 0; |
654 | |
655 | m2m_ctx->last_src_buf = v4l2_m2m_last_src_buf(m2m_ctx); |
656 | m2m_ctx->is_draining = true; |
657 | |
658 | /* |
659 | * The processing of the last output buffer queued before |
660 | * the STOP command is expected to mark the buffer management |
661 | * state as stopped with v4l2_m2m_mark_stopped(). |
662 | */ |
663 | if (m2m_ctx->last_src_buf) |
664 | return 0; |
665 | |
666 | /* |
667 | * In case the output queue is empty, try to mark the last capture |
668 | * buffer as LAST. |
669 | */ |
670 | next_dst_buf = v4l2_m2m_dst_buf_remove(m2m_ctx); |
671 | if (!next_dst_buf) { |
672 | /* |
673 | * Wait for the next queued one in encoder/decoder driver |
674 | * buf_queue() callback using the v4l2_m2m_dst_buf_is_last() |
675 | * helper or in v4l2_m2m_qbuf() if encoder/decoder is not yet |
676 | * streaming. |
677 | */ |
678 | m2m_ctx->next_buf_last = true; |
679 | return 0; |
680 | } |
681 | |
682 | v4l2_m2m_last_buffer_done(m2m_ctx, next_dst_buf); |
683 | |
684 | return 0; |
685 | } |
686 | |
687 | /* |
 * Updates the encoding/decoding buffer management state; this should
 * be called from the encoder/decoder driver's start_streaming() callback.
690 | */ |
691 | void v4l2_m2m_update_start_streaming_state(struct v4l2_m2m_ctx *m2m_ctx, |
692 | struct vb2_queue *q) |
693 | { |
	/* If streaming is started again, untag the last output buffer */
695 | if (V4L2_TYPE_IS_OUTPUT(q->type)) |
696 | m2m_ctx->last_src_buf = NULL; |
697 | } |
698 | EXPORT_SYMBOL_GPL(v4l2_m2m_update_start_streaming_state); |
699 | |
700 | /* |
 * Updates the encoding/decoding buffer management state; this should
 * be called from the encoder/decoder driver's stop_streaming() callback.
703 | */ |
704 | void v4l2_m2m_update_stop_streaming_state(struct v4l2_m2m_ctx *m2m_ctx, |
705 | struct vb2_queue *q) |
706 | { |
707 | if (V4L2_TYPE_IS_OUTPUT(q->type)) { |
708 | /* |
		 * If in draining state, either mark the next dst buffer as
		 * done, or flag the next queued one to be marked as done,
		 * either in the encoder/decoder driver buf_queue() callback
		 * using the v4l2_m2m_dst_buf_is_last() helper or in
		 * v4l2_m2m_qbuf() if the encoder/decoder is not yet streaming.
714 | */ |
715 | if (m2m_ctx->is_draining) { |
716 | struct vb2_v4l2_buffer *next_dst_buf; |
717 | |
718 | m2m_ctx->last_src_buf = NULL; |
719 | next_dst_buf = v4l2_m2m_dst_buf_remove(m2m_ctx); |
720 | if (!next_dst_buf) |
721 | m2m_ctx->next_buf_last = true; |
722 | else |
723 | v4l2_m2m_last_buffer_done(m2m_ctx, |
724 | next_dst_buf); |
725 | } |
726 | } else { |
727 | v4l2_m2m_clear_state(m2m_ctx); |
728 | } |
729 | } |
730 | EXPORT_SYMBOL_GPL(v4l2_m2m_update_stop_streaming_state); |
731 | |
732 | static void v4l2_m2m_force_last_buf_done(struct v4l2_m2m_ctx *m2m_ctx, |
733 | struct vb2_queue *q) |
734 | { |
735 | struct vb2_buffer *vb; |
736 | struct vb2_v4l2_buffer *vbuf; |
737 | unsigned int i; |
738 | |
739 | if (WARN_ON(q->is_output)) |
740 | return; |
	if (list_empty(&q->queued_list))
742 | return; |
743 | |
744 | vb = list_first_entry(&q->queued_list, struct vb2_buffer, queued_entry); |
745 | for (i = 0; i < vb->num_planes; i++) |
		vb2_set_plane_payload(vb, i, 0);
747 | |
748 | /* |
749 | * Since the buffer hasn't been queued to the ready queue, |
	 * mark it active and owned before marking it LAST and DONE
751 | */ |
752 | vb->state = VB2_BUF_STATE_ACTIVE; |
	atomic_inc(&q->owned_by_drv_count);
754 | |
755 | vbuf = to_vb2_v4l2_buffer(vb); |
756 | vbuf->field = V4L2_FIELD_NONE; |
757 | |
758 | v4l2_m2m_last_buffer_done(m2m_ctx, vbuf); |
759 | } |
760 | |
761 | int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
762 | struct v4l2_buffer *buf) |
763 | { |
764 | struct video_device *vdev = video_devdata(file); |
765 | struct vb2_queue *vq; |
766 | int ret; |
767 | |
768 | vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); |
769 | if (V4L2_TYPE_IS_CAPTURE(vq->type) && |
770 | (buf->flags & V4L2_BUF_FLAG_REQUEST_FD)) { |
		dprintk("%s: requests cannot be used with capture buffers\n",
772 | __func__); |
773 | return -EPERM; |
774 | } |
775 | |
	ret = vb2_qbuf(vq, vdev->v4l2_dev->mdev, buf);
777 | if (ret) |
778 | return ret; |
779 | |
780 | /* Adjust MMAP memory offsets for the CAPTURE queue */ |
781 | v4l2_m2m_adjust_mem_offset(vq, buf); |
782 | |
783 | /* |
	 * If the capture queue is streaming, but streaming hasn't started
	 * on the device yet and the device was asked to stop, mark the
	 * previously queued buffer as DONE with the LAST flag since it
	 * won't be queued on the device.
788 | */ |
789 | if (V4L2_TYPE_IS_CAPTURE(vq->type) && |
	    vb2_is_streaming(vq) && !vb2_start_streaming_called(vq) &&
	    (v4l2_m2m_has_stopped(m2m_ctx) || v4l2_m2m_dst_buf_is_last(m2m_ctx)))
		v4l2_m2m_force_last_buf_done(m2m_ctx, vq);
793 | else if (!(buf->flags & V4L2_BUF_FLAG_IN_REQUEST)) |
794 | v4l2_m2m_try_schedule(m2m_ctx); |
795 | |
796 | return 0; |
797 | } |
798 | EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf); |
799 | |
800 | int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
801 | struct v4l2_buffer *buf) |
802 | { |
803 | struct vb2_queue *vq; |
804 | int ret; |
805 | |
806 | vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); |
	ret = vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
808 | if (ret) |
809 | return ret; |
810 | |
811 | /* Adjust MMAP memory offsets for the CAPTURE queue */ |
812 | v4l2_m2m_adjust_mem_offset(vq, buf); |
813 | |
814 | return 0; |
815 | } |
816 | EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf); |
817 | |
818 | int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
819 | struct v4l2_buffer *buf) |
820 | { |
821 | struct video_device *vdev = video_devdata(file); |
822 | struct vb2_queue *vq; |
823 | int ret; |
824 | |
825 | vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); |
	ret = vb2_prepare_buf(vq, vdev->v4l2_dev->mdev, buf);
827 | if (ret) |
828 | return ret; |
829 | |
830 | /* Adjust MMAP memory offsets for the CAPTURE queue */ |
831 | v4l2_m2m_adjust_mem_offset(vq, buf); |
832 | |
833 | return 0; |
834 | } |
835 | EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf); |
836 | |
837 | int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
838 | struct v4l2_create_buffers *create) |
839 | { |
840 | struct vb2_queue *vq; |
841 | |
842 | vq = v4l2_m2m_get_vq(m2m_ctx, create->format.type); |
	return vb2_create_bufs(vq, create);
844 | } |
845 | EXPORT_SYMBOL_GPL(v4l2_m2m_create_bufs); |
846 | |
847 | int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
848 | struct v4l2_exportbuffer *eb) |
849 | { |
850 | struct vb2_queue *vq; |
851 | |
852 | vq = v4l2_m2m_get_vq(m2m_ctx, eb->type); |
	return vb2_expbuf(vq, eb);
854 | } |
855 | EXPORT_SYMBOL_GPL(v4l2_m2m_expbuf); |
856 | |
857 | int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
858 | enum v4l2_buf_type type) |
859 | { |
860 | struct vb2_queue *vq; |
861 | int ret; |
862 | |
863 | vq = v4l2_m2m_get_vq(m2m_ctx, type); |
	ret = vb2_streamon(vq, type);
865 | if (!ret) |
866 | v4l2_m2m_try_schedule(m2m_ctx); |
867 | |
868 | return ret; |
869 | } |
870 | EXPORT_SYMBOL_GPL(v4l2_m2m_streamon); |
871 | |
872 | int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
873 | enum v4l2_buf_type type) |
874 | { |
875 | struct v4l2_m2m_dev *m2m_dev; |
876 | struct v4l2_m2m_queue_ctx *q_ctx; |
877 | unsigned long flags_job, flags; |
878 | int ret; |
879 | |
880 | /* wait until the current context is dequeued from job_queue */ |
881 | v4l2_m2m_cancel_job(m2m_ctx); |
882 | |
883 | q_ctx = get_queue_ctx(m2m_ctx, type); |
	ret = vb2_streamoff(&q_ctx->q, type);
885 | if (ret) |
886 | return ret; |
887 | |
888 | m2m_dev = m2m_ctx->m2m_dev; |
889 | spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job); |
890 | /* We should not be scheduled anymore, since we're dropping a queue. */ |
891 | if (m2m_ctx->job_flags & TRANS_QUEUED) |
		list_del(&m2m_ctx->queue);
893 | m2m_ctx->job_flags = 0; |
894 | |
895 | spin_lock_irqsave(&q_ctx->rdy_spinlock, flags); |
896 | /* Drop queue, since streamoff returns device to the same state as after |
897 | * calling reqbufs. */ |
	INIT_LIST_HEAD(&q_ctx->rdy_queue);
	q_ctx->num_rdy = 0;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
901 | |
902 | if (m2m_dev->curr_ctx == m2m_ctx) { |
903 | m2m_dev->curr_ctx = NULL; |
904 | wake_up(&m2m_ctx->finished); |
905 | } |
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
907 | |
908 | return 0; |
909 | } |
910 | EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff); |
911 | |
912 | static __poll_t v4l2_m2m_poll_for_data(struct file *file, |
913 | struct v4l2_m2m_ctx *m2m_ctx, |
914 | struct poll_table_struct *wait) |
915 | { |
916 | struct vb2_queue *src_q, *dst_q; |
917 | __poll_t rc = 0; |
918 | unsigned long flags; |
919 | |
920 | src_q = v4l2_m2m_get_src_vq(m2m_ctx); |
921 | dst_q = v4l2_m2m_get_dst_vq(m2m_ctx); |
922 | |
923 | /* |
924 | * There has to be at least one buffer queued on each queued_list, which |
925 | * means either in driver already or waiting for driver to claim it |
926 | * and start processing. |
927 | */ |
	if ((!vb2_is_streaming(src_q) || src_q->error ||
	     list_empty(&src_q->queued_list)) &&
	    (!vb2_is_streaming(dst_q) || dst_q->error ||
	     (list_empty(&dst_q->queued_list) && !dst_q->last_buffer_dequeued)))
932 | return EPOLLERR; |
933 | |
	spin_lock_irqsave(&src_q->done_lock, flags);
	if (!list_empty(&src_q->done_list))
		rc |= EPOLLOUT | EPOLLWRNORM;
	spin_unlock_irqrestore(&src_q->done_lock, flags);

	spin_lock_irqsave(&dst_q->done_lock, flags);
	/*
	 * If the last buffer was dequeued from the capture queue, signal
	 * userspace. DQBUF(CAPTURE) will return -EPIPE.
	 */
	if (!list_empty(&dst_q->done_list) || dst_q->last_buffer_dequeued)
		rc |= EPOLLIN | EPOLLRDNORM;
	spin_unlock_irqrestore(&dst_q->done_lock, flags);
947 | |
948 | return rc; |
949 | } |
950 | |
951 | __poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
952 | struct poll_table_struct *wait) |
953 | { |
954 | struct video_device *vfd = video_devdata(file); |
955 | struct vb2_queue *src_q = v4l2_m2m_get_src_vq(m2m_ctx); |
956 | struct vb2_queue *dst_q = v4l2_m2m_get_dst_vq(m2m_ctx); |
	__poll_t req_events = poll_requested_events(wait);
958 | __poll_t rc = 0; |
959 | |
960 | /* |
961 | * poll_wait() MUST be called on the first invocation on all the |
962 | * potential queues of interest, even if we are not interested in their |
963 | * events during this first call. Failure to do so will result in |
964 | * queue's events to be ignored because the poll_table won't be capable |
965 | * of adding new wait queues thereafter. |
966 | */ |
	poll_wait(file, &src_q->done_wq, wait);
	poll_wait(file, &dst_q->done_wq, wait);
969 | |
970 | if (req_events & (EPOLLOUT | EPOLLWRNORM | EPOLLIN | EPOLLRDNORM)) |
971 | rc = v4l2_m2m_poll_for_data(file, m2m_ctx, wait); |
972 | |
973 | if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) { |
974 | struct v4l2_fh *fh = file->private_data; |
975 | |
		poll_wait(file, &fh->wait, wait);
977 | if (v4l2_event_pending(fh)) |
978 | rc |= EPOLLPRI; |
979 | } |
980 | |
981 | return rc; |
982 | } |
983 | EXPORT_SYMBOL_GPL(v4l2_m2m_poll); |
984 | |
985 | int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
986 | struct vm_area_struct *vma) |
987 | { |
988 | unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; |
989 | struct vb2_queue *vq; |
990 | |
991 | if (offset < DST_QUEUE_OFF_BASE) { |
992 | vq = v4l2_m2m_get_src_vq(m2m_ctx); |
993 | } else { |
994 | vq = v4l2_m2m_get_dst_vq(m2m_ctx); |
995 | vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT); |
996 | } |
997 | |
	return vb2_mmap(vq, vma);
999 | } |
1000 | EXPORT_SYMBOL(v4l2_m2m_mmap); |
1001 | |
1002 | #ifndef CONFIG_MMU |
1003 | unsigned long v4l2_m2m_get_unmapped_area(struct file *file, unsigned long addr, |
1004 | unsigned long len, unsigned long pgoff, |
1005 | unsigned long flags) |
1006 | { |
1007 | struct v4l2_fh *fh = file->private_data; |
1008 | unsigned long offset = pgoff << PAGE_SHIFT; |
1009 | struct vb2_queue *vq; |
1010 | |
1011 | if (offset < DST_QUEUE_OFF_BASE) { |
1012 | vq = v4l2_m2m_get_src_vq(fh->m2m_ctx); |
1013 | } else { |
1014 | vq = v4l2_m2m_get_dst_vq(fh->m2m_ctx); |
1015 | pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT); |
1016 | } |
1017 | |
1018 | return vb2_get_unmapped_area(vq, addr, len, pgoff, flags); |
1019 | } |
1020 | EXPORT_SYMBOL_GPL(v4l2_m2m_get_unmapped_area); |
1021 | #endif |
1022 | |
1023 | #if defined(CONFIG_MEDIA_CONTROLLER) |
1024 | void v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev) |
1025 | { |
	media_remove_intf_links(&m2m_dev->intf_devnode->intf);
	media_devnode_remove(m2m_dev->intf_devnode);

	media_entity_remove_links(m2m_dev->source);
	media_entity_remove_links(&m2m_dev->sink);
	media_entity_remove_links(&m2m_dev->proc);
	media_device_unregister_entity(m2m_dev->source);
	media_device_unregister_entity(&m2m_dev->sink);
	media_device_unregister_entity(&m2m_dev->proc);
	kfree(m2m_dev->source->name);
	kfree(m2m_dev->sink.name);
	kfree(m2m_dev->proc.name);
1038 | } |
1039 | EXPORT_SYMBOL_GPL(v4l2_m2m_unregister_media_controller); |
1040 | |
1041 | static int v4l2_m2m_register_entity(struct media_device *mdev, |
1042 | struct v4l2_m2m_dev *m2m_dev, enum v4l2_m2m_entity_type type, |
1043 | struct video_device *vdev, int function) |
1044 | { |
1045 | struct media_entity *entity; |
1046 | struct media_pad *pads; |
1047 | char *name; |
1048 | unsigned int len; |
1049 | int num_pads; |
1050 | int ret; |
1051 | |
1052 | switch (type) { |
1053 | case MEM2MEM_ENT_TYPE_SOURCE: |
1054 | entity = m2m_dev->source; |
1055 | pads = &m2m_dev->source_pad; |
1056 | pads[0].flags = MEDIA_PAD_FL_SOURCE; |
1057 | num_pads = 1; |
1058 | break; |
1059 | case MEM2MEM_ENT_TYPE_SINK: |
1060 | entity = &m2m_dev->sink; |
1061 | pads = &m2m_dev->sink_pad; |
1062 | pads[0].flags = MEDIA_PAD_FL_SINK; |
1063 | num_pads = 1; |
1064 | break; |
1065 | case MEM2MEM_ENT_TYPE_PROC: |
1066 | entity = &m2m_dev->proc; |
1067 | pads = m2m_dev->proc_pads; |
1068 | pads[0].flags = MEDIA_PAD_FL_SINK; |
1069 | pads[1].flags = MEDIA_PAD_FL_SOURCE; |
1070 | num_pads = 2; |
1071 | break; |
1072 | default: |
1073 | return -EINVAL; |
1074 | } |
1075 | |
1076 | entity->obj_type = MEDIA_ENTITY_TYPE_BASE; |
1077 | if (type != MEM2MEM_ENT_TYPE_PROC) { |
1078 | entity->info.dev.major = VIDEO_MAJOR; |
1079 | entity->info.dev.minor = vdev->minor; |
1080 | } |
1081 | len = strlen(vdev->name) + 2 + strlen(m2m_entity_name[type]); |
	name = kmalloc(len, GFP_KERNEL);
	if (!name)
		return -ENOMEM;
	snprintf(name, len, "%s-%s", vdev->name, m2m_entity_name[type]);
1086 | entity->name = name; |
1087 | entity->function = function; |
1088 | |
1089 | ret = media_entity_pads_init(entity, num_pads, pads); |
1090 | if (ret) { |
		kfree(entity->name);
1092 | entity->name = NULL; |
1093 | return ret; |
1094 | } |
1095 | ret = media_device_register_entity(mdev, entity); |
1096 | if (ret) { |
		kfree(entity->name);
1098 | entity->name = NULL; |
1099 | return ret; |
1100 | } |
1101 | |
1102 | return 0; |
1103 | } |
1104 | |
1105 | int v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev, |
1106 | struct video_device *vdev, int function) |
1107 | { |
1108 | struct media_device *mdev = vdev->v4l2_dev->mdev; |
1109 | struct media_link *link; |
1110 | int ret; |
1111 | |
1112 | if (!mdev) |
1113 | return 0; |
1114 | |
	/*
	 * A memory-to-memory device consists of two DMA engine entities and
	 * one video processing entity. The DMA engine entities are linked to
	 * a V4L interface.
	 */
1119 | |
1120 | /* Create the three entities with their pads */ |
1121 | m2m_dev->source = &vdev->entity; |
	ret = v4l2_m2m_register_entity(mdev, m2m_dev,
			MEM2MEM_ENT_TYPE_SOURCE, vdev, MEDIA_ENT_F_IO_V4L);
1124 | if (ret) |
1125 | return ret; |
	ret = v4l2_m2m_register_entity(mdev, m2m_dev,
			MEM2MEM_ENT_TYPE_PROC, vdev, function);
1128 | if (ret) |
1129 | goto err_rel_entity0; |
	ret = v4l2_m2m_register_entity(mdev, m2m_dev,
			MEM2MEM_ENT_TYPE_SINK, vdev, MEDIA_ENT_F_IO_V4L);
1132 | if (ret) |
1133 | goto err_rel_entity1; |
1134 | |
1135 | /* Connect the three entities */ |
	ret = media_create_pad_link(m2m_dev->source, 0, &m2m_dev->proc, 0,
			MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
1138 | if (ret) |
1139 | goto err_rel_entity2; |
1140 | |
	ret = media_create_pad_link(&m2m_dev->proc, 1, &m2m_dev->sink, 0,
1142 | MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED); |
1143 | if (ret) |
1144 | goto err_rm_links0; |
1145 | |
1146 | /* Create video interface */ |
	m2m_dev->intf_devnode = media_devnode_create(mdev,
			MEDIA_INTF_T_V4L_VIDEO, 0,
			VIDEO_MAJOR, vdev->minor);
1150 | if (!m2m_dev->intf_devnode) { |
1151 | ret = -ENOMEM; |
1152 | goto err_rm_links1; |
1153 | } |
1154 | |
1155 | /* Connect the two DMA engines to the interface */ |
	link = media_create_intf_link(m2m_dev->source,
			&m2m_dev->intf_devnode->intf,
			MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
1159 | if (!link) { |
1160 | ret = -ENOMEM; |
1161 | goto err_rm_devnode; |
1162 | } |
1163 | |
	link = media_create_intf_link(&m2m_dev->sink,
			&m2m_dev->intf_devnode->intf,
			MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
1167 | if (!link) { |
1168 | ret = -ENOMEM; |
1169 | goto err_rm_intf_link; |
1170 | } |
1171 | return 0; |
1172 | |
err_rm_intf_link:
	media_remove_intf_links(&m2m_dev->intf_devnode->intf);
err_rm_devnode:
	media_devnode_remove(m2m_dev->intf_devnode);
err_rm_links1:
	media_entity_remove_links(&m2m_dev->sink);
err_rm_links0:
	media_entity_remove_links(&m2m_dev->proc);
	media_entity_remove_links(m2m_dev->source);
err_rel_entity2:
	media_device_unregister_entity(&m2m_dev->proc);
	kfree(m2m_dev->proc.name);
err_rel_entity1:
	media_device_unregister_entity(&m2m_dev->sink);
	kfree(m2m_dev->sink.name);
err_rel_entity0:
	media_device_unregister_entity(m2m_dev->source);
	kfree(m2m_dev->source->name);
	return ret;
1193 | } |
1194 | EXPORT_SYMBOL_GPL(v4l2_m2m_register_media_controller); |
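
/*
 * Usage sketch (hypothetical driver): this is typically called once from
 * probe, after video_register_device(), when a media_device is present;
 * the function code below is only one possible choice:
 *
 *	ret = v4l2_m2m_register_media_controller(dev->m2m_dev, vfd,
 *						 MEDIA_ENT_F_PROC_VIDEO_SCALER);
 */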
1195 | #endif |
1196 | |
1197 | struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops) |
1198 | { |
1199 | struct v4l2_m2m_dev *m2m_dev; |
1200 | |
1201 | if (!m2m_ops || WARN_ON(!m2m_ops->device_run)) |
		return ERR_PTR(-EINVAL);

	m2m_dev = kzalloc(sizeof *m2m_dev, GFP_KERNEL);
	if (!m2m_dev)
		return ERR_PTR(-ENOMEM);

	m2m_dev->curr_ctx = NULL;
	m2m_dev->m2m_ops = m2m_ops;
	INIT_LIST_HEAD(&m2m_dev->job_queue);
1211 | spin_lock_init(&m2m_dev->job_spinlock); |
1212 | INIT_WORK(&m2m_dev->job_work, v4l2_m2m_device_run_work); |
1213 | |
1214 | return m2m_dev; |
1215 | } |
1216 | EXPORT_SYMBOL_GPL(v4l2_m2m_init); |
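
/*
 * Usage sketch (hypothetical driver): only .device_run is mandatory,
 * .job_ready and .job_abort are optional:
 *
 *	static const struct v4l2_m2m_ops foo_m2m_ops = {
 *		.device_run	= foo_device_run,
 *		.job_abort	= foo_job_abort,
 *	};
 *
 *	dev->m2m_dev = v4l2_m2m_init(&foo_m2m_ops);
 *	if (IS_ERR(dev->m2m_dev))
 *		return PTR_ERR(dev->m2m_dev);
 */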
1217 | |
1218 | void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev) |
1219 | { |
	kfree(m2m_dev);
1221 | } |
1222 | EXPORT_SYMBOL_GPL(v4l2_m2m_release); |
1223 | |
1224 | struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev, |
1225 | void *drv_priv, |
1226 | int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)) |
1227 | { |
1228 | struct v4l2_m2m_ctx *m2m_ctx; |
1229 | struct v4l2_m2m_queue_ctx *out_q_ctx, *cap_q_ctx; |
1230 | int ret; |
1231 | |
	m2m_ctx = kzalloc(sizeof *m2m_ctx, GFP_KERNEL);
	if (!m2m_ctx)
		return ERR_PTR(-ENOMEM);
1235 | |
1236 | m2m_ctx->priv = drv_priv; |
1237 | m2m_ctx->m2m_dev = m2m_dev; |
1238 | init_waitqueue_head(&m2m_ctx->finished); |
1239 | |
1240 | out_q_ctx = &m2m_ctx->out_q_ctx; |
1241 | cap_q_ctx = &m2m_ctx->cap_q_ctx; |
1242 | |
	INIT_LIST_HEAD(&out_q_ctx->rdy_queue);
	INIT_LIST_HEAD(&cap_q_ctx->rdy_queue);
	spin_lock_init(&out_q_ctx->rdy_spinlock);
	spin_lock_init(&cap_q_ctx->rdy_spinlock);

	INIT_LIST_HEAD(&m2m_ctx->queue);
1249 | |
1250 | ret = queue_init(drv_priv, &out_q_ctx->q, &cap_q_ctx->q); |
1251 | |
1252 | if (ret) |
1253 | goto err; |
1254 | /* |
	 * Both queues should use the same mutex to lock the m2m context.
1256 | * This lock is used in some v4l2_m2m_* helpers. |
1257 | */ |
1258 | if (WARN_ON(out_q_ctx->q.lock != cap_q_ctx->q.lock)) { |
1259 | ret = -EINVAL; |
1260 | goto err; |
1261 | } |
1262 | m2m_ctx->q_lock = out_q_ctx->q.lock; |
1263 | |
1264 | return m2m_ctx; |
1265 | err: |
	kfree(m2m_ctx);
	return ERR_PTR(ret);
1268 | } |
1269 | EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init); |
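
/*
 * Usage sketch (hypothetical driver): v4l2_m2m_ctx_init() is normally
 * called from the file open() handler with a queue_init callback that sets
 * up both vb2 queues and points them at one shared mutex:
 *
 *	static int foo_queue_init(void *priv, struct vb2_queue *src_vq,
 *				  struct vb2_queue *dst_vq)
 *	{
 *		struct foo_ctx *ctx = priv;
 *		int ret;
 *
 *		src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
 *		src_vq->drv_priv = ctx;
 *		src_vq->lock = &ctx->dev->dev_mutex;
 *		(fill in ops, mem_ops, buf_struct_size, timestamp_flags)
 *		ret = vb2_queue_init(src_vq);
 *		if (ret)
 *			return ret;
 *
 *		dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
 *		dst_vq->drv_priv = ctx;
 *		dst_vq->lock = &ctx->dev->dev_mutex;
 *		(fill in the remaining capture queue fields likewise)
 *		return vb2_queue_init(dst_vq);
 *	}
 *
 *	ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, foo_queue_init);
 */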
1270 | |
1271 | void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx) |
1272 | { |
1273 | /* wait until the current context is dequeued from job_queue */ |
1274 | v4l2_m2m_cancel_job(m2m_ctx); |
1275 | |
	vb2_queue_release(&m2m_ctx->cap_q_ctx.q);
	vb2_queue_release(&m2m_ctx->out_q_ctx.q);

	kfree(m2m_ctx);
1280 | } |
1281 | EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release); |
1282 | |
1283 | void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, |
1284 | struct vb2_v4l2_buffer *vbuf) |
1285 | { |
1286 | struct v4l2_m2m_buffer *b = container_of(vbuf, |
1287 | struct v4l2_m2m_buffer, vb); |
1288 | struct v4l2_m2m_queue_ctx *q_ctx; |
1289 | unsigned long flags; |
1290 | |
	q_ctx = get_queue_ctx(m2m_ctx, vbuf->vb2_buf.vb2_queue->type);
1292 | if (!q_ctx) |
1293 | return; |
1294 | |
1295 | spin_lock_irqsave(&q_ctx->rdy_spinlock, flags); |
	list_add_tail(&b->list, &q_ctx->rdy_queue);
	q_ctx->num_rdy++;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
1299 | } |
1300 | EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue); |
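
/*
 * Usage sketch (hypothetical driver): drivers normally call
 * v4l2_m2m_buf_queue() from their vb2 .buf_queue callback and let the
 * framework decide when both queues have enough buffers to run a job:
 *
 *	static void foo_buf_queue(struct vb2_buffer *vb)
 *	{
 *		struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
 *		struct foo_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
 *
 *		v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
 *	}
 */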
1301 | |
1302 | void v4l2_m2m_buf_copy_metadata(const struct vb2_v4l2_buffer *out_vb, |
1303 | struct vb2_v4l2_buffer *cap_vb, |
1304 | bool copy_frame_flags) |
1305 | { |
1306 | u32 mask = V4L2_BUF_FLAG_TIMECODE | V4L2_BUF_FLAG_TSTAMP_SRC_MASK; |
1307 | |
1308 | if (copy_frame_flags) |
1309 | mask |= V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_PFRAME | |
1310 | V4L2_BUF_FLAG_BFRAME; |
1311 | |
1312 | cap_vb->vb2_buf.timestamp = out_vb->vb2_buf.timestamp; |
1313 | |
1314 | if (out_vb->flags & V4L2_BUF_FLAG_TIMECODE) |
1315 | cap_vb->timecode = out_vb->timecode; |
1316 | cap_vb->field = out_vb->field; |
1317 | cap_vb->flags &= ~mask; |
1318 | cap_vb->flags |= out_vb->flags & mask; |
1319 | cap_vb->vb2_buf.copied_timestamp = 1; |
1320 | } |
1321 | EXPORT_SYMBOL_GPL(v4l2_m2m_buf_copy_metadata); |
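
/*
 * Usage sketch (hypothetical driver): .device_run typically peeks at the
 * next source/destination buffers, copies the timestamp and related flags
 * across with v4l2_m2m_buf_copy_metadata(), then starts the hardware:
 *
 *	static void foo_device_run(void *priv)
 *	{
 *		struct foo_ctx *ctx = priv;
 *		struct vb2_v4l2_buffer *src, *dst;
 *
 *		src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
 *		dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
 *		v4l2_m2m_buf_copy_metadata(src, dst, true);
 *		(program and start the hardware here)
 *	}
 */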
1322 | |
1323 | void v4l2_m2m_request_queue(struct media_request *req) |
1324 | { |
1325 | struct media_request_object *obj, *obj_safe; |
1326 | struct v4l2_m2m_ctx *m2m_ctx = NULL; |
1327 | |
1328 | /* |
1329 | * Queue all objects. Note that buffer objects are at the end of the |
1330 | * objects list, after all other object types. Once buffer objects |
1331 | * are queued, the driver might delete them immediately (if the driver |
1332 | * processes the buffer at once), so we have to use |
1333 | * list_for_each_entry_safe() to handle the case where the object we |
1334 | * queue is deleted. |
1335 | */ |
1336 | list_for_each_entry_safe(obj, obj_safe, &req->objects, list) { |
1337 | struct v4l2_m2m_ctx *m2m_ctx_obj; |
1338 | struct vb2_buffer *vb; |
1339 | |
1340 | if (!obj->ops->queue) |
1341 | continue; |
1342 | |
1343 | if (vb2_request_object_is_buffer(obj)) { |
1344 | /* Sanity checks */ |
1345 | vb = container_of(obj, struct vb2_buffer, req_obj); |
1346 | WARN_ON(!V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)); |
1347 | m2m_ctx_obj = container_of(vb->vb2_queue, |
1348 | struct v4l2_m2m_ctx, |
1349 | out_q_ctx.q); |
1350 | WARN_ON(m2m_ctx && m2m_ctx_obj != m2m_ctx); |
1351 | m2m_ctx = m2m_ctx_obj; |
1352 | } |
1353 | |
1354 | /* |
1355 | * The buffer we queue here can in theory be immediately |
1356 | * unbound, hence the use of list_for_each_entry_safe() |
1357 | * above and why we call the queue op last. |
1358 | */ |
1359 | obj->ops->queue(obj); |
1360 | } |
1361 | |
1362 | WARN_ON(!m2m_ctx); |
1363 | |
1364 | if (m2m_ctx) |
1365 | v4l2_m2m_try_schedule(m2m_ctx); |
1366 | } |
1367 | EXPORT_SYMBOL_GPL(v4l2_m2m_request_queue); |
1368 | |
1369 | /* Videobuf2 ioctl helpers */ |
1370 | |
1371 | int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv, |
1372 | struct v4l2_requestbuffers *rb) |
1373 | { |
1374 | struct v4l2_fh *fh = file->private_data; |
1375 | |
1376 | return v4l2_m2m_reqbufs(file, fh->m2m_ctx, rb); |
1377 | } |
1378 | EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_reqbufs); |
1379 | |
1380 | int v4l2_m2m_ioctl_create_bufs(struct file *file, void *priv, |
1381 | struct v4l2_create_buffers *create) |
1382 | { |
1383 | struct v4l2_fh *fh = file->private_data; |
1384 | |
1385 | return v4l2_m2m_create_bufs(file, fh->m2m_ctx, create); |
1386 | } |
1387 | EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_create_bufs); |
1388 | |
1389 | int v4l2_m2m_ioctl_querybuf(struct file *file, void *priv, |
1390 | struct v4l2_buffer *buf) |
1391 | { |
1392 | struct v4l2_fh *fh = file->private_data; |
1393 | |
1394 | return v4l2_m2m_querybuf(file, fh->m2m_ctx, buf); |
1395 | } |
1396 | EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_querybuf); |
1397 | |
1398 | int v4l2_m2m_ioctl_qbuf(struct file *file, void *priv, |
1399 | struct v4l2_buffer *buf) |
1400 | { |
1401 | struct v4l2_fh *fh = file->private_data; |
1402 | |
1403 | return v4l2_m2m_qbuf(file, fh->m2m_ctx, buf); |
1404 | } |
1405 | EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_qbuf); |
1406 | |
1407 | int v4l2_m2m_ioctl_dqbuf(struct file *file, void *priv, |
1408 | struct v4l2_buffer *buf) |
1409 | { |
1410 | struct v4l2_fh *fh = file->private_data; |
1411 | |
1412 | return v4l2_m2m_dqbuf(file, fh->m2m_ctx, buf); |
1413 | } |
1414 | EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_dqbuf); |
1415 | |
1416 | int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *priv, |
1417 | struct v4l2_buffer *buf) |
1418 | { |
1419 | struct v4l2_fh *fh = file->private_data; |
1420 | |
1421 | return v4l2_m2m_prepare_buf(file, fh->m2m_ctx, buf); |
1422 | } |
1423 | EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_prepare_buf); |
1424 | |
1425 | int v4l2_m2m_ioctl_expbuf(struct file *file, void *priv, |
1426 | struct v4l2_exportbuffer *eb) |
1427 | { |
1428 | struct v4l2_fh *fh = file->private_data; |
1429 | |
1430 | return v4l2_m2m_expbuf(file, fh->m2m_ctx, eb); |
1431 | } |
1432 | EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_expbuf); |
1433 | |
1434 | int v4l2_m2m_ioctl_streamon(struct file *file, void *priv, |
1435 | enum v4l2_buf_type type) |
1436 | { |
1437 | struct v4l2_fh *fh = file->private_data; |
1438 | |
1439 | return v4l2_m2m_streamon(file, fh->m2m_ctx, type); |
1440 | } |
1441 | EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamon); |
1442 | |
1443 | int v4l2_m2m_ioctl_streamoff(struct file *file, void *priv, |
1444 | enum v4l2_buf_type type) |
1445 | { |
1446 | struct v4l2_fh *fh = file->private_data; |
1447 | |
1448 | return v4l2_m2m_streamoff(file, fh->m2m_ctx, type); |
1449 | } |
1450 | EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamoff); |
1451 | |
1452 | int v4l2_m2m_ioctl_try_encoder_cmd(struct file *file, void *fh, |
1453 | struct v4l2_encoder_cmd *ec) |
1454 | { |
1455 | if (ec->cmd != V4L2_ENC_CMD_STOP && ec->cmd != V4L2_ENC_CMD_START) |
1456 | return -EINVAL; |
1457 | |
1458 | ec->flags = 0; |
1459 | return 0; |
1460 | } |
1461 | EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_try_encoder_cmd); |
1462 | |
1463 | int v4l2_m2m_ioctl_try_decoder_cmd(struct file *file, void *fh, |
1464 | struct v4l2_decoder_cmd *dc) |
1465 | { |
1466 | if (dc->cmd != V4L2_DEC_CMD_STOP && dc->cmd != V4L2_DEC_CMD_START) |
1467 | return -EINVAL; |
1468 | |
1469 | dc->flags = 0; |
1470 | |
1471 | if (dc->cmd == V4L2_DEC_CMD_STOP) { |
1472 | dc->stop.pts = 0; |
1473 | } else if (dc->cmd == V4L2_DEC_CMD_START) { |
1474 | dc->start.speed = 0; |
1475 | dc->start.format = V4L2_DEC_START_FMT_NONE; |
1476 | } |
1477 | return 0; |
1478 | } |
1479 | EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_try_decoder_cmd); |
1480 | |
1481 | /* |
1482 | * Updates the encoding state on ENC_CMD_STOP/ENC_CMD_START |
1483 | * Should be called from the encoder driver encoder_cmd() callback |
1484 | */ |
1485 | int v4l2_m2m_encoder_cmd(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
1486 | struct v4l2_encoder_cmd *ec) |
1487 | { |
1488 | if (ec->cmd != V4L2_ENC_CMD_STOP && ec->cmd != V4L2_ENC_CMD_START) |
1489 | return -EINVAL; |
1490 | |
1491 | if (ec->cmd == V4L2_ENC_CMD_STOP) |
1492 | return v4l2_update_last_buf_state(m2m_ctx); |
1493 | |
1494 | if (m2m_ctx->is_draining) |
1495 | return -EBUSY; |
1496 | |
1497 | if (m2m_ctx->has_stopped) |
1498 | m2m_ctx->has_stopped = false; |
1499 | |
1500 | return 0; |
1501 | } |
1502 | EXPORT_SYMBOL_GPL(v4l2_m2m_encoder_cmd); |
1503 | |
1504 | /* |
1505 | * Updates the decoding state on DEC_CMD_STOP/DEC_CMD_START |
1506 | * Should be called from the decoder driver decoder_cmd() callback |
1507 | */ |
1508 | int v4l2_m2m_decoder_cmd(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, |
1509 | struct v4l2_decoder_cmd *dc) |
1510 | { |
1511 | if (dc->cmd != V4L2_DEC_CMD_STOP && dc->cmd != V4L2_DEC_CMD_START) |
1512 | return -EINVAL; |
1513 | |
1514 | if (dc->cmd == V4L2_DEC_CMD_STOP) |
1515 | return v4l2_update_last_buf_state(m2m_ctx); |
1516 | |
1517 | if (m2m_ctx->is_draining) |
1518 | return -EBUSY; |
1519 | |
1520 | if (m2m_ctx->has_stopped) |
1521 | m2m_ctx->has_stopped = false; |
1522 | |
1523 | return 0; |
1524 | } |
1525 | EXPORT_SYMBOL_GPL(v4l2_m2m_decoder_cmd); |
1526 | |
1527 | int v4l2_m2m_ioctl_encoder_cmd(struct file *file, void *priv, |
1528 | struct v4l2_encoder_cmd *ec) |
1529 | { |
1530 | struct v4l2_fh *fh = file->private_data; |
1531 | |
1532 | return v4l2_m2m_encoder_cmd(file, fh->m2m_ctx, ec); |
1533 | } |
1534 | EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_encoder_cmd); |
1535 | |
1536 | int v4l2_m2m_ioctl_decoder_cmd(struct file *file, void *priv, |
1537 | struct v4l2_decoder_cmd *dc) |
1538 | { |
1539 | struct v4l2_fh *fh = file->private_data; |
1540 | |
1541 | return v4l2_m2m_decoder_cmd(file, fh->m2m_ctx, dc); |
1542 | } |
1543 | EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_decoder_cmd); |
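
/*
 * Usage sketch (hypothetical driver): stateful codec drivers usually wire
 * these helpers straight into their v4l2_ioctl_ops, e.g.:
 *
 *	.vidioc_try_encoder_cmd	= v4l2_m2m_ioctl_try_encoder_cmd,
 *	.vidioc_encoder_cmd	= v4l2_m2m_ioctl_encoder_cmd,
 *	.vidioc_try_decoder_cmd	= v4l2_m2m_ioctl_try_decoder_cmd,
 *	.vidioc_decoder_cmd	= v4l2_m2m_ioctl_decoder_cmd,
 */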
1544 | |
1545 | int v4l2_m2m_ioctl_stateless_try_decoder_cmd(struct file *file, void *fh, |
1546 | struct v4l2_decoder_cmd *dc) |
1547 | { |
1548 | if (dc->cmd != V4L2_DEC_CMD_FLUSH) |
1549 | return -EINVAL; |
1550 | |
1551 | dc->flags = 0; |
1552 | |
1553 | return 0; |
1554 | } |
1555 | EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_stateless_try_decoder_cmd); |
1556 | |
1557 | int v4l2_m2m_ioctl_stateless_decoder_cmd(struct file *file, void *priv, |
1558 | struct v4l2_decoder_cmd *dc) |
1559 | { |
1560 | struct v4l2_fh *fh = file->private_data; |
1561 | struct vb2_v4l2_buffer *out_vb, *cap_vb; |
1562 | struct v4l2_m2m_dev *m2m_dev = fh->m2m_ctx->m2m_dev; |
1563 | unsigned long flags; |
1564 | int ret; |
1565 | |
1566 | ret = v4l2_m2m_ioctl_stateless_try_decoder_cmd(file, priv, dc); |
1567 | if (ret < 0) |
1568 | return ret; |
1569 | |
1570 | spin_lock_irqsave(&m2m_dev->job_spinlock, flags); |
	out_vb = v4l2_m2m_last_src_buf(fh->m2m_ctx);
	cap_vb = v4l2_m2m_last_dst_buf(fh->m2m_ctx);
1573 | |
1574 | /* |
1575 | * If there is an out buffer pending, then clear any HOLD flag. |
1576 | * |
1577 | * By clearing this flag we ensure that when this output |
1578 | * buffer is processed any held capture buffer will be released. |
1579 | */ |
1580 | if (out_vb) { |
1581 | out_vb->flags &= ~V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF; |
1582 | } else if (cap_vb && cap_vb->is_held) { |
1583 | /* |
1584 | * If there were no output buffers, but there is a |
1585 | * capture buffer that is held, then release that |
1586 | * buffer. |
1587 | */ |
1588 | cap_vb->is_held = false; |
		v4l2_m2m_dst_buf_remove(fh->m2m_ctx);
		v4l2_m2m_buf_done(cap_vb, VB2_BUF_STATE_DONE);
1591 | } |
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
1593 | |
1594 | return 0; |
1595 | } |
1596 | EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_stateless_decoder_cmd); |
1597 | |
1598 | /* |
 * v4l2_file_operations helpers. It is assumed here that the same lock is
 * used for both the output and the capture buffer queues.
1601 | */ |
1602 | |
1603 | int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma) |
1604 | { |
1605 | struct v4l2_fh *fh = file->private_data; |
1606 | |
1607 | return v4l2_m2m_mmap(file, fh->m2m_ctx, vma); |
1608 | } |
1609 | EXPORT_SYMBOL_GPL(v4l2_m2m_fop_mmap); |
1610 | |
1611 | __poll_t v4l2_m2m_fop_poll(struct file *file, poll_table *wait) |
1612 | { |
1613 | struct v4l2_fh *fh = file->private_data; |
1614 | struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx; |
1615 | __poll_t ret; |
1616 | |
1617 | if (m2m_ctx->q_lock) |
1618 | mutex_lock(m2m_ctx->q_lock); |
1619 | |
1620 | ret = v4l2_m2m_poll(file, m2m_ctx, wait); |
1621 | |
1622 | if (m2m_ctx->q_lock) |
		mutex_unlock(m2m_ctx->q_lock);
1624 | |
1625 | return ret; |
1626 | } |
1627 | EXPORT_SYMBOL_GPL(v4l2_m2m_fop_poll); |
1628 | |
1629 | |