// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 - 2018 Intel Corporation
 * Copyright 2017 Google LLC
 *
 * Based on Intel IPU4 driver.
 *
 */

#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>

#include "ipu3.h"
#include "ipu3-dmamap.h"
#include "ipu3-mmu.h"

#define IMGU_PCI_ID			0x1919
#define IMGU_PCI_BAR			0
#define IMGU_DMA_MASK			DMA_BIT_MASK(39)
#define IMGU_MAX_QUEUE_DEPTH		(2 + 2)

/*
 * Pre-allocated buffer sizes for IMGU dummy buffers. These values
 * should be tuned large enough to avoid buffer re-allocation while
 * streaming, in order to keep streaming latency low.
 */
#define CSS_QUEUE_IN_BUF_SIZE		0
#define CSS_QUEUE_PARAMS_BUF_SIZE	0
#define CSS_QUEUE_OUT_BUF_SIZE		(4160 * 3120 * 12 / 8)
#define CSS_QUEUE_VF_BUF_SIZE		(1920 * 1080 * 12 / 8)
#define CSS_QUEUE_STAT_3A_BUF_SIZE	sizeof(struct ipu3_uapi_stats_3a)

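/*
 * For reference, a sketch of the sizing math above, assuming ~1.5 bytes
 * per pixel as in 4:2:0 formats such as NV12 (12 bits per pixel):
 *   CSS_QUEUE_OUT_BUF_SIZE = 4160 * 3120 * 12 / 8 = 19468800 bytes (~18.6 MiB)
 *   CSS_QUEUE_VF_BUF_SIZE  = 1920 * 1080 * 12 / 8 =  3110400 bytes (~3.0 MiB)
 */
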
static const size_t css_queue_buf_size_map[IPU3_CSS_QUEUES] = {
	[IPU3_CSS_QUEUE_IN] = CSS_QUEUE_IN_BUF_SIZE,
	[IPU3_CSS_QUEUE_PARAMS] = CSS_QUEUE_PARAMS_BUF_SIZE,
	[IPU3_CSS_QUEUE_OUT] = CSS_QUEUE_OUT_BUF_SIZE,
	[IPU3_CSS_QUEUE_VF] = CSS_QUEUE_VF_BUF_SIZE,
	[IPU3_CSS_QUEUE_STAT_3A] = CSS_QUEUE_STAT_3A_BUF_SIZE,
};

static const struct imgu_node_mapping imgu_node_map[IMGU_NODE_NUM] = {
	[IMGU_NODE_IN] = {IPU3_CSS_QUEUE_IN, "input"},
	[IMGU_NODE_PARAMS] = {IPU3_CSS_QUEUE_PARAMS, "parameters"},
	[IMGU_NODE_OUT] = {IPU3_CSS_QUEUE_OUT, "output"},
	[IMGU_NODE_VF] = {IPU3_CSS_QUEUE_VF, "viewfinder"},
	[IMGU_NODE_STAT_3A] = {IPU3_CSS_QUEUE_STAT_3A, "3a stat"},
};

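/* Map a video node to its CSS queue */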
unsigned int imgu_node_to_queue(unsigned int node)
{
	return imgu_node_map[node].css_queue;
}

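/*
 * Map a CSS queue back to its video node. Returns IMGU_NODE_NUM if no
 * node is mapped to @css_queue.
 */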
unsigned int imgu_map_node(struct imgu_device *imgu, unsigned int css_queue)
{
	unsigned int i;

	for (i = 0; i < IMGU_NODE_NUM; i++)
		if (imgu_node_map[i].css_queue == css_queue)
			break;

	return i;
}

/**************** Dummy buffers ****************/
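
/*
 * When a queue is enabled but the user has not supplied a real buffer,
 * a pre-allocated "dummy" buffer is queued to the CSS instead so that
 * the hardware pipeline can keep running. Frames landing in a dummy
 * buffer are simply discarded on dequeue.
 */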

static void imgu_dummybufs_cleanup(struct imgu_device *imgu, unsigned int pipe)
{
	unsigned int i;
	struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];

	for (i = 0; i < IPU3_CSS_QUEUES; i++)
		imgu_dmamap_free(imgu, &imgu_pipe->queues[i].dmap);
}

static int imgu_dummybufs_preallocate(struct imgu_device *imgu,
				      unsigned int pipe)
{
	unsigned int i;
	size_t size;
	struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];

	for (i = 0; i < IPU3_CSS_QUEUES; i++) {
		size = css_queue_buf_size_map[i];
		/*
		 * Do not allocate dummy buffers for the master queue;
		 * real buffers from the user are always required there.
		 */
		if (i == IMGU_QUEUE_MASTER || size == 0)
			continue;

		if (!imgu_dmamap_alloc(imgu, &imgu_pipe->queues[i].dmap,
				       size)) {
			imgu_dummybufs_cleanup(imgu, pipe);
			return -ENOMEM;
		}
	}

	return 0;
}

static int imgu_dummybufs_init(struct imgu_device *imgu, unsigned int pipe)
{
	const struct v4l2_pix_format_mplane *mpix;
	const struct v4l2_meta_format *meta;
	unsigned int i, k, node;
	size_t size;
	struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];

	/* Allocate a dummy buffer for each queue where buffer is optional */
	for (i = 0; i < IPU3_CSS_QUEUES; i++) {
		node = imgu_map_node(imgu, i);
		if (!imgu_pipe->queue_enabled[node] || i == IMGU_QUEUE_MASTER)
			continue;

		if (!imgu_pipe->nodes[IMGU_NODE_VF].enabled &&
		    i == IPU3_CSS_QUEUE_VF)
			/*
			 * Do not enable dummy buffers for VF if it is not
			 * requested by the user.
			 */
			continue;

		meta = &imgu_pipe->nodes[node].vdev_fmt.fmt.meta;
		mpix = &imgu_pipe->nodes[node].vdev_fmt.fmt.pix_mp;

		if (node == IMGU_NODE_STAT_3A || node == IMGU_NODE_PARAMS)
			size = meta->buffersize;
		else
			size = mpix->plane_fmt[0].sizeimage;

		if (imgu_css_dma_buffer_resize(imgu,
					       &imgu_pipe->queues[i].dmap,
					       size)) {
			imgu_dummybufs_cleanup(imgu, pipe);
			return -ENOMEM;
		}

		for (k = 0; k < IMGU_MAX_QUEUE_DEPTH; k++)
			imgu_css_buf_init(&imgu_pipe->queues[i].dummybufs[k], i,
					  imgu_pipe->queues[i].dmap.daddr);
	}

	return 0;
}

/* May be called from atomic context */
static struct imgu_css_buffer *imgu_dummybufs_get(struct imgu_device *imgu,
						  int queue, unsigned int pipe)
{
	unsigned int i;
	struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];

	/* Dummy buffers are not allocated for the master queue */
	if (queue == IPU3_CSS_QUEUE_IN)
		return NULL;

	if (WARN_ON(!imgu_pipe->queues[queue].dmap.vaddr))
		/* Buffer should not be allocated here */
		return NULL;

	for (i = 0; i < IMGU_MAX_QUEUE_DEPTH; i++)
		if (imgu_css_buf_state(&imgu_pipe->queues[queue].dummybufs[i]) !=
		    IPU3_CSS_BUFFER_QUEUED)
			break;

	if (i == IMGU_MAX_QUEUE_DEPTH)
		return NULL;

	imgu_css_buf_init(&imgu_pipe->queues[queue].dummybufs[i], queue,
			  imgu_pipe->queues[queue].dmap.daddr);

	return &imgu_pipe->queues[queue].dummybufs[i];
}

/* Check if given buffer is a dummy buffer */
static bool imgu_dummybufs_check(struct imgu_device *imgu,
				 struct imgu_css_buffer *buf,
				 unsigned int pipe)
{
	unsigned int i;
	struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];

	for (i = 0; i < IMGU_MAX_QUEUE_DEPTH; i++)
		if (buf == &imgu_pipe->queues[buf->queue].dummybufs[i])
			break;

	return i < IMGU_MAX_QUEUE_DEPTH;
}

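/* Complete a vb2 buffer, serialized against the rest of the driver */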
static void imgu_buffer_done(struct imgu_device *imgu, struct vb2_buffer *vb,
			     enum vb2_buffer_state state)
{
	mutex_lock(&imgu->lock);
	imgu_v4l2_buffer_done(vb, state);
	mutex_unlock(&imgu->lock);
}

static struct imgu_css_buffer *imgu_queue_getbuf(struct imgu_device *imgu,
						 unsigned int node,
						 unsigned int pipe)
{
	struct imgu_buffer *buf;
	struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];

	if (WARN_ON(node >= IMGU_NODE_NUM))
		return NULL;

	/* Find first free buffer from the node */
	list_for_each_entry(buf, &imgu_pipe->nodes[node].buffers, vid_buf.list) {
		if (imgu_css_buf_state(&buf->css_buf) == IPU3_CSS_BUFFER_NEW)
			return &buf->css_buf;
	}

	/* There were no free buffers, try to return a dummy buffer */
	return imgu_dummybufs_get(imgu, imgu_node_map[node].css_queue, pipe);
}

/*
 * Queue as many buffers to CSS as possible. If all buffers don't fit into
 * CSS buffer queues, they remain unqueued and will be queued later.
 */
int imgu_queue_buffers(struct imgu_device *imgu, bool initial, unsigned int pipe)
{
	unsigned int node;
	int r = 0;
	struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];

	if (!imgu_css_is_streaming(&imgu->css))
		return 0;

	dev_dbg(&imgu->pci_dev->dev, "Queue buffers to pipe %d", pipe);
	mutex_lock(&imgu->lock);

	if (!imgu_css_pipe_queue_empty(&imgu->css, pipe)) {
		mutex_unlock(&imgu->lock);
		return 0;
	}

	/* Buffer set is queued to FW only when input buffer is ready */
	for (node = IMGU_NODE_NUM - 1;
	     imgu_queue_getbuf(imgu, IMGU_NODE_IN, pipe);
	     node = node ? node - 1 : IMGU_NODE_NUM - 1) {
		if (node == IMGU_NODE_VF &&
		    !imgu_pipe->nodes[IMGU_NODE_VF].enabled) {
			dev_warn(&imgu->pci_dev->dev,
				 "VF not enabled, ignore queue");
			continue;
		} else if (node == IMGU_NODE_PARAMS &&
			   imgu_pipe->nodes[node].enabled) {
			struct vb2_buffer *vb;
			struct imgu_vb2_buffer *ivb;

			/* No parameters for this frame */
			if (list_empty(&imgu_pipe->nodes[node].buffers))
				continue;

			ivb = list_first_entry(&imgu_pipe->nodes[node].buffers,
					       struct imgu_vb2_buffer, list);
			list_del(&ivb->list);
			vb = &ivb->vbb.vb2_buf;
			r = imgu_css_set_parameters(&imgu->css, pipe,
						    vb2_plane_vaddr(vb, 0));
			if (r) {
				vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
				dev_warn(&imgu->pci_dev->dev,
					 "set parameters failed.");
				continue;
			}

			vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
			dev_dbg(&imgu->pci_dev->dev,
				"queue user parameters %d to css.", vb->index);
		} else if (imgu_pipe->queue_enabled[node]) {
			struct imgu_css_buffer *buf =
				imgu_queue_getbuf(imgu, node, pipe);
			struct imgu_buffer *ibuf = NULL;
			bool dummy;

			if (!buf)
				break;

			r = imgu_css_buf_queue(&imgu->css, pipe, buf);
			if (r)
				break;
			dummy = imgu_dummybufs_check(imgu, buf, pipe);
			if (!dummy)
				ibuf = container_of(buf, struct imgu_buffer,
						    css_buf);
			dev_dbg(&imgu->pci_dev->dev,
				"queue %s %s buffer %u to css da: 0x%08x\n",
				dummy ? "dummy" : "user",
				imgu_node_map[node].name,
				dummy ? 0 : ibuf->vid_buf.vbb.vb2_buf.index,
				(u32)buf->daddr);
		}
	}
	mutex_unlock(&imgu->lock);

	if (r && r != -EBUSY)
		goto failed;

	return 0;

failed:
	/*
	 * On error, mark all buffers that are not yet queued to CSS
	 * as failed.
	 */
	dev_err(&imgu->pci_dev->dev,
		"failed to queue buffer to CSS on queue %i (%d)\n",
		node, r);

	if (initial)
		/* If we were called from streamon(), no need to finish bufs */
		return r;

	for (node = 0; node < IMGU_NODE_NUM; node++) {
		struct imgu_buffer *buf, *buf0;

		if (!imgu_pipe->queue_enabled[node])
			continue;	/* Skip disabled queues */

		mutex_lock(&imgu->lock);
		list_for_each_entry_safe(buf, buf0,
					 &imgu_pipe->nodes[node].buffers,
					 vid_buf.list) {
			if (imgu_css_buf_state(&buf->css_buf) ==
			    IPU3_CSS_BUFFER_QUEUED)
				continue;	/* Was already queued, skip */

			imgu_v4l2_buffer_done(&buf->vid_buf.vbb.vb2_buf,
					      VB2_BUF_STATE_ERROR);
		}
		mutex_unlock(&imgu->lock);
	}

	return r;
}

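/*
 * Power up the ImgU and resume its MMU. The two frequency values below
 * (200 and 450, presumably MHz) are the low and high ImgU operating
 * points passed to imgu_css_set_powerup(); the high one is selected when
 * any enabled pipe has an input of at least 2048x1152 pixels.
 */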
static int imgu_powerup(struct imgu_device *imgu)
{
	int r;
	unsigned int pipe;
	unsigned int freq = 200;
	struct v4l2_mbus_framefmt *fmt;

	/* If any input is larger than 2048x1152, run imgu at high freq */
	for_each_set_bit(pipe, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM) {
		fmt = &imgu->imgu_pipe[pipe].nodes[IMGU_NODE_IN].pad_fmt;
		dev_dbg(&imgu->pci_dev->dev, "pipe %u input format = %ux%u",
			pipe, fmt->width, fmt->height);
		if ((fmt->width * fmt->height) >= (2048 * 1152))
			freq = 450;
	}

	r = imgu_css_set_powerup(&imgu->pci_dev->dev, imgu->base, freq);
	if (r)
		return r;

	imgu_mmu_resume(imgu->mmu);
	return 0;
}

static void imgu_powerdown(struct imgu_device *imgu)
{
	imgu_mmu_suspend(imgu->mmu);
	imgu_css_set_powerdown(&imgu->pci_dev->dev, imgu->base);
}

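/*
 * Start (enable != 0) or stop (enable == 0) CSS streaming. On stop, new
 * buffer queueing is blocked via qbuf_barrier before the CSS and the
 * device are powered down.
 */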
int imgu_s_stream(struct imgu_device *imgu, int enable)
{
	struct device *dev = &imgu->pci_dev->dev;
	int r, pipe;

	if (!enable) {
		/* Stop streaming */
		dev_dbg(dev, "stream off\n");
		/* Block new buffers to be queued to CSS. */
		atomic_set(&imgu->qbuf_barrier, 1);
		imgu_css_stop_streaming(&imgu->css);
		synchronize_irq(imgu->pci_dev->irq);
		atomic_set(&imgu->qbuf_barrier, 0);
		imgu_powerdown(imgu);
		pm_runtime_put(&imgu->pci_dev->dev);

		return 0;
	}

	/* Set Power */
	r = pm_runtime_resume_and_get(dev);
	if (r < 0) {
		dev_err(dev, "failed to set imgu power\n");
		return r;
	}

	r = imgu_powerup(imgu);
	if (r) {
		dev_err(dev, "failed to power up imgu\n");
		pm_runtime_put(dev);
		return r;
	}

	/* Start CSS streaming */
	r = imgu_css_start_streaming(&imgu->css);
	if (r) {
		dev_err(dev, "failed to start css streaming (%d)", r);
		goto fail_start_streaming;
	}

	for_each_set_bit(pipe, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM) {
		/* Initialize dummy buffers */
		r = imgu_dummybufs_init(imgu, pipe);
		if (r) {
			dev_err(dev, "failed to initialize dummy buffers (%d)", r);
			goto fail_dummybufs;
		}

		/* Queue as many buffers from queue as possible */
		r = imgu_queue_buffers(imgu, true, pipe);
		if (r) {
			dev_err(dev, "failed to queue initial buffers (%d)", r);
			goto fail_queueing;
		}
	}

	return 0;
fail_queueing:
	for_each_set_bit(pipe, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM)
		imgu_dummybufs_cleanup(imgu, pipe);
fail_dummybufs:
	imgu_css_stop_streaming(&imgu->css);
fail_start_streaming:
	pm_runtime_put(dev);

	return r;
}

static void imgu_video_nodes_exit(struct imgu_device *imgu)
{
	int i;

	for (i = 0; i < IMGU_MAX_PIPE_NUM; i++)
		imgu_dummybufs_cleanup(imgu, i);

	imgu_v4l2_unregister(imgu);
}

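/*
 * Set up per-node defaults for every pipe, register the V4L2 devices,
 * program initial formats into the CSS and pre-allocate the dummy
 * buffers.
 */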
static int imgu_video_nodes_init(struct imgu_device *imgu)
{
	struct v4l2_pix_format_mplane *fmts[IPU3_CSS_QUEUES] = { NULL };
	struct v4l2_rect *rects[IPU3_CSS_RECTS] = { NULL };
	struct imgu_media_pipe *imgu_pipe;
	unsigned int i, j;
	int r;

	imgu->buf_struct_size = sizeof(struct imgu_buffer);

	for (j = 0; j < IMGU_MAX_PIPE_NUM; j++) {
		imgu_pipe = &imgu->imgu_pipe[j];

		for (i = 0; i < IMGU_NODE_NUM; i++) {
			imgu_pipe->nodes[i].name = imgu_node_map[i].name;
			imgu_pipe->nodes[i].output = i < IMGU_QUEUE_FIRST_INPUT;
			imgu_pipe->nodes[i].enabled = false;

			if (i != IMGU_NODE_PARAMS && i != IMGU_NODE_STAT_3A)
				fmts[imgu_node_map[i].css_queue] =
					&imgu_pipe->nodes[i].vdev_fmt.fmt.pix_mp;
			atomic_set(&imgu_pipe->nodes[i].sequence, 0);
		}
	}

	r = imgu_v4l2_register(imgu);
	if (r)
		return r;

	/* Set initial formats and initialize formats of video nodes */
	for (j = 0; j < IMGU_MAX_PIPE_NUM; j++) {
		imgu_pipe = &imgu->imgu_pipe[j];

		rects[IPU3_CSS_RECT_EFFECTIVE] = &imgu_pipe->imgu_sd.rect.eff;
		rects[IPU3_CSS_RECT_BDS] = &imgu_pipe->imgu_sd.rect.bds;
		imgu_css_fmt_set(&imgu->css, fmts, rects, j);

		/* Pre-allocate dummy buffers */
		r = imgu_dummybufs_preallocate(imgu, j);
		if (r) {
			dev_err(&imgu->pci_dev->dev,
				"failed to pre-allocate dummy buffers (%d)", r);
			goto out_cleanup;
		}
	}

	return 0;

out_cleanup:
	imgu_video_nodes_exit(imgu);

	return r;
}

/**************** PCI interface ****************/

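/*
 * Interrupt handling is split in two: imgu_isr() runs in hard IRQ
 * context, acknowledges the interrupt in the CSS and wakes this
 * threaded handler, which dequeues completed buffers from the CSS,
 * completes the corresponding vb2 buffers and queues new work.
 */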
static irqreturn_t imgu_isr_threaded(int irq, void *imgu_ptr)
{
	struct imgu_device *imgu = imgu_ptr;
	struct imgu_media_pipe *imgu_pipe;
	int p;

	/* Dequeue / queue buffers */
	do {
		u64 ns = ktime_get_ns();
		struct imgu_css_buffer *b;
		struct imgu_buffer *buf = NULL;
		unsigned int node, pipe;
		bool dummy;

		do {
			mutex_lock(&imgu->lock);
			b = imgu_css_buf_dequeue(&imgu->css);
			mutex_unlock(&imgu->lock);
		} while (PTR_ERR(b) == -EAGAIN);

		if (IS_ERR(b)) {
			if (PTR_ERR(b) != -EBUSY)	/* All done */
				dev_err(&imgu->pci_dev->dev,
					"failed to dequeue buffers (%ld)\n",
					PTR_ERR(b));
			break;
		}

		node = imgu_map_node(imgu, b->queue);
		pipe = b->pipe;
		dummy = imgu_dummybufs_check(imgu, b, pipe);
		if (!dummy)
			buf = container_of(b, struct imgu_buffer, css_buf);
		dev_dbg(&imgu->pci_dev->dev,
			"dequeue %s %s buffer %d daddr 0x%x from css\n",
			dummy ? "dummy" : "user",
			imgu_node_map[node].name,
			dummy ? 0 : buf->vid_buf.vbb.vb2_buf.index,
			(u32)b->daddr);

		if (dummy)
			/* It was a dummy buffer, skip it */
			continue;

		/* Fill vb2 buffer entries and tell it's ready */
		imgu_pipe = &imgu->imgu_pipe[pipe];
		if (!imgu_pipe->nodes[node].output) {
			buf->vid_buf.vbb.vb2_buf.timestamp = ns;
			buf->vid_buf.vbb.field = V4L2_FIELD_NONE;
			buf->vid_buf.vbb.sequence =
				atomic_inc_return(
					&imgu_pipe->nodes[node].sequence);
			dev_dbg(&imgu->pci_dev->dev, "vb2 buffer sequence %d",
				buf->vid_buf.vbb.sequence);
		}
		imgu_buffer_done(imgu, &buf->vid_buf.vbb.vb2_buf,
				 imgu_css_buf_state(&buf->css_buf) ==
				 IPU3_CSS_BUFFER_DONE ?
				 VB2_BUF_STATE_DONE :
				 VB2_BUF_STATE_ERROR);
		mutex_lock(&imgu->lock);
		if (imgu_css_queue_empty(&imgu->css))
			wake_up_all(&imgu->buf_drain_wq);
		mutex_unlock(&imgu->lock);
	} while (1);

	/*
	 * Try to queue more buffers for CSS. qbuf_barrier is used to
	 * prevent new buffers from being queued to the CSS.
	 */
	if (!atomic_read(&imgu->qbuf_barrier))
		for_each_set_bit(p, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM)
			imgu_queue_buffers(imgu, false, p);

	return IRQ_HANDLED;
}

static irqreturn_t imgu_isr(int irq, void *imgu_ptr)
{
	struct imgu_device *imgu = imgu_ptr;

	/* Acknowledge the interrupt */
	if (imgu_css_irq_ack(&imgu->css) < 0)
		return IRQ_NONE;

	return IRQ_WAKE_THREAD;
}

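/* Enable MSI, memory space access and bus mastering; disable INTx */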
static int imgu_pci_config_setup(struct pci_dev *dev)
{
	u16 pci_command;
	int r = pci_enable_msi(dev);

	if (r) {
		dev_err(&dev->dev, "failed to enable MSI (%d)\n", r);
		return r;
	}

	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
	pci_command |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
		       PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(dev, PCI_COMMAND, pci_command);

	return 0;
}

static int imgu_pci_probe(struct pci_dev *pci_dev,
			  const struct pci_device_id *id)
{
	struct imgu_device *imgu;
	phys_addr_t phys;
	unsigned long phys_len;
	void __iomem *const *iomap;
	int r;

	imgu = devm_kzalloc(&pci_dev->dev, sizeof(*imgu), GFP_KERNEL);
	if (!imgu)
		return -ENOMEM;

	imgu->pci_dev = pci_dev;

	r = pcim_enable_device(pci_dev);
	if (r) {
		dev_err(&pci_dev->dev, "failed to enable device (%d)\n", r);
		return r;
	}

	dev_info(&pci_dev->dev, "device 0x%x (rev: 0x%x)\n",
		 pci_dev->device, pci_dev->revision);

	phys = pci_resource_start(pci_dev, IMGU_PCI_BAR);
	phys_len = pci_resource_len(pci_dev, IMGU_PCI_BAR);

	r = pcim_iomap_regions(pci_dev, 1 << IMGU_PCI_BAR, pci_name(pci_dev));
	if (r) {
		dev_err(&pci_dev->dev, "failed to remap I/O memory (%d)\n", r);
		return r;
	}
	dev_info(&pci_dev->dev, "physical base address %pap, %lu bytes\n",
		 &phys, phys_len);

	iomap = pcim_iomap_table(pci_dev);
	if (!iomap) {
		dev_err(&pci_dev->dev, "failed to iomap table\n");
		return -ENODEV;
	}

	imgu->base = iomap[IMGU_PCI_BAR];

	pci_set_drvdata(pci_dev, imgu);

	pci_set_master(pci_dev);

	r = dma_coerce_mask_and_coherent(&pci_dev->dev, IMGU_DMA_MASK);
	if (r) {
		dev_err(&pci_dev->dev, "failed to set DMA mask (%d)\n", r);
		return -ENODEV;
	}

	r = imgu_pci_config_setup(pci_dev);
	if (r)
		return r;

	mutex_init(&imgu->lock);
	mutex_init(&imgu->streaming_lock);
	atomic_set(&imgu->qbuf_barrier, 0);
	init_waitqueue_head(&imgu->buf_drain_wq);

	r = imgu_css_set_powerup(&pci_dev->dev, imgu->base, 200);
	if (r) {
		dev_err(&pci_dev->dev,
			"failed to power up CSS (%d)\n", r);
		goto out_mutex_destroy;
	}

	imgu->mmu = imgu_mmu_init(&pci_dev->dev, imgu->base);
	if (IS_ERR(imgu->mmu)) {
		r = PTR_ERR(imgu->mmu);
		dev_err(&pci_dev->dev, "failed to initialize MMU (%d)\n", r);
		goto out_css_powerdown;
	}

	r = imgu_dmamap_init(imgu);
	if (r) {
		dev_err(&pci_dev->dev,
			"failed to initialize DMA mapping (%d)\n", r);
		goto out_mmu_exit;
	}

	/* ISP programming */
	r = imgu_css_init(&pci_dev->dev, &imgu->css, imgu->base, phys_len);
	if (r) {
		dev_err(&pci_dev->dev, "failed to initialize CSS (%d)\n", r);
		goto out_dmamap_exit;
	}

	/* v4l2 sub-device registration */
	r = imgu_video_nodes_init(imgu);
	if (r) {
		dev_err(&pci_dev->dev, "failed to create V4L2 devices (%d)\n",
			r);
		goto out_css_cleanup;
	}

	r = devm_request_threaded_irq(&pci_dev->dev, pci_dev->irq,
				      imgu_isr, imgu_isr_threaded,
				      IRQF_SHARED, IMGU_NAME, imgu);
	if (r) {
		dev_err(&pci_dev->dev, "failed to request IRQ (%d)\n", r);
		goto out_video_exit;
	}

	pm_runtime_put_noidle(&pci_dev->dev);
	pm_runtime_allow(&pci_dev->dev);

	return 0;

out_video_exit:
	imgu_video_nodes_exit(imgu);
out_css_cleanup:
	imgu_css_cleanup(&imgu->css);
out_dmamap_exit:
	imgu_dmamap_exit(imgu);
out_mmu_exit:
	imgu_mmu_exit(imgu->mmu);
out_css_powerdown:
	imgu_css_set_powerdown(&pci_dev->dev, imgu->base);
out_mutex_destroy:
	mutex_destroy(&imgu->streaming_lock);
	mutex_destroy(&imgu->lock);

	return r;
}

static void imgu_pci_remove(struct pci_dev *pci_dev)
{
	struct imgu_device *imgu = pci_get_drvdata(pci_dev);

	pm_runtime_forbid(&pci_dev->dev);
	pm_runtime_get_noresume(&pci_dev->dev);

	imgu_video_nodes_exit(imgu);
	imgu_css_cleanup(&imgu->css);
	imgu_css_set_powerdown(&pci_dev->dev, imgu->base);
	imgu_dmamap_exit(imgu);
	imgu_mmu_exit(imgu->mmu);
	mutex_destroy(&imgu->streaming_lock);
	mutex_destroy(&imgu->lock);
}

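/*
 * System suspend: if streaming, block new buffer queueing, wait (with a
 * one-second timeout) for the CSS to drain its queued buffers, then stop
 * streaming and power the device down.
 */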
static int __maybe_unused imgu_suspend(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct imgu_device *imgu = pci_get_drvdata(pci_dev);

	imgu->suspend_in_stream = imgu_css_is_streaming(&imgu->css);
	if (!imgu->suspend_in_stream)
		goto out;
	/* Block new buffers to be queued to CSS. */
	atomic_set(&imgu->qbuf_barrier, 1);
	/*
	 * Wait for the currently running irq handler to be done so that
	 * no new buffers will be queued to the firmware later.
	 */
	synchronize_irq(pci_dev->irq);
	/* Wait until all buffers in CSS are done. */
	if (!wait_event_timeout(imgu->buf_drain_wq,
	    imgu_css_queue_empty(&imgu->css), msecs_to_jiffies(1000)))
		dev_err(dev, "wait buffer drain timeout.\n");

	imgu_css_stop_streaming(&imgu->css);
	atomic_set(&imgu->qbuf_barrier, 0);
	imgu_powerdown(imgu);
	pm_runtime_force_suspend(dev);
out:
	return 0;
}

static int __maybe_unused imgu_resume(struct device *dev)
{
	struct imgu_device *imgu = dev_get_drvdata(dev);
	int r = 0;
	unsigned int pipe;

	if (!imgu->suspend_in_stream)
		goto out;

	pm_runtime_force_resume(dev);

	r = imgu_powerup(imgu);
	if (r) {
		dev_err(dev, "failed to power up imgu\n");
		goto out;
	}

	/* Start CSS streaming */
	r = imgu_css_start_streaming(&imgu->css);
	if (r) {
		dev_err(dev, "failed to resume css streaming (%d)", r);
		goto out;
	}

	for_each_set_bit(pipe, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM) {
		r = imgu_queue_buffers(imgu, true, pipe);
		if (r)
			dev_err(dev, "failed to queue buffers to pipe %d (%d)",
				pipe, r);
	}

out:
	return r;
}

/*
 * The PCI runtime PM framework checks for the existence of driver
 * runtime PM callbacks. Provide a dummy callback here to keep runtime
 * PM from entering an error state.
 */
static __maybe_unused int imgu_rpm_dummy_cb(struct device *dev)
{
	return 0;
}

static const struct dev_pm_ops imgu_pm_ops = {
	SET_RUNTIME_PM_OPS(&imgu_rpm_dummy_cb, &imgu_rpm_dummy_cb, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(&imgu_suspend, &imgu_resume)
};

static const struct pci_device_id imgu_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, IMGU_PCI_ID) },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, imgu_pci_tbl);

static struct pci_driver imgu_pci_driver = {
	.name = IMGU_NAME,
	.id_table = imgu_pci_tbl,
	.probe = imgu_pci_probe,
	.remove = imgu_pci_remove,
	.driver = {
		.pm = &imgu_pm_ops,
	},
};

module_pci_driver(imgu_pci_driver);

MODULE_AUTHOR("Tuukka Toivonen <tuukka.toivonen@intel.com>");
MODULE_AUTHOR("Tianshu Qiu <tian.shu.qiu@intel.com>");
MODULE_AUTHOR("Jian Xu Zheng <jian.xu.zheng@intel.com>");
MODULE_AUTHOR("Yuning Pu <yuning.pu@intel.com>");
MODULE_AUTHOR("Yong Zhi <yong.zhi@intel.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel ipu3_imgu PCI driver");
863 | |