1 | // SPDX-License-Identifier: MIT |
2 | /* |
3 | * Copyright © 2020 Intel Corporation |
4 | */ |
5 | #include <linux/kernel.h> |
6 | #include <linux/pm_qos.h> |
7 | #include <linux/slab.h> |
8 | |
9 | #include <drm/drm_atomic_helper.h> |
10 | #include <drm/drm_fourcc.h> |
11 | #include <drm/drm_plane.h> |
12 | #include <drm/drm_vblank_work.h> |
13 | |
14 | #include "i915_vgpu.h" |
15 | #include "i9xx_plane.h" |
16 | #include "icl_dsi.h" |
17 | #include "intel_atomic.h" |
18 | #include "intel_atomic_plane.h" |
19 | #include "intel_color.h" |
20 | #include "intel_crtc.h" |
21 | #include "intel_cursor.h" |
22 | #include "intel_display_debugfs.h" |
23 | #include "intel_display_irq.h" |
24 | #include "intel_display_trace.h" |
25 | #include "intel_display_types.h" |
26 | #include "intel_drrs.h" |
27 | #include "intel_dsb.h" |
28 | #include "intel_dsi.h" |
29 | #include "intel_fifo_underrun.h" |
30 | #include "intel_pipe_crc.h" |
31 | #include "intel_psr.h" |
32 | #include "intel_sprite.h" |
33 | #include "intel_vblank.h" |
34 | #include "intel_vrr.h" |
35 | #include "skl_universal_plane.h" |
36 | |
37 | static void assert_vblank_disabled(struct drm_crtc *crtc) |
38 | { |
39 | struct drm_i915_private *i915 = to_i915(dev: crtc->dev); |
40 | |
41 | if (I915_STATE_WARN(i915, drm_crtc_vblank_get(crtc) == 0, |
42 | "[CRTC:%d:%s] vblank assertion failure (expected off, current on)\n" , |
43 | crtc->base.id, crtc->name)) |
44 | drm_crtc_vblank_put(crtc); |
45 | } |
46 | |
47 | struct intel_crtc *intel_first_crtc(struct drm_i915_private *i915) |
48 | { |
49 | return to_intel_crtc(drm_crtc_from_index(&i915->drm, 0)); |
50 | } |
51 | |
52 | struct intel_crtc *intel_crtc_for_pipe(struct drm_i915_private *i915, |
53 | enum pipe pipe) |
54 | { |
55 | struct intel_crtc *crtc; |
56 | |
57 | for_each_intel_crtc(&i915->drm, crtc) { |
58 | if (crtc->pipe == pipe) |
59 | return crtc; |
60 | } |
61 | |
62 | return NULL; |
63 | } |
64 | |
65 | void intel_crtc_wait_for_next_vblank(struct intel_crtc *crtc) |
66 | { |
67 | drm_crtc_wait_one_vblank(crtc: &crtc->base); |
68 | } |
69 | |
70 | void intel_wait_for_vblank_if_active(struct drm_i915_private *i915, |
71 | enum pipe pipe) |
72 | { |
73 | struct intel_crtc *crtc = intel_crtc_for_pipe(i915, pipe); |
74 | |
75 | if (crtc->active) |
76 | intel_crtc_wait_for_next_vblank(crtc); |
77 | } |
78 | |
79 | u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc) |
80 | { |
81 | struct drm_device *dev = crtc->base.dev; |
82 | struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(crtc: &crtc->base)]; |
83 | |
84 | if (!crtc->active) |
85 | return 0; |
86 | |
87 | if (!vblank->max_vblank_count) |
88 | return (u32)drm_crtc_accurate_vblank_count(crtc: &crtc->base); |
89 | |
90 | return crtc->base.funcs->get_vblank_counter(&crtc->base); |
91 | } |
92 | |
93 | u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state) |
94 | { |
95 | struct drm_i915_private *dev_priv = to_i915(dev: crtc_state->uapi.crtc->dev); |
96 | |
97 | /* |
98 | * From Gen 11, In case of dsi cmd mode, frame counter wouldnt |
99 | * have updated at the beginning of TE, if we want to use |
100 | * the hw counter, then we would find it updated in only |
101 | * the next TE, hence switching to sw counter. |
102 | */ |
103 | if (crtc_state->mode_flags & (I915_MODE_FLAG_DSI_USE_TE0 | |
104 | I915_MODE_FLAG_DSI_USE_TE1)) |
105 | return 0; |
106 | |
107 | /* |
108 | * On i965gm the hardware frame counter reads |
109 | * zero when the TV encoder is enabled :( |
110 | */ |
111 | if (IS_I965GM(dev_priv) && |
112 | (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT))) |
113 | return 0; |
114 | |
115 | if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv)) |
116 | return 0xffffffff; /* full 32 bit counter */ |
117 | else if (DISPLAY_VER(dev_priv) >= 3) |
118 | return 0xffffff; /* only 24 bits of frame count */ |
119 | else |
120 | return 0; /* Gen2 doesn't have a hardware frame counter */ |
121 | } |
122 | |
123 | void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state) |
124 | { |
125 | struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); |
126 | |
127 | assert_vblank_disabled(crtc: &crtc->base); |
128 | drm_crtc_set_max_vblank_count(crtc: &crtc->base, |
129 | max_vblank_count: intel_crtc_max_vblank_count(crtc_state)); |
130 | drm_crtc_vblank_on(crtc: &crtc->base); |
131 | |
132 | /* |
133 | * Should really happen exactly when we enable the pipe |
134 | * but we want the frame counters in the trace, and that |
135 | * requires vblank support on some platforms/outputs. |
136 | */ |
137 | trace_intel_pipe_enable(crtc); |
138 | } |
139 | |
140 | void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state) |
141 | { |
142 | struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); |
143 | |
144 | /* |
145 | * Should really happen exactly when we disable the pipe |
146 | * but we want the frame counters in the trace, and that |
147 | * requires vblank support on some platforms/outputs. |
148 | */ |
149 | trace_intel_pipe_disable(crtc); |
150 | |
151 | drm_crtc_vblank_off(crtc: &crtc->base); |
152 | assert_vblank_disabled(crtc: &crtc->base); |
153 | } |
154 | |
155 | struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc) |
156 | { |
157 | struct intel_crtc_state *crtc_state; |
158 | |
159 | crtc_state = kmalloc(size: sizeof(*crtc_state), GFP_KERNEL); |
160 | |
161 | if (crtc_state) |
162 | intel_crtc_state_reset(crtc_state, crtc); |
163 | |
164 | return crtc_state; |
165 | } |
166 | |
167 | void intel_crtc_state_reset(struct intel_crtc_state *crtc_state, |
168 | struct intel_crtc *crtc) |
169 | { |
170 | memset(crtc_state, 0, sizeof(*crtc_state)); |
171 | |
172 | __drm_atomic_helper_crtc_state_reset(state: &crtc_state->uapi, crtc: &crtc->base); |
173 | |
174 | crtc_state->cpu_transcoder = INVALID_TRANSCODER; |
175 | crtc_state->master_transcoder = INVALID_TRANSCODER; |
176 | crtc_state->hsw_workaround_pipe = INVALID_PIPE; |
177 | crtc_state->scaler_state.scaler_id = -1; |
178 | crtc_state->mst_master_transcoder = INVALID_TRANSCODER; |
179 | crtc_state->max_link_bpp_x16 = INT_MAX; |
180 | } |
181 | |
182 | static struct intel_crtc *intel_crtc_alloc(void) |
183 | { |
184 | struct intel_crtc_state *crtc_state; |
185 | struct intel_crtc *crtc; |
186 | |
187 | crtc = kzalloc(size: sizeof(*crtc), GFP_KERNEL); |
188 | if (!crtc) |
189 | return ERR_PTR(error: -ENOMEM); |
190 | |
191 | crtc_state = intel_crtc_state_alloc(crtc); |
192 | if (!crtc_state) { |
193 | kfree(objp: crtc); |
194 | return ERR_PTR(error: -ENOMEM); |
195 | } |
196 | |
197 | crtc->base.state = &crtc_state->uapi; |
198 | crtc->config = crtc_state; |
199 | |
200 | return crtc; |
201 | } |
202 | |
203 | static void intel_crtc_free(struct intel_crtc *crtc) |
204 | { |
205 | intel_crtc_destroy_state(crtc: &crtc->base, state: crtc->base.state); |
206 | kfree(objp: crtc); |
207 | } |
208 | |
209 | static void intel_crtc_destroy(struct drm_crtc *_crtc) |
210 | { |
211 | struct intel_crtc *crtc = to_intel_crtc(_crtc); |
212 | |
213 | cpu_latency_qos_remove_request(req: &crtc->vblank_pm_qos); |
214 | |
215 | drm_crtc_cleanup(crtc: &crtc->base); |
216 | kfree(objp: crtc); |
217 | } |
218 | |
/* drm_crtc_funcs.late_register hook: add this crtc's debugfs entries. */
static int intel_crtc_late_register(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	intel_crtc_debugfs_add(intel_crtc);

	return 0;
}
224 | |
/*
 * CRTC vfuncs common to all platforms; the per-platform tables below
 * add only the vblank-related hooks.
 */
#define INTEL_CRTC_FUNCS \
	.set_config = drm_atomic_helper_set_config, \
	.destroy = intel_crtc_destroy, \
	.page_flip = drm_atomic_helper_page_flip, \
	.atomic_duplicate_state = intel_crtc_duplicate_state, \
	.atomic_destroy_state = intel_crtc_destroy_state, \
	.set_crc_source = intel_crtc_set_crc_source, \
	.verify_crc_source = intel_crtc_verify_crc_source, \
	.get_crc_sources = intel_crtc_get_crc_sources, \
	.late_register = intel_crtc_late_register
235 | |
/* Non-GMCH platforms with display ver >= 8 (see intel_crtc_init()). */
static const struct drm_crtc_funcs bdw_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = bdw_enable_vblank,
	.disable_vblank = bdw_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};
244 | |
/* Non-GMCH platforms below display ver 8 (see intel_crtc_init()). */
static const struct drm_crtc_funcs ilk_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = ilk_enable_vblank,
	.disable_vblank = ilk_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};
253 | |
/* CHV/VLV/G4X GMCH platforms (see intel_crtc_init()). */
static const struct drm_crtc_funcs g4x_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = g4x_get_vblank_counter,
	.enable_vblank = i965_enable_vblank,
	.disable_vblank = i965_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};
262 | |
/* Other GMCH display ver 4 platforms (see intel_crtc_init()). */
static const struct drm_crtc_funcs i965_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i965_enable_vblank,
	.disable_vblank = i965_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};
271 | |
/* i945gm/i915gm (see intel_crtc_init()). */
static const struct drm_crtc_funcs i915gm_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i915gm_enable_vblank,
	.disable_vblank = i915gm_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};
280 | |
/* Other GMCH display ver 3 platforms (see intel_crtc_init()). */
static const struct drm_crtc_funcs i915_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	.get_vblank_counter = i915_get_vblank_counter,
	.enable_vblank = i8xx_enable_vblank,
	.disable_vblank = i8xx_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};
289 | |
/* Remaining (gen2) GMCH platforms (see intel_crtc_init()). */
static const struct drm_crtc_funcs i8xx_crtc_funcs = {
	INTEL_CRTC_FUNCS,

	/* no hw vblank counter */
	.enable_vblank = i8xx_enable_vblank,
	.disable_vblank = i8xx_disable_vblank,
	.get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};
298 | |
299 | int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe) |
300 | { |
301 | struct intel_plane *primary, *cursor; |
302 | const struct drm_crtc_funcs *funcs; |
303 | struct intel_crtc *crtc; |
304 | int sprite, ret; |
305 | |
306 | crtc = intel_crtc_alloc(); |
307 | if (IS_ERR(ptr: crtc)) |
308 | return PTR_ERR(ptr: crtc); |
309 | |
310 | crtc->pipe = pipe; |
311 | crtc->num_scalers = DISPLAY_RUNTIME_INFO(dev_priv)->num_scalers[pipe]; |
312 | |
313 | if (DISPLAY_VER(dev_priv) >= 9) |
314 | primary = skl_universal_plane_create(dev_priv, pipe, |
315 | plane_id: PLANE_PRIMARY); |
316 | else |
317 | primary = intel_primary_plane_create(dev_priv, pipe); |
318 | if (IS_ERR(ptr: primary)) { |
319 | ret = PTR_ERR(ptr: primary); |
320 | goto fail; |
321 | } |
322 | crtc->plane_ids_mask |= BIT(primary->id); |
323 | |
324 | intel_init_fifo_underrun_reporting(i915: dev_priv, crtc, enable: false); |
325 | |
326 | for_each_sprite(dev_priv, pipe, sprite) { |
327 | struct intel_plane *plane; |
328 | |
329 | if (DISPLAY_VER(dev_priv) >= 9) |
330 | plane = skl_universal_plane_create(dev_priv, pipe, |
331 | plane_id: PLANE_SPRITE0 + sprite); |
332 | else |
333 | plane = intel_sprite_plane_create(dev_priv, pipe, plane: sprite); |
334 | if (IS_ERR(ptr: plane)) { |
335 | ret = PTR_ERR(ptr: plane); |
336 | goto fail; |
337 | } |
338 | crtc->plane_ids_mask |= BIT(plane->id); |
339 | } |
340 | |
341 | cursor = intel_cursor_plane_create(dev_priv, pipe); |
342 | if (IS_ERR(ptr: cursor)) { |
343 | ret = PTR_ERR(ptr: cursor); |
344 | goto fail; |
345 | } |
346 | crtc->plane_ids_mask |= BIT(cursor->id); |
347 | |
348 | if (HAS_GMCH(dev_priv)) { |
349 | if (IS_CHERRYVIEW(dev_priv) || |
350 | IS_VALLEYVIEW(dev_priv) || IS_G4X(dev_priv)) |
351 | funcs = &g4x_crtc_funcs; |
352 | else if (DISPLAY_VER(dev_priv) == 4) |
353 | funcs = &i965_crtc_funcs; |
354 | else if (IS_I945GM(dev_priv) || IS_I915GM(dev_priv)) |
355 | funcs = &i915gm_crtc_funcs; |
356 | else if (DISPLAY_VER(dev_priv) == 3) |
357 | funcs = &i915_crtc_funcs; |
358 | else |
359 | funcs = &i8xx_crtc_funcs; |
360 | } else { |
361 | if (DISPLAY_VER(dev_priv) >= 8) |
362 | funcs = &bdw_crtc_funcs; |
363 | else |
364 | funcs = &ilk_crtc_funcs; |
365 | } |
366 | |
367 | ret = drm_crtc_init_with_planes(dev: &dev_priv->drm, crtc: &crtc->base, |
368 | primary: &primary->base, cursor: &cursor->base, |
369 | funcs, name: "pipe %c" , pipe_name(pipe)); |
370 | if (ret) |
371 | goto fail; |
372 | |
373 | if (DISPLAY_VER(dev_priv) >= 11) |
374 | drm_crtc_create_scaling_filter_property(crtc: &crtc->base, |
375 | BIT(DRM_SCALING_FILTER_DEFAULT) | |
376 | BIT(DRM_SCALING_FILTER_NEAREST_NEIGHBOR)); |
377 | |
378 | intel_color_crtc_init(crtc); |
379 | intel_drrs_crtc_init(crtc); |
380 | intel_crtc_crc_init(crtc); |
381 | |
382 | cpu_latency_qos_add_request(req: &crtc->vblank_pm_qos, PM_QOS_DEFAULT_VALUE); |
383 | |
384 | drm_WARN_ON(&dev_priv->drm, drm_crtc_index(&crtc->base) != crtc->pipe); |
385 | |
386 | return 0; |
387 | |
388 | fail: |
389 | intel_crtc_free(crtc); |
390 | |
391 | return ret; |
392 | } |
393 | |
394 | static bool intel_crtc_needs_vblank_work(const struct intel_crtc_state *crtc_state) |
395 | { |
396 | return crtc_state->hw.active && |
397 | !intel_crtc_needs_modeset(crtc_state) && |
398 | !crtc_state->preload_luts && |
399 | intel_crtc_needs_color_update(crtc_state) && |
400 | !intel_color_uses_dsb(crtc_state); |
401 | } |
402 | |
403 | static void intel_crtc_vblank_work(struct kthread_work *base) |
404 | { |
405 | struct drm_vblank_work *work = to_drm_vblank_work(base); |
406 | struct intel_crtc_state *crtc_state = |
407 | container_of(work, typeof(*crtc_state), vblank_work); |
408 | struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); |
409 | |
410 | trace_intel_crtc_vblank_work_start(crtc); |
411 | |
412 | intel_color_load_luts(crtc_state); |
413 | |
414 | if (crtc_state->uapi.event) { |
415 | spin_lock_irq(lock: &crtc->base.dev->event_lock); |
416 | drm_crtc_send_vblank_event(crtc: &crtc->base, e: crtc_state->uapi.event); |
417 | crtc_state->uapi.event = NULL; |
418 | spin_unlock_irq(lock: &crtc->base.dev->event_lock); |
419 | } |
420 | |
421 | trace_intel_crtc_vblank_work_end(crtc); |
422 | } |
423 | |
424 | static void intel_crtc_vblank_work_init(struct intel_crtc_state *crtc_state) |
425 | { |
426 | struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); |
427 | |
428 | drm_vblank_work_init(work: &crtc_state->vblank_work, crtc: &crtc->base, |
429 | func: intel_crtc_vblank_work); |
430 | /* |
431 | * Interrupt latency is critical for getting the vblank |
432 | * work executed as early as possible during the vblank. |
433 | */ |
434 | cpu_latency_qos_update_request(req: &crtc->vblank_pm_qos, new_value: 0); |
435 | } |
436 | |
437 | void intel_wait_for_vblank_workers(struct intel_atomic_state *state) |
438 | { |
439 | struct intel_crtc_state *crtc_state; |
440 | struct intel_crtc *crtc; |
441 | int i; |
442 | |
443 | for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { |
444 | if (!intel_crtc_needs_vblank_work(crtc_state)) |
445 | continue; |
446 | |
447 | drm_vblank_work_flush(work: &crtc_state->vblank_work); |
448 | cpu_latency_qos_update_request(req: &crtc->vblank_pm_qos, |
449 | PM_QOS_DEFAULT_VALUE); |
450 | } |
451 | } |
452 | |
453 | int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode, |
454 | int usecs) |
455 | { |
456 | /* paranoia */ |
457 | if (!adjusted_mode->crtc_htotal) |
458 | return 1; |
459 | |
460 | return DIV_ROUND_UP(usecs * adjusted_mode->crtc_clock, |
461 | 1000 * adjusted_mode->crtc_htotal); |
462 | } |
463 | |
464 | /** |
465 | * intel_pipe_update_start() - start update of a set of display registers |
466 | * @state: the atomic state |
467 | * @crtc: the crtc |
468 | * |
469 | * Mark the start of an update to pipe registers that should be updated |
470 | * atomically regarding vblank. If the next vblank will happens within |
471 | * the next 100 us, this function waits until the vblank passes. |
472 | * |
473 | * After a successful call to this function, interrupts will be disabled |
474 | * until a subsequent call to intel_pipe_update_end(). That is done to |
475 | * avoid random delays. |
476 | */ |
477 | void intel_pipe_update_start(struct intel_atomic_state *state, |
478 | struct intel_crtc *crtc) |
479 | { |
480 | struct drm_i915_private *dev_priv = to_i915(dev: crtc->base.dev); |
481 | const struct intel_crtc_state *old_crtc_state = |
482 | intel_atomic_get_old_crtc_state(state, crtc); |
483 | struct intel_crtc_state *new_crtc_state = |
484 | intel_atomic_get_new_crtc_state(state, crtc); |
485 | struct intel_vblank_evade_ctx evade; |
486 | int scanline; |
487 | |
488 | intel_psr_lock(crtc_state: new_crtc_state); |
489 | |
490 | if (new_crtc_state->do_async_flip) { |
491 | spin_lock_irq(lock: &crtc->base.dev->event_lock); |
492 | /* arm the event for the flip done irq handler */ |
493 | crtc->flip_done_event = new_crtc_state->uapi.event; |
494 | spin_unlock_irq(lock: &crtc->base.dev->event_lock); |
495 | |
496 | new_crtc_state->uapi.event = NULL; |
497 | return; |
498 | } |
499 | |
500 | if (intel_crtc_needs_vblank_work(crtc_state: new_crtc_state)) |
501 | intel_crtc_vblank_work_init(crtc_state: new_crtc_state); |
502 | |
503 | intel_vblank_evade_init(old_crtc_state, new_crtc_state, evade: &evade); |
504 | |
505 | if (drm_WARN_ON(&dev_priv->drm, drm_crtc_vblank_get(&crtc->base))) |
506 | goto irq_disable; |
507 | |
508 | /* |
509 | * Wait for psr to idle out after enabling the VBL interrupts |
510 | * VBL interrupts will start the PSR exit and prevent a PSR |
511 | * re-entry as well. |
512 | */ |
513 | intel_psr_wait_for_idle_locked(new_crtc_state); |
514 | |
515 | local_irq_disable(); |
516 | |
517 | crtc->debug.min_vbl = evade.min; |
518 | crtc->debug.max_vbl = evade.max; |
519 | trace_intel_pipe_update_start(crtc); |
520 | |
521 | scanline = intel_vblank_evade(evade: &evade); |
522 | |
523 | drm_crtc_vblank_put(crtc: &crtc->base); |
524 | |
525 | crtc->debug.scanline_start = scanline; |
526 | crtc->debug.start_vbl_time = ktime_get(); |
527 | crtc->debug.start_vbl_count = intel_crtc_get_vblank_counter(crtc); |
528 | |
529 | trace_intel_pipe_update_vblank_evaded(crtc); |
530 | return; |
531 | |
532 | irq_disable: |
533 | local_irq_disable(); |
534 | } |
535 | |
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_VBLANK_EVADE)
/*
 * Record how long the vblank-evaded critical section lasted: bucket the
 * duration (ns) into a log2 histogram, track min/max/sum, and complain
 * if we overstayed the evasion window (VBLANK_EVASION_TIME_US).
 */
static void dbg_vblank_evade(struct intel_crtc *crtc, ktime_t end)
{
	u64 delta = ktime_to_ns(ktime_sub(end, crtc->debug.start_vbl_time));
	unsigned int h;

	/* >> 9 gives ~0.5us granularity for the histogram buckets */
	h = ilog2(delta >> 9);
	if (h >= ARRAY_SIZE(crtc->debug.vbl.times))
		h = ARRAY_SIZE(crtc->debug.vbl.times) - 1;
	crtc->debug.vbl.times[h]++;

	crtc->debug.vbl.sum += delta;
	/* min == 0 means "not yet recorded" */
	if (!crtc->debug.vbl.min || delta < crtc->debug.vbl.min)
		crtc->debug.vbl.min = delta;
	if (delta > crtc->debug.vbl.max)
		crtc->debug.vbl.max = delta;

	/* delta is in ns, the threshold in us, hence the * 1000 */
	if (delta > 1000 * VBLANK_EVASION_TIME_US) {
		drm_dbg_kms(crtc->base.dev,
			    "Atomic update on pipe (%c) took %lld us, max time under evasion is %u us\n",
			    pipe_name(crtc->pipe),
			    div_u64(delta, 1000),
			    VBLANK_EVASION_TIME_US);
		crtc->debug.vbl.over++;
	}
}
#else
/* Stub when vblank evasion debugging is compiled out. */
static void dbg_vblank_evade(struct intel_crtc *crtc, ktime_t end) {}
#endif
565 | |
566 | /** |
567 | * intel_pipe_update_end() - end update of a set of display registers |
568 | * @state: the atomic state |
569 | * @crtc: the crtc |
570 | * |
571 | * Mark the end of an update started with intel_pipe_update_start(). This |
572 | * re-enables interrupts and verifies the update was actually completed |
573 | * before a vblank. |
574 | */ |
575 | void intel_pipe_update_end(struct intel_atomic_state *state, |
576 | struct intel_crtc *crtc) |
577 | { |
578 | struct intel_crtc_state *new_crtc_state = |
579 | intel_atomic_get_new_crtc_state(state, crtc); |
580 | enum pipe pipe = crtc->pipe; |
581 | int scanline_end = intel_get_crtc_scanline(crtc); |
582 | u32 end_vbl_count = intel_crtc_get_vblank_counter(crtc); |
583 | ktime_t end_vbl_time = ktime_get(); |
584 | struct drm_i915_private *dev_priv = to_i915(dev: crtc->base.dev); |
585 | |
586 | if (new_crtc_state->do_async_flip) |
587 | goto out; |
588 | |
589 | trace_intel_pipe_update_end(crtc, frame: end_vbl_count, scanline_end); |
590 | |
591 | /* |
592 | * Incase of mipi dsi command mode, we need to set frame update |
593 | * request for every commit. |
594 | */ |
595 | if (DISPLAY_VER(dev_priv) >= 11 && |
596 | intel_crtc_has_type(crtc_state: new_crtc_state, type: INTEL_OUTPUT_DSI)) |
597 | icl_dsi_frame_update(crtc_state: new_crtc_state); |
598 | |
599 | /* We're still in the vblank-evade critical section, this can't race. |
600 | * Would be slightly nice to just grab the vblank count and arm the |
601 | * event outside of the critical section - the spinlock might spin for a |
602 | * while ... */ |
603 | if (intel_crtc_needs_vblank_work(crtc_state: new_crtc_state)) { |
604 | drm_vblank_work_schedule(work: &new_crtc_state->vblank_work, |
605 | count: drm_crtc_accurate_vblank_count(crtc: &crtc->base) + 1, |
606 | nextonmiss: false); |
607 | } else if (new_crtc_state->uapi.event) { |
608 | drm_WARN_ON(&dev_priv->drm, |
609 | drm_crtc_vblank_get(&crtc->base) != 0); |
610 | |
611 | spin_lock(lock: &crtc->base.dev->event_lock); |
612 | drm_crtc_arm_vblank_event(crtc: &crtc->base, |
613 | e: new_crtc_state->uapi.event); |
614 | spin_unlock(lock: &crtc->base.dev->event_lock); |
615 | |
616 | new_crtc_state->uapi.event = NULL; |
617 | } |
618 | |
619 | /* |
620 | * Send VRR Push to terminate Vblank. If we are already in vblank |
621 | * this has to be done _after_ sampling the frame counter, as |
622 | * otherwise the push would immediately terminate the vblank and |
623 | * the sampled frame counter would correspond to the next frame |
624 | * instead of the current frame. |
625 | * |
626 | * There is a tiny race here (iff vblank evasion failed us) where |
627 | * we might sample the frame counter just before vmax vblank start |
628 | * but the push would be sent just after it. That would cause the |
629 | * push to affect the next frame instead of the current frame, |
630 | * which would cause the next frame to terminate already at vmin |
631 | * vblank start instead of vmax vblank start. |
632 | */ |
633 | intel_vrr_send_push(crtc_state: new_crtc_state); |
634 | |
635 | local_irq_enable(); |
636 | |
637 | if (intel_vgpu_active(i915: dev_priv)) |
638 | goto out; |
639 | |
640 | if (crtc->debug.start_vbl_count && |
641 | crtc->debug.start_vbl_count != end_vbl_count) { |
642 | drm_err(&dev_priv->drm, |
643 | "Atomic update failure on pipe %c (start=%u end=%u) time %lld us, min %d, max %d, scanline start %d, end %d\n" , |
644 | pipe_name(pipe), crtc->debug.start_vbl_count, |
645 | end_vbl_count, |
646 | ktime_us_delta(end_vbl_time, |
647 | crtc->debug.start_vbl_time), |
648 | crtc->debug.min_vbl, crtc->debug.max_vbl, |
649 | crtc->debug.scanline_start, scanline_end); |
650 | } |
651 | |
652 | dbg_vblank_evade(crtc, end: end_vbl_time); |
653 | |
654 | out: |
655 | intel_psr_unlock(crtc_state: new_crtc_state); |
656 | } |
657 | |