// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include "i915_drv.h"

#include <drm/display/drm_dp_tunnel.h>

#include "intel_atomic.h"
#include "intel_display_limits.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
#include "intel_dp_tunnel.h"
#include "intel_link_bw.h"

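/*
 * Per-pipe references to DP tunnels inherited from the pre-modeset state,
 * tracked so that the BW reserved by such tunnels can be released during
 * the atomic modeset (see check_inherited_tunnel_state()).
 */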
struct intel_dp_tunnel_inherited_state {
	struct drm_dp_tunnel_ref ref[I915_MAX_PIPES];
};

/**
 * intel_dp_tunnel_disconnect - Disconnect a DP tunnel from a port
 * @intel_dp: DP port object the tunnel is connected to
 *
 * Disconnect a DP tunnel from @intel_dp, destroying any related state. This
 * should be called after detecting a sink-disconnect event from the port.
 */
void intel_dp_tunnel_disconnect(struct intel_dp *intel_dp)
{
	drm_dp_tunnel_destroy(intel_dp->tunnel);
	intel_dp->tunnel = NULL;
}

/**
 * intel_dp_tunnel_destroy - Destroy a DP tunnel
 * @intel_dp: DP port object the tunnel is connected to
 *
 * Destroy a DP tunnel connected to @intel_dp, after disabling the BW
 * allocation mode on the tunnel. This should be called while destroying the
 * port.
 */
void intel_dp_tunnel_destroy(struct intel_dp *intel_dp)
{
	if (intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
		drm_dp_tunnel_disable_bw_alloc(intel_dp->tunnel);

	intel_dp_tunnel_disconnect(intel_dp);
}

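/* Convert a BW amount in kB/s units to Mb/s units, rounding up. */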
static int kbytes_to_mbits(int kbytes)
{
	return DIV_ROUND_UP(kbytes * 8, 1000);
}

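/*
 * Return the maximum data rate of the link in kB/s units, setting
 * @below_dprx_bw to whether this rate is limited below what the DPRX is
 * capable of (for instance by the BW available through the tunnel).
 */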
static int get_current_link_bw(struct intel_dp *intel_dp,
			       bool *below_dprx_bw)
{
	int rate = intel_dp_max_common_rate(intel_dp);
	int lane_count = intel_dp_max_common_lane_count(intel_dp);
	int bw;

	bw = intel_dp_max_link_data_rate(intel_dp, rate, lane_count);
	*below_dprx_bw = bw < drm_dp_max_dprx_data_rate(rate, lane_count);

	return bw;
}

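/*
 * Update the state of the tunnel on @intel_dp, re-reading the sink's
 * capabilities if the state changed. Return 1 if user space must be
 * notified about a BW change, 0 if no notification is needed and a
 * negative error code on failure.
 */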
static int update_tunnel_state(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	bool old_bw_below_dprx;
	bool new_bw_below_dprx;
	int old_bw;
	int new_bw;
	int ret;

	old_bw = get_current_link_bw(intel_dp, &old_bw_below_dprx);

	ret = drm_dp_tunnel_update_state(intel_dp->tunnel);
	if (ret < 0) {
		drm_dbg_kms(&i915->drm,
			    "[DPTUN %s][ENCODER:%d:%s] State update failed (err %pe)\n",
			    drm_dp_tunnel_name(intel_dp->tunnel),
			    encoder->base.base.id, encoder->base.name,
			    ERR_PTR(ret));

		return ret;
	}

	if (ret == 0 ||
	    !drm_dp_tunnel_bw_alloc_is_enabled(intel_dp->tunnel))
		return 0;

	intel_dp_update_sink_caps(intel_dp);

	new_bw = get_current_link_bw(intel_dp, &new_bw_below_dprx);

	/* Suppress the notification if the mode list can't change due to bw. */
	if (old_bw_below_dprx == new_bw_below_dprx &&
	    !new_bw_below_dprx)
		return 0;

	drm_dbg_kms(&i915->drm,
		    "[DPTUN %s][ENCODER:%d:%s] Notify users about BW change: %d -> %d\n",
		    drm_dp_tunnel_name(intel_dp->tunnel),
		    encoder->base.base.id, encoder->base.name,
		    kbytes_to_mbits(old_bw), kbytes_to_mbits(new_bw));

	return 1;
}

/*
 * Allocate the BW for a tunnel on a DP connector/port if the connector/port
 * was already active when detecting the tunnel. The BW allocated this way
 * must be freed by the next atomic modeset, which stores the BW in
 * intel_atomic_state::inherited_dp_tunnels and calls
 * intel_dp_tunnel_atomic_free_bw().
 */
static int allocate_initial_tunnel_bw_for_pipes(struct intel_dp *intel_dp, u8 pipe_mask)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct intel_crtc *crtc;
	int tunnel_bw = 0;
	int err;

	for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, pipe_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		int stream_bw = intel_dp_config_required_rate(crtc_state);

		tunnel_bw += stream_bw;

		drm_dbg_kms(&i915->drm,
			    "[DPTUN %s][ENCODER:%d:%s][CRTC:%d:%s] Initial BW for stream %d: %d/%d Mb/s\n",
			    drm_dp_tunnel_name(intel_dp->tunnel),
			    encoder->base.base.id, encoder->base.name,
			    crtc->base.base.id, crtc->base.name,
			    crtc->pipe,
			    kbytes_to_mbits(stream_bw), kbytes_to_mbits(tunnel_bw));
	}

	err = drm_dp_tunnel_alloc_bw(intel_dp->tunnel, tunnel_bw);
	if (err) {
		drm_dbg_kms(&i915->drm,
			    "[DPTUN %s][ENCODER:%d:%s] Initial BW allocation failed (err %pe)\n",
			    drm_dp_tunnel_name(intel_dp->tunnel),
			    encoder->base.base.id, encoder->base.name,
			    ERR_PTR(err));

		return err;
	}

	return update_tunnel_state(intel_dp);
}

static int allocate_initial_tunnel_bw(struct intel_dp *intel_dp,
				      struct drm_modeset_acquire_ctx *ctx)
{
	u8 pipe_mask;
	int err;

	err = intel_dp_get_active_pipes(intel_dp, ctx, &pipe_mask);
	if (err)
		return err;

	return allocate_initial_tunnel_bw_for_pipes(intel_dp, pipe_mask);
}

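/*
 * Detect a tunnel on the port, enabling the BW allocation mode on it if
 * supported and allocating the BW already used by active streams. The tunnel
 * is kept around even if enabling the BW allocation mode on it isn't
 * supported.
 */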
static int detect_new_tunnel(struct intel_dp *intel_dp, struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct drm_dp_tunnel *tunnel;
	int ret;

	tunnel = drm_dp_tunnel_detect(i915->display.dp_tunnel_mgr,
				      &intel_dp->aux);
	if (IS_ERR(tunnel))
		return PTR_ERR(tunnel);

	intel_dp->tunnel = tunnel;

	ret = drm_dp_tunnel_enable_bw_alloc(intel_dp->tunnel);
	if (ret) {
		if (ret == -EOPNOTSUPP)
			return 0;

		drm_dbg_kms(&i915->drm,
			    "[DPTUN %s][ENCODER:%d:%s] Failed to enable BW allocation mode (ret %pe)\n",
			    drm_dp_tunnel_name(intel_dp->tunnel),
			    encoder->base.base.id, encoder->base.name,
			    ERR_PTR(ret));

		/* Keep the tunnel with BWA disabled */
		return 0;
	}

	ret = allocate_initial_tunnel_bw(intel_dp, ctx);
	if (ret < 0)
		intel_dp_tunnel_destroy(intel_dp);

	return ret;
}

/**
 * intel_dp_tunnel_detect - Detect a DP tunnel on a port
 * @intel_dp: DP port object
 * @ctx: lock context acquired by the connector detection handler
 *
 * Detect a DP tunnel on the @intel_dp port, enabling the BW allocation mode
 * on it if supported and allocating the BW required on an already active port.
 * The BW allocated this way must be freed by the next atomic modeset calling
 * intel_dp_tunnel_atomic_free_bw().
 *
 * If @intel_dp already has a tunnel detected on it, update the tunnel's state
 * wrt. its support for BW allocation mode and the available BW via the
 * tunnel. If the tunnel's state change requires this - for instance the
 * tunnel's group ID has changed - the tunnel will be dropped and recreated.
 *
 * Return 0 in case of success - after any tunnel was detected and added to
 * @intel_dp - or 1 in case the BW on an already existing tunnel has changed
 * in a way that requires notifying user space.
 */
int intel_dp_tunnel_detect(struct intel_dp *intel_dp, struct drm_modeset_acquire_ctx *ctx)
{
	int ret;

	if (intel_dp_is_edp(intel_dp))
		return 0;

	if (intel_dp->tunnel) {
		ret = update_tunnel_state(intel_dp);
		if (ret >= 0)
			return ret;

		/* Try to recreate the tunnel after an update error. */
		intel_dp_tunnel_destroy(intel_dp);
	}

	return detect_new_tunnel(intel_dp, ctx);
}

/**
 * intel_dp_tunnel_bw_alloc_is_enabled - Query the BW allocation support on a tunnel
 * @intel_dp: DP port object
 *
 * Query whether a DP tunnel is connected on @intel_dp and the tunnel supports
 * the BW allocation mode.
 *
 * Returns %true if a tunnel with the BW allocation mode enabled is connected
 * to @intel_dp.
 */
bool intel_dp_tunnel_bw_alloc_is_enabled(struct intel_dp *intel_dp)
{
	return drm_dp_tunnel_bw_alloc_is_enabled(intel_dp->tunnel);
}

/**
 * intel_dp_tunnel_suspend - Suspend a DP tunnel connected on a port
 * @intel_dp: DP port object
 *
 * Suspend a DP tunnel on @intel_dp with BW allocation mode enabled on it.
 */
void intel_dp_tunnel_suspend(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;

	if (!intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
		return;

	drm_dbg_kms(&i915->drm, "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s] Suspend\n",
		    drm_dp_tunnel_name(intel_dp->tunnel),
		    connector->base.base.id, connector->base.name,
		    encoder->base.base.id, encoder->base.name);

	drm_dp_tunnel_disable_bw_alloc(intel_dp->tunnel);

	intel_dp->tunnel_suspended = true;
}

/**
 * intel_dp_tunnel_resume - Resume a DP tunnel connected on a port
 * @intel_dp: DP port object
 * @crtc_state: CRTC state
 * @dpcd_updated: whether the DPCD DPRX capabilities got updated during resume
 *
 * Resume a DP tunnel on @intel_dp with BW allocation mode enabled on it.
 */
void intel_dp_tunnel_resume(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *crtc_state,
			    bool dpcd_updated)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	u8 dpcd[DP_RECEIVER_CAP_SIZE];
	u8 pipe_mask;
	int err = 0;

	if (!intel_dp->tunnel_suspended)
		return;

	intel_dp->tunnel_suspended = false;

	drm_dbg_kms(&i915->drm, "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s] Resume\n",
		    drm_dp_tunnel_name(intel_dp->tunnel),
		    connector->base.base.id, connector->base.name,
		    encoder->base.base.id, encoder->base.name);

	/*
	 * The TBT Connection Manager requires the GFX driver to read out
	 * the sink's DPRX caps to be able to service any BW requests later.
	 * During resume, overriding the caps cached in @intel_dp before
	 * suspend must be avoided, so do only a dummy read here, unless the
	 * capabilities were already updated during resume.
	 */
	if (!dpcd_updated) {
		err = intel_dp_read_dprx_caps(intel_dp, dpcd);

		if (err) {
			drm_dp_tunnel_set_io_error(intel_dp->tunnel);
			goto out_err;
		}
	}

	err = drm_dp_tunnel_enable_bw_alloc(intel_dp->tunnel);
	if (err)
		goto out_err;

	pipe_mask = 0;
	if (crtc_state) {
		struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

		/* TODO: Add support for MST */
		pipe_mask |= BIT(crtc->pipe);
	}

	err = allocate_initial_tunnel_bw_for_pipes(intel_dp, pipe_mask);
	if (err < 0)
		goto out_err;

	return;

out_err:
	drm_dbg_kms(&i915->drm,
		    "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s] Tunnel can't be resumed, will drop and redetect it (err %pe)\n",
		    drm_dp_tunnel_name(intel_dp->tunnel),
		    connector->base.base.id, connector->base.name,
		    encoder->base.base.id, encoder->base.name,
		    ERR_PTR(err));
}

static struct drm_dp_tunnel *
get_inherited_tunnel(struct intel_atomic_state *state, struct intel_crtc *crtc)
{
	if (!state->inherited_dp_tunnels)
		return NULL;

	return state->inherited_dp_tunnels->ref[crtc->pipe].tunnel;
}

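/*
 * Add a reference to @tunnel to @state for @crtc's pipe, allocating the
 * inherited state container on demand. Adding the same tunnel again for the
 * pipe is a no-op.
 */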
static int
add_inherited_tunnel(struct intel_atomic_state *state,
		     struct drm_dp_tunnel *tunnel,
		     struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct drm_dp_tunnel *old_tunnel;

	old_tunnel = get_inherited_tunnel(state, crtc);
	if (old_tunnel) {
		drm_WARN_ON(&i915->drm, old_tunnel != tunnel);
		return 0;
	}

	if (!state->inherited_dp_tunnels) {
		state->inherited_dp_tunnels = kzalloc(sizeof(*state->inherited_dp_tunnels),
						      GFP_KERNEL);
		if (!state->inherited_dp_tunnels)
			return -ENOMEM;
	}

	drm_dp_tunnel_ref_get(tunnel, &state->inherited_dp_tunnels->ref[crtc->pipe]);

	return 0;
}

static int check_inherited_tunnel_state(struct intel_atomic_state *state,
					struct intel_dp *intel_dp,
					const struct intel_digital_connector_state *old_conn_state)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct intel_connector *connector =
		to_intel_connector(old_conn_state->base.connector);
	struct intel_crtc *old_crtc;
	const struct intel_crtc_state *old_crtc_state;

	/*
	 * If a BWA tunnel gets detected only after the corresponding
	 * connector got enabled already without a BWA tunnel - or with a
	 * different BWA tunnel which was removed meanwhile - the old CRTC
	 * state won't contain the state of the current tunnel. This tunnel
	 * still has BW reserved for it, which needs to be released; so add
	 * the state for such inherited tunnels separately, only to this
	 * atomic state.
	 */
	if (!intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
		return 0;

	if (!old_conn_state->base.crtc)
		return 0;

	old_crtc = to_intel_crtc(old_conn_state->base.crtc);
	old_crtc_state = intel_atomic_get_old_crtc_state(state, old_crtc);

	if (!old_crtc_state->hw.active ||
	    old_crtc_state->dp_tunnel_ref.tunnel == intel_dp->tunnel)
		return 0;

	drm_dbg_kms(&i915->drm,
		    "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s][CRTC:%d:%s] Adding state for inherited tunnel %p\n",
		    drm_dp_tunnel_name(intel_dp->tunnel),
		    connector->base.base.id, connector->base.name,
		    encoder->base.base.id, encoder->base.name,
		    old_crtc->base.base.id, old_crtc->base.name,
		    intel_dp->tunnel);

	return add_inherited_tunnel(state, intel_dp->tunnel, old_crtc);
}

/**
 * intel_dp_tunnel_atomic_cleanup_inherited_state - Free any inherited DP tunnel state
 * @state: Atomic state
 *
 * Free the inherited DP tunnel state in @state.
 */
void intel_dp_tunnel_atomic_cleanup_inherited_state(struct intel_atomic_state *state)
{
	enum pipe pipe;

	if (!state->inherited_dp_tunnels)
		return;

	for_each_pipe(to_i915(state->base.dev), pipe)
		if (state->inherited_dp_tunnels->ref[pipe].tunnel)
			drm_dp_tunnel_ref_put(&state->inherited_dp_tunnels->ref[pipe]);

	kfree(state->inherited_dp_tunnels);
	state->inherited_dp_tunnels = NULL;
}

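/*
 * Add the state of all streams (pipes) in @tunnel's group to @state, so that
 * the BW of the whole group can be recomputed together.
 */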
static int intel_dp_tunnel_atomic_add_group_state(struct intel_atomic_state *state,
						  struct drm_dp_tunnel *tunnel)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	u32 pipe_mask;
	int err;

	err = drm_dp_tunnel_atomic_get_group_streams_in_state(&state->base,
							      tunnel, &pipe_mask);
	if (err)
		return err;

	drm_WARN_ON(&i915->drm, pipe_mask & ~((1 << I915_MAX_PIPES) - 1));

	return intel_modeset_pipes_in_mask_early(state, "DPTUN", pipe_mask);
}

/**
 * intel_dp_tunnel_atomic_add_state_for_crtc - Add CRTC specific DP tunnel state
 * @state: Atomic state
 * @crtc: CRTC to add the tunnel state for
 *
 * Add the DP tunnel state for @crtc if the CRTC (aka DP tunnel stream) is enabled
 * via a DP tunnel.
 *
 * Return 0 in case of success, a negative error code otherwise.
 */
int intel_dp_tunnel_atomic_add_state_for_crtc(struct intel_atomic_state *state,
					      struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_dp_tunnel_state *tunnel_state;
	struct drm_dp_tunnel *tunnel = new_crtc_state->dp_tunnel_ref.tunnel;

	if (!tunnel)
		return 0;

	tunnel_state = drm_dp_tunnel_atomic_get_state(&state->base, tunnel);
	if (IS_ERR(tunnel_state))
		return PTR_ERR(tunnel_state);

	return 0;
}

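/*
 * If @crtc is driven via a tunnel, add the state of the tunnel's whole group
 * to @state.
 */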
static int check_group_state(struct intel_atomic_state *state,
			     struct intel_dp *intel_dp,
			     struct intel_connector *connector,
			     struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (!crtc_state->dp_tunnel_ref.tunnel)
		return 0;

	drm_dbg_kms(&i915->drm,
		    "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s][CRTC:%d:%s] Adding group state for tunnel %p\n",
		    drm_dp_tunnel_name(intel_dp->tunnel),
		    connector->base.base.id, connector->base.name,
		    encoder->base.base.id, encoder->base.name,
		    crtc->base.base.id, crtc->base.name,
		    crtc_state->dp_tunnel_ref.tunnel);

	return intel_dp_tunnel_atomic_add_group_state(state, crtc_state->dp_tunnel_ref.tunnel);
}

/**
 * intel_dp_tunnel_atomic_check_state - Check a connector's DP tunnel specific state
 * @state: Atomic state
 * @intel_dp: DP port object
 * @connector: connector using @intel_dp
 *
 * Check and add the DP tunnel atomic state for @intel_dp/@connector to
 * @state, if there is a DP tunnel detected on @intel_dp with BW allocation
 * mode enabled on it, or if @intel_dp/@connector was previously enabled via a
 * DP tunnel.
 *
 * Returns 0 in case of success, or a negative error code otherwise.
 */
int intel_dp_tunnel_atomic_check_state(struct intel_atomic_state *state,
				       struct intel_dp *intel_dp,
				       struct intel_connector *connector)
{
	const struct intel_digital_connector_state *old_conn_state =
		intel_atomic_get_old_connector_state(state, connector);
	const struct intel_digital_connector_state *new_conn_state =
		intel_atomic_get_new_connector_state(state, connector);
	int err;

	if (old_conn_state->base.crtc) {
		err = check_group_state(state, intel_dp, connector,
					to_intel_crtc(old_conn_state->base.crtc));
		if (err)
			return err;
	}

	if (new_conn_state->base.crtc &&
	    new_conn_state->base.crtc != old_conn_state->base.crtc) {
		err = check_group_state(state, intel_dp, connector,
					to_intel_crtc(new_conn_state->base.crtc));
		if (err)
			return err;
	}

	return check_inherited_tunnel_state(state, intel_dp, old_conn_state);
}

/**
 * intel_dp_tunnel_atomic_compute_stream_bw - Compute the BW required by a DP tunnel stream
 * @state: Atomic state
 * @intel_dp: DP object
 * @connector: connector using @intel_dp
 * @crtc_state: state of CRTC of the given DP tunnel stream
 *
 * Compute the required BW of CRTC (aka DP tunnel stream), storing this BW to
 * the DP tunnel state containing the stream in @state. Before re-calculating
 * the BW requirement in @crtc_state, the old BW requirement computed by this
 * function must be cleared by calling intel_dp_tunnel_atomic_clear_stream_bw().
 *
 * Returns 0 in case of success, a negative error code otherwise.
 */
int intel_dp_tunnel_atomic_compute_stream_bw(struct intel_atomic_state *state,
					     struct intel_dp *intel_dp,
					     const struct intel_connector *connector,
					     struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	int required_rate = intel_dp_config_required_rate(crtc_state);
	int ret;

	if (!intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
		return 0;

	drm_dbg_kms(&i915->drm,
		    "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s][CRTC:%d:%s] Stream %d required BW %d Mb/s\n",
		    drm_dp_tunnel_name(intel_dp->tunnel),
		    connector->base.base.id, connector->base.name,
		    encoder->base.base.id, encoder->base.name,
		    crtc->base.base.id, crtc->base.name,
		    crtc->pipe,
		    kbytes_to_mbits(required_rate));

	ret = drm_dp_tunnel_atomic_set_stream_bw(&state->base, intel_dp->tunnel,
						 crtc->pipe, required_rate);
	if (ret < 0)
		return ret;

	drm_dp_tunnel_ref_get(intel_dp->tunnel,
			      &crtc_state->dp_tunnel_ref);

	return 0;
}

/**
 * intel_dp_tunnel_atomic_clear_stream_bw - Clear any DP tunnel stream BW requirement
 * @state: Atomic state
 * @crtc_state: state of CRTC of the given DP tunnel stream
 *
 * Clear any DP tunnel stream BW requirement set by
 * intel_dp_tunnel_atomic_compute_stream_bw().
 */
void intel_dp_tunnel_atomic_clear_stream_bw(struct intel_atomic_state *state,
					    struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	if (!crtc_state->dp_tunnel_ref.tunnel)
		return;

	drm_dp_tunnel_atomic_set_stream_bw(&state->base,
					   crtc_state->dp_tunnel_ref.tunnel,
					   crtc->pipe, 0);
	drm_dp_tunnel_ref_put(&crtc_state->dp_tunnel_ref);
}

/**
 * intel_dp_tunnel_atomic_check_link - Check the DP tunnel atomic state
 * @state: intel atomic state
 * @limits: link BW limits
 *
 * Check the link configuration for all DP tunnels in @state. If the
 * configuration is invalid @limits will be updated if possible to
 * reduce the total BW, after which the configuration for all CRTCs in
 * @state must be recomputed with the updated @limits.
 *
 * Returns:
 *   - 0 if the configuration is valid
 *   - %-EAGAIN, if the configuration is invalid and @limits got updated
 *     with fallback values with which the configuration of all CRTCs in
 *     @state must be recomputed
 *   - Other negative error, if the configuration is invalid without a
 *     fallback possibility, or the check failed for another reason
 */
int intel_dp_tunnel_atomic_check_link(struct intel_atomic_state *state,
				      struct intel_link_bw_limits *limits)
{
	u32 failed_stream_mask;
	int err;

	err = drm_dp_tunnel_atomic_check_stream_bws(&state->base,
						    &failed_stream_mask);
	if (err != -ENOSPC)
		return err;

	err = intel_link_bw_reduce_bpp(state, limits,
				       failed_stream_mask, "DP tunnel link BW");

	return err ? : -EAGAIN;
}

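/*
 * First phase of allocating the BW for the tunnels in @state: decrease the
 * allocation of each modeset tunnel whose new required BW is lower than its
 * currently allocated BW, freeing up BW in the tunnel's group before any
 * allocation gets increased in the second phase.
 */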
static void atomic_decrease_bw(struct intel_atomic_state *state)
{
	struct intel_crtc *crtc;
	const struct intel_crtc_state *old_crtc_state;
	const struct intel_crtc_state *new_crtc_state;
	int i;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		const struct drm_dp_tunnel_state *new_tunnel_state;
		struct drm_dp_tunnel *tunnel;
		int old_bw;
		int new_bw;

		if (!intel_crtc_needs_modeset(new_crtc_state))
			continue;

		tunnel = get_inherited_tunnel(state, crtc);
		if (!tunnel)
			tunnel = old_crtc_state->dp_tunnel_ref.tunnel;

		if (!tunnel)
			continue;

		old_bw = drm_dp_tunnel_get_allocated_bw(tunnel);

		new_tunnel_state = drm_dp_tunnel_atomic_get_new_state(&state->base, tunnel);
		new_bw = drm_dp_tunnel_atomic_get_required_bw(new_tunnel_state);

		if (new_bw >= old_bw)
			continue;

		drm_dp_tunnel_alloc_bw(tunnel, new_bw);
	}
}

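/*
 * Handle a BW allocation failure on a sink that is still connected, by
 * queuing a modeset retry work for the encoder's link.
 */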
static void queue_retry_work(struct intel_atomic_state *state,
			     struct drm_dp_tunnel *tunnel,
			     const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_encoder *encoder;

	encoder = intel_get_crtc_new_encoder(state, crtc_state);

	if (!intel_digital_port_connected(encoder))
		return;

	drm_dbg_kms(&i915->drm,
		    "[DPTUN %s][ENCODER:%d:%s] BW allocation failed on a connected sink\n",
		    drm_dp_tunnel_name(tunnel),
		    encoder->base.base.id,
		    encoder->base.name);

	intel_dp_queue_modeset_retry_for_link(state, encoder, crtc_state);
}

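/*
 * Second phase of allocating the BW for the tunnels in @state: increase the
 * allocation of each modeset tunnel that requires more BW, queuing a modeset
 * retry if the allocation fails.
 */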
static void atomic_increase_bw(struct intel_atomic_state *state)
{
	struct intel_crtc *crtc;
	const struct intel_crtc_state *crtc_state;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		struct drm_dp_tunnel_state *tunnel_state;
		struct drm_dp_tunnel *tunnel = crtc_state->dp_tunnel_ref.tunnel;
		int bw;

		if (!intel_crtc_needs_modeset(crtc_state))
			continue;

		if (!tunnel)
			continue;

		tunnel_state = drm_dp_tunnel_atomic_get_new_state(&state->base, tunnel);

		bw = drm_dp_tunnel_atomic_get_required_bw(tunnel_state);

		if (drm_dp_tunnel_alloc_bw(tunnel, bw) != 0)
			queue_retry_work(state, tunnel, crtc_state);
	}
}

/**
 * intel_dp_tunnel_atomic_alloc_bw - Allocate the BW for all modeset tunnels
 * @state: Atomic state
 *
 * Allocate the required BW for all tunnels in @state.
 */
void intel_dp_tunnel_atomic_alloc_bw(struct intel_atomic_state *state)
{
	atomic_decrease_bw(state);
	atomic_increase_bw(state);
}

/**
 * intel_dp_tunnel_mgr_init - Initialize the DP tunnel manager
 * @i915: i915 device object
 *
 * Initialize the DP tunnel manager. The tunnel manager will support the
 * detection/management of DP tunnels on all DP connectors, so the function
 * must be called after all these connectors have been registered already.
 *
 * Return 0 in case of success, a negative error code otherwise.
 */
int intel_dp_tunnel_mgr_init(struct drm_i915_private *i915)
{
	struct drm_dp_tunnel_mgr *tunnel_mgr;
	struct drm_connector_list_iter connector_list_iter;
	struct intel_connector *connector;
	int dp_connectors = 0;

	drm_connector_list_iter_begin(&i915->drm, &connector_list_iter);
	for_each_intel_connector_iter(connector, &connector_list_iter) {
		if (connector->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		dp_connectors++;
	}
	drm_connector_list_iter_end(&connector_list_iter);

	tunnel_mgr = drm_dp_tunnel_mgr_create(&i915->drm, dp_connectors);
	if (IS_ERR(tunnel_mgr))
		return PTR_ERR(tunnel_mgr);

	i915->display.dp_tunnel_mgr = tunnel_mgr;

	return 0;
}

/**
 * intel_dp_tunnel_mgr_cleanup - Clean up the DP tunnel manager state
 * @i915: i915 device object
 *
 * Clean up the DP tunnel manager state.
 */
void intel_dp_tunnel_mgr_cleanup(struct drm_i915_private *i915)
{
	drm_dp_tunnel_mgr_destroy(i915->display.dp_tunnel_mgr);
	i915->display.dp_tunnel_mgr = NULL;
}
