1 | /* |
2 | * Copyright 2012-15 Advanced Micro Devices, Inc. |
3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), |
6 | * to deal in the Software without restriction, including without limitation |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
8 | * and/or sell copies of the Software, and to permit persons to whom the |
9 | * Software is furnished to do so, subject to the following conditions: |
10 | * |
11 | * The above copyright notice and this permission notice shall be included in |
12 | * all copies or substantial portions of the Software. |
13 | * |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
20 | * OTHER DEALINGS IN THE SOFTWARE. |
21 | * |
22 | * Authors: AMD |
23 | * |
24 | */ |
25 | |
26 | #include <drm/display/drm_dp_helper.h> |
27 | #include <drm/display/drm_dp_mst_helper.h> |
28 | #include <drm/drm_atomic.h> |
29 | #include <drm/drm_atomic_helper.h> |
30 | #include "dm_services.h" |
31 | #include "amdgpu.h" |
32 | #include "amdgpu_dm.h" |
33 | #include "amdgpu_dm_mst_types.h" |
34 | #include "amdgpu_dm_hdcp.h" |
35 | |
36 | #include "dc.h" |
37 | #include "dm_helpers.h" |
38 | |
39 | #include "ddc_service_types.h" |
40 | #include "dpcd_defs.h" |
41 | |
42 | #include "dmub_cmd.h" |
43 | #if defined(CONFIG_DEBUG_FS) |
44 | #include "amdgpu_dm_debugfs.h" |
45 | #endif |
46 | |
47 | #include "dc/dcn20/dcn20_resource.h" |
48 | |
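/*
 * Peak bandwidth margin factor (x1000) used when converting a PBN allocation
 * back to kbps; 1006 appears to account for the ~0.6% downspread margin the
 * DP MST helpers apply in the other direction.
 */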
49 | #define PEAK_FACTOR_X1000 1006 |
50 | |
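/*
 * Translate a DRM AUX message into a DC aux_payload, hand it to DC for the
 * raw transfer, and map the DC return code back onto the errno values the
 * DRM DP helpers expect.
 */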
51 | static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, |
52 | struct drm_dp_aux_msg *msg) |
53 | { |
54 | ssize_t result = 0; |
55 | struct aux_payload payload; |
56 | enum aux_return_code_type operation_result; |
57 | struct amdgpu_device *adev; |
58 | struct ddc_service *ddc; |
59 | |
60 | if (WARN_ON(msg->size > 16)) |
61 | return -E2BIG; |
62 | |
63 | payload.address = msg->address; |
64 | payload.data = msg->buffer; |
65 | payload.length = msg->size; |
66 | payload.reply = &msg->reply; |
67 | payload.i2c_over_aux = (msg->request & DP_AUX_NATIVE_WRITE) == 0; |
68 | payload.write = (msg->request & DP_AUX_I2C_READ) == 0; |
69 | payload.mot = (msg->request & DP_AUX_I2C_MOT) != 0; |
70 | payload.write_status_update = |
71 | (msg->request & DP_AUX_I2C_WRITE_STATUS_UPDATE) != 0; |
72 | payload.defer_delay = 0; |
73 | |
	result = dc_link_aux_transfer_raw(TO_DM_AUX(aux)->ddc_service, &payload,
					  &operation_result);
76 | |
	/*
	 * Workaround for certain Intel platforms where HPD unexpectedly pulls
	 * low during the first sideband message transaction and
	 * AUX_RET_ERROR_HPD_DISCON is returned. The AUX transaction actually
	 * succeeds in this case, so bypass the error.
	 */
82 | ddc = TO_DM_AUX(aux)->ddc_service; |
83 | adev = ddc->ctx->driver_context; |
84 | if (adev->dm.aux_hpd_discon_quirk) { |
85 | if (msg->address == DP_SIDEBAND_MSG_DOWN_REQ_BASE && |
86 | operation_result == AUX_RET_ERROR_HPD_DISCON) { |
87 | result = 0; |
88 | operation_result = AUX_RET_SUCCESS; |
89 | } |
90 | } |
91 | |
92 | if (payload.write && result >= 0) |
93 | result = msg->size; |
94 | |
95 | if (result < 0) |
96 | switch (operation_result) { |
97 | case AUX_RET_SUCCESS: |
98 | break; |
99 | case AUX_RET_ERROR_HPD_DISCON: |
100 | case AUX_RET_ERROR_UNKNOWN: |
101 | case AUX_RET_ERROR_INVALID_OPERATION: |
102 | case AUX_RET_ERROR_PROTOCOL_ERROR: |
103 | result = -EIO; |
104 | break; |
105 | case AUX_RET_ERROR_INVALID_REPLY: |
106 | case AUX_RET_ERROR_ENGINE_ACQUIRE: |
107 | result = -EBUSY; |
108 | break; |
109 | case AUX_RET_ERROR_TIMEOUT: |
110 | result = -ETIMEDOUT; |
111 | break; |
112 | } |
113 | |
114 | return result; |
115 | } |
116 | |
117 | static void |
118 | dm_dp_mst_connector_destroy(struct drm_connector *connector) |
119 | { |
120 | struct amdgpu_dm_connector *aconnector = |
121 | to_amdgpu_dm_connector(connector); |
122 | |
123 | if (aconnector->dc_sink) { |
		dc_link_remove_remote_sink(aconnector->dc_link,
					   aconnector->dc_sink);
		dc_sink_release(aconnector->dc_sink);
	}

	kfree(aconnector->edid);

	drm_connector_cleanup(connector);
	drm_dp_mst_put_port_malloc(aconnector->mst_output_port);
	kfree(aconnector);
134 | } |
135 | |
136 | static int |
137 | amdgpu_dm_mst_connector_late_register(struct drm_connector *connector) |
138 | { |
139 | struct amdgpu_dm_connector *amdgpu_dm_connector = |
140 | to_amdgpu_dm_connector(connector); |
141 | int r; |
142 | |
143 | r = drm_dp_mst_connector_late_register(connector, |
					       amdgpu_dm_connector->mst_output_port);
145 | if (r < 0) |
146 | return r; |
147 | |
148 | #if defined(CONFIG_DEBUG_FS) |
	connector_debugfs_init(amdgpu_dm_connector);
150 | #endif |
151 | |
152 | return 0; |
153 | } |
154 | |
155 | static void |
156 | amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector) |
157 | { |
158 | struct amdgpu_dm_connector *aconnector = |
159 | to_amdgpu_dm_connector(connector); |
160 | struct drm_dp_mst_port *port = aconnector->mst_output_port; |
161 | struct amdgpu_dm_connector *root = aconnector->mst_root; |
162 | struct dc_link *dc_link = aconnector->dc_link; |
163 | struct dc_sink *dc_sink = aconnector->dc_sink; |
164 | |
165 | drm_dp_mst_connector_early_unregister(connector, port); |
166 | |
	/*
	 * Release the dc_sink for a connector whose attached port is no
	 * longer in the MST topology.
	 */
	drm_modeset_lock(&root->mst_mgr.base.lock, NULL);
	if (dc_sink) {
		if (dc_link->sink_count)
			dc_link_remove_remote_sink(dc_link, dc_sink);

		drm_dbg_dp(connector->dev,
			   "DM_MST: remove remote sink 0x%p, %d remaining\n",
			   dc_sink, dc_link->sink_count);

		dc_sink_release(dc_sink);
		aconnector->dc_sink = NULL;
		aconnector->edid = NULL;
	}

	aconnector->mst_status = MST_STATUS_DEFAULT;
	drm_modeset_unlock(&root->mst_mgr.base.lock);
187 | } |
188 | |
189 | static const struct drm_connector_funcs dm_dp_mst_connector_funcs = { |
190 | .fill_modes = drm_helper_probe_single_connector_modes, |
191 | .destroy = dm_dp_mst_connector_destroy, |
192 | .reset = amdgpu_dm_connector_funcs_reset, |
193 | .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state, |
194 | .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, |
195 | .atomic_set_property = amdgpu_dm_connector_atomic_set_property, |
196 | .atomic_get_property = amdgpu_dm_connector_atomic_get_property, |
197 | .late_register = amdgpu_dm_mst_connector_late_register, |
198 | .early_unregister = amdgpu_dm_mst_connector_early_unregister, |
199 | }; |
200 | |
201 | bool needs_dsc_aux_workaround(struct dc_link *link) |
202 | { |
203 | if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 && |
204 | (link->dpcd_caps.dpcd_rev.raw == DPCD_REV_14 || link->dpcd_caps.dpcd_rev.raw == DPCD_REV_12) && |
205 | link->dpcd_caps.sink_count.bits.SINK_COUNT >= 2) |
206 | return true; |
207 | |
208 | return false; |
209 | } |
210 | |
211 | static bool is_synaptics_cascaded_panamera(struct dc_link *link, struct drm_dp_mst_port *port) |
212 | { |
213 | u8 branch_vendor_data[4] = { 0 }; // Vendor data 0x50C ~ 0x50F |
214 | |
	if (drm_dp_dpcd_read(port->mgr->aux, DP_BRANCH_VENDOR_SPECIFIC_START, &branch_vendor_data, 4) == 4) {
		if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
		    IS_SYNAPTICS_CASCADED_PANAMERA(link->dpcd_caps.branch_dev_name, branch_vendor_data)) {
			DRM_INFO("Synaptics Cascaded MST hub\n");
219 | return true; |
220 | } |
221 | } |
222 | |
223 | return false; |
224 | } |
225 | |
226 | static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnector) |
227 | { |
228 | struct dc_sink *dc_sink = aconnector->dc_sink; |
229 | struct drm_dp_mst_port *port = aconnector->mst_output_port; |
230 | u8 dsc_caps[16] = { 0 }; |
231 | u8 dsc_branch_dec_caps_raw[3] = { 0 }; // DSC branch decoder caps 0xA0 ~ 0xA2 |
232 | u8 *dsc_branch_dec_caps = NULL; |
233 | |
234 | aconnector->dsc_aux = drm_dp_mst_dsc_aux_for_port(port); |
235 | |
	/*
	 * drm_dp_mst_dsc_aux_for_port() will return NULL for certain configs
	 * because it only checks the DSC/FEC caps of the port itself and not
	 * the dock.
	 *
	 * This case will return NULL: a DSC-capable MST dock connected to a
	 * non-FEC/DSC-capable display.
	 *
	 * Workaround: explicitly check for that use case and use the MST
	 * dock's AUX as dsc_aux.
	 */
	if (!aconnector->dsc_aux && !port->parent->port_parent &&
	    needs_dsc_aux_workaround(aconnector->dc_link))
		aconnector->dsc_aux = &aconnector->mst_root->dm_dp_aux.aux;

	/* Synaptics cascaded MST hub case */
	if (!aconnector->dsc_aux && is_synaptics_cascaded_panamera(aconnector->dc_link, port))
		aconnector->dsc_aux = port->mgr->aux;
252 | |
253 | if (!aconnector->dsc_aux) |
254 | return false; |
255 | |
	if (drm_dp_dpcd_read(aconnector->dsc_aux, DP_DSC_SUPPORT, dsc_caps, 16) < 0)
		return false;

	if (drm_dp_dpcd_read(aconnector->dsc_aux,
			     DP_DSC_BRANCH_OVERALL_THROUGHPUT_0, dsc_branch_dec_caps_raw, 3) == 3)
		dsc_branch_dec_caps = dsc_branch_dec_caps_raw;

	if (!dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
				   dsc_caps, dsc_branch_dec_caps,
				   &dc_sink->dsc_caps.dsc_dec_caps))
		return false;
266 | return false; |
267 | |
268 | return true; |
269 | } |
270 | |
271 | static bool retrieve_downstream_port_device(struct amdgpu_dm_connector *aconnector) |
272 | { |
273 | union dp_downstream_port_present ds_port_present; |
274 | |
275 | if (!aconnector->dsc_aux) |
276 | return false; |
277 | |
	if (drm_dp_dpcd_read(aconnector->dsc_aux, DP_DOWNSTREAMPORT_PRESENT, &ds_port_present, 1) < 0) {
		DRM_INFO("Failed to read downstream_port_present 0x05 from DFP of branch device\n");
		return false;
	}

	aconnector->mst_downstream_port_present = ds_port_present;
	DRM_INFO("Downstream port present %d, type %d\n",
		 ds_port_present.fields.PORT_PRESENT, ds_port_present.fields.PORT_TYPE);
286 | |
287 | return true; |
288 | } |
289 | |
290 | static int dm_dp_mst_get_modes(struct drm_connector *connector) |
291 | { |
292 | struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); |
293 | int ret = 0; |
294 | |
295 | if (!aconnector) |
296 | return drm_add_edid_modes(connector, NULL); |
297 | |
298 | if (!aconnector->edid) { |
299 | struct edid *edid; |
300 | |
		edid = drm_dp_mst_get_edid(connector, &aconnector->mst_root->mst_mgr, aconnector->mst_output_port);

		if (!edid) {
			amdgpu_dm_set_mst_status(&aconnector->mst_status,
						 MST_REMOTE_EDID, false);

			drm_connector_update_edid_property(
				&aconnector->base,
				NULL);

			DRM_DEBUG_KMS("Can't get EDID of %s. Add default remote sink.", connector->name);
312 | if (!aconnector->dc_sink) { |
313 | struct dc_sink *dc_sink; |
314 | struct dc_sink_init_data init_params = { |
315 | .link = aconnector->dc_link, |
316 | .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST }; |
317 | |
				dc_sink = dc_link_add_remote_sink(
					aconnector->dc_link,
					NULL,
					0,
					&init_params);

				if (!dc_sink) {
					DRM_ERROR("Unable to add a remote sink\n");
					return 0;
				}

				drm_dbg_dp(connector->dev,
					   "DM_MST: add remote sink 0x%p, %d remaining\n",
					   dc_sink,
					   aconnector->dc_link->sink_count);
333 | |
334 | dc_sink->priv = aconnector; |
335 | aconnector->dc_sink = dc_sink; |
336 | } |
337 | |
338 | return ret; |
339 | } |
340 | |
341 | aconnector->edid = edid; |
		amdgpu_dm_set_mst_status(&aconnector->mst_status,
					 MST_REMOTE_EDID, true);
344 | } |
345 | |
346 | if (aconnector->dc_sink && aconnector->dc_sink->sink_signal == SIGNAL_TYPE_VIRTUAL) { |
		dc_sink_release(aconnector->dc_sink);
348 | aconnector->dc_sink = NULL; |
349 | } |
350 | |
351 | if (!aconnector->dc_sink) { |
352 | struct dc_sink *dc_sink; |
353 | struct dc_sink_init_data init_params = { |
354 | .link = aconnector->dc_link, |
355 | .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST }; |
		dc_sink = dc_link_add_remote_sink(
			aconnector->dc_link,
			(uint8_t *)aconnector->edid,
			(aconnector->edid->extensions + 1) * EDID_LENGTH,
			&init_params);

		if (!dc_sink) {
			DRM_ERROR("Unable to add a remote sink\n");
			return 0;
		}

		drm_dbg_dp(connector->dev,
			   "DM_MST: add remote sink 0x%p, %d remaining\n",
			   dc_sink, aconnector->dc_link->sink_count);
369 | dc_sink, aconnector->dc_link->sink_count); |
370 | |
371 | dc_sink->priv = aconnector; |
372 | /* dc_link_add_remote_sink returns a new reference */ |
373 | aconnector->dc_sink = dc_sink; |
374 | |
		/* When a display is unplugged from the MST hub, the connector
		 * is destroyed in dm_dp_mst_connector_destroy and its hdcp
		 * properties (type, undesired, desired, enabled) are lost.
		 * So, save the hdcp properties into hdcp_work within
		 * amdgpu_dm_atomic_commit_tail. If the same display is
		 * plugged back with the same display index, its hdcp
		 * properties will be retrieved from hdcp_work within
		 * dm_dp_mst_get_modes.
		 */
383 | if (aconnector->dc_sink && connector->state) { |
384 | struct drm_device *dev = connector->dev; |
			struct amdgpu_device *adev = drm_to_adev(dev);
386 | |
387 | if (adev->dm.hdcp_workqueue) { |
388 | struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue; |
389 | struct hdcp_workqueue *hdcp_w = |
390 | &hdcp_work[aconnector->dc_link->link_index]; |
391 | |
392 | connector->state->hdcp_content_type = |
393 | hdcp_w->hdcp_content_type[connector->index]; |
394 | connector->state->content_protection = |
395 | hdcp_w->content_protection[connector->index]; |
396 | } |
397 | } |
398 | |
399 | if (aconnector->dc_sink) { |
			amdgpu_dm_update_freesync_caps(
					connector, aconnector->edid);
402 | |
403 | if (!validate_dsc_caps_on_connector(aconnector)) |
404 | memset(&aconnector->dc_sink->dsc_caps, |
405 | 0, sizeof(aconnector->dc_sink->dsc_caps)); |
406 | |
407 | if (!retrieve_downstream_port_device(aconnector)) |
408 | memset(&aconnector->mst_downstream_port_present, |
409 | 0, sizeof(aconnector->mst_downstream_port_present)); |
410 | } |
411 | } |
412 | |
	drm_connector_update_edid_property(
		&aconnector->base, aconnector->edid);

	ret = drm_add_edid_modes(connector, aconnector->edid);
417 | |
418 | return ret; |
419 | } |
420 | |
421 | static struct drm_encoder * |
422 | dm_mst_atomic_best_encoder(struct drm_connector *connector, |
423 | struct drm_atomic_state *state) |
424 | { |
425 | struct drm_connector_state *connector_state = drm_atomic_get_new_connector_state(state, |
426 | connector); |
427 | struct drm_device *dev = connector->dev; |
	struct amdgpu_device *adev = drm_to_adev(dev);
429 | struct amdgpu_crtc *acrtc = to_amdgpu_crtc(connector_state->crtc); |
430 | |
431 | return &adev->dm.mst_encoders[acrtc->crtc_id].base; |
432 | } |
433 | |
434 | static int |
435 | dm_dp_mst_detect(struct drm_connector *connector, |
436 | struct drm_modeset_acquire_ctx *ctx, bool force) |
437 | { |
438 | struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); |
439 | struct amdgpu_dm_connector *master = aconnector->mst_root; |
440 | struct drm_dp_mst_port *port = aconnector->mst_output_port; |
441 | int connection_status; |
442 | |
443 | if (drm_connector_is_unregistered(connector)) |
444 | return connector_status_disconnected; |
445 | |
	connection_status = drm_dp_mst_detect_port(connector, ctx, &master->mst_mgr,
						   aconnector->mst_output_port);
448 | |
449 | if (port->pdt != DP_PEER_DEVICE_NONE && !port->dpcd_rev) { |
450 | uint8_t dpcd_rev; |
451 | int ret; |
452 | |
		ret = drm_dp_dpcd_readb(&port->aux, DP_DP13_DPCD_REV, &dpcd_rev);
454 | |
455 | if (ret == 1) { |
456 | port->dpcd_rev = dpcd_rev; |
457 | |
			/* Could be DP 1.2 DP Rx case */
			if (!dpcd_rev) {
				ret = drm_dp_dpcd_readb(&port->aux, DP_DPCD_REV, &dpcd_rev);

				if (ret == 1)
					port->dpcd_rev = dpcd_rev;
			}

			if (!dpcd_rev)
				DRM_DEBUG_KMS("Can't decide DPCD revision number!");
468 | } |
469 | |
		/*
		 * Could be a legacy sink, logical port, etc. on DP 1.2.
		 * Remote DPCD reads are NAKed in these cases.
		 */
		if (ret != 1)
			DRM_DEBUG_KMS("Can't access DPCD");
477 | } else if (port->pdt == DP_PEER_DEVICE_NONE) { |
478 | port->dpcd_rev = 0; |
479 | } |
480 | |
	/*
	 * Release the dc_sink for a connector whose unplug event was
	 * notified by a CSN message.
	 */
	if (connection_status == connector_status_disconnected && aconnector->dc_sink) {
		if (aconnector->dc_link->sink_count)
			dc_link_remove_remote_sink(aconnector->dc_link, aconnector->dc_sink);

		drm_dbg_dp(connector->dev,
			   "DM_MST: remove remote sink 0x%p, %d remaining\n",
			   aconnector->dc_link,
			   aconnector->dc_link->sink_count);

		dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		aconnector->edid = NULL;

		amdgpu_dm_set_mst_status(&aconnector->mst_status,
					 MST_REMOTE_EDID | MST_ALLOCATE_NEW_PAYLOAD | MST_CLEAR_ALLOCATED_PAYLOAD,
					 false);
500 | } |
501 | |
502 | return connection_status; |
503 | } |
504 | |
505 | static int dm_dp_mst_atomic_check(struct drm_connector *connector, |
506 | struct drm_atomic_state *state) |
507 | { |
508 | struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); |
509 | struct drm_dp_mst_topology_mgr *mst_mgr = &aconnector->mst_root->mst_mgr; |
510 | struct drm_dp_mst_port *mst_port = aconnector->mst_output_port; |
511 | |
	return drm_dp_atomic_release_time_slots(state, mst_mgr, mst_port);
513 | } |
514 | |
515 | static const struct drm_connector_helper_funcs dm_dp_mst_connector_helper_funcs = { |
516 | .get_modes = dm_dp_mst_get_modes, |
517 | .mode_valid = amdgpu_dm_connector_mode_valid, |
518 | .atomic_best_encoder = dm_mst_atomic_best_encoder, |
519 | .detect_ctx = dm_dp_mst_detect, |
520 | .atomic_check = dm_dp_mst_atomic_check, |
521 | }; |
522 | |
523 | static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder) |
524 | { |
525 | drm_encoder_cleanup(encoder); |
526 | } |
527 | |
528 | static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = { |
529 | .destroy = amdgpu_dm_encoder_destroy, |
530 | }; |
531 | |
532 | void |
533 | dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev) |
534 | { |
535 | struct drm_device *dev = adev_to_drm(adev); |
536 | int i; |
537 | |
538 | for (i = 0; i < adev->dm.display_indexes_num; i++) { |
539 | struct amdgpu_encoder *amdgpu_encoder = &adev->dm.mst_encoders[i]; |
540 | struct drm_encoder *encoder = &amdgpu_encoder->base; |
541 | |
542 | encoder->possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev); |
543 | |
		drm_encoder_init(
			dev,
			&amdgpu_encoder->base,
			&amdgpu_dm_encoder_funcs,
			DRM_MODE_ENCODER_DPMST,
			NULL);

		drm_encoder_helper_add(encoder, &amdgpu_dm_encoder_helper_funcs);
552 | } |
553 | } |
554 | |
555 | static struct drm_connector * |
556 | dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, |
557 | struct drm_dp_mst_port *port, |
558 | const char *pathprop) |
559 | { |
560 | struct amdgpu_dm_connector *master = container_of(mgr, struct amdgpu_dm_connector, mst_mgr); |
561 | struct drm_device *dev = master->base.dev; |
	struct amdgpu_device *adev = drm_to_adev(dev);
563 | struct amdgpu_dm_connector *aconnector; |
564 | struct drm_connector *connector; |
565 | int i; |
566 | |
	aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
568 | if (!aconnector) |
569 | return NULL; |
570 | |
571 | connector = &aconnector->base; |
572 | aconnector->mst_output_port = port; |
573 | aconnector->mst_root = master; |
	amdgpu_dm_set_mst_status(&aconnector->mst_status,
				 MST_PROBE, true);

	if (drm_connector_init(
		dev,
		connector,
		&dm_dp_mst_connector_funcs,
		DRM_MODE_CONNECTOR_DisplayPort)) {
		kfree(aconnector);
		return NULL;
	}
	drm_connector_helper_add(connector, &dm_dp_mst_connector_helper_funcs);

	amdgpu_dm_connector_init_helper(
		&adev->dm,
		aconnector,
		DRM_MODE_CONNECTOR_DisplayPort,
		master->dc_link,
		master->connector_id);

	for (i = 0; i < adev->dm.display_indexes_num; i++) {
		drm_connector_attach_encoder(&aconnector->base,
					     &adev->dm.mst_encoders[i].base);
597 | } |
598 | |
599 | connector->max_bpc_property = master->base.max_bpc_property; |
600 | if (connector->max_bpc_property) |
		drm_connector_attach_max_bpc_property(connector, 8, 16);

	connector->vrr_capable_property = master->base.vrr_capable_property;
	if (connector->vrr_capable_property)
		drm_connector_attach_vrr_capable_property(connector);

	drm_object_attach_property(
		&connector->base,
		dev->mode_config.path_property,
		0);
	drm_object_attach_property(
		&connector->base,
		dev->mode_config.tile_property,
		0);

	drm_connector_set_path_property(connector, pathprop);
617 | |
	/*
	 * Initialize the connector state before adding the connector to the
	 * drm and framebuffer lists
	 */
622 | amdgpu_dm_connector_funcs_reset(connector); |
623 | |
624 | drm_dp_mst_get_port_malloc(port); |
625 | |
626 | return connector; |
627 | } |
628 | |
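/*
 * Poll the sink's ESI (or legacy sink count/IRQ) DPCD registers, let the DRM
 * MST helper handle any DOWN_REP/UP_REQ message-ready interrupts, and ACK
 * them back at the sink until no ready bits remain (bounded by
 * max_process_count iterations).
 */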
629 | void dm_handle_mst_sideband_msg_ready_event( |
630 | struct drm_dp_mst_topology_mgr *mgr, |
631 | enum mst_msg_ready_type msg_rdy_type) |
632 | { |
633 | uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 }; |
634 | uint8_t dret; |
635 | bool new_irq_handled = false; |
636 | int dpcd_addr; |
637 | uint8_t dpcd_bytes_to_read; |
638 | const uint8_t max_process_count = 30; |
639 | uint8_t process_count = 0; |
640 | u8 retry; |
641 | struct amdgpu_dm_connector *aconnector = |
642 | container_of(mgr, struct amdgpu_dm_connector, mst_mgr); |
643 | |
644 | |
	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
646 | |
647 | if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) { |
648 | dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT; |
649 | /* DPCD 0x200 - 0x201 for downstream IRQ */ |
650 | dpcd_addr = DP_SINK_COUNT; |
651 | } else { |
652 | dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI; |
653 | /* DPCD 0x2002 - 0x2005 for downstream IRQ */ |
654 | dpcd_addr = DP_SINK_COUNT_ESI; |
655 | } |
656 | |
657 | mutex_lock(&aconnector->handle_mst_msg_ready); |
658 | |
659 | while (process_count < max_process_count) { |
660 | u8 ack[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = {}; |
661 | |
662 | process_count++; |
663 | |
		dret = drm_dp_dpcd_read(
			&aconnector->dm_dp_aux.aux,
			dpcd_addr,
			esi,
			dpcd_bytes_to_read);

		if (dret != dpcd_bytes_to_read) {
			DRM_DEBUG_KMS("DPCD read and acked number is not as expected!");
			break;
		}

		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
676 | |
677 | switch (msg_rdy_type) { |
678 | case DOWN_REP_MSG_RDY_EVENT: |
			/* Only handle DOWN_REP_MSG_RDY case */
			esi[1] &= DP_DOWN_REP_MSG_RDY;
			break;
		case UP_REQ_MSG_RDY_EVENT:
			/* Only handle UP_REQ_MSG_RDY case */
			esi[1] &= DP_UP_REQ_MSG_RDY;
			break;
		default:
			/* Handle both cases */
688 | esi[1] &= (DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY); |
689 | break; |
690 | } |
691 | |
692 | if (!esi[1]) |
693 | break; |
694 | |
695 | /* handle MST irq */ |
696 | if (aconnector->mst_mgr.mst_state) |
			drm_dp_mst_hpd_irq_handle_event(&aconnector->mst_mgr,
							esi,
							ack,
							&new_irq_handled);
701 | |
702 | if (new_irq_handled) { |
			/* ACK at DPCD to notify downstream */
			for (retry = 0; retry < 3; retry++) {
				ssize_t wret;

				wret = drm_dp_dpcd_writeb(&aconnector->dm_dp_aux.aux,
							  dpcd_addr + 1,
							  ack[1]);
				if (wret == 1)
					break;
			}

			if (retry == 3) {
				DRM_ERROR("Failed to ack MST event.\n");
				break;
			}

			drm_dp_mst_hpd_irq_send_new_request(&aconnector->mst_mgr);
720 | |
721 | new_irq_handled = false; |
722 | } else { |
723 | break; |
724 | } |
725 | } |
726 | |
	mutex_unlock(&aconnector->handle_mst_msg_ready);

	if (process_count == max_process_count)
		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
731 | } |
732 | |
733 | static void dm_handle_mst_down_rep_msg_ready(struct drm_dp_mst_topology_mgr *mgr) |
734 | { |
	dm_handle_mst_sideband_msg_ready_event(mgr, DOWN_REP_MSG_RDY_EVENT);
736 | } |
737 | |
738 | static const struct drm_dp_mst_topology_cbs dm_mst_cbs = { |
739 | .add_connector = dm_dp_add_mst_connector, |
740 | .poll_hpd_irq = dm_handle_mst_down_rep_msg_ready, |
741 | }; |
742 | |
743 | void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm, |
744 | struct amdgpu_dm_connector *aconnector, |
745 | int link_index) |
746 | { |
747 | struct dc_link_settings max_link_enc_cap = {0}; |
748 | |
	aconnector->dm_dp_aux.aux.name =
		kasprintf(GFP_KERNEL, "AMDGPU DM aux hw bus %d",
			  link_index);
	aconnector->dm_dp_aux.aux.transfer = dm_dp_aux_transfer;
	aconnector->dm_dp_aux.aux.drm_dev = dm->ddev;
	aconnector->dm_dp_aux.ddc_service = aconnector->dc_link->ddc;

	drm_dp_aux_init(&aconnector->dm_dp_aux.aux);
	drm_dp_cec_register_connector(&aconnector->dm_dp_aux.aux,
				      &aconnector->base);
759 | |
760 | if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_eDP) |
761 | return; |
762 | |
	dc_link_dp_get_max_link_enc_cap(aconnector->dc_link, &max_link_enc_cap);
	aconnector->mst_mgr.cbs = &dm_mst_cbs;
	drm_dp_mst_topology_mgr_init(&aconnector->mst_mgr, adev_to_drm(dm->adev),
				     &aconnector->dm_dp_aux.aux, 16, 4, aconnector->connector_id);

	drm_connector_attach_dp_subconnector_property(&aconnector->base);
769 | } |
770 | |
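/*
 * Bandwidth of one MTP time slot on this link, expressed in PBN: one PBN
 * unit is 54/64 MBps and a link carries 64 time slots, which reduces to
 * link_bandwidth_kbps / (8 * 1000 * 54).
 */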
771 | int dm_mst_get_pbn_divider(struct dc_link *link) |
772 | { |
773 | if (!link) |
774 | return 0; |
775 | |
	return dc_link_bandwidth_kbps(link,
			dc_link_get_link_cap(link)) / (8 * 1000 * 54);
778 | } |
779 | |
780 | struct dsc_mst_fairness_params { |
781 | struct dc_crtc_timing *timing; |
782 | struct dc_sink *sink; |
783 | struct dc_dsc_bw_range bw_range; |
784 | bool compression_possible; |
785 | struct drm_dp_mst_port *port; |
786 | enum dsc_clock_force_state clock_force_enable; |
787 | uint32_t num_slices_h; |
788 | uint32_t num_slices_v; |
789 | uint32_t bpp_overwrite; |
790 | struct amdgpu_dm_connector *aconnector; |
791 | }; |
792 | |
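/*
 * FEC adds transport overhead on top of the stream bandwidth, and the
 * overhead differs between 8b/10b and 128b/132b link encoding, so pick the
 * matching multiplier (expressed in units of 1/1000).
 */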
793 | static uint16_t get_fec_overhead_multiplier(struct dc_link *dc_link) |
794 | { |
795 | u8 link_coding_cap; |
796 | uint16_t fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B; |
797 | |
	link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(dc_link);
799 | if (link_coding_cap == DP_128b_132b_ENCODING) |
800 | fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B; |
801 | |
802 | return fec_overhead_multiplier_x1000; |
803 | } |
804 | |
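/*
 * Convert a stream bandwidth in kbps to a peak PBN value: apply the 0.6%
 * peak/downspread margin (x1006) and the FEC overhead multiplier, then
 * convert to PBN units of 54/64 MBps, i.e. ceil(kbps * 64 / (54 * 8 * 1000)).
 */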
805 | static int kbps_to_peak_pbn(int kbps, uint16_t fec_overhead_multiplier_x1000) |
806 | { |
807 | u64 peak_kbps = kbps; |
808 | |
809 | peak_kbps *= 1006; |
810 | peak_kbps *= fec_overhead_multiplier_x1000; |
	peak_kbps = div_u64(peak_kbps, 1000 * 1000);
812 | return (int) DIV64_U64_ROUND_UP(peak_kbps * 64, (54 * 8 * 1000)); |
813 | } |
814 | |
815 | static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *params, |
816 | struct dsc_mst_fairness_vars *vars, |
817 | int count, |
818 | int k) |
819 | { |
820 | struct drm_connector *drm_connector; |
821 | int i; |
822 | struct dc_dsc_config_options dsc_options = {0}; |
823 | |
824 | for (i = 0; i < count; i++) { |
825 | drm_connector = ¶ms[i].aconnector->base; |
826 | |
		dc_dsc_get_default_config_option(params[i].sink->ctx->dc, &dsc_options);
		dsc_options.max_target_bpp_limit_override_x16 = drm_connector->display_info.max_dsc_bpp * 16;

		memset(&params[i].timing->dsc_cfg, 0, sizeof(params[i].timing->dsc_cfg));
		if (vars[i + k].dsc_enabled && dc_dsc_compute_config(
					params[i].sink->ctx->dc->res_pool->dscs[0],
					&params[i].sink->dsc_caps.dsc_dec_caps,
					&dsc_options,
					0,
					params[i].timing,
					dc_link_get_highest_encoding_format(params[i].aconnector->dc_link),
					&params[i].timing->dsc_cfg)) {
839 | params[i].timing->flags.DSC = 1; |
840 | |
841 | if (params[i].bpp_overwrite) |
842 | params[i].timing->dsc_cfg.bits_per_pixel = params[i].bpp_overwrite; |
843 | else |
844 | params[i].timing->dsc_cfg.bits_per_pixel = vars[i + k].bpp_x16; |
845 | |
846 | if (params[i].num_slices_h) |
847 | params[i].timing->dsc_cfg.num_slices_h = params[i].num_slices_h; |
848 | |
849 | if (params[i].num_slices_v) |
850 | params[i].timing->dsc_cfg.num_slices_v = params[i].num_slices_v; |
851 | } else { |
852 | params[i].timing->flags.DSC = 0; |
853 | } |
854 | params[i].timing->dsc_cfg.mst_pbn = vars[i + k].pbn; |
855 | } |
856 | |
857 | for (i = 0; i < count; i++) { |
858 | if (params[i].sink) { |
859 | if (params[i].sink->sink_signal != SIGNAL_TYPE_VIRTUAL && |
860 | params[i].sink->sink_signal != SIGNAL_TYPE_NONE) |
			DRM_DEBUG_DRIVER("%s i=%d dispname=%s\n", __func__, i,
					 params[i].sink->edid_caps.display_name);
		}

		DRM_DEBUG_DRIVER("dsc=%d bits_per_pixel=%d pbn=%d\n",
				 params[i].timing->flags.DSC,
				 params[i].timing->dsc_cfg.bits_per_pixel,
				 vars[i + k].pbn);
869 | } |
870 | } |
871 | |
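/*
 * Convert a PBN allocation back to an approximate bandwidth in kbps (the
 * 994/1000 factor roughly undoes the margin applied in kbps_to_peak_pbn())
 * and ask DC for a DSC config targeting that bandwidth; returns the
 * resulting bits_per_pixel in units of 1/16 bpp.
 */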
872 | static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn) |
873 | { |
874 | struct dc_dsc_config dsc_config; |
875 | u64 kbps; |
876 | |
877 | struct drm_connector *drm_connector = ¶m.aconnector->base; |
878 | struct dc_dsc_config_options dsc_options = {0}; |
879 | |
	dc_dsc_get_default_config_option(param.sink->ctx->dc, &dsc_options);
	dsc_options.max_target_bpp_limit_override_x16 = drm_connector->display_info.max_dsc_bpp * 16;

	kbps = div_u64((u64)pbn * 994 * 8 * 54, 64);
	dc_dsc_compute_config(
		param.sink->ctx->dc->res_pool->dscs[0],
		&param.sink->dsc_caps.dsc_dec_caps,
		&dsc_options,
		(int) kbps, param.timing,
		dc_link_get_highest_encoding_format(param.aconnector->dc_link),
		&dsc_config);
891 | |
892 | return dsc_config.bits_per_pixel; |
893 | } |
894 | |
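/*
 * Fairly raise the DSC target bpp of the enabled streams: repeatedly pick
 * the stream with the smallest remaining slack, give it either a fair share
 * of the unused link time slots or all of its remaining slack, and keep the
 * change only if the MST atomic check still passes.
 */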
895 | static int increase_dsc_bpp(struct drm_atomic_state *state, |
896 | struct drm_dp_mst_topology_state *mst_state, |
897 | struct dc_link *dc_link, |
898 | struct dsc_mst_fairness_params *params, |
899 | struct dsc_mst_fairness_vars *vars, |
900 | int count, |
901 | int k) |
902 | { |
903 | int i; |
904 | bool bpp_increased[MAX_PIPES]; |
905 | int initial_slack[MAX_PIPES]; |
906 | int min_initial_slack; |
907 | int next_index; |
908 | int remaining_to_increase = 0; |
909 | int link_timeslots_used; |
910 | int fair_pbn_alloc; |
911 | int ret = 0; |
912 | uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link); |
913 | |
914 | for (i = 0; i < count; i++) { |
915 | if (vars[i + k].dsc_enabled) { |
916 | initial_slack[i] = |
				kbps_to_peak_pbn(params[i].bw_range.max_kbps, fec_overhead_multiplier_x1000) - vars[i + k].pbn;
918 | bpp_increased[i] = false; |
919 | remaining_to_increase += 1; |
920 | } else { |
921 | initial_slack[i] = 0; |
922 | bpp_increased[i] = true; |
923 | } |
924 | } |
925 | |
926 | while (remaining_to_increase) { |
927 | next_index = -1; |
928 | min_initial_slack = -1; |
929 | for (i = 0; i < count; i++) { |
930 | if (!bpp_increased[i]) { |
931 | if (min_initial_slack == -1 || min_initial_slack > initial_slack[i]) { |
932 | min_initial_slack = initial_slack[i]; |
933 | next_index = i; |
934 | } |
935 | } |
936 | } |
937 | |
938 | if (next_index == -1) |
939 | break; |
940 | |
941 | link_timeslots_used = 0; |
942 | |
943 | for (i = 0; i < count; i++) |
944 | link_timeslots_used += DIV_ROUND_UP(vars[i + k].pbn, mst_state->pbn_div); |
945 | |
946 | fair_pbn_alloc = |
947 | (63 - link_timeslots_used) / remaining_to_increase * mst_state->pbn_div; |
948 | |
949 | if (initial_slack[next_index] > fair_pbn_alloc) { |
950 | vars[next_index].pbn += fair_pbn_alloc; |
			ret = drm_dp_atomic_find_time_slots(state,
							    params[next_index].port->mgr,
							    params[next_index].port,
							    vars[next_index].pbn);
			if (ret < 0)
				return ret;

			ret = drm_dp_mst_atomic_check(state);
			if (ret == 0) {
				vars[next_index].bpp_x16 = bpp_x16_from_pbn(params[next_index], vars[next_index].pbn);
			} else {
				vars[next_index].pbn -= fair_pbn_alloc;
				ret = drm_dp_atomic_find_time_slots(state,
								    params[next_index].port->mgr,
								    params[next_index].port,
								    vars[next_index].pbn);
967 | if (ret < 0) |
968 | return ret; |
969 | } |
970 | } else { |
971 | vars[next_index].pbn += initial_slack[next_index]; |
			ret = drm_dp_atomic_find_time_slots(state,
							    params[next_index].port->mgr,
							    params[next_index].port,
							    vars[next_index].pbn);
976 | if (ret < 0) |
977 | return ret; |
978 | |
979 | ret = drm_dp_mst_atomic_check(state); |
980 | if (ret == 0) { |
981 | vars[next_index].bpp_x16 = params[next_index].bw_range.max_target_bpp_x16; |
982 | } else { |
983 | vars[next_index].pbn -= initial_slack[next_index]; |
				ret = drm_dp_atomic_find_time_slots(state,
								    params[next_index].port->mgr,
								    params[next_index].port,
								    vars[next_index].pbn);
988 | if (ret < 0) |
989 | return ret; |
990 | } |
991 | } |
992 | |
993 | bpp_increased[next_index] = true; |
994 | remaining_to_increase--; |
995 | } |
996 | return 0; |
997 | } |
998 | |
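/*
 * For streams already raised to their maximum DSC bpp, try switching DSC
 * off entirely, starting with the stream that gains the most bandwidth from
 * doing so; keep DSC disabled only if the MST atomic check still passes.
 */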
999 | static int try_disable_dsc(struct drm_atomic_state *state, |
1000 | struct dc_link *dc_link, |
1001 | struct dsc_mst_fairness_params *params, |
1002 | struct dsc_mst_fairness_vars *vars, |
1003 | int count, |
1004 | int k) |
1005 | { |
1006 | int i; |
1007 | bool tried[MAX_PIPES]; |
1008 | int kbps_increase[MAX_PIPES]; |
1009 | int max_kbps_increase; |
1010 | int next_index; |
1011 | int remaining_to_try = 0; |
1012 | int ret; |
1013 | uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link); |
1014 | |
1015 | for (i = 0; i < count; i++) { |
1016 | if (vars[i + k].dsc_enabled |
1017 | && vars[i + k].bpp_x16 == params[i].bw_range.max_target_bpp_x16 |
1018 | && params[i].clock_force_enable == DSC_CLK_FORCE_DEFAULT) { |
1019 | kbps_increase[i] = params[i].bw_range.stream_kbps - params[i].bw_range.max_kbps; |
1020 | tried[i] = false; |
1021 | remaining_to_try += 1; |
1022 | } else { |
1023 | kbps_increase[i] = 0; |
1024 | tried[i] = true; |
1025 | } |
1026 | } |
1027 | |
1028 | while (remaining_to_try) { |
1029 | next_index = -1; |
1030 | max_kbps_increase = -1; |
1031 | for (i = 0; i < count; i++) { |
1032 | if (!tried[i]) { |
1033 | if (max_kbps_increase == -1 || max_kbps_increase < kbps_increase[i]) { |
1034 | max_kbps_increase = kbps_increase[i]; |
1035 | next_index = i; |
1036 | } |
1037 | } |
1038 | } |
1039 | |
1040 | if (next_index == -1) |
1041 | break; |
1042 | |
		vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
		ret = drm_dp_atomic_find_time_slots(state,
						    params[next_index].port->mgr,
						    params[next_index].port,
						    vars[next_index].pbn);
1048 | if (ret < 0) |
1049 | return ret; |
1050 | |
1051 | ret = drm_dp_mst_atomic_check(state); |
1052 | if (ret == 0) { |
1053 | vars[next_index].dsc_enabled = false; |
1054 | vars[next_index].bpp_x16 = 0; |
1055 | } else { |
			vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps, fec_overhead_multiplier_x1000);
			ret = drm_dp_atomic_find_time_slots(state,
							    params[next_index].port->mgr,
							    params[next_index].port,
							    vars[next_index].pbn);
1061 | if (ret < 0) |
1062 | return ret; |
1063 | } |
1064 | |
1065 | tried[next_index] = true; |
1066 | remaining_to_try--; |
1067 | } |
1068 | return 0; |
1069 | } |
1070 | |
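/*
 * Compute the DSC configs for all streams on one MST link. Unless the
 * debugfs force-enable knob is set, the strategy is: first try every stream
 * uncompressed; if that does not fit, enable DSC at minimum bpp for every
 * DSC-capable stream; then let increase_dsc_bpp() and try_disable_dsc()
 * redistribute the leftover bandwidth.
 */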
1071 | static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, |
1072 | struct dc_state *dc_state, |
1073 | struct dc_link *dc_link, |
1074 | struct dsc_mst_fairness_vars *vars, |
1075 | struct drm_dp_mst_topology_mgr *mgr, |
1076 | int *link_vars_start_index) |
1077 | { |
1078 | struct dc_stream_state *stream; |
1079 | struct dsc_mst_fairness_params params[MAX_PIPES]; |
1080 | struct amdgpu_dm_connector *aconnector; |
1081 | struct drm_dp_mst_topology_state *mst_state = drm_atomic_get_mst_topology_state(state, mgr); |
1082 | int count = 0; |
1083 | int i, k, ret; |
1084 | bool debugfs_overwrite = false; |
1085 | uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link); |
1086 | |
1087 | memset(params, 0, sizeof(params)); |
1088 | |
	if (IS_ERR(mst_state))
		return PTR_ERR(mst_state);
1091 | |
1092 | /* Set up params */ |
1093 | for (i = 0; i < dc_state->stream_count; i++) { |
1094 | struct dc_dsc_policy dsc_policy = {0}; |
1095 | |
1096 | stream = dc_state->streams[i]; |
1097 | |
1098 | if (stream->link != dc_link) |
1099 | continue; |
1100 | |
1101 | aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; |
1102 | if (!aconnector) |
1103 | continue; |
1104 | |
1105 | if (!aconnector->mst_output_port) |
1106 | continue; |
1107 | |
1108 | stream->timing.flags.DSC = 0; |
1109 | |
1110 | params[count].timing = &stream->timing; |
1111 | params[count].sink = stream->sink; |
1112 | params[count].aconnector = aconnector; |
1113 | params[count].port = aconnector->mst_output_port; |
1114 | params[count].clock_force_enable = aconnector->dsc_settings.dsc_force_enable; |
1115 | if (params[count].clock_force_enable == DSC_CLK_FORCE_ENABLE) |
1116 | debugfs_overwrite = true; |
1117 | params[count].num_slices_h = aconnector->dsc_settings.dsc_num_slices_h; |
1118 | params[count].num_slices_v = aconnector->dsc_settings.dsc_num_slices_v; |
1119 | params[count].bpp_overwrite = aconnector->dsc_settings.dsc_bits_per_pixel; |
1120 | params[count].compression_possible = stream->sink->dsc_caps.dsc_dec_caps.is_dsc_supported; |
		dc_dsc_get_policy_for_timing(params[count].timing, 0, &dsc_policy);
		if (!dc_dsc_compute_bandwidth_range(
				stream->sink->ctx->dc->res_pool->dscs[0],
				stream->sink->ctx->dc->debug.dsc_min_slice_height_override,
				dsc_policy.min_target_bpp * 16,
				dsc_policy.max_target_bpp * 16,
				&stream->sink->dsc_caps.dsc_dec_caps,
				&stream->timing,
				dc_link_get_highest_encoding_format(dc_link),
				&params[count].bw_range))
			params[count].bw_range.stream_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing,
					dc_link_get_highest_encoding_format(dc_link));
1133 | |
1134 | count++; |
1135 | } |
1136 | |
1137 | if (count == 0) { |
1138 | ASSERT(0); |
1139 | return 0; |
1140 | } |
1141 | |
1142 | /* k is start index of vars for current phy link used by mst hub */ |
1143 | k = *link_vars_start_index; |
1144 | /* set vars start index for next mst hub phy link */ |
1145 | *link_vars_start_index += count; |
1146 | |
1147 | /* Try no compression */ |
1148 | for (i = 0; i < count; i++) { |
1149 | vars[i + k].aconnector = params[i].aconnector; |
		vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
		vars[i + k].dsc_enabled = false;
		vars[i + k].bpp_x16 = 0;
		ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port,
						    vars[i + k].pbn);
1155 | if (ret < 0) |
1156 | return ret; |
1157 | } |
1158 | ret = drm_dp_mst_atomic_check(state); |
1159 | if (ret == 0 && !debugfs_overwrite) { |
1160 | set_dsc_configs_from_fairness_vars(params, vars, count, k); |
1161 | return 0; |
1162 | } else if (ret != -ENOSPC) { |
1163 | return ret; |
1164 | } |
1165 | |
1166 | /* Try max compression */ |
1167 | for (i = 0; i < count; i++) { |
1168 | if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) { |
			vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps, fec_overhead_multiplier_x1000);
			vars[i + k].dsc_enabled = true;
			vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16;
			ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
							    params[i].port, vars[i + k].pbn);
			if (ret < 0)
				return ret;
		} else {
			vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
			vars[i + k].dsc_enabled = false;
			vars[i + k].bpp_x16 = 0;
			ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
							    params[i].port, vars[i + k].pbn);
1182 | if (ret < 0) |
1183 | return ret; |
1184 | } |
1185 | } |
1186 | ret = drm_dp_mst_atomic_check(state); |
1187 | if (ret != 0) |
1188 | return ret; |
1189 | |
1190 | /* Optimize degree of compression */ |
1191 | ret = increase_dsc_bpp(state, mst_state, dc_link, params, vars, count, k); |
1192 | if (ret < 0) |
1193 | return ret; |
1194 | |
1195 | ret = try_disable_dsc(state, dc_link, params, vars, count, k); |
1196 | if (ret < 0) |
1197 | return ret; |
1198 | |
1199 | set_dsc_configs_from_fairness_vars(params, vars, count, k); |
1200 | |
1201 | return 0; |
1202 | } |
1203 | |
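/*
 * Decide whether the DSC configs on this MST link must be recomputed: either
 * a stream on the link has a mode/active/connector change in the new atomic
 * state, or a stream that exists in the current DC state has disappeared
 * from the new request.
 */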
1204 | static bool is_dsc_need_re_compute( |
1205 | struct drm_atomic_state *state, |
1206 | struct dc_state *dc_state, |
1207 | struct dc_link *dc_link) |
1208 | { |
1209 | int i, j; |
1210 | bool is_dsc_need_re_compute = false; |
1211 | struct amdgpu_dm_connector *stream_on_link[MAX_PIPES]; |
1212 | int new_stream_on_link_num = 0; |
1213 | struct amdgpu_dm_connector *aconnector; |
1214 | struct dc_stream_state *stream; |
1215 | const struct dc *dc = dc_link->dc; |
1216 | |
1217 | /* only check phy used by dsc mst branch */ |
1218 | if (dc_link->type != dc_connection_mst_branch) |
1219 | return false; |
1220 | |
1221 | if (!(dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT || |
1222 | dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_PASSTHROUGH_SUPPORT)) |
1223 | return false; |
1224 | |
1225 | for (i = 0; i < MAX_PIPES; i++) |
1226 | stream_on_link[i] = NULL; |
1227 | |
1228 | /* check if there is mode change in new request */ |
1229 | for (i = 0; i < dc_state->stream_count; i++) { |
1230 | struct drm_crtc_state *new_crtc_state; |
1231 | struct drm_connector_state *new_conn_state; |
1232 | |
1233 | stream = dc_state->streams[i]; |
1234 | if (!stream) |
1235 | continue; |
1236 | |
1237 | /* check if stream using the same link for mst */ |
1238 | if (stream->link != dc_link) |
1239 | continue; |
1240 | |
1241 | aconnector = (struct amdgpu_dm_connector *) stream->dm_stream_context; |
1242 | if (!aconnector) |
1243 | continue; |
1244 | |
1245 | stream_on_link[new_stream_on_link_num] = aconnector; |
1246 | new_stream_on_link_num++; |
1247 | |
		new_conn_state = drm_atomic_get_new_connector_state(state, &aconnector->base);
		if (!new_conn_state)
			continue;

		if (IS_ERR(new_conn_state))
			continue;

		if (!new_conn_state->crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
		if (!new_crtc_state)
			continue;

		if (IS_ERR(new_crtc_state))
1263 | continue; |
1264 | |
1265 | if (new_crtc_state->enable && new_crtc_state->active) { |
1266 | if (new_crtc_state->mode_changed || new_crtc_state->active_changed || |
1267 | new_crtc_state->connectors_changed) |
1268 | return true; |
1269 | } |
1270 | } |
1271 | |
	/* Check current_state for streams on this link that are not in the
	 * new request state.
	 */
1275 | for (i = 0; i < dc->current_state->stream_count; i++) { |
1276 | stream = dc->current_state->streams[i]; |
1277 | /* only check stream on the mst hub */ |
1278 | if (stream->link != dc_link) |
1279 | continue; |
1280 | |
1281 | aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; |
1282 | if (!aconnector) |
1283 | continue; |
1284 | |
1285 | for (j = 0; j < new_stream_on_link_num; j++) { |
1286 | if (stream_on_link[j]) { |
1287 | if (aconnector == stream_on_link[j]) |
1288 | break; |
1289 | } |
1290 | } |
1291 | |
1292 | if (j == new_stream_on_link_num) { |
1293 | /* not in new state */ |
1294 | is_dsc_need_re_compute = true; |
1295 | break; |
1296 | } |
1297 | } |
1298 | |
1299 | return is_dsc_need_re_compute; |
1300 | } |
1301 | |
1302 | int compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, |
1303 | struct dc_state *dc_state, |
1304 | struct dsc_mst_fairness_vars *vars) |
1305 | { |
1306 | int i, j; |
1307 | struct dc_stream_state *stream; |
1308 | bool computed_streams[MAX_PIPES]; |
1309 | struct amdgpu_dm_connector *aconnector; |
1310 | struct drm_dp_mst_topology_mgr *mst_mgr; |
1311 | struct resource_pool *res_pool; |
1312 | int link_vars_start_index = 0; |
1313 | int ret = 0; |
1314 | |
1315 | for (i = 0; i < dc_state->stream_count; i++) |
1316 | computed_streams[i] = false; |
1317 | |
1318 | for (i = 0; i < dc_state->stream_count; i++) { |
1319 | stream = dc_state->streams[i]; |
1320 | res_pool = stream->ctx->dc->res_pool; |
1321 | |
1322 | if (stream->signal != SIGNAL_TYPE_DISPLAY_PORT_MST) |
1323 | continue; |
1324 | |
1325 | aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; |
1326 | |
1327 | if (!aconnector || !aconnector->dc_sink || !aconnector->mst_output_port) |
1328 | continue; |
1329 | |
1330 | if (!aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported) |
1331 | continue; |
1332 | |
1333 | if (computed_streams[i]) |
1334 | continue; |
1335 | |
1336 | if (res_pool->funcs->remove_stream_from_ctx && |
1337 | res_pool->funcs->remove_stream_from_ctx(stream->ctx->dc, dc_state, stream) != DC_OK) |
1338 | return -EINVAL; |
1339 | |
		if (!is_dsc_need_re_compute(state, dc_state, stream->link))
1341 | continue; |
1342 | |
1343 | mst_mgr = aconnector->mst_output_port->mgr; |
		ret = compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars, mst_mgr,
						       &link_vars_start_index);
1346 | if (ret != 0) |
1347 | return ret; |
1348 | |
1349 | for (j = 0; j < dc_state->stream_count; j++) { |
1350 | if (dc_state->streams[j]->link == stream->link) |
1351 | computed_streams[j] = true; |
1352 | } |
1353 | } |
1354 | |
1355 | for (i = 0; i < dc_state->stream_count; i++) { |
1356 | stream = dc_state->streams[i]; |
1357 | |
1358 | if (stream->timing.flags.DSC == 1) |
			if (dc_stream_add_dsc_to_resource(stream->ctx->dc, dc_state, stream) != DC_OK)
1360 | return -EINVAL; |
1361 | } |
1362 | |
1363 | return ret; |
1364 | } |
1365 | |
1366 | static int pre_compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, |
1367 | struct dc_state *dc_state, |
1368 | struct dsc_mst_fairness_vars *vars) |
1369 | { |
1370 | int i, j; |
1371 | struct dc_stream_state *stream; |
1372 | bool computed_streams[MAX_PIPES]; |
1373 | struct amdgpu_dm_connector *aconnector; |
1374 | struct drm_dp_mst_topology_mgr *mst_mgr; |
1375 | int link_vars_start_index = 0; |
1376 | int ret = 0; |
1377 | |
1378 | for (i = 0; i < dc_state->stream_count; i++) |
1379 | computed_streams[i] = false; |
1380 | |
1381 | for (i = 0; i < dc_state->stream_count; i++) { |
1382 | stream = dc_state->streams[i]; |
1383 | |
1384 | if (stream->signal != SIGNAL_TYPE_DISPLAY_PORT_MST) |
1385 | continue; |
1386 | |
1387 | aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; |
1388 | |
1389 | if (!aconnector || !aconnector->dc_sink || !aconnector->mst_output_port) |
1390 | continue; |
1391 | |
1392 | if (!aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported) |
1393 | continue; |
1394 | |
1395 | if (computed_streams[i]) |
1396 | continue; |
1397 | |
		if (!is_dsc_need_re_compute(state, dc_state, stream->link))
1399 | continue; |
1400 | |
1401 | mst_mgr = aconnector->mst_output_port->mgr; |
		ret = compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars, mst_mgr,
						       &link_vars_start_index);
1404 | if (ret != 0) |
1405 | return ret; |
1406 | |
1407 | for (j = 0; j < dc_state->stream_count; j++) { |
1408 | if (dc_state->streams[j]->link == stream->link) |
1409 | computed_streams[j] = true; |
1410 | } |
1411 | } |
1412 | |
1413 | return ret; |
1414 | } |
1415 | |
1416 | static int find_crtc_index_in_state_by_stream(struct drm_atomic_state *state, |
1417 | struct dc_stream_state *stream) |
1418 | { |
1419 | int i; |
1420 | struct drm_crtc *crtc; |
1421 | struct drm_crtc_state *new_state, *old_state; |
1422 | |
1423 | for_each_oldnew_crtc_in_state(state, crtc, old_state, new_state, i) { |
1424 | struct dm_crtc_state *dm_state = to_dm_crtc_state(new_state); |
1425 | |
1426 | if (dm_state->stream == stream) |
1427 | return i; |
1428 | } |
1429 | return -1; |
1430 | } |
1431 | |
1432 | static bool is_link_to_dschub(struct dc_link *dc_link) |
1433 | { |
1434 | union dpcd_dsc_basic_capabilities *dsc_caps = |
1435 | &dc_link->dpcd_caps.dsc_caps.dsc_basic_caps; |
1436 | |
1437 | /* only check phy used by dsc mst branch */ |
1438 | if (dc_link->type != dc_connection_mst_branch) |
1439 | return false; |
1440 | |
1441 | if (!(dsc_caps->fields.dsc_support.DSC_SUPPORT || |
1442 | dsc_caps->fields.dsc_support.DSC_PASSTHROUGH_SUPPORT)) |
1443 | return false; |
1444 | return true; |
1445 | } |
1446 | |
1447 | static bool is_dsc_precompute_needed(struct drm_atomic_state *state) |
1448 | { |
1449 | int i; |
1450 | struct drm_crtc *crtc; |
1451 | struct drm_crtc_state *old_crtc_state, *new_crtc_state; |
1452 | bool ret = false; |
1453 | |
1454 | for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { |
1455 | struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(new_crtc_state); |
1456 | |
1457 | if (!amdgpu_dm_find_first_crtc_matching_connector(state, crtc)) { |
1458 | ret = false; |
1459 | break; |
1460 | } |
1461 | if (dm_crtc_state->stream && dm_crtc_state->stream->link) |
			if (is_link_to_dschub(dm_crtc_state->stream->link))
1463 | ret = true; |
1464 | } |
1465 | return ret; |
1466 | } |
1467 | |
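/*
 * Pre-compute MST DSC configs on a throw-away copy of the DC state so that
 * CRTCs whose timing ends up unchanged can have mode_changed cleared before
 * the full validation runs.
 */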
1468 | int pre_validate_dsc(struct drm_atomic_state *state, |
1469 | struct dm_atomic_state **dm_state_ptr, |
1470 | struct dsc_mst_fairness_vars *vars) |
1471 | { |
1472 | int i; |
1473 | struct dm_atomic_state *dm_state; |
1474 | struct dc_state *local_dc_state = NULL; |
1475 | int ret = 0; |
1476 | |
1477 | if (!is_dsc_precompute_needed(state)) { |
		DRM_INFO_ONCE("DSC precompute is not needed.\n");
		return 0;
	}
	ret = dm_atomic_get_state(state, dm_state_ptr);
	if (ret != 0) {
		DRM_INFO_ONCE("dm_atomic_get_state() failed\n");
1484 | return ret; |
1485 | } |
1486 | dm_state = *dm_state_ptr; |
1487 | |
	/*
	 * Create a local copy of the dc_state and copy the streams of
	 * dm_state->context into it; the stream pointers in the local copy
	 * must not be the same as the streams from dm_state->context.
	 */

	local_dc_state = kmemdup(dm_state->context, sizeof(struct dc_state), GFP_KERNEL);
1495 | if (!local_dc_state) |
1496 | return -ENOMEM; |
1497 | |
1498 | for (i = 0; i < local_dc_state->stream_count; i++) { |
1499 | struct dc_stream_state *stream = dm_state->context->streams[i]; |
1500 | int ind = find_crtc_index_in_state_by_stream(state, stream); |
1501 | |
1502 | if (ind >= 0) { |
1503 | struct amdgpu_dm_connector *aconnector; |
1504 | struct drm_connector_state *drm_new_conn_state; |
1505 | struct dm_connector_state *dm_new_conn_state; |
1506 | struct dm_crtc_state *dm_old_crtc_state; |
1507 | |
			aconnector =
				amdgpu_dm_find_first_crtc_matching_connector(state,
									     state->crtcs[ind].ptr);
			drm_new_conn_state =
				drm_atomic_get_new_connector_state(state,
								   &aconnector->base);
			dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
			dm_old_crtc_state = to_dm_crtc_state(state->crtcs[ind].old_state);

			local_dc_state->streams[i] =
				create_validate_stream_for_sink(aconnector,
								&state->crtcs[ind].new_state->mode,
								dm_new_conn_state,
								dm_old_crtc_state->stream);
1522 | if (local_dc_state->streams[i] == NULL) { |
1523 | ret = -EINVAL; |
1524 | break; |
1525 | } |
1526 | } |
1527 | } |
1528 | |
1529 | if (ret != 0) |
1530 | goto clean_exit; |
1531 | |
	ret = pre_compute_mst_dsc_configs_for_state(state, local_dc_state, vars);
	if (ret != 0) {
		DRM_INFO_ONCE("pre_compute_mst_dsc_configs_for_state() failed\n");
1535 | ret = -EINVAL; |
1536 | goto clean_exit; |
1537 | } |
1538 | |
	/*
	 * Compare the timing of local_dc_state->streams with dm_state->context;
	 * if it is the same, clear crtc_state->mode_changed.
	 */
1543 | for (i = 0; i < local_dc_state->stream_count; i++) { |
1544 | struct dc_stream_state *stream = dm_state->context->streams[i]; |
1545 | |
		if (local_dc_state->streams[i] &&
		    dc_is_timing_changed(stream, local_dc_state->streams[i])) {
			DRM_INFO_ONCE("crtc[%d] needs mode_changed\n", i);
1549 | } else { |
1550 | int ind = find_crtc_index_in_state_by_stream(state, stream); |
1551 | |
1552 | if (ind >= 0) |
1553 | state->crtcs[ind].new_state->mode_changed = 0; |
1554 | } |
1555 | } |
1556 | clean_exit: |
1557 | for (i = 0; i < local_dc_state->stream_count; i++) { |
1558 | struct dc_stream_state *stream = dm_state->context->streams[i]; |
1559 | |
1560 | if (local_dc_state->streams[i] != stream) |
			dc_stream_release(local_dc_state->streams[i]);
1562 | } |
1563 | |
	kfree(local_dc_state);
1565 | |
1566 | return ret; |
1567 | } |
1568 | |
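/*
 * Rough inverse of kbps_to_peak_pbn(): convert a PBN value back to kbps by
 * undoing the 54/64 MBps PBN unit and the PEAK_FACTOR_X1000 margin.
 */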
1569 | static unsigned int kbps_from_pbn(unsigned int pbn) |
1570 | { |
1571 | unsigned int kbps = pbn; |
1572 | |
1573 | kbps *= (1000000 / PEAK_FACTOR_X1000); |
1574 | kbps *= 8; |
1575 | kbps *= 54; |
1576 | kbps /= 64; |
1577 | |
1578 | return kbps; |
1579 | } |
1580 | |
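/*
 * Ask DC for the min/max DSC target bpp range achievable for this stream's
 * timing; a common DSC config is considered possible when both bounds are
 * non-zero.
 */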
1581 | static bool is_dsc_common_config_possible(struct dc_stream_state *stream, |
1582 | struct dc_dsc_bw_range *bw_range) |
1583 | { |
1584 | struct dc_dsc_policy dsc_policy = {0}; |
1585 | |
	dc_dsc_get_policy_for_timing(&stream->timing, 0, &dsc_policy);
	dc_dsc_compute_bandwidth_range(stream->sink->ctx->dc->res_pool->dscs[0],
				       stream->sink->ctx->dc->debug.dsc_min_slice_height_override,
				       dsc_policy.min_target_bpp * 16,
				       dsc_policy.max_target_bpp * 16,
				       &stream->sink->dsc_caps.dsc_dec_caps,
				       &stream->timing, dc_link_get_highest_encoding_format(stream->link), bw_range);
1593 | |
1594 | return bw_range->max_target_bpp_x16 && bw_range->min_target_bpp_x16; |
1595 | } |
1596 | |
1597 | enum dc_status dm_dp_mst_is_port_support_mode( |
1598 | struct amdgpu_dm_connector *aconnector, |
1599 | struct dc_stream_state *stream) |
1600 | { |
1601 | int bpp, pbn, branch_max_throughput_mps = 0; |
1602 | struct dc_link_settings cur_link_settings; |
1603 | unsigned int end_to_end_bw_in_kbps = 0; |
1604 | unsigned int upper_link_bw_in_kbps = 0, down_link_bw_in_kbps = 0; |
1605 | unsigned int max_compressed_bw_in_kbps = 0; |
1606 | struct dc_dsc_bw_range bw_range = {0}; |
1607 | struct drm_dp_mst_topology_mgr *mst_mgr; |
1608 | |
	/*
	 * Check if the mode could be supported if DSC pass-through is
	 * supported AND if there is enough bandwidth available to support the
	 * mode with DSC enabled.
	 */
	if (is_dsc_common_config_possible(stream, &bw_range) &&
	    aconnector->mst_output_port->passthrough_aux) {
		mst_mgr = aconnector->mst_output_port->mgr;
		mutex_lock(&mst_mgr->lock);

		cur_link_settings = stream->link->verified_link_cap;

		upper_link_bw_in_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
							       &cur_link_settings);
		down_link_bw_in_kbps = kbps_from_pbn(aconnector->mst_output_port->full_pbn);
1625 | |
1626 | /* pick the bottleneck */ |
1627 | end_to_end_bw_in_kbps = min(upper_link_bw_in_kbps, |
1628 | down_link_bw_in_kbps); |
1629 | |
		mutex_unlock(&mst_mgr->lock);
1631 | |
		/*
		 * Use the bandwidth at maximum DSC compression as the
		 * required bandwidth for the mode.
		 */
1636 | max_compressed_bw_in_kbps = bw_range.min_kbps; |
1637 | |
1638 | if (end_to_end_bw_in_kbps < max_compressed_bw_in_kbps) { |
			DRM_DEBUG_DRIVER("Mode does not fit into DSC pass-through bandwidth validation\n");
1640 | return DC_FAIL_BANDWIDTH_VALIDATE; |
1641 | } |
1642 | } else { |
1643 | /* check if mode could be supported within full_pbn */ |
		bpp = convert_dc_color_depth_into_bpc(stream->timing.display_color_depth) * 3;
		pbn = drm_dp_calc_pbn_mode(stream->timing.pix_clk_100hz / 10, bpp, false);
1646 | |
1647 | if (pbn > aconnector->mst_output_port->full_pbn) |
1648 | return DC_FAIL_BANDWIDTH_VALIDATE; |
1649 | } |
1650 | |
	/* Check the MST DSC output bandwidth against the branch overall throughput limit */
1652 | switch (stream->timing.pixel_encoding) { |
1653 | case PIXEL_ENCODING_RGB: |
1654 | case PIXEL_ENCODING_YCBCR444: |
1655 | branch_max_throughput_mps = |
1656 | aconnector->dc_sink->dsc_caps.dsc_dec_caps.branch_overall_throughput_0_mps; |
1657 | break; |
1658 | case PIXEL_ENCODING_YCBCR422: |
1659 | case PIXEL_ENCODING_YCBCR420: |
1660 | branch_max_throughput_mps = |
1661 | aconnector->dc_sink->dsc_caps.dsc_dec_caps.branch_overall_throughput_1_mps; |
1662 | break; |
1663 | default: |
1664 | break; |
1665 | } |
1666 | |
1667 | if (branch_max_throughput_mps != 0 && |
1668 | ((stream->timing.pix_clk_100hz / 10) > branch_max_throughput_mps * 1000)) |
1669 | return DC_FAIL_BANDWIDTH_VALIDATE; |
1670 | |
1671 | return DC_OK; |
1672 | } |
1673 | |