/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dc.h"
#include "dc_dmub_srv.h"
#include "../dmub/dmub_srv.h"
#include "dm_helpers.h"
#include "dc_hw_types.h"
#include "core_types.h"
#include "../basics/conversion.h"
#include "cursor_reg_cache.h"
#include "resource.h"
#include "clk_mgr.h"
#include "dc_state_priv.h"

#define CTX dc_dmub_srv->ctx
#define DC_LOGGER CTX->logger

static void dc_dmub_srv_construct(struct dc_dmub_srv *dc_srv, struct dc *dc,
				  struct dmub_srv *dmub)
{
	dc_srv->dmub = dmub;
	dc_srv->ctx = dc->ctx;
}

struct dc_dmub_srv *dc_dmub_srv_create(struct dc *dc, struct dmub_srv *dmub)
{
	struct dc_dmub_srv *dc_srv =
		kzalloc(sizeof(struct dc_dmub_srv), GFP_KERNEL);

	if (dc_srv == NULL) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	dc_dmub_srv_construct(dc_srv, dc, dmub);

	return dc_srv;
}

void dc_dmub_srv_destroy(struct dc_dmub_srv **dmub_srv)
{
	if (*dmub_srv) {
		kfree(*dmub_srv);
		*dmub_srv = NULL;
	}
}

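/*
 * Waits for the DMCUB inbox1 queue to drain. When the disable_timeout
 * debug option is set, the 100 ms wait is retried indefinitely instead
 * of giving up; otherwise a timeout logs an error and dumps DMCUB
 * diagnostic data.
 */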
void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv)
{
	struct dmub_srv *dmub = dc_dmub_srv->dmub;
	struct dc_context *dc_ctx = dc_dmub_srv->ctx;
	enum dmub_status status;

	do {
		status = dmub_srv_wait_for_idle(dmub, 100000);
	} while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);

	if (status != DMUB_STATUS_OK) {
		DC_ERROR("Error waiting for DMUB idle: status=%d\n", status);
		dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
	}
}

void dc_dmub_srv_clear_inbox0_ack(struct dc_dmub_srv *dc_dmub_srv)
{
	struct dmub_srv *dmub = dc_dmub_srv->dmub;
	struct dc_context *dc_ctx = dc_dmub_srv->ctx;
	enum dmub_status status = DMUB_STATUS_OK;

	status = dmub_srv_clear_inbox0_ack(dmub);
	if (status != DMUB_STATUS_OK) {
		DC_ERROR("Error clearing INBOX0 ack: status=%d\n", status);
		dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
	}
}

void dc_dmub_srv_wait_for_inbox0_ack(struct dc_dmub_srv *dc_dmub_srv)
{
	struct dmub_srv *dmub = dc_dmub_srv->dmub;
	struct dc_context *dc_ctx = dc_dmub_srv->ctx;
	enum dmub_status status = DMUB_STATUS_OK;

	status = dmub_srv_wait_for_inbox0_ack(dmub, 100000);
	if (status != DMUB_STATUS_OK) {
		DC_ERROR("Error waiting for INBOX0 HW Lock Ack\n");
		dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
	}
}

void dc_dmub_srv_send_inbox0_cmd(struct dc_dmub_srv *dc_dmub_srv,
				 union dmub_inbox0_data_register data)
{
	struct dmub_srv *dmub = dc_dmub_srv->dmub;
	struct dc_context *dc_ctx = dc_dmub_srv->ctx;
	enum dmub_status status = DMUB_STATUS_OK;

	status = dmub_srv_send_inbox0_cmd(dmub, data);
	if (status != DMUB_STATUS_OK) {
		DC_ERROR("Error sending INBOX0 cmd\n");
		dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
	}
}

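/*
 * Queues a list of commands into inbox1 and kicks off execution without
 * waiting for completion. If the ring buffer fills up mid-list, the queued
 * commands are flushed, the driver waits for idle and the current command
 * is requeued before continuing.
 */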
bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
					unsigned int count,
					union dmub_rb_cmd *cmd_list)
{
	struct dc_context *dc_ctx;
	struct dmub_srv *dmub;
	enum dmub_status status;
	int i;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;

	dc_ctx = dc_dmub_srv->ctx;
	dmub = dc_dmub_srv->dmub;

	for (i = 0; i < count; i++) {
		// Queue command
		status = dmub_srv_cmd_queue(dmub, &cmd_list[i]);

		if (status == DMUB_STATUS_QUEUE_FULL) {
			/* Execute and wait for queue to become empty again. */
			status = dmub_srv_cmd_execute(dmub);
			if (status == DMUB_STATUS_POWER_STATE_D3)
				return false;

			do {
				status = dmub_srv_wait_for_idle(dmub, 100000);
			} while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);

			/* Requeue the command. */
			status = dmub_srv_cmd_queue(dmub, &cmd_list[i]);
		}

		if (status != DMUB_STATUS_OK) {
			if (status != DMUB_STATUS_POWER_STATE_D3) {
				DC_ERROR("Error queueing DMUB command: status=%d\n", status);
				dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
			}
			return false;
		}
	}

	status = dmub_srv_cmd_execute(dmub);
	if (status != DMUB_STATUS_OK) {
		if (status != DMUB_STATUS_POWER_STATE_D3) {
			DC_ERROR("Error starting DMUB execution: status=%d\n", status);
			dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
		}
		return false;
	}

	return true;
}

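/*
 * Waits for previously queued commands to finish (unless NO_WAIT was
 * requested) and, for WAIT_WITH_REPLY, copies the firmware's return data
 * from the ring buffer back into the caller's command structure.
 */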
bool dc_dmub_srv_wait_for_idle(struct dc_dmub_srv *dc_dmub_srv,
			       enum dm_dmub_wait_type wait_type,
			       union dmub_rb_cmd *cmd_list)
{
	struct dmub_srv *dmub;
	enum dmub_status status;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;

	dmub = dc_dmub_srv->dmub;

	// Wait for DMUB to process command
	if (wait_type != DM_DMUB_WAIT_TYPE_NO_WAIT) {
		do {
			status = dmub_srv_wait_for_idle(dmub, 100000);
		} while (dc_dmub_srv->ctx->dc->debug.disable_timeout && status != DMUB_STATUS_OK);

		if (status != DMUB_STATUS_OK) {
			DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status);
			dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
			return false;
		}

		// Copy data back from ring buffer into command
		if (wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)
			dmub_rb_get_return_data(&dmub->inbox1_rb, cmd_list);
	}

	return true;
}

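/*
 * Illustrative call sequence (a sketch only; the DRR update command is
 * borrowed from dc_dmub_srv_drr_update_cmd() below as an example payload):
 *
 *	union dmub_rb_cmd cmd = { 0 };
 *
 *	cmd.drr_update.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
 *	cmd.drr_update.header.sub_type = DMUB_CMD__FAMS_DRR_UPDATE;
 *	cmd.drr_update.header.payload_bytes =
 *		sizeof(cmd.drr_update) - sizeof(cmd.drr_update.header);
 *	dc_dmub_srv_cmd_run(dc_dmub_srv, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 */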
bool dc_dmub_srv_cmd_run(struct dc_dmub_srv *dc_dmub_srv, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type)
{
	return dc_dmub_srv_cmd_run_list(dc_dmub_srv, 1, cmd, wait_type);
}

bool dc_dmub_srv_cmd_run_list(struct dc_dmub_srv *dc_dmub_srv, unsigned int count, union dmub_rb_cmd *cmd_list, enum dm_dmub_wait_type wait_type)
{
	struct dc_context *dc_ctx;
	struct dmub_srv *dmub;
	enum dmub_status status;
	int i;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;

	dc_ctx = dc_dmub_srv->ctx;
	dmub = dc_dmub_srv->dmub;

	for (i = 0; i < count; i++) {
		// Queue command
		status = dmub_srv_cmd_queue(dmub, &cmd_list[i]);

		if (status == DMUB_STATUS_QUEUE_FULL) {
			/* Execute and wait for queue to become empty again. */
			status = dmub_srv_cmd_execute(dmub);
			if (status == DMUB_STATUS_POWER_STATE_D3)
				return false;

			dmub_srv_wait_for_idle(dmub, 100000);

			/* Requeue the command. */
			status = dmub_srv_cmd_queue(dmub, &cmd_list[i]);
		}

		if (status != DMUB_STATUS_OK) {
			if (status != DMUB_STATUS_POWER_STATE_D3) {
				DC_ERROR("Error queueing DMUB command: status=%d\n", status);
				dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
			}
			return false;
		}
	}

	status = dmub_srv_cmd_execute(dmub);
	if (status != DMUB_STATUS_OK) {
		if (status != DMUB_STATUS_POWER_STATE_D3) {
			DC_ERROR("Error starting DMUB execution: status=%d\n", status);
			dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
		}
		return false;
	}

	// Wait for DMUB to process command
	if (wait_type != DM_DMUB_WAIT_TYPE_NO_WAIT) {
		if (dc_dmub_srv->ctx->dc->debug.disable_timeout) {
			do {
				status = dmub_srv_wait_for_idle(dmub, 100000);
			} while (status != DMUB_STATUS_OK);
		} else
			status = dmub_srv_wait_for_idle(dmub, 100000);

		if (status != DMUB_STATUS_OK) {
			DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status);
			dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
			return false;
		}

		// Copy data back from ring buffer into command
		if (wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)
			dmub_rb_get_return_data(&dmub->inbox1_rb, cmd_list);
	}

	return true;
}

bool dc_dmub_srv_optimized_init_done(struct dc_dmub_srv *dc_dmub_srv)
{
	struct dmub_srv *dmub;
	struct dc_context *dc_ctx;
	union dmub_fw_boot_status boot_status;
	enum dmub_status status;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;

	dmub = dc_dmub_srv->dmub;
	dc_ctx = dc_dmub_srv->ctx;

	status = dmub_srv_get_fw_boot_status(dmub, &boot_status);
	if (status != DMUB_STATUS_OK) {
		DC_ERROR("Error querying DMUB boot status: error=%d\n", status);
		return false;
	}

	return boot_status.bits.optimized_init_done;
}

bool dc_dmub_srv_notify_stream_mask(struct dc_dmub_srv *dc_dmub_srv,
				    unsigned int stream_mask)
{
	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;

	return dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__IDLE_OPT_NOTIFY_STREAM_MASK,
					 stream_mask, NULL, DM_DMUB_WAIT_TYPE_WAIT);
}

bool dc_dmub_srv_is_restore_required(struct dc_dmub_srv *dc_dmub_srv)
{
	struct dmub_srv *dmub;
	struct dc_context *dc_ctx;
	union dmub_fw_boot_status boot_status;
	enum dmub_status status;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;

	dmub = dc_dmub_srv->dmub;
	dc_ctx = dc_dmub_srv->ctx;

	status = dmub_srv_get_fw_boot_status(dmub, &boot_status);
	if (status != DMUB_STATUS_OK) {
		DC_ERROR("Error querying DMUB boot status: error=%d\n", status);
		return false;
	}

	return boot_status.bits.restore_required;
}

bool dc_dmub_srv_get_dmub_outbox0_msg(const struct dc *dc, struct dmcub_trace_buf_entry *entry)
{
	struct dmub_srv *dmub = dc->ctx->dmub_srv->dmub;

	return dmub_srv_get_outbox0_msg(dmub, entry);
}

void dc_dmub_trace_event_control(struct dc *dc, bool enable)
{
	dm_helpers_dmub_outbox_interrupt_control(dc->ctx, enable);
}

void dc_dmub_srv_drr_update_cmd(struct dc *dc, uint32_t tg_inst, uint32_t vtotal_min, uint32_t vtotal_max)
{
	union dmub_rb_cmd cmd = { 0 };

	cmd.drr_update.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
	cmd.drr_update.header.sub_type = DMUB_CMD__FAMS_DRR_UPDATE;
	cmd.drr_update.dmub_optc_state_req.v_total_max = vtotal_max;
	cmd.drr_update.dmub_optc_state_req.v_total_min = vtotal_min;
	cmd.drr_update.dmub_optc_state_req.tg_inst = tg_inst;

	cmd.drr_update.header.payload_bytes = sizeof(cmd.drr_update) - sizeof(cmd.drr_update.header);

	// Send the command to the DMCUB.
	dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}

void dc_dmub_srv_set_drr_manual_trigger_cmd(struct dc *dc, uint32_t tg_inst)
{
	union dmub_rb_cmd cmd = { 0 };

	cmd.drr_update.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
	cmd.drr_update.header.sub_type = DMUB_CMD__FAMS_SET_MANUAL_TRIGGER;
	cmd.drr_update.dmub_optc_state_req.tg_inst = tg_inst;

	cmd.drr_update.header.payload_bytes = sizeof(cmd.drr_update) - sizeof(cmd.drr_update.header);

	// Send the command to the DMCUB.
	dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}

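/* Returns the index of the last pipe in the current state whose timing
 * generator drives @stream, or 0 if no such pipe exists.
 */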
static uint8_t dc_dmub_srv_get_pipes_for_stream(struct dc *dc, struct dc_stream_state *stream)
{
	uint8_t pipes = 0;
	int i = 0;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.tg)
			pipes = i;
	}
	return pipes;
}

static void dc_dmub_srv_populate_fams_pipe_info(struct dc *dc, struct dc_state *context,
		struct pipe_ctx *head_pipe,
		struct dmub_cmd_fw_assisted_mclk_switch_pipe_data *fams_pipe_data)
{
	int j;
	int pipe_idx = 0;

	fams_pipe_data->pipe_index[pipe_idx++] = head_pipe->plane_res.hubp->inst;
	for (j = 0; j < dc->res_pool->pipe_count; j++) {
		struct pipe_ctx *split_pipe = &context->res_ctx.pipe_ctx[j];

		if (split_pipe->stream == head_pipe->stream && (split_pipe->top_pipe || split_pipe->prev_odm_pipe)) {
			fams_pipe_data->pipe_index[pipe_idx++] = split_pipe->plane_res.hubp->inst;
		}
	}
	fams_pipe_data->pipe_count = pipe_idx;
}

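/*
 * Builds and sends the FAMS setup command that delegates P-State management
 * to the DMCUB. When FPO and VActive streams are mixed, a vactive stretch
 * margin is passed along, and per-stream pipe data is populated for every
 * OTG master pipe that has FPO in use.
 */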
bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool should_manage_pstate, struct dc_state *context)
{
	union dmub_rb_cmd cmd = { 0 };
	struct dmub_cmd_fw_assisted_mclk_switch_config *config_data = &cmd.fw_assisted_mclk_switch.config_data;
	int i = 0, k = 0;
	int ramp_up_num_steps = 1; // TODO: Ramp is currently disabled. Reenable it.
	uint8_t visual_confirm_enabled;
	int pipe_idx = 0;

	if (dc == NULL)
		return false;

	visual_confirm_enabled = dc->debug.visual_confirm == VISUAL_CONFIRM_FAMS;

	// Format command.
	cmd.fw_assisted_mclk_switch.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
	cmd.fw_assisted_mclk_switch.header.sub_type = DMUB_CMD__FAMS_SETUP_FW_CTRL;
	cmd.fw_assisted_mclk_switch.config_data.fams_enabled = should_manage_pstate;
	cmd.fw_assisted_mclk_switch.config_data.visual_confirm_enabled = visual_confirm_enabled;

	if (should_manage_pstate) {
		for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

			if (!pipe->stream)
				continue;

			/* If FAMS is being used to support P-State and there is a stream
			 * that does not use FAMS, we are in an FPO + VActive scenario.
			 * Assign vactive stretch margin in this case.
			 */
			if (!pipe->stream->fpo_in_use) {
				cmd.fw_assisted_mclk_switch.config_data.vactive_stretch_margin_us = dc->debug.fpo_vactive_margin_us;
				break;
			}
			pipe_idx++;
		}
	}

	for (i = 0, k = 0; context && i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		if (resource_is_pipe_type(pipe, OTG_MASTER) && pipe->stream->fpo_in_use) {
			uint8_t min_refresh_in_hz = (pipe->stream->timing.min_refresh_in_uhz + 999999) / 1000000;

			config_data->pipe_data[k].pix_clk_100hz = pipe->stream->timing.pix_clk_100hz;
			config_data->pipe_data[k].min_refresh_in_hz = min_refresh_in_hz;
			config_data->pipe_data[k].max_ramp_step = ramp_up_num_steps;
			config_data->pipe_data[k].pipes = dc_dmub_srv_get_pipes_for_stream(dc, pipe->stream);
			dc_dmub_srv_populate_fams_pipe_info(dc, context, pipe, &config_data->pipe_data[k]);
			k++;
		}
	}
	cmd.fw_assisted_mclk_switch.header.payload_bytes =
		sizeof(cmd.fw_assisted_mclk_switch) - sizeof(cmd.fw_assisted_mclk_switch.header);

	// Send the command to the DMCUB.
	dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);

	return true;
}

void dc_dmub_srv_query_caps_cmd(struct dc_dmub_srv *dc_dmub_srv)
{
	union dmub_rb_cmd cmd = { 0 };

	if (dc_dmub_srv->ctx->dc->debug.dmcub_emulation)
		return;

	memset(&cmd, 0, sizeof(cmd));

	/* Prepare fw command */
	cmd.query_feature_caps.header.type = DMUB_CMD__QUERY_FEATURE_CAPS;
	cmd.query_feature_caps.header.sub_type = 0;
	cmd.query_feature_caps.header.ret_status = 1;
	cmd.query_feature_caps.header.payload_bytes = sizeof(struct dmub_cmd_query_feature_caps_data);

	/* If command was processed, copy feature caps to dmub srv */
	if (dc_wake_and_execute_dmub_cmd(dc_dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) &&
	    cmd.query_feature_caps.header.ret_status == 0) {
		memcpy(&dc_dmub_srv->dmub->feature_caps,
		       &cmd.query_feature_caps.query_feature_caps_data,
		       sizeof(struct dmub_feature_caps));
	}
}

void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	union dmub_rb_cmd cmd = { 0 };
	unsigned int panel_inst = 0;

	dc_get_edp_link_panel_inst(dc, pipe_ctx->stream->link, &panel_inst);

	memset(&cmd, 0, sizeof(cmd));

	// Prepare fw command
	cmd.visual_confirm_color.header.type = DMUB_CMD__GET_VISUAL_CONFIRM_COLOR;
	cmd.visual_confirm_color.header.sub_type = 0;
	cmd.visual_confirm_color.header.ret_status = 1;
	cmd.visual_confirm_color.header.payload_bytes = sizeof(struct dmub_cmd_visual_confirm_color_data);
	cmd.visual_confirm_color.visual_confirm_color_data.visual_confirm_color.panel_inst = panel_inst;

	// If command was processed, copy the visual confirm color to dmub srv
	if (dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) &&
	    cmd.visual_confirm_color.header.ret_status == 0) {
		memcpy(&dc->ctx->dmub_srv->dmub->visual_confirm_color,
		       &cmd.visual_confirm_color.visual_confirm_color_data,
		       sizeof(struct dmub_visual_confirm_color));
	}
}

/**
 * populate_subvp_cmd_drr_info - Helper to populate DRR pipe info for the DMCUB subvp command
 *
 * @dc: [in] pointer to dc object
 * @context: [in] DC state for access to phantom stream
 * @subvp_pipe: [in] pipe_ctx for the SubVP pipe
 * @vblank_pipe: [in] pipe_ctx for the DRR pipe
 * @pipe_data: [in] Pipe data which stores the VBLANK/DRR info
 *
 * Populate the DMCUB SubVP command with DRR pipe info. All the information
 * required for calculating the SubVP + DRR microschedule is populated here.
 *
 * High level algorithm:
 * 1. Get timing for SubVP pipe, phantom pipe, and DRR pipe
 * 2. Calculate the min and max vtotal which supports SubVP + DRR microschedule
 * 3. Populate the drr_info with the min and max supported vtotal values
 */
static void populate_subvp_cmd_drr_info(struct dc *dc,
		struct dc_state *context,
		struct pipe_ctx *subvp_pipe,
		struct pipe_ctx *vblank_pipe,
		struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data)
{
	struct dc_stream_state *phantom_stream = dc_state_get_paired_subvp_stream(context, subvp_pipe->stream);
	struct dc_crtc_timing *main_timing = &subvp_pipe->stream->timing;
	struct dc_crtc_timing *phantom_timing = &phantom_stream->timing;
	struct dc_crtc_timing *drr_timing = &vblank_pipe->stream->timing;
	uint16_t drr_frame_us = 0;
	uint16_t min_drr_supported_us = 0;
	uint16_t max_drr_supported_us = 0;
	uint16_t max_drr_vblank_us = 0;
	uint16_t max_drr_mallregion_us = 0;
	uint16_t mall_region_us = 0;
	uint16_t prefetch_us = 0;
	uint16_t subvp_active_us = 0;
	uint16_t drr_active_us = 0;
	uint16_t min_vtotal_supported = 0;
	uint16_t max_vtotal_supported = 0;

	pipe_data->pipe_config.vblank_data.drr_info.drr_in_use = true;
	pipe_data->pipe_config.vblank_data.drr_info.use_ramping = false; // for now don't use ramping
	pipe_data->pipe_config.vblank_data.drr_info.drr_window_size_ms = 4; // hardcode 4ms DRR window for now

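	/* All conversions below follow from line_time = h_total / pix_clk:
	 * time_us = v_lines * h_total * 1000000 / (pix_clk_100hz * 100), and
	 * inversely v_lines = time_us * pix_clk_100hz * 100 / (h_total * 1000000).
	 */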
	drr_frame_us = div64_u64(((uint64_t)drr_timing->v_total * drr_timing->h_total * 1000000),
				 (((uint64_t)drr_timing->pix_clk_100hz * 100)));
	// P-State allow width and FW delays are already included in phantom_timing->v_addressable
	mall_region_us = div64_u64(((uint64_t)phantom_timing->v_addressable * phantom_timing->h_total * 1000000),
				   (((uint64_t)phantom_timing->pix_clk_100hz * 100)));
	min_drr_supported_us = drr_frame_us + mall_region_us + SUBVP_DRR_MARGIN_US;
	min_vtotal_supported = div64_u64(((uint64_t)drr_timing->pix_clk_100hz * 100 * min_drr_supported_us),
					 (((uint64_t)drr_timing->h_total * 1000000)));

	prefetch_us = div64_u64(((uint64_t)(phantom_timing->v_total - phantom_timing->v_front_porch) * phantom_timing->h_total * 1000000),
				(((uint64_t)phantom_timing->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us));
	subvp_active_us = div64_u64(((uint64_t)main_timing->v_addressable * main_timing->h_total * 1000000),
				    (((uint64_t)main_timing->pix_clk_100hz * 100)));
	drr_active_us = div64_u64(((uint64_t)drr_timing->v_addressable * drr_timing->h_total * 1000000),
				  (((uint64_t)drr_timing->pix_clk_100hz * 100)));
	max_drr_vblank_us = div64_u64((subvp_active_us - prefetch_us -
				       dc->caps.subvp_fw_processing_delay_us - drr_active_us), 2) + drr_active_us;
	max_drr_mallregion_us = subvp_active_us - prefetch_us - mall_region_us - dc->caps.subvp_fw_processing_delay_us;
	max_drr_supported_us = max_drr_vblank_us > max_drr_mallregion_us ? max_drr_vblank_us : max_drr_mallregion_us;
	max_vtotal_supported = div64_u64(((uint64_t)drr_timing->pix_clk_100hz * 100 * max_drr_supported_us),
					 (((uint64_t)drr_timing->h_total * 1000000)));

	/* When calculating the max vtotal supported for SubVP + DRR cases, add
	 * margin due to possible rounding errors (being off by 1 line in the
	 * FW calculation can incorrectly push the P-State switch to wait 1 frame
	 * longer).
	 */
	max_vtotal_supported = max_vtotal_supported - dc->caps.subvp_drr_max_vblank_margin_us;

	pipe_data->pipe_config.vblank_data.drr_info.min_vtotal_supported = min_vtotal_supported;
	pipe_data->pipe_config.vblank_data.drr_info.max_vtotal_supported = max_vtotal_supported;
	pipe_data->pipe_config.vblank_data.drr_info.drr_vblank_start_margin = dc->caps.subvp_drr_vblank_start_margin_us;
}

/**
 * populate_subvp_cmd_vblank_pipe_info - Helper to populate VBLANK pipe info for the DMUB subvp command
 *
 * @dc: [in] current dc state
 * @context: [in] new dc state
 * @cmd: [in] DMUB cmd to be populated with SubVP info
 * @vblank_pipe: [in] pipe_ctx for the VBLANK pipe
 * @cmd_pipe_index: [in] index for the pipe array in DMCUB SubVP cmd
 *
 * Populate the DMCUB SubVP command with VBLANK pipe info. All the information
 * required to calculate the microschedule for the SubVP + VBLANK case is
 * stored in the pipe_data (subvp_data and vblank_data). Also check if the
 * VBLANK pipe is a DRR display -- if it is, make a call to populate drr_info.
 */
static void populate_subvp_cmd_vblank_pipe_info(struct dc *dc,
		struct dc_state *context,
		union dmub_rb_cmd *cmd,
		struct pipe_ctx *vblank_pipe,
		uint8_t cmd_pipe_index)
{
	uint32_t i;
	struct pipe_ctx *pipe = NULL;
	struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data =
		&cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[cmd_pipe_index];

	// Find the SubVP pipe
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];

		// We check for master pipe, but it shouldn't matter since we only need
		// the pipe for timing info (stream should be same for any pipe splits)
		if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
		    !resource_is_pipe_type(pipe, DPP_PIPE))
			continue;

		// Find the SubVP pipe
		if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN)
			break;
	}

	pipe_data->mode = VBLANK;
	pipe_data->pipe_config.vblank_data.pix_clk_100hz = vblank_pipe->stream->timing.pix_clk_100hz;
	pipe_data->pipe_config.vblank_data.vblank_start = vblank_pipe->stream->timing.v_total -
			vblank_pipe->stream->timing.v_front_porch;
	pipe_data->pipe_config.vblank_data.vtotal = vblank_pipe->stream->timing.v_total;
	pipe_data->pipe_config.vblank_data.htotal = vblank_pipe->stream->timing.h_total;
	pipe_data->pipe_config.vblank_data.vblank_pipe_index = vblank_pipe->pipe_idx;
	pipe_data->pipe_config.vblank_data.vstartup_start = vblank_pipe->pipe_dlg_param.vstartup_start;
	pipe_data->pipe_config.vblank_data.vblank_end =
		vblank_pipe->stream->timing.v_total - vblank_pipe->stream->timing.v_front_porch - vblank_pipe->stream->timing.v_addressable;

	if (vblank_pipe->stream->ignore_msa_timing_param &&
	    (vblank_pipe->stream->allow_freesync || vblank_pipe->stream->vrr_active_variable || vblank_pipe->stream->vrr_active_fixed))
		populate_subvp_cmd_drr_info(dc, context, pipe, vblank_pipe, pipe_data);
}

/**
 * update_subvp_prefetch_end_to_mall_start - Helper for the SubVP + SubVP case
 *
 * @dc: [in] current dc state
 * @context: [in] new dc state
 * @cmd: [in] DMUB cmd to be populated with SubVP info
 * @subvp_pipes: [in] Array of SubVP pipes (should always be length 2)
 *
 * For SubVP + SubVP, we use a single vertical interrupt to start the
 * microschedule for both SubVP pipes. In order for this to work correctly, the
 * MALL REGION of both SubVP pipes must start at the same time. This function
 * lengthens the prefetch end to mall start delay of the SubVP pipe that has
 * the shorter prefetch so that both MALL REGIONs will start at the same time.
 */
static void update_subvp_prefetch_end_to_mall_start(struct dc *dc,
		struct dc_state *context,
		union dmub_rb_cmd *cmd,
		struct pipe_ctx *subvp_pipes[])
{
	uint32_t subvp0_prefetch_us = 0;
	uint32_t subvp1_prefetch_us = 0;
	uint32_t prefetch_delta_us = 0;
	struct dc_stream_state *phantom_stream0 = NULL;
	struct dc_stream_state *phantom_stream1 = NULL;
	struct dc_crtc_timing *phantom_timing0 = NULL;
	struct dc_crtc_timing *phantom_timing1 = NULL;
	struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data = NULL;

	phantom_stream0 = dc_state_get_paired_subvp_stream(context, subvp_pipes[0]->stream);
	phantom_stream1 = dc_state_get_paired_subvp_stream(context, subvp_pipes[1]->stream);
	phantom_timing0 = &phantom_stream0->timing;
	phantom_timing1 = &phantom_stream1->timing;

	subvp0_prefetch_us = div64_u64(((uint64_t)(phantom_timing0->v_total - phantom_timing0->v_front_porch) *
			(uint64_t)phantom_timing0->h_total * 1000000),
			(((uint64_t)phantom_timing0->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us));
	subvp1_prefetch_us = div64_u64(((uint64_t)(phantom_timing1->v_total - phantom_timing1->v_front_porch) *
			(uint64_t)phantom_timing1->h_total * 1000000),
			(((uint64_t)phantom_timing1->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us));

	// Whichever SubVP pipe has the smaller prefetch (including the prefetch end to mall start time)
	// should increase its prefetch time to match the other
	if (subvp0_prefetch_us > subvp1_prefetch_us) {
		pipe_data = &cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[1];
		prefetch_delta_us = subvp0_prefetch_us - subvp1_prefetch_us;
		pipe_data->pipe_config.subvp_data.prefetch_to_mall_start_lines =
			div64_u64(((uint64_t)(dc->caps.subvp_prefetch_end_to_mall_start_us + prefetch_delta_us) *
				((uint64_t)phantom_timing1->pix_clk_100hz * 100) + ((uint64_t)phantom_timing1->h_total * 1000000 - 1)),
				((uint64_t)phantom_timing1->h_total * 1000000));

	} else if (subvp1_prefetch_us > subvp0_prefetch_us) {
		pipe_data = &cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[0];
		prefetch_delta_us = subvp1_prefetch_us - subvp0_prefetch_us;
		pipe_data->pipe_config.subvp_data.prefetch_to_mall_start_lines =
			div64_u64(((uint64_t)(dc->caps.subvp_prefetch_end_to_mall_start_us + prefetch_delta_us) *
				((uint64_t)phantom_timing0->pix_clk_100hz * 100) + ((uint64_t)phantom_timing0->h_total * 1000000 - 1)),
				((uint64_t)phantom_timing0->h_total * 1000000));
	}
}

/**
 * populate_subvp_cmd_pipe_info - Helper to populate the SubVP pipe info for the DMUB subvp command
 *
 * @dc: [in] current dc state
 * @context: [in] new dc state
 * @cmd: [in] DMUB cmd to be populated with SubVP info
 * @subvp_pipe: [in] pipe_ctx for the SubVP pipe
 * @cmd_pipe_index: [in] index for the pipe array in DMCUB SubVP cmd
 *
 * Populate the DMCUB SubVP command with SubVP pipe info. All the information
 * required to calculate the microschedule for the SubVP pipe is stored in the
 * pipe_data of the DMCUB SubVP command.
 */
static void populate_subvp_cmd_pipe_info(struct dc *dc,
		struct dc_state *context,
		union dmub_rb_cmd *cmd,
		struct pipe_ctx *subvp_pipe,
		uint8_t cmd_pipe_index)
{
	uint32_t j;
	struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data =
		&cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[cmd_pipe_index];
	struct dc_stream_state *phantom_stream = dc_state_get_paired_subvp_stream(context, subvp_pipe->stream);
	struct dc_crtc_timing *main_timing = &subvp_pipe->stream->timing;
	struct dc_crtc_timing *phantom_timing = &phantom_stream->timing;
	uint32_t out_num_stream, out_den_stream, out_num_plane, out_den_plane, out_num, out_den;

	pipe_data->mode = SUBVP;
	pipe_data->pipe_config.subvp_data.pix_clk_100hz = subvp_pipe->stream->timing.pix_clk_100hz;
	pipe_data->pipe_config.subvp_data.htotal = subvp_pipe->stream->timing.h_total;
	pipe_data->pipe_config.subvp_data.vtotal = subvp_pipe->stream->timing.v_total;
	pipe_data->pipe_config.subvp_data.main_vblank_start =
		main_timing->v_total - main_timing->v_front_porch;
	pipe_data->pipe_config.subvp_data.main_vblank_end =
		main_timing->v_total - main_timing->v_front_porch - main_timing->v_addressable;
	pipe_data->pipe_config.subvp_data.mall_region_lines = phantom_timing->v_addressable;
	pipe_data->pipe_config.subvp_data.main_pipe_index = subvp_pipe->stream_res.tg->inst;
	pipe_data->pipe_config.subvp_data.is_drr = subvp_pipe->stream->ignore_msa_timing_param &&
		(subvp_pipe->stream->allow_freesync || subvp_pipe->stream->vrr_active_variable || subvp_pipe->stream->vrr_active_fixed);

	/* Calculate the scaling factor from the src and dst height.
	 * e.g. If 3840x2160 is being downscaled to 1920x1080, the scaling factor is 1/2.
	 * Reduce the fraction 1080/2160 = 1/2 for the "scaling factor"
	 *
	 * Make sure to combine stream and plane scaling together.
	 */
	reduce_fraction(subvp_pipe->stream->src.height, subvp_pipe->stream->dst.height,
			&out_num_stream, &out_den_stream);
	reduce_fraction(subvp_pipe->plane_state->src_rect.height, subvp_pipe->plane_state->dst_rect.height,
			&out_num_plane, &out_den_plane);
	reduce_fraction(out_num_stream * out_num_plane, out_den_stream * out_den_plane, &out_num, &out_den);
	pipe_data->pipe_config.subvp_data.scale_factor_numerator = out_num;
	pipe_data->pipe_config.subvp_data.scale_factor_denominator = out_den;

	// Prefetch lines is equal to VACTIVE + BP + VSYNC
	pipe_data->pipe_config.subvp_data.prefetch_lines =
		phantom_timing->v_total - phantom_timing->v_front_porch;

	// Round up
	pipe_data->pipe_config.subvp_data.prefetch_to_mall_start_lines =
		div64_u64(((uint64_t)dc->caps.subvp_prefetch_end_to_mall_start_us * ((uint64_t)phantom_timing->pix_clk_100hz * 100) +
			((uint64_t)phantom_timing->h_total * 1000000 - 1)), ((uint64_t)phantom_timing->h_total * 1000000));
	pipe_data->pipe_config.subvp_data.processing_delay_lines =
		div64_u64(((uint64_t)(dc->caps.subvp_fw_processing_delay_us) * ((uint64_t)phantom_timing->pix_clk_100hz * 100) +
			((uint64_t)phantom_timing->h_total * 1000000 - 1)), ((uint64_t)phantom_timing->h_total * 1000000));

	if (subvp_pipe->bottom_pipe) {
		pipe_data->pipe_config.subvp_data.main_split_pipe_index = subvp_pipe->bottom_pipe->pipe_idx;
	} else if (subvp_pipe->next_odm_pipe) {
		pipe_data->pipe_config.subvp_data.main_split_pipe_index = subvp_pipe->next_odm_pipe->pipe_idx;
	} else {
		pipe_data->pipe_config.subvp_data.main_split_pipe_index = 0xF;
	}

	// Find phantom pipe index based on phantom stream
	for (j = 0; j < dc->res_pool->pipe_count; j++) {
		struct pipe_ctx *phantom_pipe = &context->res_ctx.pipe_ctx[j];

		if (resource_is_pipe_type(phantom_pipe, OTG_MASTER) &&
		    phantom_pipe->stream == dc_state_get_paired_subvp_stream(context, subvp_pipe->stream)) {
			pipe_data->pipe_config.subvp_data.phantom_pipe_index = phantom_pipe->stream_res.tg->inst;
			if (phantom_pipe->bottom_pipe) {
				pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = phantom_pipe->bottom_pipe->plane_res.hubp->inst;
			} else if (phantom_pipe->next_odm_pipe) {
				pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = phantom_pipe->next_odm_pipe->plane_res.hubp->inst;
			} else {
				pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = 0xF;
			}
			break;
		}
	}
}

/**
 * dc_dmub_setup_subvp_dmub_command - Populate the DMCUB SubVP command
 *
 * @dc: [in] current dc state
 * @context: [in] new dc state
 * @enable: [in] if true, populate the SubVP pipe info
 *
 * This function loops through each pipe and populates the DMUB SubVP CMD info
 * based on the pipe (e.g. SubVP, VBLANK).
 */
void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
		struct dc_state *context,
		bool enable)
{
	uint8_t cmd_pipe_index = 0;
	uint32_t i, pipe_idx;
	uint8_t subvp_count = 0;
	union dmub_rb_cmd cmd;
	struct pipe_ctx *subvp_pipes[2];
	uint32_t wm_val_refclk = 0;
	enum mall_stream_type pipe_mall_type;

	memset(&cmd, 0, sizeof(cmd));
	// FW command for SUBVP
	cmd.fw_assisted_mclk_switch_v2.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
	cmd.fw_assisted_mclk_switch_v2.header.sub_type = DMUB_CMD__HANDLE_SUBVP_CMD;
	cmd.fw_assisted_mclk_switch_v2.header.payload_bytes =
		sizeof(cmd.fw_assisted_mclk_switch_v2) - sizeof(cmd.fw_assisted_mclk_switch_v2.header);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		/* For the SubVP pipe count, only count the topmost (ODM / MPC) pipe */
		if (resource_is_pipe_type(pipe, OTG_MASTER) &&
		    resource_is_pipe_type(pipe, DPP_PIPE) &&
		    dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN)
			subvp_pipes[subvp_count++] = pipe;
	}

	if (enable) {
		// For each pipe that is a "main" SUBVP pipe, fill in pipe data for DMUB SUBVP cmd
		for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

			pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe);

			if (!pipe->stream)
				continue;

			/* When populating subvp cmd info, only pass in the topmost (ODM / MPC) pipe.
			 * Any ODM or MPC splits being used in SubVP will be handled internally in
			 * populate_subvp_cmd_pipe_info
			 */
			if (resource_is_pipe_type(pipe, OTG_MASTER) &&
			    resource_is_pipe_type(pipe, DPP_PIPE) &&
			    pipe_mall_type == SUBVP_MAIN) {
				populate_subvp_cmd_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++);
			} else if (resource_is_pipe_type(pipe, OTG_MASTER) &&
				   resource_is_pipe_type(pipe, DPP_PIPE) &&
				   pipe_mall_type == SUBVP_NONE) {
				// Don't need to check for ActiveDRAMClockChangeMargin < 0, not valid in cases where
				// we run through DML without calculating "natural" P-state support
				populate_subvp_cmd_vblank_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++);
			}
			pipe_idx++;
		}
		if (subvp_count == 2) {
			update_subvp_prefetch_end_to_mall_start(dc, context, &cmd, subvp_pipes);
		}
		cmd.fw_assisted_mclk_switch_v2.config_data.pstate_allow_width_us = dc->caps.subvp_pstate_allow_width_us;
		cmd.fw_assisted_mclk_switch_v2.config_data.vertical_int_margin_us = dc->caps.subvp_vertical_int_margin_us;

		// Store the original watermark value for this SubVP config so we can lower it when the
		// MCLK switch starts
		wm_val_refclk = context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns *
			(dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000) / 1000;

		cmd.fw_assisted_mclk_switch_v2.config_data.watermark_a_cache = wm_val_refclk < 0xFFFF ? wm_val_refclk : 0xFFFF;
	}

	dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}

bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv, struct dmub_diagnostic_data *diag_data)
{
	if (!dc_dmub_srv || !dc_dmub_srv->dmub || !diag_data)
		return false;

	return dmub_srv_get_diagnostic_data(dc_dmub_srv->dmub, diag_data);
}

void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv)
{
	struct dmub_diagnostic_data diag_data = {0};

	if (!dc_dmub_srv || !dc_dmub_srv->dmub) {
		DC_LOG_ERROR("%s: invalid parameters.", __func__);
		return;
	}

	if (!dc_dmub_srv_get_diagnostic_data(dc_dmub_srv, &diag_data)) {
		DC_LOG_ERROR("%s: dc_dmub_srv_get_diagnostic_data failed.", __func__);
		return;
	}

	DC_LOG_DEBUG("DMCUB STATE:");
	DC_LOG_DEBUG("    dmcub_version      : %08x", diag_data.dmcub_version);
	DC_LOG_DEBUG("    scratch  [0]       : %08x", diag_data.scratch[0]);
	DC_LOG_DEBUG("    scratch  [1]       : %08x", diag_data.scratch[1]);
	DC_LOG_DEBUG("    scratch  [2]       : %08x", diag_data.scratch[2]);
	DC_LOG_DEBUG("    scratch  [3]       : %08x", diag_data.scratch[3]);
	DC_LOG_DEBUG("    scratch  [4]       : %08x", diag_data.scratch[4]);
	DC_LOG_DEBUG("    scratch  [5]       : %08x", diag_data.scratch[5]);
	DC_LOG_DEBUG("    scratch  [6]       : %08x", diag_data.scratch[6]);
	DC_LOG_DEBUG("    scratch  [7]       : %08x", diag_data.scratch[7]);
	DC_LOG_DEBUG("    scratch  [8]       : %08x", diag_data.scratch[8]);
	DC_LOG_DEBUG("    scratch  [9]       : %08x", diag_data.scratch[9]);
	DC_LOG_DEBUG("    scratch [10]       : %08x", diag_data.scratch[10]);
	DC_LOG_DEBUG("    scratch [11]       : %08x", diag_data.scratch[11]);
	DC_LOG_DEBUG("    scratch [12]       : %08x", diag_data.scratch[12]);
	DC_LOG_DEBUG("    scratch [13]       : %08x", diag_data.scratch[13]);
	DC_LOG_DEBUG("    scratch [14]       : %08x", diag_data.scratch[14]);
	DC_LOG_DEBUG("    scratch [15]       : %08x", diag_data.scratch[15]);
	DC_LOG_DEBUG("    pc                 : %08x", diag_data.pc);
	DC_LOG_DEBUG("    unk_fault_addr     : %08x", diag_data.undefined_address_fault_addr);
	DC_LOG_DEBUG("    inst_fault_addr    : %08x", diag_data.inst_fetch_fault_addr);
	DC_LOG_DEBUG("    data_fault_addr    : %08x", diag_data.data_write_fault_addr);
	DC_LOG_DEBUG("    inbox1_rptr        : %08x", diag_data.inbox1_rptr);
	DC_LOG_DEBUG("    inbox1_wptr        : %08x", diag_data.inbox1_wptr);
	DC_LOG_DEBUG("    inbox1_size        : %08x", diag_data.inbox1_size);
	DC_LOG_DEBUG("    inbox0_rptr        : %08x", diag_data.inbox0_rptr);
	DC_LOG_DEBUG("    inbox0_wptr        : %08x", diag_data.inbox0_wptr);
	DC_LOG_DEBUG("    inbox0_size        : %08x", diag_data.inbox0_size);
	DC_LOG_DEBUG("    is_enabled         : %d", diag_data.is_dmcub_enabled);
	DC_LOG_DEBUG("    is_soft_reset      : %d", diag_data.is_dmcub_soft_reset);
	DC_LOG_DEBUG("    is_secure_reset    : %d", diag_data.is_dmcub_secure_reset);
	DC_LOG_DEBUG("    is_traceport_en    : %d", diag_data.is_traceport_en);
	DC_LOG_DEBUG("    is_cw0_en          : %d", diag_data.is_cw0_enabled);
	DC_LOG_DEBUG("    is_cw6_en          : %d", diag_data.is_cw6_enabled);
}

static bool dc_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
{
	struct pipe_ctx *test_pipe, *split_pipe;
	const struct scaler_data *scl_data = &pipe_ctx->plane_res.scl_data;
	struct rect r1 = scl_data->recout, r2, r2_half;
	int r1_r = r1.x + r1.width, r1_b = r1.y + r1.height, r2_r, r2_b;
	int cur_layer = pipe_ctx->plane_state->layer_index;

	/**
	 * Disable the cursor if there's another pipe above this with a
	 * plane that contains this pipe's viewport to prevent double cursor
	 * and incorrect scaling artifacts.
	 */
	for (test_pipe = pipe_ctx->top_pipe; test_pipe;
	     test_pipe = test_pipe->top_pipe) {
		// Skip invisible layer and pipe-split plane on same layer
		if (!test_pipe->plane_state->visible || test_pipe->plane_state->layer_index == cur_layer)
			continue;

		r2 = test_pipe->plane_res.scl_data.recout;
		r2_r = r2.x + r2.width;
		r2_b = r2.y + r2.height;
		split_pipe = test_pipe;

		/**
		 * There is another half plane on same layer because of
		 * pipe-split, merge together per same height.
		 */
		for (split_pipe = pipe_ctx->top_pipe; split_pipe;
		     split_pipe = split_pipe->top_pipe)
			if (split_pipe->plane_state->layer_index == test_pipe->plane_state->layer_index) {
				r2_half = split_pipe->plane_res.scl_data.recout;
				r2.x = (r2_half.x < r2.x) ? r2_half.x : r2.x;
				r2.width = r2.width + r2_half.width;
				r2_r = r2.x + r2.width;
				break;
			}

		if (r1.x >= r2.x && r1.y >= r2.y && r1_r <= r2_r && r1_b <= r2_b)
			return true;
	}

	return false;
}

static bool dc_dmub_should_update_cursor_data(struct pipe_ctx *pipe_ctx)
{
	if (pipe_ctx->plane_state != NULL) {
		if (pipe_ctx->plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
			return false;

		if (dc_can_pipe_disable_cursor(pipe_ctx))
			return false;
	}

	if ((pipe_ctx->stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1 ||
	     pipe_ctx->stream->link->psr_settings.psr_version == DC_PSR_VERSION_1) &&
	    pipe_ctx->stream->ctx->dce_version >= DCN_VERSION_3_1)
		return true;

	if (pipe_ctx->stream->link->replay_settings.config.replay_supported)
		return true;

	return false;
}

static void dc_build_cursor_update_payload0(
		struct pipe_ctx *pipe_ctx, uint8_t p_idx,
		struct dmub_cmd_update_cursor_payload0 *payload)
{
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	unsigned int panel_inst = 0;

	if (!dc_get_edp_link_panel_inst(hubp->ctx->dc,
					pipe_ctx->stream->link, &panel_inst))
		return;

	/* Payload: Cursor Rect is built from position & attribute
	 * x & y are obtained from position
	 */
	payload->cursor_rect.x = hubp->cur_rect.x;
	payload->cursor_rect.y = hubp->cur_rect.y;
	/* w & h are obtained from attribute */
	payload->cursor_rect.width = hubp->cur_rect.w;
	payload->cursor_rect.height = hubp->cur_rect.h;

	payload->enable = hubp->pos.cur_ctl.bits.cur_enable;
	payload->pipe_idx = p_idx;
	payload->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;
	payload->panel_inst = panel_inst;
}

static void dc_build_cursor_position_update_payload0(
		struct dmub_cmd_update_cursor_payload0 *pl, const uint8_t p_idx,
		const struct hubp *hubp, const struct dpp *dpp)
{
	/* Hubp */
	pl->position_cfg.pHubp.cur_ctl.raw = hubp->pos.cur_ctl.raw;
	pl->position_cfg.pHubp.position.raw = hubp->pos.position.raw;
	pl->position_cfg.pHubp.hot_spot.raw = hubp->pos.hot_spot.raw;
	pl->position_cfg.pHubp.dst_offset.raw = hubp->pos.dst_offset.raw;

	/* dpp */
	pl->position_cfg.pDpp.cur0_ctl.raw = dpp->pos.cur0_ctl.raw;
	pl->position_cfg.pipe_idx = p_idx;
}

static void dc_build_cursor_attribute_update_payload1(
		struct dmub_cursor_attributes_cfg *pl_A, const uint8_t p_idx,
		const struct hubp *hubp, const struct dpp *dpp)
{
	/* Hubp */
	pl_A->aHubp.SURFACE_ADDR_HIGH = hubp->att.SURFACE_ADDR_HIGH;
	pl_A->aHubp.SURFACE_ADDR = hubp->att.SURFACE_ADDR;
	pl_A->aHubp.cur_ctl.raw = hubp->att.cur_ctl.raw;
	pl_A->aHubp.size.raw = hubp->att.size.raw;
	pl_A->aHubp.settings.raw = hubp->att.settings.raw;

	/* dpp */
	pl_A->aDpp.cur0_ctl.raw = dpp->att.cur0_ctl.raw;
}

/**
 * dc_send_update_cursor_info_to_dmu - Populate the DMCUB Cursor update info command
 *
 * @pCtx: [in] pipe context
 * @pipe_idx: [in] pipe index
 *
 * This function stores the cursor related information and passes it into
 * dmub
 */
void dc_send_update_cursor_info_to_dmu(
		struct pipe_ctx *pCtx, uint8_t pipe_idx)
{
	union dmub_rb_cmd cmd[2];
	union dmub_cmd_update_cursor_info_data *update_cursor_info_0 =
		&cmd[0].update_cursor_info.update_cursor_info_data;

	memset(cmd, 0, sizeof(cmd));

	if (!dc_dmub_should_update_cursor_data(pCtx))
		return;
	/*
	 * Since we use multi_cmd_pending for the dmub command, the 2nd command
	 * is only assigned to store the cursor attribute info.
	 * The 1st command can be viewed as 2 parts: one for the PSR/Replay data,
	 * the other to store the cursor position info.
	 *
	 * The command header type must be the same when using multi_cmd_pending.
	 * Besides, while processing the 2nd command in the DMUB, the sub type is
	 * unused, so it is meaningless to pass a sub type header with a different
	 * type.
	 */

	{
		/* Build Payload#0 Header */
		cmd[0].update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO;
		cmd[0].update_cursor_info.header.payload_bytes =
			sizeof(cmd[0].update_cursor_info.update_cursor_info_data);
		cmd[0].update_cursor_info.header.multi_cmd_pending = 1; // To combine multiple dmub cmds, 1st cmd

		/* Prepare Payload */
		dc_build_cursor_update_payload0(pCtx, pipe_idx, &update_cursor_info_0->payload0);

		dc_build_cursor_position_update_payload0(&update_cursor_info_0->payload0, pipe_idx,
				pCtx->plane_res.hubp, pCtx->plane_res.dpp);
	}
	{
		/* Build Payload#1 Header */
		cmd[1].update_cursor_info.header.type = DMUB_CMD__UPDATE_CURSOR_INFO;
		cmd[1].update_cursor_info.header.payload_bytes = sizeof(struct cursor_attributes_cfg);
		cmd[1].update_cursor_info.header.multi_cmd_pending = 0; // Indicate it's the last command.

		dc_build_cursor_attribute_update_payload1(
				&cmd[1].update_cursor_info.update_cursor_info_data.payload1.attribute_cfg,
				pipe_idx, pCtx->plane_res.hubp, pCtx->plane_res.dpp);

		/* Submit both update_cursor_info commands to the DMUB */
		dc_wake_and_execute_dmub_cmd_list(pCtx->stream->ctx, 2, cmd, DM_DMUB_WAIT_TYPE_WAIT);
	}
}

bool dc_dmub_check_min_version(struct dmub_srv *srv)
{
	if (!srv->hw_funcs.is_psrsu_supported)
		return true;
	return srv->hw_funcs.is_psrsu_supported(srv);
}

void dc_dmub_srv_enable_dpia_trace(const struct dc *dc)
{
	struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub) {
		DC_LOG_ERROR("%s: invalid parameters.", __func__);
		return;
	}

	if (!dc_wake_and_execute_gpint(dc->ctx, DMUB_GPINT__SET_TRACE_BUFFER_MASK_WORD1,
				       0x0010, NULL, DM_DMUB_WAIT_TYPE_WAIT)) {
		DC_LOG_ERROR("timeout updating trace buffer mask word\n");
		return;
	}

	if (!dc_wake_and_execute_gpint(dc->ctx, DMUB_GPINT__UPDATE_TRACE_BUFFER_MASK,
				       0x0000, NULL, DM_DMUB_WAIT_TYPE_WAIT)) {
		DC_LOG_ERROR("timeout updating trace buffer mask word\n");
		return;
	}

	DC_LOG_DEBUG("Enabled DPIA trace\n");
}

void dc_dmub_srv_subvp_save_surf_addr(const struct dc_dmub_srv *dc_dmub_srv, const struct dc_plane_address *addr, uint8_t subvp_index)
{
	dmub_srv_subvp_save_surf_addr(dc_dmub_srv->dmub, addr, subvp_index);
}

bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait)
{
	struct dc_context *dc_ctx;
	enum dmub_status status;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return true;

	if (dc_dmub_srv->ctx->dc->debug.dmcub_emulation)
		return true;

	dc_ctx = dc_dmub_srv->ctx;

	if (wait) {
		if (dc_dmub_srv->ctx->dc->debug.disable_timeout) {
			do {
				status = dmub_srv_wait_for_hw_pwr_up(dc_dmub_srv->dmub, 500000);
			} while (status != DMUB_STATUS_OK);
		} else {
			status = dmub_srv_wait_for_hw_pwr_up(dc_dmub_srv->dmub, 500000);
			if (status != DMUB_STATUS_OK) {
				DC_ERROR("Error querying DMUB hw power up status: error=%d\n", status);
				return false;
			}
		}
	} else
		return dmub_srv_is_hw_pwr_up(dc_dmub_srv->dmub);

	return true;
}

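/*
 * Notifies the DMCUB whether the driver considers the hardware idle. On the
 * allow path, the shared-state IPS driver signals are refreshed from
 * dc->config.disable_ips before sending; the command is issued without the
 * wake helpers since this runs on the wake path itself.
 */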
static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
{
	struct dc_dmub_srv *dc_dmub_srv;
	union dmub_rb_cmd cmd = {0};

	if (dc->debug.dmcub_emulation)
		return;

	if (!dc->ctx->dmub_srv || !dc->ctx->dmub_srv->dmub)
		return;

	dc_dmub_srv = dc->ctx->dmub_srv;

	memset(&cmd, 0, sizeof(cmd));
	cmd.idle_opt_notify_idle.header.type = DMUB_CMD__IDLE_OPT;
	cmd.idle_opt_notify_idle.header.sub_type = DMUB_CMD__IDLE_OPT_DCN_NOTIFY_IDLE;
	cmd.idle_opt_notify_idle.header.payload_bytes =
		sizeof(cmd.idle_opt_notify_idle) -
		sizeof(cmd.idle_opt_notify_idle.header);

	cmd.idle_opt_notify_idle.cntl_data.driver_idle = allow_idle;

	if (allow_idle) {
		volatile struct dmub_shared_state_ips_driver *ips_driver =
			&dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER].data.ips_driver;
		union dmub_shared_state_ips_driver_signals new_signals;

		dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);

		memset(&new_signals, 0, sizeof(new_signals));

		if (dc->config.disable_ips == DMUB_IPS_ENABLE ||
		    dc->config.disable_ips == DMUB_IPS_DISABLE_DYNAMIC) {
			new_signals.bits.allow_pg = 1;
			new_signals.bits.allow_ips1 = 1;
			new_signals.bits.allow_ips2 = 1;
			new_signals.bits.allow_z10 = 1;
		} else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS1) {
			new_signals.bits.allow_ips1 = 1;
		} else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2) {
			new_signals.bits.allow_pg = 1;
			new_signals.bits.allow_ips1 = 1;
		} else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2_Z10) {
			new_signals.bits.allow_pg = 1;
			new_signals.bits.allow_ips1 = 1;
			new_signals.bits.allow_ips2 = 1;
		}

		ips_driver->signals = new_signals;
	}

	/* NOTE: This does not use the "wake" interface since this is part of the wake path. */
	/* We also do not perform a wait since DMCUB could enter idle after the notification. */
	dm_execute_dmub_cmd(dc->ctx, &cmd, allow_idle ? DM_DMUB_WAIT_TYPE_NO_WAIT : DM_DMUB_WAIT_TYPE_WAIT);
}

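/*
 * Brings the hardware back out of IPS. The driver allow signals are cleared
 * first; if IPS2 was committed by firmware, PMFW is asked to exit the low
 * power state and the commit bits are polled before verifying DMCUB power
 * and resyncing inbox1.
 */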
static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
{
	struct dc_dmub_srv *dc_dmub_srv;

	if (dc->debug.dmcub_emulation)
		return;

	if (!dc->ctx->dmub_srv || !dc->ctx->dmub_srv->dmub)
		return;

	dc_dmub_srv = dc->ctx->dmub_srv;

	if (dc->clk_mgr->funcs->exit_low_power_state) {
		volatile const struct dmub_shared_state_ips_fw *ips_fw =
			&dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_FW].data.ips_fw;
		volatile struct dmub_shared_state_ips_driver *ips_driver =
			&dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER].data.ips_driver;
		union dmub_shared_state_ips_driver_signals prev_driver_signals = ips_driver->signals;

		ips_driver->signals.all = 0;

		if (prev_driver_signals.bits.allow_ips2) {
			udelay(dc->debug.ips2_eval_delay_us);

			if (ips_fw->signals.bits.ips2_commit) {
				// Tell PMFW to exit low power state
				dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);

				// Wait for IPS2 entry upper bound
				udelay(dc->debug.ips2_entry_delay_us);

				dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);

				while (ips_fw->signals.bits.ips2_commit)
					udelay(1);

				if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true))
					ASSERT(0);

				dmub_srv_sync_inbox1(dc->ctx->dmub_srv->dmub);
			}
		}

		dc_dmub_srv_notify_idle(dc, false);
		if (prev_driver_signals.bits.allow_ips1) {
			while (ips_fw->signals.bits.ips1_commit)
				udelay(1);
		}
	}

	if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true))
		ASSERT(0);
}

void dc_dmub_srv_set_power_state(struct dc_dmub_srv *dc_dmub_srv, enum dc_acpi_cm_power_state powerState)
{
	struct dmub_srv *dmub;

	if (!dc_dmub_srv)
		return;

	dmub = dc_dmub_srv->dmub;

	if (powerState == DC_ACPI_CM_POWER_STATE_D0)
		dmub_srv_set_power_state(dmub, DMUB_POWER_STATE_D0);
	else
		dmub_srv_set_power_state(dmub, DMUB_POWER_STATE_D3);
}

void dc_dmub_srv_apply_idle_power_optimizations(const struct dc *dc, bool allow_idle)
{
	struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return;

	if (dc_dmub_srv->idle_allowed == allow_idle)
		return;

	/*
	 * Entering a low power state requires a driver notification.
	 * Powering up the hardware requires notifying PMFW and DMCUB.
	 * Clearing the driver idle allow requires a DMCUB command.
	 * DMCUB commands require the DMCUB to be powered up and restored.
	 *
	 * Exit out early to prevent an infinite loop of DMCUB commands
	 * triggering exit low power - use software state to track this.
	 */
	dc_dmub_srv->idle_allowed = allow_idle;

	if (!allow_idle)
		dc_dmub_srv_exit_low_power_state(dc);
	else
		dc_dmub_srv_notify_idle(dc, allow_idle);
}

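/*
 * Wake-aware command execution: if idle optimizations are currently allowed,
 * they are disabled around command execution and re-enabled afterwards
 * (unless disable_dmub_reallow_idle is set).
 */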
bool dc_wake_and_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd,
				  enum dm_dmub_wait_type wait_type)
{
	return dc_wake_and_execute_dmub_cmd_list(ctx, 1, cmd, wait_type);
}

bool dc_wake_and_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned int count,
				       union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type)
{
	struct dc_dmub_srv *dc_dmub_srv = ctx->dmub_srv;
	bool result = false, reallow_idle = false;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;

	if (count == 0)
		return true;

	if (dc_dmub_srv->idle_allowed) {
		dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, false);
		reallow_idle = true;
	}

	/*
	 * These may have different implementations in DM, so ensure
	 * that we guide it to the expected helper.
	 */
	if (count > 1)
		result = dm_execute_dmub_cmd_list(ctx, count, cmd, wait_type);
	else
		result = dm_execute_dmub_cmd(ctx, cmd, wait_type);

	if (result && reallow_idle && !ctx->dc->debug.disable_dmub_reallow_idle)
		dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, true);

	return result;
}

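/*
 * Sends a GPINT command to the DMCUB. A NO_WAIT request that times out is
 * still reported as success, since the caller opted not to wait for the
 * firmware acknowledgement.
 */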
static bool dc_dmub_execute_gpint(const struct dc_context *ctx, enum dmub_gpint_command command_code,
				  uint16_t param, uint32_t *response, enum dm_dmub_wait_type wait_type)
{
	struct dc_dmub_srv *dc_dmub_srv = ctx->dmub_srv;
	const uint32_t wait_us = wait_type == DM_DMUB_WAIT_TYPE_NO_WAIT ? 0 : 30;
	enum dmub_status status;

	if (response)
		*response = 0;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;

	status = dmub_srv_send_gpint_command(dc_dmub_srv->dmub, command_code, param, wait_us);
	if (status != DMUB_STATUS_OK) {
		if (status == DMUB_STATUS_TIMEOUT && wait_type == DM_DMUB_WAIT_TYPE_NO_WAIT)
			return true;

		return false;
	}

	if (response && wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)
		dmub_srv_get_gpint_response(dc_dmub_srv->dmub, response);

	return true;
}

bool dc_wake_and_execute_gpint(const struct dc_context *ctx, enum dmub_gpint_command command_code,
			       uint16_t param, uint32_t *response, enum dm_dmub_wait_type wait_type)
{
	struct dc_dmub_srv *dc_dmub_srv = ctx->dmub_srv;
	bool result = false, reallow_idle = false;

	if (!dc_dmub_srv || !dc_dmub_srv->dmub)
		return false;

	if (dc_dmub_srv->idle_allowed) {
		dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, false);
		reallow_idle = true;
	}

	result = dc_dmub_execute_gpint(ctx, command_code, param, response, wait_type);

	if (result && reallow_idle && !ctx->dc->debug.disable_dmub_reallow_idle)
		dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, true);

	return result;
}