1/*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Author: Huang Rui
23 *
24 */
25
26#include <linux/firmware.h>
27#include <drm/drm_drv.h>
28
29#include "amdgpu.h"
30#include "amdgpu_psp.h"
31#include "amdgpu_ucode.h"
32#include "amdgpu_xgmi.h"
33#include "soc15_common.h"
34#include "psp_v3_1.h"
35#include "psp_v10_0.h"
36#include "psp_v11_0.h"
37#include "psp_v11_0_8.h"
38#include "psp_v12_0.h"
39#include "psp_v13_0.h"
40#include "psp_v13_0_4.h"
41
42#include "amdgpu_ras.h"
43#include "amdgpu_securedisplay.h"
44#include "amdgpu_atomfirmware.h"
45
46#define AMD_VBIOS_FILE_MAX_SIZE_B (1024*1024*3)
47
48static int psp_load_smu_fw(struct psp_context *psp);
49static int psp_rap_terminate(struct psp_context *psp);
50static int psp_securedisplay_terminate(struct psp_context *psp);
51
52static int psp_ring_init(struct psp_context *psp,
53 enum psp_ring_type ring_type)
54{
55 int ret = 0;
56 struct psp_ring *ring;
57 struct amdgpu_device *adev = psp->adev;
58
59 ring = &psp->km_ring;
60
61 ring->ring_type = ring_type;
62
63 /* allocate 4k Page of Local Frame Buffer memory for ring */
64 ring->ring_size = 0x1000;
65 ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
66 AMDGPU_GEM_DOMAIN_VRAM |
67 AMDGPU_GEM_DOMAIN_GTT,
68 &adev->firmware.rbuf,
69 &ring->ring_mem_mc_addr,
70 (void **)&ring->ring_mem);
71 if (ret) {
72 ring->ring_size = 0;
73 return ret;
74 }
75
76 return 0;
77}
78
79/*
80 * Due to DF Cstate management centralized to PMFW, the firmware
81 * loading sequence will be updated as below:
82 * - Load KDB
83 * - Load SYS_DRV
84 * - Load tOS
85 * - Load PMFW
86 * - Setup TMR
87 * - Load other non-psp fw
88 * - Load ASD
89 * - Load XGMI/RAS/HDCP/DTM TA if any
90 *
91 * This new sequence is required for
92 * - Arcturus and onwards
93 */
94static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp)
95{
96 struct amdgpu_device *adev = psp->adev;
97
98 if (amdgpu_sriov_vf(adev)) {
99 psp->pmfw_centralized_cstate_management = false;
100 return;
101 }
102
103 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
104 case IP_VERSION(11, 0, 0):
105 case IP_VERSION(11, 0, 4):
106 case IP_VERSION(11, 0, 5):
107 case IP_VERSION(11, 0, 7):
108 case IP_VERSION(11, 0, 9):
109 case IP_VERSION(11, 0, 11):
110 case IP_VERSION(11, 0, 12):
111 case IP_VERSION(11, 0, 13):
112 case IP_VERSION(13, 0, 0):
113 case IP_VERSION(13, 0, 2):
114 case IP_VERSION(13, 0, 7):
115 psp->pmfw_centralized_cstate_management = true;
116 break;
117 default:
118 psp->pmfw_centralized_cstate_management = false;
119 break;
120 }
121}
122
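/*
 * Under SR-IOV only the CAP (and, for some ASICs, TA) microcode is requested
 * here, and the per-ASIC autoload firmware ID is recorded for the VF.
 */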
123static int psp_init_sriov_microcode(struct psp_context *psp)
124{
125 struct amdgpu_device *adev = psp->adev;
126 char ucode_prefix[30];
127 int ret = 0;
128
129 amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));
130
131 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
132 case IP_VERSION(9, 0, 0):
133 case IP_VERSION(11, 0, 7):
134 case IP_VERSION(11, 0, 9):
135 adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
136 ret = psp_init_cap_microcode(psp, ucode_prefix);
137 break;
138 case IP_VERSION(13, 0, 2):
139 adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
140 ret = psp_init_cap_microcode(psp, ucode_prefix);
141 ret &= psp_init_ta_microcode(psp, ucode_prefix);
142 break;
143 case IP_VERSION(13, 0, 0):
144 adev->virt.autoload_ucode_id = 0;
145 break;
146 case IP_VERSION(13, 0, 6):
147 ret = psp_init_cap_microcode(psp, ucode_prefix);
148 ret &= psp_init_ta_microcode(psp, ucode_prefix);
149 break;
150 case IP_VERSION(13, 0, 10):
151 adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MES1_DATA;
152 ret = psp_init_cap_microcode(psp, ucode_prefix);
153 break;
154 default:
155 return -EINVAL;
156 }
157 return ret;
158}
159
160static int psp_early_init(void *handle)
161{
162 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
163 struct psp_context *psp = &adev->psp;
164
165 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
166 case IP_VERSION(9, 0, 0):
167 psp_v3_1_set_psp_funcs(psp);
168 psp->autoload_supported = false;
169 break;
170 case IP_VERSION(10, 0, 0):
171 case IP_VERSION(10, 0, 1):
172 psp_v10_0_set_psp_funcs(psp);
173 psp->autoload_supported = false;
174 break;
175 case IP_VERSION(11, 0, 2):
176 case IP_VERSION(11, 0, 4):
177 psp_v11_0_set_psp_funcs(psp);
178 psp->autoload_supported = false;
179 break;
180 case IP_VERSION(11, 0, 0):
181 case IP_VERSION(11, 0, 7):
182 adev->psp.sup_pd_fw_up = !amdgpu_sriov_vf(adev);
183 fallthrough;
184 case IP_VERSION(11, 0, 5):
185 case IP_VERSION(11, 0, 9):
186 case IP_VERSION(11, 0, 11):
187 case IP_VERSION(11, 5, 0):
188 case IP_VERSION(11, 0, 12):
189 case IP_VERSION(11, 0, 13):
190 psp_v11_0_set_psp_funcs(psp);
191 psp->autoload_supported = true;
192 break;
193 case IP_VERSION(11, 0, 3):
194 case IP_VERSION(12, 0, 1):
195 psp_v12_0_set_psp_funcs(psp);
196 break;
197 case IP_VERSION(13, 0, 2):
198 case IP_VERSION(13, 0, 6):
199 psp_v13_0_set_psp_funcs(psp);
200 break;
201 case IP_VERSION(13, 0, 1):
202 case IP_VERSION(13, 0, 3):
203 case IP_VERSION(13, 0, 5):
204 case IP_VERSION(13, 0, 8):
205 case IP_VERSION(13, 0, 11):
206 case IP_VERSION(14, 0, 0):
207 psp_v13_0_set_psp_funcs(psp);
208 psp->autoload_supported = true;
209 break;
210 case IP_VERSION(11, 0, 8):
211 if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) {
212 psp_v11_0_8_set_psp_funcs(psp);
213 psp->autoload_supported = false;
214 }
215 break;
216 case IP_VERSION(13, 0, 0):
217 case IP_VERSION(13, 0, 7):
218 case IP_VERSION(13, 0, 10):
219 psp_v13_0_set_psp_funcs(psp);
220 psp->autoload_supported = true;
221 adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
222 break;
223 case IP_VERSION(13, 0, 4):
224 psp_v13_0_4_set_psp_funcs(psp);
225 psp->autoload_supported = true;
226 break;
227 default:
228 return -EINVAL;
229 }
230
231 psp->adev = adev;
232
233 psp_check_pmfw_centralized_cstate_management(psp);
234
235 if (amdgpu_sriov_vf(adev))
236 return psp_init_sriov_microcode(psp);
237 else
238 return psp_init_microcode(psp);
239}
240
241void psp_ta_free_shared_buf(struct ta_mem_context *mem_ctx)
242{
243 amdgpu_bo_free_kernel(&mem_ctx->shared_bo, &mem_ctx->shared_mc_addr,
244 &mem_ctx->shared_buf);
245 mem_ctx->shared_bo = NULL;
246}
247
248static void psp_free_shared_bufs(struct psp_context *psp)
249{
250 void *tmr_buf;
251 void **pptr;
252
253 /* free TMR memory buffer */
254 pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
255 amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
256 psp->tmr_bo = NULL;
257
258 /* free xgmi shared memory */
259 psp_ta_free_shared_buf(&psp->xgmi_context.context.mem_context);
260
261 /* free ras shared memory */
262 psp_ta_free_shared_buf(&psp->ras_context.context.mem_context);
263
264 /* free hdcp shared memory */
265 psp_ta_free_shared_buf(&psp->hdcp_context.context.mem_context);
266
267 /* free dtm shared memory */
268 psp_ta_free_shared_buf(&psp->dtm_context.context.mem_context);
269
270 /* free rap shared memory */
271 psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);
272
273 /* free securedisplay shared memory */
274 psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
275
276
277}
278
279static void psp_memory_training_fini(struct psp_context *psp)
280{
281 struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
282
283 ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
284 kfree(ctx->sys_cache);
285 ctx->sys_cache = NULL;
286}
287
288static int psp_memory_training_init(struct psp_context *psp)
289{
290 int ret;
291 struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
292
293 if (ctx->init != PSP_MEM_TRAIN_RESERVE_SUCCESS) {
294 DRM_DEBUG("memory training is not supported!\n");
295 return 0;
296 }
297
298 ctx->sys_cache = kzalloc(ctx->train_data_size, GFP_KERNEL);
299 if (ctx->sys_cache == NULL) {
300 DRM_ERROR("alloc mem_train_ctx.sys_cache failed!\n");
301 ret = -ENOMEM;
302 goto Err_out;
303 }
304
305 DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
306 ctx->train_data_size,
307 ctx->p2c_train_data_offset,
308 ctx->c2p_train_data_offset);
309 ctx->init = PSP_MEM_TRAIN_INIT_SUCCESS;
310 return 0;
311
312Err_out:
313 psp_memory_training_fini(psp);
314 return ret;
315}
316
317/*
318 * Helper function to query a psp runtime database entry
319 *
320 * @adev: amdgpu_device pointer
321 * @entry_type: the type of psp runtime database entry
322 * @db_entry: runtime database entry pointer
323 *
324 * Return false if the runtime database doesn't exist or the entry is invalid,
325 * or true if the specific database entry is found and copied to @db_entry
326 */
327static bool psp_get_runtime_db_entry(struct amdgpu_device *adev,
328 enum psp_runtime_entry_type entry_type,
329 void *db_entry)
330{
331 uint64_t db_header_pos, db_dir_pos;
332 struct psp_runtime_data_header db_header = {0};
333 struct psp_runtime_data_directory db_dir = {0};
334 bool ret = false;
335 int i;
336
337 if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6))
338 return false;
339
340 db_header_pos = adev->gmc.mc_vram_size - PSP_RUNTIME_DB_OFFSET;
341 db_dir_pos = db_header_pos + sizeof(struct psp_runtime_data_header);
342
343 /* read runtime db header from vram */
344 amdgpu_device_vram_access(adev, db_header_pos, (uint32_t *)&db_header,
345 sizeof(struct psp_runtime_data_header), false);
346
347 if (db_header.cookie != PSP_RUNTIME_DB_COOKIE_ID) {
348 /* runtime db doesn't exist, exit */
349 dev_dbg(adev->dev, "PSP runtime database doesn't exist\n");
350 return false;
351 }
352
353 /* read runtime database entry from vram */
354 amdgpu_device_vram_access(adev, db_dir_pos, (uint32_t *)&db_dir,
355 sizeof(struct psp_runtime_data_directory), false);
356
357 if (db_dir.entry_count >= PSP_RUNTIME_DB_DIAG_ENTRY_MAX_COUNT) {
358 /* invalid db entry count, exit */
359 dev_warn(adev->dev, "Invalid PSP runtime database entry count\n");
360 return false;
361 }
362
363 /* look up for requested entry type */
364 for (i = 0; i < db_dir.entry_count && !ret; i++) {
365 if (db_dir.entry_list[i].entry_type == entry_type) {
366 switch (entry_type) {
367 case PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG:
368 if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_boot_cfg_entry)) {
369 /* invalid db entry size */
370 dev_warn(adev->dev, "Invalid PSP runtime database boot cfg entry size\n");
371 return false;
372 }
373 /* read runtime database entry */
374 amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
375 (uint32_t *)db_entry, sizeof(struct psp_runtime_boot_cfg_entry), false);
376 ret = true;
377 break;
378 case PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS:
379 if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_scpm_entry)) {
380 /* invalid db entry size */
381 dev_warn(adev->dev, "Invalid PSP runtime database scpm entry size\n");
382 return false;
383 }
384 /* read runtime database entry */
385 amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
386 (uint32_t *)db_entry, sizeof(struct psp_runtime_scpm_entry), false);
387 ret = true;
388 break;
389 default:
390 ret = false;
391 break;
392 }
393 }
394 }
395
396 return ret;
397}
398
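/*
 * Software-side setup: allocate the command buffer, read the SCPM and boot
 * config entries from the PSP runtime database, run two-stage memory
 * training when enabled, and create the fw_pri, fence and cmd GPU buffers.
 */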
399static int psp_sw_init(void *handle)
400{
401 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
402 struct psp_context *psp = &adev->psp;
403 int ret;
404 struct psp_runtime_boot_cfg_entry boot_cfg_entry;
405 struct psp_memory_training_context *mem_training_ctx = &psp->mem_train_ctx;
406 struct psp_runtime_scpm_entry scpm_entry;
407
408 psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
409 if (!psp->cmd) {
410 DRM_ERROR("Failed to allocate memory to command buffer!\n");
411 ret = -ENOMEM;
412 }
413
414 adev->psp.xgmi_context.supports_extended_data =
415 !adev->gmc.xgmi.connected_to_cpu &&
416 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2);
417
418 memset(&scpm_entry, 0, sizeof(scpm_entry));
419 if ((psp_get_runtime_db_entry(adev,
420 PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS,
421 &scpm_entry)) &&
422 (scpm_entry.scpm_status != SCPM_DISABLE)) {
423 adev->scpm_enabled = true;
424 adev->scpm_status = scpm_entry.scpm_status;
425 } else {
426 adev->scpm_enabled = false;
427 adev->scpm_status = SCPM_DISABLE;
428 }
429
430 /* TODO: stop gpu driver services and print alarm if scpm is enabled with error status */
431
432 memset(&boot_cfg_entry, 0, sizeof(boot_cfg_entry));
433 if (psp_get_runtime_db_entry(adev,
434 PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG,
435 &boot_cfg_entry)) {
436 psp->boot_cfg_bitmask = boot_cfg_entry.boot_cfg_bitmask;
437 if ((psp->boot_cfg_bitmask) &
438 BOOT_CFG_FEATURE_TWO_STAGE_DRAM_TRAINING) {
439 /* If psp runtime database exists, then
440 * only enable two stage memory training
441 * when TWO_STAGE_DRAM_TRAINING bit is set
442 * in runtime database
443 */
444 mem_training_ctx->enable_mem_training = true;
445 }
446
447 } else {
448 /* If psp runtime database doesn't exist or is
449 * invalid, force enable two stage memory training
450 */
451 mem_training_ctx->enable_mem_training = true;
452 }
453
454 if (mem_training_ctx->enable_mem_training) {
455 ret = psp_memory_training_init(psp);
456 if (ret) {
457 DRM_ERROR("Failed to initialize memory training!\n");
458 return ret;
459 }
460
461 ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT);
462 if (ret) {
463 DRM_ERROR("Failed to process memory training!\n");
464 return ret;
465 }
466 }
467
468 ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
469 amdgpu_sriov_vf(adev) ?
470 AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
471 &psp->fw_pri_bo,
472 &psp->fw_pri_mc_addr,
473 &psp->fw_pri_buf);
474 if (ret)
475 return ret;
476
477 ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
478 AMDGPU_GEM_DOMAIN_VRAM |
479 AMDGPU_GEM_DOMAIN_GTT,
480 &psp->fence_buf_bo,
481 &psp->fence_buf_mc_addr,
482 &psp->fence_buf);
483 if (ret)
484 goto failed1;
485
486 ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE,
487 AMDGPU_GEM_DOMAIN_VRAM |
488 AMDGPU_GEM_DOMAIN_GTT,
489 &psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
490 (void **)&psp->cmd_buf_mem);
491 if (ret)
492 goto failed2;
493
494 return 0;
495
496failed2:
497 amdgpu_bo_free_kernel(&psp->fence_buf_bo,
498 &psp->fence_buf_mc_addr, &psp->fence_buf);
499failed1:
500 amdgpu_bo_free_kernel(&psp->fw_pri_bo,
501 &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
502 return ret;
503}
504
505static int psp_sw_fini(void *handle)
506{
507 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
508 struct psp_context *psp = &adev->psp;
509 struct psp_gfx_cmd_resp *cmd = psp->cmd;
510
511 psp_memory_training_fini(psp);
512
513 amdgpu_ucode_release(&psp->sos_fw);
514 amdgpu_ucode_release(&psp->asd_fw);
515 amdgpu_ucode_release(&psp->ta_fw);
516 amdgpu_ucode_release(&psp->cap_fw);
517 amdgpu_ucode_release(&psp->toc_fw);
518
519 kfree(cmd);
520 cmd = NULL;
521
522 psp_free_shared_bufs(psp);
523
524 if (psp->km_ring.ring_mem)
525 amdgpu_bo_free_kernel(&adev->firmware.rbuf,
526 &psp->km_ring.ring_mem_mc_addr,
527 (void **)&psp->km_ring.ring_mem);
528
529 amdgpu_bo_free_kernel(&psp->fw_pri_bo,
530 &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
531 amdgpu_bo_free_kernel(&psp->fence_buf_bo,
532 &psp->fence_buf_mc_addr, &psp->fence_buf);
533 amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
534 (void **)&psp->cmd_buf_mem);
535
536 return 0;
537}
538
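/*
 * Poll a PSP register until the masked value equals reg_val (or, when
 * check_changed is set, until it no longer equals reg_val), waiting 1 us
 * per iteration up to adev->usec_timeout; returns -ETIME on timeout.
 */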
539int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
540 uint32_t reg_val, uint32_t mask, bool check_changed)
541{
542 uint32_t val;
543 int i;
544 struct amdgpu_device *adev = psp->adev;
545
546 if (psp->adev->no_hw_access)
547 return 0;
548
549 for (i = 0; i < adev->usec_timeout; i++) {
550 val = RREG32(reg_index);
551 if (check_changed) {
552 if (val != reg_val)
553 return 0;
554 } else {
555 if ((val & mask) == reg_val)
556 return 0;
557 }
558 udelay(1);
559 }
560
561 return -ETIME;
562}
563
564int psp_wait_for_spirom_update(struct psp_context *psp, uint32_t reg_index,
565 uint32_t reg_val, uint32_t mask, uint32_t msec_timeout)
566{
567 uint32_t val;
568 int i;
569 struct amdgpu_device *adev = psp->adev;
570
571 if (psp->adev->no_hw_access)
572 return 0;
573
574 for (i = 0; i < msec_timeout; i++) {
575 val = RREG32(reg_index);
576 if ((val & mask) == reg_val)
577 return 0;
578 msleep(1);
579 }
580
581 return -ETIME;
582}
583
584static const char *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id)
585{
586 switch (cmd_id) {
587 case GFX_CMD_ID_LOAD_TA:
588 return "LOAD_TA";
589 case GFX_CMD_ID_UNLOAD_TA:
590 return "UNLOAD_TA";
591 case GFX_CMD_ID_INVOKE_CMD:
592 return "INVOKE_CMD";
593 case GFX_CMD_ID_LOAD_ASD:
594 return "LOAD_ASD";
595 case GFX_CMD_ID_SETUP_TMR:
596 return "SETUP_TMR";
597 case GFX_CMD_ID_LOAD_IP_FW:
598 return "LOAD_IP_FW";
599 case GFX_CMD_ID_DESTROY_TMR:
600 return "DESTROY_TMR";
601 case GFX_CMD_ID_SAVE_RESTORE:
602 return "SAVE_RESTORE_IP_FW";
603 case GFX_CMD_ID_SETUP_VMR:
604 return "SETUP_VMR";
605 case GFX_CMD_ID_DESTROY_VMR:
606 return "DESTROY_VMR";
607 case GFX_CMD_ID_PROG_REG:
608 return "PROG_REG";
609 case GFX_CMD_ID_GET_FW_ATTESTATION:
610 return "GET_FW_ATTESTATION";
611 case GFX_CMD_ID_LOAD_TOC:
612 return "ID_LOAD_TOC";
613 case GFX_CMD_ID_AUTOLOAD_RLC:
614 return "AUTOLOAD_RLC";
615 case GFX_CMD_ID_BOOT_CFG:
616 return "BOOT_CFG";
617 default:
618 return "UNKNOWN CMD";
619 }
620}
621
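/*
 * Copy a GFX command into the shared command buffer, submit it on the KM
 * ring with a fresh fence index, and poll the fence buffer for completion,
 * bailing out early if a RAS fatal-error interrupt has been raised.
 */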
622static int
623psp_cmd_submit_buf(struct psp_context *psp,
624 struct amdgpu_firmware_info *ucode,
625 struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr)
626{
627 int ret;
628 int index;
629 int timeout = 20000;
630 bool ras_intr = false;
631 bool skip_unsupport = false;
632
633 if (psp->adev->no_hw_access)
634 return 0;
635
636 memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);
637
638 memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));
639
640 index = atomic_inc_return(&psp->fence_value);
641 ret = psp_ring_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index);
642 if (ret) {
643 atomic_dec(&psp->fence_value);
644 goto exit;
645 }
646
647 amdgpu_device_invalidate_hdp(psp->adev, NULL);
648 while (*((unsigned int *)psp->fence_buf) != index) {
649 if (--timeout == 0)
650 break;
651 /*
652 * Shouldn't wait for timeout when err_event_athub occurs,
653 * because gpu reset thread triggered and lock resource should
654 * be released for psp resume sequence.
655 */
656 ras_intr = amdgpu_ras_intr_triggered();
657 if (ras_intr)
658 break;
659 usleep_range(10, 100);
660 amdgpu_device_invalidate_hdp(psp->adev, NULL);
661 }
662
663 /* We allow TEE_ERROR_NOT_SUPPORTED for VMR command and PSP_ERR_UNKNOWN_COMMAND in SRIOV */
664 skip_unsupport = (psp->cmd_buf_mem->resp.status == TEE_ERROR_NOT_SUPPORTED ||
665 psp->cmd_buf_mem->resp.status == PSP_ERR_UNKNOWN_COMMAND) && amdgpu_sriov_vf(psp->adev);
666
667 memcpy(&cmd->resp, &psp->cmd_buf_mem->resp, sizeof(struct psp_gfx_resp));
668
669 /* In some cases the psp response status is not 0 even though there is no
670 * problem while the command is submitted. Some versions of PSP FW
671 * don't write 0 to that field.
672 * So here we only print a warning instead of an error during psp
673 * initialization to avoid breaking hw_init, and we don't return
674 * -EINVAL.
675 */
676 if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
677 if (ucode)
678 DRM_WARN("failed to load ucode %s(0x%X) ",
679 amdgpu_ucode_name(ucode->ucode_id), ucode->ucode_id);
680 DRM_WARN("psp gfx command %s(0x%X) failed and response status is (0x%X)\n",
681 psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id), psp->cmd_buf_mem->cmd_id,
682 psp->cmd_buf_mem->resp.status);
683 /* If any firmware (including CAP) load fails under SRIOV, it should
684 * return failure to stop the VF from initializing.
685 * Also return failure in case of timeout
686 */
687 if ((ucode && amdgpu_sriov_vf(psp->adev)) || !timeout) {
688 ret = -EINVAL;
689 goto exit;
690 }
691 }
692
693 if (ucode) {
694 ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo;
695 ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi;
696 }
697
698exit:
699 return ret;
700}
701
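/*
 * Serialize PSP command submission: take psp->mutex and hand back the
 * zeroed shared command descriptor. Callers must pair this with
 * release_psp_cmd_buf() once the command has been submitted.
 */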
702static struct psp_gfx_cmd_resp *acquire_psp_cmd_buf(struct psp_context *psp)
703{
704 struct psp_gfx_cmd_resp *cmd = psp->cmd;
705
706 mutex_lock(&psp->mutex);
707
708 memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));
709
710 return cmd;
711}
712
713static void release_psp_cmd_buf(struct psp_context *psp)
714{
715 mutex_unlock(&psp->mutex);
716}
717
718static void psp_prep_tmr_cmd_buf(struct psp_context *psp,
719 struct psp_gfx_cmd_resp *cmd,
720 uint64_t tmr_mc, struct amdgpu_bo *tmr_bo)
721{
722 struct amdgpu_device *adev = psp->adev;
723 uint32_t size = 0;
724 uint64_t tmr_pa = 0;
725
726 if (tmr_bo) {
727 size = amdgpu_bo_size(tmr_bo);
728 tmr_pa = amdgpu_gmc_vram_pa(adev, tmr_bo);
729 }
730
731 if (amdgpu_sriov_vf(psp->adev))
732 cmd->cmd_id = GFX_CMD_ID_SETUP_VMR;
733 else
734 cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
735 cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc);
736 cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc);
737 cmd->cmd.cmd_setup_tmr.buf_size = size;
738 cmd->cmd.cmd_setup_tmr.bitfield.virt_phy_addr = 1;
739 cmd->cmd.cmd_setup_tmr.system_phy_addr_lo = lower_32_bits(tmr_pa);
740 cmd->cmd.cmd_setup_tmr.system_phy_addr_hi = upper_32_bits(tmr_pa);
741}
742
743static void psp_prep_load_toc_cmd_buf(struct psp_gfx_cmd_resp *cmd,
744 uint64_t pri_buf_mc, uint32_t size)
745{
746 cmd->cmd_id = GFX_CMD_ID_LOAD_TOC;
747 cmd->cmd.cmd_load_toc.toc_phy_addr_lo = lower_32_bits(pri_buf_mc);
748 cmd->cmd.cmd_load_toc.toc_phy_addr_hi = upper_32_bits(pri_buf_mc);
749 cmd->cmd.cmd_load_toc.toc_size = size;
750}
751
752/* Issue the LOAD TOC cmd to PSP so it parses the toc and calculates the tmr size needed */
753static int psp_load_toc(struct psp_context *psp,
754 uint32_t *tmr_size)
755{
756 int ret;
757 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
758
759 /* Copy toc to psp firmware private buffer */
760 psp_copy_fw(psp, psp->toc.start_addr, psp->toc.size_bytes);
761
762 psp_prep_load_toc_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->toc.size_bytes);
763
764 ret = psp_cmd_submit_buf(psp, NULL, cmd,
765 psp->fence_buf_mc_addr);
766 if (!ret)
767 *tmr_size = psp->cmd_buf_mem->resp.tmr_size;
768
769 release_psp_cmd_buf(psp);
770
771 return ret;
772}
773
774static bool psp_boottime_tmr(struct psp_context *psp)
775{
776 switch (amdgpu_ip_version(psp->adev, MP0_HWIP, 0)) {
777 case IP_VERSION(13, 0, 6):
778 return true;
779 default:
780 return false;
781 }
782}
783
784/* Set up Trusted Memory Region */
785static int psp_tmr_init(struct psp_context *psp)
786{
787 int ret = 0;
788 int tmr_size;
789 void *tmr_buf;
790 void **pptr;
791
792 /*
793 * According to the HW engineers, they prefer the TMR address to be
794 * "naturally aligned", e.g. the start address is an integer multiple
795 * of the TMR size.
796 *
797 * Note: this memory needs to stay reserved until the driver is unloaded.
798 */
799 tmr_size = PSP_TMR_SIZE(psp->adev);
800
801 /* For ASICs that support RLC autoload, psp will parse the toc
802 * and calculate the total size of TMR needed
803 */
804 if (!amdgpu_sriov_vf(psp->adev) &&
805 psp->toc.start_addr &&
806 psp->toc.size_bytes &&
807 psp->fw_pri_buf) {
808 ret = psp_load_toc(psp, &tmr_size);
809 if (ret) {
810 DRM_ERROR("Failed to load toc\n");
811 return ret;
812 }
813 }
814
815 if (!psp->tmr_bo) {
816 pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
817 ret = amdgpu_bo_create_kernel(psp->adev, tmr_size,
818 PSP_TMR_ALIGNMENT,
819 AMDGPU_HAS_VRAM(psp->adev) ?
820 AMDGPU_GEM_DOMAIN_VRAM :
821 AMDGPU_GEM_DOMAIN_GTT,
822 &psp->tmr_bo, &psp->tmr_mc_addr,
823 pptr);
824 }
825
826 return ret;
827}
828
829static bool psp_skip_tmr(struct psp_context *psp)
830{
831 switch (amdgpu_ip_version(psp->adev, MP0_HWIP, 0)) {
832 case IP_VERSION(11, 0, 9):
833 case IP_VERSION(11, 0, 7):
834 case IP_VERSION(13, 0, 2):
835 case IP_VERSION(13, 0, 6):
836 case IP_VERSION(13, 0, 10):
837 return true;
838 default:
839 return false;
840 }
841}
842
843static int psp_tmr_load(struct psp_context *psp)
844{
845 int ret;
846 struct psp_gfx_cmd_resp *cmd;
847
848 /* For Navi12 and CHIP_SIENNA_CICHLID SRIOV, do not set up TMR.
849 * Already set up by host driver.
850 */
851 if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
852 return 0;
853
854 cmd = acquire_psp_cmd_buf(psp);
855
856 psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, psp->tmr_bo);
857 if (psp->tmr_bo)
858 DRM_INFO("reserve 0x%lx from 0x%llx for PSP TMR\n",
859 amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr);
860
861 ret = psp_cmd_submit_buf(psp, NULL, cmd,
862 psp->fence_buf_mc_addr);
863
864 release_psp_cmd_buf(psp);
865
866 return ret;
867}
868
869static void psp_prep_tmr_unload_cmd_buf(struct psp_context *psp,
870 struct psp_gfx_cmd_resp *cmd)
871{
872 if (amdgpu_sriov_vf(psp->adev))
873 cmd->cmd_id = GFX_CMD_ID_DESTROY_VMR;
874 else
875 cmd->cmd_id = GFX_CMD_ID_DESTROY_TMR;
876}
877
878static int psp_tmr_unload(struct psp_context *psp)
879{
880 int ret;
881 struct psp_gfx_cmd_resp *cmd;
882
883 /* skip TMR unload for Navi12 and CHIP_SIENNA_CICHLID SRIOV,
884 * as TMR is not loaded at all
885 */
886 if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
887 return 0;
888
889 cmd = acquire_psp_cmd_buf(psp);
890
891 psp_prep_tmr_unload_cmd_buf(psp, cmd);
892 dev_dbg(psp->adev->dev, "free PSP TMR buffer\n");
893
894 ret = psp_cmd_submit_buf(psp, NULL, cmd,
895 psp->fence_buf_mc_addr);
896
897 release_psp_cmd_buf(psp);
898
899 return ret;
900}
901
902static int psp_tmr_terminate(struct psp_context *psp)
903{
904 return psp_tmr_unload(psp);
905}
906
907int psp_get_fw_attestation_records_addr(struct psp_context *psp,
908 uint64_t *output_ptr)
909{
910 int ret;
911 struct psp_gfx_cmd_resp *cmd;
912
913 if (!output_ptr)
914 return -EINVAL;
915
916 if (amdgpu_sriov_vf(psp->adev))
917 return 0;
918
919 cmd = acquire_psp_cmd_buf(psp);
920
921 cmd->cmd_id = GFX_CMD_ID_GET_FW_ATTESTATION;
922
923 ret = psp_cmd_submit_buf(psp, NULL, cmd,
924 psp->fence_buf_mc_addr);
925
926 if (!ret) {
927 *output_ptr = ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_lo) +
928 ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_hi << 32);
929 }
930
931 release_psp_cmd_buf(psp);
932
933 return ret;
934}
935
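/*
 * Query the PSP boot config via GFX_CMD_ID_BOOT_CFG / BOOTCFG_CMD_GET and
 * report whether the GECC bit is currently set.
 */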
936static int psp_boot_config_get(struct amdgpu_device *adev, uint32_t *boot_cfg)
937{
938 struct psp_context *psp = &adev->psp;
939 struct psp_gfx_cmd_resp *cmd;
940 int ret;
941
942 if (amdgpu_sriov_vf(adev))
943 return 0;
944
945 cmd = acquire_psp_cmd_buf(psp);
946
947 cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
948 cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_GET;
949
950 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
951 if (!ret) {
952 *boot_cfg =
953 (cmd->resp.uresp.boot_cfg.boot_cfg & BOOT_CONFIG_GECC) ? 1 : 0;
954 }
955
956 release_psp_cmd_buf(psp);
957
958 return ret;
959}
960
961static int psp_boot_config_set(struct amdgpu_device *adev, uint32_t boot_cfg)
962{
963 int ret;
964 struct psp_context *psp = &adev->psp;
965 struct psp_gfx_cmd_resp *cmd;
966
967 if (amdgpu_sriov_vf(adev))
968 return 0;
969
970 cmd = acquire_psp_cmd_buf(psp);
971
972 cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
973 cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_SET;
974 cmd->cmd.boot_cfg.boot_config = boot_cfg;
975 cmd->cmd.boot_cfg.boot_config_valid = boot_cfg;
976
977 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
978
979 release_psp_cmd_buf(psp);
980
981 return ret;
982}
983
984static int psp_rl_load(struct amdgpu_device *adev)
985{
986 int ret;
987 struct psp_context *psp = &adev->psp;
988 struct psp_gfx_cmd_resp *cmd;
989
990 if (!is_psp_fw_valid(psp->rl))
991 return 0;
992
993 cmd = acquire_psp_cmd_buf(psp);
994
995 memset(psp->fw_pri_buf, 0, PSP_1_MEG);
996 memcpy(psp->fw_pri_buf, psp->rl.start_addr, psp->rl.size_bytes);
997
998 cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
999 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(psp->fw_pri_mc_addr);
1000 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(psp->fw_pri_mc_addr);
1001 cmd->cmd.cmd_load_ip_fw.fw_size = psp->rl.size_bytes;
1002 cmd->cmd.cmd_load_ip_fw.fw_type = GFX_FW_TYPE_REG_LIST;
1003
1004 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1005
1006 release_psp_cmd_buf(psp);
1007
1008 return ret;
1009}
1010
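/*
 * Ask PSP (GFX_CMD_ID_SRIOV_SPATIAL_PART) to switch the device into the
 * requested number of spatial partitions; skipped when running as a VF.
 */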
1011int psp_spatial_partition(struct psp_context *psp, int mode)
1012{
1013 struct psp_gfx_cmd_resp *cmd;
1014 int ret;
1015
1016 if (amdgpu_sriov_vf(psp->adev))
1017 return 0;
1018
1019 cmd = acquire_psp_cmd_buf(psp);
1020
1021 cmd->cmd_id = GFX_CMD_ID_SRIOV_SPATIAL_PART;
1022 cmd->cmd.cmd_spatial_part.mode = mode;
1023
1024 dev_info(psp->adev->dev, "Requesting %d partitions through PSP", mode);
1025 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1026
1027 release_psp_cmd_buf(psp);
1028
1029 return ret;
1030}
1031
1032static int psp_asd_initialize(struct psp_context *psp)
1033{
1034 int ret;
1035
1036 /* If the PSP version doesn't match the ASD version, ASD loading will fail.
1037 * Add a workaround to bypass it for sriov for now.
1038 * TODO: add a version check to make it common
1039 */
1040 if (amdgpu_sriov_vf(psp->adev) || !psp->asd_context.bin_desc.size_bytes)
1041 return 0;
1042
1043 psp->asd_context.mem_context.shared_mc_addr = 0;
1044 psp->asd_context.mem_context.shared_mem_size = PSP_ASD_SHARED_MEM_SIZE;
1045 psp->asd_context.ta_load_type = GFX_CMD_ID_LOAD_ASD;
1046
1047 ret = psp_ta_load(psp, &psp->asd_context);
1048 if (!ret)
1049 psp->asd_context.initialized = true;
1050
1051 return ret;
1052}
1053
1054static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1055 uint32_t session_id)
1056{
1057 cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
1058 cmd->cmd.cmd_unload_ta.session_id = session_id;
1059}
1060
1061int psp_ta_unload(struct psp_context *psp, struct ta_context *context)
1062{
1063 int ret;
1064 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
1065
1066 psp_prep_ta_unload_cmd_buf(cmd, context->session_id);
1067
1068 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1069
1070 context->resp_status = cmd->resp.status;
1071
1072 release_psp_cmd_buf(psp);
1073
1074 return ret;
1075}
1076
1077static int psp_asd_terminate(struct psp_context *psp)
1078{
1079 int ret;
1080
1081 if (amdgpu_sriov_vf(psp->adev))
1082 return 0;
1083
1084 if (!psp->asd_context.initialized)
1085 return 0;
1086
1087 ret = psp_ta_unload(psp, &psp->asd_context);
1088 if (!ret)
1089 psp->asd_context.initialized = false;
1090
1091 return ret;
1092}
1093
1094static void psp_prep_reg_prog_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1095 uint32_t id, uint32_t value)
1096{
1097 cmd->cmd_id = GFX_CMD_ID_PROG_REG;
1098 cmd->cmd.cmd_setup_reg_prog.reg_value = value;
1099 cmd->cmd.cmd_setup_reg_prog.reg_id = id;
1100}
1101
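/*
 * Program one of the PSP-owned registers (enum psp_reg_prog_id) to the given
 * value through the GFX_CMD_ID_PROG_REG interface.
 */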
1102int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg,
1103 uint32_t value)
1104{
1105 struct psp_gfx_cmd_resp *cmd;
1106 int ret = 0;
1107
1108 if (reg >= PSP_REG_LAST)
1109 return -EINVAL;
1110
1111 cmd = acquire_psp_cmd_buf(psp);
1112
1113 psp_prep_reg_prog_cmd_buf(cmd, reg, value);
1114 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
1115 if (ret)
1116 DRM_ERROR("PSP failed to program reg id %d", reg);
1117
1118 release_psp_cmd_buf(psp);
1119
1120 return ret;
1121}
1122
1123static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1124 uint64_t ta_bin_mc,
1125 struct ta_context *context)
1126{
1127 cmd->cmd_id = context->ta_load_type;
1128 cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(ta_bin_mc);
1129 cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(ta_bin_mc);
1130 cmd->cmd.cmd_load_ta.app_len = context->bin_desc.size_bytes;
1131
1132 cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo =
1133 lower_32_bits(context->mem_context.shared_mc_addr);
1134 cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi =
1135 upper_32_bits(context->mem_context.shared_mc_addr);
1136 cmd->cmd.cmd_load_ta.cmd_buf_len = context->mem_context.shared_mem_size;
1137}
1138
1139int psp_ta_init_shared_buf(struct psp_context *psp,
1140 struct ta_mem_context *mem_ctx)
1141{
1142 /*
1143 * Allocate 16k memory aligned to 4k from Frame Buffer (local
1144 * physical) for ta to host memory
1145 */
1146 return amdgpu_bo_create_kernel(psp->adev, mem_ctx->shared_mem_size,
1147 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM |
1148 AMDGPU_GEM_DOMAIN_GTT,
1149 &mem_ctx->shared_bo,
1150 &mem_ctx->shared_mc_addr,
1151 &mem_ctx->shared_buf);
1152}
1153
1154static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
1155 uint32_t ta_cmd_id,
1156 uint32_t session_id)
1157{
1158 cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
1159 cmd->cmd.cmd_invoke_cmd.session_id = session_id;
1160 cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
1161}
1162
1163int psp_ta_invoke(struct psp_context *psp,
1164 uint32_t ta_cmd_id,
1165 struct ta_context *context)
1166{
1167 int ret;
1168 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
1169
1170 psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, context->session_id);
1171
1172 ret = psp_cmd_submit_buf(psp, NULL, cmd,
1173 psp->fence_buf_mc_addr);
1174
1175 context->resp_status = cmd->resp.status;
1176
1177 release_psp_cmd_buf(psp);
1178
1179 return ret;
1180}
1181
1182int psp_ta_load(struct psp_context *psp, struct ta_context *context)
1183{
1184 int ret;
1185 struct psp_gfx_cmd_resp *cmd;
1186
1187 cmd = acquire_psp_cmd_buf(psp);
1188
1189 psp_copy_fw(psp, context->bin_desc.start_addr,
1190 context->bin_desc.size_bytes);
1191
1192 psp_prep_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, context);
1193
1194 ret = psp_cmd_submit_buf(psp, NULL, cmd,
1195 psp->fence_buf_mc_addr);
1196
1197 context->resp_status = cmd->resp.status;
1198
1199 if (!ret)
1200 context->session_id = cmd->resp.session_id;
1201
1202 release_psp_cmd_buf(psp);
1203
1204 return ret;
1205}
1206
1207int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1208{
1209 return psp_ta_invoke(psp, ta_cmd_id, &psp->xgmi_context.context);
1210}
1211
1212int psp_xgmi_terminate(struct psp_context *psp)
1213{
1214 int ret;
1215 struct amdgpu_device *adev = psp->adev;
1216
1217 /* XGMI TA unload currently is not supported on Arcturus/Aldebaran A+A */
1218 if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) ||
1219 (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) &&
1220 adev->gmc.xgmi.connected_to_cpu))
1221 return 0;
1222
1223 if (!psp->xgmi_context.context.initialized)
1224 return 0;
1225
1226 ret = psp_ta_unload(psp, &psp->xgmi_context.context);
1227
1228 psp->xgmi_context.context.initialized = false;
1229
1230 return ret;
1231}
1232
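/*
 * Load the XGMI TA (allocating its shared buffer first) when load_ta is set,
 * then invoke TA_COMMAND_XGMI__INITIALIZE and cache the TA capability flags.
 */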
1233int psp_xgmi_initialize(struct psp_context *psp, bool set_extended_data, bool load_ta)
1234{
1235 struct ta_xgmi_shared_memory *xgmi_cmd;
1236 int ret;
1237
1238 if (!psp->ta_fw ||
1239 !psp->xgmi_context.context.bin_desc.size_bytes ||
1240 !psp->xgmi_context.context.bin_desc.start_addr)
1241 return -ENOENT;
1242
1243 if (!load_ta)
1244 goto invoke;
1245
1246 psp->xgmi_context.context.mem_context.shared_mem_size = PSP_XGMI_SHARED_MEM_SIZE;
1247 psp->xgmi_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1248
1249 if (!psp->xgmi_context.context.mem_context.shared_buf) {
1250 ret = psp_ta_init_shared_buf(psp, &psp->xgmi_context.context.mem_context);
1251 if (ret)
1252 return ret;
1253 }
1254
1255 /* Load XGMI TA */
1256 ret = psp_ta_load(psp, &psp->xgmi_context.context);
1257 if (!ret)
1258 psp->xgmi_context.context.initialized = true;
1259 else
1260 return ret;
1261
1262invoke:
1263 /* Initialize XGMI session */
1264 xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.context.mem_context.shared_buf);
1265 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1266 xgmi_cmd->flag_extend_link_record = set_extended_data;
1267 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE;
1268
1269 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1270 /* note down the capability flag for the XGMI TA */
1271 psp->xgmi_context.xgmi_ta_caps = xgmi_cmd->caps_flag;
1272
1273 return ret;
1274}
1275
1276int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id)
1277{
1278 struct ta_xgmi_shared_memory *xgmi_cmd;
1279 int ret;
1280
1281 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1282 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1283
1284 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;
1285
1286 /* Invoke xgmi ta to get hive id */
1287 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1288 if (ret)
1289 return ret;
1290
1291 *hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;
1292
1293 return 0;
1294}
1295
1296int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
1297{
1298 struct ta_xgmi_shared_memory *xgmi_cmd;
1299 int ret;
1300
1301 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1302 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1303
1304 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID;
1305
1306 /* Invoke xgmi ta to get the node id */
1307 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1308 if (ret)
1309 return ret;
1310
1311 *node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id;
1312
1313 return 0;
1314}
1315
1316static bool psp_xgmi_peer_link_info_supported(struct psp_context *psp)
1317{
1318 return (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
1319 IP_VERSION(13, 0, 2) &&
1320 psp->xgmi_context.context.bin_desc.fw_version >= 0x2000000b) ||
1321 amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >=
1322 IP_VERSION(13, 0, 6);
1323}
1324
1325/*
1326 * Chips that support extended topology information require the driver to
1327 * reflect topology information in the opposite direction. This is
1328 * because the TA has already exceeded its link record limit and if the
1329 * TA holds bi-directional information, the driver would have to do
1330 * multiple fetches instead of just two.
1331 */
1332static void psp_xgmi_reflect_topology_info(struct psp_context *psp,
1333 struct psp_xgmi_node_info node_info)
1334{
1335 struct amdgpu_device *mirror_adev;
1336 struct amdgpu_hive_info *hive;
1337 uint64_t src_node_id = psp->adev->gmc.xgmi.node_id;
1338 uint64_t dst_node_id = node_info.node_id;
1339 uint8_t dst_num_hops = node_info.num_hops;
1340 uint8_t dst_num_links = node_info.num_links;
1341
1342 hive = amdgpu_get_xgmi_hive(psp->adev);
1343 list_for_each_entry(mirror_adev, &hive->device_list, gmc.xgmi.head) {
1344 struct psp_xgmi_topology_info *mirror_top_info;
1345 int j;
1346
1347 if (mirror_adev->gmc.xgmi.node_id != dst_node_id)
1348 continue;
1349
1350 mirror_top_info = &mirror_adev->psp.xgmi_context.top_info;
1351 for (j = 0; j < mirror_top_info->num_nodes; j++) {
1352 if (mirror_top_info->nodes[j].node_id != src_node_id)
1353 continue;
1354
1355 mirror_top_info->nodes[j].num_hops = dst_num_hops;
1356 /*
1357 * prevent 0 num_links value re-reflection since reflection
1358 * criteria is based on num_hops (direct or indirect).
1359 *
1360 */
1361 if (dst_num_links)
1362 mirror_top_info->nodes[j].num_links = dst_num_links;
1363
1364 break;
1365 }
1366
1367 break;
1368 }
1369
1370 amdgpu_put_xgmi_hive(hive);
1371}
1372
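/*
 * Fill the shared buffer with the requested node list and invoke
 * GET_TOPOLOGY_INFO; when peer link info is supported, also invoke
 * GET_PEER_LINKS / GET_EXTEND_PEER_LINKS for link counts and, if required,
 * reflect the results back to the peer devices in the hive.
 */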
1373int psp_xgmi_get_topology_info(struct psp_context *psp,
1374 int number_devices,
1375 struct psp_xgmi_topology_info *topology,
1376 bool get_extended_data)
1377{
1378 struct ta_xgmi_shared_memory *xgmi_cmd;
1379 struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
1380 struct ta_xgmi_cmd_get_topology_info_output *topology_info_output;
1381 int i;
1382 int ret;
1383
1384 if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
1385 return -EINVAL;
1386
1387 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1388 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1389 xgmi_cmd->flag_extend_link_record = get_extended_data;
1390
1391 /* Fill in the shared memory with topology information as input */
1392 topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
1393 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_TOPOLOGY_INFO;
1394 topology_info_input->num_nodes = number_devices;
1395
1396 for (i = 0; i < topology_info_input->num_nodes; i++) {
1397 topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
1398 topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
1399 topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
1400 topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
1401 }
1402
1403 /* Invoke xgmi ta to get the topology information */
1404 ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_TOPOLOGY_INFO);
1405 if (ret)
1406 return ret;
1407
1408 /* Read the output topology information from the shared memory */
1409 topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info;
1410 topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes;
1411 for (i = 0; i < topology->num_nodes; i++) {
1412 /* extended data will either be 0 or equal to non-extended data */
1413 if (topology_info_output->nodes[i].num_hops)
1414 topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;
1415
1416 /* non-extended data gets everything here so no need to update */
1417 if (!get_extended_data) {
1418 topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
1419 topology->nodes[i].is_sharing_enabled =
1420 topology_info_output->nodes[i].is_sharing_enabled;
1421 topology->nodes[i].sdma_engine =
1422 topology_info_output->nodes[i].sdma_engine;
1423 }
1424
1425 }
1426
1427 /* Invoke xgmi ta again to get the link information */
1428 if (psp_xgmi_peer_link_info_supported(psp)) {
1429 struct ta_xgmi_cmd_get_peer_link_info *link_info_output;
1430 struct ta_xgmi_cmd_get_extend_peer_link_info *link_extend_info_output;
1431 bool requires_reflection =
1432 (psp->xgmi_context.supports_extended_data &&
1433 get_extended_data) ||
1434 amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
1435 IP_VERSION(13, 0, 6);
1436 bool ta_port_num_support = psp->xgmi_context.xgmi_ta_caps &
1437 EXTEND_PEER_LINK_INFO_CMD_FLAG;
1438
1439 /* populate the shared output buffer rather than the cmd input buffer
1440 * with node_ids as the input for GET_PEER_LINKS command execution.
1441 * This is required by the xgmi ta implementation of GET_PEER_LINKS.
1442 * The same requirement applies to the GET_EXTEND_PEER_LINKS command.
1443 */
1444 if (ta_port_num_support) {
1445 link_extend_info_output = &xgmi_cmd->xgmi_out_message.get_extend_link_info;
1446
1447 for (i = 0; i < topology->num_nodes; i++)
1448 link_extend_info_output->nodes[i].node_id = topology->nodes[i].node_id;
1449
1450 link_extend_info_output->num_nodes = topology->num_nodes;
1451 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_EXTEND_PEER_LINKS;
1452 } else {
1453 link_info_output = &xgmi_cmd->xgmi_out_message.get_link_info;
1454
1455 for (i = 0; i < topology->num_nodes; i++)
1456 link_info_output->nodes[i].node_id = topology->nodes[i].node_id;
1457
1458 link_info_output->num_nodes = topology->num_nodes;
1459 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_PEER_LINKS;
1460 }
1461
1462 ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
1463 if (ret)
1464 return ret;
1465
1466 for (i = 0; i < topology->num_nodes; i++) {
1467 uint8_t node_num_links = ta_port_num_support ?
1468 link_extend_info_output->nodes[i].num_links : link_info_output->nodes[i].num_links;
1469 /* accumulate num_links on extended data */
1470 if (get_extended_data) {
1471 topology->nodes[i].num_links = topology->nodes[i].num_links + node_num_links;
1472 } else {
1473 topology->nodes[i].num_links = (requires_reflection && topology->nodes[i].num_links) ?
1474 topology->nodes[i].num_links : node_num_links;
1475 }
1476
1477 /* reflect the topology information for bi-directionality */
1478 if (requires_reflection && topology->nodes[i].num_hops)
1479 psp_xgmi_reflect_topology_info(psp, topology->nodes[i]);
1480 }
1481 }
1482
1483 return 0;
1484}
1485
1486int psp_xgmi_set_topology_info(struct psp_context *psp,
1487 int number_devices,
1488 struct psp_xgmi_topology_info *topology)
1489{
1490 struct ta_xgmi_shared_memory *xgmi_cmd;
1491 struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
1492 int i;
1493
1494 if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
1495 return -EINVAL;
1496
1497 xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
1498 memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
1499
1500 topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
1501 xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO;
1502 topology_info_input->num_nodes = number_devices;
1503
1504 for (i = 0; i < topology_info_input->num_nodes; i++) {
1505 topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
1506 topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
1507 topology_info_input->nodes[i].is_sharing_enabled = 1;
1508 topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
1509 }
1510
1511 /* Invoke xgmi ta to set topology information */
1512 return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO);
1513}
1514
1515// ras begin
1516static void psp_ras_ta_check_status(struct psp_context *psp)
1517{
1518 struct ta_ras_shared_memory *ras_cmd =
1519 (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1520
1521 switch (ras_cmd->ras_status) {
1522 case TA_RAS_STATUS__ERROR_UNSUPPORTED_IP:
1523 dev_warn(psp->adev->dev,
1524 "RAS WARNING: cmd failed due to unsupported ip\n");
1525 break;
1526 case TA_RAS_STATUS__ERROR_UNSUPPORTED_ERROR_INJ:
1527 dev_warn(psp->adev->dev,
1528 "RAS WARNING: cmd failed due to unsupported error injection\n");
1529 break;
1530 case TA_RAS_STATUS__SUCCESS:
1531 break;
1532 case TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED:
1533 if (ras_cmd->cmd_id == TA_RAS_COMMAND__TRIGGER_ERROR)
1534 dev_warn(psp->adev->dev,
1535 "RAS WARNING: Inject error to critical region is not allowed\n");
1536 break;
1537 default:
1538 dev_warn(psp->adev->dev,
1539 "RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status);
1540 break;
1541 }
1542}
1543
1544int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1545{
1546 struct ta_ras_shared_memory *ras_cmd;
1547 int ret;
1548
1549 ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1550
1551 /*
1552 * TODO: bypass the loading in sriov for now
1553 */
1554 if (amdgpu_sriov_vf(psp->adev))
1555 return 0;
1556
1557 ret = psp_ta_invoke(psp, ta_cmd_id, &psp->ras_context.context);
1558
1559 if (amdgpu_ras_intr_triggered())
1560 return ret;
1561
1562 if (ras_cmd->if_version > RAS_TA_HOST_IF_VER) {
1563 DRM_WARN("RAS: Unsupported Interface");
1564 return -EINVAL;
1565 }
1566
1567 if (!ret) {
1568 if (ras_cmd->ras_out_message.flags.err_inject_switch_disable_flag) {
1569 dev_warn(psp->adev->dev, "ECC switch disabled\n");
1570
1571 ras_cmd->ras_status = TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE;
1572 } else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag)
1573 dev_warn(psp->adev->dev,
1574 "RAS internal register access blocked\n");
1575
1576 psp_ras_ta_check_status(psp);
1577 }
1578
1579 return ret;
1580}
1581
1582int psp_ras_enable_features(struct psp_context *psp,
1583 union ta_ras_cmd_input *info, bool enable)
1584{
1585 struct ta_ras_shared_memory *ras_cmd;
1586 int ret;
1587
1588 if (!psp->ras_context.context.initialized)
1589 return -EINVAL;
1590
1591 ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1592 memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1593
1594 if (enable)
1595 ras_cmd->cmd_id = TA_RAS_COMMAND__ENABLE_FEATURES;
1596 else
1597 ras_cmd->cmd_id = TA_RAS_COMMAND__DISABLE_FEATURES;
1598
1599 ras_cmd->ras_in_message = *info;
1600
1601 ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
1602 if (ret)
1603 return -EINVAL;
1604
1605 return 0;
1606}
1607
1608int psp_ras_terminate(struct psp_context *psp)
1609{
1610 int ret;
1611
1612 /*
1613 * TODO: bypass the terminate in sriov for now
1614 */
1615 if (amdgpu_sriov_vf(psp->adev))
1616 return 0;
1617
1618 if (!psp->ras_context.context.initialized)
1619 return 0;
1620
1621 ret = psp_ta_unload(psp, &psp->ras_context.context);
1622
1623 psp->ras_context.context.initialized = false;
1624
1625 return ret;
1626}
1627
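/*
 * Reconcile the GECC boot config with the RAS module parameters, allocate
 * the RAS TA shared buffer, and load the RAS TA with the poison-mode,
 * dGPU-mode and xcc-mask init flags. Skipped under SR-IOV.
 */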
1628int psp_ras_initialize(struct psp_context *psp)
1629{
1630 int ret;
1631 uint32_t boot_cfg = 0xFF;
1632 struct amdgpu_device *adev = psp->adev;
1633 struct ta_ras_shared_memory *ras_cmd;
1634
1635 /*
1636 * TODO: bypass the initialize in sriov for now
1637 */
1638 if (amdgpu_sriov_vf(adev))
1639 return 0;
1640
1641 if (!adev->psp.ras_context.context.bin_desc.size_bytes ||
1642 !adev->psp.ras_context.context.bin_desc.start_addr) {
1643 dev_info(adev->dev, "RAS: optional ras ta ucode is not available\n");
1644 return 0;
1645 }
1646
1647 if (amdgpu_atomfirmware_dynamic_boot_config_supported(adev)) {
1648 /* query GECC enablement status from boot config
1649 * boot_cfg: 1: GECC is enabled or 0: GECC is disabled
1650 */
1651 ret = psp_boot_config_get(adev, &boot_cfg);
1652 if (ret)
1653 dev_warn(adev->dev, "PSP get boot config failed\n");
1654
1655 if (!amdgpu_ras_is_supported(psp->adev, AMDGPU_RAS_BLOCK__UMC)) {
1656 if (!boot_cfg) {
1657 dev_info(adev->dev, "GECC is disabled\n");
1658 } else {
1659 /* disable GECC in the next boot cycle if ras is
1660 * disabled by the module parameters amdgpu_ras_enable
1661 * and/or amdgpu_ras_mask, or if the boot_config_get
1662 * call failed
1663 */
1664 ret = psp_boot_config_set(adev, 0);
1665 if (ret)
1666 dev_warn(adev->dev, "PSP set boot config failed\n");
1667 else
1668 dev_warn(adev->dev, "GECC will be disabled in next boot cycle if set amdgpu_ras_enable and/or amdgpu_ras_mask to 0x0\n");
1669 }
1670 } else {
1671 if (boot_cfg == 1) {
1672 dev_info(adev->dev, "GECC is enabled\n");
1673 } else {
1674 /* enable GECC in next boot cycle if it is disabled
1675 * in boot config, or force enable GECC if failed to
1676 * get boot configuration
1677 */
1678 ret = psp_boot_config_set(adev, BOOT_CONFIG_GECC);
1679 if (ret)
1680 dev_warn(adev->dev, "PSP set boot config failed\n");
1681 else
1682 dev_warn(adev->dev, "GECC will be enabled in next boot cycle\n");
1683 }
1684 }
1685 }
1686
1687 psp->ras_context.context.mem_context.shared_mem_size = PSP_RAS_SHARED_MEM_SIZE;
1688 psp->ras_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1689
1690 if (!psp->ras_context.context.mem_context.shared_buf) {
1691 ret = psp_ta_init_shared_buf(psp, &psp->ras_context.context.mem_context);
1692 if (ret)
1693 return ret;
1694 }
1695
1696 ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1697 memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1698
1699 if (amdgpu_ras_is_poison_mode_supported(adev))
1700 ras_cmd->ras_in_message.init_flags.poison_mode_en = 1;
1701 if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu)
1702 ras_cmd->ras_in_message.init_flags.dgpu_mode = 1;
1703 ras_cmd->ras_in_message.init_flags.xcc_mask =
1704 adev->gfx.xcc_mask;
1705 ras_cmd->ras_in_message.init_flags.channel_dis_num = hweight32(adev->gmc.m_half_use) * 2;
1706
1707 ret = psp_ta_load(psp, &psp->ras_context.context);
1708
1709 if (!ret && !ras_cmd->ras_status)
1710 psp->ras_context.context.initialized = true;
1711 else {
1712 if (ras_cmd->ras_status)
1713 dev_warn(psp->adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status);
1714
1715 /* fail to load RAS TA */
1716 psp->ras_context.context.initialized = false;
1717 }
1718
1719 return ret;
1720}
1721
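/*
 * Encode the per-IP instance mask into sub_block_index and invoke the RAS TA
 * TRIGGER_ERROR command; a RAS fatal-error interrupt is treated as success
 * because the TA return status is no longer reliable at that point.
 */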
1722int psp_ras_trigger_error(struct psp_context *psp,
1723 struct ta_ras_trigger_error_input *info, uint32_t instance_mask)
1724{
1725 struct ta_ras_shared_memory *ras_cmd;
1726 struct amdgpu_device *adev = psp->adev;
1727 int ret;
1728 uint32_t dev_mask;
1729
1730 if (!psp->ras_context.context.initialized)
1731 return -EINVAL;
1732
1733 switch (info->block_id) {
1734 case TA_RAS_BLOCK__GFX:
1735 dev_mask = GET_MASK(GC, instance_mask);
1736 break;
1737 case TA_RAS_BLOCK__SDMA:
1738 dev_mask = GET_MASK(SDMA0, instance_mask);
1739 break;
1740 case TA_RAS_BLOCK__VCN:
1741 case TA_RAS_BLOCK__JPEG:
1742 dev_mask = GET_MASK(VCN, instance_mask);
1743 break;
1744 default:
1745 dev_mask = instance_mask;
1746 break;
1747 }
1748
1749 /* reuse sub_block_index for backward compatibility */
1750 dev_mask <<= AMDGPU_RAS_INST_SHIFT;
1751 dev_mask &= AMDGPU_RAS_INST_MASK;
1752 info->sub_block_index |= dev_mask;
1753
1754 ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
1755 memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
1756
1757 ras_cmd->cmd_id = TA_RAS_COMMAND__TRIGGER_ERROR;
1758 ras_cmd->ras_in_message.trigger_error = *info;
1759
1760 ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
1761 if (ret)
1762 return -EINVAL;
1763
1764 /* If err_event_athub occurs, the error injection was successful; however,
1765 * the return status from the TA is no longer reliable
1766 */
1767 if (amdgpu_ras_intr_triggered())
1768 return 0;
1769
1770 if (ras_cmd->ras_status == TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED)
1771 return -EACCES;
1772 else if (ras_cmd->ras_status)
1773 return -EINVAL;
1774
1775 return 0;
1776}
1777// ras end
1778
1779// HDCP start
1780static int psp_hdcp_initialize(struct psp_context *psp)
1781{
1782 int ret;
1783
1784 /*
1785 * TODO: bypass the initialize in sriov for now
1786 */
1787 if (amdgpu_sriov_vf(psp->adev))
1788 return 0;
1789
1790 if (!psp->hdcp_context.context.bin_desc.size_bytes ||
1791 !psp->hdcp_context.context.bin_desc.start_addr) {
1792 dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n");
1793 return 0;
1794 }
1795
1796 psp->hdcp_context.context.mem_context.shared_mem_size = PSP_HDCP_SHARED_MEM_SIZE;
1797 psp->hdcp_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1798
1799 if (!psp->hdcp_context.context.mem_context.shared_buf) {
1800 ret = psp_ta_init_shared_buf(psp, &psp->hdcp_context.context.mem_context);
1801 if (ret)
1802 return ret;
1803 }
1804
1805 ret = psp_ta_load(psp, &psp->hdcp_context.context);
1806 if (!ret) {
1807 psp->hdcp_context.context.initialized = true;
1808 mutex_init(&psp->hdcp_context.mutex);
1809 }
1810
1811 return ret;
1812}
1813
1814int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1815{
1816 /*
1817 * TODO: bypass the loading in sriov for now
1818 */
1819 if (amdgpu_sriov_vf(psp->adev))
1820 return 0;
1821
1822 return psp_ta_invoke(psp, ta_cmd_id, &psp->hdcp_context.context);
1823}
1824
1825static int psp_hdcp_terminate(struct psp_context *psp)
1826{
1827 int ret;
1828
1829 /*
1830 * TODO: bypass the terminate in sriov for now
1831 */
1832 if (amdgpu_sriov_vf(psp->adev))
1833 return 0;
1834
1835 if (!psp->hdcp_context.context.initialized)
1836 return 0;
1837
1838 ret = psp_ta_unload(psp, &psp->hdcp_context.context);
1839
1840 psp->hdcp_context.context.initialized = false;
1841
1842 return ret;
1843}
1844// HDCP end
1845
1846// DTM start
1847static int psp_dtm_initialize(struct psp_context *psp)
1848{
1849 int ret;
1850
1851 /*
1852 * TODO: bypass the initialize in sriov for now
1853 */
1854 if (amdgpu_sriov_vf(psp->adev))
1855 return 0;
1856
1857 if (!psp->dtm_context.context.bin_desc.size_bytes ||
1858 !psp->dtm_context.context.bin_desc.start_addr) {
1859 dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n");
1860 return 0;
1861 }
1862
1863 psp->dtm_context.context.mem_context.shared_mem_size = PSP_DTM_SHARED_MEM_SIZE;
1864 psp->dtm_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1865
1866 if (!psp->dtm_context.context.mem_context.shared_buf) {
1867 ret = psp_ta_init_shared_buf(psp, &psp->dtm_context.context.mem_context);
1868 if (ret)
1869 return ret;
1870 }
1871
1872 ret = psp_ta_load(psp, &psp->dtm_context.context);
1873 if (!ret) {
1874 psp->dtm_context.context.initialized = true;
1875 mutex_init(&psp->dtm_context.mutex);
1876 }
1877
1878 return ret;
1879}
1880
1881int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
1882{
1883 /*
1884 * TODO: bypass the loading in sriov for now
1885 */
1886 if (amdgpu_sriov_vf(psp->adev))
1887 return 0;
1888
1889 return psp_ta_invoke(psp, ta_cmd_id, &psp->dtm_context.context);
1890}
1891
1892static int psp_dtm_terminate(struct psp_context *psp)
1893{
1894 int ret;
1895
1896 /*
1897 * TODO: bypass the terminate in sriov for now
1898 */
1899 if (amdgpu_sriov_vf(psp->adev))
1900 return 0;
1901
1902 if (!psp->dtm_context.context.initialized)
1903 return 0;
1904
1905 ret = psp_ta_unload(psp, &psp->dtm_context.context);
1906
1907 psp->dtm_context.context.initialized = false;
1908
1909 return ret;
1910}
1911// DTM end
1912
1913// RAP start
1914static int psp_rap_initialize(struct psp_context *psp)
1915{
1916 int ret;
1917 enum ta_rap_status status = TA_RAP_STATUS__SUCCESS;
1918
1919 /*
1920 * TODO: bypass the initialize in sriov for now
1921 */
1922 if (amdgpu_sriov_vf(psp->adev))
1923 return 0;
1924
1925 if (!psp->rap_context.context.bin_desc.size_bytes ||
1926 !psp->rap_context.context.bin_desc.start_addr) {
1927 dev_info(psp->adev->dev, "RAP: optional rap ta ucode is not available\n");
1928 return 0;
1929 }
1930
1931 psp->rap_context.context.mem_context.shared_mem_size = PSP_RAP_SHARED_MEM_SIZE;
1932 psp->rap_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
1933
1934 if (!psp->rap_context.context.mem_context.shared_buf) {
1935 ret = psp_ta_init_shared_buf(psp, &psp->rap_context.context.mem_context);
1936 if (ret)
1937 return ret;
1938 }
1939
1940 ret = psp_ta_load(psp, &psp->rap_context.context);
1941 if (!ret) {
1942 psp->rap_context.context.initialized = true;
1943 mutex_init(&psp->rap_context.mutex);
1944 } else
1945 return ret;
1946
1947 ret = psp_rap_invoke(psp, TA_CMD_RAP__INITIALIZE, &status);
1948 if (ret || status != TA_RAP_STATUS__SUCCESS) {
1949 psp_rap_terminate(psp);
1950 /* free rap shared memory */
1951 psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);
1952
1953 dev_warn(psp->adev->dev, "RAP TA initialize fail (%d) status %d.\n",
1954 ret, status);
1955
1956 return ret;
1957 }
1958
1959 return 0;
1960}
1961
1962static int psp_rap_terminate(struct psp_context *psp)
1963{
1964 int ret;
1965
1966 if (!psp->rap_context.context.initialized)
1967 return 0;
1968
1969 ret = psp_ta_unload(psp, &psp->rap_context.context);
1970
1971 psp->rap_context.context.initialized = false;
1972
1973 return ret;
1974}
1975
1976int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id, enum ta_rap_status *status)
1977{
1978 struct ta_rap_shared_memory *rap_cmd;
1979 int ret = 0;
1980
1981 if (!psp->rap_context.context.initialized)
1982 return 0;
1983
1984 if (ta_cmd_id != TA_CMD_RAP__INITIALIZE &&
1985 ta_cmd_id != TA_CMD_RAP__VALIDATE_L0)
1986 return -EINVAL;
1987
1988 mutex_lock(&psp->rap_context.mutex);
1989
1990 rap_cmd = (struct ta_rap_shared_memory *)
1991 psp->rap_context.context.mem_context.shared_buf;
1992 memset(rap_cmd, 0, sizeof(struct ta_rap_shared_memory));
1993
1994 rap_cmd->cmd_id = ta_cmd_id;
1995 rap_cmd->validation_method_id = METHOD_A;
1996
1997 ret = psp_ta_invoke(psp, rap_cmd->cmd_id, &psp->rap_context.context);
1998 if (ret)
1999 goto out_unlock;
2000
2001 if (status)
2002 *status = rap_cmd->rap_status;
2003
2004out_unlock:
2005 mutex_unlock(&psp->rap_context.mutex);
2006
2007 return ret;
2008}
2009// RAP end
2010
2011/* securedisplay start */
2012static int psp_securedisplay_initialize(struct psp_context *psp)
2013{
2014 int ret;
2015 struct ta_securedisplay_cmd *securedisplay_cmd;
2016
2017 /*
2018 * TODO: bypass the initialize in sriov for now
2019 */
2020 if (amdgpu_sriov_vf(psp->adev))
2021 return 0;
2022
2023 if (!psp->securedisplay_context.context.bin_desc.size_bytes ||
2024 !psp->securedisplay_context.context.bin_desc.start_addr) {
2025 dev_info(psp->adev->dev, "SECUREDISPLAY: securedisplay ta ucode is not available\n");
2026 return 0;
2027 }
2028
2029 psp->securedisplay_context.context.mem_context.shared_mem_size =
2030 PSP_SECUREDISPLAY_SHARED_MEM_SIZE;
2031 psp->securedisplay_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
2032
2033 if (!psp->securedisplay_context.context.initialized) {
2034 ret = psp_ta_init_shared_buf(psp,
2035 &psp->securedisplay_context.context.mem_context);
2036 if (ret)
2037 return ret;
2038 }
2039
2040 ret = psp_ta_load(psp, &psp->securedisplay_context.context);
2041 if (!ret) {
2042 psp->securedisplay_context.context.initialized = true;
2043 mutex_init(&psp->securedisplay_context.mutex);
2044 } else
2045 return ret;
2046
2047 mutex_lock(&psp->securedisplay_context.mutex);
2048
2049 psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
2050 TA_SECUREDISPLAY_COMMAND__QUERY_TA);
2051
2052 ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__QUERY_TA);
2053
2054 mutex_unlock(&psp->securedisplay_context.mutex);
2055
2056 if (ret) {
2057 psp_securedisplay_terminate(psp);
2058 /* free securedisplay shared memory */
2059 psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
2060 dev_err(psp->adev->dev, "SECUREDISPLAY TA initialize fail.\n");
2061 return -EINVAL;
2062 }
2063
2064 if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) {
2065 psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status);
2066 dev_err(psp->adev->dev, "SECUREDISPLAY: query securedisplay TA failed. ret 0x%x\n",
2067 securedisplay_cmd->securedisplay_out_message.query_ta.query_cmd_ret);
2068 /* don't try again */
2069 psp->securedisplay_context.context.bin_desc.size_bytes = 0;
2070 }
2071
2072 return 0;
2073}
2074
2075static int psp_securedisplay_terminate(struct psp_context *psp)
2076{
2077 int ret;
2078
2079 /*
2080 * TODO: bypass the terminate in sriov for now
2081 */
2082 if (amdgpu_sriov_vf(psp->adev))
2083 return 0;
2084
2085 if (!psp->securedisplay_context.context.initialized)
2086 return 0;
2087
2088 ret = psp_ta_unload(psp, &psp->securedisplay_context.context);
2089
2090 psp->securedisplay_context.context.initialized = false;
2091
2092 return ret;
2093}
2094
2095int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
2096{
2097 int ret;
2098
2099 if (!psp->securedisplay_context.context.initialized)
2100 return -EINVAL;
2101
2102 if (ta_cmd_id != TA_SECUREDISPLAY_COMMAND__QUERY_TA &&
2103 ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC)
2104 return -EINVAL;
2105
2106 ret = psp_ta_invoke(psp, ta_cmd_id, &psp->securedisplay_context.context);
2107
2108 return ret;
2109}
2110/* SECUREDISPLAY end */
2111
2112int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev)
2113{
2114 struct psp_context *psp = &adev->psp;
2115 int ret = 0;
2116
2117 if (!amdgpu_sriov_vf(adev) && psp->funcs && psp->funcs->wait_for_bootloader != NULL)
2118 ret = psp->funcs->wait_for_bootloader(psp);
2119
2120 return ret;
2121}
2122
2123int amdgpu_psp_query_boot_status(struct amdgpu_device *adev)
2124{
2125 struct psp_context *psp = &adev->psp;
2126 int ret = 0;
2127
2128 if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU))
2129 return 0;
2130
2131 if (psp->funcs &&
2132 psp->funcs->query_boot_status)
2133 ret = psp->funcs->query_boot_status(psp);
2134
2135 return ret;
2136}
2137
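/*
 * Bring up the PSP: on bare metal, stage the optional bootloader components
 * (KDB, SPL, sys/soc/intf/dbg/ras drivers) and then the secure OS, in the
 * order coded below; afterwards create the KM ring, set up the TMR and, for
 * ASICs with PMFW-centralized DF Cstate management, load the SMU firmware
 * before the TMR is handed to the PSP.
 */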
2138static int psp_hw_start(struct psp_context *psp)
2139{
2140 struct amdgpu_device *adev = psp->adev;
2141 int ret;
2142
2143 if (!amdgpu_sriov_vf(adev)) {
2144 if ((is_psp_fw_valid(psp->kdb)) &&
2145 (psp->funcs->bootloader_load_kdb != NULL)) {
2146 ret = psp_bootloader_load_kdb(psp);
2147 if (ret) {
2148 DRM_ERROR("PSP load kdb failed!\n");
2149 return ret;
2150 }
2151 }
2152
2153 if ((is_psp_fw_valid(psp->spl)) &&
2154 (psp->funcs->bootloader_load_spl != NULL)) {
2155 ret = psp_bootloader_load_spl(psp);
2156 if (ret) {
2157 DRM_ERROR("PSP load spl failed!\n");
2158 return ret;
2159 }
2160 }
2161
2162 if ((is_psp_fw_valid(psp->sys)) &&
2163 (psp->funcs->bootloader_load_sysdrv != NULL)) {
2164 ret = psp_bootloader_load_sysdrv(psp);
2165 if (ret) {
2166 DRM_ERROR("PSP load sys drv failed!\n");
2167 return ret;
2168 }
2169 }
2170
2171 if ((is_psp_fw_valid(psp->soc_drv)) &&
2172 (psp->funcs->bootloader_load_soc_drv != NULL)) {
2173 ret = psp_bootloader_load_soc_drv(psp);
2174 if (ret) {
2175 DRM_ERROR("PSP load soc drv failed!\n");
2176 return ret;
2177 }
2178 }
2179
2180 if ((is_psp_fw_valid(psp->intf_drv)) &&
2181 (psp->funcs->bootloader_load_intf_drv != NULL)) {
2182 ret = psp_bootloader_load_intf_drv(psp);
2183 if (ret) {
2184 DRM_ERROR("PSP load intf drv failed!\n");
2185 return ret;
2186 }
2187 }
2188
2189 if ((is_psp_fw_valid(psp->dbg_drv)) &&
2190 (psp->funcs->bootloader_load_dbg_drv != NULL)) {
2191 ret = psp_bootloader_load_dbg_drv(psp);
2192 if (ret) {
2193 DRM_ERROR("PSP load dbg drv failed!\n");
2194 return ret;
2195 }
2196 }
2197
2198 if ((is_psp_fw_valid(psp->ras_drv)) &&
2199 (psp->funcs->bootloader_load_ras_drv != NULL)) {
2200 ret = psp_bootloader_load_ras_drv(psp);
2201 if (ret) {
2202 DRM_ERROR("PSP load ras_drv failed!\n");
2203 return ret;
2204 }
2205 }
2206
2207 if ((is_psp_fw_valid(psp->sos)) &&
2208 (psp->funcs->bootloader_load_sos != NULL)) {
2209 ret = psp_bootloader_load_sos(psp);
2210 if (ret) {
2211 DRM_ERROR("PSP load sos failed!\n");
2212 return ret;
2213 }
2214 }
2215 }
2216
2217 ret = psp_ring_create(psp, PSP_RING_TYPE__KM);
2218 if (ret) {
2219 DRM_ERROR("PSP create ring failed!\n");
2220 return ret;
2221 }
2222
2223 if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev))
2224 goto skip_pin_bo;
2225
2226 if (!psp_boottime_tmr(psp)) {
2227 ret = psp_tmr_init(psp);
2228 if (ret) {
2229 DRM_ERROR("PSP tmr init failed!\n");
2230 return ret;
2231 }
2232 }
2233
2234skip_pin_bo:
2235 /*
2236 * For ASICs with DF Cstate management centralized
2237 * to PMFW, TMR setup should be performed after PMFW
2238 * loaded and before other non-psp firmware loaded.
2239 */
2240 if (psp->pmfw_centralized_cstate_management) {
2241 ret = psp_load_smu_fw(psp);
2242 if (ret)
2243 return ret;
2244 }
2245
2246 ret = psp_tmr_load(psp);
2247 if (ret) {
2248 DRM_ERROR("PSP load tmr failed!\n");
2249 return ret;
2250 }
2251
2252 return 0;
2253}
2254
2255static int psp_get_fw_type(struct amdgpu_firmware_info *ucode,
2256 enum psp_gfx_fw_type *type)
2257{
2258 switch (ucode->ucode_id) {
2259 case AMDGPU_UCODE_ID_CAP:
2260 *type = GFX_FW_TYPE_CAP;
2261 break;
2262 case AMDGPU_UCODE_ID_SDMA0:
2263 *type = GFX_FW_TYPE_SDMA0;
2264 break;
2265 case AMDGPU_UCODE_ID_SDMA1:
2266 *type = GFX_FW_TYPE_SDMA1;
2267 break;
2268 case AMDGPU_UCODE_ID_SDMA2:
2269 *type = GFX_FW_TYPE_SDMA2;
2270 break;
2271 case AMDGPU_UCODE_ID_SDMA3:
2272 *type = GFX_FW_TYPE_SDMA3;
2273 break;
2274 case AMDGPU_UCODE_ID_SDMA4:
2275 *type = GFX_FW_TYPE_SDMA4;
2276 break;
2277 case AMDGPU_UCODE_ID_SDMA5:
2278 *type = GFX_FW_TYPE_SDMA5;
2279 break;
2280 case AMDGPU_UCODE_ID_SDMA6:
2281 *type = GFX_FW_TYPE_SDMA6;
2282 break;
2283 case AMDGPU_UCODE_ID_SDMA7:
2284 *type = GFX_FW_TYPE_SDMA7;
2285 break;
2286 case AMDGPU_UCODE_ID_CP_MES:
2287 *type = GFX_FW_TYPE_CP_MES;
2288 break;
2289 case AMDGPU_UCODE_ID_CP_MES_DATA:
2290 *type = GFX_FW_TYPE_MES_STACK;
2291 break;
2292 case AMDGPU_UCODE_ID_CP_MES1:
2293 *type = GFX_FW_TYPE_CP_MES_KIQ;
2294 break;
2295 case AMDGPU_UCODE_ID_CP_MES1_DATA:
2296 *type = GFX_FW_TYPE_MES_KIQ_STACK;
2297 break;
2298 case AMDGPU_UCODE_ID_CP_CE:
2299 *type = GFX_FW_TYPE_CP_CE;
2300 break;
2301 case AMDGPU_UCODE_ID_CP_PFP:
2302 *type = GFX_FW_TYPE_CP_PFP;
2303 break;
2304 case AMDGPU_UCODE_ID_CP_ME:
2305 *type = GFX_FW_TYPE_CP_ME;
2306 break;
2307 case AMDGPU_UCODE_ID_CP_MEC1:
2308 *type = GFX_FW_TYPE_CP_MEC;
2309 break;
2310 case AMDGPU_UCODE_ID_CP_MEC1_JT:
2311 *type = GFX_FW_TYPE_CP_MEC_ME1;
2312 break;
2313 case AMDGPU_UCODE_ID_CP_MEC2:
2314 *type = GFX_FW_TYPE_CP_MEC;
2315 break;
2316 case AMDGPU_UCODE_ID_CP_MEC2_JT:
2317 *type = GFX_FW_TYPE_CP_MEC_ME2;
2318 break;
2319 case AMDGPU_UCODE_ID_RLC_P:
2320 *type = GFX_FW_TYPE_RLC_P;
2321 break;
2322 case AMDGPU_UCODE_ID_RLC_V:
2323 *type = GFX_FW_TYPE_RLC_V;
2324 break;
2325 case AMDGPU_UCODE_ID_RLC_G:
2326 *type = GFX_FW_TYPE_RLC_G;
2327 break;
2328 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL:
2329 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_CNTL;
2330 break;
2331 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM:
2332 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM;
2333 break;
2334 case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM:
2335 *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM;
2336 break;
2337 case AMDGPU_UCODE_ID_RLC_IRAM:
2338 *type = GFX_FW_TYPE_RLC_IRAM;
2339 break;
2340 case AMDGPU_UCODE_ID_RLC_DRAM:
2341 *type = GFX_FW_TYPE_RLC_DRAM_BOOT;
2342 break;
2343 case AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS:
2344 *type = GFX_FW_TYPE_GLOBAL_TAP_DELAYS;
2345 break;
2346 case AMDGPU_UCODE_ID_SE0_TAP_DELAYS:
2347 *type = GFX_FW_TYPE_SE0_TAP_DELAYS;
2348 break;
2349 case AMDGPU_UCODE_ID_SE1_TAP_DELAYS:
2350 *type = GFX_FW_TYPE_SE1_TAP_DELAYS;
2351 break;
2352 case AMDGPU_UCODE_ID_SE2_TAP_DELAYS:
2353 *type = GFX_FW_TYPE_SE2_TAP_DELAYS;
2354 break;
2355 case AMDGPU_UCODE_ID_SE3_TAP_DELAYS:
2356 *type = GFX_FW_TYPE_SE3_TAP_DELAYS;
2357 break;
2358 case AMDGPU_UCODE_ID_SMC:
2359 *type = GFX_FW_TYPE_SMU;
2360 break;
2361 case AMDGPU_UCODE_ID_PPTABLE:
2362 *type = GFX_FW_TYPE_PPTABLE;
2363 break;
2364 case AMDGPU_UCODE_ID_UVD:
2365 *type = GFX_FW_TYPE_UVD;
2366 break;
2367 case AMDGPU_UCODE_ID_UVD1:
2368 *type = GFX_FW_TYPE_UVD1;
2369 break;
2370 case AMDGPU_UCODE_ID_VCE:
2371 *type = GFX_FW_TYPE_VCE;
2372 break;
2373 case AMDGPU_UCODE_ID_VCN:
2374 *type = GFX_FW_TYPE_VCN;
2375 break;
2376 case AMDGPU_UCODE_ID_VCN1:
2377 *type = GFX_FW_TYPE_VCN1;
2378 break;
2379 case AMDGPU_UCODE_ID_DMCU_ERAM:
2380 *type = GFX_FW_TYPE_DMCU_ERAM;
2381 break;
2382 case AMDGPU_UCODE_ID_DMCU_INTV:
2383 *type = GFX_FW_TYPE_DMCU_ISR;
2384 break;
2385 case AMDGPU_UCODE_ID_VCN0_RAM:
2386 *type = GFX_FW_TYPE_VCN0_RAM;
2387 break;
2388 case AMDGPU_UCODE_ID_VCN1_RAM:
2389 *type = GFX_FW_TYPE_VCN1_RAM;
2390 break;
2391 case AMDGPU_UCODE_ID_DMCUB:
2392 *type = GFX_FW_TYPE_DMUB;
2393 break;
2394 case AMDGPU_UCODE_ID_SDMA_UCODE_TH0:
2395 *type = GFX_FW_TYPE_SDMA_UCODE_TH0;
2396 break;
2397 case AMDGPU_UCODE_ID_SDMA_UCODE_TH1:
2398 *type = GFX_FW_TYPE_SDMA_UCODE_TH1;
2399 break;
2400 case AMDGPU_UCODE_ID_IMU_I:
2401 *type = GFX_FW_TYPE_IMU_I;
2402 break;
2403 case AMDGPU_UCODE_ID_IMU_D:
2404 *type = GFX_FW_TYPE_IMU_D;
2405 break;
2406 case AMDGPU_UCODE_ID_CP_RS64_PFP:
2407 *type = GFX_FW_TYPE_RS64_PFP;
2408 break;
2409 case AMDGPU_UCODE_ID_CP_RS64_ME:
2410 *type = GFX_FW_TYPE_RS64_ME;
2411 break;
2412 case AMDGPU_UCODE_ID_CP_RS64_MEC:
2413 *type = GFX_FW_TYPE_RS64_MEC;
2414 break;
2415 case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK:
2416 *type = GFX_FW_TYPE_RS64_PFP_P0_STACK;
2417 break;
2418 case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK:
2419 *type = GFX_FW_TYPE_RS64_PFP_P1_STACK;
2420 break;
2421 case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK:
2422 *type = GFX_FW_TYPE_RS64_ME_P0_STACK;
2423 break;
2424 case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK:
2425 *type = GFX_FW_TYPE_RS64_ME_P1_STACK;
2426 break;
2427 case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK:
2428 *type = GFX_FW_TYPE_RS64_MEC_P0_STACK;
2429 break;
2430 case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK:
2431 *type = GFX_FW_TYPE_RS64_MEC_P1_STACK;
2432 break;
2433 case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK:
2434 *type = GFX_FW_TYPE_RS64_MEC_P2_STACK;
2435 break;
2436 case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK:
2437 *type = GFX_FW_TYPE_RS64_MEC_P3_STACK;
2438 break;
2439 case AMDGPU_UCODE_ID_VPE_CTX:
2440 *type = GFX_FW_TYPE_VPEC_FW1;
2441 break;
2442 case AMDGPU_UCODE_ID_VPE_CTL:
2443 *type = GFX_FW_TYPE_VPEC_FW2;
2444 break;
2445 case AMDGPU_UCODE_ID_VPE:
2446 *type = GFX_FW_TYPE_VPE;
2447 break;
2448 case AMDGPU_UCODE_ID_UMSCH_MM_UCODE:
2449 *type = GFX_FW_TYPE_UMSCH_UCODE;
2450 break;
2451 case AMDGPU_UCODE_ID_UMSCH_MM_DATA:
2452 *type = GFX_FW_TYPE_UMSCH_DATA;
2453 break;
2454 case AMDGPU_UCODE_ID_UMSCH_MM_CMD_BUFFER:
2455 *type = GFX_FW_TYPE_UMSCH_CMD_BUFFER;
2456 break;
2457 case AMDGPU_UCODE_ID_P2S_TABLE:
2458 *type = GFX_FW_TYPE_P2S_TABLE;
2459 break;
2460 case AMDGPU_UCODE_ID_MAXIMUM:
2461 default:
2462 return -EINVAL;
2463 }
2464
2465 return 0;
2466}
2467
2468static void psp_print_fw_hdr(struct psp_context *psp,
2469 struct amdgpu_firmware_info *ucode)
2470{
2471 struct amdgpu_device *adev = psp->adev;
2472 struct common_firmware_header *hdr;
2473
2474 switch (ucode->ucode_id) {
2475 case AMDGPU_UCODE_ID_SDMA0:
2476 case AMDGPU_UCODE_ID_SDMA1:
2477 case AMDGPU_UCODE_ID_SDMA2:
2478 case AMDGPU_UCODE_ID_SDMA3:
2479 case AMDGPU_UCODE_ID_SDMA4:
2480 case AMDGPU_UCODE_ID_SDMA5:
2481 case AMDGPU_UCODE_ID_SDMA6:
2482 case AMDGPU_UCODE_ID_SDMA7:
2483 hdr = (struct common_firmware_header *)
2484 adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data;
2485 amdgpu_ucode_print_sdma_hdr(hdr);
2486 break;
2487 case AMDGPU_UCODE_ID_CP_CE:
2488 hdr = (struct common_firmware_header *)adev->gfx.ce_fw->data;
2489 amdgpu_ucode_print_gfx_hdr(hdr);
2490 break;
2491 case AMDGPU_UCODE_ID_CP_PFP:
2492 hdr = (struct common_firmware_header *)adev->gfx.pfp_fw->data;
2493 amdgpu_ucode_print_gfx_hdr(hdr);
2494 break;
2495 case AMDGPU_UCODE_ID_CP_ME:
2496 hdr = (struct common_firmware_header *)adev->gfx.me_fw->data;
2497 amdgpu_ucode_print_gfx_hdr(hdr);
2498 break;
2499 case AMDGPU_UCODE_ID_CP_MEC1:
2500 hdr = (struct common_firmware_header *)adev->gfx.mec_fw->data;
2501 amdgpu_ucode_print_gfx_hdr(hdr);
2502 break;
2503 case AMDGPU_UCODE_ID_RLC_G:
2504 hdr = (struct common_firmware_header *)adev->gfx.rlc_fw->data;
2505 amdgpu_ucode_print_rlc_hdr(hdr);
2506 break;
2507 case AMDGPU_UCODE_ID_SMC:
2508 hdr = (struct common_firmware_header *)adev->pm.fw->data;
2509 amdgpu_ucode_print_smc_hdr(hdr);
2510 break;
2511 default:
2512 break;
2513 }
2514}
2515
2516static int psp_prep_load_ip_fw_cmd_buf(struct amdgpu_firmware_info *ucode,
2517 struct psp_gfx_cmd_resp *cmd)
2518{
2519 int ret;
2520 uint64_t fw_mem_mc_addr = ucode->mc_addr;
2521
2522 cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
2523 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr);
2524 cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr);
2525 cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size;
2526
2527 ret = psp_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type);
2528 if (ret)
2529 DRM_ERROR("Unknown firmware type\n");
2530
2531 return ret;
2532}
2533
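/*
 * Build a GFX_CMD_ID_LOAD_IP_FW command for the given ucode entry and submit
 * it on the KM ring using the shared PSP command buffer.
 */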
2534int psp_execute_ip_fw_load(struct psp_context *psp,
2535 struct amdgpu_firmware_info *ucode)
2536{
2537 int ret = 0;
2538 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
2539
2540 ret = psp_prep_load_ip_fw_cmd_buf(ucode, cmd);
2541 if (!ret) {
2542 ret = psp_cmd_submit_buf(psp, ucode, cmd,
2543 psp->fence_buf_mc_addr);
2544 }
2545
2546 release_psp_cmd_buf(psp);
2547
2548 return ret;
2549}
2550
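/*
 * Load the P2S table through the PSP when present. Skipped for SR-IOV,
 * for BACO-based runtime PM (the SMU stays alive), and on MP0 v13.0.6
 * when the SOS firmware is older than the minimum version coded below.
 */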
2551static int psp_load_p2s_table(struct psp_context *psp)
2552{
2553 int ret;
2554 struct amdgpu_device *adev = psp->adev;
2555 struct amdgpu_firmware_info *ucode =
2556 &adev->firmware.ucode[AMDGPU_UCODE_ID_P2S_TABLE];
2557
2558 if (adev->in_runpm && (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO))
2559 return 0;
2560
2561 if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6)) {
2562 uint32_t supp_vers = adev->flags & AMD_IS_APU ? 0x0036013D :
2563 0x0036003C;
2564 if (psp->sos.fw_version < supp_vers)
2565 return 0;
2566 }
2567
2568 if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
2569 return 0;
2570
2571 ret = psp_execute_ip_fw_load(psp, ucode);
2572
2573 return ret;
2574}
2575
2576static int psp_load_smu_fw(struct psp_context *psp)
2577{
2578 int ret;
2579 struct amdgpu_device *adev = psp->adev;
2580 struct amdgpu_firmware_info *ucode =
2581 &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
2582 struct amdgpu_ras *ras = psp->ras_context.ras;
2583
2584 /*
2585 * Skip SMU FW reloading in case of using BACO for runpm only,
2586 * as SMU is always alive.
2587 */
2588 if (adev->in_runpm && (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO))
2589 return 0;
2590
2591 if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
2592 return 0;
2593
2594 if ((amdgpu_in_reset(adev) && ras && adev->ras_enabled &&
2595 (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) ||
2596 amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 2)))) {
2597 ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD);
2598 if (ret)
2599 DRM_WARN("Failed to set MP1 state prepare for reload\n");
2600 }
2601
2602 ret = psp_execute_ip_fw_load(psp, ucode);
2603
2604 if (ret)
2605 DRM_ERROR("PSP load smu failed!\n");
2606
2607 return ret;
2608}
2609
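/*
 * Decide whether a ucode entry should be skipped by the PSP front-door
 * loading loop: empty entries, the P2S table (loaded separately), SMC
 * firmware when it is reloaded through other paths (quirk, autoload, or
 * PMFW-centralized cstate management), VF-skipped firmwares under SR-IOV,
 * and the MEC JT images when RLC autoload is in use.
 */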
2610static bool fw_load_skip_check(struct psp_context *psp,
2611 struct amdgpu_firmware_info *ucode)
2612{
2613 if (!ucode->fw || !ucode->ucode_size)
2614 return true;
2615
2616 if (ucode->ucode_id == AMDGPU_UCODE_ID_P2S_TABLE)
2617 return true;
2618
2619 if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
2620 (psp_smu_reload_quirk(psp) ||
2621 psp->autoload_supported ||
2622 psp->pmfw_centralized_cstate_management))
2623 return true;
2624
2625 if (amdgpu_sriov_vf(psp->adev) &&
2626 amdgpu_virt_fw_load_skip_check(psp->adev, ucode->ucode_id))
2627 return true;
2628
2629 if (psp->autoload_supported &&
2630 (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT ||
2631 ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT))
2632 /* skip mec JT when autoload is enabled */
2633 return true;
2634
2635 return false;
2636}
2637
2638int psp_load_fw_list(struct psp_context *psp,
2639 struct amdgpu_firmware_info **ucode_list, int ucode_count)
2640{
2641 int ret = 0, i;
2642 struct amdgpu_firmware_info *ucode;
2643
2644 for (i = 0; i < ucode_count; ++i) {
2645 ucode = ucode_list[i];
2646 psp_print_fw_hdr(psp, ucode);
2647 ret = psp_execute_ip_fw_load(psp, ucode);
2648 if (ret)
2649 return ret;
2650 }
2651 return ret;
2652}
2653
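/*
 * Walk the ucode list and load every non-PSP firmware through the PSP.
 * SMU firmware may be loaded first (autoload without centralized cstate
 * management), the P2S table is loaded up front, and RLC autoload is
 * kicked off once the last required GFX firmware has been sent.
 */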
2654static int psp_load_non_psp_fw(struct psp_context *psp)
2655{
2656 int i, ret;
2657 struct amdgpu_firmware_info *ucode;
2658 struct amdgpu_device *adev = psp->adev;
2659
2660 if (psp->autoload_supported &&
2661 !psp->pmfw_centralized_cstate_management) {
2662 ret = psp_load_smu_fw(psp);
2663 if (ret)
2664 return ret;
2665 }
2666
2667 /* Load P2S table first if it's available */
2668 psp_load_p2s_table(psp);
2669
2670 for (i = 0; i < adev->firmware.max_ucodes; i++) {
2671 ucode = &adev->firmware.ucode[i];
2672
2673 if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
2674 !fw_load_skip_check(psp, ucode)) {
2675 ret = psp_load_smu_fw(psp);
2676 if (ret)
2677 return ret;
2678 continue;
2679 }
2680
2681 if (fw_load_skip_check(psp, ucode))
2682 continue;
2683
2684 if (psp->autoload_supported &&
2685 (amdgpu_ip_version(adev, MP0_HWIP, 0) ==
2686 IP_VERSION(11, 0, 7) ||
2687 amdgpu_ip_version(adev, MP0_HWIP, 0) ==
2688 IP_VERSION(11, 0, 11) ||
2689 amdgpu_ip_version(adev, MP0_HWIP, 0) ==
2690 IP_VERSION(11, 0, 12)) &&
2691 (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1 ||
2692 ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2 ||
2693 ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3))
2694 /* PSP only receives one SDMA firmware for sienna_cichlid,
2695 * as all four SDMA firmwares are the same.
2696 */
2697 continue;
2698
2699 psp_print_fw_hdr(psp, ucode);
2700
2701 ret = psp_execute_ip_fw_load(psp, ucode);
2702 if (ret)
2703 return ret;
2704
2705 /* Start RLC autoload after the PSP has received all the gfx firmware */
2706 if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ?
2707 adev->virt.autoload_ucode_id : AMDGPU_UCODE_ID_RLC_G)) {
2708 ret = psp_rlc_autoload_start(psp);
2709 if (ret) {
2710 DRM_ERROR("Failed to start rlc autoload\n");
2711 return ret;
2712 }
2713 }
2714 }
2715
2716 return 0;
2717}
2718
2719static int psp_load_fw(struct amdgpu_device *adev)
2720{
2721 int ret;
2722 struct psp_context *psp = &adev->psp;
2723
2724 if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
2725 /* should not destroy ring, only stop */
2726 psp_ring_stop(psp, PSP_RING_TYPE__KM);
2727 } else {
2728 memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE);
2729
2730 ret = psp_ring_init(psp, PSP_RING_TYPE__KM);
2731 if (ret) {
2732 DRM_ERROR("PSP ring init failed!\n");
2733 goto failed;
2734 }
2735 }
2736
2737 ret = psp_hw_start(psp);
2738 if (ret)
2739 goto failed;
2740
2741 ret = psp_load_non_psp_fw(psp);
2742 if (ret)
2743 goto failed1;
2744
2745 ret = psp_asd_initialize(psp);
2746 if (ret) {
2747 DRM_ERROR("PSP load asd failed!\n");
2748 goto failed1;
2749 }
2750
2751 ret = psp_rl_load(adev);
2752 if (ret) {
2753 DRM_ERROR("PSP load RL failed!\n");
2754 goto failed1;
2755 }
2756
2757 if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
2758 if (adev->gmc.xgmi.num_physical_nodes > 1) {
2759 ret = psp_xgmi_initialize(psp, false, true);
2760 /* Warn on XGMI session initialization failure
2761 * instead of stopping driver initialization.
2762 */
2763 if (ret)
2764 dev_err(psp->adev->dev,
2765 "XGMI: Failed to initialize XGMI session\n");
2766 }
2767 }
2768
2769 if (psp->ta_fw) {
2770 ret = psp_ras_initialize(psp);
2771 if (ret)
2772 dev_err(psp->adev->dev,
2773 "RAS: Failed to initialize RAS\n");
2774
2775 ret = psp_hdcp_initialize(psp);
2776 if (ret)
2777 dev_err(psp->adev->dev,
2778 "HDCP: Failed to initialize HDCP\n");
2779
2780 ret = psp_dtm_initialize(psp);
2781 if (ret)
2782 dev_err(psp->adev->dev,
2783 "DTM: Failed to initialize DTM\n");
2784
2785 ret = psp_rap_initialize(psp);
2786 if (ret)
2787 dev_err(psp->adev->dev,
2788 "RAP: Failed to initialize RAP\n");
2789
2790 ret = psp_securedisplay_initialize(psp);
2791 if (ret)
2792 dev_err(psp->adev->dev,
2793 "SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
2794 }
2795
2796 return 0;
2797
2798failed1:
2799 psp_free_shared_bufs(psp);
2800failed:
2801 /*
2802 * all cleanup jobs (xgmi terminate, ras terminate,
2803 * ring destroy, cmd/fence/fw buffers destroy,
2804 * psp->cmd destroy) are delayed to psp_hw_fini
2805 */
2806 psp_ring_destroy(psp, PSP_RING_TYPE__KM);
2807 return ret;
2808}
2809
2810static int psp_hw_init(void *handle)
2811{
2812 int ret;
2813 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2814
2815 mutex_lock(&adev->firmware.mutex);
2816 /*
2817 * This sequence is just used on hw_init only once, no need on
2818 * resume.
2819 */
2820 ret = amdgpu_ucode_init_bo(adev);
2821 if (ret)
2822 goto failed;
2823
2824 ret = psp_load_fw(adev);
2825 if (ret) {
2826 DRM_ERROR("PSP firmware loading failed\n");
2827 goto failed;
2828 }
2829
2830 mutex_unlock(&adev->firmware.mutex);
2831 return 0;
2832
2833failed:
2834 adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT;
2835 mutex_unlock(&adev->firmware.mutex);
2836 return -EINVAL;
2837}
2838
2839static int psp_hw_fini(void *handle)
2840{
2841 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2842 struct psp_context *psp = &adev->psp;
2843
2844 if (psp->ta_fw) {
2845 psp_ras_terminate(psp);
2846 psp_securedisplay_terminate(psp);
2847 psp_rap_terminate(psp);
2848 psp_dtm_terminate(psp);
2849 psp_hdcp_terminate(psp);
2850
2851 if (adev->gmc.xgmi.num_physical_nodes > 1)
2852 psp_xgmi_terminate(psp);
2853 }
2854
2855 psp_asd_terminate(psp);
2856 psp_tmr_terminate(psp);
2857
2858 psp_ring_destroy(psp, PSP_RING_TYPE__KM);
2859
2860 return 0;
2861}
2862
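/*
 * Suspend: tear down the XGMI session and the optional TAs, then the ASD
 * and TMR, and finally stop (but do not destroy) the KM ring.
 */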
2863static int psp_suspend(void *handle)
2864{
2865 int ret = 0;
2866 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2867 struct psp_context *psp = &adev->psp;
2868
2869 if (adev->gmc.xgmi.num_physical_nodes > 1 &&
2870 psp->xgmi_context.context.initialized) {
2871 ret = psp_xgmi_terminate(psp);
2872 if (ret) {
2873 DRM_ERROR("Failed to terminate xgmi ta\n");
2874 goto out;
2875 }
2876 }
2877
2878 if (psp->ta_fw) {
2879 ret = psp_ras_terminate(psp);
2880 if (ret) {
2881 DRM_ERROR("Failed to terminate ras ta\n");
2882 goto out;
2883 }
2884 ret = psp_hdcp_terminate(psp);
2885 if (ret) {
2886 DRM_ERROR("Failed to terminate hdcp ta\n");
2887 goto out;
2888 }
2889 ret = psp_dtm_terminate(psp);
2890 if (ret) {
2891 DRM_ERROR("Failed to terminate dtm ta\n");
2892 goto out;
2893 }
2894 ret = psp_rap_terminate(psp);
2895 if (ret) {
2896 DRM_ERROR("Failed to terminate rap ta\n");
2897 goto out;
2898 }
2899 ret = psp_securedisplay_terminate(psp);
2900 if (ret) {
2901 DRM_ERROR("Failed to terminate securedisplay ta\n");
2902 goto out;
2903 }
2904 }
2905
2906 ret = psp_asd_terminate(psp);
2907 if (ret) {
2908 DRM_ERROR("Failed to terminate asd\n");
2909 goto out;
2910 }
2911
2912 ret = psp_tmr_terminate(psp);
2913 if (ret) {
2914 DRM_ERROR("Failed to terminate tmr\n");
2915 goto out;
2916 }
2917
2918 ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
2919 if (ret)
2920 DRM_ERROR("PSP ring stop failed\n");
2921
2922out:
2923 return ret;
2924}
2925
2926static int psp_resume(void *handle)
2927{
2928 int ret;
2929 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2930 struct psp_context *psp = &adev->psp;
2931
2932 DRM_INFO("PSP is resuming...\n");
2933
2934 if (psp->mem_train_ctx.enable_mem_training) {
2935 ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME);
2936 if (ret) {
2937 DRM_ERROR("Failed to process memory training!\n");
2938 return ret;
2939 }
2940 }
2941
2942 mutex_lock(&adev->firmware.mutex);
2943
2944 ret = psp_hw_start(psp);
2945 if (ret)
2946 goto failed;
2947
2948 ret = psp_load_non_psp_fw(psp);
2949 if (ret)
2950 goto failed;
2951
2952 ret = psp_asd_initialize(psp);
2953 if (ret) {
2954 DRM_ERROR("PSP load asd failed!\n");
2955 goto failed;
2956 }
2957
2958 ret = psp_rl_load(adev);
2959 if (ret) {
2960 dev_err(adev->dev, "PSP load RL failed!\n");
2961 goto failed;
2962 }
2963
2964 if (adev->gmc.xgmi.num_physical_nodes > 1) {
2965 ret = psp_xgmi_initialize(psp, false, true);
2966 /* Warn on XGMI session initialization failure
2967 * instead of stopping driver initialization.
2968 */
2969 if (ret)
2970 dev_err(psp->adev->dev,
2971 "XGMI: Failed to initialize XGMI session\n");
2972 }
2973
2974 if (psp->ta_fw) {
2975 ret = psp_ras_initialize(psp);
2976 if (ret)
2977 dev_err(psp->adev->dev,
2978 "RAS: Failed to initialize RAS\n");
2979
2980 ret = psp_hdcp_initialize(psp);
2981 if (ret)
2982 dev_err(psp->adev->dev,
2983 "HDCP: Failed to initialize HDCP\n");
2984
2985 ret = psp_dtm_initialize(psp);
2986 if (ret)
2987 dev_err(psp->adev->dev,
2988 "DTM: Failed to initialize DTM\n");
2989
2990 ret = psp_rap_initialize(psp);
2991 if (ret)
2992 dev_err(psp->adev->dev,
2993 "RAP: Failed to initialize RAP\n");
2994
2995 ret = psp_securedisplay_initialize(psp);
2996 if (ret)
2997 dev_err(psp->adev->dev,
2998 "SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
2999 }
3000
3001 mutex_unlock(&adev->firmware.mutex);
3002
3003 return 0;
3004
3005failed:
3006 DRM_ERROR("PSP resume failed\n");
3007 mutex_unlock(&adev->firmware.mutex);
3008 return ret;
3009}
3010
3011int psp_gpu_reset(struct amdgpu_device *adev)
3012{
3013 int ret;
3014
3015 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
3016 return 0;
3017
3018 mutex_lock(&adev->psp.mutex);
3019 ret = psp_mode1_reset(&adev->psp);
3020 mutex_unlock(&adev->psp.mutex);
3021
3022 return ret;
3023}
3024
3025int psp_rlc_autoload_start(struct psp_context *psp)
3026{
3027 int ret;
3028 struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
3029
3030 cmd->cmd_id = GFX_CMD_ID_AUTOLOAD_RLC;
3031
3032 ret = psp_cmd_submit_buf(psp, NULL, cmd,
3033 psp->fence_buf_mc_addr);
3034
3035 release_psp_cmd_buf(psp);
3036
3037 return ret;
3038}
3039
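/*
 * Submit one command/fence pair on the KM (GPCOM) ring: pick the next RB
 * frame from the write pointer (kept in DWORDs), fill in the command and
 * fence addresses, flush HDP, and advance the write pointer, wrapping at
 * the end of the ring buffer.
 */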
3040int psp_ring_cmd_submit(struct psp_context *psp,
3041 uint64_t cmd_buf_mc_addr,
3042 uint64_t fence_mc_addr,
3043 int index)
3044{
3045 unsigned int psp_write_ptr_reg = 0;
3046 struct psp_gfx_rb_frame *write_frame;
3047 struct psp_ring *ring = &psp->km_ring;
3048 struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem;
3049 struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start +
3050 ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1;
3051 struct amdgpu_device *adev = psp->adev;
3052 uint32_t ring_size_dw = ring->ring_size / 4;
3053 uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
3054
3055 /* KM (GPCOM) prepare write pointer */
3056 psp_write_ptr_reg = psp_ring_get_wptr(psp);
3057
3058 /* Update KM RB frame pointer to new frame */
3059 /* write_frame ptr increments by size of rb_frame in bytes */
3060 /* psp_write_ptr_reg increments by size of rb_frame in DWORDs */
3061 if ((psp_write_ptr_reg % ring_size_dw) == 0)
3062 write_frame = ring_buffer_start;
3063 else
3064 write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw);
3065 /* Check invalid write_frame ptr address */
3066 if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) {
3067 DRM_ERROR("ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n",
3068 ring_buffer_start, ring_buffer_end, write_frame);
3069 DRM_ERROR("write_frame is pointing to address out of bounds\n");
3070 return -EINVAL;
3071 }
3072
3073 /* Initialize KM RB frame */
3074 memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame));
3075
3076 /* Update KM RB frame */
3077 write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr);
3078 write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr);
3079 write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
3080 write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
3081 write_frame->fence_value = index;
3082 amdgpu_device_flush_hdp(adev, NULL);
3083
3084 /* Update the write Pointer in DWORDs */
3085 psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
3086 psp_ring_set_wptr(psp, psp_write_ptr_reg);
3087 return 0;
3088}
3089
3090int psp_init_asd_microcode(struct psp_context *psp, const char *chip_name)
3091{
3092 struct amdgpu_device *adev = psp->adev;
3093 char fw_name[PSP_FW_NAME_LEN];
3094 const struct psp_firmware_header_v1_0 *asd_hdr;
3095 int err = 0;
3096
3097 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name);
3098 err = amdgpu_ucode_request(adev, &adev->psp.asd_fw, fw_name);
3099 if (err)
3100 goto out;
3101
3102 asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
3103 adev->psp.asd_context.bin_desc.fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
3104 adev->psp.asd_context.bin_desc.feature_version = le32_to_cpu(asd_hdr->sos.fw_version);
3105 adev->psp.asd_context.bin_desc.size_bytes = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
3106 adev->psp.asd_context.bin_desc.start_addr = (uint8_t *)asd_hdr +
3107 le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
3108 return 0;
3109out:
3110 amdgpu_ucode_release(&adev->psp.asd_fw);
3111 return err;
3112}
3113
3114int psp_init_toc_microcode(struct psp_context *psp, const char *chip_name)
3115{
3116 struct amdgpu_device *adev = psp->adev;
3117 char fw_name[PSP_FW_NAME_LEN];
3118 const struct psp_firmware_header_v1_0 *toc_hdr;
3119 int err = 0;
3120
3121 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_toc.bin", chip_name);
3122 err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, fw_name);
3123 if (err)
3124 goto out;
3125
3126 toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data;
3127 adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version);
3128 adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version);
3129 adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes);
3130 adev->psp.toc.start_addr = (uint8_t *)toc_hdr +
3131 le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes);
3132 return 0;
3133out:
3134 amdgpu_ucode_release(&adev->psp.toc_fw);
3135 return err;
3136}
3137
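/*
 * Copy one firmware descriptor from a v2.0 SOS header into the matching
 * psp_bin_desc (SOS, SYS_DRV, KDB, TOC, SPL, RL and the soc/intf/dbg/ras
 * drivers); unknown descriptor types are only warned about.
 */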
3138static int parse_sos_bin_descriptor(struct psp_context *psp,
3139 const struct psp_fw_bin_desc *desc,
3140 const struct psp_firmware_header_v2_0 *sos_hdr)
3141{
3142 uint8_t *ucode_start_addr = NULL;
3143
3144 if (!psp || !desc || !sos_hdr)
3145 return -EINVAL;
3146
3147 ucode_start_addr = (uint8_t *)sos_hdr +
3148 le32_to_cpu(desc->offset_bytes) +
3149 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3150
3151 switch (desc->fw_type) {
3152 case PSP_FW_TYPE_PSP_SOS:
3153 psp->sos.fw_version = le32_to_cpu(desc->fw_version);
3154 psp->sos.feature_version = le32_to_cpu(desc->fw_version);
3155 psp->sos.size_bytes = le32_to_cpu(desc->size_bytes);
3156 psp->sos.start_addr = ucode_start_addr;
3157 break;
3158 case PSP_FW_TYPE_PSP_SYS_DRV:
3159 psp->sys.fw_version = le32_to_cpu(desc->fw_version);
3160 psp->sys.feature_version = le32_to_cpu(desc->fw_version);
3161 psp->sys.size_bytes = le32_to_cpu(desc->size_bytes);
3162 psp->sys.start_addr = ucode_start_addr;
3163 break;
3164 case PSP_FW_TYPE_PSP_KDB:
3165 psp->kdb.fw_version = le32_to_cpu(desc->fw_version);
3166 psp->kdb.feature_version = le32_to_cpu(desc->fw_version);
3167 psp->kdb.size_bytes = le32_to_cpu(desc->size_bytes);
3168 psp->kdb.start_addr = ucode_start_addr;
3169 break;
3170 case PSP_FW_TYPE_PSP_TOC:
3171 psp->toc.fw_version = le32_to_cpu(desc->fw_version);
3172 psp->toc.feature_version = le32_to_cpu(desc->fw_version);
3173 psp->toc.size_bytes = le32_to_cpu(desc->size_bytes);
3174 psp->toc.start_addr = ucode_start_addr;
3175 break;
3176 case PSP_FW_TYPE_PSP_SPL:
3177 psp->spl.fw_version = le32_to_cpu(desc->fw_version);
3178 psp->spl.feature_version = le32_to_cpu(desc->fw_version);
3179 psp->spl.size_bytes = le32_to_cpu(desc->size_bytes);
3180 psp->spl.start_addr = ucode_start_addr;
3181 break;
3182 case PSP_FW_TYPE_PSP_RL:
3183 psp->rl.fw_version = le32_to_cpu(desc->fw_version);
3184 psp->rl.feature_version = le32_to_cpu(desc->fw_version);
3185 psp->rl.size_bytes = le32_to_cpu(desc->size_bytes);
3186 psp->rl.start_addr = ucode_start_addr;
3187 break;
3188 case PSP_FW_TYPE_PSP_SOC_DRV:
3189 psp->soc_drv.fw_version = le32_to_cpu(desc->fw_version);
3190 psp->soc_drv.feature_version = le32_to_cpu(desc->fw_version);
3191 psp->soc_drv.size_bytes = le32_to_cpu(desc->size_bytes);
3192 psp->soc_drv.start_addr = ucode_start_addr;
3193 break;
3194 case PSP_FW_TYPE_PSP_INTF_DRV:
3195 psp->intf_drv.fw_version = le32_to_cpu(desc->fw_version);
3196 psp->intf_drv.feature_version = le32_to_cpu(desc->fw_version);
3197 psp->intf_drv.size_bytes = le32_to_cpu(desc->size_bytes);
3198 psp->intf_drv.start_addr = ucode_start_addr;
3199 break;
3200 case PSP_FW_TYPE_PSP_DBG_DRV:
3201 psp->dbg_drv.fw_version = le32_to_cpu(desc->fw_version);
3202 psp->dbg_drv.feature_version = le32_to_cpu(desc->fw_version);
3203 psp->dbg_drv.size_bytes = le32_to_cpu(desc->size_bytes);
3204 psp->dbg_drv.start_addr = ucode_start_addr;
3205 break;
3206 case PSP_FW_TYPE_PSP_RAS_DRV:
3207 psp->ras_drv.fw_version = le32_to_cpu(desc->fw_version);
3208 psp->ras_drv.feature_version = le32_to_cpu(desc->fw_version);
3209 psp->ras_drv.size_bytes = le32_to_cpu(desc->size_bytes);
3210 psp->ras_drv.start_addr = ucode_start_addr;
3211 break;
3212 default:
3213 dev_warn(psp->adev->dev, "Unsupported PSP FW type: %d\n", desc->fw_type);
3214 break;
3215 }
3216
3217 return 0;
3218}
3219
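/*
 * Fill in the SYS_DRV and SOS descriptors from a v1.x SOS header. MP0
 * v13.0.2 parts that are not XGMI-connected to the CPU use the alternate
 * (aux) SOS/SYS images from the v1.3 header; everything else uses the
 * base layout.
 */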
3220static int psp_init_sos_base_fw(struct amdgpu_device *adev)
3221{
3222 const struct psp_firmware_header_v1_0 *sos_hdr;
3223 const struct psp_firmware_header_v1_3 *sos_hdr_v1_3;
3224 uint8_t *ucode_array_start_addr;
3225
3226 sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
3227 ucode_array_start_addr = (uint8_t *)sos_hdr +
3228 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3229
3230 if (adev->gmc.xgmi.connected_to_cpu ||
3231 (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 2))) {
3232 adev->psp.sos.fw_version = le32_to_cpu(sos_hdr->header.ucode_version);
3233 adev->psp.sos.feature_version = le32_to_cpu(sos_hdr->sos.fw_version);
3234
3235 adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr->sos.offset_bytes);
3236 adev->psp.sys.start_addr = ucode_array_start_addr;
3237
3238 adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr->sos.size_bytes);
3239 adev->psp.sos.start_addr = ucode_array_start_addr +
3240 le32_to_cpu(sos_hdr->sos.offset_bytes);
3241 } else {
3242 /* Load alternate PSP SOS FW */
3243 sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
3244
3245 adev->psp.sos.fw_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version);
3246 adev->psp.sos.feature_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version);
3247
3248 adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.size_bytes);
3249 adev->psp.sys.start_addr = ucode_array_start_addr +
3250 le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.offset_bytes);
3251
3252 adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr_v1_3->sos_aux.size_bytes);
3253 adev->psp.sos.start_addr = ucode_array_start_addr +
3254 le32_to_cpu(sos_hdr_v1_3->sos_aux.offset_bytes);
3255 }
3256
3257 if ((adev->psp.sys.size_bytes == 0) || (adev->psp.sos.size_bytes == 0)) {
3258 dev_warn(adev->dev, "PSP SOS FW not available");
3259 return -EINVAL;
3260 }
3261
3262 return 0;
3263}
3264
3265int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name)
3266{
3267 struct amdgpu_device *adev = psp->adev;
3268 char fw_name[PSP_FW_NAME_LEN];
3269 const struct psp_firmware_header_v1_0 *sos_hdr;
3270 const struct psp_firmware_header_v1_1 *sos_hdr_v1_1;
3271 const struct psp_firmware_header_v1_2 *sos_hdr_v1_2;
3272 const struct psp_firmware_header_v1_3 *sos_hdr_v1_3;
3273 const struct psp_firmware_header_v2_0 *sos_hdr_v2_0;
3274 int err = 0;
3275 uint8_t *ucode_array_start_addr;
3276 int fw_index = 0;
3277
3278 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sos.bin", chip_name);
3279 err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, fw_name);
3280 if (err)
3281 goto out;
3282
3283 sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
3284 ucode_array_start_addr = (uint8_t *)sos_hdr +
3285 le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
3286 amdgpu_ucode_print_psp_hdr(&sos_hdr->header);
3287
3288 switch (sos_hdr->header.header_version_major) {
3289 case 1:
3290 err = psp_init_sos_base_fw(adev);
3291 if (err)
3292 goto out;
3293
3294 if (sos_hdr->header.header_version_minor == 1) {
3295 sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data;
3296 adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_1->toc.size_bytes);
3297 adev->psp.toc.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3298 le32_to_cpu(sos_hdr_v1_1->toc.offset_bytes);
3299 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_1->kdb.size_bytes);
3300 adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3301 le32_to_cpu(sos_hdr_v1_1->kdb.offset_bytes);
3302 }
3303 if (sos_hdr->header.header_version_minor == 2) {
3304 sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data;
3305 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_2->kdb.size_bytes);
3306 adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr +
3307 le32_to_cpu(sos_hdr_v1_2->kdb.offset_bytes);
3308 }
3309 if (sos_hdr->header.header_version_minor == 3) {
3310 sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
3311 adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.toc.size_bytes);
3312 adev->psp.toc.start_addr = ucode_array_start_addr +
3313 le32_to_cpu(sos_hdr_v1_3->v1_1.toc.offset_bytes);
3314 adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.size_bytes);
3315 adev->psp.kdb.start_addr = ucode_array_start_addr +
3316 le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.offset_bytes);
3317 adev->psp.spl.size_bytes = le32_to_cpu(sos_hdr_v1_3->spl.size_bytes);
3318 adev->psp.spl.start_addr = ucode_array_start_addr +
3319 le32_to_cpu(sos_hdr_v1_3->spl.offset_bytes);
3320 adev->psp.rl.size_bytes = le32_to_cpu(sos_hdr_v1_3->rl.size_bytes);
3321 adev->psp.rl.start_addr = ucode_array_start_addr +
3322 le32_to_cpu(sos_hdr_v1_3->rl.offset_bytes);
3323 }
3324 break;
3325 case 2:
3326 sos_hdr_v2_0 = (const struct psp_firmware_header_v2_0 *)adev->psp.sos_fw->data;
3327
3328 if (le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count) >= UCODE_MAX_PSP_PACKAGING) {
3329 dev_err(adev->dev, "packed SOS count exceeds maximum limit\n");
3330 err = -EINVAL;
3331 goto out;
3332 }
3333
3334 for (fw_index = 0; fw_index < le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count); fw_index++) {
3335 err = parse_sos_bin_descriptor(psp,
3336 &sos_hdr_v2_0->psp_fw_bin[fw_index],
3337 sos_hdr_v2_0);
3338 if (err)
3339 goto out;
3340 }
3341 break;
3342 default:
3343 dev_err(adev->dev,
3344 "unsupported psp sos firmware\n");
3345 err = -EINVAL;
3346 goto out;
3347 }
3348
3349 return 0;
3350out:
3351 amdgpu_ucode_release(&adev->psp.sos_fw);
3352
3353 return err;
3354}
3355
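/*
 * Copy one TA descriptor from a v2.0 TA header into the corresponding TA
 * context (ASD, XGMI, RAS, HDCP, DTM, RAP, SECUREDISPLAY); unknown
 * descriptor types are only warned about.
 */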
3356static int parse_ta_bin_descriptor(struct psp_context *psp,
3357 const struct psp_fw_bin_desc *desc,
3358 const struct ta_firmware_header_v2_0 *ta_hdr)
3359{
3360 uint8_t *ucode_start_addr = NULL;
3361
3362 if (!psp || !desc || !ta_hdr)
3363 return -EINVAL;
3364
3365 ucode_start_addr = (uint8_t *)ta_hdr +
3366 le32_to_cpu(desc->offset_bytes) +
3367 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3368
3369 switch (desc->fw_type) {
3370 case TA_FW_TYPE_PSP_ASD:
3371 psp->asd_context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
3372 psp->asd_context.bin_desc.feature_version = le32_to_cpu(desc->fw_version);
3373 psp->asd_context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
3374 psp->asd_context.bin_desc.start_addr = ucode_start_addr;
3375 break;
3376 case TA_FW_TYPE_PSP_XGMI:
3377 psp->xgmi_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
3378 psp->xgmi_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
3379 psp->xgmi_context.context.bin_desc.start_addr = ucode_start_addr;
3380 break;
3381 case TA_FW_TYPE_PSP_RAS:
3382 psp->ras_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
3383 psp->ras_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
3384 psp->ras_context.context.bin_desc.start_addr = ucode_start_addr;
3385 break;
3386 case TA_FW_TYPE_PSP_HDCP:
3387 psp->hdcp_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
3388 psp->hdcp_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
3389 psp->hdcp_context.context.bin_desc.start_addr = ucode_start_addr;
3390 break;
3391 case TA_FW_TYPE_PSP_DTM:
3392 psp->dtm_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
3393 psp->dtm_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
3394 psp->dtm_context.context.bin_desc.start_addr = ucode_start_addr;
3395 break;
3396 case TA_FW_TYPE_PSP_RAP:
3397 psp->rap_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
3398 psp->rap_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
3399 psp->rap_context.context.bin_desc.start_addr = ucode_start_addr;
3400 break;
3401 case TA_FW_TYPE_PSP_SECUREDISPLAY:
3402 psp->securedisplay_context.context.bin_desc.fw_version =
3403 le32_to_cpu(desc->fw_version);
3404 psp->securedisplay_context.context.bin_desc.size_bytes =
3405 le32_to_cpu(desc->size_bytes);
3406 psp->securedisplay_context.context.bin_desc.start_addr =
3407 ucode_start_addr;
3408 break;
3409 default:
3410 dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", desc->fw_type);
3411 break;
3412 }
3413
3414 return 0;
3415}
3416
3417static int parse_ta_v1_microcode(struct psp_context *psp)
3418{
3419 const struct ta_firmware_header_v1_0 *ta_hdr;
3420 struct amdgpu_device *adev = psp->adev;
3421
3422 ta_hdr = (const struct ta_firmware_header_v1_0 *) adev->psp.ta_fw->data;
3423
3424 if (le16_to_cpu(ta_hdr->header.header_version_major) != 1)
3425 return -EINVAL;
3426
3427 adev->psp.xgmi_context.context.bin_desc.fw_version =
3428 le32_to_cpu(ta_hdr->xgmi.fw_version);
3429 adev->psp.xgmi_context.context.bin_desc.size_bytes =
3430 le32_to_cpu(ta_hdr->xgmi.size_bytes);
3431 adev->psp.xgmi_context.context.bin_desc.start_addr =
3432 (uint8_t *)ta_hdr +
3433 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3434
3435 adev->psp.ras_context.context.bin_desc.fw_version =
3436 le32_to_cpu(ta_hdr->ras.fw_version);
3437 adev->psp.ras_context.context.bin_desc.size_bytes =
3438 le32_to_cpu(ta_hdr->ras.size_bytes);
3439 adev->psp.ras_context.context.bin_desc.start_addr =
3440 (uint8_t *)adev->psp.xgmi_context.context.bin_desc.start_addr +
3441 le32_to_cpu(ta_hdr->ras.offset_bytes);
3442
3443 adev->psp.hdcp_context.context.bin_desc.fw_version =
3444 le32_to_cpu(ta_hdr->hdcp.fw_version);
3445 adev->psp.hdcp_context.context.bin_desc.size_bytes =
3446 le32_to_cpu(ta_hdr->hdcp.size_bytes);
3447 adev->psp.hdcp_context.context.bin_desc.start_addr =
3448 (uint8_t *)ta_hdr +
3449 le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
3450
3451 adev->psp.dtm_context.context.bin_desc.fw_version =
3452 le32_to_cpu(ta_hdr->dtm.fw_version);
3453 adev->psp.dtm_context.context.bin_desc.size_bytes =
3454 le32_to_cpu(ta_hdr->dtm.size_bytes);
3455 adev->psp.dtm_context.context.bin_desc.start_addr =
3456 (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
3457 le32_to_cpu(ta_hdr->dtm.offset_bytes);
3458
3459 adev->psp.securedisplay_context.context.bin_desc.fw_version =
3460 le32_to_cpu(ta_hdr->securedisplay.fw_version);
3461 adev->psp.securedisplay_context.context.bin_desc.size_bytes =
3462 le32_to_cpu(ta_hdr->securedisplay.size_bytes);
3463 adev->psp.securedisplay_context.context.bin_desc.start_addr =
3464 (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
3465 le32_to_cpu(ta_hdr->securedisplay.offset_bytes);
3466
3467 adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
3468
3469 return 0;
3470}
3471
3472static int parse_ta_v2_microcode(struct psp_context *psp)
3473{
3474 const struct ta_firmware_header_v2_0 *ta_hdr;
3475 struct amdgpu_device *adev = psp->adev;
3476 int err = 0;
3477 int ta_index = 0;
3478
3479 ta_hdr = (const struct ta_firmware_header_v2_0 *)adev->psp.ta_fw->data;
3480
3481 if (le16_to_cpu(ta_hdr->header.header_version_major) != 2)
3482 return -EINVAL;
3483
3484 if (le32_to_cpu(ta_hdr->ta_fw_bin_count) >= UCODE_MAX_PSP_PACKAGING) {
3485 dev_err(adev->dev, "packed TA count exceeds maximum limit\n");
3486 return -EINVAL;
3487 }
3488
3489 for (ta_index = 0; ta_index < le32_to_cpu(ta_hdr->ta_fw_bin_count); ta_index++) {
3490 err = parse_ta_bin_descriptor(psp,
3491 &ta_hdr->ta_fw_bin[ta_index],
3492 ta_hdr);
3493 if (err)
3494 return err;
3495 }
3496
3497 return 0;
3498}
3499
3500int psp_init_ta_microcode(struct psp_context *psp, const char *chip_name)
3501{
3502 const struct common_firmware_header *hdr;
3503 struct amdgpu_device *adev = psp->adev;
3504 char fw_name[PSP_FW_NAME_LEN];
3505 int err;
3506
3507 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
3508 err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, fw_name);
3509 if (err)
3510 return err;
3511
3512 hdr = (const struct common_firmware_header *)adev->psp.ta_fw->data;
3513 switch (le16_to_cpu(hdr->header_version_major)) {
3514 case 1:
3515 err = parse_ta_v1_microcode(psp);
3516 break;
3517 case 2:
3518 err = parse_ta_v2_microcode(psp);
3519 break;
3520 default:
3521 dev_err(adev->dev, "unsupported TA header version\n");
3522 err = -EINVAL;
3523 }
3524
3525 if (err)
3526 amdgpu_ucode_release(&adev->psp.ta_fw);
3527
3528 return err;
3529}
3530
3531int psp_init_cap_microcode(struct psp_context *psp, const char *chip_name)
3532{
3533 struct amdgpu_device *adev = psp->adev;
3534 char fw_name[PSP_FW_NAME_LEN];
3535 const struct psp_firmware_header_v1_0 *cap_hdr_v1_0;
3536 struct amdgpu_firmware_info *info = NULL;
3537 int err = 0;
3538
3539 if (!amdgpu_sriov_vf(adev)) {
3540 dev_err(adev->dev, "cap microcode should only be loaded under SRIOV\n");
3541 return -EINVAL;
3542 }
3543
3544 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_cap.bin", chip_name);
3545 err = amdgpu_ucode_request(adev, &adev->psp.cap_fw, fw_name);
3546 if (err) {
3547 if (err == -ENODEV) {
3548 dev_warn(adev->dev, "cap microcode does not exist, skip\n");
3549 err = 0;
3550 goto out;
3551 }
3552 dev_err(adev->dev, "fail to initialize cap microcode\n");
3553 }
3554
3555 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CAP];
3556 info->ucode_id = AMDGPU_UCODE_ID_CAP;
3557 info->fw = adev->psp.cap_fw;
3558 cap_hdr_v1_0 = (const struct psp_firmware_header_v1_0 *)
3559 adev->psp.cap_fw->data;
3560 adev->firmware.fw_size += ALIGN(
3561 le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes), PAGE_SIZE);
3562 adev->psp.cap_fw_version = le32_to_cpu(cap_hdr_v1_0->header.ucode_version);
3563 adev->psp.cap_feature_version = le32_to_cpu(cap_hdr_v1_0->sos.fw_version);
3564 adev->psp.cap_ucode_size = le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes);
3565
3566 return 0;
3567
3568out:
3569 amdgpu_ucode_release(&adev->psp.cap_fw);
3570 return err;
3571}
3572
3573static int psp_set_clockgating_state(void *handle,
3574 enum amd_clockgating_state state)
3575{
3576 return 0;
3577}
3578
3579static int psp_set_powergating_state(void *handle,
3580 enum amd_powergating_state state)
3581{
3582 return 0;
3583}
3584
3585static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev,
3586 struct device_attribute *attr,
3587 char *buf)
3588{
3589 struct drm_device *ddev = dev_get_drvdata(dev);
3590 struct amdgpu_device *adev = drm_to_adev(ddev);
3591 uint32_t fw_ver;
3592 int ret;
3593
3594 if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
3595 DRM_INFO("PSP block is not ready yet.");
3596 return -EBUSY;
3597 }
3598
3599 mutex_lock(&adev->psp.mutex);
3600 ret = psp_read_usbc_pd_fw(&adev->psp, &fw_ver);
3601 mutex_unlock(&adev->psp.mutex);
3602
3603 if (ret) {
3604 DRM_ERROR("Failed to read USBC PD FW, err = %d", ret);
3605 return ret;
3606 }
3607
3608 return sysfs_emit(buf, "%x\n", fw_ver);
3609}
3610
3611static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
3612 struct device_attribute *attr,
3613 const char *buf,
3614 size_t count)
3615{
3616 struct drm_device *ddev = dev_get_drvdata(dev);
3617 struct amdgpu_device *adev = drm_to_adev(ddev);
3618 int ret, idx;
3619 char fw_name[100];
3620 const struct firmware *usbc_pd_fw;
3621 struct amdgpu_bo *fw_buf_bo = NULL;
3622 uint64_t fw_pri_mc_addr;
3623 void *fw_pri_cpu_addr;
3624
3625 if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
3626 DRM_INFO("PSP block is not ready yet.");
3627 return -EBUSY;
3628 }
3629
3630 if (!drm_dev_enter(ddev, &idx))
3631 return -ENODEV;
3632
3633 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s", buf);
3634 ret = request_firmware(&usbc_pd_fw, fw_name, adev->dev);
3635 if (ret)
3636 goto fail;
3637
3638 /* LFB address which is aligned to 1MB boundary per PSP request */
3639 ret = amdgpu_bo_create_kernel(adev, usbc_pd_fw->size, 0x100000,
3640 AMDGPU_GEM_DOMAIN_VRAM |
3641 AMDGPU_GEM_DOMAIN_GTT,
3642 &fw_buf_bo, &fw_pri_mc_addr,
3643 &fw_pri_cpu_addr);
3644 if (ret)
3645 goto rel_buf;
3646
3647 memcpy_toio(fw_pri_cpu_addr, usbc_pd_fw->data, usbc_pd_fw->size);
3648
3649 mutex_lock(&adev->psp.mutex);
3650 ret = psp_load_usbc_pd_fw(&adev->psp, fw_pri_mc_addr);
3651 mutex_unlock(&adev->psp.mutex);
3652
3653 amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);
3654
3655rel_buf:
3656 release_firmware(usbc_pd_fw);
3657fail:
3658 if (ret) {
3659 DRM_ERROR("Failed to load USBC PD FW, err = %d", ret);
3660 count = ret;
3661 }
3662
3663 drm_dev_exit(idx);
3664 return count;
3665}

void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size)
{
	int idx;

	if (!drm_dev_enter(adev_to_drm(psp->adev), &idx))
		return;

	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
	memcpy(psp->fw_pri_buf, start_addr, bin_size);

	drm_dev_exit(idx);
}
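
/*
 * Minimal usage sketch (hypothetical caller): the per-ASIC bootloader-load
 * helpers stage a PSP binary into the private firmware buffer this way
 * before handing its GPU address to the bootloader. The bin descriptor
 * chosen here is only an example.
 *
 *	psp_copy_fw(psp, psp->sys.start_addr, psp->sys.size_bytes);
 */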

/**
 * DOC: usbc_pd_fw
 * Reading from this file will retrieve the USB-C PD firmware version.
 * Writing to this file will trigger the update process.
 */
static DEVICE_ATTR(usbc_pd_fw, 0644,
		   psp_usbc_pd_fw_sysfs_read,
		   psp_usbc_pd_fw_sysfs_write);
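
/*
 * Usage sketch for the usbc_pd_fw node (the sysfs path below is an
 * assumption and depends on the card index):
 *
 *	# read the current USB-C PD firmware version
 *	cat /sys/class/drm/card0/device/usbc_pd_fw
 *
 *	# flash a new image; the written name is resolved under
 *	# /lib/firmware/amdgpu/ by request_firmware()
 *	echo pd_fw_image.bin > /sys/class/drm/card0/device/usbc_pd_fw
 */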

int is_psp_fw_valid(struct psp_bin_desc bin)
{
	return bin.size_bytes;
}
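
/*
 * Call-site sketch (illustrative only): the per-ASIC hw-start paths use the
 * descriptor size to decide whether an optional PSP binary should be loaded.
 * The helper shown below is just an example of such a consumer.
 *
 *	if (is_psp_fw_valid(psp->kdb))
 *		ret = psp_bootloader_load_kdb(psp);
 */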

static ssize_t amdgpu_psp_vbflash_write(struct file *filp, struct kobject *kobj,
					struct bin_attribute *bin_attr,
					char *buffer, loff_t pos, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	adev->psp.vbflash_done = false;

	/* Safeguard against memory drain */
	if (adev->psp.vbflash_image_size > AMD_VBIOS_FILE_MAX_SIZE_B) {
		dev_err(adev->dev, "File size cannot exceed %u", AMD_VBIOS_FILE_MAX_SIZE_B);
		kvfree(adev->psp.vbflash_tmp_buf);
		adev->psp.vbflash_tmp_buf = NULL;
		adev->psp.vbflash_image_size = 0;
		return -ENOMEM;
	}

	/* TODO Just allocate max for now and optimize to realloc later if needed */
	if (!adev->psp.vbflash_tmp_buf) {
		adev->psp.vbflash_tmp_buf = kvmalloc(AMD_VBIOS_FILE_MAX_SIZE_B, GFP_KERNEL);
		if (!adev->psp.vbflash_tmp_buf)
			return -ENOMEM;
	}

	mutex_lock(&adev->psp.mutex);
	memcpy(adev->psp.vbflash_tmp_buf + pos, buffer, count);
	adev->psp.vbflash_image_size += count;
	mutex_unlock(&adev->psp.mutex);

	dev_dbg(adev->dev, "IFWI staged for update");

	return count;
}

static ssize_t amdgpu_psp_vbflash_read(struct file *filp, struct kobject *kobj,
				       struct bin_attribute *bin_attr, char *buffer,
				       loff_t pos, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	struct amdgpu_bo *fw_buf_bo = NULL;
	uint64_t fw_pri_mc_addr;
	void *fw_pri_cpu_addr;
	int ret;

	if (adev->psp.vbflash_image_size == 0)
		return -EINVAL;

	dev_dbg(adev->dev, "PSP IFWI flash process initiated");

	ret = amdgpu_bo_create_kernel(adev, adev->psp.vbflash_image_size,
				      AMDGPU_GPU_PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &fw_buf_bo,
				      &fw_pri_mc_addr,
				      &fw_pri_cpu_addr);
	if (ret)
		goto rel_buf;

	memcpy_toio(fw_pri_cpu_addr, adev->psp.vbflash_tmp_buf, adev->psp.vbflash_image_size);

	mutex_lock(&adev->psp.mutex);
	ret = psp_update_spirom(&adev->psp, fw_pri_mc_addr);
	mutex_unlock(&adev->psp.mutex);

	amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);

rel_buf:
	kvfree(adev->psp.vbflash_tmp_buf);
	adev->psp.vbflash_tmp_buf = NULL;
	adev->psp.vbflash_image_size = 0;

	if (ret) {
		dev_err(adev->dev, "Failed to load IFWI, err = %d", ret);
		return ret;
	}

	dev_dbg(adev->dev, "PSP IFWI flash process done");
	return 0;
}

/**
 * DOC: psp_vbflash
 * Writing to this file will stage an IFWI for update. Reading from this file
 * will trigger the update process.
 */
static struct bin_attribute psp_vbflash_bin_attr = {
	.attr = {.name = "psp_vbflash", .mode = 0660},
	.size = 0,
	.write = amdgpu_psp_vbflash_write,
	.read = amdgpu_psp_vbflash_read,
};
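
/*
 * Usage sketch for the psp_vbflash node (the sysfs path is an assumption):
 * the IFWI image is staged with one or more writes, and a subsequent read of
 * the same node kicks off the actual flash.
 *
 *	cat ifwi_image.bin > /sys/class/drm/card0/device/psp_vbflash
 *	cat /sys/class/drm/card0/device/psp_vbflash
 */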

/**
 * DOC: psp_vbflash_status
 * The status of the flash process.
 * 0: IFWI flash not complete.
 * 1: IFWI flash complete.
 */
static ssize_t amdgpu_psp_vbflash_status(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint32_t vbflash_status;

	vbflash_status = psp_vbflash_status(&adev->psp);
	if (!adev->psp.vbflash_done)
		vbflash_status = 0;
	else if (adev->psp.vbflash_done && !(vbflash_status & 0x80000000))
		vbflash_status = 1;

	return sysfs_emit(buf, "0x%x\n", vbflash_status);
}
static DEVICE_ATTR(psp_vbflash_status, 0440, amdgpu_psp_vbflash_status, NULL);
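
/*
 * Usage sketch for psp_vbflash_status (path is an assumption): poll the node
 * after triggering the flash. 0x0 means the flash has not completed, 0x1
 * means it completed successfully, and a raw value with bit 31 set is the
 * unmodified error status returned by the PSP.
 *
 *	cat /sys/class/drm/card0/device/psp_vbflash_status
 */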

static struct bin_attribute *bin_flash_attrs[] = {
	&psp_vbflash_bin_attr,
	NULL
};

static struct attribute *flash_attrs[] = {
	&dev_attr_psp_vbflash_status.attr,
	&dev_attr_usbc_pd_fw.attr,
	NULL
};

static umode_t amdgpu_flash_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
{
	struct device *dev = kobj_to_dev(kobj);
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	if (attr == &dev_attr_usbc_pd_fw.attr)
		return adev->psp.sup_pd_fw_up ? 0660 : 0;

	return adev->psp.sup_ifwi_up ? 0440 : 0;
}

static umode_t amdgpu_bin_flash_attr_is_visible(struct kobject *kobj,
						struct bin_attribute *attr,
						int idx)
{
	struct device *dev = kobj_to_dev(kobj);
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return adev->psp.sup_ifwi_up ? 0660 : 0;
}

const struct attribute_group amdgpu_flash_attr_group = {
	.attrs = flash_attrs,
	.bin_attrs = bin_flash_attrs,
	.is_bin_visible = amdgpu_bin_flash_attr_is_visible,
	.is_visible = amdgpu_flash_attr_is_visible,
};
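
/*
 * Registration sketch (illustrative; the real call site lives elsewhere in
 * the driver): the group is attached to the device during init so the
 * is_visible callbacks above can hide nodes the ASIC does not support.
 *
 *	ret = devm_device_add_group(adev->dev, &amdgpu_flash_attr_group);
 *	if (ret)
 *		dev_err(adev->dev, "Could not create amdgpu flash attributes");
 */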

const struct amd_ip_funcs psp_ip_funcs = {
	.name = "psp",
	.early_init = psp_early_init,
	.late_init = NULL,
	.sw_init = psp_sw_init,
	.sw_fini = psp_sw_fini,
	.hw_init = psp_hw_init,
	.hw_fini = psp_hw_fini,
	.suspend = psp_suspend,
	.resume = psp_resume,
	.is_idle = NULL,
	.check_soft_reset = NULL,
	.wait_for_idle = NULL,
	.soft_reset = NULL,
	.set_clockgating_state = psp_set_clockgating_state,
	.set_powergating_state = psp_set_powergating_state,
};

const struct amdgpu_ip_block_version psp_v3_1_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 3,
	.minor = 1,
	.rev = 0,
	.funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v10_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 10,
	.minor = 0,
	.rev = 0,
	.funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v11_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 11,
	.minor = 0,
	.rev = 0,
	.funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v11_0_8_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 11,
	.minor = 0,
	.rev = 8,
	.funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v12_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 12,
	.minor = 0,
	.rev = 0,
	.funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v13_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 13,
	.minor = 0,
	.rev = 0,
	.funcs = &psp_ip_funcs,
};

const struct amdgpu_ip_block_version psp_v13_0_4_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_PSP,
	.major = 13,
	.minor = 0,
	.rev = 4,
	.funcs = &psp_ip_funcs,
};
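
/*
 * Consumption sketch (illustrative): the SoC setup code picks the matching
 * exported block version and registers it with the device's IP-block list,
 * for example:
 *
 *	amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
 */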