/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "smumgr.h"
#include "vega20_inc.h"
#include "soc15_common.h"
#include "vega20_smumgr.h"
#include "vega20_ppsmc.h"
#include "smu11_driver_if.h"
#include "ppatomctrl.h"
#include "pp_debug.h"
#include "smu_ucode_xfer_vi.h"
#include "smu7_smumgr.h"
#include "vega20_hwmgr.h"

/* MP Apertures */
#define MP0_Public			0x03800000
#define MP0_SRAM			0x03900000
#define MP1_Public			0x03b00000
#define MP1_SRAM			0x03c00004

/* address block */
#define smnMP1_FIRMWARE_FLAGS		0x3010024
#define smnMP0_FW_INTF			0x30101c0
#define smnMP1_PUB_CTRL			0x3010b14

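/*
 * Check whether the MP1 (SMC) firmware is up and able to accept messages.
 * The firmware sets the INTERRUPTS_ENABLED bit in its FIRMWARE_FLAGS
 * register once it is running; that register lives in SMN space and is
 * reached by OR'ing the offset into the MP1_Public aperture and reading
 * it through the PCIE index/data interface.
 */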
static bool vega20_is_smc_ram_running(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = hwmgr->adev;
	uint32_t mp1_fw_flags;

	mp1_fw_flags = RREG32_PCIE(MP1_Public |
			(smnMP1_FIRMWARE_FLAGS & 0xffffffff));

	if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
			MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
		return true;

	return false;
}

/*
 * Wait until the SMC has responded to the previous message.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @return the response value read back from MP1_SMN_C2PMSG_90
 *         (PPSMC_Result_OK indicates success).
 */
static uint32_t vega20_wait_for_response(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = hwmgr->adev;
	uint32_t reg;

	reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);

	phm_wait_for_register_unequal(hwmgr, reg,
			0, MP1_C2PMSG_90__CONTENT_MASK);

	return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
}

/*
 * Send a message to the SMC without waiting for its response.
 * @param hwmgr the address of the powerplay hardware manager.
 * @param msg the message to send.
 * @return Always returns 0.
 */
static int vega20_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
		uint16_t msg)
{
	struct amdgpu_device *adev = hwmgr->adev;

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);

	return 0;
}

/*
 * Send a message to the SMC and wait for its response.
 *
 * The handshake is: wait for any previous response in MP1_SMN_C2PMSG_90,
 * clear that register, write the message index to MP1_SMN_C2PMSG_66, then
 * poll C2PMSG_90 again until the SMC posts its result.
 *
 * @param hwmgr the address of the powerplay hardware manager.
 * @param msg the message to send.
 * @return 0 on success, -EIO if the SMC did not report PPSMC_Result_OK.
 */
static int vega20_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
{
	struct amdgpu_device *adev = hwmgr->adev;
	int ret = 0;

	vega20_wait_for_response(hwmgr);

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);

	vega20_send_msg_to_smc_without_waiting(hwmgr, msg);

	ret = vega20_wait_for_response(hwmgr);
	if (ret != PPSMC_Result_OK)
		pr_err("Failed to send message 0x%x, response 0x%x\n", msg, ret);

	return (ret == PPSMC_Result_OK) ? 0 : -EIO;
}

/*
 * Send a message to the SMC with a parameter and wait for its response.
 * The parameter is written to MP1_SMN_C2PMSG_82 before the message is
 * posted.
 * @param hwmgr the address of the powerplay hardware manager.
 * @param msg the message to send.
 * @param parameter the parameter to send.
 * @return 0 on success, -EIO if the SMC did not report PPSMC_Result_OK.
 */
static int vega20_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
		uint16_t msg, uint32_t parameter)
{
	struct amdgpu_device *adev = hwmgr->adev;
	int ret = 0;

	vega20_wait_for_response(hwmgr);

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, parameter);

	vega20_send_msg_to_smc_without_waiting(hwmgr, msg);

	ret = vega20_wait_for_response(hwmgr);
	if (ret != PPSMC_Result_OK)
		pr_err("Failed to send message 0x%x, response 0x%x\n", msg, ret);

	return (ret == PPSMC_Result_OK) ? 0 : -EIO;
}

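/*
 * Read back the message argument from MP1_SMN_C2PMSG_82. The same scratch
 * register carries the outgoing parameter of
 * vega20_send_msg_to_smc_with_parameter() and the value the SMC returns
 * for "get" style messages such as PPSMC_MSG_GetEnabledSmuFeaturesLow.
 */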
static uint32_t vega20_get_argument(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = hwmgr->adev;

	return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
}

/*
 * Copy a table from the SMC into the driver FB.
 * @param hwmgr the address of the HW manager
 * @param table the address of the driver's table buffer
 * @param table_id the ID of the table to copy from the SMC
 */
static int vega20_copy_table_from_smc(struct pp_hwmgr *hwmgr,
		uint8_t *table, int16_t table_id)
{
	struct vega20_smumgr *priv =
			(struct vega20_smumgr *)(hwmgr->smu_backend);
	int ret = 0;

	PP_ASSERT_WITH_CODE(table_id < TABLE_COUNT,
			"Invalid SMU Table ID!", return -EINVAL);
	PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0,
			"Invalid SMU Table version!", return -EINVAL);
	PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
			"Invalid SMU Table Length!", return -EINVAL);

	PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrHigh,
			upper_32_bits(priv->smu_tables.entry[table_id].mc_addr))) == 0,
			"[CopyTableFromSMC] Attempt to Set Dram Addr High Failed!",
			return ret);
	PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrLow,
			lower_32_bits(priv->smu_tables.entry[table_id].mc_addr))) == 0,
			"[CopyTableFromSMC] Attempt to Set Dram Addr Low Failed!",
			return ret);
	PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_TransferTableSmu2Dram, table_id)) == 0,
			"[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!",
			return ret);

	memcpy(table, priv->smu_tables.entry[table_id].table,
			priv->smu_tables.entry[table_id].size);

	return 0;
}

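/*
 * Note: the two copy helpers above and below are static and are only
 * reached through the ->smc_table_manager hook at the bottom of this
 * file. An illustrative call from hwmgr code (a sketch, assuming a
 * Watermarks_t wm_table in the caller) would look like:
 *
 *	smum_smc_table_manager(hwmgr, (uint8_t *)&wm_table,
 *			TABLE_WATERMARKS, true);
 *
 * with rw == true selecting the SMC-to-driver direction.
 */
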
/*
 * Copy a table from the driver FB into the SMC.
 * @param hwmgr the address of the HW manager
 * @param table the address of the driver's table buffer
 * @param table_id the ID of the table to copy to the SMC
 */
static int vega20_copy_table_to_smc(struct pp_hwmgr *hwmgr,
		uint8_t *table, int16_t table_id)
{
	struct vega20_smumgr *priv =
			(struct vega20_smumgr *)(hwmgr->smu_backend);
	int ret = 0;

	PP_ASSERT_WITH_CODE(table_id < TABLE_COUNT,
			"Invalid SMU Table ID!", return -EINVAL);
	PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0,
			"Invalid SMU Table version!", return -EINVAL);
	PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
			"Invalid SMU Table Length!", return -EINVAL);

	memcpy(priv->smu_tables.entry[table_id].table, table,
			priv->smu_tables.entry[table_id].size);

	PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrHigh,
			upper_32_bits(priv->smu_tables.entry[table_id].mc_addr))) == 0,
			"[CopyTableToSMC] Attempt to Set Dram Addr High Failed!",
			return ret);
	PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrLow,
			lower_32_bits(priv->smu_tables.entry[table_id].mc_addr))) == 0,
			"[CopyTableToSMC] Attempt to Set Dram Addr Low Failed!",
			return ret);
	PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_TransferTableDram2Smu, table_id)) == 0,
			"[CopyTableToSMC] Attempt to Transfer Table To SMU Failed!",
			return ret);

	return 0;
}

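/*
 * Write the activity monitor (workload) coefficient table to the SMC.
 * The transfer parameter packs the table ID into the low 16 bits and the
 * workload type into the high 16 bits.
 */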
int vega20_set_activity_monitor_coeff(struct pp_hwmgr *hwmgr,
		uint8_t *table, uint16_t workload_type)
{
	struct vega20_smumgr *priv =
			(struct vega20_smumgr *)(hwmgr->smu_backend);
	int ret = 0;

	memcpy(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table, table,
			priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size);

	PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrHigh,
			upper_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr))) == 0,
			"[SetActivityMonitor] Attempt to Set Dram Addr High Failed!",
			return ret);
	PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrLow,
			lower_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr))) == 0,
			"[SetActivityMonitor] Attempt to Set Dram Addr Low Failed!",
			return ret);
	PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_TransferTableDram2Smu,
			TABLE_ACTIVITY_MONITOR_COEFF | (workload_type << 16))) == 0,
			"[SetActivityMonitor] Attempt to Transfer Table To SMU Failed!",
			return ret);

	return 0;
}

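/*
 * Read the activity monitor coefficient table for the given workload back
 * from the SMC; the inverse of the SET helper above, using the same
 * table-ID/workload-type encoding.
 */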
int vega20_get_activity_monitor_coeff(struct pp_hwmgr *hwmgr,
		uint8_t *table, uint16_t workload_type)
{
	struct vega20_smumgr *priv =
			(struct vega20_smumgr *)(hwmgr->smu_backend);
	int ret = 0;

	PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrHigh,
			upper_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr))) == 0,
			"[GetActivityMonitor] Attempt to Set Dram Addr High Failed!",
			return ret);
	PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrLow,
			lower_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr))) == 0,
			"[GetActivityMonitor] Attempt to Set Dram Addr Low Failed!",
			return ret);
	PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_TransferTableSmu2Dram,
			TABLE_ACTIVITY_MONITOR_COEFF | (workload_type << 16))) == 0,
			"[GetActivityMonitor] Attempt to Transfer Table From SMU Failed!",
			return ret);

	memcpy(table, priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table,
			priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size);

	return 0;
}

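/*
 * Enable or disable the features selected by the 64-bit feature_mask.
 * The mask is split into 32-bit halves because each PPSMC message can
 * only carry one 32-bit parameter. A caller would typically build the
 * mask from a feature bit index, for example (illustrative only, using
 * the GNLD_* feature enums from vega20_hwmgr.h):
 *
 *	vega20_enable_smc_features(hwmgr, true, 1ULL << GNLD_DPM_UCLK);
 */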
int vega20_enable_smc_features(struct pp_hwmgr *hwmgr,
		bool enable, uint64_t feature_mask)
{
	uint32_t smu_features_low, smu_features_high;
	int ret = 0;

	smu_features_low = (uint32_t)((feature_mask & SMU_FEATURES_LOW_MASK) >>
			SMU_FEATURES_LOW_SHIFT);
	smu_features_high = (uint32_t)((feature_mask & SMU_FEATURES_HIGH_MASK) >>
			SMU_FEATURES_HIGH_SHIFT);

	if (enable) {
		PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_EnableSmuFeaturesLow, smu_features_low)) == 0,
				"[EnableDisableSMCFeatures] Attempt to enable SMU features Low failed!",
				return ret);
		PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_EnableSmuFeaturesHigh, smu_features_high)) == 0,
				"[EnableDisableSMCFeatures] Attempt to enable SMU features High failed!",
				return ret);
	} else {
		PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_DisableSmuFeaturesLow, smu_features_low)) == 0,
				"[EnableDisableSMCFeatures] Attempt to disable SMU features Low failed!",
				return ret);
		PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_DisableSmuFeaturesHigh, smu_features_high)) == 0,
				"[EnableDisableSMCFeatures] Attempt to disable SMU features High failed!",
				return ret);
	}

	return 0;
}

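/*
 * Query which SMC features are currently enabled. The SMC reports the
 * low and high 32-bit halves through two separate messages, each result
 * being read back from the argument register, and the halves are then
 * reassembled into a single 64-bit bitmask.
 */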
int vega20_get_enabled_smc_features(struct pp_hwmgr *hwmgr,
		uint64_t *features_enabled)
{
	uint32_t smc_features_low, smc_features_high;
	int ret = 0;

	if (features_enabled == NULL)
		return -EINVAL;

	PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc(hwmgr,
			PPSMC_MSG_GetEnabledSmuFeaturesLow)) == 0,
			"[GetEnabledSMCFeatures] Attempt to get SMU features Low failed!",
			return ret);
	smc_features_low = vega20_get_argument(hwmgr);
	PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc(hwmgr,
			PPSMC_MSG_GetEnabledSmuFeaturesHigh)) == 0,
			"[GetEnabledSMCFeatures] Attempt to get SMU features High failed!",
			return ret);
	smc_features_high = vega20_get_argument(hwmgr);

	*features_enabled = ((((uint64_t)smc_features_low << SMU_FEATURES_LOW_SHIFT) & SMU_FEATURES_LOW_MASK) |
			(((uint64_t)smc_features_high << SMU_FEATURES_HIGH_SHIFT) & SMU_FEATURES_HIGH_MASK));

	return 0;
}

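/*
 * Hand the GPU address of the TABLE_PMSTATUSLOG buffer to the SMC so
 * that firmware "tools" (PM status logging) output has somewhere to
 * land. Silently skipped if the buffer was never allocated.
 */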
static int vega20_set_tools_address(struct pp_hwmgr *hwmgr)
{
	struct vega20_smumgr *priv =
			(struct vega20_smumgr *)(hwmgr->smu_backend);
	int ret = 0;

	if (priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr) {
		ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetToolsDramAddrHigh,
				upper_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr));
		if (!ret)
			ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_SetToolsDramAddrLow,
					lower_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr));
	}

	return ret;
}

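/*
 * Allocate the driver-side copies of the SMU tables. Each entry gets a
 * VRAM buffer object with both a CPU mapping (.table) and a GPU address
 * (.mc_addr); the latter is what the SetDriverDramAddrHigh/Low messages
 * point the SMC at before a table transfer.
 */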
static int vega20_smu_init(struct pp_hwmgr *hwmgr)
{
	struct vega20_smumgr *priv;
	unsigned long tools_size = 0x19000;
	struct cgs_firmware_info info = {0};
	int ret = 0;

	ret = cgs_get_firmware_info(hwmgr->device,
			smu7_convert_fw_type_to_cgs(UCODE_ID_SMU),
			&info);
	if (ret || !info.kptr)
		return -EINVAL;

	priv = kzalloc(sizeof(struct vega20_smumgr), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	hwmgr->smu_backend = priv;

	/* allocate space for pptable */
	ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
			sizeof(PPTable_t),
			PAGE_SIZE,
			AMDGPU_GEM_DOMAIN_VRAM,
			&priv->smu_tables.entry[TABLE_PPTABLE].handle,
			&priv->smu_tables.entry[TABLE_PPTABLE].mc_addr,
			&priv->smu_tables.entry[TABLE_PPTABLE].table);
	if (ret)
		goto free_backend;

	priv->smu_tables.entry[TABLE_PPTABLE].version = 0x01;
	priv->smu_tables.entry[TABLE_PPTABLE].size = sizeof(PPTable_t);

	/* allocate space for watermarks table */
	ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
			sizeof(Watermarks_t),
			PAGE_SIZE,
			AMDGPU_GEM_DOMAIN_VRAM,
			&priv->smu_tables.entry[TABLE_WATERMARKS].handle,
			&priv->smu_tables.entry[TABLE_WATERMARKS].mc_addr,
			&priv->smu_tables.entry[TABLE_WATERMARKS].table);
	if (ret)
		goto err0;

	priv->smu_tables.entry[TABLE_WATERMARKS].version = 0x01;
	priv->smu_tables.entry[TABLE_WATERMARKS].size = sizeof(Watermarks_t);

	/* allocate space for pmstatuslog table */
	ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
			tools_size,
			PAGE_SIZE,
			AMDGPU_GEM_DOMAIN_VRAM,
			&priv->smu_tables.entry[TABLE_PMSTATUSLOG].handle,
			&priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr,
			&priv->smu_tables.entry[TABLE_PMSTATUSLOG].table);
	if (ret)
		goto err1;

	priv->smu_tables.entry[TABLE_PMSTATUSLOG].version = 0x01;
	priv->smu_tables.entry[TABLE_PMSTATUSLOG].size = tools_size;

	/* allocate space for OverDrive table */
	ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
			sizeof(OverDriveTable_t),
			PAGE_SIZE,
			AMDGPU_GEM_DOMAIN_VRAM,
			&priv->smu_tables.entry[TABLE_OVERDRIVE].handle,
			&priv->smu_tables.entry[TABLE_OVERDRIVE].mc_addr,
			&priv->smu_tables.entry[TABLE_OVERDRIVE].table);
	if (ret)
		goto err2;

	priv->smu_tables.entry[TABLE_OVERDRIVE].version = 0x01;
	priv->smu_tables.entry[TABLE_OVERDRIVE].size = sizeof(OverDriveTable_t);

	/* allocate space for SmuMetrics table */
	ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
			sizeof(SmuMetrics_t),
			PAGE_SIZE,
			AMDGPU_GEM_DOMAIN_VRAM,
			&priv->smu_tables.entry[TABLE_SMU_METRICS].handle,
			&priv->smu_tables.entry[TABLE_SMU_METRICS].mc_addr,
			&priv->smu_tables.entry[TABLE_SMU_METRICS].table);
	if (ret)
		goto err3;

	priv->smu_tables.entry[TABLE_SMU_METRICS].version = 0x01;
	priv->smu_tables.entry[TABLE_SMU_METRICS].size = sizeof(SmuMetrics_t);

	/* allocate space for ActivityMonitor table */
	ret = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
			sizeof(DpmActivityMonitorCoeffInt_t),
			PAGE_SIZE,
			AMDGPU_GEM_DOMAIN_VRAM,
			&priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].handle,
			&priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr,
			&priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table);
	if (ret)
		goto err4;

	priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].version = 0x01;
	priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size =
			sizeof(DpmActivityMonitorCoeffInt_t);

	return 0;

err4:
	amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_SMU_METRICS].handle,
			&priv->smu_tables.entry[TABLE_SMU_METRICS].mc_addr,
			&priv->smu_tables.entry[TABLE_SMU_METRICS].table);
err3:
	amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_OVERDRIVE].handle,
			&priv->smu_tables.entry[TABLE_OVERDRIVE].mc_addr,
			&priv->smu_tables.entry[TABLE_OVERDRIVE].table);
err2:
	amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_PMSTATUSLOG].handle,
			&priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr,
			&priv->smu_tables.entry[TABLE_PMSTATUSLOG].table);
err1:
	amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_WATERMARKS].handle,
			&priv->smu_tables.entry[TABLE_WATERMARKS].mc_addr,
			&priv->smu_tables.entry[TABLE_WATERMARKS].table);
err0:
	amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_PPTABLE].handle,
			&priv->smu_tables.entry[TABLE_PPTABLE].mc_addr,
			&priv->smu_tables.entry[TABLE_PPTABLE].table);
free_backend:
	kfree(hwmgr->smu_backend);
	hwmgr->smu_backend = NULL;

	return ret;
}

static int vega20_smu_fini(struct pp_hwmgr *hwmgr)
{
	struct vega20_smumgr *priv =
			(struct vega20_smumgr *)(hwmgr->smu_backend);

	if (priv) {
		amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_PPTABLE].handle,
				&priv->smu_tables.entry[TABLE_PPTABLE].mc_addr,
				&priv->smu_tables.entry[TABLE_PPTABLE].table);
		amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_WATERMARKS].handle,
				&priv->smu_tables.entry[TABLE_WATERMARKS].mc_addr,
				&priv->smu_tables.entry[TABLE_WATERMARKS].table);
		amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_PMSTATUSLOG].handle,
				&priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr,
				&priv->smu_tables.entry[TABLE_PMSTATUSLOG].table);
		amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_OVERDRIVE].handle,
				&priv->smu_tables.entry[TABLE_OVERDRIVE].mc_addr,
				&priv->smu_tables.entry[TABLE_OVERDRIVE].table);
		amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_SMU_METRICS].handle,
				&priv->smu_tables.entry[TABLE_SMU_METRICS].mc_addr,
				&priv->smu_tables.entry[TABLE_SMU_METRICS].table);
		amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].handle,
				&priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr,
				&priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table);
		kfree(hwmgr->smu_backend);
		hwmgr->smu_backend = NULL;
	}
	return 0;
}

static int vega20_start_smu(struct pp_hwmgr *hwmgr)
{
	int ret;

	ret = vega20_is_smc_ram_running(hwmgr);
	PP_ASSERT_WITH_CODE(ret,
			"[Vega20StartSmu] SMC is not running!",
			return -EINVAL);

	ret = vega20_set_tools_address(hwmgr);
	PP_ASSERT_WITH_CODE(!ret,
			"[Vega20StartSmu] Failed to set tools address!",
			return ret);

	return 0;
}

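/*
 * DPM is considered running if any of the feature bits grouped under
 * SMC_DPM_FEATURES is set in the SMC's enabled-feature mask.
 */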
static bool vega20_is_dpm_running(struct pp_hwmgr *hwmgr)
{
	uint64_t features_enabled = 0;

	vega20_get_enabled_smc_features(hwmgr, &features_enabled);

	if (features_enabled & SMC_DPM_FEATURES)
		return true;
	else
		return false;
}

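/*
 * Single entry point for table transfers, exposed via pp_smumgr_func:
 * rw == true copies the table from the SMC into the caller's buffer,
 * rw == false copies the caller's buffer out to the SMC.
 */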
static int vega20_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table,
		uint16_t table_id, bool rw)
{
	int ret;

	if (rw)
		ret = vega20_copy_table_from_smc(hwmgr, table, table_id);
	else
		ret = vega20_copy_table_to_smc(hwmgr, table, table_id);

	return ret;
}

const struct pp_smumgr_func vega20_smu_funcs = {
	.smu_init = &vega20_smu_init,
	.smu_fini = &vega20_smu_fini,
	.start_smu = &vega20_start_smu,
	.request_smu_load_specific_fw = NULL,
	.send_msg_to_smc = &vega20_send_msg_to_smc,
	.send_msg_to_smc_with_parameter = &vega20_send_msg_to_smc_with_parameter,
	.download_pptable_settings = NULL,
	.upload_pptable_settings = NULL,
	.is_dpm_running = vega20_is_dpm_running,
	.get_argument = vega20_get_argument,
	.smc_table_manager = vega20_smc_table_manager,
};
