1 | /* |
2 | * Copyright 2016 Advanced Micro Devices, Inc. |
3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), |
6 | * to deal in the Software without restriction, including without limitation |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
8 | * and/or sell copies of the Software, and to permit persons to whom the |
9 | * Software is furnished to do so, subject to the following conditions: |
10 | * |
11 | * The above copyright notice and this permission notice shall be included in |
12 | * all copies or substantial portions of the Software. |
13 | * |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
20 | * OTHER DEALINGS IN THE SOFTWARE. |
21 | * |
22 | */ |
23 | |
24 | #include <linux/delay.h> |
25 | #include <linux/fb.h> |
26 | #include <linux/module.h> |
27 | #include <linux/slab.h> |
28 | |
29 | #include "hwmgr.h" |
30 | #include "amd_powerplay.h" |
31 | #include "hardwaremanager.h" |
32 | #include "ppatomfwctrl.h" |
33 | #include "atomfirmware.h" |
34 | #include "cgs_common.h" |
35 | #include "vega10_powertune.h" |
36 | #include "smu9.h" |
37 | #include "smu9_driver_if.h" |
38 | #include "vega10_inc.h" |
39 | #include "soc15_common.h" |
40 | #include "pppcielanes.h" |
41 | #include "vega10_hwmgr.h" |
42 | #include "vega10_smumgr.h" |
43 | #include "vega10_processpptables.h" |
44 | #include "vega10_pptable.h" |
45 | #include "vega10_thermal.h" |
46 | #include "pp_debug.h" |
47 | #include "amd_pcie_helpers.h" |
48 | #include "ppinterrupt.h" |
49 | #include "pp_overdriver.h" |
50 | #include "pp_thermal.h" |
51 | #include "vega10_baco.h" |
52 | |
53 | #include "smuio/smuio_9_0_offset.h" |
54 | #include "smuio/smuio_9_0_sh_mask.h" |
55 | |
56 | #define HBM_MEMORY_CHANNEL_WIDTH 128 |
57 | |
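/*
 * Number of memory channels for each encoding of the DF IntLvNumChan
 * field (see the DF_CS_AON0_DramBaseAddress0 definitions below);
 * data->mem_channels is bounds-checked against this array.
 */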
58 | static const uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2}; |
59 | |
60 | #define mmDF_CS_AON0_DramBaseAddress0 0x0044 |
61 | #define mmDF_CS_AON0_DramBaseAddress0_BASE_IDX 0 |
62 | |
63 | //DF_CS_AON0_DramBaseAddress0 |
64 | #define DF_CS_AON0_DramBaseAddress0__AddrRngVal__SHIFT 0x0 |
65 | #define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn__SHIFT 0x1 |
66 | #define DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT 0x4 |
67 | #define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel__SHIFT 0x8 |
68 | #define DF_CS_AON0_DramBaseAddress0__DramBaseAddr__SHIFT 0xc |
69 | #define DF_CS_AON0_DramBaseAddress0__AddrRngVal_MASK 0x00000001L |
70 | #define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn_MASK 0x00000002L |
71 | #define DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK 0x000000F0L |
72 | #define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK 0x00000700L |
73 | #define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK 0xFFFFF000L |
74 | |
75 | typedef enum { |
76 | CLK_SMNCLK = 0, |
77 | CLK_SOCCLK, |
78 | CLK_MP0CLK, |
79 | CLK_MP1CLK, |
80 | CLK_LCLK, |
81 | CLK_DCEFCLK, |
82 | CLK_VCLK, |
83 | CLK_DCLK, |
84 | CLK_ECLK, |
85 | CLK_UCLK, |
86 | CLK_GFXCLK, |
87 | CLK_COUNT, |
88 | } CLOCK_ID_e; |
89 | |
90 | static const ULONG PhwVega10_Magic = (ULONG)(PHM_VIslands_Magic); |
91 | |
92 | struct vega10_power_state *cast_phw_vega10_power_state( |
93 | struct pp_hw_power_state *hw_ps) |
94 | { |
	PP_ASSERT_WITH_CODE((PhwVega10_Magic == hw_ps->magic),
				"Invalid Powerstate Type!",
97 | return NULL;); |
98 | |
99 | return (struct vega10_power_state *)hw_ps; |
100 | } |
101 | |
102 | const struct vega10_power_state *cast_const_phw_vega10_power_state( |
103 | const struct pp_hw_power_state *hw_ps) |
104 | { |
	PP_ASSERT_WITH_CODE((PhwVega10_Magic == hw_ps->magic),
				"Invalid Powerstate Type!",
107 | return NULL;); |
108 | |
109 | return (const struct vega10_power_state *)hw_ps; |
110 | } |
111 | |
112 | static void vega10_set_default_registry_data(struct pp_hwmgr *hwmgr) |
113 | { |
114 | struct vega10_hwmgr *data = hwmgr->backend; |
115 | |
116 | data->registry_data.sclk_dpm_key_disabled = |
117 | hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true; |
118 | data->registry_data.socclk_dpm_key_disabled = |
119 | hwmgr->feature_mask & PP_SOCCLK_DPM_MASK ? false : true; |
120 | data->registry_data.mclk_dpm_key_disabled = |
121 | hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true; |
122 | data->registry_data.pcie_dpm_key_disabled = |
123 | hwmgr->feature_mask & PP_PCIE_DPM_MASK ? false : true; |
124 | |
125 | data->registry_data.dcefclk_dpm_key_disabled = |
126 | hwmgr->feature_mask & PP_DCEFCLK_DPM_MASK ? false : true; |
127 | |
128 | if (hwmgr->feature_mask & PP_POWER_CONTAINMENT_MASK) { |
129 | data->registry_data.power_containment_support = 1; |
130 | data->registry_data.enable_pkg_pwr_tracking_feature = 1; |
131 | data->registry_data.enable_tdc_limit_feature = 1; |
132 | } |
133 | |
134 | data->registry_data.clock_stretcher_support = |
135 | hwmgr->feature_mask & PP_CLOCK_STRETCH_MASK ? true : false; |
136 | |
137 | data->registry_data.ulv_support = |
138 | hwmgr->feature_mask & PP_ULV_MASK ? true : false; |
139 | |
140 | data->registry_data.sclk_deep_sleep_support = |
141 | hwmgr->feature_mask & PP_SCLK_DEEP_SLEEP_MASK ? true : false; |
142 | |
143 | data->registry_data.disable_water_mark = 0; |
144 | |
145 | data->registry_data.fan_control_support = 1; |
146 | data->registry_data.thermal_support = 1; |
147 | data->registry_data.fw_ctf_enabled = 1; |
148 | |
149 | data->registry_data.avfs_support = |
150 | hwmgr->feature_mask & PP_AVFS_MASK ? true : false; |
151 | data->registry_data.led_dpm_enabled = 1; |
152 | |
153 | data->registry_data.vr0hot_enabled = 1; |
154 | data->registry_data.vr1hot_enabled = 1; |
155 | data->registry_data.regulator_hot_gpio_support = 1; |
156 | |
157 | data->registry_data.didt_support = 1; |
158 | if (data->registry_data.didt_support) { |
159 | data->registry_data.didt_mode = 6; |
160 | data->registry_data.sq_ramping_support = 1; |
161 | data->registry_data.db_ramping_support = 0; |
162 | data->registry_data.td_ramping_support = 0; |
163 | data->registry_data.tcp_ramping_support = 0; |
164 | data->registry_data.dbr_ramping_support = 0; |
165 | data->registry_data.edc_didt_support = 1; |
166 | data->registry_data.gc_didt_support = 0; |
167 | data->registry_data.psm_didt_support = 0; |
168 | } |
169 | |
170 | data->display_voltage_mode = PPVEGA10_VEGA10DISPLAYVOLTAGEMODE_DFLT; |
171 | data->dcef_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT; |
172 | data->dcef_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT; |
173 | data->dcef_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT; |
174 | data->disp_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT; |
175 | data->disp_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT; |
176 | data->disp_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT; |
177 | data->pixel_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT; |
178 | data->pixel_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT; |
179 | data->pixel_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT; |
180 | data->phy_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT; |
181 | data->phy_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT; |
182 | data->phy_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT; |
183 | |
184 | data->gfxclk_average_alpha = PPVEGA10_VEGA10GFXCLKAVERAGEALPHA_DFLT; |
185 | data->socclk_average_alpha = PPVEGA10_VEGA10SOCCLKAVERAGEALPHA_DFLT; |
186 | data->uclk_average_alpha = PPVEGA10_VEGA10UCLKCLKAVERAGEALPHA_DFLT; |
187 | data->gfx_activity_average_alpha = PPVEGA10_VEGA10GFXACTIVITYAVERAGEALPHA_DFLT; |
188 | } |
189 | |
190 | static int vega10_set_features_platform_caps(struct pp_hwmgr *hwmgr) |
191 | { |
192 | struct vega10_hwmgr *data = hwmgr->backend; |
193 | struct phm_ppt_v2_information *table_info = |
194 | (struct phm_ppt_v2_information *)hwmgr->pptable; |
195 | struct amdgpu_device *adev = hwmgr->adev; |
196 | |
197 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, |
198 | PHM_PlatformCaps_SclkDeepSleep); |
199 | |
200 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, |
201 | PHM_PlatformCaps_DynamicPatchPowerState); |
202 | |
203 | if (data->vddci_control == VEGA10_VOLTAGE_CONTROL_NONE) |
204 | phm_cap_unset(hwmgr->platform_descriptor.platformCaps, |
205 | PHM_PlatformCaps_ControlVDDCI); |
206 | |
207 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, |
208 | PHM_PlatformCaps_EnableSMU7ThermalManagement); |
209 | |
210 | if (adev->pg_flags & AMD_PG_SUPPORT_UVD) |
211 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, |
212 | PHM_PlatformCaps_UVDPowerGating); |
213 | |
214 | if (adev->pg_flags & AMD_PG_SUPPORT_VCE) |
215 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, |
216 | PHM_PlatformCaps_VCEPowerGating); |
217 | |
218 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, |
219 | PHM_PlatformCaps_UnTabledHardwareInterface); |
220 | |
221 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, |
222 | PHM_PlatformCaps_FanSpeedInTableIsRPM); |
223 | |
224 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, |
225 | PHM_PlatformCaps_ODFuzzyFanControlSupport); |
226 | |
227 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, |
228 | PHM_PlatformCaps_DynamicPowerManagement); |
229 | |
230 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, |
231 | PHM_PlatformCaps_SMC); |
232 | |
233 | /* power tune caps */ |
234 | /* assume disabled */ |
235 | phm_cap_unset(hwmgr->platform_descriptor.platformCaps, |
236 | PHM_PlatformCaps_PowerContainment); |
237 | phm_cap_unset(hwmgr->platform_descriptor.platformCaps, |
238 | PHM_PlatformCaps_DiDtSupport); |
239 | phm_cap_unset(hwmgr->platform_descriptor.platformCaps, |
240 | PHM_PlatformCaps_SQRamping); |
241 | phm_cap_unset(hwmgr->platform_descriptor.platformCaps, |
242 | PHM_PlatformCaps_DBRamping); |
243 | phm_cap_unset(hwmgr->platform_descriptor.platformCaps, |
244 | PHM_PlatformCaps_TDRamping); |
245 | phm_cap_unset(hwmgr->platform_descriptor.platformCaps, |
246 | PHM_PlatformCaps_TCPRamping); |
247 | phm_cap_unset(hwmgr->platform_descriptor.platformCaps, |
248 | PHM_PlatformCaps_DBRRamping); |
249 | phm_cap_unset(hwmgr->platform_descriptor.platformCaps, |
250 | PHM_PlatformCaps_DiDtEDCEnable); |
251 | phm_cap_unset(hwmgr->platform_descriptor.platformCaps, |
252 | PHM_PlatformCaps_GCEDC); |
253 | phm_cap_unset(hwmgr->platform_descriptor.platformCaps, |
254 | PHM_PlatformCaps_PSM); |
255 | |
256 | if (data->registry_data.didt_support) { |
257 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtSupport); |
258 | if (data->registry_data.sq_ramping_support) |
259 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping); |
260 | if (data->registry_data.db_ramping_support) |
261 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping); |
262 | if (data->registry_data.td_ramping_support) |
263 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping); |
264 | if (data->registry_data.tcp_ramping_support) |
265 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping); |
266 | if (data->registry_data.dbr_ramping_support) |
267 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRRamping); |
268 | if (data->registry_data.edc_didt_support) |
269 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtEDCEnable); |
270 | if (data->registry_data.gc_didt_support) |
271 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC); |
272 | if (data->registry_data.psm_didt_support) |
273 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM); |
274 | } |
275 | |
276 | if (data->registry_data.power_containment_support) |
277 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, |
278 | PHM_PlatformCaps_PowerContainment); |
279 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, |
280 | PHM_PlatformCaps_CAC); |
281 | |
282 | if (table_info->tdp_table->usClockStretchAmount && |
283 | data->registry_data.clock_stretcher_support) |
284 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, |
285 | PHM_PlatformCaps_ClockStretcher); |
286 | |
287 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, |
288 | PHM_PlatformCaps_RegulatorHot); |
289 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, |
290 | PHM_PlatformCaps_AutomaticDCTransition); |
291 | |
292 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, |
293 | PHM_PlatformCaps_UVDDPM); |
294 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, |
295 | PHM_PlatformCaps_VCEDPM); |
296 | |
297 | return 0; |
298 | } |
299 | |
300 | static int vega10_odn_initial_default_setting(struct pp_hwmgr *hwmgr) |
301 | { |
302 | struct vega10_hwmgr *data = hwmgr->backend; |
303 | struct phm_ppt_v2_information *table_info = |
304 | (struct phm_ppt_v2_information *)(hwmgr->pptable); |
305 | struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table); |
306 | struct vega10_odn_vddc_lookup_table *od_lookup_table; |
307 | struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table; |
308 | struct phm_ppt_v1_clock_voltage_dependency_table *dep_table[3]; |
309 | struct phm_ppt_v1_clock_voltage_dependency_table *od_table[3]; |
310 | struct pp_atomfwctrl_avfs_parameters avfs_params = {0}; |
311 | uint32_t i; |
312 | int result; |
313 | |
314 | result = pp_atomfwctrl_get_avfs_information(hwmgr, &avfs_params); |
315 | if (!result) { |
316 | data->odn_dpm_table.max_vddc = avfs_params.ulMaxVddc; |
317 | data->odn_dpm_table.min_vddc = avfs_params.ulMinVddc; |
318 | } |
319 | |
320 | od_lookup_table = &odn_table->vddc_lookup_table; |
321 | vddc_lookup_table = table_info->vddc_lookup_table; |
322 | |
323 | for (i = 0; i < vddc_lookup_table->count; i++) |
324 | od_lookup_table->entries[i].us_vdd = vddc_lookup_table->entries[i].us_vdd; |
325 | |
326 | od_lookup_table->count = vddc_lookup_table->count; |
327 | |
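	/*
	 * Seed the ODN dependency tables from the stock sclk/mclk/socclk
	 * dependency tables parsed from the pptable.
	 */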
328 | dep_table[0] = table_info->vdd_dep_on_sclk; |
329 | dep_table[1] = table_info->vdd_dep_on_mclk; |
330 | dep_table[2] = table_info->vdd_dep_on_socclk; |
331 | od_table[0] = (struct phm_ppt_v1_clock_voltage_dependency_table *)&odn_table->vdd_dep_on_sclk; |
332 | od_table[1] = (struct phm_ppt_v1_clock_voltage_dependency_table *)&odn_table->vdd_dep_on_mclk; |
333 | od_table[2] = (struct phm_ppt_v1_clock_voltage_dependency_table *)&odn_table->vdd_dep_on_socclk; |
334 | |
335 | for (i = 0; i < 3; i++) |
336 | smu_get_voltage_dependency_table_ppt_v1(dep_table[i], od_table[i]); |
337 | |
338 | if (odn_table->max_vddc == 0 || odn_table->max_vddc > 2000) |
339 | odn_table->max_vddc = dep_table[0]->entries[dep_table[0]->count - 1].vddc; |
340 | if (odn_table->min_vddc == 0 || odn_table->min_vddc > 2000) |
341 | odn_table->min_vddc = dep_table[0]->entries[0].vddc; |
342 | |
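	/*
	 * Raise the highest socclk OD entry so that it covers at least the
	 * overdrive memory clock limit and the maximum VDDC.
	 */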
343 | i = od_table[2]->count - 1; |
344 | od_table[2]->entries[i].clk = hwmgr->platform_descriptor.overdriveLimit.memoryClock > od_table[2]->entries[i].clk ? |
345 | hwmgr->platform_descriptor.overdriveLimit.memoryClock : |
346 | od_table[2]->entries[i].clk; |
347 | od_table[2]->entries[i].vddc = odn_table->max_vddc > od_table[2]->entries[i].vddc ? |
348 | odn_table->max_vddc : |
349 | od_table[2]->entries[i].vddc; |
350 | |
351 | return 0; |
352 | } |
353 | |
354 | static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr) |
355 | { |
356 | struct vega10_hwmgr *data = hwmgr->backend; |
357 | int i; |
358 | uint32_t sub_vendor_id, hw_revision; |
359 | struct amdgpu_device *adev = hwmgr->adev; |
360 | |
361 | vega10_initialize_power_tune_defaults(hwmgr); |
362 | |
363 | for (i = 0; i < GNLD_FEATURES_MAX; i++) { |
364 | data->smu_features[i].smu_feature_id = 0xffff; |
365 | data->smu_features[i].smu_feature_bitmap = 1 << i; |
366 | data->smu_features[i].enabled = false; |
367 | data->smu_features[i].supported = false; |
368 | } |
369 | |
370 | data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_id = |
371 | FEATURE_DPM_PREFETCHER_BIT; |
372 | data->smu_features[GNLD_DPM_GFXCLK].smu_feature_id = |
373 | FEATURE_DPM_GFXCLK_BIT; |
374 | data->smu_features[GNLD_DPM_UCLK].smu_feature_id = |
375 | FEATURE_DPM_UCLK_BIT; |
376 | data->smu_features[GNLD_DPM_SOCCLK].smu_feature_id = |
377 | FEATURE_DPM_SOCCLK_BIT; |
378 | data->smu_features[GNLD_DPM_UVD].smu_feature_id = |
379 | FEATURE_DPM_UVD_BIT; |
380 | data->smu_features[GNLD_DPM_VCE].smu_feature_id = |
381 | FEATURE_DPM_VCE_BIT; |
382 | data->smu_features[GNLD_DPM_MP0CLK].smu_feature_id = |
383 | FEATURE_DPM_MP0CLK_BIT; |
384 | data->smu_features[GNLD_DPM_LINK].smu_feature_id = |
385 | FEATURE_DPM_LINK_BIT; |
386 | data->smu_features[GNLD_DPM_DCEFCLK].smu_feature_id = |
387 | FEATURE_DPM_DCEFCLK_BIT; |
388 | data->smu_features[GNLD_ULV].smu_feature_id = |
389 | FEATURE_ULV_BIT; |
390 | data->smu_features[GNLD_AVFS].smu_feature_id = |
391 | FEATURE_AVFS_BIT; |
392 | data->smu_features[GNLD_DS_GFXCLK].smu_feature_id = |
393 | FEATURE_DS_GFXCLK_BIT; |
394 | data->smu_features[GNLD_DS_SOCCLK].smu_feature_id = |
395 | FEATURE_DS_SOCCLK_BIT; |
396 | data->smu_features[GNLD_DS_LCLK].smu_feature_id = |
397 | FEATURE_DS_LCLK_BIT; |
398 | data->smu_features[GNLD_PPT].smu_feature_id = |
399 | FEATURE_PPT_BIT; |
400 | data->smu_features[GNLD_TDC].smu_feature_id = |
401 | FEATURE_TDC_BIT; |
402 | data->smu_features[GNLD_THERMAL].smu_feature_id = |
403 | FEATURE_THERMAL_BIT; |
404 | data->smu_features[GNLD_GFX_PER_CU_CG].smu_feature_id = |
405 | FEATURE_GFX_PER_CU_CG_BIT; |
406 | data->smu_features[GNLD_RM].smu_feature_id = |
407 | FEATURE_RM_BIT; |
408 | data->smu_features[GNLD_DS_DCEFCLK].smu_feature_id = |
409 | FEATURE_DS_DCEFCLK_BIT; |
410 | data->smu_features[GNLD_ACDC].smu_feature_id = |
411 | FEATURE_ACDC_BIT; |
412 | data->smu_features[GNLD_VR0HOT].smu_feature_id = |
413 | FEATURE_VR0HOT_BIT; |
414 | data->smu_features[GNLD_VR1HOT].smu_feature_id = |
415 | FEATURE_VR1HOT_BIT; |
416 | data->smu_features[GNLD_FW_CTF].smu_feature_id = |
417 | FEATURE_FW_CTF_BIT; |
418 | data->smu_features[GNLD_LED_DISPLAY].smu_feature_id = |
419 | FEATURE_LED_DISPLAY_BIT; |
420 | data->smu_features[GNLD_FAN_CONTROL].smu_feature_id = |
421 | FEATURE_FAN_CONTROL_BIT; |
422 | data->smu_features[GNLD_ACG].smu_feature_id = FEATURE_ACG_BIT; |
423 | data->smu_features[GNLD_DIDT].smu_feature_id = FEATURE_GFX_EDC_BIT; |
424 | data->smu_features[GNLD_PCC_LIMIT].smu_feature_id = FEATURE_PCC_LIMIT_CONTROL_BIT; |
425 | |
426 | if (!data->registry_data.prefetcher_dpm_key_disabled) |
427 | data->smu_features[GNLD_DPM_PREFETCHER].supported = true; |
428 | |
429 | if (!data->registry_data.sclk_dpm_key_disabled) |
430 | data->smu_features[GNLD_DPM_GFXCLK].supported = true; |
431 | |
432 | if (!data->registry_data.mclk_dpm_key_disabled) |
433 | data->smu_features[GNLD_DPM_UCLK].supported = true; |
434 | |
435 | if (!data->registry_data.socclk_dpm_key_disabled) |
436 | data->smu_features[GNLD_DPM_SOCCLK].supported = true; |
437 | |
438 | if (PP_CAP(PHM_PlatformCaps_UVDDPM)) |
439 | data->smu_features[GNLD_DPM_UVD].supported = true; |
440 | |
441 | if (PP_CAP(PHM_PlatformCaps_VCEDPM)) |
442 | data->smu_features[GNLD_DPM_VCE].supported = true; |
443 | |
444 | if (!data->registry_data.pcie_dpm_key_disabled) |
445 | data->smu_features[GNLD_DPM_LINK].supported = true; |
446 | |
447 | if (!data->registry_data.dcefclk_dpm_key_disabled) |
448 | data->smu_features[GNLD_DPM_DCEFCLK].supported = true; |
449 | |
450 | if (PP_CAP(PHM_PlatformCaps_SclkDeepSleep) && |
451 | data->registry_data.sclk_deep_sleep_support) { |
452 | data->smu_features[GNLD_DS_GFXCLK].supported = true; |
453 | data->smu_features[GNLD_DS_SOCCLK].supported = true; |
454 | data->smu_features[GNLD_DS_LCLK].supported = true; |
455 | data->smu_features[GNLD_DS_DCEFCLK].supported = true; |
456 | } |
457 | |
458 | if (data->registry_data.enable_pkg_pwr_tracking_feature) |
459 | data->smu_features[GNLD_PPT].supported = true; |
460 | |
461 | if (data->registry_data.enable_tdc_limit_feature) |
462 | data->smu_features[GNLD_TDC].supported = true; |
463 | |
464 | if (data->registry_data.thermal_support) |
465 | data->smu_features[GNLD_THERMAL].supported = true; |
466 | |
467 | if (data->registry_data.fan_control_support) |
468 | data->smu_features[GNLD_FAN_CONTROL].supported = true; |
469 | |
470 | if (data->registry_data.fw_ctf_enabled) |
471 | data->smu_features[GNLD_FW_CTF].supported = true; |
472 | |
473 | if (data->registry_data.avfs_support) |
474 | data->smu_features[GNLD_AVFS].supported = true; |
475 | |
476 | if (data->registry_data.led_dpm_enabled) |
477 | data->smu_features[GNLD_LED_DISPLAY].supported = true; |
478 | |
479 | if (data->registry_data.vr1hot_enabled) |
480 | data->smu_features[GNLD_VR1HOT].supported = true; |
481 | |
482 | if (data->registry_data.vr0hot_enabled) |
483 | data->smu_features[GNLD_VR0HOT].supported = true; |
484 | |
485 | smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion); |
486 | hwmgr->smu_version = smum_get_argument(hwmgr); |
487 | /* ACG firmware has major version 5 */ |
488 | if ((hwmgr->smu_version & 0xff000000) == 0x5000000) |
489 | data->smu_features[GNLD_ACG].supported = true; |
490 | if (data->registry_data.didt_support) |
491 | data->smu_features[GNLD_DIDT].supported = true; |
492 | |
493 | hw_revision = adev->pdev->revision; |
494 | sub_vendor_id = adev->pdev->subsystem_vendor; |
495 | |
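	/*
	 * PCC limit control is only exposed on specific SKUs (device IDs
	 * 0x6861/0x6862/0x6868, hw revision 0) from board vendors other
	 * than AMD (subsystem vendor != 0x1002).
	 */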
496 | if ((hwmgr->chip_id == 0x6862 || |
497 | hwmgr->chip_id == 0x6861 || |
498 | hwmgr->chip_id == 0x6868) && |
499 | (hw_revision == 0) && |
500 | (sub_vendor_id != 0x1002)) |
501 | data->smu_features[GNLD_PCC_LIMIT].supported = true; |
502 | } |
503 | |
504 | #ifdef PPLIB_VEGA10_EVV_SUPPORT |
505 | static int vega10_get_socclk_for_voltage_evv(struct pp_hwmgr *hwmgr, |
506 | phm_ppt_v1_voltage_lookup_table *lookup_table, |
507 | uint16_t virtual_voltage_id, int32_t *socclk) |
508 | { |
509 | uint8_t entry_id; |
510 | uint8_t voltage_id; |
511 | struct phm_ppt_v2_information *table_info = |
512 | (struct phm_ppt_v2_information *)(hwmgr->pptable); |
513 | |
	PP_ASSERT_WITH_CODE(lookup_table->count != 0,
			"Lookup table is empty",
516 | return -EINVAL); |
517 | |
	/* search for leakage voltage ID 0xff01 ~ 0xff08 and socclk */
	for (entry_id = 0; entry_id < table_info->vdd_dep_on_socclk->count; entry_id++) {
520 | voltage_id = table_info->vdd_dep_on_socclk->entries[entry_id].vddInd; |
521 | if (lookup_table->entries[voltage_id].us_vdd == virtual_voltage_id) |
522 | break; |
523 | } |
524 | |
	PP_ASSERT_WITH_CODE(entry_id < table_info->vdd_dep_on_socclk->count,
			"Can't find requested voltage id in vdd_dep_on_socclk table!",
527 | return -EINVAL); |
528 | |
529 | *socclk = table_info->vdd_dep_on_socclk->entries[entry_id].clk; |
530 | |
531 | return 0; |
532 | } |
533 | |
534 | #define ATOM_VIRTUAL_VOLTAGE_ID0 0xff01 |
535 | /** |
536 | * Get Leakage VDDC based on leakage ID. |
537 | * |
538 | * @param hwmgr the address of the powerplay hardware manager. |
539 | * @return always 0. |
540 | */ |
541 | static int vega10_get_evv_voltages(struct pp_hwmgr *hwmgr) |
542 | { |
543 | struct vega10_hwmgr *data = hwmgr->backend; |
544 | uint16_t vv_id; |
545 | uint32_t vddc = 0; |
546 | uint16_t i, j; |
547 | uint32_t sclk = 0; |
548 | struct phm_ppt_v2_information *table_info = |
549 | (struct phm_ppt_v2_information *)hwmgr->pptable; |
550 | struct phm_ppt_v1_clock_voltage_dependency_table *socclk_table = |
551 | table_info->vdd_dep_on_socclk; |
552 | int result; |
553 | |
554 | for (i = 0; i < VEGA10_MAX_LEAKAGE_COUNT; i++) { |
555 | vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i; |
556 | |
557 | if (!vega10_get_socclk_for_voltage_evv(hwmgr, |
558 | table_info->vddc_lookup_table, vv_id, &sclk)) { |
559 | if (PP_CAP(PHM_PlatformCaps_ClockStretcher)) { |
560 | for (j = 1; j < socclk_table->count; j++) { |
561 | if (socclk_table->entries[j].clk == sclk && |
562 | socclk_table->entries[j].cks_enable == 0) { |
563 | sclk += 5000; |
564 | break; |
565 | } |
566 | } |
567 | } |
568 | |
569 | PP_ASSERT_WITH_CODE(!atomctrl_get_voltage_evv_on_sclk_ai(hwmgr, |
					VOLTAGE_TYPE_VDDC, sclk, vv_id, &vddc),
					"Error retrieving EVV voltage value!",
572 | continue); |
573 | |
574 | |
			/* need to make sure vddc is less than 2 V, or else it could burn the ASIC. */
			PP_ASSERT_WITH_CODE((vddc < 2000 && vddc != 0),
					"Invalid VDDC value", result = -EINVAL;);
578 | |
579 | /* the voltage should not be zero nor equal to leakage ID */ |
580 | if (vddc != 0 && vddc != vv_id) { |
581 | data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = (uint16_t)(vddc/100); |
582 | data->vddc_leakage.leakage_id[data->vddc_leakage.count] = vv_id; |
583 | data->vddc_leakage.count++; |
584 | } |
585 | } |
586 | } |
587 | |
588 | return 0; |
589 | } |
590 | |
591 | /** |
592 | * Change virtual leakage voltage to actual value. |
593 | * |
594 | * @param hwmgr the address of the powerplay hardware manager. |
 * @param voltage       pointer to the voltage to be patched.
 * @param leakage_table pointer to the leakage voltage table.
597 | */ |
598 | static void vega10_patch_with_vdd_leakage(struct pp_hwmgr *hwmgr, |
599 | uint16_t *voltage, struct vega10_leakage_voltage *leakage_table) |
600 | { |
601 | uint32_t index; |
602 | |
603 | /* search for leakage voltage ID 0xff01 ~ 0xff08 */ |
604 | for (index = 0; index < leakage_table->count; index++) { |
605 | /* if this voltage matches a leakage voltage ID */ |
606 | /* patch with actual leakage voltage */ |
607 | if (leakage_table->leakage_id[index] == *voltage) { |
608 | *voltage = leakage_table->actual_voltage[index]; |
609 | break; |
610 | } |
611 | } |
612 | |
613 | if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0) |
		pr_info("Voltage value looks like a Leakage ID but it's not patched\n");
615 | } |
616 | |
617 | /** |
618 | * Patch voltage lookup table by EVV leakages. |
619 | * |
620 | * @param hwmgr the address of the powerplay hardware manager. |
 * @param lookup_table  pointer to the voltage lookup table.
 * @param leakage_table pointer to the leakage voltage table.
623 | * @return always 0 |
624 | */ |
625 | static int vega10_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr, |
626 | phm_ppt_v1_voltage_lookup_table *lookup_table, |
627 | struct vega10_leakage_voltage *leakage_table) |
628 | { |
629 | uint32_t i; |
630 | |
631 | for (i = 0; i < lookup_table->count; i++) |
632 | vega10_patch_with_vdd_leakage(hwmgr, |
633 | &lookup_table->entries[i].us_vdd, leakage_table); |
634 | |
635 | return 0; |
636 | } |
637 | |
638 | static int vega10_patch_clock_voltage_limits_with_vddc_leakage( |
639 | struct pp_hwmgr *hwmgr, struct vega10_leakage_voltage *leakage_table, |
640 | uint16_t *vddc) |
641 | { |
642 | vega10_patch_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table); |
643 | |
644 | return 0; |
645 | } |
646 | #endif |
647 | |
648 | static int vega10_patch_voltage_dependency_tables_with_lookup_table( |
649 | struct pp_hwmgr *hwmgr) |
650 | { |
651 | uint8_t entry_id, voltage_id; |
652 | unsigned i; |
653 | struct phm_ppt_v2_information *table_info = |
654 | (struct phm_ppt_v2_information *)(hwmgr->pptable); |
655 | struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = |
656 | table_info->mm_dep_table; |
657 | struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table = |
658 | table_info->vdd_dep_on_mclk; |
659 | |
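	/*
	 * Patch the vddc value of every entry in the socclk, sclk, dcefclk,
	 * pixclk, dispclk and phyclk dependency tables using the vddc
	 * lookup table.
	 */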
660 | for (i = 0; i < 6; i++) { |
661 | struct phm_ppt_v1_clock_voltage_dependency_table *vdt; |
662 | switch (i) { |
663 | case 0: vdt = table_info->vdd_dep_on_socclk; break; |
664 | case 1: vdt = table_info->vdd_dep_on_sclk; break; |
665 | case 2: vdt = table_info->vdd_dep_on_dcefclk; break; |
666 | case 3: vdt = table_info->vdd_dep_on_pixclk; break; |
667 | case 4: vdt = table_info->vdd_dep_on_dispclk; break; |
668 | case 5: vdt = table_info->vdd_dep_on_phyclk; break; |
669 | } |
670 | |
671 | for (entry_id = 0; entry_id < vdt->count; entry_id++) { |
672 | voltage_id = vdt->entries[entry_id].vddInd; |
673 | vdt->entries[entry_id].vddc = |
674 | table_info->vddc_lookup_table->entries[voltage_id].us_vdd; |
675 | } |
676 | } |
677 | |
678 | for (entry_id = 0; entry_id < mm_table->count; ++entry_id) { |
679 | voltage_id = mm_table->entries[entry_id].vddcInd; |
680 | mm_table->entries[entry_id].vddc = |
681 | table_info->vddc_lookup_table->entries[voltage_id].us_vdd; |
682 | } |
683 | |
684 | for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) { |
685 | voltage_id = mclk_table->entries[entry_id].vddInd; |
686 | mclk_table->entries[entry_id].vddc = |
687 | table_info->vddc_lookup_table->entries[voltage_id].us_vdd; |
688 | voltage_id = mclk_table->entries[entry_id].vddciInd; |
689 | mclk_table->entries[entry_id].vddci = |
690 | table_info->vddci_lookup_table->entries[voltage_id].us_vdd; |
691 | voltage_id = mclk_table->entries[entry_id].mvddInd; |
692 | mclk_table->entries[entry_id].mvdd = |
693 | table_info->vddmem_lookup_table->entries[voltage_id].us_vdd; |
694 | } |
695 | |
696 | |
697 | return 0; |
698 | |
699 | } |
700 | |
701 | static int vega10_sort_lookup_table(struct pp_hwmgr *hwmgr, |
702 | struct phm_ppt_v1_voltage_lookup_table *lookup_table) |
703 | { |
704 | uint32_t table_size, i, j; |
705 | struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record; |
706 | |
	PP_ASSERT_WITH_CODE(lookup_table && lookup_table->count,
			"Lookup table is empty", return -EINVAL);
709 | |
710 | table_size = lookup_table->count; |
711 | |
	/* Sort voltages in ascending order (simple insertion sort) */
713 | for (i = 0; i < table_size - 1; i++) { |
714 | for (j = i + 1; j > 0; j--) { |
715 | if (lookup_table->entries[j].us_vdd < |
716 | lookup_table->entries[j - 1].us_vdd) { |
717 | tmp_voltage_lookup_record = lookup_table->entries[j - 1]; |
718 | lookup_table->entries[j - 1] = lookup_table->entries[j]; |
719 | lookup_table->entries[j] = tmp_voltage_lookup_record; |
720 | } |
721 | } |
722 | } |
723 | |
724 | return 0; |
725 | } |
726 | |
727 | static int vega10_complete_dependency_tables(struct pp_hwmgr *hwmgr) |
728 | { |
729 | int result = 0; |
730 | int tmp_result; |
731 | struct phm_ppt_v2_information *table_info = |
732 | (struct phm_ppt_v2_information *)(hwmgr->pptable); |
733 | #ifdef PPLIB_VEGA10_EVV_SUPPORT |
734 | struct vega10_hwmgr *data = hwmgr->backend; |
735 | |
736 | tmp_result = vega10_patch_lookup_table_with_leakage(hwmgr, |
737 | table_info->vddc_lookup_table, &(data->vddc_leakage)); |
738 | if (tmp_result) |
739 | result = tmp_result; |
740 | |
741 | tmp_result = vega10_patch_clock_voltage_limits_with_vddc_leakage(hwmgr, |
742 | &(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc); |
743 | if (tmp_result) |
744 | result = tmp_result; |
745 | #endif |
746 | |
747 | tmp_result = vega10_patch_voltage_dependency_tables_with_lookup_table(hwmgr); |
748 | if (tmp_result) |
749 | result = tmp_result; |
750 | |
751 | tmp_result = vega10_sort_lookup_table(hwmgr, table_info->vddc_lookup_table); |
752 | if (tmp_result) |
753 | result = tmp_result; |
754 | |
755 | return result; |
756 | } |
757 | |
758 | static int vega10_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr) |
759 | { |
760 | struct phm_ppt_v2_information *table_info = |
761 | (struct phm_ppt_v2_information *)(hwmgr->pptable); |
762 | struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table = |
763 | table_info->vdd_dep_on_socclk; |
764 | struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table = |
765 | table_info->vdd_dep_on_mclk; |
766 | |
	PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table,
		"VDD dependency on SCLK table is missing. This table is mandatory", return -EINVAL);
	PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
		"VDD dependency on SCLK table is empty. This table is mandatory", return -EINVAL);

	PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table,
		"VDD dependency on MCLK table is missing. This table is mandatory", return -EINVAL);
	PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
		"VDD dependency on MCLK table is empty. This table is mandatory", return -EINVAL);
776 | |
777 | table_info->max_clock_voltage_on_ac.sclk = |
778 | allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk; |
779 | table_info->max_clock_voltage_on_ac.mclk = |
780 | allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk; |
781 | table_info->max_clock_voltage_on_ac.vddc = |
782 | allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc; |
783 | table_info->max_clock_voltage_on_ac.vddci = |
784 | allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci; |
785 | |
786 | hwmgr->dyn_state.max_clock_voltage_on_ac.sclk = |
787 | table_info->max_clock_voltage_on_ac.sclk; |
788 | hwmgr->dyn_state.max_clock_voltage_on_ac.mclk = |
789 | table_info->max_clock_voltage_on_ac.mclk; |
790 | hwmgr->dyn_state.max_clock_voltage_on_ac.vddc = |
791 | table_info->max_clock_voltage_on_ac.vddc; |
792 | hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = |
793 | table_info->max_clock_voltage_on_ac.vddci; |
794 | |
795 | return 0; |
796 | } |
797 | |
798 | static int vega10_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) |
799 | { |
800 | kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl); |
801 | hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL; |
802 | |
803 | kfree(hwmgr->backend); |
804 | hwmgr->backend = NULL; |
805 | |
806 | return 0; |
807 | } |
808 | |
809 | static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr) |
810 | { |
811 | int result = 0; |
812 | struct vega10_hwmgr *data; |
813 | uint32_t config_telemetry = 0; |
814 | struct pp_atomfwctrl_voltage_table vol_table; |
815 | struct amdgpu_device *adev = hwmgr->adev; |
816 | |
817 | data = kzalloc(sizeof(struct vega10_hwmgr), GFP_KERNEL); |
818 | if (data == NULL) |
819 | return -ENOMEM; |
820 | |
821 | hwmgr->backend = data; |
822 | |
823 | hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT]; |
824 | hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; |
825 | hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; |
826 | |
827 | vega10_set_default_registry_data(hwmgr); |
828 | data->disable_dpm_mask = 0xff; |
829 | |
830 | /* need to set voltage control types before EVV patching */ |
831 | data->vddc_control = VEGA10_VOLTAGE_CONTROL_NONE; |
832 | data->mvdd_control = VEGA10_VOLTAGE_CONTROL_NONE; |
833 | data->vddci_control = VEGA10_VOLTAGE_CONTROL_NONE; |
834 | |
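	/*
	 * config_telemetry layout: byte 0 = VDDCR_SOC telemetry offset,
	 * byte 1 = VDDCR_SOC telemetry slope, byte 2 = MVDDC telemetry
	 * offset, byte 3 = MVDDC telemetry slope.
	 */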
835 | /* VDDCR_SOC */ |
836 | if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr, |
837 | VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) { |
838 | if (!pp_atomfwctrl_get_voltage_table_v4(hwmgr, |
839 | VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2, |
840 | &vol_table)) { |
841 | config_telemetry = ((vol_table.telemetry_slope << 8) & 0xff00) | |
842 | (vol_table.telemetry_offset & 0xff); |
843 | data->vddc_control = VEGA10_VOLTAGE_CONTROL_BY_SVID2; |
844 | } |
845 | } else { |
846 | kfree(hwmgr->backend); |
847 | hwmgr->backend = NULL; |
		PP_ASSERT_WITH_CODE(false,
				"VDDCR_SOC is not SVID2!",
850 | return -1); |
851 | } |
852 | |
853 | /* MVDDC */ |
854 | if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr, |
855 | VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2)) { |
856 | if (!pp_atomfwctrl_get_voltage_table_v4(hwmgr, |
857 | VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2, |
858 | &vol_table)) { |
859 | config_telemetry |= |
860 | ((vol_table.telemetry_slope << 24) & 0xff000000) | |
861 | ((vol_table.telemetry_offset << 16) & 0xff0000); |
862 | data->mvdd_control = VEGA10_VOLTAGE_CONTROL_BY_SVID2; |
863 | } |
864 | } |
865 | |
866 | /* VDDCI_MEM */ |
867 | if (PP_CAP(PHM_PlatformCaps_ControlVDDCI)) { |
868 | if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr, |
869 | VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT)) |
870 | data->vddci_control = VEGA10_VOLTAGE_CONTROL_BY_GPIO; |
871 | } |
872 | |
873 | data->config_telemetry = config_telemetry; |
874 | |
875 | vega10_set_features_platform_caps(hwmgr); |
876 | |
877 | vega10_init_dpm_defaults(hwmgr); |
878 | |
879 | #ifdef PPLIB_VEGA10_EVV_SUPPORT |
880 | /* Get leakage voltage based on leakage ID. */ |
	PP_ASSERT_WITH_CODE(!vega10_get_evv_voltages(hwmgr),
			"Get EVV Voltage Failed. Abort Driver loading!",
883 | return -1); |
884 | #endif |
885 | |
	/* Patch our voltage dependency tables with actual leakage voltages.
	 * Leakage translation must be performed before these tables are used
	 * by other functions.
	 */
889 | vega10_complete_dependency_tables(hwmgr); |
890 | |
891 | /* Parse pptable data read from VBIOS */ |
892 | vega10_set_private_data_based_on_pptable(hwmgr); |
893 | |
894 | data->is_tlu_enabled = false; |
895 | |
896 | hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = |
897 | VEGA10_MAX_HARDWARE_POWERLEVELS; |
898 | hwmgr->platform_descriptor.hardwarePerformanceLevels = 2; |
899 | hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50; |
900 | |
901 | hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */ |
	/* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5 MHz. */
903 | hwmgr->platform_descriptor.clockStep.engineClock = 500; |
904 | hwmgr->platform_descriptor.clockStep.memoryClock = 500; |
905 | |
906 | data->total_active_cus = adev->gfx.cu_info.number; |
907 | /* Setup default Overdrive Fan control settings */ |
908 | data->odn_fan_table.target_fan_speed = |
909 | hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM; |
910 | data->odn_fan_table.target_temperature = |
911 | hwmgr->thermal_controller. |
912 | advanceFanControlParameters.ucTargetTemperature; |
913 | data->odn_fan_table.min_performance_clock = |
914 | hwmgr->thermal_controller.advanceFanControlParameters. |
915 | ulMinFanSCLKAcousticLimit; |
916 | data->odn_fan_table.min_fan_limit = |
917 | hwmgr->thermal_controller. |
918 | advanceFanControlParameters.usFanPWMMinLimit * |
919 | hwmgr->thermal_controller.fanInfo.ulMaxRPM / 100; |
920 | |
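	/*
	 * mem_channels holds the raw IntLvNumChan encoding read from the DF;
	 * channel_number[] maps this encoding to the actual channel count.
	 */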
921 | data->mem_channels = (RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0) & |
922 | DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK) >> |
923 | DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT; |
	PP_ASSERT_WITH_CODE(data->mem_channels < ARRAY_SIZE(channel_number),
			"Mem Channel Index Exceeded maximum!",
926 | return -EINVAL); |
927 | |
928 | return result; |
929 | } |
930 | |
931 | static int vega10_init_sclk_threshold(struct pp_hwmgr *hwmgr) |
932 | { |
933 | struct vega10_hwmgr *data = hwmgr->backend; |
934 | |
935 | data->low_sclk_interrupt_threshold = 0; |
936 | |
937 | return 0; |
938 | } |
939 | |
940 | static int vega10_setup_dpm_led_config(struct pp_hwmgr *hwmgr) |
941 | { |
942 | struct vega10_hwmgr *data = hwmgr->backend; |
943 | PPTable_t *pp_table = &(data->smc_state_table.pp_table); |
944 | |
945 | struct pp_atomfwctrl_voltage_table table; |
946 | uint8_t i, j; |
947 | uint32_t mask = 0; |
948 | uint32_t tmp; |
949 | int32_t ret = 0; |
950 | |
951 | ret = pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_LEDDPM, |
952 | VOLTAGE_OBJ_GPIO_LUT, &table); |
953 | |
954 | if (!ret) { |
955 | tmp = table.mask_low; |
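		/*
		 * Pack the bit positions of the first three GPIO pins set in
		 * mask_low into bytes 0-2 of mask; they become LedPin0/1/2.
		 */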
956 | for (i = 0, j = 0; i < 32; i++) { |
957 | if (tmp & 1) { |
958 | mask |= (uint32_t)(i << (8 * j)); |
959 | if (++j >= 3) |
960 | break; |
961 | } |
962 | tmp >>= 1; |
963 | } |
964 | } |
965 | |
966 | pp_table->LedPin0 = (uint8_t)(mask & 0xff); |
967 | pp_table->LedPin1 = (uint8_t)((mask >> 8) & 0xff); |
968 | pp_table->LedPin2 = (uint8_t)((mask >> 16) & 0xff); |
969 | return 0; |
970 | } |
971 | |
972 | static int vega10_setup_asic_task(struct pp_hwmgr *hwmgr) |
973 | { |
	PP_ASSERT_WITH_CODE(!vega10_init_sclk_threshold(hwmgr),
			"Failed to init sclk threshold!",
976 | return -EINVAL); |
977 | |
	PP_ASSERT_WITH_CODE(!vega10_setup_dpm_led_config(hwmgr),
			"Failed to set up led dpm config!",
980 | return -EINVAL); |
981 | |
982 | smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_NumOfDisplays, 0); |
983 | |
984 | return 0; |
985 | } |
986 | |
987 | /** |
988 | * Remove repeated voltage values and create table with unique values. |
989 | * |
990 | * @param hwmgr the address of the powerplay hardware manager. |
 * @param vol_table the pointer to the voltage table to be trimmed
 * @return 0 on success
993 | */ |
994 | |
995 | static int vega10_trim_voltage_table(struct pp_hwmgr *hwmgr, |
996 | struct pp_atomfwctrl_voltage_table *vol_table) |
997 | { |
998 | uint32_t i, j; |
999 | uint16_t vvalue; |
1000 | bool found = false; |
1001 | struct pp_atomfwctrl_voltage_table *table; |
1002 | |
	PP_ASSERT_WITH_CODE(vol_table,
			"Voltage Table empty.", return -EINVAL);
1005 | table = kzalloc(sizeof(struct pp_atomfwctrl_voltage_table), |
1006 | GFP_KERNEL); |
1007 | |
1008 | if (!table) |
1009 | return -ENOMEM; |
1010 | |
1011 | table->mask_low = vol_table->mask_low; |
1012 | table->phase_delay = vol_table->phase_delay; |
1013 | |
1014 | for (i = 0; i < vol_table->count; i++) { |
1015 | vvalue = vol_table->entries[i].value; |
1016 | found = false; |
1017 | |
1018 | for (j = 0; j < table->count; j++) { |
1019 | if (vvalue == table->entries[j].value) { |
1020 | found = true; |
1021 | break; |
1022 | } |
1023 | } |
1024 | |
1025 | if (!found) { |
1026 | table->entries[table->count].value = vvalue; |
1027 | table->entries[table->count].smio_low = |
1028 | vol_table->entries[i].smio_low; |
1029 | table->count++; |
1030 | } |
1031 | } |
1032 | |
1033 | memcpy(vol_table, table, sizeof(struct pp_atomfwctrl_voltage_table)); |
1034 | kfree(table); |
1035 | |
1036 | return 0; |
1037 | } |
1038 | |
1039 | static int vega10_get_mvdd_voltage_table(struct pp_hwmgr *hwmgr, |
1040 | phm_ppt_v1_clock_voltage_dependency_table *dep_table, |
1041 | struct pp_atomfwctrl_voltage_table *vol_table) |
1042 | { |
1043 | int i; |
1044 | |
	PP_ASSERT_WITH_CODE(dep_table->count,
			"Voltage Dependency Table empty.",
1047 | return -EINVAL); |
1048 | |
1049 | vol_table->mask_low = 0; |
1050 | vol_table->phase_delay = 0; |
1051 | vol_table->count = dep_table->count; |
1052 | |
1053 | for (i = 0; i < vol_table->count; i++) { |
1054 | vol_table->entries[i].value = dep_table->entries[i].mvdd; |
1055 | vol_table->entries[i].smio_low = 0; |
1056 | } |
1057 | |
1058 | PP_ASSERT_WITH_CODE(!vega10_trim_voltage_table(hwmgr, |
			vol_table),
			"Failed to trim MVDD Table!",
1061 | return -1); |
1062 | |
1063 | return 0; |
1064 | } |
1065 | |
1066 | static int vega10_get_vddci_voltage_table(struct pp_hwmgr *hwmgr, |
1067 | phm_ppt_v1_clock_voltage_dependency_table *dep_table, |
1068 | struct pp_atomfwctrl_voltage_table *vol_table) |
1069 | { |
1070 | uint32_t i; |
1071 | |
	PP_ASSERT_WITH_CODE(dep_table->count,
			"Voltage Dependency Table empty.",
1074 | return -EINVAL); |
1075 | |
1076 | vol_table->mask_low = 0; |
1077 | vol_table->phase_delay = 0; |
1078 | vol_table->count = dep_table->count; |
1079 | |
1080 | for (i = 0; i < dep_table->count; i++) { |
1081 | vol_table->entries[i].value = dep_table->entries[i].vddci; |
1082 | vol_table->entries[i].smio_low = 0; |
1083 | } |
1084 | |
	PP_ASSERT_WITH_CODE(!vega10_trim_voltage_table(hwmgr, vol_table),
			"Failed to trim VDDCI table.",
1087 | return -1); |
1088 | |
1089 | return 0; |
1090 | } |
1091 | |
1092 | static int vega10_get_vdd_voltage_table(struct pp_hwmgr *hwmgr, |
1093 | phm_ppt_v1_clock_voltage_dependency_table *dep_table, |
1094 | struct pp_atomfwctrl_voltage_table *vol_table) |
1095 | { |
1096 | int i; |
1097 | |
	PP_ASSERT_WITH_CODE(dep_table->count,
			"Voltage Dependency Table empty.",
1100 | return -EINVAL); |
1101 | |
1102 | vol_table->mask_low = 0; |
1103 | vol_table->phase_delay = 0; |
1104 | vol_table->count = dep_table->count; |
1105 | |
1106 | for (i = 0; i < vol_table->count; i++) { |
1107 | vol_table->entries[i].value = dep_table->entries[i].vddc; |
1108 | vol_table->entries[i].smio_low = 0; |
1109 | } |
1110 | |
1111 | return 0; |
1112 | } |
1113 | |
1114 | /* ---- Voltage Tables ---- |
 * If the voltage table is bigger than what
 * will fit into the state table on the SMC,
 * keep only the highest entries.
1118 | */ |
1119 | static void vega10_trim_voltage_table_to_fit_state_table( |
1120 | struct pp_hwmgr *hwmgr, |
1121 | uint32_t max_vol_steps, |
1122 | struct pp_atomfwctrl_voltage_table *vol_table) |
1123 | { |
1124 | unsigned int i, diff; |
1125 | |
1126 | if (vol_table->count <= max_vol_steps) |
1127 | return; |
1128 | |
1129 | diff = vol_table->count - max_vol_steps; |
1130 | |
1131 | for (i = 0; i < max_vol_steps; i++) |
1132 | vol_table->entries[i] = vol_table->entries[i + diff]; |
1133 | |
1134 | vol_table->count = max_vol_steps; |
1135 | } |
1136 | |
1137 | /** |
1138 | * Create Voltage Tables. |
1139 | * |
1140 | * @param hwmgr the address of the powerplay hardware manager. |
 * @return 0 on success; otherwise an error code from retrieving a voltage table.
1142 | */ |
1143 | static int vega10_construct_voltage_tables(struct pp_hwmgr *hwmgr) |
1144 | { |
1145 | struct vega10_hwmgr *data = hwmgr->backend; |
1146 | struct phm_ppt_v2_information *table_info = |
1147 | (struct phm_ppt_v2_information *)hwmgr->pptable; |
1148 | int result; |
1149 | |
1150 | if (data->mvdd_control == VEGA10_VOLTAGE_CONTROL_BY_SVID2 || |
1151 | data->mvdd_control == VEGA10_VOLTAGE_CONTROL_NONE) { |
1152 | result = vega10_get_mvdd_voltage_table(hwmgr, |
1153 | table_info->vdd_dep_on_mclk, |
1154 | &(data->mvdd_voltage_table)); |
		PP_ASSERT_WITH_CODE(!result,
				"Failed to retrieve MVDDC table!",
1157 | return result); |
1158 | } |
1159 | |
1160 | if (data->vddci_control == VEGA10_VOLTAGE_CONTROL_NONE) { |
1161 | result = vega10_get_vddci_voltage_table(hwmgr, |
1162 | table_info->vdd_dep_on_mclk, |
1163 | &(data->vddci_voltage_table)); |
		PP_ASSERT_WITH_CODE(!result,
				"Failed to retrieve VDDCI_MEM table!",
1166 | return result); |
1167 | } |
1168 | |
1169 | if (data->vddc_control == VEGA10_VOLTAGE_CONTROL_BY_SVID2 || |
1170 | data->vddc_control == VEGA10_VOLTAGE_CONTROL_NONE) { |
1171 | result = vega10_get_vdd_voltage_table(hwmgr, |
1172 | table_info->vdd_dep_on_sclk, |
1173 | &(data->vddc_voltage_table)); |
		PP_ASSERT_WITH_CODE(!result,
				"Failed to retrieve VDDCR_SOC table!",
1176 | return result); |
1177 | } |
1178 | |
	PP_ASSERT_WITH_CODE(data->vddc_voltage_table.count <= 16,
			"Too many voltage values for VDDC. Trimming to fit state table.",
1181 | vega10_trim_voltage_table_to_fit_state_table(hwmgr, |
1182 | 16, &(data->vddc_voltage_table))); |
1183 | |
	PP_ASSERT_WITH_CODE(data->vddci_voltage_table.count <= 16,
			"Too many voltage values for VDDCI. Trimming to fit state table.",
1186 | vega10_trim_voltage_table_to_fit_state_table(hwmgr, |
1187 | 16, &(data->vddci_voltage_table))); |
1188 | |
	PP_ASSERT_WITH_CODE(data->mvdd_voltage_table.count <= 16,
			"Too many voltage values for MVDD. Trimming to fit state table.",
1191 | vega10_trim_voltage_table_to_fit_state_table(hwmgr, |
1192 | 16, &(data->mvdd_voltage_table))); |
1193 | |
1194 | |
1195 | return 0; |
1196 | } |
1197 | |
1198 | /* |
1199 | * @fn vega10_init_dpm_state |
1200 | * @brief Function to initialize all Soft Min/Max and Hard Min/Max to 0xff. |
1201 | * |
 * @param dpm_state - the address of the DPM state to initialize.
1203 | * @return None. |
1204 | */ |
1205 | static void vega10_init_dpm_state(struct vega10_dpm_state *dpm_state) |
1206 | { |
1207 | dpm_state->soft_min_level = 0xff; |
1208 | dpm_state->soft_max_level = 0xff; |
1209 | dpm_state->hard_min_level = 0xff; |
1210 | dpm_state->hard_max_level = 0xff; |
1211 | } |
1212 | |
1213 | static void vega10_setup_default_single_dpm_table(struct pp_hwmgr *hwmgr, |
1214 | struct vega10_single_dpm_table *dpm_table, |
1215 | struct phm_ppt_v1_clock_voltage_dependency_table *dep_table) |
1216 | { |
1217 | int i; |
1218 | |
1219 | dpm_table->count = 0; |
1220 | |
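	/*
	 * Create one DPM level per dependency entry, skipping entries whose
	 * clock is lower than the previous level so that the levels stay in
	 * non-decreasing order.
	 */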
1221 | for (i = 0; i < dep_table->count; i++) { |
1222 | if (i == 0 || dpm_table->dpm_levels[dpm_table->count - 1].value <= |
1223 | dep_table->entries[i].clk) { |
1224 | dpm_table->dpm_levels[dpm_table->count].value = |
1225 | dep_table->entries[i].clk; |
1226 | dpm_table->dpm_levels[dpm_table->count].enabled = true; |
1227 | dpm_table->count++; |
1228 | } |
1229 | } |
1230 | } |
1231 | static int vega10_setup_default_pcie_table(struct pp_hwmgr *hwmgr) |
1232 | { |
1233 | struct vega10_hwmgr *data = hwmgr->backend; |
1234 | struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table); |
1235 | struct phm_ppt_v2_information *table_info = |
1236 | (struct phm_ppt_v2_information *)(hwmgr->pptable); |
1237 | struct phm_ppt_v1_pcie_table *bios_pcie_table = |
1238 | table_info->pcie_table; |
1239 | uint32_t i; |
1240 | |
	PP_ASSERT_WITH_CODE(bios_pcie_table->count,
			"Incorrect number of PCIE States from VBIOS!",
1243 | return -1); |
1244 | |
1245 | for (i = 0; i < NUM_LINK_LEVELS; i++) { |
1246 | if (data->registry_data.pcieSpeedOverride) |
1247 | pcie_table->pcie_gen[i] = |
1248 | data->registry_data.pcieSpeedOverride; |
1249 | else |
1250 | pcie_table->pcie_gen[i] = |
1251 | bios_pcie_table->entries[i].gen_speed; |
1252 | |
1253 | if (data->registry_data.pcieLaneOverride) |
1254 | pcie_table->pcie_lane[i] = (uint8_t)encode_pcie_lane_width( |
1255 | data->registry_data.pcieLaneOverride); |
1256 | else |
1257 | pcie_table->pcie_lane[i] = (uint8_t)encode_pcie_lane_width( |
1258 | bios_pcie_table->entries[i].lane_width); |
1259 | if (data->registry_data.pcieClockOverride) |
1260 | pcie_table->lclk[i] = |
1261 | data->registry_data.pcieClockOverride; |
1262 | else |
1263 | pcie_table->lclk[i] = |
1264 | bios_pcie_table->entries[i].pcie_sclk; |
1265 | } |
1266 | |
1267 | pcie_table->count = NUM_LINK_LEVELS; |
1268 | |
1269 | return 0; |
1270 | } |
1271 | |
1272 | /* |
1273 | * This function is to initialize all DPM state tables |
1274 | * for SMU based on the dependency table. |
1275 | * Dynamic state patching function will then trim these |
1276 | * state tables to the allowed range based |
1277 | * on the power policy or external client requests, |
1278 | * such as UVD request, etc. |
1279 | */ |
1280 | static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) |
1281 | { |
1282 | struct vega10_hwmgr *data = hwmgr->backend; |
1283 | struct phm_ppt_v2_information *table_info = |
1284 | (struct phm_ppt_v2_information *)(hwmgr->pptable); |
1285 | struct vega10_single_dpm_table *dpm_table; |
1286 | uint32_t i; |
1287 | |
1288 | struct phm_ppt_v1_clock_voltage_dependency_table *dep_soc_table = |
1289 | table_info->vdd_dep_on_socclk; |
1290 | struct phm_ppt_v1_clock_voltage_dependency_table *dep_gfx_table = |
1291 | table_info->vdd_dep_on_sclk; |
1292 | struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table = |
1293 | table_info->vdd_dep_on_mclk; |
1294 | struct phm_ppt_v1_mm_clock_voltage_dependency_table *dep_mm_table = |
1295 | table_info->mm_dep_table; |
1296 | struct phm_ppt_v1_clock_voltage_dependency_table *dep_dcef_table = |
1297 | table_info->vdd_dep_on_dcefclk; |
1298 | struct phm_ppt_v1_clock_voltage_dependency_table *dep_pix_table = |
1299 | table_info->vdd_dep_on_pixclk; |
1300 | struct phm_ppt_v1_clock_voltage_dependency_table *dep_disp_table = |
1301 | table_info->vdd_dep_on_dispclk; |
1302 | struct phm_ppt_v1_clock_voltage_dependency_table *dep_phy_table = |
1303 | table_info->vdd_dep_on_phyclk; |
1304 | |
	PP_ASSERT_WITH_CODE(dep_soc_table,
			"SOCCLK dependency table is missing. This table is mandatory",
			return -EINVAL);
	PP_ASSERT_WITH_CODE(dep_soc_table->count >= 1,
			"SOCCLK dependency table is empty. This table is mandatory",
			return -EINVAL);

	PP_ASSERT_WITH_CODE(dep_gfx_table,
			"GFXCLK dependency table is missing. This table is mandatory",
			return -EINVAL);
	PP_ASSERT_WITH_CODE(dep_gfx_table->count >= 1,
			"GFXCLK dependency table is empty. This table is mandatory",
1317 | return -EINVAL); |
1318 | |
	PP_ASSERT_WITH_CODE(dep_mclk_table,
			"MCLK dependency table is missing. This table is mandatory",
			return -EINVAL);
	PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1,
			"MCLK dependency table is empty. This table is mandatory",
1324 | return -EINVAL); |
1325 | |
	/* Initialize Socclk DPM table based on allowed Socclk values */
1327 | dpm_table = &(data->dpm_table.soc_table); |
1328 | vega10_setup_default_single_dpm_table(hwmgr, |
1329 | dpm_table, |
1330 | dep_soc_table); |
1331 | |
1332 | vega10_init_dpm_state(&(dpm_table->dpm_state)); |
1333 | |
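	/* Initialize Gfxclk DPM table based on allowed Gfxclk values */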
1334 | dpm_table = &(data->dpm_table.gfx_table); |
1335 | vega10_setup_default_single_dpm_table(hwmgr, |
1336 | dpm_table, |
1337 | dep_gfx_table); |
1338 | if (hwmgr->platform_descriptor.overdriveLimit.engineClock == 0) |
1339 | hwmgr->platform_descriptor.overdriveLimit.engineClock = |
1340 | dpm_table->dpm_levels[dpm_table->count-1].value; |
1341 | vega10_init_dpm_state(&(dpm_table->dpm_state)); |
1342 | |
	/* Initialize Mclk DPM table based on allowed Mclk values */
1344 | data->dpm_table.mem_table.count = 0; |
1345 | dpm_table = &(data->dpm_table.mem_table); |
1346 | vega10_setup_default_single_dpm_table(hwmgr, |
1347 | dpm_table, |
1348 | dep_mclk_table); |
1349 | if (hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0) |
1350 | hwmgr->platform_descriptor.overdriveLimit.memoryClock = |
1351 | dpm_table->dpm_levels[dpm_table->count-1].value; |
1352 | vega10_init_dpm_state(&(dpm_table->dpm_state)); |
1353 | |
1354 | data->dpm_table.eclk_table.count = 0; |
1355 | dpm_table = &(data->dpm_table.eclk_table); |
1356 | for (i = 0; i < dep_mm_table->count; i++) { |
1357 | if (i == 0 || dpm_table->dpm_levels |
1358 | [dpm_table->count - 1].value <= |
1359 | dep_mm_table->entries[i].eclk) { |
1360 | dpm_table->dpm_levels[dpm_table->count].value = |
1361 | dep_mm_table->entries[i].eclk; |
1362 | dpm_table->dpm_levels[dpm_table->count].enabled = |
1363 | (i == 0) ? true : false; |
1364 | dpm_table->count++; |
1365 | } |
1366 | } |
1367 | vega10_init_dpm_state(&(dpm_table->dpm_state)); |
1368 | |
1369 | data->dpm_table.vclk_table.count = 0; |
1370 | data->dpm_table.dclk_table.count = 0; |
1371 | dpm_table = &(data->dpm_table.vclk_table); |
1372 | for (i = 0; i < dep_mm_table->count; i++) { |
1373 | if (i == 0 || dpm_table->dpm_levels |
1374 | [dpm_table->count - 1].value <= |
1375 | dep_mm_table->entries[i].vclk) { |
1376 | dpm_table->dpm_levels[dpm_table->count].value = |
1377 | dep_mm_table->entries[i].vclk; |
1378 | dpm_table->dpm_levels[dpm_table->count].enabled = |
1379 | (i == 0) ? true : false; |
1380 | dpm_table->count++; |
1381 | } |
1382 | } |
1383 | vega10_init_dpm_state(&(dpm_table->dpm_state)); |
1384 | |
1385 | dpm_table = &(data->dpm_table.dclk_table); |
1386 | for (i = 0; i < dep_mm_table->count; i++) { |
1387 | if (i == 0 || dpm_table->dpm_levels |
1388 | [dpm_table->count - 1].value <= |
1389 | dep_mm_table->entries[i].dclk) { |
1390 | dpm_table->dpm_levels[dpm_table->count].value = |
1391 | dep_mm_table->entries[i].dclk; |
1392 | dpm_table->dpm_levels[dpm_table->count].enabled = |
1393 | (i == 0) ? true : false; |
1394 | dpm_table->count++; |
1395 | } |
1396 | } |
1397 | vega10_init_dpm_state(&(dpm_table->dpm_state)); |
1398 | |
1399 | /* Assume there is no headless Vega10 for now */ |
1400 | dpm_table = &(data->dpm_table.dcef_table); |
1401 | vega10_setup_default_single_dpm_table(hwmgr, |
1402 | dpm_table, |
1403 | dep_dcef_table); |
1404 | |
1405 | vega10_init_dpm_state(&(dpm_table->dpm_state)); |
1406 | |
1407 | dpm_table = &(data->dpm_table.pixel_table); |
1408 | vega10_setup_default_single_dpm_table(hwmgr, |
1409 | dpm_table, |
1410 | dep_pix_table); |
1411 | |
1412 | vega10_init_dpm_state(&(dpm_table->dpm_state)); |
1413 | |
1414 | dpm_table = &(data->dpm_table.display_table); |
1415 | vega10_setup_default_single_dpm_table(hwmgr, |
1416 | dpm_table, |
1417 | dep_disp_table); |
1418 | |
1419 | vega10_init_dpm_state(&(dpm_table->dpm_state)); |
1420 | |
1421 | dpm_table = &(data->dpm_table.phy_table); |
1422 | vega10_setup_default_single_dpm_table(hwmgr, |
1423 | dpm_table, |
1424 | dep_phy_table); |
1425 | |
1426 | vega10_init_dpm_state(&(dpm_table->dpm_state)); |
1427 | |
1428 | vega10_setup_default_pcie_table(hwmgr); |
1429 | |
1430 | /* save a copy of the default DPM table */ |
1431 | memcpy(&(data->golden_dpm_table), &(data->dpm_table), |
1432 | sizeof(struct vega10_dpm_table)); |
1433 | |
1434 | return 0; |
1435 | } |
1436 | |
1437 | /* |
1438 | * @fn vega10_populate_ulv_state |
 * @brief Function to provide parameters for Ultra Low Voltage state to SMC.
1440 | * |
1441 | * @param hwmgr - the address of the hardware manager. |
1442 | * @return Always 0. |
1443 | */ |
1444 | static int vega10_populate_ulv_state(struct pp_hwmgr *hwmgr) |
1445 | { |
1446 | struct vega10_hwmgr *data = hwmgr->backend; |
1447 | struct phm_ppt_v2_information *table_info = |
1448 | (struct phm_ppt_v2_information *)(hwmgr->pptable); |
1449 | |
1450 | data->smc_state_table.pp_table.UlvOffsetVid = |
1451 | (uint8_t)table_info->us_ulv_voltage_offset; |
1452 | |
1453 | data->smc_state_table.pp_table.UlvSmnclkDid = |
1454 | (uint8_t)(table_info->us_ulv_smnclk_did); |
1455 | data->smc_state_table.pp_table.UlvMp1clkDid = |
1456 | (uint8_t)(table_info->us_ulv_mp1clk_did); |
1457 | data->smc_state_table.pp_table.UlvGfxclkBypass = |
1458 | (uint8_t)(table_info->us_ulv_gfxclk_bypass); |
1459 | data->smc_state_table.pp_table.UlvPhaseSheddingPsi0 = |
1460 | (uint8_t)(data->vddc_voltage_table.psi0_enable); |
1461 | data->smc_state_table.pp_table.UlvPhaseSheddingPsi1 = |
1462 | (uint8_t)(data->vddc_voltage_table.psi1_enable); |
1463 | |
1464 | return 0; |
1465 | } |
1466 | |
1467 | static int vega10_populate_single_lclk_level(struct pp_hwmgr *hwmgr, |
1468 | uint32_t lclock, uint8_t *curr_lclk_did) |
1469 | { |
1470 | struct pp_atomfwctrl_clock_dividers_soc15 dividers; |
1471 | |
1472 | PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10( |
1473 | hwmgr, |
1474 | COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, |
			lclock, &dividers),
			"Failed to get LCLK clock settings from VBIOS!",
1477 | return -1); |
1478 | |
1479 | *curr_lclk_did = dividers.ulDid; |
1480 | |
1481 | return 0; |
1482 | } |
1483 | |
1484 | static int vega10_populate_smc_link_levels(struct pp_hwmgr *hwmgr) |
1485 | { |
1486 | int result = -1; |
1487 | struct vega10_hwmgr *data = hwmgr->backend; |
1488 | PPTable_t *pp_table = &(data->smc_state_table.pp_table); |
1489 | struct vega10_pcie_table *pcie_table = |
1490 | &(data->dpm_table.pcie_table); |
1491 | uint32_t i, j; |
1492 | |
1493 | for (i = 0; i < pcie_table->count; i++) { |
1494 | pp_table->PcieGenSpeed[i] = pcie_table->pcie_gen[i]; |
1495 | pp_table->PcieLaneCount[i] = pcie_table->pcie_lane[i]; |
1496 | |
1497 | result = vega10_populate_single_lclk_level(hwmgr, |
1498 | pcie_table->lclk[i], &(pp_table->LclkDid[i])); |
1499 | if (result) { |
1500 | pr_info("Populate LClock Level %d Failed!\n" , i); |
1501 | return result; |
1502 | } |
1503 | } |
1504 | |
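	/* Pad the remaining SMC link levels by repeating the last valid PCIe entry */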
1505 | j = i - 1; |
1506 | while (i < NUM_LINK_LEVELS) { |
1507 | pp_table->PcieGenSpeed[i] = pcie_table->pcie_gen[j]; |
1508 | pp_table->PcieLaneCount[i] = pcie_table->pcie_lane[j]; |
1509 | |
1510 | result = vega10_populate_single_lclk_level(hwmgr, |
1511 | pcie_table->lclk[j], &(pp_table->LclkDid[i])); |
1512 | if (result) { |
1513 | pr_info("Populate LClock Level %d Failed!\n" , i); |
1514 | return result; |
1515 | } |
1516 | i++; |
1517 | } |
1518 | |
1519 | return result; |
1520 | } |
1521 | |
1522 | /** |
 * Populates single SMC GFXCLK structure using the provided engine clock
 *
 * @param hwmgr the address of the hardware manager
 * @param gfx_clock the GFX clock to use to populate the structure.
 * @param current_gfxclk_level location in PPTable for the SMC GFXCLK structure.
 * @param acg_freq output for the ACG frequency derived from gfx_clock.
 * @return 0 on success, a negative error code otherwise.
 */
1530 | static int vega10_populate_single_gfx_level(struct pp_hwmgr *hwmgr, |
1531 | uint32_t gfx_clock, PllSetting_t *current_gfxclk_level, |
1532 | uint32_t *acg_freq) |
1533 | { |
1534 | struct phm_ppt_v2_information *table_info = |
1535 | (struct phm_ppt_v2_information *)(hwmgr->pptable); |
1536 | struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_sclk; |
1537 | struct vega10_hwmgr *data = hwmgr->backend; |
1538 | struct pp_atomfwctrl_clock_dividers_soc15 dividers; |
1539 | uint32_t gfx_max_clock = |
1540 | hwmgr->platform_descriptor.overdriveLimit.engineClock; |
1541 | uint32_t i = 0; |
1542 | |
1543 | if (hwmgr->od_enabled) |
1544 | dep_on_sclk = (struct phm_ppt_v1_clock_voltage_dependency_table *) |
1545 | &(data->odn_dpm_table.vdd_dep_on_sclk); |
1546 | else |
1547 | dep_on_sclk = table_info->vdd_dep_on_sclk; |
1548 | |
1549 | PP_ASSERT_WITH_CODE(dep_on_sclk, |
1550 | "Invalid SOC_VDD-GFX_CLK Dependency Table!" , |
1551 | return -EINVAL); |
1552 | |
	if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
		gfx_clock = gfx_clock > gfx_max_clock ? gfx_max_clock : gfx_clock;
	} else {
1556 | for (i = 0; i < dep_on_sclk->count; i++) { |
1557 | if (dep_on_sclk->entries[i].clk == gfx_clock) |
1558 | break; |
1559 | } |
1560 | PP_ASSERT_WITH_CODE(dep_on_sclk->count > i, |
1561 | "Cannot find gfx_clk in SOC_VDD-GFX_CLK!" , |
1562 | return -EINVAL); |
1563 | } |
1564 | |
1565 | PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr, |
1566 | COMPUTE_GPUCLK_INPUT_FLAG_GFXCLK, |
			gfx_clock, &dividers),
			"Failed to get GFX Clock settings from VBIOS!",
1569 | return -EINVAL); |
1570 | |
	/* Feedback Multiplier: bits 8:0 int, bits 15:12 post_div, bits 31:16 frac */
1572 | current_gfxclk_level->FbMult = |
1573 | cpu_to_le32(dividers.ulPll_fb_mult); |
	/* Spread FB Multiplier bit: bits 8:0 int, bits 31:16 frac */
1575 | current_gfxclk_level->SsOn = dividers.ucPll_ss_enable; |
1576 | current_gfxclk_level->SsFbMult = |
1577 | cpu_to_le32(dividers.ulPll_ss_fbsmult); |
1578 | current_gfxclk_level->SsSlewFrac = |
1579 | cpu_to_le16(dividers.usPll_ss_slew_frac); |
1580 | current_gfxclk_level->Did = (uint8_t)(dividers.ulDid); |
1581 | |
	*acg_freq = gfx_clock / 100; /* 100 kHz to MHz conversion */
1583 | |
1584 | return 0; |
1585 | } |
1586 | |
1587 | /** |
 * @brief Populates a single SMC SOCCLK divider using the provided clock.
 *
 * @param hwmgr - the address of the hardware manager.
 * @param soc_clock - the SOC clock to use to populate the structure.
 * @param current_soc_did - output for the SOCCLK divider ID (DID).
 * @param current_vol_index - output for the matching SOC voltage index.
 * @return 0 on success.
1594 | */ |
1595 | static int vega10_populate_single_soc_level(struct pp_hwmgr *hwmgr, |
1596 | uint32_t soc_clock, uint8_t *current_soc_did, |
1597 | uint8_t *current_vol_index) |
1598 | { |
1599 | struct vega10_hwmgr *data = hwmgr->backend; |
1600 | struct phm_ppt_v2_information *table_info = |
1601 | (struct phm_ppt_v2_information *)(hwmgr->pptable); |
1602 | struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_soc; |
1603 | struct pp_atomfwctrl_clock_dividers_soc15 dividers; |
1604 | uint32_t i; |
1605 | |
1606 | if (hwmgr->od_enabled) { |
1607 | dep_on_soc = (struct phm_ppt_v1_clock_voltage_dependency_table *) |
1608 | &data->odn_dpm_table.vdd_dep_on_socclk; |
1609 | for (i = 0; i < dep_on_soc->count; i++) { |
1610 | if (dep_on_soc->entries[i].clk >= soc_clock) |
1611 | break; |
1612 | } |
1613 | } else { |
1614 | dep_on_soc = table_info->vdd_dep_on_socclk; |
1615 | for (i = 0; i < dep_on_soc->count; i++) { |
1616 | if (dep_on_soc->entries[i].clk == soc_clock) |
1617 | break; |
1618 | } |
1619 | } |
1620 | |
1621 | PP_ASSERT_WITH_CODE(dep_on_soc->count > i, |
1622 | "Cannot find SOC_CLK in SOC_VDD-SOC_CLK Dependency Table" , |
1623 | return -EINVAL); |
1624 | |
1625 | PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr, |
1626 | COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, |
			soc_clock, &dividers),
			"Failed to get SOC Clock settings from VBIOS!",
1629 | return -EINVAL); |
1630 | |
1631 | *current_soc_did = (uint8_t)dividers.ulDid; |
1632 | *current_vol_index = (uint8_t)(dep_on_soc->entries[i].vddInd); |
1633 | return 0; |
1634 | } |
1635 | |
1636 | /** |
 * Populates all SMC SCLK and SOCCLK levels based on the trimmed allowed dpm clock states
 *
 * @param hwmgr the address of the hardware manager
 * @return 0 on success, a negative error code otherwise.
1640 | */ |
1641 | static int vega10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) |
1642 | { |
1643 | struct vega10_hwmgr *data = hwmgr->backend; |
1644 | struct phm_ppt_v2_information *table_info = |
1645 | (struct phm_ppt_v2_information *)(hwmgr->pptable); |
1646 | PPTable_t *pp_table = &(data->smc_state_table.pp_table); |
1647 | struct vega10_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table); |
1648 | int result = 0; |
1649 | uint32_t i, j; |
1650 | |
1651 | for (i = 0; i < dpm_table->count; i++) { |
1652 | result = vega10_populate_single_gfx_level(hwmgr, |
1653 | dpm_table->dpm_levels[i].value, |
1654 | &(pp_table->GfxclkLevel[i]), |
1655 | &(pp_table->AcgFreqTable[i])); |
1656 | if (result) |
1657 | return result; |
1658 | } |
1659 | |
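	/* Repeat the last populated GFXCLK level to fill the remaining SMC entries */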
1660 | j = i - 1; |
1661 | while (i < NUM_GFXCLK_DPM_LEVELS) { |
1662 | result = vega10_populate_single_gfx_level(hwmgr, |
1663 | dpm_table->dpm_levels[j].value, |
1664 | &(pp_table->GfxclkLevel[i]), |
1665 | &(pp_table->AcgFreqTable[i])); |
1666 | if (result) |
1667 | return result; |
1668 | i++; |
1669 | } |
1670 | |
1671 | pp_table->GfxclkSlewRate = |
1672 | cpu_to_le16(table_info->us_gfxclk_slew_rate); |
1673 | |
1674 | dpm_table = &(data->dpm_table.soc_table); |
1675 | for (i = 0; i < dpm_table->count; i++) { |
1676 | result = vega10_populate_single_soc_level(hwmgr, |
1677 | dpm_table->dpm_levels[i].value, |
1678 | &(pp_table->SocclkDid[i]), |
1679 | &(pp_table->SocDpmVoltageIndex[i])); |
1680 | if (result) |
1681 | return result; |
1682 | } |
1683 | |
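	/* Repeat the last populated SOCCLK level to fill the remaining SMC entries */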
1684 | j = i - 1; |
1685 | while (i < NUM_SOCCLK_DPM_LEVELS) { |
1686 | result = vega10_populate_single_soc_level(hwmgr, |
1687 | dpm_table->dpm_levels[j].value, |
1688 | &(pp_table->SocclkDid[i]), |
1689 | &(pp_table->SocDpmVoltageIndex[i])); |
1690 | if (result) |
1691 | return result; |
1692 | i++; |
1693 | } |
1694 | |
1695 | return result; |
1696 | } |
1697 | |
1698 | static void vega10_populate_vddc_soc_levels(struct pp_hwmgr *hwmgr) |
1699 | { |
1700 | struct vega10_hwmgr *data = hwmgr->backend; |
1701 | PPTable_t *pp_table = &(data->smc_state_table.pp_table); |
1702 | struct phm_ppt_v2_information *table_info = hwmgr->pptable; |
1703 | struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table; |
1704 | |
1705 | uint8_t soc_vid = 0; |
1706 | uint32_t i, max_vddc_level; |
1707 | |
1708 | if (hwmgr->od_enabled) |
1709 | vddc_lookup_table = (struct phm_ppt_v1_voltage_lookup_table *)&data->odn_dpm_table.vddc_lookup_table; |
1710 | else |
1711 | vddc_lookup_table = table_info->vddc_lookup_table; |
1712 | |
1713 | max_vddc_level = vddc_lookup_table->count; |
1714 | for (i = 0; i < max_vddc_level; i++) { |
1715 | soc_vid = (uint8_t)convert_to_vid(vddc_lookup_table->entries[i].us_vdd); |
1716 | pp_table->SocVid[i] = soc_vid; |
1717 | } |
1718 | while (i < MAX_REGULAR_DPM_NUMBER) { |
1719 | pp_table->SocVid[i] = soc_vid; |
1720 | i++; |
1721 | } |
1722 | } |
1723 | |
1724 | /** |
 * @brief Populates a single SMC memory (UCLK) level using the provided clock.
 *
 * @param hwmgr - the address of the hardware manager.
 * @param mem_clock - the memory clock to use to populate the structure.
 * @param current_mem_vid - output for the memory voltage ID.
 * @param current_memclk_level - location in PPTable for the SMC UCLK structure.
 * @param current_mem_soc_vind - output for the matching SOC voltage index.
 * @return 0 on success.
1730 | */ |
1731 | static int vega10_populate_single_memory_level(struct pp_hwmgr *hwmgr, |
1732 | uint32_t mem_clock, uint8_t *current_mem_vid, |
1733 | PllSetting_t *current_memclk_level, uint8_t *current_mem_soc_vind) |
1734 | { |
1735 | struct vega10_hwmgr *data = hwmgr->backend; |
1736 | struct phm_ppt_v2_information *table_info = |
1737 | (struct phm_ppt_v2_information *)(hwmgr->pptable); |
1738 | struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_mclk; |
1739 | struct pp_atomfwctrl_clock_dividers_soc15 dividers; |
1740 | uint32_t mem_max_clock = |
1741 | hwmgr->platform_descriptor.overdriveLimit.memoryClock; |
1742 | uint32_t i = 0; |
1743 | |
1744 | if (hwmgr->od_enabled) |
1745 | dep_on_mclk = (struct phm_ppt_v1_clock_voltage_dependency_table *) |
1746 | &data->odn_dpm_table.vdd_dep_on_mclk; |
1747 | else |
1748 | dep_on_mclk = table_info->vdd_dep_on_mclk; |
1749 | |
1750 | PP_ASSERT_WITH_CODE(dep_on_mclk, |
1751 | "Invalid SOC_VDD-UCLK Dependency Table!" , |
1752 | return -EINVAL); |
1753 | |
1754 | if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_MCLK) { |
1755 | mem_clock = mem_clock > mem_max_clock ? mem_max_clock : mem_clock; |
1756 | } else { |
1757 | for (i = 0; i < dep_on_mclk->count; i++) { |
1758 | if (dep_on_mclk->entries[i].clk == mem_clock) |
1759 | break; |
1760 | } |
1761 | PP_ASSERT_WITH_CODE(dep_on_mclk->count > i, |
1762 | "Cannot find UCLK in SOC_VDD-UCLK Dependency Table!" , |
1763 | return -EINVAL); |
1764 | } |
1765 | |
1766 | PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10( |
			hwmgr, COMPUTE_GPUCLK_INPUT_FLAG_UCLK, mem_clock, &dividers),
			"Failed to get UCLK settings from VBIOS!",
1769 | return -1); |
1770 | |
1771 | *current_mem_vid = |
1772 | (uint8_t)(convert_to_vid(dep_on_mclk->entries[i].mvdd)); |
1773 | *current_mem_soc_vind = |
1774 | (uint8_t)(dep_on_mclk->entries[i].vddInd); |
1775 | current_memclk_level->FbMult = cpu_to_le32(dividers.ulPll_fb_mult); |
1776 | current_memclk_level->Did = (uint8_t)(dividers.ulDid); |
1777 | |
1778 | PP_ASSERT_WITH_CODE(current_memclk_level->Did >= 1, |
1779 | "Invalid Divider ID!" , |
1780 | return -EINVAL); |
1781 | |
1782 | return 0; |
1783 | } |
1784 | |
1785 | /** |
1786 | * @brief Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states. |
1787 | * |
 * @param hwmgr - the address of the hardware manager.
 * @return 0 on success.
1790 | */ |
1791 | static int vega10_populate_all_memory_levels(struct pp_hwmgr *hwmgr) |
1792 | { |
1793 | struct vega10_hwmgr *data = hwmgr->backend; |
1794 | PPTable_t *pp_table = &(data->smc_state_table.pp_table); |
1795 | struct vega10_single_dpm_table *dpm_table = |
1796 | &(data->dpm_table.mem_table); |
1797 | int result = 0; |
1798 | uint32_t i, j; |
1799 | |
1800 | for (i = 0; i < dpm_table->count; i++) { |
1801 | result = vega10_populate_single_memory_level(hwmgr, |
1802 | dpm_table->dpm_levels[i].value, |
1803 | &(pp_table->MemVid[i]), |
1804 | &(pp_table->UclkLevel[i]), |
1805 | &(pp_table->MemSocVoltageIndex[i])); |
1806 | if (result) |
1807 | return result; |
1808 | } |
1809 | |
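	/* Repeat the last populated UCLK level to fill the remaining SMC entries */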
1810 | j = i - 1; |
1811 | while (i < NUM_UCLK_DPM_LEVELS) { |
1812 | result = vega10_populate_single_memory_level(hwmgr, |
1813 | dpm_table->dpm_levels[j].value, |
1814 | &(pp_table->MemVid[i]), |
1815 | &(pp_table->UclkLevel[i]), |
1816 | &(pp_table->MemSocVoltageIndex[i])); |
1817 | if (result) |
1818 | return result; |
1819 | i++; |
1820 | } |
1821 | |
1822 | pp_table->NumMemoryChannels = (uint16_t)(data->mem_channels); |
1823 | pp_table->MemoryChannelWidth = |
1824 | (uint16_t)(HBM_MEMORY_CHANNEL_WIDTH * |
1825 | channel_number[data->mem_channels]); |
1826 | |
1827 | pp_table->LowestUclkReservedForUlv = |
1828 | (uint8_t)(data->lowest_uclk_reserved_for_ulv); |
1829 | |
1830 | return result; |
1831 | } |
1832 | |
1833 | static int vega10_populate_single_display_type(struct pp_hwmgr *hwmgr, |
1834 | DSPCLK_e disp_clock) |
1835 | { |
1836 | struct vega10_hwmgr *data = hwmgr->backend; |
1837 | PPTable_t *pp_table = &(data->smc_state_table.pp_table); |
1838 | struct phm_ppt_v2_information *table_info = |
1839 | (struct phm_ppt_v2_information *) |
1840 | (hwmgr->pptable); |
1841 | struct phm_ppt_v1_clock_voltage_dependency_table *dep_table; |
1842 | uint32_t i; |
1843 | uint16_t clk = 0, vddc = 0; |
1844 | uint8_t vid = 0; |
1845 | |
1846 | switch (disp_clock) { |
1847 | case DSPCLK_DCEFCLK: |
1848 | dep_table = table_info->vdd_dep_on_dcefclk; |
1849 | break; |
1850 | case DSPCLK_DISPCLK: |
1851 | dep_table = table_info->vdd_dep_on_dispclk; |
1852 | break; |
1853 | case DSPCLK_PIXCLK: |
1854 | dep_table = table_info->vdd_dep_on_pixclk; |
1855 | break; |
1856 | case DSPCLK_PHYCLK: |
1857 | dep_table = table_info->vdd_dep_on_phyclk; |
1858 | break; |
1859 | default: |
1860 | return -1; |
1861 | } |
1862 | |
1863 | PP_ASSERT_WITH_CODE(dep_table->count <= NUM_DSPCLK_LEVELS, |
1864 | "Number Of Entries Exceeded maximum!" , |
1865 | return -1); |
1866 | |
1867 | for (i = 0; i < dep_table->count; i++) { |
1868 | clk = (uint16_t)(dep_table->entries[i].clk / 100); |
1869 | vddc = table_info->vddc_lookup_table-> |
1870 | entries[dep_table->entries[i].vddInd].us_vdd; |
1871 | vid = (uint8_t)convert_to_vid(vddc); |
1872 | pp_table->DisplayClockTable[disp_clock][i].Freq = |
1873 | cpu_to_le16(clk); |
1874 | pp_table->DisplayClockTable[disp_clock][i].Vid = |
1875 | cpu_to_le16(vid); |
1876 | } |
1877 | |
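	/* Pad unused display clock levels with the last populated frequency/VID pair */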
1878 | while (i < NUM_DSPCLK_LEVELS) { |
1879 | pp_table->DisplayClockTable[disp_clock][i].Freq = |
1880 | cpu_to_le16(clk); |
1881 | pp_table->DisplayClockTable[disp_clock][i].Vid = |
1882 | cpu_to_le16(vid); |
1883 | i++; |
1884 | } |
1885 | |
1886 | return 0; |
1887 | } |
1888 | |
1889 | static int vega10_populate_all_display_clock_levels(struct pp_hwmgr *hwmgr) |
1890 | { |
1891 | uint32_t i; |
1892 | |
1893 | for (i = 0; i < DSPCLK_COUNT; i++) { |
1894 | PP_ASSERT_WITH_CODE(!vega10_populate_single_display_type(hwmgr, i), |
1895 | "Failed to populate Clock in DisplayClockTable!" , |
1896 | return -1); |
1897 | } |
1898 | |
1899 | return 0; |
1900 | } |
1901 | |
1902 | static int vega10_populate_single_eclock_level(struct pp_hwmgr *hwmgr, |
1903 | uint32_t eclock, uint8_t *current_eclk_did, |
1904 | uint8_t *current_soc_vol) |
1905 | { |
1906 | struct phm_ppt_v2_information *table_info = |
1907 | (struct phm_ppt_v2_information *)(hwmgr->pptable); |
1908 | struct phm_ppt_v1_mm_clock_voltage_dependency_table *dep_table = |
1909 | table_info->mm_dep_table; |
1910 | struct pp_atomfwctrl_clock_dividers_soc15 dividers; |
1911 | uint32_t i; |
1912 | |
1913 | PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr, |
1914 | COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, |
			eclock, &dividers),
			"Failed to get ECLK clock settings from VBIOS!",
1917 | return -1); |
1918 | |
1919 | *current_eclk_did = (uint8_t)dividers.ulDid; |
1920 | |
1921 | for (i = 0; i < dep_table->count; i++) { |
1922 | if (dep_table->entries[i].eclk == eclock) |
1923 | *current_soc_vol = dep_table->entries[i].vddcInd; |
1924 | } |
1925 | |
1926 | return 0; |
1927 | } |
1928 | |
1929 | static int vega10_populate_smc_vce_levels(struct pp_hwmgr *hwmgr) |
1930 | { |
1931 | struct vega10_hwmgr *data = hwmgr->backend; |
1932 | PPTable_t *pp_table = &(data->smc_state_table.pp_table); |
1933 | struct vega10_single_dpm_table *dpm_table = &(data->dpm_table.eclk_table); |
1934 | int result = -EINVAL; |
1935 | uint32_t i, j; |
1936 | |
1937 | for (i = 0; i < dpm_table->count; i++) { |
1938 | result = vega10_populate_single_eclock_level(hwmgr, |
1939 | dpm_table->dpm_levels[i].value, |
1940 | &(pp_table->EclkDid[i]), |
1941 | &(pp_table->VceDpmVoltageIndex[i])); |
1942 | if (result) |
1943 | return result; |
1944 | } |
1945 | |
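	/* Repeat the last populated ECLK level to fill the remaining VCE DPM entries */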
1946 | j = i - 1; |
1947 | while (i < NUM_VCE_DPM_LEVELS) { |
1948 | result = vega10_populate_single_eclock_level(hwmgr, |
1949 | dpm_table->dpm_levels[j].value, |
1950 | &(pp_table->EclkDid[i]), |
1951 | &(pp_table->VceDpmVoltageIndex[i])); |
1952 | if (result) |
1953 | return result; |
1954 | i++; |
1955 | } |
1956 | |
1957 | return result; |
1958 | } |
1959 | |
1960 | static int vega10_populate_single_vclock_level(struct pp_hwmgr *hwmgr, |
1961 | uint32_t vclock, uint8_t *current_vclk_did) |
1962 | { |
1963 | struct pp_atomfwctrl_clock_dividers_soc15 dividers; |
1964 | |
1965 | PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr, |
1966 | COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, |
			vclock, &dividers),
			"Failed to get VCLK clock settings from VBIOS!",
1969 | return -EINVAL); |
1970 | |
1971 | *current_vclk_did = (uint8_t)dividers.ulDid; |
1972 | |
1973 | return 0; |
1974 | } |
1975 | |
1976 | static int vega10_populate_single_dclock_level(struct pp_hwmgr *hwmgr, |
1977 | uint32_t dclock, uint8_t *current_dclk_did) |
1978 | { |
1979 | struct pp_atomfwctrl_clock_dividers_soc15 dividers; |
1980 | |
1981 | PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr, |
1982 | COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, |
			dclock, &dividers),
			"Failed to get DCLK clock settings from VBIOS!",
1985 | return -EINVAL); |
1986 | |
1987 | *current_dclk_did = (uint8_t)dividers.ulDid; |
1988 | |
1989 | return 0; |
1990 | } |
1991 | |
1992 | static int vega10_populate_smc_uvd_levels(struct pp_hwmgr *hwmgr) |
1993 | { |
1994 | struct vega10_hwmgr *data = hwmgr->backend; |
1995 | PPTable_t *pp_table = &(data->smc_state_table.pp_table); |
1996 | struct vega10_single_dpm_table *vclk_dpm_table = |
1997 | &(data->dpm_table.vclk_table); |
1998 | struct vega10_single_dpm_table *dclk_dpm_table = |
1999 | &(data->dpm_table.dclk_table); |
2000 | struct phm_ppt_v2_information *table_info = |
2001 | (struct phm_ppt_v2_information *)(hwmgr->pptable); |
2002 | struct phm_ppt_v1_mm_clock_voltage_dependency_table *dep_table = |
2003 | table_info->mm_dep_table; |
2004 | int result = -EINVAL; |
2005 | uint32_t i, j; |
2006 | |
2007 | for (i = 0; i < vclk_dpm_table->count; i++) { |
2008 | result = vega10_populate_single_vclock_level(hwmgr, |
2009 | vclk_dpm_table->dpm_levels[i].value, |
2010 | &(pp_table->VclkDid[i])); |
2011 | if (result) |
2012 | return result; |
2013 | } |
2014 | |
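	/* Repeat the last populated VCLK level to fill the remaining UVD DPM entries */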
2015 | j = i - 1; |
2016 | while (i < NUM_UVD_DPM_LEVELS) { |
2017 | result = vega10_populate_single_vclock_level(hwmgr, |
2018 | vclk_dpm_table->dpm_levels[j].value, |
2019 | &(pp_table->VclkDid[i])); |
2020 | if (result) |
2021 | return result; |
2022 | i++; |
2023 | } |
2024 | |
2025 | for (i = 0; i < dclk_dpm_table->count; i++) { |
2026 | result = vega10_populate_single_dclock_level(hwmgr, |
2027 | dclk_dpm_table->dpm_levels[i].value, |
2028 | &(pp_table->DclkDid[i])); |
2029 | if (result) |
2030 | return result; |
2031 | } |
2032 | |
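	/* Repeat the last populated DCLK level to fill the remaining UVD DPM entries */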
2033 | j = i - 1; |
2034 | while (i < NUM_UVD_DPM_LEVELS) { |
2035 | result = vega10_populate_single_dclock_level(hwmgr, |
2036 | dclk_dpm_table->dpm_levels[j].value, |
2037 | &(pp_table->DclkDid[i])); |
2038 | if (result) |
2039 | return result; |
2040 | i++; |
2041 | } |
2042 | |
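	/*
	 * The UVD voltage index comes from the MM dependency table entry whose
	 * vclk/dclk pair matches the corresponding DPM level; bail out if the
	 * tables do not line up.
	 */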
2043 | for (i = 0; i < dep_table->count; i++) { |
2044 | if (dep_table->entries[i].vclk == |
2045 | vclk_dpm_table->dpm_levels[i].value && |
2046 | dep_table->entries[i].dclk == |
2047 | dclk_dpm_table->dpm_levels[i].value) |
2048 | pp_table->UvdDpmVoltageIndex[i] = |
2049 | dep_table->entries[i].vddcInd; |
2050 | else |
2051 | return -1; |
2052 | } |
2053 | |
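	/* Pad the remaining UVD voltage indices with the last matched entry */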
2054 | j = i - 1; |
2055 | while (i < NUM_UVD_DPM_LEVELS) { |
2056 | pp_table->UvdDpmVoltageIndex[i] = dep_table->entries[j].vddcInd; |
2057 | i++; |
2058 | } |
2059 | |
2060 | return 0; |
2061 | } |
2062 | |
2063 | static int vega10_populate_clock_stretcher_table(struct pp_hwmgr *hwmgr) |
2064 | { |
2065 | struct vega10_hwmgr *data = hwmgr->backend; |
2066 | PPTable_t *pp_table = &(data->smc_state_table.pp_table); |
2067 | struct phm_ppt_v2_information *table_info = |
2068 | (struct phm_ppt_v2_information *)(hwmgr->pptable); |
2069 | struct phm_ppt_v1_clock_voltage_dependency_table *dep_table = |
2070 | table_info->vdd_dep_on_sclk; |
2071 | uint32_t i; |
2072 | |
2073 | for (i = 0; i < dep_table->count; i++) { |
2074 | pp_table->CksEnable[i] = dep_table->entries[i].cks_enable; |
2075 | pp_table->CksVidOffset[i] = (uint8_t)(dep_table->entries[i].cks_voffset |
2076 | * VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1); |
2077 | } |
2078 | |
2079 | return 0; |
2080 | } |
2081 | |
2082 | static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr) |
2083 | { |
2084 | struct vega10_hwmgr *data = hwmgr->backend; |
2085 | PPTable_t *pp_table = &(data->smc_state_table.pp_table); |
2086 | struct phm_ppt_v2_information *table_info = |
2087 | (struct phm_ppt_v2_information *)(hwmgr->pptable); |
2088 | struct phm_ppt_v1_clock_voltage_dependency_table *dep_table = |
2089 | table_info->vdd_dep_on_sclk; |
2090 | struct pp_atomfwctrl_avfs_parameters avfs_params = {0}; |
2091 | int result = 0; |
2092 | uint32_t i; |
2093 | |
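	/* Default to an inverted VID range; overwritten below when AVFS data is available */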
2094 | pp_table->MinVoltageVid = (uint8_t)0xff; |
2095 | pp_table->MaxVoltageVid = (uint8_t)0; |
2096 | |
2097 | if (data->smu_features[GNLD_AVFS].supported) { |
2098 | result = pp_atomfwctrl_get_avfs_information(hwmgr, &avfs_params); |
2099 | if (!result) { |
2100 | pp_table->MinVoltageVid = (uint8_t) |
2101 | convert_to_vid((uint16_t)(avfs_params.ulMinVddc)); |
2102 | pp_table->MaxVoltageVid = (uint8_t) |
2103 | convert_to_vid((uint16_t)(avfs_params.ulMaxVddc)); |
2104 | |
2105 | pp_table->AConstant[0] = cpu_to_le32(avfs_params.ulMeanNsigmaAcontant0); |
2106 | pp_table->AConstant[1] = cpu_to_le32(avfs_params.ulMeanNsigmaAcontant1); |
2107 | pp_table->AConstant[2] = cpu_to_le32(avfs_params.ulMeanNsigmaAcontant2); |
2108 | pp_table->DC_tol_sigma = cpu_to_le16(avfs_params.usMeanNsigmaDcTolSigma); |
2109 | pp_table->Platform_mean = cpu_to_le16(avfs_params.usMeanNsigmaPlatformMean); |
2110 | pp_table->Platform_sigma = cpu_to_le16(avfs_params.usMeanNsigmaDcTolSigma); |
2111 | pp_table->PSM_Age_CompFactor = cpu_to_le16(avfs_params.usPsmAgeComfactor); |
2112 | |
2113 | pp_table->BtcGbVdroopTableCksOff.a0 = |
2114 | cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA0); |
2115 | pp_table->BtcGbVdroopTableCksOff.a0_shift = 20; |
2116 | pp_table->BtcGbVdroopTableCksOff.a1 = |
2117 | cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA1); |
2118 | pp_table->BtcGbVdroopTableCksOff.a1_shift = 20; |
2119 | pp_table->BtcGbVdroopTableCksOff.a2 = |
2120 | cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA2); |
2121 | pp_table->BtcGbVdroopTableCksOff.a2_shift = 20; |
2122 | |
2123 | pp_table->OverrideBtcGbCksOn = avfs_params.ucEnableGbVdroopTableCkson; |
2124 | pp_table->BtcGbVdroopTableCksOn.a0 = |
2125 | cpu_to_le32(avfs_params.ulGbVdroopTableCksonA0); |
2126 | pp_table->BtcGbVdroopTableCksOn.a0_shift = 20; |
2127 | pp_table->BtcGbVdroopTableCksOn.a1 = |
2128 | cpu_to_le32(avfs_params.ulGbVdroopTableCksonA1); |
2129 | pp_table->BtcGbVdroopTableCksOn.a1_shift = 20; |
2130 | pp_table->BtcGbVdroopTableCksOn.a2 = |
2131 | cpu_to_le32(avfs_params.ulGbVdroopTableCksonA2); |
2132 | pp_table->BtcGbVdroopTableCksOn.a2_shift = 20; |
2133 | |
2134 | pp_table->AvfsGbCksOn.m1 = |
2135 | cpu_to_le32(avfs_params.ulGbFuseTableCksonM1); |
2136 | pp_table->AvfsGbCksOn.m2 = |
2137 | cpu_to_le32(avfs_params.ulGbFuseTableCksonM2); |
2138 | pp_table->AvfsGbCksOn.b = |
2139 | cpu_to_le32(avfs_params.ulGbFuseTableCksonB); |
2140 | pp_table->AvfsGbCksOn.m1_shift = 24; |
2141 | pp_table->AvfsGbCksOn.m2_shift = 12; |
2142 | pp_table->AvfsGbCksOn.b_shift = 0; |
2143 | |
2144 | pp_table->OverrideAvfsGbCksOn = |
2145 | avfs_params.ucEnableGbFuseTableCkson; |
2146 | pp_table->AvfsGbCksOff.m1 = |
2147 | cpu_to_le32(avfs_params.ulGbFuseTableCksoffM1); |
2148 | pp_table->AvfsGbCksOff.m2 = |
2149 | cpu_to_le32(avfs_params.ulGbFuseTableCksoffM2); |
2150 | pp_table->AvfsGbCksOff.b = |
2151 | cpu_to_le32(avfs_params.ulGbFuseTableCksoffB); |
2152 | pp_table->AvfsGbCksOff.m1_shift = 24; |
2153 | pp_table->AvfsGbCksOff.m2_shift = 12; |
2154 | pp_table->AvfsGbCksOff.b_shift = 0; |
2155 | |
2156 | for (i = 0; i < dep_table->count; i++) |
2157 | pp_table->StaticVoltageOffsetVid[i] = |
2158 | convert_to_vid((uint8_t)(dep_table->entries[i].sclk_offset)); |
2159 | |
2160 | if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT != |
2161 | data->disp_clk_quad_eqn_a) && |
2162 | (PPREGKEY_VEGA10QUADRATICEQUATION_DFLT != |
2163 | data->disp_clk_quad_eqn_b)) { |
2164 | pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1 = |
2165 | (int32_t)data->disp_clk_quad_eqn_a; |
2166 | pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2 = |
2167 | (int32_t)data->disp_clk_quad_eqn_b; |
2168 | pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].b = |
2169 | (int32_t)data->disp_clk_quad_eqn_c; |
2170 | } else { |
2171 | pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1 = |
2172 | (int32_t)avfs_params.ulDispclk2GfxclkM1; |
2173 | pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2 = |
2174 | (int32_t)avfs_params.ulDispclk2GfxclkM2; |
2175 | pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].b = |
2176 | (int32_t)avfs_params.ulDispclk2GfxclkB; |
2177 | } |
2178 | |
2179 | pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1_shift = 24; |
2180 | pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2_shift = 12; |
2181 | pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].b_shift = 12; |
2182 | |
2183 | if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT != |
2184 | data->dcef_clk_quad_eqn_a) && |
2185 | (PPREGKEY_VEGA10QUADRATICEQUATION_DFLT != |
2186 | data->dcef_clk_quad_eqn_b)) { |
2187 | pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1 = |
2188 | (int32_t)data->dcef_clk_quad_eqn_a; |
2189 | pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2 = |
2190 | (int32_t)data->dcef_clk_quad_eqn_b; |
2191 | pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].b = |
2192 | (int32_t)data->dcef_clk_quad_eqn_c; |
2193 | } else { |
2194 | pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1 = |
2195 | (int32_t)avfs_params.ulDcefclk2GfxclkM1; |
2196 | pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2 = |
2197 | (int32_t)avfs_params.ulDcefclk2GfxclkM2; |
2198 | pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].b = |
2199 | (int32_t)avfs_params.ulDcefclk2GfxclkB; |
2200 | } |
2201 | |
2202 | pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1_shift = 24; |
2203 | pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2_shift = 12; |
2204 | pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].b_shift = 12; |
2205 | |
2206 | if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT != |
2207 | data->pixel_clk_quad_eqn_a) && |
2208 | (PPREGKEY_VEGA10QUADRATICEQUATION_DFLT != |
2209 | data->pixel_clk_quad_eqn_b)) { |
2210 | pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1 = |
2211 | (int32_t)data->pixel_clk_quad_eqn_a; |
2212 | pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2 = |
2213 | (int32_t)data->pixel_clk_quad_eqn_b; |
2214 | pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].b = |
2215 | (int32_t)data->pixel_clk_quad_eqn_c; |
2216 | } else { |
2217 | pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1 = |
2218 | (int32_t)avfs_params.ulPixelclk2GfxclkM1; |
2219 | pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2 = |
2220 | (int32_t)avfs_params.ulPixelclk2GfxclkM2; |
2221 | pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].b = |
2222 | (int32_t)avfs_params.ulPixelclk2GfxclkB; |
2223 | } |
2224 | |
2225 | pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1_shift = 24; |
2226 | pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2_shift = 12; |
2227 | pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].b_shift = 12; |
2228 | if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT != |
2229 | data->phy_clk_quad_eqn_a) && |
2230 | (PPREGKEY_VEGA10QUADRATICEQUATION_DFLT != |
2231 | data->phy_clk_quad_eqn_b)) { |
2232 | pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1 = |
2233 | (int32_t)data->phy_clk_quad_eqn_a; |
2234 | pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2 = |
2235 | (int32_t)data->phy_clk_quad_eqn_b; |
2236 | pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b = |
2237 | (int32_t)data->phy_clk_quad_eqn_c; |
2238 | } else { |
2239 | pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1 = |
2240 | (int32_t)avfs_params.ulPhyclk2GfxclkM1; |
2241 | pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2 = |
2242 | (int32_t)avfs_params.ulPhyclk2GfxclkM2; |
2243 | pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b = |
2244 | (int32_t)avfs_params.ulPhyclk2GfxclkB; |
2245 | } |
2246 | |
2247 | pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1_shift = 24; |
2248 | pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2_shift = 12; |
2249 | pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b_shift = 12; |
2250 | |
2251 | pp_table->AcgBtcGbVdroopTable.a0 = avfs_params.ulAcgGbVdroopTableA0; |
2252 | pp_table->AcgBtcGbVdroopTable.a0_shift = 20; |
2253 | pp_table->AcgBtcGbVdroopTable.a1 = avfs_params.ulAcgGbVdroopTableA1; |
2254 | pp_table->AcgBtcGbVdroopTable.a1_shift = 20; |
2255 | pp_table->AcgBtcGbVdroopTable.a2 = avfs_params.ulAcgGbVdroopTableA2; |
2256 | pp_table->AcgBtcGbVdroopTable.a2_shift = 20; |
2257 | |
2258 | pp_table->AcgAvfsGb.m1 = avfs_params.ulAcgGbFuseTableM1; |
2259 | pp_table->AcgAvfsGb.m2 = avfs_params.ulAcgGbFuseTableM2; |
2260 | pp_table->AcgAvfsGb.b = avfs_params.ulAcgGbFuseTableB; |
2261 | pp_table->AcgAvfsGb.m1_shift = 0; |
2262 | pp_table->AcgAvfsGb.m2_shift = 0; |
2263 | pp_table->AcgAvfsGb.b_shift = 0; |
2264 | |
2265 | } else { |
2266 | data->smu_features[GNLD_AVFS].supported = false; |
2267 | } |
2268 | } |
2269 | |
2270 | return 0; |
2271 | } |
2272 | |
2273 | static int vega10_acg_enable(struct pp_hwmgr *hwmgr) |
2274 | { |
2275 | struct vega10_hwmgr *data = hwmgr->backend; |
	uint32_t acg_btc_response;
2277 | |
2278 | if (data->smu_features[GNLD_ACG].supported) { |
2279 | if (0 == vega10_enable_smc_features(hwmgr, true, |
2280 | data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_bitmap)) |
2281 | data->smu_features[GNLD_DPM_PREFETCHER].enabled = true; |
2282 | |
2283 | smum_send_msg_to_smc(hwmgr, PPSMC_MSG_InitializeAcg); |
2284 | |
2285 | smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc); |
		acg_btc_response = smum_get_argument(hwmgr);
2287 | |
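		/*
		 * A BTC response of 1 indicates success: run ACG in closed or
		 * open loop according to acg_loop_state, then enable the ACG
		 * SMC feature.
		 */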
		if (1 == acg_btc_response) {
2289 | if (1 == data->acg_loop_state) |
2290 | smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgInClosedLoop); |
2291 | else if (2 == data->acg_loop_state) |
2292 | smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgInOpenLoop); |
2293 | if (0 == vega10_enable_smc_features(hwmgr, true, |
2294 | data->smu_features[GNLD_ACG].smu_feature_bitmap)) |
2295 | data->smu_features[GNLD_ACG].enabled = true; |
2296 | } else { |
2297 | pr_info("[ACG_Enable] ACG BTC Returned Failed Status!\n" ); |
2298 | data->smu_features[GNLD_ACG].enabled = false; |
2299 | } |
2300 | } |
2301 | |
2302 | return 0; |
2303 | } |
2304 | |
2305 | static int vega10_acg_disable(struct pp_hwmgr *hwmgr) |
2306 | { |
2307 | struct vega10_hwmgr *data = hwmgr->backend; |
2308 | |
2309 | if (data->smu_features[GNLD_ACG].supported && |
2310 | data->smu_features[GNLD_ACG].enabled) |
2311 | if (!vega10_enable_smc_features(hwmgr, false, |
2312 | data->smu_features[GNLD_ACG].smu_feature_bitmap)) |
2313 | data->smu_features[GNLD_ACG].enabled = false; |
2314 | |
2315 | return 0; |
2316 | } |
2317 | |
2318 | static int vega10_populate_gpio_parameters(struct pp_hwmgr *hwmgr) |
2319 | { |
2320 | struct vega10_hwmgr *data = hwmgr->backend; |
2321 | PPTable_t *pp_table = &(data->smc_state_table.pp_table); |
2322 | struct pp_atomfwctrl_gpio_parameters gpio_params = {0}; |
2323 | int result; |
2324 | |
2325 | result = pp_atomfwctrl_get_gpio_information(hwmgr, &gpio_params); |
2326 | if (!result) { |
2327 | if (PP_CAP(PHM_PlatformCaps_RegulatorHot) && |
2328 | data->registry_data.regulator_hot_gpio_support) { |
2329 | pp_table->VR0HotGpio = gpio_params.ucVR0HotGpio; |
2330 | pp_table->VR0HotPolarity = gpio_params.ucVR0HotPolarity; |
2331 | pp_table->VR1HotGpio = gpio_params.ucVR1HotGpio; |
2332 | pp_table->VR1HotPolarity = gpio_params.ucVR1HotPolarity; |
2333 | } else { |
2334 | pp_table->VR0HotGpio = 0; |
2335 | pp_table->VR0HotPolarity = 0; |
2336 | pp_table->VR1HotGpio = 0; |
2337 | pp_table->VR1HotPolarity = 0; |
2338 | } |
2339 | |
2340 | if (PP_CAP(PHM_PlatformCaps_AutomaticDCTransition) && |
2341 | data->registry_data.ac_dc_switch_gpio_support) { |
2342 | pp_table->AcDcGpio = gpio_params.ucAcDcGpio; |
2343 | pp_table->AcDcPolarity = gpio_params.ucAcDcPolarity; |
2344 | } else { |
2345 | pp_table->AcDcGpio = 0; |
2346 | pp_table->AcDcPolarity = 0; |
2347 | } |
2348 | } |
2349 | |
2350 | return result; |
2351 | } |
2352 | |
2353 | static int vega10_avfs_enable(struct pp_hwmgr *hwmgr, bool enable) |
2354 | { |
2355 | struct vega10_hwmgr *data = hwmgr->backend; |
2356 | |
2357 | if (data->smu_features[GNLD_AVFS].supported) { |
2358 | if (enable) { |
2359 | PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr, |
2360 | true, |
2361 | data->smu_features[GNLD_AVFS].smu_feature_bitmap), |
2362 | "[avfs_control] Attempt to Enable AVFS feature Failed!" , |
2363 | return -1); |
2364 | data->smu_features[GNLD_AVFS].enabled = true; |
2365 | } else { |
2366 | PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr, |
2367 | false, |
2368 | data->smu_features[GNLD_AVFS].smu_feature_bitmap), |
2369 | "[avfs_control] Attempt to Disable AVFS feature Failed!" , |
2370 | return -1); |
2371 | data->smu_features[GNLD_AVFS].enabled = false; |
2372 | } |
2373 | } |
2374 | |
2375 | return 0; |
2376 | } |
2377 | |
2378 | static int vega10_update_avfs(struct pp_hwmgr *hwmgr) |
2379 | { |
2380 | struct vega10_hwmgr *data = hwmgr->backend; |
2381 | |
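	/*
	 * Keep AVFS disabled while a VDDC override is pending; toggle it off
	 * and on after any other DPM table change so the SMC re-evaluates the
	 * updated tables; otherwise simply make sure it stays enabled.
	 */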
2382 | if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_VDDC) { |
2383 | vega10_avfs_enable(hwmgr, false); |
2384 | } else if (data->need_update_dpm_table) { |
2385 | vega10_avfs_enable(hwmgr, false); |
2386 | vega10_avfs_enable(hwmgr, true); |
2387 | } else { |
2388 | vega10_avfs_enable(hwmgr, true); |
2389 | } |
2390 | |
2391 | return 0; |
2392 | } |
2393 | |
2394 | static int vega10_populate_and_upload_avfs_fuse_override(struct pp_hwmgr *hwmgr) |
2395 | { |
2396 | int result = 0; |
2397 | |
2398 | uint64_t serial_number = 0; |
2399 | uint32_t top32, bottom32; |
2400 | struct phm_fuses_default fuse; |
2401 | |
2402 | struct vega10_hwmgr *data = hwmgr->backend; |
2403 | AvfsFuseOverride_t *avfs_fuse_table = &(data->smc_state_table.avfs_fuse_override_table); |
2404 | |
2405 | smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32); |
2406 | top32 = smum_get_argument(hwmgr); |
2407 | |
2408 | smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32); |
2409 | bottom32 = smum_get_argument(hwmgr); |
2410 | |
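	/* Assemble the serial number used for the per-part fuse-override lookup */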
2411 | serial_number = ((uint64_t)bottom32 << 32) | top32; |
2412 | |
2413 | if (pp_override_get_default_fuse_value(serial_number, &fuse) == 0) { |
2414 | avfs_fuse_table->VFT0_b = fuse.VFT0_b; |
2415 | avfs_fuse_table->VFT0_m1 = fuse.VFT0_m1; |
2416 | avfs_fuse_table->VFT0_m2 = fuse.VFT0_m2; |
2417 | avfs_fuse_table->VFT1_b = fuse.VFT1_b; |
2418 | avfs_fuse_table->VFT1_m1 = fuse.VFT1_m1; |
2419 | avfs_fuse_table->VFT1_m2 = fuse.VFT1_m2; |
2420 | avfs_fuse_table->VFT2_b = fuse.VFT2_b; |
2421 | avfs_fuse_table->VFT2_m1 = fuse.VFT2_m1; |
2422 | avfs_fuse_table->VFT2_m2 = fuse.VFT2_m2; |
2423 | result = smum_smc_table_manager(hwmgr, (uint8_t *)avfs_fuse_table, |
2424 | AVFSFUSETABLE, false); |
2425 | PP_ASSERT_WITH_CODE(!result, |
2426 | "Failed to upload FuseOVerride!" , |
2427 | ); |
2428 | } |
2429 | |
2430 | return result; |
2431 | } |
2432 | |
2433 | static void vega10_check_dpm_table_updated(struct pp_hwmgr *hwmgr) |
2434 | { |
2435 | struct vega10_hwmgr *data = hwmgr->backend; |
2436 | struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table); |
2437 | struct phm_ppt_v2_information *table_info = hwmgr->pptable; |
2438 | struct phm_ppt_v1_clock_voltage_dependency_table *dep_table; |
2439 | struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table; |
2440 | uint32_t i; |
2441 | |
2442 | dep_table = table_info->vdd_dep_on_mclk; |
2443 | odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dep_on_mclk); |
2444 | |
2445 | for (i = 0; i < dep_table->count; i++) { |
2446 | if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) { |
2447 | data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK; |
2448 | return; |
2449 | } |
2450 | } |
2451 | |
2452 | dep_table = table_info->vdd_dep_on_sclk; |
2453 | odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dep_on_sclk); |
2454 | for (i = 0; i < dep_table->count; i++) { |
2455 | if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) { |
2456 | data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK; |
2457 | return; |
2458 | } |
2459 | } |
2460 | |
2461 | if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_VDDC) { |
2462 | data->need_update_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC; |
2463 | data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK; |
2464 | } |
2465 | } |
2466 | |
2467 | /** |
2468 | * Initializes the SMC table and uploads it |
2469 | * |
2470 | * @param hwmgr the address of the powerplay hardware manager. |
 * @return 0 on success, a negative error code otherwise.
2473 | */ |
2474 | static int vega10_init_smc_table(struct pp_hwmgr *hwmgr) |
2475 | { |
2476 | int result; |
2477 | struct vega10_hwmgr *data = hwmgr->backend; |
2478 | struct phm_ppt_v2_information *table_info = |
2479 | (struct phm_ppt_v2_information *)(hwmgr->pptable); |
2480 | PPTable_t *pp_table = &(data->smc_state_table.pp_table); |
2481 | struct pp_atomfwctrl_voltage_table voltage_table; |
2482 | struct pp_atomfwctrl_bios_boot_up_values boot_up_values; |
2483 | struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table); |
2484 | |
2485 | result = vega10_setup_default_dpm_tables(hwmgr); |
2486 | PP_ASSERT_WITH_CODE(!result, |
2487 | "Failed to setup default DPM tables!" , |
2488 | return result); |
2489 | |
2490 | /* initialize ODN table */ |
2491 | if (hwmgr->od_enabled) { |
2492 | if (odn_table->max_vddc) { |
2493 | data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK; |
2494 | vega10_check_dpm_table_updated(hwmgr); |
2495 | } else { |
2496 | vega10_odn_initial_default_setting(hwmgr); |
2497 | } |
2498 | } |
2499 | |
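	/* Query the SVID2 VDDC voltage table for the VID step and PSI capabilities */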
2500 | pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_VDDC, |
2501 | VOLTAGE_OBJ_SVID2, &voltage_table); |
2502 | pp_table->MaxVidStep = voltage_table.max_vid_step; |
2503 | |
2504 | pp_table->GfxDpmVoltageMode = |
2505 | (uint8_t)(table_info->uc_gfx_dpm_voltage_mode); |
2506 | pp_table->SocDpmVoltageMode = |
2507 | (uint8_t)(table_info->uc_soc_dpm_voltage_mode); |
2508 | pp_table->UclkDpmVoltageMode = |
2509 | (uint8_t)(table_info->uc_uclk_dpm_voltage_mode); |
2510 | pp_table->UvdDpmVoltageMode = |
2511 | (uint8_t)(table_info->uc_uvd_dpm_voltage_mode); |
2512 | pp_table->VceDpmVoltageMode = |
2513 | (uint8_t)(table_info->uc_vce_dpm_voltage_mode); |
2514 | pp_table->Mp0DpmVoltageMode = |
2515 | (uint8_t)(table_info->uc_mp0_dpm_voltage_mode); |
2516 | |
2517 | pp_table->DisplayDpmVoltageMode = |
2518 | (uint8_t)(table_info->uc_dcef_dpm_voltage_mode); |
2519 | |
2520 | data->vddc_voltage_table.psi0_enable = voltage_table.psi0_enable; |
2521 | data->vddc_voltage_table.psi1_enable = voltage_table.psi1_enable; |
2522 | |
2523 | if (data->registry_data.ulv_support && |
2524 | table_info->us_ulv_voltage_offset) { |
2525 | result = vega10_populate_ulv_state(hwmgr); |
2526 | PP_ASSERT_WITH_CODE(!result, |
2527 | "Failed to initialize ULV state!" , |
2528 | return result); |
2529 | } |
2530 | |
2531 | result = vega10_populate_smc_link_levels(hwmgr); |
2532 | PP_ASSERT_WITH_CODE(!result, |
2533 | "Failed to initialize Link Level!" , |
2534 | return result); |
2535 | |
2536 | result = vega10_populate_all_graphic_levels(hwmgr); |
2537 | PP_ASSERT_WITH_CODE(!result, |
2538 | "Failed to initialize Graphics Level!" , |
2539 | return result); |
2540 | |
2541 | result = vega10_populate_all_memory_levels(hwmgr); |
2542 | PP_ASSERT_WITH_CODE(!result, |
2543 | "Failed to initialize Memory Level!" , |
2544 | return result); |
2545 | |
2546 | vega10_populate_vddc_soc_levels(hwmgr); |
2547 | |
2548 | result = vega10_populate_all_display_clock_levels(hwmgr); |
2549 | PP_ASSERT_WITH_CODE(!result, |
2550 | "Failed to initialize Display Level!" , |
2551 | return result); |
2552 | |
2553 | result = vega10_populate_smc_vce_levels(hwmgr); |
2554 | PP_ASSERT_WITH_CODE(!result, |
2555 | "Failed to initialize VCE Level!" , |
2556 | return result); |
2557 | |
2558 | result = vega10_populate_smc_uvd_levels(hwmgr); |
2559 | PP_ASSERT_WITH_CODE(!result, |
2560 | "Failed to initialize UVD Level!" , |
2561 | return result); |
2562 | |
2563 | if (data->registry_data.clock_stretcher_support) { |
2564 | result = vega10_populate_clock_stretcher_table(hwmgr); |
2565 | PP_ASSERT_WITH_CODE(!result, |
2566 | "Failed to populate Clock Stretcher Table!" , |
2567 | return result); |
2568 | } |
2569 | |
2570 | result = pp_atomfwctrl_get_vbios_bootup_values(hwmgr, &boot_up_values); |
2571 | if (!result) { |
2572 | data->vbios_boot_state.vddc = boot_up_values.usVddc; |
2573 | data->vbios_boot_state.vddci = boot_up_values.usVddci; |
2574 | data->vbios_boot_state.mvddc = boot_up_values.usMvddc; |
2575 | data->vbios_boot_state.gfx_clock = boot_up_values.ulGfxClk; |
2576 | data->vbios_boot_state.mem_clock = boot_up_values.ulUClk; |
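		/* Read the boot SOCCLK and DCEFCLK from SYSPLL0 via the ATOM firmware interface */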
2577 | pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, |
2578 | SMU9_SYSPLL0_SOCCLK_ID, 0, &boot_up_values.ulSocClk); |
2579 | |
2580 | pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, |
2581 | SMU9_SYSPLL0_DCEFCLK_ID, 0, &boot_up_values.ulDCEFClk); |
2582 | |
2583 | data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk; |
2584 | data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk; |
2585 | if (0 != boot_up_values.usVddc) { |
2586 | smum_send_msg_to_smc_with_parameter(hwmgr, |
2587 | PPSMC_MSG_SetFloorSocVoltage, |
2588 | (boot_up_values.usVddc * 4)); |
2589 | data->vbios_boot_state.bsoc_vddc_lock = true; |
2590 | } else { |
2591 | data->vbios_boot_state.bsoc_vddc_lock = false; |
2592 | } |
2593 | smum_send_msg_to_smc_with_parameter(hwmgr, |
2594 | PPSMC_MSG_SetMinDeepSleepDcefclk, |
2595 | (uint32_t)(data->vbios_boot_state.dcef_clock / 100)); |
2596 | } |
2597 | |
2598 | result = vega10_populate_avfs_parameters(hwmgr); |
2599 | PP_ASSERT_WITH_CODE(!result, |
2600 | "Failed to initialize AVFS Parameters!" , |
2601 | return result); |
2602 | |
2603 | result = vega10_populate_gpio_parameters(hwmgr); |
2604 | PP_ASSERT_WITH_CODE(!result, |
2605 | "Failed to initialize GPIO Parameters!" , |
2606 | return result); |
2607 | |
2608 | pp_table->GfxclkAverageAlpha = (uint8_t) |
2609 | (data->gfxclk_average_alpha); |
2610 | pp_table->SocclkAverageAlpha = (uint8_t) |
2611 | (data->socclk_average_alpha); |
2612 | pp_table->UclkAverageAlpha = (uint8_t) |
2613 | (data->uclk_average_alpha); |
2614 | pp_table->GfxActivityAverageAlpha = (uint8_t) |
2615 | (data->gfx_activity_average_alpha); |
2616 | |
2617 | vega10_populate_and_upload_avfs_fuse_override(hwmgr); |
2618 | |
2619 | result = smum_smc_table_manager(hwmgr, (uint8_t *)pp_table, PPTABLE, false); |
2620 | |
2621 | PP_ASSERT_WITH_CODE(!result, |
2622 | "Failed to upload PPtable!" , return result); |
2623 | |
2624 | result = vega10_avfs_enable(hwmgr, true); |
2625 | PP_ASSERT_WITH_CODE(!result, "Attempt to enable AVFS feature Failed!" , |
2626 | return result); |
2627 | vega10_acg_enable(hwmgr); |
2628 | |
2629 | return 0; |
2630 | } |
2631 | |
2632 | static int vega10_enable_thermal_protection(struct pp_hwmgr *hwmgr) |
2633 | { |
2634 | struct vega10_hwmgr *data = hwmgr->backend; |
2635 | |
2636 | if (data->smu_features[GNLD_THERMAL].supported) { |
2637 | if (data->smu_features[GNLD_THERMAL].enabled) |
2638 | pr_info("THERMAL Feature Already enabled!" ); |
2639 | |
2640 | PP_ASSERT_WITH_CODE( |
2641 | !vega10_enable_smc_features(hwmgr, |
2642 | true, |
2643 | data->smu_features[GNLD_THERMAL].smu_feature_bitmap), |
2644 | "Enable THERMAL Feature Failed!" , |
2645 | return -1); |
2646 | data->smu_features[GNLD_THERMAL].enabled = true; |
2647 | } |
2648 | |
2649 | return 0; |
2650 | } |
2651 | |
2652 | static int vega10_disable_thermal_protection(struct pp_hwmgr *hwmgr) |
2653 | { |
2654 | struct vega10_hwmgr *data = hwmgr->backend; |
2655 | |
2656 | if (data->smu_features[GNLD_THERMAL].supported) { |
2657 | if (!data->smu_features[GNLD_THERMAL].enabled) |
2658 | pr_info("THERMAL Feature Already disabled!" ); |
2659 | |
2660 | PP_ASSERT_WITH_CODE( |
2661 | !vega10_enable_smc_features(hwmgr, |
2662 | false, |
2663 | data->smu_features[GNLD_THERMAL].smu_feature_bitmap), |
2664 | "disable THERMAL Feature Failed!" , |
2665 | return -1); |
2666 | data->smu_features[GNLD_THERMAL].enabled = false; |
2667 | } |
2668 | |
2669 | return 0; |
2670 | } |
2671 | |
2672 | static int vega10_enable_vrhot_feature(struct pp_hwmgr *hwmgr) |
2673 | { |
2674 | struct vega10_hwmgr *data = hwmgr->backend; |
2675 | |
2676 | if (PP_CAP(PHM_PlatformCaps_RegulatorHot)) { |
2677 | if (data->smu_features[GNLD_VR0HOT].supported) { |
2678 | PP_ASSERT_WITH_CODE( |
2679 | !vega10_enable_smc_features(hwmgr, |
2680 | true, |
2681 | data->smu_features[GNLD_VR0HOT].smu_feature_bitmap), |
2682 | "Attempt to Enable VR0 Hot feature Failed!" , |
2683 | return -1); |
2684 | data->smu_features[GNLD_VR0HOT].enabled = true; |
2685 | } else { |
2686 | if (data->smu_features[GNLD_VR1HOT].supported) { |
2687 | PP_ASSERT_WITH_CODE( |
2688 | !vega10_enable_smc_features(hwmgr, |
2689 | true, |
2690 | data->smu_features[GNLD_VR1HOT].smu_feature_bitmap), |
2691 | "Attempt to Enable VR0 Hot feature Failed!" , |
2692 | return -1); |
2693 | data->smu_features[GNLD_VR1HOT].enabled = true; |
2694 | } |
2695 | } |
2696 | } |
2697 | return 0; |
2698 | } |
2699 | |
2700 | static int vega10_enable_ulv(struct pp_hwmgr *hwmgr) |
2701 | { |
2702 | struct vega10_hwmgr *data = hwmgr->backend; |
2703 | |
2704 | if (data->registry_data.ulv_support) { |
2705 | PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr, |
2706 | true, data->smu_features[GNLD_ULV].smu_feature_bitmap), |
2707 | "Enable ULV Feature Failed!" , |
2708 | return -1); |
2709 | data->smu_features[GNLD_ULV].enabled = true; |
2710 | } |
2711 | |
2712 | return 0; |
2713 | } |
2714 | |
2715 | static int vega10_disable_ulv(struct pp_hwmgr *hwmgr) |
2716 | { |
2717 | struct vega10_hwmgr *data = hwmgr->backend; |
2718 | |
2719 | if (data->registry_data.ulv_support) { |
2720 | PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr, |
2721 | false, data->smu_features[GNLD_ULV].smu_feature_bitmap), |
2722 | "disable ULV Feature Failed!" , |
2723 | return -EINVAL); |
2724 | data->smu_features[GNLD_ULV].enabled = false; |
2725 | } |
2726 | |
2727 | return 0; |
2728 | } |
2729 | |
2730 | static int vega10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) |
2731 | { |
2732 | struct vega10_hwmgr *data = hwmgr->backend; |
2733 | |
2734 | if (data->smu_features[GNLD_DS_GFXCLK].supported) { |
2735 | PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr, |
2736 | true, data->smu_features[GNLD_DS_GFXCLK].smu_feature_bitmap), |
2737 | "Attempt to Enable DS_GFXCLK Feature Failed!" , |
2738 | return -EINVAL); |
2739 | data->smu_features[GNLD_DS_GFXCLK].enabled = true; |
2740 | } |
2741 | |
2742 | if (data->smu_features[GNLD_DS_SOCCLK].supported) { |
2743 | PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr, |
2744 | true, data->smu_features[ |
---|