1 | /* |
2 | * Copyright 2013 Advanced Micro Devices, Inc. |
3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), |
6 | * to deal in the Software without restriction, including without limitation |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
8 | * and/or sell copies of the Software, and to permit persons to whom the |
9 | * Software is furnished to do so, subject to the following conditions: |
10 | * |
11 | * The above copyright notice and this permission notice shall be included in |
12 | * all copies or substantial portions of the Software. |
13 | * |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
20 | * OTHER DEALINGS IN THE SOFTWARE. |
21 | * |
22 | */ |
23 | |
24 | #include "amdgpu.h" |
25 | #include "amdgpu_pm.h" |
26 | #include "cikd.h" |
27 | #include "atom.h" |
28 | #include "amdgpu_atombios.h" |
29 | #include "amdgpu_dpm.h" |
30 | #include "kv_dpm.h" |
31 | #include "gfx_v7_0.h" |
32 | #include <linux/seq_file.h> |
33 | |
34 | #include "smu/smu_7_0_0_d.h" |
35 | #include "smu/smu_7_0_0_sh_mask.h" |
36 | |
37 | #include "gca/gfx_7_2_d.h" |
38 | #include "gca/gfx_7_2_sh_mask.h" |
39 | #include "legacy_dpm.h" |
40 | |
41 | #define KV_MAX_DEEPSLEEP_DIVIDER_ID 5 |
42 | #define KV_MINIMUM_ENGINE_CLOCK 800 |
43 | #define SMC_RAM_END 0x40000 |
44 | |
45 | static const struct amd_pm_funcs kv_dpm_funcs; |
46 | |
47 | static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev); |
48 | static int kv_enable_nb_dpm(struct amdgpu_device *adev, |
49 | bool enable); |
50 | static void kv_init_graphics_levels(struct amdgpu_device *adev); |
51 | static int kv_calculate_ds_divider(struct amdgpu_device *adev); |
52 | static int kv_calculate_nbps_level_settings(struct amdgpu_device *adev); |
53 | static int kv_calculate_dpm_settings(struct amdgpu_device *adev); |
54 | static void kv_enable_new_levels(struct amdgpu_device *adev); |
55 | static void kv_program_nbps_index_settings(struct amdgpu_device *adev, |
56 | struct amdgpu_ps *new_rps); |
57 | static int kv_set_enabled_level(struct amdgpu_device *adev, u32 level); |
58 | static int kv_set_enabled_levels(struct amdgpu_device *adev); |
59 | static int kv_force_dpm_highest(struct amdgpu_device *adev); |
60 | static int kv_force_dpm_lowest(struct amdgpu_device *adev); |
61 | static void kv_apply_state_adjust_rules(struct amdgpu_device *adev, |
62 | struct amdgpu_ps *new_rps, |
63 | struct amdgpu_ps *old_rps); |
64 | static int kv_set_thermal_temperature_range(struct amdgpu_device *adev, |
65 | int min_temp, int max_temp); |
66 | static int kv_init_fps_limits(struct amdgpu_device *adev); |
67 | |
68 | static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate); |
69 | static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate); |
70 | |
71 | |
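/* Translate a 2-bit voltage index into the 7-bit VID used by the SMU.
 * The VBIOS vddc/sclk dependency table is preferred when present; otherwise
 * fall back to the Sumo-style VID mapping table, clamping to the last entry.
 */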
72 | static u32 kv_convert_vid2_to_vid7(struct amdgpu_device *adev, |
73 | struct sumo_vid_mapping_table *vid_mapping_table, |
74 | u32 vid_2bit) |
75 | { |
76 | struct amdgpu_clock_voltage_dependency_table *vddc_sclk_table = |
77 | &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; |
78 | u32 i; |
79 | |
80 | if (vddc_sclk_table && vddc_sclk_table->count) { |
81 | if (vid_2bit < vddc_sclk_table->count) |
82 | return vddc_sclk_table->entries[vid_2bit].v; |
83 | else |
84 | return vddc_sclk_table->entries[vddc_sclk_table->count - 1].v; |
85 | } else { |
86 | for (i = 0; i < vid_mapping_table->num_entries; i++) { |
87 | if (vid_mapping_table->entries[i].vid_2bit == vid_2bit) |
88 | return vid_mapping_table->entries[i].vid_7bit; |
89 | } |
90 | return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit; |
91 | } |
92 | } |
93 | |
94 | static u32 kv_convert_vid7_to_vid2(struct amdgpu_device *adev, |
95 | struct sumo_vid_mapping_table *vid_mapping_table, |
96 | u32 vid_7bit) |
97 | { |
98 | struct amdgpu_clock_voltage_dependency_table *vddc_sclk_table = |
99 | &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; |
100 | u32 i; |
101 | |
102 | if (vddc_sclk_table && vddc_sclk_table->count) { |
103 | for (i = 0; i < vddc_sclk_table->count; i++) { |
104 | if (vddc_sclk_table->entries[i].v == vid_7bit) |
105 | return i; |
106 | } |
107 | return vddc_sclk_table->count - 1; |
108 | } else { |
109 | for (i = 0; i < vid_mapping_table->num_entries; i++) { |
110 | if (vid_mapping_table->entries[i].vid_7bit == vid_7bit) |
111 | return vid_mapping_table->entries[i].vid_2bit; |
112 | } |
113 | |
114 | return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_2bit; |
115 | } |
116 | } |
117 | |
118 | static void sumo_take_smu_control(struct amdgpu_device *adev, bool enable) |
119 | { |
120 | /* This bit selects who handles display phy powergating. |
121 | * Clear the bit to let atom handle it. |
122 | * Set it to let the driver handle it. |
123 | * For now we just let atom handle it. |
124 | */ |
125 | #if 0 |
126 | u32 v = RREG32(mmDOUT_SCRATCH3); |
127 | |
128 | if (enable) |
129 | v |= 0x4; |
130 | else |
131 | v &= 0xFFFFFFFB; |
132 | |
133 | WREG32(mmDOUT_SCRATCH3, v); |
134 | #endif |
135 | } |
136 | |
137 | static void sumo_construct_sclk_voltage_mapping_table(struct amdgpu_device *adev, |
138 | struct sumo_sclk_voltage_mapping_table *sclk_voltage_mapping_table, |
139 | ATOM_AVAILABLE_SCLK_LIST *table) |
140 | { |
141 | u32 i; |
142 | u32 n = 0; |
143 | u32 prev_sclk = 0; |
144 | |
145 | for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) { |
146 | if (table[i].ulSupportedSCLK > prev_sclk) { |
147 | sclk_voltage_mapping_table->entries[n].sclk_frequency = |
148 | table[i].ulSupportedSCLK; |
149 | sclk_voltage_mapping_table->entries[n].vid_2bit = |
150 | table[i].usVoltageIndex; |
151 | prev_sclk = table[i].ulSupportedSCLK; |
152 | n++; |
153 | } |
154 | } |
155 | |
156 | sclk_voltage_mapping_table->num_max_dpm_entries = n; |
157 | } |
158 | |
159 | static void sumo_construct_vid_mapping_table(struct amdgpu_device *adev, |
160 | struct sumo_vid_mapping_table *vid_mapping_table, |
161 | ATOM_AVAILABLE_SCLK_LIST *table) |
162 | { |
163 | u32 i, j; |
164 | |
165 | for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) { |
166 | if (table[i].ulSupportedSCLK != 0) { |
167 | vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit = |
168 | table[i].usVoltageID; |
169 | vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit = |
170 | table[i].usVoltageIndex; |
171 | } |
172 | } |
173 | |
174 | for (i = 0; i < SUMO_MAX_NUMBER_VOLTAGES; i++) { |
175 | if (vid_mapping_table->entries[i].vid_7bit == 0) { |
176 | for (j = i + 1; j < SUMO_MAX_NUMBER_VOLTAGES; j++) { |
177 | if (vid_mapping_table->entries[j].vid_7bit != 0) { |
178 | vid_mapping_table->entries[i] = |
179 | vid_mapping_table->entries[j]; |
180 | vid_mapping_table->entries[j].vid_7bit = 0; |
181 | break; |
182 | } |
183 | } |
184 | |
185 | if (j == SUMO_MAX_NUMBER_VOLTAGES) |
186 | break; |
187 | } |
188 | } |
189 | |
190 | vid_mapping_table->num_entries = i; |
191 | } |
192 | |
193 | #if 0 |
194 | static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] = { |
195 | { 0, 4, 1 }, |
196 | { 1, 4, 1 }, |
197 | { 2, 5, 1 }, |
198 | { 3, 4, 2 }, |
199 | { 4, 1, 1 }, |
200 | { 5, 5, 2 }, |
201 | { 6, 6, 1 }, |
202 | { 7, 9, 2 }, |
203 | { 0xffffffff } |
204 | }; |
205 | |
206 | static const struct kv_lcac_config_values mc0_local_cac_cfg_kv[] = { |
207 | { 0, 4, 1 }, |
208 | { 0xffffffff } |
209 | }; |
210 | |
211 | static const struct kv_lcac_config_values mc1_local_cac_cfg_kv[] = { |
212 | { 0, 4, 1 }, |
213 | { 0xffffffff } |
214 | }; |
215 | |
216 | static const struct kv_lcac_config_values mc2_local_cac_cfg_kv[] = { |
217 | { 0, 4, 1 }, |
218 | { 0xffffffff } |
219 | }; |
220 | |
221 | static const struct kv_lcac_config_values mc3_local_cac_cfg_kv[] = { |
222 | { 0, 4, 1 }, |
223 | { 0xffffffff } |
224 | }; |
225 | |
226 | static const struct kv_lcac_config_values cpl_local_cac_cfg_kv[] = { |
227 | { 0, 4, 1 }, |
228 | { 1, 4, 1 }, |
229 | { 2, 5, 1 }, |
230 | { 3, 4, 1 }, |
231 | { 4, 1, 1 }, |
232 | { 5, 5, 1 }, |
233 | { 6, 6, 1 }, |
234 | { 7, 9, 1 }, |
235 | { 8, 4, 1 }, |
236 | { 9, 2, 1 }, |
237 | { 10, 3, 1 }, |
238 | { 11, 6, 1 }, |
239 | { 12, 8, 2 }, |
240 | { 13, 1, 1 }, |
241 | { 14, 2, 1 }, |
242 | { 15, 3, 1 }, |
243 | { 16, 1, 1 }, |
244 | { 17, 4, 1 }, |
245 | { 18, 3, 1 }, |
246 | { 19, 1, 1 }, |
247 | { 20, 8, 1 }, |
248 | { 21, 5, 1 }, |
249 | { 22, 1, 1 }, |
250 | { 23, 1, 1 }, |
251 | { 24, 4, 1 }, |
252 | { 27, 6, 1 }, |
253 | { 28, 1, 1 }, |
254 | { 0xffffffff } |
255 | }; |
256 | |
257 | static const struct kv_lcac_config_reg sx0_cac_config_reg[] = { |
258 | { 0xc0400d00, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } |
259 | }; |
260 | |
261 | static const struct kv_lcac_config_reg mc0_cac_config_reg[] = { |
262 | { 0xc0400d30, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } |
263 | }; |
264 | |
265 | static const struct kv_lcac_config_reg mc1_cac_config_reg[] = { |
266 | { 0xc0400d3c, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } |
267 | }; |
268 | |
269 | static const struct kv_lcac_config_reg mc2_cac_config_reg[] = { |
270 | { 0xc0400d48, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } |
271 | }; |
272 | |
273 | static const struct kv_lcac_config_reg mc3_cac_config_reg[] = { |
274 | { 0xc0400d54, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } |
275 | }; |
276 | |
277 | static const struct kv_lcac_config_reg cpl_cac_config_reg[] = { |
278 | { 0xc0400d80, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } |
279 | }; |
280 | #endif |
281 | |
282 | static const struct kv_pt_config_reg didt_config_kv[] = { |
283 | { 0x10, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, |
284 | { 0x10, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, |
285 | { 0x10, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, |
286 | { 0x10, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, |
287 | { 0x11, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, |
288 | { 0x11, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, |
289 | { 0x11, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, |
290 | { 0x11, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, |
291 | { 0x12, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, |
292 | { 0x12, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, |
293 | { 0x12, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, |
294 | { 0x12, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, |
295 | { 0x2, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND }, |
296 | { 0x2, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND }, |
297 | { 0x2, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND }, |
298 | { 0x1, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND }, |
299 | { 0x1, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND }, |
300 | { 0x0, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND }, |
301 | { 0x30, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, |
302 | { 0x30, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, |
303 | { 0x30, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, |
304 | { 0x30, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, |
305 | { 0x31, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, |
306 | { 0x31, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, |
307 | { 0x31, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, |
308 | { 0x31, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, |
309 | { 0x32, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, |
310 | { 0x32, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, |
311 | { 0x32, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, |
312 | { 0x32, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, |
313 | { 0x22, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND }, |
314 | { 0x22, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND }, |
315 | { 0x22, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND }, |
316 | { 0x21, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND }, |
317 | { 0x21, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND }, |
318 | { 0x20, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND }, |
319 | { 0x50, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, |
320 | { 0x50, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, |
321 | { 0x50, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, |
322 | { 0x50, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, |
323 | { 0x51, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, |
324 | { 0x51, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, |
325 | { 0x51, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, |
326 | { 0x51, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, |
327 | { 0x52, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, |
328 | { 0x52, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, |
329 | { 0x52, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, |
330 | { 0x52, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, |
331 | { 0x42, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND }, |
332 | { 0x42, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND }, |
333 | { 0x42, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND }, |
334 | { 0x41, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND }, |
335 | { 0x41, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND }, |
336 | { 0x40, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND }, |
337 | { 0x70, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, |
338 | { 0x70, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, |
339 | { 0x70, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, |
340 | { 0x70, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, |
341 | { 0x71, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, |
342 | { 0x71, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, |
343 | { 0x71, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, |
344 | { 0x71, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, |
345 | { 0x72, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, |
346 | { 0x72, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, |
347 | { 0x72, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, |
348 | { 0x72, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, |
349 | { 0x62, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND }, |
350 | { 0x62, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND }, |
351 | { 0x62, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND }, |
352 | { 0x61, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND }, |
353 | { 0x61, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND }, |
354 | { 0x60, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND }, |
355 | { 0xFFFFFFFF } |
356 | }; |
357 | |
358 | static struct kv_ps *kv_get_ps(struct amdgpu_ps *rps) |
359 | { |
360 | struct kv_ps *ps = rps->ps_priv; |
361 | |
362 | return ps; |
363 | } |
364 | |
365 | static struct kv_power_info *kv_get_pi(struct amdgpu_device *adev) |
366 | { |
367 | struct kv_power_info *pi = adev->pm.dpm.priv; |
368 | |
369 | return pi; |
370 | } |
371 | |
372 | #if 0 |
373 | static void kv_program_local_cac_table(struct amdgpu_device *adev, |
374 | const struct kv_lcac_config_values *local_cac_table, |
375 | const struct kv_lcac_config_reg *local_cac_reg) |
376 | { |
377 | u32 i, count, data; |
378 | const struct kv_lcac_config_values *values = local_cac_table; |
379 | |
380 | while (values->block_id != 0xffffffff) { |
381 | count = values->signal_id; |
382 | for (i = 0; i < count; i++) { |
383 | data = ((values->block_id << local_cac_reg->block_shift) & |
384 | local_cac_reg->block_mask); |
385 | data |= ((i << local_cac_reg->signal_shift) & |
386 | local_cac_reg->signal_mask); |
387 | data |= ((values->t << local_cac_reg->t_shift) & |
388 | local_cac_reg->t_mask); |
389 | data |= ((1 << local_cac_reg->enable_shift) & |
390 | local_cac_reg->enable_mask); |
391 | WREG32_SMC(local_cac_reg->cntl, data); |
392 | } |
393 | values++; |
394 | } |
395 | } |
396 | #endif |
397 | |
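/* Walk a table of power-containment register updates terminated by an offset
 * of 0xFFFFFFFF.  CACHE-type entries are accumulated and OR'd into the next
 * register write; all other entries are read-modify-written through the
 * regular, SMC-indirect or DIDT-indirect register paths.
 */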
398 | static int kv_program_pt_config_registers(struct amdgpu_device *adev, |
399 | const struct kv_pt_config_reg *cac_config_regs) |
400 | { |
401 | const struct kv_pt_config_reg *config_regs = cac_config_regs; |
402 | u32 data; |
403 | u32 cache = 0; |
404 | |
405 | if (config_regs == NULL) |
406 | return -EINVAL; |
407 | |
408 | while (config_regs->offset != 0xFFFFFFFF) { |
409 | if (config_regs->type == KV_CONFIGREG_CACHE) { |
410 | cache |= ((config_regs->value << config_regs->shift) & config_regs->mask); |
411 | } else { |
412 | switch (config_regs->type) { |
413 | case KV_CONFIGREG_SMC_IND: |
414 | data = RREG32_SMC(config_regs->offset); |
415 | break; |
416 | case KV_CONFIGREG_DIDT_IND: |
417 | data = RREG32_DIDT(config_regs->offset); |
418 | break; |
419 | default: |
420 | data = RREG32(config_regs->offset); |
421 | break; |
422 | } |
423 | |
424 | data &= ~config_regs->mask; |
425 | data |= ((config_regs->value << config_regs->shift) & config_regs->mask); |
426 | data |= cache; |
427 | cache = 0; |
428 | |
429 | switch (config_regs->type) { |
430 | case KV_CONFIGREG_SMC_IND: |
431 | WREG32_SMC(config_regs->offset, data); |
432 | break; |
433 | case KV_CONFIGREG_DIDT_IND: |
434 | WREG32_DIDT(config_regs->offset, data); |
435 | break; |
436 | default: |
437 | WREG32(config_regs->offset, data); |
438 | break; |
439 | } |
440 | } |
441 | config_regs++; |
442 | } |
443 | |
444 | return 0; |
445 | } |
446 | |
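/* Toggle the DIDT_CTRL_EN bit for each block (SQ/DB/TD/TCP) whose ramping
 * capability is enabled in the power info.
 */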
447 | static void kv_do_enable_didt(struct amdgpu_device *adev, bool enable) |
448 | { |
449 | struct kv_power_info *pi = kv_get_pi(adev); |
450 | u32 data; |
451 | |
452 | if (pi->caps_sq_ramping) { |
453 | data = RREG32_DIDT(ixDIDT_SQ_CTRL0); |
454 | if (enable) |
455 | data |= DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK; |
456 | else |
457 | data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK; |
458 | WREG32_DIDT(ixDIDT_SQ_CTRL0, data); |
459 | } |
460 | |
461 | if (pi->caps_db_ramping) { |
462 | data = RREG32_DIDT(ixDIDT_DB_CTRL0); |
463 | if (enable) |
464 | data |= DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK; |
465 | else |
466 | data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK; |
467 | WREG32_DIDT(ixDIDT_DB_CTRL0, data); |
468 | } |
469 | |
470 | if (pi->caps_td_ramping) { |
471 | data = RREG32_DIDT(ixDIDT_TD_CTRL0); |
472 | if (enable) |
473 | data |= DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK; |
474 | else |
475 | data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK; |
476 | WREG32_DIDT(ixDIDT_TD_CTRL0, data); |
477 | } |
478 | |
479 | if (pi->caps_tcp_ramping) { |
480 | data = RREG32_DIDT(ixDIDT_TCP_CTRL0); |
481 | if (enable) |
482 | data |= DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK; |
483 | else |
484 | data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK; |
485 | WREG32_DIDT(ixDIDT_TCP_CTRL0, data); |
486 | } |
487 | } |
488 | |
489 | static int kv_enable_didt(struct amdgpu_device *adev, bool enable) |
490 | { |
491 | struct kv_power_info *pi = kv_get_pi(adev); |
492 | int ret; |
493 | |
494 | if (pi->caps_sq_ramping || |
495 | pi->caps_db_ramping || |
496 | pi->caps_td_ramping || |
497 | pi->caps_tcp_ramping) { |
498 | amdgpu_gfx_rlc_enter_safe_mode(adev, 0); |
499 | |
500 | if (enable) { |
501 | ret = kv_program_pt_config_registers(adev, didt_config_kv); |
502 | if (ret) { |
503 | amdgpu_gfx_rlc_exit_safe_mode(adev, 0); |
504 | return ret; |
505 | } |
506 | } |
507 | |
508 | kv_do_enable_didt(adev, enable); |
509 | |
510 | amdgpu_gfx_rlc_exit_safe_mode(adev, 0); |
511 | } |
512 | |
513 | return 0; |
514 | } |
515 | |
516 | #if 0 |
517 | static void kv_initialize_hardware_cac_manager(struct amdgpu_device *adev) |
518 | { |
519 | struct kv_power_info *pi = kv_get_pi(adev); |
520 | |
521 | if (pi->caps_cac) { |
522 | WREG32_SMC(ixLCAC_SX0_OVR_SEL, 0); |
523 | WREG32_SMC(ixLCAC_SX0_OVR_VAL, 0); |
524 | kv_program_local_cac_table(adev, sx_local_cac_cfg_kv, sx0_cac_config_reg); |
525 | |
526 | WREG32_SMC(ixLCAC_MC0_OVR_SEL, 0); |
527 | WREG32_SMC(ixLCAC_MC0_OVR_VAL, 0); |
528 | kv_program_local_cac_table(adev, mc0_local_cac_cfg_kv, mc0_cac_config_reg); |
529 | |
530 | WREG32_SMC(ixLCAC_MC1_OVR_SEL, 0); |
531 | WREG32_SMC(ixLCAC_MC1_OVR_VAL, 0); |
532 | kv_program_local_cac_table(adev, mc1_local_cac_cfg_kv, mc1_cac_config_reg); |
533 | |
534 | WREG32_SMC(ixLCAC_MC2_OVR_SEL, 0); |
535 | WREG32_SMC(ixLCAC_MC2_OVR_VAL, 0); |
536 | kv_program_local_cac_table(adev, mc2_local_cac_cfg_kv, mc2_cac_config_reg); |
537 | |
538 | WREG32_SMC(ixLCAC_MC3_OVR_SEL, 0); |
539 | WREG32_SMC(ixLCAC_MC3_OVR_VAL, 0); |
540 | kv_program_local_cac_table(adev, mc3_local_cac_cfg_kv, mc3_cac_config_reg); |
541 | |
542 | WREG32_SMC(ixLCAC_CPL_OVR_SEL, 0); |
543 | WREG32_SMC(ixLCAC_CPL_OVR_VAL, 0); |
544 | kv_program_local_cac_table(adev, cpl_local_cac_cfg_kv, cpl_cac_config_reg); |
545 | } |
546 | } |
547 | #endif |
548 | |
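/* Ask the SMU to start or stop CAC power estimation and mirror the result
 * in pi->cac_enabled.
 */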
549 | static int kv_enable_smc_cac(struct amdgpu_device *adev, bool enable) |
550 | { |
551 | struct kv_power_info *pi = kv_get_pi(adev); |
552 | int ret = 0; |
553 | |
554 | if (pi->caps_cac) { |
555 | if (enable) { |
556 | ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_EnableCac); |
557 | if (ret) |
558 | pi->cac_enabled = false; |
559 | else |
560 | pi->cac_enabled = true; |
561 | } else if (pi->cac_enabled) { |
562 | amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_DisableCac); |
563 | pi->cac_enabled = false; |
564 | } |
565 | } |
566 | |
567 | return ret; |
568 | } |
569 | |
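/* Read the firmware header out of SMC SRAM to learn where the DPM table and
 * soft register area live; later SMC uploads are relative to these offsets.
 */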
570 | static int kv_process_firmware_header(struct amdgpu_device *adev) |
571 | { |
572 | struct kv_power_info *pi = kv_get_pi(adev); |
573 | u32 tmp; |
574 | int ret; |
575 | |
576 | ret = amdgpu_kv_read_smc_sram_dword(adev, SMU7_FIRMWARE_HEADER_LOCATION + |
577 | offsetof(SMU7_Firmware_Header, DpmTable), |
578 | &tmp, pi->sram_end); |
579 | |
580 | if (ret == 0) |
581 | pi->dpm_table_start = tmp; |
582 | |
583 | ret = amdgpu_kv_read_smc_sram_dword(adev, SMU7_FIRMWARE_HEADER_LOCATION + |
584 | offsetof(SMU7_Firmware_Header, SoftRegisters), |
585 | &tmp, pi->sram_end); |
586 | |
587 | if (ret == 0) |
588 | pi->soft_regs_start = tmp; |
589 | |
590 | return ret; |
591 | } |
592 | |
593 | static int kv_enable_dpm_voltage_scaling(struct amdgpu_device *adev) |
594 | { |
595 | struct kv_power_info *pi = kv_get_pi(adev); |
596 | int ret; |
597 | |
598 | pi->graphics_voltage_change_enable = 1; |
599 | |
600 | ret = amdgpu_kv_copy_bytes_to_smc(adev, |
601 | pi->dpm_table_start + |
602 | offsetof(SMU7_Fusion_DpmTable, GraphicsVoltageChangeEnable), |
603 | &pi->graphics_voltage_change_enable, |
604 | sizeof(u8), pi->sram_end); |
605 | |
606 | return ret; |
607 | } |
608 | |
609 | static int kv_set_dpm_interval(struct amdgpu_device *adev) |
610 | { |
611 | struct kv_power_info *pi = kv_get_pi(adev); |
612 | int ret; |
613 | |
614 | pi->graphics_interval = 1; |
615 | |
616 | ret = amdgpu_kv_copy_bytes_to_smc(adev, |
617 | pi->dpm_table_start + |
618 | offsetof(SMU7_Fusion_DpmTable, GraphicsInterval), |
619 | &pi->graphics_interval, |
620 | sizeof(u8), pi->sram_end); |
621 | |
622 | return ret; |
623 | } |
624 | |
625 | static int kv_set_dpm_boot_state(struct amdgpu_device *adev) |
626 | { |
627 | struct kv_power_info *pi = kv_get_pi(adev); |
628 | int ret; |
629 | |
630 | ret = amdgpu_kv_copy_bytes_to_smc(adev, |
631 | pi->dpm_table_start + |
632 | offsetof(SMU7_Fusion_DpmTable, GraphicsBootLevel), |
633 | &pi->graphics_boot_level, |
634 | sizeof(u8), pi->sram_end); |
635 | |
636 | return ret; |
637 | } |
638 | |
639 | static void kv_program_vc(struct amdgpu_device *adev) |
640 | { |
641 | WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0x3FFFC100); |
642 | } |
643 | |
644 | static void kv_clear_vc(struct amdgpu_device *adev) |
645 | { |
646 | WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0); |
647 | } |
648 | |
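/* Ask the VBIOS for engine PLL dividers for the requested sclk and store the
 * post divider and (big-endian) frequency in the graphics level that will be
 * uploaded to the SMU.
 */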
649 | static int kv_set_divider_value(struct amdgpu_device *adev, |
650 | u32 index, u32 sclk) |
651 | { |
652 | struct kv_power_info *pi = kv_get_pi(adev); |
653 | struct atom_clock_dividers dividers; |
654 | int ret; |
655 | |
656 | ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, |
657 | sclk, false, &dividers); |
658 | if (ret) |
659 | return ret; |
660 | |
661 | pi->graphics_level[index].SclkDid = (u8)dividers.post_div; |
662 | pi->graphics_level[index].SclkFrequency = cpu_to_be32(sclk); |
663 | |
664 | return 0; |
665 | } |
666 | |
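/* Decode an 8-bit VID with the linear encoding used here:
 * value = 6200 - 25 * vid.
 */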
667 | static u16 kv_convert_8bit_index_to_voltage(struct amdgpu_device *adev, |
668 | u16 voltage) |
669 | { |
670 | return 6200 - (voltage * 25); |
671 | } |
672 | |
673 | static u16 kv_convert_2bit_index_to_voltage(struct amdgpu_device *adev, |
674 | u32 vid_2bit) |
675 | { |
676 | struct kv_power_info *pi = kv_get_pi(adev); |
677 | u32 vid_8bit = kv_convert_vid2_to_vid7(adev, |
678 | &pi->sys_info.vid_mapping_table, |
679 | vid_2bit); |
680 | |
681 | return kv_convert_8bit_index_to_voltage(adev, (u16)vid_8bit); |
682 | } |
683 | |
684 | |
685 | static int kv_set_vid(struct amdgpu_device *adev, u32 index, u32 vid) |
686 | { |
687 | struct kv_power_info *pi = kv_get_pi(adev); |
688 | |
689 | pi->graphics_level[index].VoltageDownH = (u8)pi->voltage_drop_t; |
690 | pi->graphics_level[index].MinVddNb = |
691 | cpu_to_be32(kv_convert_2bit_index_to_voltage(adev, vid)); |
692 | |
693 | return 0; |
694 | } |
695 | |
696 | static int kv_set_at(struct amdgpu_device *adev, u32 index, u32 at) |
697 | { |
698 | struct kv_power_info *pi = kv_get_pi(adev); |
699 | |
700 | pi->graphics_level[index].AT = cpu_to_be16((u16)at); |
701 | |
702 | return 0; |
703 | } |
704 | |
705 | static void kv_dpm_power_level_enable(struct amdgpu_device *adev, |
706 | u32 index, bool enable) |
707 | { |
708 | struct kv_power_info *pi = kv_get_pi(adev); |
709 | |
710 | pi->graphics_level[index].EnabledForActivity = enable ? 1 : 0; |
711 | } |
712 | |
713 | static void kv_start_dpm(struct amdgpu_device *adev) |
714 | { |
715 | u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT); |
716 | |
717 | tmp |= GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK; |
718 | WREG32_SMC(ixGENERAL_PWRMGT, tmp); |
719 | |
720 | amdgpu_kv_smc_dpm_enable(adev, true); |
721 | } |
722 | |
723 | static void kv_stop_dpm(struct amdgpu_device *adev) |
724 | { |
725 | amdgpu_kv_smc_dpm_enable(adev, false); |
726 | } |
727 | |
728 | static void kv_start_am(struct amdgpu_device *adev) |
729 | { |
730 | u32 sclk_pwrmgt_cntl = RREG32_SMC(ixSCLK_PWRMGT_CNTL); |
731 | |
732 | sclk_pwrmgt_cntl &= ~(SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | |
733 | SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK); |
734 | sclk_pwrmgt_cntl |= SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK; |
735 | |
736 | WREG32_SMC(ixSCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl); |
737 | } |
738 | |
739 | static void kv_reset_am(struct amdgpu_device *adev) |
740 | { |
741 | u32 sclk_pwrmgt_cntl = RREG32_SMC(ixSCLK_PWRMGT_CNTL); |
742 | |
743 | sclk_pwrmgt_cntl |= (SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | |
744 | SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK); |
745 | |
746 | WREG32_SMC(ixSCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl); |
747 | } |
748 | |
749 | static int kv_freeze_sclk_dpm(struct amdgpu_device *adev, bool freeze) |
750 | { |
751 | return amdgpu_kv_notify_message_to_smu(adev, freeze ? |
752 | PPSMC_MSG_SCLKDPM_FreezeLevel : PPSMC_MSG_SCLKDPM_UnfreezeLevel); |
753 | } |
754 | |
755 | static int kv_force_lowest_valid(struct amdgpu_device *adev) |
756 | { |
757 | return kv_force_dpm_lowest(adev); |
758 | } |
759 | |
760 | static int kv_unforce_levels(struct amdgpu_device *adev) |
761 | { |
762 | if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) |
763 | return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NoForcedLevel); |
764 | else |
765 | return kv_set_enabled_levels(adev); |
766 | } |
767 | |
768 | static int kv_update_sclk_t(struct amdgpu_device *adev) |
769 | { |
770 | struct kv_power_info *pi = kv_get_pi(adev); |
771 | u32 low_sclk_interrupt_t = 0; |
772 | int ret = 0; |
773 | |
774 | if (pi->caps_sclk_throttle_low_notification) { |
775 | low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t); |
776 | |
777 | ret = amdgpu_kv_copy_bytes_to_smc(adev, |
778 | pi->dpm_table_start + |
779 | offsetof(SMU7_Fusion_DpmTable, LowSclkInterruptT), |
780 | (u8 *)&low_sclk_interrupt_t, |
781 | sizeof(u32), pi->sram_end); |
782 | } |
783 | return ret; |
784 | } |
785 | |
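/* Find the DPM level whose sclk matches the boot power level, using the
 * vddc/sclk dependency table or the Sumo sclk/voltage mapping table as a
 * fallback, then record it as the boot level and enable it.
 */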
786 | static int kv_program_bootup_state(struct amdgpu_device *adev) |
787 | { |
788 | struct kv_power_info *pi = kv_get_pi(adev); |
789 | u32 i; |
790 | struct amdgpu_clock_voltage_dependency_table *table = |
791 | &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; |
792 | |
793 | if (table && table->count) { |
794 | for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) { |
795 | if (table->entries[i].clk == pi->boot_pl.sclk) |
796 | break; |
797 | } |
798 | |
799 | pi->graphics_boot_level = (u8)i; |
800 | kv_dpm_power_level_enable(adev, i, true); |
801 | } else { |
802 | struct sumo_sclk_voltage_mapping_table *table = |
803 | &pi->sys_info.sclk_voltage_mapping_table; |
804 | |
805 | if (table->num_max_dpm_entries == 0) |
806 | return -EINVAL; |
807 | |
808 | for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) { |
809 | if (table->entries[i].sclk_frequency == pi->boot_pl.sclk) |
810 | break; |
811 | } |
812 | |
813 | pi->graphics_boot_level = (u8)i; |
814 | kv_dpm_power_level_enable(adev, i, true); |
815 | } |
816 | return 0; |
817 | } |
818 | |
819 | static int kv_enable_auto_thermal_throttling(struct amdgpu_device *adev) |
820 | { |
821 | struct kv_power_info *pi = kv_get_pi(adev); |
822 | int ret; |
823 | |
824 | pi->graphics_therm_throttle_enable = 1; |
825 | |
826 | ret = amdgpu_kv_copy_bytes_to_smc(adev, |
827 | pi->dpm_table_start + |
828 | offsetof(SMU7_Fusion_DpmTable, GraphicsThermThrottleEnable), |
829 | &pi->graphics_therm_throttle_enable, |
830 | sizeof(u8), pi->sram_end); |
831 | |
832 | return ret; |
833 | } |
834 | |
835 | static int kv_upload_dpm_settings(struct amdgpu_device *adev) |
836 | { |
837 | struct kv_power_info *pi = kv_get_pi(adev); |
838 | int ret; |
839 | |
840 | ret = amdgpu_kv_copy_bytes_to_smc(adev, |
841 | pi->dpm_table_start + |
842 | offsetof(SMU7_Fusion_DpmTable, GraphicsLevel), |
843 | (u8 *)&pi->graphics_level, |
844 | sizeof(SMU7_Fusion_GraphicsLevel) * SMU7_MAX_LEVELS_GRAPHICS, |
845 | pi->sram_end); |
846 | |
847 | if (ret) |
848 | return ret; |
849 | |
850 | ret = amdgpu_kv_copy_bytes_to_smc(adev, |
851 | pi->dpm_table_start + |
852 | offsetof(SMU7_Fusion_DpmTable, GraphicsDpmLevelCount), |
853 | &pi->graphics_dpm_level_count, |
854 | sizeof(u8), pi->sram_end); |
855 | |
856 | return ret; |
857 | } |
858 | |
859 | static u32 kv_get_clock_difference(u32 a, u32 b) |
860 | { |
861 | return (a >= b) ? a - b : b - a; |
862 | } |
863 | |
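/* Pick a DFS bypass divider code when the clock is within 200 units of one
 * of a few fixed values (40000/30000/20000/15000/10000); 0 means no bypass.
 * Only used when caps_enable_dfs_bypass is set.
 */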
864 | static u32 kv_get_clk_bypass(struct amdgpu_device *adev, u32 clk) |
865 | { |
866 | struct kv_power_info *pi = kv_get_pi(adev); |
867 | u32 value; |
868 | |
869 | if (pi->caps_enable_dfs_bypass) { |
870 | if (kv_get_clock_difference(clk, 40000) < 200) |
871 | value = 3; |
872 | else if (kv_get_clock_difference(clk, 30000) < 200) |
873 | value = 2; |
874 | else if (kv_get_clock_difference(clk, 20000) < 200) |
875 | value = 7; |
876 | else if (kv_get_clock_difference(clk, 15000) < 200) |
877 | value = 6; |
878 | else if (kv_get_clock_difference(clk, 10000) < 200) |
879 | value = 8; |
880 | else |
881 | value = 0; |
882 | } else { |
883 | value = 0; |
884 | } |
885 | |
886 | return value; |
887 | } |
888 | |
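/* Build the UVD level table from the VBIOS vclk/dclk/voltage dependency
 * table (stopping at high_voltage_t if set) and upload the levels, level
 * count and sampling interval to SMC SRAM.
 */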
889 | static int kv_populate_uvd_table(struct amdgpu_device *adev) |
890 | { |
891 | struct kv_power_info *pi = kv_get_pi(adev); |
892 | struct amdgpu_uvd_clock_voltage_dependency_table *table = |
893 | &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; |
894 | struct atom_clock_dividers dividers; |
895 | int ret; |
896 | u32 i; |
897 | |
898 | if (table == NULL || table->count == 0) |
899 | return 0; |
900 | |
901 | pi->uvd_level_count = 0; |
902 | for (i = 0; i < table->count; i++) { |
903 | if (pi->high_voltage_t && |
904 | (pi->high_voltage_t < table->entries[i].v)) |
905 | break; |
906 | |
907 | pi->uvd_level[i].VclkFrequency = cpu_to_be32(table->entries[i].vclk); |
908 | pi->uvd_level[i].DclkFrequency = cpu_to_be32(table->entries[i].dclk); |
909 | pi->uvd_level[i].MinVddNb = cpu_to_be16(table->entries[i].v); |
910 | |
911 | pi->uvd_level[i].VClkBypassCntl = |
912 | (u8)kv_get_clk_bypass(adev, table->entries[i].vclk); |
913 | pi->uvd_level[i].DClkBypassCntl = |
914 | (u8)kv_get_clk_bypass(adev, table->entries[i].dclk); |
915 | |
916 | ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, |
917 | table->entries[i].vclk, false, &dividers); |
918 | if (ret) |
919 | return ret; |
920 | pi->uvd_level[i].VclkDivider = (u8)dividers.post_div; |
921 | |
922 | ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, |
923 | table->entries[i].dclk, false, &dividers); |
924 | if (ret) |
925 | return ret; |
926 | pi->uvd_level[i].DclkDivider = (u8)dividers.post_div; |
927 | |
928 | pi->uvd_level_count++; |
929 | } |
930 | |
931 | ret = amdgpu_kv_copy_bytes_to_smc(adev, |
932 | pi->dpm_table_start + |
933 | offsetof(SMU7_Fusion_DpmTable, UvdLevelCount), |
934 | (u8 *)&pi->uvd_level_count, |
935 | sizeof(u8), pi->sram_end); |
936 | if (ret) |
937 | return ret; |
938 | |
939 | pi->uvd_interval = 1; |
940 | |
941 | ret = amdgpu_kv_copy_bytes_to_smc(adev, |
942 | pi->dpm_table_start + |
943 | offsetof(SMU7_Fusion_DpmTable, UVDInterval), |
944 | &pi->uvd_interval, |
945 | sizeof(u8), pi->sram_end); |
946 | if (ret) |
947 | return ret; |
948 | |
949 | ret = amdgpu_kv_copy_bytes_to_smc(adev, |
950 | pi->dpm_table_start + |
951 | offsetof(SMU7_Fusion_DpmTable, UvdLevel), |
952 | (u8 *)&pi->uvd_level, |
953 | sizeof(SMU7_Fusion_UvdLevel) * SMU7_MAX_LEVELS_UVD, |
954 | pi->sram_end); |
955 | |
956 | return ret; |
957 | |
958 | } |
959 | |
960 | static int kv_populate_vce_table(struct amdgpu_device *adev) |
961 | { |
962 | struct kv_power_info *pi = kv_get_pi(adev); |
963 | int ret; |
964 | u32 i; |
965 | struct amdgpu_vce_clock_voltage_dependency_table *table = |
966 | &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; |
967 | struct atom_clock_dividers dividers; |
968 | |
969 | if (table == NULL || table->count == 0) |
970 | return 0; |
971 | |
972 | pi->vce_level_count = 0; |
973 | for (i = 0; i < table->count; i++) { |
974 | if (pi->high_voltage_t && |
975 | pi->high_voltage_t < table->entries[i].v) |
976 | break; |
977 | |
978 | pi->vce_level[i].Frequency = cpu_to_be32(table->entries[i].evclk); |
979 | pi->vce_level[i].MinVoltage = cpu_to_be16(table->entries[i].v); |
980 | |
981 | pi->vce_level[i].ClkBypassCntl = |
982 | (u8)kv_get_clk_bypass(adev, table->entries[i].evclk); |
983 | |
984 | ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, |
985 | table->entries[i].evclk, false, &dividers); |
986 | if (ret) |
987 | return ret; |
988 | pi->vce_level[i].Divider = (u8)dividers.post_div; |
989 | |
990 | pi->vce_level_count++; |
991 | } |
992 | |
993 | ret = amdgpu_kv_copy_bytes_to_smc(adev, |
994 | pi->dpm_table_start + |
995 | offsetof(SMU7_Fusion_DpmTable, VceLevelCount), |
996 | (u8 *)&pi->vce_level_count, |
997 | sizeof(u8), |
998 | pi->sram_end); |
999 | if (ret) |
1000 | return ret; |
1001 | |
1002 | pi->vce_interval = 1; |
1003 | |
1004 | ret = amdgpu_kv_copy_bytes_to_smc(adev, |
1005 | pi->dpm_table_start + |
1006 | offsetof(SMU7_Fusion_DpmTable, VCEInterval), |
1007 | (u8 *)&pi->vce_interval, |
1008 | sizeof(u8), |
1009 | pi->sram_end); |
1010 | if (ret) |
1011 | return ret; |
1012 | |
1013 | ret = amdgpu_kv_copy_bytes_to_smc(adev, |
1014 | pi->dpm_table_start + |
1015 | offsetof(SMU7_Fusion_DpmTable, VceLevel), |
1016 | (u8 *)&pi->vce_level, |
1017 | sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_VCE, |
1018 | pi->sram_end); |
1019 | |
1020 | return ret; |
1021 | } |
1022 | |
1023 | static int kv_populate_samu_table(struct amdgpu_device *adev) |
1024 | { |
1025 | struct kv_power_info *pi = kv_get_pi(adev); |
1026 | struct amdgpu_clock_voltage_dependency_table *table = |
1027 | &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table; |
1028 | struct atom_clock_dividers dividers; |
1029 | int ret; |
1030 | u32 i; |
1031 | |
1032 | if (table == NULL || table->count == 0) |
1033 | return 0; |
1034 | |
1035 | pi->samu_level_count = 0; |
1036 | for (i = 0; i < table->count; i++) { |
1037 | if (pi->high_voltage_t && |
1038 | pi->high_voltage_t < table->entries[i].v) |
1039 | break; |
1040 | |
1041 | pi->samu_level[i].Frequency = cpu_to_be32(table->entries[i].clk); |
1042 | pi->samu_level[i].MinVoltage = cpu_to_be16(table->entries[i].v); |
1043 | |
1044 | pi->samu_level[i].ClkBypassCntl = |
1045 | (u8)kv_get_clk_bypass(adev, table->entries[i].clk); |
1046 | |
1047 | ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, |
1048 | table->entries[i].clk, false, &dividers); |
1049 | if (ret) |
1050 | return ret; |
1051 | pi->samu_level[i].Divider = (u8)dividers.post_div; |
1052 | |
1053 | pi->samu_level_count++; |
1054 | } |
1055 | |
1056 | ret = amdgpu_kv_copy_bytes_to_smc(adev, |
1057 | pi->dpm_table_start + |
1058 | offsetof(SMU7_Fusion_DpmTable, SamuLevelCount), |
1059 | (u8 *)&pi->samu_level_count, |
1060 | sizeof(u8), |
1061 | pi->sram_end); |
1062 | if (ret) |
1063 | return ret; |
1064 | |
1065 | pi->samu_interval = 1; |
1066 | |
1067 | ret = amdgpu_kv_copy_bytes_to_smc(adev, |
1068 | pi->dpm_table_start + |
1069 | offsetof(SMU7_Fusion_DpmTable, SAMUInterval), |
1070 | (u8 *)&pi->samu_interval, |
1071 | sizeof(u8), |
1072 | pi->sram_end); |
1073 | if (ret) |
1074 | return ret; |
1075 | |
1076 | ret = amdgpu_kv_copy_bytes_to_smc(adev, |
1077 | pi->dpm_table_start + |
1078 | offsetof(SMU7_Fusion_DpmTable, SamuLevel), |
1079 | (u8 *)&pi->samu_level, |
1080 | sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_SAMU, |
1081 | pi->sram_end); |
1082 | if (ret) |
1083 | return ret; |
1084 | |
1085 | return ret; |
1086 | } |
1087 | |
1088 | |
1089 | static int kv_populate_acp_table(struct amdgpu_device *adev) |
1090 | { |
1091 | struct kv_power_info *pi = kv_get_pi(adev); |
1092 | struct amdgpu_clock_voltage_dependency_table *table = |
1093 | &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; |
1094 | struct atom_clock_dividers dividers; |
1095 | int ret; |
1096 | u32 i; |
1097 | |
1098 | if (table == NULL || table->count == 0) |
1099 | return 0; |
1100 | |
1101 | pi->acp_level_count = 0; |
1102 | for (i = 0; i < table->count; i++) { |
1103 | pi->acp_level[i].Frequency = cpu_to_be32(table->entries[i].clk); |
1104 | pi->acp_level[i].MinVoltage = cpu_to_be16(table->entries[i].v); |
1105 | |
1106 | ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, |
1107 | table->entries[i].clk, false, &dividers); |
1108 | if (ret) |
1109 | return ret; |
1110 | pi->acp_level[i].Divider = (u8)dividers.post_div; |
1111 | |
1112 | pi->acp_level_count++; |
1113 | } |
1114 | |
1115 | ret = amdgpu_kv_copy_bytes_to_smc(adev, |
1116 | pi->dpm_table_start + |
1117 | offsetof(SMU7_Fusion_DpmTable, AcpLevelCount), |
1118 | (u8 *)&pi->acp_level_count, |
1119 | sizeof(u8), |
1120 | pi->sram_end); |
1121 | if (ret) |
1122 | return ret; |
1123 | |
1124 | pi->acp_interval = 1; |
1125 | |
1126 | ret = amdgpu_kv_copy_bytes_to_smc(adev, |
1127 | pi->dpm_table_start + |
1128 | offsetof(SMU7_Fusion_DpmTable, ACPInterval), |
1129 | (u8 *)&pi->acp_interval, |
1130 | sizeof(u8), |
1131 | pi->sram_end); |
1132 | if (ret) |
1133 | return ret; |
1134 | |
1135 | ret = amdgpu_kv_copy_bytes_to_smc(adev, |
1136 | pi->dpm_table_start + |
1137 | offsetof(SMU7_Fusion_DpmTable, AcpLevel), |
1138 | (u8 *)&pi->acp_level, |
1139 | sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_ACP, |
1140 | pi->sram_end); |
1141 | if (ret) |
1142 | return ret; |
1143 | |
1144 | return ret; |
1145 | } |
1146 | |
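/* DFS bypass selection similar to kv_get_clk_bypass(), applied per graphics
 * DPM level using either the vddc/sclk dependency table or the Sumo
 * sclk/voltage mapping table.
 */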
1147 | static void kv_calculate_dfs_bypass_settings(struct amdgpu_device *adev) |
1148 | { |
1149 | struct kv_power_info *pi = kv_get_pi(adev); |
1150 | u32 i; |
1151 | struct amdgpu_clock_voltage_dependency_table *table = |
1152 | &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; |
1153 | |
1154 | if (table && table->count) { |
1155 | for (i = 0; i < pi->graphics_dpm_level_count; i++) { |
1156 | if (pi->caps_enable_dfs_bypass) { |
1157 | if (kv_get_clock_difference(table->entries[i].clk, 40000) < 200) |
1158 | pi->graphics_level[i].ClkBypassCntl = 3; |
1159 | else if (kv_get_clock_difference(table->entries[i].clk, 30000) < 200) |
1160 | pi->graphics_level[i].ClkBypassCntl = 2; |
1161 | else if (kv_get_clock_difference(table->entries[i].clk, 26600) < 200) |
1162 | pi->graphics_level[i].ClkBypassCntl = 7; |
1163 | else if (kv_get_clock_difference(table->entries[i].clk, 20000) < 200) |
1164 | pi->graphics_level[i].ClkBypassCntl = 6; |
1165 | else if (kv_get_clock_difference(table->entries[i].clk, 10000) < 200) |
1166 | pi->graphics_level[i].ClkBypassCntl = 8; |
1167 | else |
1168 | pi->graphics_level[i].ClkBypassCntl = 0; |
1169 | } else { |
1170 | pi->graphics_level[i].ClkBypassCntl = 0; |
1171 | } |
1172 | } |
1173 | } else { |
1174 | struct sumo_sclk_voltage_mapping_table *table = |
1175 | &pi->sys_info.sclk_voltage_mapping_table; |
1176 | for (i = 0; i < pi->graphics_dpm_level_count; i++) { |
1177 | if (pi->caps_enable_dfs_bypass) { |
1178 | if (kv_get_clock_difference(table->entries[i].sclk_frequency, 40000) < 200) |
1179 | pi->graphics_level[i].ClkBypassCntl = 3; |
1180 | else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 30000) < 200) |
1181 | pi->graphics_level[i].ClkBypassCntl = 2; |
1182 | else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 26600) < 200) |
1183 | pi->graphics_level[i].ClkBypassCntl = 7; |
1184 | else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 20000) < 200) |
1185 | pi->graphics_level[i].ClkBypassCntl = 6; |
1186 | else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 10000) < 200) |
1187 | pi->graphics_level[i].ClkBypassCntl = 8; |
1188 | else |
1189 | pi->graphics_level[i].ClkBypassCntl = 0; |
1190 | } else { |
1191 | pi->graphics_level[i].ClkBypassCntl = 0; |
1192 | } |
1193 | } |
1194 | } |
1195 | } |
1196 | |
1197 | static int kv_enable_ulv(struct amdgpu_device *adev, bool enable) |
1198 | { |
1199 | return amdgpu_kv_notify_message_to_smu(adev, enable ? |
1200 | PPSMC_MSG_EnableULV : PPSMC_MSG_DisableULV); |
1201 | } |
1202 | |
1203 | static void kv_reset_acp_boot_level(struct amdgpu_device *adev) |
1204 | { |
1205 | struct kv_power_info *pi = kv_get_pi(adev); |
1206 | |
1207 | pi->acp_boot_level = 0xff; |
1208 | } |
1209 | |
1210 | static void kv_update_current_ps(struct amdgpu_device *adev, |
1211 | struct amdgpu_ps *rps) |
1212 | { |
1213 | struct kv_ps *new_ps = kv_get_ps(rps); |
1214 | struct kv_power_info *pi = kv_get_pi(adev); |
1215 | |
1216 | pi->current_rps = *rps; |
1217 | pi->current_ps = *new_ps; |
1218 | pi->current_rps.ps_priv = &pi->current_ps; |
1219 | adev->pm.dpm.current_ps = &pi->current_rps; |
1220 | } |
1221 | |
1222 | static void kv_update_requested_ps(struct amdgpu_device *adev, |
1223 | struct amdgpu_ps *rps) |
1224 | { |
1225 | struct kv_ps *new_ps = kv_get_ps(rps); |
1226 | struct kv_power_info *pi = kv_get_pi(adev); |
1227 | |
1228 | pi->requested_rps = *rps; |
1229 | pi->requested_ps = *new_ps; |
1230 | pi->requested_rps.ps_priv = &pi->requested_ps; |
1231 | adev->pm.dpm.requested_ps = &pi->requested_rps; |
1232 | } |
1233 | |
1234 | static void kv_dpm_enable_bapm(void *handle, bool enable) |
1235 | { |
1236 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
1237 | struct kv_power_info *pi = kv_get_pi(adev); |
1238 | int ret; |
1239 | |
1240 | if (pi->bapm_enable) { |
1241 | ret = amdgpu_kv_smc_bapm_enable(adev, enable); |
1242 | if (ret) |
1243 | DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n"); |
1244 | } |
1245 | } |
1246 | |
1247 | static bool kv_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor) |
1248 | { |
1249 | switch (sensor) { |
1250 | case THERMAL_TYPE_KV: |
1251 | return true; |
1252 | case THERMAL_TYPE_NONE: |
1253 | case THERMAL_TYPE_EXTERNAL: |
1254 | case THERMAL_TYPE_EXTERNAL_GPIO: |
1255 | default: |
1256 | return false; |
1257 | } |
1258 | } |
1259 | |
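/* Bring up dynamic power management: locate the SMC tables, program the
 * bootup state and the clock/voltage tables, start the activity monitor and
 * DPM, then enable ULV, DIDT, CAC and, when an internal thermal sensor is
 * present, the thermal interrupts.
 */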
1260 | static int kv_dpm_enable(struct amdgpu_device *adev) |
1261 | { |
1262 | struct kv_power_info *pi = kv_get_pi(adev); |
1263 | int ret; |
1264 | |
1265 | ret = kv_process_firmware_header(adev); |
1266 | if (ret) { |
1267 | DRM_ERROR("kv_process_firmware_header failed\n"); |
1268 | return ret; |
1269 | } |
1270 | kv_init_fps_limits(adev); |
1271 | kv_init_graphics_levels(adev); |
1272 | ret = kv_program_bootup_state(adev); |
1273 | if (ret) { |
1274 | DRM_ERROR("kv_program_bootup_state failed\n"); |
1275 | return ret; |
1276 | } |
1277 | kv_calculate_dfs_bypass_settings(adev); |
1278 | ret = kv_upload_dpm_settings(adev); |
1279 | if (ret) { |
1280 | DRM_ERROR("kv_upload_dpm_settings failed\n"); |
1281 | return ret; |
1282 | } |
1283 | ret = kv_populate_uvd_table(adev); |
1284 | if (ret) { |
1285 | DRM_ERROR("kv_populate_uvd_table failed\n"); |
1286 | return ret; |
1287 | } |
1288 | ret = kv_populate_vce_table(adev); |
1289 | if (ret) { |
1290 | DRM_ERROR("kv_populate_vce_table failed\n"); |
1291 | return ret; |
1292 | } |
1293 | ret = kv_populate_samu_table(adev); |
1294 | if (ret) { |
1295 | DRM_ERROR("kv_populate_samu_table failed\n"); |
1296 | return ret; |
1297 | } |
1298 | ret = kv_populate_acp_table(adev); |
1299 | if (ret) { |
1300 | DRM_ERROR("kv_populate_acp_table failed\n"); |
1301 | return ret; |
1302 | } |
1303 | kv_program_vc(adev); |
1304 | #if 0 |
1305 | kv_initialize_hardware_cac_manager(adev); |
1306 | #endif |
1307 | kv_start_am(adev); |
1308 | if (pi->enable_auto_thermal_throttling) { |
1309 | ret = kv_enable_auto_thermal_throttling(adev); |
1310 | if (ret) { |
1311 | DRM_ERROR("kv_enable_auto_thermal_throttling failed\n"); |
1312 | return ret; |
1313 | } |
1314 | } |
1315 | ret = kv_enable_dpm_voltage_scaling(adev); |
1316 | if (ret) { |
1317 | DRM_ERROR("kv_enable_dpm_voltage_scaling failed\n"); |
1318 | return ret; |
1319 | } |
1320 | ret = kv_set_dpm_interval(adev); |
1321 | if (ret) { |
1322 | DRM_ERROR("kv_set_dpm_interval failed\n"); |
1323 | return ret; |
1324 | } |
1325 | ret = kv_set_dpm_boot_state(adev); |
1326 | if (ret) { |
1327 | DRM_ERROR("kv_set_dpm_boot_state failed\n"); |
1328 | return ret; |
1329 | } |
1330 | ret = kv_enable_ulv(adev, true); |
1331 | if (ret) { |
1332 | DRM_ERROR("kv_enable_ulv failed\n"); |
1333 | return ret; |
1334 | } |
1335 | kv_start_dpm(adev); |
1336 | ret = kv_enable_didt(adev, true); |
1337 | if (ret) { |
1338 | DRM_ERROR("kv_enable_didt failed\n"); |
1339 | return ret; |
1340 | } |
1341 | ret = kv_enable_smc_cac(adev, true); |
1342 | if (ret) { |
1343 | DRM_ERROR("kv_enable_smc_cac failed\n"); |
1344 | return ret; |
1345 | } |
1346 | |
1347 | kv_reset_acp_boot_level(adev); |
1348 | |
1349 | ret = amdgpu_kv_smc_bapm_enable(adev, false); |
1350 | if (ret) { |
1351 | DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n"); |
1352 | return ret; |
1353 | } |
1354 | |
1355 | if (adev->irq.installed && |
1356 | kv_is_internal_thermal_sensor(adev->pm.int_thermal_type)) { |
1357 | ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX); |
1358 | if (ret) { |
1359 | DRM_ERROR("kv_set_thermal_temperature_range failed\n"); |
1360 | return ret; |
1361 | } |
1362 | amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq, |
1363 | AMDGPU_THERMAL_IRQ_LOW_TO_HIGH); |
1364 | amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq, |
1365 | AMDGPU_THERMAL_IRQ_HIGH_TO_LOW); |
1366 | } |
1367 | |
1368 | return ret; |
1369 | } |
1370 | |
1371 | static void kv_dpm_disable(struct amdgpu_device *adev) |
1372 | { |
1373 | struct kv_power_info *pi = kv_get_pi(adev); |
1374 | int err; |
1375 | |
1376 | amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, |
1377 | AMDGPU_THERMAL_IRQ_LOW_TO_HIGH); |
1378 | amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, |
1379 | AMDGPU_THERMAL_IRQ_HIGH_TO_LOW); |
1380 | |
1381 | err = amdgpu_kv_smc_bapm_enable(adev, false); |
1382 | if (err) |
1383 | DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n"); |
1384 | |
1385 | if (adev->asic_type == CHIP_MULLINS) |
1386 | kv_enable_nb_dpm(adev, false); |
1387 | |
1388 | /* powerup blocks */ |
1389 | kv_dpm_powergate_acp(adev, false); |
1390 | kv_dpm_powergate_samu(adev, false); |
1391 | if (pi->caps_vce_pg) /* power on the VCE block */ |
1392 | amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON); |
1393 | if (pi->caps_uvd_pg) /* power on the UVD block */ |
1394 | amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON); |
1395 | |
1396 | kv_enable_smc_cac(adev, false); |
1397 | kv_enable_didt(adev, false); |
1398 | kv_clear_vc(adev); |
1399 | kv_stop_dpm(adev); |
1400 | kv_enable_ulv(adev, false); |
1401 | kv_reset_am(adev); |
1402 | |
1403 | kv_update_current_ps(adev, adev->pm.dpm.boot_ps); |
1404 | } |
1405 | |
1406 | #if 0 |
1407 | static int kv_write_smc_soft_register(struct amdgpu_device *adev, |
1408 | u16 reg_offset, u32 value) |
1409 | { |
1410 | struct kv_power_info *pi = kv_get_pi(adev); |
1411 | |
1412 | return amdgpu_kv_copy_bytes_to_smc(adev, pi->soft_regs_start + reg_offset, |
1413 | (u8 *)&value, sizeof(u16), pi->sram_end); |
1414 | } |
1415 | |
1416 | static int kv_read_smc_soft_register(struct amdgpu_device *adev, |
1417 | u16 reg_offset, u32 *value) |
1418 | { |
1419 | struct kv_power_info *pi = kv_get_pi(adev); |
1420 | |
1421 | return amdgpu_kv_read_smc_sram_dword(adev, pi->soft_regs_start + reg_offset, |
1422 | value, pi->sram_end); |
1423 | } |
1424 | #endif |
1425 | |
1426 | static void kv_init_sclk_t(struct amdgpu_device *adev) |
1427 | { |
1428 | struct kv_power_info *pi = kv_get_pi(adev); |
1429 | |
1430 | pi->low_sclk_interrupt_t = 0; |
1431 | } |
1432 | |
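/* If FPS-based limits are supported, seed the high/low FPS thresholds
 * (45 and 30) in the SMU DPM table.
 */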
1433 | static int kv_init_fps_limits(struct amdgpu_device *adev) |
1434 | { |
1435 | struct kv_power_info *pi = kv_get_pi(adev); |
1436 | int ret = 0; |
1437 | |
1438 | if (pi->caps_fps) { |
1439 | u16 tmp; |
1440 | |
1441 | tmp = 45; |
1442 | pi->fps_high_t = cpu_to_be16(tmp); |
1443 | ret = amdgpu_kv_copy_bytes_to_smc(adev, |
1444 | pi->dpm_table_start + |
1445 | offsetof(SMU7_Fusion_DpmTable, FpsHighT), |
1446 | (u8 *)&pi->fps_high_t, |
1447 | sizeof(u16), pi->sram_end); |
1448 | |
1449 | tmp = 30; |
1450 | pi->fps_low_t = cpu_to_be16(tmp); |
1451 | |
1452 | ret = amdgpu_kv_copy_bytes_to_smc(adev, |
1453 | pi->dpm_table_start + |
1454 | offsetof(SMU7_Fusion_DpmTable, FpsLowT), |
1455 | (u8 *)&pi->fps_low_t, |
1456 | sizeof(u16), pi->sram_end); |
1457 | |
1458 | } |
1459 | return ret; |
1460 | } |
1461 | |
1462 | static void kv_init_powergate_state(struct amdgpu_device *adev) |
1463 | { |
1464 | struct kv_power_info *pi = kv_get_pi(adev); |
1465 | |
1466 | pi->uvd_power_gated = false; |
1467 | pi->vce_power_gated = false; |
1468 | pi->samu_power_gated = false; |
1469 | pi->acp_power_gated = false; |
1470 | |
1471 | } |
1472 | |
1473 | static int kv_enable_uvd_dpm(struct amdgpu_device *adev, bool enable) |
1474 | { |
1475 | return amdgpu_kv_notify_message_to_smu(adev, enable ? |
1476 | PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable); |
1477 | } |
1478 | |
1479 | static int kv_enable_vce_dpm(struct amdgpu_device *adev, bool enable) |
1480 | { |
1481 | return amdgpu_kv_notify_message_to_smu(adev, enable ? |
1482 | PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable); |
1483 | } |
1484 | |
1485 | static int kv_enable_samu_dpm(struct amdgpu_device *adev, bool enable) |
1486 | { |
1487 | return amdgpu_kv_notify_message_to_smu(adev, enable ? |
1488 | PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable); |
1489 | } |
1490 | |
1491 | static int kv_enable_acp_dpm(struct amdgpu_device *adev, bool enable) |
1492 | { |
1493 | return amdgpu_kv_notify_message_to_smu(adev, enable ? |
1494 | PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable); |
1495 | } |
1496 | |
1497 | static int kv_update_uvd_dpm(struct amdgpu_device *adev, bool gate) |
1498 | { |
1499 | struct kv_power_info *pi = kv_get_pi(adev); |
1500 | struct amdgpu_uvd_clock_voltage_dependency_table *table = |
1501 | &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; |
1502 | int ret; |
1503 | u32 mask; |
1504 | |
1505 | if (!gate) { |
1506 | if (table->count) |
1507 | pi->uvd_boot_level = table->count - 1; |
1508 | else |
1509 | pi->uvd_boot_level = 0; |
1510 | |
1511 | if (!pi->caps_uvd_dpm || pi->caps_stable_p_state) { |
1512 | mask = 1 << pi->uvd_boot_level; |
1513 | } else { |
1514 | mask = 0x1f; |
1515 | } |
1516 | |
1517 | ret = amdgpu_kv_copy_bytes_to_smc(adev, |
1518 | pi->dpm_table_start + |
1519 | offsetof(SMU7_Fusion_DpmTable, UvdBootLevel), |
1520 | (uint8_t *)&pi->uvd_boot_level, |
1521 | sizeof(u8), pi->sram_end); |
1522 | if (ret) |
1523 | return ret; |
1524 | |
1525 | amdgpu_kv_send_msg_to_smc_with_parameter(adev, |
1526 | PPSMC_MSG_UVDDPM_SetEnabledMask, |
1527 | mask); |
1528 | } |
1529 | |
1530 | return kv_enable_uvd_dpm(adev, !gate); |
1531 | } |
1532 | |
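/* Return the index of the first VCE DPM level whose evclk is at least the
 * requested evclk.
 */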
1533 | static u8 kv_get_vce_boot_level(struct amdgpu_device *adev, u32 evclk) |
1534 | { |
1535 | u8 i; |
1536 | struct amdgpu_vce_clock_voltage_dependency_table *table = |
1537 | &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; |
1538 | |
1539 | for (i = 0; i < table->count; i++) { |
1540 | if (table->entries[i].evclk >= evclk) |
1541 | break; |
1542 | } |
1543 | |
1544 | return i; |
1545 | } |
1546 | |
1547 | static int kv_update_vce_dpm(struct amdgpu_device *adev, |
1548 | struct amdgpu_ps *amdgpu_new_state, |
1549 | struct amdgpu_ps *amdgpu_current_state) |
1550 | { |
1551 | struct kv_power_info *pi = kv_get_pi(adev); |
1552 | struct amdgpu_vce_clock_voltage_dependency_table *table = |
1553 | &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; |
1554 | int ret; |
1555 | |
1556 | if (amdgpu_new_state->evclk > 0 && amdgpu_current_state->evclk == 0) { |
1557 | if (pi->caps_stable_p_state) |
1558 | pi->vce_boot_level = table->count - 1; |
1559 | else |
1560 | pi->vce_boot_level = kv_get_vce_boot_level(adev, amdgpu_new_state->evclk); |
1561 | |
1562 | ret = amdgpu_kv_copy_bytes_to_smc(adev, |
1563 | pi->dpm_table_start + |
1564 | offsetof(SMU7_Fusion_DpmTable, VceBootLevel), |
1565 | (u8 *)&pi->vce_boot_level, |
1566 | sizeof(u8), |
1567 | pi->sram_end); |
1568 | if (ret) |
1569 | return ret; |
1570 | |
1571 | if (pi->caps_stable_p_state) |
1572 | amdgpu_kv_send_msg_to_smc_with_parameter(adev, |
1573 | PPSMC_MSG_VCEDPM_SetEnabledMask, |
1574 | (1 << pi->vce_boot_level)); |
1575 | kv_enable_vce_dpm(adev, true); |
1576 | } else if (amdgpu_new_state->evclk == 0 && amdgpu_current_state->evclk > 0) { |
1577 | kv_enable_vce_dpm(adev, false); |
1578 | } |
1579 | |
1580 | return 0; |
1581 | } |
1582 | |
1583 | static int kv_update_samu_dpm(struct amdgpu_device *adev, bool gate) |
1584 | { |
1585 | struct kv_power_info *pi = kv_get_pi(adev); |
1586 | struct amdgpu_clock_voltage_dependency_table *table = |
1587 | &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table; |
1588 | int ret; |
1589 | |
1590 | if (!gate) { |
1591 | if (pi->caps_stable_p_state) |
1592 | pi->samu_boot_level = table->count - 1; |
1593 | else |
1594 | pi->samu_boot_level = 0; |
1595 | |
1596 | ret = amdgpu_kv_copy_bytes_to_smc(adev, |
1597 | pi->dpm_table_start + |
1598 | offsetof(SMU7_Fusion_DpmTable, SamuBootLevel), |
1599 | (u8 *)&pi->samu_boot_level, |
1600 | sizeof(u8), |
1601 | pi->sram_end); |
1602 | if (ret) |
1603 | return ret; |
1604 | |
1605 | if (pi->caps_stable_p_state) |
1606 | amdgpu_kv_send_msg_to_smc_with_parameter(adev, |
1607 | PPSMC_MSG_SAMUDPM_SetEnabledMask, |
1608 | (1 << pi->samu_boot_level)); |
1609 | } |
1610 | |
1611 | return kv_enable_samu_dpm(adev, !gate); |
1612 | } |
1613 | |
1614 | static u8 kv_get_acp_boot_level(struct amdgpu_device *adev) |
1615 | { |
1616 | return 0; |
1617 | } |
1618 | |
1619 | static void kv_update_acp_boot_level(struct amdgpu_device *adev) |
1620 | { |
1621 | struct kv_power_info *pi = kv_get_pi(adev); |
1622 | u8 acp_boot_level; |
1623 | |
1624 | if (!pi->caps_stable_p_state) { |
1625 | acp_boot_level = kv_get_acp_boot_level(adev); |
1626 | if (acp_boot_level != pi->acp_boot_level) { |
1627 | pi->acp_boot_level = acp_boot_level; |
1628 | amdgpu_kv_send_msg_to_smc_with_parameter(adev, |
1629 | PPSMC_MSG_ACPDPM_SetEnabledMask, |
1630 | (1 << pi->acp_boot_level)); |
1631 | } |
1632 | } |
1633 | } |
1634 | |
1635 | static int kv_update_acp_dpm(struct amdgpu_device *adev, bool gate) |
1636 | { |
1637 | struct kv_power_info *pi = kv_get_pi(adev); |
1638 | struct amdgpu_clock_voltage_dependency_table *table = |
1639 | &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; |
1640 | int ret; |
1641 | |
1642 | if (!gate) { |
1643 | if (pi->caps_stable_p_state) |
1644 | pi->acp_boot_level = table->count - 1; |
1645 | else |
1646 | pi->acp_boot_level = kv_get_acp_boot_level(adev); |
1647 | |
1648 | ret = amdgpu_kv_copy_bytes_to_smc(adev, |
1649 | pi->dpm_table_start + |
1650 | offsetof(SMU7_Fusion_DpmTable, AcpBootLevel), |
1651 | (u8 *)&pi->acp_boot_level, |
1652 | sizeof(u8), |
1653 | pi->sram_end); |
1654 | if (ret) |
1655 | return ret; |
1656 | |
1657 | if (pi->caps_stable_p_state) |
1658 | amdgpu_kv_send_msg_to_smc_with_parameter(adev, |
1659 | PPSMC_MSG_ACPDPM_SetEnabledMask, |
1660 | (1 << pi->acp_boot_level)); |
1661 | } |
1662 | |
1663 | return kv_enable_acp_dpm(adev, !gate); |
1664 | } |
1665 | |
1666 | static void kv_dpm_powergate_uvd(void *handle, bool gate) |
1667 | { |
1668 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
1669 | struct kv_power_info *pi = kv_get_pi(adev); |
1670 | |
1671 | pi->uvd_power_gated = gate; |
1672 | |
1673 | if (gate) { |
1674 | /* stop the UVD block */ |
1675 | amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, |
1676 | AMD_PG_STATE_GATE); |
1677 | kv_update_uvd_dpm(adev, gate); |
1678 | if (pi->caps_uvd_pg) |
1679 | /* power off the UVD block */ |
1680 | amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerOFF); |
1681 | } else { |
1682 | if (pi->caps_uvd_pg) |
1683 | /* power on the UVD block */ |
1684 | amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON); |
1685 | /* re-init the UVD block */ |
1686 | kv_update_uvd_dpm(adev, gate); |
1687 | |
1688 | amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, |
1689 | AMD_PG_STATE_UNGATE); |
1690 | } |
1691 | } |
1692 | |
1693 | static void kv_dpm_powergate_vce(void *handle, bool gate) |
1694 | { |
1695 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
1696 | struct kv_power_info *pi = kv_get_pi(adev); |
1697 | |
1698 | pi->vce_power_gated = gate; |
1699 | |
1700 | if (gate) { |
1701 | /* stop the VCE block */ |
1702 | amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, |
1703 | AMD_PG_STATE_GATE); |
1704 | kv_enable_vce_dpm(adev, false); |
1705 | if (pi->caps_vce_pg) /* power off the VCE block */ |
1706 | amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF); |
1707 | } else { |
1708 | if (pi->caps_vce_pg) /* power on the VCE block */ |
1709 | amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON); |
1710 | kv_enable_vce_dpm(adev, true); |
1711 | /* re-init the VCE block */ |
1712 | amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, |
1713 | AMD_PG_STATE_UNGATE); |
1714 | } |
1715 | } |
1716 | |
1717 | |
1718 | static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate) |
1719 | { |
1720 | struct kv_power_info *pi = kv_get_pi(adev); |
1721 | |
1722 | if (pi->samu_power_gated == gate) |
1723 | return; |
1724 | |
1725 | pi->samu_power_gated = gate; |
1726 | |
1727 | if (gate) { |
1728 | kv_update_samu_dpm(adev, true); |
1729 | if (pi->caps_samu_pg) |
1730 | amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_SAMPowerOFF); |
1731 | } else { |
1732 | if (pi->caps_samu_pg) |
1733 | amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_SAMPowerON); |
1734 | kv_update_samu_dpm(adev, false); |
1735 | } |
1736 | } |
1737 | |
1738 | static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate) |
1739 | { |
1740 | struct kv_power_info *pi = kv_get_pi(adev); |
1741 | |
1742 | if (pi->acp_power_gated == gate) |
1743 | return; |
1744 | |
1745 | if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) |
1746 | return; |
1747 | |
1748 | pi->acp_power_gated = gate; |
1749 | |
1750 | if (gate) { |
1751 | kv_update_acp_dpm(adev, true); |
1752 | if (pi->caps_acp_pg) |
1753 | amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_ACPPowerOFF); |
1754 | } else { |
1755 | if (pi->caps_acp_pg) |
1756 | amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_ACPPowerON); |
1757 | kv_update_acp_dpm(adev, false); |
1758 | } |
1759 | } |
1760 | |
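/*
 * Determine the lowest and highest valid graphics DPM levels that
 * bracket the sclk range requested by the new power state.
 */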
1761 | static void kv_set_valid_clock_range(struct amdgpu_device *adev, |
1762 | struct amdgpu_ps *new_rps) |
1763 | { |
1764 | struct kv_ps *new_ps = kv_get_ps(new_rps); |
1765 | struct kv_power_info *pi = kv_get_pi(adev); |
1766 | u32 i; |
1767 | struct amdgpu_clock_voltage_dependency_table *table = |
1768 | &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; |
1769 | |
1770 | if (table && table->count) { |
1771 | for (i = 0; i < pi->graphics_dpm_level_count; i++) { |
1772 | if ((table->entries[i].clk >= new_ps->levels[0].sclk) || |
1773 | (i == (pi->graphics_dpm_level_count - 1))) { |
1774 | pi->lowest_valid = i; |
1775 | break; |
1776 | } |
1777 | } |
1778 | |
1779 | for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) { |
1780 | if (table->entries[i].clk <= new_ps->levels[new_ps->num_levels - 1].sclk) |
1781 | break; |
1782 | } |
1783 | pi->highest_valid = i; |
1784 | |
1785 | if (pi->lowest_valid > pi->highest_valid) { |
1786 | if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) > |
1787 | (table->entries[pi->lowest_valid].clk - new_ps->levels[new_ps->num_levels - 1].sclk)) |
1788 | pi->highest_valid = pi->lowest_valid; |
1789 | else |
1790 | pi->lowest_valid = pi->highest_valid; |
1791 | } |
1792 | } else { |
1793 | struct sumo_sclk_voltage_mapping_table *table = |
1794 | &pi->sys_info.sclk_voltage_mapping_table; |
1795 | |
1796 | for (i = 0; i < (int)pi->graphics_dpm_level_count; i++) { |
1797 | if (table->entries[i].sclk_frequency >= new_ps->levels[0].sclk || |
1798 | i == (int)(pi->graphics_dpm_level_count - 1)) { |
1799 | pi->lowest_valid = i; |
1800 | break; |
1801 | } |
1802 | } |
1803 | |
1804 | for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) { |
1805 | if (table->entries[i].sclk_frequency <= |
1806 | new_ps->levels[new_ps->num_levels - 1].sclk) |
1807 | break; |
1808 | } |
1809 | pi->highest_valid = i; |
1810 | |
1811 | if (pi->lowest_valid > pi->highest_valid) { |
1812 | if ((new_ps->levels[0].sclk - |
1813 | table->entries[pi->highest_valid].sclk_frequency) > |
1814 | (table->entries[pi->lowest_valid].sclk_frequency - |
1815 | new_ps->levels[new_ps->num_levels - 1].sclk)) |
1816 | pi->highest_valid = pi->lowest_valid; |
1817 | else |
1818 | pi->lowest_valid = pi->highest_valid; |
1819 | } |
1820 | } |
1821 | } |
1822 | |
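/* write the boot graphics level's DFS bypass control byte to the SMC, if supported */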
1823 | static int kv_update_dfs_bypass_settings(struct amdgpu_device *adev, |
1824 | struct amdgpu_ps *new_rps) |
1825 | { |
1826 | struct kv_ps *new_ps = kv_get_ps(new_rps); |
1827 | struct kv_power_info *pi = kv_get_pi(adev); |
1828 | int ret = 0; |
1829 | u8 clk_bypass_cntl; |
1830 | |
1831 | if (pi->caps_enable_dfs_bypass) { |
1832 | clk_bypass_cntl = new_ps->need_dfs_bypass ? |
1833 | pi->graphics_level[pi->graphics_boot_level].ClkBypassCntl : 0; |
1834 | ret = amdgpu_kv_copy_bytes_to_smc(adev, |
1835 | (pi->dpm_table_start + |
1836 | offsetof(SMU7_Fusion_DpmTable, GraphicsLevel) + |
1837 | (pi->graphics_boot_level * sizeof(SMU7_Fusion_GraphicsLevel)) + |
1838 | offsetof(SMU7_Fusion_GraphicsLevel, ClkBypassCntl)), |
1839 | &clk_bypass_cntl, |
1840 | sizeof(u8), pi->sram_end); |
1841 | } |
1842 | |
1843 | return ret; |
1844 | } |
1845 | |
1846 | static int kv_enable_nb_dpm(struct amdgpu_device *adev, |
1847 | bool enable) |
1848 | { |
1849 | struct kv_power_info *pi = kv_get_pi(adev); |
1850 | int ret = 0; |
1851 | |
1852 | if (enable) { |
1853 | if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) { |
1854 | ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NBDPM_Enable); |
1855 | if (ret == 0) |
1856 | pi->nb_dpm_enabled = true; |
1857 | } |
1858 | } else { |
1859 | if (pi->enable_nb_dpm && pi->nb_dpm_enabled) { |
1860 | ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NBDPM_Disable); |
1861 | if (ret == 0) |
1862 | pi->nb_dpm_enabled = false; |
1863 | } |
1864 | } |
1865 | |
1866 | return ret; |
1867 | } |
1868 | |
1869 | static int kv_dpm_force_performance_level(void *handle, |
1870 | enum amd_dpm_forced_level level) |
1871 | { |
1872 | int ret; |
1873 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
1874 | |
1875 | if (level == AMD_DPM_FORCED_LEVEL_HIGH) { |
1876 | ret = kv_force_dpm_highest(adev); |
1877 | if (ret) |
1878 | return ret; |
1879 | } else if (level == AMD_DPM_FORCED_LEVEL_LOW) { |
1880 | ret = kv_force_dpm_lowest(adev); |
1881 | if (ret) |
1882 | return ret; |
1883 | } else if (level == AMD_DPM_FORCED_LEVEL_AUTO) { |
1884 | ret = kv_unforce_levels(adev); |
1885 | if (ret) |
1886 | return ret; |
1887 | } |
1888 | |
1889 | adev->pm.dpm.forced_level = level; |
1890 | |
1891 | return 0; |
1892 | } |
1893 | |
1894 | static int kv_dpm_pre_set_power_state(void *handle) |
1895 | { |
1896 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
1897 | struct kv_power_info *pi = kv_get_pi(adev); |
1898 | struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps; |
1899 | struct amdgpu_ps *new_ps = &requested_ps; |
1900 | |
1901 | kv_update_requested_ps(adev, new_ps); |
1902 | |
1903 | kv_apply_state_adjust_rules(adev, |
1904 | &pi->requested_rps, |
1905 | &pi->current_rps); |
1906 | |
1907 | return 0; |
1908 | } |
1909 | |
1910 | static int kv_dpm_set_power_state(void *handle) |
1911 | { |
1912 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
1913 | struct kv_power_info *pi = kv_get_pi(adev); |
1914 | struct amdgpu_ps *new_ps = &pi->requested_rps; |
1915 | struct amdgpu_ps *old_ps = &pi->current_rps; |
1916 | int ret; |
1917 | |
1918 | if (pi->bapm_enable) { |
1919 | ret = amdgpu_kv_smc_bapm_enable(adev, adev->pm.ac_power); |
1920 | if (ret) { |
1921 | DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n"); |
1922 | return ret; |
1923 | } |
1924 | } |
1925 | |
1926 | if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) { |
1927 | if (pi->enable_dpm) { |
1928 | kv_set_valid_clock_range(adev, new_ps); |
1929 | kv_update_dfs_bypass_settings(adev, new_ps); |
1930 | ret = kv_calculate_ds_divider(adev); |
1931 | if (ret) { |
1932 | DRM_ERROR("kv_calculate_ds_divider failed\n"); |
1933 | return ret; |
1934 | } |
1935 | kv_calculate_nbps_level_settings(adev); |
1936 | kv_calculate_dpm_settings(adev); |
1937 | kv_force_lowest_valid(adev); |
1938 | kv_enable_new_levels(adev); |
1939 | kv_upload_dpm_settings(adev); |
1940 | kv_program_nbps_index_settings(adev, new_ps); |
1941 | kv_unforce_levels(adev); |
1942 | kv_set_enabled_levels(adev); |
1943 | kv_force_lowest_valid(adev); |
1944 | kv_unforce_levels(adev); |
1945 | |
1946 | ret = kv_update_vce_dpm(adev, new_ps, old_ps); |
1947 | if (ret) { |
1948 | DRM_ERROR("kv_update_vce_dpm failed\n"); |
1949 | return ret; |
1950 | } |
1951 | kv_update_sclk_t(adev); |
1952 | if (adev->asic_type == CHIP_MULLINS) |
1953 | kv_enable_nb_dpm(adev, true); |
1954 | } |
1955 | } else { |
1956 | if (pi->enable_dpm) { |
1957 | kv_set_valid_clock_range(adev, new_ps); |
1958 | kv_update_dfs_bypass_settings(adev, new_ps); |
1959 | ret = kv_calculate_ds_divider(adev); |
1960 | if (ret) { |
1961 | DRM_ERROR("kv_calculate_ds_divider failed\n"); |
1962 | return ret; |
1963 | } |
1964 | kv_calculate_nbps_level_settings(adev); |
1965 | kv_calculate_dpm_settings(adev); |
1966 | kv_freeze_sclk_dpm(adev, true); |
1967 | kv_upload_dpm_settings(adev); |
1968 | kv_program_nbps_index_settings(adev, new_ps); |
1969 | kv_freeze_sclk_dpm(adev, false); |
1970 | kv_set_enabled_levels(adev); |
1971 | ret = kv_update_vce_dpm(adev, new_ps, old_ps); |
1972 | if (ret) { |
1973 | DRM_ERROR("kv_update_vce_dpm failed\n"); |
1974 | return ret; |
1975 | } |
1976 | kv_update_acp_boot_level(adev); |
1977 | kv_update_sclk_t(adev); |
1978 | kv_enable_nb_dpm(adev, true); |
1979 | } |
1980 | } |
1981 | |
1982 | return 0; |
1983 | } |
1984 | |
1985 | static void kv_dpm_post_set_power_state(void *handle) |
1986 | { |
1987 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
1988 | struct kv_power_info *pi = kv_get_pi(adev); |
1989 | struct amdgpu_ps *new_ps = &pi->requested_rps; |
1990 | |
1991 | kv_update_current_ps(adev, new_ps); |
1992 | } |
1993 | |
1994 | static void kv_dpm_setup_asic(struct amdgpu_device *adev) |
1995 | { |
1996 | sumo_take_smu_control(adev, true); |
1997 | kv_init_powergate_state(adev); |
1998 | kv_init_sclk_t(adev); |
1999 | } |
2000 | |
2001 | #if 0 |
2002 | static void kv_dpm_reset_asic(struct amdgpu_device *adev) |
2003 | { |
2004 | struct kv_power_info *pi = kv_get_pi(adev); |
2005 | |
2006 | if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) { |
2007 | kv_force_lowest_valid(adev); |
2008 | kv_init_graphics_levels(adev); |
2009 | kv_program_bootup_state(adev); |
2010 | kv_upload_dpm_settings(adev); |
2011 | kv_force_lowest_valid(adev); |
2012 | kv_unforce_levels(adev); |
2013 | } else { |
2014 | kv_init_graphics_levels(adev); |
2015 | kv_program_bootup_state(adev); |
2016 | kv_freeze_sclk_dpm(adev, true); |
2017 | kv_upload_dpm_settings(adev); |
2018 | kv_freeze_sclk_dpm(adev, false); |
2019 | kv_set_enabled_level(adev, pi->graphics_boot_level); |
2020 | } |
2021 | } |
2022 | #endif |
2023 | |
2024 | static void kv_construct_max_power_limits_table(struct amdgpu_device *adev, |
2025 | struct amdgpu_clock_and_voltage_limits *table) |
2026 | { |
2027 | struct kv_power_info *pi = kv_get_pi(adev); |
2028 | |
2029 | if (pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries > 0) { |
2030 | int idx = pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries - 1; |
2031 | table->sclk = |
2032 | pi->sys_info.sclk_voltage_mapping_table.entries[idx].sclk_frequency; |
2033 | table->vddc = |
2034 | kv_convert_2bit_index_to_voltage(adev, |
2035 | pi->sys_info.sclk_voltage_mapping_table.entries[idx].vid_2bit); |
2036 | } |
2037 | |
2038 | table->mclk = pi->sys_info.nbp_memory_clock[0]; |
2039 | } |
2040 | |
2041 | static void kv_patch_voltage_values(struct amdgpu_device *adev) |
2042 | { |
2043 | int i; |
2044 | struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table = |
2045 | &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; |
2046 | struct amdgpu_vce_clock_voltage_dependency_table *vce_table = |
2047 | &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; |
2048 | struct amdgpu_clock_voltage_dependency_table *samu_table = |
2049 | &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table; |
2050 | struct amdgpu_clock_voltage_dependency_table *acp_table = |
2051 | &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; |
2052 | |
2053 | if (uvd_table->count) { |
2054 | for (i = 0; i < uvd_table->count; i++) |
2055 | uvd_table->entries[i].v = |
2056 | kv_convert_8bit_index_to_voltage(adev, |
2057 | uvd_table->entries[i].v); |
2058 | } |
2059 | |
2060 | if (vce_table->count) { |
2061 | for (i = 0; i < vce_table->count; i++) |
2062 | vce_table->entries[i].v = |
2063 | kv_convert_8bit_index_to_voltage(adev, |
2064 | vce_table->entries[i].v); |
2065 | } |
2066 | |
2067 | if (samu_table->count) { |
2068 | for (i = 0; i < samu_table->count; i++) |
2069 | samu_table->entries[i].v = |
2070 | kv_convert_8bit_index_to_voltage(adev, |
2071 | samu_table->entries[i].v); |
2072 | } |
2073 | |
2074 | if (acp_table->count) { |
2075 | for (i = 0; i < acp_table->count; i++) |
2076 | acp_table->entries[i].v = |
2077 | kv_convert_8bit_index_to_voltage(adev, |
2078 | acp_table->entries[i].v); |
2079 | } |
2080 | |
2081 | } |
2082 | |
2083 | static void kv_construct_boot_state(struct amdgpu_device *adev) |
2084 | { |
2085 | struct kv_power_info *pi = kv_get_pi(adev); |
2086 | |
2087 | pi->boot_pl.sclk = pi->sys_info.bootup_sclk; |
2088 | pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index; |
2089 | pi->boot_pl.ds_divider_index = 0; |
2090 | pi->boot_pl.ss_divider_index = 0; |
2091 | pi->boot_pl.allow_gnb_slow = 1; |
2092 | pi->boot_pl.force_nbp_state = 0; |
2093 | pi->boot_pl.display_wm = 0; |
2094 | pi->boot_pl.vce_wm = 0; |
2095 | } |
2096 | |
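/* force the highest enabled SCLK DPM level */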
2097 | static int kv_force_dpm_highest(struct amdgpu_device *adev) |
2098 | { |
2099 | int ret; |
2100 | u32 enable_mask, i; |
2101 | |
2102 | ret = amdgpu_kv_dpm_get_enable_mask(adev, &enable_mask); |
2103 | if (ret) |
2104 | return ret; |
2105 | |
2106 | for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i > 0; i--) { |
2107 | if (enable_mask & (1 << i)) |
2108 | break; |
2109 | } |
2110 | |
2111 | if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) |
2112 | return amdgpu_kv_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DPM_ForceState, i); |
2113 | else |
2114 | return kv_set_enabled_level(adev, i); |
2115 | } |
2116 | |
2117 | static int kv_force_dpm_lowest(struct amdgpu_device *adev) |
2118 | { |
2119 | int ret; |
2120 | u32 enable_mask, i; |
2121 | |
2122 | ret = amdgpu_kv_dpm_get_enable_mask(adev, &enable_mask); |
2123 | if (ret) |
2124 | return ret; |
2125 | |
2126 | for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) { |
2127 | if (enable_mask & (1 << i)) |
2128 | break; |
2129 | } |
2130 | |
2131 | if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) |
2132 | return amdgpu_kv_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DPM_ForceState, i); |
2133 | else |
2134 | return kv_set_enabled_level(adev, i); |
2135 | } |
2136 | |
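/* pick the deepest sleep divider that keeps sclk >> id above the minimum engine clock */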
2137 | static u8 kv_get_sleep_divider_id_from_clock(struct amdgpu_device *adev, |
2138 | u32 sclk, u32 min_sclk_in_sr) |
2139 | { |
2140 | struct kv_power_info *pi = kv_get_pi(adev); |
2141 | u32 i; |
2142 | u32 temp; |
2143 | u32 min = max(min_sclk_in_sr, (u32)KV_MINIMUM_ENGINE_CLOCK); |
2144 | |
2145 | if (sclk < min) |
2146 | return 0; |
2147 | |
2148 | if (!pi->caps_sclk_ds) |
2149 | return 0; |
2150 | |
2151 | for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--) { |
2152 | temp = sclk >> i; |
2153 | if (temp >= min) |
2154 | break; |
2155 | } |
2156 | |
2157 | return (u8)i; |
2158 | } |
2159 | |
2160 | static int kv_get_high_voltage_limit(struct amdgpu_device *adev, int *limit) |
2161 | { |
2162 | struct kv_power_info *pi = kv_get_pi(adev); |
2163 | struct amdgpu_clock_voltage_dependency_table *table = |
2164 | &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; |
2165 | int i; |
2166 | |
2167 | if (table && table->count) { |
2168 | for (i = table->count - 1; i >= 0; i--) { |
2169 | if (pi->high_voltage_t && |
2170 | (kv_convert_8bit_index_to_voltage(adev, table->entries[i].v) <= |
2171 | pi->high_voltage_t)) { |
2172 | *limit = i; |
2173 | return 0; |
2174 | } |
2175 | } |
2176 | } else { |
2177 | struct sumo_sclk_voltage_mapping_table *table = |
2178 | &pi->sys_info.sclk_voltage_mapping_table; |
2179 | |
2180 | for (i = table->num_max_dpm_entries - 1; i >= 0; i--) { |
2181 | if (pi->high_voltage_t && |
2182 | (kv_convert_2bit_index_to_voltage(adev, table->entries[i].vid_2bit) <= |
2183 | pi->high_voltage_t)) { |
2184 | *limit = i; |
2185 | return 0; |
2186 | } |
2187 | } |
2188 | } |
2189 | |
2190 | *limit = 0; |
2191 | return 0; |
2192 | } |
2193 | |
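/*
 * Adjust the requested power state to honor VCE clock requirements,
 * stable p-state, high-voltage limits and NB p-state selection.
 */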
2194 | static void kv_apply_state_adjust_rules(struct amdgpu_device *adev, |
2195 | struct amdgpu_ps *new_rps, |
2196 | struct amdgpu_ps *old_rps) |
2197 | { |
2198 | struct kv_ps *ps = kv_get_ps(new_rps); |
2199 | struct kv_power_info *pi = kv_get_pi(adev); |
2200 | u32 min_sclk = 10000; /* ??? */ |
2201 | u32 sclk, mclk = 0; |
2202 | int i, limit; |
2203 | bool force_high; |
2204 | struct amdgpu_clock_voltage_dependency_table *table = |
2205 | &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; |
2206 | u32 stable_p_state_sclk = 0; |
2207 | struct amdgpu_clock_and_voltage_limits *max_limits = |
2208 | &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac; |
2209 | |
2210 | if (new_rps->vce_active) { |
2211 | new_rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk; |
2212 | new_rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk; |
2213 | } else { |
2214 | new_rps->evclk = 0; |
2215 | new_rps->ecclk = 0; |
2216 | } |
2217 | |
2218 | mclk = max_limits->mclk; |
2219 | sclk = min_sclk; |
2220 | |
2221 | if (pi->caps_stable_p_state) { |
2222 | stable_p_state_sclk = (max_limits->sclk * 75) / 100; |
2223 | |
2224 | for (i = table->count - 1; i >= 0; i--) { |
2225 | if (stable_p_state_sclk >= table->entries[i].clk) { |
2226 | stable_p_state_sclk = table->entries[i].clk; |
2227 | break; |
2228 | } |
2229 | } |
2230 | |
2231 | if (i > 0) |
2232 | stable_p_state_sclk = table->entries[0].clk; |
2233 | |
2234 | sclk = stable_p_state_sclk; |
2235 | } |
2236 | |
2237 | if (new_rps->vce_active) { |
2238 | if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk) |
2239 | sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk; |
2240 | } |
2241 | |
2242 | ps->need_dfs_bypass = true; |
2243 | |
2244 | for (i = 0; i < ps->num_levels; i++) { |
2245 | if (ps->levels[i].sclk < sclk) |
2246 | ps->levels[i].sclk = sclk; |
2247 | } |
2248 | |
2249 | if (table && table->count) { |
2250 | for (i = 0; i < ps->num_levels; i++) { |
2251 | if (pi->high_voltage_t && |
2252 | (pi->high_voltage_t < |
2253 | kv_convert_8bit_index_to_voltage(adev, ps->levels[i].vddc_index))) { |
2254 | kv_get_high_voltage_limit(adev, &limit); |
2255 | ps->levels[i].sclk = table->entries[limit].clk; |
2256 | } |
2257 | } |
2258 | } else { |
2259 | struct sumo_sclk_voltage_mapping_table *table = |
2260 | &pi->sys_info.sclk_voltage_mapping_table; |
2261 | |
2262 | for (i = 0; i < ps->num_levels; i++) { |
2263 | if (pi->high_voltage_t && |
2264 | (pi->high_voltage_t < |
2265 | kv_convert_8bit_index_to_voltage(adev, ps->levels[i].vddc_index))) { |
2266 | kv_get_high_voltage_limit(adev, &limit); |
2267 | ps->levels[i].sclk = table->entries[limit].sclk_frequency; |
2268 | } |
2269 | } |
2270 | } |
2271 | |
2272 | if (pi->caps_stable_p_state) { |
2273 | for (i = 0; i < ps->num_levels; i++) { |
2274 | ps->levels[i].sclk = stable_p_state_sclk; |
2275 | } |
2276 | } |
2277 | |
2278 | pi->video_start = new_rps->dclk || new_rps->vclk || |
2279 | new_rps->evclk || new_rps->ecclk; |
2280 | |
2281 | if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == |
2282 | ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) |
2283 | pi->battery_state = true; |
2284 | else |
2285 | pi->battery_state = false; |
2286 | |
2287 | if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) { |
2288 | ps->dpm0_pg_nb_ps_lo = 0x1; |
2289 | ps->dpm0_pg_nb_ps_hi = 0x0; |
2290 | ps->dpmx_nb_ps_lo = 0x1; |
2291 | ps->dpmx_nb_ps_hi = 0x0; |
2292 | } else { |
2293 | ps->dpm0_pg_nb_ps_lo = 0x3; |
2294 | ps->dpm0_pg_nb_ps_hi = 0x0; |
2295 | ps->dpmx_nb_ps_lo = 0x3; |
2296 | ps->dpmx_nb_ps_hi = 0x0; |
2297 | |
2298 | if (pi->sys_info.nb_dpm_enable) { |
2299 | force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) || |
2300 | pi->video_start || (adev->pm.dpm.new_active_crtc_count >= 3) || |
2301 | pi->disable_nb_ps3_in_battery; |
2302 | ps->dpm0_pg_nb_ps_lo = force_high ? 0x2 : 0x3; |
2303 | ps->dpm0_pg_nb_ps_hi = 0x2; |
2304 | ps->dpmx_nb_ps_lo = force_high ? 0x2 : 0x3; |
2305 | ps->dpmx_nb_ps_hi = 0x2; |
2306 | } |
2307 | } |
2308 | } |
2309 | |
2310 | static void kv_dpm_power_level_enabled_for_throttle(struct amdgpu_device *adev, |
2311 | u32 index, bool enable) |
2312 | { |
2313 | struct kv_power_info *pi = kv_get_pi(adev); |
2314 | |
2315 | pi->graphics_level[index].EnabledForThrottle = enable ? 1 : 0; |
2316 | } |
2317 | |
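/* program the deep sleep divider ID for each valid graphics DPM level */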
2318 | static int kv_calculate_ds_divider(struct amdgpu_device *adev) |
2319 | { |
2320 | struct kv_power_info *pi = kv_get_pi(adev); |
2321 | u32 sclk_in_sr = 10000; /* ??? */ |
2322 | u32 i; |
2323 | |
2324 | if (pi->lowest_valid > pi->highest_valid) |
2325 | return -EINVAL; |
2326 | |
2327 | for (i = pi->lowest_valid; i <= pi->highest_valid; i++) { |
2328 | pi->graphics_level[i].DeepSleepDivId = |
2329 | kv_get_sleep_divider_id_from_clock(adev, |
2330 | be32_to_cpu(pi->graphics_level[i].SclkFrequency), |
2331 | sclk_in_sr); |
2332 | } |
2333 | return 0; |
2334 | } |
2335 | |
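/*
 * Set the GnbSlow/ForceNbPs1/UpH hints for each graphics level based on
 * memory clock, active display count and battery state.
 */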
2336 | static int kv_calculate_nbps_level_settings(struct amdgpu_device *adev) |
2337 | { |
2338 | struct kv_power_info *pi = kv_get_pi(adev); |
2339 | u32 i; |
2340 | bool force_high; |
2341 | struct amdgpu_clock_and_voltage_limits *max_limits = |
2342 | &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac; |
2343 | u32 mclk = max_limits->mclk; |
2344 | |
2345 | if (pi->lowest_valid > pi->highest_valid) |
2346 | return -EINVAL; |
2347 | |
2348 | if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) { |
2349 | for (i = pi->lowest_valid; i <= pi->highest_valid; i++) { |
2350 | pi->graphics_level[i].GnbSlow = 1; |
2351 | pi->graphics_level[i].ForceNbPs1 = 0; |
2352 | pi->graphics_level[i].UpH = 0; |
2353 | } |
2354 | |
2355 | if (!pi->sys_info.nb_dpm_enable) |
2356 | return 0; |
2357 | |
2358 | force_high = ((mclk >= pi->sys_info.nbp_memory_clock[3]) || |
2359 | (adev->pm.dpm.new_active_crtc_count >= 3) || pi->video_start); |
2360 | |
2361 | if (force_high) { |
2362 | for (i = pi->lowest_valid; i <= pi->highest_valid; i++) |
2363 | pi->graphics_level[i].GnbSlow = 0; |
2364 | } else { |
2365 | if (pi->battery_state) |
2366 | pi->graphics_level[0].ForceNbPs1 = 1; |
2367 | |
2368 | pi->graphics_level[1].GnbSlow = 0; |
2369 | pi->graphics_level[2].GnbSlow = 0; |
2370 | pi->graphics_level[3].GnbSlow = 0; |
2371 | pi->graphics_level[4].GnbSlow = 0; |
2372 | } |
2373 | } else { |
2374 | for (i = pi->lowest_valid; i <= pi->highest_valid; i++) { |
2375 | pi->graphics_level[i].GnbSlow = 1; |
2376 | pi->graphics_level[i].ForceNbPs1 = 0; |
2377 | pi->graphics_level[i].UpH = 0; |
2378 | } |
2379 | |
2380 | if (pi->sys_info.nb_dpm_enable && pi->battery_state) { |
2381 | pi->graphics_level[pi->lowest_valid].UpH = 0x28; |
2382 | pi->graphics_level[pi->lowest_valid].GnbSlow = 0; |
2383 | if (pi->lowest_valid != pi->highest_valid) |
2384 | pi->graphics_level[pi->lowest_valid].ForceNbPs1 = 1; |
2385 | } |
2386 | } |
2387 | return 0; |
2388 | } |
2389 | |
2390 | static int kv_calculate_dpm_settings(struct amdgpu_device *adev) |
2391 | { |
2392 | struct kv_power_info *pi = kv_get_pi(adev); |
2393 | u32 i; |
2394 | |
2395 | if (pi->lowest_valid > pi->highest_valid) |
2396 | return -EINVAL; |
2397 | |
2398 | for (i = pi->lowest_valid; i <= pi->highest_valid; i++) |
2399 | pi->graphics_level[i].DisplayWatermark = (i == pi->highest_valid) ? 1 : 0; |
2400 | |
2401 | return 0; |
2402 | } |
2403 | |
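/*
 * Build the graphics DPM level table from the vddc-sclk dependency table
 * (or the sclk voltage mapping table) and leave all levels disabled until
 * the valid range is enabled.
 */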
2404 | static void kv_init_graphics_levels(struct amdgpu_device *adev) |
2405 | { |
2406 | struct kv_power_info *pi = kv_get_pi(adev); |
2407 | u32 i; |
2408 | struct amdgpu_clock_voltage_dependency_table *table = |
2409 | &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; |
2410 | |
2411 | if (table && table->count) { |
2412 | u32 vid_2bit; |
2413 | |
2414 | pi->graphics_dpm_level_count = 0; |
2415 | for (i = 0; i < table->count; i++) { |
2416 | if (pi->high_voltage_t && |
2417 | (pi->high_voltage_t < |
2418 | kv_convert_8bit_index_to_voltage(adev, table->entries[i].v))) |
2419 | break; |
2420 | |
2421 | kv_set_divider_value(adev, i, table->entries[i].clk); |
2422 | vid_2bit = kv_convert_vid7_to_vid2(adev, |
2423 | &pi->sys_info.vid_mapping_table, |
2424 | table->entries[i].v); |
2425 | kv_set_vid(adev, i, vid_2bit); |
2426 | kv_set_at(adev, i, pi->at[i]); |
2427 | kv_dpm_power_level_enabled_for_throttle(adev, i, true); |
2428 | pi->graphics_dpm_level_count++; |
2429 | } |
2430 | } else { |
2431 | struct sumo_sclk_voltage_mapping_table *table = |
2432 | &pi->sys_info.sclk_voltage_mapping_table; |
2433 | |
2434 | pi->graphics_dpm_level_count = 0; |
2435 | for (i = 0; i < table->num_max_dpm_entries; i++) { |
2436 | if (pi->high_voltage_t && |
2437 | pi->high_voltage_t < |
2438 | kv_convert_2bit_index_to_voltage(adev, table->entries[i].vid_2bit)) |
2439 | break; |
2440 | |
2441 | kv_set_divider_value(adev, i, table->entries[i].sclk_frequency); |
2442 | kv_set_vid(adev, i, table->entries[i].vid_2bit); |
2443 | kv_set_at(adev, i, pi->at[i]); |
2444 | kv_dpm_power_level_enabled_for_throttle(adev, i, true); |
2445 | pi->graphics_dpm_level_count++; |
2446 | } |
2447 | } |
2448 | |
2449 | for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) |
2450 | kv_dpm_power_level_enable(adev, i, false); |
2451 | } |
2452 | |
2453 | static void kv_enable_new_levels(struct amdgpu_device *adev) |
2454 | { |
2455 | struct kv_power_info *pi = kv_get_pi(adev); |
2456 | u32 i; |
2457 | |
2458 | for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) { |
2459 | if (i >= pi->lowest_valid && i <= pi->highest_valid) |
2460 | kv_dpm_power_level_enable(adev, i, true); |
2461 | } |
2462 | } |
2463 | |
2464 | static int kv_set_enabled_level(struct amdgpu_device *adev, u32 level) |
2465 | { |
2466 | u32 new_mask = (1 << level); |
2467 | |
2468 | return amdgpu_kv_send_msg_to_smc_with_parameter(adev, |
2469 | PPSMC_MSG_SCLKDPM_SetEnabledMask, |
2470 | new_mask); |
2471 | } |
2472 | |
2473 | static int kv_set_enabled_levels(struct amdgpu_device *adev) |
2474 | { |
2475 | struct kv_power_info *pi = kv_get_pi(adev); |
2476 | u32 i, new_mask = 0; |
2477 | |
2478 | for (i = pi->lowest_valid; i <= pi->highest_valid; i++) |
2479 | new_mask |= (1 << i); |
2480 | |
2481 | return amdgpu_kv_send_msg_to_smc_with_parameter(adev, |
2482 | PPSMC_MSG_SCLKDPM_SetEnabledMask, |
2483 | new_mask); |
2484 | } |
2485 | |
2486 | static void kv_program_nbps_index_settings(struct amdgpu_device *adev, |
2487 | struct amdgpu_ps *new_rps) |
2488 | { |
2489 | struct kv_ps *new_ps = kv_get_ps(new_rps); |
2490 | struct kv_power_info *pi = kv_get_pi(adev); |
2491 | u32 nbdpmconfig1; |
2492 | |
2493 | if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) |
2494 | return; |
2495 | |
2496 | if (pi->sys_info.nb_dpm_enable) { |
2497 | nbdpmconfig1 = RREG32_SMC(ixNB_DPM_CONFIG_1); |
2498 | nbdpmconfig1 &= ~(NB_DPM_CONFIG_1__Dpm0PgNbPsLo_MASK | |
2499 | NB_DPM_CONFIG_1__Dpm0PgNbPsHi_MASK | |
2500 | NB_DPM_CONFIG_1__DpmXNbPsLo_MASK | |
2501 | NB_DPM_CONFIG_1__DpmXNbPsHi_MASK); |
2502 | nbdpmconfig1 |= (new_ps->dpm0_pg_nb_ps_lo << NB_DPM_CONFIG_1__Dpm0PgNbPsLo__SHIFT) | |
2503 | (new_ps->dpm0_pg_nb_ps_hi << NB_DPM_CONFIG_1__Dpm0PgNbPsHi__SHIFT) | |
2504 | (new_ps->dpmx_nb_ps_lo << NB_DPM_CONFIG_1__DpmXNbPsLo__SHIFT) | |
2505 | (new_ps->dpmx_nb_ps_hi << NB_DPM_CONFIG_1__DpmXNbPsHi__SHIFT); |
2506 | WREG32_SMC(ixNB_DPM_CONFIG_1, nbdpmconfig1); |
2507 | } |
2508 | } |
2509 | |
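/* program the low/high thermal interrupt trip points and record the range */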
2510 | static int kv_set_thermal_temperature_range(struct amdgpu_device *adev, |
2511 | int min_temp, int max_temp) |
2512 | { |
2513 | int low_temp = 0 * 1000; |
2514 | int high_temp = 255 * 1000; |
2515 | u32 tmp; |
2516 | |
2517 | if (low_temp < min_temp) |
2518 | low_temp = min_temp; |
2519 | if (high_temp > max_temp) |
2520 | high_temp = max_temp; |
2521 | if (high_temp < low_temp) { |
2522 | DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp); |
2523 | return -EINVAL; |
2524 | } |
2525 | |
2526 | tmp = RREG32_SMC(ixCG_THERMAL_INT_CTRL); |
2527 | tmp &= ~(CG_THERMAL_INT_CTRL__DIG_THERM_INTH_MASK | |
2528 | CG_THERMAL_INT_CTRL__DIG_THERM_INTL_MASK); |
2529 | tmp |= ((49 + (high_temp / 1000)) << CG_THERMAL_INT_CTRL__DIG_THERM_INTH__SHIFT) | |
2530 | ((49 + (low_temp / 1000)) << CG_THERMAL_INT_CTRL__DIG_THERM_INTL__SHIFT); |
2531 | WREG32_SMC(ixCG_THERMAL_INT_CTRL, tmp); |
2532 | |
2533 | adev->pm.dpm.thermal.min_temp = low_temp; |
2534 | adev->pm.dpm.thermal.max_temp = high_temp; |
2535 | |
2536 | return 0; |
2537 | } |
2538 | |
2539 | union igp_info { |
2540 | struct _ATOM_INTEGRATED_SYSTEM_INFO info; |
2541 | struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2; |
2542 | struct _ATOM_INTEGRATED_SYSTEM_INFO_V5 info_5; |
2543 | struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6; |
2544 | struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7; |
2545 | struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8; |
2546 | }; |
2547 | |
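/*
 * Parse the ATOM IntegratedSystemInfo table (rev 8) for boot clocks,
 * thermal limits, NB DPM config and the sclk/voltage mapping tables.
 */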
2548 | static int kv_parse_sys_info_table(struct amdgpu_device *adev) |
2549 | { |
2550 | struct kv_power_info *pi = kv_get_pi(adev); |
2551 | struct amdgpu_mode_info *mode_info = &adev->mode_info; |
2552 | int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo); |
2553 | union igp_info *igp_info; |
2554 | u8 frev, crev; |
2555 | u16 data_offset; |
2556 | int i; |
2557 | |
2558 | if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, |
2559 | &frev, &crev, &data_offset)) { |
2560 | igp_info = (union igp_info *)(mode_info->atom_context->bios + |
2561 | data_offset); |
2562 | |
2563 | if (crev != 8) { |
2564 | DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev); |
2565 | return -EINVAL; |
2566 | } |
2567 | pi->sys_info.bootup_sclk = le32_to_cpu(igp_info->info_8.ulBootUpEngineClock); |
2568 | pi->sys_info.bootup_uma_clk = le32_to_cpu(igp_info->info_8.ulBootUpUMAClock); |
2569 | pi->sys_info.bootup_nb_voltage_index = |
2570 | le16_to_cpu(igp_info->info_8.usBootUpNBVoltage); |
2571 | if (igp_info->info_8.ucHtcTmpLmt == 0) |
2572 | pi->sys_info.htc_tmp_lmt = 203; |
2573 | else |
2574 | pi->sys_info.htc_tmp_lmt = igp_info->info_8.ucHtcTmpLmt; |
2575 | if (igp_info->info_8.ucHtcHystLmt == 0) |
2576 | pi->sys_info.htc_hyst_lmt = 5; |
2577 | else |
2578 | pi->sys_info.htc_hyst_lmt = igp_info->info_8.ucHtcHystLmt; |
2579 | if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) { |
2580 | DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n"); |
2581 | } |
2582 | |
2583 | if (le32_to_cpu(igp_info->info_8.ulSystemConfig) & (1 << 3)) |
2584 | pi->sys_info.nb_dpm_enable = true; |
2585 | else |
2586 | pi->sys_info.nb_dpm_enable = false; |
2587 | |
2588 | for (i = 0; i < KV_NUM_NBPSTATES; i++) { |
2589 | pi->sys_info.nbp_memory_clock[i] = |
2590 | le32_to_cpu(igp_info->info_8.ulNbpStateMemclkFreq[i]); |
2591 | pi->sys_info.nbp_n_clock[i] = |
2592 | le32_to_cpu(igp_info->info_8.ulNbpStateNClkFreq[i]); |
2593 | } |
2594 | if (le32_to_cpu(igp_info->info_8.ulGPUCapInfo) & |
2595 | SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS) |
2596 | pi->caps_enable_dfs_bypass = true; |
2597 | |
2598 | sumo_construct_sclk_voltage_mapping_table(adev, |
2599 | &pi->sys_info.sclk_voltage_mapping_table, |
2600 | igp_info->info_8.sAvail_SCLK); |
2601 | |
2602 | sumo_construct_vid_mapping_table(adev, |
2603 | &pi->sys_info.vid_mapping_table, |
2604 | igp_info->info_8.sAvail_SCLK); |
2605 | |
2606 | kv_construct_max_power_limits_table(adev, |
2607 | &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac); |
2608 | } |
2609 | return 0; |
2610 | } |
2611 | |
2612 | union power_info { |
2613 | struct _ATOM_POWERPLAY_INFO info; |
2614 | struct _ATOM_POWERPLAY_INFO_V2 info_2; |
2615 | struct _ATOM_POWERPLAY_INFO_V3 info_3; |
2616 | struct _ATOM_PPLIB_POWERPLAYTABLE pplib; |
2617 | struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2; |
2618 | struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3; |
2619 | }; |
2620 | |
2621 | union pplib_clock_info { |
2622 | struct _ATOM_PPLIB_R600_CLOCK_INFO r600; |
2623 | struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780; |
2624 | struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen; |
2625 | struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo; |
2626 | }; |
2627 | |
2628 | union pplib_power_state { |
2629 | struct _ATOM_PPLIB_STATE v1; |
2630 | struct _ATOM_PPLIB_STATE_V2 v2; |
2631 | }; |
2632 | |
2633 | static void kv_patch_boot_state(struct amdgpu_device *adev, |
2634 | struct kv_ps *ps) |
2635 | { |
2636 | struct kv_power_info *pi = kv_get_pi(adev); |
2637 | |
2638 | ps->num_levels = 1; |
2639 | ps->levels[0] = pi->boot_pl; |
2640 | } |
2641 | |
2642 | static void kv_parse_pplib_non_clock_info(struct amdgpu_device *adev, |
2643 | struct amdgpu_ps *rps, |
2644 | struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info, |
2645 | u8 table_rev) |
2646 | { |
2647 | struct kv_ps *ps = kv_get_ps(rps); |
2648 | |
2649 | rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings); |
2650 | rps->class = le16_to_cpu(non_clock_info->usClassification); |
2651 | rps->class2 = le16_to_cpu(non_clock_info->usClassification2); |
2652 | |
2653 | if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) { |
2654 | rps->vclk = le32_to_cpu(non_clock_info->ulVCLK); |
2655 | rps->dclk = le32_to_cpu(non_clock_info->ulDCLK); |
2656 | } else { |
2657 | rps->vclk = 0; |
2658 | rps->dclk = 0; |
2659 | } |
2660 | |
2661 | if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) { |
2662 | adev->pm.dpm.boot_ps = rps; |
2663 | kv_patch_boot_state(adev, ps); |
2664 | } |
2665 | if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) |
2666 | adev->pm.dpm.uvd_ps = rps; |
2667 | } |
2668 | |
2669 | static void kv_parse_pplib_clock_info(struct amdgpu_device *adev, |
2670 | struct amdgpu_ps *rps, int index, |
2671 | union pplib_clock_info *clock_info) |
2672 | { |
2673 | struct kv_power_info *pi = kv_get_pi(adev); |
2674 | struct kv_ps *ps = kv_get_ps(rps); |
2675 | struct kv_pl *pl = &ps->levels[index]; |
2676 | u32 sclk; |
2677 | |
2678 | sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow); |
2679 | sclk |= clock_info->sumo.ucEngineClockHigh << 16; |
2680 | pl->sclk = sclk; |
2681 | pl->vddc_index = clock_info->sumo.vddcIndex; |
2682 | |
2683 | ps->num_levels = index + 1; |
2684 | |
2685 | if (pi->caps_sclk_ds) { |
2686 | pl->ds_divider_index = 5; |
2687 | pl->ss_divider_index = 5; |
2688 | } |
2689 | } |
2690 | |
2691 | static int kv_parse_power_table(struct amdgpu_device *adev) |
2692 | { |
2693 | struct amdgpu_mode_info *mode_info = &adev->mode_info; |
2694 | struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info; |
2695 | union pplib_power_state *power_state; |
2696 | int i, j, k, non_clock_array_index, clock_array_index; |
2697 | union pplib_clock_info *clock_info; |
2698 | struct _StateArray *state_array; |
2699 | struct _ClockInfoArray *clock_info_array; |
2700 | struct _NonClockInfoArray *non_clock_info_array; |
2701 | union power_info *power_info; |
2702 | int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); |
2703 | u16 data_offset; |
2704 | u8 frev, crev; |
2705 | u8 *power_state_offset; |
2706 | struct kv_ps *ps; |
2707 | |
2708 | if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, |
2709 | &frev, &crev, &data_offset)) |
2710 | return -EINVAL; |
2711 | power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); |
2712 | |
2713 | amdgpu_add_thermal_controller(adev); |
2714 | |
2715 | state_array = (struct _StateArray *) |
2716 | (mode_info->atom_context->bios + data_offset + |
2717 | le16_to_cpu(power_info->pplib.usStateArrayOffset)); |
2718 | clock_info_array = (struct _ClockInfoArray *) |
2719 | (mode_info->atom_context->bios + data_offset + |
2720 | le16_to_cpu(power_info->pplib.usClockInfoArrayOffset)); |
2721 | non_clock_info_array = (struct _NonClockInfoArray *) |
2722 | (mode_info->atom_context->bios + data_offset + |
2723 | le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset)); |
2724 | |
2725 | adev->pm.dpm.ps = kcalloc(state_array->ucNumEntries, |
2726 | sizeof(struct amdgpu_ps), |
2727 | GFP_KERNEL); |
2728 | if (!adev->pm.dpm.ps) |
2729 | return -ENOMEM; |
2730 | power_state_offset = (u8 *)state_array->states; |
2731 | for (i = 0; i < state_array->ucNumEntries; i++) { |
2732 | u8 *idx; |
2733 | power_state = (union pplib_power_state *)power_state_offset; |
2734 | non_clock_array_index = power_state->v2.nonClockInfoIndex; |
2735 | non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) |
2736 | &non_clock_info_array->nonClockInfo[non_clock_array_index]; |
2737 | ps = kzalloc(sizeof(struct kv_ps), GFP_KERNEL); |
2738 | if (ps == NULL) |
2739 | return -ENOMEM; |
2740 | adev->pm.dpm.ps[i].ps_priv = ps; |
2741 | k = 0; |
2742 | idx = (u8 *)&power_state->v2.clockInfoIndex[0]; |
2743 | for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) { |
2744 | clock_array_index = idx[j]; |
2745 | if (clock_array_index >= clock_info_array->ucNumEntries) |
2746 | continue; |
2747 | if (k >= SUMO_MAX_HARDWARE_POWERLEVELS) |
2748 | break; |
2749 | clock_info = (union pplib_clock_info *) |
2750 | ((u8 *)&clock_info_array->clockInfo[0] + |
2751 | (clock_array_index * clock_info_array->ucEntrySize)); |
2752 | kv_parse_pplib_clock_info(adev, |
2753 | &adev->pm.dpm.ps[i], k, |
2754 | clock_info); |
2755 | k++; |
2756 | } |
2757 | kv_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i], |
2758 | non_clock_info, |
2759 | non_clock_info_array->ucEntrySize); |
2760 | power_state_offset += 2 + power_state->v2.ucNumDPMLevels; |
2761 | } |
2762 | adev->pm.dpm.num_ps = state_array->ucNumEntries; |
2763 | |
2764 | /* fill in the vce power states */ |
2765 | for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) { |
2766 | u32 sclk; |
2767 | clock_array_index = adev->pm.dpm.vce_states[i].clk_idx; |
2768 | clock_info = (union pplib_clock_info *) |
2769 | &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize]; |
2770 | sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow); |
2771 | sclk |= clock_info->sumo.ucEngineClockHigh << 16; |
2772 | adev->pm.dpm.vce_states[i].sclk = sclk; |
2773 | adev->pm.dpm.vce_states[i].mclk = 0; |
2774 | } |
2775 | |
2776 | return 0; |
2777 | } |
2778 | |
2779 | static int kv_dpm_init(struct amdgpu_device *adev) |
2780 | { |
2781 | struct kv_power_info *pi; |
2782 | int ret, i; |
2783 | |
2784 | pi = kzalloc(sizeof(struct kv_power_info), GFP_KERNEL); |
2785 | if (pi == NULL) |
2786 | return -ENOMEM; |
2787 | adev->pm.dpm.priv = pi; |
2788 | |
2789 | ret = amdgpu_get_platform_caps(adev); |
2790 | if (ret) |
2791 | return ret; |
2792 | |
2793 | ret = amdgpu_parse_extended_power_table(adev); |
2794 | if (ret) |
2795 | return ret; |
2796 | |
2797 | for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) |
2798 | pi->at[i] = TRINITY_AT_DFLT; |
2799 | |
2800 | pi->sram_end = SMC_RAM_END; |
2801 | |
2802 | pi->enable_nb_dpm = true; |
2803 | |
2804 | pi->caps_power_containment = true; |
2805 | pi->caps_cac = true; |
2806 | pi->enable_didt = false; |
2807 | if (pi->enable_didt) { |
2808 | pi->caps_sq_ramping = true; |
2809 | pi->caps_db_ramping = true; |
2810 | pi->caps_td_ramping = true; |
2811 | pi->caps_tcp_ramping = true; |
2812 | } |
2813 | |
2814 | if (adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK) |
2815 | pi->caps_sclk_ds = true; |
2816 | else |
2817 | pi->caps_sclk_ds = false; |
2818 | |
2819 | pi->enable_auto_thermal_throttling = true; |
2820 | pi->disable_nb_ps3_in_battery = false; |
2821 | if (amdgpu_bapm == 0) |
2822 | pi->bapm_enable = false; |
2823 | else |
2824 | pi->bapm_enable = true; |
2825 | pi->voltage_drop_t = 0; |
2826 | pi->caps_sclk_throttle_low_notification = false; |
2827 | pi->caps_fps = false; /* true? */ |
2828 | pi->caps_uvd_pg = (adev->pg_flags & AMD_PG_SUPPORT_UVD) ? true : false; |
2829 | pi->caps_uvd_dpm = true; |
2830 | pi->caps_vce_pg = (adev->pg_flags & AMD_PG_SUPPORT_VCE) ? true : false; |
2831 | pi->caps_samu_pg = (adev->pg_flags & AMD_PG_SUPPORT_SAMU) ? true : false; |
2832 | pi->caps_acp_pg = (adev->pg_flags & AMD_PG_SUPPORT_ACP) ? true : false; |
2833 | pi->caps_stable_p_state = false; |
2834 | |
2835 | ret = kv_parse_sys_info_table(adev); |
2836 | if (ret) |
2837 | return ret; |
2838 | |
2839 | kv_patch_voltage_values(adev); |
2840 | kv_construct_boot_state(adev); |
2841 | |
2842 | ret = kv_parse_power_table(adev); |
2843 | if (ret) |
2844 | return ret; |
2845 | |
2846 | pi->enable_dpm = true; |
2847 | |
2848 | return 0; |
2849 | } |
2850 | |
2851 | static void |
2852 | kv_dpm_debugfs_print_current_performance_level(void *handle, |
2853 | struct seq_file *m) |
2854 | { |
2855 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
2856 | struct kv_power_info *pi = kv_get_pi(adev); |
2857 | u32 current_index = |
2858 | (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) & |
2859 | TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >> |
2860 | TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT; |
2861 | u32 sclk, tmp; |
2862 | u16 vddc; |
2863 | |
2864 | if (current_index >= SMU__NUM_SCLK_DPM_STATE) { |
2865 | seq_printf(m, "invalid dpm profile %d\n", current_index); |
2866 | } else { |
2867 | sclk = be32_to_cpu(pi->graphics_level[current_index].SclkFrequency); |
2868 | tmp = (RREG32_SMC(ixSMU_VOLTAGE_STATUS) & |
2869 | SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL_MASK) >> |
2870 | SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL__SHIFT; |
2871 | vddc = kv_convert_8bit_index_to_voltage(adev, (u16)tmp); |
2872 | seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en"); |
2873 | seq_printf(m, "vce %sabled\n", pi->vce_power_gated ? "dis" : "en"); |
2874 | seq_printf(m, "power level %d sclk: %u vddc: %u\n", |
2875 | current_index, sclk, vddc); |
2876 | } |
2877 | } |
2878 | |
2879 | static void |
2880 | kv_dpm_print_power_state(void *handle, void *request_ps) |
2881 | { |
2882 | int i; |
2883 | struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps; |
2884 | struct kv_ps *ps = kv_get_ps(rps); |
2885 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
2886 | |
2887 | amdgpu_dpm_print_class_info(rps->class, rps->class2); |
2888 | amdgpu_dpm_print_cap_info(rps->caps); |
2889 | printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); |
2890 | for (i = 0; i < ps->num_levels; i++) { |
2891 | struct kv_pl *pl = &ps->levels[i]; |
2892 | printk("\t\tpower level %d sclk: %u vddc: %u\n", |
2893 | i, pl->sclk, |
2894 | kv_convert_8bit_index_to_voltage(adev, pl->vddc_index)); |
2895 | } |
2896 | amdgpu_dpm_print_ps_status(adev, rps); |
2897 | } |
2898 | |
2899 | static void kv_dpm_fini(struct amdgpu_device *adev) |
2900 | { |
2901 | int i; |
2902 | |
2903 | for (i = 0; i < adev->pm.dpm.num_ps; i++) { |
2904 | kfree(adev->pm.dpm.ps[i].ps_priv); |
2905 | } |
2906 | kfree(adev->pm.dpm.ps); |
2907 | kfree(adev->pm.dpm.priv); |
2908 | amdgpu_free_extended_power_table(adev); |
2909 | } |
2910 | |
2911 | static void kv_dpm_display_configuration_changed(void *handle) |
2912 | { |
2913 | |
2914 | } |
2915 | |
2916 | static u32 kv_dpm_get_sclk(void *handle, bool low) |
2917 | { |
2918 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
2919 | struct kv_power_info *pi = kv_get_pi(adev); |
2920 | struct kv_ps *requested_state = kv_get_ps(&pi->requested_rps); |
2921 | |
2922 | if (low) |
2923 | return requested_state->levels[0].sclk; |
2924 | else |
2925 | return requested_state->levels[requested_state->num_levels - 1].sclk; |
2926 | } |
2927 | |
2928 | static u32 kv_dpm_get_mclk(void *handle, bool low) |
2929 | { |
2930 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
2931 | struct kv_power_info *pi = kv_get_pi(adev); |
2932 | |
2933 | return pi->sys_info.bootup_uma_clk; |
2934 | } |
2935 | |
2936 | /* get temperature in millidegrees */ |
2937 | static int kv_dpm_get_temp(void *handle) |
2938 | { |
2939 | u32 temp; |
2940 | int actual_temp = 0; |
2941 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
2942 | |
2943 | temp = RREG32_SMC(0xC0300E0C); |
2944 | |
2945 | if (temp) |
2946 | actual_temp = (temp / 8) - 49; |
2947 | else |
2948 | actual_temp = 0; |
2949 | |
2950 | actual_temp = actual_temp * 1000; |
2951 | |
2952 | return actual_temp; |
2953 | } |
2954 | |
2955 | static int kv_dpm_early_init(void *handle) |
2956 | { |
2957 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
2958 | |
2959 | adev->powerplay.pp_funcs = &kv_dpm_funcs; |
2960 | adev->powerplay.pp_handle = adev; |
2961 | kv_dpm_set_irq_funcs(adev); |
2962 | |
2963 | return 0; |
2964 | } |
2965 | |
2966 | static int kv_dpm_late_init(void *handle) |
2967 | { |
2968 | /* powerdown unused blocks for now */ |
2969 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
2970 | |
2971 | if (!adev->pm.dpm_enabled) |
2972 | return 0; |
2973 | |
2974 | kv_dpm_powergate_acp(adev, true); |
2975 | kv_dpm_powergate_samu(adev, true); |
2976 | |
2977 | return 0; |
2978 | } |
2979 | |
2980 | static int kv_dpm_sw_init(void *handle) |
2981 | { |
2982 | int ret; |
2983 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
2984 | |
2985 | ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 230, |
2986 | &adev->pm.dpm.thermal.irq); |
2987 | if (ret) |
2988 | return ret; |
2989 | |
2990 | ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 231, |
2991 | &adev->pm.dpm.thermal.irq); |
2992 | if (ret) |
2993 | return ret; |
2994 | |
2995 | /* default to balanced state */ |
2996 | adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED; |
2997 | adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED; |
2998 | adev->pm.dpm.forced_level = AMD_DPM_FORCED_LEVEL_AUTO; |
2999 | adev->pm.default_sclk = adev->clock.default_sclk; |
3000 | adev->pm.default_mclk = adev->clock.default_mclk; |
3001 | adev->pm.current_sclk = adev->clock.default_sclk; |
3002 | adev->pm.current_mclk = adev->clock.default_mclk; |
3003 | adev->pm.int_thermal_type = THERMAL_TYPE_NONE; |
3004 | |
3005 | if (amdgpu_dpm == 0) |
3006 | return 0; |
3007 | |
3008 | INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler); |
3009 | ret = kv_dpm_init(adev); |
3010 | if (ret) |
3011 | goto dpm_failed; |
3012 | adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; |
3013 | if (amdgpu_dpm == 1) |
3014 | amdgpu_pm_print_power_states(adev); |
3015 | DRM_INFO("amdgpu: dpm initialized\n"); |
3016 | |
3017 | return 0; |
3018 | |
3019 | dpm_failed: |
3020 | kv_dpm_fini(adev); |
3021 | DRM_ERROR("amdgpu: dpm initialization failed\n"); |
3022 | return ret; |
3023 | } |
3024 | |
3025 | static int kv_dpm_sw_fini(void *handle) |
3026 | { |
3027 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
3028 | |
3029 | flush_work(&adev->pm.dpm.thermal.work); |
3030 | |
3031 | kv_dpm_fini(adev); |
3032 | |
3033 | return 0; |
3034 | } |
3035 | |
3036 | static int kv_dpm_hw_init(void *handle) |
3037 | { |
3038 | int ret; |
3039 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
3040 | |
3041 | if (!amdgpu_dpm) |
3042 | return 0; |
3043 | |
3044 | kv_dpm_setup_asic(adev); |
3045 | ret = kv_dpm_enable(adev); |
3046 | if (ret) |
3047 | adev->pm.dpm_enabled = false; |
3048 | else |
3049 | adev->pm.dpm_enabled = true; |
3050 | amdgpu_legacy_dpm_compute_clocks(adev); |
3051 | return ret; |
3052 | } |
3053 | |
3054 | static int kv_dpm_hw_fini(void *handle) |
3055 | { |
3056 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
3057 | |
3058 | if (adev->pm.dpm_enabled) |
3059 | kv_dpm_disable(adev); |
3060 | |
3061 | return 0; |
3062 | } |
3063 | |
3064 | static int kv_dpm_suspend(void *handle) |
3065 | { |
3066 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
3067 | |
3068 | if (adev->pm.dpm_enabled) { |
3069 | /* disable dpm */ |
3070 | kv_dpm_disable(adev); |
3071 | /* reset the power state */ |
3072 | adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; |
3073 | } |
3074 | return 0; |
3075 | } |
3076 | |
3077 | static int kv_dpm_resume(void *handle) |
3078 | { |
3079 | int ret; |
3080 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
3081 | |
3082 | if (adev->pm.dpm_enabled) { |
3083 | /* asic init will reset to the boot state */ |
3084 | kv_dpm_setup_asic(adev); |
3085 | ret = kv_dpm_enable(adev); |
3086 | if (ret) |
3087 | adev->pm.dpm_enabled = false; |
3088 | else |
3089 | adev->pm.dpm_enabled = true; |
3090 | if (adev->pm.dpm_enabled) |
3091 | amdgpu_legacy_dpm_compute_clocks(adev); |
3092 | } |
3093 | return 0; |
3094 | } |
3095 | |
3096 | static bool kv_dpm_is_idle(void *handle) |
3097 | { |
3098 | return true; |
3099 | } |
3100 | |
3101 | static int kv_dpm_wait_for_idle(void *handle) |
3102 | { |
3103 | return 0; |
3104 | } |
3105 | |
3106 | |
3107 | static int kv_dpm_soft_reset(void *handle) |
3108 | { |
3109 | return 0; |
3110 | } |
3111 | |
3112 | static int kv_dpm_set_interrupt_state(struct amdgpu_device *adev, |
3113 | struct amdgpu_irq_src *src, |
3114 | unsigned type, |
3115 | enum amdgpu_interrupt_state state) |
3116 | { |
3117 | u32 cg_thermal_int; |
3118 | |
3119 | switch (type) { |
3120 | case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH: |
3121 | switch (state) { |
3122 | case AMDGPU_IRQ_STATE_DISABLE: |
3123 | cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL); |
3124 | cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; |
3125 | WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int); |
3126 | break; |
3127 | case AMDGPU_IRQ_STATE_ENABLE: |
3128 | cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL); |
3129 | cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; |
3130 | WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int); |
3131 | break; |
3132 | default: |
3133 | break; |
3134 | } |
3135 | break; |
3136 | |
3137 | case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW: |
3138 | switch (state) { |
3139 | case AMDGPU_IRQ_STATE_DISABLE: |
3140 | cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL); |
3141 | cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; |
3142 | WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int); |
3143 | break; |
3144 | case AMDGPU_IRQ_STATE_ENABLE: |
3145 | cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL); |
3146 | cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; |
3147 | WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int); |
3148 | break; |
3149 | default: |
3150 | break; |
3151 | } |
3152 | break; |
3153 | |
3154 | default: |
3155 | break; |
3156 | } |
3157 | return 0; |
3158 | } |
3159 | |
3160 | static int kv_dpm_process_interrupt(struct amdgpu_device *adev, |
3161 | struct amdgpu_irq_src *source, |
3162 | struct amdgpu_iv_entry *entry) |
3163 | { |
3164 | bool queue_thermal = false; |
3165 | |
3166 | if (entry == NULL) |
3167 | return -EINVAL; |
3168 | |
3169 | switch (entry->src_id) { |
3170 | case 230: /* thermal low to high */ |
3171 | DRM_DEBUG("IH: thermal low to high\n"); |
3172 | adev->pm.dpm.thermal.high_to_low = false; |
3173 | queue_thermal = true; |
3174 | break; |
3175 | case 231: /* thermal high to low */ |
3176 | DRM_DEBUG("IH: thermal high to low\n"); |
3177 | adev->pm.dpm.thermal.high_to_low = true; |
3178 | queue_thermal = true; |
3179 | break; |
3180 | default: |
3181 | break; |
3182 | } |
3183 | |
3184 | if (queue_thermal) |
3185 | schedule_work(&adev->pm.dpm.thermal.work); |
3186 | |
3187 | return 0; |
3188 | } |
3189 | |
3190 | static int kv_dpm_set_clockgating_state(void *handle, |
3191 | enum amd_clockgating_state state) |
3192 | { |
3193 | return 0; |
3194 | } |
3195 | |
3196 | static int kv_dpm_set_powergating_state(void *handle, |
3197 | enum amd_powergating_state state) |
3198 | { |
3199 | return 0; |
3200 | } |
3201 | |
3202 | static inline bool kv_are_power_levels_equal(const struct kv_pl *kv_cpl1, |
3203 | const struct kv_pl *kv_cpl2) |
3204 | { |
3205 | return ((kv_cpl1->sclk == kv_cpl2->sclk) && |
3206 | (kv_cpl1->vddc_index == kv_cpl2->vddc_index) && |
3207 | (kv_cpl1->ds_divider_index == kv_cpl2->ds_divider_index) && |
3208 | (kv_cpl1->force_nbp_state == kv_cpl2->force_nbp_state)); |
3209 | } |
3210 | |
3211 | static int kv_check_state_equal(void *handle, |
3212 | void *current_ps, |
3213 | void *request_ps, |
3214 | bool *equal) |
3215 | { |
3216 | struct kv_ps *kv_cps; |
3217 | struct kv_ps *kv_rps; |
3218 | int i; |
3219 | struct amdgpu_ps *cps = (struct amdgpu_ps *)current_ps; |
3220 | struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps; |
3221 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
3222 | |
3223 | if (adev == NULL || cps == NULL || rps == NULL || equal == NULL) |
3224 | return -EINVAL; |
3225 | |
	kv_cps = kv_get_ps(cps);
3227 | kv_rps = kv_get_ps(rps); |
3228 | |
3229 | if (kv_cps == NULL) { |
3230 | *equal = false; |
3231 | return 0; |
3232 | } |
3233 | |
3234 | if (kv_cps->num_levels != kv_rps->num_levels) { |
3235 | *equal = false; |
3236 | return 0; |
3237 | } |
3238 | |
3239 | for (i = 0; i < kv_cps->num_levels; i++) { |
		if (!kv_are_power_levels_equal(&(kv_cps->levels[i]),
					&(kv_rps->levels[i]))) {
3242 | *equal = false; |
3243 | return 0; |
3244 | } |
3245 | } |
3246 | |
3247 | /* If all performance levels are the same try to use the UVD clocks to break the tie.*/ |
3248 | *equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk)); |
3249 | *equal &= ((cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk)); |
3250 | |
3251 | return 0; |
3252 | } |
3253 | |
3254 | static int kv_dpm_read_sensor(void *handle, int idx, |
3255 | void *value, int *size) |
3256 | { |
3257 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
3258 | struct kv_power_info *pi = kv_get_pi(adev); |
3259 | uint32_t sclk; |
3260 | u32 pl_index = |
3261 | (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) & |
3262 | TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >> |
3263 | TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT; |
3264 | |
3265 | /* size must be at least 4 bytes for all sensors */ |
3266 | if (*size < 4) |
3267 | return -EINVAL; |
3268 | |
3269 | switch (idx) { |
3270 | case AMDGPU_PP_SENSOR_GFX_SCLK: |
3271 | if (pl_index < SMU__NUM_SCLK_DPM_STATE) { |
3272 | sclk = be32_to_cpu( |
3273 | pi->graphics_level[pl_index].SclkFrequency); |
3274 | *((uint32_t *)value) = sclk; |
3275 | *size = 4; |
3276 | return 0; |
3277 | } |
3278 | return -EINVAL; |
3279 | case AMDGPU_PP_SENSOR_GPU_TEMP: |
		*((uint32_t *)value) = kv_dpm_get_temp(adev);
3281 | *size = 4; |
3282 | return 0; |
3283 | default: |
3284 | return -EOPNOTSUPP; |
3285 | } |
3286 | } |
3287 | |
3288 | static int kv_set_powergating_by_smu(void *handle, |
3289 | uint32_t block_type, bool gate) |
3290 | { |
3291 | switch (block_type) { |
3292 | case AMD_IP_BLOCK_TYPE_UVD: |
3293 | kv_dpm_powergate_uvd(handle, gate); |
3294 | break; |
3295 | case AMD_IP_BLOCK_TYPE_VCE: |
3296 | kv_dpm_powergate_vce(handle, gate); |
3297 | break; |
3298 | default: |
3299 | break; |
3300 | } |
3301 | return 0; |
3302 | } |
3303 | |
3304 | static const struct amd_ip_funcs kv_dpm_ip_funcs = { |
3305 | .name = "kv_dpm" , |
3306 | .early_init = kv_dpm_early_init, |
3307 | .late_init = kv_dpm_late_init, |
3308 | .sw_init = kv_dpm_sw_init, |
3309 | .sw_fini = kv_dpm_sw_fini, |
3310 | .hw_init = kv_dpm_hw_init, |
3311 | .hw_fini = kv_dpm_hw_fini, |
3312 | .suspend = kv_dpm_suspend, |
3313 | .resume = kv_dpm_resume, |
3314 | .is_idle = kv_dpm_is_idle, |
3315 | .wait_for_idle = kv_dpm_wait_for_idle, |
3316 | .soft_reset = kv_dpm_soft_reset, |
3317 | .set_clockgating_state = kv_dpm_set_clockgating_state, |
3318 | .set_powergating_state = kv_dpm_set_powergating_state, |
3319 | }; |
3320 | |
3321 | const struct amdgpu_ip_block_version kv_smu_ip_block = { |
3322 | .type = AMD_IP_BLOCK_TYPE_SMC, |
3323 | .major = 1, |
3324 | .minor = 0, |
3325 | .rev = 0, |
3326 | .funcs = &kv_dpm_ip_funcs, |
3327 | }; |
3328 | |
3329 | static const struct amd_pm_funcs kv_dpm_funcs = { |
3330 | .pre_set_power_state = &kv_dpm_pre_set_power_state, |
3331 | .set_power_state = &kv_dpm_set_power_state, |
3332 | .post_set_power_state = &kv_dpm_post_set_power_state, |
3333 | .display_configuration_changed = &kv_dpm_display_configuration_changed, |
3334 | .get_sclk = &kv_dpm_get_sclk, |
3335 | .get_mclk = &kv_dpm_get_mclk, |
3336 | .print_power_state = &kv_dpm_print_power_state, |
3337 | .debugfs_print_current_performance_level = &kv_dpm_debugfs_print_current_performance_level, |
3338 | .force_performance_level = &kv_dpm_force_performance_level, |
3339 | .set_powergating_by_smu = kv_set_powergating_by_smu, |
3340 | .enable_bapm = &kv_dpm_enable_bapm, |
3341 | .get_vce_clock_state = amdgpu_get_vce_clock_state, |
3342 | .check_state_equal = kv_check_state_equal, |
3343 | .read_sensor = &kv_dpm_read_sensor, |
3344 | .pm_compute_clocks = amdgpu_legacy_dpm_compute_clocks, |
3345 | }; |
3346 | |
3347 | static const struct amdgpu_irq_src_funcs kv_dpm_irq_funcs = { |
3348 | .set = kv_dpm_set_interrupt_state, |
3349 | .process = kv_dpm_process_interrupt, |
3350 | }; |
3351 | |
3352 | static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev) |
3353 | { |
3354 | adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST; |
3355 | adev->pm.dpm.thermal.irq.funcs = &kv_dpm_irq_funcs; |
3356 | } |
3357 | |