1 | /* |
2 | * Copyright 2017 Advanced Micro Devices, Inc. |
3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), |
6 | * to deal in the Software without restriction, including without limitation |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
8 | * and/or sell copies of the Software, and to permit persons to whom the |
9 | * Software is furnished to do so, subject to the following conditions: |
10 | * |
11 | * The above copyright notice and this permission notice shall be included in |
12 | * all copies or substantial portions of the Software. |
13 | * |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
20 | * OTHER DEALINGS IN THE SOFTWARE. |
21 | * |
22 | * Authors: Rafał Miłecki <zajec5@gmail.com> |
23 | * Alex Deucher <alexdeucher@gmail.com> |
24 | */ |
25 | #include <drm/drmP.h> |
26 | #include "amdgpu.h" |
27 | #include "amdgpu_drv.h" |
28 | #include "amdgpu_pm.h" |
29 | #include "amdgpu_dpm.h" |
30 | #include "amdgpu_display.h" |
31 | #include "atom.h" |
32 | #include <linux/power_supply.h> |
33 | #include <linux/hwmon.h> |
34 | #include <linux/hwmon-sysfs.h> |
35 | #include <linux/nospec.h> |
36 | #include "hwmgr.h" |
37 | #define WIDTH_4K 3840 |
38 | |
39 | static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev); |
40 | |
/*
 * Human-readable names for the AMD clockgating (CG) support flags.
 * Each entry pairs an AMD_CG_SUPPORT_* bit with the label printed for
 * it; the table is terminated by a {0, NULL} sentinel.  Presumably
 * consumed by the pm debugfs/clockgating status output — the consumer
 * is outside this chunk.
 */
static const struct cg_flag_name clocks[] = {
	{AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating" },
	{AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep" },
	{AMD_CG_SUPPORT_GFX_CGCG, "Graphics Coarse Grain Clock Gating" },
	{AMD_CG_SUPPORT_GFX_CGLS, "Graphics Coarse Grain memory Light Sleep" },
	{AMD_CG_SUPPORT_GFX_CGTS, "Graphics Coarse Grain Tree Shader Clock Gating" },
	{AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep" },
	{AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep" },
	{AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep" },
	{AMD_CG_SUPPORT_GFX_3D_CGCG, "Graphics 3D Coarse Grain Clock Gating" },
	{AMD_CG_SUPPORT_GFX_3D_CGLS, "Graphics 3D Coarse Grain memory Light Sleep" },
	{AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep" },
	{AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating" },
	{AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep" },
	{AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating" },
	{AMD_CG_SUPPORT_BIF_MGCG, "Bus Interface Medium Grain Clock Gating" },
	{AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep" },
	{AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating" },
	{AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating" },
	{AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep" },
	{AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating" },
	{AMD_CG_SUPPORT_DRM_MGCG, "Digital Right Management Medium Grain Clock Gating" },
	{AMD_CG_SUPPORT_DRM_LS, "Digital Right Management Light Sleep" },
	{AMD_CG_SUPPORT_ROM_MGCG, "Rom Medium Grain Clock Gating" },
	{AMD_CG_SUPPORT_DF_MGCG, "Data Fabric Medium Grain Clock Gating" },
	{0, NULL},
};
68 | |
69 | void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev) |
70 | { |
71 | if (adev->pm.dpm_enabled) { |
72 | mutex_lock(&adev->pm.mutex); |
73 | if (power_supply_is_system_supplied() > 0) |
74 | adev->pm.ac_power = true; |
75 | else |
76 | adev->pm.ac_power = false; |
77 | if (adev->powerplay.pp_funcs->enable_bapm) |
78 | amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power); |
79 | mutex_unlock(&adev->pm.mutex); |
80 | } |
81 | } |
82 | |
83 | /** |
84 | * DOC: power_dpm_state |
85 | * |
86 | * The power_dpm_state file is a legacy interface and is only provided for |
87 | * backwards compatibility. The amdgpu driver provides a sysfs API for adjusting |
88 | * certain power related parameters. The file power_dpm_state is used for this. |
89 | * It accepts the following arguments: |
90 | * |
91 | * - battery |
92 | * |
93 | * - balanced |
94 | * |
95 | * - performance |
96 | * |
97 | * battery |
98 | * |
99 | * On older GPUs, the vbios provided a special power state for battery |
100 | * operation. Selecting battery switched to this state. This is no |
101 | * longer provided on newer GPUs so the option does nothing in that case. |
102 | * |
103 | * balanced |
104 | * |
105 | * On older GPUs, the vbios provided a special power state for balanced |
106 | * operation. Selecting balanced switched to this state. This is no |
107 | * longer provided on newer GPUs so the option does nothing in that case. |
108 | * |
109 | * performance |
110 | * |
111 | * On older GPUs, the vbios provided a special power state for performance |
112 | * operation. Selecting performance switched to this state. This is no |
113 | * longer provided on newer GPUs so the option does nothing in that case. |
114 | * |
115 | */ |
116 | |
117 | static ssize_t amdgpu_get_dpm_state(struct device *dev, |
118 | struct device_attribute *attr, |
119 | char *buf) |
120 | { |
121 | struct drm_device *ddev = dev_get_drvdata(dev); |
122 | struct amdgpu_device *adev = ddev->dev_private; |
123 | enum amd_pm_state_type pm; |
124 | |
125 | if (adev->powerplay.pp_funcs->get_current_power_state) |
126 | pm = amdgpu_dpm_get_current_power_state(adev); |
127 | else |
128 | pm = adev->pm.dpm.user_state; |
129 | |
130 | return snprintf(buf, PAGE_SIZE, "%s\n" , |
131 | (pm == POWER_STATE_TYPE_BATTERY) ? "battery" : |
132 | (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance" ); |
133 | } |
134 | |
135 | static ssize_t amdgpu_set_dpm_state(struct device *dev, |
136 | struct device_attribute *attr, |
137 | const char *buf, |
138 | size_t count) |
139 | { |
140 | struct drm_device *ddev = dev_get_drvdata(dev); |
141 | struct amdgpu_device *adev = ddev->dev_private; |
142 | enum amd_pm_state_type state; |
143 | |
144 | if (strncmp("battery" , buf, strlen("battery" )) == 0) |
145 | state = POWER_STATE_TYPE_BATTERY; |
146 | else if (strncmp("balanced" , buf, strlen("balanced" )) == 0) |
147 | state = POWER_STATE_TYPE_BALANCED; |
148 | else if (strncmp("performance" , buf, strlen("performance" )) == 0) |
149 | state = POWER_STATE_TYPE_PERFORMANCE; |
150 | else { |
151 | count = -EINVAL; |
152 | goto fail; |
153 | } |
154 | |
155 | if (adev->powerplay.pp_funcs->dispatch_tasks) { |
156 | amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state); |
157 | } else { |
158 | mutex_lock(&adev->pm.mutex); |
159 | adev->pm.dpm.user_state = state; |
160 | mutex_unlock(&adev->pm.mutex); |
161 | |
162 | /* Can't set dpm state when the card is off */ |
163 | if (!(adev->flags & AMD_IS_PX) || |
164 | (ddev->switch_power_state == DRM_SWITCH_POWER_ON)) |
165 | amdgpu_pm_compute_clocks(adev); |
166 | } |
167 | fail: |
168 | return count; |
169 | } |
170 | |
171 | |
172 | /** |
173 | * DOC: power_dpm_force_performance_level |
174 | * |
175 | * The amdgpu driver provides a sysfs API for adjusting certain power |
176 | * related parameters. The file power_dpm_force_performance_level is |
177 | * used for this. It accepts the following arguments: |
178 | * |
179 | * - auto |
180 | * |
181 | * - low |
182 | * |
183 | * - high |
184 | * |
185 | * - manual |
186 | * |
187 | * - profile_standard |
188 | * |
189 | * - profile_min_sclk |
190 | * |
191 | * - profile_min_mclk |
192 | * |
193 | * - profile_peak |
194 | * |
195 | * auto |
196 | * |
197 | * When auto is selected, the driver will attempt to dynamically select |
198 | * the optimal power profile for current conditions in the driver. |
199 | * |
200 | * low |
201 | * |
202 | * When low is selected, the clocks are forced to the lowest power state. |
203 | * |
204 | * high |
205 | * |
206 | * When high is selected, the clocks are forced to the highest power state. |
207 | * |
208 | * manual |
209 | * |
210 | * When manual is selected, the user can manually adjust which power states |
211 | * are enabled for each clock domain via the sysfs pp_dpm_mclk, pp_dpm_sclk, |
212 | * and pp_dpm_pcie files and adjust the power state transition heuristics |
213 | * via the pp_power_profile_mode sysfs file. |
214 | * |
215 | * profile_standard |
216 | * profile_min_sclk |
217 | * profile_min_mclk |
218 | * profile_peak |
219 | * |
220 | * When the profiling modes are selected, clock and power gating are |
221 | * disabled and the clocks are set for different profiling cases. This |
222 | * mode is recommended for profiling specific work loads where you do |
223 | * not want clock or power gating for clock fluctuation to interfere |
224 | * with your results. profile_standard sets the clocks to a fixed clock |
225 | * level which varies from asic to asic. profile_min_sclk forces the sclk |
226 | * to the lowest level. profile_min_mclk forces the mclk to the lowest level. |
227 | * profile_peak sets all clocks (mclk, sclk, pcie) to the highest levels. |
228 | * |
229 | */ |
230 | |
231 | static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev, |
232 | struct device_attribute *attr, |
233 | char *buf) |
234 | { |
235 | struct drm_device *ddev = dev_get_drvdata(dev); |
236 | struct amdgpu_device *adev = ddev->dev_private; |
237 | enum amd_dpm_forced_level level = 0xff; |
238 | |
239 | if ((adev->flags & AMD_IS_PX) && |
240 | (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) |
241 | return snprintf(buf, PAGE_SIZE, "off\n" ); |
242 | |
243 | if (adev->powerplay.pp_funcs->get_performance_level) |
244 | level = amdgpu_dpm_get_performance_level(adev); |
245 | else |
246 | level = adev->pm.dpm.forced_level; |
247 | |
248 | return snprintf(buf, PAGE_SIZE, "%s\n" , |
249 | (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" : |
250 | (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" : |
251 | (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" : |
252 | (level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" : |
253 | (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" : |
254 | (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" : |
255 | (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" : |
256 | (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" : |
257 | "unknown" ); |
258 | } |
259 | |
260 | static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev, |
261 | struct device_attribute *attr, |
262 | const char *buf, |
263 | size_t count) |
264 | { |
265 | struct drm_device *ddev = dev_get_drvdata(dev); |
266 | struct amdgpu_device *adev = ddev->dev_private; |
267 | enum amd_dpm_forced_level level; |
268 | enum amd_dpm_forced_level current_level = 0xff; |
269 | int ret = 0; |
270 | |
271 | /* Can't force performance level when the card is off */ |
272 | if ((adev->flags & AMD_IS_PX) && |
273 | (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) |
274 | return -EINVAL; |
275 | |
276 | if (adev->powerplay.pp_funcs->get_performance_level) |
277 | current_level = amdgpu_dpm_get_performance_level(adev); |
278 | |
279 | if (strncmp("low" , buf, strlen("low" )) == 0) { |
280 | level = AMD_DPM_FORCED_LEVEL_LOW; |
281 | } else if (strncmp("high" , buf, strlen("high" )) == 0) { |
282 | level = AMD_DPM_FORCED_LEVEL_HIGH; |
283 | } else if (strncmp("auto" , buf, strlen("auto" )) == 0) { |
284 | level = AMD_DPM_FORCED_LEVEL_AUTO; |
285 | } else if (strncmp("manual" , buf, strlen("manual" )) == 0) { |
286 | level = AMD_DPM_FORCED_LEVEL_MANUAL; |
287 | } else if (strncmp("profile_exit" , buf, strlen("profile_exit" )) == 0) { |
288 | level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT; |
289 | } else if (strncmp("profile_standard" , buf, strlen("profile_standard" )) == 0) { |
290 | level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD; |
291 | } else if (strncmp("profile_min_sclk" , buf, strlen("profile_min_sclk" )) == 0) { |
292 | level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK; |
293 | } else if (strncmp("profile_min_mclk" , buf, strlen("profile_min_mclk" )) == 0) { |
294 | level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK; |
295 | } else if (strncmp("profile_peak" , buf, strlen("profile_peak" )) == 0) { |
296 | level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK; |
297 | } else { |
298 | count = -EINVAL; |
299 | goto fail; |
300 | } |
301 | |
302 | if (current_level == level) |
303 | return count; |
304 | |
305 | if (adev->powerplay.pp_funcs->force_performance_level) { |
306 | mutex_lock(&adev->pm.mutex); |
307 | if (adev->pm.dpm.thermal_active) { |
308 | count = -EINVAL; |
309 | mutex_unlock(&adev->pm.mutex); |
310 | goto fail; |
311 | } |
312 | ret = amdgpu_dpm_force_performance_level(adev, level); |
313 | if (ret) |
314 | count = -EINVAL; |
315 | else |
316 | adev->pm.dpm.forced_level = level; |
317 | mutex_unlock(&adev->pm.mutex); |
318 | } |
319 | |
320 | fail: |
321 | return count; |
322 | } |
323 | |
324 | static ssize_t amdgpu_get_pp_num_states(struct device *dev, |
325 | struct device_attribute *attr, |
326 | char *buf) |
327 | { |
328 | struct drm_device *ddev = dev_get_drvdata(dev); |
329 | struct amdgpu_device *adev = ddev->dev_private; |
330 | struct pp_states_info data; |
331 | int i, buf_len; |
332 | |
333 | if (adev->powerplay.pp_funcs->get_pp_num_states) |
334 | amdgpu_dpm_get_pp_num_states(adev, &data); |
335 | |
336 | buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n" , data.nums); |
337 | for (i = 0; i < data.nums; i++) |
338 | buf_len += snprintf(buf + buf_len, PAGE_SIZE, "%d %s\n" , i, |
339 | (data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" : |
340 | (data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" : |
341 | (data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" : |
342 | (data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? "performance" : "default" ); |
343 | |
344 | return buf_len; |
345 | } |
346 | |
347 | static ssize_t amdgpu_get_pp_cur_state(struct device *dev, |
348 | struct device_attribute *attr, |
349 | char *buf) |
350 | { |
351 | struct drm_device *ddev = dev_get_drvdata(dev); |
352 | struct amdgpu_device *adev = ddev->dev_private; |
353 | struct pp_states_info data; |
354 | enum amd_pm_state_type pm = 0; |
355 | int i = 0; |
356 | |
357 | if (adev->powerplay.pp_funcs->get_current_power_state |
358 | && adev->powerplay.pp_funcs->get_pp_num_states) { |
359 | pm = amdgpu_dpm_get_current_power_state(adev); |
360 | amdgpu_dpm_get_pp_num_states(adev, &data); |
361 | |
362 | for (i = 0; i < data.nums; i++) { |
363 | if (pm == data.states[i]) |
364 | break; |
365 | } |
366 | |
367 | if (i == data.nums) |
368 | i = -EINVAL; |
369 | } |
370 | |
371 | return snprintf(buf, PAGE_SIZE, "%d\n" , i); |
372 | } |
373 | |
374 | static ssize_t amdgpu_get_pp_force_state(struct device *dev, |
375 | struct device_attribute *attr, |
376 | char *buf) |
377 | { |
378 | struct drm_device *ddev = dev_get_drvdata(dev); |
379 | struct amdgpu_device *adev = ddev->dev_private; |
380 | |
381 | if (adev->pp_force_state_enabled) |
382 | return amdgpu_get_pp_cur_state(dev, attr, buf); |
383 | else |
384 | return snprintf(buf, PAGE_SIZE, "\n" ); |
385 | } |
386 | |
/*
 * Store handler for pp_force_state.
 *
 * An empty write (a lone newline, strlen == 1) disables state forcing.
 * Otherwise the buffer is parsed as an index into the backend's power
 * state table; boot/default states are never forced.  The index is
 * clamped with array_index_nospec() BEFORE being used to index
 * data.states — keep that ordering, it defends against Spectre-v1
 * speculative out-of-bounds reads.
 */
static ssize_t amdgpu_set_pp_force_state(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_pm_state_type state = 0;
	unsigned long idx;
	int ret;

	if (strlen(buf) == 1)
		adev->pp_force_state_enabled = false;
	else if (adev->powerplay.pp_funcs->dispatch_tasks &&
			adev->powerplay.pp_funcs->get_pp_num_states) {
		struct pp_states_info data;

		/* Parse the user-supplied state index and bounds-check it. */
		ret = kstrtoul(buf, 0, &idx);
		if (ret || idx >= ARRAY_SIZE(data.states)) {
			count = -EINVAL;
			goto fail;
		}
		idx = array_index_nospec(idx, ARRAY_SIZE(data.states));

		amdgpu_dpm_get_pp_num_states(adev, &data);
		state = data.states[idx];
		/* only set user selected power states */
		if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
		    state != POWER_STATE_TYPE_DEFAULT) {
			amdgpu_dpm_dispatch_task(adev,
					AMD_PP_TASK_ENABLE_USER_STATE, &state);
			adev->pp_force_state_enabled = true;
		}
	}
fail:
	return count;
}
424 | |
425 | /** |
426 | * DOC: pp_table |
427 | * |
428 | * The amdgpu driver provides a sysfs API for uploading new powerplay |
429 | * tables. The file pp_table is used for this. Reading the file |
430 | * will dump the current power play table. Writing to the file |
431 | * will attempt to upload a new powerplay table and re-initialize |
432 | * powerplay using that new table. |
433 | * |
434 | */ |
435 | |
436 | static ssize_t amdgpu_get_pp_table(struct device *dev, |
437 | struct device_attribute *attr, |
438 | char *buf) |
439 | { |
440 | struct drm_device *ddev = dev_get_drvdata(dev); |
441 | struct amdgpu_device *adev = ddev->dev_private; |
442 | char *table = NULL; |
443 | int size; |
444 | |
445 | if (adev->powerplay.pp_funcs->get_pp_table) |
446 | size = amdgpu_dpm_get_pp_table(adev, &table); |
447 | else |
448 | return 0; |
449 | |
450 | if (size >= PAGE_SIZE) |
451 | size = PAGE_SIZE - 1; |
452 | |
453 | memcpy(buf, table, size); |
454 | |
455 | return size; |
456 | } |
457 | |
458 | static ssize_t amdgpu_set_pp_table(struct device *dev, |
459 | struct device_attribute *attr, |
460 | const char *buf, |
461 | size_t count) |
462 | { |
463 | struct drm_device *ddev = dev_get_drvdata(dev); |
464 | struct amdgpu_device *adev = ddev->dev_private; |
465 | |
466 | if (adev->powerplay.pp_funcs->set_pp_table) |
467 | amdgpu_dpm_set_pp_table(adev, buf, count); |
468 | |
469 | return count; |
470 | } |
471 | |
472 | /** |
473 | * DOC: pp_od_clk_voltage |
474 | * |
475 | * The amdgpu driver provides a sysfs API for adjusting the clocks and voltages |
476 | * in each power level within a power state. The pp_od_clk_voltage is used for |
477 | * this. |
478 | * |
479 | * < For Vega10 and previous ASICs > |
480 | * |
481 | * Reading the file will display: |
482 | * |
483 | * - a list of engine clock levels and voltages labeled OD_SCLK |
484 | * |
485 | * - a list of memory clock levels and voltages labeled OD_MCLK |
486 | * |
487 | * - a list of valid ranges for sclk, mclk, and voltage labeled OD_RANGE |
488 | * |
489 | * To manually adjust these settings, first select manual using |
490 | * power_dpm_force_performance_level. Enter a new value for each |
491 | * level by writing a string that contains "s/m level clock voltage" to |
492 | * the file. E.g., "s 1 500 820" will update sclk level 1 to be 500 MHz |
493 | * at 820 mV; "m 0 350 810" will update mclk level 0 to be 350 MHz at |
494 | * 810 mV. When you have edited all of the states as needed, write |
495 | * "c" (commit) to the file to commit your changes. If you want to reset to the |
496 | * default power levels, write "r" (reset) to the file to reset them. |
497 | * |
498 | * |
499 | * < For Vega20 > |
500 | * |
501 | * Reading the file will display: |
502 | * |
503 | * - minimum and maximum engine clock labeled OD_SCLK |
504 | * |
505 | * - maximum memory clock labeled OD_MCLK |
506 | * |
507 | * - three <frequency, voltage> points labeled OD_VDDC_CURVE. |
508 | * They can be used to calibrate the sclk voltage curve. |
509 | * |
510 | * - a list of valid ranges for sclk, mclk, and voltage curve points |
511 | * labeled OD_RANGE |
512 | * |
513 | * To manually adjust these settings: |
514 | * |
515 | * - First select manual using power_dpm_force_performance_level |
516 | * |
517 | * - For clock frequency setting, enter a new value by writing a |
518 | * string that contains "s/m index clock" to the file. The index |
519 | * should be 0 if to set minimum clock. And 1 if to set maximum |
520 | * clock. E.g., "s 0 500" will update minimum sclk to be 500 MHz. |
521 | * "m 1 800" will update maximum mclk to be 800Mhz. |
522 | * |
523 | * For sclk voltage curve, enter the new values by writing a |
524 | * string that contains "vc point clock voltage" to the file. The |
525 | * points are indexed by 0, 1 and 2. E.g., "vc 0 300 600" will |
526 | * update point1 with clock set as 300Mhz and voltage as |
527 | * 600mV. "vc 2 1000 1000" will update point3 with clock set |
528 | * as 1000Mhz and voltage 1000mV. |
529 | * |
530 | * - When you have edited all of the states as needed, write "c" (commit) |
531 | * to the file to commit your changes |
532 | * |
533 | * - If you want to reset to the default power levels, write "r" (reset) |
534 | * to the file to reset them |
535 | * |
536 | */ |
537 | |
538 | static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev, |
539 | struct device_attribute *attr, |
540 | const char *buf, |
541 | size_t count) |
542 | { |
543 | struct drm_device *ddev = dev_get_drvdata(dev); |
544 | struct amdgpu_device *adev = ddev->dev_private; |
545 | int ret; |
546 | uint32_t parameter_size = 0; |
547 | long parameter[64]; |
548 | char buf_cpy[128]; |
549 | char *tmp_str; |
550 | char *sub_str; |
551 | const char delimiter[3] = {' ', '\n', '\0'}; |
552 | uint32_t type; |
553 | |
554 | if (count > 127) |
555 | return -EINVAL; |
556 | |
557 | if (*buf == 's') |
558 | type = PP_OD_EDIT_SCLK_VDDC_TABLE; |
559 | else if (*buf == 'm') |
560 | type = PP_OD_EDIT_MCLK_VDDC_TABLE; |
561 | else if(*buf == 'r') |
562 | type = PP_OD_RESTORE_DEFAULT_TABLE; |
563 | else if (*buf == 'c') |
564 | type = PP_OD_COMMIT_DPM_TABLE; |
565 | else if (!strncmp(buf, "vc" , 2)) |
566 | type = PP_OD_EDIT_VDDC_CURVE; |
567 | else |
568 | return -EINVAL; |
569 | |
570 | memcpy(buf_cpy, buf, count+1); |
571 | |
572 | tmp_str = buf_cpy; |
573 | |
574 | if (type == PP_OD_EDIT_VDDC_CURVE) |
575 | tmp_str++; |
576 | while (isspace(*++tmp_str)); |
577 | |
578 | while (tmp_str[0]) { |
579 | sub_str = strsep(&tmp_str, delimiter); |
580 | ret = kstrtol(sub_str, 0, ¶meter[parameter_size]); |
581 | if (ret) |
582 | return -EINVAL; |
583 | parameter_size++; |
584 | |
585 | while (isspace(*tmp_str)) |
586 | tmp_str++; |
587 | } |
588 | |
589 | if (adev->powerplay.pp_funcs->odn_edit_dpm_table) |
590 | ret = amdgpu_dpm_odn_edit_dpm_table(adev, type, |
591 | parameter, parameter_size); |
592 | |
593 | if (ret) |
594 | return -EINVAL; |
595 | |
596 | if (type == PP_OD_COMMIT_DPM_TABLE) { |
597 | if (adev->powerplay.pp_funcs->dispatch_tasks) { |
598 | amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL); |
599 | return count; |
600 | } else { |
601 | return -EINVAL; |
602 | } |
603 | } |
604 | |
605 | return count; |
606 | } |
607 | |
608 | static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev, |
609 | struct device_attribute *attr, |
610 | char *buf) |
611 | { |
612 | struct drm_device *ddev = dev_get_drvdata(dev); |
613 | struct amdgpu_device *adev = ddev->dev_private; |
614 | uint32_t size = 0; |
615 | |
616 | if (adev->powerplay.pp_funcs->print_clock_levels) { |
617 | size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf); |
618 | size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size); |
619 | size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf+size); |
620 | size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf+size); |
621 | return size; |
622 | } else { |
623 | return snprintf(buf, PAGE_SIZE, "\n" ); |
624 | } |
625 | |
626 | } |
627 | |
628 | /** |
629 | * DOC: ppfeatures |
630 | * |
631 | * The amdgpu driver provides a sysfs API for adjusting what powerplay |
632 | * features to be enabled. The file ppfeatures is used for this. And |
633 | * this is only available for Vega10 and later dGPUs. |
634 | * |
 * Reading back the file will show you the following:
636 | * - Current ppfeature masks |
637 | * - List of the all supported powerplay features with their naming, |
638 | * bitmasks and enablement status('Y'/'N' means "enabled"/"disabled"). |
639 | * |
640 | * To manually enable or disable a specific feature, just set or clear |
641 | * the corresponding bit from original ppfeature masks and input the |
642 | * new ppfeature masks. |
643 | */ |
644 | static ssize_t amdgpu_set_ppfeature_status(struct device *dev, |
645 | struct device_attribute *attr, |
646 | const char *buf, |
647 | size_t count) |
648 | { |
649 | struct drm_device *ddev = dev_get_drvdata(dev); |
650 | struct amdgpu_device *adev = ddev->dev_private; |
651 | uint64_t featuremask; |
652 | int ret; |
653 | |
654 | ret = kstrtou64(buf, 0, &featuremask); |
655 | if (ret) |
656 | return -EINVAL; |
657 | |
658 | pr_debug("featuremask = 0x%llx\n" , featuremask); |
659 | |
660 | if (adev->powerplay.pp_funcs->set_ppfeature_status) { |
661 | ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask); |
662 | if (ret) |
663 | return -EINVAL; |
664 | } |
665 | |
666 | return count; |
667 | } |
668 | |
669 | static ssize_t amdgpu_get_ppfeature_status(struct device *dev, |
670 | struct device_attribute *attr, |
671 | char *buf) |
672 | { |
673 | struct drm_device *ddev = dev_get_drvdata(dev); |
674 | struct amdgpu_device *adev = ddev->dev_private; |
675 | |
676 | if (adev->powerplay.pp_funcs->get_ppfeature_status) |
677 | return amdgpu_dpm_get_ppfeature_status(adev, buf); |
678 | |
679 | return snprintf(buf, PAGE_SIZE, "\n" ); |
680 | } |
681 | |
682 | /** |
683 | * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk |
684 | * pp_dpm_pcie |
685 | * |
686 | * The amdgpu driver provides a sysfs API for adjusting what power levels |
687 | * are enabled for a given power state. The files pp_dpm_sclk, pp_dpm_mclk, |
688 | * pp_dpm_socclk, pp_dpm_fclk, pp_dpm_dcefclk and pp_dpm_pcie are used for |
689 | * this. |
690 | * |
691 | * pp_dpm_socclk and pp_dpm_dcefclk interfaces are only available for |
692 | * Vega10 and later ASICs. |
693 | * pp_dpm_fclk interface is only available for Vega20 and later ASICs. |
694 | * |
695 | * Reading back the files will show you the available power levels within |
696 | * the power state and the clock information for those levels. |
697 | * |
698 | * To manually adjust these states, first select manual using |
699 | * power_dpm_force_performance_level. |
 * Second, enter a new value for each level by writing a space-separated
 * list of level indices to the file, e.g.
 * "echo 4 5 6 > pp_dpm_sclk" will enable sclk levels 4, 5, and 6.
703 | * |
704 | * NOTE: change to the dcefclk max dpm level is not supported now |
705 | */ |
706 | |
707 | static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev, |
708 | struct device_attribute *attr, |
709 | char *buf) |
710 | { |
711 | struct drm_device *ddev = dev_get_drvdata(dev); |
712 | struct amdgpu_device *adev = ddev->dev_private; |
713 | |
714 | if (adev->powerplay.pp_funcs->print_clock_levels) |
715 | return amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf); |
716 | else |
717 | return snprintf(buf, PAGE_SIZE, "\n" ); |
718 | } |
719 | |
720 | /* |
721 | * Worst case: 32 bits individually specified, in octal at 12 characters |
722 | * per line (+1 for \n). |
723 | */ |
724 | #define AMDGPU_MASK_BUF_MAX (32 * 13) |
725 | |
726 | static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask) |
727 | { |
728 | int ret; |
729 | long level; |
730 | char *sub_str = NULL; |
731 | char *tmp; |
732 | char buf_cpy[AMDGPU_MASK_BUF_MAX + 1]; |
733 | const char delimiter[3] = {' ', '\n', '\0'}; |
734 | size_t bytes; |
735 | |
736 | *mask = 0; |
737 | |
738 | bytes = min(count, sizeof(buf_cpy) - 1); |
739 | memcpy(buf_cpy, buf, bytes); |
740 | buf_cpy[bytes] = '\0'; |
741 | tmp = buf_cpy; |
742 | while (tmp[0]) { |
743 | sub_str = strsep(&tmp, delimiter); |
744 | if (strlen(sub_str)) { |
745 | ret = kstrtol(sub_str, 0, &level); |
746 | if (ret) |
747 | return -EINVAL; |
748 | *mask |= 1 << level; |
749 | } else |
750 | break; |
751 | } |
752 | |
753 | return 0; |
754 | } |
755 | |
756 | static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev, |
757 | struct device_attribute *attr, |
758 | const char *buf, |
759 | size_t count) |
760 | { |
761 | struct drm_device *ddev = dev_get_drvdata(dev); |
762 | struct amdgpu_device *adev = ddev->dev_private; |
763 | int ret; |
764 | uint32_t mask = 0; |
765 | |
766 | ret = amdgpu_read_mask(buf, count, &mask); |
767 | if (ret) |
768 | return ret; |
769 | |
770 | if (adev->powerplay.pp_funcs->force_clock_level) |
771 | ret = amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask); |
772 | |
773 | if (ret) |
774 | return -EINVAL; |
775 | |
776 | return count; |
777 | } |
778 | |
779 | static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev, |
780 | struct device_attribute *attr, |
781 | char *buf) |
782 | { |
783 | struct drm_device *ddev = dev_get_drvdata(dev); |
784 | struct amdgpu_device *adev = ddev->dev_private; |
785 | |
786 | if (adev->powerplay.pp_funcs->print_clock_levels) |
787 | return amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf); |
788 | else |
789 | return snprintf(buf, PAGE_SIZE, "\n" ); |
790 | } |
791 | |
792 | static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev, |
793 | struct device_attribute *attr, |
794 | const char *buf, |
795 | size_t count) |
796 | { |
797 | struct drm_device *ddev = dev_get_drvdata(dev); |
798 | struct amdgpu_device *adev = ddev->dev_private; |
799 | int ret; |
800 | uint32_t mask = 0; |
801 | |
802 | ret = amdgpu_read_mask(buf, count, &mask); |
803 | if (ret) |
804 | return ret; |
805 | |
806 | if (adev->powerplay.pp_funcs->force_clock_level) |
807 | ret = amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask); |
808 | |
809 | if (ret) |
810 | return -EINVAL; |
811 | |
812 | return count; |
813 | } |
814 | |
815 | static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev, |
816 | struct device_attribute *attr, |
817 | char *buf) |
818 | { |
819 | struct drm_device *ddev = dev_get_drvdata(dev); |
820 | struct amdgpu_device *adev = ddev->dev_private; |
821 | |
822 | if (adev->powerplay.pp_funcs->print_clock_levels) |
823 | return amdgpu_dpm_print_clock_levels(adev, PP_SOCCLK, buf); |
824 | else |
825 | return snprintf(buf, PAGE_SIZE, "\n" ); |
826 | } |
827 | |
828 | static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev, |
829 | struct device_attribute *attr, |
830 | const char *buf, |
831 | size_t count) |
832 | { |
833 | struct drm_device *ddev = dev_get_drvdata(dev); |
834 | struct amdgpu_device *adev = ddev->dev_private; |
835 | int ret; |
836 | uint32_t mask = 0; |
837 | |
838 | ret = amdgpu_read_mask(buf, count, &mask); |
839 | if (ret) |
840 | return ret; |
841 | |
842 | if (adev->powerplay.pp_funcs->force_clock_level) |
843 | ret = amdgpu_dpm_force_clock_level(adev, PP_SOCCLK, mask); |
844 | |
845 | if (ret) |
846 | return -EINVAL; |
847 | |
848 | return count; |
849 | } |
850 | |
851 | static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev, |
852 | struct device_attribute *attr, |
853 | char *buf) |
854 | { |
855 | struct drm_device *ddev = dev_get_drvdata(dev); |
856 | struct amdgpu_device *adev = ddev->dev_private; |
857 | |
858 | if (adev->powerplay.pp_funcs->print_clock_levels) |
859 | return amdgpu_dpm_print_clock_levels(adev, PP_FCLK, buf); |
860 | else |
861 | return snprintf(buf, PAGE_SIZE, "\n" ); |
862 | } |
863 | |
864 | static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev, |
865 | struct device_attribute *attr, |
866 | const char *buf, |
867 | size_t count) |
868 | { |
869 | struct drm_device *ddev = dev_get_drvdata(dev); |
870 | struct amdgpu_device *adev = ddev->dev_private; |
871 | int ret; |
872 | uint32_t mask = 0; |
873 | |
874 | ret = amdgpu_read_mask(buf, count, &mask); |
875 | if (ret) |
876 | return ret; |
877 | |
878 | if (adev->powerplay.pp_funcs->force_clock_level) |
879 | ret = amdgpu_dpm_force_clock_level(adev, PP_FCLK, mask); |
880 | |
881 | if (ret) |
882 | return -EINVAL; |
883 | |
884 | return count; |
885 | } |
886 | |
887 | static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev, |
888 | struct device_attribute *attr, |
889 | char *buf) |
890 | { |
891 | struct drm_device *ddev = dev_get_drvdata(dev); |
892 | struct amdgpu_device *adev = ddev->dev_private; |
893 | |
894 | if (adev->powerplay.pp_funcs->print_clock_levels) |
895 | return amdgpu_dpm_print_clock_levels(adev, PP_DCEFCLK, buf); |
896 | else |
897 | return snprintf(buf, PAGE_SIZE, "\n" ); |
898 | } |
899 | |
900 | static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev, |
901 | struct device_attribute *attr, |
902 | const char *buf, |
903 | size_t count) |
904 | { |
905 | struct drm_device *ddev = dev_get_drvdata(dev); |
906 | struct amdgpu_device *adev = ddev->dev_private; |
907 | int ret; |
908 | uint32_t mask = 0; |
909 | |
910 | ret = amdgpu_read_mask(buf, count, &mask); |
911 | if (ret) |
912 | return ret; |
913 | |
914 | if (adev->powerplay.pp_funcs->force_clock_level) |
915 | ret = amdgpu_dpm_force_clock_level(adev, PP_DCEFCLK, mask); |
916 | |
917 | if (ret) |
918 | return -EINVAL; |
919 | |
920 | return count; |
921 | } |
922 | |
923 | static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev, |
924 | struct device_attribute *attr, |
925 | char *buf) |
926 | { |
927 | struct drm_device *ddev = dev_get_drvdata(dev); |
928 | struct amdgpu_device *adev = ddev->dev_private; |
929 | |
930 | if (adev->powerplay.pp_funcs->print_clock_levels) |
931 | return amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf); |
932 | else |
933 | return snprintf(buf, PAGE_SIZE, "\n" ); |
934 | } |
935 | |
936 | static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev, |
937 | struct device_attribute *attr, |
938 | const char *buf, |
939 | size_t count) |
940 | { |
941 | struct drm_device *ddev = dev_get_drvdata(dev); |
942 | struct amdgpu_device *adev = ddev->dev_private; |
943 | int ret; |
944 | uint32_t mask = 0; |
945 | |
946 | ret = amdgpu_read_mask(buf, count, &mask); |
947 | if (ret) |
948 | return ret; |
949 | |
950 | if (adev->powerplay.pp_funcs->force_clock_level) |
951 | ret = amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask); |
952 | |
953 | if (ret) |
954 | return -EINVAL; |
955 | |
956 | return count; |
957 | } |
958 | |
959 | static ssize_t amdgpu_get_pp_sclk_od(struct device *dev, |
960 | struct device_attribute *attr, |
961 | char *buf) |
962 | { |
963 | struct drm_device *ddev = dev_get_drvdata(dev); |
964 | struct amdgpu_device *adev = ddev->dev_private; |
965 | uint32_t value = 0; |
966 | |
967 | if (adev->powerplay.pp_funcs->get_sclk_od) |
968 | value = amdgpu_dpm_get_sclk_od(adev); |
969 | |
970 | return snprintf(buf, PAGE_SIZE, "%d\n" , value); |
971 | } |
972 | |
973 | static ssize_t amdgpu_set_pp_sclk_od(struct device *dev, |
974 | struct device_attribute *attr, |
975 | const char *buf, |
976 | size_t count) |
977 | { |
978 | struct drm_device *ddev = dev_get_drvdata(dev); |
979 | struct amdgpu_device *adev = ddev->dev_private; |
980 | int ret; |
981 | long int value; |
982 | |
983 | ret = kstrtol(buf, 0, &value); |
984 | |
985 | if (ret) { |
986 | count = -EINVAL; |
987 | goto fail; |
988 | } |
989 | if (adev->powerplay.pp_funcs->set_sclk_od) |
990 | amdgpu_dpm_set_sclk_od(adev, (uint32_t)value); |
991 | |
992 | if (adev->powerplay.pp_funcs->dispatch_tasks) { |
993 | amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL); |
994 | } else { |
995 | adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; |
996 | amdgpu_pm_compute_clocks(adev); |
997 | } |
998 | |
999 | fail: |
1000 | return count; |
1001 | } |
1002 | |
1003 | static ssize_t amdgpu_get_pp_mclk_od(struct device *dev, |
1004 | struct device_attribute *attr, |
1005 | char *buf) |
1006 | { |
1007 | struct drm_device *ddev = dev_get_drvdata(dev); |
1008 | struct amdgpu_device *adev = ddev->dev_private; |
1009 | uint32_t value = 0; |
1010 | |
1011 | if (adev->powerplay.pp_funcs->get_mclk_od) |
1012 | value = amdgpu_dpm_get_mclk_od(adev); |
1013 | |
1014 | return snprintf(buf, PAGE_SIZE, "%d\n" , value); |
1015 | } |
1016 | |
1017 | static ssize_t amdgpu_set_pp_mclk_od(struct device *dev, |
1018 | struct device_attribute *attr, |
1019 | const char *buf, |
1020 | size_t count) |
1021 | { |
1022 | struct drm_device *ddev = dev_get_drvdata(dev); |
1023 | struct amdgpu_device *adev = ddev->dev_private; |
1024 | int ret; |
1025 | long int value; |
1026 | |
1027 | ret = kstrtol(buf, 0, &value); |
1028 | |
1029 | if (ret) { |
1030 | count = -EINVAL; |
1031 | goto fail; |
1032 | } |
1033 | if (adev->powerplay.pp_funcs->set_mclk_od) |
1034 | amdgpu_dpm_set_mclk_od(adev, (uint32_t)value); |
1035 | |
1036 | if (adev->powerplay.pp_funcs->dispatch_tasks) { |
1037 | amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL); |
1038 | } else { |
1039 | adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; |
1040 | amdgpu_pm_compute_clocks(adev); |
1041 | } |
1042 | |
1043 | fail: |
1044 | return count; |
1045 | } |
1046 | |
1047 | /** |
1048 | * DOC: pp_power_profile_mode |
1049 | * |
1050 | * The amdgpu driver provides a sysfs API for adjusting the heuristics |
1051 | * related to switching between power levels in a power state. The file |
1052 | * pp_power_profile_mode is used for this. |
1053 | * |
1054 | * Reading this file outputs a list of all of the predefined power profiles |
1055 | * and the relevant heuristics settings for that profile. |
1056 | * |
1057 | * To select a profile or create a custom profile, first select manual using |
1058 | * power_dpm_force_performance_level. Writing the number of a predefined |
1059 | * profile to pp_power_profile_mode will enable those heuristics. To |
1060 | * create a custom set of heuristics, write a string of numbers to the file |
1061 | * starting with the number of the custom profile along with a setting |
1062 | * for each heuristic parameter. Due to differences across asic families |
1063 | * the heuristic parameters vary from family to family. |
1064 | * |
1065 | */ |
1066 | |
1067 | static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev, |
1068 | struct device_attribute *attr, |
1069 | char *buf) |
1070 | { |
1071 | struct drm_device *ddev = dev_get_drvdata(dev); |
1072 | struct amdgpu_device *adev = ddev->dev_private; |
1073 | |
1074 | if (adev->powerplay.pp_funcs->get_power_profile_mode) |
1075 | return amdgpu_dpm_get_power_profile_mode(adev, buf); |
1076 | |
1077 | return snprintf(buf, PAGE_SIZE, "\n" ); |
1078 | } |
1079 | |
1080 | |
1081 | static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev, |
1082 | struct device_attribute *attr, |
1083 | const char *buf, |
1084 | size_t count) |
1085 | { |
1086 | int ret = 0xff; |
1087 | struct drm_device *ddev = dev_get_drvdata(dev); |
1088 | struct amdgpu_device *adev = ddev->dev_private; |
1089 | uint32_t parameter_size = 0; |
1090 | long parameter[64]; |
1091 | char *sub_str, buf_cpy[128]; |
1092 | char *tmp_str; |
1093 | uint32_t i = 0; |
1094 | char tmp[2]; |
1095 | long int profile_mode = 0; |
1096 | const char delimiter[3] = {' ', '\n', '\0'}; |
1097 | |
1098 | tmp[0] = *(buf); |
1099 | tmp[1] = '\0'; |
1100 | ret = kstrtol(tmp, 0, &profile_mode); |
1101 | if (ret) |
1102 | goto fail; |
1103 | |
1104 | if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) { |
1105 | if (count < 2 || count > 127) |
1106 | return -EINVAL; |
1107 | while (isspace(*++buf)) |
1108 | i++; |
1109 | memcpy(buf_cpy, buf, count-i); |
1110 | tmp_str = buf_cpy; |
1111 | while (tmp_str[0]) { |
1112 | sub_str = strsep(&tmp_str, delimiter); |
1113 | ret = kstrtol(sub_str, 0, ¶meter[parameter_size]); |
1114 | if (ret) { |
1115 | count = -EINVAL; |
1116 | goto fail; |
1117 | } |
1118 | parameter_size++; |
1119 | while (isspace(*tmp_str)) |
1120 | tmp_str++; |
1121 | } |
1122 | } |
1123 | parameter[parameter_size] = profile_mode; |
1124 | if (adev->powerplay.pp_funcs->set_power_profile_mode) |
1125 | ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size); |
1126 | |
1127 | if (!ret) |
1128 | return count; |
1129 | fail: |
1130 | return -EINVAL; |
1131 | } |
1132 | |
1133 | /** |
1134 | * DOC: busy_percent |
1135 | * |
1136 | * The amdgpu driver provides a sysfs API for reading how busy the GPU |
1137 | * is as a percentage. The file gpu_busy_percent is used for this. |
1138 | * The SMU firmware computes a percentage of load based on the |
1139 | * aggregate activity level in the IP cores. |
1140 | */ |
1141 | static ssize_t amdgpu_get_busy_percent(struct device *dev, |
1142 | struct device_attribute *attr, |
1143 | char *buf) |
1144 | { |
1145 | struct drm_device *ddev = dev_get_drvdata(dev); |
1146 | struct amdgpu_device *adev = ddev->dev_private; |
1147 | int r, value, size = sizeof(value); |
1148 | |
1149 | /* sanity check PP is enabled */ |
1150 | if (!(adev->powerplay.pp_funcs && |
1151 | adev->powerplay.pp_funcs->read_sensor)) |
1152 | return -EINVAL; |
1153 | |
1154 | /* read the IP busy sensor */ |
1155 | r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, |
1156 | (void *)&value, &size); |
1157 | if (r) |
1158 | return r; |
1159 | |
1160 | return snprintf(buf, PAGE_SIZE, "%d\n" , value); |
1161 | } |
1162 | |
1163 | /** |
1164 | * DOC: pcie_bw |
1165 | * |
1166 | * The amdgpu driver provides a sysfs API for estimating how much data |
1167 | * has been received and sent by the GPU in the last second through PCIe. |
1168 | * The file pcie_bw is used for this. |
1169 | * The Perf counters count the number of received and sent messages and return |
1170 | * those values, as well as the maximum payload size of a PCIe packet (mps). |
 * Note that it is not possible to easily and quickly obtain the size of each
 * packet transmitted, so we output the max payload size (mps) to allow for
 * quick estimation of the PCIe bandwidth usage.
1174 | */ |
1175 | static ssize_t amdgpu_get_pcie_bw(struct device *dev, |
1176 | struct device_attribute *attr, |
1177 | char *buf) |
1178 | { |
1179 | struct drm_device *ddev = dev_get_drvdata(dev); |
1180 | struct amdgpu_device *adev = ddev->dev_private; |
1181 | uint64_t count0, count1; |
1182 | |
1183 | amdgpu_asic_get_pcie_usage(adev, &count0, &count1); |
1184 | return snprintf(buf, PAGE_SIZE, "%llu %llu %i\n" , |
1185 | count0, count1, pcie_get_mps(adev->pdev)); |
1186 | } |
1187 | |
/* sysfs attribute declarations wiring the pp_* / power files to the
 * show/store handlers above.  S_IRUGO|S_IWUSR entries are world-readable
 * but only root-writable; read-only files take NULL for the store op. */
static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state);
static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
		   amdgpu_get_dpm_forced_performance_level,
		   amdgpu_set_dpm_forced_performance_level);
static DEVICE_ATTR(pp_num_states, S_IRUGO, amdgpu_get_pp_num_states, NULL);
static DEVICE_ATTR(pp_cur_state, S_IRUGO, amdgpu_get_pp_cur_state, NULL);
static DEVICE_ATTR(pp_force_state, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_force_state,
		amdgpu_set_pp_force_state);
static DEVICE_ATTR(pp_table, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_table,
		amdgpu_set_pp_table);
/* per-domain clock level controls (sclk/mclk/socclk/fclk/dcefclk/pcie) */
static DEVICE_ATTR(pp_dpm_sclk, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_dpm_sclk,
		amdgpu_set_pp_dpm_sclk);
static DEVICE_ATTR(pp_dpm_mclk, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_dpm_mclk,
		amdgpu_set_pp_dpm_mclk);
static DEVICE_ATTR(pp_dpm_socclk, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_dpm_socclk,
		amdgpu_set_pp_dpm_socclk);
static DEVICE_ATTR(pp_dpm_fclk, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_dpm_fclk,
		amdgpu_set_pp_dpm_fclk);
static DEVICE_ATTR(pp_dpm_dcefclk, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_dpm_dcefclk,
		amdgpu_set_pp_dpm_dcefclk);
static DEVICE_ATTR(pp_dpm_pcie, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_dpm_pcie,
		amdgpu_set_pp_dpm_pcie);
/* overdrive percentage controls */
static DEVICE_ATTR(pp_sclk_od, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_sclk_od,
		amdgpu_set_pp_sclk_od);
static DEVICE_ATTR(pp_mclk_od, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_mclk_od,
		amdgpu_set_pp_mclk_od);
static DEVICE_ATTR(pp_power_profile_mode, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_power_profile_mode,
		amdgpu_set_pp_power_profile_mode);
static DEVICE_ATTR(pp_od_clk_voltage, S_IRUGO | S_IWUSR,
		amdgpu_get_pp_od_clk_voltage,
		amdgpu_set_pp_od_clk_voltage);
static DEVICE_ATTR(gpu_busy_percent, S_IRUGO,
		amdgpu_get_busy_percent, NULL);
static DEVICE_ATTR(pcie_bw, S_IRUGO, amdgpu_get_pcie_bw, NULL);
static DEVICE_ATTR(ppfeatures, S_IRUGO | S_IWUSR,
		amdgpu_get_ppfeature_status,
		amdgpu_set_ppfeature_status);
1236 | |
1237 | static ssize_t amdgpu_hwmon_show_temp(struct device *dev, |
1238 | struct device_attribute *attr, |
1239 | char *buf) |
1240 | { |
1241 | struct amdgpu_device *adev = dev_get_drvdata(dev); |
1242 | struct drm_device *ddev = adev->ddev; |
1243 | int r, temp, size = sizeof(temp); |
1244 | |
1245 | /* Can't get temperature when the card is off */ |
1246 | if ((adev->flags & AMD_IS_PX) && |
1247 | (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) |
1248 | return -EINVAL; |
1249 | |
1250 | /* sanity check PP is enabled */ |
1251 | if (!(adev->powerplay.pp_funcs && |
1252 | adev->powerplay.pp_funcs->read_sensor)) |
1253 | return -EINVAL; |
1254 | |
1255 | /* get the temperature */ |
1256 | r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, |
1257 | (void *)&temp, &size); |
1258 | if (r) |
1259 | return r; |
1260 | |
1261 | return snprintf(buf, PAGE_SIZE, "%d\n" , temp); |
1262 | } |
1263 | |
1264 | static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev, |
1265 | struct device_attribute *attr, |
1266 | char *buf) |
1267 | { |
1268 | struct amdgpu_device *adev = dev_get_drvdata(dev); |
1269 | int hyst = to_sensor_dev_attr(attr)->index; |
1270 | int temp; |
1271 | |
1272 | if (hyst) |
1273 | temp = adev->pm.dpm.thermal.min_temp; |
1274 | else |
1275 | temp = adev->pm.dpm.thermal.max_temp; |
1276 | |
1277 | return snprintf(buf, PAGE_SIZE, "%d\n" , temp); |
1278 | } |
1279 | |
1280 | static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev, |
1281 | struct device_attribute *attr, |
1282 | char *buf) |
1283 | { |
1284 | struct amdgpu_device *adev = dev_get_drvdata(dev); |
1285 | u32 pwm_mode = 0; |
1286 | |
1287 | if (!adev->powerplay.pp_funcs->get_fan_control_mode) |
1288 | return -EINVAL; |
1289 | |
1290 | pwm_mode = amdgpu_dpm_get_fan_control_mode(adev); |
1291 | |
1292 | return sprintf(buf, "%i\n" , pwm_mode); |
1293 | } |
1294 | |
1295 | static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev, |
1296 | struct device_attribute *attr, |
1297 | const char *buf, |
1298 | size_t count) |
1299 | { |
1300 | struct amdgpu_device *adev = dev_get_drvdata(dev); |
1301 | int err; |
1302 | int value; |
1303 | |
1304 | /* Can't adjust fan when the card is off */ |
1305 | if ((adev->flags & AMD_IS_PX) && |
1306 | (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON)) |
1307 | return -EINVAL; |
1308 | |
1309 | if (!adev->powerplay.pp_funcs->set_fan_control_mode) |
1310 | return -EINVAL; |
1311 | |
1312 | err = kstrtoint(buf, 10, &value); |
1313 | if (err) |
1314 | return err; |
1315 | |
1316 | amdgpu_dpm_set_fan_control_mode(adev, value); |
1317 | |
1318 | return count; |
1319 | } |
1320 | |
1321 | static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev, |
1322 | struct device_attribute *attr, |
1323 | char *buf) |
1324 | { |
1325 | return sprintf(buf, "%i\n" , 0); |
1326 | } |
1327 | |
1328 | static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev, |
1329 | struct device_attribute *attr, |
1330 | char *buf) |
1331 | { |
1332 | return sprintf(buf, "%i\n" , 255); |
1333 | } |
1334 | |
1335 | static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev, |
1336 | struct device_attribute *attr, |
1337 | const char *buf, size_t count) |
1338 | { |
1339 | struct amdgpu_device *adev = dev_get_drvdata(dev); |
1340 | int err; |
1341 | u32 value; |
1342 | u32 pwm_mode; |
1343 | |
1344 | /* Can't adjust fan when the card is off */ |
1345 | if ((adev->flags & AMD_IS_PX) && |
1346 | (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON)) |
1347 | return -EINVAL; |
1348 | |
1349 | pwm_mode = amdgpu_dpm_get_fan_control_mode(adev); |
1350 | if (pwm_mode != AMD_FAN_CTRL_MANUAL) { |
1351 | pr_info("manual fan speed control should be enabled first\n" ); |
1352 | return -EINVAL; |
1353 | } |
1354 | |
1355 | err = kstrtou32(buf, 10, &value); |
1356 | if (err) |
1357 | return err; |
1358 | |
1359 | value = (value * 100) / 255; |
1360 | |
1361 | if (adev->powerplay.pp_funcs->set_fan_speed_percent) { |
1362 | err = amdgpu_dpm_set_fan_speed_percent(adev, value); |
1363 | if (err) |
1364 | return err; |
1365 | } |
1366 | |
1367 | return count; |
1368 | } |
1369 | |
1370 | static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev, |
1371 | struct device_attribute *attr, |
1372 | char *buf) |
1373 | { |
1374 | struct amdgpu_device *adev = dev_get_drvdata(dev); |
1375 | int err; |
1376 | u32 speed = 0; |
1377 | |
1378 | /* Can't adjust fan when the card is off */ |
1379 | if ((adev->flags & AMD_IS_PX) && |
1380 | (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON)) |
1381 | return -EINVAL; |
1382 | |
1383 | if (adev->powerplay.pp_funcs->get_fan_speed_percent) { |
1384 | err = amdgpu_dpm_get_fan_speed_percent(adev, &speed); |
1385 | if (err) |
1386 | return err; |
1387 | } |
1388 | |
1389 | speed = (speed * 255) / 100; |
1390 | |
1391 | return sprintf(buf, "%i\n" , speed); |
1392 | } |
1393 | |
1394 | static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev, |
1395 | struct device_attribute *attr, |
1396 | char *buf) |
1397 | { |
1398 | struct amdgpu_device *adev = dev_get_drvdata(dev); |
1399 | int err; |
1400 | u32 speed = 0; |
1401 | |
1402 | /* Can't adjust fan when the card is off */ |
1403 | if ((adev->flags & AMD_IS_PX) && |
1404 | (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON)) |
1405 | return -EINVAL; |
1406 | |
1407 | if (adev->powerplay.pp_funcs->get_fan_speed_rpm) { |
1408 | err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed); |
1409 | if (err) |
1410 | return err; |
1411 | } |
1412 | |
1413 | return sprintf(buf, "%i\n" , speed); |
1414 | } |
1415 | |
1416 | static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev, |
1417 | struct device_attribute *attr, |
1418 | char *buf) |
1419 | { |
1420 | struct amdgpu_device *adev = dev_get_drvdata(dev); |
1421 | u32 min_rpm = 0; |
1422 | u32 size = sizeof(min_rpm); |
1423 | int r; |
1424 | |
1425 | if (!adev->powerplay.pp_funcs->read_sensor) |
1426 | return -EINVAL; |
1427 | |
1428 | r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM, |
1429 | (void *)&min_rpm, &size); |
1430 | if (r) |
1431 | return r; |
1432 | |
1433 | return snprintf(buf, PAGE_SIZE, "%d\n" , min_rpm); |
1434 | } |
1435 | |
1436 | static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev, |
1437 | struct device_attribute *attr, |
1438 | char *buf) |
1439 | { |
1440 | struct amdgpu_device *adev = dev_get_drvdata(dev); |
1441 | u32 max_rpm = 0; |
1442 | u32 size = sizeof(max_rpm); |
1443 | int r; |
1444 | |
1445 | if (!adev->powerplay.pp_funcs->read_sensor) |
1446 | return -EINVAL; |
1447 | |
1448 | r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM, |
1449 | (void *)&max_rpm, &size); |
1450 | if (r) |
1451 | return r; |
1452 | |
1453 | return snprintf(buf, PAGE_SIZE, "%d\n" , max_rpm); |
1454 | } |
1455 | |
1456 | static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev, |
1457 | struct device_attribute *attr, |
1458 | char *buf) |
1459 | { |
1460 | struct amdgpu_device *adev = dev_get_drvdata(dev); |
1461 | int err; |
1462 | u32 rpm = 0; |
1463 | |
1464 | /* Can't adjust fan when the card is off */ |
1465 | if ((adev->flags & AMD_IS_PX) && |
1466 | (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON)) |
1467 | return -EINVAL; |
1468 | |
1469 | if (adev->powerplay.pp_funcs->get_fan_speed_rpm) { |
1470 | err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm); |
1471 | if (err) |
1472 | return err; |
1473 | } |
1474 | |
1475 | return sprintf(buf, "%i\n" , rpm); |
1476 | } |
1477 | |
1478 | static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev, |
1479 | struct device_attribute *attr, |
1480 | const char *buf, size_t count) |
1481 | { |
1482 | struct amdgpu_device *adev = dev_get_drvdata(dev); |
1483 | int err; |
1484 | u32 value; |
1485 | u32 pwm_mode; |
1486 | |
1487 | pwm_mode = amdgpu_dpm_get_fan_control_mode(adev); |
1488 | if (pwm_mode != AMD_FAN_CTRL_MANUAL) |
1489 | return -ENODATA; |
1490 | |
1491 | /* Can't adjust fan when the card is off */ |
1492 | if ((adev->flags & AMD_IS_PX) && |
1493 | (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON)) |
1494 | return -EINVAL; |
1495 | |
1496 | err = kstrtou32(buf, 10, &value); |
1497 | if (err) |
1498 | return err; |
1499 | |
1500 | if (adev->powerplay.pp_funcs->set_fan_speed_rpm) { |
1501 | err = amdgpu_dpm_set_fan_speed_rpm(adev, value); |
1502 | if (err) |
1503 | return err; |
1504 | } |
1505 | |
1506 | return count; |
1507 | } |
1508 | |
1509 | static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev, |
1510 | struct device_attribute *attr, |
1511 | char *buf) |
1512 | { |
1513 | struct amdgpu_device *adev = dev_get_drvdata(dev); |
1514 | u32 pwm_mode = 0; |
1515 | |
1516 | if (!adev->powerplay.pp_funcs->get_fan_control_mode) |
1517 | return -EINVAL; |
1518 | |
1519 | pwm_mode = amdgpu_dpm_get_fan_control_mode(adev); |
1520 | |
1521 | return sprintf(buf, "%i\n" , pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1); |
1522 | } |
1523 | |
1524 | static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev, |
1525 | struct device_attribute *attr, |
1526 | const char *buf, |
1527 | size_t count) |
1528 | { |
1529 | struct amdgpu_device *adev = dev_get_drvdata(dev); |
1530 | int err; |
1531 | int value; |
1532 | u32 pwm_mode; |
1533 | |
1534 | /* Can't adjust fan when the card is off */ |
1535 | if ((adev->flags & AMD_IS_PX) && |
1536 | (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON)) |
1537 | return -EINVAL; |
1538 | |
1539 | if (!adev->powerplay.pp_funcs->set_fan_control_mode) |
1540 | return -EINVAL; |
1541 | |
1542 | err = kstrtoint(buf, 10, &value); |
1543 | if (err) |
1544 | return err; |
1545 | |
1546 | if (value == 0) |
1547 | pwm_mode = AMD_FAN_CTRL_AUTO; |
1548 | else if (value == 1) |
1549 | pwm_mode = AMD_FAN_CTRL_MANUAL; |
1550 | else |
1551 | return -EINVAL; |
1552 | |
1553 | amdgpu_dpm_set_fan_control_mode(adev, pwm_mode); |
1554 | |
1555 | return count; |
1556 | } |
1557 | |
1558 | static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev, |
1559 | struct device_attribute *attr, |
1560 | char *buf) |
1561 | { |
1562 | struct amdgpu_device *adev = dev_get_drvdata(dev); |
1563 | struct drm_device *ddev = adev->ddev; |
1564 | u32 vddgfx; |
1565 | int r, size = sizeof(vddgfx); |
1566 | |
1567 | /* Can't get voltage when the card is off */ |
1568 | if ((adev->flags & AMD_IS_PX) && |
1569 | (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) |
1570 | return -EINVAL; |
1571 | |
1572 | /* sanity check PP is enabled */ |
1573 | if (!(adev->powerplay.pp_funcs && |
1574 | adev->powerplay.pp_funcs->read_sensor)) |
1575 | return -EINVAL; |
1576 | |
1577 | /* get the voltage */ |
1578 | r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, |
1579 | (void *)&vddgfx, &size); |
1580 | if (r) |
1581 | return r; |
1582 | |
1583 | return snprintf(buf, PAGE_SIZE, "%d\n" , vddgfx); |
1584 | } |
1585 | |
1586 | static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev, |
1587 | struct device_attribute *attr, |
1588 | char *buf) |
1589 | { |
1590 | return snprintf(buf, PAGE_SIZE, "vddgfx\n" ); |
1591 | } |
1592 | |
1593 | static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev, |
1594 | struct device_attribute *attr, |
1595 | char *buf) |
1596 | { |
1597 | struct amdgpu_device *adev = dev_get_drvdata(dev); |
1598 | struct drm_device *ddev = adev->ddev; |
1599 | u32 vddnb; |
1600 | int r, size = sizeof(vddnb); |
1601 | |
1602 | /* only APUs have vddnb */ |
1603 | if (!(adev->flags & AMD_IS_APU)) |
1604 | return -EINVAL; |
1605 | |
1606 | /* Can't get voltage when the card is off */ |
1607 | if ((adev->flags & AMD_IS_PX) && |
1608 | (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) |
1609 | return -EINVAL; |
1610 | |
1611 | /* sanity check PP is enabled */ |
1612 | if (!(adev->powerplay.pp_funcs && |
1613 | adev->powerplay.pp_funcs->read_sensor)) |
1614 | return -EINVAL; |
1615 | |
1616 | /* get the voltage */ |
1617 | r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, |
1618 | (void *)&vddnb, &size); |
1619 | if (r) |
1620 | return r; |
1621 | |
1622 | return snprintf(buf, PAGE_SIZE, "%d\n" , vddnb); |
1623 | } |
1624 | |
1625 | static ssize_t amdgpu_hwmon_show_vddnb_label(struct device *dev, |
1626 | struct device_attribute *attr, |
1627 | char *buf) |
1628 | { |
1629 | return snprintf(buf, PAGE_SIZE, "vddnb\n" ); |
1630 | } |
1631 | |
1632 | static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev, |
1633 | struct device_attribute *attr, |
1634 | char *buf) |
1635 | { |
1636 | struct amdgpu_device *adev = dev_get_drvdata(dev); |
1637 | struct drm_device *ddev = adev->ddev; |
1638 | u32 query = 0; |
1639 | int r, size = sizeof(u32); |
1640 | unsigned uw; |
1641 | |
1642 | /* Can't get power when the card is off */ |
1643 | if ((adev->flags & AMD_IS_PX) && |
1644 | (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) |
1645 | return -EINVAL; |
1646 | |
1647 | /* sanity check PP is enabled */ |
1648 | if (!(adev->powerplay.pp_funcs && |
1649 | adev->powerplay.pp_funcs->read_sensor)) |
1650 | return -EINVAL; |
1651 | |
1652 | /* get the voltage */ |
1653 | r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER, |
1654 | (void *)&query, &size); |
1655 | if (r) |
1656 | return r; |
1657 | |
1658 | /* convert to microwatts */ |
1659 | uw = (query >> 8) * 1000000 + (query & 0xff) * 1000; |
1660 | |
1661 | return snprintf(buf, PAGE_SIZE, "%u\n" , uw); |
1662 | } |
1663 | |
1664 | static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev, |
1665 | struct device_attribute *attr, |
1666 | char *buf) |
1667 | { |
1668 | return sprintf(buf, "%i\n" , 0); |
1669 | } |
1670 | |
1671 | static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev, |
1672 | struct device_attribute *attr, |
1673 | char *buf) |
1674 | { |
1675 | struct amdgpu_device *adev = dev_get_drvdata(dev); |
1676 | uint32_t limit = 0; |
1677 | |
1678 | if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) { |
1679 | adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, true); |
1680 | return snprintf(buf, PAGE_SIZE, "%u\n" , limit * 1000000); |
1681 | } else { |
1682 | return snprintf(buf, PAGE_SIZE, "\n" ); |
1683 | } |
1684 | } |
1685 | |
1686 | static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev, |
1687 | struct device_attribute *attr, |
1688 | char *buf) |
1689 | { |
1690 | struct amdgpu_device *adev = dev_get_drvdata(dev); |
1691 | uint32_t limit = 0; |
1692 | |
1693 | if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) { |
1694 | adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, false); |
1695 | return snprintf(buf, PAGE_SIZE, "%u\n" , limit * 1000000); |
1696 | } else { |
1697 | return snprintf(buf, PAGE_SIZE, "\n" ); |
1698 | } |
1699 | } |
1700 | |
1701 | |
1702 | static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev, |
1703 | struct device_attribute *attr, |
1704 | const char *buf, |
1705 | size_t count) |
1706 | { |
1707 | struct amdgpu_device *adev = dev_get_drvdata(dev); |
1708 | int err; |
1709 | u32 value; |
1710 | |
1711 | err = kstrtou32(buf, 10, &value); |
1712 | if (err) |
1713 | return err; |
1714 | |
1715 | value = value / 1000000; /* convert to Watt */ |
1716 | if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_power_limit) { |
1717 | err = adev->powerplay.pp_funcs->set_power_limit(adev->powerplay.pp_handle, value); |
1718 | if (err) |
1719 | return err; |
1720 | } else { |
1721 | return -EINVAL; |
1722 | } |
1723 | |
1724 | return count; |
1725 | } |
1726 | |
1727 | static ssize_t amdgpu_hwmon_show_sclk(struct device *dev, |
1728 | struct device_attribute *attr, |
1729 | char *buf) |
1730 | { |
1731 | struct amdgpu_device *adev = dev_get_drvdata(dev); |
1732 | struct drm_device *ddev = adev->ddev; |
1733 | uint32_t sclk; |
1734 | int r, size = sizeof(sclk); |
1735 | |
1736 | /* Can't get voltage when the card is off */ |
1737 | if ((adev->flags & AMD_IS_PX) && |
1738 | (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) |
1739 | return -EINVAL; |
1740 | |
1741 | /* sanity check PP is enabled */ |
1742 | if (!(adev->powerplay.pp_funcs && |
1743 | adev->powerplay.pp_funcs->read_sensor)) |
1744 | return -EINVAL; |
1745 | |
1746 | /* get the sclk */ |
1747 | r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, |
1748 | (void *)&sclk, &size); |
1749 | if (r) |
1750 | return r; |
1751 | |
1752 | return snprintf(buf, PAGE_SIZE, "%d\n" , sclk * 10 * 1000); |
1753 | } |
1754 | |
1755 | static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev, |
1756 | struct device_attribute *attr, |
1757 | char *buf) |
1758 | { |
1759 | return snprintf(buf, PAGE_SIZE, "sclk\n" ); |
1760 | } |
1761 | |
1762 | static ssize_t amdgpu_hwmon_show_mclk(struct device *dev, |
1763 | struct device_attribute *attr, |
1764 | char *buf) |
1765 | { |
1766 | struct amdgpu_device *adev = dev_get_drvdata(dev); |
1767 | struct drm_device *ddev = adev->ddev; |
1768 | uint32_t mclk; |
1769 | int r, size = sizeof(mclk); |
1770 | |
1771 | /* Can't get voltage when the card is off */ |
1772 | if ((adev->flags & AMD_IS_PX) && |
1773 | (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) |
1774 | return -EINVAL; |
1775 | |
1776 | /* sanity check PP is enabled */ |
1777 | if (!(adev->powerplay.pp_funcs && |
1778 | adev->powerplay.pp_funcs->read_sensor)) |
1779 | return -EINVAL; |
1780 | |
1781 | /* get the sclk */ |
1782 | r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, |
1783 | (void *)&mclk, &size); |
1784 | if (r) |
1785 | return r; |
1786 | |
1787 | return snprintf(buf, PAGE_SIZE, "%d\n" , mclk * 10 * 1000); |
1788 | } |
1789 | |
1790 | static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev, |
1791 | struct device_attribute *attr, |
1792 | char *buf) |
1793 | { |
1794 | return snprintf(buf, PAGE_SIZE, "mclk\n" ); |
1795 | } |
1796 | |
1797 | /** |
1798 | * DOC: hwmon |
1799 | * |
1800 | * The amdgpu driver exposes the following sensor interfaces: |
1801 | * |
1802 | * - GPU temperature (via the on-die sensor) |
1803 | * |
1804 | * - GPU voltage |
1805 | * |
1806 | * - Northbridge voltage (APUs only) |
1807 | * |
1808 | * - GPU power |
1809 | * |
1810 | * - GPU fan |
1811 | * |
1812 | * - GPU gfx/compute engine clock |
1813 | * |
1814 | * - GPU memory clock (dGPU only) |
1815 | * |
1816 | * hwmon interfaces for GPU temperature: |
1817 | * |
1818 | * - temp1_input: the on die GPU temperature in millidegrees Celsius |
1819 | * |
1820 | * - temp1_crit: temperature critical max value in millidegrees Celsius |
1821 | * |
1822 | * - temp1_crit_hyst: temperature hysteresis for critical limit in millidegrees Celsius |
1823 | * |
1824 | * hwmon interfaces for GPU voltage: |
1825 | * |
1826 | * - in0_input: the voltage on the GPU in millivolts |
1827 | * |
1828 | * - in1_input: the voltage on the Northbridge in millivolts |
1829 | * |
1830 | * hwmon interfaces for GPU power: |
1831 | * |
1832 | * - power1_average: average power used by the GPU in microWatts |
1833 | * |
1834 | * - power1_cap_min: minimum cap supported in microWatts |
1835 | * |
1836 | * - power1_cap_max: maximum cap supported in microWatts |
1837 | * |
1838 | * - power1_cap: selected power cap in microWatts |
1839 | * |
1840 | * hwmon interfaces for GPU fan: |
1841 | * |
1842 | * - pwm1: pulse width modulation fan level (0-255) |
1843 | * |
1844 | * - pwm1_enable: pulse width modulation fan control method (0: no fan speed control, 1: manual fan speed control using pwm interface, 2: automatic fan speed control) |
1845 | * |
1846 | * - pwm1_min: pulse width modulation fan control minimum level (0) |
1847 | * |
1848 | * - pwm1_max: pulse width modulation fan control maximum level (255) |
1849 | * |
 * - fan1_min: the minimum fan speed, in revolutions per minute (RPM)
1851 | * |
 * - fan1_max: the maximum fan speed, in revolutions per minute (RPM)
1853 | * |
1854 | * - fan1_input: fan speed in RPM |
1855 | * |
1856 | * - fan[1-*]_target: Desired fan speed Unit: revolution/min (RPM) |
1857 | * |
 * - fan[1-*]_enable: enable or disable the sensor (1: enabled, 0: disabled)
1859 | * |
1860 | * hwmon interfaces for GPU clocks: |
1861 | * |
1862 | * - freq1_input: the gfx/compute clock in hertz |
1863 | * |
1864 | * - freq2_input: the memory clock in hertz |
1865 | * |
1866 | * You can use hwmon tools like sensors to view this information on your system. |
1867 | * |
1868 | */ |
1869 | |
/* Temperature sensors: current value plus critical limit/hysteresis */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
/* Fan control via PWM duty cycle (0-255) and control-mode selection */
static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0);
static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);
/* Fan tachometer readings and RPM-based target control */
static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO, amdgpu_hwmon_get_fan1_min, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_max, S_IRUGO, amdgpu_hwmon_get_fan1_max, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_target, amdgpu_hwmon_set_fan1_target, 0);
static SENSOR_DEVICE_ATTR(fan1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_enable, amdgpu_hwmon_set_fan1_enable, 0);
/* Voltages: in0 = GPU (vddgfx), in1 = Northbridge (vddnb, APUs only) */
static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, amdgpu_hwmon_show_vddgfx, NULL, 0);
static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, amdgpu_hwmon_show_vddnb_label, NULL, 0);
/* Power: average draw plus the (writable) power cap and its bounds */
static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0);
/* Clocks: freq1 = gfx/compute (sclk), freq2 = memory (mclk, dGPU only) */
static SENSOR_DEVICE_ATTR(freq1_input, S_IRUGO, amdgpu_hwmon_show_sclk, NULL, 0);
static SENSOR_DEVICE_ATTR(freq1_label, S_IRUGO, amdgpu_hwmon_show_sclk_label, NULL, 0);
static SENSOR_DEVICE_ATTR(freq2_input, S_IRUGO, amdgpu_hwmon_show_mclk, NULL, 0);
static SENSOR_DEVICE_ATTR(freq2_label, S_IRUGO, amdgpu_hwmon_show_mclk_label, NULL, 0);
1894 | |
/*
 * Full set of hwmon attributes the driver can expose.  Per-device
 * filtering (APU vs dGPU, fan presence, powerplay capabilities) is done
 * at registration time by hwmon_attributes_visible(), not here.
 */
static struct attribute *hwmon_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
	&sensor_dev_attr_pwm1.dev_attr.attr,
	&sensor_dev_attr_pwm1_enable.dev_attr.attr,
	&sensor_dev_attr_pwm1_min.dev_attr.attr,
	&sensor_dev_attr_pwm1_max.dev_attr.attr,
	&sensor_dev_attr_fan1_input.dev_attr.attr,
	&sensor_dev_attr_fan1_min.dev_attr.attr,
	&sensor_dev_attr_fan1_max.dev_attr.attr,
	&sensor_dev_attr_fan1_target.dev_attr.attr,
	&sensor_dev_attr_fan1_enable.dev_attr.attr,
	&sensor_dev_attr_in0_input.dev_attr.attr,
	&sensor_dev_attr_in0_label.dev_attr.attr,
	&sensor_dev_attr_in1_input.dev_attr.attr,
	&sensor_dev_attr_in1_label.dev_attr.attr,
	&sensor_dev_attr_power1_average.dev_attr.attr,
	&sensor_dev_attr_power1_cap_max.dev_attr.attr,
	&sensor_dev_attr_power1_cap_min.dev_attr.attr,
	&sensor_dev_attr_power1_cap.dev_attr.attr,
	&sensor_dev_attr_freq1_input.dev_attr.attr,
	&sensor_dev_attr_freq1_label.dev_attr.attr,
	&sensor_dev_attr_freq2_input.dev_attr.attr,
	&sensor_dev_attr_freq2_label.dev_attr.attr,
	NULL
};
1922 | |
/*
 * sysfs .is_visible callback for the hwmon attribute group.
 *
 * Filters the static hwmon_attributes[] table per device: attributes that
 * do not apply to this ASIC (no fan, APU, dpm disabled, missing powerplay
 * callbacks) are hidden entirely (return 0), and individual read/write
 * permission bits are dropped when only the matching query or control
 * callback is absent.
 *
 * NOTE(review): adev->powerplay.pp_funcs is dereferenced below without a
 * NULL check, unlike other paths in this file - presumably the hwmon
 * device is only registered once powerplay is initialized; confirm
 * against amdgpu_pm_sysfs_init().
 *
 * Returns the effective mode bits for @attr on this device.
 */
static umode_t hwmon_attributes_visible(struct kobject *kobj,
					struct attribute *attr, int index)
{
	struct device *dev = kobj_to_dev(kobj);
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	umode_t effective_mode = attr->mode;

	/* Skip fan attributes if fan is not present */
	if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	    attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	    attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	    attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
	    attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
	    attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
	    attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
	    attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
	    attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
		return 0;

	/* Skip fan attributes on APU */
	if ((adev->flags & AMD_IS_APU) &&
	    (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
		return 0;

	/* Skip limit attributes if DPM is not enabled */
	if (!adev->pm.dpm_enabled &&
	    (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
		return 0;

	/* mask fan attributes if we have no bindings for this asic to expose */
	if ((!adev->powerplay.pp_funcs->get_fan_speed_percent &&
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
	    (!adev->powerplay.pp_funcs->get_fan_control_mode &&
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
		effective_mode &= ~S_IRUGO;

	if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
	    (!adev->powerplay.pp_funcs->set_fan_control_mode &&
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
		effective_mode &= ~S_IWUSR;

	/* APUs do not expose discrete power measurement/capping */
	if ((adev->flags & AMD_IS_APU) &&
	    (attr == &sensor_dev_attr_power1_average.dev_attr.attr ||
	     attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr||
	     attr == &sensor_dev_attr_power1_cap.dev_attr.attr))
		return 0;

	/* hide max/min values if we can't both query and manage the fan */
	if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
	     !adev->powerplay.pp_funcs->get_fan_speed_percent) &&
	    (!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
	     !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
	    (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	/* hide RPM limits when no RPM interface is implemented at all */
	if ((!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
	     !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
	    (attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_min.dev_attr.attr))
		return 0;

	/* only APUs have vddnb */
	if (!(adev->flags & AMD_IS_APU) &&
	    (attr == &sensor_dev_attr_in1_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_in1_label.dev_attr.attr))
		return 0;

	/* no mclk on APUs */
	if ((adev->flags & AMD_IS_APU) &&
	    (attr == &sensor_dev_attr_freq2_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_freq2_label.dev_attr.attr))
		return 0;

	return effective_mode;
}
2019 | |
/* Attribute group with per-device filtering via .is_visible */
static const struct attribute_group hwmon_attrgroup = {
	.attrs = hwmon_attributes,
	.is_visible = hwmon_attributes_visible,
};
2024 | |
/* NULL-terminated group list passed to hwmon_device_register_with_groups() */
static const struct attribute_group *hwmon_groups[] = {
	&hwmon_attrgroup,
	NULL
};
2029 | |
2030 | void amdgpu_dpm_thermal_work_handler(struct work_struct *work) |
2031 | { |
2032 | struct amdgpu_device *adev = |
2033 | container_of(work, struct amdgpu_device, |
2034 | pm.dpm.thermal.work); |
2035 | /* switch to the thermal state */ |
2036 | enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL; |
2037 | int temp, size = sizeof(temp); |
2038 | |
2039 | if (!adev->pm.dpm_enabled) |
2040 | return; |
2041 | |
2042 | if (adev->powerplay.pp_funcs && |
2043 | adev->powerplay.pp_funcs->read_sensor && |
2044 | !amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, |
2045 | (void *)&temp, &size)) { |
2046 | if (temp < adev->pm.dpm.thermal.min_temp) |
2047 | /* switch back the user state */ |
2048 | dpm_state = adev->pm.dpm.user_state; |
2049 | } else { |
2050 | if (adev->pm.dpm.thermal.high_to_low) |
2051 | /* switch back the user state */ |
2052 | dpm_state = adev->pm.dpm.user_state; |
2053 | } |
2054 | mutex_lock(&adev->pm.mutex); |
2055 | if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL) |
2056 | adev->pm.dpm.thermal_active = true; |
2057 | else |
2058 | adev->pm.dpm.thermal_active = false; |
2059 | adev->pm.dpm.state = dpm_state; |
2060 | mutex_unlock(&adev->pm.mutex); |
2061 | |
2062 | amdgpu_pm_compute_clocks(adev); |
2063 | } |
2064 | |
/*
 * Select the best matching power state for @dpm_state from the ASIC's
 * power state table (legacy, non-powerplay dpm path).
 *
 * States flagged ATOM_PPLIB_SINGLE_DISPLAY_ONLY are only eligible when
 * at most one display is active and the vblank period is long enough to
 * allow an mclk switch.  If no state matches, the request is
 * progressively downgraded by the fallback switch at the bottom and the
 * search restarts.
 *
 * Returns the chosen power state, or NULL if nothing matched at all.
 */
static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
						     enum amd_pm_state_type dpm_state)
{
	int i;
	struct amdgpu_ps *ps;
	u32 ui_class;
	bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ?
		true : false;

	/* check if the vblank period is too short to adjust the mclk */
	if (single_display && adev->powerplay.pp_funcs->vblank_too_short) {
		if (amdgpu_dpm_vblank_too_short(adev))
			single_display = false;
	}

	/* certain older asics have a separate 3D performance state,
	 * so try that first if the user selected performance
	 */
	if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
		dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
	/* balanced states don't exist at the moment */
	if (dpm_state == POWER_STATE_TYPE_BALANCED)
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;

restart_search:
	/* Pick the best power state based on current conditions */
	for (i = 0; i < adev->pm.dpm.num_ps; i++) {
		ps = &adev->pm.dpm.ps[i];
		ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
		switch (dpm_state) {
		/* user states */
		case POWER_STATE_TYPE_BATTERY:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_BALANCED:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_PERFORMANCE:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		/* internal states */
		case POWER_STATE_TYPE_INTERNAL_UVD:
			if (adev->pm.dpm.uvd_ps)
				return adev->pm.dpm.uvd_ps;
			else
				break;
		case POWER_STATE_TYPE_INTERNAL_UVD_SD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_BOOT:
			return adev->pm.dpm.boot_ps;
		case POWER_STATE_TYPE_INTERNAL_THERMAL:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ACPI:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ULV:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_3DPERF:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
				return ps;
			break;
		default:
			break;
		}
	}
	/* use a fallback state if we didn't match */
	switch (dpm_state) {
	case POWER_STATE_TYPE_INTERNAL_UVD_SD:
		dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_UVD_HD:
	case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
	case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
		if (adev->pm.dpm.uvd_ps) {
			return adev->pm.dpm.uvd_ps;
		} else {
			dpm_state = POWER_STATE_TYPE_PERFORMANCE;
			goto restart_search;
		}
	case POWER_STATE_TYPE_INTERNAL_THERMAL:
		dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_ACPI:
		dpm_state = POWER_STATE_TYPE_BATTERY;
		goto restart_search;
	case POWER_STATE_TYPE_BATTERY:
	case POWER_STATE_TYPE_BALANCED:
	case POWER_STATE_TYPE_INTERNAL_3DPERF:
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
		goto restart_search;
	default:
		break;
	}

	return NULL;
}
2198 | |
2199 | static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev) |
2200 | { |
2201 | struct amdgpu_ps *ps; |
2202 | enum amd_pm_state_type dpm_state; |
2203 | int ret; |
2204 | bool equal = false; |
2205 | |
2206 | /* if dpm init failed */ |
2207 | if (!adev->pm.dpm_enabled) |
2208 | return; |
2209 | |
2210 | if (adev->pm.dpm.user_state != adev->pm.dpm.state) { |
2211 | /* add other state override checks here */ |
2212 | if ((!adev->pm.dpm.thermal_active) && |
2213 | (!adev->pm.dpm.uvd_active)) |
2214 | adev->pm.dpm.state = adev->pm.dpm.user_state; |
2215 | } |
2216 | dpm_state = adev->pm.dpm.state; |
2217 | |
2218 | ps = amdgpu_dpm_pick_power_state(adev, dpm_state); |
2219 | if (ps) |
2220 | adev->pm.dpm.requested_ps = ps; |
2221 | else |
2222 | return; |
2223 | |
2224 | if (amdgpu_dpm == 1 && adev->powerplay.pp_funcs->print_power_state) { |
2225 | printk("switching from power state:\n" ); |
2226 | amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps); |
2227 | printk("switching to power state:\n" ); |
2228 | amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps); |
2229 | } |
2230 | |
2231 | /* update whether vce is active */ |
2232 | ps->vce_active = adev->pm.dpm.vce_active; |
2233 | if (adev->powerplay.pp_funcs->display_configuration_changed) |
2234 | amdgpu_dpm_display_configuration_changed(adev); |
2235 | |
2236 | ret = amdgpu_dpm_pre_set_power_state(adev); |
2237 | if (ret) |
2238 | return; |
2239 | |
2240 | if (adev->powerplay.pp_funcs->check_state_equal) { |
2241 | if (0 != amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal)) |
2242 | equal = false; |
2243 | } |
2244 | |
2245 | if (equal) |
2246 | return; |
2247 | |
2248 | amdgpu_dpm_set_power_state(adev); |
2249 | amdgpu_dpm_post_set_power_state(adev); |
2250 | |
2251 | adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs; |
2252 | adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count; |
2253 | |
2254 | if (adev->powerplay.pp_funcs->force_performance_level) { |
2255 | if (adev->pm.dpm.thermal_active) { |
2256 | enum amd_dpm_forced_level level = adev->pm.dpm.forced_level; |
2257 | /* force low perf level for thermal */ |
2258 | amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW); |
2259 | /* save the user's level */ |
2260 | adev->pm.dpm.forced_level = level; |
2261 | } else { |
2262 | /* otherwise, user selected level */ |
2263 | amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level); |
2264 | } |
2265 | } |
2266 | } |
2267 | |
2268 | void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable) |
2269 | { |
2270 | if (adev->powerplay.pp_funcs->set_powergating_by_smu) { |
2271 | /* enable/disable UVD */ |
2272 | mutex_lock(&adev->pm.mutex); |
2273 | amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable); |
2274 | mutex_unlock(&adev->pm.mutex); |
2275 | } |
2276 | /* enable/disable Low Memory PState for UVD (4k videos) */ |
2277 | if (adev->asic_type == CHIP_STONEY && |
2278 | adev->uvd.decode_image_width >= WIDTH_4K) { |
2279 | struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; |
2280 | |
2281 | if (hwmgr && hwmgr->hwmgr_func && |
2282 | hwmgr->hwmgr_func->update_nbdpm_pstate) |
2283 | hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr, |
2284 | !enable, |
2285 | true); |
2286 | } |
2287 | } |
2288 | |
2289 | void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable) |
2290 | { |
2291 | if (adev->powerplay.pp_funcs->set_powergating_by_smu) { |
2292 | /* enable/disable VCE */ |
2293 | mutex_lock(&adev->pm.mutex); |
2294 | amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable); |
2295 | mutex_unlock(&adev->pm.mutex); |
2296 | } |
2297 | } |
2298 | |
2299 | void amdgpu_pm_print_power_states(struct amdgpu_device *adev) |
2300 | { |
2301 | int i; |
2302 | |
2303 | if (adev->powerplay.pp_funcs->print_power_state == NULL) |
2304 | return; |
2305 | |
2306 | for (i = 0; i < adev->pm.dpm.num_ps; i++) |
2307 | amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]); |
2308 | |
2309 | } |
2310 | |
2311 | int amdgpu_pm_sysfs_init(struct amdgpu_device *adev) |
2312 | { |
2313 | struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; |
2314 | int ret; |
2315 | |
2316 | if (adev->pm.sysfs_initialized) |
2317 | return 0; |
2318 | |
2319 | if (adev->pm.dpm_enabled == 0) |
2320 | return 0; |
2321 | |
2322 | adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev, |
2323 | DRIVER_NAME, adev, |
2324 | hwmon_groups); |
2325 | if (IS_ERR(adev->pm.int_hwmon_dev)) { |
2326 | ret = PTR_ERR(adev->pm.int_hwmon_dev); |
2327 | dev_err(adev->dev, |
2328 | "Unable to register hwmon device: %d\n" , ret); |
2329 | return ret; |
2330 | } |
2331 | |
2332 | ret = device_create_file(adev->dev, &dev_attr_power_dpm_state); |
2333 | if (ret) { |
2334 | DRM_ERROR("failed to create device file for dpm state\n" ); |
2335 | return ret; |
2336 | } |
2337 | ret = device_create_file(adev->dev, &dev_attr_power_dpm_force_performance_level); |
2338 | if (ret) { |
2339 | DRM_ERROR("failed to create device file for dpm state\n" ); |
2340 | return ret; |
2341 | } |
2342 | |
2343 | |
2344 | ret = device_create_file(adev->dev, &dev_attr_pp_num_states); |
2345 | if (ret) { |
2346 | DRM_ERROR("failed to create device file pp_num_states\n" ); |
2347 | return ret; |
2348 | } |
2349 | ret = device_create_file(adev->dev, &dev_attr_pp_cur_state); |
2350 | if (ret) { |
2351 | DRM_ERROR("failed to create device file pp_cur_state\n" ); |
2352 | return ret; |
2353 | } |
2354 | ret = device_create_file(adev->dev, &dev_attr_pp_force_state); |
2355 | if (ret) { |
2356 | DRM_ERROR("failed to create device file pp_force_state\n" ); |
2357 | return ret; |
2358 | } |
2359 | ret = device_create_file(adev->dev, &dev_attr_pp_table); |
2360 | if (ret) { |
2361 | DRM_ERROR("failed to create device file pp_table\n" ); |
2362 | return ret; |
2363 | } |
2364 | |
2365 | ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk); |
2366 | if (ret) { |
2367 | DRM_ERROR("failed to create device file pp_dpm_sclk\n" ); |
2368 | return ret; |
2369 | } |
2370 | ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk); |
2371 | if (ret) { |
2372 | DRM_ERROR("failed to create device file pp_dpm_mclk\n" ); |
2373 | return ret; |
2374 | } |
2375 | if (adev->asic_type >= CHIP_VEGA10) { |
2376 | ret = device_create_file(adev->dev, &dev_attr_pp_dpm_socclk); |
2377 | if (ret) { |
2378 | DRM_ERROR("failed to create device file pp_dpm_socclk\n" ); |
2379 | return ret; |
2380 | } |
2381 | ret = device_create_file(adev->dev, &dev_attr_pp_dpm_dcefclk); |
2382 | if (ret) { |
2383 | DRM_ERROR("failed to create device file pp_dpm_dcefclk\n" ); |
2384 | return ret; |
2385 | } |
2386 | } |
2387 | if (adev->asic_type >= CHIP_VEGA20) { |
2388 | ret = device_create_file(adev->dev, &dev_attr_pp_dpm_fclk); |
2389 | if (ret) { |
2390 | DRM_ERROR("failed to create device file pp_dpm_fclk\n" ); |
2391 | return ret; |
2392 | } |
2393 | } |
2394 | ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie); |
2395 | if (ret) { |
2396 | DRM_ERROR("failed to create device file pp_dpm_pcie\n" ); |
2397 | return ret; |
2398 | } |
2399 | ret = device_create_file(adev->dev, &dev_attr_pp_sclk_od); |
2400 | if (ret) { |
2401 | DRM_ERROR("failed to create device file pp_sclk_od\n" ); |
2402 | return ret; |
2403 | } |
2404 | ret = device_create_file(adev->dev, &dev_attr_pp_mclk_od); |
2405 | if (ret) { |
2406 | DRM_ERROR("failed to create device file pp_mclk_od\n" ); |
2407 | return ret; |
2408 | } |
2409 | ret = device_create_file(adev->dev, |
2410 | &dev_attr_pp_power_profile_mode); |
2411 | if (ret) { |
2412 | DRM_ERROR("failed to create device file " |
2413 | "pp_power_profile_mode\n" ); |
2414 | return ret; |
2415 | } |
2416 | if (hwmgr->od_enabled) { |
2417 | ret = device_create_file(adev->dev, |
2418 | &dev_attr_pp_od_clk_voltage); |
2419 | if (ret) { |
2420 | DRM_ERROR("failed to create device file " |
2421 | "pp_od_clk_voltage\n" ); |
2422 | return ret; |
2423 | } |
2424 | } |
2425 | ret = device_create_file(adev->dev, |
2426 | &dev_attr_gpu_busy_percent); |
2427 | if (ret) { |
2428 | DRM_ERROR("failed to create device file " |
2429 | "gpu_busy_level\n" ); |
2430 | return ret; |
2431 | } |
2432 | /* PCIe Perf counters won't work on APU nodes */ |
2433 | if (!(adev->flags & AMD_IS_APU)) { |
2434 | ret = device_create_file(adev->dev, &dev_attr_pcie_bw); |
2435 | if (ret) { |
2436 | DRM_ERROR("failed to create device file pcie_bw\n" ); |
2437 | return ret; |
2438 | } |
2439 | } |
2440 | ret = amdgpu_debugfs_pm_init(adev); |
2441 | if (ret) { |
2442 | DRM_ERROR("Failed to register debugfs file for dpm!\n" ); |
2443 | return ret; |
2444 | } |
2445 | |
2446 | if ((adev->asic_type >= CHIP_VEGA10) && |
2447 | !(adev->flags & AMD_IS_APU)) { |
2448 | ret = device_create_file(adev->dev, |
2449 | &dev_attr_ppfeatures); |
2450 | if (ret) { |
2451 | DRM_ERROR("failed to create device file " |
2452 | "ppfeatures\n" ); |
2453 | return ret; |
2454 | } |
2455 | } |
2456 | |
2457 | adev->pm.sysfs_initialized = true; |
2458 | |
2459 | return 0; |
2460 | } |
2461 | |
/*
 * Tear down everything amdgpu_pm_sysfs_init() created, mirroring its
 * asic-type / APU conditions.  device_remove_file() is safe on files
 * that were never created, so a partially-initialized device is fine.
 */
void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
{
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	/* init bailed out before creating anything */
	if (adev->pm.dpm_enabled == 0)
		return;

	if (adev->pm.int_hwmon_dev)
		hwmon_device_unregister(adev->pm.int_hwmon_dev);
	device_remove_file(adev->dev, &dev_attr_power_dpm_state);
	device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level);

	device_remove_file(adev->dev, &dev_attr_pp_num_states);
	device_remove_file(adev->dev, &dev_attr_pp_cur_state);
	device_remove_file(adev->dev, &dev_attr_pp_force_state);
	device_remove_file(adev->dev, &dev_attr_pp_table);

	device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk);
	device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk);
	if (adev->asic_type >= CHIP_VEGA10) {
		device_remove_file(adev->dev, &dev_attr_pp_dpm_socclk);
		device_remove_file(adev->dev, &dev_attr_pp_dpm_dcefclk);
	}
	device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie);
	if (adev->asic_type >= CHIP_VEGA20)
		device_remove_file(adev->dev, &dev_attr_pp_dpm_fclk);
	device_remove_file(adev->dev, &dev_attr_pp_sclk_od);
	device_remove_file(adev->dev, &dev_attr_pp_mclk_od);
	device_remove_file(adev->dev,
			   &dev_attr_pp_power_profile_mode);
	if (hwmgr->od_enabled)
		device_remove_file(adev->dev,
				&dev_attr_pp_od_clk_voltage);
	device_remove_file(adev->dev, &dev_attr_gpu_busy_percent);
	if (!(adev->flags & AMD_IS_APU))
		device_remove_file(adev->dev, &dev_attr_pcie_bw);
	if ((adev->asic_type >= CHIP_VEGA10) &&
	    !(adev->flags & AMD_IS_APU))
		device_remove_file(adev->dev, &dev_attr_ppfeatures);
}
2502 | |
/*
 * Re-evaluate clocks and power state after a display or workload change.
 *
 * Updates display bandwidth, waits for all ring fences so the hardware
 * is idle across the state switch, then either dispatches a display-
 * config-change task to powerplay (refreshing the cached display config
 * first on non-DC) or runs the legacy dpm state change under pm.mutex.
 */
void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
{
	int i = 0;

	if (!adev->pm.dpm_enabled)
		return;

	if (adev->mode_info.num_crtc)
		amdgpu_display_bandwidth_update(adev);

	/* drain outstanding work on every ready ring before reclocking */
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (ring && ring->sched.ready)
			amdgpu_fence_wait_empty(ring);
	}

	if (adev->powerplay.pp_funcs->dispatch_tasks) {
		if (!amdgpu_device_has_dc_support(adev)) {
			mutex_lock(&adev->pm.mutex);
			amdgpu_dpm_get_active_displays(adev);
			adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
			adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
			adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
			/* we have issues with mclk switching with refresh rates over 120 hz on the non-DC code. */
			if (adev->pm.pm_display_cfg.vrefresh > 120)
				adev->pm.pm_display_cfg.min_vblank_time = 0;
			if (adev->powerplay.pp_funcs->display_configuration_change)
				adev->powerplay.pp_funcs->display_configuration_change(
							adev->powerplay.pp_handle,
							&adev->pm.pm_display_cfg);
			mutex_unlock(&adev->pm.mutex);
		}
		amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL);
	} else {
		/* legacy dpm path */
		mutex_lock(&adev->pm.mutex);
		amdgpu_dpm_get_active_displays(adev);
		amdgpu_dpm_change_power_state_locked(adev);
		mutex_unlock(&adev->pm.mutex);
	}
}
2543 | |
2544 | /* |
2545 | * Debugfs info |
2546 | */ |
2547 | #if defined(CONFIG_DEBUG_FS) |
2548 | |
/*
 * Dump current clocks, voltages, power, temperature, load and UVD/VCE
 * state to the debugfs seq_file via the powerplay sensor interface.
 * Sensor reads that fail are silently skipped so partial data still
 * prints.  Returns 0, or -EINVAL if powerplay sensors are unavailable.
 *
 * NOTE(review): GPU_POWER appears to be 8.8 fixed point - the fraction
 * is printed as the raw low byte (0-255), not scaled to decimal;
 * presumably accepted for this debug output. TODO confirm.
 */
static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
{
	uint32_t value;
	uint64_t value64;
	uint32_t query = 0;
	int size;

	/* sanity check PP is enabled */
	if (!(adev->powerplay.pp_funcs &&
	      adev->powerplay.pp_funcs->read_sensor))
	      return -EINVAL;

	/* GPU Clocks */
	size = sizeof(value);
	seq_printf(m, "GFX Clocks and Power:\n" );
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, (void *)&value, &size))
		seq_printf(m, "\t%u MHz (MCLK)\n" , value/100);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size))
		seq_printf(m, "\t%u MHz (SCLK)\n" , value/100);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK, (void *)&value, &size))
		seq_printf(m, "\t%u MHz (PSTATE_SCLK)\n" , value/100);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK, (void *)&value, &size))
		seq_printf(m, "\t%u MHz (PSTATE_MCLK)\n" , value/100);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, (void *)&value, &size))
		seq_printf(m, "\t%u mV (VDDGFX)\n" , value);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size))
		seq_printf(m, "\t%u mV (VDDNB)\n" , value);
	size = sizeof(uint32_t);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER, (void *)&query, &size))
		seq_printf(m, "\t%u.%u W (average GPU)\n" , query >> 8, query & 0xff);
	size = sizeof(value);
	seq_printf(m, "\n" );

	/* GPU Temp */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, (void *)&value, &size))
		seq_printf(m, "GPU Temperature: %u C\n" , value/1000);

	/* GPU Load */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, (void *)&value, &size))
		seq_printf(m, "GPU Load: %u %%\n" , value);
	seq_printf(m, "\n" );

	/* SMC feature mask */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size))
		seq_printf(m, "SMC Feature Mask: 0x%016llx\n" , value64);

	/* UVD clocks */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
		if (!value) {
			seq_printf(m, "UVD: Disabled\n" );
		} else {
			seq_printf(m, "UVD: Enabled\n" );
			if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
				seq_printf(m, "\t%u MHz (DCLK)\n" , value/100);
			if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
				seq_printf(m, "\t%u MHz (VCLK)\n" , value/100);
		}
	}
	seq_printf(m, "\n" );

	/* VCE clocks */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
		if (!value) {
			seq_printf(m, "VCE: Disabled\n" );
		} else {
			seq_printf(m, "VCE: Enabled\n" );
			if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
				seq_printf(m, "\t%u MHz (ECCLK)\n" , value/100);
		}
	}

	return 0;
}
2622 | |
2623 | static void amdgpu_parse_cg_state(struct seq_file *m, u32 flags) |
2624 | { |
2625 | int i; |
2626 | |
2627 | for (i = 0; clocks[i].flag; i++) |
2628 | seq_printf(m, "\t%s: %s\n" , clocks[i].name, |
2629 | (flags & clocks[i].flag) ? "On" : "Off" ); |
2630 | } |
2631 | |
2632 | static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data) |
2633 | { |
2634 | struct drm_info_node *node = (struct drm_info_node *) m->private; |
2635 | struct drm_device *dev = node->minor->dev; |
2636 | struct amdgpu_device *adev = dev->dev_private; |
2637 | struct drm_device *ddev = adev->ddev; |
2638 | u32 flags = 0; |
2639 | |
2640 | amdgpu_device_ip_get_clockgating_state(adev, &flags); |
2641 | seq_printf(m, "Clock Gating Flags Mask: 0x%x\n" , flags); |
2642 | amdgpu_parse_cg_state(m, flags); |
2643 | seq_printf(m, "\n" ); |
2644 | |
2645 | if (!adev->pm.dpm_enabled) { |
2646 | seq_printf(m, "dpm not enabled\n" ); |
2647 | return 0; |
2648 | } |
2649 | if ((adev->flags & AMD_IS_PX) && |
2650 | (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) { |
2651 | seq_printf(m, "PX asic powered off\n" ); |
2652 | } else if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level) { |
2653 | mutex_lock(&adev->pm.mutex); |
2654 | if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level) |
2655 | adev->powerplay.pp_funcs->debugfs_print_current_performance_level(adev, m); |
2656 | else |
2657 | seq_printf(m, "Debugfs support not implemented for this asic\n" ); |
2658 | mutex_unlock(&adev->pm.mutex); |
2659 | } else { |
2660 | return amdgpu_debugfs_pm_info_pp(m, adev); |
2661 | } |
2662 | |
2663 | return 0; |
2664 | } |
2665 | |
/* debugfs entry table: exposes "amdgpu_pm_info" via amdgpu_debugfs_pm_init(). */
static const struct drm_info_list amdgpu_pm_info_list[] = {
	{"amdgpu_pm_info" , amdgpu_debugfs_pm_info, 0, NULL},
};
2669 | #endif |
2670 | |
/**
 * amdgpu_debugfs_pm_init - register the power-management debugfs file
 * @adev: amdgpu device
 *
 * Registers the "amdgpu_pm_info" entry when the kernel is built with
 * CONFIG_DEBUG_FS; otherwise this is a no-op that reports success.
 *
 * Returns 0 on success or the error code from amdgpu_debugfs_add_files().
 */
static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_pm_info_list, ARRAY_SIZE(amdgpu_pm_info_list));
#else
	/* debugfs disabled in this configuration: nothing to register. */
	return 0;
#endif
}
2679 | |