1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * intel_pstate.c: Native P state management for Intel processors |
4 | * |
5 | * (C) Copyright 2012 Intel Corporation |
6 | * Author: Dirk Brandewie <dirk.j.brandewie@intel.com> |
7 | */ |
8 | |
9 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
10 | |
11 | #include <linux/kernel.h> |
12 | #include <linux/kernel_stat.h> |
13 | #include <linux/module.h> |
14 | #include <linux/ktime.h> |
15 | #include <linux/hrtimer.h> |
16 | #include <linux/tick.h> |
17 | #include <linux/slab.h> |
18 | #include <linux/sched/cpufreq.h> |
19 | #include <linux/list.h> |
20 | #include <linux/cpu.h> |
21 | #include <linux/cpufreq.h> |
22 | #include <linux/sysfs.h> |
23 | #include <linux/types.h> |
24 | #include <linux/fs.h> |
25 | #include <linux/acpi.h> |
26 | #include <linux/vmalloc.h> |
27 | #include <linux/pm_qos.h> |
28 | #include <trace/events/power.h> |
29 | |
30 | #include <asm/cpu.h> |
31 | #include <asm/div64.h> |
32 | #include <asm/msr.h> |
33 | #include <asm/cpu_device_id.h> |
34 | #include <asm/cpufeature.h> |
35 | #include <asm/intel-family.h> |
36 | #include "../drivers/thermal/intel/thermal_interrupt.h" |
37 | |
38 | #define INTEL_PSTATE_SAMPLING_INTERVAL (10 * NSEC_PER_MSEC) |
39 | |
40 | #define INTEL_CPUFREQ_TRANSITION_LATENCY 20000 |
41 | #define INTEL_CPUFREQ_TRANSITION_DELAY_HWP 5000 |
42 | #define INTEL_CPUFREQ_TRANSITION_DELAY 500 |
43 | |
44 | #ifdef CONFIG_ACPI |
45 | #include <acpi/processor.h> |
46 | #include <acpi/cppc_acpi.h> |
47 | #endif |
48 | |
49 | #define FRAC_BITS 8 |
50 | #define int_tofp(X) ((int64_t)(X) << FRAC_BITS) |
51 | #define fp_toint(X) ((X) >> FRAC_BITS) |
52 | |
53 | #define ONE_EIGHTH_FP ((int64_t)1 << (FRAC_BITS - 3)) |
54 | |
55 | #define EXT_BITS 6 |
56 | #define EXT_FRAC_BITS (EXT_BITS + FRAC_BITS) |
57 | #define fp_ext_toint(X) ((X) >> EXT_FRAC_BITS) |
58 | #define int_ext_tofp(X) ((int64_t)(X) << EXT_FRAC_BITS) |
59 | |
60 | static inline int32_t mul_fp(int32_t x, int32_t y) |
61 | { |
62 | return ((int64_t)x * (int64_t)y) >> FRAC_BITS; |
63 | } |
64 | |
65 | static inline int32_t div_fp(s64 x, s64 y) |
66 | { |
	return div64_s64((int64_t)x << FRAC_BITS, y);
68 | } |
69 | |
70 | static inline int ceiling_fp(int32_t x) |
71 | { |
72 | int mask, ret; |
73 | |
74 | ret = fp_toint(x); |
75 | mask = (1 << FRAC_BITS) - 1; |
76 | if (x & mask) |
77 | ret += 1; |
78 | return ret; |
79 | } |
80 | |
81 | static inline u64 mul_ext_fp(u64 x, u64 y) |
82 | { |
83 | return (x * y) >> EXT_FRAC_BITS; |
84 | } |
85 | |
86 | static inline u64 div_ext_fp(u64 x, u64 y) |
87 | { |
	return div64_u64(x << EXT_FRAC_BITS, y);
89 | } |
90 | |
91 | /** |
92 | * struct sample - Store performance sample |
93 | * @core_avg_perf: Ratio of APERF/MPERF which is the actual average |
94 | * performance during last sample period |
95 | * @busy_scaled: Scaled busy value which is used to calculate next |
96 | * P state. This can be different than core_avg_perf |
97 | * to account for cpu idle period |
98 | * @aperf: Difference of actual performance frequency clock count |
99 | * read from APERF MSR between last and current sample |
100 | * @mperf: Difference of maximum performance frequency clock count |
101 | * read from MPERF MSR between last and current sample |
102 | * @tsc: Difference of time stamp counter between last and |
103 | * current sample |
104 | * @time: Current time from scheduler |
105 | * |
106 | * This structure is used in the cpudata structure to store performance sample |
107 | * data for choosing next P State. |
108 | */ |
109 | struct sample { |
110 | int32_t core_avg_perf; |
111 | int32_t busy_scaled; |
112 | u64 aperf; |
113 | u64 mperf; |
114 | u64 tsc; |
115 | u64 time; |
116 | }; |
117 | |
118 | /** |
119 | * struct pstate_data - Store P state data |
120 | * @current_pstate: Current requested P state |
121 | * @min_pstate: Min P state possible for this platform |
122 | * @max_pstate: Max P state possible for this platform |
 * @max_pstate_physical: This is the physical max P state for a processor.
 *			This can be higher than the max_pstate, which can
 *			be limited by platform thermal design power limits
126 | * @perf_ctl_scaling: PERF_CTL P-state to frequency scaling factor |
127 | * @scaling: Scaling factor between performance and frequency |
128 | * @turbo_pstate: Max Turbo P state possible for this platform |
129 | * @min_freq: @min_pstate frequency in cpufreq units |
130 | * @max_freq: @max_pstate frequency in cpufreq units |
131 | * @turbo_freq: @turbo_pstate frequency in cpufreq units |
132 | * |
133 | * Stores the per cpu model P state limits and current P state. |
134 | */ |
135 | struct pstate_data { |
136 | int current_pstate; |
137 | int min_pstate; |
138 | int max_pstate; |
139 | int max_pstate_physical; |
140 | int perf_ctl_scaling; |
141 | int scaling; |
142 | int turbo_pstate; |
143 | unsigned int min_freq; |
144 | unsigned int max_freq; |
145 | unsigned int turbo_freq; |
146 | }; |
147 | |
148 | /** |
149 | * struct vid_data - Stores voltage information data |
150 | * @min: VID data for this platform corresponding to |
151 | * the lowest P state |
152 | * @max: VID data corresponding to the highest P State. |
153 | * @turbo: VID data for turbo P state |
154 | * @ratio: Ratio of (vid max - vid min) / |
155 | * (max P state - Min P State) |
156 | * |
157 | * Stores the voltage data for DVFS (Dynamic Voltage and Frequency Scaling) |
158 | * This data is used in Atom platforms, where in addition to target P state, |
159 | * the voltage data needs to be specified to select next P State. |
160 | */ |
161 | struct vid_data { |
162 | int min; |
163 | int max; |
164 | int turbo; |
165 | int32_t ratio; |
166 | }; |
167 | |
168 | /** |
169 | * struct global_params - Global parameters, mostly tunable via sysfs. |
170 | * @no_turbo: Whether or not to use turbo P-states. |
171 | * @turbo_disabled: Whether or not turbo P-states are available at all, |
172 | * based on the MSR_IA32_MISC_ENABLE value and whether or |
173 | * not the maximum reported turbo P-state is different from |
174 | * the maximum reported non-turbo one. |
175 | * @turbo_disabled_mf: The @turbo_disabled value reflected by cpuinfo.max_freq. |
176 | * @min_perf_pct: Minimum capacity limit in percent of the maximum turbo |
177 | * P-state capacity. |
178 | * @max_perf_pct: Maximum capacity limit in percent of the maximum turbo |
179 | * P-state capacity. |
180 | */ |
181 | struct global_params { |
182 | bool no_turbo; |
183 | bool turbo_disabled; |
184 | bool turbo_disabled_mf; |
185 | int max_perf_pct; |
186 | int min_perf_pct; |
187 | }; |
188 | |
189 | /** |
190 | * struct cpudata - Per CPU instance data storage |
191 | * @cpu: CPU number for this instance data |
192 | * @policy: CPUFreq policy value |
193 | * @update_util: CPUFreq utility callback information |
194 | * @update_util_set: CPUFreq utility callback is set |
195 | * @iowait_boost: iowait-related boost fraction |
196 | * @last_update: Time of the last update. |
197 | * @pstate: Stores P state limits for this CPU |
198 | * @vid: Stores VID limits for this CPU |
199 | * @last_sample_time: Last Sample time |
200 | * @aperf_mperf_shift: APERF vs MPERF counting frequency difference |
201 | * @prev_aperf: Last APERF value read from APERF MSR |
202 | * @prev_mperf: Last MPERF value read from MPERF MSR |
203 | * @prev_tsc: Last timestamp counter (TSC) value |
 * @prev_cummulative_iowait: IO Wait time difference between the last and
 *			current sample
206 | * @sample: Storage for storing last Sample data |
207 | * @min_perf_ratio: Minimum capacity in terms of PERF or HWP ratios |
208 | * @max_perf_ratio: Maximum capacity in terms of PERF or HWP ratios |
209 | * @acpi_perf_data: Stores ACPI perf information read from _PSS |
210 | * @valid_pss_table: Set to true for valid ACPI _PSS entries found |
211 | * @epp_powersave: Last saved HWP energy performance preference |
212 | * (EPP) or energy performance bias (EPB), |
213 | * when policy switched to performance |
214 | * @epp_policy: Last saved policy used to set EPP/EPB |
215 | * @epp_default: Power on default HWP energy performance |
216 | * preference/bias |
 * @epp_cached:		Cached HWP energy-performance preference value
218 | * @hwp_req_cached: Cached value of the last HWP Request MSR |
219 | * @hwp_cap_cached: Cached value of the last HWP Capabilities MSR |
220 | * @last_io_update: Last time when IO wake flag was set |
221 | * @sched_flags: Store scheduler flags for possible cross CPU update |
222 | * @hwp_boost_min: Last HWP boosted min performance |
223 | * @suspended: Whether or not the driver has been suspended. |
224 | * @hwp_notify_work: workqueue for HWP notifications. |
225 | * |
226 | * This structure stores per CPU instance data for all CPUs. |
227 | */ |
228 | struct cpudata { |
229 | int cpu; |
230 | |
231 | unsigned int policy; |
232 | struct update_util_data update_util; |
233 | bool update_util_set; |
234 | |
235 | struct pstate_data pstate; |
236 | struct vid_data vid; |
237 | |
238 | u64 last_update; |
239 | u64 last_sample_time; |
240 | u64 aperf_mperf_shift; |
241 | u64 prev_aperf; |
242 | u64 prev_mperf; |
243 | u64 prev_tsc; |
244 | u64 prev_cummulative_iowait; |
245 | struct sample sample; |
246 | int32_t min_perf_ratio; |
247 | int32_t max_perf_ratio; |
248 | #ifdef CONFIG_ACPI |
249 | struct acpi_processor_performance acpi_perf_data; |
250 | bool valid_pss_table; |
251 | #endif |
252 | unsigned int iowait_boost; |
253 | s16 epp_powersave; |
254 | s16 epp_policy; |
255 | s16 epp_default; |
256 | s16 epp_cached; |
257 | u64 hwp_req_cached; |
258 | u64 hwp_cap_cached; |
259 | u64 last_io_update; |
260 | unsigned int sched_flags; |
261 | u32 hwp_boost_min; |
262 | bool suspended; |
263 | struct delayed_work hwp_notify_work; |
264 | }; |
265 | |
266 | static struct cpudata **all_cpu_data; |
267 | |
268 | /** |
269 | * struct pstate_funcs - Per CPU model specific callbacks |
270 | * @get_max: Callback to get maximum non turbo effective P state |
271 | * @get_max_physical: Callback to get maximum non turbo physical P state |
272 | * @get_min: Callback to get minimum P state |
273 | * @get_turbo: Callback to get turbo P state |
274 | * @get_scaling: Callback to get frequency scaling factor |
275 | * @get_cpu_scaling: Get frequency scaling factor for a given cpu |
276 | * @get_aperf_mperf_shift: Callback to get the APERF vs MPERF frequency difference |
277 | * @get_val: Callback to convert P state to actual MSR write value |
278 | * @get_vid: Callback to get VID data for Atom platforms |
279 | * |
 * Core and Atom CPU models have different ways to get P State limits. This
281 | * structure is used to store those callbacks. |
282 | */ |
283 | struct pstate_funcs { |
284 | int (*get_max)(int cpu); |
285 | int (*get_max_physical)(int cpu); |
286 | int (*get_min)(int cpu); |
287 | int (*get_turbo)(int cpu); |
288 | int (*get_scaling)(void); |
289 | int (*get_cpu_scaling)(int cpu); |
290 | int (*get_aperf_mperf_shift)(void); |
291 | u64 (*get_val)(struct cpudata*, int pstate); |
292 | void (*get_vid)(struct cpudata *); |
293 | }; |
294 | |
295 | static struct pstate_funcs pstate_funcs __read_mostly; |
296 | |
297 | static int hwp_active __read_mostly; |
298 | static int hwp_mode_bdw __read_mostly; |
299 | static bool per_cpu_limits __read_mostly; |
300 | static bool hwp_boost __read_mostly; |
301 | static bool hwp_forced __read_mostly; |
302 | |
303 | static struct cpufreq_driver *intel_pstate_driver __read_mostly; |
304 | |
305 | #define HYBRID_SCALING_FACTOR 78741 |
306 | |
307 | static inline int core_get_scaling(void) |
308 | { |
309 | return 100000; |
310 | } |
311 | |
312 | #ifdef CONFIG_ACPI |
313 | static bool acpi_ppc; |
314 | #endif |
315 | |
316 | static struct global_params global; |
317 | |
318 | static DEFINE_MUTEX(intel_pstate_driver_lock); |
319 | static DEFINE_MUTEX(intel_pstate_limits_lock); |
320 | |
321 | #ifdef CONFIG_ACPI |
322 | |
323 | static bool intel_pstate_acpi_pm_profile_server(void) |
324 | { |
325 | if (acpi_gbl_FADT.preferred_profile == PM_ENTERPRISE_SERVER || |
326 | acpi_gbl_FADT.preferred_profile == PM_PERFORMANCE_SERVER) |
327 | return true; |
328 | |
329 | return false; |
330 | } |
331 | |
332 | static bool intel_pstate_get_ppc_enable_status(void) |
333 | { |
334 | if (intel_pstate_acpi_pm_profile_server()) |
335 | return true; |
336 | |
337 | return acpi_ppc; |
338 | } |
339 | |
340 | #ifdef CONFIG_ACPI_CPPC_LIB |
341 | |
342 | /* The work item is needed to avoid CPU hotplug locking issues */ |
static void intel_pstate_sched_itmt_work_fn(struct work_struct *work)
344 | { |
345 | sched_set_itmt_support(); |
346 | } |
347 | |
static DECLARE_WORK(sched_itmt_work, intel_pstate_sched_itmt_work_fn);
349 | |
350 | #define CPPC_MAX_PERF U8_MAX |
351 | |
352 | static void intel_pstate_set_itmt_prio(int cpu) |
353 | { |
354 | struct cppc_perf_caps cppc_perf; |
355 | static u32 max_highest_perf = 0, min_highest_perf = U32_MAX; |
356 | int ret; |
357 | |
	ret = cppc_get_perf_caps(cpu, &cppc_perf);
359 | if (ret) |
360 | return; |
361 | |
362 | /* |
	 * On some systems with overclocking enabled, CPPC.highest_perf is hardcoded to 0xff.
	 * In that case CPPC.highest_perf cannot be used to enable ITMT, so look
	 * at MSR_HWP_CAPABILITIES bits [8:0] to decide instead.
366 | */ |
367 | if (cppc_perf.highest_perf == CPPC_MAX_PERF) |
368 | cppc_perf.highest_perf = HWP_HIGHEST_PERF(READ_ONCE(all_cpu_data[cpu]->hwp_cap_cached)); |
369 | |
370 | /* |
371 | * The priorities can be set regardless of whether or not |
372 | * sched_set_itmt_support(true) has been called and it is valid to |
373 | * update them at any time after it has been called. |
374 | */ |
	sched_set_itmt_core_prio(cppc_perf.highest_perf, cpu);
376 | |
377 | if (max_highest_perf <= min_highest_perf) { |
378 | if (cppc_perf.highest_perf > max_highest_perf) |
379 | max_highest_perf = cppc_perf.highest_perf; |
380 | |
381 | if (cppc_perf.highest_perf < min_highest_perf) |
382 | min_highest_perf = cppc_perf.highest_perf; |
383 | |
384 | if (max_highest_perf > min_highest_perf) { |
385 | /* |
386 | * This code can be run during CPU online under the |
387 | * CPU hotplug locks, so sched_set_itmt_support() |
388 | * cannot be called from here. Queue up a work item |
389 | * to invoke it. |
390 | */ |
			schedule_work(&sched_itmt_work);
392 | } |
393 | } |
394 | } |
395 | |
396 | static int intel_pstate_get_cppc_guaranteed(int cpu) |
397 | { |
398 | struct cppc_perf_caps cppc_perf; |
399 | int ret; |
400 | |
	ret = cppc_get_perf_caps(cpu, &cppc_perf);
402 | if (ret) |
403 | return ret; |
404 | |
405 | if (cppc_perf.guaranteed_perf) |
406 | return cppc_perf.guaranteed_perf; |
407 | |
408 | return cppc_perf.nominal_perf; |
409 | } |
410 | |
411 | static int intel_pstate_cppc_get_scaling(int cpu) |
412 | { |
413 | struct cppc_perf_caps cppc_perf; |
414 | int ret; |
415 | |
	ret = cppc_get_perf_caps(cpu, &cppc_perf);
417 | |
418 | /* |
419 | * If the nominal frequency and the nominal performance are not |
420 | * zero and the ratio between them is not 100, return the hybrid |
421 | * scaling factor. |
422 | */ |
423 | if (!ret && cppc_perf.nominal_perf && cppc_perf.nominal_freq && |
424 | cppc_perf.nominal_perf * 100 != cppc_perf.nominal_freq) |
425 | return HYBRID_SCALING_FACTOR; |
426 | |
427 | return core_get_scaling(); |
428 | } |
429 | |
430 | #else /* CONFIG_ACPI_CPPC_LIB */ |
431 | static inline void intel_pstate_set_itmt_prio(int cpu) |
432 | { |
433 | } |
434 | #endif /* CONFIG_ACPI_CPPC_LIB */ |
435 | |
436 | static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy) |
437 | { |
438 | struct cpudata *cpu; |
439 | int ret; |
440 | int i; |
441 | |
442 | if (hwp_active) { |
		intel_pstate_set_itmt_prio(policy->cpu);
444 | return; |
445 | } |
446 | |
447 | if (!intel_pstate_get_ppc_enable_status()) |
448 | return; |
449 | |
450 | cpu = all_cpu_data[policy->cpu]; |
451 | |
	ret = acpi_processor_register_performance(&cpu->acpi_perf_data,
						  policy->cpu);
454 | if (ret) |
455 | return; |
456 | |
457 | /* |
458 | * Check if the control value in _PSS is for PERF_CTL MSR, which should |
459 | * guarantee that the states returned by it map to the states in our |
460 | * list directly. |
461 | */ |
462 | if (cpu->acpi_perf_data.control_register.space_id != |
463 | ACPI_ADR_SPACE_FIXED_HARDWARE) |
464 | goto err; |
465 | |
466 | /* |
	 * If there is only one entry in _PSS, simply ignore it and continue as
	 * usual without taking _PSS into account
469 | */ |
470 | if (cpu->acpi_perf_data.state_count < 2) |
471 | goto err; |
472 | |
473 | pr_debug("CPU%u - ACPI _PSS perf data\n" , policy->cpu); |
474 | for (i = 0; i < cpu->acpi_perf_data.state_count; i++) { |
475 | pr_debug(" %cP%d: %u MHz, %u mW, 0x%x\n" , |
476 | (i == cpu->acpi_perf_data.state ? '*' : ' '), i, |
477 | (u32) cpu->acpi_perf_data.states[i].core_frequency, |
478 | (u32) cpu->acpi_perf_data.states[i].power, |
479 | (u32) cpu->acpi_perf_data.states[i].control); |
480 | } |
481 | |
482 | cpu->valid_pss_table = true; |
483 | pr_debug("_PPC limits will be enforced\n" ); |
484 | |
485 | return; |
486 | |
487 | err: |
488 | cpu->valid_pss_table = false; |
	acpi_processor_unregister_performance(policy->cpu);
490 | } |
491 | |
492 | static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy) |
493 | { |
494 | struct cpudata *cpu; |
495 | |
496 | cpu = all_cpu_data[policy->cpu]; |
497 | if (!cpu->valid_pss_table) |
498 | return; |
499 | |
	acpi_processor_unregister_performance(policy->cpu);
501 | } |
502 | #else /* CONFIG_ACPI */ |
503 | static inline void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy) |
504 | { |
505 | } |
506 | |
507 | static inline void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy) |
508 | { |
509 | } |
510 | |
511 | static inline bool intel_pstate_acpi_pm_profile_server(void) |
512 | { |
513 | return false; |
514 | } |
515 | #endif /* CONFIG_ACPI */ |
516 | |
517 | #ifndef CONFIG_ACPI_CPPC_LIB |
518 | static inline int intel_pstate_get_cppc_guaranteed(int cpu) |
519 | { |
520 | return -ENOTSUPP; |
521 | } |
522 | |
523 | static int intel_pstate_cppc_get_scaling(int cpu) |
524 | { |
525 | return core_get_scaling(); |
526 | } |
527 | #endif /* CONFIG_ACPI_CPPC_LIB */ |
528 | |
529 | /** |
530 | * intel_pstate_hybrid_hwp_adjust - Calibrate HWP performance levels. |
531 | * @cpu: Target CPU. |
532 | * |
533 | * On hybrid processors, HWP may expose more performance levels than there are |
534 | * P-states accessible through the PERF_CTL interface. If that happens, the |
535 | * scaling factor between HWP performance levels and CPU frequency will be less |
536 | * than the scaling factor between P-state values and CPU frequency. |
537 | * |
538 | * In that case, adjust the CPU parameters used in computations accordingly. |
539 | */ |
540 | static void intel_pstate_hybrid_hwp_adjust(struct cpudata *cpu) |
541 | { |
542 | int perf_ctl_max_phys = cpu->pstate.max_pstate_physical; |
543 | int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling; |
544 | int perf_ctl_turbo = pstate_funcs.get_turbo(cpu->cpu); |
545 | int scaling = cpu->pstate.scaling; |
546 | |
547 | pr_debug("CPU%d: perf_ctl_max_phys = %d\n" , cpu->cpu, perf_ctl_max_phys); |
548 | pr_debug("CPU%d: perf_ctl_turbo = %d\n" , cpu->cpu, perf_ctl_turbo); |
549 | pr_debug("CPU%d: perf_ctl_scaling = %d\n" , cpu->cpu, perf_ctl_scaling); |
550 | pr_debug("CPU%d: HWP_CAP guaranteed = %d\n" , cpu->cpu, cpu->pstate.max_pstate); |
551 | pr_debug("CPU%d: HWP_CAP highest = %d\n" , cpu->cpu, cpu->pstate.turbo_pstate); |
552 | pr_debug("CPU%d: HWP-to-frequency scaling factor: %d\n" , cpu->cpu, scaling); |
553 | |
554 | cpu->pstate.turbo_freq = rounddown(cpu->pstate.turbo_pstate * scaling, |
555 | perf_ctl_scaling); |
556 | cpu->pstate.max_freq = rounddown(cpu->pstate.max_pstate * scaling, |
557 | perf_ctl_scaling); |
558 | |
559 | cpu->pstate.max_pstate_physical = |
560 | DIV_ROUND_UP(perf_ctl_max_phys * perf_ctl_scaling, |
561 | scaling); |
562 | |
563 | cpu->pstate.min_freq = cpu->pstate.min_pstate * perf_ctl_scaling; |
564 | /* |
565 | * Cast the min P-state value retrieved via pstate_funcs.get_min() to |
566 | * the effective range of HWP performance levels. |
567 | */ |
568 | cpu->pstate.min_pstate = DIV_ROUND_UP(cpu->pstate.min_freq, scaling); |
569 | } |
570 | |
571 | static inline void update_turbo_state(void) |
572 | { |
573 | u64 misc_en; |
574 | |
575 | rdmsrl(MSR_IA32_MISC_ENABLE, misc_en); |
576 | global.turbo_disabled = misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE; |
577 | } |
578 | |
579 | static int min_perf_pct_min(void) |
580 | { |
581 | struct cpudata *cpu = all_cpu_data[0]; |
582 | int turbo_pstate = cpu->pstate.turbo_pstate; |
583 | |
584 | return turbo_pstate ? |
585 | (cpu->pstate.min_pstate * 100 / turbo_pstate) : 0; |
586 | } |
587 | |
588 | static s16 intel_pstate_get_epb(struct cpudata *cpu_data) |
589 | { |
590 | u64 epb; |
591 | int ret; |
592 | |
593 | if (!boot_cpu_has(X86_FEATURE_EPB)) |
594 | return -ENXIO; |
595 | |
	ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
597 | if (ret) |
598 | return (s16)ret; |
599 | |
600 | return (s16)(epb & 0x0f); |
601 | } |
602 | |
603 | static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data) |
604 | { |
605 | s16 epp; |
606 | |
607 | if (boot_cpu_has(X86_FEATURE_HWP_EPP)) { |
608 | /* |
		 * When hwp_req_data is 0, the caller did not read
		 * MSR_HWP_REQUEST, so read it here to get the EPP.
611 | */ |
612 | if (!hwp_req_data) { |
			epp = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST,
					    &hwp_req_data);
615 | if (epp) |
616 | return epp; |
617 | } |
618 | epp = (hwp_req_data >> 24) & 0xff; |
619 | } else { |
620 | /* When there is no EPP present, HWP uses EPB settings */ |
621 | epp = intel_pstate_get_epb(cpu_data); |
622 | } |
623 | |
624 | return epp; |
625 | } |
626 | |
627 | static int intel_pstate_set_epb(int cpu, s16 pref) |
628 | { |
629 | u64 epb; |
630 | int ret; |
631 | |
632 | if (!boot_cpu_has(X86_FEATURE_EPB)) |
633 | return -ENXIO; |
634 | |
	ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
636 | if (ret) |
637 | return ret; |
638 | |
639 | epb = (epb & ~0x0f) | pref; |
	wrmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, epb);
641 | |
642 | return 0; |
643 | } |
644 | |
645 | /* |
646 | * EPP/EPB display strings corresponding to EPP index in the |
647 | * energy_perf_strings[] |
648 | * index String |
649 | *------------------------------------- |
650 | * 0 default |
651 | * 1 performance |
652 | * 2 balance_performance |
653 | * 3 balance_power |
654 | * 4 power |
655 | */ |
656 | |
657 | enum energy_perf_value_index { |
658 | EPP_INDEX_DEFAULT = 0, |
659 | EPP_INDEX_PERFORMANCE, |
660 | EPP_INDEX_BALANCE_PERFORMANCE, |
661 | EPP_INDEX_BALANCE_POWERSAVE, |
662 | EPP_INDEX_POWERSAVE, |
663 | }; |
664 | |
665 | static const char * const energy_perf_strings[] = { |
666 | [EPP_INDEX_DEFAULT] = "default" , |
667 | [EPP_INDEX_PERFORMANCE] = "performance" , |
668 | [EPP_INDEX_BALANCE_PERFORMANCE] = "balance_performance" , |
669 | [EPP_INDEX_BALANCE_POWERSAVE] = "balance_power" , |
670 | [EPP_INDEX_POWERSAVE] = "power" , |
671 | NULL |
672 | }; |
673 | static unsigned int epp_values[] = { |
674 | [EPP_INDEX_DEFAULT] = 0, /* Unused index */ |
675 | [EPP_INDEX_PERFORMANCE] = HWP_EPP_PERFORMANCE, |
676 | [EPP_INDEX_BALANCE_PERFORMANCE] = HWP_EPP_BALANCE_PERFORMANCE, |
677 | [EPP_INDEX_BALANCE_POWERSAVE] = HWP_EPP_BALANCE_POWERSAVE, |
678 | [EPP_INDEX_POWERSAVE] = HWP_EPP_POWERSAVE, |
679 | }; |
680 | |
681 | static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data, int *raw_epp) |
682 | { |
683 | s16 epp; |
684 | int index = -EINVAL; |
685 | |
686 | *raw_epp = 0; |
	epp = intel_pstate_get_epp(cpu_data, 0);
688 | if (epp < 0) |
689 | return epp; |
690 | |
691 | if (boot_cpu_has(X86_FEATURE_HWP_EPP)) { |
692 | if (epp == epp_values[EPP_INDEX_PERFORMANCE]) |
693 | return EPP_INDEX_PERFORMANCE; |
694 | if (epp == epp_values[EPP_INDEX_BALANCE_PERFORMANCE]) |
695 | return EPP_INDEX_BALANCE_PERFORMANCE; |
696 | if (epp == epp_values[EPP_INDEX_BALANCE_POWERSAVE]) |
697 | return EPP_INDEX_BALANCE_POWERSAVE; |
698 | if (epp == epp_values[EPP_INDEX_POWERSAVE]) |
699 | return EPP_INDEX_POWERSAVE; |
700 | *raw_epp = epp; |
701 | return 0; |
702 | } else if (boot_cpu_has(X86_FEATURE_EPB)) { |
703 | /* |
704 | * Range: |
705 | * 0x00-0x03 : Performance |
706 | * 0x04-0x07 : Balance performance |
707 | * 0x08-0x0B : Balance power |
708 | * 0x0C-0x0F : Power |
		 * The EPB is a 4-bit value, but our ranges restrict the
		 * values that can be set, so effectively only the top two
		 * bits are used here.
712 | */ |
713 | index = (epp >> 2) + 1; |
714 | } |
715 | |
716 | return index; |
717 | } |
718 | |
719 | static int intel_pstate_set_epp(struct cpudata *cpu, u32 epp) |
720 | { |
721 | int ret; |
722 | |
723 | /* |
724 | * Use the cached HWP Request MSR value, because in the active mode the |
725 | * register itself may be updated by intel_pstate_hwp_boost_up() or |
726 | * intel_pstate_hwp_boost_down() at any time. |
727 | */ |
728 | u64 value = READ_ONCE(cpu->hwp_req_cached); |
729 | |
730 | value &= ~GENMASK_ULL(31, 24); |
731 | value |= (u64)epp << 24; |
732 | /* |
733 | * The only other updater of hwp_req_cached in the active mode, |
734 | * intel_pstate_hwp_set(), is called under the same lock as this |
735 | * function, so it cannot run in parallel with the update below. |
736 | */ |
737 | WRITE_ONCE(cpu->hwp_req_cached, value); |
	ret = wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
739 | if (!ret) |
740 | cpu->epp_cached = epp; |
741 | |
742 | return ret; |
743 | } |
744 | |
745 | static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data, |
746 | int pref_index, bool use_raw, |
747 | u32 raw_epp) |
748 | { |
749 | int epp = -EINVAL; |
750 | int ret; |
751 | |
752 | if (!pref_index) |
753 | epp = cpu_data->epp_default; |
754 | |
755 | if (boot_cpu_has(X86_FEATURE_HWP_EPP)) { |
756 | if (use_raw) |
757 | epp = raw_epp; |
758 | else if (epp == -EINVAL) |
759 | epp = epp_values[pref_index]; |
760 | |
761 | /* |
762 | * To avoid confusion, refuse to set EPP to any values different |
763 | * from 0 (performance) if the current policy is "performance", |
764 | * because those values would be overridden. |
765 | */ |
766 | if (epp > 0 && cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) |
767 | return -EBUSY; |
768 | |
		ret = intel_pstate_set_epp(cpu_data, epp);
770 | } else { |
771 | if (epp == -EINVAL) |
772 | epp = (pref_index - 1) << 2; |
		ret = intel_pstate_set_epb(cpu_data->cpu, epp);
774 | } |
775 | |
776 | return ret; |
777 | } |
778 | |
779 | static ssize_t show_energy_performance_available_preferences( |
780 | struct cpufreq_policy *policy, char *buf) |
781 | { |
782 | int i = 0; |
783 | int ret = 0; |
784 | |
785 | while (energy_perf_strings[i] != NULL) |
		ret += sprintf(&buf[ret], "%s ", energy_perf_strings[i++]);

	ret += sprintf(&buf[ret], "\n");
789 | |
790 | return ret; |
791 | } |
792 | |
793 | cpufreq_freq_attr_ro(energy_performance_available_preferences); |
794 | |
795 | static struct cpufreq_driver intel_pstate; |
796 | |
797 | static ssize_t store_energy_performance_preference( |
798 | struct cpufreq_policy *policy, const char *buf, size_t count) |
799 | { |
800 | struct cpudata *cpu = all_cpu_data[policy->cpu]; |
801 | char str_preference[21]; |
802 | bool raw = false; |
803 | ssize_t ret; |
804 | u32 epp = 0; |
805 | |
806 | ret = sscanf(buf, "%20s" , str_preference); |
807 | if (ret != 1) |
808 | return -EINVAL; |
809 | |
	ret = match_string(energy_perf_strings, -1, str_preference);
811 | if (ret < 0) { |
812 | if (!boot_cpu_has(X86_FEATURE_HWP_EPP)) |
813 | return ret; |
814 | |
		ret = kstrtouint(buf, 10, &epp);
816 | if (ret) |
817 | return ret; |
818 | |
819 | if (epp > 255) |
820 | return -EINVAL; |
821 | |
822 | raw = true; |
823 | } |
824 | |
825 | /* |
826 | * This function runs with the policy R/W semaphore held, which |
827 | * guarantees that the driver pointer will not change while it is |
828 | * running. |
829 | */ |
830 | if (!intel_pstate_driver) |
831 | return -EAGAIN; |
832 | |
833 | mutex_lock(&intel_pstate_limits_lock); |
834 | |
835 | if (intel_pstate_driver == &intel_pstate) { |
		ret = intel_pstate_set_energy_pref_index(cpu, ret, raw, epp);
837 | } else { |
838 | /* |
839 | * In the passive mode the governor needs to be stopped on the |
840 | * target CPU before the EPP update and restarted after it, |
841 | * which is super-heavy-weight, so make sure it is worth doing |
842 | * upfront. |
843 | */ |
844 | if (!raw) |
845 | epp = ret ? epp_values[ret] : cpu->epp_default; |
846 | |
847 | if (cpu->epp_cached != epp) { |
848 | int err; |
849 | |
850 | cpufreq_stop_governor(policy); |
851 | ret = intel_pstate_set_epp(cpu, epp); |
852 | err = cpufreq_start_governor(policy); |
853 | if (!ret) |
854 | ret = err; |
855 | } else { |
856 | ret = 0; |
857 | } |
858 | } |
859 | |
	mutex_unlock(&intel_pstate_limits_lock);
861 | |
862 | return ret ?: count; |
863 | } |
864 | |
865 | static ssize_t show_energy_performance_preference( |
866 | struct cpufreq_policy *policy, char *buf) |
867 | { |
868 | struct cpudata *cpu_data = all_cpu_data[policy->cpu]; |
869 | int preference, raw_epp; |
870 | |
	preference = intel_pstate_get_energy_pref_index(cpu_data, &raw_epp);
872 | if (preference < 0) |
873 | return preference; |
874 | |
875 | if (raw_epp) |
		return sprintf(buf, "%d\n", raw_epp);
	else
		return sprintf(buf, "%s\n", energy_perf_strings[preference]);
879 | } |
880 | |
881 | cpufreq_freq_attr_rw(energy_performance_preference); |
882 | |
883 | static ssize_t show_base_frequency(struct cpufreq_policy *policy, char *buf) |
884 | { |
885 | struct cpudata *cpu = all_cpu_data[policy->cpu]; |
886 | int ratio, freq; |
887 | |
	ratio = intel_pstate_get_cppc_guaranteed(policy->cpu);
	if (ratio <= 0) {
		u64 cap;

		rdmsrl_on_cpu(policy->cpu, MSR_HWP_CAPABILITIES, &cap);
893 | ratio = HWP_GUARANTEED_PERF(cap); |
894 | } |
895 | |
896 | freq = ratio * cpu->pstate.scaling; |
897 | if (cpu->pstate.scaling != cpu->pstate.perf_ctl_scaling) |
898 | freq = rounddown(freq, cpu->pstate.perf_ctl_scaling); |
899 | |
	return sprintf(buf, "%d\n", freq);
901 | } |
902 | |
903 | cpufreq_freq_attr_ro(base_frequency); |
904 | |
905 | static struct freq_attr *hwp_cpufreq_attrs[] = { |
906 | &energy_performance_preference, |
907 | &energy_performance_available_preferences, |
908 | &base_frequency, |
909 | NULL, |
910 | }; |
911 | |
912 | static void __intel_pstate_get_hwp_cap(struct cpudata *cpu) |
913 | { |
914 | u64 cap; |
915 | |
	rdmsrl_on_cpu(cpu->cpu, MSR_HWP_CAPABILITIES, &cap);
917 | WRITE_ONCE(cpu->hwp_cap_cached, cap); |
918 | cpu->pstate.max_pstate = HWP_GUARANTEED_PERF(cap); |
919 | cpu->pstate.turbo_pstate = HWP_HIGHEST_PERF(cap); |
920 | } |
921 | |
922 | static void intel_pstate_get_hwp_cap(struct cpudata *cpu) |
923 | { |
924 | int scaling = cpu->pstate.scaling; |
925 | |
926 | __intel_pstate_get_hwp_cap(cpu); |
927 | |
928 | cpu->pstate.max_freq = cpu->pstate.max_pstate * scaling; |
929 | cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * scaling; |
930 | if (scaling != cpu->pstate.perf_ctl_scaling) { |
931 | int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling; |
932 | |
933 | cpu->pstate.max_freq = rounddown(cpu->pstate.max_freq, |
934 | perf_ctl_scaling); |
935 | cpu->pstate.turbo_freq = rounddown(cpu->pstate.turbo_freq, |
936 | perf_ctl_scaling); |
937 | } |
938 | } |
939 | |
940 | static void intel_pstate_hwp_set(unsigned int cpu) |
941 | { |
942 | struct cpudata *cpu_data = all_cpu_data[cpu]; |
943 | int max, min; |
944 | u64 value; |
945 | s16 epp; |
946 | |
947 | max = cpu_data->max_perf_ratio; |
948 | min = cpu_data->min_perf_ratio; |
949 | |
950 | if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) |
951 | min = max; |
952 | |
	rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
954 | |
955 | value &= ~HWP_MIN_PERF(~0L); |
956 | value |= HWP_MIN_PERF(min); |
957 | |
958 | value &= ~HWP_MAX_PERF(~0L); |
959 | value |= HWP_MAX_PERF(max); |
960 | |
961 | if (cpu_data->epp_policy == cpu_data->policy) |
962 | goto skip_epp; |
963 | |
964 | cpu_data->epp_policy = cpu_data->policy; |
965 | |
966 | if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) { |
		epp = intel_pstate_get_epp(cpu_data, value);
		cpu_data->epp_powersave = epp;
		/* If the EPP read failed, don't try to write */
970 | if (epp < 0) |
971 | goto skip_epp; |
972 | |
973 | epp = 0; |
974 | } else { |
		/* Skip setting EPP when the saved value is invalid. */
		if (cpu_data->epp_powersave < 0)
			goto skip_epp;

		/*
		 * No need to restore EPP when it is not zero. A nonzero
		 * value means one of the following:
		 * - the policy has not changed
		 * - the user has changed it manually
		 * - there was an error reading the EPB
		 */
		epp = intel_pstate_get_epp(cpu_data, value);
987 | if (epp) |
988 | goto skip_epp; |
989 | |
990 | epp = cpu_data->epp_powersave; |
991 | } |
992 | if (boot_cpu_has(X86_FEATURE_HWP_EPP)) { |
993 | value &= ~GENMASK_ULL(31, 24); |
994 | value |= (u64)epp << 24; |
995 | } else { |
		intel_pstate_set_epb(cpu, epp);
	}
skip_epp:
	WRITE_ONCE(cpu_data->hwp_req_cached, value);
	wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
1001 | } |
1002 | |
1003 | static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata); |
1004 | |
1005 | static void intel_pstate_hwp_offline(struct cpudata *cpu) |
1006 | { |
1007 | u64 value = READ_ONCE(cpu->hwp_req_cached); |
1008 | int min_perf; |
1009 | |
	intel_pstate_disable_hwp_interrupt(cpu);
1011 | |
1012 | if (boot_cpu_has(X86_FEATURE_HWP_EPP)) { |
1013 | /* |
1014 | * In case the EPP has been set to "performance" by the |
1015 | * active mode "performance" scaling algorithm, replace that |
1016 | * temporary value with the cached EPP one. |
1017 | */ |
1018 | value &= ~GENMASK_ULL(31, 24); |
1019 | value |= HWP_ENERGY_PERF_PREFERENCE(cpu->epp_cached); |
1020 | /* |
1021 | * However, make sure that EPP will be set to "performance" when |
1022 | * the CPU is brought back online again and the "performance" |
1023 | * scaling algorithm is still in effect. |
1024 | */ |
1025 | cpu->epp_policy = CPUFREQ_POLICY_UNKNOWN; |
1026 | } |
1027 | |
1028 | /* |
1029 | * Clear the desired perf field in the cached HWP request value to |
1030 | * prevent nonzero desired values from being leaked into the active |
1031 | * mode. |
1032 | */ |
1033 | value &= ~HWP_DESIRED_PERF(~0L); |
1034 | WRITE_ONCE(cpu->hwp_req_cached, value); |
1035 | |
1036 | value &= ~GENMASK_ULL(31, 0); |
1037 | min_perf = HWP_LOWEST_PERF(READ_ONCE(cpu->hwp_cap_cached)); |
1038 | |
1039 | /* Set hwp_max = hwp_min */ |
1040 | value |= HWP_MAX_PERF(min_perf); |
1041 | value |= HWP_MIN_PERF(min_perf); |
1042 | |
1043 | /* Set EPP to min */ |
1044 | if (boot_cpu_has(X86_FEATURE_HWP_EPP)) |
1045 | value |= HWP_ENERGY_PERF_PREFERENCE(HWP_EPP_POWERSAVE); |
1046 | |
	wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
1048 | } |
1049 | |
1050 | #define POWER_CTL_EE_ENABLE 1 |
1051 | #define POWER_CTL_EE_DISABLE 2 |
1052 | |
1053 | static int power_ctl_ee_state; |
1054 | |
1055 | static void set_power_ctl_ee_state(bool input) |
1056 | { |
1057 | u64 power_ctl; |
1058 | |
1059 | mutex_lock(&intel_pstate_driver_lock); |
1060 | rdmsrl(MSR_IA32_POWER_CTL, power_ctl); |
1061 | if (input) { |
1062 | power_ctl &= ~BIT(MSR_IA32_POWER_CTL_BIT_EE); |
1063 | power_ctl_ee_state = POWER_CTL_EE_ENABLE; |
1064 | } else { |
1065 | power_ctl |= BIT(MSR_IA32_POWER_CTL_BIT_EE); |
1066 | power_ctl_ee_state = POWER_CTL_EE_DISABLE; |
1067 | } |
	wrmsrl(MSR_IA32_POWER_CTL, power_ctl);
	mutex_unlock(&intel_pstate_driver_lock);
1070 | } |
1071 | |
1072 | static void intel_pstate_hwp_enable(struct cpudata *cpudata); |
1073 | |
1074 | static void intel_pstate_hwp_reenable(struct cpudata *cpu) |
1075 | { |
	intel_pstate_hwp_enable(cpu);
	wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, READ_ONCE(cpu->hwp_req_cached));
1078 | } |
1079 | |
1080 | static int intel_pstate_suspend(struct cpufreq_policy *policy) |
1081 | { |
1082 | struct cpudata *cpu = all_cpu_data[policy->cpu]; |
1083 | |
1084 | pr_debug("CPU %d suspending\n" , cpu->cpu); |
1085 | |
1086 | cpu->suspended = true; |
1087 | |
1088 | /* disable HWP interrupt and cancel any pending work */ |
	intel_pstate_disable_hwp_interrupt(cpu);
1090 | |
1091 | return 0; |
1092 | } |
1093 | |
1094 | static int intel_pstate_resume(struct cpufreq_policy *policy) |
1095 | { |
1096 | struct cpudata *cpu = all_cpu_data[policy->cpu]; |
1097 | |
1098 | pr_debug("CPU %d resuming\n" , cpu->cpu); |
1099 | |
1100 | /* Only restore if the system default is changed */ |
1101 | if (power_ctl_ee_state == POWER_CTL_EE_ENABLE) |
1102 | set_power_ctl_ee_state(true); |
1103 | else if (power_ctl_ee_state == POWER_CTL_EE_DISABLE) |
1104 | set_power_ctl_ee_state(false); |
1105 | |
1106 | if (cpu->suspended && hwp_active) { |
1107 | mutex_lock(&intel_pstate_limits_lock); |
1108 | |
1109 | /* Re-enable HWP, because "online" has not done that. */ |
1110 | intel_pstate_hwp_reenable(cpu); |
1111 | |
		mutex_unlock(&intel_pstate_limits_lock);
1113 | } |
1114 | |
1115 | cpu->suspended = false; |
1116 | |
1117 | return 0; |
1118 | } |
1119 | |
1120 | static void intel_pstate_update_policies(void) |
1121 | { |
1122 | int cpu; |
1123 | |
1124 | for_each_possible_cpu(cpu) |
1125 | cpufreq_update_policy(cpu); |
1126 | } |
1127 | |
1128 | static void __intel_pstate_update_max_freq(struct cpudata *cpudata, |
1129 | struct cpufreq_policy *policy) |
1130 | { |
1131 | policy->cpuinfo.max_freq = global.turbo_disabled_mf ? |
1132 | cpudata->pstate.max_freq : cpudata->pstate.turbo_freq; |
1133 | refresh_frequency_limits(policy); |
1134 | } |
1135 | |
1136 | static void intel_pstate_update_max_freq(unsigned int cpu) |
1137 | { |
1138 | struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu); |
1139 | |
1140 | if (!policy) |
1141 | return; |
1142 | |
	__intel_pstate_update_max_freq(all_cpu_data[cpu], policy);
1144 | |
1145 | cpufreq_cpu_release(policy); |
1146 | } |
1147 | |
1148 | static void intel_pstate_update_limits(unsigned int cpu) |
1149 | { |
1150 | mutex_lock(&intel_pstate_driver_lock); |
1151 | |
1152 | update_turbo_state(); |
1153 | /* |
1154 | * If turbo has been turned on or off globally, policy limits for |
1155 | * all CPUs need to be updated to reflect that. |
1156 | */ |
1157 | if (global.turbo_disabled_mf != global.turbo_disabled) { |
1158 | global.turbo_disabled_mf = global.turbo_disabled; |
		arch_set_max_freq_ratio(global.turbo_disabled);
1160 | for_each_possible_cpu(cpu) |
1161 | intel_pstate_update_max_freq(cpu); |
1162 | } else { |
1163 | cpufreq_update_policy(cpu); |
1164 | } |
1165 | |
	mutex_unlock(&intel_pstate_driver_lock);
1167 | } |
1168 | |
1169 | /************************** sysfs begin ************************/ |
1170 | #define show_one(file_name, object) \ |
1171 | static ssize_t show_##file_name \ |
1172 | (struct kobject *kobj, struct kobj_attribute *attr, char *buf) \ |
1173 | { \ |
1174 | return sprintf(buf, "%u\n", global.object); \ |
1175 | } |
1176 | |
1177 | static ssize_t intel_pstate_show_status(char *buf); |
1178 | static int intel_pstate_update_status(const char *buf, size_t size); |
1179 | |
1180 | static ssize_t show_status(struct kobject *kobj, |
1181 | struct kobj_attribute *attr, char *buf) |
1182 | { |
1183 | ssize_t ret; |
1184 | |
1185 | mutex_lock(&intel_pstate_driver_lock); |
1186 | ret = intel_pstate_show_status(buf); |
	mutex_unlock(&intel_pstate_driver_lock);
1188 | |
1189 | return ret; |
1190 | } |
1191 | |
1192 | static ssize_t store_status(struct kobject *a, struct kobj_attribute *b, |
1193 | const char *buf, size_t count) |
1194 | { |
	char *p = memchr(buf, '\n', count);
1196 | int ret; |
1197 | |
1198 | mutex_lock(&intel_pstate_driver_lock); |
	ret = intel_pstate_update_status(buf, p ? p - buf : count);
	mutex_unlock(&intel_pstate_driver_lock);
1201 | |
1202 | return ret < 0 ? ret : count; |
1203 | } |
1204 | |
1205 | static ssize_t show_turbo_pct(struct kobject *kobj, |
1206 | struct kobj_attribute *attr, char *buf) |
1207 | { |
1208 | struct cpudata *cpu; |
1209 | int total, no_turbo, turbo_pct; |
1210 | uint32_t turbo_fp; |
1211 | |
1212 | mutex_lock(&intel_pstate_driver_lock); |
1213 | |
1214 | if (!intel_pstate_driver) { |
		mutex_unlock(&intel_pstate_driver_lock);
1216 | return -EAGAIN; |
1217 | } |
1218 | |
1219 | cpu = all_cpu_data[0]; |
1220 | |
1221 | total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1; |
1222 | no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1; |
	turbo_fp = div_fp(no_turbo, total);
1224 | turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100))); |
1225 | |
	mutex_unlock(&intel_pstate_driver_lock);

	return sprintf(buf, "%u\n", turbo_pct);
1229 | } |
1230 | |
1231 | static ssize_t show_num_pstates(struct kobject *kobj, |
1232 | struct kobj_attribute *attr, char *buf) |
1233 | { |
1234 | struct cpudata *cpu; |
1235 | int total; |
1236 | |
1237 | mutex_lock(&intel_pstate_driver_lock); |
1238 | |
1239 | if (!intel_pstate_driver) { |
		mutex_unlock(&intel_pstate_driver_lock);
1241 | return -EAGAIN; |
1242 | } |
1243 | |
1244 | cpu = all_cpu_data[0]; |
1245 | total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1; |
1246 | |
	mutex_unlock(&intel_pstate_driver_lock);

	return sprintf(buf, "%u\n", total);
1250 | } |
1251 | |
1252 | static ssize_t show_no_turbo(struct kobject *kobj, |
1253 | struct kobj_attribute *attr, char *buf) |
1254 | { |
1255 | ssize_t ret; |
1256 | |
1257 | mutex_lock(&intel_pstate_driver_lock); |
1258 | |
1259 | if (!intel_pstate_driver) { |
		mutex_unlock(&intel_pstate_driver_lock);
1261 | return -EAGAIN; |
1262 | } |
1263 | |
1264 | update_turbo_state(); |
1265 | if (global.turbo_disabled) |
		ret = sprintf(buf, "%u\n", global.turbo_disabled);
	else
		ret = sprintf(buf, "%u\n", global.no_turbo);

	mutex_unlock(&intel_pstate_driver_lock);
1271 | |
1272 | return ret; |
1273 | } |
1274 | |
1275 | static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b, |
1276 | const char *buf, size_t count) |
1277 | { |
1278 | unsigned int input; |
1279 | int ret; |
1280 | |
1281 | ret = sscanf(buf, "%u" , &input); |
1282 | if (ret != 1) |
1283 | return -EINVAL; |
1284 | |
1285 | mutex_lock(&intel_pstate_driver_lock); |
1286 | |
1287 | if (!intel_pstate_driver) { |
		mutex_unlock(&intel_pstate_driver_lock);
1289 | return -EAGAIN; |
1290 | } |
1291 | |
1292 | mutex_lock(&intel_pstate_limits_lock); |
1293 | |
1294 | update_turbo_state(); |
1295 | if (global.turbo_disabled) { |
1296 | pr_notice_once("Turbo disabled by BIOS or unavailable on processor\n" ); |
1297 | mutex_unlock(lock: &intel_pstate_limits_lock); |
1298 | mutex_unlock(lock: &intel_pstate_driver_lock); |
1299 | return -EPERM; |
1300 | } |
1301 | |
1302 | global.no_turbo = clamp_t(int, input, 0, 1); |
1303 | |
1304 | if (global.no_turbo) { |
1305 | struct cpudata *cpu = all_cpu_data[0]; |
1306 | int pct = cpu->pstate.max_pstate * 100 / cpu->pstate.turbo_pstate; |
1307 | |
1308 | /* Squash the global minimum into the permitted range. */ |
1309 | if (global.min_perf_pct > pct) |
1310 | global.min_perf_pct = pct; |
1311 | } |
1312 | |
	mutex_unlock(&intel_pstate_limits_lock);

	intel_pstate_update_policies();
	arch_set_max_freq_ratio(global.no_turbo);

	mutex_unlock(&intel_pstate_driver_lock);
1319 | |
1320 | return count; |
1321 | } |
1322 | |
1323 | static void update_qos_request(enum freq_qos_req_type type) |
1324 | { |
1325 | struct freq_qos_request *req; |
1326 | struct cpufreq_policy *policy; |
1327 | int i; |
1328 | |
1329 | for_each_possible_cpu(i) { |
1330 | struct cpudata *cpu = all_cpu_data[i]; |
1331 | unsigned int freq, perf_pct; |
1332 | |
		policy = cpufreq_cpu_get(i);
1334 | if (!policy) |
1335 | continue; |
1336 | |
1337 | req = policy->driver_data; |
1338 | cpufreq_cpu_put(policy); |
1339 | |
1340 | if (!req) |
1341 | continue; |
1342 | |
1343 | if (hwp_active) |
1344 | intel_pstate_get_hwp_cap(cpu); |
1345 | |
1346 | if (type == FREQ_QOS_MIN) { |
1347 | perf_pct = global.min_perf_pct; |
1348 | } else { |
1349 | req++; |
1350 | perf_pct = global.max_perf_pct; |
1351 | } |
1352 | |
1353 | freq = DIV_ROUND_UP(cpu->pstate.turbo_freq * perf_pct, 100); |
1354 | |
		if (freq_qos_update_request(req, freq) < 0)
			pr_warn("Failed to update freq constraint: CPU%d\n", i);
1357 | } |
1358 | } |
1359 | |
1360 | static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b, |
1361 | const char *buf, size_t count) |
1362 | { |
1363 | unsigned int input; |
1364 | int ret; |
1365 | |
1366 | ret = sscanf(buf, "%u" , &input); |
1367 | if (ret != 1) |
1368 | return -EINVAL; |
1369 | |
1370 | mutex_lock(&intel_pstate_driver_lock); |
1371 | |
1372 | if (!intel_pstate_driver) { |
		mutex_unlock(&intel_pstate_driver_lock);
1374 | return -EAGAIN; |
1375 | } |
1376 | |
1377 | mutex_lock(&intel_pstate_limits_lock); |
1378 | |
1379 | global.max_perf_pct = clamp_t(int, input, global.min_perf_pct, 100); |
1380 | |
	mutex_unlock(&intel_pstate_limits_lock);

	if (intel_pstate_driver == &intel_pstate)
		intel_pstate_update_policies();
	else
		update_qos_request(FREQ_QOS_MAX);

	mutex_unlock(&intel_pstate_driver_lock);
1389 | |
1390 | return count; |
1391 | } |
1392 | |
1393 | static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b, |
1394 | const char *buf, size_t count) |
1395 | { |
1396 | unsigned int input; |
1397 | int ret; |
1398 | |
1399 | ret = sscanf(buf, "%u" , &input); |
1400 | if (ret != 1) |
1401 | return -EINVAL; |
1402 | |
1403 | mutex_lock(&intel_pstate_driver_lock); |
1404 | |
1405 | if (!intel_pstate_driver) { |
		mutex_unlock(&intel_pstate_driver_lock);
1407 | return -EAGAIN; |
1408 | } |
1409 | |
1410 | mutex_lock(&intel_pstate_limits_lock); |
1411 | |
1412 | global.min_perf_pct = clamp_t(int, input, |
1413 | min_perf_pct_min(), global.max_perf_pct); |
1414 | |
	mutex_unlock(&intel_pstate_limits_lock);

	if (intel_pstate_driver == &intel_pstate)
		intel_pstate_update_policies();
	else
		update_qos_request(FREQ_QOS_MIN);

	mutex_unlock(&intel_pstate_driver_lock);
1423 | |
1424 | return count; |
1425 | } |
1426 | |
1427 | static ssize_t show_hwp_dynamic_boost(struct kobject *kobj, |
1428 | struct kobj_attribute *attr, char *buf) |
1429 | { |
	return sprintf(buf, "%u\n", hwp_boost);
1431 | } |
1432 | |
1433 | static ssize_t store_hwp_dynamic_boost(struct kobject *a, |
1434 | struct kobj_attribute *b, |
1435 | const char *buf, size_t count) |
1436 | { |
1437 | unsigned int input; |
1438 | int ret; |
1439 | |
	ret = kstrtouint(buf, 10, &input);
1441 | if (ret) |
1442 | return ret; |
1443 | |
1444 | mutex_lock(&intel_pstate_driver_lock); |
1445 | hwp_boost = !!input; |
1446 | intel_pstate_update_policies(); |
	mutex_unlock(&intel_pstate_driver_lock);
1448 | |
1449 | return count; |
1450 | } |
1451 | |
1452 | static ssize_t show_energy_efficiency(struct kobject *kobj, struct kobj_attribute *attr, |
1453 | char *buf) |
1454 | { |
1455 | u64 power_ctl; |
1456 | int enable; |
1457 | |
1458 | rdmsrl(MSR_IA32_POWER_CTL, power_ctl); |
1459 | enable = !!(power_ctl & BIT(MSR_IA32_POWER_CTL_BIT_EE)); |
	return sprintf(buf, "%d\n", !enable);
1461 | } |
1462 | |
1463 | static ssize_t store_energy_efficiency(struct kobject *a, struct kobj_attribute *b, |
1464 | const char *buf, size_t count) |
1465 | { |
1466 | bool input; |
1467 | int ret; |
1468 | |
	ret = kstrtobool(buf, &input);
1470 | if (ret) |
1471 | return ret; |
1472 | |
1473 | set_power_ctl_ee_state(input); |
1474 | |
1475 | return count; |
1476 | } |
1477 | |
1478 | show_one(max_perf_pct, max_perf_pct); |
1479 | show_one(min_perf_pct, min_perf_pct); |
1480 | |
1481 | define_one_global_rw(status); |
1482 | define_one_global_rw(no_turbo); |
1483 | define_one_global_rw(max_perf_pct); |
1484 | define_one_global_rw(min_perf_pct); |
1485 | define_one_global_ro(turbo_pct); |
1486 | define_one_global_ro(num_pstates); |
1487 | define_one_global_rw(hwp_dynamic_boost); |
1488 | define_one_global_rw(energy_efficiency); |
1489 | |
1490 | static struct attribute *intel_pstate_attributes[] = { |
1491 | &status.attr, |
1492 | &no_turbo.attr, |
1493 | NULL |
1494 | }; |
1495 | |
1496 | static const struct attribute_group intel_pstate_attr_group = { |
1497 | .attrs = intel_pstate_attributes, |
1498 | }; |
1499 | |
1500 | static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[]; |
1501 | |
1502 | static struct kobject *intel_pstate_kobject; |
1503 | |
1504 | static void __init intel_pstate_sysfs_expose_params(void) |
1505 | { |
	struct device *dev_root = bus_get_dev_root(&cpu_subsys);
1507 | int rc; |
1508 | |
1509 | if (dev_root) { |
		intel_pstate_kobject = kobject_create_and_add("intel_pstate", &dev_root->kobj);
		put_device(dev_root);
1512 | } |
1513 | if (WARN_ON(!intel_pstate_kobject)) |
1514 | return; |
1515 | |
	rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group);
1517 | if (WARN_ON(rc)) |
1518 | return; |
1519 | |
1520 | if (!boot_cpu_has(X86_FEATURE_HYBRID_CPU)) { |
		rc = sysfs_create_file(intel_pstate_kobject, &turbo_pct.attr);
		WARN_ON(rc);

		rc = sysfs_create_file(intel_pstate_kobject, &num_pstates.attr);
1525 | WARN_ON(rc); |
1526 | } |
1527 | |
1528 | /* |
1529 | * If per cpu limits are enforced there are no global limits, so |
1530 | * return without creating max/min_perf_pct attributes |
1531 | */ |
1532 | if (per_cpu_limits) |
1533 | return; |
1534 | |
	rc = sysfs_create_file(intel_pstate_kobject, &max_perf_pct.attr);
	WARN_ON(rc);

	rc = sysfs_create_file(intel_pstate_kobject, &min_perf_pct.attr);
	WARN_ON(rc);

	if (x86_match_cpu(intel_pstate_cpu_ee_disable_ids)) {
		rc = sysfs_create_file(intel_pstate_kobject, &energy_efficiency.attr);
		WARN_ON(rc);
1544 | } |
1545 | } |
1546 | |
1547 | static void __init intel_pstate_sysfs_remove(void) |
1548 | { |
1549 | if (!intel_pstate_kobject) |
1550 | return; |
1551 | |
	sysfs_remove_group(intel_pstate_kobject, &intel_pstate_attr_group);

	if (!boot_cpu_has(X86_FEATURE_HYBRID_CPU)) {
		sysfs_remove_file(intel_pstate_kobject, &num_pstates.attr);
		sysfs_remove_file(intel_pstate_kobject, &turbo_pct.attr);
	}

	if (!per_cpu_limits) {
		sysfs_remove_file(intel_pstate_kobject, &max_perf_pct.attr);
		sysfs_remove_file(intel_pstate_kobject, &min_perf_pct.attr);

		if (x86_match_cpu(intel_pstate_cpu_ee_disable_ids))
			sysfs_remove_file(intel_pstate_kobject, &energy_efficiency.attr);
1565 | } |
1566 | |
	kobject_put(intel_pstate_kobject);
1568 | } |
1569 | |
1570 | static void intel_pstate_sysfs_expose_hwp_dynamic_boost(void) |
1571 | { |
1572 | int rc; |
1573 | |
1574 | if (!hwp_active) |
1575 | return; |
1576 | |
	rc = sysfs_create_file(intel_pstate_kobject, &hwp_dynamic_boost.attr);
1578 | WARN_ON_ONCE(rc); |
1579 | } |
1580 | |
1581 | static void intel_pstate_sysfs_hide_hwp_dynamic_boost(void) |
1582 | { |
1583 | if (!hwp_active) |
1584 | return; |
1585 | |
	sysfs_remove_file(intel_pstate_kobject, &hwp_dynamic_boost.attr);
1587 | } |
1588 | |
1589 | /************************** sysfs end ************************/ |
1590 | |
1591 | static void intel_pstate_notify_work(struct work_struct *work) |
1592 | { |
1593 | struct cpudata *cpudata = |
1594 | container_of(to_delayed_work(work), struct cpudata, hwp_notify_work); |
	struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpudata->cpu);

	if (policy) {
		intel_pstate_get_hwp_cap(cpudata);
		__intel_pstate_update_max_freq(cpudata, policy);

		cpufreq_cpu_release(policy);
	}

	wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
1605 | } |
1606 | |
1607 | static DEFINE_SPINLOCK(hwp_notify_lock); |
1608 | static cpumask_t hwp_intr_enable_mask; |
1609 | |
1610 | void notify_hwp_interrupt(void) |
1611 | { |
1612 | unsigned int this_cpu = smp_processor_id(); |
1613 | struct cpudata *cpudata; |
1614 | unsigned long flags; |
1615 | u64 value; |
1616 | |
1617 | if (!READ_ONCE(hwp_active) || !boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) |
1618 | return; |
1619 | |
	rdmsrl_safe(MSR_HWP_STATUS, &value);
1621 | if (!(value & 0x01)) |
1622 | return; |
1623 | |
1624 | spin_lock_irqsave(&hwp_notify_lock, flags); |
1625 | |
	if (!cpumask_test_cpu(this_cpu, &hwp_intr_enable_mask))
1627 | goto ack_intr; |
1628 | |
1629 | /* |
	 * Currently all_cpu_data is never freed, and this point cannot be
	 * reached before it is allocated. But check anyway, for safety
	 * against future changes.
1633 | */ |
1634 | if (unlikely(!READ_ONCE(all_cpu_data))) |
1635 | goto ack_intr; |
1636 | |
1637 | /* |
	 * The free is done during cleanup, when cpufreq registration fails.
	 * We would not get here if it failed during init or a status switch,
	 * but check anyway for safety against future changes.
1641 | */ |
1642 | cpudata = READ_ONCE(all_cpu_data[this_cpu]); |
1643 | if (unlikely(!cpudata)) |
1644 | goto ack_intr; |
1645 | |
	schedule_delayed_work(&cpudata->hwp_notify_work, msecs_to_jiffies(10));

	spin_unlock_irqrestore(&hwp_notify_lock, flags);
1649 | |
1650 | return; |
1651 | |
1652 | ack_intr: |
	wrmsrl_safe(MSR_HWP_STATUS, 0);
	spin_unlock_irqrestore(&hwp_notify_lock, flags);
1655 | } |
1656 | |
1657 | static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata) |
1658 | { |
1659 | unsigned long flags; |
1660 | |
1661 | if (!boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) |
1662 | return; |
1663 | |
1664 | /* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */ |
	wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);

	spin_lock_irqsave(&hwp_notify_lock, flags);
	if (cpumask_test_and_clear_cpu(cpudata->cpu, &hwp_intr_enable_mask))
		cancel_delayed_work(&cpudata->hwp_notify_work);
	spin_unlock_irqrestore(&hwp_notify_lock, flags);
1671 | } |
1672 | |
1673 | static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata) |
1674 | { |
1675 | /* Enable HWP notification interrupt for guaranteed performance change */ |
1676 | if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) { |
1677 | unsigned long flags; |
1678 | |
1679 | spin_lock_irqsave(&hwp_notify_lock, flags); |
1680 | INIT_DELAYED_WORK(&cpudata->hwp_notify_work, intel_pstate_notify_work); |
		cpumask_set_cpu(cpudata->cpu, &hwp_intr_enable_mask);
		spin_unlock_irqrestore(&hwp_notify_lock, flags);

		/* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */
		wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x01);
		wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
1687 | } |
1688 | } |
1689 | |
1690 | static void intel_pstate_update_epp_defaults(struct cpudata *cpudata) |
1691 | { |
cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);
1693 | |
1694 | /* |
1695 | * If this CPU gen doesn't call for change in balance_perf |
1696 | * EPP return. |
1697 | */ |
1698 | if (epp_values[EPP_INDEX_BALANCE_PERFORMANCE] == HWP_EPP_BALANCE_PERFORMANCE) |
1699 | return; |
1700 | |
1701 | /* |
1702 | * If the EPP is set by firmware, which means that firmware enabled HWP |
1703 | * - Is equal or less than 0x80 (default balance_perf EPP) |
1704 | * - But less performance oriented than performance EPP |
1705 | * then use this as new balance_perf EPP. |
1706 | */ |
1707 | if (hwp_forced && cpudata->epp_default <= HWP_EPP_BALANCE_PERFORMANCE && |
1708 | cpudata->epp_default > HWP_EPP_PERFORMANCE) { |
1709 | epp_values[EPP_INDEX_BALANCE_PERFORMANCE] = cpudata->epp_default; |
1710 | return; |
1711 | } |
1712 | |
1713 | /* |
1714 | * Use hard coded value per gen to update the balance_perf |
1715 | * and default EPP. |
1716 | */ |
1717 | cpudata->epp_default = epp_values[EPP_INDEX_BALANCE_PERFORMANCE]; |
intel_pstate_set_epp(cpudata, cpudata->epp_default);
1719 | } |
1720 | |
1721 | static void intel_pstate_hwp_enable(struct cpudata *cpudata) |
1722 | { |
/* First disable the HWP notification interrupt until it is activated again */
if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);

wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
1728 | |
1729 | intel_pstate_enable_hwp_interrupt(cpudata); |
1730 | |
1731 | if (cpudata->epp_default >= 0) |
1732 | return; |
1733 | |
1734 | intel_pstate_update_epp_defaults(cpudata); |
1735 | } |
1736 | |
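/*
 * On Atom, the minimum and maximum non-turbo ratios are read from bits
 * 14:8 and 22:16 of MSR_ATOM_CORE_RATIOS, and the turbo ratio from bits
 * 6:0 of MSR_ATOM_CORE_TURBO_RATIOS.
 */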
1737 | static int atom_get_min_pstate(int not_used) |
1738 | { |
1739 | u64 value; |
1740 | |
1741 | rdmsrl(MSR_ATOM_CORE_RATIOS, value); |
1742 | return (value >> 8) & 0x7F; |
1743 | } |
1744 | |
1745 | static int atom_get_max_pstate(int not_used) |
1746 | { |
1747 | u64 value; |
1748 | |
1749 | rdmsrl(MSR_ATOM_CORE_RATIOS, value); |
1750 | return (value >> 16) & 0x7F; |
1751 | } |
1752 | |
1753 | static int atom_get_turbo_pstate(int not_used) |
1754 | { |
1755 | u64 value; |
1756 | |
1757 | rdmsrl(MSR_ATOM_CORE_TURBO_RATIOS, value); |
1758 | return value & 0x7F; |
1759 | } |
1760 | |
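/*
 * Build the PERF_CTL value for an Atom P-state: the ratio goes into bits
 * 15:8 and the matching voltage ID into bits 7:0, interpolated linearly
 * between vid.min and vid.max across the [min_pstate, max_pstate] range
 * (P-states above max_pstate use the dedicated turbo VID).
 */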
1761 | static u64 atom_get_val(struct cpudata *cpudata, int pstate) |
1762 | { |
1763 | u64 val; |
1764 | int32_t vid_fp; |
1765 | u32 vid; |
1766 | |
1767 | val = (u64)pstate << 8; |
1768 | if (global.no_turbo && !global.turbo_disabled) |
1769 | val |= (u64)1 << 32; |
1770 | |
vid_fp = cpudata->vid.min + mul_fp(
int_tofp(pstate - cpudata->pstate.min_pstate),
cpudata->vid.ratio);

vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
vid = ceiling_fp(vid_fp);
1777 | |
1778 | if (pstate > cpudata->pstate.max_pstate) |
1779 | vid = cpudata->vid.turbo; |
1780 | |
1781 | return val | vid; |
1782 | } |
1783 | |
1784 | static int silvermont_get_scaling(void) |
1785 | { |
1786 | u64 value; |
1787 | int i; |
1788 | /* Defined in Table 35-6 from SDM (Sept 2015) */ |
1789 | static int silvermont_freq_table[] = { |
1790 | 83300, 100000, 133300, 116700, 80000}; |
1791 | |
1792 | rdmsrl(MSR_FSB_FREQ, value); |
1793 | i = value & 0x7; |
1794 | WARN_ON(i > 4); |
1795 | |
1796 | return silvermont_freq_table[i]; |
1797 | } |
1798 | |
1799 | static int airmont_get_scaling(void) |
1800 | { |
1801 | u64 value; |
1802 | int i; |
1803 | /* Defined in Table 35-10 from SDM (Sept 2015) */ |
1804 | static int airmont_freq_table[] = { |
1805 | 83300, 100000, 133300, 116700, 80000, |
1806 | 93300, 90000, 88900, 87500}; |
1807 | |
1808 | rdmsrl(MSR_FSB_FREQ, value); |
1809 | i = value & 0xF; |
1810 | WARN_ON(i > 8); |
1811 | |
1812 | return airmont_freq_table[i]; |
1813 | } |
1814 | |
1815 | static void atom_get_vid(struct cpudata *cpudata) |
1816 | { |
1817 | u64 value; |
1818 | |
1819 | rdmsrl(MSR_ATOM_CORE_VIDS, value); |
1820 | cpudata->vid.min = int_tofp((value >> 8) & 0x7f); |
1821 | cpudata->vid.max = int_tofp((value >> 16) & 0x7f); |
cpudata->vid.ratio = div_fp(
cpudata->vid.max - cpudata->vid.min,
int_tofp(cpudata->pstate.max_pstate -
cpudata->pstate.min_pstate));
1826 | |
1827 | rdmsrl(MSR_ATOM_CORE_TURBO_VIDS, value); |
1828 | cpudata->vid.turbo = value & 0x7f; |
1829 | } |
1830 | |
1831 | static int core_get_min_pstate(int cpu) |
1832 | { |
1833 | u64 value; |
1834 | |
rdmsrl_on_cpu(cpu, MSR_PLATFORM_INFO, &value);
1836 | return (value >> 40) & 0xFF; |
1837 | } |
1838 | |
1839 | static int core_get_max_pstate_physical(int cpu) |
1840 | { |
1841 | u64 value; |
1842 | |
rdmsrl_on_cpu(cpu, MSR_PLATFORM_INFO, &value);
1844 | return (value >> 8) & 0xFF; |
1845 | } |
1846 | |
1847 | static int core_get_tdp_ratio(int cpu, u64 plat_info) |
1848 | { |
1849 | /* Check how many TDP levels present */ |
1850 | if (plat_info & 0x600000000) { |
1851 | u64 tdp_ctrl; |
1852 | u64 tdp_ratio; |
1853 | int tdp_msr; |
1854 | int err; |
1855 | |
1856 | /* Get the TDP level (0, 1, 2) to get ratios */ |
err = rdmsrl_safe_on_cpu(cpu, MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
1858 | if (err) |
1859 | return err; |
1860 | |
/* The TDP MSRs are contiguous, starting at 0x648 */
tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x03);
err = rdmsrl_safe_on_cpu(cpu, tdp_msr, &tdp_ratio);
1864 | if (err) |
1865 | return err; |
1866 | |
1867 | /* For level 1 and 2, bits[23:16] contain the ratio */ |
1868 | if (tdp_ctrl & 0x03) |
1869 | tdp_ratio >>= 16; |
1870 | |
1871 | tdp_ratio &= 0xff; /* ratios are only 8 bits long */ |
1872 | pr_debug("tdp_ratio %x\n" , (int)tdp_ratio); |
1873 | |
1874 | return (int)tdp_ratio; |
1875 | } |
1876 | |
1877 | return -ENXIO; |
1878 | } |
1879 | |
1880 | static int core_get_max_pstate(int cpu) |
1881 | { |
1882 | u64 tar; |
1883 | u64 plat_info; |
1884 | int max_pstate; |
1885 | int tdp_ratio; |
1886 | int err; |
1887 | |
rdmsrl_on_cpu(cpu, MSR_PLATFORM_INFO, &plat_info);
1889 | max_pstate = (plat_info >> 8) & 0xFF; |
1890 | |
1891 | tdp_ratio = core_get_tdp_ratio(cpu, plat_info); |
1892 | if (tdp_ratio <= 0) |
1893 | return max_pstate; |
1894 | |
1895 | if (hwp_active) { |
1896 | /* Turbo activation ratio is not used on HWP platforms */ |
1897 | return tdp_ratio; |
1898 | } |
1899 | |
err = rdmsrl_safe_on_cpu(cpu, MSR_TURBO_ACTIVATION_RATIO, &tar);
1901 | if (!err) { |
1902 | int tar_levels; |
1903 | |
1904 | /* Do some sanity checking for safety */ |
1905 | tar_levels = tar & 0xff; |
1906 | if (tdp_ratio - 1 == tar_levels) { |
1907 | max_pstate = tar_levels; |
1908 | pr_debug("max_pstate=TAC %x\n" , max_pstate); |
1909 | } |
1910 | } |
1911 | |
1912 | return max_pstate; |
1913 | } |
1914 | |
1915 | static int core_get_turbo_pstate(int cpu) |
1916 | { |
1917 | u64 value; |
1918 | int nont, ret; |
1919 | |
rdmsrl_on_cpu(cpu, MSR_TURBO_RATIO_LIMIT, &value);
1921 | nont = core_get_max_pstate(cpu); |
1922 | ret = (value) & 255; |
1923 | if (ret <= nont) |
1924 | ret = nont; |
1925 | return ret; |
1926 | } |
1927 | |
1928 | static u64 core_get_val(struct cpudata *cpudata, int pstate) |
1929 | { |
1930 | u64 val; |
1931 | |
1932 | val = (u64)pstate << 8; |
1933 | if (global.no_turbo && !global.turbo_disabled) |
1934 | val |= (u64)1 << 32; |
1935 | |
1936 | return val; |
1937 | } |
1938 | |
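/*
 * Xeon Phi (KNL/KNM) updates APERF and MPERF at a much lower rate, so the
 * sampled deltas are scaled by 2^10 when the busy fraction is computed in
 * get_target_pstate().
 */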
1939 | static int knl_get_aperf_mperf_shift(void) |
1940 | { |
1941 | return 10; |
1942 | } |
1943 | |
1944 | static int knl_get_turbo_pstate(int cpu) |
1945 | { |
1946 | u64 value; |
1947 | int nont, ret; |
1948 | |
rdmsrl_on_cpu(cpu, MSR_TURBO_RATIO_LIMIT, &value);
1950 | nont = core_get_max_pstate(cpu); |
1951 | ret = (((value) >> 8) & 0xFF); |
1952 | if (ret <= nont) |
1953 | ret = nont; |
1954 | return ret; |
1955 | } |
1956 | |
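/*
 * Read the native model ID of the current CPU from CPUID leaf 0x1A:
 * 0x40 identifies a P-core (Core), 0x20 an E-core (Atom), and 0 is
 * returned on CPUs without hybrid support.
 */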
1957 | static void hybrid_get_type(void *data) |
1958 | { |
1959 | u8 *cpu_type = data; |
1960 | |
1961 | *cpu_type = get_this_hybrid_cpu_type(); |
1962 | } |
1963 | |
1964 | static int hwp_get_cpu_scaling(int cpu) |
1965 | { |
1966 | u8 cpu_type = 0; |
1967 | |
smp_call_function_single(cpu, hybrid_get_type, &cpu_type, 1);
/* P-cores have a smaller perf level-to-frequency scaling factor. */
1970 | if (cpu_type == 0x40) |
1971 | return HYBRID_SCALING_FACTOR; |
1972 | |
1973 | /* Use default core scaling for E-cores */ |
1974 | if (cpu_type == 0x20) |
1975 | return core_get_scaling(); |
1976 | |
1977 | /* |
1978 | * If reached here, this system is either non-hybrid (like Tiger |
1979 | * Lake) or hybrid-capable (like Alder Lake or Raptor Lake) with |
1980 | * no E cores (in which case CPUID for hybrid support is 0). |
1981 | * |
1982 | * The CPPC nominal_frequency field is 0 for non-hybrid systems, |
1983 | * so the default core scaling will be used for them. |
1984 | */ |
1985 | return intel_pstate_cppc_get_scaling(cpu); |
1986 | } |
1987 | |
1988 | static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate) |
1989 | { |
trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
1991 | cpu->pstate.current_pstate = pstate; |
1992 | /* |
1993 | * Generally, there is no guarantee that this code will always run on |
1994 | * the CPU being updated, so force the register update to run on the |
1995 | * right CPU. |
1996 | */ |
wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
pstate_funcs.get_val(cpu, pstate));
1999 | } |
2000 | |
2001 | static void intel_pstate_set_min_pstate(struct cpudata *cpu) |
2002 | { |
intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
2004 | } |
2005 | |
2006 | static void intel_pstate_max_within_limits(struct cpudata *cpu) |
2007 | { |
2008 | int pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio); |
2009 | |
2010 | update_turbo_state(); |
2011 | intel_pstate_set_pstate(cpu, pstate); |
2012 | } |
2013 | |
2014 | static void intel_pstate_get_cpu_pstates(struct cpudata *cpu) |
2015 | { |
2016 | int perf_ctl_max_phys = pstate_funcs.get_max_physical(cpu->cpu); |
2017 | int perf_ctl_scaling = pstate_funcs.get_scaling(); |
2018 | |
2019 | cpu->pstate.min_pstate = pstate_funcs.get_min(cpu->cpu); |
2020 | cpu->pstate.max_pstate_physical = perf_ctl_max_phys; |
2021 | cpu->pstate.perf_ctl_scaling = perf_ctl_scaling; |
2022 | |
2023 | if (hwp_active && !hwp_mode_bdw) { |
2024 | __intel_pstate_get_hwp_cap(cpu); |
2025 | |
2026 | if (pstate_funcs.get_cpu_scaling) { |
2027 | cpu->pstate.scaling = pstate_funcs.get_cpu_scaling(cpu->cpu); |
2028 | if (cpu->pstate.scaling != perf_ctl_scaling) |
2029 | intel_pstate_hybrid_hwp_adjust(cpu); |
2030 | } else { |
2031 | cpu->pstate.scaling = perf_ctl_scaling; |
2032 | } |
2033 | } else { |
2034 | cpu->pstate.scaling = perf_ctl_scaling; |
2035 | cpu->pstate.max_pstate = pstate_funcs.get_max(cpu->cpu); |
2036 | cpu->pstate.turbo_pstate = pstate_funcs.get_turbo(cpu->cpu); |
2037 | } |
2038 | |
2039 | if (cpu->pstate.scaling == perf_ctl_scaling) { |
2040 | cpu->pstate.min_freq = cpu->pstate.min_pstate * perf_ctl_scaling; |
2041 | cpu->pstate.max_freq = cpu->pstate.max_pstate * perf_ctl_scaling; |
2042 | cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * perf_ctl_scaling; |
2043 | } |
2044 | |
2045 | if (pstate_funcs.get_aperf_mperf_shift) |
2046 | cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift(); |
2047 | |
2048 | if (pstate_funcs.get_vid) |
2049 | pstate_funcs.get_vid(cpu); |
2050 | |
2051 | intel_pstate_set_min_pstate(cpu); |
2052 | } |
2053 | |
2054 | /* |
2055 | * Long hold time will keep high perf limits for long time, |
2056 | * which negatively impacts perf/watt for some workloads, |
2057 | * like specpower. 3ms is based on experiements on some |
2058 | * workoads. |
2059 | */ |
2060 | static int hwp_boost_hold_time_ns = 3 * NSEC_PER_MSEC; |
2061 | |
2062 | static inline void intel_pstate_hwp_boost_up(struct cpudata *cpu) |
2063 | { |
2064 | u64 hwp_req = READ_ONCE(cpu->hwp_req_cached); |
2065 | u64 hwp_cap = READ_ONCE(cpu->hwp_cap_cached); |
2066 | u32 max_limit = (hwp_req & 0xff00) >> 8; |
2067 | u32 min_limit = (hwp_req & 0xff); |
2068 | u32 boost_level1; |
2069 | |
2070 | /* |
2071 | * Cases to consider (User changes via sysfs or boot time): |
2072 | * If, P0 (Turbo max) = P1 (Guaranteed max) = min: |
2073 | * No boost, return. |
2074 | * If, P0 (Turbo max) > P1 (Guaranteed max) = min: |
2075 | * Should result in one level boost only for P0. |
2076 | * If, P0 (Turbo max) = P1 (Guaranteed max) > min: |
2077 | * Should result in two level boost: |
2078 | * (min + p1)/2 and P1. |
2079 | * If, P0 (Turbo max) > P1 (Guaranteed max) > min: |
2080 | * Should result in three level boost: |
2081 | * (min + p1)/2, P1 and P0. |
2082 | */ |
2083 | |
2084 | /* If max and min are equal or already at max, nothing to boost */ |
2085 | if (max_limit == min_limit || cpu->hwp_boost_min >= max_limit) |
2086 | return; |
2087 | |
2088 | if (!cpu->hwp_boost_min) |
2089 | cpu->hwp_boost_min = min_limit; |
2090 | |
/* level at the halfway mark between min and guaranteed */
2092 | boost_level1 = (HWP_GUARANTEED_PERF(hwp_cap) + min_limit) >> 1; |
2093 | |
2094 | if (cpu->hwp_boost_min < boost_level1) |
2095 | cpu->hwp_boost_min = boost_level1; |
2096 | else if (cpu->hwp_boost_min < HWP_GUARANTEED_PERF(hwp_cap)) |
2097 | cpu->hwp_boost_min = HWP_GUARANTEED_PERF(hwp_cap); |
2098 | else if (cpu->hwp_boost_min == HWP_GUARANTEED_PERF(hwp_cap) && |
2099 | max_limit != HWP_GUARANTEED_PERF(hwp_cap)) |
2100 | cpu->hwp_boost_min = max_limit; |
2101 | else |
2102 | return; |
2103 | |
2104 | hwp_req = (hwp_req & ~GENMASK_ULL(7, 0)) | cpu->hwp_boost_min; |
wrmsrl(MSR_HWP_REQUEST, hwp_req);
2106 | cpu->last_update = cpu->sample.time; |
2107 | } |
2108 | |
2109 | static inline void intel_pstate_hwp_boost_down(struct cpudata *cpu) |
2110 | { |
2111 | if (cpu->hwp_boost_min) { |
2112 | bool expired; |
2113 | |
2114 | /* Check if we are idle for hold time to boost down */ |
2115 | expired = time_after64(cpu->sample.time, cpu->last_update + |
2116 | hwp_boost_hold_time_ns); |
2117 | if (expired) { |
wrmsrl(MSR_HWP_REQUEST, cpu->hwp_req_cached);
2119 | cpu->hwp_boost_min = 0; |
2120 | } |
2121 | } |
2122 | cpu->last_update = cpu->sample.time; |
2123 | } |
2124 | |
2125 | static inline void intel_pstate_update_util_hwp_local(struct cpudata *cpu, |
2126 | u64 time) |
2127 | { |
2128 | cpu->sample.time = time; |
2129 | |
2130 | if (cpu->sched_flags & SCHED_CPUFREQ_IOWAIT) { |
2131 | bool do_io = false; |
2132 | |
2133 | cpu->sched_flags = 0; |
2134 | /* |
2135 | * Set iowait_boost flag and update time. Since IO WAIT flag |
2136 | * is set all the time, we can't just conclude that there is |
2137 | * some IO bound activity is scheduled on this CPU with just |
2138 | * one occurrence. If we receive at least two in two |
2139 | * consecutive ticks, then we treat as boost candidate. |
2140 | */ |
2141 | if (time_before64(time, cpu->last_io_update + 2 * TICK_NSEC)) |
2142 | do_io = true; |
2143 | |
2144 | cpu->last_io_update = time; |
2145 | |
2146 | if (do_io) |
2147 | intel_pstate_hwp_boost_up(cpu); |
2148 | |
2149 | } else { |
2150 | intel_pstate_hwp_boost_down(cpu); |
2151 | } |
2152 | } |
2153 | |
2154 | static inline void intel_pstate_update_util_hwp(struct update_util_data *data, |
2155 | u64 time, unsigned int flags) |
2156 | { |
2157 | struct cpudata *cpu = container_of(data, struct cpudata, update_util); |
2158 | |
2159 | cpu->sched_flags |= flags; |
2160 | |
2161 | if (smp_processor_id() == cpu->cpu) |
2162 | intel_pstate_update_util_hwp_local(cpu, time); |
2163 | } |
2164 | |
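/*
 * core_avg_perf is the APERF/MPERF ratio over the last sample period in
 * extended fixed point; since both MSRs only count in C0, it reflects the
 * average frequency relative to the maximum non-turbo frequency while the
 * CPU was not idle.
 */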
2165 | static inline void intel_pstate_calc_avg_perf(struct cpudata *cpu) |
2166 | { |
2167 | struct sample *sample = &cpu->sample; |
2168 | |
sample->core_avg_perf = div_ext_fp(sample->aperf, sample->mperf);
2170 | } |
2171 | |
2172 | static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time) |
2173 | { |
2174 | u64 aperf, mperf; |
2175 | unsigned long flags; |
2176 | u64 tsc; |
2177 | |
2178 | local_irq_save(flags); |
2179 | rdmsrl(MSR_IA32_APERF, aperf); |
2180 | rdmsrl(MSR_IA32_MPERF, mperf); |
2181 | tsc = rdtsc(); |
2182 | if (cpu->prev_mperf == mperf || cpu->prev_tsc == tsc) { |
2183 | local_irq_restore(flags); |
2184 | return false; |
2185 | } |
2186 | local_irq_restore(flags); |
2187 | |
2188 | cpu->last_sample_time = cpu->sample.time; |
2189 | cpu->sample.time = time; |
2190 | cpu->sample.aperf = aperf; |
2191 | cpu->sample.mperf = mperf; |
2192 | cpu->sample.tsc = tsc; |
2193 | cpu->sample.aperf -= cpu->prev_aperf; |
2194 | cpu->sample.mperf -= cpu->prev_mperf; |
2195 | cpu->sample.tsc -= cpu->prev_tsc; |
2196 | |
2197 | cpu->prev_aperf = aperf; |
2198 | cpu->prev_mperf = mperf; |
2199 | cpu->prev_tsc = tsc; |
2200 | /* |
2201 | * First time this function is invoked in a given cycle, all of the |
2202 | * previous sample data fields are equal to zero or stale and they must |
2203 | * be populated with meaningful numbers for things to work, so assume |
2204 | * that sample.time will always be reset before setting the utilization |
2205 | * update hook and make the caller skip the sample then. |
2206 | */ |
2207 | if (cpu->last_sample_time) { |
2208 | intel_pstate_calc_avg_perf(cpu); |
2209 | return true; |
2210 | } |
2211 | return false; |
2212 | } |
2213 | |
2214 | static inline int32_t get_avg_frequency(struct cpudata *cpu) |
2215 | { |
return mul_ext_fp(cpu->sample.core_avg_perf, cpu_khz);
2217 | } |
2218 | |
2219 | static inline int32_t get_avg_pstate(struct cpudata *cpu) |
2220 | { |
return mul_ext_fp(cpu->pstate.max_pstate_physical,
cpu->sample.core_avg_perf);
2223 | } |
2224 | |
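/*
 * For illustration, assuming FRAC_BITS == 8 and no APERF/MPERF shift:
 * with mperf = 750 and tsc = 1000 over the sample period, busy_frac is
 * div_fp(750, 1000) = 192 (i.e. 0.75), and with turbo_pstate = 40 the
 * target below becomes mul_fp(40 + 40/4, 0.75) = 37 before clamping.
 */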
2225 | static inline int32_t get_target_pstate(struct cpudata *cpu) |
2226 | { |
2227 | struct sample *sample = &cpu->sample; |
2228 | int32_t busy_frac; |
2229 | int target, avg_pstate; |
2230 | |
busy_frac = div_fp(sample->mperf << cpu->aperf_mperf_shift,
sample->tsc);
2233 | |
2234 | if (busy_frac < cpu->iowait_boost) |
2235 | busy_frac = cpu->iowait_boost; |
2236 | |
2237 | sample->busy_scaled = busy_frac * 100; |
2238 | |
2239 | target = global.no_turbo || global.turbo_disabled ? |
2240 | cpu->pstate.max_pstate : cpu->pstate.turbo_pstate; |
2241 | target += target >> 2; |
target = mul_fp(target, busy_frac);
2243 | if (target < cpu->pstate.min_pstate) |
2244 | target = cpu->pstate.min_pstate; |
2245 | |
2246 | /* |
2247 | * If the average P-state during the previous cycle was higher than the |
2248 | * current target, add 50% of the difference to the target to reduce |
2249 | * possible performance oscillations and offset possible performance |
2250 | * loss related to moving the workload from one CPU to another within |
2251 | * a package/module. |
2252 | */ |
2253 | avg_pstate = get_avg_pstate(cpu); |
2254 | if (avg_pstate > target) |
2255 | target += (avg_pstate - target) >> 1; |
2256 | |
2257 | return target; |
2258 | } |
2259 | |
2260 | static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate) |
2261 | { |
2262 | int min_pstate = max(cpu->pstate.min_pstate, cpu->min_perf_ratio); |
2263 | int max_pstate = max(min_pstate, cpu->max_perf_ratio); |
2264 | |
2265 | return clamp_t(int, pstate, min_pstate, max_pstate); |
2266 | } |
2267 | |
2268 | static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate) |
2269 | { |
2270 | if (pstate == cpu->pstate.current_pstate) |
2271 | return; |
2272 | |
2273 | cpu->pstate.current_pstate = pstate; |
wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate));
2275 | } |
2276 | |
2277 | static void intel_pstate_adjust_pstate(struct cpudata *cpu) |
2278 | { |
2279 | int from = cpu->pstate.current_pstate; |
2280 | struct sample *sample; |
2281 | int target_pstate; |
2282 | |
2283 | update_turbo_state(); |
2284 | |
2285 | target_pstate = get_target_pstate(cpu); |
target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
trace_cpu_frequency(target_pstate * cpu->pstate.scaling, cpu->cpu);
intel_pstate_update_pstate(cpu, target_pstate);

sample = &cpu->sample;
trace_pstate_sample(mul_ext_fp(100, sample->core_avg_perf),
fp_toint(sample->busy_scaled),
from,
cpu->pstate.current_pstate,
sample->mperf,
sample->aperf,
sample->tsc,
get_avg_frequency(cpu),
fp_toint(cpu->iowait_boost * 100));
2300 | } |
2301 | |
2302 | static void intel_pstate_update_util(struct update_util_data *data, u64 time, |
2303 | unsigned int flags) |
2304 | { |
2305 | struct cpudata *cpu = container_of(data, struct cpudata, update_util); |
2306 | u64 delta_ns; |
2307 | |
2308 | /* Don't allow remote callbacks */ |
2309 | if (smp_processor_id() != cpu->cpu) |
2310 | return; |
2311 | |
2312 | delta_ns = time - cpu->last_update; |
2313 | if (flags & SCHED_CPUFREQ_IOWAIT) { |
2314 | /* Start over if the CPU may have been idle. */ |
2315 | if (delta_ns > TICK_NSEC) { |
2316 | cpu->iowait_boost = ONE_EIGHTH_FP; |
2317 | } else if (cpu->iowait_boost >= ONE_EIGHTH_FP) { |
2318 | cpu->iowait_boost <<= 1; |
2319 | if (cpu->iowait_boost > int_tofp(1)) |
2320 | cpu->iowait_boost = int_tofp(1); |
2321 | } else { |
2322 | cpu->iowait_boost = ONE_EIGHTH_FP; |
2323 | } |
2324 | } else if (cpu->iowait_boost) { |
2325 | /* Clear iowait_boost if the CPU may have been idle. */ |
2326 | if (delta_ns > TICK_NSEC) |
2327 | cpu->iowait_boost = 0; |
2328 | else |
2329 | cpu->iowait_boost >>= 1; |
2330 | } |
2331 | cpu->last_update = time; |
2332 | delta_ns = time - cpu->sample.time; |
2333 | if ((s64)delta_ns < INTEL_PSTATE_SAMPLING_INTERVAL) |
2334 | return; |
2335 | |
2336 | if (intel_pstate_sample(cpu, time)) |
2337 | intel_pstate_adjust_pstate(cpu); |
2338 | } |
2339 | |
2340 | static struct pstate_funcs core_funcs = { |
2341 | .get_max = core_get_max_pstate, |
2342 | .get_max_physical = core_get_max_pstate_physical, |
2343 | .get_min = core_get_min_pstate, |
2344 | .get_turbo = core_get_turbo_pstate, |
2345 | .get_scaling = core_get_scaling, |
2346 | .get_val = core_get_val, |
2347 | }; |
2348 | |
2349 | static const struct pstate_funcs silvermont_funcs = { |
2350 | .get_max = atom_get_max_pstate, |
2351 | .get_max_physical = atom_get_max_pstate, |
2352 | .get_min = atom_get_min_pstate, |
2353 | .get_turbo = atom_get_turbo_pstate, |
2354 | .get_val = atom_get_val, |
2355 | .get_scaling = silvermont_get_scaling, |
2356 | .get_vid = atom_get_vid, |
2357 | }; |
2358 | |
2359 | static const struct pstate_funcs airmont_funcs = { |
2360 | .get_max = atom_get_max_pstate, |
2361 | .get_max_physical = atom_get_max_pstate, |
2362 | .get_min = atom_get_min_pstate, |
2363 | .get_turbo = atom_get_turbo_pstate, |
2364 | .get_val = atom_get_val, |
2365 | .get_scaling = airmont_get_scaling, |
2366 | .get_vid = atom_get_vid, |
2367 | }; |
2368 | |
2369 | static const struct pstate_funcs knl_funcs = { |
2370 | .get_max = core_get_max_pstate, |
2371 | .get_max_physical = core_get_max_pstate_physical, |
2372 | .get_min = core_get_min_pstate, |
2373 | .get_turbo = knl_get_turbo_pstate, |
2374 | .get_aperf_mperf_shift = knl_get_aperf_mperf_shift, |
2375 | .get_scaling = core_get_scaling, |
2376 | .get_val = core_get_val, |
2377 | }; |
2378 | |
2379 | #define X86_MATCH(model, policy) \ |
2380 | X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_##model, \ |
2381 | X86_FEATURE_APERFMPERF, &policy) |
2382 | |
2383 | static const struct x86_cpu_id intel_pstate_cpu_ids[] = { |
2384 | X86_MATCH(SANDYBRIDGE, core_funcs), |
2385 | X86_MATCH(SANDYBRIDGE_X, core_funcs), |
2386 | X86_MATCH(ATOM_SILVERMONT, silvermont_funcs), |
2387 | X86_MATCH(IVYBRIDGE, core_funcs), |
2388 | X86_MATCH(HASWELL, core_funcs), |
2389 | X86_MATCH(BROADWELL, core_funcs), |
2390 | X86_MATCH(IVYBRIDGE_X, core_funcs), |
2391 | X86_MATCH(HASWELL_X, core_funcs), |
2392 | X86_MATCH(HASWELL_L, core_funcs), |
2393 | X86_MATCH(HASWELL_G, core_funcs), |
2394 | X86_MATCH(BROADWELL_G, core_funcs), |
2395 | X86_MATCH(ATOM_AIRMONT, airmont_funcs), |
2396 | X86_MATCH(SKYLAKE_L, core_funcs), |
2397 | X86_MATCH(BROADWELL_X, core_funcs), |
2398 | X86_MATCH(SKYLAKE, core_funcs), |
2399 | X86_MATCH(BROADWELL_D, core_funcs), |
2400 | X86_MATCH(XEON_PHI_KNL, knl_funcs), |
2401 | X86_MATCH(XEON_PHI_KNM, knl_funcs), |
2402 | X86_MATCH(ATOM_GOLDMONT, core_funcs), |
2403 | X86_MATCH(ATOM_GOLDMONT_PLUS, core_funcs), |
2404 | X86_MATCH(SKYLAKE_X, core_funcs), |
2405 | X86_MATCH(COMETLAKE, core_funcs), |
2406 | X86_MATCH(ICELAKE_X, core_funcs), |
2407 | X86_MATCH(TIGERLAKE, core_funcs), |
2408 | X86_MATCH(SAPPHIRERAPIDS_X, core_funcs), |
2409 | {} |
2410 | }; |
2411 | MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids); |
2412 | |
2413 | static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = { |
2414 | X86_MATCH(BROADWELL_D, core_funcs), |
2415 | X86_MATCH(BROADWELL_X, core_funcs), |
2416 | X86_MATCH(SKYLAKE_X, core_funcs), |
2417 | X86_MATCH(ICELAKE_X, core_funcs), |
2418 | X86_MATCH(SAPPHIRERAPIDS_X, core_funcs), |
2419 | {} |
2420 | }; |
2421 | |
2422 | static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = { |
2423 | X86_MATCH(KABYLAKE, core_funcs), |
2424 | {} |
2425 | }; |
2426 | |
2427 | static int intel_pstate_init_cpu(unsigned int cpunum) |
2428 | { |
2429 | struct cpudata *cpu; |
2430 | |
2431 | cpu = all_cpu_data[cpunum]; |
2432 | |
2433 | if (!cpu) { |
cpu = kzalloc(sizeof(*cpu), GFP_KERNEL);
2435 | if (!cpu) |
2436 | return -ENOMEM; |
2437 | |
2438 | WRITE_ONCE(all_cpu_data[cpunum], cpu); |
2439 | |
2440 | cpu->cpu = cpunum; |
2441 | |
2442 | cpu->epp_default = -EINVAL; |
2443 | |
2444 | if (hwp_active) { |
intel_pstate_hwp_enable(cpu);
2446 | |
2447 | if (intel_pstate_acpi_pm_profile_server()) |
2448 | hwp_boost = true; |
2449 | } |
2450 | } else if (hwp_active) { |
2451 | /* |
2452 | * Re-enable HWP in case this happens after a resume from ACPI |
2453 | * S3 if the CPU was offline during the whole system/resume |
2454 | * cycle. |
2455 | */ |
2456 | intel_pstate_hwp_reenable(cpu); |
2457 | } |
2458 | |
2459 | cpu->epp_powersave = -EINVAL; |
2460 | cpu->epp_policy = 0; |
2461 | |
2462 | intel_pstate_get_cpu_pstates(cpu); |
2463 | |
2464 | pr_debug("controlling: cpu %d\n" , cpunum); |
2465 | |
2466 | return 0; |
2467 | } |
2468 | |
2469 | static void intel_pstate_set_update_util_hook(unsigned int cpu_num) |
2470 | { |
2471 | struct cpudata *cpu = all_cpu_data[cpu_num]; |
2472 | |
2473 | if (hwp_active && !hwp_boost) |
2474 | return; |
2475 | |
2476 | if (cpu->update_util_set) |
2477 | return; |
2478 | |
2479 | /* Prevent intel_pstate_update_util() from using stale data. */ |
2480 | cpu->sample.time = 0; |
cpufreq_add_update_util_hook(cpu_num, &cpu->update_util,
(hwp_active ?
intel_pstate_update_util_hwp :
intel_pstate_update_util));
2485 | cpu->update_util_set = true; |
2486 | } |
2487 | |
2488 | static void intel_pstate_clear_update_util_hook(unsigned int cpu) |
2489 | { |
2490 | struct cpudata *cpu_data = all_cpu_data[cpu]; |
2491 | |
2492 | if (!cpu_data->update_util_set) |
2493 | return; |
2494 | |
2495 | cpufreq_remove_update_util_hook(cpu); |
2496 | cpu_data->update_util_set = false; |
2497 | synchronize_rcu(); |
2498 | } |
2499 | |
2500 | static int intel_pstate_get_max_freq(struct cpudata *cpu) |
2501 | { |
2502 | return global.turbo_disabled || global.no_turbo ? |
2503 | cpu->pstate.max_freq : cpu->pstate.turbo_freq; |
2504 | } |
2505 | |
2506 | static void intel_pstate_update_perf_limits(struct cpudata *cpu, |
2507 | unsigned int policy_min, |
2508 | unsigned int policy_max) |
2509 | { |
2510 | int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling; |
2511 | int32_t max_policy_perf, min_policy_perf; |
2512 | |
2513 | max_policy_perf = policy_max / perf_ctl_scaling; |
2514 | if (policy_max == policy_min) { |
2515 | min_policy_perf = max_policy_perf; |
2516 | } else { |
2517 | min_policy_perf = policy_min / perf_ctl_scaling; |
2518 | min_policy_perf = clamp_t(int32_t, min_policy_perf, |
2519 | 0, max_policy_perf); |
2520 | } |
2521 | |
2522 | /* |
2523 | * HWP needs some special consideration, because HWP_REQUEST uses |
2524 | * abstract values to represent performance rather than pure ratios. |
2525 | */ |
2526 | if (hwp_active && cpu->pstate.scaling != perf_ctl_scaling) { |
2527 | int scaling = cpu->pstate.scaling; |
2528 | int freq; |
2529 | |
2530 | freq = max_policy_perf * perf_ctl_scaling; |
2531 | max_policy_perf = DIV_ROUND_UP(freq, scaling); |
2532 | freq = min_policy_perf * perf_ctl_scaling; |
2533 | min_policy_perf = DIV_ROUND_UP(freq, scaling); |
2534 | } |
2535 | |
2536 | pr_debug("cpu:%d min_policy_perf:%d max_policy_perf:%d\n" , |
2537 | cpu->cpu, min_policy_perf, max_policy_perf); |
2538 | |
2539 | /* Normalize user input to [min_perf, max_perf] */ |
2540 | if (per_cpu_limits) { |
2541 | cpu->min_perf_ratio = min_policy_perf; |
2542 | cpu->max_perf_ratio = max_policy_perf; |
2543 | } else { |
2544 | int turbo_max = cpu->pstate.turbo_pstate; |
2545 | int32_t global_min, global_max; |
2546 | |
2547 | /* Global limits are in percent of the maximum turbo P-state. */ |
2548 | global_max = DIV_ROUND_UP(turbo_max * global.max_perf_pct, 100); |
2549 | global_min = DIV_ROUND_UP(turbo_max * global.min_perf_pct, 100); |
2550 | global_min = clamp_t(int32_t, global_min, 0, global_max); |
2551 | |
2552 | pr_debug("cpu:%d global_min:%d global_max:%d\n" , cpu->cpu, |
2553 | global_min, global_max); |
2554 | |
2555 | cpu->min_perf_ratio = max(min_policy_perf, global_min); |
2556 | cpu->min_perf_ratio = min(cpu->min_perf_ratio, max_policy_perf); |
2557 | cpu->max_perf_ratio = min(max_policy_perf, global_max); |
2558 | cpu->max_perf_ratio = max(min_policy_perf, cpu->max_perf_ratio); |
2559 | |
2560 | /* Make sure min_perf <= max_perf */ |
2561 | cpu->min_perf_ratio = min(cpu->min_perf_ratio, |
2562 | cpu->max_perf_ratio); |
2563 | |
2564 | } |
2565 | pr_debug("cpu:%d max_perf_ratio:%d min_perf_ratio:%d\n" , cpu->cpu, |
2566 | cpu->max_perf_ratio, |
2567 | cpu->min_perf_ratio); |
2568 | } |
2569 | |
2570 | static int intel_pstate_set_policy(struct cpufreq_policy *policy) |
2571 | { |
2572 | struct cpudata *cpu; |
2573 | |
2574 | if (!policy->cpuinfo.max_freq) |
2575 | return -ENODEV; |
2576 | |
2577 | pr_debug("set_policy cpuinfo.max %u policy->max %u\n" , |
2578 | policy->cpuinfo.max_freq, policy->max); |
2579 | |
2580 | cpu = all_cpu_data[policy->cpu]; |
2581 | cpu->policy = policy->policy; |
2582 | |
2583 | mutex_lock(&intel_pstate_limits_lock); |
2584 | |
intel_pstate_update_perf_limits(cpu, policy->min, policy->max);
2586 | |
2587 | if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) { |
2588 | /* |
2589 | * NOHZ_FULL CPUs need this as the governor callback may not |
2590 | * be invoked on them. |
2591 | */ |
intel_pstate_clear_update_util_hook(policy->cpu);
2593 | intel_pstate_max_within_limits(cpu); |
2594 | } else { |
intel_pstate_set_update_util_hook(policy->cpu);
2596 | } |
2597 | |
2598 | if (hwp_active) { |
2599 | /* |
2600 | * When hwp_boost was active before and dynamically it |
2601 | * was turned off, in that case we need to clear the |
2602 | * update util hook. |
2603 | */ |
2604 | if (!hwp_boost) |
2605 | intel_pstate_clear_update_util_hook(cpu: policy->cpu); |
2606 | intel_pstate_hwp_set(cpu: policy->cpu); |
2607 | } |
2608 | /* |
2609 | * policy->cur is never updated with the intel_pstate driver, but it |
2610 | * is used as a stale frequency value. So, keep it within limits. |
2611 | */ |
2612 | policy->cur = policy->min; |
2613 | |
mutex_unlock(&intel_pstate_limits_lock);
2615 | |
2616 | return 0; |
2617 | } |
2618 | |
2619 | static void intel_pstate_adjust_policy_max(struct cpudata *cpu, |
2620 | struct cpufreq_policy_data *policy) |
2621 | { |
2622 | if (!hwp_active && |
2623 | cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate && |
2624 | policy->max < policy->cpuinfo.max_freq && |
2625 | policy->max > cpu->pstate.max_freq) { |
2626 | pr_debug("policy->max > max non turbo frequency\n" ); |
2627 | policy->max = policy->cpuinfo.max_freq; |
2628 | } |
2629 | } |
2630 | |
2631 | static void intel_pstate_verify_cpu_policy(struct cpudata *cpu, |
2632 | struct cpufreq_policy_data *policy) |
2633 | { |
2634 | int max_freq; |
2635 | |
2636 | update_turbo_state(); |
2637 | if (hwp_active) { |
2638 | intel_pstate_get_hwp_cap(cpu); |
2639 | max_freq = global.no_turbo || global.turbo_disabled ? |
2640 | cpu->pstate.max_freq : cpu->pstate.turbo_freq; |
2641 | } else { |
2642 | max_freq = intel_pstate_get_max_freq(cpu); |
2643 | } |
cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, max_freq);
2645 | |
2646 | intel_pstate_adjust_policy_max(cpu, policy); |
2647 | } |
2648 | |
2649 | static int intel_pstate_verify_policy(struct cpufreq_policy_data *policy) |
2650 | { |
intel_pstate_verify_cpu_policy(all_cpu_data[policy->cpu], policy);
2652 | |
2653 | return 0; |
2654 | } |
2655 | |
2656 | static int intel_cpufreq_cpu_offline(struct cpufreq_policy *policy) |
2657 | { |
2658 | struct cpudata *cpu = all_cpu_data[policy->cpu]; |
2659 | |
2660 | pr_debug("CPU %d going offline\n" , cpu->cpu); |
2661 | |
2662 | if (cpu->suspended) |
2663 | return 0; |
2664 | |
2665 | /* |
2666 | * If the CPU is an SMT thread and it goes offline with the performance |
2667 | * settings different from the minimum, it will prevent its sibling |
2668 | * from getting to lower performance levels, so force the minimum |
2669 | * performance on CPU offline to prevent that from happening. |
2670 | */ |
2671 | if (hwp_active) |
2672 | intel_pstate_hwp_offline(cpu); |
2673 | else |
2674 | intel_pstate_set_min_pstate(cpu); |
2675 | |
2676 | intel_pstate_exit_perf_limits(policy); |
2677 | |
2678 | return 0; |
2679 | } |
2680 | |
2681 | static int intel_pstate_cpu_online(struct cpufreq_policy *policy) |
2682 | { |
2683 | struct cpudata *cpu = all_cpu_data[policy->cpu]; |
2684 | |
2685 | pr_debug("CPU %d going online\n" , cpu->cpu); |
2686 | |
2687 | intel_pstate_init_acpi_perf_limits(policy); |
2688 | |
2689 | if (hwp_active) { |
2690 | /* |
2691 | * Re-enable HWP and clear the "suspended" flag to let "resume" |
2692 | * know that it need not do that. |
2693 | */ |
2694 | intel_pstate_hwp_reenable(cpu); |
2695 | cpu->suspended = false; |
2696 | } |
2697 | |
2698 | return 0; |
2699 | } |
2700 | |
2701 | static int intel_pstate_cpu_offline(struct cpufreq_policy *policy) |
2702 | { |
intel_pstate_clear_update_util_hook(policy->cpu);
2704 | |
2705 | return intel_cpufreq_cpu_offline(policy); |
2706 | } |
2707 | |
2708 | static int intel_pstate_cpu_exit(struct cpufreq_policy *policy) |
2709 | { |
2710 | pr_debug("CPU %d exiting\n" , policy->cpu); |
2711 | |
2712 | policy->fast_switch_possible = false; |
2713 | |
2714 | return 0; |
2715 | } |
2716 | |
2717 | static int __intel_pstate_cpu_init(struct cpufreq_policy *policy) |
2718 | { |
2719 | struct cpudata *cpu; |
2720 | int rc; |
2721 | |
rc = intel_pstate_init_cpu(policy->cpu);
2723 | if (rc) |
2724 | return rc; |
2725 | |
2726 | cpu = all_cpu_data[policy->cpu]; |
2727 | |
2728 | cpu->max_perf_ratio = 0xFF; |
2729 | cpu->min_perf_ratio = 0; |
2730 | |
2731 | /* cpuinfo and default policy values */ |
2732 | policy->cpuinfo.min_freq = cpu->pstate.min_freq; |
2733 | update_turbo_state(); |
2734 | global.turbo_disabled_mf = global.turbo_disabled; |
2735 | policy->cpuinfo.max_freq = global.turbo_disabled ? |
2736 | cpu->pstate.max_freq : cpu->pstate.turbo_freq; |
2737 | |
2738 | policy->min = policy->cpuinfo.min_freq; |
2739 | policy->max = policy->cpuinfo.max_freq; |
2740 | |
2741 | intel_pstate_init_acpi_perf_limits(policy); |
2742 | |
2743 | policy->fast_switch_possible = true; |
2744 | |
2745 | return 0; |
2746 | } |
2747 | |
2748 | static int intel_pstate_cpu_init(struct cpufreq_policy *policy) |
2749 | { |
2750 | int ret = __intel_pstate_cpu_init(policy); |
2751 | |
2752 | if (ret) |
2753 | return ret; |
2754 | |
2755 | /* |
2756 | * Set the policy to powersave to provide a valid fallback value in case |
2757 | * the default cpufreq governor is neither powersave nor performance. |
2758 | */ |
2759 | policy->policy = CPUFREQ_POLICY_POWERSAVE; |
2760 | |
2761 | if (hwp_active) { |
2762 | struct cpudata *cpu = all_cpu_data[policy->cpu]; |
2763 | |
cpu->epp_cached = intel_pstate_get_epp(cpu, 0);
2765 | } |
2766 | |
2767 | return 0; |
2768 | } |
2769 | |
2770 | static struct cpufreq_driver intel_pstate = { |
2771 | .flags = CPUFREQ_CONST_LOOPS, |
2772 | .verify = intel_pstate_verify_policy, |
2773 | .setpolicy = intel_pstate_set_policy, |
2774 | .suspend = intel_pstate_suspend, |
2775 | .resume = intel_pstate_resume, |
2776 | .init = intel_pstate_cpu_init, |
2777 | .exit = intel_pstate_cpu_exit, |
2778 | .offline = intel_pstate_cpu_offline, |
2779 | .online = intel_pstate_cpu_online, |
2780 | .update_limits = intel_pstate_update_limits, |
2781 | .name = "intel_pstate" , |
2782 | }; |
2783 | |
2784 | static int intel_cpufreq_verify_policy(struct cpufreq_policy_data *policy) |
2785 | { |
2786 | struct cpudata *cpu = all_cpu_data[policy->cpu]; |
2787 | |
2788 | intel_pstate_verify_cpu_policy(cpu, policy); |
intel_pstate_update_perf_limits(cpu, policy->min, policy->max);
2790 | |
2791 | return 0; |
2792 | } |
2793 | |
2794 | /* Use of trace in passive mode: |
2795 | * |
2796 | * In passive mode the trace core_busy field (also known as the |
* performance field, and labelled as such on the graphs; also known as
2798 | * core_avg_perf) is not needed and so is re-assigned to indicate if the |
2799 | * driver call was via the normal or fast switch path. Various graphs |
2800 | * output from the intel_pstate_tracer.py utility that include core_busy |
2801 | * (or performance or core_avg_perf) have a fixed y-axis from 0 to 100%, |
2802 | * so we use 10 to indicate the normal path through the driver, and |
2803 | * 90 to indicate the fast switch path through the driver. |
2804 | * The scaled_busy field is not used, and is set to 0. |
2805 | */ |
2806 | |
2807 | #define INTEL_PSTATE_TRACE_TARGET 10 |
2808 | #define INTEL_PSTATE_TRACE_FAST_SWITCH 90 |
2809 | |
2810 | static void intel_cpufreq_trace(struct cpudata *cpu, unsigned int trace_type, int old_pstate) |
2811 | { |
2812 | struct sample *sample; |
2813 | |
2814 | if (!trace_pstate_sample_enabled()) |
2815 | return; |
2816 | |
if (!intel_pstate_sample(cpu, ktime_get()))
2818 | return; |
2819 | |
2820 | sample = &cpu->sample; |
trace_pstate_sample(trace_type,
0,
old_pstate,
cpu->pstate.current_pstate,
sample->mperf,
sample->aperf,
sample->tsc,
get_avg_frequency(cpu),
fp_toint(cpu->iowait_boost * 100));
2830 | } |
2831 | |
2832 | static void intel_cpufreq_hwp_update(struct cpudata *cpu, u32 min, u32 max, |
2833 | u32 desired, bool fast_switch) |
2834 | { |
2835 | u64 prev = READ_ONCE(cpu->hwp_req_cached), value = prev; |
2836 | |
2837 | value &= ~HWP_MIN_PERF(~0L); |
2838 | value |= HWP_MIN_PERF(min); |
2839 | |
2840 | value &= ~HWP_MAX_PERF(~0L); |
2841 | value |= HWP_MAX_PERF(max); |
2842 | |
2843 | value &= ~HWP_DESIRED_PERF(~0L); |
2844 | value |= HWP_DESIRED_PERF(desired); |
2845 | |
2846 | if (value == prev) |
2847 | return; |
2848 | |
2849 | WRITE_ONCE(cpu->hwp_req_cached, value); |
2850 | if (fast_switch) |
wrmsrl(MSR_HWP_REQUEST, value);
else
wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
2854 | } |
2855 | |
2856 | static void intel_cpufreq_perf_ctl_update(struct cpudata *cpu, |
2857 | u32 target_pstate, bool fast_switch) |
2858 | { |
2859 | if (fast_switch) |
wrmsrl(MSR_IA32_PERF_CTL,
pstate_funcs.get_val(cpu, target_pstate));
else
wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
pstate_funcs.get_val(cpu, target_pstate));
2865 | } |
2866 | |
2867 | static int intel_cpufreq_update_pstate(struct cpufreq_policy *policy, |
2868 | int target_pstate, bool fast_switch) |
2869 | { |
2870 | struct cpudata *cpu = all_cpu_data[policy->cpu]; |
2871 | int old_pstate = cpu->pstate.current_pstate; |
2872 | |
target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
2874 | if (hwp_active) { |
2875 | int max_pstate = policy->strict_target ? |
2876 | target_pstate : cpu->max_perf_ratio; |
2877 | |
intel_cpufreq_hwp_update(cpu, target_pstate, max_pstate, 0,
fast_switch);
2880 | } else if (target_pstate != old_pstate) { |
2881 | intel_cpufreq_perf_ctl_update(cpu, target_pstate, fast_switch); |
2882 | } |
2883 | |
2884 | cpu->pstate.current_pstate = target_pstate; |
2885 | |
intel_cpufreq_trace(cpu, fast_switch ? INTEL_PSTATE_TRACE_FAST_SWITCH :
INTEL_PSTATE_TRACE_TARGET, old_pstate);
2888 | |
2889 | return target_pstate; |
2890 | } |
2891 | |
2892 | static int intel_cpufreq_target(struct cpufreq_policy *policy, |
2893 | unsigned int target_freq, |
2894 | unsigned int relation) |
2895 | { |
2896 | struct cpudata *cpu = all_cpu_data[policy->cpu]; |
2897 | struct cpufreq_freqs freqs; |
2898 | int target_pstate; |
2899 | |
2900 | update_turbo_state(); |
2901 | |
2902 | freqs.old = policy->cur; |
2903 | freqs.new = target_freq; |
2904 | |
cpufreq_freq_transition_begin(policy, &freqs);
2906 | |
2907 | switch (relation) { |
2908 | case CPUFREQ_RELATION_L: |
2909 | target_pstate = DIV_ROUND_UP(freqs.new, cpu->pstate.scaling); |
2910 | break; |
2911 | case CPUFREQ_RELATION_H: |
2912 | target_pstate = freqs.new / cpu->pstate.scaling; |
2913 | break; |
2914 | default: |
2915 | target_pstate = DIV_ROUND_CLOSEST(freqs.new, cpu->pstate.scaling); |
2916 | break; |
2917 | } |
2918 | |
target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, false);

freqs.new = target_pstate * cpu->pstate.scaling;

cpufreq_freq_transition_end(policy, &freqs, false);
2924 | |
2925 | return 0; |
2926 | } |
2927 | |
2928 | static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy, |
2929 | unsigned int target_freq) |
2930 | { |
2931 | struct cpudata *cpu = all_cpu_data[policy->cpu]; |
2932 | int target_pstate; |
2933 | |
2934 | update_turbo_state(); |
2935 | |
2936 | target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling); |
2937 | |
target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, true);
2939 | |
2940 | return target_pstate * cpu->pstate.scaling; |
2941 | } |
2942 | |
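/*
 * The scheduler passes performance values in the [0, capacity] range; they
 * are mapped linearly onto HWP performance levels, with HWP_HIGHEST_PERF
 * (or HWP_GUARANTEED_PERF when turbo is disabled) corresponding to full
 * capacity, and then clamped to the configured limits.
 */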
2943 | static void intel_cpufreq_adjust_perf(unsigned int cpunum, |
2944 | unsigned long min_perf, |
2945 | unsigned long target_perf, |
2946 | unsigned long capacity) |
2947 | { |
2948 | struct cpudata *cpu = all_cpu_data[cpunum]; |
2949 | u64 hwp_cap = READ_ONCE(cpu->hwp_cap_cached); |
2950 | int old_pstate = cpu->pstate.current_pstate; |
2951 | int cap_pstate, min_pstate, max_pstate, target_pstate; |
2952 | |
2953 | update_turbo_state(); |
2954 | cap_pstate = global.turbo_disabled ? HWP_GUARANTEED_PERF(hwp_cap) : |
2955 | HWP_HIGHEST_PERF(hwp_cap); |
2956 | |
2957 | /* Optimization: Avoid unnecessary divisions. */ |
2958 | |
2959 | target_pstate = cap_pstate; |
2960 | if (target_perf < capacity) |
2961 | target_pstate = DIV_ROUND_UP(cap_pstate * target_perf, capacity); |
2962 | |
2963 | min_pstate = cap_pstate; |
2964 | if (min_perf < capacity) |
2965 | min_pstate = DIV_ROUND_UP(cap_pstate * min_perf, capacity); |
2966 | |
2967 | if (min_pstate < cpu->pstate.min_pstate) |
2968 | min_pstate = cpu->pstate.min_pstate; |
2969 | |
2970 | if (min_pstate < cpu->min_perf_ratio) |
2971 | min_pstate = cpu->min_perf_ratio; |
2972 | |
2973 | max_pstate = min(cap_pstate, cpu->max_perf_ratio); |
2974 | if (max_pstate < min_pstate) |
2975 | max_pstate = min_pstate; |
2976 | |
2977 | target_pstate = clamp_t(int, target_pstate, min_pstate, max_pstate); |
2978 | |
intel_cpufreq_hwp_update(cpu, min_pstate, max_pstate, target_pstate, true);
2980 | |
2981 | cpu->pstate.current_pstate = target_pstate; |
2982 | intel_cpufreq_trace(cpu, INTEL_PSTATE_TRACE_FAST_SWITCH, old_pstate); |
2983 | } |
2984 | |
2985 | static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy) |
2986 | { |
2987 | struct freq_qos_request *req; |
2988 | struct cpudata *cpu; |
2989 | struct device *dev; |
2990 | int ret, freq; |
2991 | |
dev = get_cpu_device(policy->cpu);
2993 | if (!dev) |
2994 | return -ENODEV; |
2995 | |
2996 | ret = __intel_pstate_cpu_init(policy); |
2997 | if (ret) |
2998 | return ret; |
2999 | |
3000 | policy->cpuinfo.transition_latency = INTEL_CPUFREQ_TRANSITION_LATENCY; |
3001 | /* This reflects the intel_pstate_get_cpu_pstates() setting. */ |
3002 | policy->cur = policy->cpuinfo.min_freq; |
3003 | |
req = kcalloc(2, sizeof(*req), GFP_KERNEL);
3005 | if (!req) { |
3006 | ret = -ENOMEM; |
3007 | goto pstate_exit; |
3008 | } |
3009 | |
3010 | cpu = all_cpu_data[policy->cpu]; |
3011 | |
3012 | if (hwp_active) { |
3013 | u64 value; |
3014 | |
3015 | policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY_HWP; |
3016 | |
3017 | intel_pstate_get_hwp_cap(cpu); |
3018 | |
rdmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, &value);
3020 | WRITE_ONCE(cpu->hwp_req_cached, value); |
3021 | |
cpu->epp_cached = intel_pstate_get_epp(cpu, value);
3023 | } else { |
3024 | policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY; |
3025 | } |
3026 | |
3027 | freq = DIV_ROUND_UP(cpu->pstate.turbo_freq * global.min_perf_pct, 100); |
3028 | |
ret = freq_qos_add_request(&policy->constraints, req, FREQ_QOS_MIN,
freq);
if (ret < 0) {
dev_err(dev, "Failed to add min-freq constraint (%d)\n", ret);
3033 | goto free_req; |
3034 | } |
3035 | |
3036 | freq = DIV_ROUND_UP(cpu->pstate.turbo_freq * global.max_perf_pct, 100); |
3037 | |
ret = freq_qos_add_request(&policy->constraints, req + 1, FREQ_QOS_MAX,
freq);
if (ret < 0) {
dev_err(dev, "Failed to add max-freq constraint (%d)\n", ret);
3042 | goto remove_min_req; |
3043 | } |
3044 | |
3045 | policy->driver_data = req; |
3046 | |
3047 | return 0; |
3048 | |
3049 | remove_min_req: |
3050 | freq_qos_remove_request(req); |
3051 | free_req: |
kfree(req);
3053 | pstate_exit: |
3054 | intel_pstate_exit_perf_limits(policy); |
3055 | |
3056 | return ret; |
3057 | } |
3058 | |
3059 | static int intel_cpufreq_cpu_exit(struct cpufreq_policy *policy) |
3060 | { |
3061 | struct freq_qos_request *req; |
3062 | |
3063 | req = policy->driver_data; |
3064 | |
freq_qos_remove_request(req + 1);
freq_qos_remove_request(req);
kfree(req);
3068 | |
3069 | return intel_pstate_cpu_exit(policy); |
3070 | } |
3071 | |
3072 | static int intel_cpufreq_suspend(struct cpufreq_policy *policy) |
3073 | { |
3074 | intel_pstate_suspend(policy); |
3075 | |
3076 | if (hwp_active) { |
3077 | struct cpudata *cpu = all_cpu_data[policy->cpu]; |
3078 | u64 value = READ_ONCE(cpu->hwp_req_cached); |
3079 | |
3080 | /* |
3081 | * Clear the desired perf field in MSR_HWP_REQUEST in case |
3082 | * intel_cpufreq_adjust_perf() is in use and the last value |
3083 | * written by it may not be suitable. |
3084 | */ |
3085 | value &= ~HWP_DESIRED_PERF(~0L); |
wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
3087 | WRITE_ONCE(cpu->hwp_req_cached, value); |
3088 | } |
3089 | |
3090 | return 0; |
3091 | } |
3092 | |
3093 | static struct cpufreq_driver intel_cpufreq = { |
3094 | .flags = CPUFREQ_CONST_LOOPS, |
3095 | .verify = intel_cpufreq_verify_policy, |
3096 | .target = intel_cpufreq_target, |
3097 | .fast_switch = intel_cpufreq_fast_switch, |
3098 | .init = intel_cpufreq_cpu_init, |
3099 | .exit = intel_cpufreq_cpu_exit, |
3100 | .offline = intel_cpufreq_cpu_offline, |
3101 | .online = intel_pstate_cpu_online, |
3102 | .suspend = intel_cpufreq_suspend, |
3103 | .resume = intel_pstate_resume, |
3104 | .update_limits = intel_pstate_update_limits, |
3105 | .name = "intel_cpufreq" , |
3106 | }; |
3107 | |
3108 | static struct cpufreq_driver *default_driver; |
3109 | |
3110 | static void intel_pstate_driver_cleanup(void) |
3111 | { |
3112 | unsigned int cpu; |
3113 | |
3114 | cpus_read_lock(); |
3115 | for_each_online_cpu(cpu) { |
3116 | if (all_cpu_data[cpu]) { |
3117 | if (intel_pstate_driver == &intel_pstate) |
3118 | intel_pstate_clear_update_util_hook(cpu); |
3119 | |
spin_lock(&hwp_notify_lock);
kfree(all_cpu_data[cpu]);
WRITE_ONCE(all_cpu_data[cpu], NULL);
spin_unlock(&hwp_notify_lock);
3124 | } |
3125 | } |
3126 | cpus_read_unlock(); |
3127 | |
3128 | intel_pstate_driver = NULL; |
3129 | } |
3130 | |
3131 | static int intel_pstate_register_driver(struct cpufreq_driver *driver) |
3132 | { |
3133 | int ret; |
3134 | |
3135 | if (driver == &intel_pstate) |
3136 | intel_pstate_sysfs_expose_hwp_dynamic_boost(); |
3137 | |
3138 | memset(&global, 0, sizeof(global)); |
3139 | global.max_perf_pct = 100; |
3140 | |
3141 | intel_pstate_driver = driver; |
ret = cpufreq_register_driver(intel_pstate_driver);
3143 | if (ret) { |
3144 | intel_pstate_driver_cleanup(); |
3145 | return ret; |
3146 | } |
3147 | |
3148 | global.min_perf_pct = min_perf_pct_min(); |
3149 | |
3150 | return 0; |
3151 | } |
3152 | |
3153 | static ssize_t intel_pstate_show_status(char *buf) |
3154 | { |
3155 | if (!intel_pstate_driver) |
return sprintf(buf, "off\n");

return sprintf(buf, "%s\n", intel_pstate_driver == &intel_pstate ?
"active" : "passive");
3160 | } |
3161 | |
3162 | static int intel_pstate_update_status(const char *buf, size_t size) |
3163 | { |
if (size == 3 && !strncmp(buf, "off", size)) {
if (!intel_pstate_driver)
return -EINVAL;

if (hwp_active)
return -EBUSY;

cpufreq_unregister_driver(intel_pstate_driver);
intel_pstate_driver_cleanup();
return 0;
}

if (size == 6 && !strncmp(buf, "active", size)) {
if (intel_pstate_driver) {
if (intel_pstate_driver == &intel_pstate)
return 0;

cpufreq_unregister_driver(intel_pstate_driver);
}

return intel_pstate_register_driver(&intel_pstate);
}

if (size == 7 && !strncmp(buf, "passive", size)) {
if (intel_pstate_driver) {
if (intel_pstate_driver == &intel_cpufreq)
return 0;

cpufreq_unregister_driver(intel_pstate_driver);
intel_pstate_sysfs_hide_hwp_dynamic_boost();
}

return intel_pstate_register_driver(&intel_cpufreq);
3197 | } |
3198 | |
3199 | return -EINVAL; |
3200 | } |
3201 | |
3202 | static int no_load __initdata; |
3203 | static int no_hwp __initdata; |
3204 | static int hwp_only __initdata; |
3205 | static unsigned int force_load __initdata; |
3206 | |
3207 | static int __init intel_pstate_msrs_not_valid(void) |
3208 | { |
3209 | if (!pstate_funcs.get_max(0) || |
3210 | !pstate_funcs.get_min(0) || |
3211 | !pstate_funcs.get_turbo(0)) |
3212 | return -ENODEV; |
3213 | |
3214 | return 0; |
3215 | } |
3216 | |
3217 | static void __init copy_cpu_funcs(struct pstate_funcs *funcs) |
3218 | { |
3219 | pstate_funcs.get_max = funcs->get_max; |
3220 | pstate_funcs.get_max_physical = funcs->get_max_physical; |
3221 | pstate_funcs.get_min = funcs->get_min; |
3222 | pstate_funcs.get_turbo = funcs->get_turbo; |
3223 | pstate_funcs.get_scaling = funcs->get_scaling; |
3224 | pstate_funcs.get_val = funcs->get_val; |
3225 | pstate_funcs.get_vid = funcs->get_vid; |
3226 | pstate_funcs.get_aperf_mperf_shift = funcs->get_aperf_mperf_shift; |
3227 | } |
3228 | |
3229 | #ifdef CONFIG_ACPI |
3230 | |
3231 | static bool __init intel_pstate_no_acpi_pss(void) |
3232 | { |
3233 | int i; |
3234 | |
3235 | for_each_possible_cpu(i) { |
3236 | acpi_status status; |
3237 | union acpi_object *pss; |
3238 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; |
3239 | struct acpi_processor *pr = per_cpu(processors, i); |
3240 | |
3241 | if (!pr) |
3242 | continue; |
3243 | |
status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
if (ACPI_FAILURE(status))
continue;

pss = buffer.pointer;
if (pss && pss->type == ACPI_TYPE_PACKAGE) {
kfree(pss);
return false;
}

kfree(pss);
}

pr_debug("ACPI _PSS not found\n");
3258 | return true; |
3259 | } |
3260 | |
3261 | static bool __init intel_pstate_no_acpi_pcch(void) |
3262 | { |
3263 | acpi_status status; |
3264 | acpi_handle handle; |
3265 | |
status = acpi_get_handle(NULL, "\\_SB", &handle);
if (ACPI_FAILURE(status))
goto not_found;

if (acpi_has_method(handle, "PCCH"))
return false;

not_found:
pr_debug("ACPI PCCH not found\n");
3275 | return true; |
3276 | } |
3277 | |
3278 | static bool __init intel_pstate_has_acpi_ppc(void) |
3279 | { |
3280 | int i; |
3281 | |
3282 | for_each_possible_cpu(i) { |
3283 | struct acpi_processor *pr = per_cpu(processors, i); |
3284 | |
3285 | if (!pr) |
3286 | continue; |
if (acpi_has_method(pr->handle, "_PPC"))
return true;
}
pr_debug("ACPI _PPC not found\n");
3291 | return false; |
3292 | } |
3293 | |
3294 | enum { |
3295 | PSS, |
3296 | PPC, |
3297 | }; |
3298 | |
3299 | /* Hardware vendor-specific info that has its own power management modes */ |
3300 | static struct acpi_platform_list plat_info[] __initdata = { |
3301 | {"HP " , "ProLiant" , 0, ACPI_SIG_FADT, all_versions, NULL, PSS}, |
3302 | {"ORACLE" , "X4-2 " , 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, |
3303 | {"ORACLE" , "X4-2L " , 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, |
3304 | {"ORACLE" , "X4-2B " , 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, |
3305 | {"ORACLE" , "X3-2 " , 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, |
3306 | {"ORACLE" , "X3-2L " , 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, |
3307 | {"ORACLE" , "X3-2B " , 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, |
3308 | {"ORACLE" , "X4470M2 " , 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, |
3309 | {"ORACLE" , "X4270M3 " , 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, |
3310 | {"ORACLE" , "X4270M2 " , 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, |
3311 | {"ORACLE" , "X4170M2 " , 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, |
3312 | {"ORACLE" , "X4170 M3" , 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, |
3313 | {"ORACLE" , "X4275 M3" , 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, |
3314 | {"ORACLE" , "X6-2 " , 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, |
3315 | {"ORACLE" , "Sudbury " , 0, ACPI_SIG_FADT, all_versions, NULL, PPC}, |
3316 | { } /* End */ |
3317 | }; |
3318 | |
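/* Bits 8 and 18 of MSR_MISC_PWR_MGMT indicate out-of-band P-state control. */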
3319 | #define BITMASK_OOB (BIT(8) | BIT(18)) |
3320 | |
3321 | static bool __init intel_pstate_platform_pwr_mgmt_exists(void) |
3322 | { |
3323 | const struct x86_cpu_id *id; |
3324 | u64 misc_pwr; |
3325 | int idx; |
3326 | |
id = x86_match_cpu(intel_pstate_cpu_oob_ids);
if (id) {
rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
if (misc_pwr & BITMASK_OOB) {
pr_debug("Bit 8 or 18 in the MISC_PWR_MGMT MSR set\n");
pr_debug("P states are controlled in Out of Band mode by the firmware/hardware\n");
return true;
}
}

idx = acpi_match_platform_list(plat_info);
3338 | if (idx < 0) |
3339 | return false; |
3340 | |
3341 | switch (plat_info[idx].data) { |
3342 | case PSS: |
3343 | if (!intel_pstate_no_acpi_pss()) |
3344 | return false; |
3345 | |
3346 | return intel_pstate_no_acpi_pcch(); |
3347 | case PPC: |
3348 | return intel_pstate_has_acpi_ppc() && !force_load; |
3349 | } |
3350 | |
3351 | return false; |
3352 | } |
3353 | |
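/*
 * If "support_acpi_ppc" was passed on the command line, use the FADT SMI
 * command to request P-state control from the firmware.
 */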
3354 | static void intel_pstate_request_control_from_smm(void) |
3355 | { |
3356 | /* |
3357 | * It may be unsafe to request P-states control from SMM if _PPC support |
3358 | * has not been enabled. |
3359 | */ |
3360 | if (acpi_ppc) |
3361 | acpi_processor_pstate_control(); |
3362 | } |
3363 | #else /* CONFIG_ACPI not enabled */ |
3364 | static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; } |
3365 | static inline bool intel_pstate_has_acpi_ppc(void) { return false; } |
3366 | static inline void intel_pstate_request_control_from_smm(void) {} |
3367 | #endif /* CONFIG_ACPI */ |
3368 | |
3369 | #define INTEL_PSTATE_HWP_BROADWELL 0x01 |
3370 | |
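/*
 * CPUs supporting HWP.  Broadwell server parts are tagged with
 * INTEL_PSTATE_HWP_BROADWELL so the driver can apply Broadwell-specific
 * handling of the HWP capabilities; any other CPU advertising
 * X86_FEATURE_HWP matches the generic entry.
 */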
3371 | #define X86_MATCH_HWP(model, hwp_mode) \ |
3372 | X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_##model, \ |
3373 | X86_FEATURE_HWP, hwp_mode) |
3374 | |
3375 | static const struct x86_cpu_id hwp_support_ids[] __initconst = { |
3376 | X86_MATCH_HWP(BROADWELL_X, INTEL_PSTATE_HWP_BROADWELL), |
3377 | X86_MATCH_HWP(BROADWELL_D, INTEL_PSTATE_HWP_BROADWELL), |
3378 | X86_MATCH_HWP(ANY, 0), |
3379 | {} |
3380 | }; |
3381 | |
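/*
 * Check whether HWP was already enabled, e.g. by the BIOS, via bit 0 of
 * MSR_PM_ENABLE.
 */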
3382 | static bool intel_pstate_hwp_is_enabled(void) |
3383 | { |
3384 | u64 value; |
3385 | |
3386 | rdmsrl(MSR_PM_ENABLE, value); |
3387 | return !!(value & 0x1); |
3388 | } |
3389 | |
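/* Per-model overrides for the balance_performance EPP value. */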
3390 | static const struct x86_cpu_id intel_epp_balance_perf[] = { |
3391 | /* |
3392 | * Set EPP value as 102, this is the max suggested EPP |
3393 | * which can result in one core turbo frequency for |
3394 | * AlderLake Mobile CPUs. |
3395 | */ |
3396 | X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, 102), |
3397 | X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, 32), |
3398 | {} |
3399 | }; |
3400 | |
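/*
 * Driver entry point: pick the active (intel_pstate) or passive
 * (intel_cpufreq) mode of operation, bail out if the platform firmware owns
 * P-state control, and register the selected cpufreq driver.
 */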
3401 | static int __init intel_pstate_init(void) |
3402 | { |
3403 | static struct cpudata **_all_cpu_data; |
3404 | const struct x86_cpu_id *id; |
3405 | int rc; |
3406 | |
3407 | if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) |
3408 | return -ENODEV; |
3409 | |
3410 | id = x86_match_cpu(hwp_support_ids); |
3411 | if (id) { |
3412 | hwp_forced = intel_pstate_hwp_is_enabled(); |
3413 | |
3414 | if (hwp_forced) |
3415 | pr_info("HWP enabled by BIOS\n"); |
3416 | else if (no_load) |
3417 | return -ENODEV; |
3418 | |
3419 | copy_cpu_funcs(&core_funcs); |
3420 | /* |
3421 | * Avoid enabling HWP for processors without EPP support, |
3422 | * because that means incomplete HWP implementation which is a |
3423 | * corner case and supporting it is generally problematic. |
3424 | * |
3425 | * If HWP is enabled already, though, there is no choice but to |
3426 | * deal with it. |
3427 | */ |
3428 | if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) || hwp_forced) { |
3429 | WRITE_ONCE(hwp_active, 1); |
3430 | hwp_mode_bdw = id->driver_data; |
3431 | intel_pstate.attr = hwp_cpufreq_attrs; |
3432 | intel_cpufreq.attr = hwp_cpufreq_attrs; |
3433 | intel_cpufreq.flags |= CPUFREQ_NEED_UPDATE_LIMITS; |
3434 | intel_cpufreq.adjust_perf = intel_cpufreq_adjust_perf; |
3435 | if (!default_driver) |
3436 | default_driver = &intel_pstate; |
3437 | |
3438 | pstate_funcs.get_cpu_scaling = hwp_get_cpu_scaling; |
3439 | |
3440 | goto hwp_cpu_matched; |
3441 | } |
3442 | pr_info("HWP not enabled\n"); |
3443 | } else { |
3444 | if (no_load) |
3445 | return -ENODEV; |
3446 | |
3447 | id = x86_match_cpu(intel_pstate_cpu_ids); |
3448 | if (!id) { |
3449 | pr_info("CPU model not supported\n"); |
3450 | return -ENODEV; |
3451 | } |
3452 | |
3453 | copy_cpu_funcs((struct pstate_funcs *)id->driver_data); |
3454 | } |
3455 | |
3456 | if (intel_pstate_msrs_not_valid()) { |
3457 | pr_info("Invalid MSRs\n"); |
3458 | return -ENODEV; |
3459 | } |
3460 | /* Without HWP start in the passive mode. */ |
3461 | if (!default_driver) |
3462 | default_driver = &intel_cpufreq; |
3463 | |
3464 | hwp_cpu_matched: |
3465 | /* |
3466 | * The Intel pstate driver will be ignored if the platform |
3467 | * firmware has its own power management modes. |
3468 | */ |
3469 | if (intel_pstate_platform_pwr_mgmt_exists()) { |
3470 | pr_info("P-states controlled by the platform\n"); |
3471 | return -ENODEV; |
3472 | } |
3473 | |
3474 | if (!hwp_active && hwp_only) |
3475 | return -ENOTSUPP; |
3476 | |
3477 | pr_info("Intel P-state driver initializing\n"); |
3478 | |
3479 | _all_cpu_data = vzalloc(array_size(sizeof(void *), num_possible_cpus())); |
3480 | if (!_all_cpu_data) |
3481 | return -ENOMEM; |
3482 | |
3483 | WRITE_ONCE(all_cpu_data, _all_cpu_data); |
3484 | |
3485 | intel_pstate_request_control_from_smm(); |
3486 | |
3487 | intel_pstate_sysfs_expose_params(); |
3488 | |
3489 | if (hwp_active) { |
3490 | const struct x86_cpu_id *id = x86_match_cpu(intel_epp_balance_perf); |
3491 | |
3492 | if (id) |
3493 | epp_values[EPP_INDEX_BALANCE_PERFORMANCE] = id->driver_data; |
3494 | } |
3495 | |
3496 | mutex_lock(&intel_pstate_driver_lock); |
3497 | rc = intel_pstate_register_driver(default_driver); |
3498 | mutex_unlock(&intel_pstate_driver_lock); |
3499 | if (rc) { |
3500 | intel_pstate_sysfs_remove(); |
3501 | return rc; |
3502 | } |
3503 | |
3504 | if (hwp_active) { |
3505 | const struct x86_cpu_id *id; |
3506 | |
3507 | id = x86_match_cpu(intel_pstate_cpu_ee_disable_ids); |
3508 | if (id) { |
3509 | set_power_ctl_ee_state(false); |
3510 | pr_info("Disabling energy efficiency optimization\n"); |
3511 | } |
3512 | |
3513 | pr_info("HWP enabled\n"); |
3514 | } else if (boot_cpu_has(X86_FEATURE_HYBRID_CPU)) { |
3515 | pr_warn("Problematic setup: Hybrid processor with disabled HWP\n"); |
3516 | } |
3517 | |
3518 | return 0; |
3519 | } |
3520 | device_initcall(intel_pstate_init); |
3521 | |
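/*
 * Parse the "intel_pstate=" kernel command line options: disable, active,
 * passive, no_hwp, force, hwp_only, per_cpu_perf_limits and (with ACPI)
 * support_acpi_ppc.
 */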
3522 | static int __init intel_pstate_setup(char *str) |
3523 | { |
3524 | if (!str) |
3525 | return -EINVAL; |
3526 | |
3527 | if (!strcmp(str, "disable")) |
3528 | no_load = 1; |
3529 | else if (!strcmp(str, "active")) |
3530 | default_driver = &intel_pstate; |
3531 | else if (!strcmp(str, "passive")) |
3532 | default_driver = &intel_cpufreq; |
3533 | |
3534 | if (!strcmp(str, "no_hwp")) |
3535 | no_hwp = 1; |
3536 | |
3537 | if (!strcmp(str, "force")) |
3538 | force_load = 1; |
3539 | if (!strcmp(str, "hwp_only")) |
3540 | hwp_only = 1; |
3541 | if (!strcmp(str, "per_cpu_perf_limits")) |
3542 | per_cpu_limits = true; |
3543 | |
3544 | #ifdef CONFIG_ACPI |
3545 | if (!strcmp(str, "support_acpi_ppc")) |
3546 | acpi_ppc = true; |
3547 | #endif |
3548 | |
3549 | return 0; |
3550 | } |
3551 | early_param("intel_pstate", intel_pstate_setup); |
3552 | |
3553 | MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>"); |
3554 | MODULE_DESCRIPTION("'intel_pstate' - P state driver Intel Core processors"); |
3555 | |