// SPDX-License-Identifier: GPL-2.0-only
/*
 * Thermal throttle event support code (such as syslog messaging and rate
 * limiting) that was factored out from x86_64 (mce_intel.c) and i386 (p4.c).
 *
 * This allows consistent reporting of CPU thermal throttle events.
 *
 * Maintains a counter in /sys that keeps track of the number of thermal
 * events, such that the user knows how bad the thermal problem might be
 * (since the logging to syslog is rate limited).
 *
 * Author: Dmitriy Zavin (dmitriyz@google.com)
 *
 * Credits: Adapted from Zwane Mwaikambo's original code in mce_intel.c.
 * Inspired by Ross Biro's and Al Borchers' counter code.
 */
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/cpu.h>

#include <asm/processor.h>
#include <asm/thermal.h>
#include <asm/traps.h>
#include <asm/apic.h>
#include <asm/irq.h>
#include <asm/msr.h>

#include "intel_hfi.h"
#include "thermal_interrupt.h"

/* How long to wait between reporting thermal events */
#define CHECK_INTERVAL		(300 * HZ)

#define THERMAL_THROTTLING_EVENT	0
#define POWER_LIMIT_EVENT		1

/**
 * struct _thermal_state - Represent the current thermal event state
 * @next_check:			Stores the next timestamp at which logging
 *				the next warning message is allowed.
 * @last_interrupt_time:	Stores the timestamp for the last threshold
 *				high event.
 * @therm_work:			Delayed workqueue structure
 * @count:			Stores the current running count for thermal
 *				or power threshold interrupts.
 * @last_count:			Stores the previous running count for thermal
 *				or power threshold interrupts.
 * @max_time_ms:		Stores the maximum amount of time the CPU was
 *				in the throttled state for a single thermal
 *				threshold high-to-low transition.
 * @total_time_ms:		Stores the cumulative time during which the
 *				CPU was in the throttled state.
 * @rate_control_active:	Set when a throttling message is logged.
 *				This is used for the purpose of rate-control.
 * @new_event:			Stores the last high/low status of the
 *				THERM_STATUS_PROCHOT or
 *				THERM_STATUS_POWER_LIMIT.
 * @level:			Stores whether this _thermal_state instance is
 *				for a CORE level or for PACKAGE level.
 * @sample_index:		Index for storing the next sample in the buffer
 *				temp_samples[].
 * @sample_count:		Total number of samples collected in the buffer
 *				temp_samples[].
 * @average:			The last moving average of temperature samples
 * @baseline_temp:		Temperature at which the thermal threshold high
 *				interrupt was generated.
 * @temp_samples:		Storage for temperature samples to calculate
 *				moving average.
 *
 * This structure is used to represent data related to thermal state for a CPU.
 * There is a separate storage for core and package level for each CPU.
 */
struct _thermal_state {
	u64			next_check;
	u64			last_interrupt_time;
	struct delayed_work	therm_work;
	unsigned long		count;
	unsigned long		last_count;
	unsigned long		max_time_ms;
	unsigned long		total_time_ms;
	bool			rate_control_active;
	bool			new_event;
	u8			level;
	u8			sample_index;
	u8			sample_count;
	u8			average;
	u8			baseline_temp;
	u8			temp_samples[3];
};

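/*
 * Per-CPU bookkeeping: one instance of the state above for each event type
 * (throttling, power limit, threshold crossing) at both core and package
 * scope.
 */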
struct thermal_state {
	struct _thermal_state core_throttle;
	struct _thermal_state core_power_limit;
	struct _thermal_state package_throttle;
	struct _thermal_state package_power_limit;
	struct _thermal_state core_thresh0;
	struct _thermal_state core_thresh1;
	struct _thermal_state pkg_thresh0;
	struct _thermal_state pkg_thresh1;
};

/* Callback to handle core threshold interrupts */
int (*platform_thermal_notify)(__u64 msr_val);
EXPORT_SYMBOL(platform_thermal_notify);

/* Callback to handle core package threshold interrupts */
int (*platform_thermal_package_notify)(__u64 msr_val);
EXPORT_SYMBOL_GPL(platform_thermal_package_notify);

/*
 * Callback support of rate control: returns true if the
 * callback implements its own rate control.
 */
bool (*platform_thermal_package_rate_control)(void);
EXPORT_SYMBOL_GPL(platform_thermal_package_rate_control);


static DEFINE_PER_CPU(struct thermal_state, thermal_state);

static atomic_t therm_throt_en	= ATOMIC_INIT(0);

static u32 lvtthmr_init __read_mostly;

#ifdef CONFIG_SYSFS
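/*
 * Helper macros: generate a read-only device attribute and the matching
 * show() function for one field of the per-CPU thermal state.
 */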
#define define_therm_throt_device_one_ro(_name)				\
	static DEVICE_ATTR(_name, 0444,					\
			   therm_throt_device_show_##_name,		\
			   NULL)					\

#define define_therm_throt_device_show_func(event, name)		\
									\
static ssize_t therm_throt_device_show_##event##_##name(		\
			struct device *dev,				\
			struct device_attribute *attr,			\
			char *buf)					\
{									\
	unsigned int cpu = dev->id;					\
	ssize_t ret;							\
									\
	preempt_disable();	/* CPU hotplug */			\
	if (cpu_online(cpu)) {						\
		ret = sprintf(buf, "%lu\n",				\
			      per_cpu(thermal_state, cpu).event.name);	\
	} else								\
		ret = 0;						\
	preempt_enable();						\
									\
	return ret;							\
}

define_therm_throt_device_show_func(core_throttle, count);
define_therm_throt_device_one_ro(core_throttle_count);

define_therm_throt_device_show_func(core_power_limit, count);
define_therm_throt_device_one_ro(core_power_limit_count);

define_therm_throt_device_show_func(package_throttle, count);
define_therm_throt_device_one_ro(package_throttle_count);

define_therm_throt_device_show_func(package_power_limit, count);
define_therm_throt_device_one_ro(package_power_limit_count);

define_therm_throt_device_show_func(core_throttle, max_time_ms);
define_therm_throt_device_one_ro(core_throttle_max_time_ms);

define_therm_throt_device_show_func(package_throttle, max_time_ms);
define_therm_throt_device_one_ro(package_throttle_max_time_ms);

define_therm_throt_device_show_func(core_throttle, total_time_ms);
define_therm_throt_device_one_ro(core_throttle_total_time_ms);

define_therm_throt_device_show_func(package_throttle, total_time_ms);
define_therm_throt_device_one_ro(package_throttle_total_time_ms);

static struct attribute *thermal_throttle_attrs[] = {
	&dev_attr_core_throttle_count.attr,
	&dev_attr_core_throttle_max_time_ms.attr,
	&dev_attr_core_throttle_total_time_ms.attr,
	NULL
};

static const struct attribute_group thermal_attr_group = {
	.attrs	= thermal_throttle_attrs,
	.name	= "thermal_throttle"
};
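/*
 * The attributes above end up under the per-CPU device, e.g.:
 *   /sys/devices/system/cpu/cpu0/thermal_throttle/core_throttle_count
 * Package and power-limit files are added conditionally in
 * thermal_throttle_add_dev() below.
 */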
#endif /* CONFIG_SYSFS */

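/*
 * While a throttling episode is active, the status is polled once per
 * second. THERM_STATUS_PROCHOT_LOG is the sticky PROCHOT log bit in the
 * thermal status MSRs.
 */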
#define THERM_THROT_POLL_INTERVAL	HZ
#define THERM_STATUS_PROCHOT_LOG	BIT(1)

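/*
 * Masks of the write-0-to-clear log bits this driver owns in the core and
 * package thermal status MSRs, built once from the available CPU features.
 */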
static u64 therm_intr_core_clear_mask;
static u64 therm_intr_pkg_clear_mask;

static void thermal_intr_init_core_clear_mask(void)
{
	if (therm_intr_core_clear_mask)
		return;

	/*
	 * Reference: Intel SDM Volume 4
	 * "Table 2-2. IA-32 Architectural MSRs", MSR 0x19C
	 * IA32_THERM_STATUS.
	 */

	/*
	 * Bit 1, 3, 5: CPUID.01H:EDX[22] = 1. This driver will not
	 * enable interrupts when it is 0, as it checks for X86_FEATURE_ACPI.
	 */
	therm_intr_core_clear_mask = (BIT(1) | BIT(3) | BIT(5));

	/*
	 * Bit 7 and 9: Thermal Threshold #1 and #2 log
	 * If CPUID.01H:ECX[8] = 1
	 */
	if (boot_cpu_has(X86_FEATURE_TM2))
		therm_intr_core_clear_mask |= (BIT(7) | BIT(9));

	/* Bit 11: Power Limitation log (R/WC0) If CPUID.06H:EAX[4] = 1 */
	if (boot_cpu_has(X86_FEATURE_PLN))
		therm_intr_core_clear_mask |= BIT(11);

	/*
	 * Bit 13: Current Limit log (R/WC0) If CPUID.06H:EAX[7] = 1
	 * Bit 15: Cross Domain Limit log (R/WC0) If CPUID.06H:EAX[7] = 1
	 */
	if (boot_cpu_has(X86_FEATURE_HWP))
		therm_intr_core_clear_mask |= (BIT(13) | BIT(15));
}

static void thermal_intr_init_pkg_clear_mask(void)
{
	if (therm_intr_pkg_clear_mask)
		return;

	/*
	 * Reference: Intel SDM Volume 4
	 * "Table 2-2. IA-32 Architectural MSRs", MSR 0x1B1
	 * IA32_PACKAGE_THERM_STATUS.
	 */

	/* All bits except BIT 26 depend on CPUID.06H: EAX[6] = 1 */
	if (boot_cpu_has(X86_FEATURE_PTS))
		therm_intr_pkg_clear_mask = (BIT(1) | BIT(3) | BIT(5) | BIT(7) | BIT(9) | BIT(11));

	/*
	 * Intel SDM Volume 2A: Thermal and Power Management Leaf
	 * Bit 26: CPUID.06H: EAX[19] = 1
	 */
	if (boot_cpu_has(X86_FEATURE_HFI))
		therm_intr_pkg_clear_mask |= BIT(26);
}

/*
 * Clear the log bits that are set in bit_mask in the core or package
 * thermal status register, depending on level.
 */
void thermal_clear_package_intr_status(int level, u64 bit_mask)
{
	u64 msr_val;
	int msr;

	if (level == CORE_LEVEL) {
		msr	= MSR_IA32_THERM_STATUS;
		msr_val	= therm_intr_core_clear_mask;
	} else {
		msr	= MSR_IA32_PACKAGE_THERM_STATUS;
		msr_val	= therm_intr_pkg_clear_mask;
	}

	msr_val &= ~bit_mask;
	wrmsrl(msr, msr_val);
}
EXPORT_SYMBOL_GPL(thermal_clear_package_intr_status);

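/*
 * Read the sticky PROCHOT log bit and the digital temperature readout,
 * which reports the temperature as an offset below the trip temperature.
 */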
static void get_therm_status(int level, bool *proc_hot, u8 *temp)
{
	int msr;
	u64 msr_val;

	if (level == CORE_LEVEL)
		msr = MSR_IA32_THERM_STATUS;
	else
		msr = MSR_IA32_PACKAGE_THERM_STATUS;

	rdmsrl(msr, msr_val);
	if (msr_val & THERM_STATUS_PROCHOT_LOG)
		*proc_hot = true;
	else
		*proc_hot = false;

	*temp = (msr_val >> 16) & 0x7F;
}

static void __maybe_unused throttle_active_work(struct work_struct *work)
{
	struct _thermal_state *state = container_of(to_delayed_work(work),
						struct _thermal_state, therm_work);
	unsigned int i, avg, this_cpu = smp_processor_id();
	u64 now = get_jiffies_64();
	bool hot;
	u8 temp;

	get_therm_status(state->level, &hot, &temp);
	/* temperature value is offset from the max so lesser means hotter */
	if (!hot && temp > state->baseline_temp) {
		if (state->rate_control_active)
			pr_info("CPU%d: %s temperature/speed normal (total events = %lu)\n",
				this_cpu,
				state->level == CORE_LEVEL ? "Core" : "Package",
				state->count);

		state->rate_control_active = false;
		return;
	}

	if (time_before64(now, state->next_check) &&
			  state->rate_control_active)
		goto re_arm;

	state->next_check = now + CHECK_INTERVAL;

	if (state->count != state->last_count) {
		/* There was one new thermal interrupt */
		state->last_count = state->count;
		state->average = 0;
		state->sample_count = 0;
		state->sample_index = 0;
	}

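	/*
	 * Collect one sample per poll. Once the window is full, compare the
	 * new moving average with the previous one: the readout is an offset
	 * below the trip temperature, so a falling average means the CPU is
	 * getting hotter.
	 */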
	state->temp_samples[state->sample_index] = temp;
	state->sample_count++;
	state->sample_index = (state->sample_index + 1) % ARRAY_SIZE(state->temp_samples);
	if (state->sample_count < ARRAY_SIZE(state->temp_samples))
		goto re_arm;

	avg = 0;
	for (i = 0; i < ARRAY_SIZE(state->temp_samples); ++i)
		avg += state->temp_samples[i];

	avg /= ARRAY_SIZE(state->temp_samples);

	if (state->average > avg) {
		pr_warn("CPU%d: %s temperature is above threshold, cpu clock is throttled (total events = %lu)\n",
			this_cpu,
			state->level == CORE_LEVEL ? "Core" : "Package",
			state->count);
		state->rate_control_active = true;
	}

	state->average = avg;

re_arm:
	thermal_clear_package_intr_status(state->level, THERM_STATUS_PROCHOT_LOG);
	schedule_delayed_work_on(this_cpu, &state->therm_work, THERM_THROT_POLL_INTERVAL);
}

/***
 * therm_throt_process - Process thermal throttling event from interrupt
 * @new_event: Whether the condition is currently asserted (boolean), since
 *             the thermal interrupt normally fires both when the thermal
 *             event begins and once the event has ended.
 * @event: THERMAL_THROTTLING_EVENT or POWER_LIMIT_EVENT.
 * @level: CORE_LEVEL or PACKAGE_LEVEL.
 *
 * This function is called by the thermal interrupt after the
 * IRQ has been acknowledged.
 *
 * It will take care of rate limiting and printing messages to the syslog.
 */
static void therm_throt_process(bool new_event, int event, int level)
{
	struct _thermal_state *state;
	unsigned int this_cpu = smp_processor_id();
	bool old_event;
	u64 now;
	struct thermal_state *pstate = &per_cpu(thermal_state, this_cpu);

	now = get_jiffies_64();
	if (level == CORE_LEVEL) {
		if (event == THERMAL_THROTTLING_EVENT)
			state = &pstate->core_throttle;
		else if (event == POWER_LIMIT_EVENT)
			state = &pstate->core_power_limit;
		else
			return;
	} else if (level == PACKAGE_LEVEL) {
		if (event == THERMAL_THROTTLING_EVENT)
			state = &pstate->package_throttle;
		else if (event == POWER_LIMIT_EVENT)
			state = &pstate->package_power_limit;
		else
			return;
	} else
		return;

	old_event = state->new_event;
	state->new_event = new_event;

	if (new_event)
		state->count++;

	if (event != THERMAL_THROTTLING_EVENT)
		return;

	if (new_event && !state->last_interrupt_time) {
		bool hot;
		u8 temp;

		get_therm_status(state->level, &hot, &temp);
		/*
		 * Ignore short temperature spike as the system is not close
		 * to PROCHOT. 10C offset is large enough to ignore. It is
		 * already dropped from the high threshold temperature.
		 */
		if (temp > 10)
			return;

		state->baseline_temp = temp;
		state->last_interrupt_time = now;
		schedule_delayed_work_on(this_cpu, &state->therm_work, THERM_THROT_POLL_INTERVAL);
	} else if (old_event && state->last_interrupt_time) {
		unsigned long throttle_time;

		throttle_time = jiffies_delta_to_msecs(now - state->last_interrupt_time);
		if (throttle_time > state->max_time_ms)
			state->max_time_ms = throttle_time;
		state->total_time_ms += throttle_time;
		state->last_interrupt_time = 0;
	}
}

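/*
 * Rate-limit threshold notifications: allow at most one event per
 * CHECK_INTERVAL for each threshold/level combination.
 */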
static int thresh_event_valid(int level, int event)
{
	struct _thermal_state *state;
	unsigned int this_cpu = smp_processor_id();
	struct thermal_state *pstate = &per_cpu(thermal_state, this_cpu);
	u64 now = get_jiffies_64();

	if (level == PACKAGE_LEVEL)
		state = (event == 0) ? &pstate->pkg_thresh0 :
						&pstate->pkg_thresh1;
	else
		state = (event == 0) ? &pstate->core_thresh0 :
						&pstate->core_thresh1;

	if (time_before64(now, state->next_check))
		return 0;

	state->next_check = now + CHECK_INTERVAL;

	return 1;
}

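/*
 * Power limit notification (PLN) interrupts are off by default; the
 * "int_pln_enable" kernel command line parameter turns them on.
 */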
static bool int_pln_enable;
static int __init int_pln_enable_setup(char *s)
{
	int_pln_enable = true;

	return 1;
}
__setup("int_pln_enable", int_pln_enable_setup);

#ifdef CONFIG_SYSFS
/* Add/Remove thermal_throttle interface for CPU device: */
static int thermal_throttle_add_dev(struct device *dev, unsigned int cpu)
{
	int err;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	err = sysfs_create_group(&dev->kobj, &thermal_attr_group);
	if (err)
		return err;

	if (cpu_has(c, X86_FEATURE_PLN) && int_pln_enable) {
		err = sysfs_add_file_to_group(&dev->kobj,
					      &dev_attr_core_power_limit_count.attr,
					      thermal_attr_group.name);
		if (err)
			goto del_group;
	}

	if (cpu_has(c, X86_FEATURE_PTS)) {
		err = sysfs_add_file_to_group(&dev->kobj,
					      &dev_attr_package_throttle_count.attr,
					      thermal_attr_group.name);
		if (err)
			goto del_group;

		err = sysfs_add_file_to_group(&dev->kobj,
					      &dev_attr_package_throttle_max_time_ms.attr,
					      thermal_attr_group.name);
		if (err)
			goto del_group;

		err = sysfs_add_file_to_group(&dev->kobj,
					      &dev_attr_package_throttle_total_time_ms.attr,
					      thermal_attr_group.name);
		if (err)
			goto del_group;

		if (cpu_has(c, X86_FEATURE_PLN) && int_pln_enable) {
			err = sysfs_add_file_to_group(&dev->kobj,
					&dev_attr_package_power_limit_count.attr,
					thermal_attr_group.name);
			if (err)
				goto del_group;
		}
	}

	return 0;

del_group:
	sysfs_remove_group(&dev->kobj, &thermal_attr_group);

	return err;
}

static void thermal_throttle_remove_dev(struct device *dev)
{
	sysfs_remove_group(&dev->kobj, &thermal_attr_group);
}

/* Get notified when a cpu comes on/off. Be hotplug friendly. */
static int thermal_throttle_online(unsigned int cpu)
{
	struct thermal_state *state = &per_cpu(thermal_state, cpu);
	struct device *dev = get_cpu_device(cpu);
	u32 l;

	state->package_throttle.level = PACKAGE_LEVEL;
	state->core_throttle.level = CORE_LEVEL;

	INIT_DELAYED_WORK(&state->package_throttle.therm_work, throttle_active_work);
	INIT_DELAYED_WORK(&state->core_throttle.therm_work, throttle_active_work);

	/*
	 * The first CPU coming online will enable the HFI. Usually this causes
	 * hardware to issue an HFI thermal interrupt. Such interrupt will reach
	 * the CPU once we enable the thermal vector in the local APIC.
	 */
	intel_hfi_online(cpu);

	/* Unmask the thermal vector after the above workqueues are initialized. */
	l = apic_read(APIC_LVTTHMR);
	apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);

	return thermal_throttle_add_dev(dev, cpu);
}

static int thermal_throttle_offline(unsigned int cpu)
{
	struct thermal_state *state = &per_cpu(thermal_state, cpu);
	struct device *dev = get_cpu_device(cpu);
	u32 l;

	/* Mask the thermal vector before draining any pending work */
	l = apic_read(APIC_LVTTHMR);
	apic_write(APIC_LVTTHMR, l | APIC_LVT_MASKED);

	intel_hfi_offline(cpu);

	cancel_delayed_work_sync(&state->package_throttle.therm_work);
	cancel_delayed_work_sync(&state->core_throttle.therm_work);

	state->package_throttle.rate_control_active = false;
	state->core_throttle.rate_control_active = false;

	thermal_throttle_remove_dev(dev);
	return 0;
}

static __init int thermal_throttle_init_device(void)
{
	int ret;

	if (!atomic_read(&therm_throt_en))
		return 0;

	intel_hfi_init();

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/therm:online",
				thermal_throttle_online,
				thermal_throttle_offline);
	return ret < 0 ? ret : 0;
}
device_initcall(thermal_throttle_init_device);

#endif /* CONFIG_SYSFS */

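/*
 * Forward package threshold events to the registered platform callback,
 * applying the driver's rate limit unless the callback implements its own.
 */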
static void notify_package_thresholds(__u64 msr_val)
{
	bool notify_thres_0 = false;
	bool notify_thres_1 = false;

	if (!platform_thermal_package_notify)
		return;

	/* lower threshold check */
	if (msr_val & THERM_LOG_THRESHOLD0)
		notify_thres_0 = true;
	/* higher threshold check */
	if (msr_val & THERM_LOG_THRESHOLD1)
		notify_thres_1 = true;

	if (!notify_thres_0 && !notify_thres_1)
		return;

	if (platform_thermal_package_rate_control &&
		platform_thermal_package_rate_control()) {
		/* Rate control is implemented in callback */
		platform_thermal_package_notify(msr_val);
		return;
	}

	/* lower threshold reached */
	if (notify_thres_0 && thresh_event_valid(PACKAGE_LEVEL, 0))
		platform_thermal_package_notify(msr_val);
	/* higher threshold reached */
	if (notify_thres_1 && thresh_event_valid(PACKAGE_LEVEL, 1))
		platform_thermal_package_notify(msr_val);
}

static void notify_thresholds(__u64 msr_val)
{
	/*
	 * Check whether the interrupt handler is defined;
	 * otherwise simply return.
	 */
	if (!platform_thermal_notify)
		return;

	/* lower threshold reached */
	if ((msr_val & THERM_LOG_THRESHOLD0) &&
			thresh_event_valid(CORE_LEVEL, 0))
		platform_thermal_notify(msr_val);
	/* higher threshold reached */
	if ((msr_val & THERM_LOG_THRESHOLD1) &&
			thresh_event_valid(CORE_LEVEL, 1))
		platform_thermal_notify(msr_val);
}

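/*
 * Weak default for HWP notifications: just clear the status MSR. A full
 * handler (e.g. in intel_pstate) can override this.
 */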
void __weak notify_hwp_interrupt(void)
{
	wrmsrl_safe(MSR_HWP_STATUS, 0);
}

/* Thermal transition interrupt handler */
void intel_thermal_interrupt(void)
{
	__u64 msr_val;

	if (static_cpu_has(X86_FEATURE_HWP))
		notify_hwp_interrupt();

	rdmsrl(MSR_IA32_THERM_STATUS, msr_val);

	/* Check for violation of core thermal thresholds */
	notify_thresholds(msr_val);

	therm_throt_process(msr_val & THERM_STATUS_PROCHOT,
			    THERMAL_THROTTLING_EVENT,
			    CORE_LEVEL);

	if (this_cpu_has(X86_FEATURE_PLN) && int_pln_enable)
		therm_throt_process(msr_val & THERM_STATUS_POWER_LIMIT,
					POWER_LIMIT_EVENT,
					CORE_LEVEL);

	if (this_cpu_has(X86_FEATURE_PTS)) {
		rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val);
		/* check violations of package thermal thresholds */
		notify_package_thresholds(msr_val);
		therm_throt_process(msr_val & PACKAGE_THERM_STATUS_PROCHOT,
					THERMAL_THROTTLING_EVENT,
					PACKAGE_LEVEL);
		if (this_cpu_has(X86_FEATURE_PLN) && int_pln_enable)
			therm_throt_process(msr_val &
					PACKAGE_THERM_STATUS_POWER_LIMIT,
					POWER_LIMIT_EVENT,
					PACKAGE_LEVEL);

		if (this_cpu_has(X86_FEATURE_HFI))
			intel_hfi_process_event(msr_val &
						PACKAGE_THERM_STATUS_HFI_UPDATED);
	}
}

/* Thermal monitoring depends on APIC, ACPI and clock modulation */
static int intel_thermal_supported(struct cpuinfo_x86 *c)
{
	if (!boot_cpu_has(X86_FEATURE_APIC))
		return 0;
	if (!cpu_has(c, X86_FEATURE_ACPI) || !cpu_has(c, X86_FEATURE_ACC))
		return 0;
	return 1;
}

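/*
 * Queried from the arch thermal vector handling code to see whether the
 * interrupt should be processed.
 */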
bool x86_thermal_enabled(void)
{
	return atomic_read(&therm_throt_en);
}

void __init therm_lvt_init(void)
{
	/*
	 * This function is only called on the boot CPU. Save the initial
	 * thermal LVT value on the BSP and use that value later to restore
	 * the thermal LVT entry that the BIOS programmed on the APs.
	 */
	if (intel_thermal_supported(&boot_cpu_data))
		lvtthmr_init = apic_read(APIC_LVTTHMR);
}

void intel_init_thermal(struct cpuinfo_x86 *c)
{
	unsigned int cpu = smp_processor_id();
	int tm2 = 0;
	u32 l, h;

	if (!intel_thermal_supported(c))
		return;

	/*
	 * First check if it's enabled already, in which case there might
	 * be some SMM goo which handles it, so we can't even put a handler
	 * since it might be delivered via SMI already:
	 */
	rdmsr(MSR_IA32_MISC_ENABLE, l, h);

	h = lvtthmr_init;
	/*
	 * The initial value of thermal LVT entries on all APs always reads
	 * 0x10000 because APs are woken up by BSP issuing INIT-SIPI-SIPI
	 * sequence to them and LVT registers are reset to 0s except for
	 * the mask bits which are set to 1s when APs receive INIT IPI.
	 * If BIOS takes over the thermal interrupt and sets its interrupt
	 * delivery mode to SMI (not fixed), it restores the value that the
	 * BIOS has programmed on AP based on BSP's info we saved since BIOS
	 * is always setting the same value for all threads/cores.
	 */
	if ((h & APIC_DM_FIXED_MASK) != APIC_DM_FIXED)
		apic_write(APIC_LVTTHMR, lvtthmr_init);


	if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) {
		if (system_state == SYSTEM_BOOTING)
			pr_debug("CPU%d: Thermal monitoring handled by SMI\n", cpu);
		return;
	}

	/* early Pentium M models use different method for enabling TM2 */
	if (cpu_has(c, X86_FEATURE_TM2)) {
		if (c->x86 == 6 && (c->x86_model == 9 || c->x86_model == 13)) {
			rdmsr(MSR_THERM2_CTL, l, h);
			if (l & MSR_THERM2_CTL_TM_SELECT)
				tm2 = 1;
		} else if (l & MSR_IA32_MISC_ENABLE_TM2)
			tm2 = 1;
	}

	/* We'll mask the thermal vector in the lapic till we're ready: */
	h = THERMAL_APIC_VECTOR | APIC_DM_FIXED | APIC_LVT_MASKED;
	apic_write(APIC_LVTTHMR, h);

	thermal_intr_init_core_clear_mask();
	thermal_intr_init_pkg_clear_mask();

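	/*
	 * Enable interrupts for both directions of the thermal thresholds;
	 * include the power limit notification only when "int_pln_enable"
	 * was given on the command line.
	 */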
	rdmsr(MSR_IA32_THERM_INTERRUPT, l, h);
	if (cpu_has(c, X86_FEATURE_PLN) && !int_pln_enable)
		wrmsr(MSR_IA32_THERM_INTERRUPT,
			(l | (THERM_INT_LOW_ENABLE
			| THERM_INT_HIGH_ENABLE)) & ~THERM_INT_PLN_ENABLE, h);
	else if (cpu_has(c, X86_FEATURE_PLN) && int_pln_enable)
		wrmsr(MSR_IA32_THERM_INTERRUPT,
			l | (THERM_INT_LOW_ENABLE
			| THERM_INT_HIGH_ENABLE | THERM_INT_PLN_ENABLE), h);
	else
		wrmsr(MSR_IA32_THERM_INTERRUPT,
			l | (THERM_INT_LOW_ENABLE | THERM_INT_HIGH_ENABLE), h);

	if (cpu_has(c, X86_FEATURE_PTS)) {
		rdmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
		if (cpu_has(c, X86_FEATURE_PLN) && !int_pln_enable)
			wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT,
				(l | (PACKAGE_THERM_INT_LOW_ENABLE
				| PACKAGE_THERM_INT_HIGH_ENABLE))
				& ~PACKAGE_THERM_INT_PLN_ENABLE, h);
		else if (cpu_has(c, X86_FEATURE_PLN) && int_pln_enable)
			wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT,
				l | (PACKAGE_THERM_INT_LOW_ENABLE
				| PACKAGE_THERM_INT_HIGH_ENABLE
				| PACKAGE_THERM_INT_PLN_ENABLE), h);
		else
			wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT,
				l | (PACKAGE_THERM_INT_LOW_ENABLE
				| PACKAGE_THERM_INT_HIGH_ENABLE), h);

		if (cpu_has(c, X86_FEATURE_HFI)) {
			rdmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
			wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT,
			      l | PACKAGE_THERM_INT_HFI_ENABLE, h);
		}
	}

	rdmsr(MSR_IA32_MISC_ENABLE, l, h);
	wrmsr(MSR_IA32_MISC_ENABLE, l | MSR_IA32_MISC_ENABLE_TM1, h);

	pr_info_once("CPU0: Thermal monitoring enabled (%s)\n",
		     tm2 ? "TM2" : "TM1");

	/* enable thermal throttle processing */
	atomic_set(&therm_throt_en, 1);
}