// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * processor_idle - idle state submodule to the ACPI processor driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *			- Added processor hotplug support
 * Copyright (C) 2005 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *			- Added support for C3 on SMP
 */
#define pr_fmt(fmt) "ACPI: " fmt

#include <linux/module.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/sched.h>	/* need_resched() */
#include <linux/sort.h>
#include <linux/tick.h>
#include <linux/cpuidle.h>
#include <linux/cpu.h>
#include <linux/minmax.h>
#include <linux/perf_event.h>
#include <acpi/processor.h>
#include <linux/context_tracking.h>

/*
 * Include the apic definitions for x86 to have the APIC timer related defines
 * available also for UP (on SMP it gets magically included via linux/smp.h).
 * asm/acpi.h is not an option, as it would require more include magic. Also
 * creating an empty asm-ia64/apic.h would just trade pest vs. cholera.
 */
#ifdef CONFIG_X86
#include <asm/apic.h>
#include <asm/cpu.h>
#endif

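/*
 * When the architecture provides cpu_relax(), cpuidle installs a polling
 * state at index 0 (see cpuidle_poll_state_init() in
 * acpi_processor_setup_cstates() below), so the ACPI C-states start at
 * index 1; otherwise they start at index 0.
 */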
#define ACPI_IDLE_STATE_START	(IS_ENABLED(CONFIG_ARCH_HAS_CPU_RELAX) ? 1 : 0)

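/*
 * Module parameters: max_cstate caps the deepest C-state that will be
 * used, nocst disables _CST evaluation (falling back to FADT data), and
 * bm_check_disable skips the bus-master activity check before C3 entry.
 */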
static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
module_param(max_cstate, uint, 0400);
static bool nocst __read_mostly;
module_param(nocst, bool, 0400);
static bool bm_check_disable __read_mostly;
module_param(bm_check_disable, bool, 0400);

static unsigned int latency_factor __read_mostly = 2;
module_param(latency_factor, uint, 0644);

static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device);

struct cpuidle_driver acpi_idle_driver = {
	.name =		"acpi_idle",
	.owner =	THIS_MODULE,
};

#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
static
DEFINE_PER_CPU(struct acpi_processor_cx * [CPUIDLE_STATE_MAX], acpi_cstate);

static int disabled_by_idle_boot_param(void)
{
	return boot_option_idle_override == IDLE_POLL ||
		boot_option_idle_override == IDLE_HALT;
}

/*
 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
 * For now disable this. Probably a bug somewhere else.
 *
 * To skip this limit, boot/load with a large max_cstate limit.
 */
static int set_max_cstate(const struct dmi_system_id *id)
{
	if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
		return 0;

	pr_notice("%s detected - limiting to C%ld max_cstate."
		  " Override with \"processor.max_cstate=%d\"\n", id->ident,
		  (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);

	max_cstate = (long)id->driver_data;

	return 0;
}

static const struct dmi_system_id processor_power_dmi_table[] = {
	{ set_max_cstate, "Clevo 5600D", {
	  DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
	  DMI_MATCH(DMI_BIOS_VERSION, "SHE845M0.86C.0013.D.0302131307")},
	 (void *)2},
	{ set_max_cstate, "Pavilion zv5000", {
	  DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
	  DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion zv5000 (DS502A#ABA)")},
	 (void *)1},
	{ set_max_cstate, "Asus L8400B", {
	  DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
	  DMI_MATCH(DMI_PRODUCT_NAME, "L8400B series Notebook PC")},
	 (void *)1},
	{},
};


/*
 * Callers should disable interrupts before the call and enable
 * interrupts after return.
 */
static void __cpuidle acpi_safe_halt(void)
{
	if (!tif_need_resched()) {
		raw_safe_halt();
		raw_local_irq_disable();
	}
}

#ifdef ARCH_APICTIMER_STOPS_ON_C3

/*
 * Some BIOS implementations switch to C3 in the published C2 state.
 * This seems to be a common problem on AMD boxen, but other vendors
 * are affected too. We pick the most conservative approach: we assume
 * that the local APIC stops in both C2 and C3.
 */
static void lapic_timer_check_state(int state, struct acpi_processor *pr,
				    struct acpi_processor_cx *cx)
{
	struct acpi_processor_power *pwr = &pr->power;
	u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;

	if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT))
		return;

	if (boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E))
		type = ACPI_STATE_C1;

	/*
	 * Check, if one of the previous states already marked the lapic
	 * unstable
	 */
	if (pwr->timer_broadcast_on_state < state)
		return;

	if (cx->type >= type)
		pr->power.timer_broadcast_on_state = state;
}

static void __lapic_timer_propagate_broadcast(void *arg)
{
	struct acpi_processor *pr = arg;

	if (pr->power.timer_broadcast_on_state < INT_MAX)
		tick_broadcast_enable();
	else
		tick_broadcast_disable();
}

static void lapic_timer_propagate_broadcast(struct acpi_processor *pr)
{
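	/* tick_broadcast_enable()/disable() must run on the target CPU. */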
	smp_call_function_single(pr->id, __lapic_timer_propagate_broadcast,
				 (void *)pr, 1);
}

/* Power(C) State timer broadcast control */
static bool lapic_timer_needs_broadcast(struct acpi_processor *pr,
					struct acpi_processor_cx *cx)
{
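	/* The offset of @cx within the states[] array is its C-state index. */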
	return cx - pr->power.states >= pr->power.timer_broadcast_on_state;
}

#else

static void lapic_timer_check_state(int state, struct acpi_processor *pr,
				    struct acpi_processor_cx *cstate) { }
static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) { }

static bool lapic_timer_needs_broadcast(struct acpi_processor *pr,
					struct acpi_processor_cx *cx)
{
	return false;
}

#endif

#if defined(CONFIG_X86)
static void tsc_check_state(int state)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_HYGON:
	case X86_VENDOR_AMD:
	case X86_VENDOR_INTEL:
	case X86_VENDOR_CENTAUR:
	case X86_VENDOR_ZHAOXIN:
		/*
		 * AMD Fam10h TSC will tick in all
		 * C/P/S0/S1 states when this bit is set.
		 */
		if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
			return;
		fallthrough;
	default:
		/* TSC could halt in idle, so notify users */
		if (state > ACPI_STATE_C1)
			mark_tsc_unstable("TSC halts in idle");
	}
}
#else
static void tsc_check_state(int state) { return; }
#endif

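/*
 * Extract C2/C3 data from the FADT and the processor's P_BLK; this path
 * is used when _CST is unavailable.  Per the ACPI specification, the
 * P_LVL2 register lives at P_BLK offset 4 and P_LVL3 at offset 5, and
 * reading those I/O ports enters the corresponding C-state.
 */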
static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
{

	if (!pr->pblk)
		return -ENODEV;

	/* if info is obtained from pblk/fadt, type equals state */
	pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
	pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;

#ifndef CONFIG_HOTPLUG_CPU
	/*
	 * Check for P_LVL2_UP flag before entering C2 and above on
	 * an SMP system.
	 */
	if ((num_online_cpus() > 1) &&
	    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
		return -ENODEV;
#endif

	/* determine C2 and C3 address from pblk */
	pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
	pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;

	/* determine latencies from FADT */
	pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.c2_latency;
	pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.c3_latency;

	/*
	 * FADT specified C2 latency must be less than or equal to
	 * 100 microseconds.
	 */
	if (acpi_gbl_FADT.c2_latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
		acpi_handle_debug(pr->handle, "C2 latency too large [%d]\n",
				  acpi_gbl_FADT.c2_latency);
		/* invalidate C2 */
		pr->power.states[ACPI_STATE_C2].address = 0;
	}

	/*
	 * FADT supplied C3 latency must be less than or equal to
	 * 1000 microseconds.
	 */
	if (acpi_gbl_FADT.c3_latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
		acpi_handle_debug(pr->handle, "C3 latency too large [%d]\n",
				  acpi_gbl_FADT.c3_latency);
		/* invalidate C3 */
		pr->power.states[ACPI_STATE_C3].address = 0;
	}

	acpi_handle_debug(pr->handle, "lvl2[0x%08x] lvl3[0x%08x]\n",
			  pr->power.states[ACPI_STATE_C2].address,
			  pr->power.states[ACPI_STATE_C3].address);

	snprintf(pr->power.states[ACPI_STATE_C2].desc,
		 ACPI_CX_DESC_LEN, "ACPI P_LVL2 IOPORT 0x%x",
		 pr->power.states[ACPI_STATE_C2].address);
	snprintf(pr->power.states[ACPI_STATE_C3].desc,
		 ACPI_CX_DESC_LEN, "ACPI P_LVL3 IOPORT 0x%x",
		 pr->power.states[ACPI_STATE_C3].address);

	return 0;
}

static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
{
	if (!pr->power.states[ACPI_STATE_C1].valid) {
		/* set the first C-State to C1 */
		/* all processors need to support C1 */
		pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
		pr->power.states[ACPI_STATE_C1].valid = 1;
		pr->power.states[ACPI_STATE_C1].entry_method = ACPI_CSTATE_HALT;

		snprintf(pr->power.states[ACPI_STATE_C1].desc,
			 ACPI_CX_DESC_LEN, "ACPI HLT");
	}
	/* the C0 state only exists as a filler in our array */
	pr->power.states[ACPI_STATE_C0].valid = 1;
	return 0;
}

static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
{
	int ret;

	if (nocst)
		return -ENODEV;

	ret = acpi_processor_evaluate_cst(pr->handle, pr->id, &pr->power);
	if (ret)
		return ret;

	if (!pr->power.count)
		return -EFAULT;

	pr->flags.has_cst = 1;
	return 0;
}

static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
					   struct acpi_processor_cx *cx)
{
	static int bm_check_flag = -1;
	static int bm_control_flag = -1;


	if (!cx->address)
		return;

	/*
	 * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
	 * DMA transfers are used by any ISA device to avoid livelock.
	 * Note that we could disable Type-F DMA (as recommended by
	 * the erratum), but this is known to disrupt certain ISA
	 * devices thus we take the conservative approach.
	 */
	if (errata.piix4.fdma) {
		acpi_handle_debug(pr->handle,
				  "C3 not supported on PIIX4 with Type-F DMA\n");
		return;
	}

	/* All the logic here assumes flags.bm_check is same across all CPUs */
	if (bm_check_flag == -1) {
		/* Determine whether bm_check is needed based on CPU */
		acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
		bm_check_flag = pr->flags.bm_check;
		bm_control_flag = pr->flags.bm_control;
	} else {
		pr->flags.bm_check = bm_check_flag;
		pr->flags.bm_control = bm_control_flag;
	}

	if (pr->flags.bm_check) {
		if (!pr->flags.bm_control) {
			if (pr->flags.has_cst != 1) {
				/* bus mastering control is necessary */
				acpi_handle_debug(pr->handle,
						  "C3 support requires BM control\n");
				return;
			} else {
				/* Here we enter C3 without bus mastering */
				acpi_handle_debug(pr->handle,
						  "C3 support without BM control\n");
			}
		}
	} else {
		/*
		 * For C3 to be usable when bm_check is not required, the
		 * FADT must advertise WBINVD support for cache invalidation.
		 */
		if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
			acpi_handle_debug(pr->handle,
					  "Cache invalidation should work properly"
					  " for C3 to be enabled on SMP systems\n");
			return;
		}
	}

	/*
	 * Otherwise we've met all of our C3 requirements.
	 * Normalize the C3 latency to expedite policy.  Enable
	 * checking of bus mastering status (bm_check) so we can
	 * use this in our C3 policy.
	 */
	cx->valid = 1;

	/*
	 * On older chipsets, BM_RLD needs to be set
	 * in order for Bus Master activity to wake the
	 * system from C3. Newer chipsets handle DMA
	 * during C3 automatically and BM_RLD is a NOP.
	 * In either case, the proper way to
	 * handle BM_RLD is to set it and leave it set.
	 */
	acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
}

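/*
 * sort() helpers used to work around firmware that reports C-state
 * latencies out of order.  Invalid entries compare equal and stay put;
 * note that only the latency values themselves are swapped.
 */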
static int acpi_cst_latency_cmp(const void *a, const void *b)
{
	const struct acpi_processor_cx *x = a, *y = b;

	if (!(x->valid && y->valid))
		return 0;
	if (x->latency > y->latency)
		return 1;
	if (x->latency < y->latency)
		return -1;
	return 0;
}

static void acpi_cst_latency_swap(void *a, void *b, int n)
{
	struct acpi_processor_cx *x = a, *y = b;

	if (!(x->valid && y->valid))
		return;
	swap(x->latency, y->latency);
}

static int acpi_processor_power_verify(struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int working = 0;
	unsigned int last_latency = 0;
	unsigned int last_type = 0;
	bool buggy_latency = false;

	pr->power.timer_broadcast_on_state = INT_MAX;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		struct acpi_processor_cx *cx = &pr->power.states[i];

		switch (cx->type) {
		case ACPI_STATE_C1:
			cx->valid = 1;
			break;

		case ACPI_STATE_C2:
			if (!cx->address)
				break;
			cx->valid = 1;
			break;

		case ACPI_STATE_C3:
			acpi_processor_power_verify_c3(pr, cx);
			break;
		}
		if (!cx->valid)
			continue;
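		/*
		 * A deeper (or equal) C-state should never report a smaller
		 * latency than a shallower one; flag such firmware tables.
		 */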
		if (cx->type >= last_type && cx->latency < last_latency)
			buggy_latency = true;
		last_latency = cx->latency;
		last_type = cx->type;

		lapic_timer_check_state(i, pr, cx);
		tsc_check_state(cx->type);
		working++;
	}

	if (buggy_latency) {
		pr_notice("FW issue: working around C-state latencies out of order\n");
		sort(&pr->power.states[1], max_cstate,
		     sizeof(struct acpi_processor_cx),
		     acpi_cst_latency_cmp,
		     acpi_cst_latency_swap);
	}

	lapic_timer_propagate_broadcast(pr);

	return working;
}

static int acpi_processor_get_cstate_info(struct acpi_processor *pr)
{
	unsigned int i;
	int result;


	/*
	 * NOTE: the idle thread may not be running while calling
	 * this function.
	 */

	/* Zero initialize all the C-states info. */
	memset(pr->power.states, 0, sizeof(pr->power.states));

	result = acpi_processor_get_power_info_cst(pr);
	if (result == -ENODEV)
		result = acpi_processor_get_power_info_fadt(pr);

	if (result)
		return result;

	acpi_processor_get_power_info_default(pr);

	pr->power.count = acpi_processor_power_verify(pr);

	/*
	 * If at least one C-state is valid, mark this CPU as being
	 * "idle manageable".
	 */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		if (pr->power.states[i].valid) {
			pr->power.count = i;
			pr->flags.power = 1;
		}
	}

	return 0;
}

/**
 * acpi_idle_bm_check - checks if bus master activity was detected
 */
static int acpi_idle_bm_check(void)
{
	u32 bm_status = 0;

	if (bm_check_disable)
		return 0;

	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
	if (bm_status)
		acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
	/*
	 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
	 * the true state of bus mastering activity; forcing us to
	 * manually check the BMIDEA bit of each IDE channel.
	 */
	else if (errata.piix4.bmisx) {
		if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
		    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
			bm_status = 1;
	}
	return bm_status;
}

static __cpuidle void io_idle(unsigned long addr)
{
	/* IO port based C-state */
	inb(addr);

#ifdef CONFIG_X86
	/* No delay is needed if we are running in a guest. */
	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return;
	/*
	 * Modern (>=Nehalem) Intel systems use ACPI via intel_idle,
	 * not this code.  Assume that any Intel systems using this
	 * are ancient and may need the dummy wait.  This also assumes
	 * that the motivating chipset issue was Intel-only.
	 */
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return;
#endif
	/*
	 * Dummy wait op - must do something useless after P_LVL2 read
	 * because chipsets cannot guarantee that STPCLK# signal gets
	 * asserted in time to freeze execution properly
	 *
	 * This workaround has been in place since the original ACPI
	 * implementation was merged, circa 2002.
	 *
	 * If a profile is pointing to this instruction, please first
	 * consider moving your system to a more modern idle
	 * mechanism.
	 */
	inl(acpi_gbl_FADT.xpm_timer_block.address);
}

/**
 * acpi_idle_do_entry - enter idle state using the appropriate method
 * @cx: cstate data
 *
 * Caller disables interrupt before call and enables interrupt after return.
 */
static void __cpuidle acpi_idle_do_entry(struct acpi_processor_cx *cx)
{
	perf_lopwr_cb(true);

	if (cx->entry_method == ACPI_CSTATE_FFH) {
		/* Call into architectural FFH based C-state */
		acpi_processor_ffh_cstate_enter(cx);
	} else if (cx->entry_method == ACPI_CSTATE_HALT) {
		acpi_safe_halt();
	} else {
		io_idle(cx->address);
	}

	perf_lopwr_cb(false);
}

/**
 * acpi_idle_play_dead - enters an ACPI state for long-term idle (i.e. off-lining)
 * @dev: the target CPU
 * @index: the index of suggested state
 */
static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
{
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);

	ACPI_FLUSH_CPU_CACHE();

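	/* Spin in the selected state forever; this CPU is being offlined. */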
	while (1) {
		if (cx->entry_method == ACPI_CSTATE_HALT)
			safe_halt();
		else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO)
			io_idle(cx->address);
		else
			return -ENODEV;
	}

	/* Never reached */
	return 0;
}

static __always_inline bool acpi_idle_fallback_to_c1(struct acpi_processor *pr)
{
	return IS_ENABLED(CONFIG_HOTPLUG_CPU) && !pr->flags.has_cst &&
		!(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED);
}

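/* Serialize ARB_DIS toggling when multiple CPUs enter C3 with BM control. */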
static int c3_cpu_count;
static DEFINE_RAW_SPINLOCK(c3_lock);

/**
 * acpi_idle_enter_bm - enters C3 with proper BM handling
 * @drv: cpuidle driver
 * @pr: Target processor
 * @cx: Target state context
 * @index: index of target state
 */
static int __cpuidle acpi_idle_enter_bm(struct cpuidle_driver *drv,
					struct acpi_processor *pr,
					struct acpi_processor_cx *cx,
					int index)
{
	static struct acpi_processor_cx safe_cx = {
		.entry_method = ACPI_CSTATE_HALT,
	};

	/*
	 * disable bus master
	 * bm_check implies we need ARB_DIS
	 * bm_control implies whether we can do ARB_DIS
	 *
	 * That leaves a case where bm_check is set and bm_control is not set.
	 * In that case we cannot do much, we enter C3 without doing anything.
	 */
	bool dis_bm = pr->flags.bm_control;

	instrumentation_begin();

	/* If we can skip BM, demote to a safe state. */
	if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
		dis_bm = false;
		index = drv->safe_state_index;
		if (index >= 0) {
			cx = this_cpu_read(acpi_cstate[index]);
		} else {
			cx = &safe_cx;
			index = -EBUSY;
		}
	}

	if (dis_bm) {
		raw_spin_lock(&c3_lock);
		c3_cpu_count++;
		/* Disable bus master arbitration when all CPUs are in C3 */
		if (c3_cpu_count == num_online_cpus())
			acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
		raw_spin_unlock(&c3_lock);
	}

	ct_cpuidle_enter();

	acpi_idle_do_entry(cx);

	ct_cpuidle_exit();

	/* Re-enable bus master arbitration */
	if (dis_bm) {
		raw_spin_lock(&c3_lock);
		acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
		c3_cpu_count--;
		raw_spin_unlock(&c3_lock);
	}

	instrumentation_end();

	return index;
}

static int __cpuidle acpi_idle_enter(struct cpuidle_device *dev,
				     struct cpuidle_driver *drv, int index)
{
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
	struct acpi_processor *pr;

	pr = __this_cpu_read(processors);
	if (unlikely(!pr))
		return -EINVAL;

	if (cx->type != ACPI_STATE_C1) {
		if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check)
			return acpi_idle_enter_bm(drv, pr, cx, index);

		/* C2 to C1 demotion. */
		if (acpi_idle_fallback_to_c1(pr) && num_online_cpus() > 1) {
			index = ACPI_IDLE_STATE_START;
			cx = per_cpu(acpi_cstate[index], dev->cpu);
		}
	}

	if (cx->type == ACPI_STATE_C3)
		ACPI_FLUSH_CPU_CACHE();

	acpi_idle_do_entry(cx);

	return index;
}

static int __cpuidle acpi_idle_enter_s2idle(struct cpuidle_device *dev,
					    struct cpuidle_driver *drv, int index)
{
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);

	if (cx->type == ACPI_STATE_C3) {
		struct acpi_processor *pr = __this_cpu_read(processors);

		if (unlikely(!pr))
			return 0;

		if (pr->flags.bm_check) {
			u8 bm_sts_skip = cx->bm_sts_skip;

			/* Don't check BM_STS, do an unconditional ARB_DIS for S2IDLE */
			cx->bm_sts_skip = 1;
			acpi_idle_enter_bm(drv, pr, cx, index);
			cx->bm_sts_skip = bm_sts_skip;

			return 0;
		} else {
			ACPI_FLUSH_CPU_CACHE();
		}
	}
	acpi_idle_do_entry(cx);

	return 0;
}

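/*
 * Populate the per-CPU acpi_cstate[] table used at idle-entry time; the
 * driver-global state definitions are set up separately in
 * acpi_processor_setup_cstates().
 */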
static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
					   struct cpuidle_device *dev)
{
	int i, count = ACPI_IDLE_STATE_START;
	struct acpi_processor_cx *cx;
	struct cpuidle_state *state;

	if (max_cstate == 0)
		max_cstate = 1;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		state = &acpi_idle_driver.states[count];
		cx = &pr->power.states[i];

		if (!cx->valid)
			continue;

		per_cpu(acpi_cstate[count], dev->cpu) = cx;

		if (lapic_timer_needs_broadcast(pr, cx))
			state->flags |= CPUIDLE_FLAG_TIMER_STOP;

		if (cx->type == ACPI_STATE_C3) {
			state->flags |= CPUIDLE_FLAG_TLB_FLUSHED;
			if (pr->flags.bm_check)
				state->flags |= CPUIDLE_FLAG_RCU_IDLE;
		}

		count++;
		if (count == CPUIDLE_STATE_MAX)
			break;
	}

	if (!count)
		return -EINVAL;

	return 0;
}

static int acpi_processor_setup_cstates(struct acpi_processor *pr)
{
	int i, count;
	struct acpi_processor_cx *cx;
	struct cpuidle_state *state;
	struct cpuidle_driver *drv = &acpi_idle_driver;

	if (max_cstate == 0)
		max_cstate = 1;

	if (IS_ENABLED(CONFIG_ARCH_HAS_CPU_RELAX)) {
		cpuidle_poll_state_init(drv);
		count = 1;
	} else {
		count = 0;
	}

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		cx = &pr->power.states[i];

		if (!cx->valid)
			continue;

		state = &drv->states[count];
		snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
		strscpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
		state->exit_latency = cx->latency;
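		/*
		 * No residency data is available here, so derive the
		 * target residency from the exit latency scaled by the
		 * latency_factor module parameter (default 2).
		 */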
		state->target_residency = cx->latency * latency_factor;
		state->enter = acpi_idle_enter;

		state->flags = 0;
		if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2 ||
		    cx->type == ACPI_STATE_C3) {
			state->enter_dead = acpi_idle_play_dead;
			if (cx->type != ACPI_STATE_C3)
				drv->safe_state_index = count;
		}
		/*
		 * Halt-induced C1 is not good for ->enter_s2idle, because it
		 * re-enables interrupts on exit.  Moreover, C1 is generally not
		 * particularly interesting from the suspend-to-idle angle, so
		 * avoid C1 and the situations in which we may need to fall back
		 * to it altogether.
		 */
		if (cx->type != ACPI_STATE_C1 && !acpi_idle_fallback_to_c1(pr))
			state->enter_s2idle = acpi_idle_enter_s2idle;

		count++;
		if (count == CPUIDLE_STATE_MAX)
			break;
	}

	drv->state_count = count;

	if (!count)
		return -EINVAL;

	return 0;
}

static inline void acpi_processor_cstate_first_run_checks(void)
{
	static int first_run;

	if (first_run)
		return;
	dmi_check_system(processor_power_dmi_table);
	max_cstate = acpi_processor_cstate_check(max_cstate);
	if (max_cstate < ACPI_C_STATES_MAX)
		pr_notice("processor limited to max C-state %d\n", max_cstate);

	first_run++;

	if (nocst)
		return;

	acpi_processor_claim_cst_control();
}
#else

static inline int disabled_by_idle_boot_param(void) { return 0; }
static inline void acpi_processor_cstate_first_run_checks(void) { }
static int acpi_processor_get_cstate_info(struct acpi_processor *pr)
{
	return -ENODEV;
}

static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
					   struct cpuidle_device *dev)
{
	return -EINVAL;
}

static int acpi_processor_setup_cstates(struct acpi_processor *pr)
{
	return -EINVAL;
}

#endif /* CONFIG_ACPI_PROCESSOR_CSTATE */

struct acpi_lpi_states_array {
	unsigned int size;
	unsigned int composite_states_size;
	struct acpi_lpi_state *entries;
	struct acpi_lpi_state *composite_states[ACPI_PROCESSOR_MAX_POWER];
};

static int obj_get_integer(union acpi_object *obj, u32 *value)
{
	if (obj->type != ACPI_TYPE_INTEGER)
		return -EINVAL;

	*value = obj->integer.value;
	return 0;
}

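/*
 * Evaluate _LPI (ACPI 6.0+).  The returned package is: revision, level
 * ID, state count, followed by one sub-package per LPI state.  Each
 * state package carries min residency, wake latency, flags, arch
 * flags, residency counter frequency, enabled parent state, the entry
 * method, residency/usage counter registers and a description string.
 */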
static int acpi_processor_evaluate_lpi(acpi_handle handle,
				       struct acpi_lpi_states_array *info)
{
	acpi_status status;
	int ret = 0;
	int pkg_count, state_idx = 1, loop;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *lpi_data;
	struct acpi_lpi_state *lpi_state;

	status = acpi_evaluate_object(handle, "_LPI", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		acpi_handle_debug(handle, "No _LPI, giving up\n");
		return -ENODEV;
	}

	lpi_data = buffer.pointer;

	/* There must be at least 4 elements = 3 elements + 1 package */
	if (!lpi_data || lpi_data->type != ACPI_TYPE_PACKAGE ||
	    lpi_data->package.count < 4) {
		pr_debug("not enough elements in _LPI\n");
		ret = -ENODATA;
		goto end;
	}

	pkg_count = lpi_data->package.elements[2].integer.value;

	/* Validate number of power states. */
	if (pkg_count < 1 || pkg_count != lpi_data->package.count - 3) {
		pr_debug("count given by _LPI is not valid\n");
		ret = -ENODATA;
		goto end;
	}

	lpi_state = kcalloc(pkg_count, sizeof(*lpi_state), GFP_KERNEL);
	if (!lpi_state) {
		ret = -ENOMEM;
		goto end;
	}

	info->size = pkg_count;
	info->entries = lpi_state;

	/* LPI States start at index 3 */
	for (loop = 3; state_idx <= pkg_count; loop++, state_idx++, lpi_state++) {
		union acpi_object *element, *pkg_elem, *obj;

		element = &lpi_data->package.elements[loop];
		if (element->type != ACPI_TYPE_PACKAGE || element->package.count < 7)
			continue;

		pkg_elem = element->package.elements;

		obj = pkg_elem + 6;
		if (obj->type == ACPI_TYPE_BUFFER) {
			struct acpi_power_register *reg;

			reg = (struct acpi_power_register *)obj->buffer.pointer;
			if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
			    reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)
				continue;

			lpi_state->address = reg->address;
			lpi_state->entry_method =
				reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE ?
				ACPI_CSTATE_FFH : ACPI_CSTATE_SYSTEMIO;
		} else if (obj->type == ACPI_TYPE_INTEGER) {
			lpi_state->entry_method = ACPI_CSTATE_INTEGER;
			lpi_state->address = obj->integer.value;
		} else {
			continue;
		}

		/* elements[7,8] skipped for now i.e. Residency/Usage counter */

		obj = pkg_elem + 9;
		if (obj->type == ACPI_TYPE_STRING)
			strscpy(lpi_state->desc, obj->string.pointer,
				ACPI_CX_DESC_LEN);

		lpi_state->index = state_idx;
		if (obj_get_integer(pkg_elem + 0, &lpi_state->min_residency)) {
			pr_debug("No min. residency found, assuming 10 us\n");
			lpi_state->min_residency = 10;
		}

		if (obj_get_integer(pkg_elem + 1, &lpi_state->wake_latency)) {
			pr_debug("No wakeup residency found, assuming 10 us\n");
			lpi_state->wake_latency = 10;
		}

		if (obj_get_integer(pkg_elem + 2, &lpi_state->flags))
			lpi_state->flags = 0;

		if (obj_get_integer(pkg_elem + 3, &lpi_state->arch_flags))
			lpi_state->arch_flags = 0;

		if (obj_get_integer(pkg_elem + 4, &lpi_state->res_cnt_freq))
			lpi_state->res_cnt_freq = 1;

		if (obj_get_integer(pkg_elem + 5, &lpi_state->enable_parent_state))
			lpi_state->enable_parent_state = 0;
	}

	acpi_handle_debug(handle, "Found %d power states\n", state_idx);
end:
	kfree(buffer.pointer);
	return ret;
}

/*
 * flat_state_cnt - the number of composite LPI states after the process of flattening
 */
static int flat_state_cnt;

/**
 * combine_lpi_states - combine local and parent LPI states to form a composite LPI state
 *
 * @local: local LPI state
 * @parent: parent LPI state
 * @result: composite LPI state
 */
static bool combine_lpi_states(struct acpi_lpi_state *local,
			       struct acpi_lpi_state *parent,
			       struct acpi_lpi_state *result)
{
	if (parent->entry_method == ACPI_CSTATE_INTEGER) {
		if (!parent->address) /* 0 means autopromotable */
			return false;
		result->address = local->address + parent->address;
	} else {
		result->address = parent->address;
	}

	result->min_residency = max(local->min_residency, parent->min_residency);
	result->wake_latency = local->wake_latency + parent->wake_latency;
	result->enable_parent_state = parent->enable_parent_state;
	result->entry_method = local->entry_method;

	result->flags = parent->flags;
	result->arch_flags = parent->arch_flags;
	result->index = parent->index;

	strscpy(result->desc, local->desc, ACPI_CX_DESC_LEN);
	strlcat(result->desc, "+", ACPI_CX_DESC_LEN);
	strlcat(result->desc, parent->desc, ACPI_CX_DESC_LEN);
	return true;
}

#define ACPI_LPI_STATE_FLAGS_ENABLED	BIT(0)

static void stash_composite_state(struct acpi_lpi_states_array *curr_level,
				  struct acpi_lpi_state *t)
{
	curr_level->composite_states[curr_level->composite_states_size++] = t;
}

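/*
 * Flatten one level of the _LPI hierarchy: combine every enabled state
 * at this level with each compatible composite state from the level
 * below, appending the results to the flat list exposed to cpuidle.
 */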
static int flatten_lpi_states(struct acpi_processor *pr,
			      struct acpi_lpi_states_array *curr_level,
			      struct acpi_lpi_states_array *prev_level)
{
	int i, j, state_count = curr_level->size;
	struct acpi_lpi_state *p, *t = curr_level->entries;

	curr_level->composite_states_size = 0;
	for (j = 0; j < state_count; j++, t++) {
		struct acpi_lpi_state *flpi;

		if (!(t->flags & ACPI_LPI_STATE_FLAGS_ENABLED))
			continue;

		if (flat_state_cnt >= ACPI_PROCESSOR_MAX_POWER) {
			pr_warn("Limiting number of LPI states to max (%d)\n",
				ACPI_PROCESSOR_MAX_POWER);
			pr_warn("Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
			break;
		}

		flpi = &pr->power.lpi_states[flat_state_cnt];

		if (!prev_level) { /* leaf/processor node */
			memcpy(flpi, t, sizeof(*t));
			stash_composite_state(curr_level, flpi);
			flat_state_cnt++;
			continue;
		}

		for (i = 0; i < prev_level->composite_states_size; i++) {
			p = prev_level->composite_states[i];
			if (t->index <= p->enable_parent_state &&
			    combine_lpi_states(p, t, flpi)) {
				stash_composite_state(curr_level, flpi);
				flat_state_cnt++;
				flpi++;
			}
		}
	}

	kfree(curr_level->entries);
	return 0;
}

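/*
 * Weak default: without an architecture override (e.g. the FFH-based
 * implementation on arm64), LPI probing reports -EOPNOTSUPP and the
 * driver falls back to C-state enumeration.
 */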
int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu)
{
	return -EOPNOTSUPP;
}

static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
{
	int ret, i;
	acpi_status status;
	acpi_handle handle = pr->handle, pr_ahandle;
	struct acpi_device *d = NULL;
	struct acpi_lpi_states_array info[2], *tmp, *prev, *curr;

	/* make sure our architecture has support */
	ret = acpi_processor_ffh_lpi_probe(pr->id);
	if (ret == -EOPNOTSUPP)
		return ret;

	if (!osc_pc_lpi_support_confirmed)
		return -EOPNOTSUPP;

	if (!acpi_has_method(handle, "_LPI"))
		return -EINVAL;

	flat_state_cnt = 0;
	prev = &info[0];
	curr = &info[1];
	handle = pr->handle;
	ret = acpi_processor_evaluate_lpi(handle, prev);
	if (ret)
		return ret;
	flatten_lpi_states(pr, prev, NULL);

	status = acpi_get_parent(handle, &pr_ahandle);
	while (ACPI_SUCCESS(status)) {
		d = acpi_fetch_acpi_dev(pr_ahandle);
		if (!d)
			break;

		handle = pr_ahandle;

		if (strcmp(acpi_device_hid(d), ACPI_PROCESSOR_CONTAINER_HID))
			break;

		/* _LPI is optional on intermediate containers; stop if absent. */
		if (!acpi_has_method(handle, "_LPI"))
			break;

		ret = acpi_processor_evaluate_lpi(handle, curr);
		if (ret)
			break;

		/* flatten all the LPI states in this level of hierarchy */
		flatten_lpi_states(pr, curr, prev);

		tmp = prev, prev = curr, curr = tmp;

		status = acpi_get_parent(handle, &pr_ahandle);
	}

	pr->power.count = flat_state_cnt;
	/* reset the index after flattening */
	for (i = 0; i < pr->power.count; i++)
		pr->power.lpi_states[i].index = i;

	/* Tell driver that _LPI is supported. */
	pr->flags.has_lpi = 1;
	pr->flags.power = 1;

	return 0;
}

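/*
 * Weak stub; architectures that support FFH-based LPI entry provide
 * their own implementation.
 */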
int __weak acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi)
{
	return -ENODEV;
}

/**
 * acpi_idle_lpi_enter - enters any ACPI LPI state
 * @dev: the target CPU
 * @drv: cpuidle driver containing cpuidle state info
 * @index: index of target state
 *
 * Return: 0 for success or negative value for error
 */
static int acpi_idle_lpi_enter(struct cpuidle_device *dev,
			       struct cpuidle_driver *drv, int index)
{
	struct acpi_processor *pr;
	struct acpi_lpi_state *lpi;

	pr = __this_cpu_read(processors);

	if (unlikely(!pr))
		return -EINVAL;

	lpi = &pr->power.lpi_states[index];
	if (lpi->entry_method == ACPI_CSTATE_FFH)
		return acpi_processor_ffh_lpi_enter(lpi);

	return -EINVAL;
}

static int acpi_processor_setup_lpi_states(struct acpi_processor *pr)
{
	int i;
	struct acpi_lpi_state *lpi;
	struct cpuidle_state *state;
	struct cpuidle_driver *drv = &acpi_idle_driver;

	if (!pr->flags.has_lpi)
		return -EOPNOTSUPP;

	for (i = 0; i < pr->power.count && i < CPUIDLE_STATE_MAX; i++) {
		lpi = &pr->power.lpi_states[i];

		state = &drv->states[i];
		snprintf(state->name, CPUIDLE_NAME_LEN, "LPI-%d", i);
		strscpy(state->desc, lpi->desc, CPUIDLE_DESC_LEN);
		state->exit_latency = lpi->wake_latency;
		state->target_residency = lpi->min_residency;
		state->flags |= arch_get_idle_state_flags(lpi->arch_flags);
		if (i != 0 && lpi->entry_method == ACPI_CSTATE_FFH)
			state->flags |= CPUIDLE_FLAG_RCU_IDLE;
		state->enter = acpi_idle_lpi_enter;
		drv->safe_state_index = i;
	}

	drv->state_count = i;

	return 0;
}

/**
 * acpi_processor_setup_cpuidle_states - prepares and configures cpuidle
 * global state data i.e. idle routines
 *
 * @pr: the ACPI processor
 */
static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
{
	int i;
	struct cpuidle_driver *drv = &acpi_idle_driver;

	if (!pr->flags.power_setup_done || !pr->flags.power)
		return -EINVAL;

	drv->safe_state_index = -1;
	for (i = ACPI_IDLE_STATE_START; i < CPUIDLE_STATE_MAX; i++) {
		drv->states[i].name[0] = '\0';
		drv->states[i].desc[0] = '\0';
	}

	if (pr->flags.has_lpi)
		return acpi_processor_setup_lpi_states(pr);

	return acpi_processor_setup_cstates(pr);
}

/**
 * acpi_processor_setup_cpuidle_dev - prepares and configures CPUIDLE
 * device i.e. per-cpu data
 *
 * @pr: the ACPI processor
 * @dev: the cpuidle device
 */
static int acpi_processor_setup_cpuidle_dev(struct acpi_processor *pr,
					    struct cpuidle_device *dev)
{
	if (!pr->flags.power_setup_done || !pr->flags.power || !dev)
		return -EINVAL;

	dev->cpu = pr->id;
	if (pr->flags.has_lpi)
		return acpi_processor_ffh_lpi_probe(pr->id);

	return acpi_processor_setup_cpuidle_cx(pr, dev);
}

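/* Prefer _LPI (ACPI 6.0+); fall back to _CST/FADT C-states otherwise. */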
static int acpi_processor_get_power_info(struct acpi_processor *pr)
{
	int ret;

	ret = acpi_processor_get_lpi_info(pr);
	if (ret)
		ret = acpi_processor_get_cstate_info(pr);

	return ret;
}

int acpi_processor_hotplug(struct acpi_processor *pr)
{
	int ret = 0;
	struct cpuidle_device *dev;

	if (disabled_by_idle_boot_param())
		return 0;

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	dev = per_cpu(acpi_cpuidle_device, pr->id);
	cpuidle_pause_and_lock();
	cpuidle_disable_device(dev);
	ret = acpi_processor_get_power_info(pr);
	if (!ret && pr->flags.power) {
		acpi_processor_setup_cpuidle_dev(pr, dev);
		ret = cpuidle_enable_device(dev);
	}
	cpuidle_resume_and_unlock();

	return ret;
}

int acpi_processor_power_state_has_changed(struct acpi_processor *pr)
{
	int cpu;
	struct acpi_processor *_pr;
	struct cpuidle_device *dev;

	if (disabled_by_idle_boot_param())
		return 0;

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	/*
	 * FIXME: Design the ACPI notification to make it once per
	 * system instead of once per-cpu.  This condition is a hack
	 * to make the code that updates C-States be called once.
	 */

	if (pr->id == 0 && cpuidle_get_driver() == &acpi_idle_driver) {

		/* Protect against cpu-hotplug */
		cpus_read_lock();
		cpuidle_pause_and_lock();

		/* Disable all cpuidle devices */
		for_each_online_cpu(cpu) {
			_pr = per_cpu(processors, cpu);
			if (!_pr || !_pr->flags.power_setup_done)
				continue;
			dev = per_cpu(acpi_cpuidle_device, cpu);
			cpuidle_disable_device(dev);
		}

		/* Populate Updated C-state information */
		acpi_processor_get_power_info(pr);
		acpi_processor_setup_cpuidle_states(pr);

		/* Enable all cpuidle devices */
		for_each_online_cpu(cpu) {
			_pr = per_cpu(processors, cpu);
			if (!_pr || !_pr->flags.power_setup_done)
				continue;
			acpi_processor_get_power_info(_pr);
			if (_pr->flags.power) {
				dev = per_cpu(acpi_cpuidle_device, cpu);
				acpi_processor_setup_cpuidle_dev(_pr, dev);
				cpuidle_enable_device(dev);
			}
		}
		cpuidle_resume_and_unlock();
		cpus_read_unlock();
	}

	return 0;
}

static int acpi_processor_registered;

int acpi_processor_power_init(struct acpi_processor *pr)
{
	int retval;
	struct cpuidle_device *dev;

	if (disabled_by_idle_boot_param())
		return 0;

	acpi_processor_cstate_first_run_checks();

	if (!acpi_processor_get_power_info(pr))
		pr->flags.power_setup_done = 1;

	/*
	 * Install the idle handler if processor power management is
	 * supported.  Note that the previously set idle handler will be
	 * used on platforms that only support C1.
	 */
	if (pr->flags.power) {
		/* Register acpi_idle_driver if not already registered */
		if (!acpi_processor_registered) {
			acpi_processor_setup_cpuidle_states(pr);
			retval = cpuidle_register_driver(&acpi_idle_driver);
			if (retval)
				return retval;
			pr_debug("%s registered with cpuidle\n",
				 acpi_idle_driver.name);
		}

		dev = kzalloc(sizeof(*dev), GFP_KERNEL);
		if (!dev)
			return -ENOMEM;
		per_cpu(acpi_cpuidle_device, pr->id) = dev;

		acpi_processor_setup_cpuidle_dev(pr, dev);

		/*
		 * Register the per-CPU cpuidle_device.  The cpuidle driver
		 * must already be registered before registering the device.
		 */
		retval = cpuidle_register_device(dev);
		if (retval) {
			if (acpi_processor_registered == 0)
				cpuidle_unregister_driver(&acpi_idle_driver);
			return retval;
		}
		acpi_processor_registered++;
	}
	return 0;
}

int acpi_processor_power_exit(struct acpi_processor *pr)
{
	struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id);

	if (disabled_by_idle_boot_param())
		return 0;

	if (pr->flags.power) {
		cpuidle_unregister_device(dev);
		acpi_processor_registered--;
		if (acpi_processor_registered == 0)
			cpuidle_unregister_driver(&acpi_idle_driver);
	}

	pr->flags.power_setup_done = 0;
	return 0;
}