/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_X86_ACPI_H
#define _ASM_X86_ACPI_H

/*
 * Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2001 Patrick Mochel <mochel@osdl.org>
 */
#include <acpi/proc_cap_intel.h>

#include <asm/numa.h>
#include <asm/fixmap.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mpspec.h>
#include <asm/x86_init.h>
#include <asm/cpufeature.h>
#include <asm/irq_vectors.h>

#ifdef CONFIG_ACPI_APEI
# include <asm/pgtable_types.h>
#endif

#ifdef CONFIG_ACPI
extern int acpi_lapic;
extern int acpi_ioapic;
extern int acpi_noirq;
extern int acpi_strict;
extern int acpi_disabled;
extern int acpi_pci_disabled;
extern int acpi_skip_timer_override;
extern int acpi_use_timer_override;
extern int acpi_fix_pin2_polarity;
extern int acpi_disable_cmcff;
extern bool acpi_int_src_ovr[NR_IRQS_LEGACY];

extern u8 acpi_sci_flags;
extern u32 acpi_sci_override_gsi;
void acpi_pic_sci_set_trigger(unsigned int, u16);

struct device;

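/*
 * Hooks for registering/unregistering a GSI with the interrupt core.
 * The defaults are installed by the x86 ACPI boot code; platforms such
 * as Xen can override them.
 */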
extern int (*__acpi_register_gsi)(struct device *dev, u32 gsi,
				  int trigger, int polarity);
extern void (*__acpi_unregister_gsi)(u32 gsi);

static inline void disable_acpi(void)
{
	acpi_disabled = 1;
	acpi_pci_disabled = 1;
	acpi_noirq = 1;
}

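/* Map an ACPI Global System Interrupt (GSI) to a Linux IRQ number. */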
extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq);

static inline void acpi_noirq_set(void) { acpi_noirq = 1; }
static inline void acpi_disable_pci(void)
{
	acpi_pci_disabled = 1;
	acpi_noirq_set();
}

/* Low-level suspend routine. */
extern int (*acpi_suspend_lowlevel)(void);

/* Physical address to resume after wakeup */
unsigned long acpi_get_wakeup_address(void);

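/*
 * Xen PV guests do not use the kernel's real-mode wakeup code, so
 * setting the FACS wakeup address is skipped for them.
 */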
static inline bool acpi_skip_set_wakeup_address(void)
{
	return cpu_feature_enabled(X86_FEATURE_XENPV);
}

#define acpi_skip_set_wakeup_address acpi_skip_set_wakeup_address

/*
 * Check if the CPU can handle C2 and deeper
 */
static inline unsigned int acpi_processor_cstate_check(unsigned int max_cstate)
{
	/*
	 * Early models (<=5) of AMD Opterons are not supposed to go into
	 * C2 state.
	 *
	 * Steppings 0x0A and later are good
	 */
	if (boot_cpu_data.x86 == 0x0F &&
	    boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
	    boot_cpu_data.x86_model <= 0x05 &&
	    boot_cpu_data.x86_stepping < 0x0A)
		return 1;
	else if (boot_cpu_has(X86_BUG_AMD_APIC_C1E))
		return 1;
	else
		return max_cstate;
}

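/*
 * _PDC/_OSC based processor capability handshaking is only used on
 * Intel and Centaur CPUs.
 */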
static inline bool arch_has_acpi_pdc(void)
{
	struct cpuinfo_x86 *c = &cpu_data(0);
	return (c->x86_vendor == X86_VENDOR_INTEL ||
		c->x86_vendor == X86_VENDOR_CENTAUR);
}

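/*
 * Advertise the OS's power-management capabilities (C/P/T-state
 * coordination and FFH entry methods) to the firmware via the
 * _OSC/_PDC processor capability bits.
 */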
static inline void arch_acpi_set_proc_cap_bits(u32 *cap)
{
	struct cpuinfo_x86 *c = &cpu_data(0);

	*cap |= ACPI_PROC_CAP_C_CAPABILITY_SMP;

	/* Enable coordination with firmware's _TSD info */
	*cap |= ACPI_PROC_CAP_SMP_T_SWCOORD;

	if (cpu_has(c, X86_FEATURE_EST))
		*cap |= ACPI_PROC_CAP_EST_CAPABILITY_SWSMP;

	if (cpu_has(c, X86_FEATURE_ACPI))
		*cap |= ACPI_PROC_CAP_T_FFH;

	if (cpu_has(c, X86_FEATURE_HWP))
		*cap |= ACPI_PROC_CAP_COLLAB_PROC_PERF;

	/*
	 * If mwait/monitor is unsupported, C_C1_FFH and
	 * C2/C3_FFH will be disabled.
	 */
	if (!cpu_has(c, X86_FEATURE_MWAIT) ||
	    boot_option_idle_override == IDLE_NOMWAIT)
		*cap &= ~(ACPI_PROC_CAP_C_C1_FFH | ACPI_PROC_CAP_C_C2C3_FFH);
}

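/* True if CPUs were enumerated via local APIC entries in the MADT. */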
static inline bool acpi_has_cpu_in_madt(void)
{
	return !!acpi_lapic;
}

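/*
 * The RSDP root pointer is routed through x86_init hooks so that it
 * can be supplied via boot_params or overridden by paravirt platforms.
 */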
#define ACPI_HAVE_ARCH_SET_ROOT_POINTER
static inline void acpi_arch_set_root_pointer(u64 addr)
{
	x86_init.acpi.set_root_pointer(addr);
}

#define ACPI_HAVE_ARCH_GET_ROOT_POINTER
static inline u64 acpi_arch_get_root_pointer(void)
{
	return x86_init.acpi.get_root_pointer();
}

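/* Default setup for ACPI hardware-reduced platforms (no legacy PIC/PIT). */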
void acpi_generic_reduced_hw_init(void);

void x86_default_set_root_pointer(u64 addr);
u64 x86_default_get_root_pointer(void);

#else /* !CONFIG_ACPI */

#define acpi_lapic 0
#define acpi_ioapic 0
#define acpi_disable_cmcff 0
static inline void acpi_noirq_set(void) { }
static inline void acpi_disable_pci(void) { }
static inline void disable_acpi(void) { }

static inline void acpi_generic_reduced_hw_init(void) { }

static inline void x86_default_set_root_pointer(u64 addr) { }

static inline u64 x86_default_get_root_pointer(void)
{
	return 0;
}

#endif /* !CONFIG_ACPI */

#define ARCH_HAS_POWER_INIT 1

#ifdef CONFIG_ACPI_NUMA
extern int x86_acpi_numa_init(void);
#endif /* CONFIG_ACPI_NUMA */

struct cper_ia_proc_ctx;

#ifdef CONFIG_ACPI_APEI
static inline pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr)
{
	/*
	 * We currently have no way to look up the EFI memory map
	 * attributes for a region in a consistent way, because the
	 * memmap is discarded after efi_free_boot_services(). So if
	 * you call efi_mem_attributes() during boot and at runtime,
	 * you could theoretically see different attributes.
	 *
	 * We are yet to see any x86 platforms that require anything
	 * other than PAGE_KERNEL (some ARM64 platforms require the
	 * equivalent of PAGE_KERNEL_NOCACHE). Additionally, if SME
	 * is active, the ACPI information will not be encrypted,
	 * so return PAGE_KERNEL_NOENC until we know differently.
	 */
	return PAGE_KERNEL_NOENC;
}

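/*
 * Report a firmware-described processor error (CPER IA32/X64 processor
 * context) to the MCE handling code; the stub below returns -EINVAL
 * when APEI support is not built in.
 */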
int arch_apei_report_x86_error(struct cper_ia_proc_ctx *ctx_info,
			       u64 lapic_id);
#else
static inline int arch_apei_report_x86_error(struct cper_ia_proc_ctx *ctx_info,
					     u64 lapic_id)
{
	return -EINVAL;
}
#endif

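/*
 * Highest physical address usable for ACPI table upgrades from the
 * initrd; limited to memory that is already mapped early in boot.
 */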
#define ACPI_TABLE_UPGRADE_MAX_PHYS (max_low_pfn_mapped << PAGE_SHIFT)

#endif /* _ASM_X86_ACPI_H */