// SPDX-License-Identifier: GPL-2.0-only
/*
 * ARM64 Specific Low-Level ACPI Boot Support
 *
 * Copyright (C) 2013-2014, Linaro Ltd.
 * Author: Al Stone <al.stone@linaro.org>
 * Author: Graeme Gregory <graeme.gregory@linaro.org>
 * Author: Hanjun Guo <hanjun.guo@linaro.org>
 * Author: Tomasz Nowicki <tomasz.nowicki@linaro.org>
 * Author: Naresh Bhat <naresh.bhat@linaro.org>
 */

#define pr_fmt(fmt) "ACPI: " fmt

#include <linux/acpi.h>
#include <linux/arm-smccc.h>
#include <linux/cpumask.h>
#include <linux/efi.h>
#include <linux/efi-bgrt.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/irq_work.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <linux/smp.h>
#include <linux/serial_core.h>
#include <linux/pgtable.h>

#include <acpi/ghes.h>
#include <asm/cputype.h>
#include <asm/cpu_ops.h>
#include <asm/daifflags.h>
#include <asm/smp_plat.h>
int acpi_noirq = 1;		/* skip ACPI IRQ initialization */
int acpi_disabled = 1;
EXPORT_SYMBOL(acpi_disabled);

int acpi_pci_disabled = 1;	/* skip ACPI PCI scan and IRQ initialization */
EXPORT_SYMBOL(acpi_pci_disabled);

static bool param_acpi_off __initdata;
static bool param_acpi_on __initdata;
static bool param_acpi_force __initdata;

static int __init parse_acpi(char *arg)
{
	if (!arg)
		return -EINVAL;

	/* "acpi=off" disables both ACPI table parsing and interpreter */
	if (strcmp(arg, "off") == 0)
		param_acpi_off = true;
	else if (strcmp(arg, "on") == 0)	/* prefer ACPI over DT */
		param_acpi_on = true;
	else if (strcmp(arg, "force") == 0)	/* force ACPI to be enabled */
		param_acpi_force = true;
	else
		return -EINVAL;	/* core will print when we return an error */

	return 0;
}
early_param("acpi", parse_acpi);

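/*
 * For illustration, the accepted values of the "acpi=" parameter parsed
 * above (they mirror the strcmp() checks in parse_acpi()):
 *
 *	acpi=off	ignore ACPI tables entirely and boot from DT
 *	acpi=on		prefer ACPI over a non-stub DT
 *	acpi=force	keep ACPI enabled even if table sanity checks fail
 */
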
static bool __init dt_is_stub(void)
{
	int node;

	fdt_for_each_subnode(node, initial_boot_params, 0) {
		const char *name = fdt_get_name(initial_boot_params, node, NULL);
		if (strcmp(name, "chosen") == 0)
			continue;
		if (strcmp(name, "hypervisor") == 0 &&
		    of_flat_dt_is_compatible(node, "xen,xen"))
			continue;

		return false;
	}

	return true;
}

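/*
 * Illustrative sketch (hypothetical, not from the kernel tree): firmware
 * that provides no real device tree hands over a stub DTB that would look
 * roughly like this in source form, which dt_is_stub() accepts, as its
 * only subnode of the root is /chosen:
 *
 *	/dts-v1/;
 *	/ {
 *		chosen {
 *			bootargs = "...";
 *		};
 *	};
 */
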
/*
 * __acpi_map_table() will be called before paging_init(), so early_ioremap()
 * or early_memremap() should be called here for ACPI table mapping.
 */
void __init __iomem *__acpi_map_table(unsigned long phys, unsigned long size)
{
	if (!size)
		return NULL;

	return early_memremap(phys, size);
}

void __init __acpi_unmap_table(void __iomem *map, unsigned long size)
{
	if (!map || !size)
		return;

	early_memunmap(map, size);
}

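/*
 * A minimal usage sketch (hypothetical caller, for illustration only):
 * the ACPI core wraps early table parsing in a map/unmap pair, e.g.
 *
 *	void __iomem *tbl = __acpi_map_table(phys, len);
 *
 *	if (tbl) {
 *		... parse the table through tbl ...
 *		__acpi_unmap_table(tbl, len);
 *	}
 */
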
bool __init acpi_psci_present(void)
{
	return acpi_gbl_FADT.arm_boot_flags & ACPI_FADT_PSCI_COMPLIANT;
}

/* Whether HVC must be used instead of SMC as the PSCI conduit */
bool acpi_psci_use_hvc(void)
{
	return acpi_gbl_FADT.arm_boot_flags & ACPI_FADT_PSCI_USE_HVC;
}

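/*
 * For reference, the two FADT ARM boot architecture flags tested above
 * (from the ACPI specification; the defines live in the ACPICA headers):
 *
 *	ACPI_FADT_PSCI_COMPLIANT	(1 << 0)  PSCI is implemented
 *	ACPI_FADT_PSCI_USE_HVC		(1 << 1)  use HVC, not SMC, for PSCI
 */
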
/*
 * acpi_fadt_sanity_check() - Check FADT presence and carry out sanity
 *			      checks on it
 *
 * Return 0 on success, <0 on failure
 */
static int __init acpi_fadt_sanity_check(void)
{
	struct acpi_table_header *table;
	struct acpi_table_fadt *fadt;
	acpi_status status;
	int ret = 0;

	/*
	 * FADT is required on arm64; retrieve it to check its presence
	 * and carry out revision and ACPI HW reduced compliancy tests
	 */
	status = acpi_get_table(ACPI_SIG_FADT, 0, &table);
	if (ACPI_FAILURE(status)) {
		const char *msg = acpi_format_exception(status);

		pr_err("Failed to get FADT table, %s\n", msg);
		return -ENODEV;
	}

	fadt = (struct acpi_table_fadt *)table;

	/*
	 * The revision in the table header is the FADT major revision; a
	 * minor revision field was introduced by ACPI 5.1. We only deal
	 * with ACPI 5.1 or newer revisions, as they carry the GIC and SMP
	 * boot protocol configuration data we need.
	 */
	if (table->revision < 5 ||
	    (table->revision == 5 && fadt->minor_revision < 1)) {
		pr_err(FW_BUG "Unsupported FADT revision %d.%d, should be 5.1+\n",
		       table->revision, fadt->minor_revision);

		if (!fadt->arm_boot_flags) {
			ret = -EINVAL;
			goto out;
		}
		pr_err("FADT has ARM boot flags set, assuming 5.1\n");
	}

	if (!(fadt->flags & ACPI_FADT_HW_REDUCED)) {
		pr_err("FADT not ACPI hardware reduced compliant\n");
		ret = -EINVAL;
	}

out:
	/*
	 * acpi_get_table() creates a FADT table mapping that
	 * should be released after parsing and before resuming boot
	 */
	acpi_put_table(table);
	return ret;
}

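/*
 * Worked examples of the revision check above (illustrative; revisions
 * below 5 are handled the same way as 5.0):
 *
 *	FADT 6.3			accepted silently
 *	FADT 5.1			accepted silently
 *	FADT 5.0, arm_boot_flags != 0	warned about, treated as 5.1
 *	FADT 5.0, arm_boot_flags == 0	rejected with -EINVAL
 */
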
/*
 * acpi_boot_table_init() called from setup_arch(), always.
 *	1. find RSDP and get its address, and then find XSDT
 *	2. extract all tables and checksum them
 *	3. check ACPI FADT revision
 *	4. check ACPI FADT HW reduced flag
 *
 * We can parse ACPI boot-time tables such as MADT after
 * this function is called.
 *
 * On return ACPI is enabled if either:
 *
 * - ACPI tables are initialized and sanity checks passed
 * - acpi=force was passed in the command line and ACPI was not disabled
 *   explicitly through the acpi=off command line parameter
 *
 * ACPI is disabled on function return otherwise
 */
void __init acpi_boot_table_init(void)
{
	/*
	 * Enable ACPI instead of device tree unless
	 * - ACPI has been disabled explicitly (acpi=off), or
	 * - the device tree is not empty (it has more than just a /chosen node,
	 *   and a /hypervisor node when running on Xen)
	 *   and ACPI has not been [force] enabled (acpi=on|force)
	 */
	if (param_acpi_off ||
	    (!param_acpi_on && !param_acpi_force && !dt_is_stub()))
		goto done;

	/*
	 * ACPI is disabled at this point. Enable it in order to parse
	 * the ACPI tables and carry out sanity checks
	 */
	enable_acpi();

	/*
	 * If ACPI tables are initialized and FADT sanity checks passed,
	 * leave ACPI enabled and carry on booting; otherwise disable ACPI
	 * on initialization error.
	 * If acpi=force was passed on the command line it forces ACPI
	 * to be enabled even if its initialization failed.
	 */
	if (acpi_table_init() || acpi_fadt_sanity_check()) {
		pr_err("Failed to init ACPI tables\n");
		if (!param_acpi_force)
			disable_acpi();
	}

done:
	if (acpi_disabled) {
		if (earlycon_acpi_spcr_enable)
			early_init_dt_scan_chosen_stdout();
	} else {
		acpi_parse_spcr(earlycon_acpi_spcr_enable, true);
		if (IS_ENABLED(CONFIG_ACPI_BGRT))
			acpi_table_parse(ACPI_SIG_BGRT, acpi_parse_bgrt);
	}
}

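/*
 * Decision table for the checks above (illustrative summary):
 *
 *	acpi=		device tree	result
 *	-----		-----------	------
 *	off		any		DT boot
 *	(none)		real DT		DT boot
 *	(none)		stub DT		ACPI, if tables pass sanity checks
 *	on		any		ACPI attempted, DT fallback on error
 *	force		any		ACPI kept enabled even on error
 */
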
static pgprot_t __acpi_get_writethrough_mem_attribute(void)
{
	/*
	 * Although UEFI specifies the use of Normal Write-through for
	 * EFI_MEMORY_WT, it is seldom used in practice and not implemented
	 * by most (all?) CPUs. Rather than allocate a MAIR just for this
	 * purpose, emit a warning and use Normal Non-cacheable instead.
	 */
	pr_warn_once("No MAIR allocation for EFI_MEMORY_WT; treating as Normal Non-cacheable\n");
	return __pgprot(PROT_NORMAL_NC);
}

pgprot_t __acpi_get_mem_attribute(phys_addr_t addr)
{
	/*
	 * According to "Table 8 Map: EFI memory types to AArch64 memory
	 * types" of UEFI 2.5 section 2.3.6.1, each EFI memory type is
	 * mapped to a corresponding MAIR attribute encoding.
	 * The EFI memory attribute advises all possible capabilities
	 * of a memory region.
	 */

	u64 attr;

	attr = efi_mem_attributes(addr);
	if (attr & EFI_MEMORY_WB)
		return PAGE_KERNEL;
	if (attr & EFI_MEMORY_WC)
		return __pgprot(PROT_NORMAL_NC);
	if (attr & EFI_MEMORY_WT)
		return __acpi_get_writethrough_mem_attribute();
	return __pgprot(PROT_DEVICE_nGnRnE);
}

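/*
 * Summary of the attribute selection above (illustrative):
 *
 *	EFI capability	arm64 mapping
 *	--------------	-------------
 *	EFI_MEMORY_WB	PAGE_KERNEL (Normal, Write-Back)
 *	EFI_MEMORY_WC	PROT_NORMAL_NC (Normal, Non-cacheable)
 *	EFI_MEMORY_WT	PROT_NORMAL_NC, with a one-time warning
 *	none of these	PROT_DEVICE_nGnRnE (Device memory)
 */
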
void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
{
	efi_memory_desc_t *md, *region = NULL;
	pgprot_t prot;

	if (WARN_ON_ONCE(!efi_enabled(EFI_MEMMAP)))
		return NULL;

	for_each_efi_memory_desc(md) {
		u64 end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);

		if (phys < md->phys_addr || phys >= end)
			continue;

		if (phys + size > end) {
			pr_warn(FW_BUG "requested region covers multiple EFI memory regions\n");
			return NULL;
		}
		region = md;
		break;
	}

	/*
	 * It is fine for AML to remap regions that are not represented in the
	 * EFI memory map at all, as it only describes normal memory, and MMIO
	 * regions that require a virtual mapping to make them accessible to
	 * the EFI runtime services.
	 */
	prot = __pgprot(PROT_DEVICE_nGnRnE);
	if (region) {
		switch (region->type) {
		case EFI_LOADER_CODE:
		case EFI_LOADER_DATA:
		case EFI_BOOT_SERVICES_CODE:
		case EFI_BOOT_SERVICES_DATA:
		case EFI_CONVENTIONAL_MEMORY:
		case EFI_PERSISTENT_MEMORY:
			if (memblock_is_map_memory(phys) ||
			    !memblock_is_region_memory(phys, size)) {
				pr_warn(FW_BUG "requested region covers kernel memory @ %pa\n", &phys);
				return NULL;
			}
			/*
			 * Mapping kernel memory is permitted if the region in
			 * question is covered by a single memblock with the
			 * NOMAP attribute set: this enables the use of ACPI
			 * table overrides passed via initramfs, which are
			 * reserved in memory using arch_reserve_mem_area()
			 * below. As this particular use case only requires
			 * read access, fall through to the R/O mapping case.
			 */
			fallthrough;

		case EFI_RUNTIME_SERVICES_CODE:
			/*
			 * This would be unusual, but not problematic per se,
			 * as long as we take care not to create a writable
			 * mapping for executable code.
			 */
			prot = PAGE_KERNEL_RO;
			break;

		case EFI_ACPI_RECLAIM_MEMORY:
			/*
			 * ACPI reclaim memory is used to pass firmware tables
			 * and other data that is intended for consumption by
			 * the OS only, which may decide it wants to reclaim
			 * that memory and use it for something else. We never
			 * do that, but we usually add it to the linear map
			 * anyway, in which case we should use the existing
			 * mapping.
			 */
			if (memblock_is_map_memory(phys))
				return (void __iomem *)__phys_to_virt(phys);
			fallthrough;

		default:
			if (region->attribute & EFI_MEMORY_WB)
				prot = PAGE_KERNEL;
			else if (region->attribute & EFI_MEMORY_WC)
				prot = __pgprot(PROT_NORMAL_NC);
			else if (region->attribute & EFI_MEMORY_WT)
				prot = __acpi_get_writethrough_mem_attribute();
		}
	}
	return ioremap_prot(phys, size, pgprot_val(prot));
}

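/*
 * Illustrative AML (hypothetical, not from any real firmware) that would
 * reach acpi_os_ioremap() through a SystemMemory OperationRegion access:
 *
 *	OperationRegion (MREG, SystemMemory, 0x80000000, 0x100)
 *	Field (MREG, DWordAcc, NoLock, Preserve) { STAT, 32 }
 *
 * Reading STAT makes ACPICA map the region via acpi_os_ioremap(), which
 * picks the memory attributes from the EFI memory map as shown above.
 */
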
/*
 * Claim Synchronous External Aborts as a firmware first notification.
 *
 * Used by KVM and the arch do_sea handler.
 * @regs may be NULL when called from process context.
 */
int apei_claim_sea(struct pt_regs *regs)
{
	int err = -ENOENT;
	bool return_to_irqs_enabled;
	unsigned long current_flags;

	if (!IS_ENABLED(CONFIG_ACPI_APEI_GHES))
		return err;

	current_flags = local_daif_save_flags();

	/* current_flags isn't useful here as daif doesn't tell us about pNMI */
	return_to_irqs_enabled = !irqs_disabled_flags(arch_local_save_flags());

	if (regs)
		return_to_irqs_enabled = interrupts_enabled(regs);

	/*
	 * SEA can interrupt SError, mask it and describe this as an NMI so
	 * that APEI defers the handling.
	 */
	local_daif_restore(DAIF_ERRCTX);
	nmi_enter();
	err = ghes_notify_sea();
	nmi_exit();

	/*
	 * APEI NMI-like notifications are deferred to irq_work. Unless
	 * we interrupted irqs-masked code, we can do that now.
	 */
	if (!err) {
		if (return_to_irqs_enabled) {
			local_daif_restore(DAIF_PROCCTX_NOIRQ);
			__irq_enter();
			irq_work_run();
			__irq_exit();
		} else {
			pr_warn_ratelimited("APEI work queued but not completed\n");
			err = -EINPROGRESS;
		}
	}

	local_daif_restore(current_flags);

	return err;
}

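/*
 * Return values of apei_claim_sea(), for reference:
 *
 *	0		the SEA matched a GHES error source and any queued
 *			APEI work has already been run
 *	-EINPROGRESS	matched, but the deferred work could not run yet
 *	-ENOENT		no firmware-first record claimed this abort (also
 *			returned when CONFIG_ACPI_APEI_GHES is disabled)
 */
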
void arch_reserve_mem_area(acpi_physical_address addr, size_t size)
{
	memblock_mark_nomap(addr, size);
}

#ifdef CONFIG_ACPI_FFH
/*
 * Implements ARM64 specific callbacks to support ACPI FFH Operation Region as
 * specified in https://developer.arm.com/docs/den0048/latest
 */
struct acpi_ffh_data {
	struct acpi_ffh_info info;
	void (*invoke_ffh_fn)(unsigned long a0, unsigned long a1,
			      unsigned long a2, unsigned long a3,
			      unsigned long a4, unsigned long a5,
			      unsigned long a6, unsigned long a7,
			      struct arm_smccc_res *res,
			      struct arm_smccc_quirk *quirk);
	void (*invoke_ffh64_fn)(const struct arm_smccc_1_2_regs *args,
				struct arm_smccc_1_2_regs *res);
};

int acpi_ffh_address_space_arch_setup(void *handler_ctxt, void **region_ctxt)
{
	enum arm_smccc_conduit conduit;
	struct acpi_ffh_data *ffh_ctxt;

	if (arm_smccc_get_version() < ARM_SMCCC_VERSION_1_2)
		return -EOPNOTSUPP;

	conduit = arm_smccc_1_1_get_conduit();
	if (conduit == SMCCC_CONDUIT_NONE) {
		pr_err("%s: invalid SMCCC conduit\n", __func__);
		return -EOPNOTSUPP;
	}

	ffh_ctxt = kzalloc(sizeof(*ffh_ctxt), GFP_KERNEL);
	if (!ffh_ctxt)
		return -ENOMEM;

	if (conduit == SMCCC_CONDUIT_SMC) {
		ffh_ctxt->invoke_ffh_fn = __arm_smccc_smc;
		ffh_ctxt->invoke_ffh64_fn = arm_smccc_1_2_smc;
	} else {
		ffh_ctxt->invoke_ffh_fn = __arm_smccc_hvc;
		ffh_ctxt->invoke_ffh64_fn = arm_smccc_1_2_hvc;
	}

	memcpy(ffh_ctxt, handler_ctxt, sizeof(ffh_ctxt->info));

	*region_ctxt = ffh_ctxt;
	return AE_OK;
}

static bool acpi_ffh_smccc_owner_allowed(u32 fid)
{
	int owner = ARM_SMCCC_OWNER_NUM(fid);

	if (owner == ARM_SMCCC_OWNER_STANDARD ||
	    owner == ARM_SMCCC_OWNER_SIP || owner == ARM_SMCCC_OWNER_OEM)
		return true;

	return false;
}

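/*
 * For reference, SMCCC function IDs encode their owning entity in bits
 * [29:24] (extracted by ARM_SMCCC_OWNER_NUM() above); bit 31 marks a fast
 * call and bit 30 selects the SMC64/HVC64 calling convention. Only the
 * Standard Service, SiP and OEM owner ranges are allowed through here.
 */
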
int acpi_ffh_address_space_arch_handler(acpi_integer *value, void *region_context)
{
	int ret = 0;
	struct acpi_ffh_data *ffh_ctxt = region_context;

	if (ffh_ctxt->info.offset == 0) {
		/* SMC/HVC 32bit call */
		struct arm_smccc_res res;
		u32 a[8] = { 0 }, *ptr = (u32 *)value;

		if (!ARM_SMCCC_IS_FAST_CALL(*ptr) || ARM_SMCCC_IS_64(*ptr) ||
		    !acpi_ffh_smccc_owner_allowed(*ptr) ||
		    ffh_ctxt->info.length > 32) {
			ret = AE_ERROR;
		} else {
			int idx, len = ffh_ctxt->info.length >> 2;

			for (idx = 0; idx < len; idx++)
				a[idx] = *(ptr + idx);

			ffh_ctxt->invoke_ffh_fn(a[0], a[1], a[2], a[3], a[4],
						a[5], a[6], a[7], &res, NULL);
			memcpy(value, &res, sizeof(res));
		}

	} else if (ffh_ctxt->info.offset == 1) {
		/* SMC/HVC 64bit call */
		struct arm_smccc_1_2_regs *r = (struct arm_smccc_1_2_regs *)value;

		if (!ARM_SMCCC_IS_FAST_CALL(r->a0) || !ARM_SMCCC_IS_64(r->a0) ||
		    !acpi_ffh_smccc_owner_allowed(r->a0) ||
		    ffh_ctxt->info.length > sizeof(*r)) {
			ret = AE_ERROR;
		} else {
			ffh_ctxt->invoke_ffh64_fn(r, r);
			memcpy(value, r, ffh_ctxt->info.length);
		}
	} else {
		ret = AE_ERROR;
	}

	return ret;
}
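
/*
 * Hypothetical ASL sketch (after Arm DEN0048, not from real firmware) of
 * an FFH Operation Region that would invoke the handler above; the region
 * offset (0 or 1) selects the 32-bit vs 64-bit SMCCC convention decoded
 * from ffh_ctxt->info.offset:
 *
 *	OperationRegion (FFH0, FFixedHW, 0, 0x20)
 *	Field (FFH0, BufferAcc, NoLock, Preserve) { FFAC, 256 }
 *
 * Accessing FFAC with a buffer that starts with the SMCCC function ID
 * triggers a fast call through the conduit chosen at setup time.
 */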
#endif /* CONFIG_ACPI_FFH */