1
2
3
4
5
6
7
8
9
10
11
12
13#define pr_fmt(fmt) "ACPI: " fmt
14
15#include <linux/acpi.h>
16#include <linux/cpumask.h>
17#include <linux/efi.h>
18#include <linux/efi-bgrt.h>
19#include <linux/init.h>
20#include <linux/irq.h>
21#include <linux/irqdomain.h>
22#include <linux/irq_work.h>
23#include <linux/memblock.h>
24#include <linux/of_fdt.h>
25#include <linux/libfdt.h>
26#include <linux/smp.h>
27#include <linux/serial_core.h>
28#include <linux/pgtable.h>
29
30#include <acpi/ghes.h>
31#include <asm/cputype.h>
32#include <asm/cpu_ops.h>
33#include <asm/daifflags.h>
34#include <asm/smp_plat.h>
35
int acpi_noirq = 1;		/* skip ACPI IRQ initialization */
int acpi_disabled = 1;		/* skip ACPI table init altogether */
EXPORT_SYMBOL(acpi_disabled);

int acpi_pci_disabled = 1;	/* skip ACPI PCI scan and IRQ initialization */
EXPORT_SYMBOL(acpi_pci_disabled);

/* Records the "acpi=" early parameter; at most one is set by parse_acpi(). */
static bool param_acpi_off __initdata;
static bool param_acpi_on __initdata;
static bool param_acpi_force __initdata;
46
47static int __init parse_acpi(char *arg)
48{
49 if (!arg)
50 return -EINVAL;
51
52
53 if (strcmp(arg, "off") == 0)
54 param_acpi_off = true;
55 else if (strcmp(arg, "on") == 0)
56 param_acpi_on = true;
57 else if (strcmp(arg, "force") == 0)
58 param_acpi_force = true;
59 else
60 return -EINVAL;
61
62 return 0;
63}
64early_param("acpi", parse_acpi);
65
66static bool __init dt_is_stub(void)
67{
68 int node;
69
70 fdt_for_each_subnode(node, initial_boot_params, 0) {
71 const char *name = fdt_get_name(initial_boot_params, node, NULL);
72 if (strcmp(name, "chosen") == 0)
73 continue;
74 if (strcmp(name, "hypervisor") == 0 &&
75 of_flat_dt_is_compatible(node, "xen,xen"))
76 continue;
77
78 return false;
79 }
80
81 return true;
82}
83
84
85
86
87
88void __init __iomem *__acpi_map_table(unsigned long phys, unsigned long size)
89{
90 if (!size)
91 return NULL;
92
93 return early_memremap(phys, size);
94}
95
96void __init __acpi_unmap_table(void __iomem *map, unsigned long size)
97{
98 if (!map || !size)
99 return;
100
101 early_memunmap(map, size);
102}
103
104bool __init acpi_psci_present(void)
105{
106 return acpi_gbl_FADT.arm_boot_flags & ACPI_FADT_PSCI_COMPLIANT;
107}
108
109
110bool acpi_psci_use_hvc(void)
111{
112 return acpi_gbl_FADT.arm_boot_flags & ACPI_FADT_PSCI_USE_HVC;
113}
114
115
116
117
118
119
120
/*
 * Sanity-check the FADT: it must be present, revision 5.1 or later (the
 * first revision to define the ARM boot flags), and declare the platform
 * ACPI hardware-reduced compliant. Returns 0 on success, -ENODEV if the
 * FADT is missing, -EINVAL if it fails the checks.
 */
static int __init acpi_fadt_sanity_check(void)
{
	struct acpi_table_header *table;
	struct acpi_table_fadt *fadt;
	acpi_status status;
	int ret = 0;

	/*
	 * FADT is required on arm64; retrieve it to check its presence
	 * and carry out revision and ACPI HW reduced compliance tests.
	 */
	status = acpi_get_table(ACPI_SIG_FADT, 0, &table);
	if (ACPI_FAILURE(status)) {
		const char *msg = acpi_format_exception(status);

		pr_err("Failed to get FADT table, %s\n", msg);
		return -ENODEV;
	}

	fadt = (struct acpi_table_fadt *)table;

	/*
	 * The header's revision field is the FADT major revision; the minor
	 * revision lives in the FADT body itself. Anything older than 5.1
	 * is unsupported...
	 */
	if (table->revision < 5 ||
	    (table->revision == 5 && fadt->minor_revision < 1)) {
		pr_err(FW_BUG "Unsupported FADT revision %d.%d, should be 5.1+\n",
		       table->revision, fadt->minor_revision);

		/*
		 * ...unless the firmware set any of the ARM boot flags —
		 * a 5.1 addition — in which case the revision field is the
		 * buggy part and we proceed assuming a 5.1 FADT.
		 */
		if (!fadt->arm_boot_flags) {
			ret = -EINVAL;
			goto out;
		}
		pr_err("FADT has ARM boot flags set, assuming 5.1\n");
	}

	if (!(fadt->flags & ACPI_FADT_HW_REDUCED)) {
		pr_err("FADT not ACPI hardware reduced compliant\n");
		ret = -EINVAL;
	}

out:
	/*
	 * acpi_get_table() created a mapping for the FADT; release it now
	 * that parsing is done, before boot resumes.
	 */
	acpi_put_table(table);
	return ret;
}
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
/*
 * acpi_boot_table_init() - decide between ACPI and DT at boot, and
 * initialize the ACPI tables if ACPI is chosen.
 *
 * On return ACPI is enabled if either:
 *	- the ACPI tables initialized and the FADT sanity check passed, or
 *	- acpi=force was passed on the command line (even on init failure).
 * Otherwise ACPI stays disabled and the device tree is used. In both
 * cases the early console (SPCR or DT stdout) is set up accordingly.
 */
void __init acpi_boot_table_init(void)
{
	/*
	 * Stay on device tree unless ACPI was explicitly requested
	 * (acpi=on/force), or the firmware provided only a stub DT and
	 * ACPI was not explicitly turned off (acpi=off).
	 */
	if (param_acpi_off ||
	    (!param_acpi_on && !param_acpi_force && !dt_is_stub()))
		goto done;

	/*
	 * ACPI is disabled by default (see the file-scope flags); enable
	 * it here so the tables can be parsed and sanity-checked.
	 */
	enable_acpi();

	/*
	 * If table init or the FADT check fails, report it and disable
	 * ACPI again — unless acpi=force was given, which keeps ACPI
	 * enabled despite the initialization failure.
	 */
	if (acpi_table_init() || acpi_fadt_sanity_check()) {
		pr_err("Failed to init ACPI tables\n");
		if (!param_acpi_force)
			disable_acpi();
	}

done:
	/* Pick the earlycon source: DT chosen/stdout vs ACPI SPCR table. */
	if (acpi_disabled) {
		if (earlycon_acpi_spcr_enable)
			early_init_dt_scan_chosen_stdout();
	} else {
		acpi_parse_spcr(earlycon_acpi_spcr_enable, true);
		if (IS_ENABLED(CONFIG_ACPI_BGRT))
			acpi_table_parse(ACPI_SIG_BGRT, acpi_parse_bgrt);
	}
}
234
static pgprot_t __acpi_get_writethrough_mem_attribute(void)
{
	/*
	 * EFI_MEMORY_WT asks for a Normal Write-through mapping, but no
	 * MAIR attribute index is allocated for Write-through memory here.
	 * Warn once and fall back to Normal Non-cacheable, the closest
	 * available attribute.
	 */
	pr_warn_once("No MAIR allocation for EFI_MEMORY_WT; treating as Normal Non-cacheable\n");
	return __pgprot(PROT_NORMAL_NC);
}
246
247pgprot_t __acpi_get_mem_attribute(phys_addr_t addr)
248{
249
250
251
252
253
254
255
256
257 u64 attr;
258
259 attr = efi_mem_attributes(addr);
260 if (attr & EFI_MEMORY_WB)
261 return PAGE_KERNEL;
262 if (attr & EFI_MEMORY_WC)
263 return __pgprot(PROT_NORMAL_NC);
264 if (attr & EFI_MEMORY_WT)
265 return __acpi_get_writethrough_mem_attribute();
266 return __pgprot(PROT_DEVICE_nGnRnE);
267}
268
/*
 * Map an ACPI-requested physical region, choosing the memory attributes
 * from the EFI memory map. Returns NULL if the request straddles EFI
 * regions or would create an unsafe alias of kernel memory.
 */
void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
{
	efi_memory_desc_t *md, *region = NULL;
	pgprot_t prot;

	/* The EFI memory map is our only source of attribute information. */
	if (WARN_ON_ONCE(!efi_enabled(EFI_MEMMAP)))
		return NULL;

	/* Find the single EFI region fully containing [phys, phys + size). */
	for_each_efi_memory_desc(md) {
		u64 end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);

		if (phys < md->phys_addr || phys >= end)
			continue;

		if (phys + size > end) {
			pr_warn(FW_BUG "requested region covers multiple EFI memory regions\n");
			return NULL;
		}
		region = md;
		break;
	}

	/*
	 * Regions absent from the EFI memory map are treated as device
	 * memory (nGnRnE) by default; the switch below upgrades the
	 * attributes when the map tells us more.
	 */
	prot = __pgprot(PROT_DEVICE_nGnRnE);
	if (region) {
		switch (region->type) {
		case EFI_LOADER_CODE:
		case EFI_LOADER_DATA:
		case EFI_BOOT_SERVICES_CODE:
		case EFI_BOOT_SERVICES_DATA:
		case EFI_CONVENTIONAL_MEMORY:
		case EFI_PERSISTENT_MEMORY:
			/*
			 * Refuse to alias memory that is in the linear map,
			 * or that is not covered by memblock at all.
			 * Mapping is only permitted when the region is
			 * known to memblock but excluded from the linear
			 * map (NOMAP) — and then only read-only.
			 */
			if (memblock_is_map_memory(phys) ||
			    !memblock_is_region_memory(phys, size)) {
				pr_warn(FW_BUG "requested region covers kernel memory @ %pa\n", &phys);
				return NULL;
			}

			fallthrough;

		case EFI_RUNTIME_SERVICES_CODE:
			/*
			 * Unusual, but not problematic per se, as long as we
			 * take care not to create a writable mapping for
			 * executable code.
			 */
			prot = PAGE_KERNEL_RO;
			break;

		case EFI_ACPI_RECLAIM_MEMORY:
			/*
			 * ACPI reclaim memory holds firmware tables meant
			 * for the OS. If it is already covered by the
			 * linear map, reuse that existing mapping rather
			 * than creating a second one.
			 */
			if (memblock_is_map_memory(phys))
				return (void __iomem *)__phys_to_virt(phys);
			fallthrough;

		default:
			/* Honour the region's declared cacheability. */
			if (region->attribute & EFI_MEMORY_WB)
				prot = PAGE_KERNEL;
			else if (region->attribute & EFI_MEMORY_WC)
				prot = __pgprot(PROT_NORMAL_NC);
			else if (region->attribute & EFI_MEMORY_WT)
				prot = __acpi_get_writethrough_mem_attribute();
		}
	}
	return __ioremap(phys, size, prot);
}
356
357
358
359
360
361
362
/*
 * apei_claim_sea() - check whether APEI/GHES claims a synchronous
 * external abort as a firmware-first notification.
 *
 * @regs: the interrupted context, or NULL if unknown.
 *
 * Returns 0 if GHES claimed the error and the queued APEI work ran,
 * -ENOENT if nothing was claimed (or GHES is not built in), and
 * -EINPROGRESS if work was queued but could not be run because the
 * interrupted context had interrupts masked.
 */
int apei_claim_sea(struct pt_regs *regs)
{
	int err = -ENOENT;
	bool return_to_irqs_enabled;
	unsigned long current_flags;

	if (!IS_ENABLED(CONFIG_ACPI_APEI_GHES))
		return err;

	current_flags = local_daif_save_flags();

	/* current_flags isn't useful here as daif doesn't tell us about pNMI */
	return_to_irqs_enabled = !irqs_disabled_flags(arch_local_save_flags());

	/* Prefer the interrupted context's own view when it is available. */
	if (regs)
		return_to_irqs_enabled = interrupts_enabled(regs);

	/*
	 * SEA can interrupt SError; mask it and describe this as an NMI
	 * so that APEI defers the handling.
	 */
	local_daif_restore(DAIF_ERRCTX);
	nmi_enter();
	err = ghes_notify_sea();
	nmi_exit();

	/*
	 * APEI NMI-like notifications are deferred to irq_work. Unless
	 * we interrupted irqs-masked code, run that work now.
	 */
	if (!err) {
		if (return_to_irqs_enabled) {
			local_daif_restore(DAIF_PROCCTX_NOIRQ);
			__irq_enter();
			irq_work_run();
			__irq_exit();
		} else {
			pr_warn_ratelimited("APEI work queued but not completed");
			err = -EINPROGRESS;
		}
	}

	local_daif_restore(current_flags);

	return err;
}
409
void arch_reserve_mem_area(acpi_physical_address addr, size_t size)
{
	/*
	 * Mark the region NOMAP so it is left out of the linear mapping;
	 * it can then be mapped with attributes derived from the EFI
	 * memory map (see acpi_os_ioremap()).
	 */
	memblock_mark_nomap(addr, size);
}
414