#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/kmod.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/nmi.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/ioport.h>
#include <linux/list.h>
#include <linux/jiffies.h>
#include <linux/semaphore.h>

#include <asm/io.h>
#include <asm/uaccess.h>

#include "internal.h"

#define _COMPONENT		ACPI_OS_SERVICES
ACPI_MODULE_NAME("osl");

struct acpi_os_dpc {
	acpi_osd_exec_callback function;
	void *context;
	struct work_struct work;
};

#ifdef CONFIG_ACPI_CUSTOM_DSDT
#include CONFIG_ACPI_CUSTOM_DSDT_FILE
#endif

#ifdef ENABLE_DEBUGGER
#include <linux/kdb.h>

int acpi_in_debugger;
EXPORT_SYMBOL(acpi_in_debugger);

extern char line_buf[80];
#endif

static int (*__acpi_os_prepare_sleep)(u8 sleep_state, u32 pm1a_ctrl,
				      u32 pm1b_ctrl);
static int (*__acpi_os_prepare_extended_sleep)(u8 sleep_state, u32 val_a,
				      u32 val_b);

static acpi_osd_handler acpi_irq_handler;
static void *acpi_irq_context;
static struct workqueue_struct *kacpid_wq;
static struct workqueue_struct *kacpi_notify_wq;
static struct workqueue_struct *kacpi_hotplug_wq;

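/*
 * Mappings created by acpi_os_map_iomem() are kept on this RCU-protected
 * list so that later lookups (including the memory read/write helpers
 * below) can reuse them; writers serialize on acpi_ioremap_lock.
 */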
struct acpi_ioremap {
	struct list_head list;
	void __iomem *virt;
	acpi_physical_address phys;
	acpi_size size;
	unsigned long refcount;
};

static LIST_HEAD(acpi_ioremaps);
static DEFINE_MUTEX(acpi_ioremap_lock);

static void __init acpi_osi_setup_late(void);

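/*
 * osi_linux records how _OSI(Linux) handling was configured: whether it is
 * enabled, and whether that came from the command line, from a DMI quirk,
 * or from default disabling of vendor strings.
 */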
static struct osi_linux {
	unsigned int	enable:1;
	unsigned int	dmi:1;
	unsigned int	cmdline:1;
	unsigned int	default_disabling:1;
} osi_linux = {0, 0, 0, 0};

static u32 acpi_osi_handler(acpi_string interface, u32 supported)
{
	if (!strcmp("Linux", interface)) {

		printk_once(KERN_NOTICE FW_BUG PREFIX
			    "BIOS _OSI(Linux) query %s%s\n",
			    osi_linux.enable ? "honored" : "ignored",
			    osi_linux.cmdline ? " via cmdline" :
			    osi_linux.dmi ? " via DMI" : "");
	}

	return supported;
}

static void __init acpi_request_region (struct acpi_generic_address *gas,
	unsigned int length, char *desc)
{
	u64 addr;

	/* Handle possible alignment issues */
	memcpy(&addr, &gas->address, sizeof(addr));
	if (!addr || !length)
		return;

	/* Resources are never freed */
	if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
		request_region(addr, length, desc);
	else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		request_mem_region(addr, length, desc);
}

static int __init acpi_reserve_resources(void)
{
	acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
		"ACPI PM1a_EVT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1b_event_block, acpi_gbl_FADT.pm1_event_length,
		"ACPI PM1b_EVT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1a_control_block, acpi_gbl_FADT.pm1_control_length,
		"ACPI PM1a_CNT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1b_control_block, acpi_gbl_FADT.pm1_control_length,
		"ACPI PM1b_CNT_BLK");

	if (acpi_gbl_FADT.pm_timer_length == 4)
		acpi_request_region(&acpi_gbl_FADT.xpm_timer_block, 4, "ACPI PM_TMR");

	acpi_request_region(&acpi_gbl_FADT.xpm2_control_block, acpi_gbl_FADT.pm2_control_length,
		"ACPI PM2_CNT_BLK");

	/* Length of GPE blocks must be a non-negative multiple of 2 */

	if (!(acpi_gbl_FADT.gpe0_block_length & 0x1))
		acpi_request_region(&acpi_gbl_FADT.xgpe0_block,
			       acpi_gbl_FADT.gpe0_block_length, "ACPI GPE0_BLK");

	if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
		acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
			       acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");

	return 0;
}
device_initcall(acpi_reserve_resources);

void acpi_os_printf(const char *fmt, ...)
{
	va_list args;
	va_start(args, fmt);
	acpi_os_vprintf(fmt, args);
	va_end(args);
}

void acpi_os_vprintf(const char *fmt, va_list args)
{
	static char buffer[512];

	vsprintf(buffer, fmt, args);

#ifdef ENABLE_DEBUGGER
	if (acpi_in_debugger) {
		kdb_printf("%s", buffer);
	} else {
		printk(KERN_CONT "%s", buffer);
	}
#else
	printk(KERN_CONT "%s", buffer);
#endif
}

#ifdef CONFIG_KEXEC
static unsigned long acpi_rsdp;
static int __init setup_acpi_rsdp(char *arg)
{
	if (kstrtoul(arg, 16, &acpi_rsdp))
		return -EINVAL;
	return 0;
}
early_param("acpi_rsdp", setup_acpi_rsdp);
#endif

acpi_physical_address __init acpi_os_get_root_pointer(void)
{
#ifdef CONFIG_KEXEC
	if (acpi_rsdp)
		return acpi_rsdp;
#endif

	if (efi_enabled(EFI_CONFIG_TABLES)) {
		if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
			return efi.acpi20;
		else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
			return efi.acpi;
		else {
			printk(KERN_ERR PREFIX
			       "System description tables not found\n");
			return 0;
		}
	} else if (IS_ENABLED(CONFIG_ACPI_LEGACY_TABLES_LOOKUP)) {
		acpi_physical_address pa = 0;

		acpi_find_root_pointer(&pa);
		return pa;
	}

	return 0;
}

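/*
 * Look up a cached mapping covering [phys, phys + size).  Must be called
 * with acpi_ioremap_lock held or under rcu_read_lock().
 */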
static struct acpi_ioremap *
acpi_map_lookup(acpi_physical_address phys, acpi_size size)
{
	struct acpi_ioremap *map;

	list_for_each_entry_rcu(map, &acpi_ioremaps, list)
		if (map->phys <= phys &&
		    phys + size <= map->phys + map->size)
			return map;

	return NULL;
}

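/*
 * Translate a physical address into the virtual address of an existing
 * mapping; the same locking rules as for acpi_map_lookup() apply.
 */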
static void __iomem *
acpi_map_vaddr_lookup(acpi_physical_address phys, unsigned int size)
{
	struct acpi_ioremap *map;

	map = acpi_map_lookup(phys, size);
	if (map)
		return map->virt + (phys - map->phys);

	return NULL;
}

void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size)
{
	struct acpi_ioremap *map;
	void __iomem *virt = NULL;

	mutex_lock(&acpi_ioremap_lock);
	map = acpi_map_lookup(phys, size);
	if (map) {
		virt = map->virt + (phys - map->phys);
		map->refcount++;
	}
	mutex_unlock(&acpi_ioremap_lock);
	return virt;
}
EXPORT_SYMBOL_GPL(acpi_os_get_iomem);

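/*
 * Look up the mapping that contains the virtual range [virt, virt + size).
 * Must be called with acpi_ioremap_lock held.
 */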
static struct acpi_ioremap *
acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
{
	struct acpi_ioremap *map;

	list_for_each_entry_rcu(map, &acpi_ioremaps, list)
		if (map->virt <= virt &&
		    virt + size <= map->virt + map->size)
			return map;

	return NULL;
}

#ifndef CONFIG_IA64
#define should_use_kmap(pfn)	page_is_ram(pfn)
#else
/* ioremap will take care of cache attributes */
#define should_use_kmap(pfn)	0
#endif

static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz)
{
	unsigned long pfn;

	pfn = pg_off >> PAGE_SHIFT;
	if (should_use_kmap(pfn)) {
		if (pg_sz > PAGE_SIZE)
			return NULL;
		return (void __iomem __force *)kmap(pfn_to_page(pfn));
	} else
		return acpi_os_ioremap(pg_off, pg_sz);
}

static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
{
	unsigned long pfn;

	pfn = pg_off >> PAGE_SHIFT;
	if (should_use_kmap(pfn))
		kunmap(pfn_to_page(pfn));
	else
		iounmap(vaddr);
}

void __iomem *__init_refok
acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
{
	struct acpi_ioremap *map;
	void __iomem *virt;
	acpi_physical_address pg_off;
	acpi_size pg_sz;

	if (phys > ULONG_MAX) {
		printk(KERN_ERR PREFIX "Cannot map memory that high\n");
		return NULL;
	}

	if (!acpi_gbl_permanent_mmap)
		return __acpi_map_table((unsigned long)phys, size);

	mutex_lock(&acpi_ioremap_lock);
	/* Check if there's a suitable mapping already. */
	map = acpi_map_lookup(phys, size);
	if (map) {
		map->refcount++;
		goto out;
	}

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		return NULL;
	}

	pg_off = round_down(phys, PAGE_SIZE);
	pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
	virt = acpi_map(pg_off, pg_sz);
	if (!virt) {
		mutex_unlock(&acpi_ioremap_lock);
		kfree(map);
		return NULL;
	}

	INIT_LIST_HEAD(&map->list);
	map->virt = virt;
	map->phys = pg_off;
	map->size = pg_sz;
	map->refcount = 1;

	list_add_tail_rcu(&map->list, &acpi_ioremaps);

out:
	mutex_unlock(&acpi_ioremap_lock);
	return map->virt + (phys - map->phys);
}
EXPORT_SYMBOL_GPL(acpi_os_map_iomem);

void *__init_refok
acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
{
	return (void *)acpi_os_map_iomem(phys, size);
}
EXPORT_SYMBOL_GPL(acpi_os_map_memory);

static void acpi_os_drop_map_ref(struct acpi_ioremap *map)
{
	if (!--map->refcount)
		list_del_rcu(&map->list);
}

static void acpi_os_map_cleanup(struct acpi_ioremap *map)
{
	if (!map->refcount) {
		synchronize_rcu();
		acpi_unmap(map->phys, map->virt);
		kfree(map);
	}
}

void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size)
{
	struct acpi_ioremap *map;

	if (!acpi_gbl_permanent_mmap) {
		__acpi_unmap_table(virt, size);
		return;
	}

	mutex_lock(&acpi_ioremap_lock);
	map = acpi_map_lookup_virt(virt, size);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		WARN(true, PREFIX "%s: bad address %p\n", __func__, virt);
		return;
	}
	acpi_os_drop_map_ref(map);
	mutex_unlock(&acpi_ioremap_lock);

	acpi_os_map_cleanup(map);
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_iomem);

void __ref acpi_os_unmap_memory(void *virt, acpi_size size)
{
	return acpi_os_unmap_iomem((void __iomem *)virt, size);
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);

void __init early_acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
{
	if (!acpi_gbl_permanent_mmap)
		__acpi_unmap_table(virt, size);
}

int acpi_os_map_generic_address(struct acpi_generic_address *gas)
{
	u64 addr;
	void __iomem *virt;

	if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
		return 0;

	/* Handle possible alignment issues */
	memcpy(&addr, &gas->address, sizeof(addr));
	if (!addr || !gas->bit_width)
		return -EINVAL;

	virt = acpi_os_map_iomem(addr, gas->bit_width / 8);
	if (!virt)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL(acpi_os_map_generic_address);

void acpi_os_unmap_generic_address(struct acpi_generic_address *gas)
{
	u64 addr;
	struct acpi_ioremap *map;

	if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
		return;

	/* Handle possible alignment issues */
	memcpy(&addr, &gas->address, sizeof(addr));
	if (!addr || !gas->bit_width)
		return;

	mutex_lock(&acpi_ioremap_lock);
	map = acpi_map_lookup(addr, gas->bit_width / 8);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		return;
	}
	acpi_os_drop_map_ref(map);
	mutex_unlock(&acpi_ioremap_lock);

	acpi_os_map_cleanup(map);
}
EXPORT_SYMBOL(acpi_os_unmap_generic_address);

#ifdef ACPI_FUTURE_USAGE
acpi_status
acpi_os_get_physical_address(void *virt, acpi_physical_address * phys)
{
	if (!phys || !virt)
		return AE_BAD_PARAMETER;

	*phys = virt_to_phys(virt);

	return AE_OK;
}
#endif

#define ACPI_MAX_OVERRIDE_LEN 100

static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN];

acpi_status
acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
			    acpi_string * new_val)
{
	if (!init_val || !new_val)
		return AE_BAD_PARAMETER;

	*new_val = NULL;
	if (!memcmp(init_val->name, "_OS_", 4) && strlen(acpi_os_name)) {
		printk(KERN_INFO PREFIX "Overriding _OS definition to '%s'\n",
		       acpi_os_name);
		*new_val = acpi_os_name;
	}

	return AE_OK;
}

#ifdef CONFIG_ACPI_INITRD_TABLE_OVERRIDE
#include <linux/earlycpio.h>
#include <linux/memblock.h>

static u64 acpi_tables_addr;
static int all_tables_size;

/* 8-bit sum of the whole table; a valid ACPI table sums to zero. */
static u8 __init acpi_table_checksum(u8 *buffer, u32 length)
{
	u8 sum = 0;
	u8 *end = buffer + length;

	while (buffer < end)
		sum = (u8) (sum + *(buffer++));
	return sum;
}

/* All but ACPI_SIG_RSDP and ACPI_SIG_FACS: */
static const char * const table_sigs[] = {
	ACPI_SIG_BERT, ACPI_SIG_CPEP, ACPI_SIG_ECDT, ACPI_SIG_EINJ,
	ACPI_SIG_ERST, ACPI_SIG_HEST, ACPI_SIG_MADT, ACPI_SIG_MSCT,
	ACPI_SIG_SBST, ACPI_SIG_SLIT, ACPI_SIG_SRAT, ACPI_SIG_ASF,
	ACPI_SIG_BOOT, ACPI_SIG_DBGP, ACPI_SIG_DMAR, ACPI_SIG_HPET,
	ACPI_SIG_IBFT, ACPI_SIG_IVRS, ACPI_SIG_MCFG, ACPI_SIG_MCHI,
	ACPI_SIG_SLIC, ACPI_SIG_SPCR, ACPI_SIG_SPMI, ACPI_SIG_TCPA,
	ACPI_SIG_UEFI, ACPI_SIG_WAET, ACPI_SIG_WDAT, ACPI_SIG_WDDT,
	ACPI_SIG_WDRT, ACPI_SIG_DSDT, ACPI_SIG_FADT, ACPI_SIG_PSDT,
	ACPI_SIG_RSDT, ACPI_SIG_XSDT, ACPI_SIG_SSDT, NULL };

#define ACPI_HEADER_SIZE sizeof(struct acpi_table_header)

#define ACPI_OVERRIDE_TABLES 64
static struct cpio_data __initdata acpi_initrd_files[ACPI_OVERRIDE_TABLES];

#define MAP_CHUNK_SIZE   (NR_FIX_BTMAPS << PAGE_SHIFT)

void __init acpi_initrd_override(void *data, size_t size)
{
	int sig, no, table_nr = 0, total_offset = 0;
	long offset = 0;
	struct acpi_table_header *table;
	char cpio_path[32] = "kernel/firmware/acpi/";
	struct cpio_data file;

	if (data == NULL || size == 0)
		return;

	for (no = 0; no < ACPI_OVERRIDE_TABLES; no++) {
		file = find_cpio_data(cpio_path, data, size, &offset);
		if (!file.data)
			break;

		data += offset;
		size -= offset;

		if (file.size < sizeof(struct acpi_table_header)) {
			pr_err("ACPI OVERRIDE: Table smaller than ACPI header [%s%s]\n",
				cpio_path, file.name);
			continue;
		}

		table = file.data;

		for (sig = 0; table_sigs[sig]; sig++)
			if (!memcmp(table->signature, table_sigs[sig], 4))
				break;

		if (!table_sigs[sig]) {
			pr_err("ACPI OVERRIDE: Unknown signature [%s%s]\n",
				cpio_path, file.name);
			continue;
		}
		if (file.size != table->length) {
			pr_err("ACPI OVERRIDE: File length does not match table length [%s%s]\n",
				cpio_path, file.name);
			continue;
		}
		if (acpi_table_checksum(file.data, table->length)) {
			pr_err("ACPI OVERRIDE: Bad table checksum [%s%s]\n",
				cpio_path, file.name);
			continue;
		}

		pr_info("%4.4s ACPI table found in initrd [%s%s][0x%x]\n",
			table->signature, cpio_path, file.name, table->length);

		all_tables_size += table->length;
		acpi_initrd_files[table_nr].data = file.data;
		acpi_initrd_files[table_nr].size = file.size;
		table_nr++;
	}
	if (table_nr == 0)
		return;

	acpi_tables_addr =
		memblock_find_in_range(0, max_low_pfn_mapped << PAGE_SHIFT,
				       all_tables_size, PAGE_SIZE);
	if (!acpi_tables_addr) {
		WARN_ON(1);
		return;
	}

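	/*
	 * Reserve the range both in memblock and via the arch-specific
	 * mechanism (arch_reserve_mem_area(), e820 on x86); on x86 the
	 * memblock reservation alone is not enough for the later
	 * acpi_os_map_memory()/ioremap() of the copied tables to see the
	 * area as reserved.
	 */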
	memblock_reserve(acpi_tables_addr, all_tables_size);
	arch_reserve_mem_area(acpi_tables_addr, all_tables_size);

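	/*
	 * early_ioremap() can only map a limited window at a time, so copy
	 * each table in MAP_CHUNK_SIZE pieces instead of mapping everything
	 * at once.
	 */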
	for (no = 0; no < table_nr; no++) {
		unsigned char *src_p = acpi_initrd_files[no].data;
		phys_addr_t size = acpi_initrd_files[no].size;
		phys_addr_t dest_addr = acpi_tables_addr + total_offset;
		phys_addr_t slop, clen;
		char *dest_p;

		total_offset += size;

		while (size) {
			slop = dest_addr & ~PAGE_MASK;
			clen = size;
			if (clen > MAP_CHUNK_SIZE - slop)
				clen = MAP_CHUNK_SIZE - slop;
			dest_p = early_ioremap(dest_addr & PAGE_MASK,
						 clen + slop);
			memcpy(dest_p + slop, src_p, clen);
			early_iounmap(dest_p, clen + slop);
			src_p += clen;
			dest_addr += clen;
			size -= clen;
		}
	}
}
#endif

static void acpi_table_taint(struct acpi_table_header *table)
{
	pr_warn(PREFIX
		"Override [%4.4s-%8.8s], this is unsafe: tainting kernel\n",
		table->signature, table->oem_table_id);
	add_taint(TAINT_OVERRIDDEN_ACPI_TABLE, LOCKDEP_NOW_UNRELIABLE);
}

acpi_status
acpi_os_table_override(struct acpi_table_header * existing_table,
		       struct acpi_table_header ** new_table)
{
	if (!existing_table || !new_table)
		return AE_BAD_PARAMETER;

	*new_table = NULL;

#ifdef CONFIG_ACPI_CUSTOM_DSDT
	if (strncmp(existing_table->signature, "DSDT", 4) == 0)
		*new_table = (struct acpi_table_header *)AmlCode;
#endif
	if (*new_table != NULL)
		acpi_table_taint(existing_table);
	return AE_OK;
}

acpi_status
acpi_os_physical_table_override(struct acpi_table_header *existing_table,
				acpi_physical_address *address,
				u32 *table_length)
{
#ifndef CONFIG_ACPI_INITRD_TABLE_OVERRIDE
	*table_length = 0;
	*address = 0;
	return AE_OK;
#else
	int table_offset = 0;
	struct acpi_table_header *table;

	*table_length = 0;
	*address = 0;

	if (!acpi_tables_addr)
		return AE_OK;

	do {
		if (table_offset + ACPI_HEADER_SIZE > all_tables_size) {
			WARN_ON(1);
			return AE_OK;
		}

		table = acpi_os_map_memory(acpi_tables_addr + table_offset,
					   ACPI_HEADER_SIZE);

		if (table_offset + table->length > all_tables_size) {
			acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
			WARN_ON(1);
			return AE_OK;
		}

		table_offset += table->length;

		if (memcmp(existing_table->signature, table->signature, 4)) {
			acpi_os_unmap_memory(table,
				     ACPI_HEADER_SIZE);
			continue;
		}

		/* Only override tables with matching oem id */
		if (memcmp(table->oem_table_id, existing_table->oem_table_id,
			   ACPI_OEM_TABLE_ID_SIZE)) {
			acpi_os_unmap_memory(table,
				     ACPI_HEADER_SIZE);
			continue;
		}

		table_offset -= table->length;
		*table_length = table->length;
		acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
		*address = acpi_tables_addr + table_offset;
		break;
	} while (table_offset + ACPI_HEADER_SIZE < all_tables_size);

	if (*address != 0)
		acpi_table_taint(existing_table);
	return AE_OK;
#endif
}

static irqreturn_t acpi_irq(int irq, void *dev_id)
{
	u32 handled;

	handled = (*acpi_irq_handler) (acpi_irq_context);

	if (handled) {
		acpi_irq_handled++;
		return IRQ_HANDLED;
	} else {
		acpi_irq_not_handled++;
		return IRQ_NONE;
	}
}

acpi_status
acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
				  void *context)
{
	unsigned int irq;

	acpi_irq_stats_init();

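	/*
	 * ACPI interrupts different from the SCI in our copy of the FADT are
	 * not supported.
	 */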
	if (gsi != acpi_gbl_FADT.sci_interrupt)
		return AE_BAD_PARAMETER;

	if (acpi_irq_handler)
		return AE_ALREADY_ACQUIRED;

	if (acpi_gsi_to_irq(gsi, &irq) < 0) {
		printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n",
		       gsi);
		return AE_OK;
	}

	acpi_irq_handler = handler;
	acpi_irq_context = context;
	if (request_irq(irq, acpi_irq, IRQF_SHARED | IRQF_NO_SUSPEND, "acpi", acpi_irq)) {
		printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
		acpi_irq_handler = NULL;
		return AE_NOT_ACQUIRED;
	}

	return AE_OK;
}

acpi_status acpi_os_remove_interrupt_handler(u32 irq, acpi_osd_handler handler)
{
	if (irq != acpi_gbl_FADT.sci_interrupt)
		return AE_BAD_PARAMETER;

	free_irq(irq, acpi_irq);
	acpi_irq_handler = NULL;

	return AE_OK;
}

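/*
 * Running in interpreter thread context, safe to sleep
 */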
void acpi_os_sleep(u64 ms)
{
	msleep(ms);
}

void acpi_os_stall(u32 us)
{
	while (us) {
		u32 delay = 1000;

		if (delay > us)
			delay = us;
		udelay(delay);
		touch_nmi_watchdog();
		us -= delay;
	}
}

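/*
 * Support ACPI 3.0 AML Timer operand: return a 64-bit free-running,
 * monotonically increasing timer with 100ns granularity (hence the
 * division of the nanosecond clock by 100 below).
 */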
u64 acpi_os_get_timer(void)
{
	u64 time_ns = ktime_to_ns(ktime_get());
	do_div(time_ns, 100);
	return time_ns;
}

acpi_status acpi_os_read_port(acpi_io_address port, u32 * value, u32 width)
{
	u32 dummy;

	if (!value)
		value = &dummy;

	*value = 0;
	if (width <= 8) {
		*(u8 *) value = inb(port);
	} else if (width <= 16) {
		*(u16 *) value = inw(port);
	} else if (width <= 32) {
		*(u32 *) value = inl(port);
	} else {
		BUG();
	}

	return AE_OK;
}

EXPORT_SYMBOL(acpi_os_read_port);

acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
{
	if (width <= 8) {
		outb(value, port);
	} else if (width <= 16) {
		outw(value, port);
	} else if (width <= 32) {
		outl(value, port);
	} else {
		BUG();
	}

	return AE_OK;
}

EXPORT_SYMBOL(acpi_os_write_port);

#ifdef readq
static inline u64 read64(const volatile void __iomem *addr)
{
	return readq(addr);
}
#else
static inline u64 read64(const volatile void __iomem *addr)
{
	u64 l, h;
	l = readl(addr);
	h = readl(addr+4);
	return l | (h << 32);
}
#endif

acpi_status
acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width)
{
	void __iomem *virt_addr;
	unsigned int size = width / 8;
	bool unmap = false;
	u64 dummy;

	rcu_read_lock();
	virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
	if (!virt_addr) {
		rcu_read_unlock();
		virt_addr = acpi_os_ioremap(phys_addr, size);
		if (!virt_addr)
			return AE_BAD_ADDRESS;
		unmap = true;
	}

	if (!value)
		value = &dummy;

	switch (width) {
	case 8:
		*(u8 *) value = readb(virt_addr);
		break;
	case 16:
		*(u16 *) value = readw(virt_addr);
		break;
	case 32:
		*(u32 *) value = readl(virt_addr);
		break;
	case 64:
		*(u64 *) value = read64(virt_addr);
		break;
	default:
		BUG();
	}

	if (unmap)
		iounmap(virt_addr);
	else
		rcu_read_unlock();

	return AE_OK;
}

#ifdef writeq
static inline void write64(u64 val, volatile void __iomem *addr)
{
	writeq(val, addr);
}
#else
static inline void write64(u64 val, volatile void __iomem *addr)
{
	writel(val, addr);
	writel(val>>32, addr+4);
}
#endif

acpi_status
acpi_os_write_memory(acpi_physical_address phys_addr, u64 value, u32 width)
{
	void __iomem *virt_addr;
	unsigned int size = width / 8;
	bool unmap = false;

	rcu_read_lock();
	virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
	if (!virt_addr) {
		rcu_read_unlock();
		virt_addr = acpi_os_ioremap(phys_addr, size);
		if (!virt_addr)
			return AE_BAD_ADDRESS;
		unmap = true;
	}

	switch (width) {
	case 8:
		writeb(value, virt_addr);
		break;
	case 16:
		writew(value, virt_addr);
		break;
	case 32:
		writel(value, virt_addr);
		break;
	case 64:
		write64(value, virt_addr);
		break;
	default:
		BUG();
	}

	if (unmap)
		iounmap(virt_addr);
	else
		rcu_read_unlock();

	return AE_OK;
}

acpi_status
acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
			       u64 *value, u32 width)
{
	int result, size;
	u32 value32;

	if (!value)
		return AE_BAD_PARAMETER;

	switch (width) {
	case 8:
		size = 1;
		break;
	case 16:
		size = 2;
		break;
	case 32:
		size = 4;
		break;
	default:
		return AE_ERROR;
	}

	result = raw_pci_read(pci_id->segment, pci_id->bus,
				PCI_DEVFN(pci_id->device, pci_id->function),
				reg, size, &value32);
	*value = value32;

	return (result ? AE_ERROR : AE_OK);
}

acpi_status
acpi_os_write_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
				u64 value, u32 width)
{
	int result, size;

	switch (width) {
	case 8:
		size = 1;
		break;
	case 16:
		size = 2;
		break;
	case 32:
		size = 4;
		break;
	default:
		return AE_ERROR;
	}

	result = raw_pci_write(pci_id->segment, pci_id->bus,
				PCI_DEVFN(pci_id->device, pci_id->function),
				reg, size, value);

	return (result ? AE_ERROR : AE_OK);
}

static void acpi_os_execute_deferred(struct work_struct *work)
{
	struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);

	dpc->function(dpc->context);
	kfree(dpc);
}

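/*
 * acpi_os_execute - queue an ACPICA callback for deferred execution.
 *
 * Depending on the callback type, the work item is placed either on the
 * notify workqueue or on the general kacpid workqueue; the callback itself
 * runs later in process context via acpi_os_execute_deferred().
 */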
acpi_status acpi_os_execute(acpi_execute_type type,
			    acpi_osd_exec_callback function, void *context)
{
	acpi_status status = AE_OK;
	struct acpi_os_dpc *dpc;
	struct workqueue_struct *queue;
	int ret;
	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
			  "Scheduling function [%p(%p)] for deferred execution.\n",
			  function, context));

	/*
	 * Allocate/initialize the DPC structure; it is freed by
	 * acpi_os_execute_deferred() once the callback has run.
	 */
	dpc = kzalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
	if (!dpc)
		return AE_NO_MEMORY;

	dpc->function = function;
	dpc->context = context;

	/*
	 * Notify handlers get their own workqueue; everything else goes to
	 * kacpid_wq.
	 */
	if (type == OSL_NOTIFY_HANDLER) {
		queue = kacpi_notify_wq;
		INIT_WORK(&dpc->work, acpi_os_execute_deferred);
	} else {
		queue = kacpid_wq;
		INIT_WORK(&dpc->work, acpi_os_execute_deferred);
	}

	/*
	 * On some machines, a software-initiated SMI causes corruption unless
	 * the SMI runs on CPU 0, so always queue the work on CPU 0.
	 */
	ret = queue_work_on(0, queue, &dpc->work);

	if (!ret) {
		printk(KERN_ERR PREFIX
			  "Call to queue_work() failed.\n");
		status = AE_ERROR;
		kfree(dpc);
	}
	return status;
}
EXPORT_SYMBOL(acpi_os_execute);

void acpi_os_wait_events_complete(void)
{
	flush_workqueue(kacpid_wq);
	flush_workqueue(kacpi_notify_wq);
}

struct acpi_hp_work {
	struct work_struct work;
	struct acpi_device *adev;
	u32 src;
};

static void acpi_hotplug_work_fn(struct work_struct *work)
{
	struct acpi_hp_work *hpw = container_of(work, struct acpi_hp_work, work);

	acpi_os_wait_events_complete();
	acpi_device_hotplug(hpw->adev, hpw->src);
	kfree(hpw);
}

acpi_status acpi_hotplug_schedule(struct acpi_device *adev, u32 src)
{
	struct acpi_hp_work *hpw;

	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
		  "Scheduling hotplug event (%p, %u) for deferred execution.\n",
		  adev, src));

	hpw = kmalloc(sizeof(*hpw), GFP_KERNEL);
	if (!hpw)
		return AE_NO_MEMORY;

	INIT_WORK(&hpw->work, acpi_hotplug_work_fn);
	hpw->adev = adev;
	hpw->src = src;

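	/*
	 * Hotplug work must not run on the kacpid or kacpi_notify workqueues,
	 * because it waits for them to drain (via
	 * acpi_os_wait_events_complete() in acpi_hotplug_work_fn()), so it
	 * gets its own ordered queue.
	 */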
	if (!queue_work(kacpi_hotplug_wq, &hpw->work)) {
		kfree(hpw);
		return AE_ERROR;
	}
	return AE_OK;
}

bool acpi_queue_hotplug_work(struct work_struct *work)
{
	return queue_work(kacpi_hotplug_wq, work);
}

acpi_status
acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
{
	struct semaphore *sem = NULL;

	sem = acpi_os_allocate_zeroed(sizeof(struct semaphore));
	if (!sem)
		return AE_NO_MEMORY;

	sema_init(sem, initial_units);

	*handle = (acpi_handle *) sem;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n",
			  *handle, initial_units));

	return AE_OK;
}

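/*
 * Linux has no primitive for destroying a semaphore; the BUG_ON below at
 * least catches deletion while threads are still waiting on it.
 */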
acpi_status acpi_os_delete_semaphore(acpi_handle handle)
{
	struct semaphore *sem = (struct semaphore *)handle;

	if (!sem)
		return AE_BAD_PARAMETER;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));

	BUG_ON(!list_empty(&sem->wait_list));
	kfree(sem);
	sem = NULL;

	return AE_OK;
}

/*
 * TODO: Support for units > 1?  Only single-unit waits are handled here.
 */
acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
{
	acpi_status status = AE_OK;
	struct semaphore *sem = (struct semaphore *)handle;
	long jiffies;
	int ret = 0;

	if (!sem || (units < 1))
		return AE_BAD_PARAMETER;

	if (units > 1)
		return AE_SUPPORT;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
			  handle, units, timeout));

	if (timeout == ACPI_WAIT_FOREVER)
		jiffies = MAX_SCHEDULE_TIMEOUT;
	else
		jiffies = msecs_to_jiffies(timeout);

	ret = down_timeout(sem, jiffies);
	if (ret)
		status = AE_TIME;

	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				  "Failed to acquire semaphore[%p|%d|%d], %s",
				  handle, units, timeout,
				  acpi_format_exception(status)));
	} else {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				  "Acquired semaphore[%p|%d|%d]", handle,
				  units, timeout));
	}

	return status;
}

/*
 * TODO: Support for units > 1?  Only single-unit signals are handled here.
 */
acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
{
	struct semaphore *sem = (struct semaphore *)handle;

	if (!sem || (units < 1))
		return AE_BAD_PARAMETER;

	if (units > 1)
		return AE_SUPPORT;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle,
			  units));

	up(sem);

	return AE_OK;
}

#ifdef ACPI_FUTURE_USAGE
u32 acpi_os_get_line(char *buffer)
{

#ifdef ENABLE_DEBUGGER
	if (acpi_in_debugger) {
		u32 chars;

		kdb_read(buffer, sizeof(line_buf));

		/* remove the CR kdb includes */
		chars = strlen(buffer) - 1;
		buffer[chars] = '\0';
	}
#endif

	return 0;
}
#endif

acpi_status acpi_os_signal(u32 function, void *info)
{
	switch (function) {
	case ACPI_SIGNAL_FATAL:
		printk(KERN_ERR PREFIX "Fatal opcode executed\n");
		break;
	case ACPI_SIGNAL_BREAKPOINT:
		/*
		 * AML Breakpoint: treated as a no-op here; nothing useful to
		 * print without an AML debugger.
		 */
		break;
	default:
		break;
	}

	return AE_OK;
}

static int __init acpi_os_name_setup(char *str)
{
	char *p = acpi_os_name;
	int count = ACPI_MAX_OVERRIDE_LEN - 1;

	if (!str || !*str)
		return 0;

	for (; count-- && *str; str++) {
		if (isalnum(*str) || *str == ' ' || *str == ':')
			*p++ = *str;
		else if (*str == '\'' || *str == '"')
			continue;
		else
			break;
	}
	*p = 0;

	return 1;

}

__setup("acpi_os_name=", acpi_os_name_setup);

#define OSI_STRING_LENGTH_MAX 64
#define OSI_STRING_ENTRIES_MAX 16

struct osi_setup_entry {
	char string[OSI_STRING_LENGTH_MAX];
	bool enable;
};

static struct osi_setup_entry
		osi_setup_entries[OSI_STRING_ENTRIES_MAX] __initdata = {
	{"Module Device", true},
	{"Processor Device", true},
	{"3.0 _SCP Extensions", true},
	{"Processor Aggregator Device", true},
};

void __init acpi_osi_setup(char *str)
{
	struct osi_setup_entry *osi;
	bool enable = true;
	int i;

	if (!acpi_gbl_create_osi_method)
		return;

	if (str == NULL || *str == '\0') {
		printk(KERN_INFO PREFIX "_OSI method disabled\n");
		acpi_gbl_create_osi_method = FALSE;
		return;
	}

	if (*str == '!') {
		str++;
		if (*str == '\0') {
			osi_linux.default_disabling = 1;
			return;
		} else if (*str == '*') {
			acpi_update_interfaces(ACPI_DISABLE_ALL_STRINGS);
			for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
				osi = &osi_setup_entries[i];
				osi->enable = false;
			}
			return;
		}
		enable = false;
	}

	for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
		osi = &osi_setup_entries[i];
		if (!strcmp(osi->string, str)) {
			osi->enable = enable;
			break;
		} else if (osi->string[0] == '\0') {
			osi->enable = enable;
			strncpy(osi->string, str, OSI_STRING_LENGTH_MAX);
			break;
		}
	}
}

static void __init set_osi_linux(unsigned int enable)
{
	if (osi_linux.enable != enable)
		osi_linux.enable = enable;

	if (osi_linux.enable)
		acpi_osi_setup("Linux");
	else
		acpi_osi_setup("!Linux");

	return;
}

static void __init acpi_cmdline_osi_linux(unsigned int enable)
{
	osi_linux.cmdline = 1;
	osi_linux.dmi = 0;
	set_osi_linux(enable);

	return;
}

void __init acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d)
{
	printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);

	if (enable == -1)
		return;

	osi_linux.dmi = 1;
	set_osi_linux(enable);

	return;
}

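/*
 * Install or remove the configured _OSI strings once ACPICA is far enough
 * along to accept interface updates (called from acpi_os_initialize1()).
 */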
static void __init acpi_osi_setup_late(void)
{
	struct osi_setup_entry *osi;
	char *str;
	int i;
	acpi_status status;

	if (osi_linux.default_disabling) {
		status = acpi_update_interfaces(ACPI_DISABLE_ALL_VENDOR_STRINGS);

		if (ACPI_SUCCESS(status))
			printk(KERN_INFO PREFIX "Disabled all _OSI OS vendors\n");
	}

	for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
		osi = &osi_setup_entries[i];
		str = osi->string;

		if (*str == '\0')
			break;
		if (osi->enable) {
			status = acpi_install_interface(str);

			if (ACPI_SUCCESS(status))
				printk(KERN_INFO PREFIX "Added _OSI(%s)\n", str);
		} else {
			status = acpi_remove_interface(str);

			if (ACPI_SUCCESS(status))
				printk(KERN_INFO PREFIX "Deleted _OSI(%s)\n", str);
		}
	}
}

static int __init osi_setup(char *str)
{
	if (str && !strcmp("Linux", str))
		acpi_cmdline_osi_linux(1);
	else if (str && !strcmp("!Linux", str))
		acpi_cmdline_osi_linux(0);
	else
		acpi_osi_setup(str);

	return 1;
}

__setup("acpi_osi=", osi_setup);

/*
 * Disable the auto-serialization of methods that create named objects
 * (enabled by default in ACPICA).
 */
static int __init acpi_no_auto_serialize_setup(char *str)
{
	acpi_gbl_auto_serialize_methods = FALSE;
	pr_info("ACPI: auto-serialization disabled\n");

	return 1;
}

__setup("acpi_no_auto_serialize", acpi_no_auto_serialize_setup);

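/*
 * Check for resource conflicts between ACPI OperationRegions (SystemIO and
 * SystemMemory only) and native drivers.  acpi_enforce_resources= selects
 * the policy:
 *
 *   strict (default): conflicting native drivers are refused (-EBUSY),
 *   lax:              conflicting drivers still load, but a warning is
 *                     printed,
 *   no:               no conflict checking at all.
 */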
#define ENFORCE_RESOURCES_STRICT 2
#define ENFORCE_RESOURCES_LAX    1
#define ENFORCE_RESOURCES_NO     0

static unsigned int acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;

static int __init acpi_enforce_resources_setup(char *str)
{
	if (str == NULL || *str == '\0')
		return 0;

	if (!strcmp("strict", str))
		acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
	else if (!strcmp("lax", str))
		acpi_enforce_resources = ENFORCE_RESOURCES_LAX;
	else if (!strcmp("no", str))
		acpi_enforce_resources = ENFORCE_RESOURCES_NO;

	return 1;
}

__setup("acpi_enforce_resources=", acpi_enforce_resources_setup);

int acpi_check_resource_conflict(const struct resource *res)
{
	acpi_adr_space_type space_id;
	acpi_size length;
	u8 warn = 0;
	int clash = 0;

	if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
		return 0;
	if (!(res->flags & IORESOURCE_IO) && !(res->flags & IORESOURCE_MEM))
		return 0;

	if (res->flags & IORESOURCE_IO)
		space_id = ACPI_ADR_SPACE_SYSTEM_IO;
	else
		space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY;

	length = resource_size(res);
	if (acpi_enforce_resources != ENFORCE_RESOURCES_NO)
		warn = 1;
	clash = acpi_check_address_range(space_id, res->start, length, warn);

	if (clash) {
		if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) {
			if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX)
				printk(KERN_NOTICE "ACPI: This conflict may"
				       " cause random problems and system"
				       " instability\n");
			printk(KERN_INFO "ACPI: If an ACPI driver is available"
			       " for this device, you should use it instead of"
			       " the native driver\n");
		}
		if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT)
			return -EBUSY;
	}
	return 0;
}
EXPORT_SYMBOL(acpi_check_resource_conflict);

int acpi_check_region(resource_size_t start, resource_size_t n,
		      const char *name)
{
	struct resource res = {
		.start = start,
		.end = start + n - 1,
		.name = name,
		.flags = IORESOURCE_IO,
	};

	return acpi_check_resource_conflict(&res);
}
EXPORT_SYMBOL(acpi_check_region);

/*
 * Let drivers know whether the resource checks are effective.
 */
int acpi_resources_are_enforced(void)
{
	return acpi_enforce_resources == ENFORCE_RESOURCES_STRICT;
}
EXPORT_SYMBOL(acpi_resources_are_enforced);

/*
 * Deallocate the memory for a spinlock.
 */
void acpi_os_delete_lock(acpi_spinlock handle)
{
	ACPI_FREE(handle);
}

/*
 * Acquire a spinlock.
 *
 * handle is a pointer to the spinlock_t.
 */
acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
{
	acpi_cpu_flags flags;
	spin_lock_irqsave(lockp, flags);
	return flags;
}

/*
 * Release the spinlock acquired by acpi_os_acquire_lock().
 */
void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags)
{
	spin_unlock_irqrestore(lockp, flags);
}

#ifndef ACPI_USE_LOCAL_CACHE

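/*
 * acpi_os_create_cache - create a kmem cache for ACPICA objects.
 *
 * @name:  ASCII name of the cache
 * @size:  size of each cached object
 * @depth: maximum depth of the cache (in objects) -- ignored here
 * @cache: where the new cache object is returned
 */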
acpi_status
acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache)
{
	*cache = kmem_cache_create(name, size, 0, 0, NULL);
	if (*cache == NULL)
		return AE_ERROR;
	else
		return AE_OK;
}

/*
 * acpi_os_purge_cache - release unused memory held by the cache back to the
 * system; the cache itself remains usable.
 */
acpi_status acpi_os_purge_cache(acpi_cache_t * cache)
{
	kmem_cache_shrink(cache);
	return (AE_OK);
}

/*
 * acpi_os_delete_cache - free all cached objects and destroy the cache.
 */
acpi_status acpi_os_delete_cache(acpi_cache_t * cache)
{
	kmem_cache_destroy(cache);
	return (AE_OK);
}

/*
 * acpi_os_release_object - return an object to the cache for reuse.
 */
acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
{
	kmem_cache_free(cache, object);
	return (AE_OK);
}
#endif

static int __init acpi_no_static_ssdt_setup(char *s)
{
	acpi_gbl_disable_ssdt_table_install = TRUE;
	pr_info("ACPI: static SSDT installation disabled\n");

	return 0;
}

early_param("acpi_no_static_ssdt", acpi_no_static_ssdt_setup);

static int __init acpi_disable_return_repair(char *s)
{
	printk(KERN_NOTICE PREFIX
	       "ACPI: Predefined validation mechanism disabled\n");
	acpi_gbl_disable_auto_repair = TRUE;

	return 1;
}

__setup("acpica_no_return_repair", acpi_disable_return_repair);

acpi_status __init acpi_os_initialize(void)
{
	acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
	acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
	acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe0_block);
	acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe1_block);
	if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) {
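		/*
		 * Pre-map the reset register if it is in system memory, so
		 * that it can be written directly at reset time.
		 */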
		int rv;

		rv = acpi_os_map_generic_address(&acpi_gbl_FADT.reset_register);
		pr_debug(PREFIX "%s: map reset_reg status %d\n", __func__, rv);
	}

	return AE_OK;
}

acpi_status __init acpi_os_initialize1(void)
{
	kacpid_wq = alloc_workqueue("kacpid", 0, 1);
	kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
	kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0);
	BUG_ON(!kacpid_wq);
	BUG_ON(!kacpi_notify_wq);
	BUG_ON(!kacpi_hotplug_wq);
	acpi_install_interface_handler(acpi_osi_handler);
	acpi_osi_setup_late();
	return AE_OK;
}

acpi_status acpi_os_terminate(void)
{
	if (acpi_irq_handler) {
		acpi_os_remove_interrupt_handler(acpi_gbl_FADT.sci_interrupt,
						 acpi_irq_handler);
	}

	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe1_block);
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe0_block);
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
	if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER)
		acpi_os_unmap_generic_address(&acpi_gbl_FADT.reset_register);

	destroy_workqueue(kacpid_wq);
	destroy_workqueue(kacpi_notify_wq);
	destroy_workqueue(kacpi_hotplug_wq);

	return AE_OK;
}

acpi_status acpi_os_prepare_sleep(u8 sleep_state, u32 pm1a_control,
				  u32 pm1b_control)
{
	int rc = 0;
	if (__acpi_os_prepare_sleep)
		rc = __acpi_os_prepare_sleep(sleep_state,
					     pm1a_control, pm1b_control);
	if (rc < 0)
		return AE_ERROR;
	else if (rc > 0)
		return AE_CTRL_SKIP;

	return AE_OK;
}

void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
			       u32 pm1a_ctrl, u32 pm1b_ctrl))
{
	__acpi_os_prepare_sleep = func;
}

acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a,
				  u32 val_b)
{
	int rc = 0;
	if (__acpi_os_prepare_extended_sleep)
		rc = __acpi_os_prepare_extended_sleep(sleep_state,
						      val_a, val_b);
	if (rc < 0)
		return AE_ERROR;
	else if (rc > 0)
		return AE_CTRL_SKIP;

	return AE_OK;
}

void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,
				       u32 val_a, u32 val_b))
{
	__acpi_os_prepare_extended_sleep = func;
}