1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26#ifndef _ASM_ACPI_H
27#define _ASM_ACPI_H
28
29#ifdef __KERNEL__
30
31#include <acpi/pdc_intel.h>
32
33#include <linux/init.h>
34#include <linux/numa.h>
35#include <asm/system.h>
36#include <asm/numa.h>
37
38#define COMPILER_DEPENDENT_INT64 long
39#define COMPILER_DEPENDENT_UINT64 unsigned long
40
41
42
43
44
45
46
47
48
49#define ACPI_SYSTEM_XFACE
50#define ACPI_EXTERNAL_XFACE
51#define ACPI_INTERNAL_XFACE
52#define ACPI_INTERNAL_VAR_XFACE
53
54
55
56#define ACPI_ASM_MACROS
57#define BREAKPOINT3
58#define ACPI_DISABLE_IRQS() local_irq_disable()
59#define ACPI_ENABLE_IRQS() local_irq_enable()
60#define ACPI_FLUSH_CPU_CACHE()
61
/*
 * Acquire the ACPI global lock embedded in the FACS.
 *
 * Lock layout (per the ACPI spec): bit 0 = pending, bit 1 = owned.
 * Atomically set the owned bit; if the lock was already owned, set the
 * pending bit instead so the current owner will signal us on release.
 *
 * Returns -1 (true) if the lock was acquired, 0 if the caller must wait
 * for a GBL_RLS notification from the firmware-side owner.
 */
static inline int
ia64_acpi_acquire_global_lock (unsigned int *lock)
{
	unsigned int old, new, val;
	do {
		old = *lock;
		/*
		 * Clear both flag bits, set owned (+2 = bit 1), and copy
		 * the old owned bit down into pending (bit 0) if the lock
		 * was already held.
		 */
		new = (((old & ~0x3) + 2) + ((old >> 1) & 0x1));
		val = ia64_cmpxchg4_acq(lock, new, old);
	} while (unlikely (val != old));
	/* new == 2 (owned set, pending clear) <=> we got the lock */
	return (new < 3) ? -1 : 0;
}
73
/*
 * Release the ACPI global lock: atomically clear both the owned (bit 1)
 * and pending (bit 0) flags.
 *
 * Returns the previous pending bit; if nonzero, a waiter exists and the
 * caller must issue a GBL_RLS to wake it up.
 */
static inline int
ia64_acpi_release_global_lock (unsigned int *lock)
{
	unsigned int old, new, val;
	do {
		old = *lock;
		new = old & ~0x3;	/* drop owned + pending */
		val = ia64_cmpxchg4_acq(lock, new, old);
	} while (unlikely (val != old));
	return old & 0x1;	/* old pending bit */
}
85
/*
 * Glue macros used by the ACPICA core. Acq receives nonzero (-1) when
 * the global lock was obtained / when a waiter must be signalled, per
 * the helpers above.
 */
#define ACPI_ACQUIRE_GLOBAL_LOCK(facs, Acq) \
	((Acq) = ia64_acpi_acquire_global_lock(&facs->global_lock))

#define ACPI_RELEASE_GLOBAL_LOCK(facs, Acq) \
	((Acq) = ia64_acpi_release_global_lock(&facs->global_lock))
91
#ifdef CONFIG_ACPI
/* ia64 is always booted via ACPI: these can never change at runtime. */
#define acpi_disabled 0	/* ACPI always enabled on ia64 */
#define acpi_noirq 0	/* ACPI always enabled on ia64 */
#define acpi_pci_disabled 0	/* ACPI PCI always enabled on ia64 */
#define acpi_strict 1	/* no out-of-spec workarounds on ia64 */
#endif
/* No C-state adjustment needed on ia64; pass the value through. */
#define acpi_processor_cstate_check(x) (x)
/* No-op: ACPI cannot be disabled on ia64 (see acpi_disabled above). */
static inline void disable_acpi(void)
{
}
/* No-op stub: _CRS quirk handling is not applied on ia64. */
static inline void pci_acpi_crs_quirks(void)
{
}
101
102#ifdef CONFIG_IA64_GENERIC
103const char *acpi_get_sysname (void);
104#else
/*
 * Return the machine-vector (platform) name. On non-generic kernels the
 * platform is fixed at build time, so this collapses to a single
 * constant string chosen by the CONFIG_IA64_* option; an unknown
 * configuration is a build error.
 */
static inline const char *acpi_get_sysname (void)
{
# if defined (CONFIG_IA64_HP_SIM)
	return "hpsim";
# elif defined (CONFIG_IA64_HP_ZX1)
	return "hpzx1";
# elif defined (CONFIG_IA64_HP_ZX1_SWIOTLB)
	return "hpzx1_swiotlb";
# elif defined (CONFIG_IA64_SGI_SN2)
	return "sn2";
# elif defined (CONFIG_IA64_SGI_UV)
	return "uv";
# elif defined (CONFIG_IA64_DIG)
	return "dig";
# elif defined (CONFIG_IA64_XEN_GUEST)
	return "xen";
# elif defined(CONFIG_IA64_DIG_VTD)
	return "dig_vtd";
# else
#	error Unknown platform.  Fix acpi.c.
# endif
}
127#endif
128int acpi_request_vector (u32 int_type);
129int acpi_gsi_to_irq (u32 gsi, unsigned int *irq);
130
131
132extern int acpi_save_state_mem(void);
133extern void acpi_restore_state_mem(void);
134extern unsigned long acpi_wakeup_address;
135
136
137
138
139
140extern unsigned int can_cpei_retarget(void);
141extern unsigned int is_cpu_cpei_target(unsigned int cpu);
142extern void set_cpei_target_cpu(unsigned int cpu);
143extern unsigned int get_cpei_target_cpu(void);
144extern void prefill_possible_map(void);
145#ifdef CONFIG_ACPI_HOTPLUG_CPU
146extern int additional_cpus;
147#else
148#define additional_cpus 0
149#endif
150
151#ifdef CONFIG_ACPI_NUMA
152#if MAX_NUMNODES > 256
153#define MAX_PXM_DOMAINS MAX_NUMNODES
154#else
155#define MAX_PXM_DOMAINS (256)
156#endif
157extern int __devinitdata pxm_to_nid_map[MAX_PXM_DOMAINS];
158extern int __initdata nid_to_pxm_map[MAX_NUMNODES];
159#endif
160
/* _PDC (Processor Driver Capabilities) evaluation is always available on ia64. */
static inline bool arch_has_acpi_pdc(void)
{
	return true;
}
/*
 * OR the architecture's capability flags into the _PDC capability
 * buffer before it is handed to firmware; buf[2] is the capabilities
 * DWORD, and ia64 advertises SMP Enhanced SpeedStep support.
 */
static inline void arch_acpi_set_pdc_bits(u32 *buf)
{
	buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP;
}
166
167#define acpi_unlazy_tlb(x)
168
169#ifdef CONFIG_ACPI_NUMA
170extern cpumask_t early_cpu_possible_map;
171#define for_each_possible_early_cpu(cpu) \
172 for_each_cpu_mask((cpu), early_cpu_possible_map)
173
/*
 * Pad early_cpu_possible_map out to at least min_cpus, plus
 * reserve_cpus extra hot-pluggable slots, capped at NR_CPUS. Each
 * newly added slot that has no NUMA node yet is assigned one by
 * round-robining over the online nodes, so hot-added CPUs spread
 * evenly across nodes.
 */
static inline void per_cpu_scan_finalize(int min_cpus, int reserve_cpus)
{
	int low_cpu, high_cpu;
	int cpu;
	int next_nid = 0;

	/* CPUs already discovered (e.g. from the ACPI/SRAT scan) */
	low_cpu = cpus_weight(early_cpu_possible_map);

	high_cpu = max(low_cpu, min_cpus);
	high_cpu = min(high_cpu + reserve_cpus, NR_CPUS);

	for (cpu = low_cpu; cpu < high_cpu; cpu++) {
		cpu_set(cpu, early_cpu_possible_map);
		if (node_cpuid[cpu].nid == NUMA_NO_NODE) {
			node_cpuid[cpu].nid = next_nid;
			next_nid++;
			if (next_nid >= num_online_nodes())
				next_nid = 0;
		}
	}
}
195#endif
196
197#endif
198
199#endif
200