/******************************************************************************
 *
 * Module Name: evgpeblk - GPE block creation and initialization.
 *
 *****************************************************************************/

#include <acpi/acpi.h>
#include "accommon.h"
#include "acevents.h"
#include "acnamesp.h"

#define _COMPONENT          ACPI_EVENTS
ACPI_MODULE_NAME("evgpeblk")
#if (!ACPI_REDUCED_HARDWARE)	/* Entire module */

/* Local prototypes */

static acpi_status
acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
			  u32 interrupt_number);

static acpi_status
acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block);

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_install_gpe_block
 *
 * PARAMETERS:  gpe_block           - New GPE block
 *              interrupt_number    - Interrupt to be associated with this
 *                                    GPE block
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Install new GPE block with mutex support
 *
 ******************************************************************************/
static acpi_status
acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
			  u32 interrupt_number)
{
	struct acpi_gpe_block_info *next_gpe_block;
	struct acpi_gpe_xrupt_info *gpe_xrupt_block;
	acpi_status status;
	acpi_cpu_flags flags;

	ACPI_FUNCTION_TRACE(ev_install_gpe_block);

	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	status =
	    acpi_ev_get_gpe_xrupt_block(interrupt_number, &gpe_xrupt_block);
	if (ACPI_FAILURE(status)) {
		goto unlock_and_exit;
	}

	/* Install the new block at the end of the list with lock */

	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
	if (gpe_xrupt_block->gpe_block_list_head) {
		next_gpe_block = gpe_xrupt_block->gpe_block_list_head;
		while (next_gpe_block->next) {
			next_gpe_block = next_gpe_block->next;
		}

		next_gpe_block->next = gpe_block;
		gpe_block->previous = next_gpe_block;
	} else {
		gpe_xrupt_block->gpe_block_list_head = gpe_block;
	}

	gpe_block->xrupt_block = gpe_xrupt_block;
	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);

unlock_and_exit:
	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_delete_gpe_block
 *
 * PARAMETERS:  gpe_block           - Existing GPE block
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Remove a GPE block
 *
 ******************************************************************************/
acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block)
{
	acpi_status status;
	acpi_cpu_flags flags;

	ACPI_FUNCTION_TRACE(ev_delete_gpe_block);

	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Disable all GPEs in this block */

	status =
	    acpi_hw_disable_gpe_block(gpe_block->xrupt_block, gpe_block, NULL);

	if (!gpe_block->previous && !gpe_block->next) {

		/* This is the last gpe_block on this interrupt */

		status = acpi_ev_delete_gpe_xrupt(gpe_block->xrupt_block);
		if (ACPI_FAILURE(status)) {
			goto unlock_and_exit;
		}
	} else {
		/* Remove the block on this interrupt with lock */

		flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
		if (gpe_block->previous) {
			gpe_block->previous->next = gpe_block->next;
		} else {
			gpe_block->xrupt_block->gpe_block_list_head =
			    gpe_block->next;
		}

		if (gpe_block->next) {
			gpe_block->next->previous = gpe_block->previous;
		}

		acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
	}

	acpi_current_gpe_count -= gpe_block->gpe_count;

	/* Free the gpe_block */

	ACPI_FREE(gpe_block->register_info);
	ACPI_FREE(gpe_block->event_info);
	ACPI_FREE(gpe_block);

unlock_and_exit:
	status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_create_gpe_info_blocks
 *
 * PARAMETERS:  gpe_block           - New GPE block
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Create the register_info and event_info blocks for this
 *              GPE block
 *
 ******************************************************************************/
static acpi_status
acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
{
	struct acpi_gpe_register_info *gpe_register_info = NULL;
	struct acpi_gpe_event_info *gpe_event_info = NULL;
	struct acpi_gpe_event_info *this_event;
	struct acpi_gpe_register_info *this_register;
	u32 i;
	u32 j;
	acpi_status status;

	ACPI_FUNCTION_TRACE(ev_create_gpe_info_blocks);

	/* Allocate the GPE register information block */

	gpe_register_info = ACPI_ALLOCATE_ZEROED((acpi_size)gpe_block->
						 register_count *
						 sizeof(struct
							acpi_gpe_register_info));
	if (!gpe_register_info) {
		ACPI_ERROR((AE_INFO,
			    "Could not allocate the GpeRegisterInfo table"));
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	/*
	 * Allocate the GPE event_info block. There are eight distinct GPEs
	 * per register. Initialization to zeros is sufficient.
	 */
	gpe_event_info = ACPI_ALLOCATE_ZEROED((acpi_size)gpe_block->gpe_count *
					      sizeof(struct
						     acpi_gpe_event_info));
	if (!gpe_event_info) {
		ACPI_ERROR((AE_INFO,
			    "Could not allocate the GpeEventInfo table"));
		status = AE_NO_MEMORY;
		goto error_exit;
	}

	/* Save the new Info arrays in the GPE block */

	gpe_block->register_info = gpe_register_info;
	gpe_block->event_info = gpe_event_info;

	/*
	 * Initialize the GPE Register and Event structures. A goal of these
	 * tables is to hide the fact that there are two separate GPE register
	 * sets in a given GPE hardware block: the status registers occupy the
	 * first half, and the enable registers occupy the second half.
	 */
	this_register = gpe_register_info;
	this_event = gpe_event_info;

	for (i = 0; i < gpe_block->register_count; i++) {

		/* Init the register_info for this GPE register (8 GPEs) */

		this_register->base_gpe_number = (u16)
		    (gpe_block->block_base_number +
		     (i * ACPI_GPE_REGISTER_WIDTH));

		this_register->status_address.address = gpe_block->address + i;

		this_register->enable_address.address =
		    gpe_block->address + i + gpe_block->register_count;

		this_register->status_address.space_id = gpe_block->space_id;
		this_register->enable_address.space_id = gpe_block->space_id;
		this_register->status_address.bit_width =
		    ACPI_GPE_REGISTER_WIDTH;
		this_register->enable_address.bit_width =
		    ACPI_GPE_REGISTER_WIDTH;
		this_register->status_address.bit_offset = 0;
		this_register->enable_address.bit_offset = 0;

		/* Init the event_info for each GPE within this register */

		for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
			this_event->gpe_number =
			    (u8) (this_register->base_gpe_number + j);
			this_event->register_info = this_register;
			this_event++;
		}

		/* Disable all GPEs within this register */

		status = acpi_hw_write(0x00, &this_register->enable_address);
		if (ACPI_FAILURE(status)) {
			goto error_exit;
		}

		/* Clear any pending GPE events within this register */

		status = acpi_hw_write(0xFF, &this_register->status_address);
		if (ACPI_FAILURE(status)) {
			goto error_exit;
		}

		this_register++;
	}

	return_ACPI_STATUS(AE_OK);

error_exit:
	if (gpe_register_info) {
		ACPI_FREE(gpe_register_info);
	}
	if (gpe_event_info) {
		ACPI_FREE(gpe_event_info);
	}

	return_ACPI_STATUS(status);
}
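
/*
 * Illustrative note (not part of the original source): the loop above lays
 * the two register banks out back to back. Assuming a hypothetical GPE block
 * at address 0x60 with register_count == 2, acpi_ev_create_gpe_info_blocks
 * computes:
 *
 *     status_address[0] = 0x60    enable_address[0] = 0x62
 *     status_address[1] = 0x61    enable_address[1] = 0x63
 *
 * i.e. the status registers occupy the first half of the block and the
 * enable registers the second half, one register (ACPI_GPE_REGISTER_WIDTH
 * = 8 GPEs) per loop iteration.
 */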

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_create_gpe_block
 *
 * PARAMETERS:  gpe_device            - Handle to the parent GPE block
 *              address               - Address of the GPE registers
 *              space_id              - Address space of the GPE registers
 *              register_count        - Number of GPE register pairs in the
 *                                      block
 *              gpe_block_base_number - Starting GPE number for the block
 *              interrupt_number      - H/W interrupt for the block
 *              return_gpe_block      - Where the new block descriptor is
 *                                      returned
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Create and Install a block of GPE registers. All GPEs within
 *              the block are disabled at exit.
 *              Note: Assumes namespace is locked.
 *
 ******************************************************************************/
acpi_status
acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
			 u64 address,
			 u8 space_id,
			 u32 register_count,
			 u16 gpe_block_base_number,
			 u32 interrupt_number,
			 struct acpi_gpe_block_info **return_gpe_block)
{
	acpi_status status;
	struct acpi_gpe_block_info *gpe_block;
	struct acpi_gpe_walk_info walk_info;

	ACPI_FUNCTION_TRACE(ev_create_gpe_block);

	if (!register_count) {
		return_ACPI_STATUS(AE_OK);
	}

	/* Allocate a new GPE block */

	gpe_block = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_block_info));
	if (!gpe_block) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	/* Initialize the new GPE block */

	gpe_block->address = address;
	gpe_block->space_id = space_id;
	gpe_block->node = gpe_device;
	gpe_block->gpe_count = (u16)(register_count * ACPI_GPE_REGISTER_WIDTH);
	gpe_block->initialized = FALSE;
	gpe_block->register_count = register_count;
	gpe_block->block_base_number = gpe_block_base_number;

	/*
	 * Create the register_info and event_info sub-structures
	 * Note: disables and clears all GPEs in the block
	 */
	status = acpi_ev_create_gpe_info_blocks(gpe_block);
	if (ACPI_FAILURE(status)) {
		ACPI_FREE(gpe_block);
		return_ACPI_STATUS(status);
	}

	/* Install the new block in the global lists */

	status = acpi_ev_install_gpe_block(gpe_block, interrupt_number);
	if (ACPI_FAILURE(status)) {
		ACPI_FREE(gpe_block->register_info);
		ACPI_FREE(gpe_block->event_info);
		ACPI_FREE(gpe_block);
		return_ACPI_STATUS(status);
	}

	acpi_gbl_all_gpes_initialized = FALSE;

	/* Find all GPE methods (_Lxx or _Exx) for this block */

	walk_info.gpe_block = gpe_block;
	walk_info.gpe_device = gpe_device;
	walk_info.execute_by_owner_id = FALSE;

	status = acpi_ns_walk_namespace(ACPI_TYPE_METHOD, gpe_device,
					ACPI_UINT32_MAX, ACPI_NS_WALK_NO_UNLOCK,
					acpi_ev_match_gpe_method, NULL,
					&walk_info, NULL);

	/* Return the new block */

	if (return_gpe_block) {
		(*return_gpe_block) = gpe_block;
	}

	ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
			      "    Initialized GPE %02X to %02X [%4.4s] %u regs on interrupt 0x%X%s\n",
			      (u32)gpe_block->block_base_number,
			      (u32)(gpe_block->block_base_number +
				    (gpe_block->gpe_count - 1)),
			      gpe_device->name.ascii, gpe_block->register_count,
			      interrupt_number,
			      interrupt_number ==
			      acpi_gbl_FADT.sci_interrupt ? " (SCI)" : ""));

	/* Update global count of currently available GPEs */

	acpi_current_gpe_count += gpe_block->gpe_count;
	return_ACPI_STATUS(AE_OK);
}
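
/*
 * Illustrative sketch (not part of the original source): a typical caller,
 * such as the FADT GPE initialization code in evgpeinit.c, creates GPE block
 * 0 roughly as below. The FADT field and global names are assumptions taken
 * from ACPICA conventions, not definitions made in this file.
 *
 *     u16 register_count0;
 *     acpi_status status;
 *
 *     register_count0 = (u16)(acpi_gbl_FADT.gpe0_block_length / 2);
 *     status = acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
 *                                       acpi_gbl_FADT.xgpe0_block.address,
 *                                       acpi_gbl_FADT.xgpe0_block.space_id,
 *                                       register_count0, 0,
 *                                       acpi_gbl_FADT.sci_interrupt,
 *                                       &acpi_gbl_gpe_fadt_blocks[0]);
 *
 * Note that register_count counts status/enable register pairs, so the raw
 * FADT block length (which covers both banks) is divided by two.
 */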

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_initialize_gpe_block
 *
 * PARAMETERS:  gpe_xrupt_info      - GPE interrupt block for this GPE block
 *              gpe_block           - GPE block to initialize
 *              context             - Points to a u8 "is polling needed" flag
 *                                    (standard acpi_gpe_callback context)
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Initialize and enable a GPE block. Enable GPEs that have
 *              associated methods.
 *              Note: Assumes namespace is locked.
 *
 ******************************************************************************/
acpi_status
acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
			     struct acpi_gpe_block_info *gpe_block,
			     void *context)
{
	acpi_status status;
	struct acpi_gpe_event_info *gpe_event_info;
	u32 gpe_enabled_count;
	u32 gpe_index;
	u32 i;
	u32 j;
	u8 *is_polling_needed = context;
	ACPI_ERROR_ONLY(u32 gpe_number);

	ACPI_FUNCTION_TRACE(ev_initialize_gpe_block);

	/*
	 * Ignore a null GPE block (e.g., if no GPE block 1 exists), and
	 * any GPE blocks that have been initialized already.
	 */
	if (!gpe_block || gpe_block->initialized) {
		return_ACPI_STATUS(AE_OK);
	}

	/*
	 * Enable all GPEs that have a corresponding method and have the
	 * ACPI_GPE_CAN_WAKE flag unset. Any other GPEs within this block
	 * must be enabled via the acpi_enable_gpe interface.
	 */
	gpe_enabled_count = 0;

	for (i = 0; i < gpe_block->register_count; i++) {
		for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {

			/* Get the info block for this particular GPE */

			gpe_index = (i * ACPI_GPE_REGISTER_WIDTH) + j;
			gpe_event_info = &gpe_block->event_info[gpe_index];
			ACPI_ERROR_ONLY(gpe_number =
					gpe_block->block_base_number +
					gpe_index);
			gpe_event_info->flags |= ACPI_GPE_INITIALIZED;

			/*
			 * Ignore GPEs that have no corresponding _Lxx/_Exx
			 * method and GPEs that are used for wakeup
			 */
			if ((ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) !=
			     ACPI_GPE_DISPATCH_METHOD)
			    || (gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) {
				continue;
			}

			status = acpi_ev_add_gpe_reference(gpe_event_info, FALSE);
			if (ACPI_FAILURE(status)) {
				ACPI_EXCEPTION((AE_INFO, status,
						"Could not enable GPE 0x%02X",
						gpe_number));
				continue;
			}

			gpe_event_info->flags |= ACPI_GPE_AUTO_ENABLED;

			if (is_polling_needed &&
			    ACPI_GPE_IS_POLLING_NEEDED(gpe_event_info)) {
				*is_polling_needed = TRUE;
			}

			gpe_enabled_count++;
		}
	}

	if (gpe_enabled_count) {
		ACPI_INFO(("Enabled %u GPEs in block %02X to %02X",
			   gpe_enabled_count, (u32)gpe_block->block_base_number,
			   (u32)(gpe_block->block_base_number +
				 (gpe_block->gpe_count - 1))));
	}

	gpe_block->initialized = TRUE;

	return_ACPI_STATUS(AE_OK);
}
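
/*
 * Illustrative sketch (not part of the original source): this function has the
 * acpi_gpe_callback signature and is normally run for every installed GPE
 * block through the GPE list walker, roughly as below. acpi_ev_walk_gpe_list
 * is assumed from the surrounding ACPICA event code.
 *
 *     u8 is_polling_needed = FALSE;
 *
 *     status = acpi_ev_walk_gpe_list(acpi_ev_initialize_gpe_block,
 *                                    &is_polling_needed);
 *
 * The context pointer lets the caller learn whether any auto-enabled GPE
 * cannot rely on interrupts and therefore requires polling.
 */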

#endif				/* !ACPI_REDUCED_HARDWARE */