1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44#include <acpi/acpi.h>
45#include "accommon.h"
46#include "acevents.h"
47#include "acnamesp.h"
48
49#define _COMPONENT ACPI_EVENTS
50ACPI_MODULE_NAME("evgpeblk")
51#if (!ACPI_REDUCED_HARDWARE)
52
53static acpi_status
54acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
55 u32 interrupt_number);
56
57static acpi_status
58acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block);
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74static acpi_status
75acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
76 u32 interrupt_number)
77{
78 struct acpi_gpe_block_info *next_gpe_block;
79 struct acpi_gpe_xrupt_info *gpe_xrupt_block;
80 acpi_status status;
81 acpi_cpu_flags flags;
82
83 ACPI_FUNCTION_TRACE(ev_install_gpe_block);
84
85 status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
86 if (ACPI_FAILURE(status)) {
87 return_ACPI_STATUS(status);
88 }
89
90 gpe_xrupt_block = acpi_ev_get_gpe_xrupt_block(interrupt_number);
91 if (!gpe_xrupt_block) {
92 status = AE_NO_MEMORY;
93 goto unlock_and_exit;
94 }
95
96
97
98 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
99 if (gpe_xrupt_block->gpe_block_list_head) {
100 next_gpe_block = gpe_xrupt_block->gpe_block_list_head;
101 while (next_gpe_block->next) {
102 next_gpe_block = next_gpe_block->next;
103 }
104
105 next_gpe_block->next = gpe_block;
106 gpe_block->previous = next_gpe_block;
107 } else {
108 gpe_xrupt_block->gpe_block_list_head = gpe_block;
109 }
110
111 gpe_block->xrupt_block = gpe_xrupt_block;
112 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
113
114 unlock_and_exit:
115 status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
116 return_ACPI_STATUS(status);
117}
118
119
120
121
122
123
124
125
126
127
128
129
130
131acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block)
132{
133 acpi_status status;
134 acpi_cpu_flags flags;
135
136 ACPI_FUNCTION_TRACE(ev_install_gpe_block);
137
138 status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
139 if (ACPI_FAILURE(status)) {
140 return_ACPI_STATUS(status);
141 }
142
143
144
145 status =
146 acpi_hw_disable_gpe_block(gpe_block->xrupt_block, gpe_block, NULL);
147
148 if (!gpe_block->previous && !gpe_block->next) {
149
150
151
152 status = acpi_ev_delete_gpe_xrupt(gpe_block->xrupt_block);
153 if (ACPI_FAILURE(status)) {
154 goto unlock_and_exit;
155 }
156 } else {
157
158
159 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
160 if (gpe_block->previous) {
161 gpe_block->previous->next = gpe_block->next;
162 } else {
163 gpe_block->xrupt_block->gpe_block_list_head =
164 gpe_block->next;
165 }
166
167 if (gpe_block->next) {
168 gpe_block->next->previous = gpe_block->previous;
169 }
170 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
171 }
172
173 acpi_current_gpe_count -= gpe_block->gpe_count;
174
175
176
177 ACPI_FREE(gpe_block->register_info);
178 ACPI_FREE(gpe_block->event_info);
179 ACPI_FREE(gpe_block);
180
181 unlock_and_exit:
182 status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
183 return_ACPI_STATUS(status);
184}
185
186
187
188
189
190
191
192
193
194
195
196
197
198static acpi_status
199acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
200{
201 struct acpi_gpe_register_info *gpe_register_info = NULL;
202 struct acpi_gpe_event_info *gpe_event_info = NULL;
203 struct acpi_gpe_event_info *this_event;
204 struct acpi_gpe_register_info *this_register;
205 u32 i;
206 u32 j;
207 acpi_status status;
208
209 ACPI_FUNCTION_TRACE(ev_create_gpe_info_blocks);
210
211
212
213 gpe_register_info = ACPI_ALLOCATE_ZEROED((acpi_size) gpe_block->
214 register_count *
215 sizeof(struct
216 acpi_gpe_register_info));
217 if (!gpe_register_info) {
218 ACPI_ERROR((AE_INFO,
219 "Could not allocate the GpeRegisterInfo table"));
220 return_ACPI_STATUS(AE_NO_MEMORY);
221 }
222
223
224
225
226
227 gpe_event_info = ACPI_ALLOCATE_ZEROED((acpi_size) gpe_block->gpe_count *
228 sizeof(struct
229 acpi_gpe_event_info));
230 if (!gpe_event_info) {
231 ACPI_ERROR((AE_INFO,
232 "Could not allocate the GpeEventInfo table"));
233 status = AE_NO_MEMORY;
234 goto error_exit;
235 }
236
237
238
239 gpe_block->register_info = gpe_register_info;
240 gpe_block->event_info = gpe_event_info;
241
242
243
244
245
246
247
248 this_register = gpe_register_info;
249 this_event = gpe_event_info;
250
251 for (i = 0; i < gpe_block->register_count; i++) {
252
253
254
255 this_register->base_gpe_number =
256 (u8) (gpe_block->block_base_number +
257 (i * ACPI_GPE_REGISTER_WIDTH));
258
259 this_register->status_address.address =
260 gpe_block->block_address.address + i;
261
262 this_register->enable_address.address =
263 gpe_block->block_address.address + i +
264 gpe_block->register_count;
265
266 this_register->status_address.space_id =
267 gpe_block->block_address.space_id;
268 this_register->enable_address.space_id =
269 gpe_block->block_address.space_id;
270 this_register->status_address.bit_width =
271 ACPI_GPE_REGISTER_WIDTH;
272 this_register->enable_address.bit_width =
273 ACPI_GPE_REGISTER_WIDTH;
274 this_register->status_address.bit_offset = 0;
275 this_register->enable_address.bit_offset = 0;
276
277
278
279 for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
280 this_event->gpe_number =
281 (u8) (this_register->base_gpe_number + j);
282 this_event->register_info = this_register;
283 this_event++;
284 }
285
286
287
288 status = acpi_hw_write(0x00, &this_register->enable_address);
289 if (ACPI_FAILURE(status)) {
290 goto error_exit;
291 }
292
293
294
295 status = acpi_hw_write(0xFF, &this_register->status_address);
296 if (ACPI_FAILURE(status)) {
297 goto error_exit;
298 }
299
300 this_register++;
301 }
302
303 return_ACPI_STATUS(AE_OK);
304
305 error_exit:
306 if (gpe_register_info) {
307 ACPI_FREE(gpe_register_info);
308 }
309 if (gpe_event_info) {
310 ACPI_FREE(gpe_event_info);
311 }
312
313 return_ACPI_STATUS(status);
314}
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_create_gpe_block
 *
 * PARAMETERS:  gpe_device          - Handle to the parent GPE block device
 *              gpe_block_address   - Address and space_id of the block
 *              register_count      - Number of GPE register pairs
 *              gpe_block_base_number - Starting GPE number for the block
 *              interrupt_number    - H/W interrupt for the block
 *              return_gpe_block    - Where the new block descriptor is
 *                                    returned (optional, may be NULL)
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Allocate and initialize a new GPE block, create its info
 *              tables, install it on the interrupt's block list, and walk
 *              the namespace under gpe_device to find the _Lxx/_Exx GPE
 *              methods associated with the block.
 *
 ******************************************************************************/

acpi_status
acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
			 struct acpi_generic_address *gpe_block_address,
			 u32 register_count,
			 u8 gpe_block_base_number,
			 u32 interrupt_number,
			 struct acpi_gpe_block_info **return_gpe_block)
{
	acpi_status status;
	struct acpi_gpe_block_info *gpe_block;
	struct acpi_gpe_walk_info walk_info;

	ACPI_FUNCTION_TRACE(ev_create_gpe_block);

	/* An empty block (no registers) is not an error -- nothing to do */

	if (!register_count) {
		return_ACPI_STATUS(AE_OK);
	}

	/* Allocate a new GPE block descriptor */

	gpe_block = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_block_info));
	if (!gpe_block) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	/* Initialize the new GPE block (8 GPEs per register) */

	gpe_block->node = gpe_device;
	gpe_block->gpe_count = (u16)(register_count * ACPI_GPE_REGISTER_WIDTH);
	gpe_block->initialized = FALSE;
	gpe_block->register_count = register_count;
	gpe_block->block_base_number = gpe_block_base_number;

	ACPI_MEMCPY(&gpe_block->block_address, gpe_block_address,
		    sizeof(struct acpi_generic_address));

	/*
	 * Create the register_info and event_info sub-structures.
	 * Note: disables and clears all GPEs in the block at the
	 * hardware level.
	 */
	status = acpi_ev_create_gpe_info_blocks(gpe_block);
	if (ACPI_FAILURE(status)) {
		ACPI_FREE(gpe_block);
		return_ACPI_STATUS(status);
	}

	/* Install the new block in the global lists */

	status = acpi_ev_install_gpe_block(gpe_block, interrupt_number);
	if (ACPI_FAILURE(status)) {
		ACPI_FREE(gpe_block->register_info);
		ACPI_FREE(gpe_block->event_info);
		ACPI_FREE(gpe_block);
		return_ACPI_STATUS(status);
	}

	/* A new block was added; force re-evaluation of all GPE blocks */

	acpi_gbl_all_gpes_initialized = FALSE;

	/* Find all GPE methods (_Lxx or _Exx) for this block */

	walk_info.gpe_block = gpe_block;
	walk_info.gpe_device = gpe_device;
	walk_info.execute_by_owner_id = FALSE;

	/*
	 * NOTE(review): the status from this walk is deliberately not
	 * checked -- a failure to match GPE methods apparently does not
	 * invalidate the block itself. Confirm against ACPICA upstream.
	 */
	status = acpi_ns_walk_namespace(ACPI_TYPE_METHOD, gpe_device,
					ACPI_UINT32_MAX, ACPI_NS_WALK_NO_UNLOCK,
					acpi_ev_match_gpe_method, NULL,
					&walk_info, NULL);

	/* Return the new block, if requested by the caller */

	if (return_gpe_block) {
		(*return_gpe_block) = gpe_block;
	}

	ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
			      "    Initialized GPE %02X to %02X [%4.4s] %u regs on interrupt 0x%X\n",
			      (u32)gpe_block->block_base_number,
			      (u32)(gpe_block->block_base_number +
				    (gpe_block->gpe_count - 1)),
			      gpe_device->name.ascii, gpe_block->register_count,
			      interrupt_number));

	/* Update the global count of GPEs */

	acpi_current_gpe_count += gpe_block->gpe_count;
	return_ACPI_STATUS(AE_OK);
}
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438acpi_status
439acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
440 struct acpi_gpe_block_info *gpe_block,
441 void *ignored)
442{
443 acpi_status status;
444 struct acpi_gpe_event_info *gpe_event_info;
445 u32 gpe_enabled_count;
446 u32 gpe_index;
447 u32 i;
448 u32 j;
449
450 ACPI_FUNCTION_TRACE(ev_initialize_gpe_block);
451
452
453
454
455
456 if (!gpe_block || gpe_block->initialized) {
457 return_ACPI_STATUS(AE_OK);
458 }
459
460
461
462
463
464
465 gpe_enabled_count = 0;
466
467 for (i = 0; i < gpe_block->register_count; i++) {
468 for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
469
470
471
472 gpe_index = (i * ACPI_GPE_REGISTER_WIDTH) + j;
473 gpe_event_info = &gpe_block->event_info[gpe_index];
474
475
476
477
478
479 if (((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
480 ACPI_GPE_DISPATCH_NONE)
481 || ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)
482 == ACPI_GPE_DISPATCH_HANDLER)
483 || (gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) {
484 continue;
485 }
486
487 status = acpi_ev_add_gpe_reference(gpe_event_info);
488 if (ACPI_FAILURE(status)) {
489 ACPI_EXCEPTION((AE_INFO, status,
490 "Could not enable GPE 0x%02X",
491 gpe_index +
492 gpe_block->block_base_number));
493 continue;
494 }
495
496 gpe_enabled_count++;
497 }
498 }
499
500 if (gpe_enabled_count) {
501 ACPI_INFO((AE_INFO,
502 "Enabled %u GPEs in block %02X to %02X",
503 gpe_enabled_count, (u32)gpe_block->block_base_number,
504 (u32)(gpe_block->block_base_number +
505 (gpe_block->gpe_count - 1))));
506 }
507
508 gpe_block->initialized = TRUE;
509
510 return_ACPI_STATUS(AE_OK);
511}
512
513#endif
514