1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44#include <acpi/acpi.h>
45#include "accommon.h"
46#include "acevents.h"
47#include "acnamesp.h"
48
49#define _COMPONENT ACPI_EVENTS
50ACPI_MODULE_NAME("evgpe")
51
52
53static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context);
54
55static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context);
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70acpi_status
71acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info)
72{
73 struct acpi_gpe_register_info *gpe_register_info;
74 u32 register_bit;
75
76 ACPI_FUNCTION_TRACE(ev_update_gpe_enable_mask);
77
78 gpe_register_info = gpe_event_info->register_info;
79 if (!gpe_register_info) {
80 return_ACPI_STATUS(AE_NOT_EXIST);
81 }
82
83 register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info,
84 gpe_register_info);
85
86
87
88 ACPI_CLEAR_BIT(gpe_register_info->enable_for_run, register_bit);
89
90
91
92 if (gpe_event_info->runtime_count) {
93 ACPI_SET_BIT(gpe_register_info->enable_for_run, (u8)register_bit);
94 }
95
96 return_ACPI_STATUS(AE_OK);
97}
98
99
100
101
102
103
104
105
106
107
108
109
110acpi_status
111acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
112{
113 acpi_status status;
114
115 ACPI_FUNCTION_TRACE(ev_enable_gpe);
116
117
118
119
120
121
122
123 if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
124 ACPI_GPE_DISPATCH_NONE) {
125 return_ACPI_STATUS(AE_NO_HANDLER);
126 }
127
128
129 status = acpi_hw_clear_gpe(gpe_event_info);
130 if (ACPI_FAILURE(status)) {
131 return_ACPI_STATUS(status);
132 }
133
134
135 status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE);
136
137 return_ACPI_STATUS(status);
138}
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154acpi_status acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info)
155{
156 acpi_status status = AE_OK;
157
158 ACPI_FUNCTION_TRACE(ev_add_gpe_reference);
159
160 if (gpe_event_info->runtime_count == ACPI_UINT8_MAX) {
161 return_ACPI_STATUS(AE_LIMIT);
162 }
163
164 gpe_event_info->runtime_count++;
165 if (gpe_event_info->runtime_count == 1) {
166
167
168
169 status = acpi_ev_update_gpe_enable_mask(gpe_event_info);
170 if (ACPI_SUCCESS(status)) {
171 status = acpi_ev_enable_gpe(gpe_event_info);
172 }
173
174 if (ACPI_FAILURE(status)) {
175 gpe_event_info->runtime_count--;
176 }
177 }
178
179 return_ACPI_STATUS(status);
180}
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195acpi_status acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info)
196{
197 acpi_status status = AE_OK;
198
199 ACPI_FUNCTION_TRACE(ev_remove_gpe_reference);
200
201 if (!gpe_event_info->runtime_count) {
202 return_ACPI_STATUS(AE_LIMIT);
203 }
204
205 gpe_event_info->runtime_count--;
206 if (!gpe_event_info->runtime_count) {
207
208
209
210 status = acpi_ev_update_gpe_enable_mask(gpe_event_info);
211 if (ACPI_SUCCESS(status)) {
212 status = acpi_hw_low_set_gpe(gpe_event_info,
213 ACPI_GPE_DISABLE);
214 }
215
216 if (ACPI_FAILURE(status)) {
217 gpe_event_info->runtime_count++;
218 }
219 }
220
221 return_ACPI_STATUS(status);
222}
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239struct acpi_gpe_event_info *acpi_ev_low_get_gpe_info(u32 gpe_number,
240 struct acpi_gpe_block_info
241 *gpe_block)
242{
243 u32 gpe_index;
244
245
246
247
248
249 if (!gpe_block || (gpe_number < gpe_block->block_base_number)) {
250 return (NULL);
251 }
252
253 gpe_index = gpe_number - gpe_block->block_base_number;
254 if (gpe_index >= gpe_block->gpe_count) {
255 return (NULL);
256 }
257
258 return (&gpe_block->event_info[gpe_index]);
259}
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
280 u32 gpe_number)
281{
282 union acpi_operand_object *obj_desc;
283 struct acpi_gpe_event_info *gpe_info;
284 u32 i;
285
286 ACPI_FUNCTION_ENTRY();
287
288
289
290 if (!gpe_device) {
291
292
293
294 for (i = 0; i < ACPI_MAX_GPE_BLOCKS; i++) {
295 gpe_info = acpi_ev_low_get_gpe_info(gpe_number,
296 acpi_gbl_gpe_fadt_blocks
297 [i]);
298 if (gpe_info) {
299 return (gpe_info);
300 }
301 }
302
303
304
305 return (NULL);
306 }
307
308
309
310 obj_desc = acpi_ns_get_attached_object((struct acpi_namespace_node *)
311 gpe_device);
312 if (!obj_desc || !obj_desc->device.gpe_block) {
313 return (NULL);
314 }
315
316 return (acpi_ev_low_get_gpe_info
317 (gpe_number, obj_desc->device.gpe_block));
318}
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
/*
 * acpi_ev_gpe_detect - GPE interrupt-level detection loop.
 *
 * gpe_xrupt_list - head of the GPE blocks attached to this interrupt level
 *
 * Returns ACPI_INTERRUPT_HANDLED if at least one active GPE was dispatched,
 * ACPI_INTERRUPT_NOT_HANDLED otherwise. Walks every register pair in every
 * block, ANDs status with enable, and dispatches each set bit via
 * acpi_ev_gpe_dispatch. Runs at interrupt level under acpi_gbl_gpe_lock.
 */
u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
{
	acpi_status status;
	struct acpi_gpe_block_info *gpe_block;
	struct acpi_gpe_register_info *gpe_register_info;
	u32 int_status = ACPI_INTERRUPT_NOT_HANDLED;
	u8 enabled_status_byte;
	u32 status_reg;
	u32 enable_reg;
	acpi_cpu_flags flags;
	u32 i;
	u32 j;

	ACPI_FUNCTION_NAME(ev_gpe_detect);

	/* Check for the case where there are no GPEs at this level */

	if (!gpe_xrupt_list) {
		return (int_status);
	}

	/*
	 * Take the GPE lock to protect both the block list and the GPE
	 * registers from concurrent modification while we scan.
	 */
	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);

	/* Examine all GPE blocks attached to this interrupt level */

	gpe_block = gpe_xrupt_list->gpe_block_list_head;
	while (gpe_block) {

		/*
		 * Read each 8-bit status/enable register pair in this block
		 * to find all currently active GP events.
		 */
		for (i = 0; i < gpe_block->register_count; i++) {

			/* Get the next status/enable register pair */

			gpe_register_info = &gpe_block->register_info[i];

			/* Read the Status register */

			status =
			    acpi_hw_read(&status_reg,
					 &gpe_register_info->status_address);
			if (ACPI_FAILURE(status)) {
				goto unlock_and_exit;
			}

			/* Read the Enable register */

			status =
			    acpi_hw_read(&enable_reg,
					 &gpe_register_info->enable_address);
			if (ACPI_FAILURE(status)) {
				goto unlock_and_exit;
			}

			ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
					  "Read GPE Register at GPE%02X: Status=%02X, Enable=%02X\n",
					  gpe_register_info->base_gpe_number,
					  status_reg, enable_reg));

			/* A GPE is active only when both status and enable are set */

			enabled_status_byte = (u8) (status_reg & enable_reg);
			if (!enabled_status_byte) {

				/* Nothing active in this register, move on */

				continue;
			}

			/* Examine each bit (GPE) within this byte register */

			for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {

				/* Found an active GPE? Dispatch it */

				if (enabled_status_byte & (1 << j)) {

					/*
					 * Dispatch the event to a handler or
					 * method; accumulate the handled status.
					 */
					int_status |=
					    acpi_ev_gpe_dispatch(gpe_block->
								 node,
								 &gpe_block->
								 event_info[((acpi_size) i * ACPI_GPE_REGISTER_WIDTH) + j], j + gpe_register_info->base_gpe_number);
				}
			}
		}

		gpe_block = gpe_block->next;
	}

      unlock_and_exit:

	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
	return (int_status);
}
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
455{
456 struct acpi_gpe_event_info *gpe_event_info = context;
457 acpi_status status;
458 struct acpi_gpe_event_info *local_gpe_event_info;
459 struct acpi_evaluate_info *info;
460 struct acpi_gpe_notify_object *notify_object;
461
462 ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method);
463
464
465
466 local_gpe_event_info =
467 ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_event_info));
468 if (!local_gpe_event_info) {
469 ACPI_EXCEPTION((AE_INFO, AE_NO_MEMORY, "while handling a GPE"));
470 return_VOID;
471 }
472
473 status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
474 if (ACPI_FAILURE(status)) {
475 ACPI_FREE(local_gpe_event_info);
476 return_VOID;
477 }
478
479
480
481 if (!acpi_ev_valid_gpe_event(gpe_event_info)) {
482 status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
483 ACPI_FREE(local_gpe_event_info);
484 return_VOID;
485 }
486
487
488
489
490
491 ACPI_MEMCPY(local_gpe_event_info, gpe_event_info,
492 sizeof(struct acpi_gpe_event_info));
493
494 status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
495 if (ACPI_FAILURE(status)) {
496 return_VOID;
497 }
498
499
500
501 switch (local_gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) {
502 case ACPI_GPE_DISPATCH_NOTIFY:
503
504
505
506
507
508
509
510
511
512 status = acpi_ev_queue_notify_request(
513 local_gpe_event_info->dispatch.device.node,
514 ACPI_NOTIFY_DEVICE_WAKE);
515
516 notify_object = local_gpe_event_info->dispatch.device.next;
517 while (ACPI_SUCCESS(status) && notify_object) {
518 status = acpi_ev_queue_notify_request(
519 notify_object->node,
520 ACPI_NOTIFY_DEVICE_WAKE);
521 notify_object = notify_object->next;
522 }
523
524 break;
525
526 case ACPI_GPE_DISPATCH_METHOD:
527
528
529
530 info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
531 if (!info) {
532 status = AE_NO_MEMORY;
533 } else {
534
535
536
537
538 info->prefix_node =
539 local_gpe_event_info->dispatch.method_node;
540 info->flags = ACPI_IGNORE_RETURN_VALUE;
541
542 status = acpi_ns_evaluate(info);
543 ACPI_FREE(info);
544 }
545
546 if (ACPI_FAILURE(status)) {
547 ACPI_EXCEPTION((AE_INFO, status,
548 "while evaluating GPE method [%4.4s]",
549 acpi_ut_get_node_name
550 (local_gpe_event_info->dispatch.
551 method_node)));
552 }
553
554 break;
555
556 default:
557 return_VOID;
558 }
559
560
561
562 status = acpi_os_execute(OSL_NOTIFY_HANDLER,
563 acpi_ev_asynch_enable_gpe,
564 local_gpe_event_info);
565 if (ACPI_FAILURE(status)) {
566 ACPI_FREE(local_gpe_event_info);
567 }
568 return_VOID;
569}
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context)
587{
588 struct acpi_gpe_event_info *gpe_event_info = context;
589
590 (void)acpi_ev_finish_gpe(gpe_event_info);
591
592 ACPI_FREE(gpe_event_info);
593 return;
594}
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610acpi_status acpi_ev_finish_gpe(struct acpi_gpe_event_info *gpe_event_info)
611{
612 acpi_status status;
613
614 if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
615 ACPI_GPE_LEVEL_TRIGGERED) {
616
617
618
619
620 status = acpi_hw_clear_gpe(gpe_event_info);
621 if (ACPI_FAILURE(status)) {
622 return (status);
623 }
624 }
625
626
627
628
629
630
631 (void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_CONDITIONAL_ENABLE);
632 return (AE_OK);
633}
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653u32
654acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
655 struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
656{
657 acpi_status status;
658 u32 return_value;
659
660 ACPI_FUNCTION_TRACE(ev_gpe_dispatch);
661
662
663
664 acpi_gpe_count++;
665 if (acpi_gbl_global_event_handler) {
666 acpi_gbl_global_event_handler(ACPI_EVENT_TYPE_GPE, gpe_device,
667 gpe_number,
668 acpi_gbl_global_event_handler_context);
669 }
670
671
672
673
674
675 if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
676 ACPI_GPE_EDGE_TRIGGERED) {
677 status = acpi_hw_clear_gpe(gpe_event_info);
678 if (ACPI_FAILURE(status)) {
679 ACPI_EXCEPTION((AE_INFO, status,
680 "Unable to clear GPE%02X", gpe_number));
681 return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
682 }
683 }
684
685
686
687
688
689
690
691
692
693
694 status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
695 if (ACPI_FAILURE(status)) {
696 ACPI_EXCEPTION((AE_INFO, status,
697 "Unable to disable GPE%02X", gpe_number));
698 return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
699 }
700
701
702
703
704
705
706
707
708 switch (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) {
709 case ACPI_GPE_DISPATCH_HANDLER:
710
711
712
713 return_value =
714 gpe_event_info->dispatch.handler->address(gpe_device,
715 gpe_number,
716 gpe_event_info->
717 dispatch.handler->
718 context);
719
720
721
722 if (return_value & ACPI_REENABLE_GPE) {
723 (void)acpi_ev_finish_gpe(gpe_event_info);
724 }
725 break;
726
727 case ACPI_GPE_DISPATCH_METHOD:
728 case ACPI_GPE_DISPATCH_NOTIFY:
729
730
731
732
733
734 status = acpi_os_execute(OSL_GPE_HANDLER,
735 acpi_ev_asynch_execute_gpe_method,
736 gpe_event_info);
737 if (ACPI_FAILURE(status)) {
738 ACPI_EXCEPTION((AE_INFO, status,
739 "Unable to queue handler for GPE%2X - event disabled",
740 gpe_number));
741 }
742 break;
743
744 default:
745
746
747
748
749
750
751 ACPI_ERROR((AE_INFO,
752 "No handler or method for GPE%02X, disabling event",
753 gpe_number));
754
755 break;
756 }
757
758 return_UINT32(ACPI_INTERRUPT_HANDLED);
759}
760