1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44#include <acpi/acpi.h>
45#include "accommon.h"
46#include "acevents.h"
47#include "acnamesp.h"
48
49#define _COMPONENT ACPI_EVENTS
50ACPI_MODULE_NAME("evgpe")
51#if (!ACPI_REDUCED_HARDWARE)
52
53static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context);
54
55static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context);
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70acpi_status
71acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info)
72{
73 struct acpi_gpe_register_info *gpe_register_info;
74 u32 register_bit;
75
76 ACPI_FUNCTION_TRACE(ev_update_gpe_enable_mask);
77
78 gpe_register_info = gpe_event_info->register_info;
79 if (!gpe_register_info) {
80 return_ACPI_STATUS(AE_NOT_EXIST);
81 }
82
83 register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info);
84
85
86
87 ACPI_CLEAR_BIT(gpe_register_info->enable_for_run, register_bit);
88
89
90
91 if (gpe_event_info->runtime_count) {
92 ACPI_SET_BIT(gpe_register_info->enable_for_run,
93 (u8)register_bit);
94 }
95
96 return_ACPI_STATUS(AE_OK);
97}
98
99
100
101
102
103
104
105
106
107
108
109
110acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
111{
112 acpi_status status;
113
114 ACPI_FUNCTION_TRACE(ev_enable_gpe);
115
116
117
118
119
120
121
122 if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
123 ACPI_GPE_DISPATCH_NONE) {
124 return_ACPI_STATUS(AE_NO_HANDLER);
125 }
126
127
128 status = acpi_hw_clear_gpe(gpe_event_info);
129 if (ACPI_FAILURE(status)) {
130 return_ACPI_STATUS(status);
131 }
132
133
134
135 status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE_SAVE);
136 return_ACPI_STATUS(status);
137}
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153acpi_status
154acpi_ev_mask_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 is_masked)
155{
156 struct acpi_gpe_register_info *gpe_register_info;
157 u32 register_bit;
158
159 ACPI_FUNCTION_TRACE(ev_mask_gpe);
160
161 gpe_register_info = gpe_event_info->register_info;
162 if (!gpe_register_info) {
163 return_ACPI_STATUS(AE_NOT_EXIST);
164 }
165
166 register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info);
167
168
169
170 if (is_masked) {
171 if (register_bit & gpe_register_info->mask_for_run) {
172 return_ACPI_STATUS(AE_BAD_PARAMETER);
173 }
174
175 (void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
176 ACPI_SET_BIT(gpe_register_info->mask_for_run, (u8)register_bit);
177 } else {
178 if (!(register_bit & gpe_register_info->mask_for_run)) {
179 return_ACPI_STATUS(AE_BAD_PARAMETER);
180 }
181
182 ACPI_CLEAR_BIT(gpe_register_info->mask_for_run,
183 (u8)register_bit);
184 if (gpe_event_info->runtime_count
185 && !gpe_event_info->disable_for_dispatch) {
186 (void)acpi_hw_low_set_gpe(gpe_event_info,
187 ACPI_GPE_ENABLE);
188 }
189 }
190
191 return_ACPI_STATUS(AE_OK);
192}
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207acpi_status
208acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info)
209{
210 acpi_status status = AE_OK;
211
212 ACPI_FUNCTION_TRACE(ev_add_gpe_reference);
213
214 if (gpe_event_info->runtime_count == ACPI_UINT8_MAX) {
215 return_ACPI_STATUS(AE_LIMIT);
216 }
217
218 gpe_event_info->runtime_count++;
219 if (gpe_event_info->runtime_count == 1) {
220
221
222
223 status = acpi_ev_update_gpe_enable_mask(gpe_event_info);
224 if (ACPI_SUCCESS(status)) {
225 status = acpi_ev_enable_gpe(gpe_event_info);
226 }
227
228 if (ACPI_FAILURE(status)) {
229 gpe_event_info->runtime_count--;
230 }
231 }
232
233 return_ACPI_STATUS(status);
234}
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249acpi_status
250acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info)
251{
252 acpi_status status = AE_OK;
253
254 ACPI_FUNCTION_TRACE(ev_remove_gpe_reference);
255
256 if (!gpe_event_info->runtime_count) {
257 return_ACPI_STATUS(AE_LIMIT);
258 }
259
260 gpe_event_info->runtime_count--;
261 if (!gpe_event_info->runtime_count) {
262
263
264
265 status = acpi_ev_update_gpe_enable_mask(gpe_event_info);
266 if (ACPI_SUCCESS(status)) {
267 status =
268 acpi_hw_low_set_gpe(gpe_event_info,
269 ACPI_GPE_DISABLE_SAVE);
270 }
271
272 if (ACPI_FAILURE(status)) {
273 gpe_event_info->runtime_count++;
274 }
275 }
276
277 return_ACPI_STATUS(status);
278}
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295struct acpi_gpe_event_info *acpi_ev_low_get_gpe_info(u32 gpe_number,
296 struct acpi_gpe_block_info
297 *gpe_block)
298{
299 u32 gpe_index;
300
301
302
303
304
305 if (!gpe_block || (gpe_number < gpe_block->block_base_number)) {
306 return (NULL);
307 }
308
309 gpe_index = gpe_number - gpe_block->block_base_number;
310 if (gpe_index >= gpe_block->gpe_count) {
311 return (NULL);
312 }
313
314 return (&gpe_block->event_info[gpe_index]);
315}
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
336 u32 gpe_number)
337{
338 union acpi_operand_object *obj_desc;
339 struct acpi_gpe_event_info *gpe_info;
340 u32 i;
341
342 ACPI_FUNCTION_ENTRY();
343
344
345
346 if (!gpe_device) {
347
348
349
350 for (i = 0; i < ACPI_MAX_GPE_BLOCKS; i++) {
351 gpe_info = acpi_ev_low_get_gpe_info(gpe_number,
352 acpi_gbl_gpe_fadt_blocks
353 [i]);
354 if (gpe_info) {
355 return (gpe_info);
356 }
357 }
358
359
360
361 return (NULL);
362 }
363
364
365
366 obj_desc =
367 acpi_ns_get_attached_object((struct acpi_namespace_node *)
368 gpe_device);
369 if (!obj_desc || !obj_desc->device.gpe_block) {
370 return (NULL);
371 }
372
373 return (acpi_ev_low_get_gpe_info
374 (gpe_number, obj_desc->device.gpe_block));
375}
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
/*
 * Scan all GPE blocks attached to the given interrupt level and dispatch
 * every GPE that is both enabled and asserting status. Returns
 * ACPI_INTERRUPT_HANDLED if at least one dispatch reported handling the
 * interrupt, otherwise ACPI_INTERRUPT_NOT_HANDLED.
 * NOTE(review): appears to run in interrupt context (lock + irq flags) —
 * confirm against the registered interrupt handler.
 */
u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
{
	acpi_status status;
	struct acpi_gpe_block_info *gpe_block;
	struct acpi_gpe_register_info *gpe_register_info;
	u32 int_status = ACPI_INTERRUPT_NOT_HANDLED;
	u8 enabled_status_byte;
	u32 status_reg;
	u32 enable_reg;
	acpi_cpu_flags flags;
	u32 i;
	u32 j;

	ACPI_FUNCTION_NAME(ev_gpe_detect);

	/* No GPE blocks at this interrupt level: nothing to do */

	if (!gpe_xrupt_list) {
		return (int_status);
	}

	/* Lock protects both the GPE data structures and register access */

	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);

	/* Walk every GPE block on this interrupt level */

	gpe_block = gpe_xrupt_list->gpe_block_list_head;
	while (gpe_block) {

		/* Examine each 8-bit status/enable register pair in the block */

		for (i = 0; i < gpe_block->register_count; i++) {

			/* Get the next status/enable pair */

			gpe_register_info = &gpe_block->register_info[i];

			/*
			 * Optimization: skip the hardware reads entirely when no
			 * GPE in this register is enabled for run or wake.
			 */
			if (!(gpe_register_info->enable_for_run |
			      gpe_register_info->enable_for_wake)) {
				ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
						  "Ignore disabled registers for GPE%02X-GPE%02X: "
						  "RunEnable=%02X, WakeEnable=%02X\n",
						  gpe_register_info->
						  base_gpe_number,
						  gpe_register_info->
						  base_gpe_number +
						  (ACPI_GPE_REGISTER_WIDTH - 1),
						  gpe_register_info->
						  enable_for_run,
						  gpe_register_info->
						  enable_for_wake));
				continue;
			}

			/* Read the GPE status register */

			status =
			    acpi_hw_read(&status_reg,
					 &gpe_register_info->status_address);
			if (ACPI_FAILURE(status)) {
				goto unlock_and_exit;
			}

			/* Read the GPE enable register */

			status =
			    acpi_hw_read(&enable_reg,
					 &gpe_register_info->enable_address);
			if (ACPI_FAILURE(status)) {
				goto unlock_and_exit;
			}

			ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
					  "Read registers for GPE%02X-GPE%02X: Status=%02X, Enable=%02X, "
					  "RunEnable=%02X, WakeEnable=%02X\n",
					  gpe_register_info->base_gpe_number,
					  gpe_register_info->base_gpe_number +
					  (ACPI_GPE_REGISTER_WIDTH - 1),
					  status_reg, enable_reg,
					  gpe_register_info->enable_for_run,
					  gpe_register_info->enable_for_wake));

			/* A GPE is active only if both status and enable are set */

			enabled_status_byte = (u8) (status_reg & enable_reg);
			if (!enabled_status_byte) {

				/* Nothing active in this register, move on */

				continue;
			}

			/* Dispatch each active bit in this register */

			for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {

				/* Examine one GPE bit */

				if (enabled_status_byte & (1 << j)) {

					/* Active GPE found: dispatch it and merge the result */

					int_status |=
					    acpi_ev_gpe_dispatch(gpe_block->
								 node,
								 &gpe_block->
								 event_info[((acpi_size) i * ACPI_GPE_REGISTER_WIDTH) + j], j + gpe_register_info->base_gpe_number);
				}
			}
		}

		gpe_block = gpe_block->next;
	}

      unlock_and_exit:

	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
	return (int_status);
}
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
538{
539 struct acpi_gpe_event_info *gpe_event_info = context;
540 acpi_status status;
541 struct acpi_gpe_event_info *local_gpe_event_info;
542 struct acpi_evaluate_info *info;
543 struct acpi_gpe_notify_info *notify;
544
545 ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method);
546
547
548
549 local_gpe_event_info =
550 ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_event_info));
551 if (!local_gpe_event_info) {
552 ACPI_EXCEPTION((AE_INFO, AE_NO_MEMORY, "while handling a GPE"));
553 return_VOID;
554 }
555
556 status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
557 if (ACPI_FAILURE(status)) {
558 ACPI_FREE(local_gpe_event_info);
559 return_VOID;
560 }
561
562
563
564 if (!acpi_ev_valid_gpe_event(gpe_event_info)) {
565 status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
566 ACPI_FREE(local_gpe_event_info);
567 return_VOID;
568 }
569
570
571
572
573
574 memcpy(local_gpe_event_info, gpe_event_info,
575 sizeof(struct acpi_gpe_event_info));
576
577 status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
578 if (ACPI_FAILURE(status)) {
579 return_VOID;
580 }
581
582
583
584 switch (ACPI_GPE_DISPATCH_TYPE(local_gpe_event_info->flags)) {
585 case ACPI_GPE_DISPATCH_NOTIFY:
586
587
588
589
590
591
592
593
594
595
596
597 notify = local_gpe_event_info->dispatch.notify_list;
598 while (ACPI_SUCCESS(status) && notify) {
599 status =
600 acpi_ev_queue_notify_request(notify->device_node,
601 ACPI_NOTIFY_DEVICE_WAKE);
602
603 notify = notify->next;
604 }
605
606 break;
607
608 case ACPI_GPE_DISPATCH_METHOD:
609
610
611
612 info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
613 if (!info) {
614 status = AE_NO_MEMORY;
615 } else {
616
617
618
619
620 info->prefix_node =
621 local_gpe_event_info->dispatch.method_node;
622 info->flags = ACPI_IGNORE_RETURN_VALUE;
623
624 status = acpi_ns_evaluate(info);
625 ACPI_FREE(info);
626 }
627
628 if (ACPI_FAILURE(status)) {
629 ACPI_EXCEPTION((AE_INFO, status,
630 "while evaluating GPE method [%4.4s]",
631 acpi_ut_get_node_name
632 (local_gpe_event_info->dispatch.
633 method_node)));
634 }
635 break;
636
637 default:
638
639 return_VOID;
640 }
641
642
643
644 status = acpi_os_execute(OSL_NOTIFY_HANDLER,
645 acpi_ev_asynch_enable_gpe,
646 local_gpe_event_info);
647 if (ACPI_FAILURE(status)) {
648 ACPI_FREE(local_gpe_event_info);
649 }
650 return_VOID;
651}
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context)
669{
670 struct acpi_gpe_event_info *gpe_event_info = context;
671
672 (void)acpi_ev_finish_gpe(gpe_event_info);
673
674 ACPI_FREE(gpe_event_info);
675 return;
676}
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692acpi_status acpi_ev_finish_gpe(struct acpi_gpe_event_info *gpe_event_info)
693{
694 acpi_status status;
695
696 if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
697 ACPI_GPE_LEVEL_TRIGGERED) {
698
699
700
701
702 status = acpi_hw_clear_gpe(gpe_event_info);
703 if (ACPI_FAILURE(status)) {
704 return (status);
705 }
706 }
707
708
709
710
711
712
713 (void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_CONDITIONAL_ENABLE);
714 gpe_event_info->disable_for_dispatch = FALSE;
715 return (AE_OK);
716}
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
/*
 * Dispatch one fired GPE to its installed handler, control method, or
 * implicit-notify list. Clears edge-triggered status, disables the GPE for
 * the duration of the dispatch, and returns ACPI_INTERRUPT_HANDLED or
 * ACPI_INTERRUPT_NOT_HANDLED.
 * NOTE(review): caller appears to hold acpi_gbl_gpe_lock (see
 * acpi_ev_gpe_detect) — confirm before adding locking here.
 */
u32
acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
		     struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
{
	acpi_status status;
	u32 return_value;

	ACPI_FUNCTION_TRACE(ev_gpe_dispatch);

	/* Count the event and invoke the global event handler if installed */

	acpi_gpe_count++;
	if (acpi_gbl_global_event_handler) {
		acpi_gbl_global_event_handler(ACPI_EVENT_TYPE_GPE, gpe_device,
					      gpe_number,
					      acpi_gbl_global_event_handler_context);
	}

	/*
	 * Edge-triggered GPEs: clear the status bit now, before dispatch,
	 * so a new edge can be latched while this one is processed.
	 */
	if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
	    ACPI_GPE_EDGE_TRIGGERED) {
		status = acpi_hw_clear_gpe(gpe_event_info);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Unable to clear GPE%02X", gpe_number));
			return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
		}
	}

	/*
	 * Disable the GPE for the duration of the dispatch so it cannot
	 * keep firing; it is re-enabled later via acpi_ev_finish_gpe.
	 */
	status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status,
				"Unable to disable GPE%02X", gpe_number));
		return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
	}

	gpe_event_info->disable_for_dispatch = TRUE;

	/* Dispatch according to the installed dispatch type */

	switch (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags)) {
	case ACPI_GPE_DISPATCH_HANDLER:

		/* Installed handler runs synchronously, right here */

		return_value =
		    gpe_event_info->dispatch.handler->address(gpe_device,
							      gpe_number,
							      gpe_event_info->
							      dispatch.handler->
							      context);

		/* Handler may request immediate re-enable of the GPE */

		if (return_value & ACPI_REENABLE_GPE) {
			(void)acpi_ev_finish_gpe(gpe_event_info);
		}
		break;

	case ACPI_GPE_DISPATCH_METHOD:
	case ACPI_GPE_DISPATCH_NOTIFY:
		/*
		 * Method or notify dispatch runs later, in a deferred
		 * worker thread (acpi_ev_asynch_execute_gpe_method).
		 */
		status = acpi_os_execute(OSL_GPE_HANDLER,
					 acpi_ev_asynch_execute_gpe_method,
					 gpe_event_info);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Unable to queue handler for GPE%02X - event disabled",
					gpe_number));
		}
		break;

	default:
		/*
		 * No handler or method installed; the GPE stays disabled
		 * (it was disabled above and is never re-enabled here).
		 */
		ACPI_ERROR((AE_INFO,
			    "No handler or method for GPE%02X, disabling event",
			    gpe_number));

		break;
	}

	return_UINT32(ACPI_INTERRUPT_HANDLED);
}
843
844#endif
845