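/******************************************************************************
 *
 * Module Name: evgpe - General Purpose Event handling and dispatch
 *
 *****************************************************************************/
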
#include <acpi/acpi.h>
#include "accommon.h"
#include "acevents.h"
#include "acnamesp.h"

#define _COMPONENT          ACPI_EVENTS
ACPI_MODULE_NAME("evgpe")
#if (!ACPI_REDUCED_HARDWARE)	/* Entire module */
/* Local prototypes */
static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context);

static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context);
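
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_update_gpe_enable_mask
 *
 * PARAMETERS:  gpe_event_info          - GPE to update
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Updates GPE register enable mask based upon whether there are
 *              runtime references to this GPE
 *
 ******************************************************************************/
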
acpi_status
acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info)
{
	struct acpi_gpe_register_info *gpe_register_info;
	u32 register_bit;

	ACPI_FUNCTION_TRACE(ev_update_gpe_enable_mask);

	gpe_register_info = gpe_event_info->register_info;
	if (!gpe_register_info) {
		return_ACPI_STATUS(AE_NOT_EXIST);
	}

	register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info);

	/* Clear the run bit up front */

	ACPI_CLEAR_BIT(gpe_register_info->enable_for_run, register_bit);

	/* Set the mask bit only if there are references to this GPE */

	if (gpe_event_info->runtime_count) {
		ACPI_SET_BIT(gpe_register_info->enable_for_run,
			     (u8)register_bit);
	}

	gpe_register_info->enable_mask = gpe_register_info->enable_for_run;
	return_ACPI_STATUS(AE_OK);
}
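
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_enable_gpe
 *
 * PARAMETERS:  gpe_event_info          - GPE to enable
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Enable a GPE: clear any stale events, then enable the GPE in
 *              the hardware. Does not manage reference counts.
 *
 ******************************************************************************/
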
acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
{
	acpi_status status;

	ACPI_FUNCTION_TRACE(ev_enable_gpe);

	/* Clear the GPE (of stale events) */

	status = acpi_hw_clear_gpe(gpe_event_info);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Enable the requested GPE */

	status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE);
	return_ACPI_STATUS(status);
}
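
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_mask_gpe
 *
 * PARAMETERS:  gpe_event_info          - GPE to be masked/unmasked
 *              is_masked               - Whether the GPE is masked or not
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Unconditionally mask/unmask a GPE during runtime.
 *
 ******************************************************************************/
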
acpi_status
acpi_ev_mask_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 is_masked)
{
	struct acpi_gpe_register_info *gpe_register_info;
	u32 register_bit;

	ACPI_FUNCTION_TRACE(ev_mask_gpe);

	gpe_register_info = gpe_event_info->register_info;
	if (!gpe_register_info) {
		return_ACPI_STATUS(AE_NOT_EXIST);
	}

	register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info);

	/* Perform the action */

	if (is_masked) {
		if (register_bit & gpe_register_info->mask_for_run) {
			return_ACPI_STATUS(AE_BAD_PARAMETER);
		}

		(void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
		ACPI_SET_BIT(gpe_register_info->mask_for_run, (u8)register_bit);
	} else {
		if (!(register_bit & gpe_register_info->mask_for_run)) {
			return_ACPI_STATUS(AE_BAD_PARAMETER);
		}

		ACPI_CLEAR_BIT(gpe_register_info->mask_for_run,
			       (u8)register_bit);
		if (gpe_event_info->runtime_count &&
		    !gpe_event_info->disable_for_dispatch) {
			(void)acpi_hw_low_set_gpe(gpe_event_info,
						  ACPI_GPE_ENABLE);
		}
	}

	return_ACPI_STATUS(AE_OK);
}
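
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_add_gpe_reference
 *
 * PARAMETERS:  gpe_event_info          - Add a reference to this GPE
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Add a reference to a GPE. On the first reference, the GPE is
 *              hardware-enabled.
 *
 ******************************************************************************/
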
acpi_status
acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info)
{
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE(ev_add_gpe_reference);

	if (gpe_event_info->runtime_count == ACPI_UINT8_MAX) {
		return_ACPI_STATUS(AE_LIMIT);
	}

	gpe_event_info->runtime_count++;
	if (gpe_event_info->runtime_count == 1) {

		/* Enable on this first reference */

		status = acpi_ev_update_gpe_enable_mask(gpe_event_info);
		if (ACPI_SUCCESS(status)) {
			status = acpi_ev_enable_gpe(gpe_event_info);
		}

		if (ACPI_FAILURE(status)) {
			gpe_event_info->runtime_count--;
		}
	}

	return_ACPI_STATUS(status);
}
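
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_remove_gpe_reference
 *
 * PARAMETERS:  gpe_event_info          - Remove a reference to this GPE
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Remove a reference to a GPE. When the last reference is
 *              removed, the GPE is hardware-disabled.
 *
 ******************************************************************************/
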
acpi_status
acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info)
{
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE(ev_remove_gpe_reference);

	if (!gpe_event_info->runtime_count) {
		return_ACPI_STATUS(AE_LIMIT);
	}

	gpe_event_info->runtime_count--;
	if (!gpe_event_info->runtime_count) {

		/* Disable on this last reference */

		status = acpi_ev_update_gpe_enable_mask(gpe_event_info);
		if (ACPI_SUCCESS(status)) {
			status =
			    acpi_hw_low_set_gpe(gpe_event_info,
						ACPI_GPE_DISABLE);
		}

		if (ACPI_FAILURE(status)) {
			gpe_event_info->runtime_count++;
		}
	}

	return_ACPI_STATUS(status);
}
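
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_low_get_gpe_info
 *
 * PARAMETERS:  gpe_number          - Raw GPE number
 *              gpe_block           - A GPE info block
 *
 * RETURN:      A GPE event_info struct. NULL if not a valid GPE (The gpe_number
 *              is not within the specified GPE block)
 *
 * DESCRIPTION: Returns the event_info struct associated with this GPE if the
 *              GPE number is contained within the specified GPE block.
 *
 ******************************************************************************/
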
struct acpi_gpe_event_info *acpi_ev_low_get_gpe_info(u32 gpe_number,
						     struct acpi_gpe_block_info
						     *gpe_block)
{
	u32 gpe_index;

	/*
	 * Validate that the gpe_number is within the specified gpe_block.
	 * (Two steps)
	 */
	if (!gpe_block || (gpe_number < gpe_block->block_base_number)) {
		return (NULL);
	}

	gpe_index = gpe_number - gpe_block->block_base_number;
	if (gpe_index >= gpe_block->gpe_count) {
		return (NULL);
	}

	return (&gpe_block->event_info[gpe_index]);
}
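
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_get_gpe_event_info
 *
 * PARAMETERS:  gpe_device          - Device node. NULL for GPE0/GPE1
 *              gpe_number          - Raw GPE number
 *
 * RETURN:      A GPE event_info struct. NULL if not a valid GPE
 *
 * DESCRIPTION: Returns the event_info struct associated with this GPE.
 *              Validates the gpe_block and the gpe_number.
 *
 *              Should be called only when the GPE lists are semaphore locked
 *              and not subject to change.
 *
 ******************************************************************************/
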
struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
						       u32 gpe_number)
{
	union acpi_operand_object *obj_desc;
	struct acpi_gpe_event_info *gpe_info;
	u32 i;

	ACPI_FUNCTION_ENTRY();

	/* A NULL gpe_device means use the FADT-defined GPE block(s) */

	if (!gpe_device) {

		/* Examine GPE Block 0 and 1 (These blocks are permanent) */

		for (i = 0; i < ACPI_MAX_GPE_BLOCKS; i++) {
			gpe_info = acpi_ev_low_get_gpe_info(gpe_number,
							    acpi_gbl_gpe_fadt_blocks
							    [i]);
			if (gpe_info) {
				return (gpe_info);
			}
		}

		/* The gpe_number was not in the range of either FADT GPE block */

		return (NULL);
	}

	/* A Non-NULL gpe_device means this is a GPE Block Device */

	obj_desc =
	    acpi_ns_get_attached_object((struct acpi_namespace_node *)
					gpe_device);
	if (!obj_desc || !obj_desc->device.gpe_block) {
		return (NULL);
	}

	return (acpi_ev_low_get_gpe_info
		(gpe_number, obj_desc->device.gpe_block));
}
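
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_gpe_detect
 *
 * PARAMETERS:  gpe_xrupt_list      - Interrupt block for this interrupt.
 *                                    Can have multiple GPE blocks attached.
 *
 * RETURN:      INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
 *
 * DESCRIPTION: Detect if any GP events have occurred. This function is
 *              executed at interrupt level.
 *
 ******************************************************************************/
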
u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list)
{
	acpi_status status;
	struct acpi_gpe_block_info *gpe_block;
	struct acpi_namespace_node *gpe_device;
	struct acpi_gpe_register_info *gpe_register_info;
	struct acpi_gpe_event_info *gpe_event_info;
	u32 gpe_number;
	struct acpi_gpe_handler_info *gpe_handler_info;
	u32 int_status = ACPI_INTERRUPT_NOT_HANDLED;
	u8 enabled_status_byte;
	u64 status_reg;
	u64 enable_reg;
	acpi_cpu_flags flags;
	u32 i;
	u32 j;

	ACPI_FUNCTION_NAME(ev_gpe_detect);

	/* Check for the case where there are no GPEs */

	if (!gpe_xrupt_list) {
		return (int_status);
	}

	/*
	 * We need to obtain the GPE lock for both the data structs and registers
	 * Note: Not necessary to obtain the hardware lock, since the GPE
	 * registers are owned by the gpe_lock.
	 */
	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);

	/* Examine all GPE blocks attached to this interrupt level */

	gpe_block = gpe_xrupt_list->gpe_block_list_head;
	while (gpe_block) {
		gpe_device = gpe_block->node;

		/*
		 * Read all of the 8-bit GPE status and enable registers in this GPE
		 * block, saving all of them. Find all currently active GP events.
		 */
		for (i = 0; i < gpe_block->register_count; i++) {

			/* Get the next status/enable pair */

			gpe_register_info = &gpe_block->register_info[i];

			/*
			 * Optimization: If there are no GPEs enabled within this
			 * register, we can safely ignore the entire register.
			 */
			if (!(gpe_register_info->enable_for_run |
			      gpe_register_info->enable_for_wake)) {
				ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
						  "Ignore disabled registers for GPE %02X-%02X: "
						  "RunEnable=%02X, WakeEnable=%02X\n",
						  gpe_register_info->
						  base_gpe_number,
						  gpe_register_info->
						  base_gpe_number +
						  (ACPI_GPE_REGISTER_WIDTH - 1),
						  gpe_register_info->
						  enable_for_run,
						  gpe_register_info->
						  enable_for_wake));
				continue;
			}

			/* Read the Status Register */

			status =
			    acpi_hw_read(&status_reg,
					 &gpe_register_info->status_address);
			if (ACPI_FAILURE(status)) {
				goto unlock_and_exit;
			}

			/* Read the Enable Register */

			status =
			    acpi_hw_read(&enable_reg,
					 &gpe_register_info->enable_address);
			if (ACPI_FAILURE(status)) {
				goto unlock_and_exit;
			}

			ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
					  "Read registers for GPE %02X-%02X: Status=%02X, Enable=%02X, "
					  "RunEnable=%02X, WakeEnable=%02X\n",
					  gpe_register_info->base_gpe_number,
					  gpe_register_info->base_gpe_number +
					  (ACPI_GPE_REGISTER_WIDTH - 1),
					  (u32)status_reg, (u32)enable_reg,
					  gpe_register_info->enable_for_run,
					  gpe_register_info->enable_for_wake));

			/* Check if there is anything active at all in this register */

			enabled_status_byte = (u8)(status_reg & enable_reg);
			if (!enabled_status_byte) {

				/* No active GPEs in this register, move on */

				continue;
			}

			/* Now look at the individual GPEs in this byte register */

			for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {

				/* Examine one GPE bit */

				gpe_event_info =
				    &gpe_block->
				    event_info[((acpi_size)i *
						ACPI_GPE_REGISTER_WIDTH) + j];
				gpe_number =
				    j + gpe_register_info->base_gpe_number;

				if (enabled_status_byte & (1 << j)) {

					/* Invoke global event handler if present */

					acpi_gpe_count++;
					if (acpi_gbl_global_event_handler) {
						acpi_gbl_global_event_handler
						    (ACPI_EVENT_TYPE_GPE,
						     gpe_device, gpe_number,
						     acpi_gbl_global_event_handler_context);
					}

					/* Found an active GPE */

					if (ACPI_GPE_DISPATCH_TYPE
					    (gpe_event_info->flags) ==
					    ACPI_GPE_DISPATCH_RAW_HANDLER) {

						/* Dispatch the event to a raw handler */

						gpe_handler_info =
						    gpe_event_info->dispatch.
						    handler;

						/*
						 * There is no protection around the namespace node
						 * and the GPE handler to ensure a safe destruction
						 * because:
						 * 1. The namespace node is expected to always
						 *    exist after loading a table.
						 * 2. The GPE handler is expected to be flushed by
						 *    acpi_os_wait_events_complete() before the
						 *    destruction.
						 */
						acpi_os_release_lock
						    (acpi_gbl_gpe_lock, flags);
						int_status |=
						    gpe_handler_info->
						    address(gpe_device,
							    gpe_number,
							    gpe_handler_info->
							    context);
						flags =
						    acpi_os_acquire_lock
						    (acpi_gbl_gpe_lock);
					} else {
						/*
						 * Dispatch the event to a standard handler or
						 * method.
						 */
						int_status |=
						    acpi_ev_gpe_dispatch
						    (gpe_device, gpe_event_info,
						     gpe_number);
					}
				}
			}
		}

		gpe_block = gpe_block->next;
	}

unlock_and_exit:

	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
	return (int_status);
}
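
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_asynch_execute_gpe_method
 *
 * PARAMETERS:  context (gpe_event_info) - Info for this GPE
 *
 * RETURN:      None
 *
 * DESCRIPTION: Perform the actual execution of a GPE control method. This
 *              function is called from an invocation of acpi_os_execute and
 *              therefore does NOT execute at interrupt level - so that
 *              the control method itself is not executed in the context of
 *              an interrupt handler.
 *
 ******************************************************************************/
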
static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
{
	struct acpi_gpe_event_info *gpe_event_info = context;
	acpi_status status = AE_OK;
	struct acpi_evaluate_info *info;
	struct acpi_gpe_notify_info *notify;

	ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method);

	/* Do the correct dispatch - normal method or implicit notify */

	switch (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags)) {
	case ACPI_GPE_DISPATCH_NOTIFY:
		/*
		 * Implicit notify: dispatch a DEVICE_WAKE notify to each device
		 * on the notify list for this GPE.
		 *
		 * NOTE: the request is queued for execution after this method
		 * completes. The notify handlers are NOT invoked synchronously
		 * from this thread -- because handlers may in turn run other
		 * control methods.
		 */
		notify = gpe_event_info->dispatch.notify_list;
		while (ACPI_SUCCESS(status) && notify) {
			status =
			    acpi_ev_queue_notify_request(notify->device_node,
							 ACPI_NOTIFY_DEVICE_WAKE);

			notify = notify->next;
		}

		break;

	case ACPI_GPE_DISPATCH_METHOD:

		/* Allocate the evaluation information block */

		info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
		if (!info) {
			status = AE_NO_MEMORY;
		} else {
			/*
			 * Invoke the GPE Method (_Lxx, _Exx), i.e., evaluate the
			 * _Lxx/_Exx control method that corresponds to this GPE
			 */
			info->prefix_node =
			    gpe_event_info->dispatch.method_node;
			info->flags = ACPI_IGNORE_RETURN_VALUE;

			status = acpi_ns_evaluate(info);
			ACPI_FREE(info);
		}

		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"while evaluating GPE method [%4.4s]",
					acpi_ut_get_node_name(gpe_event_info->
							      dispatch.
							      method_node)));
		}
		break;

	default:

		goto error_exit;	/* Should never happen */
	}

	/* Defer enabling of GPE until all notify handlers are done */

	status = acpi_os_execute(OSL_NOTIFY_HANDLER,
				 acpi_ev_asynch_enable_gpe, gpe_event_info);
	if (ACPI_SUCCESS(status)) {
		return_VOID;
	}

error_exit:
	acpi_ev_asynch_enable_gpe(gpe_event_info);
	return_VOID;
}
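
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_asynch_enable_gpe
 *
 * PARAMETERS:  context (gpe_event_info) - Info for this GPE
 *
 * RETURN:      None
 *
 * DESCRIPTION: Asynchronous clear/enable for GPE. Invoked after all notify
 *              handlers have completed, with the GPE lock held, to re-enable
 *              the GPE via acpi_ev_finish_gpe.
 *
 ******************************************************************************/
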
static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context)
{
	struct acpi_gpe_event_info *gpe_event_info = context;
	acpi_cpu_flags flags;

	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
	(void)acpi_ev_finish_gpe(gpe_event_info);
	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);

	return;
}
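
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_finish_gpe
 *
 * PARAMETERS:  gpe_event_info      - Info for this GPE
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Clear/Enable a GPE. Common code that is used after execution
 *              of a GPE method or a synchronous or asynchronous GPE handler.
 *
 ******************************************************************************/
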
acpi_status acpi_ev_finish_gpe(struct acpi_gpe_event_info *gpe_event_info)
{
	acpi_status status;

	if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
	    ACPI_GPE_LEVEL_TRIGGERED) {
		/*
		 * GPE is level-triggered, we clear the GPE status bit after
		 * handling the event.
		 */
		status = acpi_hw_clear_gpe(gpe_event_info);
		if (ACPI_FAILURE(status)) {
			return (status);
		}
	}

	/*
	 * Enable this GPE, conditionally. This means that the GPE will
	 * only be physically enabled if the enable_mask bit is set
	 * in the event_info.
	 */
	(void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_CONDITIONAL_ENABLE);
	gpe_event_info->disable_for_dispatch = FALSE;
	return (AE_OK);
}
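
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_gpe_dispatch
 *
 * PARAMETERS:  gpe_device          - Device node. NULL for GPE0/GPE1
 *              gpe_event_info      - Info for this GPE
 *              gpe_number          - Number relative to the parent GPE block
 *
 * RETURN:      INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
 *
 * DESCRIPTION: Dispatch a General Purpose Event to either a function (e.g. EC)
 *              or method (e.g. _Lxx/_Exx) handler.
 *
 *              This function executes at interrupt level.
 *
 ******************************************************************************/
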
u32
acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
		     struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
{
	acpi_status status;
	u32 return_value;

	ACPI_FUNCTION_TRACE(ev_gpe_dispatch);

	/*
	 * Always disable the GPE so that it does not keep firing before
	 * any asynchronous activity completes (either from the execution
	 * of a GPE method or an asynchronous GPE handler.)
	 *
	 * If there is no handler or method to run, just disable the
	 * GPE and leave it disabled permanently to prevent further such
	 * pointless events from firing.
	 */
	status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status,
				"Unable to disable GPE %02X", gpe_number));
		return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
	}

	/*
	 * If edge-triggered, clear the GPE status bit now. Note that
	 * level-triggered events are cleared after the GPE is serviced.
	 */
	if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
	    ACPI_GPE_EDGE_TRIGGERED) {
		status = acpi_hw_clear_gpe(gpe_event_info);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Unable to clear GPE %02X",
					gpe_number));
			(void)acpi_hw_low_set_gpe(gpe_event_info,
						  ACPI_GPE_CONDITIONAL_ENABLE);
			return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
		}
	}

	gpe_event_info->disable_for_dispatch = TRUE;

	/*
	 * Dispatch the GPE to either an installed handler or the control
	 * method associated with this GPE (_Lxx or _Exx). If a handler
	 * exists, we invoke it and do not attempt to run the method.
	 * If there is neither a handler nor a method, the GPE is simply
	 * left disabled.
	 */
	switch (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags)) {
	case ACPI_GPE_DISPATCH_HANDLER:

		/* Invoke the installed handler (at interrupt level) */

		return_value =
		    gpe_event_info->dispatch.handler->address(gpe_device,
							      gpe_number,
							      gpe_event_info->
							      dispatch.handler->
							      context);

		/* If requested, clear (if level-triggered) and re-enable the GPE */

		if (return_value & ACPI_REENABLE_GPE) {
			(void)acpi_ev_finish_gpe(gpe_event_info);
		}
		break;

	case ACPI_GPE_DISPATCH_METHOD:
	case ACPI_GPE_DISPATCH_NOTIFY:
		/*
		 * Execute the method associated with the GPE.
		 * NOTE: Level-triggered GPEs are cleared after the method completes.
		 */
		status = acpi_os_execute(OSL_GPE_HANDLER,
					 acpi_ev_asynch_execute_gpe_method,
					 gpe_event_info);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Unable to queue handler for GPE %02X - event disabled",
					gpe_number));
		}
		break;

	default:
		/*
		 * No handler or method to run!
		 * This case should no longer be possible: a GPE is not allowed
		 * to be enabled if it has no handler or method.
		 */
		ACPI_ERROR((AE_INFO,
			    "No handler or method for GPE %02X, disabling event",
			    gpe_number));

		break;
	}

	return_UINT32(ACPI_INTERRUPT_HANDLED);
}

#endif				/* !ACPI_REDUCED_HARDWARE */