1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44#include <acpi/acpi.h>
45#include "accommon.h"
46#include "acevents.h"
47#include "acnamesp.h"
48
49#define _COMPONENT ACPI_EVENTS
50ACPI_MODULE_NAME("evgpe")
51#if (!ACPI_REDUCED_HARDWARE)
52
53static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context);
54
55static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context);
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70acpi_status
71acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info)
72{
73 struct acpi_gpe_register_info *gpe_register_info;
74 u32 register_bit;
75
76 ACPI_FUNCTION_TRACE(ev_update_gpe_enable_mask);
77
78 gpe_register_info = gpe_event_info->register_info;
79 if (!gpe_register_info) {
80 return_ACPI_STATUS(AE_NOT_EXIST);
81 }
82
83 register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info);
84
85
86
87 ACPI_CLEAR_BIT(gpe_register_info->enable_for_run, register_bit);
88
89
90
91 if (gpe_event_info->runtime_count) {
92 ACPI_SET_BIT(gpe_register_info->enable_for_run,
93 (u8)register_bit);
94 }
95
96 return_ACPI_STATUS(AE_OK);
97}
98
99
100
101
102
103
104
105
106
107
108
109
110acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
111{
112 acpi_status status;
113
114 ACPI_FUNCTION_TRACE(ev_enable_gpe);
115
116
117
118
119
120
121
122 if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
123 ACPI_GPE_DISPATCH_NONE) {
124 return_ACPI_STATUS(AE_NO_HANDLER);
125 }
126
127
128 status = acpi_hw_clear_gpe(gpe_event_info);
129 if (ACPI_FAILURE(status)) {
130 return_ACPI_STATUS(status);
131 }
132
133
134
135 status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE);
136 return_ACPI_STATUS(status);
137}
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153acpi_status
154acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info)
155{
156 acpi_status status = AE_OK;
157
158 ACPI_FUNCTION_TRACE(ev_add_gpe_reference);
159
160 if (gpe_event_info->runtime_count == ACPI_UINT8_MAX) {
161 return_ACPI_STATUS(AE_LIMIT);
162 }
163
164 gpe_event_info->runtime_count++;
165 if (gpe_event_info->runtime_count == 1) {
166
167
168
169 status = acpi_ev_update_gpe_enable_mask(gpe_event_info);
170 if (ACPI_SUCCESS(status)) {
171 status = acpi_ev_enable_gpe(gpe_event_info);
172 }
173
174 if (ACPI_FAILURE(status)) {
175 gpe_event_info->runtime_count--;
176 }
177 }
178
179 return_ACPI_STATUS(status);
180}
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195acpi_status
196acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info)
197{
198 acpi_status status = AE_OK;
199
200 ACPI_FUNCTION_TRACE(ev_remove_gpe_reference);
201
202 if (!gpe_event_info->runtime_count) {
203 return_ACPI_STATUS(AE_LIMIT);
204 }
205
206 gpe_event_info->runtime_count--;
207 if (!gpe_event_info->runtime_count) {
208
209
210
211 status = acpi_ev_update_gpe_enable_mask(gpe_event_info);
212 if (ACPI_SUCCESS(status)) {
213 status =
214 acpi_hw_low_set_gpe(gpe_event_info,
215 ACPI_GPE_DISABLE);
216 }
217
218 if (ACPI_FAILURE(status)) {
219 gpe_event_info->runtime_count++;
220 }
221 }
222
223 return_ACPI_STATUS(status);
224}
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241struct acpi_gpe_event_info *acpi_ev_low_get_gpe_info(u32 gpe_number,
242 struct acpi_gpe_block_info
243 *gpe_block)
244{
245 u32 gpe_index;
246
247
248
249
250
251 if (!gpe_block || (gpe_number < gpe_block->block_base_number)) {
252 return (NULL);
253 }
254
255 gpe_index = gpe_number - gpe_block->block_base_number;
256 if (gpe_index >= gpe_block->gpe_count) {
257 return (NULL);
258 }
259
260 return (&gpe_block->event_info[gpe_index]);
261}
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
282 u32 gpe_number)
283{
284 union acpi_operand_object *obj_desc;
285 struct acpi_gpe_event_info *gpe_info;
286 u32 i;
287
288 ACPI_FUNCTION_ENTRY();
289
290
291
292 if (!gpe_device) {
293
294
295
296 for (i = 0; i < ACPI_MAX_GPE_BLOCKS; i++) {
297 gpe_info = acpi_ev_low_get_gpe_info(gpe_number,
298 acpi_gbl_gpe_fadt_blocks
299 [i]);
300 if (gpe_info) {
301 return (gpe_info);
302 }
303 }
304
305
306
307 return (NULL);
308 }
309
310
311
312 obj_desc =
313 acpi_ns_get_attached_object((struct acpi_namespace_node *)
314 gpe_device);
315 if (!obj_desc || !obj_desc->device.gpe_block) {
316 return (NULL);
317 }
318
319 return (acpi_ev_low_get_gpe_info
320 (gpe_number, obj_desc->device.gpe_block));
321}
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
/*
 * Detect and dispatch all active GPEs on the given interrupt level.
 * Walks every GPE block attached to gpe_xrupt_list, reads each status
 * and enable register pair, and dispatches every bit that is both set
 * and enabled. Runs with acpi_gbl_gpe_lock held so the block/register
 * lists cannot change underneath the scan.
 * NOTE(review): appears to be called from interrupt context (uses
 * acpi_cpu_flags spinlock semantics and ACPI_FUNCTION_NAME rather than
 * TRACE) — confirm against callers.
 * Returns ACPI_INTERRUPT_HANDLED if any GPE was dispatched, else
 * ACPI_INTERRUPT_NOT_HANDLED.
 */
u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
{
	acpi_status status;
	struct acpi_gpe_block_info *gpe_block;
	struct acpi_gpe_register_info *gpe_register_info;
	u32 int_status = ACPI_INTERRUPT_NOT_HANDLED;
	u8 enabled_status_byte;
	u32 status_reg;
	u32 enable_reg;
	acpi_cpu_flags flags;
	u32 i;
	u32 j;

	ACPI_FUNCTION_NAME(ev_gpe_detect);

	/* No GPE blocks on this interrupt level: nothing to do */

	if (!gpe_xrupt_list) {
		return (int_status);
	}

	/*
	 * Hold the GPE lock for the entire scan so that block and
	 * register lists remain stable while we read hardware.
	 */
	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);

	/* Examine every GPE block attached to this interrupt level */

	gpe_block = gpe_xrupt_list->gpe_block_list_head;
	while (gpe_block) {

		/* Each register covers ACPI_GPE_REGISTER_WIDTH GPEs */

		for (i = 0; i < gpe_block->register_count; i++) {

			gpe_register_info = &gpe_block->register_info[i];

			/*
			 * Skip registers with no run- or wake-enabled bits;
			 * reading them cannot yield a dispatchable event.
			 */
			if (!(gpe_register_info->enable_for_run |
			      gpe_register_info->enable_for_wake)) {
				ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
						  "Ignore disabled registers for GPE %02X-%02X: "
						  "RunEnable=%02X, WakeEnable=%02X\n",
						  gpe_register_info->
						  base_gpe_number,
						  gpe_register_info->
						  base_gpe_number +
						  (ACPI_GPE_REGISTER_WIDTH - 1),
						  gpe_register_info->
						  enable_for_run,
						  gpe_register_info->
						  enable_for_wake));
				continue;
			}

			/* Read the register's status bits */

			status =
			    acpi_hw_read(&status_reg,
					 &gpe_register_info->status_address);
			if (ACPI_FAILURE(status)) {
				goto unlock_and_exit;
			}

			/* Read the register's enable bits */

			status =
			    acpi_hw_read(&enable_reg,
					 &gpe_register_info->enable_address);
			if (ACPI_FAILURE(status)) {
				goto unlock_and_exit;
			}

			ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
					  "Read registers for GPE %02X-%02X: Status=%02X, Enable=%02X, "
					  "RunEnable=%02X, WakeEnable=%02X\n",
					  gpe_register_info->base_gpe_number,
					  gpe_register_info->base_gpe_number +
					  (ACPI_GPE_REGISTER_WIDTH - 1),
					  status_reg, enable_reg,
					  gpe_register_info->enable_for_run,
					  gpe_register_info->enable_for_wake));

			/* Only bits that are both set AND enabled matter */

			enabled_status_byte = (u8) (status_reg & enable_reg);
			if (!enabled_status_byte) {

				/* Nothing active in this register */

				continue;
			}

			/* Dispatch each active bit individually */

			for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {

				if (enabled_status_byte & (1 << j)) {

					/*
					 * Found an active, enabled GPE;
					 * dispatch it (handler, method, or
					 * implicit notify).
					 */
					int_status |=
					    acpi_ev_gpe_dispatch(gpe_block->
								 node,
								 &gpe_block->
								 event_info[((acpi_size) i * ACPI_GPE_REGISTER_WIDTH) + j], j + gpe_register_info->base_gpe_number);
				}
			}
		}

		gpe_block = gpe_block->next;
	}

unlock_and_exit:

	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
	return (int_status);
}
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
484{
485 struct acpi_gpe_event_info *gpe_event_info = context;
486 acpi_status status;
487 struct acpi_gpe_event_info *local_gpe_event_info;
488 struct acpi_evaluate_info *info;
489 struct acpi_gpe_notify_info *notify;
490
491 ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method);
492
493
494
495 local_gpe_event_info =
496 ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_event_info));
497 if (!local_gpe_event_info) {
498 ACPI_EXCEPTION((AE_INFO, AE_NO_MEMORY, "while handling a GPE"));
499 return_VOID;
500 }
501
502 status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
503 if (ACPI_FAILURE(status)) {
504 ACPI_FREE(local_gpe_event_info);
505 return_VOID;
506 }
507
508
509
510 if (!acpi_ev_valid_gpe_event(gpe_event_info)) {
511 status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
512 ACPI_FREE(local_gpe_event_info);
513 return_VOID;
514 }
515
516
517
518
519
520 ACPI_MEMCPY(local_gpe_event_info, gpe_event_info,
521 sizeof(struct acpi_gpe_event_info));
522
523 status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
524 if (ACPI_FAILURE(status)) {
525 ACPI_FREE(local_gpe_event_info);
526 return_VOID;
527 }
528
529
530
531 switch (local_gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) {
532 case ACPI_GPE_DISPATCH_NOTIFY:
533
534
535
536
537
538
539
540
541
542
543
544 notify = local_gpe_event_info->dispatch.notify_list;
545 while (ACPI_SUCCESS(status) && notify) {
546 status =
547 acpi_ev_queue_notify_request(notify->device_node,
548 ACPI_NOTIFY_DEVICE_WAKE);
549
550 notify = notify->next;
551 }
552
553 break;
554
555 case ACPI_GPE_DISPATCH_METHOD:
556
557
558
559 info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
560 if (!info) {
561 status = AE_NO_MEMORY;
562 } else {
563
564
565
566
567 info->prefix_node =
568 local_gpe_event_info->dispatch.method_node;
569 info->flags = ACPI_IGNORE_RETURN_VALUE;
570
571 status = acpi_ns_evaluate(info);
572 ACPI_FREE(info);
573 }
574
575 if (ACPI_FAILURE(status)) {
576 ACPI_EXCEPTION((AE_INFO, status,
577 "while evaluating GPE method [%4.4s]",
578 acpi_ut_get_node_name
579 (local_gpe_event_info->dispatch.
580 method_node)));
581 }
582 break;
583
584 default:
585
586 return_VOID;
587 }
588
589
590
591 status = acpi_os_execute(OSL_NOTIFY_HANDLER,
592 acpi_ev_asynch_enable_gpe,
593 local_gpe_event_info);
594 if (ACPI_FAILURE(status)) {
595 ACPI_FREE(local_gpe_event_info);
596 }
597 return_VOID;
598}
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context)
616{
617 struct acpi_gpe_event_info *gpe_event_info = context;
618
619 (void)acpi_ev_finish_gpe(gpe_event_info);
620
621 ACPI_FREE(gpe_event_info);
622 return;
623}
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639acpi_status acpi_ev_finish_gpe(struct acpi_gpe_event_info *gpe_event_info)
640{
641 acpi_status status;
642
643 if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
644 ACPI_GPE_LEVEL_TRIGGERED) {
645
646
647
648
649 status = acpi_hw_clear_gpe(gpe_event_info);
650 if (ACPI_FAILURE(status)) {
651 return (status);
652 }
653 }
654
655
656
657
658
659
660 (void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_CONDITIONAL_ENABLE);
661 return (AE_OK);
662}
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
/*
 * Dispatch one detected GPE to its installed handler, its _Lxx/_Exx
 * method, or its implicit-notify list. The GPE is disabled first so it
 * cannot refire while being serviced; edge-triggered events are also
 * cleared immediately. METHOD/NOTIFY work is deferred to a worker via
 * acpi_os_execute, and the GPE stays disabled until that path (or a
 * handler returning ACPI_REENABLE_GPE) calls acpi_ev_finish_gpe.
 * NOTE(review): invoked from acpi_ev_gpe_detect with acpi_gbl_gpe_lock
 * held — confirm no sleeping calls are added here.
 * Returns ACPI_INTERRUPT_HANDLED unless the GPE could not be disabled
 * or (edge case) cleared.
 */
u32
acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
		     struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
{
	acpi_status status;
	u32 return_value;

	ACPI_FUNCTION_TRACE(ev_gpe_dispatch);

	/* Bump the global GPE counter and invoke the global handler, if any */

	acpi_gpe_count++;
	if (acpi_gbl_global_event_handler) {
		acpi_gbl_global_event_handler(ACPI_EVENT_TYPE_GPE, gpe_device,
					      gpe_number,
					      acpi_gbl_global_event_handler_context);
	}

	/*
	 * Disable the GPE before servicing so it cannot fire again while
	 * the (possibly deferred) handler runs. It is re-enabled later by
	 * acpi_ev_finish_gpe.
	 */
	status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status,
				"Unable to disable GPE %02X", gpe_number));
		return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
	}

	/*
	 * Edge-triggered: clear the status bit now. (Level-triggered
	 * status is cleared later, in acpi_ev_finish_gpe, after the
	 * condition has been serviced.)
	 */
	if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
	    ACPI_GPE_EDGE_TRIGGERED) {
		status = acpi_hw_clear_gpe(gpe_event_info);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Unable to clear GPE %02X",
					gpe_number));
			/* Undo the disable above before bailing out */
			(void)acpi_hw_low_set_gpe(gpe_event_info,
						  ACPI_GPE_CONDITIONAL_ENABLE);
			return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
		}
	}

	/*
	 * Route the event by dispatch type:
	 * HANDLER        - call the installed handler synchronously
	 * METHOD/NOTIFY  - defer to a worker thread
	 * anything else  - leave the GPE disabled (no way to service it)
	 */
	switch (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) {
	case ACPI_GPE_DISPATCH_HANDLER:

		/* Invoke the installed GPE handler directly */

		return_value =
		    gpe_event_info->dispatch.handler->address(gpe_device,
							      gpe_number,
							      gpe_event_info->
							      dispatch.handler->
							      context);

		/* Handler may ask us to clear/re-enable the GPE itself */

		if (return_value & ACPI_REENABLE_GPE) {
			(void)acpi_ev_finish_gpe(gpe_event_info);
		}
		break;

	case ACPI_GPE_DISPATCH_METHOD:
	case ACPI_GPE_DISPATCH_NOTIFY:

		/*
		 * Queue the deferred work; on failure the GPE simply stays
		 * disabled (it was disabled above and nothing will finish it).
		 */
		status = acpi_os_execute(OSL_GPE_HANDLER,
					 acpi_ev_asynch_execute_gpe_method,
					 gpe_event_info);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Unable to queue handler for GPE %02X - event disabled",
					gpe_number));
		}
		break;

	default:

		/*
		 * No handler or method; the GPE was disabled above and is
		 * intentionally left that way.
		 */
		ACPI_ERROR((AE_INFO,
			    "No handler or method for GPE %02X, disabling event",
			    gpe_number));

		break;
	}

	return_UINT32(ACPI_INTERRUPT_HANDLED);
}
790
791#endif
792