/* linux/drivers/acpi/acpica/evgpe.c */
   1/******************************************************************************
   2 *
   3 * Module Name: evgpe - General Purpose Event handling and dispatch
   4 *
   5 *****************************************************************************/
   6
   7/*
   8 * Copyright (C) 2000 - 2013, Intel Corp.
   9 * All rights reserved.
  10 *
  11 * Redistribution and use in source and binary forms, with or without
  12 * modification, are permitted provided that the following conditions
  13 * are met:
  14 * 1. Redistributions of source code must retain the above copyright
  15 *    notice, this list of conditions, and the following disclaimer,
  16 *    without modification.
  17 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
  18 *    substantially similar to the "NO WARRANTY" disclaimer below
  19 *    ("Disclaimer") and any redistribution must be conditioned upon
  20 *    including a substantially similar Disclaimer requirement for further
  21 *    binary redistribution.
  22 * 3. Neither the names of the above-listed copyright holders nor the names
  23 *    of any contributors may be used to endorse or promote products derived
  24 *    from this software without specific prior written permission.
  25 *
  26 * Alternatively, this software may be distributed under the terms of the
  27 * GNU General Public License ("GPL") version 2 as published by the Free
  28 * Software Foundation.
  29 *
  30 * NO WARRANTY
  31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
  34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  35 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
  39 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
  40 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  41 * POSSIBILITY OF SUCH DAMAGES.
  42 */
  43
  44#include <acpi/acpi.h>
  45#include "accommon.h"
  46#include "acevents.h"
  47#include "acnamesp.h"
  48
  49#define _COMPONENT          ACPI_EVENTS
  50ACPI_MODULE_NAME("evgpe")
  51#if (!ACPI_REDUCED_HARDWARE)    /* Entire module */
  52/* Local prototypes */
  53static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context);
  54
  55static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context);
  56
  57/*******************************************************************************
  58 *
  59 * FUNCTION:    acpi_ev_update_gpe_enable_mask
  60 *
  61 * PARAMETERS:  gpe_event_info          - GPE to update
  62 *
  63 * RETURN:      Status
  64 *
  65 * DESCRIPTION: Updates GPE register enable mask based upon whether there are
  66 *              runtime references to this GPE
  67 *
  68 ******************************************************************************/
  69
  70acpi_status
  71acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info)
  72{
  73        struct acpi_gpe_register_info *gpe_register_info;
  74        u32 register_bit;
  75
  76        ACPI_FUNCTION_TRACE(ev_update_gpe_enable_mask);
  77
  78        gpe_register_info = gpe_event_info->register_info;
  79        if (!gpe_register_info) {
  80                return_ACPI_STATUS(AE_NOT_EXIST);
  81        }
  82
  83        register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info);
  84
  85        /* Clear the run bit up front */
  86
  87        ACPI_CLEAR_BIT(gpe_register_info->enable_for_run, register_bit);
  88
  89        /* Set the mask bit only if there are references to this GPE */
  90
  91        if (gpe_event_info->runtime_count) {
  92                ACPI_SET_BIT(gpe_register_info->enable_for_run,
  93                             (u8)register_bit);
  94        }
  95
  96        return_ACPI_STATUS(AE_OK);
  97}
  98
  99/*******************************************************************************
 100 *
 101 * FUNCTION:    acpi_ev_enable_gpe
 102 *
 103 * PARAMETERS:  gpe_event_info  - GPE to enable
 104 *
 105 * RETURN:      Status
 106 *
 107 * DESCRIPTION: Clear a GPE of stale events and enable it.
 108 *
 109 ******************************************************************************/
 110acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
 111{
 112        acpi_status status;
 113
 114        ACPI_FUNCTION_TRACE(ev_enable_gpe);
 115
 116        /*
 117         * We will only allow a GPE to be enabled if it has either an associated
 118         * method (_Lxx/_Exx) or a handler, or is using the implicit notify
 119         * feature. Otherwise, the GPE will be immediately disabled by
 120         * acpi_ev_gpe_dispatch the first time it fires.
 121         */
 122        if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
 123            ACPI_GPE_DISPATCH_NONE) {
 124                return_ACPI_STATUS(AE_NO_HANDLER);
 125        }
 126
 127        /* Clear the GPE (of stale events) */
 128        status = acpi_hw_clear_gpe(gpe_event_info);
 129        if (ACPI_FAILURE(status)) {
 130                return_ACPI_STATUS(status);
 131        }
 132
 133        /* Enable the requested GPE */
 134
 135        status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE_SAVE);
 136        return_ACPI_STATUS(status);
 137}
 138
 139
 140/*******************************************************************************
 141 *
 142 * FUNCTION:    acpi_ev_mask_gpe
 143 *
 144 * PARAMETERS:  gpe_event_info          - GPE to be blocked/unblocked
 145 *              is_masked               - Whether the GPE is masked or not
 146 *
 147 * RETURN:      Status
 148 *
 149 * DESCRIPTION: Unconditionally mask/unmask a GPE during runtime.
 150 *
 151 ******************************************************************************/
 152
 153acpi_status
 154acpi_ev_mask_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 is_masked)
 155{
 156        struct acpi_gpe_register_info *gpe_register_info;
 157        u32 register_bit;
 158
 159        ACPI_FUNCTION_TRACE(ev_mask_gpe);
 160
 161        gpe_register_info = gpe_event_info->register_info;
 162        if (!gpe_register_info) {
 163                return_ACPI_STATUS(AE_NOT_EXIST);
 164        }
 165
 166        register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info);
 167
 168        /* Perform the action */
 169
 170        if (is_masked) {
 171                if (register_bit & gpe_register_info->mask_for_run) {
 172                        return_ACPI_STATUS(AE_BAD_PARAMETER);
 173                }
 174
 175                (void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
 176                ACPI_SET_BIT(gpe_register_info->mask_for_run, (u8)register_bit);
 177        } else {
 178                if (!(register_bit & gpe_register_info->mask_for_run)) {
 179                        return_ACPI_STATUS(AE_BAD_PARAMETER);
 180                }
 181
 182                ACPI_CLEAR_BIT(gpe_register_info->mask_for_run,
 183                               (u8)register_bit);
 184                if (gpe_event_info->runtime_count
 185                    && !gpe_event_info->disable_for_dispatch) {
 186                        (void)acpi_hw_low_set_gpe(gpe_event_info,
 187                                                  ACPI_GPE_ENABLE);
 188                }
 189        }
 190
 191        return_ACPI_STATUS(AE_OK);
 192}
 193
 194/*******************************************************************************
 195 *
 196 * FUNCTION:    acpi_ev_add_gpe_reference
 197 *
 198 * PARAMETERS:  gpe_event_info          - Add a reference to this GPE
 199 *
 200 * RETURN:      Status
 201 *
 202 * DESCRIPTION: Add a reference to a GPE. On the first reference, the GPE is
 203 *              hardware-enabled.
 204 *
 205 ******************************************************************************/
 206
 207acpi_status
 208acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info)
 209{
 210        acpi_status status = AE_OK;
 211
 212        ACPI_FUNCTION_TRACE(ev_add_gpe_reference);
 213
 214        if (gpe_event_info->runtime_count == ACPI_UINT8_MAX) {
 215                return_ACPI_STATUS(AE_LIMIT);
 216        }
 217
 218        gpe_event_info->runtime_count++;
 219        if (gpe_event_info->runtime_count == 1) {
 220
 221                /* Enable on first reference */
 222
 223                status = acpi_ev_update_gpe_enable_mask(gpe_event_info);
 224                if (ACPI_SUCCESS(status)) {
 225                        status = acpi_ev_enable_gpe(gpe_event_info);
 226                }
 227
 228                if (ACPI_FAILURE(status)) {
 229                        gpe_event_info->runtime_count--;
 230                }
 231        }
 232
 233        return_ACPI_STATUS(status);
 234}
 235
 236/*******************************************************************************
 237 *
 238 * FUNCTION:    acpi_ev_remove_gpe_reference
 239 *
 240 * PARAMETERS:  gpe_event_info          - Remove a reference to this GPE
 241 *
 242 * RETURN:      Status
 243 *
 244 * DESCRIPTION: Remove a reference to a GPE. When the last reference is
 245 *              removed, the GPE is hardware-disabled.
 246 *
 247 ******************************************************************************/
 248
 249acpi_status
 250acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info)
 251{
 252        acpi_status status = AE_OK;
 253
 254        ACPI_FUNCTION_TRACE(ev_remove_gpe_reference);
 255
 256        if (!gpe_event_info->runtime_count) {
 257                return_ACPI_STATUS(AE_LIMIT);
 258        }
 259
 260        gpe_event_info->runtime_count--;
 261        if (!gpe_event_info->runtime_count) {
 262
 263                /* Disable on last reference */
 264
 265                status = acpi_ev_update_gpe_enable_mask(gpe_event_info);
 266                if (ACPI_SUCCESS(status)) {
 267                        status =
 268                            acpi_hw_low_set_gpe(gpe_event_info,
 269                                                     ACPI_GPE_DISABLE_SAVE);
 270                }
 271
 272                if (ACPI_FAILURE(status)) {
 273                        gpe_event_info->runtime_count++;
 274                }
 275        }
 276
 277        return_ACPI_STATUS(status);
 278}
 279
 280/*******************************************************************************
 281 *
 282 * FUNCTION:    acpi_ev_low_get_gpe_info
 283 *
 284 * PARAMETERS:  gpe_number          - Raw GPE number
 285 *              gpe_block           - A GPE info block
 286 *
 287 * RETURN:      A GPE event_info struct. NULL if not a valid GPE (The gpe_number
 288 *              is not within the specified GPE block)
 289 *
 290 * DESCRIPTION: Returns the event_info struct associated with this GPE. This is
 291 *              the low-level implementation of ev_get_gpe_event_info.
 292 *
 293 ******************************************************************************/
 294
 295struct acpi_gpe_event_info *acpi_ev_low_get_gpe_info(u32 gpe_number,
 296                                                     struct acpi_gpe_block_info
 297                                                     *gpe_block)
 298{
 299        u32 gpe_index;
 300
 301        /*
 302         * Validate that the gpe_number is within the specified gpe_block.
 303         * (Two steps)
 304         */
 305        if (!gpe_block || (gpe_number < gpe_block->block_base_number)) {
 306                return (NULL);
 307        }
 308
 309        gpe_index = gpe_number - gpe_block->block_base_number;
 310        if (gpe_index >= gpe_block->gpe_count) {
 311                return (NULL);
 312        }
 313
 314        return (&gpe_block->event_info[gpe_index]);
 315}
 316
 317
 318/*******************************************************************************
 319 *
 320 * FUNCTION:    acpi_ev_get_gpe_event_info
 321 *
 322 * PARAMETERS:  gpe_device          - Device node. NULL for GPE0/GPE1
 323 *              gpe_number          - Raw GPE number
 324 *
 325 * RETURN:      A GPE event_info struct. NULL if not a valid GPE
 326 *
 327 * DESCRIPTION: Returns the event_info struct associated with this GPE.
 328 *              Validates the gpe_block and the gpe_number
 329 *
 330 *              Should be called only when the GPE lists are semaphore locked
 331 *              and not subject to change.
 332 *
 333 ******************************************************************************/
 334
 335struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
 336                                                       u32 gpe_number)
 337{
 338        union acpi_operand_object *obj_desc;
 339        struct acpi_gpe_event_info *gpe_info;
 340        u32 i;
 341
 342        ACPI_FUNCTION_ENTRY();
 343
 344        /* A NULL gpe_device means use the FADT-defined GPE block(s) */
 345
 346        if (!gpe_device) {
 347
 348                /* Examine GPE Block 0 and 1 (These blocks are permanent) */
 349
 350                for (i = 0; i < ACPI_MAX_GPE_BLOCKS; i++) {
 351                        gpe_info = acpi_ev_low_get_gpe_info(gpe_number,
 352                                                            acpi_gbl_gpe_fadt_blocks
 353                                                            [i]);
 354                        if (gpe_info) {
 355                                return (gpe_info);
 356                        }
 357                }
 358
 359                /* The gpe_number was not in the range of either FADT GPE block */
 360
 361                return (NULL);
 362        }
 363
 364        /* A Non-NULL gpe_device means this is a GPE Block Device */
 365
 366        obj_desc =
 367            acpi_ns_get_attached_object((struct acpi_namespace_node *)
 368                                               gpe_device);
 369        if (!obj_desc || !obj_desc->device.gpe_block) {
 370                return (NULL);
 371        }
 372
 373        return (acpi_ev_low_get_gpe_info
 374                (gpe_number, obj_desc->device.gpe_block));
 375}
 376
 377/*******************************************************************************
 378 *
 379 * FUNCTION:    acpi_ev_gpe_detect
 380 *
 381 * PARAMETERS:  gpe_xrupt_list      - Interrupt block for this interrupt.
 382 *                                    Can have multiple GPE blocks attached.
 383 *
 384 * RETURN:      INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
 385 *
 386 * DESCRIPTION: Detect if any GP events have occurred. This function is
 387 *              executed at interrupt level.
 388 *
 389 ******************************************************************************/
 390
u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
{
	acpi_status status;
	struct acpi_gpe_block_info *gpe_block;
	struct acpi_gpe_register_info *gpe_register_info;
	u32 int_status = ACPI_INTERRUPT_NOT_HANDLED;
	u8 enabled_status_byte;
	u32 status_reg;
	u32 enable_reg;
	acpi_cpu_flags flags;
	u32 i;
	u32 j;

	ACPI_FUNCTION_NAME(ev_gpe_detect);

	/* Check for the case where there are no GPEs */

	if (!gpe_xrupt_list) {
		return (int_status);
	}

	/*
	 * We need to obtain the GPE lock for both the data structs and registers
	 * Note: Not necessary to obtain the hardware lock, since the GPE
	 * registers are owned by the gpe_lock.
	 */
	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);

	/* Examine all GPE blocks attached to this interrupt level */

	gpe_block = gpe_xrupt_list->gpe_block_list_head;
	while (gpe_block) {
		/*
		 * Read all of the 8-bit GPE status and enable registers in this GPE
		 * block, saving all of them. Find all currently active GP events.
		 */
		for (i = 0; i < gpe_block->register_count; i++) {

			/* Get the next status/enable pair */

			gpe_register_info = &gpe_block->register_info[i];

			/*
			 * Optimization: If there are no GPEs enabled within this
			 * register (neither for run nor for wake), we can safely
			 * ignore the entire register without touching hardware.
			 */
			if (!(gpe_register_info->enable_for_run |
			      gpe_register_info->enable_for_wake)) {
				ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
						  "Ignore disabled registers for GPE%02X-GPE%02X: "
						  "RunEnable=%02X, WakeEnable=%02X\n",
						  gpe_register_info->
						  base_gpe_number,
						  gpe_register_info->
						  base_gpe_number +
						  (ACPI_GPE_REGISTER_WIDTH - 1),
						  gpe_register_info->
						  enable_for_run,
						  gpe_register_info->
						  enable_for_wake));
				continue;
			}

			/* Read the Status Register */

			status =
			    acpi_hw_read(&status_reg,
					 &gpe_register_info->status_address);
			if (ACPI_FAILURE(status)) {
				goto unlock_and_exit;
			}

			/* Read the Enable Register */

			status =
			    acpi_hw_read(&enable_reg,
					 &gpe_register_info->enable_address);
			if (ACPI_FAILURE(status)) {
				goto unlock_and_exit;
			}

			ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
					  "Read registers for GPE%02X-GPE%02X: Status=%02X, Enable=%02X, "
					  "RunEnable=%02X, WakeEnable=%02X\n",
					  gpe_register_info->base_gpe_number,
					  gpe_register_info->base_gpe_number +
					  (ACPI_GPE_REGISTER_WIDTH - 1),
					  status_reg, enable_reg,
					  gpe_register_info->enable_for_run,
					  gpe_register_info->enable_for_wake));

			/*
			 * Check if there is anything active at all in this
			 * register: a GPE is "active" only when both its status
			 * and enable bits are set.
			 */
			enabled_status_byte = (u8) (status_reg & enable_reg);
			if (!enabled_status_byte) {

				/* No active GPEs in this register, move on */

				continue;
			}

			/* Now look at the individual GPEs in this byte register */

			for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {

				/* Examine one GPE bit */

				if (enabled_status_byte & (1 << j)) {
					/*
					 * Found an active GPE. Dispatch the event to a handler
					 * or method. The event_info index combines the register
					 * index with the bit position within the register.
					 */
					int_status |=
					    acpi_ev_gpe_dispatch(gpe_block->
								 node,
								 &gpe_block->
						event_info[((acpi_size) i * ACPI_GPE_REGISTER_WIDTH) + j], j + gpe_register_info->base_gpe_number);
				}
			}
		}

		gpe_block = gpe_block->next;
	}

      unlock_and_exit:

	/* Release the GPE lock taken above; return HANDLED if any GPE fired */

	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
	return (int_status);
}
 520
 521/*******************************************************************************
 522 *
 523 * FUNCTION:    acpi_ev_asynch_execute_gpe_method
 524 *
 525 * PARAMETERS:  Context (gpe_event_info) - Info for this GPE
 526 *
 527 * RETURN:      None
 528 *
 529 * DESCRIPTION: Perform the actual execution of a GPE control method. This
 530 *              function is called from an invocation of acpi_os_execute and
 531 *              therefore does NOT execute at interrupt level - so that
 532 *              the control method itself is not executed in the context of
 533 *              an interrupt handler.
 534 *
 535 ******************************************************************************/
 536
 537static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
 538{
 539        struct acpi_gpe_event_info *gpe_event_info = context;
 540        acpi_status status;
 541        struct acpi_gpe_event_info *local_gpe_event_info;
 542        struct acpi_evaluate_info *info;
 543        struct acpi_gpe_notify_info *notify;
 544
 545        ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method);
 546
 547        /* Allocate a local GPE block */
 548
 549        local_gpe_event_info =
 550            ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_event_info));
 551        if (!local_gpe_event_info) {
 552                ACPI_EXCEPTION((AE_INFO, AE_NO_MEMORY, "while handling a GPE"));
 553                return_VOID;
 554        }
 555
 556        status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
 557        if (ACPI_FAILURE(status)) {
 558                ACPI_FREE(local_gpe_event_info);
 559                return_VOID;
 560        }
 561
 562        /* Must revalidate the gpe_number/gpe_block */
 563
 564        if (!acpi_ev_valid_gpe_event(gpe_event_info)) {
 565                status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
 566                ACPI_FREE(local_gpe_event_info);
 567                return_VOID;
 568        }
 569
 570        /*
 571         * Take a snapshot of the GPE info for this level - we copy the info to
 572         * prevent a race condition with remove_handler/remove_block.
 573         */
 574        memcpy(local_gpe_event_info, gpe_event_info,
 575               sizeof(struct acpi_gpe_event_info));
 576
 577        status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
 578        if (ACPI_FAILURE(status)) {
 579                return_VOID;
 580        }
 581
 582        /* Do the correct dispatch - normal method or implicit notify */
 583
 584        switch (ACPI_GPE_DISPATCH_TYPE(local_gpe_event_info->flags)) {
 585        case ACPI_GPE_DISPATCH_NOTIFY:
 586                /*
 587                 * Implicit notify.
 588                 * Dispatch a DEVICE_WAKE notify to the appropriate handler.
 589                 * NOTE: the request is queued for execution after this method
 590                 * completes. The notify handlers are NOT invoked synchronously
 591                 * from this thread -- because handlers may in turn run other
 592                 * control methods.
 593                 *
 594                 * June 2012: Expand implicit notify mechanism to support
 595                 * notifies on multiple device objects.
 596                 */
 597                notify = local_gpe_event_info->dispatch.notify_list;
 598                while (ACPI_SUCCESS(status) && notify) {
 599                        status =
 600                            acpi_ev_queue_notify_request(notify->device_node,
 601                                                         ACPI_NOTIFY_DEVICE_WAKE);
 602
 603                        notify = notify->next;
 604                }
 605
 606                break;
 607
 608        case ACPI_GPE_DISPATCH_METHOD:
 609
 610                /* Allocate the evaluation information block */
 611
 612                info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
 613                if (!info) {
 614                        status = AE_NO_MEMORY;
 615                } else {
 616                        /*
 617                         * Invoke the GPE Method (_Lxx, _Exx) i.e., evaluate the
 618                         * _Lxx/_Exx control method that corresponds to this GPE
 619                         */
 620                        info->prefix_node =
 621                            local_gpe_event_info->dispatch.method_node;
 622                        info->flags = ACPI_IGNORE_RETURN_VALUE;
 623
 624                        status = acpi_ns_evaluate(info);
 625                        ACPI_FREE(info);
 626                }
 627
 628                if (ACPI_FAILURE(status)) {
 629                        ACPI_EXCEPTION((AE_INFO, status,
 630                                        "while evaluating GPE method [%4.4s]",
 631                                        acpi_ut_get_node_name
 632                                        (local_gpe_event_info->dispatch.
 633                                         method_node)));
 634                }
 635                break;
 636
 637        default:
 638
 639                return_VOID;    /* Should never happen */
 640        }
 641
 642        /* Defer enabling of GPE until all notify handlers are done */
 643
 644        status = acpi_os_execute(OSL_NOTIFY_HANDLER,
 645                                 acpi_ev_asynch_enable_gpe,
 646                                 local_gpe_event_info);
 647        if (ACPI_FAILURE(status)) {
 648                ACPI_FREE(local_gpe_event_info);
 649        }
 650        return_VOID;
 651}
 652
 653
 654/*******************************************************************************
 655 *
 656 * FUNCTION:    acpi_ev_asynch_enable_gpe
 657 *
 658 * PARAMETERS:  Context (gpe_event_info) - Info for this GPE
 659 *              Callback from acpi_os_execute
 660 *
 661 * RETURN:      None
 662 *
 663 * DESCRIPTION: Asynchronous clear/enable for GPE. This allows the GPE to
 664 *              complete (i.e., finish execution of Notify)
 665 *
 666 ******************************************************************************/
 667
 668static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context)
 669{
 670        struct acpi_gpe_event_info *gpe_event_info = context;
 671
 672        (void)acpi_ev_finish_gpe(gpe_event_info);
 673
 674        ACPI_FREE(gpe_event_info);
 675        return;
 676}
 677
 678
 679/*******************************************************************************
 680 *
 681 * FUNCTION:    acpi_ev_finish_gpe
 682 *
 683 * PARAMETERS:  gpe_event_info      - Info for this GPE
 684 *
 685 * RETURN:      Status
 686 *
 687 * DESCRIPTION: Clear/Enable a GPE. Common code that is used after execution
 688 *              of a GPE method or a synchronous or asynchronous GPE handler.
 689 *
 690 ******************************************************************************/
 691
 692acpi_status acpi_ev_finish_gpe(struct acpi_gpe_event_info *gpe_event_info)
 693{
 694        acpi_status status;
 695
 696        if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
 697            ACPI_GPE_LEVEL_TRIGGERED) {
 698                /*
 699                 * GPE is level-triggered, we clear the GPE status bit after
 700                 * handling the event.
 701                 */
 702                status = acpi_hw_clear_gpe(gpe_event_info);
 703                if (ACPI_FAILURE(status)) {
 704                        return (status);
 705                }
 706        }
 707
 708        /*
 709         * Enable this GPE, conditionally. This means that the GPE will
 710         * only be physically enabled if the enable_mask bit is set
 711         * in the event_info.
 712         */
 713        (void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_CONDITIONAL_ENABLE);
 714        gpe_event_info->disable_for_dispatch = FALSE;
 715        return (AE_OK);
 716}
 717
 718
 719/*******************************************************************************
 720 *
 721 * FUNCTION:    acpi_ev_gpe_dispatch
 722 *
 723 * PARAMETERS:  gpe_device      - Device node. NULL for GPE0/GPE1
 724 *              gpe_event_info  - Info for this GPE
 725 *              gpe_number      - Number relative to the parent GPE block
 726 *
 727 * RETURN:      INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
 728 *
 729 * DESCRIPTION: Dispatch a General Purpose Event to either a function (e.g. EC)
 730 *              or method (e.g. _Lxx/_Exx) handler.
 731 *
 732 *              This function executes at interrupt level.
 733 *
 734 ******************************************************************************/
 735
u32
acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
		    struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
{
	acpi_status status;
	u32 return_value;

	ACPI_FUNCTION_TRACE(ev_gpe_dispatch);

	/* Bump the running count of dispatched GPEs, then invoke the
	 * global event handler if one is installed */

	acpi_gpe_count++;
	if (acpi_gbl_global_event_handler) {
		acpi_gbl_global_event_handler(ACPI_EVENT_TYPE_GPE, gpe_device,
					      gpe_number,
					      acpi_gbl_global_event_handler_context);
	}

	/*
	 * If edge-triggered, clear the GPE status bit now. Note that
	 * level-triggered events are cleared after the GPE is serviced
	 * (see acpi_ev_finish_gpe).
	 */
	if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
	    ACPI_GPE_EDGE_TRIGGERED) {
		status = acpi_hw_clear_gpe(gpe_event_info);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Unable to clear GPE%02X", gpe_number));
			return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
		}
	}

	/*
	 * Always disable the GPE so that it does not keep firing before
	 * any asynchronous activity completes (either from the execution
	 * of a GPE method or an asynchronous GPE handler.)
	 *
	 * If there is no handler or method to run, just disable the
	 * GPE and leave it disabled permanently to prevent further such
	 * pointless events from firing.
	 */
	status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status,
				"Unable to disable GPE%02X", gpe_number));
		return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
	}

	/* Record that the disable was for dispatch, so unmask/finish can
	 * decide whether to physically re-enable later */

	gpe_event_info->disable_for_dispatch = TRUE;

	/*
	 * Dispatch the GPE to either an installed handler or the control
	 * method associated with this GPE (_Lxx or _Exx). If a handler
	 * exists, we invoke it and do not attempt to run the method.
	 * If there is neither a handler nor a method, leave the GPE
	 * disabled.
	 */
	switch (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags)) {
	case ACPI_GPE_DISPATCH_HANDLER:

		/* Invoke the installed handler (at interrupt level) */

		return_value =
		    gpe_event_info->dispatch.handler->address(gpe_device,
							      gpe_number,
							      gpe_event_info->
							      dispatch.handler->
							      context);

		/* If requested, clear (if level-triggered) and reenable the GPE */

		if (return_value & ACPI_REENABLE_GPE) {
			(void)acpi_ev_finish_gpe(gpe_event_info);
		}
		break;

	case ACPI_GPE_DISPATCH_METHOD:
	case ACPI_GPE_DISPATCH_NOTIFY:
		/*
		 * Execute the method associated with the GPE
		 * NOTE: Level-triggered GPEs are cleared after the method completes.
		 * The method runs deferred (not at interrupt level) via
		 * acpi_os_execute.
		 */
		status = acpi_os_execute(OSL_GPE_HANDLER,
					 acpi_ev_asynch_execute_gpe_method,
					 gpe_event_info);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Unable to queue handler for GPE%02X - event disabled",
					gpe_number));
		}
		break;

	default:
		/*
		 * No handler or method to run!
		 * 03/2010: This case should no longer be possible. We will not allow
		 * a GPE to be enabled if it has no handler or method.
		 */
		ACPI_ERROR((AE_INFO,
			    "No handler or method for GPE%02X, disabling event",
			    gpe_number));

		break;
	}

	return_UINT32(ACPI_INTERRUPT_HANDLED);
}
 843
 844#endif                          /* !ACPI_REDUCED_HARDWARE */
 845