linux/drivers/acpi/acpica/evgpeblk.c
   1/******************************************************************************
   2 *
   3 * Module Name: evgpeblk - GPE block creation and initialization.
   4 *
   5 *****************************************************************************/
   6
   7/*
   8 * Copyright (C) 2000 - 2008, Intel Corp.
   9 * All rights reserved.
  10 *
  11 * Redistribution and use in source and binary forms, with or without
  12 * modification, are permitted provided that the following conditions
  13 * are met:
  14 * 1. Redistributions of source code must retain the above copyright
  15 *    notice, this list of conditions, and the following disclaimer,
  16 *    without modification.
  17 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
  18 *    substantially similar to the "NO WARRANTY" disclaimer below
  19 *    ("Disclaimer") and any redistribution must be conditioned upon
  20 *    including a substantially similar Disclaimer requirement for further
  21 *    binary redistribution.
  22 * 3. Neither the names of the above-listed copyright holders nor the names
  23 *    of any contributors may be used to endorse or promote products derived
  24 *    from this software without specific prior written permission.
  25 *
  26 * Alternatively, this software may be distributed under the terms of the
  27 * GNU General Public License ("GPL") version 2 as published by the Free
  28 * Software Foundation.
  29 *
  30 * NO WARRANTY
  31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
  34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  35 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
  39 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
  40 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  41 * POSSIBILITY OF SUCH DAMAGES.
  42 */
  43
  44#include <acpi/acpi.h>
  45#include "accommon.h"
  46#include "acevents.h"
  47#include "acnamesp.h"
  48
  49#define _COMPONENT          ACPI_EVENTS
  50ACPI_MODULE_NAME("evgpeblk")
  51
  52/* Local prototypes */
  53static acpi_status
  54acpi_ev_save_method_info(acpi_handle obj_handle,
  55                         u32 level, void *obj_desc, void **return_value);
  56
  57static acpi_status
  58acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
  59                          u32 level, void *info, void **return_value);
  60
  61static struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32
  62                                                               interrupt_number);
  63
  64static acpi_status
  65acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt);
  66
  67static acpi_status
  68acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
  69                          u32 interrupt_number);
  70
  71static acpi_status
  72acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block);
  73
  74/*******************************************************************************
  75 *
  76 * FUNCTION:    acpi_ev_valid_gpe_event
  77 *
  78 * PARAMETERS:  gpe_event_info              - Info for this GPE
  79 *
  80 * RETURN:      TRUE if the gpe_event is valid
  81 *
  82 * DESCRIPTION: Validate a GPE event. DO NOT CALL FROM INTERRUPT LEVEL.
  83 *              Should be called only when the GPE lists are semaphore locked
  84 *              and not subject to change.
  85 *
  86 ******************************************************************************/
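/*
 * Note: the test below is a simple pointer-range check. A gpe_event_info
 * pointer is considered valid if it falls within the event_info array of
 * any installed GPE block, which holds (register_count * 8) entries.
 */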
  87
  88u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info)
  89{
  90        struct acpi_gpe_xrupt_info *gpe_xrupt_block;
  91        struct acpi_gpe_block_info *gpe_block;
  92
  93        ACPI_FUNCTION_ENTRY();
  94
  95        /* No need for spin lock since we are not changing any list elements */
  96
  97        /* Walk the GPE interrupt levels */
  98
  99        gpe_xrupt_block = acpi_gbl_gpe_xrupt_list_head;
 100        while (gpe_xrupt_block) {
 101                gpe_block = gpe_xrupt_block->gpe_block_list_head;
 102
 103                /* Walk the GPE blocks on this interrupt level */
 104
 105                while (gpe_block) {
 106                        if ((&gpe_block->event_info[0] <= gpe_event_info) &&
 107                            (&gpe_block->event_info[((acpi_size)
 108                                                     gpe_block->
 109                                                     register_count) * 8] >
 110                             gpe_event_info)) {
 111                                return (TRUE);
 112                        }
 113
 114                        gpe_block = gpe_block->next;
 115                }
 116
 117                gpe_xrupt_block = gpe_xrupt_block->next;
 118        }
 119
 120        return (FALSE);
 121}
 122
 123/*******************************************************************************
 124 *
 125 * FUNCTION:    acpi_ev_walk_gpe_list
 126 *
 127 * PARAMETERS:  gpe_walk_callback   - Routine called for each GPE block
 128 *              Context             - Value passed to callback
 129 *
 130 * RETURN:      Status
 131 *
 132 * DESCRIPTION: Walk the GPE lists.
 133 *
 134 ******************************************************************************/
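/*
 * Note: any function with the acpi_gpe_callback signature can serve as the
 * walk callback -- acpi_ev_delete_gpe_handlers() below is one example. A
 * callback may return AE_CTRL_END to stop the walk early; this is treated
 * as a normal termination, not an error.
 */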
 135
 136acpi_status
 137acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context)
 138{
 139        struct acpi_gpe_block_info *gpe_block;
 140        struct acpi_gpe_xrupt_info *gpe_xrupt_info;
 141        acpi_status status = AE_OK;
 142        acpi_cpu_flags flags;
 143
 144        ACPI_FUNCTION_TRACE(ev_walk_gpe_list);
 145
 146        flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
 147
 148        /* Walk the interrupt level descriptor list */
 149
 150        gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
 151        while (gpe_xrupt_info) {
 152
 153                /* Walk all Gpe Blocks attached to this interrupt level */
 154
 155                gpe_block = gpe_xrupt_info->gpe_block_list_head;
 156                while (gpe_block) {
 157
 158                        /* One callback per GPE block */
 159
 160                        status =
 161                            gpe_walk_callback(gpe_xrupt_info, gpe_block,
 162                                              context);
 163                        if (ACPI_FAILURE(status)) {
 164                                if (status == AE_CTRL_END) {    /* Callback abort */
 165                                        status = AE_OK;
 166                                }
 167                                goto unlock_and_exit;
 168                        }
 169
 170                        gpe_block = gpe_block->next;
 171                }
 172
 173                gpe_xrupt_info = gpe_xrupt_info->next;
 174        }
 175
 176      unlock_and_exit:
 177        acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
 178        return_ACPI_STATUS(status);
 179}
 180
 181/*******************************************************************************
 182 *
 183 * FUNCTION:    acpi_ev_delete_gpe_handlers
 184 *
 185 * PARAMETERS:  gpe_xrupt_info      - GPE Interrupt info
 186 *              gpe_block           - Gpe Block info
 187 *
 188 * RETURN:      Status
 189 *
 190 * DESCRIPTION: Delete all Handler objects found in the GPE data structs.
 191 *              Used only prior to termination.
 192 *
 193 ******************************************************************************/
 194
 195acpi_status
 196acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
 197                            struct acpi_gpe_block_info *gpe_block,
 198                            void *context)
 199{
 200        struct acpi_gpe_event_info *gpe_event_info;
 201        u32 i;
 202        u32 j;
 203
 204        ACPI_FUNCTION_TRACE(ev_delete_gpe_handlers);
 205
 206        /* Examine each GPE Register within the block */
 207
 208        for (i = 0; i < gpe_block->register_count; i++) {
 209
 210                /* Now look at the individual GPEs in this byte register */
 211
 212                for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
 213                        gpe_event_info = &gpe_block->event_info[((acpi_size) i *
 214                                                                 ACPI_GPE_REGISTER_WIDTH)
 215                                                                + j];
 216
 217                        if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
 218                            ACPI_GPE_DISPATCH_HANDLER) {
 219                                ACPI_FREE(gpe_event_info->dispatch.handler);
 220                                gpe_event_info->dispatch.handler = NULL;
 221                                gpe_event_info->flags &=
 222                                    ~ACPI_GPE_DISPATCH_MASK;
 223                        }
 224                }
 225        }
 226
 227        return_ACPI_STATUS(AE_OK);
 228}
 229
 230/*******************************************************************************
 231 *
 232 * FUNCTION:    acpi_ev_save_method_info
 233 *
 234 * PARAMETERS:  Callback from walk_namespace
 235 *
 236 * RETURN:      Status
 237 *
 238 * DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a
 239 *              control method under the _GPE portion of the namespace.
 240 *              Extract the name and GPE type from the object, saving this
 241 *              information for quick lookup during GPE dispatch
 242 *
 243 *              The name of each GPE control method is of the form:
 244 *              "_Lxx" or "_Exx"
 245 *              Where:
 246 *                  L      - means that the GPE is level triggered
 247 *                  E      - means that the GPE is edge triggered
 248 *                  xx     - is the GPE number [in HEX]
 249 *
 250 ******************************************************************************/
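/*
 * Illustrative example: a control method named "_L0A" is registered as a
 * level-triggered GPE with GPE number 0x0A, and its event flags become
 * (ACPI_GPE_LEVEL_TRIGGERED | ACPI_GPE_DISPATCH_METHOD |
 * ACPI_GPE_TYPE_RUNTIME). A method named "_E1C" would likewise be recorded
 * as edge-triggered GPE number 0x1C.
 */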
 251
 252static acpi_status
 253acpi_ev_save_method_info(acpi_handle obj_handle,
 254                         u32 level, void *obj_desc, void **return_value)
 255{
 256        struct acpi_gpe_block_info *gpe_block = (void *)obj_desc;
 257        struct acpi_gpe_event_info *gpe_event_info;
 258        u32 gpe_number;
 259        char name[ACPI_NAME_SIZE + 1];
 260        u8 type;
 261        acpi_status status;
 262
 263        ACPI_FUNCTION_TRACE(ev_save_method_info);
 264
 265        /*
 266         * _Lxx and _Exx GPE method support
 267         *
 268         * 1) Extract the name from the object and convert to a string
 269         */
 270        ACPI_MOVE_32_TO_32(name,
 271                           &((struct acpi_namespace_node *)obj_handle)->name.
 272                           integer);
 273        name[ACPI_NAME_SIZE] = 0;
 274
 275        /*
 276         * 2) Edge/Level determination is based on the 2nd character
 277         *    of the method name
 278         *
 279         * NOTE: Default GPE type is RUNTIME. May be changed later to WAKE
 280         * if a _PRW object is found that points to this GPE.
 281         */
 282        switch (name[1]) {
 283        case 'L':
 284                type = ACPI_GPE_LEVEL_TRIGGERED;
 285                break;
 286
 287        case 'E':
 288                type = ACPI_GPE_EDGE_TRIGGERED;
 289                break;
 290
 291        default:
 292                /* Unknown method type, just ignore it! */
 293
 294                ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
 295                                  "Ignoring unknown GPE method type: %s "
 296                                  "(name not of form _Lxx or _Exx)", name));
 297                return_ACPI_STATUS(AE_OK);
 298        }
 299
 300        /* Convert the last two characters of the name to the GPE Number */
 301
 302        gpe_number = ACPI_STRTOUL(&name[2], NULL, 16);
 303        if (gpe_number == ACPI_UINT32_MAX) {
 304
 305                /* Conversion failed; invalid method, just ignore it */
 306
 307                ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
 308                                  "Could not extract GPE number from name: %s "
 309                                  "(name is not of form _Lxx or _Exx)", name));
 310                return_ACPI_STATUS(AE_OK);
 311        }
 312
 313        /* Ensure that we have a valid GPE number for this GPE block */
 314
 315        if ((gpe_number < gpe_block->block_base_number) ||
 316            (gpe_number >= (gpe_block->block_base_number +
 317                            (gpe_block->register_count * 8)))) {
 318                /*
 319                 * Not valid for this GPE block, just ignore it. However, it may be
 320                 * valid for a different GPE block, since GPE0 and GPE1 methods both
 321                 * appear under \_GPE.
 322                 */
 323                return_ACPI_STATUS(AE_OK);
 324        }
 325
 326        /*
 327         * Now we can add this information to the gpe_event_info block for use
 328         * during dispatch of this GPE. Default type is RUNTIME, although this may
 329         * change when the _PRW methods are executed later.
 330         */
 331        gpe_event_info =
 332            &gpe_block->event_info[gpe_number - gpe_block->block_base_number];
 333
 334        gpe_event_info->flags = (u8)
 335            (type | ACPI_GPE_DISPATCH_METHOD | ACPI_GPE_TYPE_RUNTIME);
 336
 337        gpe_event_info->dispatch.method_node =
 338            (struct acpi_namespace_node *)obj_handle;
 339
 340        /* Update enable mask, but don't enable the HW GPE as of yet */
 341
 342        status = acpi_ev_enable_gpe(gpe_event_info, FALSE);
 343
 344        ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
 345                          "Registered GPE method %s as GPE number 0x%.2X\n",
 346                          name, gpe_number));
 347        return_ACPI_STATUS(status);
 348}
 349
 350/*******************************************************************************
 351 *
 352 * FUNCTION:    acpi_ev_match_prw_and_gpe
 353 *
 354 * PARAMETERS:  Callback from walk_namespace
 355 *
 356 * RETURN:      Status. NOTE: We ignore errors so that the _PRW walk is
 357 *              not aborted on a single _PRW failure.
 358 *
 359 * DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a
 360 *              Device. Run the _PRW method. If present, extract the GPE
 361 *              number and mark the GPE as a WAKE GPE.
 362 *
 363 ******************************************************************************/
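/*
 * Note: per the ACPI specification, the first element of a _PRW package
 * identifies the wake event -- either an Integer GPE number within the
 * FADT-defined GPE blocks, or a two-element Package containing a reference
 * to a GPE block device plus a GPE index within that block. Both forms are
 * decoded below; the second _PRW element (the deepest wake-capable sleep
 * state) is not needed here.
 */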
 364
 365static acpi_status
 366acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
 367                          u32 level, void *info, void **return_value)
 368{
 369        struct acpi_gpe_walk_info *gpe_info = (void *)info;
 370        struct acpi_namespace_node *gpe_device;
 371        struct acpi_gpe_block_info *gpe_block;
 372        struct acpi_namespace_node *target_gpe_device;
 373        struct acpi_gpe_event_info *gpe_event_info;
 374        union acpi_operand_object *pkg_desc;
 375        union acpi_operand_object *obj_desc;
 376        u32 gpe_number;
 377        acpi_status status;
 378
 379        ACPI_FUNCTION_TRACE(ev_match_prw_and_gpe);
 380
 381        /* Check for a _PRW method under this device */
 382
 383        status = acpi_ut_evaluate_object(obj_handle, METHOD_NAME__PRW,
 384                                         ACPI_BTYPE_PACKAGE, &pkg_desc);
 385        if (ACPI_FAILURE(status)) {
 386
 387                /* Ignore all errors from _PRW, we don't want to abort the subsystem */
 388
 389                return_ACPI_STATUS(AE_OK);
 390        }
 391
 392        /* The returned _PRW package must have at least two elements */
 393
 394        if (pkg_desc->package.count < 2) {
 395                goto cleanup;
 396        }
 397
 398        /* Extract pointers from the input context */
 399
 400        gpe_device = gpe_info->gpe_device;
 401        gpe_block = gpe_info->gpe_block;
 402
 403        /*
  404         * The _PRW object must return a package; we are only interested in the
 405         * first element
 406         */
 407        obj_desc = pkg_desc->package.elements[0];
 408
 409        if (obj_desc->common.type == ACPI_TYPE_INTEGER) {
 410
 411                /* Use FADT-defined GPE device (from definition of _PRW) */
 412
 413                target_gpe_device = acpi_gbl_fadt_gpe_device;
 414
 415                /* Integer is the GPE number in the FADT described GPE blocks */
 416
 417                gpe_number = (u32) obj_desc->integer.value;
 418        } else if (obj_desc->common.type == ACPI_TYPE_PACKAGE) {
 419
 420                /* Package contains a GPE reference and GPE number within a GPE block */
 421
 422                if ((obj_desc->package.count < 2) ||
 423                    ((obj_desc->package.elements[0])->common.type !=
 424                     ACPI_TYPE_LOCAL_REFERENCE) ||
 425                    ((obj_desc->package.elements[1])->common.type !=
 426                     ACPI_TYPE_INTEGER)) {
 427                        goto cleanup;
 428                }
 429
 430                /* Get GPE block reference and decode */
 431
 432                target_gpe_device =
 433                    obj_desc->package.elements[0]->reference.node;
 434                gpe_number = (u32) obj_desc->package.elements[1]->integer.value;
 435        } else {
 436                /* Unknown type, just ignore it */
 437
 438                goto cleanup;
 439        }
 440
 441        /*
 442         * Is this GPE within this block?
 443         *
 444         * TRUE if and only if these conditions are true:
 445         *     1) The GPE devices match.
 446         *     2) The GPE index(number) is within the range of the Gpe Block
 447         *          associated with the GPE device.
 448         */
 449        if ((gpe_device == target_gpe_device) &&
 450            (gpe_number >= gpe_block->block_base_number) &&
 451            (gpe_number < gpe_block->block_base_number +
 452             (gpe_block->register_count * 8))) {
 453                gpe_event_info = &gpe_block->event_info[gpe_number -
 454                                                        gpe_block->
 455                                                        block_base_number];
 456
  457                /* Mark the GPE as WAKE-only, but leave it disabled for now */
 458
 459                gpe_event_info->flags &=
 460                    ~(ACPI_GPE_WAKE_ENABLED | ACPI_GPE_RUN_ENABLED);
 461
 462                status =
 463                    acpi_ev_set_gpe_type(gpe_event_info, ACPI_GPE_TYPE_WAKE);
 464                if (ACPI_FAILURE(status)) {
 465                        goto cleanup;
 466                }
 467
 468                status =
 469                    acpi_ev_update_gpe_enable_masks(gpe_event_info,
 470                                                    ACPI_GPE_DISABLE);
 471        }
 472
 473      cleanup:
 474        acpi_ut_remove_reference(pkg_desc);
 475        return_ACPI_STATUS(AE_OK);
 476}
 477
 478/*******************************************************************************
 479 *
 480 * FUNCTION:    acpi_ev_get_gpe_xrupt_block
 481 *
 482 * PARAMETERS:  interrupt_number     - Interrupt for a GPE block
 483 *
 484 * RETURN:      A GPE interrupt block
 485 *
 486 * DESCRIPTION: Get or Create a GPE interrupt block. There is one interrupt
 487 *              block per unique interrupt level used for GPEs. Should be
 488 *              called only when the GPE lists are semaphore locked and not
 489 *              subject to change.
 490 *
 491 ******************************************************************************/
 492
 493static struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32
 494                                                               interrupt_number)
 495{
 496        struct acpi_gpe_xrupt_info *next_gpe_xrupt;
 497        struct acpi_gpe_xrupt_info *gpe_xrupt;
 498        acpi_status status;
 499        acpi_cpu_flags flags;
 500
 501        ACPI_FUNCTION_TRACE(ev_get_gpe_xrupt_block);
 502
 503        /* No need for lock since we are not changing any list elements here */
 504
 505        next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
 506        while (next_gpe_xrupt) {
 507                if (next_gpe_xrupt->interrupt_number == interrupt_number) {
 508                        return_PTR(next_gpe_xrupt);
 509                }
 510
 511                next_gpe_xrupt = next_gpe_xrupt->next;
 512        }
 513
 514        /* Not found, must allocate a new xrupt descriptor */
 515
 516        gpe_xrupt = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_xrupt_info));
 517        if (!gpe_xrupt) {
 518                return_PTR(NULL);
 519        }
 520
 521        gpe_xrupt->interrupt_number = interrupt_number;
 522
 523        /* Install new interrupt descriptor with spin lock */
 524
 525        flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
 526        if (acpi_gbl_gpe_xrupt_list_head) {
 527                next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
 528                while (next_gpe_xrupt->next) {
 529                        next_gpe_xrupt = next_gpe_xrupt->next;
 530                }
 531
 532                next_gpe_xrupt->next = gpe_xrupt;
 533                gpe_xrupt->previous = next_gpe_xrupt;
 534        } else {
 535                acpi_gbl_gpe_xrupt_list_head = gpe_xrupt;
 536        }
 537        acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
 538
 539        /* Install new interrupt handler if not SCI_INT */
 540
 541        if (interrupt_number != acpi_gbl_FADT.sci_interrupt) {
 542                status = acpi_os_install_interrupt_handler(interrupt_number,
 543                                                           acpi_ev_gpe_xrupt_handler,
 544                                                           gpe_xrupt);
 545                if (ACPI_FAILURE(status)) {
 546                        ACPI_ERROR((AE_INFO,
 547                                    "Could not install GPE interrupt handler at level 0x%X",
 548                                    interrupt_number));
 549                        return_PTR(NULL);
 550                }
 551        }
 552
 553        return_PTR(gpe_xrupt);
 554}
 555
 556/*******************************************************************************
 557 *
 558 * FUNCTION:    acpi_ev_delete_gpe_xrupt
 559 *
 560 * PARAMETERS:  gpe_xrupt       - A GPE interrupt info block
 561 *
 562 * RETURN:      Status
 563 *
 564 * DESCRIPTION: Remove and free a gpe_xrupt block. Remove an associated
 565 *              interrupt handler if not the SCI interrupt.
 566 *
 567 ******************************************************************************/
 568
 569static acpi_status
 570acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt)
 571{
 572        acpi_status status;
 573        acpi_cpu_flags flags;
 574
 575        ACPI_FUNCTION_TRACE(ev_delete_gpe_xrupt);
 576
 577        /* We never want to remove the SCI interrupt handler */
 578
 579        if (gpe_xrupt->interrupt_number == acpi_gbl_FADT.sci_interrupt) {
 580                gpe_xrupt->gpe_block_list_head = NULL;
 581                return_ACPI_STATUS(AE_OK);
 582        }
 583
 584        /* Disable this interrupt */
 585
 586        status =
 587            acpi_os_remove_interrupt_handler(gpe_xrupt->interrupt_number,
 588                                             acpi_ev_gpe_xrupt_handler);
 589        if (ACPI_FAILURE(status)) {
 590                return_ACPI_STATUS(status);
 591        }
 592
 593        /* Unlink the interrupt block with lock */
 594
 595        flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
 596        if (gpe_xrupt->previous) {
 597                gpe_xrupt->previous->next = gpe_xrupt->next;
 598        } else {
 599                /* No previous, update list head */
 600
 601                acpi_gbl_gpe_xrupt_list_head = gpe_xrupt->next;
 602        }
 603
 604        if (gpe_xrupt->next) {
 605                gpe_xrupt->next->previous = gpe_xrupt->previous;
 606        }
 607        acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
 608
 609        /* Free the block */
 610
 611        ACPI_FREE(gpe_xrupt);
 612        return_ACPI_STATUS(AE_OK);
 613}
 614
 615/*******************************************************************************
 616 *
 617 * FUNCTION:    acpi_ev_install_gpe_block
 618 *
 619 * PARAMETERS:  gpe_block               - New GPE block
 620 *              interrupt_number        - Xrupt to be associated with this
 621 *                                        GPE block
 622 *
 623 * RETURN:      Status
 624 *
 625 * DESCRIPTION: Install new GPE block with mutex support
 626 *
 627 ******************************************************************************/
 628
 629static acpi_status
 630acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
 631                          u32 interrupt_number)
 632{
 633        struct acpi_gpe_block_info *next_gpe_block;
 634        struct acpi_gpe_xrupt_info *gpe_xrupt_block;
 635        acpi_status status;
 636        acpi_cpu_flags flags;
 637
 638        ACPI_FUNCTION_TRACE(ev_install_gpe_block);
 639
 640        status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
 641        if (ACPI_FAILURE(status)) {
 642                return_ACPI_STATUS(status);
 643        }
 644
 645        gpe_xrupt_block = acpi_ev_get_gpe_xrupt_block(interrupt_number);
 646        if (!gpe_xrupt_block) {
 647                status = AE_NO_MEMORY;
 648                goto unlock_and_exit;
 649        }
 650
 651        /* Install the new block at the end of the list with lock */
 652
 653        flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
 654        if (gpe_xrupt_block->gpe_block_list_head) {
 655                next_gpe_block = gpe_xrupt_block->gpe_block_list_head;
 656                while (next_gpe_block->next) {
 657                        next_gpe_block = next_gpe_block->next;
 658                }
 659
 660                next_gpe_block->next = gpe_block;
 661                gpe_block->previous = next_gpe_block;
 662        } else {
 663                gpe_xrupt_block->gpe_block_list_head = gpe_block;
 664        }
 665
 666        gpe_block->xrupt_block = gpe_xrupt_block;
 667        acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
 668
 669      unlock_and_exit:
 670        status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
 671        return_ACPI_STATUS(status);
 672}
 673
 674/*******************************************************************************
 675 *
 676 * FUNCTION:    acpi_ev_delete_gpe_block
 677 *
 678 * PARAMETERS:  gpe_block           - Existing GPE block
 679 *
 680 * RETURN:      Status
 681 *
 682 * DESCRIPTION: Remove a GPE block
 683 *
 684 ******************************************************************************/
 685
 686acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block)
 687{
 688        acpi_status status;
 689        acpi_cpu_flags flags;
 690
  691        ACPI_FUNCTION_TRACE(ev_delete_gpe_block);
 692
 693        status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
 694        if (ACPI_FAILURE(status)) {
 695                return_ACPI_STATUS(status);
 696        }
 697
 698        /* Disable all GPEs in this block */
 699
 700        status =
 701            acpi_hw_disable_gpe_block(gpe_block->xrupt_block, gpe_block, NULL);
 702
 703        if (!gpe_block->previous && !gpe_block->next) {
 704
 705                /* This is the last gpe_block on this interrupt */
 706
 707                status = acpi_ev_delete_gpe_xrupt(gpe_block->xrupt_block);
 708                if (ACPI_FAILURE(status)) {
 709                        goto unlock_and_exit;
 710                }
 711        } else {
 712                /* Remove the block on this interrupt with lock */
 713
 714                flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
 715                if (gpe_block->previous) {
 716                        gpe_block->previous->next = gpe_block->next;
 717                } else {
 718                        gpe_block->xrupt_block->gpe_block_list_head =
 719                            gpe_block->next;
 720                }
 721
 722                if (gpe_block->next) {
 723                        gpe_block->next->previous = gpe_block->previous;
 724                }
 725                acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
 726        }
 727
 728        acpi_current_gpe_count -=
 729            gpe_block->register_count * ACPI_GPE_REGISTER_WIDTH;
 730
 731        /* Free the gpe_block */
 732
 733        ACPI_FREE(gpe_block->register_info);
 734        ACPI_FREE(gpe_block->event_info);
 735        ACPI_FREE(gpe_block);
 736
 737      unlock_and_exit:
 738        status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
 739        return_ACPI_STATUS(status);
 740}
 741
 742/*******************************************************************************
 743 *
 744 * FUNCTION:    acpi_ev_create_gpe_info_blocks
 745 *
 746 * PARAMETERS:  gpe_block   - New GPE block
 747 *
 748 * RETURN:      Status
 749 *
 750 * DESCRIPTION: Create the register_info and event_info blocks for this GPE block
 751 *
 752 ******************************************************************************/
 753
 754static acpi_status
 755acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
 756{
 757        struct acpi_gpe_register_info *gpe_register_info = NULL;
 758        struct acpi_gpe_event_info *gpe_event_info = NULL;
 759        struct acpi_gpe_event_info *this_event;
 760        struct acpi_gpe_register_info *this_register;
 761        u32 i;
 762        u32 j;
 763        acpi_status status;
 764
 765        ACPI_FUNCTION_TRACE(ev_create_gpe_info_blocks);
 766
 767        /* Allocate the GPE register information block */
 768
 769        gpe_register_info = ACPI_ALLOCATE_ZEROED((acpi_size) gpe_block->
 770                                                 register_count *
 771                                                 sizeof(struct
 772                                                        acpi_gpe_register_info));
 773        if (!gpe_register_info) {
 774                ACPI_ERROR((AE_INFO,
 775                            "Could not allocate the GpeRegisterInfo table"));
 776                return_ACPI_STATUS(AE_NO_MEMORY);
 777        }
 778
 779        /*
 780         * Allocate the GPE event_info block. There are eight distinct GPEs
 781         * per register. Initialization to zeros is sufficient.
 782         */
 783        gpe_event_info = ACPI_ALLOCATE_ZEROED(((acpi_size) gpe_block->
 784                                               register_count *
 785                                               ACPI_GPE_REGISTER_WIDTH) *
 786                                              sizeof(struct
 787                                                     acpi_gpe_event_info));
 788        if (!gpe_event_info) {
 789                ACPI_ERROR((AE_INFO,
 790                            "Could not allocate the GpeEventInfo table"));
 791                status = AE_NO_MEMORY;
 792                goto error_exit;
 793        }
 794
 795        /* Save the new Info arrays in the GPE block */
 796
 797        gpe_block->register_info = gpe_register_info;
 798        gpe_block->event_info = gpe_event_info;
 799
 800        /*
 801         * Initialize the GPE Register and Event structures. A goal of these
 802         * tables is to hide the fact that there are two separate GPE register
  803         * sets in a given GPE hardware block: the status registers occupy the
  804         * first half and the enable registers occupy the second half.
 805         */
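        /*
         * Illustrative example: for a GPE block at address B with
         * register_count = 4, the status bytes occupy B+0 through B+3 and
         * the enable bytes occupy B+4 through B+7. Register i then covers
         * GPE numbers (block_base_number + i*8) through
         * (block_base_number + i*8 + 7).
         */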
 806        this_register = gpe_register_info;
 807        this_event = gpe_event_info;
 808
 809        for (i = 0; i < gpe_block->register_count; i++) {
 810
 811                /* Init the register_info for this GPE register (8 GPEs) */
 812
 813                this_register->base_gpe_number =
 814                    (u8) (gpe_block->block_base_number +
 815                          (i * ACPI_GPE_REGISTER_WIDTH));
 816
 817                this_register->status_address.address =
 818                    gpe_block->block_address.address + i;
 819
 820                this_register->enable_address.address =
 821                    gpe_block->block_address.address + i +
 822                    gpe_block->register_count;
 823
 824                this_register->status_address.space_id =
 825                    gpe_block->block_address.space_id;
 826                this_register->enable_address.space_id =
 827                    gpe_block->block_address.space_id;
 828                this_register->status_address.bit_width =
 829                    ACPI_GPE_REGISTER_WIDTH;
 830                this_register->enable_address.bit_width =
 831                    ACPI_GPE_REGISTER_WIDTH;
 832                this_register->status_address.bit_offset = 0;
 833                this_register->enable_address.bit_offset = 0;
 834
 835                /* Init the event_info for each GPE within this register */
 836
 837                for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
 838                        this_event->gpe_number =
 839                            (u8) (this_register->base_gpe_number + j);
 840                        this_event->register_info = this_register;
 841                        this_event++;
 842                }
 843
 844                /* Disable all GPEs within this register */
 845
 846                status = acpi_hw_write(0x00, &this_register->enable_address);
 847                if (ACPI_FAILURE(status)) {
 848                        goto error_exit;
 849                }
 850
 851                /* Clear any pending GPE events within this register */
 852
 853                status = acpi_hw_write(0xFF, &this_register->status_address);
 854                if (ACPI_FAILURE(status)) {
 855                        goto error_exit;
 856                }
 857
 858                this_register++;
 859        }
 860
 861        return_ACPI_STATUS(AE_OK);
 862
 863      error_exit:
 864        if (gpe_register_info) {
 865                ACPI_FREE(gpe_register_info);
 866        }
 867        if (gpe_event_info) {
 868                ACPI_FREE(gpe_event_info);
 869        }
 870
 871        return_ACPI_STATUS(status);
 872}
 873
 874/*******************************************************************************
 875 *
 876 * FUNCTION:    acpi_ev_create_gpe_block
 877 *
 878 * PARAMETERS:  gpe_device          - Handle to the parent GPE block
  879 *              gpe_block_address   - Address and space_id
 880 *              register_count      - Number of GPE register pairs in the block
 881 *              gpe_block_base_number - Starting GPE number for the block
 882 *              interrupt_number    - H/W interrupt for the block
 883 *              return_gpe_block    - Where the new block descriptor is returned
 884 *
 885 * RETURN:      Status
 886 *
 887 * DESCRIPTION: Create and Install a block of GPE registers. All GPEs within
 888 *              the block are disabled at exit.
 889 *              Note: Assumes namespace is locked.
 890 *
 891 ******************************************************************************/
 892
 893acpi_status
 894acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
 895                         struct acpi_generic_address *gpe_block_address,
 896                         u32 register_count,
 897                         u8 gpe_block_base_number,
 898                         u32 interrupt_number,
 899                         struct acpi_gpe_block_info **return_gpe_block)
 900{
 901        acpi_status status;
 902        struct acpi_gpe_block_info *gpe_block;
 903
 904        ACPI_FUNCTION_TRACE(ev_create_gpe_block);
 905
 906        if (!register_count) {
 907                return_ACPI_STATUS(AE_OK);
 908        }
 909
 910        /* Allocate a new GPE block */
 911
 912        gpe_block = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_block_info));
 913        if (!gpe_block) {
 914                return_ACPI_STATUS(AE_NO_MEMORY);
 915        }
 916
 917        /* Initialize the new GPE block */
 918
 919        gpe_block->node = gpe_device;
 920        gpe_block->register_count = register_count;
 921        gpe_block->block_base_number = gpe_block_base_number;
 922
 923        ACPI_MEMCPY(&gpe_block->block_address, gpe_block_address,
 924                    sizeof(struct acpi_generic_address));
 925
 926        /*
 927         * Create the register_info and event_info sub-structures
 928         * Note: disables and clears all GPEs in the block
 929         */
 930        status = acpi_ev_create_gpe_info_blocks(gpe_block);
 931        if (ACPI_FAILURE(status)) {
 932                ACPI_FREE(gpe_block);
 933                return_ACPI_STATUS(status);
 934        }
 935
 936        /* Install the new block in the global lists */
 937
 938        status = acpi_ev_install_gpe_block(gpe_block, interrupt_number);
 939        if (ACPI_FAILURE(status)) {
 940                ACPI_FREE(gpe_block);
 941                return_ACPI_STATUS(status);
 942        }
 943
 944        /* Find all GPE methods (_Lxx, _Exx) for this block */
 945
 946        status = acpi_ns_walk_namespace(ACPI_TYPE_METHOD, gpe_device,
 947                                        ACPI_UINT32_MAX, ACPI_NS_WALK_NO_UNLOCK,
 948                                        acpi_ev_save_method_info, gpe_block,
 949                                        NULL);
 950
 951        /* Return the new block */
 952
 953        if (return_gpe_block) {
 954                (*return_gpe_block) = gpe_block;
 955        }
 956
 957        ACPI_DEBUG_PRINT((ACPI_DB_INIT,
 958                          "GPE %02X to %02X [%4.4s] %u regs on int 0x%X\n",
 959                          (u32) gpe_block->block_base_number,
 960                          (u32) (gpe_block->block_base_number +
 961                                 ((gpe_block->register_count *
 962                                   ACPI_GPE_REGISTER_WIDTH) - 1)),
 963                          gpe_device->name.ascii, gpe_block->register_count,
 964                          interrupt_number));
 965
 966        /* Update global count of currently available GPEs */
 967
 968        acpi_current_gpe_count += register_count * ACPI_GPE_REGISTER_WIDTH;
 969        return_ACPI_STATUS(AE_OK);
 970}
 971
 972/*******************************************************************************
 973 *
 974 * FUNCTION:    acpi_ev_initialize_gpe_block
 975 *
 976 * PARAMETERS:  gpe_device          - Handle to the parent GPE block
 977 *              gpe_block           - Gpe Block info
 978 *
 979 * RETURN:      Status
 980 *
 981 * DESCRIPTION: Initialize and enable a GPE block. First find and run any
  982 *              _PRW methods associated with the block, then enable the
 983 *              appropriate GPEs.
 984 *              Note: Assumes namespace is locked.
 985 *
 986 ******************************************************************************/
 987
 988acpi_status
 989acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
 990                             struct acpi_gpe_block_info *gpe_block)
 991{
 992        acpi_status status;
 993        struct acpi_gpe_event_info *gpe_event_info;
 994        struct acpi_gpe_walk_info gpe_info;
 995        u32 wake_gpe_count;
 996        u32 gpe_enabled_count;
 997        u32 i;
 998        u32 j;
 999
1000        ACPI_FUNCTION_TRACE(ev_initialize_gpe_block);
1001
1002        /* Ignore a null GPE block (e.g., if no GPE block 1 exists) */
1003
1004        if (!gpe_block) {
1005                return_ACPI_STATUS(AE_OK);
1006        }
1007
1008        /*
1009         * Runtime option: Should wake GPEs be enabled at runtime?  The default
 1010         * is no; they should only be enabled just as the machine goes to sleep.
1011         */
1012        if (acpi_gbl_leave_wake_gpes_disabled) {
1013                /*
1014                 * Differentiate runtime vs wake GPEs, via the _PRW control methods.
1015                 * Each GPE that has one or more _PRWs that reference it is by
1016                 * definition a wake GPE and will not be enabled while the machine
1017                 * is running.
1018                 */
1019                gpe_info.gpe_block = gpe_block;
1020                gpe_info.gpe_device = gpe_device;
1021
1022                status =
1023                    acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
1024                                           ACPI_UINT32_MAX, ACPI_NS_WALK_UNLOCK,
1025                                           acpi_ev_match_prw_and_gpe, &gpe_info,
1026                                           NULL);
1027        }
1028
1029        /*
1030         * Enable all GPEs in this block that have these attributes:
1031         * 1) are "runtime" or "run/wake" GPEs, and
1032         * 2) have a corresponding _Lxx or _Exx method
1033         *
1034         * Any other GPEs within this block must be enabled via the
1035         * acpi_enable_gpe() external interface.
1036         */
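        /*
         * Illustrative example: a GPE that has an _Lxx or _Exx method and is
         * not referenced by any _PRW remains typed ACPI_GPE_TYPE_RUNTIME and
         * is enabled below; a GPE matched by a _PRW above was re-typed to
         * ACPI_GPE_TYPE_WAKE and stays disabled until the system prepares
         * to sleep.
         */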
1037        wake_gpe_count = 0;
1038        gpe_enabled_count = 0;
1039
1040        for (i = 0; i < gpe_block->register_count; i++) {
1041                for (j = 0; j < 8; j++) {
1042
1043                        /* Get the info block for this particular GPE */
1044
1045                        gpe_event_info = &gpe_block->event_info[((acpi_size) i *
1046                                                                 ACPI_GPE_REGISTER_WIDTH)
1047                                                                + j];
1048
1049                        if (((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
1050                             ACPI_GPE_DISPATCH_METHOD) &&
1051                            (gpe_event_info->flags & ACPI_GPE_TYPE_RUNTIME)) {
1052                                gpe_enabled_count++;
1053                        }
1054
1055                        if (gpe_event_info->flags & ACPI_GPE_TYPE_WAKE) {
1056                                wake_gpe_count++;
1057                        }
1058                }
1059        }
1060
1061        ACPI_DEBUG_PRINT((ACPI_DB_INIT,
1062                          "Found %u Wake, Enabled %u Runtime GPEs in this block\n",
1063                          wake_gpe_count, gpe_enabled_count));
1064
1065        /* Enable all valid runtime GPEs found above */
1066
1067        status = acpi_hw_enable_runtime_gpe_block(NULL, gpe_block, NULL);
1068        if (ACPI_FAILURE(status)) {
1069                ACPI_ERROR((AE_INFO, "Could not enable GPEs in GpeBlock %p",
1070                            gpe_block));
1071        }
1072
1073        return_ACPI_STATUS(status);
1074}
1075
1076/*******************************************************************************
1077 *
1078 * FUNCTION:    acpi_ev_gpe_initialize
1079 *
1080 * PARAMETERS:  None
1081 *
1082 * RETURN:      Status
1083 *
1084 * DESCRIPTION: Initialize the GPE data structures
1085 *
1086 ******************************************************************************/
1087
1088acpi_status acpi_ev_gpe_initialize(void)
1089{
1090        u32 register_count0 = 0;
1091        u32 register_count1 = 0;
1092        u32 gpe_number_max = 0;
1093        acpi_status status;
1094
1095        ACPI_FUNCTION_TRACE(ev_gpe_initialize);
1096
1097        status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
1098        if (ACPI_FAILURE(status)) {
1099                return_ACPI_STATUS(status);
1100        }
1101
1102        /*
1103         * Initialize the GPE Block(s) defined in the FADT
1104         *
1105         * Why the GPE register block lengths are divided by 2:  From the ACPI
1106         * Spec, section "General-Purpose Event Registers", we have:
1107         *
1108         * "Each register block contains two registers of equal length
1109         *  GPEx_STS and GPEx_EN (where x is 0 or 1). The length of the
 1110         *  GPE0_STS and GPE0_EN registers is equal to half the GPE0_LEN.
1111         *  The length of the GPE1_STS and GPE1_EN registers is equal to
1112         *  half the GPE1_LEN. If a generic register block is not supported
1113         *  then its respective block pointer and block length values in the
1114         *  FADT table contain zeros. The GPE0_LEN and GPE1_LEN do not need
1115         *  to be the same size."
1116         */
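        /*
         * Illustrative example: a FADT reporting GPE0_BLK_LEN = 8 yields
         * register_count0 = 8 / 2 = 4 register pairs, i.e. 4 * 8 = 32 GPEs
         * numbered 0x00 through 0x1F, so gpe_number_max = 0x1F.
         */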
1117
1118        /*
1119         * Determine the maximum GPE number for this machine.
1120         *
1121         * Note: both GPE0 and GPE1 are optional, and either can exist without
1122         * the other.
1123         *
1124         * If EITHER the register length OR the block address are zero, then that
1125         * particular block is not supported.
1126         */
1127        if (acpi_gbl_FADT.gpe0_block_length &&
1128            acpi_gbl_FADT.xgpe0_block.address) {
1129
1130                /* GPE block 0 exists (has both length and address > 0) */
1131
1132                register_count0 = (u16) (acpi_gbl_FADT.gpe0_block_length / 2);
1133
1134                gpe_number_max =
1135                    (register_count0 * ACPI_GPE_REGISTER_WIDTH) - 1;
1136
1137                /* Install GPE Block 0 */
1138
1139                status = acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
1140                                                  &acpi_gbl_FADT.xgpe0_block,
1141                                                  register_count0, 0,
1142                                                  acpi_gbl_FADT.sci_interrupt,
1143                                                  &acpi_gbl_gpe_fadt_blocks[0]);
1144
1145                if (ACPI_FAILURE(status)) {
1146                        ACPI_EXCEPTION((AE_INFO, status,
1147                                        "Could not create GPE Block 0"));
1148                }
1149        }
1150
1151        if (acpi_gbl_FADT.gpe1_block_length &&
1152            acpi_gbl_FADT.xgpe1_block.address) {
1153
1154                /* GPE block 1 exists (has both length and address > 0) */
1155
1156                register_count1 = (u16) (acpi_gbl_FADT.gpe1_block_length / 2);
1157
1158                /* Check for GPE0/GPE1 overlap (if both banks exist) */
1159
1160                if ((register_count0) &&
1161                    (gpe_number_max >= acpi_gbl_FADT.gpe1_base)) {
1162                        ACPI_ERROR((AE_INFO,
1163                                    "GPE0 block (GPE 0 to %d) overlaps the GPE1 block "
1164                                    "(GPE %d to %d) - Ignoring GPE1",
1165                                    gpe_number_max, acpi_gbl_FADT.gpe1_base,
1166                                    acpi_gbl_FADT.gpe1_base +
1167                                    ((register_count1 *
1168                                      ACPI_GPE_REGISTER_WIDTH) - 1)));
1169
1170                        /* Ignore GPE1 block by setting the register count to zero */
1171
1172                        register_count1 = 0;
1173                } else {
1174                        /* Install GPE Block 1 */
1175
1176                        status =
1177                            acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
1178                                                     &acpi_gbl_FADT.xgpe1_block,
1179                                                     register_count1,
1180                                                     acpi_gbl_FADT.gpe1_base,
1181                                                     acpi_gbl_FADT.
1182                                                     sci_interrupt,
1183                                                     &acpi_gbl_gpe_fadt_blocks
1184                                                     [1]);
1185
1186                        if (ACPI_FAILURE(status)) {
1187                                ACPI_EXCEPTION((AE_INFO, status,
1188                                                "Could not create GPE Block 1"));
1189                        }
1190
1191                        /*
1192                         * GPE0 and GPE1 do not have to be contiguous in the GPE number
1193                         * space. However, GPE0 always starts at GPE number zero.
1194                         */
1195                        gpe_number_max = acpi_gbl_FADT.gpe1_base +
1196                            ((register_count1 * ACPI_GPE_REGISTER_WIDTH) - 1);
1197                }
1198        }
1199
1200        /* Exit if there are no GPE registers */
1201
1202        if ((register_count0 + register_count1) == 0) {
1203
1204                /* GPEs are not required by ACPI, this is OK */
1205
1206                ACPI_DEBUG_PRINT((ACPI_DB_INIT,
1207                                  "There are no GPE blocks defined in the FADT\n"));
1208                status = AE_OK;
1209                goto cleanup;
1210        }
1211
1212        /* Check for Max GPE number out-of-range */
1213
1214        if (gpe_number_max > ACPI_GPE_MAX) {
1215                ACPI_ERROR((AE_INFO,
1216                            "Maximum GPE number from FADT is too large: 0x%X",
1217                            gpe_number_max));
1218                status = AE_BAD_VALUE;
1219                goto cleanup;
1220        }
1221
1222      cleanup:
1223        (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
1224        return_ACPI_STATUS(AE_OK);
1225}
1226