/* linux/drivers/acpi/acpi_ipmi.c */
   1/*
   2 *  acpi_ipmi.c - ACPI IPMI opregion
   3 *
   4 *  Copyright (C) 2010, 2013 Intel Corporation
   5 *    Author: Zhao Yakui <yakui.zhao@intel.com>
   6 *            Lv Zheng <lv.zheng@intel.com>
   7 *
   8 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
   9 *
  10 *  This program is free software; you can redistribute it and/or modify
  11 *  it under the terms of the GNU General Public License as published by
  12 *  the Free Software Foundation; either version 2 of the License, or (at
  13 *  your option) any later version.
  14 *
  15 *  This program is distributed in the hope that it will be useful, but
  16 *  WITHOUT ANY WARRANTY; without even the implied warranty of
  17 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  18 *  General Public License for more details.
  19 *
  20 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  21 */
  22
  23#include <linux/module.h>
  24#include <linux/acpi.h>
  25#include <linux/ipmi.h>
  26#include <linux/spinlock.h>
  27
  28MODULE_AUTHOR("Zhao Yakui");
  29MODULE_DESCRIPTION("ACPI IPMI Opregion driver");
  30MODULE_LICENSE("GPL");
  31
  32#define ACPI_IPMI_OK                    0
  33#define ACPI_IPMI_TIMEOUT               0x10
  34#define ACPI_IPMI_UNKNOWN               0x07
  35/* the IPMI timeout is 5s */
  36#define IPMI_TIMEOUT                    (5000)
  37#define ACPI_IPMI_MAX_MSG_LENGTH        64
  38
/*
 * Per-interface state for one IPMI system interface (BMC) discovered
 * through ACPI.  Lifetime is reference counted via @kref; the final
 * put tears it down in ipmi_dev_release().
 */
struct acpi_ipmi_device {
        /* the device list attached to driver_data.ipmi_devices */
        struct list_head head;

        /* the IPMI request message list */
        struct list_head tx_msg_list;

        /* protects tx_msg_list and curr_msgid */
        spinlock_t tx_msg_lock;
        /* ACPI handle of the SMI this device was created from */
        acpi_handle handle;
        /* underlying struct device; referenced via get_device() */
        struct device *dev;
        /* messaging user created by ipmi_create_user() */
        ipmi_user_t user_interface;
        int ipmi_ifnum; /* IPMI interface number */
        /* last message id handed out (incremented under tx_msg_lock) */
        long curr_msgid;
        /* set by __ipmi_dev_kill(); blocks queueing of new tx_msgs */
        bool dead;
        struct kref kref;
};
  55
/*
 * Driver-wide bookkeeping; a single static instance (driver_data)
 * exists for the whole module.
 */
struct ipmi_driver_data {
        /* all registered acpi_ipmi_device instances */
        struct list_head ipmi_devices;
        /* SMI watcher whose callbacks maintain ipmi_devices */
        struct ipmi_smi_watcher bmc_events;
        /* receive handler installed for every IPMI user we create */
        const struct ipmi_user_hndl ipmi_hndlrs;
        /* serializes ipmi_devices and selected_smi */
        struct mutex ipmi_lock;

        /*
         * NOTE: IPMI System Interface Selection
         * There is no system interface specified by the IPMI operation
         * region access.  We try to select one system interface with ACPI
         * handle set.  IPMI messages passed from the ACPI codes are sent
         * to this selected global IPMI system interface.
         */
        struct acpi_ipmi_device *selected_smi;
};
  71
/*
 * One in-flight IPMI request created by the OpRegion space handler.
 * Reference counted: the requester holds one reference and the
 * device's tx_msg_list holds another while the message is queued.
 */
struct acpi_ipmi_msg {
        /* link into acpi_ipmi_device.tx_msg_list */
        struct list_head head;

        /*
         * General speaking the addr type should be SI_ADDR_TYPE. And
         * the addr channel should be BMC.
         * In fact it can also be IPMB type. But we will have to
         * parse it from the Netfn command buffer. It is so complex
         * that it is skipped.
         */
        struct ipmi_addr addr;
        /* matched against ipmi_recv_msg.msgid in ipmi_msg_handler() */
        long tx_msgid;

        /* it is used to track whether the IPMI message is finished */
        struct completion tx_complete;

        struct kernel_ipmi_msg tx_message;
        /* ACPI_IPMI_OK / ACPI_IPMI_TIMEOUT / ACPI_IPMI_UNKNOWN */
        int msg_done;

        /* tx/rx data . And copy it from/to ACPI object buffer */
        u8 data[ACPI_IPMI_MAX_MSG_LENGTH];
        u8 rx_len;

        /* interface this message is bound to (holds a device reference) */
        struct acpi_ipmi_device *device;
        struct kref kref;
};
  98
/* IPMI request/response buffer per ACPI 4.0, sec 5.5.2.4.3.2 */
struct acpi_ipmi_buffer {
        u8 status;      /* completion/status code handed back to AML */
        u8 length;      /* number of valid bytes in data[] */
        u8 data[ACPI_IPMI_MAX_MSG_LENGTH];
};
 105
 106static void ipmi_register_bmc(int iface, struct device *dev);
 107static void ipmi_bmc_gone(int iface);
 108static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data);
 109
/*
 * Global driver state: the device list, the SMI watcher that populates
 * it, and the receive handler shared by all IPMI users created here.
 */
static struct ipmi_driver_data driver_data = {
        .ipmi_devices = LIST_HEAD_INIT(driver_data.ipmi_devices),
        .bmc_events = {
                .owner = THIS_MODULE,
                .new_smi = ipmi_register_bmc,
                .smi_gone = ipmi_bmc_gone,
        },
        .ipmi_hndlrs = {
                .ipmi_recv_hndl = ipmi_msg_handler,
        },
        .ipmi_lock = __MUTEX_INITIALIZER(driver_data.ipmi_lock)
};
 122
 123static struct acpi_ipmi_device *
 124ipmi_dev_alloc(int iface, struct device *dev, acpi_handle handle)
 125{
 126        struct acpi_ipmi_device *ipmi_device;
 127        int err;
 128        ipmi_user_t user;
 129
 130        ipmi_device = kzalloc(sizeof(*ipmi_device), GFP_KERNEL);
 131        if (!ipmi_device)
 132                return NULL;
 133
 134        kref_init(&ipmi_device->kref);
 135        INIT_LIST_HEAD(&ipmi_device->head);
 136        INIT_LIST_HEAD(&ipmi_device->tx_msg_list);
 137        spin_lock_init(&ipmi_device->tx_msg_lock);
 138        ipmi_device->handle = handle;
 139        ipmi_device->dev = get_device(dev);
 140        ipmi_device->ipmi_ifnum = iface;
 141
 142        err = ipmi_create_user(iface, &driver_data.ipmi_hndlrs,
 143                               ipmi_device, &user);
 144        if (err) {
 145                put_device(dev);
 146                kfree(ipmi_device);
 147                return NULL;
 148        }
 149        ipmi_device->user_interface = user;
 150
 151        return ipmi_device;
 152}
 153
 154static void ipmi_dev_release(struct acpi_ipmi_device *ipmi_device)
 155{
 156        ipmi_destroy_user(ipmi_device->user_interface);
 157        put_device(ipmi_device->dev);
 158        kfree(ipmi_device);
 159}
 160
 161static void ipmi_dev_release_kref(struct kref *kref)
 162{
 163        struct acpi_ipmi_device *ipmi =
 164                container_of(kref, struct acpi_ipmi_device, kref);
 165
 166        ipmi_dev_release(ipmi);
 167}
 168
/*
 * Unlink @ipmi_device from the global device list and mark it dead so
 * acpi_ipmi_space_handler() will not queue new tx_msgs on it.  Caller
 * must hold driver_data.ipmi_lock and still owns a reference; it is
 * expected to flush pending messages and drop that reference afterwards.
 */
static void __ipmi_dev_kill(struct acpi_ipmi_device *ipmi_device)
{
        list_del(&ipmi_device->head);
        if (driver_data.selected_smi == ipmi_device)
                driver_data.selected_smi = NULL;

        /*
         * Always setting dead flag after deleting from the list or
         * list_for_each_entry() codes must get changed.
         */
        ipmi_device->dead = true;
}
 181
 182static struct acpi_ipmi_device *acpi_ipmi_dev_get(void)
 183{
 184        struct acpi_ipmi_device *ipmi_device = NULL;
 185
 186        mutex_lock(&driver_data.ipmi_lock);
 187        if (driver_data.selected_smi) {
 188                ipmi_device = driver_data.selected_smi;
 189                kref_get(&ipmi_device->kref);
 190        }
 191        mutex_unlock(&driver_data.ipmi_lock);
 192
 193        return ipmi_device;
 194}
 195
 196static void acpi_ipmi_dev_put(struct acpi_ipmi_device *ipmi_device)
 197{
 198        kref_put(&ipmi_device->kref, ipmi_dev_release_kref);
 199}
 200
 201static struct acpi_ipmi_msg *ipmi_msg_alloc(void)
 202{
 203        struct acpi_ipmi_device *ipmi;
 204        struct acpi_ipmi_msg *ipmi_msg;
 205
 206        ipmi = acpi_ipmi_dev_get();
 207        if (!ipmi)
 208                return NULL;
 209
 210        ipmi_msg = kzalloc(sizeof(struct acpi_ipmi_msg), GFP_KERNEL);
 211        if (!ipmi_msg) {
 212                acpi_ipmi_dev_put(ipmi);
 213                return NULL;
 214        }
 215
 216        kref_init(&ipmi_msg->kref);
 217        init_completion(&ipmi_msg->tx_complete);
 218        INIT_LIST_HEAD(&ipmi_msg->head);
 219        ipmi_msg->device = ipmi;
 220        ipmi_msg->msg_done = ACPI_IPMI_UNKNOWN;
 221
 222        return ipmi_msg;
 223}
 224
 225static void ipmi_msg_release(struct acpi_ipmi_msg *tx_msg)
 226{
 227        acpi_ipmi_dev_put(tx_msg->device);
 228        kfree(tx_msg);
 229}
 230
 231static void ipmi_msg_release_kref(struct kref *kref)
 232{
 233        struct acpi_ipmi_msg *tx_msg =
 234                container_of(kref, struct acpi_ipmi_msg, kref);
 235
 236        ipmi_msg_release(tx_msg);
 237}
 238
 239static struct acpi_ipmi_msg *acpi_ipmi_msg_get(struct acpi_ipmi_msg *tx_msg)
 240{
 241        kref_get(&tx_msg->kref);
 242
 243        return tx_msg;
 244}
 245
 246static void acpi_ipmi_msg_put(struct acpi_ipmi_msg *tx_msg)
 247{
 248        kref_put(&tx_msg->kref, ipmi_msg_release_kref);
 249}
 250
 251#define IPMI_OP_RGN_NETFN(offset)       ((offset >> 8) & 0xff)
 252#define IPMI_OP_RGN_CMD(offset)         (offset & 0xff)
/*
 * Build the outgoing IPMI request in @tx_msg from an OpRegion write:
 * decode netfn/cmd from @address, copy the payload from the
 * acpi_ipmi_buffer that @value points at, address the message at the
 * BMC over the system interface, and assign a fresh message id.
 *
 * Returns 0 on success, or -EINVAL if the declared payload length
 * exceeds ACPI_IPMI_MAX_MSG_LENGTH.
 */
static int acpi_format_ipmi_request(struct acpi_ipmi_msg *tx_msg,
                                    acpi_physical_address address,
                                    acpi_integer *value)
{
        struct kernel_ipmi_msg *msg;
        struct acpi_ipmi_buffer *buffer;
        struct acpi_ipmi_device *device;
        unsigned long flags;

        msg = &tx_msg->tx_message;

        /*
         * IPMI network function and command are encoded in the address
         * within the IPMI OpRegion; see ACPI 4.0, sec 5.5.2.4.3.
         */
        msg->netfn = IPMI_OP_RGN_NETFN(address);
        msg->cmd = IPMI_OP_RGN_CMD(address);
        msg->data = tx_msg->data;

        /*
         * value is the parameter passed by the IPMI opregion space handler.
         * It points to the IPMI request message buffer
         */
        buffer = (struct acpi_ipmi_buffer *)value;

        /* copy the tx message data; reject lengths that would overflow data[] */
        if (buffer->length > ACPI_IPMI_MAX_MSG_LENGTH) {
                dev_WARN_ONCE(tx_msg->device->dev, true,
                              "Unexpected request (msg len %d).\n",
                              buffer->length);
                return -EINVAL;
        }
        msg->data_len = buffer->length;
        memcpy(tx_msg->data, buffer->data, msg->data_len);

        /*
         * now the default type is SYSTEM_INTERFACE and channel type is BMC.
         * If the netfn is APP_REQUEST and the cmd is SEND_MESSAGE,
         * the addr type should be changed to IPMB. Then we will have to parse
         * the IPMI request message buffer to get the IPMB address.
         * If so, please fix me.
         */
        tx_msg->addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
        tx_msg->addr.channel = IPMI_BMC_CHANNEL;
        tx_msg->addr.data[0] = 0;

        /* Get the msgid; curr_msgid is shared, so bump it under the lock */
        device = tx_msg->device;

        spin_lock_irqsave(&device->tx_msg_lock, flags);
        device->curr_msgid++;
        tx_msg->tx_msgid = device->curr_msgid;
        spin_unlock_irqrestore(&device->tx_msg_lock, flags);

        return 0;
}
 309
 310static void acpi_format_ipmi_response(struct acpi_ipmi_msg *msg,
 311                                      acpi_integer *value)
 312{
 313        struct acpi_ipmi_buffer *buffer;
 314
 315        /*
 316         * value is also used as output parameter. It represents the response
 317         * IPMI message returned by IPMI command.
 318         */
 319        buffer = (struct acpi_ipmi_buffer *)value;
 320
 321        /*
 322         * If the flag of msg_done is not set, it means that the IPMI command is
 323         * not executed correctly.
 324         */
 325        buffer->status = msg->msg_done;
 326        if (msg->msg_done != ACPI_IPMI_OK)
 327                return;
 328
 329        /*
 330         * If the IPMI response message is obtained correctly, the status code
 331         * will be ACPI_IPMI_OK
 332         */
 333        buffer->length = msg->rx_len;
 334        memcpy(buffer->data, msg->data, msg->rx_len);
 335}
 336
/*
 * Complete and drop every message still queued on @ipmi->tx_msg_list,
 * waking the threads sleeping in acpi_ipmi_space_handler().  The
 * spinlock is released around each complete()/put pair since the
 * final put can reach ipmi_destroy_user() — presumably not safe
 * under a spinlock (TODO confirm).
 */
static void ipmi_flush_tx_msg(struct acpi_ipmi_device *ipmi)
{
        struct acpi_ipmi_msg *tx_msg;
        unsigned long flags;

        /*
         * NOTE: On-going ipmi_recv_msg
         * ipmi_msg_handler() may still be invoked by ipmi_si after
         * flushing.  But it is safe to do a fast flushing on module_exit()
         * without waiting for all ipmi_recv_msg(s) to complete from
         * ipmi_msg_handler() as it is ensured by ipmi_si that all
         * ipmi_recv_msg(s) are freed after invoking ipmi_destroy_user().
         */
        spin_lock_irqsave(&ipmi->tx_msg_lock, flags);
        while (!list_empty(&ipmi->tx_msg_list)) {
                tx_msg = list_first_entry(&ipmi->tx_msg_list,
                                          struct acpi_ipmi_msg,
                                          head);
                list_del(&tx_msg->head);
                spin_unlock_irqrestore(&ipmi->tx_msg_lock, flags);

                /* wake up the sleep thread on the Tx msg */
                complete(&tx_msg->tx_complete);
                acpi_ipmi_msg_put(tx_msg);
                spin_lock_irqsave(&ipmi->tx_msg_lock, flags);
        }
        spin_unlock_irqrestore(&ipmi->tx_msg_lock, flags);
}
 365
 366static void ipmi_cancel_tx_msg(struct acpi_ipmi_device *ipmi,
 367                               struct acpi_ipmi_msg *msg)
 368{
 369        struct acpi_ipmi_msg *tx_msg, *temp;
 370        bool msg_found = false;
 371        unsigned long flags;
 372
 373        spin_lock_irqsave(&ipmi->tx_msg_lock, flags);
 374        list_for_each_entry_safe(tx_msg, temp, &ipmi->tx_msg_list, head) {
 375                if (msg == tx_msg) {
 376                        msg_found = true;
 377                        list_del(&tx_msg->head);
 378                        break;
 379                }
 380        }
 381        spin_unlock_irqrestore(&ipmi->tx_msg_lock, flags);
 382
 383        if (msg_found)
 384                acpi_ipmi_msg_put(tx_msg);
 385}
 386
/*
 * IPMI receive handler (driver_data.ipmi_hndlrs.ipmi_recv_hndl).
 * Matches the response against the pending tx_msg list by msgid,
 * copies the payload into the tx_msg (or records a timeout status),
 * wakes the waiting requester, and always frees the incoming
 * ipmi_recv_msg.
 */
static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
{
        struct acpi_ipmi_device *ipmi_device = user_msg_data;
        bool msg_found = false;
        struct acpi_ipmi_msg *tx_msg, *temp;
        struct device *dev = ipmi_device->dev;
        unsigned long flags;

        /* responses must arrive via the user we created for this device */
        if (msg->user != ipmi_device->user_interface) {
                dev_warn(dev,
                         "Unexpected response is returned. returned user %p, expected user %p\n",
                         msg->user, ipmi_device->user_interface);
                goto out_msg;
        }

        /* find and unlink the pending request with a matching msgid */
        spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
        list_for_each_entry_safe(tx_msg, temp, &ipmi_device->tx_msg_list, head) {
                if (msg->msgid == tx_msg->tx_msgid) {
                        msg_found = true;
                        list_del(&tx_msg->head);
                        break;
                }
        }
        spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);

        if (!msg_found) {
                dev_warn(dev,
                         "Unexpected response (msg id %ld) is returned.\n",
                         msg->msgid);
                goto out_msg;
        }

        /* copy the response data to Rx_data buffer */
        if (msg->msg.data_len > ACPI_IPMI_MAX_MSG_LENGTH) {
                dev_WARN_ONCE(dev, true,
                              "Unexpected response (msg len %d).\n",
                              msg->msg.data_len);
                goto out_comp;
        }

        /* response msg is an error msg */
        msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
        /*
         * NOTE(review): recv_type is assigned immediately above, so the
         * first half of the condition below is always true — confirm
         * whether the assignment (rather than a pure check) is intended.
         */
        if (msg->recv_type == IPMI_RESPONSE_RECV_TYPE &&
            msg->msg.data_len == 1) {
                /* a single-byte payload is a completion code, not data */
                if (msg->msg.data[0] == IPMI_TIMEOUT_COMPLETION_CODE) {
                        dev_dbg_once(dev, "Unexpected response (timeout).\n");
                        tx_msg->msg_done = ACPI_IPMI_TIMEOUT;
                }
                goto out_comp;
        }

        tx_msg->rx_len = msg->msg.data_len;
        memcpy(tx_msg->data, msg->msg.data, tx_msg->rx_len);
        tx_msg->msg_done = ACPI_IPMI_OK;

out_comp:
        /* wake the requester and drop the list's reference on tx_msg */
        complete(&tx_msg->tx_complete);
        acpi_ipmi_msg_put(tx_msg);
out_msg:
        ipmi_free_recv_msg(msg);
}
 448
/*
 * new_smi watcher callback: a system interface has appeared.  Only
 * interfaces enumerated via ACPI (SI_ACPI with a valid handle) are
 * tracked; the first device registered becomes the selected SMI.
 * The reference on smi_data.dev obtained via ipmi_get_smi_info() is
 * put on every exit path — ipmi_dev_alloc() takes its own.
 */
static void ipmi_register_bmc(int iface, struct device *dev)
{
        struct acpi_ipmi_device *ipmi_device, *temp;
        int err;
        struct ipmi_smi_info smi_data;
        acpi_handle handle;

        err = ipmi_get_smi_info(iface, &smi_data);
        if (err)
                return;

        /* only ACPI-enumerated interfaces with a handle are of interest */
        if (smi_data.addr_src != SI_ACPI)
                goto err_ref;
        handle = smi_data.addr_info.acpi_info.acpi_handle;
        if (!handle)
                goto err_ref;

        ipmi_device = ipmi_dev_alloc(iface, smi_data.dev, handle);
        if (!ipmi_device) {
                dev_warn(smi_data.dev, "Can't create IPMI user interface\n");
                goto err_ref;
        }

        mutex_lock(&driver_data.ipmi_lock);
        list_for_each_entry(temp, &driver_data.ipmi_devices, head) {
                /*
                 * if the corresponding ACPI handle is already added
                 * to the device list, don't add it again.
                 */
                if (temp->handle == handle)
                        goto err_lock;
        }
        if (!driver_data.selected_smi)
                driver_data.selected_smi = ipmi_device;
        list_add_tail(&ipmi_device->head, &driver_data.ipmi_devices);
        mutex_unlock(&driver_data.ipmi_lock);

        put_device(smi_data.dev);
        return;

err_lock:
        mutex_unlock(&driver_data.ipmi_lock);
        /* duplicate handle: discard the device we just allocated */
        ipmi_dev_release(ipmi_device);
err_ref:
        put_device(smi_data.dev);
        return;
}
 496
 497static void ipmi_bmc_gone(int iface)
 498{
 499        struct acpi_ipmi_device *ipmi_device, *temp;
 500        bool dev_found = false;
 501
 502        mutex_lock(&driver_data.ipmi_lock);
 503        list_for_each_entry_safe(ipmi_device, temp,
 504                                 &driver_data.ipmi_devices, head) {
 505                if (ipmi_device->ipmi_ifnum != iface) {
 506                        dev_found = true;
 507                        __ipmi_dev_kill(ipmi_device);
 508                        break;
 509                }
 510        }
 511        if (!driver_data.selected_smi)
 512                driver_data.selected_smi = list_first_entry_or_null(
 513                                        &driver_data.ipmi_devices,
 514                                        struct acpi_ipmi_device, head);
 515        mutex_unlock(&driver_data.ipmi_lock);
 516
 517        if (dev_found) {
 518                ipmi_flush_tx_msg(ipmi_device);
 519                acpi_ipmi_dev_put(ipmi_device);
 520        }
 521}
 522
 523/*
 524 * This is the IPMI opregion space handler.
 525 * @function: indicates the read/write. In fact as the IPMI message is driven
 526 * by command, only write is meaningful.
 527 * @address: This contains the netfn/command of IPMI request message.
 528 * @bits   : not used.
 529 * @value  : it is an in/out parameter. It points to the IPMI message buffer.
 530 *           Before the IPMI message is sent, it represents the actual request
 531 *           IPMI message. After the IPMI message is finished, it represents
 532 *           the response IPMI message returned by IPMI command.
 533 * @handler_context: IPMI device context.
 534 */
static acpi_status
acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
                        u32 bits, acpi_integer *value,
                        void *handler_context, void *region_context)
{
        struct acpi_ipmi_msg *tx_msg;
        struct acpi_ipmi_device *ipmi_device;
        int err;
        acpi_status status;
        unsigned long flags;

        /*
         * IPMI opregion message.
         * IPMI message is firstly written to the BMC and system software
         * can get the respsonse. So it is unmeaningful for the read access
         * of IPMI opregion.
         */
        if ((function & ACPI_IO_MASK) == ACPI_READ)
                return AE_TYPE;

        tx_msg = ipmi_msg_alloc();
        if (!tx_msg)
                return AE_NOT_EXIST;
        ipmi_device = tx_msg->device;

        if (acpi_format_ipmi_request(tx_msg, address, value) != 0) {
                ipmi_msg_release(tx_msg);
                return AE_TYPE;
        }

        /* second reference is owned by the tx_msg_list entry added below */
        acpi_ipmi_msg_get(tx_msg);
        mutex_lock(&driver_data.ipmi_lock);
        /* Do not add a tx_msg that can not be flushed. */
        if (ipmi_device->dead) {
                mutex_unlock(&driver_data.ipmi_lock);
                ipmi_msg_release(tx_msg);
                return AE_NOT_EXIST;
        }
        spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
        list_add_tail(&tx_msg->head, &ipmi_device->tx_msg_list);
        spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
        mutex_unlock(&driver_data.ipmi_lock);

        err = ipmi_request_settime(ipmi_device->user_interface,
                                   &tx_msg->addr,
                                   tx_msg->tx_msgid,
                                   &tx_msg->tx_message,
                                   NULL, 0, 0, IPMI_TIMEOUT);
        if (err) {
                status = AE_ERROR;
                goto out_msg;
        }
        /* woken by ipmi_msg_handler() or ipmi_flush_tx_msg() */
        wait_for_completion(&tx_msg->tx_complete);

        acpi_format_ipmi_response(tx_msg, value);
        status = AE_OK;

out_msg:
        /* drop the list's reference (if still queued), then our own */
        ipmi_cancel_tx_msg(ipmi_device, tx_msg);
        acpi_ipmi_msg_put(tx_msg);
        return status;
}
 597
 598static int __init acpi_ipmi_init(void)
 599{
 600        int result;
 601        acpi_status status;
 602
 603        if (acpi_disabled)
 604                return 0;
 605
 606        status = acpi_install_address_space_handler(ACPI_ROOT_OBJECT,
 607                                                    ACPI_ADR_SPACE_IPMI,
 608                                                    &acpi_ipmi_space_handler,
 609                                                    NULL, NULL);
 610        if (ACPI_FAILURE(status)) {
 611                pr_warn("Can't register IPMI opregion space handle\n");
 612                return -EINVAL;
 613        }
 614        result = ipmi_smi_watcher_register(&driver_data.bmc_events);
 615        if (result)
 616                pr_err("Can't register IPMI system interface watcher\n");
 617
 618        return result;
 619}
 620
/*
 * Module exit: unregister the watcher, kill and flush every remaining
 * device (the watcher's smi_gone callback is not invoked on
 * unregister), then remove the OpRegion handler.
 */
static void __exit acpi_ipmi_exit(void)
{
        struct acpi_ipmi_device *ipmi_device;

        if (acpi_disabled)
                return;

        ipmi_smi_watcher_unregister(&driver_data.bmc_events);

        /*
         * When one smi_watcher is unregistered, it is only deleted
         * from the smi_watcher list. But the smi_gone callback function
         * is not called. So explicitly uninstall the ACPI IPMI oregion
         * handler and free it.
         */
        mutex_lock(&driver_data.ipmi_lock);
        while (!list_empty(&driver_data.ipmi_devices)) {
                ipmi_device = list_first_entry(&driver_data.ipmi_devices,
                                               struct acpi_ipmi_device,
                                               head);
                __ipmi_dev_kill(ipmi_device);
                /* the device is unlinked; drop the mutex while flushing */
                mutex_unlock(&driver_data.ipmi_lock);

                ipmi_flush_tx_msg(ipmi_device);
                acpi_ipmi_dev_put(ipmi_device);

                mutex_lock(&driver_data.ipmi_lock);
        }
        mutex_unlock(&driver_data.ipmi_lock);
        acpi_remove_address_space_handler(ACPI_ROOT_OBJECT,
                                          ACPI_ADR_SPACE_IPMI,
                                          &acpi_ipmi_space_handler);
}
 654
 655module_init(acpi_ipmi_init);
 656module_exit(acpi_ipmi_exit);
 657