linux/drivers/acpi/acpi_ipmi.c
/*
 *  acpi_ipmi.c - ACPI IPMI opregion
 *
 *  Copyright (C) 2010, 2013 Intel Corporation
 *    Author: Zhao Yakui <yakui.zhao@intel.com>
 *            Lv Zheng <lv.zheng@intel.com>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/module.h>
#include <linux/acpi.h>
#include <linux/ipmi.h>
#include <linux/spinlock.h>

MODULE_AUTHOR("Zhao Yakui");
MODULE_DESCRIPTION("ACPI IPMI Opregion driver");
MODULE_LICENSE("GPL");

#define ACPI_IPMI_OK                    0
#define ACPI_IPMI_TIMEOUT               0x10
#define ACPI_IPMI_UNKNOWN               0x07
/* the IPMI timeout is 5s */
#define IPMI_TIMEOUT                    (5000)
#define ACPI_IPMI_MAX_MSG_LENGTH        64
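
/*
 * ACPI_IPMI_OK/TIMEOUT/UNKNOWN end up in the status byte of the response
 * buffer handed back to AML (see acpi_format_ipmi_response()).  IPMI_TIMEOUT
 * is in milliseconds and is passed to ipmi_request_settime() from
 * acpi_ipmi_space_handler().
 */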

struct acpi_ipmi_device {
        /* the device list attached to driver_data.ipmi_devices */
        struct list_head head;

        /* the IPMI request message list */
        struct list_head tx_msg_list;

        spinlock_t tx_msg_lock;
        acpi_handle handle;
        struct device *dev;
        ipmi_user_t user_interface;
        int ipmi_ifnum; /* IPMI interface number */
        long curr_msgid;
        bool dead;
        struct kref kref;
};

struct ipmi_driver_data {
        struct list_head ipmi_devices;
        struct ipmi_smi_watcher bmc_events;
        struct ipmi_user_hndl ipmi_hndlrs;
        struct mutex ipmi_lock;

        /*
         * NOTE: IPMI System Interface Selection
         * The IPMI operation region access does not specify which system
         * interface to use.  We try to select one system interface that
         * has an ACPI handle set.  IPMI messages passed from the ACPI code
         * are sent to this selected global IPMI system interface.
         */
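        /*
         * selected_smi is picked in ipmi_register_bmc() whenever no
         * interface is currently selected, and re-picked in ipmi_bmc_gone()
         * when the selected one disappears.
         */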
        struct acpi_ipmi_device *selected_smi;
};

struct acpi_ipmi_msg {
        struct list_head head;

        /*
         * Generally speaking, the addr type should be SI_ADDR_TYPE and
         * the addr channel should be BMC.
         * In fact it can also be the IPMB type, but we would have to
         * parse it from the NetFn command buffer, which is complex
         * enough that it is skipped.
         */
        struct ipmi_addr addr;
        long tx_msgid;

        /* used to track whether the IPMI message has finished */
        struct completion tx_complete;

        struct kernel_ipmi_msg tx_message;
        int msg_done;

        /* tx/rx data, copied from/to the ACPI object buffer */
        u8 data[ACPI_IPMI_MAX_MSG_LENGTH];
        u8 rx_len;

        struct acpi_ipmi_device *device;
        struct kref kref;
};

/* IPMI request/response buffer per ACPI 4.0, sec 5.5.2.4.3.2 */
struct acpi_ipmi_buffer {
        u8 status;
        u8 length;
        u8 data[ACPI_IPMI_MAX_MSG_LENGTH];
};
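
/*
 * The same layout is used in both directions: acpi_format_ipmi_request()
 * consumes only .length and .data from the buffer that AML passes in, and
 * acpi_format_ipmi_response() fills in .status (one of the ACPI_IPMI_*
 * codes above), .length and .data with the reply.
 */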

static void ipmi_register_bmc(int iface, struct device *dev);
static void ipmi_bmc_gone(int iface);
static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data);

static struct ipmi_driver_data driver_data = {
        .ipmi_devices = LIST_HEAD_INIT(driver_data.ipmi_devices),
        .bmc_events = {
                .owner = THIS_MODULE,
                .new_smi = ipmi_register_bmc,
                .smi_gone = ipmi_bmc_gone,
        },
        .ipmi_hndlrs = {
                .ipmi_recv_hndl = ipmi_msg_handler,
        },
        .ipmi_lock = __MUTEX_INITIALIZER(driver_data.ipmi_lock)
};

static struct acpi_ipmi_device *
ipmi_dev_alloc(int iface, struct device *dev, acpi_handle handle)
{
        struct acpi_ipmi_device *ipmi_device;
        int err;
        ipmi_user_t user;

        ipmi_device = kzalloc(sizeof(*ipmi_device), GFP_KERNEL);
        if (!ipmi_device)
                return NULL;

        kref_init(&ipmi_device->kref);
        INIT_LIST_HEAD(&ipmi_device->head);
        INIT_LIST_HEAD(&ipmi_device->tx_msg_list);
        spin_lock_init(&ipmi_device->tx_msg_lock);
        ipmi_device->handle = handle;
        ipmi_device->dev = get_device(dev);
        ipmi_device->ipmi_ifnum = iface;

        err = ipmi_create_user(iface, &driver_data.ipmi_hndlrs,
                               ipmi_device, &user);
        if (err) {
                put_device(dev);
                kfree(ipmi_device);
                return NULL;
        }
        ipmi_device->user_interface = user;

        return ipmi_device;
}

static void ipmi_dev_release(struct acpi_ipmi_device *ipmi_device)
{
        ipmi_destroy_user(ipmi_device->user_interface);
        put_device(ipmi_device->dev);
        kfree(ipmi_device);
}

static void ipmi_dev_release_kref(struct kref *kref)
{
        struct acpi_ipmi_device *ipmi =
                container_of(kref, struct acpi_ipmi_device, kref);

        ipmi_dev_release(ipmi);
}

static void __ipmi_dev_kill(struct acpi_ipmi_device *ipmi_device)
{
        list_del(&ipmi_device->head);
        if (driver_data.selected_smi == ipmi_device)
                driver_data.selected_smi = NULL;

        /*
         * Always set the dead flag after deleting from the list, or the
         * list_for_each_entry() code must be changed accordingly.
         */
        ipmi_device->dead = true;
}

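/*
 * Reference counting (as implemented below): ipmi_dev_alloc() starts a
 * device at one reference, which is owned by driver_data.ipmi_devices and
 * dropped by ipmi_bmc_gone()/acpi_ipmi_exit() after __ipmi_dev_kill().
 * Every in-flight tx_msg holds an extra reference taken via
 * acpi_ipmi_dev_get().
 */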
static struct acpi_ipmi_device *acpi_ipmi_dev_get(void)
{
        struct acpi_ipmi_device *ipmi_device = NULL;

        mutex_lock(&driver_data.ipmi_lock);
        if (driver_data.selected_smi) {
                ipmi_device = driver_data.selected_smi;
                kref_get(&ipmi_device->kref);
        }
        mutex_unlock(&driver_data.ipmi_lock);

        return ipmi_device;
}

static void acpi_ipmi_dev_put(struct acpi_ipmi_device *ipmi_device)
{
        kref_put(&ipmi_device->kref, ipmi_dev_release_kref);
}

static struct acpi_ipmi_msg *ipmi_msg_alloc(void)
{
        struct acpi_ipmi_device *ipmi;
        struct acpi_ipmi_msg *ipmi_msg;

        ipmi = acpi_ipmi_dev_get();
        if (!ipmi)
                return NULL;

        ipmi_msg = kzalloc(sizeof(struct acpi_ipmi_msg), GFP_KERNEL);
        if (!ipmi_msg) {
                acpi_ipmi_dev_put(ipmi);
                return NULL;
        }

        kref_init(&ipmi_msg->kref);
        init_completion(&ipmi_msg->tx_complete);
        INIT_LIST_HEAD(&ipmi_msg->head);
        ipmi_msg->device = ipmi;
        ipmi_msg->msg_done = ACPI_IPMI_UNKNOWN;

        return ipmi_msg;
}

static void ipmi_msg_release(struct acpi_ipmi_msg *tx_msg)
{
        acpi_ipmi_dev_put(tx_msg->device);
        kfree(tx_msg);
}

static void ipmi_msg_release_kref(struct kref *kref)
{
        struct acpi_ipmi_msg *tx_msg =
                container_of(kref, struct acpi_ipmi_msg, kref);

        ipmi_msg_release(tx_msg);
}

static struct acpi_ipmi_msg *acpi_ipmi_msg_get(struct acpi_ipmi_msg *tx_msg)
{
        kref_get(&tx_msg->kref);

        return tx_msg;
}

static void acpi_ipmi_msg_put(struct acpi_ipmi_msg *tx_msg)
{
        kref_put(&tx_msg->kref, ipmi_msg_release_kref);
}

#define IPMI_OP_RGN_NETFN(offset)       ((offset >> 8) & 0xff)
#define IPMI_OP_RGN_CMD(offset)         (offset & 0xff)
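/*
 * Illustrative example (region base and offset are made up): for an IPMI
 * operation region declared with a base of 0x3000, AML access to a field
 * at offset 0x22 arrives here with address 0x3022, which decodes to
 * NetFn 0x30 and command 0x22.
 */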
static int acpi_format_ipmi_request(struct acpi_ipmi_msg *tx_msg,
                                    acpi_physical_address address,
                                    acpi_integer *value)
{
        struct kernel_ipmi_msg *msg;
        struct acpi_ipmi_buffer *buffer;
        struct acpi_ipmi_device *device;
        unsigned long flags;

        msg = &tx_msg->tx_message;

        /*
         * IPMI network function and command are encoded in the address
         * within the IPMI OpRegion; see ACPI 4.0, sec 5.5.2.4.3.
         */
        msg->netfn = IPMI_OP_RGN_NETFN(address);
        msg->cmd = IPMI_OP_RGN_CMD(address);
        msg->data = tx_msg->data;

        /*
         * value is the parameter passed by the IPMI opregion space handler.
         * It points to the IPMI request message buffer.
         */
        buffer = (struct acpi_ipmi_buffer *)value;

        /* copy the tx message data */
        if (buffer->length > ACPI_IPMI_MAX_MSG_LENGTH) {
                dev_WARN_ONCE(tx_msg->device->dev, true,
                              "Unexpected request (msg len %d).\n",
                              buffer->length);
                return -EINVAL;
        }
        msg->data_len = buffer->length;
        memcpy(tx_msg->data, buffer->data, msg->data_len);

        /*
         * The default addr type is SYSTEM_INTERFACE and the channel type is
         * BMC.  If the netfn is APP_REQUEST and the cmd is SEND_MESSAGE,
         * the addr type should be changed to IPMB, and we would then have
         * to parse the IPMI request message buffer to get the IPMB address.
         * If so, please fix me.
         */
        tx_msg->addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
        tx_msg->addr.channel = IPMI_BMC_CHANNEL;
        tx_msg->addr.data[0] = 0;

        /* Get the msgid */
        device = tx_msg->device;

        spin_lock_irqsave(&device->tx_msg_lock, flags);
        device->curr_msgid++;
        tx_msg->tx_msgid = device->curr_msgid;
        spin_unlock_irqrestore(&device->tx_msg_lock, flags);

        return 0;
}

static void acpi_format_ipmi_response(struct acpi_ipmi_msg *msg,
                                      acpi_integer *value)
{
        struct acpi_ipmi_buffer *buffer;

        /*
         * value is also used as an output parameter.  It represents the
         * response IPMI message returned by the IPMI command.
         */
        buffer = (struct acpi_ipmi_buffer *)value;

        /*
         * If msg_done is not ACPI_IPMI_OK, the IPMI command was not
         * executed correctly.
         */
        buffer->status = msg->msg_done;
        if (msg->msg_done != ACPI_IPMI_OK)
                return;

        /*
         * If the IPMI response message was obtained correctly, the status
         * code is ACPI_IPMI_OK and the payload is copied out.
         */
        buffer->length = msg->rx_len;
        memcpy(buffer->data, msg->data, msg->rx_len);
}

static void ipmi_flush_tx_msg(struct acpi_ipmi_device *ipmi)
{
        struct acpi_ipmi_msg *tx_msg;
        unsigned long flags;

        /*
         * NOTE: On-going ipmi_recv_msg
         * ipmi_msg_handler() may still be invoked by ipmi_si after
         * flushing.  But it is safe to do a fast flushing on module_exit()
         * without waiting for all ipmi_recv_msg(s) to complete from
         * ipmi_msg_handler() as it is ensured by ipmi_si that all
         * ipmi_recv_msg(s) are freed after invoking ipmi_destroy_user().
         */
        spin_lock_irqsave(&ipmi->tx_msg_lock, flags);
        while (!list_empty(&ipmi->tx_msg_list)) {
                tx_msg = list_first_entry(&ipmi->tx_msg_list,
                                          struct acpi_ipmi_msg,
                                          head);
                list_del(&tx_msg->head);
                spin_unlock_irqrestore(&ipmi->tx_msg_lock, flags);

                /* wake up the thread sleeping on the Tx msg */
                complete(&tx_msg->tx_complete);
                acpi_ipmi_msg_put(tx_msg);
                spin_lock_irqsave(&ipmi->tx_msg_lock, flags);
        }
        spin_unlock_irqrestore(&ipmi->tx_msg_lock, flags);
}

static void ipmi_cancel_tx_msg(struct acpi_ipmi_device *ipmi,
                               struct acpi_ipmi_msg *msg)
{
        struct acpi_ipmi_msg *tx_msg, *temp;
        bool msg_found = false;
        unsigned long flags;

        spin_lock_irqsave(&ipmi->tx_msg_lock, flags);
        list_for_each_entry_safe(tx_msg, temp, &ipmi->tx_msg_list, head) {
                if (msg == tx_msg) {
                        msg_found = true;
                        list_del(&tx_msg->head);
                        break;
                }
        }
        spin_unlock_irqrestore(&ipmi->tx_msg_lock, flags);

        if (msg_found)
                acpi_ipmi_msg_put(tx_msg);
}

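/*
 * Response callback registered through driver_data.ipmi_hndlrs: it matches
 * an incoming response against the pending tx_msg_list by msgid and
 * completes the waiter in acpi_ipmi_space_handler().
 */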
static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
{
        struct acpi_ipmi_device *ipmi_device = user_msg_data;
        bool msg_found = false;
        struct acpi_ipmi_msg *tx_msg, *temp;
        struct device *dev = ipmi_device->dev;
        unsigned long flags;

        if (msg->user != ipmi_device->user_interface) {
                dev_warn(dev,
                         "Unexpected response is returned. returned user %p, expected user %p\n",
                         msg->user, ipmi_device->user_interface);
                goto out_msg;
        }

        spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
        list_for_each_entry_safe(tx_msg, temp, &ipmi_device->tx_msg_list, head) {
                if (msg->msgid == tx_msg->tx_msgid) {
                        msg_found = true;
                        list_del(&tx_msg->head);
                        break;
                }
        }
        spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);

        if (!msg_found) {
                dev_warn(dev,
                         "Unexpected response (msg id %ld) is returned.\n",
                         msg->msgid);
                goto out_msg;
        }

        /* copy the response data to Rx_data buffer */
        if (msg->msg.data_len > ACPI_IPMI_MAX_MSG_LENGTH) {
                dev_WARN_ONCE(dev, true,
                              "Unexpected response (msg len %d).\n",
                              msg->msg.data_len);
                goto out_comp;
        }

        /* response msg is an error msg */
        msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
        if (msg->recv_type == IPMI_RESPONSE_RECV_TYPE &&
            msg->msg.data_len == 1) {
                if (msg->msg.data[0] == IPMI_TIMEOUT_COMPLETION_CODE) {
                        dev_WARN_ONCE(dev, true,
                                      "Unexpected response (timeout).\n");
                        tx_msg->msg_done = ACPI_IPMI_TIMEOUT;
                }
                goto out_comp;
        }

        tx_msg->rx_len = msg->msg.data_len;
        memcpy(tx_msg->data, msg->msg.data, tx_msg->rx_len);
        tx_msg->msg_done = ACPI_IPMI_OK;

out_comp:
        complete(&tx_msg->tx_complete);
        acpi_ipmi_msg_put(tx_msg);
out_msg:
        ipmi_free_recv_msg(msg);
}

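/*
 * smi_watcher "new_smi" callback: called for every IPMI system interface
 * that already exists when the watcher is registered and for any that show
 * up later.  Only interfaces described by ACPI (SI_ACPI with a valid ACPI
 * handle) are added to the device list.
 */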
static void ipmi_register_bmc(int iface, struct device *dev)
{
        struct acpi_ipmi_device *ipmi_device, *temp;
        int err;
        struct ipmi_smi_info smi_data;
        acpi_handle handle;

        err = ipmi_get_smi_info(iface, &smi_data);
        if (err)
                return;

        if (smi_data.addr_src != SI_ACPI)
                goto err_ref;
        handle = smi_data.addr_info.acpi_info.acpi_handle;
        if (!handle)
                goto err_ref;

        ipmi_device = ipmi_dev_alloc(iface, smi_data.dev, handle);
        if (!ipmi_device) {
                dev_warn(smi_data.dev, "Can't create IPMI user interface\n");
                goto err_ref;
        }

        mutex_lock(&driver_data.ipmi_lock);
        list_for_each_entry(temp, &driver_data.ipmi_devices, head) {
                /*
                 * if the corresponding ACPI handle is already added
                 * to the device list, don't add it again.
                 */
                if (temp->handle == handle)
                        goto err_lock;
        }
        if (!driver_data.selected_smi)
                driver_data.selected_smi = ipmi_device;
        list_add_tail(&ipmi_device->head, &driver_data.ipmi_devices);
        mutex_unlock(&driver_data.ipmi_lock);

        put_device(smi_data.dev);
        return;

err_lock:
        mutex_unlock(&driver_data.ipmi_lock);
        ipmi_dev_release(ipmi_device);
err_ref:
        put_device(smi_data.dev);
        return;
}

static void ipmi_bmc_gone(int iface)
{
        struct acpi_ipmi_device *ipmi_device, *temp;
        bool dev_found = false;

        mutex_lock(&driver_data.ipmi_lock);
        list_for_each_entry_safe(ipmi_device, temp,
                                 &driver_data.ipmi_devices, head) {
                if (ipmi_device->ipmi_ifnum == iface) {
                        dev_found = true;
                        __ipmi_dev_kill(ipmi_device);
                        break;
                }
        }
        if (!driver_data.selected_smi)
                driver_data.selected_smi = list_first_entry_or_null(
                                        &driver_data.ipmi_devices,
                                        struct acpi_ipmi_device, head);
        mutex_unlock(&driver_data.ipmi_lock);

        if (dev_found) {
                ipmi_flush_tx_msg(ipmi_device);
                acpi_ipmi_dev_put(ipmi_device);
        }
}


/*
 * This is the IPMI opregion space handler.
 * @function: indicates read or write.  Since IPMI messages are command
 *            driven, only write accesses are meaningful.
 * @address: contains the NetFn/command of the IPMI request message.
 * @bits   : not used.
 * @value  : an in/out parameter.  It points to the IPMI message buffer.
 *           Before the IPMI message is sent, it holds the request;
 *           after the transfer finishes, it holds the response returned
 *           by the IPMI command.
 * @handler_context: IPMI device context.
 */
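/*
 * Note that the handler is synchronous: it queues the request and then
 * waits (wait_for_completion()) for the BMC response, or for a flush when
 * the interface goes away, before returning to the AML interpreter.
 */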
static acpi_status
acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
                        u32 bits, acpi_integer *value,
                        void *handler_context, void *region_context)
{
        struct acpi_ipmi_msg *tx_msg;
        struct acpi_ipmi_device *ipmi_device;
        int err;
        acpi_status status;
        unsigned long flags;

        /*
         * IPMI opregion message.
         * An IPMI message is first written to the BMC and the system
         * software then gets the response, so a read access of the IPMI
         * opregion is meaningless.
         */
        if ((function & ACPI_IO_MASK) == ACPI_READ)
                return AE_TYPE;

        tx_msg = ipmi_msg_alloc();
        if (!tx_msg)
                return AE_NOT_EXIST;
        ipmi_device = tx_msg->device;

        if (acpi_format_ipmi_request(tx_msg, address, value) != 0) {
                ipmi_msg_release(tx_msg);
                return AE_TYPE;
        }

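        /*
         * The extra reference taken here is owned by tx_msg_list and is
         * dropped by whoever removes the message from the list again:
         * ipmi_msg_handler(), ipmi_flush_tx_msg() or ipmi_cancel_tx_msg().
         */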
        acpi_ipmi_msg_get(tx_msg);
        mutex_lock(&driver_data.ipmi_lock);
        /* Do not add a tx_msg that can not be flushed. */
        if (ipmi_device->dead) {
                mutex_unlock(&driver_data.ipmi_lock);
                ipmi_msg_release(tx_msg);
                return AE_NOT_EXIST;
        }
        spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
        list_add_tail(&tx_msg->head, &ipmi_device->tx_msg_list);
        spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
        mutex_unlock(&driver_data.ipmi_lock);

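        /*
         * Hand the request to the IPMI core.  user_msg_data is left NULL
         * (the response is matched by msgid instead) and IPMI_TIMEOUT, in
         * milliseconds, is passed as the retry-time argument of
         * ipmi_request_settime().
         */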
        err = ipmi_request_settime(ipmi_device->user_interface,
                                   &tx_msg->addr,
                                   tx_msg->tx_msgid,
                                   &tx_msg->tx_message,
                                   NULL, 0, 0, IPMI_TIMEOUT);
        if (err) {
                status = AE_ERROR;
                goto out_msg;
        }
        wait_for_completion(&tx_msg->tx_complete);

        acpi_format_ipmi_response(tx_msg, value);
        status = AE_OK;

out_msg:
        ipmi_cancel_tx_msg(ipmi_device, tx_msg);
        acpi_ipmi_msg_put(tx_msg);
        return status;
}

static int __init acpi_ipmi_init(void)
{
        int result;
        acpi_status status;

        if (acpi_disabled)
                return 0;

        status = acpi_install_address_space_handler(ACPI_ROOT_OBJECT,
                                                    ACPI_ADR_SPACE_IPMI,
                                                    &acpi_ipmi_space_handler,
                                                    NULL, NULL);
        if (ACPI_FAILURE(status)) {
                pr_warn("Can't register IPMI opregion space handler\n");
                return -EINVAL;
        }
        result = ipmi_smi_watcher_register(&driver_data.bmc_events);
        if (result)
                pr_err("Can't register IPMI system interface watcher\n");

        return result;
}

static void __exit acpi_ipmi_exit(void)
{
        struct acpi_ipmi_device *ipmi_device;

        if (acpi_disabled)
                return;

        ipmi_smi_watcher_unregister(&driver_data.bmc_events);

        /*
         * When an smi_watcher is unregistered, it is only deleted from the
         * smi_watcher list and the smi_gone callback is not invoked.  So
         * explicitly tear down and free the remaining ACPI IPMI devices
         * here, then remove the ACPI IPMI opregion handler.
         */
        mutex_lock(&driver_data.ipmi_lock);
        while (!list_empty(&driver_data.ipmi_devices)) {
                ipmi_device = list_first_entry(&driver_data.ipmi_devices,
                                               struct acpi_ipmi_device,
                                               head);
                __ipmi_dev_kill(ipmi_device);
                mutex_unlock(&driver_data.ipmi_lock);

                ipmi_flush_tx_msg(ipmi_device);
                acpi_ipmi_dev_put(ipmi_device);

                mutex_lock(&driver_data.ipmi_lock);
        }
        mutex_unlock(&driver_data.ipmi_lock);
        acpi_remove_address_space_handler(ACPI_ROOT_OBJECT,
                                          ACPI_ADR_SPACE_IPMI,
                                          &acpi_ipmi_space_handler);
}

module_init(acpi_ipmi_init);
module_exit(acpi_ipmi_exit);