linux/drivers/char/ipmi/ipmi_si_intf.c
   1/*
   2 * ipmi_si_intf.c
   3 *
   4 * The interface to the IPMI driver for the system interfaces (KCS, SMIC,
   5 * BT).
   6 *
   7 * Author: MontaVista Software, Inc.
   8 *         Corey Minyard <minyard@mvista.com>
   9 *         source@mvista.com
  10 *
  11 * Copyright 2002 MontaVista Software Inc.
  12 * Copyright 2006 IBM Corp., Christian Krafft <krafft@de.ibm.com>
  13 *
  14 *  This program is free software; you can redistribute it and/or modify it
  15 *  under the terms of the GNU General Public License as published by the
  16 *  Free Software Foundation; either version 2 of the License, or (at your
  17 *  option) any later version.
  18 *
  19 *
  20 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
  21 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
  22 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  23 *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  24 *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
  25 *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
  26 *  OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  27 *  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
  28 *  TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
  29 *  USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  30 *
  31 *  You should have received a copy of the GNU General Public License along
  32 *  with this program; if not, write to the Free Software Foundation, Inc.,
  33 *  675 Mass Ave, Cambridge, MA 02139, USA.
  34 */
  35
  36/*
  37 * This file holds the "policy" for the interface to the SMI state
  38 * machine.  It does the configuration, handles timers and interrupts,
  39 * and drives the real SMI state machine.
  40 */
  41
  42#include <linux/module.h>
  43#include <linux/moduleparam.h>
  44#include <linux/sched.h>
  45#include <linux/seq_file.h>
  46#include <linux/timer.h>
  47#include <linux/errno.h>
  48#include <linux/spinlock.h>
  49#include <linux/slab.h>
  50#include <linux/delay.h>
  51#include <linux/list.h>
  52#include <linux/pci.h>
  53#include <linux/ioport.h>
  54#include <linux/notifier.h>
  55#include <linux/mutex.h>
  56#include <linux/kthread.h>
  57#include <asm/irq.h>
  58#include <linux/interrupt.h>
  59#include <linux/rcupdate.h>
  60#include <linux/ipmi.h>
  61#include <linux/ipmi_smi.h>
  62#include <asm/io.h>
  63#include "ipmi_si_sm.h"
  64#include <linux/init.h>
  65#include <linux/dmi.h>
  66#include <linux/string.h>
  67#include <linux/ctype.h>
  68#include <linux/pnp.h>
  69#include <linux/of_device.h>
  70#include <linux/of_platform.h>
  71#include <linux/of_address.h>
  72#include <linux/of_irq.h>
  73
  74#ifdef CONFIG_PARISC
  75#include <asm/hardware.h>       /* for register_parisc_driver() stuff */
  76#include <asm/parisc-device.h>
  77#endif
  78
  79#define PFX "ipmi_si: "
  80
  81/* Measure times between events in the driver. */
  82#undef DEBUG_TIMING
  83
  84/* Call every 10 ms. */
  85#define SI_TIMEOUT_TIME_USEC    10000
  86#define SI_USEC_PER_JIFFY       (1000000/HZ)
  87#define SI_TIMEOUT_JIFFIES      (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
   88#define SI_SHORT_TIMEOUT_USEC  250 /* 0.25 ms when the SM requests a
   89                                      short timeout */
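     /*
      * Worked example of the arithmetic above (illustrative; the real
      * values depend on the kernel's HZ):  with HZ=250, SI_USEC_PER_JIFFY
      * is 1000000/250 = 4000, so SI_TIMEOUT_JIFFIES is 10000/4000 = 2
      * jiffies (about 8 ms); with HZ=1000 it is 10 jiffies (10 ms).
      */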
  90
  91enum si_intf_state {
  92        SI_NORMAL,
  93        SI_GETTING_FLAGS,
  94        SI_GETTING_EVENTS,
  95        SI_CLEARING_FLAGS,
  96        SI_CLEARING_FLAGS_THEN_SET_IRQ,
  97        SI_GETTING_MESSAGES,
  98        SI_ENABLE_INTERRUPTS1,
  99        SI_ENABLE_INTERRUPTS2,
 100        SI_DISABLE_INTERRUPTS1,
 101        SI_DISABLE_INTERRUPTS2
 102        /* FIXME - add watchdog stuff. */
 103};
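     /*
      * Rough sketch of how the states above are used (see handle_flags()
      * and handle_transaction_done() below):  an ATTN or interrupt sends
      * a Get Message Flags command and enters SI_GETTING_FLAGS; the
      * returned flags then drive SI_GETTING_MESSAGES, SI_GETTING_EVENTS
      * or SI_CLEARING_FLAGS as needed, and the machine returns to
      * SI_NORMAL once everything the flags indicated has been consumed.
      */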
 104
 105/* Some BT-specific defines we need here. */
 106#define IPMI_BT_INTMASK_REG             2
 107#define IPMI_BT_INTMASK_CLEAR_IRQ_BIT   2
 108#define IPMI_BT_INTMASK_ENABLE_IRQ_BIT  1
 109
 110enum si_type {
 111    SI_KCS, SI_SMIC, SI_BT
 112};
 113static char *si_to_str[] = { "kcs", "smic", "bt" };
 114
 115static char *ipmi_addr_src_to_str[] = { NULL, "hotmod", "hardcoded", "SPMI",
 116                                        "ACPI", "SMBIOS", "PCI",
 117                                        "device-tree", "default" };
 118
 119#define DEVICE_NAME "ipmi_si"
 120
 121static struct platform_driver ipmi_driver;
 122
 123/*
 124 * Indexes into stats[] in smi_info below.
 125 */
 126enum si_stat_indexes {
 127        /*
 128         * Number of times the driver requested a timer while an operation
 129         * was in progress.
 130         */
 131        SI_STAT_short_timeouts = 0,
 132
 133        /*
 134         * Number of times the driver requested a timer while nothing was in
 135         * progress.
 136         */
 137        SI_STAT_long_timeouts,
 138
 139        /* Number of times the interface was idle while being polled. */
 140        SI_STAT_idles,
 141
 142        /* Number of interrupts the driver handled. */
 143        SI_STAT_interrupts,
 144
  145        /* Number of times the driver got an ATTN from the hardware. */
 146        SI_STAT_attentions,
 147
 148        /* Number of times the driver requested flags from the hardware. */
 149        SI_STAT_flag_fetches,
 150
 151        /* Number of times the hardware didn't follow the state machine. */
 152        SI_STAT_hosed_count,
 153
 154        /* Number of completed messages. */
 155        SI_STAT_complete_transactions,
 156
 157        /* Number of IPMI events received from the hardware. */
 158        SI_STAT_events,
 159
 160        /* Number of watchdog pretimeouts. */
 161        SI_STAT_watchdog_pretimeouts,
 162
 163        /* Number of asynchronous messages received. */
 164        SI_STAT_incoming_messages,
 165
 166
 167        /* This *must* remain last, add new values above this. */
 168        SI_NUM_STATS
 169};
 170
 171struct smi_info {
 172        int                    intf_num;
 173        ipmi_smi_t             intf;
 174        struct si_sm_data      *si_sm;
 175        struct si_sm_handlers  *handlers;
 176        enum si_type           si_type;
 177        spinlock_t             si_lock;
 178        struct list_head       xmit_msgs;
 179        struct list_head       hp_xmit_msgs;
 180        struct ipmi_smi_msg    *curr_msg;
 181        enum si_intf_state     si_state;
 182
 183        /*
 184         * Used to handle the various types of I/O that can occur with
 185         * IPMI
 186         */
 187        struct si_sm_io io;
 188        int (*io_setup)(struct smi_info *info);
 189        void (*io_cleanup)(struct smi_info *info);
 190        int (*irq_setup)(struct smi_info *info);
 191        void (*irq_cleanup)(struct smi_info *info);
 192        unsigned int io_size;
 193        enum ipmi_addr_src addr_source; /* ACPI, PCI, SMBIOS, hardcode, etc. */
 194        void (*addr_source_cleanup)(struct smi_info *info);
 195        void *addr_source_data;
 196
 197        /*
 198         * Per-OEM handler, called from handle_flags().  Returns 1
 199         * when handle_flags() needs to be re-run or 0 indicating it
 200         * set si_state itself.
 201         */
 202        int (*oem_data_avail_handler)(struct smi_info *smi_info);
 203
 204        /*
 205         * Flags from the last GET_MSG_FLAGS command, used when an ATTN
 206         * is set to hold the flags until we are done handling everything
 207         * from the flags.
 208         */
 209#define RECEIVE_MSG_AVAIL       0x01
 210#define EVENT_MSG_BUFFER_FULL   0x02
 211#define WDT_PRE_TIMEOUT_INT     0x08
 212#define OEM0_DATA_AVAIL     0x20
 213#define OEM1_DATA_AVAIL     0x40
 214#define OEM2_DATA_AVAIL     0x80
 215#define OEM_DATA_AVAIL      (OEM0_DATA_AVAIL | \
 216                             OEM1_DATA_AVAIL | \
 217                             OEM2_DATA_AVAIL)
 218        unsigned char       msg_flags;
 219
 220        /* Does the BMC have an event buffer? */
 221        char                has_event_buffer;
 222
 223        /*
 224         * If set to true, this will request events the next time the
 225         * state machine is idle.
 226         */
 227        atomic_t            req_events;
 228
 229        /*
 230         * If true, run the state machine to completion on every send
 231         * call.  Generally used after a panic to make sure stuff goes
 232         * out.
 233         */
 234        int                 run_to_completion;
 235
 236        /* The I/O port of an SI interface. */
 237        int                 port;
 238
 239        /*
 240         * The space between start addresses of the two ports.  For
 241         * instance, if the first port is 0xca2 and the spacing is 4, then
 242         * the second port is 0xca6.
 243         */
 244        unsigned int        spacing;
 245
  246        /* Zero if no IRQ. */
 247        int                 irq;
 248
 249        /* The timer for this si. */
 250        struct timer_list   si_timer;
 251
 252        /* The time (in jiffies) the last timeout occurred at. */
 253        unsigned long       last_timeout_jiffies;
 254
 255        /* Used to gracefully stop the timer without race conditions. */
 256        atomic_t            stop_operation;
 257
 258        /*
 259         * The driver will disable interrupts when it gets into a
 260         * situation where it cannot handle messages due to lack of
 261         * memory.  Once that situation clears up, it will re-enable
 262         * interrupts.
 263         */
 264        int interrupt_disabled;
 265
 266        /* From the get device id response... */
 267        struct ipmi_device_id device_id;
 268
 269        /* Driver model stuff. */
 270        struct device *dev;
 271        struct platform_device *pdev;
 272
 273        /*
 274         * True if we allocated the device, false if it came from
 275         * someplace else (like PCI).
 276         */
 277        int dev_registered;
 278
 279        /* Slave address, could be reported from DMI. */
 280        unsigned char slave_addr;
 281
 282        /* Counters and things for the proc filesystem. */
 283        atomic_t stats[SI_NUM_STATS];
 284
 285        struct task_struct *thread;
 286
 287        struct list_head link;
 288        union ipmi_smi_info_union addr_info;
 289};
 290
 291#define smi_inc_stat(smi, stat) \
 292        atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
 293#define smi_get_stat(smi, stat) \
 294        ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
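     /*
      * For example (illustrative), smi_inc_stat(smi, attentions) pastes
      * the token into SI_STAT_attentions and expands to
      * atomic_inc(&(smi)->stats[SI_STAT_attentions]).
      */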
 295
 296#define SI_MAX_PARMS 4
 297
 298static int force_kipmid[SI_MAX_PARMS];
 299static int num_force_kipmid;
 300#ifdef CONFIG_PCI
 301static int pci_registered;
 302#endif
 303#ifdef CONFIG_ACPI
 304static int pnp_registered;
 305#endif
 306#ifdef CONFIG_PARISC
 307static int parisc_registered;
 308#endif
 309
 310static unsigned int kipmid_max_busy_us[SI_MAX_PARMS];
 311static int num_max_busy_us;
 312
 313static int unload_when_empty = 1;
 314
 315static int add_smi(struct smi_info *smi);
 316static int try_smi_init(struct smi_info *smi);
 317static void cleanup_one_si(struct smi_info *to_clean);
 318static void cleanup_ipmi_si(void);
 319
 320static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
 321static int register_xaction_notifier(struct notifier_block *nb)
 322{
 323        return atomic_notifier_chain_register(&xaction_notifier_list, nb);
 324}
 325
 326static void deliver_recv_msg(struct smi_info *smi_info,
 327                             struct ipmi_smi_msg *msg)
 328{
 329        /* Deliver the message to the upper layer. */
 330        ipmi_smi_msg_received(smi_info->intf, msg);
 331}
 332
 333static void return_hosed_msg(struct smi_info *smi_info, int cCode)
 334{
 335        struct ipmi_smi_msg *msg = smi_info->curr_msg;
 336
 337        if (cCode < 0 || cCode > IPMI_ERR_UNSPECIFIED)
 338                cCode = IPMI_ERR_UNSPECIFIED;
 339        /* else use it as is */
 340
 341        /* Make it a response */
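             /*
              * Descriptive note: data[0] is (netfn << 2) | LUN.  Request
              * netfns are even, so OR-ing in bit 2 turns it into the
              * matching response netfn (request netfn + 1) while keeping
              * the LUN bits intact.
              */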
 342        msg->rsp[0] = msg->data[0] | 4;
 343        msg->rsp[1] = msg->data[1];
 344        msg->rsp[2] = cCode;
 345        msg->rsp_size = 3;
 346
 347        smi_info->curr_msg = NULL;
 348        deliver_recv_msg(smi_info, msg);
 349}
 350
 351static enum si_sm_result start_next_msg(struct smi_info *smi_info)
 352{
 353        int              rv;
 354        struct list_head *entry = NULL;
 355#ifdef DEBUG_TIMING
 356        struct timeval t;
 357#endif
 358
 359        /* Pick the high priority queue first. */
 360        if (!list_empty(&(smi_info->hp_xmit_msgs))) {
 361                entry = smi_info->hp_xmit_msgs.next;
 362        } else if (!list_empty(&(smi_info->xmit_msgs))) {
 363                entry = smi_info->xmit_msgs.next;
 364        }
 365
 366        if (!entry) {
 367                smi_info->curr_msg = NULL;
 368                rv = SI_SM_IDLE;
 369        } else {
 370                int err;
 371
 372                list_del(entry);
 373                smi_info->curr_msg = list_entry(entry,
 374                                                struct ipmi_smi_msg,
 375                                                link);
 376#ifdef DEBUG_TIMING
 377                do_gettimeofday(&t);
 378                printk(KERN_DEBUG "**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec);
 379#endif
 380                err = atomic_notifier_call_chain(&xaction_notifier_list,
 381                                0, smi_info);
 382                if (err & NOTIFY_STOP_MASK) {
 383                        rv = SI_SM_CALL_WITHOUT_DELAY;
 384                        goto out;
 385                }
 386                err = smi_info->handlers->start_transaction(
 387                        smi_info->si_sm,
 388                        smi_info->curr_msg->data,
 389                        smi_info->curr_msg->data_size);
 390                if (err)
 391                        return_hosed_msg(smi_info, err);
 392
 393                rv = SI_SM_CALL_WITHOUT_DELAY;
 394        }
 395 out:
 396        return rv;
 397}
 398
 399static void start_enable_irq(struct smi_info *smi_info)
 400{
 401        unsigned char msg[2];
 402
 403        /*
 404         * If we are enabling interrupts, we have to tell the
 405         * BMC to use them.
 406         */
 407        msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
 408        msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
 409
 410        smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
 411        smi_info->si_state = SI_ENABLE_INTERRUPTS1;
 412}
 413
 414static void start_disable_irq(struct smi_info *smi_info)
 415{
 416        unsigned char msg[2];
 417
 418        msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
 419        msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
 420
 421        smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
 422        smi_info->si_state = SI_DISABLE_INTERRUPTS1;
 423}
 424
 425static void start_clear_flags(struct smi_info *smi_info)
 426{
 427        unsigned char msg[3];
 428
 429        /* Make sure the watchdog pre-timeout flag is not set at startup. */
 430        msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
 431        msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
 432        msg[2] = WDT_PRE_TIMEOUT_INT;
 433
 434        smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
 435        smi_info->si_state = SI_CLEARING_FLAGS;
 436}
 437
 438/*
  439 * When we have a situation where we run out of memory and cannot
 440 * allocate messages, we just leave them in the BMC and run the system
 441 * polled until we can allocate some memory.  Once we have some
 442 * memory, we will re-enable the interrupt.
 443 */
 444static inline void disable_si_irq(struct smi_info *smi_info)
 445{
 446        if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
 447                start_disable_irq(smi_info);
 448                smi_info->interrupt_disabled = 1;
 449                if (!atomic_read(&smi_info->stop_operation))
 450                        mod_timer(&smi_info->si_timer,
 451                                  jiffies + SI_TIMEOUT_JIFFIES);
 452        }
 453}
 454
 455static inline void enable_si_irq(struct smi_info *smi_info)
 456{
 457        if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
 458                start_enable_irq(smi_info);
 459                smi_info->interrupt_disabled = 0;
 460        }
 461}
 462
 463static void handle_flags(struct smi_info *smi_info)
 464{
 465 retry:
 466        if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
 467                /* Watchdog pre-timeout */
 468                smi_inc_stat(smi_info, watchdog_pretimeouts);
 469
 470                start_clear_flags(smi_info);
 471                smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
 472                ipmi_smi_watchdog_pretimeout(smi_info->intf);
 473        } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
 474                /* Messages available. */
 475                smi_info->curr_msg = ipmi_alloc_smi_msg();
 476                if (!smi_info->curr_msg) {
 477                        disable_si_irq(smi_info);
 478                        smi_info->si_state = SI_NORMAL;
 479                        return;
 480                }
 481                enable_si_irq(smi_info);
 482
 483                smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
 484                smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
 485                smi_info->curr_msg->data_size = 2;
 486
 487                smi_info->handlers->start_transaction(
 488                        smi_info->si_sm,
 489                        smi_info->curr_msg->data,
 490                        smi_info->curr_msg->data_size);
 491                smi_info->si_state = SI_GETTING_MESSAGES;
 492        } else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
 493                /* Events available. */
 494                smi_info->curr_msg = ipmi_alloc_smi_msg();
 495                if (!smi_info->curr_msg) {
 496                        disable_si_irq(smi_info);
 497                        smi_info->si_state = SI_NORMAL;
 498                        return;
 499                }
 500                enable_si_irq(smi_info);
 501
 502                smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
 503                smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
 504                smi_info->curr_msg->data_size = 2;
 505
 506                smi_info->handlers->start_transaction(
 507                        smi_info->si_sm,
 508                        smi_info->curr_msg->data,
 509                        smi_info->curr_msg->data_size);
 510                smi_info->si_state = SI_GETTING_EVENTS;
 511        } else if (smi_info->msg_flags & OEM_DATA_AVAIL &&
 512                   smi_info->oem_data_avail_handler) {
 513                if (smi_info->oem_data_avail_handler(smi_info))
 514                        goto retry;
 515        } else
 516                smi_info->si_state = SI_NORMAL;
 517}
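     /*
      * Purely illustrative sketch (not part of this driver) of what an
      * oem_data_avail_handler hooked into handle_flags() above might look
      * like: it either remaps an OEM flag bit so the normal paths run
      * again (return 1, which makes handle_flags() retry) or sets
      * si_state itself and returns 0.  The name and the flag mapping
      * below are hypothetical.
      */
     #if 0
     static int example_oem_data_avail_handler(struct smi_info *smi_info)
     {
             if (smi_info->msg_flags & OEM0_DATA_AVAIL) {
                     /* Treat OEM0 as "a message is waiting" and retry. */
                     smi_info->msg_flags &= ~OEM0_DATA_AVAIL;
                     smi_info->msg_flags |= RECEIVE_MSG_AVAIL;
                     return 1;
             }
             smi_info->si_state = SI_NORMAL;
             return 0;
     }
     #endif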
 518
 519static void handle_transaction_done(struct smi_info *smi_info)
 520{
 521        struct ipmi_smi_msg *msg;
 522#ifdef DEBUG_TIMING
 523        struct timeval t;
 524
 525        do_gettimeofday(&t);
 526        printk(KERN_DEBUG "**Done: %d.%9.9d\n", t.tv_sec, t.tv_usec);
 527#endif
 528        switch (smi_info->si_state) {
 529        case SI_NORMAL:
 530                if (!smi_info->curr_msg)
 531                        break;
 532
 533                smi_info->curr_msg->rsp_size
 534                        = smi_info->handlers->get_result(
 535                                smi_info->si_sm,
 536                                smi_info->curr_msg->rsp,
 537                                IPMI_MAX_MSG_LENGTH);
 538
 539                /*
  540                 * Do this here because deliver_recv_msg() releases the
 541                 * lock, and a new message can be put in during the
 542                 * time the lock is released.
 543                 */
 544                msg = smi_info->curr_msg;
 545                smi_info->curr_msg = NULL;
 546                deliver_recv_msg(smi_info, msg);
 547                break;
 548
 549        case SI_GETTING_FLAGS:
 550        {
 551                unsigned char msg[4];
 552                unsigned int  len;
 553
 554                /* We got the flags from the SMI, now handle them. */
 555                len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
 556                if (msg[2] != 0) {
 557                        /* Error fetching flags, just give up for now. */
 558                        smi_info->si_state = SI_NORMAL;
 559                } else if (len < 4) {
 560                        /*
 561                         * Hmm, no flags.  That's technically illegal, but
 562                         * don't use uninitialized data.
 563                         */
 564                        smi_info->si_state = SI_NORMAL;
 565                } else {
 566                        smi_info->msg_flags = msg[3];
 567                        handle_flags(smi_info);
 568                }
 569                break;
 570        }
 571
 572        case SI_CLEARING_FLAGS:
 573        case SI_CLEARING_FLAGS_THEN_SET_IRQ:
 574        {
 575                unsigned char msg[3];
 576
 577                /* We cleared the flags. */
 578                smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
 579                if (msg[2] != 0) {
 580                        /* Error clearing flags */
 581                        dev_warn(smi_info->dev,
 582                                 "Error clearing flags: %2.2x\n", msg[2]);
 583                }
 584                if (smi_info->si_state == SI_CLEARING_FLAGS_THEN_SET_IRQ)
 585                        start_enable_irq(smi_info);
 586                else
 587                        smi_info->si_state = SI_NORMAL;
 588                break;
 589        }
 590
 591        case SI_GETTING_EVENTS:
 592        {
 593                smi_info->curr_msg->rsp_size
 594                        = smi_info->handlers->get_result(
 595                                smi_info->si_sm,
 596                                smi_info->curr_msg->rsp,
 597                                IPMI_MAX_MSG_LENGTH);
 598
 599                /*
  600                 * Do this here because deliver_recv_msg() releases the
 601                 * lock, and a new message can be put in during the
 602                 * time the lock is released.
 603                 */
 604                msg = smi_info->curr_msg;
 605                smi_info->curr_msg = NULL;
 606                if (msg->rsp[2] != 0) {
 607                        /* Error getting event, probably done. */
 608                        msg->done(msg);
 609
 610                        /* Take off the event flag. */
 611                        smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
 612                        handle_flags(smi_info);
 613                } else {
 614                        smi_inc_stat(smi_info, events);
 615
 616                        /*
 617                         * Do this before we deliver the message
 618                         * because delivering the message releases the
 619                         * lock and something else can mess with the
 620                         * state.
 621                         */
 622                        handle_flags(smi_info);
 623
 624                        deliver_recv_msg(smi_info, msg);
 625                }
 626                break;
 627        }
 628
 629        case SI_GETTING_MESSAGES:
 630        {
 631                smi_info->curr_msg->rsp_size
 632                        = smi_info->handlers->get_result(
 633                                smi_info->si_sm,
 634                                smi_info->curr_msg->rsp,
 635                                IPMI_MAX_MSG_LENGTH);
 636
 637                /*
  638                 * Do this here because deliver_recv_msg() releases the
 639                 * lock, and a new message can be put in during the
 640                 * time the lock is released.
 641                 */
 642                msg = smi_info->curr_msg;
 643                smi_info->curr_msg = NULL;
 644                if (msg->rsp[2] != 0) {
  645                        /* Error getting message, probably done. */
 646                        msg->done(msg);
 647
 648                        /* Take off the msg flag. */
 649                        smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
 650                        handle_flags(smi_info);
 651                } else {
 652                        smi_inc_stat(smi_info, incoming_messages);
 653
 654                        /*
 655                         * Do this before we deliver the message
 656                         * because delivering the message releases the
 657                         * lock and something else can mess with the
 658                         * state.
 659                         */
 660                        handle_flags(smi_info);
 661
 662                        deliver_recv_msg(smi_info, msg);
 663                }
 664                break;
 665        }
 666
 667        case SI_ENABLE_INTERRUPTS1:
 668        {
 669                unsigned char msg[4];
 670
 671                /* We got the flags from the SMI, now handle them. */
 672                smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
 673                if (msg[2] != 0) {
 674                        dev_warn(smi_info->dev,
 675                                 "Couldn't get irq info: %x.\n", msg[2]);
 676                        dev_warn(smi_info->dev,
 677                                 "Maybe ok, but ipmi might run very slowly.\n");
 678                        smi_info->si_state = SI_NORMAL;
 679                } else {
 680                        msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
 681                        msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
 682                        msg[2] = (msg[3] |
 683                                  IPMI_BMC_RCV_MSG_INTR |
 684                                  IPMI_BMC_EVT_MSG_INTR);
 685                        smi_info->handlers->start_transaction(
 686                                smi_info->si_sm, msg, 3);
 687                        smi_info->si_state = SI_ENABLE_INTERRUPTS2;
 688                }
 689                break;
 690        }
 691
 692        case SI_ENABLE_INTERRUPTS2:
 693        {
 694                unsigned char msg[4];
 695
 696                /* We got the flags from the SMI, now handle them. */
 697                smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
 698                if (msg[2] != 0) {
 699                        dev_warn(smi_info->dev,
 700                                 "Couldn't set irq info: %x.\n", msg[2]);
 701                        dev_warn(smi_info->dev,
 702                                 "Maybe ok, but ipmi might run very slowly.\n");
 703                } else
 704                        smi_info->interrupt_disabled = 0;
 705                smi_info->si_state = SI_NORMAL;
 706                break;
 707        }
 708
 709        case SI_DISABLE_INTERRUPTS1:
 710        {
 711                unsigned char msg[4];
 712
 713                /* We got the flags from the SMI, now handle them. */
 714                smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
 715                if (msg[2] != 0) {
 716                        dev_warn(smi_info->dev, "Could not disable interrupts"
 717                                 ", failed get.\n");
 718                        smi_info->si_state = SI_NORMAL;
 719                } else {
 720                        msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
 721                        msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
 722                        msg[2] = (msg[3] &
 723                                  ~(IPMI_BMC_RCV_MSG_INTR |
 724                                    IPMI_BMC_EVT_MSG_INTR));
 725                        smi_info->handlers->start_transaction(
 726                                smi_info->si_sm, msg, 3);
 727                        smi_info->si_state = SI_DISABLE_INTERRUPTS2;
 728                }
 729                break;
 730        }
 731
 732        case SI_DISABLE_INTERRUPTS2:
 733        {
 734                unsigned char msg[4];
 735
 736                /* We got the flags from the SMI, now handle them. */
 737                smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
 738                if (msg[2] != 0) {
 739                        dev_warn(smi_info->dev, "Could not disable interrupts"
 740                                 ", failed set.\n");
 741                }
 742                smi_info->si_state = SI_NORMAL;
 743                break;
 744        }
 745        }
 746}
 747
 748/*
 749 * Called on timeouts and events.  Timeouts should pass the elapsed
 750 * time, interrupts should pass in zero.  Must be called with
 751 * si_lock held and interrupts disabled.
 752 */
 753static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
 754                                           int time)
 755{
 756        enum si_sm_result si_sm_result;
 757
 758 restart:
 759        /*
 760         * There used to be a loop here that waited a little while
 761         * (around 25us) before giving up.  That turned out to be
 762         * pointless, the minimum delays I was seeing were in the 300us
 763         * range, which is far too long to wait in an interrupt.  So
 764         * we just run until the state machine tells us something
 765         * happened or it needs a delay.
 766         */
 767        si_sm_result = smi_info->handlers->event(smi_info->si_sm, time);
 768        time = 0;
 769        while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
 770                si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
 771
 772        if (si_sm_result == SI_SM_TRANSACTION_COMPLETE) {
 773                smi_inc_stat(smi_info, complete_transactions);
 774
 775                handle_transaction_done(smi_info);
 776                si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
 777        } else if (si_sm_result == SI_SM_HOSED) {
 778                smi_inc_stat(smi_info, hosed_count);
 779
 780                /*
  781                 * Do this before return_hosed_msg(), because that
 782                 * releases the lock.
 783                 */
 784                smi_info->si_state = SI_NORMAL;
 785                if (smi_info->curr_msg != NULL) {
 786                        /*
 787                         * If we were handling a user message, format
 788                         * a response to send to the upper layer to
 789                         * tell it about the error.
 790                         */
 791                        return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED);
 792                }
 793                si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
 794        }
 795
 796        /*
 797         * We prefer handling attn over new messages.  But don't do
 798         * this if there is not yet an upper layer to handle anything.
 799         */
 800        if (likely(smi_info->intf) && si_sm_result == SI_SM_ATTN) {
 801                unsigned char msg[2];
 802
 803                smi_inc_stat(smi_info, attentions);
 804
 805                /*
  806                 * Got an attn, send down a get message flags to see
 807                 * what's causing it.  It would be better to handle
 808                 * this in the upper layer, but due to the way
 809                 * interrupts work with the SMI, that's not really
 810                 * possible.
 811                 */
 812                msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
 813                msg[1] = IPMI_GET_MSG_FLAGS_CMD;
 814
 815                smi_info->handlers->start_transaction(
 816                        smi_info->si_sm, msg, 2);
 817                smi_info->si_state = SI_GETTING_FLAGS;
 818                goto restart;
 819        }
 820
 821        /* If we are currently idle, try to start the next message. */
 822        if (si_sm_result == SI_SM_IDLE) {
 823                smi_inc_stat(smi_info, idles);
 824
 825                si_sm_result = start_next_msg(smi_info);
 826                if (si_sm_result != SI_SM_IDLE)
 827                        goto restart;
 828        }
 829
 830        if ((si_sm_result == SI_SM_IDLE)
 831            && (atomic_read(&smi_info->req_events))) {
 832                /*
 833                 * We are idle and the upper layer requested that I fetch
 834                 * events, so do so.
 835                 */
 836                atomic_set(&smi_info->req_events, 0);
 837
 838                smi_info->curr_msg = ipmi_alloc_smi_msg();
 839                if (!smi_info->curr_msg)
 840                        goto out;
 841
 842                smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
 843                smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
 844                smi_info->curr_msg->data_size = 2;
 845
 846                smi_info->handlers->start_transaction(
 847                        smi_info->si_sm,
 848                        smi_info->curr_msg->data,
 849                        smi_info->curr_msg->data_size);
 850                smi_info->si_state = SI_GETTING_EVENTS;
 851                goto restart;
 852        }
 853 out:
 854        return si_sm_result;
 855}
 856
 857static void sender(void                *send_info,
 858                   struct ipmi_smi_msg *msg,
 859                   int                 priority)
 860{
 861        struct smi_info   *smi_info = send_info;
 862        enum si_sm_result result;
 863        unsigned long     flags;
 864#ifdef DEBUG_TIMING
 865        struct timeval    t;
 866#endif
 867
 868        if (atomic_read(&smi_info->stop_operation)) {
 869                msg->rsp[0] = msg->data[0] | 4;
 870                msg->rsp[1] = msg->data[1];
 871                msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
 872                msg->rsp_size = 3;
 873                deliver_recv_msg(smi_info, msg);
 874                return;
 875        }
 876
 877#ifdef DEBUG_TIMING
 878        do_gettimeofday(&t);
 879        printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
 880#endif
 881
 882        if (smi_info->run_to_completion) {
 883                /*
 884                 * If we are running to completion, then throw it in
 885                 * the list and run transactions until everything is
 886                 * clear.  Priority doesn't matter here.
 887                 */
 888
 889                /*
 890                 * Run to completion means we are single-threaded, no
 891                 * need for locks.
 892                 */
 893                list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
 894
 895                result = smi_event_handler(smi_info, 0);
 896                while (result != SI_SM_IDLE) {
 897                        udelay(SI_SHORT_TIMEOUT_USEC);
 898                        result = smi_event_handler(smi_info,
 899                                                   SI_SHORT_TIMEOUT_USEC);
 900                }
 901                return;
 902        }
 903
 904        spin_lock_irqsave(&smi_info->si_lock, flags);
 905        if (priority > 0)
 906                list_add_tail(&msg->link, &smi_info->hp_xmit_msgs);
 907        else
 908                list_add_tail(&msg->link, &smi_info->xmit_msgs);
 909
 910        if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) {
 911                /*
 912                 * last_timeout_jiffies is updated here to avoid
 913                 * smi_timeout() handler passing very large time_diff
 914                 * value to smi_event_handler() that causes
 915                 * the send command to abort.
 916                 */
 917                smi_info->last_timeout_jiffies = jiffies;
 918
 919                mod_timer(&smi_info->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
 920
 921                if (smi_info->thread)
 922                        wake_up_process(smi_info->thread);
 923
 924                start_next_msg(smi_info);
 925                smi_event_handler(smi_info, 0);
 926        }
 927        spin_unlock_irqrestore(&smi_info->si_lock, flags);
 928}
 929
 930static void set_run_to_completion(void *send_info, int i_run_to_completion)
 931{
 932        struct smi_info   *smi_info = send_info;
 933        enum si_sm_result result;
 934
 935        smi_info->run_to_completion = i_run_to_completion;
 936        if (i_run_to_completion) {
 937                result = smi_event_handler(smi_info, 0);
 938                while (result != SI_SM_IDLE) {
 939                        udelay(SI_SHORT_TIMEOUT_USEC);
 940                        result = smi_event_handler(smi_info,
 941                                                   SI_SHORT_TIMEOUT_USEC);
 942                }
 943        }
 944}
 945
 946/*
 947 * Use -1 in the nsec value of the busy waiting timespec to tell that
 948 * we are spinning in kipmid looking for something and not delaying
 949 * between checks
 950 */
 951static inline void ipmi_si_set_not_busy(struct timespec *ts)
 952{
 953        ts->tv_nsec = -1;
 954}
 955static inline int ipmi_si_is_busy(struct timespec *ts)
 956{
 957        return ts->tv_nsec != -1;
 958}
 959
 960static int ipmi_thread_busy_wait(enum si_sm_result smi_result,
 961                                 const struct smi_info *smi_info,
 962                                 struct timespec *busy_until)
 963{
 964        unsigned int max_busy_us = 0;
 965
 966        if (smi_info->intf_num < num_max_busy_us)
 967                max_busy_us = kipmid_max_busy_us[smi_info->intf_num];
 968        if (max_busy_us == 0 || smi_result != SI_SM_CALL_WITH_DELAY)
 969                ipmi_si_set_not_busy(busy_until);
 970        else if (!ipmi_si_is_busy(busy_until)) {
 971                getnstimeofday(busy_until);
 972                timespec_add_ns(busy_until, max_busy_us*NSEC_PER_USEC);
 973        } else {
 974                struct timespec now;
 975                getnstimeofday(&now);
 976                if (unlikely(timespec_compare(&now, busy_until) > 0)) {
 977                        ipmi_si_set_not_busy(busy_until);
 978                        return 0;
 979                }
 980        }
 981        return 1;
 982}
 983
 984
 985/*
 986 * A busy-waiting loop for speeding up IPMI operation.
 987 *
 988 * Lousy hardware makes this hard.  This is only enabled for systems
 989 * that are not BT and do not have interrupts.  It starts spinning
 990 * when an operation is complete or until max_busy tells it to stop
  991 * (if that is enabled).  See the paragraph on kipmid_max_busy_us in
 992 * Documentation/IPMI.txt for details.
 993 */
 994static int ipmi_thread(void *data)
 995{
 996        struct smi_info *smi_info = data;
 997        unsigned long flags;
 998        enum si_sm_result smi_result;
 999        struct timespec busy_until;
1000
1001        ipmi_si_set_not_busy(&busy_until);
1002        set_user_nice(current, 19);
1003        while (!kthread_should_stop()) {
1004                int busy_wait;
1005
1006                spin_lock_irqsave(&(smi_info->si_lock), flags);
1007                smi_result = smi_event_handler(smi_info, 0);
1008                spin_unlock_irqrestore(&(smi_info->si_lock), flags);
1009                busy_wait = ipmi_thread_busy_wait(smi_result, smi_info,
1010                                                  &busy_until);
1011                if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
1012                        ; /* do nothing */
1013                else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait)
1014                        schedule();
1015                else if (smi_result == SI_SM_IDLE)
1016                        schedule_timeout_interruptible(100);
1017                else
1018                        schedule_timeout_interruptible(1);
1019        }
1020        return 0;
1021}
1022
1023
1024static void poll(void *send_info)
1025{
1026        struct smi_info *smi_info = send_info;
1027        unsigned long flags = 0;
1028        int run_to_completion = smi_info->run_to_completion;
1029
1030        /*
1031         * Make sure there is some delay in the poll loop so we can
1032         * drive time forward and timeout things.
1033         */
1034        udelay(10);
1035        if (!run_to_completion)
1036                spin_lock_irqsave(&smi_info->si_lock, flags);
1037        smi_event_handler(smi_info, 10);
1038        if (!run_to_completion)
1039                spin_unlock_irqrestore(&smi_info->si_lock, flags);
1040}
1041
1042static void request_events(void *send_info)
1043{
1044        struct smi_info *smi_info = send_info;
1045
1046        if (atomic_read(&smi_info->stop_operation) ||
1047                                !smi_info->has_event_buffer)
1048                return;
1049
1050        atomic_set(&smi_info->req_events, 1);
1051}
1052
1053static int initialized;
1054
1055static void smi_timeout(unsigned long data)
1056{
1057        struct smi_info   *smi_info = (struct smi_info *) data;
1058        enum si_sm_result smi_result;
1059        unsigned long     flags;
1060        unsigned long     jiffies_now;
1061        long              time_diff;
1062        long              timeout;
1063#ifdef DEBUG_TIMING
1064        struct timeval    t;
1065#endif
1066
1067        spin_lock_irqsave(&(smi_info->si_lock), flags);
1068#ifdef DEBUG_TIMING
1069        do_gettimeofday(&t);
1070        printk(KERN_DEBUG "**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec);
1071#endif
1072        jiffies_now = jiffies;
1073        time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
1074                     * SI_USEC_PER_JIFFY);
1075        smi_result = smi_event_handler(smi_info, time_diff);
1076
1077        spin_unlock_irqrestore(&(smi_info->si_lock), flags);
1078
1079        smi_info->last_timeout_jiffies = jiffies_now;
1080
1081        if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
1082                /* Running with interrupts, only do long timeouts. */
1083                timeout = jiffies + SI_TIMEOUT_JIFFIES;
1084                smi_inc_stat(smi_info, long_timeouts);
1085                goto do_mod_timer;
1086        }
1087
1088        /*
1089         * If the state machine asks for a short delay, then shorten
1090         * the timer timeout.
1091         */
1092        if (smi_result == SI_SM_CALL_WITH_DELAY) {
1093                smi_inc_stat(smi_info, short_timeouts);
1094                timeout = jiffies + 1;
1095        } else {
1096                smi_inc_stat(smi_info, long_timeouts);
1097                timeout = jiffies + SI_TIMEOUT_JIFFIES;
1098        }
1099
1100 do_mod_timer:
1101        if (smi_result != SI_SM_IDLE)
1102                mod_timer(&(smi_info->si_timer), timeout);
1103}
1104
1105static irqreturn_t si_irq_handler(int irq, void *data)
1106{
1107        struct smi_info *smi_info = data;
1108        unsigned long   flags;
1109#ifdef DEBUG_TIMING
1110        struct timeval  t;
1111#endif
1112
1113        spin_lock_irqsave(&(smi_info->si_lock), flags);
1114
1115        smi_inc_stat(smi_info, interrupts);
1116
1117#ifdef DEBUG_TIMING
1118        do_gettimeofday(&t);
1119        printk(KERN_DEBUG "**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec);
1120#endif
1121        smi_event_handler(smi_info, 0);
1122        spin_unlock_irqrestore(&(smi_info->si_lock), flags);
1123        return IRQ_HANDLED;
1124}
1125
1126static irqreturn_t si_bt_irq_handler(int irq, void *data)
1127{
1128        struct smi_info *smi_info = data;
1129        /* We need to clear the IRQ flag for the BT interface. */
1130        smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
1131                             IPMI_BT_INTMASK_CLEAR_IRQ_BIT
1132                             | IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
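             /*
              * Descriptive note: writing both bits acknowledges (clears)
              * the pending IRQ while leaving BT interrupts enabled.
              */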
1133        return si_irq_handler(irq, data);
1134}
1135
1136static int smi_start_processing(void       *send_info,
1137                                ipmi_smi_t intf)
1138{
1139        struct smi_info *new_smi = send_info;
1140        int             enable = 0;
1141
1142        new_smi->intf = intf;
1143
1144        /* Try to claim any interrupts. */
1145        if (new_smi->irq_setup)
1146                new_smi->irq_setup(new_smi);
1147
1148        /* Set up the timer that drives the interface. */
1149        setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
1150        new_smi->last_timeout_jiffies = jiffies;
1151        mod_timer(&new_smi->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
1152
1153        /*
1154         * Check if the user forcefully enabled the daemon.
1155         */
1156        if (new_smi->intf_num < num_force_kipmid)
1157                enable = force_kipmid[new_smi->intf_num];
1158        /*
1159         * The BT interface is efficient enough to not need a thread,
1160         * and there is no need for a thread if we have interrupts.
1161         */
1162        else if ((new_smi->si_type != SI_BT) && (!new_smi->irq))
1163                enable = 1;
1164
1165        if (enable) {
1166                new_smi->thread = kthread_run(ipmi_thread, new_smi,
1167                                              "kipmi%d", new_smi->intf_num);
1168                if (IS_ERR(new_smi->thread)) {
1169                        dev_notice(new_smi->dev, "Could not start"
1170                                   " kernel thread due to error %ld, only using"
1171                                   " timers to drive the interface\n",
1172                                   PTR_ERR(new_smi->thread));
1173                        new_smi->thread = NULL;
1174                }
1175        }
1176
1177        return 0;
1178}
1179
1180static int get_smi_info(void *send_info, struct ipmi_smi_info *data)
1181{
1182        struct smi_info *smi = send_info;
1183
1184        data->addr_src = smi->addr_source;
1185        data->dev = smi->dev;
1186        data->addr_info = smi->addr_info;
1187        get_device(smi->dev);
1188
1189        return 0;
1190}
1191
1192static void set_maintenance_mode(void *send_info, int enable)
1193{
1194        struct smi_info   *smi_info = send_info;
1195
1196        if (!enable)
1197                atomic_set(&smi_info->req_events, 0);
1198}
1199
1200static struct ipmi_smi_handlers handlers = {
1201        .owner                  = THIS_MODULE,
1202        .start_processing       = smi_start_processing,
1203        .get_smi_info           = get_smi_info,
1204        .sender                 = sender,
1205        .request_events         = request_events,
1206        .set_maintenance_mode   = set_maintenance_mode,
1207        .set_run_to_completion  = set_run_to_completion,
1208        .poll                   = poll,
1209};
1210
1211/*
1212 * There can be 4 IO ports passed in (with or without IRQs), 4 addresses,
 1213 * a default IO port, and 1 ACPI/SPMI address.  That sets SI_MAX_PARMS.
1214 */
1215
1216static LIST_HEAD(smi_infos);
1217static DEFINE_MUTEX(smi_infos_lock);
1218static int smi_num; /* Used to sequence the SMIs */
1219
1220#define DEFAULT_REGSPACING      1
1221#define DEFAULT_REGSIZE         1
1222
1223#ifdef CONFIG_ACPI
1224static bool          si_tryacpi = 1;
1225#endif
1226#ifdef CONFIG_DMI
1227static bool          si_trydmi = 1;
1228#endif
1229static bool          si_tryplatform = 1;
1230#ifdef CONFIG_PCI
1231static bool          si_trypci = 1;
1232#endif
1233static bool          si_trydefaults = 1;
1234static char          *si_type[SI_MAX_PARMS];
1235#define MAX_SI_TYPE_STR 30
1236static char          si_type_str[MAX_SI_TYPE_STR];
1237static unsigned long addrs[SI_MAX_PARMS];
1238static unsigned int num_addrs;
1239static unsigned int  ports[SI_MAX_PARMS];
1240static unsigned int num_ports;
1241static int           irqs[SI_MAX_PARMS];
1242static unsigned int num_irqs;
1243static int           regspacings[SI_MAX_PARMS];
1244static unsigned int num_regspacings;
1245static int           regsizes[SI_MAX_PARMS];
1246static unsigned int num_regsizes;
1247static int           regshifts[SI_MAX_PARMS];
1248static unsigned int num_regshifts;
1249static int slave_addrs[SI_MAX_PARMS]; /* Leaving 0 chooses the default value */
1250static unsigned int num_slave_addrs;
1251
1252#define IPMI_IO_ADDR_SPACE  0
1253#define IPMI_MEM_ADDR_SPACE 1
1254static char *addr_space_to_str[] = { "i/o", "mem" };
1255
1256static int hotmod_handler(const char *val, struct kernel_param *kp);
1257
1258module_param_call(hotmod, hotmod_handler, NULL, NULL, 0200);
1259MODULE_PARM_DESC(hotmod, "Add and remove interfaces.  See"
1260                 " Documentation/IPMI.txt in the kernel sources for the"
1261                 " gory details.");
1262
1263#ifdef CONFIG_ACPI
1264module_param_named(tryacpi, si_tryacpi, bool, 0);
1265MODULE_PARM_DESC(tryacpi, "Setting this to zero will disable the"
1266                 " default scan of the interfaces identified via ACPI");
1267#endif
1268#ifdef CONFIG_DMI
1269module_param_named(trydmi, si_trydmi, bool, 0);
1270MODULE_PARM_DESC(trydmi, "Setting this to zero will disable the"
1271                 " default scan of the interfaces identified via DMI");
1272#endif
1273module_param_named(tryplatform, si_tryplatform, bool, 0);
 1274MODULE_PARM_DESC(tryplatform, "Setting this to zero will disable the"
1275                 " default scan of the interfaces identified via platform"
1276                 " interfaces like openfirmware");
1277#ifdef CONFIG_PCI
1278module_param_named(trypci, si_trypci, bool, 0);
 1279MODULE_PARM_DESC(trypci, "Setting this to zero will disable the"
1280                 " default scan of the interfaces identified via pci");
1281#endif
1282module_param_named(trydefaults, si_trydefaults, bool, 0);
1283MODULE_PARM_DESC(trydefaults, "Setting this to 'false' will disable the"
1284                 " default scan of the KCS and SMIC interface at the standard"
1285                 " address");
1286module_param_string(type, si_type_str, MAX_SI_TYPE_STR, 0);
1287MODULE_PARM_DESC(type, "Defines the type of each interface, each"
1288                 " interface separated by commas.  The types are 'kcs',"
1289                 " 'smic', and 'bt'.  For example si_type=kcs,bt will set"
1290                 " the first interface to kcs and the second to bt");
1291module_param_array(addrs, ulong, &num_addrs, 0);
1292MODULE_PARM_DESC(addrs, "Sets the memory address of each interface, the"
1293                 " addresses separated by commas.  Only use if an interface"
1294                 " is in memory.  Otherwise, set it to zero or leave"
1295                 " it blank.");
1296module_param_array(ports, uint, &num_ports, 0);
1297MODULE_PARM_DESC(ports, "Sets the port address of each interface, the"
1298                 " addresses separated by commas.  Only use if an interface"
1299                 " is a port.  Otherwise, set it to zero or leave"
1300                 " it blank.");
1301module_param_array(irqs, int, &num_irqs, 0);
1302MODULE_PARM_DESC(irqs, "Sets the interrupt of each interface, the"
1303                 " addresses separated by commas.  Only use if an interface"
1304                 " has an interrupt.  Otherwise, set it to zero or leave"
1305                 " it blank.");
1306module_param_array(regspacings, int, &num_regspacings, 0);
1307MODULE_PARM_DESC(regspacings, "The number of bytes between the start address"
1308                 " and each successive register used by the interface.  For"
1309                 " instance, if the start address is 0xca2 and the spacing"
1310                 " is 2, then the second address is at 0xca4.  Defaults"
1311                 " to 1.");
1312module_param_array(regsizes, int, &num_regsizes, 0);
1313MODULE_PARM_DESC(regsizes, "The size of the specific IPMI register in bytes."
1314                 " This should generally be 1, 2, 4, or 8 for an 8-bit,"
 1315                 " 16-bit, 32-bit, or 64-bit register.  Use this if the"
 1316                 " 8-bit IPMI register has to be read from a larger"
1317                 " register.");
1318module_param_array(regshifts, int, &num_regshifts, 0);
 1319MODULE_PARM_DESC(regshifts, "The amount to shift the data read from the"
1320                 " IPMI register, in bits.  For instance, if the data"
1321                 " is read from a 32-bit word and the IPMI data is in"
 1322                 " bits 8-15, then the shift would be 8");
1323module_param_array(slave_addrs, int, &num_slave_addrs, 0);
1324MODULE_PARM_DESC(slave_addrs, "Set the default IPMB slave address for"
1325                 " the controller.  Normally this is 0x20, but can be"
1326                 " overridden by this parm.  This is an array indexed"
1327                 " by interface number.");
1328module_param_array(force_kipmid, int, &num_force_kipmid, 0);
1329MODULE_PARM_DESC(force_kipmid, "Force the kipmi daemon to be enabled (1) or"
1330                 " disabled(0).  Normally the IPMI driver auto-detects"
1331                 " this, but the value may be overridden by this parm.");
1332module_param(unload_when_empty, int, 0);
1333MODULE_PARM_DESC(unload_when_empty, "Unload the module if no interfaces are"
1334                 " specified or found, default is 1.  Setting to 0"
1335                 " is useful for hot add of devices using hotmod.");
1336module_param_array(kipmid_max_busy_us, uint, &num_max_busy_us, 0644);
1337MODULE_PARM_DESC(kipmid_max_busy_us,
1338                 "Max time (in microseconds) to busy-wait for IPMI data before"
1339                 " sleeping. 0 (default) means to wait forever. Set to 100-500"
1340                 " if kipmid is using up a lot of CPU time.");
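     /*
      * Illustrative example (values are made up) of how the parameters
      * above combine on the module command line:
      *
      *   modprobe ipmi_si type=kcs,bt ports=0xca2,0xe4 regspacings=4,1 \
      *            irqs=0,10 slave_addrs=0x20
      *
      * This would describe a polled KCS interface at I/O port 0xca2 with
      * a register spacing of 4 and a BT interface at 0xe4 using IRQ 10.
      */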
1341
1342
1343static void std_irq_cleanup(struct smi_info *info)
1344{
1345        if (info->si_type == SI_BT)
1346                /* Disable the interrupt in the BT interface. */
1347                info->io.outputb(&info->io, IPMI_BT_INTMASK_REG, 0);
1348        free_irq(info->irq, info);
1349}
1350
1351static int std_irq_setup(struct smi_info *info)
1352{
1353        int rv;
1354
1355        if (!info->irq)
1356                return 0;
1357
1358        if (info->si_type == SI_BT) {
1359                rv = request_irq(info->irq,
1360                                 si_bt_irq_handler,
1361                                 IRQF_SHARED | IRQF_DISABLED,
1362                                 DEVICE_NAME,
1363                                 info);
1364                if (!rv)
1365                        /* Enable the interrupt in the BT interface. */
1366                        info->io.outputb(&info->io, IPMI_BT_INTMASK_REG,
1367                                         IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
1368        } else
1369                rv = request_irq(info->irq,
1370                                 si_irq_handler,
1371                                 IRQF_SHARED | IRQF_DISABLED,
1372                                 DEVICE_NAME,
1373                                 info);
1374        if (rv) {
1375                dev_warn(info->dev, "%s unable to claim interrupt %d,"
1376                         " running polled\n",
1377                         DEVICE_NAME, info->irq);
1378                info->irq = 0;
1379        } else {
1380                info->irq_cleanup = std_irq_cleanup;
1381                dev_info(info->dev, "Using irq %d\n", info->irq);
1382        }
1383
1384        return rv;
1385}
1386
1387static unsigned char port_inb(struct si_sm_io *io, unsigned int offset)
1388{
1389        unsigned int addr = io->addr_data;
1390
1391        return inb(addr + (offset * io->regspacing));
1392}
1393
1394static void port_outb(struct si_sm_io *io, unsigned int offset,
1395                      unsigned char b)
1396{
1397        unsigned int addr = io->addr_data;
1398
1399        outb(b, addr + (offset * io->regspacing));
1400}
1401
1402static unsigned char port_inw(struct si_sm_io *io, unsigned int offset)
1403{
1404        unsigned int addr = io->addr_data;
1405
1406        return (inw(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
1407}
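     /*
      * Concrete example of the addressing above (numbers are made up):
      * with io->addr_data = 0xca2, io->regspacing = 4 and io->regshift = 8,
      * port_inw() at offset 1 does inw(0xca2 + 1 * 4) = inw(0xca6) and
      * returns bits 8-15 of the 16-bit value.
      */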
1408
1409static void port_outw(struct si_sm_io *io, unsigned int offset,
1410                      unsigned char b)
1411{
1412        unsigned int addr = io->addr_data;
1413
1414        outw(b << io->regshift, addr + (offset * io->regspacing));
1415}
1416
1417static unsigned char port_inl(struct si_sm_io *io, unsigned int offset)
1418{
1419        unsigned int addr = io->addr_data;
1420
1421        return (inl(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
1422}
1423
1424static void port_outl(struct si_sm_io *io, unsigned int offset,
1425                      unsigned char b)
1426{
1427        unsigned int addr = io->addr_data;
1428
1429        outl(b << io->regshift, addr+(offset * io->regspacing));
1430}
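/*
 * Illustrative example of how the port accessors above combine the I/O
 * parameters (values are hypothetical): with addr_data = 0xca2 and
 * regspacing = 4, register offset 1 is accessed at port 0xca6.  When
 * regsize = 2 selects port_inw()/port_outw(), a regshift of 8 means the
 * IPMI byte is taken from bits 8..15 of the 16-bit register.
 */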
1431
1432static void port_cleanup(struct smi_info *info)
1433{
1434        unsigned int addr = info->io.addr_data;
1435        int          idx;
1436
1437        if (addr) {
1438                for (idx = 0; idx < info->io_size; idx++)
1439                        release_region(addr + idx * info->io.regspacing,
1440                                       info->io.regsize);
1441        }
1442}
1443
1444static int port_setup(struct smi_info *info)
1445{
1446        unsigned int addr = info->io.addr_data;
1447        int          idx;
1448
1449        if (!addr)
1450                return -ENODEV;
1451
1452        info->io_cleanup = port_cleanup;
1453
1454        /*
1455         * Figure out the actual inb/inw/inl/etc routine to use based
1456         * upon the register size.
1457         */
1458        switch (info->io.regsize) {
1459        case 1:
1460                info->io.inputb = port_inb;
1461                info->io.outputb = port_outb;
1462                break;
1463        case 2:
1464                info->io.inputb = port_inw;
1465                info->io.outputb = port_outw;
1466                break;
1467        case 4:
1468                info->io.inputb = port_inl;
1469                info->io.outputb = port_outl;
1470                break;
1471        default:
1472                dev_warn(info->dev, "Invalid register size: %d\n",
1473                         info->io.regsize);
1474                return -EINVAL;
1475        }
1476
1477        /*
1478         * Some BIOSes reserve disjoint I/O regions in their ACPI
1479         * tables.  This causes problems when trying to register the
1480         * entire I/O region.  Therefore we must register each I/O
1481         * port separately.
1482         */
1483        for (idx = 0; idx < info->io_size; idx++) {
1484                if (request_region(addr + idx * info->io.regspacing,
1485                                   info->io.regsize, DEVICE_NAME) == NULL) {
1486                        /* Undo allocations */
1487                        while (idx--) {
1488                                release_region(addr + idx * info->io.regspacing,
1489                                               info->io.regsize);
1490                        }
1491                        return -EIO;
1492                }
1493        }
1494        return 0;
1495}
1496
1497static unsigned char intf_mem_inb(struct si_sm_io *io, unsigned int offset)
1498{
1499        return readb((io->addr)+(offset * io->regspacing));
1500}
1501
1502static void intf_mem_outb(struct si_sm_io *io, unsigned int offset,
1503                     unsigned char b)
1504{
1505        writeb(b, (io->addr)+(offset * io->regspacing));
1506}
1507
1508static unsigned char intf_mem_inw(struct si_sm_io *io, unsigned int offset)
1509{
1510        return (readw((io->addr)+(offset * io->regspacing)) >> io->regshift)
1511                & 0xff;
1512}
1513
1514static void intf_mem_outw(struct si_sm_io *io, unsigned int offset,
1515                     unsigned char b)
1516{
1517        writew(b << io->regshift, (io->addr)+(offset * io->regspacing));
1518}
1519
1520static unsigned char intf_mem_inl(struct si_sm_io *io, unsigned int offset)
1521{
1522        return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift)
1523                & 0xff;
1524}
1525
1526static void intf_mem_outl(struct si_sm_io *io, unsigned int offset,
1527                     unsigned char b)
1528{
1529        writel(b << io->regshift, (io->addr)+(offset * io->regspacing));
1530}
1531
1532#ifdef readq
1533static unsigned char mem_inq(struct si_sm_io *io, unsigned int offset)
1534{
1535        return (readq((io->addr)+(offset * io->regspacing)) >> io->regshift)
1536                & 0xff;
1537}
1538
1539static void mem_outq(struct si_sm_io *io, unsigned int offset,
1540                     unsigned char b)
1541{
1542        writeq(b << io->regshift, (io->addr)+(offset * io->regspacing));
1543}
1544#endif
1545
1546static void mem_cleanup(struct smi_info *info)
1547{
1548        unsigned long addr = info->io.addr_data;
1549        int           mapsize;
1550
1551        if (info->io.addr) {
1552                iounmap(info->io.addr);
1553
1554                mapsize = ((info->io_size * info->io.regspacing)
1555                           - (info->io.regspacing - info->io.regsize));
1556
1557                release_mem_region(addr, mapsize);
1558        }
1559}
1560
1561static int mem_setup(struct smi_info *info)
1562{
1563        unsigned long addr = info->io.addr_data;
1564        int           mapsize;
1565
1566        if (!addr)
1567                return -ENODEV;
1568
1569        info->io_cleanup = mem_cleanup;
1570
1571        /*
1572         * Figure out the actual readb/readw/readl/etc routine to use based
1573         * upon the register size.
1574         */
1575        switch (info->io.regsize) {
1576        case 1:
1577                info->io.inputb = intf_mem_inb;
1578                info->io.outputb = intf_mem_outb;
1579                break;
1580        case 2:
1581                info->io.inputb = intf_mem_inw;
1582                info->io.outputb = intf_mem_outw;
1583                break;
1584        case 4:
1585                info->io.inputb = intf_mem_inl;
1586                info->io.outputb = intf_mem_outl;
1587                break;
1588#ifdef readq
1589        case 8:
1590                info->io.inputb = mem_inq;
1591                info->io.outputb = mem_outq;
1592                break;
1593#endif
1594        default:
1595                dev_warn(info->dev, "Invalid register size: %d\n",
1596                         info->io.regsize);
1597                return -EINVAL;
1598        }
1599
1600        /*
1601         * Calculate the total amount of memory to claim.  This is an
1602         * unusual looking calculation, but it avoids claiming any
1603         * more memory than it has to.  It will claim everything
1604         * between the first address to the end of the last full
1605         * register.
1606         */
1607        mapsize = ((info->io_size * info->io.regspacing)
1608                   - (info->io.regspacing - info->io.regsize));
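        /*
         * Worked example with illustrative values: io_size = 2,
         * regspacing = 4 and regsize = 1 give
         * mapsize = (2 * 4) - (4 - 1) = 5, covering the first register,
         * the 3-byte gap and the second register, but nothing beyond the
         * end of the last register.
         */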
1609
1610        if (request_mem_region(addr, mapsize, DEVICE_NAME) == NULL)
1611                return -EIO;
1612
1613        info->io.addr = ioremap(addr, mapsize);
1614        if (info->io.addr == NULL) {
1615                release_mem_region(addr, mapsize);
1616                return -EIO;
1617        }
1618        return 0;
1619}
1620
1621/*
1622 * Parms come in as <op1>[:op2[:op3...]].  ops are:
1623 *   add|remove,kcs|bt|smic,mem|i/o,<address>[,<opt1>[,<opt2>[,...]]]
1624 * Options are:
1625 *   rsp=<regspacing>
1626 *   rsi=<regsize>
1627 *   rsh=<regshift>
1628 *   irq=<irq>
1629 *   ipmb=<ipmb addr>
1630 */
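/*
 * Illustrative examples (hypothetical addresses): adding and then removing
 * a KCS interface at I/O port 0xca2 through the hotmod module parameter:
 *
 *   echo "add,kcs,i/o,0xca2" > /sys/module/ipmi_si/parameters/hotmod
 *   echo "remove,kcs,i/o,0xca2" > /sys/module/ipmi_si/parameters/hotmod
 */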
1631enum hotmod_op { HM_ADD, HM_REMOVE };
1632struct hotmod_vals {
1633        char *name;
1634        int  val;
1635};
1636static struct hotmod_vals hotmod_ops[] = {
1637        { "add",        HM_ADD },
1638        { "remove",     HM_REMOVE },
1639        { NULL }
1640};
1641static struct hotmod_vals hotmod_si[] = {
1642        { "kcs",        SI_KCS },
1643        { "smic",       SI_SMIC },
1644        { "bt",         SI_BT },
1645        { NULL }
1646};
1647static struct hotmod_vals hotmod_as[] = {
1648        { "mem",        IPMI_MEM_ADDR_SPACE },
1649        { "i/o",        IPMI_IO_ADDR_SPACE },
1650        { NULL }
1651};
1652
1653static int parse_str(struct hotmod_vals *v, int *val, char *name, char **curr)
1654{
1655        char *s;
1656        int  i;
1657
1658        s = strchr(*curr, ',');
1659        if (!s) {
1660                printk(KERN_WARNING PFX "No hotmod %s given.\n", name);
1661                return -EINVAL;
1662        }
1663        *s = '\0';
1664        s++;
1665        for (i = 0; v[i].name; i++) {
1666                if (strcmp(*curr, v[i].name) == 0) {
1667                        *val = v[i].val;
1668                        *curr = s;
1669                        return 0;
1670                }
1671        }
1672
1673        printk(KERN_WARNING PFX "Invalid hotmod %s '%s'\n", name, *curr);
1674        return -EINVAL;
1675}
1676
1677static int check_hotmod_int_op(const char *curr, const char *option,
1678                               const char *name, int *val)
1679{
1680        char *n;
1681
1682        if (strcmp(curr, name) == 0) {
1683                if (!option) {
1684                        printk(KERN_WARNING PFX
1685                               "No option given for '%s'\n",
1686                               curr);
1687                        return -EINVAL;
1688                }
1689                *val = simple_strtoul(option, &n, 0);
1690                if ((*n != '\0') || (*option == '\0')) {
1691                        printk(KERN_WARNING PFX
1692                               "Bad option given for '%s'\n",
1693                               curr);
1694                        return -EINVAL;
1695                }
1696                return 1;
1697        }
1698        return 0;
1699}
1700
1701static struct smi_info *smi_info_alloc(void)
1702{
1703        struct smi_info *info = kzalloc(sizeof(*info), GFP_KERNEL);
1704
1705        if (info)
1706                spin_lock_init(&info->si_lock);
1707        return info;
1708}
1709
1710static int hotmod_handler(const char *val, struct kernel_param *kp)
1711{
1712        char *str = kstrdup(val, GFP_KERNEL);
1713        int  rv;
1714        char *next, *curr, *s, *n, *o;
1715        enum hotmod_op op;
1716        enum si_type si_type;
1717        int  addr_space;
1718        unsigned long addr;
1719        int regspacing;
1720        int regsize;
1721        int regshift;
1722        int irq;
1723        int ipmb;
1724        int ival;
1725        int len;
1726        struct smi_info *info;
1727
1728        if (!str)
1729                return -ENOMEM;
1730
1731        /* Kill any trailing spaces, as we can get a "\n" from echo. */
1732        len = strlen(str);
1733        ival = len - 1;
1734        while ((ival >= 0) && isspace(str[ival])) {
1735                str[ival] = '\0';
1736                ival--;
1737        }
1738
1739        for (curr = str; curr; curr = next) {
1740                regspacing = 1;
1741                regsize = 1;
1742                regshift = 0;
1743                irq = 0;
1744                ipmb = 0; /* Choose the default if not specified */
1745
1746                next = strchr(curr, ':');
1747                if (next) {
1748                        *next = '\0';
1749                        next++;
1750                }
1751
1752                rv = parse_str(hotmod_ops, &ival, "operation", &curr);
1753                if (rv)
1754                        break;
1755                op = ival;
1756
1757                rv = parse_str(hotmod_si, &ival, "interface type", &curr);
1758                if (rv)
1759                        break;
1760                si_type = ival;
1761
1762                rv = parse_str(hotmod_as, &addr_space, "address space", &curr);
1763                if (rv)
1764                        break;
1765
1766                s = strchr(curr, ',');
1767                if (s) {
1768                        *s = '\0';
1769                        s++;
1770                }
1771                addr = simple_strtoul(curr, &n, 0);
1772                if ((*n != '\0') || (*curr == '\0')) {
1773                        printk(KERN_WARNING PFX "Invalid hotmod address"
1774                               " '%s'\n", curr);
1775                        break;
1776                }
1777
1778                while (s) {
1779                        curr = s;
1780                        s = strchr(curr, ',');
1781                        if (s) {
1782                                *s = '\0';
1783                                s++;
1784                        }
1785                        o = strchr(curr, '=');
1786                        if (o) {
1787                                *o = '\0';
1788                                o++;
1789                        }
1790                        rv = check_hotmod_int_op(curr, o, "rsp", &regspacing);
1791                        if (rv < 0)
1792                                goto out;
1793                        else if (rv)
1794                                continue;
1795                        rv = check_hotmod_int_op(curr, o, "rsi", &regsize);
1796                        if (rv < 0)
1797                                goto out;
1798                        else if (rv)
1799                                continue;
1800                        rv = check_hotmod_int_op(curr, o, "rsh", &regshift);
1801                        if (rv < 0)
1802                                goto out;
1803                        else if (rv)
1804                                continue;
1805                        rv = check_hotmod_int_op(curr, o, "irq", &irq);
1806                        if (rv < 0)
1807                                goto out;
1808                        else if (rv)
1809                                continue;
1810                        rv = check_hotmod_int_op(curr, o, "ipmb", &ipmb);
1811                        if (rv < 0)
1812                                goto out;
1813                        else if (rv)
1814                                continue;
1815
1816                        rv = -EINVAL;
1817                        printk(KERN_WARNING PFX
1818                               "Invalid hotmod option '%s'\n",
1819                               curr);
1820                        goto out;
1821                }
1822
1823                if (op == HM_ADD) {
1824                        info = smi_info_alloc();
1825                        if (!info) {
1826                                rv = -ENOMEM;
1827                                goto out;
1828                        }
1829
1830                        info->addr_source = SI_HOTMOD;
1831                        info->si_type = si_type;
1832                        info->io.addr_data = addr;
1833                        info->io.addr_type = addr_space;
1834                        if (addr_space == IPMI_MEM_ADDR_SPACE)
1835                                info->io_setup = mem_setup;
1836                        else
1837                                info->io_setup = port_setup;
1838
1839                        info->io.addr = NULL;
1840                        info->io.regspacing = regspacing;
1841                        if (!info->io.regspacing)
1842                                info->io.regspacing = DEFAULT_REGSPACING;
1843                        info->io.regsize = regsize;
1844                        if (!info->io.regsize)
1845                                info->io.regsize = DEFAULT_REGSIZE;
1846                        info->io.regshift = regshift;
1847                        info->irq = irq;
1848                        if (info->irq)
1849                                info->irq_setup = std_irq_setup;
1850                        info->slave_addr = ipmb;
1851
1852                        if (!add_smi(info)) {
1853                                if (try_smi_init(info))
1854                                        cleanup_one_si(info);
1855                        } else {
1856                                kfree(info);
1857                        }
1858                } else {
1859                        /* remove */
1860                        struct smi_info *e, *tmp_e;
1861
1862                        mutex_lock(&smi_infos_lock);
1863                        list_for_each_entry_safe(e, tmp_e, &smi_infos, link) {
1864                                if (e->io.addr_type != addr_space)
1865                                        continue;
1866                                if (e->si_type != si_type)
1867                                        continue;
1868                                if (e->io.addr_data == addr)
1869                                        cleanup_one_si(e);
1870                        }
1871                        mutex_unlock(&smi_infos_lock);
1872                }
1873        }
1874        rv = len;
1875 out:
1876        kfree(str);
1877        return rv;
1878}
1879
1880static int hardcode_find_bmc(void)
1881{
1882        int ret = -ENODEV;
1883        int             i;
1884        struct smi_info *info;
1885
1886        for (i = 0; i < SI_MAX_PARMS; i++) {
1887                if (!ports[i] && !addrs[i])
1888                        continue;
1889
1890                info = smi_info_alloc();
1891                if (!info)
1892                        return -ENOMEM;
1893
1894                info->addr_source = SI_HARDCODED;
1895                printk(KERN_INFO PFX "probing via hardcoded address\n");
1896
1897                if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) {
1898                        info->si_type = SI_KCS;
1899                } else if (strcmp(si_type[i], "smic") == 0) {
1900                        info->si_type = SI_SMIC;
1901                } else if (strcmp(si_type[i], "bt") == 0) {
1902                        info->si_type = SI_BT;
1903                } else {
1904                        printk(KERN_WARNING PFX "Interface type specified "
1905                               "for interface %d was invalid: %s\n",
1906                               i, si_type[i]);
1907                        kfree(info);
1908                        continue;
1909                }
1910
1911                if (ports[i]) {
1912                        /* An I/O port */
1913                        info->io_setup = port_setup;
1914                        info->io.addr_data = ports[i];
1915                        info->io.addr_type = IPMI_IO_ADDR_SPACE;
1916                } else if (addrs[i]) {
1917                        /* A memory address */
1918                        info->io_setup = mem_setup;
1919                        info->io.addr_data = addrs[i];
1920                        info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1921                } else {
1922                        printk(KERN_WARNING PFX "Interface type specified "
1923                               "for interface %d, but port and address were "
1924                               "not set or set to zero.\n", i);
1925                        kfree(info);
1926                        continue;
1927                }
1928
1929                info->io.addr = NULL;
1930                info->io.regspacing = regspacings[i];
1931                if (!info->io.regspacing)
1932                        info->io.regspacing = DEFAULT_REGSPACING;
1933                info->io.regsize = regsizes[i];
1934                if (!info->io.regsize)
1935                        info->io.regsize = DEFAULT_REGSIZE;
1936                info->io.regshift = regshifts[i];
1937                info->irq = irqs[i];
1938                if (info->irq)
1939                        info->irq_setup = std_irq_setup;
1940                info->slave_addr = slave_addrs[i];
1941
1942                if (!add_smi(info)) {
1943                        if (try_smi_init(info))
1944                                cleanup_one_si(info);
1945                        ret = 0;
1946                } else {
1947                        kfree(info);
1948                }
1949        }
1950        return ret;
1951}
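/*
 * Illustrative example (hypothetical values): hardcode_find_bmc() above
 * consumes the type/ports/addrs/regspacings/... module parameter arrays,
 * e.g.:
 *
 *   modprobe ipmi_si type=kcs ports=0xca2 regspacings=1 slave_addrs=0x20
 */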
1952
1953#ifdef CONFIG_ACPI
1954
1955#include <linux/acpi.h>
1956
1957/*
1958 * Once we get an ACPI failure, we don't try any more, because we go
1959 * through the tables sequentially.  Once we don't find a table, there
1960 * are no more.
1961 */
1962static int acpi_failure;
1963
1964/* For GPE-type interrupts. */
1965static u32 ipmi_acpi_gpe(acpi_handle gpe_device,
1966        u32 gpe_number, void *context)
1967{
1968        struct smi_info *smi_info = context;
1969        unsigned long   flags;
1970#ifdef DEBUG_TIMING
1971        struct timeval t;
1972#endif
1973
1974        spin_lock_irqsave(&(smi_info->si_lock), flags);
1975
1976        smi_inc_stat(smi_info, interrupts);
1977
1978#ifdef DEBUG_TIMING
1979        do_gettimeofday(&t);
1980        printk("**ACPI_GPE: %ld.%9.9ld\n", t.tv_sec, t.tv_usec);
1981#endif
1982        smi_event_handler(smi_info, 0);
1983        spin_unlock_irqrestore(&(smi_info->si_lock), flags);
1984
1985        return ACPI_INTERRUPT_HANDLED;
1986}
1987
1988static void acpi_gpe_irq_cleanup(struct smi_info *info)
1989{
1990        if (!info->irq)
1991                return;
1992
1993        acpi_remove_gpe_handler(NULL, info->irq, &ipmi_acpi_gpe);
1994}
1995
1996static int acpi_gpe_irq_setup(struct smi_info *info)
1997{
1998        acpi_status status;
1999
2000        if (!info->irq)
2001                return 0;
2002
2003        /* FIXME - is level triggered right? */
2004        status = acpi_install_gpe_handler(NULL,
2005                                          info->irq,
2006                                          ACPI_GPE_LEVEL_TRIGGERED,
2007                                          &ipmi_acpi_gpe,
2008                                          info);
2009        if (status != AE_OK) {
2010                dev_warn(info->dev, "%s unable to claim ACPI GPE %d,"
2011                         " running polled\n", DEVICE_NAME, info->irq);
2012                info->irq = 0;
2013                return -EINVAL;
2014        } else {
2015                info->irq_cleanup = acpi_gpe_irq_cleanup;
2016                dev_info(info->dev, "Using ACPI GPE %d\n", info->irq);
2017                return 0;
2018        }
2019}
2020
2021/*
2022 * Defined at
2023 * http://h21007.www2.hp.com/portal/download/files/unprot/hpspmi.pdf
2024 */
2025struct SPMITable {
2026        s8      Signature[4];
2027        u32     Length;
2028        u8      Revision;
2029        u8      Checksum;
2030        s8      OEMID[6];
2031        s8      OEMTableID[8];
2032        s8      OEMRevision[4];
2033        s8      CreatorID[4];
2034        s8      CreatorRevision[4];
2035        u8      InterfaceType;
2036        u8      IPMIlegacy;
2037        s16     SpecificationRevision;
2038
2039        /*
2040         * Bit 0 - SCI interrupt supported
2041         * Bit 1 - I/O APIC/SAPIC
2042         */
2043        u8      InterruptType;
2044
2045        /*
2046         * If bit 0 of InterruptType is set, then this is the SCI
2047         * interrupt in the GPEx_STS register.
2048         */
2049        u8      GPE;
2050
2051        s16     Reserved;
2052
2053        /*
2054         * If bit 1 of InterruptType is set, then this is the I/O
2055         * APIC/SAPIC interrupt.
2056         */
2057        u32     GlobalSystemInterrupt;
2058
2059        /* The actual register address. */
2060        struct acpi_generic_address addr;
2061
2062        u8      UID[4];
2063
2064        s8      spmi_id[1]; /* A '\0' terminated array starts here. */
2065};
2066
2067static int try_init_spmi(struct SPMITable *spmi)
2068{
2069        struct smi_info  *info;
2070
2071        if (spmi->IPMIlegacy != 1) {
2072                printk(KERN_INFO PFX "Bad SPMI legacy %d\n", spmi->IPMIlegacy);
2073                return -ENODEV;
2074        }
2075
2076        info = smi_info_alloc();
2077        if (!info) {
2078                printk(KERN_ERR PFX "Could not allocate SI data (3)\n");
2079                return -ENOMEM;
2080        }
2081
2082        info->addr_source = SI_SPMI;
2083        printk(KERN_INFO PFX "probing via SPMI\n");
2084
2085        /* Figure out the interface type. */
2086        switch (spmi->InterfaceType) {
2087        case 1: /* KCS */
2088                info->si_type = SI_KCS;
2089                break;
2090        case 2: /* SMIC */
2091                info->si_type = SI_SMIC;
2092                break;
2093        case 3: /* BT */
2094                info->si_type = SI_BT;
2095                break;
2096        default:
2097                printk(KERN_INFO PFX "Unknown ACPI/SPMI SI type %d\n",
2098                       spmi->InterfaceType);
2099                kfree(info);
2100                return -EIO;
2101        }
2102
2103        if (spmi->InterruptType & 1) {
2104                /* We've got a GPE interrupt. */
2105                info->irq = spmi->GPE;
2106                info->irq_setup = acpi_gpe_irq_setup;
2107        } else if (spmi->InterruptType & 2) {
2108                /* We've got an APIC/SAPIC interrupt. */
2109                info->irq = spmi->GlobalSystemInterrupt;
2110                info->irq_setup = std_irq_setup;
2111        } else {
2112                /* Use the default interrupt setting. */
2113                info->irq = 0;
2114                info->irq_setup = NULL;
2115        }
2116
2117        if (spmi->addr.bit_width) {
2118                /* A (hopefully) properly formed register bit width. */
2119                info->io.regspacing = spmi->addr.bit_width / 8;
2120        } else {
2121                info->io.regspacing = DEFAULT_REGSPACING;
2122        }
2123        info->io.regsize = info->io.regspacing;
2124        info->io.regshift = spmi->addr.bit_offset;
2125
2126        if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
2127                info->io_setup = mem_setup;
2128                info->io.addr_type = IPMI_MEM_ADDR_SPACE;
2129        } else if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
2130                info->io_setup = port_setup;
2131                info->io.addr_type = IPMI_IO_ADDR_SPACE;
2132        } else {
2133                kfree(info);
2134                printk(KERN_WARNING PFX "Unknown ACPI I/O Address type\n");
2135                return -EIO;
2136        }
2137        info->io.addr_data = spmi->addr.address;
2138
2139        pr_info("ipmi_si: SPMI: %s %#lx regsize %d spacing %d irq %d\n",
2140                 (info->io.addr_type == IPMI_IO_ADDR_SPACE) ? "io" : "mem",
2141                 info->io.addr_data, info->io.regsize, info->io.regspacing,
2142                 info->irq);
2143
2144        if (add_smi(info))
2145                kfree(info);
2146
2147        return 0;
2148}
2149
2150static void spmi_find_bmc(void)
2151{
2152        acpi_status      status;
2153        struct SPMITable *spmi;
2154        int              i;
2155
2156        if (acpi_disabled)
2157                return;
2158
2159        if (acpi_failure)
2160                return;
2161
2162        for (i = 0; ; i++) {
2163                status = acpi_get_table(ACPI_SIG_SPMI, i+1,
2164                                        (struct acpi_table_header **)&spmi);
2165                if (status != AE_OK)
2166                        return;
2167
2168                try_init_spmi(spmi);
2169        }
2170}
2171
2172static int ipmi_pnp_probe(struct pnp_dev *dev,
2173                                    const struct pnp_device_id *dev_id)
2174{
2175        struct acpi_device *acpi_dev;
2176        struct smi_info *info;
2177        struct resource *res, *res_second;
2178        acpi_handle handle;
2179        acpi_status status;
2180        unsigned long long tmp;
2181
2182        acpi_dev = pnp_acpi_device(dev);
2183        if (!acpi_dev)
2184                return -ENODEV;
2185
2186        info = smi_info_alloc();
2187        if (!info)
2188                return -ENOMEM;
2189
2190        info->addr_source = SI_ACPI;
2191        printk(KERN_INFO PFX "probing via ACPI\n");
2192
2193        handle = acpi_dev->handle;
2194        info->addr_info.acpi_info.acpi_handle = handle;
2195
2196        /* _IFT tells us the interface type: KCS, BT, etc */
2197        status = acpi_evaluate_integer(handle, "_IFT", NULL, &tmp);
2198        if (ACPI_FAILURE(status))
2199                goto err_free;
2200
2201        switch (tmp) {
2202        case 1:
2203                info->si_type = SI_KCS;
2204                break;
2205        case 2:
2206                info->si_type = SI_SMIC;
2207                break;
2208        case 3:
2209                info->si_type = SI_BT;
2210                break;
2211        default:
2212                dev_info(&dev->dev, "unknown IPMI type %lld\n", tmp);
2213                goto err_free;
2214        }
2215
2216        res = pnp_get_resource(dev, IORESOURCE_IO, 0);
2217        if (res) {
2218                info->io_setup = port_setup;
2219                info->io.addr_type = IPMI_IO_ADDR_SPACE;
2220        } else {
2221                res = pnp_get_resource(dev, IORESOURCE_MEM, 0);
2222                if (res) {
2223                        info->io_setup = mem_setup;
2224                        info->io.addr_type = IPMI_MEM_ADDR_SPACE;
2225                }
2226        }
2227        if (!res) {
2228                dev_err(&dev->dev, "no I/O or memory address\n");
2229                goto err_free;
2230        }
2231        info->io.addr_data = res->start;
2232
2233        info->io.regspacing = DEFAULT_REGSPACING;
2234        res_second = pnp_get_resource(dev,
2235                               (info->io.addr_type == IPMI_IO_ADDR_SPACE) ?
2236                                        IORESOURCE_IO : IORESOURCE_MEM,
2237                               1);
2238        if (res_second) {
2239                if (res_second->start > info->io.addr_data)
2240                        info->io.regspacing = res_second->start - info->io.addr_data;
2241        }
2242        info->io.regsize = DEFAULT_REGSIZE;
2243        info->io.regshift = 0;
2244
2245        /* If _GPE exists, use it; otherwise use standard interrupts */
2246        status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp);
2247        if (ACPI_SUCCESS(status)) {
2248                info->irq = tmp;
2249                info->irq_setup = acpi_gpe_irq_setup;
2250        } else if (pnp_irq_valid(dev, 0)) {
2251                info->irq = pnp_irq(dev, 0);
2252                info->irq_setup = std_irq_setup;
2253        }
2254
2255        info->dev = &dev->dev;
2256        pnp_set_drvdata(dev, info);
2257
2258        dev_info(info->dev, "%pR regsize %d spacing %d irq %d\n",
2259                 res, info->io.regsize, info->io.regspacing,
2260                 info->irq);
2261
2262        if (add_smi(info))
2263                goto err_free;
2264
2265        return 0;
2266
2267err_free:
2268        kfree(info);
2269        return -EINVAL;
2270}
2271
2272static void ipmi_pnp_remove(struct pnp_dev *dev)
2273{
2274        struct smi_info *info = pnp_get_drvdata(dev);
2275
2276        cleanup_one_si(info);
2277}
2278
2279static const struct pnp_device_id pnp_dev_table[] = {
2280        {"IPI0001", 0},
2281        {"", 0},
2282};
2283
2284static struct pnp_driver ipmi_pnp_driver = {
2285        .name           = DEVICE_NAME,
2286        .probe          = ipmi_pnp_probe,
2287        .remove         = ipmi_pnp_remove,
2288        .id_table       = pnp_dev_table,
2289};
2290
2291MODULE_DEVICE_TABLE(pnp, pnp_dev_table);
2292#endif
2293
2294#ifdef CONFIG_DMI
2295struct dmi_ipmi_data {
2296        u8              type;
2297        u8              addr_space;
2298        unsigned long   base_addr;
2299        u8              irq;
2300        u8              offset;
2301        u8              slave_addr;
2302};
2303
2304static int decode_dmi(const struct dmi_header *dm,
2305                                struct dmi_ipmi_data *dmi)
2306{
2307        const u8        *data = (const u8 *)dm;
2308        unsigned long   base_addr;
2309        u8              reg_spacing;
2310        u8              len = dm->length;
2311
2312        dmi->type = data[4];
2313
2314        memcpy(&base_addr, data+8, sizeof(unsigned long));
2315        if (len >= 0x11) {
2316                if (base_addr & 1) {
2317                        /* I/O */
2318                        base_addr &= 0xFFFE;
2319                        dmi->addr_space = IPMI_IO_ADDR_SPACE;
2320                } else
2321                        /* Memory */
2322                        dmi->addr_space = IPMI_MEM_ADDR_SPACE;
2323
2324                /* If bit 4 of byte 0x10 is set, then the lsb for the address
2325                   is odd. */
2326                dmi->base_addr = base_addr | ((data[0x10] & 0x10) >> 4);
2327
2328                dmi->irq = data[0x11];
2329
2330                /* The top two bits of byte 0x10 hold the register spacing. */
2331                reg_spacing = (data[0x10] & 0xC0) >> 6;
2332                switch (reg_spacing) {
2333                case 0x00: /* Byte boundaries */
2334                    dmi->offset = 1;
2335                    break;
2336                case 0x01: /* 32-bit boundaries */
2337                    dmi->offset = 4;
2338                    break;
2339                case 0x02: /* 16-byte boundaries */
2340                    dmi->offset = 16;
2341                    break;
2342                default:
2343                    /* Some other interface, just ignore it. */
2344                    return -EIO;
2345                }
2346        } else {
2347                /* Old DMI spec. */
2348                /*
2349                 * Note that technically, the lower bit of the base
2350                 * address should be 1 if the address is I/O and 0 if
2351                 * the address is in memory.  So many systems get that
2352                 * wrong (and all that I have seen are I/O) so we just
2353                 * ignore that bit and assume I/O.  Systems that use
2354                 * memory should use the newer spec, anyway.
2355                 */
2356                dmi->base_addr = base_addr & 0xfffe;
2357                dmi->addr_space = IPMI_IO_ADDR_SPACE;
2358                dmi->offset = 1;
2359        }
2360
2361        dmi->slave_addr = data[6];
2362
2363        return 0;
2364}
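/*
 * Worked example with a hypothetical SMBIOS record: a base-address field
 * of 0x0ca3 and data[0x10] = 0x40 decode as I/O space (bit 0 of the
 * address set), so base_addr becomes 0x0ca2; bits 7:6 of data[0x10] are
 * 01b, so registers sit on 32-bit boundaries and dmi->offset is 4.
 */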
2365
2366static void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
2367{
2368        struct smi_info *info;
2369
2370        info = smi_info_alloc();
2371        if (!info) {
2372                printk(KERN_ERR PFX "Could not allocate SI data\n");
2373                return;
2374        }
2375
2376        info->addr_source = SI_SMBIOS;
2377        printk(KERN_INFO PFX "probing via SMBIOS\n");
2378
2379        switch (ipmi_data->type) {
2380        case 0x01: /* KCS */
2381                info->si_type = SI_KCS;
2382                break;
2383        case 0x02: /* SMIC */
2384                info->si_type = SI_SMIC;
2385                break;
2386        case 0x03: /* BT */
2387                info->si_type = SI_BT;
2388                break;
2389        default:
2390                kfree(info);
2391                return;
2392        }
2393
2394        switch (ipmi_data->addr_space) {
2395        case IPMI_MEM_ADDR_SPACE:
2396                info->io_setup = mem_setup;
2397                info->io.addr_type = IPMI_MEM_ADDR_SPACE;
2398                break;
2399
2400        case IPMI_IO_ADDR_SPACE:
2401                info->io_setup = port_setup;
2402                info->io.addr_type = IPMI_IO_ADDR_SPACE;
2403                break;
2404
2405        default:
2406                kfree(info);
2407                printk(KERN_WARNING PFX "Unknown SMBIOS I/O Address type: %d\n",
2408                       ipmi_data->addr_space);
2409                return;
2410        }
2411        info->io.addr_data = ipmi_data->base_addr;
2412
2413        info->io.regspacing = ipmi_data->offset;
2414        if (!info->io.regspacing)
2415                info->io.regspacing = DEFAULT_REGSPACING;
2416        info->io.regsize = DEFAULT_REGSIZE;
2417        info->io.regshift = 0;
2418
2419        info->slave_addr = ipmi_data->slave_addr;
2420
2421        info->irq = ipmi_data->irq;
2422        if (info->irq)
2423                info->irq_setup = std_irq_setup;
2424
2425        pr_info("ipmi_si: SMBIOS: %s %#lx regsize %d spacing %d irq %d\n",
2426                 (info->io.addr_type == IPMI_IO_ADDR_SPACE) ? "io" : "mem",
2427                 info->io.addr_data, info->io.regsize, info->io.regspacing,
2428                 info->irq);
2429
2430        if (add_smi(info))
2431                kfree(info);
2432}
2433
2434static void dmi_find_bmc(void)
2435{
2436        const struct dmi_device *dev = NULL;
2437        struct dmi_ipmi_data data;
2438        int                  rv;
2439
2440        while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev))) {
2441                memset(&data, 0, sizeof(data));
2442                rv = decode_dmi((const struct dmi_header *) dev->device_data,
2443                                &data);
2444                if (!rv)
2445                        try_init_dmi(&data);
2446        }
2447}
2448#endif /* CONFIG_DMI */
2449
2450#ifdef CONFIG_PCI
2451
2452#define PCI_ERMC_CLASSCODE              0x0C0700
2453#define PCI_ERMC_CLASSCODE_MASK         0xffffff00
2454#define PCI_ERMC_CLASSCODE_TYPE_MASK    0xff
2455#define PCI_ERMC_CLASSCODE_TYPE_SMIC    0x00
2456#define PCI_ERMC_CLASSCODE_TYPE_KCS     0x01
2457#define PCI_ERMC_CLASSCODE_TYPE_BT      0x02
2458
2459#define PCI_HP_VENDOR_ID    0x103C
2460#define PCI_MMC_DEVICE_ID   0x121A
2461#define PCI_MMC_ADDR_CW     0x10
2462
2463static void ipmi_pci_cleanup(struct smi_info *info)
2464{
2465        struct pci_dev *pdev = info->addr_source_data;
2466
2467        pci_disable_device(pdev);
2468}
2469
2470static int ipmi_pci_probe_regspacing(struct smi_info *info)
2471{
2472        if (info->si_type == SI_KCS) {
2473                unsigned char   status;
2474                int             regspacing;
2475
2476                info->io.regsize = DEFAULT_REGSIZE;
2477                info->io.regshift = 0;
2478                info->io_size = 2;
2479                info->handlers = &kcs_smi_handlers;
2480
2481                /* detect 1, 4, 16byte spacing */
2482                for (regspacing = DEFAULT_REGSPACING; regspacing <= 16;) {
2483                        info->io.regspacing = regspacing;
2484                        if (info->io_setup(info)) {
2485                                dev_err(info->dev,
2486                                        "Could not setup I/O space\n");
2487                                return DEFAULT_REGSPACING;
2488                        }
2489                        /* write invalid cmd */
2490                        info->io.outputb(&info->io, 1, 0x10);
2491                        /* read status back */
2492                        status = info->io.inputb(&info->io, 1);
2493                        info->io_cleanup(info);
2494                        if (status)
2495                                return regspacing;
2496                        regspacing *= 4;
2497                }
2498        }
2499        return DEFAULT_REGSPACING;
2500}
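/*
 * The spacing probe above is a heuristic: for each candidate spacing it
 * maps the region, writes an invalid command to where the KCS
 * command/status register would be, and treats a non-zero status
 * read-back as evidence that the candidate spacing actually hit the
 * device's registers; otherwise it multiplies the spacing by 4 and
 * retries (1, 4, 16).
 */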
2501
2502static int ipmi_pci_probe(struct pci_dev *pdev,
2503                                    const struct pci_device_id *ent)
2504{
2505        int rv;
2506        int class_type = pdev->class & PCI_ERMC_CLASSCODE_TYPE_MASK;
2507        struct smi_info *info;
2508
2509        info = smi_info_alloc();
2510        if (!info)
2511                return -ENOMEM;
2512
2513        info->addr_source = SI_PCI;
2514        dev_info(&pdev->dev, "probing via PCI\n");
2515
2516        switch (class_type) {
2517        case PCI_ERMC_CLASSCODE_TYPE_SMIC:
2518                info->si_type = SI_SMIC;
2519                break;
2520
2521        case PCI_ERMC_CLASSCODE_TYPE_KCS:
2522                info->si_type = SI_KCS;
2523                break;
2524
2525        case PCI_ERMC_CLASSCODE_TYPE_BT:
2526                info->si_type = SI_BT;
2527                break;
2528
2529        default:
2530                kfree(info);
2531                dev_info(&pdev->dev, "Unknown IPMI type: %d\n", class_type);
2532                return -ENODEV;
2533        }
2534
2535        rv = pci_enable_device(pdev);
2536        if (rv) {
2537                dev_err(&pdev->dev, "couldn't enable PCI device\n");
2538                kfree(info);
2539                return rv;
2540        }
2541
2542        info->addr_source_cleanup = ipmi_pci_cleanup;
2543        info->addr_source_data = pdev;
2544
2545        if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) {
2546                info->io_setup = port_setup;
2547                info->io.addr_type = IPMI_IO_ADDR_SPACE;
2548        } else {
2549                info->io_setup = mem_setup;
2550                info->io.addr_type = IPMI_MEM_ADDR_SPACE;
2551        }
2552        info->io.addr_data = pci_resource_start(pdev, 0);
2553
2554        info->io.regspacing = ipmi_pci_probe_regspacing(info);
2555        info->io.regsize = DEFAULT_REGSIZE;
2556        info->io.regshift = 0;
2557
2558        info->irq = pdev->irq;
2559        if (info->irq)
2560                info->irq_setup = std_irq_setup;
2561
2562        info->dev = &pdev->dev;
2563        pci_set_drvdata(pdev, info);
2564
2565        dev_info(&pdev->dev, "%pR regsize %d spacing %d irq %d\n",
2566                &pdev->resource[0], info->io.regsize, info->io.regspacing,
2567                info->irq);
2568
2569        if (add_smi(info))
2570                kfree(info);
2571
2572        return 0;
2573}
2574
2575static void ipmi_pci_remove(struct pci_dev *pdev)
2576{
2577        struct smi_info *info = pci_get_drvdata(pdev);
2578        cleanup_one_si(info);
2579}
2580
2581static struct pci_device_id ipmi_pci_devices[] = {
2582        { PCI_DEVICE(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID) },
2583        { PCI_DEVICE_CLASS(PCI_ERMC_CLASSCODE, PCI_ERMC_CLASSCODE_MASK) },
2584        { 0, }
2585};
2586MODULE_DEVICE_TABLE(pci, ipmi_pci_devices);
2587
2588static struct pci_driver ipmi_pci_driver = {
2589        .name =         DEVICE_NAME,
2590        .id_table =     ipmi_pci_devices,
2591        .probe =        ipmi_pci_probe,
2592        .remove =       ipmi_pci_remove,
2593};
2594#endif /* CONFIG_PCI */
2595
2596static struct of_device_id ipmi_match[];
2597static int ipmi_probe(struct platform_device *dev)
2598{
2599#ifdef CONFIG_OF
2600        const struct of_device_id *match;
2601        struct smi_info *info;
2602        struct resource resource;
2603        const __be32 *regsize, *regspacing, *regshift;
2604        struct device_node *np = dev->dev.of_node;
2605        int ret;
2606        int proplen;
2607
2608        dev_info(&dev->dev, "probing via device tree\n");
2609
2610        match = of_match_device(ipmi_match, &dev->dev);
2611        if (!match)
2612                return -EINVAL;
2613
2614        ret = of_address_to_resource(np, 0, &resource);
2615        if (ret) {
2616                dev_warn(&dev->dev, PFX "invalid address from OF\n");
2617                return ret;
2618        }
2619
2620        regsize = of_get_property(np, "reg-size", &proplen);
2621        if (regsize && proplen != 4) {
2622                dev_warn(&dev->dev, PFX "invalid regsize from OF\n");
2623                return -EINVAL;
2624        }
2625
2626        regspacing = of_get_property(np, "reg-spacing", &proplen);
2627        if (regspacing && proplen != 4) {
2628                dev_warn(&dev->dev, PFX "invalid regspacing from OF\n");
2629                return -EINVAL;
2630        }
2631
2632        regshift = of_get_property(np, "reg-shift", &proplen);
2633        if (regshift && proplen != 4) {
2634                dev_warn(&dev->dev, PFX "invalid regshift from OF\n");
2635                return -EINVAL;
2636        }
2637
2638        info = smi_info_alloc();
2639
2640        if (!info) {
2641                dev_err(&dev->dev,
2642                        "could not allocate memory for OF probe\n");
2643                return -ENOMEM;
2644        }
2645
2646        info->si_type           = (enum si_type) match->data;
2647        info->addr_source       = SI_DEVICETREE;
2648        info->irq_setup         = std_irq_setup;
2649
2650        if (resource.flags & IORESOURCE_IO) {
2651                info->io_setup          = port_setup;
2652                info->io.addr_type      = IPMI_IO_ADDR_SPACE;
2653        } else {
2654                info->io_setup          = mem_setup;
2655                info->io.addr_type      = IPMI_MEM_ADDR_SPACE;
2656        }
2657
2658        info->io.addr_data      = resource.start;
2659
2660        info->io.regsize        = regsize ? be32_to_cpup(regsize) : DEFAULT_REGSIZE;
2661        info->io.regspacing     = regspacing ? be32_to_cpup(regspacing) : DEFAULT_REGSPACING;
2662        info->io.regshift       = regshift ? be32_to_cpup(regshift) : 0;
2663
2664        info->irq               = irq_of_parse_and_map(dev->dev.of_node, 0);
2665        info->dev               = &dev->dev;
2666
2667        dev_dbg(&dev->dev, "addr 0x%lx regsize %d spacing %d irq %d\n",
2668                info->io.addr_data, info->io.regsize, info->io.regspacing,
2669                info->irq);
2670
2671        dev_set_drvdata(&dev->dev, info);
2672
2673        if (add_smi(info)) {
2674                kfree(info);
2675                return -EBUSY;
2676        }
2677#endif
2678        return 0;
2679}
2680
2681static int ipmi_remove(struct platform_device *dev)
2682{
2683#ifdef CONFIG_OF
2684        cleanup_one_si(dev_get_drvdata(&dev->dev));
2685#endif
2686        return 0;
2687}
2688
2689static struct of_device_id ipmi_match[] =
2690{
2691        { .type = "ipmi", .compatible = "ipmi-kcs",
2692          .data = (void *)(unsigned long) SI_KCS },
2693        { .type = "ipmi", .compatible = "ipmi-smic",
2694          .data = (void *)(unsigned long) SI_SMIC },
2695        { .type = "ipmi", .compatible = "ipmi-bt",
2696          .data = (void *)(unsigned long) SI_BT },
2697        {},
2698};
2699
2700static struct platform_driver ipmi_driver = {
2701        .driver = {
2702                .name = DEVICE_NAME,
2703                .owner = THIS_MODULE,
2704                .of_match_table = ipmi_match,
2705        },
2706        .probe          = ipmi_probe,
2707        .remove         = ipmi_remove,
2708};
2709
2710#ifdef CONFIG_PARISC
2711static int ipmi_parisc_probe(struct parisc_device *dev)
2712{
2713        struct smi_info *info;
2714
2715        info = smi_info_alloc();
2716
2717        if (!info) {
2718                dev_err(&dev->dev,
2719                        "could not allocate memory for PARISC probe\n");
2720                return -ENOMEM;
2721        }
2722
2723        info->si_type           = SI_KCS;
2724        info->addr_source       = SI_DEVICETREE;
2725        info->io_setup          = mem_setup;
2726        info->io.addr_type      = IPMI_MEM_ADDR_SPACE;
2727        info->io.addr_data      = dev->hpa.start;
2728        info->io.regsize        = 1;
2729        info->io.regspacing     = 1;
2730        info->io.regshift       = 0;
2731        info->irq               = 0; /* no interrupt */
2732        info->irq_setup         = NULL;
2733        info->dev               = &dev->dev;
2734
2735        dev_dbg(&dev->dev, "addr 0x%lx\n", info->io.addr_data);
2736
2737        dev_set_drvdata(&dev->dev, info);
2738
2739        if (add_smi(info)) {
2740                kfree(info);
2741                return -EBUSY;
2742        }
2743
2744        return 0;
2745}
2746
2747static int ipmi_parisc_remove(struct parisc_device *dev)
2748{
2749        cleanup_one_si(dev_get_drvdata(&dev->dev));
2750        return 0;
2751}
2752
2753static struct parisc_device_id ipmi_parisc_tbl[] = {
2754        { HPHW_MC, HVERSION_REV_ANY_ID, 0x004, 0xC0 },
2755        { 0, }
2756};
2757
2758static struct parisc_driver ipmi_parisc_driver = {
2759        .name =         "ipmi",
2760        .id_table =     ipmi_parisc_tbl,
2761        .probe =        ipmi_parisc_probe,
2762        .remove =       ipmi_parisc_remove,
2763};
2764#endif /* CONFIG_PARISC */
2765
2766static int wait_for_msg_done(struct smi_info *smi_info)
2767{
2768        enum si_sm_result     smi_result;
2769
2770        smi_result = smi_info->handlers->event(smi_info->si_sm, 0);
2771        for (;;) {
2772                if (smi_result == SI_SM_CALL_WITH_DELAY ||
2773                    smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
2774                        schedule_timeout_uninterruptible(1);
2775                        smi_result = smi_info->handlers->event(
2776                                smi_info->si_sm, 100);
2777                } else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
2778                        smi_result = smi_info->handlers->event(
2779                                smi_info->si_sm, 0);
2780                } else
2781                        break;
2782        }
2783        if (smi_result == SI_SM_HOSED)
2784                /*
2785                 * We couldn't get the state machine to run, so whatever's at
2786                 * the port is probably not an IPMI SMI interface.
2787                 */
2788                return -ENODEV;
2789
2790        return 0;
2791}
2792
2793static int try_get_dev_id(struct smi_info *smi_info)
2794{
2795        unsigned char         msg[2];
2796        unsigned char         *resp;
2797        unsigned long         resp_len;
2798        int                   rv = 0;
2799
2800        resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
2801        if (!resp)
2802                return -ENOMEM;
2803
2804        /*
2805         * Do a Get Device ID command, since it comes back with some
2806         * useful info.
2807         */
2808        msg[0] = IPMI_NETFN_APP_REQUEST << 2;
2809        msg[1] = IPMI_GET_DEVICE_ID_CMD;
2810        smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
2811
2812        rv = wait_for_msg_done(smi_info);
2813        if (rv)
2814                goto out;
2815
2816        resp_len = smi_info->handlers->get_result(smi_info->si_sm,
2817                                                  resp, IPMI_MAX_MSG_LENGTH);
2818
2819        /* Check and record info from the get device id, in case we need it. */
2820        rv = ipmi_demangle_device_id(resp, resp_len, &smi_info->device_id);
2821
2822 out:
2823        kfree(resp);
2824        return rv;
2825}
2826
2827static int try_enable_event_buffer(struct smi_info *smi_info)
2828{
2829        unsigned char         msg[3];
2830        unsigned char         *resp;
2831        unsigned long         resp_len;
2832        int                   rv = 0;
2833
2834        resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
2835        if (!resp)
2836                return -ENOMEM;
2837
2838        msg[0] = IPMI_NETFN_APP_REQUEST << 2;
2839        msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
2840        smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
2841
2842        rv = wait_for_msg_done(smi_info);
2843        if (rv) {
2844                printk(KERN_WARNING PFX "Error getting response from get"
2845                       " global enables command, the event buffer is not"
2846                       " enabled.\n");
2847                goto out;
2848        }
2849
2850        resp_len = smi_info->handlers->get_result(smi_info->si_sm,
2851                                                  resp, IPMI_MAX_MSG_LENGTH);
2852
2853        if (resp_len < 4 ||
2854                        resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
2855                        resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD   ||
2856                        resp[2] != 0) {
2857                printk(KERN_WARNING PFX "Invalid return from get global"
2858                       " enables command, cannot enable the event buffer.\n");
2859                rv = -EINVAL;
2860                goto out;
2861        }
2862
2863        if (resp[3] & IPMI_BMC_EVT_MSG_BUFF)
2864                /* buffer is already enabled, nothing to do. */
2865                goto out;
2866
2867        msg[0] = IPMI_NETFN_APP_REQUEST << 2;
2868        msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
2869        msg[2] = resp[3] | IPMI_BMC_EVT_MSG_BUFF;
2870        smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
2871
2872        rv = wait_for_msg_done(smi_info);
2873        if (rv) {
2874                printk(KERN_WARNING PFX "Error getting response from set"
2875                       " global enables command, the event buffer is not"
2876                       " enabled.\n");
2877                goto out;
2878        }
2879
2880        resp_len = smi_info->handlers->get_result(smi_info->si_sm,
2881                                                  resp, IPMI_MAX_MSG_LENGTH);
2882
2883        if (resp_len < 3 ||
2884                        resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
2885                        resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) {
2886                printk(KERN_WARNING PFX "Invalid return from set global"
2887                       " enables command, cannot enable the event buffer.\n");
2888                rv = -EINVAL;
2889                goto out;
2890        }
2891
2892        if (resp[2] != 0)
2893                /*
2894                 * An error when setting the event buffer bit means
2895                 * that the event buffer is not supported.
2896                 */
2897                rv = -ENOENT;
2898 out:
2899        kfree(resp);
2900        return rv;
2901}
2902
2903static int smi_type_proc_show(struct seq_file *m, void *v)
2904{
2905        struct smi_info *smi = m->private;
2906
2907        return seq_printf(m, "%s\n", si_to_str[smi->si_type]);
2908}
2909
2910static int smi_type_proc_open(struct inode *inode, struct file *file)
2911{
2912        return single_open(file, smi_type_proc_show, PDE_DATA(inode));
2913}
2914
2915static const struct file_operations smi_type_proc_ops = {
2916        .open           = smi_type_proc_open,
2917        .read           = seq_read,
2918        .llseek         = seq_lseek,
2919        .release        = single_release,
2920};
2921
2922static int smi_si_stats_proc_show(struct seq_file *m, void *v)
2923{
2924        struct smi_info *smi = m->private;
2925
2926        seq_printf(m, "interrupts_enabled:    %d\n",
2927                       smi->irq && !smi->interrupt_disabled);
2928        seq_printf(m, "short_timeouts:        %u\n",
2929                       smi_get_stat(smi, short_timeouts));
2930        seq_printf(m, "long_timeouts:         %u\n",
2931                       smi_get_stat(smi, long_timeouts));
2932        seq_printf(m, "idles:                 %u\n",
2933                       smi_get_stat(smi, idles));
2934        seq_printf(m, "interrupts:            %u\n",
2935                       smi_get_stat(smi, interrupts));
2936        seq_printf(m, "attentions:            %u\n",
2937                       smi_get_stat(smi, attentions));
2938        seq_printf(m, "flag_fetches:          %u\n",
2939                       smi_get_stat(smi, flag_fetches));
2940        seq_printf(m, "hosed_count:           %u\n",
2941                       smi_get_stat(smi, hosed_count));
2942        seq_printf(m, "complete_transactions: %u\n",
2943                       smi_get_stat(smi, complete_transactions));
2944        seq_printf(m, "events:                %u\n",
2945                       smi_get_stat(smi, events));
2946        seq_printf(m, "watchdog_pretimeouts:  %u\n",
2947                       smi_get_stat(smi, watchdog_pretimeouts));
2948        seq_printf(m, "incoming_messages:     %u\n",
2949                       smi_get_stat(smi, incoming_messages));
2950        return 0;
2951}
2952
2953static int smi_si_stats_proc_open(struct inode *inode, struct file *file)
2954{
2955        return single_open(file, smi_si_stats_proc_show, PDE_DATA(inode));
2956}
2957
2958static const struct file_operations smi_si_stats_proc_ops = {
2959        .open           = smi_si_stats_proc_open,
2960        .read           = seq_read,
2961        .llseek         = seq_lseek,
2962        .release        = single_release,
2963};
2964
2965static int smi_params_proc_show(struct seq_file *m, void *v)
2966{
2967        struct smi_info *smi = m->private;
2968
2969        return seq_printf(m,
2970                       "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
2971                       si_to_str[smi->si_type],
2972                       addr_space_to_str[smi->io.addr_type],
2973                       smi->io.addr_data,
2974                       smi->io.regspacing,
2975                       smi->io.regsize,
2976                       smi->io.regshift,
2977                       smi->irq,
2978                       smi->slave_addr);
2979}
2980
2981static int smi_params_proc_open(struct inode *inode, struct file *file)
2982{
2983        return single_open(file, smi_params_proc_show, PDE_DATA(inode));
2984}
2985
2986static const struct file_operations smi_params_proc_ops = {
2987        .open           = smi_params_proc_open,
2988        .read           = seq_read,
2989        .llseek         = seq_lseek,
2990        .release        = single_release,
2991};
2992
2993/*
2994 * oem_data_avail_to_receive_msg_avail
2995 * @info - smi_info structure with msg_flags set
2996 *
2997 * Converts flags from OEM_DATA_AVAIL to RECEIVE_MSG_AVAIL
2998 * Returns 1 indicating need to re-run handle_flags().
2999 */
3000static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info)
3001{
3002        smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) |
3003                               RECEIVE_MSG_AVAIL);
3004        return 1;
3005}
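/*
 * Illustrative example, not part of the original driver: assuming the
 * usual flag values this driver uses (RECEIVE_MSG_AVAIL = 0x01,
 * OEM0_DATA_AVAIL = 0x20, with OEM_DATA_AVAIL covering the three OEM
 * bits), a BMC reporting msg_flags = 0x20 has its flags rewritten to
 * (0x20 & ~OEM_DATA_AVAIL) | RECEIVE_MSG_AVAIL = 0x01, so the next
 * handle_flags() pass fetches the pending message with a normal Get
 * Message command instead of treating it as OEM data.
 */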
3006
3007/*
3008 * setup_dell_poweredge_oem_data_handler
3009 * @info - smi_info.device_id must be populated
3010 *
3011 * Systems that match, but have firmware version < 1.40 may assert
3012 * OEM0_DATA_AVAIL on their own, without being told via Set Flags that
3013 * it's safe to do so.  Such systems will de-assert OEM1_DATA_AVAIL
3014 * upon receipt of IPMI_GET_MSG_CMD, so we should treat these flags
3015 * as RECEIVE_MSG_AVAIL instead.
3016 *
3017 * As Dell has no plans to release IPMI 1.5 firmware that *ever*
3018 * asserts the OEM[012] bits, and if it did, the driver would have to
3019 * change to handle that properly, we don't actually check for the
3020 * firmware version.
3021 * Device ID = 0x20                BMC on PowerEdge 8G servers
3022 * Device Revision = 0x80
3023 * Firmware Revision1 = 0x01       BMC version 1.40
3024 * Firmware Revision2 = 0x40       BCD encoded
3025 * IPMI Version = 0x51             IPMI 1.5
3026 * Manufacturer ID = A2 02 00      Dell IANA
3027 *
3028 * Additionally, PowerEdge systems with IPMI < 1.5 may also assert
3029 * OEM0_DATA_AVAIL and need to be treated as RECEIVE_MSG_AVAIL.
3030 *
3031 */
3032#define DELL_POWEREDGE_8G_BMC_DEVICE_ID  0x20
3033#define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80
3034#define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51
3035#define DELL_IANA_MFR_ID 0x0002a2
3036static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info)
3037{
3038        struct ipmi_device_id *id = &smi_info->device_id;
3039        if (id->manufacturer_id == DELL_IANA_MFR_ID) {
3040                if (id->device_id       == DELL_POWEREDGE_8G_BMC_DEVICE_ID  &&
3041                    id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV &&
3042                    id->ipmi_version   == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) {
3043                        smi_info->oem_data_avail_handler =
3044                                oem_data_avail_to_receive_msg_avail;
3045                } else if (ipmi_version_major(id) < 1 ||
3046                           (ipmi_version_major(id) == 1 &&
3047                            ipmi_version_minor(id) < 5)) {
3048                        smi_info->oem_data_avail_handler =
3049                                oem_data_avail_to_receive_msg_avail;
3050                }
3051        }
3052}
3053
3054#define CANNOT_RETURN_REQUESTED_LENGTH 0xCA
3055static void return_hosed_msg_badsize(struct smi_info *smi_info)
3056{
3057        struct ipmi_smi_msg *msg = smi_info->curr_msg;
3058
3059        /* Make it a response */
3060        msg->rsp[0] = msg->data[0] | 4;
3061        msg->rsp[1] = msg->data[1];
3062        msg->rsp[2] = CANNOT_RETURN_REQUESTED_LENGTH;
3063        msg->rsp_size = 3;
3064        smi_info->curr_msg = NULL;
3065        deliver_recv_msg(smi_info, msg);
3066}
3067
3068/*
3069 * dell_poweredge_bt_xaction_handler
3070 * @info - smi_info.device_id must be populated
3071 *
3072 * Dell PowerEdge servers with the BT interface (x6xx and 1750) will
3073 * not respond to a Get SDR command if the length of the data
3074 * requested is exactly 0x3A, which leads to command timeouts and no
3075 * data returned.  This intercepts such commands, and causes userspace
3076 * callers to try again with a different-sized buffer, which succeeds.
3077 */
3078
3079#define STORAGE_NETFN 0x0A
3080#define STORAGE_CMD_GET_SDR 0x23
3081static int dell_poweredge_bt_xaction_handler(struct notifier_block *self,
3082                                             unsigned long unused,
3083                                             void *in)
3084{
3085        struct smi_info *smi_info = in;
3086        unsigned char *data = smi_info->curr_msg->data;
3087        unsigned int size   = smi_info->curr_msg->data_size;
3088        if (size >= 8 &&
3089            (data[0]>>2) == STORAGE_NETFN &&
3090            data[1] == STORAGE_CMD_GET_SDR &&
3091            data[7] == 0x3A) {
3092                return_hosed_msg_badsize(smi_info);
3093                return NOTIFY_STOP;
3094        }
3095        return NOTIFY_DONE;
3096}
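/*
 * Worked example (illustrative only), following the checks above: a Get
 * SDR request with data[0] = 0x28 (STORAGE_NETFN 0x0A in bits 7:2, LUN 0),
 * data[1] = 0x23 (STORAGE_CMD_GET_SDR) and data[7] = 0x3A (the requested
 * byte count) is intercepted before it reaches the BMC.
 * return_hosed_msg_badsize() then fakes the reply as rsp[0] = 0x2C (the
 * response netfn), rsp[1] = 0x23 and rsp[2] = 0xCA
 * (CANNOT_RETURN_REQUESTED_LENGTH), prompting the caller to retry with a
 * different length.
 */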
3097
3098static struct notifier_block dell_poweredge_bt_xaction_notifier = {
3099        .notifier_call  = dell_poweredge_bt_xaction_handler,
3100};
3101
3102/*
3103 * setup_dell_poweredge_bt_xaction_handler
3104 * @info - smi_info.device_id must be filled in already
3105 *
3106 * Registers the Dell PowerEdge BT transaction notifier when the
3107 * device is a Dell BMC using the BT interface.
3108 */
3109static void
3110setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info)
3111{
3112        struct ipmi_device_id *id = &smi_info->device_id;
3113        if (id->manufacturer_id == DELL_IANA_MFR_ID &&
3114            smi_info->si_type == SI_BT)
3115                register_xaction_notifier(&dell_poweredge_bt_xaction_notifier);
3116}
3117
3118/*
3119 * setup_oem_data_handler
3120 * @info - smi_info.device_id must be filled in already
3121 *
3122 * Fills in smi_info.oem_data_avail_handler
3123 * when we know what function to use there.
3124 */
3125
3126static void setup_oem_data_handler(struct smi_info *smi_info)
3127{
3128        setup_dell_poweredge_oem_data_handler(smi_info);
3129}
3130
3131static void setup_xaction_handlers(struct smi_info *smi_info)
3132{
3133        setup_dell_poweredge_bt_xaction_handler(smi_info);
3134}
3135
3136static inline void wait_for_timer_and_thread(struct smi_info *smi_info)
3137{
3138        if (smi_info->intf) {
3139                /*
3140                 * The timer and thread are only running if the
3141                 * interface has been started up and registered.
3142                 */
3143                if (smi_info->thread != NULL)
3144                        kthread_stop(smi_info->thread);
3145                del_timer_sync(&smi_info->si_timer);
3146        }
3147}
3148
3149static struct ipmi_default_vals
3150{
3151        int type;
3152        int port;
3153} ipmi_defaults[] =
3154{
3155        { .type = SI_KCS, .port = 0xca2 },
3156        { .type = SI_SMIC, .port = 0xca9 },
3157        { .type = SI_BT, .port = 0xe4 },
3158        { .port = 0 }
3159};
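/*
 * Note added for clarity: these are the default legacy I/O ports the IPMI
 * specification assigns to the three system interfaces (KCS at 0xca2,
 * SMIC at 0xca9, BT at 0xe4); the zero-port entry terminates the scan in
 * default_find_bmc() below.
 */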
3160
3161static void default_find_bmc(void)
3162{
3163        struct smi_info *info;
3164        int             i;
3165
3166        for (i = 0; ; i++) {
3167                if (!ipmi_defaults[i].port)
3168                        break;
3169#ifdef CONFIG_PPC
3170                if (check_legacy_ioport(ipmi_defaults[i].port))
3171                        continue;
3172#endif
3173                info = smi_info_alloc();
3174                if (!info)
3175                        return;
3176
3177                info->addr_source = SI_DEFAULT;
3178
3179                info->si_type = ipmi_defaults[i].type;
3180                info->io_setup = port_setup;
3181                info->io.addr_data = ipmi_defaults[i].port;
3182                info->io.addr_type = IPMI_IO_ADDR_SPACE;
3183
3184                info->io.addr = NULL;
3185                info->io.regspacing = DEFAULT_REGSPACING;
3186                info->io.regsize = DEFAULT_REGSIZE;
3187                info->io.regshift = 0;
3188
3189                if (add_smi(info) == 0) {
3190                        if (try_smi_init(info) == 0) {
3191                                /* Found one... */
3192                                printk(KERN_INFO PFX "Found default %s"
3193                                " state machine at %s address 0x%lx\n",
3194                                si_to_str[info->si_type],
3195                                addr_space_to_str[info->io.addr_type],
3196                                info->io.addr_data);
3197                        } else
3198                                cleanup_one_si(info);
3199                } else {
3200                        kfree(info);
3201                }
3202        }
3203}
3204
3205static int is_new_interface(struct smi_info *info)
3206{
3207        struct smi_info *e;
3208
3209        list_for_each_entry(e, &smi_infos, link) {
3210                if (e->io.addr_type != info->io.addr_type)
3211                        continue;
3212                if (e->io.addr_data == info->io.addr_data)
3213                        return 0;
3214        }
3215
3216        return 1;
3217}
3218
3219static int add_smi(struct smi_info *new_smi)
3220{
3221        int rv = 0;
3222
3223        printk(KERN_INFO PFX "Adding %s-specified %s state machine",
3224                        ipmi_addr_src_to_str[new_smi->addr_source],
3225                        si_to_str[new_smi->si_type]);
3226        mutex_lock(&smi_infos_lock);
3227        if (!is_new_interface(new_smi)) {
3228                printk(KERN_CONT " duplicate interface\n");
3229                rv = -EBUSY;
3230                goto out_err;
3231        }
3232
3233        printk(KERN_CONT "\n");
3234
3235        /* So we know not to free it unless we have allocated one. */
3236        new_smi->intf = NULL;
3237        new_smi->si_sm = NULL;
3238        new_smi->handlers = NULL;
3239
3240        list_add_tail(&new_smi->link, &smi_infos);
3241
3242out_err:
3243        mutex_unlock(&smi_infos_lock);
3244        return rv;
3245}
3246
3247static int try_smi_init(struct smi_info *new_smi)
3248{
3249        int rv = 0;
3250        int i;
3251
3252        printk(KERN_INFO PFX "Trying %s-specified %s state"
3253               " machine at %s address 0x%lx, slave address 0x%x,"
3254               " irq %d\n",
3255               ipmi_addr_src_to_str[new_smi->addr_source],
3256               si_to_str[new_smi->si_type],
3257               addr_space_to_str[new_smi->io.addr_type],
3258               new_smi->io.addr_data,
3259               new_smi->slave_addr, new_smi->irq);
3260
3261        switch (new_smi->si_type) {
3262        case SI_KCS:
3263                new_smi->handlers = &kcs_smi_handlers;
3264                break;
3265
3266        case SI_SMIC:
3267                new_smi->handlers = &smic_smi_handlers;
3268                break;
3269
3270        case SI_BT:
3271                new_smi->handlers = &bt_smi_handlers;
3272                break;
3273
3274        default:
3275                /* No support for anything else yet. */
3276                rv = -EIO;
3277                goto out_err;
3278        }
3279
3280        /* Allocate the state machine's data and initialize it. */
3281        new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
3282        if (!new_smi->si_sm) {
3283                printk(KERN_ERR PFX
3284                       "Could not allocate state machine memory\n");
3285                rv = -ENOMEM;
3286                goto out_err;
3287        }
3288        new_smi->io_size = new_smi->handlers->init_data(new_smi->si_sm,
3289                                                        &new_smi->io);
3290
3291        /* Now that we know the I/O size, we can set up the I/O. */
3292        rv = new_smi->io_setup(new_smi);
3293        if (rv) {
3294                printk(KERN_ERR PFX "Could not set up I/O space\n");
3295                goto out_err;
3296        }
3297
3298        /* Do low-level detection first. */
3299        if (new_smi->handlers->detect(new_smi->si_sm)) {
3300                if (new_smi->addr_source)
3301                        printk(KERN_INFO PFX "Interface detection failed\n");
3302                rv = -ENODEV;
3303                goto out_err;
3304        }
3305
3306        /*
3307         * Attempt a get device id command.  If it fails, we probably
3308         * don't have a BMC here.
3309         */
3310        rv = try_get_dev_id(new_smi);
3311        if (rv) {
3312                if (new_smi->addr_source)
3313                        printk(KERN_INFO PFX "There appears to be no BMC"
3314                               " at this location\n");
3315                goto out_err;
3316        }
3317
3318        setup_oem_data_handler(new_smi);
3319        setup_xaction_handlers(new_smi);
3320
3321        INIT_LIST_HEAD(&(new_smi->xmit_msgs));
3322        INIT_LIST_HEAD(&(new_smi->hp_xmit_msgs));
3323        new_smi->curr_msg = NULL;
3324        atomic_set(&new_smi->req_events, 0);
3325        new_smi->run_to_completion = 0;
3326        for (i = 0; i < SI_NUM_STATS; i++)
3327                atomic_set(&new_smi->stats[i], 0);
3328
3329        new_smi->interrupt_disabled = 1;
3330        atomic_set(&new_smi->stop_operation, 0);
3331        new_smi->intf_num = smi_num;
3332        smi_num++;
3333
3334        rv = try_enable_event_buffer(new_smi);
3335        if (rv == 0)
3336                new_smi->has_event_buffer = 1;
3337
3338        /*
3339         * Start clearing the flags before we enable interrupts or the
3340         * timer to avoid racing with the timer.
3341         */
3342        start_clear_flags(new_smi);
3343        /* IRQ is defined to be set when non-zero. */
3344        if (new_smi->irq)
3345                new_smi->si_state = SI_CLEARING_FLAGS_THEN_SET_IRQ;
3346
3347        if (!new_smi->dev) {
3348                /*
3349                 * If we don't already have a device from something
3350                 * else (like PCI), then register a new one.
3351                 */
3352                new_smi->pdev = platform_device_alloc("ipmi_si",
3353                                                      new_smi->intf_num);
3354                if (!new_smi->pdev) {
3355                        printk(KERN_ERR PFX
3356                               "Unable to allocate platform device\n");
3357                        rv = -ENOMEM;
3358                        goto out_err;
3358                }
3359                new_smi->dev = &new_smi->pdev->dev;
3360                new_smi->dev->driver = &ipmi_driver.driver;
3361
3362                rv = platform_device_add(new_smi->pdev);
3363                if (rv) {
3364                        printk(KERN_ERR PFX
3365                               "Unable to register system interface device:"
3366                               " %d\n",
3367                               rv);
3368                        goto out_err;
3369                }
3370                new_smi->dev_registered = 1;
3371        }
3372
3373        rv = ipmi_register_smi(&handlers,
3374                               new_smi,
3375                               &new_smi->device_id,
3376                               new_smi->dev,
3377                               "bmc",
3378                               new_smi->slave_addr);
3379        if (rv) {
3380                dev_err(new_smi->dev, "Unable to register device: error %d\n",
3381                        rv);
3382                goto out_err_stop_timer;
3383        }
3384
3385        rv = ipmi_smi_add_proc_entry(new_smi->intf, "type",
3386                                     &smi_type_proc_ops,
3387                                     new_smi);
3388        if (rv) {
3389                dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv);
3390                goto out_err_stop_timer;
3391        }
3392
3393        rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats",
3394                                     &smi_si_stats_proc_ops,
3395                                     new_smi);
3396        if (rv) {
3397                dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv);
3398                goto out_err_stop_timer;
3399        }
3400
3401        rv = ipmi_smi_add_proc_entry(new_smi->intf, "params",
3402                                     &smi_params_proc_ops,
3403                                     new_smi);
3404        if (rv) {
3405                dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv);
3406                goto out_err_stop_timer;
3407        }
3408
3409        dev_info(new_smi->dev, "IPMI %s interface initialized\n",
3410                 si_to_str[new_smi->si_type]);
3411
3412        return 0;
3413
3414 out_err_stop_timer:
3415        atomic_inc(&new_smi->stop_operation);
3416        wait_for_timer_and_thread(new_smi);
3417
3418 out_err:
3419        new_smi->interrupt_disabled = 1;
3420
3421        if (new_smi->intf) {
3422                ipmi_unregister_smi(new_smi->intf);
3423                new_smi->intf = NULL;
3424        }
3425
3426        if (new_smi->irq_cleanup) {
3427                new_smi->irq_cleanup(new_smi);
3428                new_smi->irq_cleanup = NULL;
3429        }
3430
3431        /*
3432         * Wait until we know that we are out of any interrupt
3433         * handlers that might have been running before we freed
3434         * the interrupt.
3435         */
3436        synchronize_sched();
3437
3438        if (new_smi->si_sm) {
3439                if (new_smi->handlers)
3440                        new_smi->handlers->cleanup(new_smi->si_sm);
3441                kfree(new_smi->si_sm);
3442                new_smi->si_sm = NULL;
3443        }
3444        if (new_smi->addr_source_cleanup) {
3445                new_smi->addr_source_cleanup(new_smi);
3446                new_smi->addr_source_cleanup = NULL;
3447        }
3448        if (new_smi->io_cleanup) {
3449                new_smi->io_cleanup(new_smi);
3450                new_smi->io_cleanup = NULL;
3451        }
3452
3453        if (new_smi->dev_registered) {
3454                platform_device_unregister(new_smi->pdev);
3455                new_smi->dev_registered = 0;
3456        }
3457
3458        return rv;
3459}
3460
3461static int init_ipmi_si(void)
3462{
3463        int  i;
3464        char *str;
3465        int  rv;
3466        struct smi_info *e;
3467        enum ipmi_addr_src type = SI_INVALID;
3468
3469        if (initialized)
3470                return 0;
3471        initialized = 1;
3472
3473        if (si_tryplatform) {
3474                rv = platform_driver_register(&ipmi_driver);
3475                if (rv) {
3476                        printk(KERN_ERR PFX "Unable to register "
3477                               "driver: %d\n", rv);
3478                        return rv;
3479                }
3480        }
3481
3482        /* Parse out the si_type string into its components. */
3483        str = si_type_str;
3484        if (*str != '\0') {
3485                for (i = 0; (i < SI_MAX_PARMS) && (*str != '\0'); i++) {
3486                        si_type[i] = str;
3487                        str = strchr(str, ',');
3488                        if (str) {
3489                                *str = '\0';
3490                                str++;
3491                        } else {
3492                                break;
3493                        }
3494                }
3495        }
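        /*
         * Illustrative example (assuming the "type" module parameter that
         * fills si_type_str, declared earlier in this file): loading with
         * ipmi_si type=kcs,bt leaves si_type[0] = "kcs" and
         * si_type[1] = "bt", with the comma overwritten by '\0' so each
         * entry is a separate string for hardcode_find_bmc() to use.
         */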
3496
3497        printk(KERN_INFO "IPMI System Interface driver.\n");
3498
3499        /* If the user gave us a device, they presumably want us to use it */
3500        if (!hardcode_find_bmc())
3501                return 0;
3502
3503#ifdef CONFIG_PCI
3504        if (si_trypci) {
3505                rv = pci_register_driver(&ipmi_pci_driver);
3506                if (rv)
3507                        printk(KERN_ERR PFX "Unable to register "
3508                               "PCI driver: %d\n", rv);
3509                else
3510                        pci_registered = 1;
3511        }
3512#endif
3513
3514#ifdef CONFIG_ACPI
3515        if (si_tryacpi) {
3516                pnp_register_driver(&ipmi_pnp_driver);
3517                pnp_registered = 1;
3518        }
3519#endif
3520
3521#ifdef CONFIG_DMI
3522        if (si_trydmi)
3523                dmi_find_bmc();
3524#endif
3525
3526#ifdef CONFIG_ACPI
3527        if (si_tryacpi)
3528                spmi_find_bmc();
3529#endif
3530
3531#ifdef CONFIG_PARISC
3532        register_parisc_driver(&ipmi_parisc_driver);
3533        parisc_registered = 1;
3534        /* poking PC IO addresses will crash machine, don't do it */
3535        si_trydefaults = 0;
3536#endif
3537
3538        /* We prefer devices with interrupts, but in the case of a machine
3539           with multiple BMCs we assume that there will be several instances
3540           of a given type, so if we succeed in registering one type, also
3541           try to register everything else of the same type. */
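        /*
         * Illustrative walk-through (not in the original source): suppose
         * ACPI reported a KCS interface with an IRQ and SMBIOS/DMI reported
         * a second one without an IRQ.  The first loop below registers the
         * IRQ-capable ACPI entry, type becomes SI_ACPI, and init_ipmi_si()
         * returns; the polled DMI entry is never registered.  Only if no
         * IRQ-capable interface registers successfully does the second loop
         * fall back to IRQ-less interfaces, again limiting itself to one
         * addr_source.
         */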
3542
3543        mutex_lock(&smi_infos_lock);
3544        list_for_each_entry(e, &smi_infos, link) {
3545                /* Try to register a device if it has an IRQ and we either
3546                   haven't successfully registered a device yet or this
3547                   device has the same type as one we successfully registered */
3548                if (e->irq && (!type || e->addr_source == type)) {
3549                        if (!try_smi_init(e)) {
3550                                type = e->addr_source;
3551                        }
3552                }
3553        }
3554
3555        /* type will only have been set if we successfully registered an si */
3556        if (type) {
3557                mutex_unlock(&smi_infos_lock);
3558                return 0;
3559        }
3560
3561        /* Fall back to the preferred device */
3562
3563        list_for_each_entry(e, &smi_infos, link) {
3564                if (!e->irq && (!type || e->addr_source == type)) {
3565                        if (!try_smi_init(e)) {
3566                                type = e->addr_source;
3567                        }
3568                }
3569        }
3570        mutex_unlock(&smi_infos_lock);
3571
3572        if (type)
3573                return 0;
3574
3575        if (si_trydefaults) {
3576                mutex_lock(&smi_infos_lock);
3577                if (list_empty(&smi_infos)) {
3578                        /* No BMC was found, try defaults. */
3579                        mutex_unlock(&smi_infos_lock);
3580                        default_find_bmc();
3581                } else
3582                        mutex_unlock(&smi_infos_lock);
3583        }
3584
3585        mutex_lock(&smi_infos_lock);
3586        if (unload_when_empty && list_empty(&smi_infos)) {
3587                mutex_unlock(&smi_infos_lock);
3588                cleanup_ipmi_si();
3589                printk(KERN_WARNING PFX
3590                       "Unable to find any System Interface(s)\n");
3591                return -ENODEV;
3592        } else {
3593                mutex_unlock(&smi_infos_lock);
3594                return 0;
3595        }
3596}
3597module_init(init_ipmi_si);
3598
3599static void cleanup_one_si(struct smi_info *to_clean)
3600{
3601        int           rv = 0;
3602        unsigned long flags;
3603
3604        if (!to_clean)
3605                return;
3606
3607        list_del(&to_clean->link);
3608
3609        /* Tell the driver that we are shutting down. */
3610        atomic_inc(&to_clean->stop_operation);
3611
3612        /*
3613         * Make sure the timer and thread are stopped and will not run
3614         * again.
3615         */
3616        wait_for_timer_and_thread(to_clean);
3617
3618        /*
3619         * Timeouts are stopped, now make sure the interrupts are off
3620         * for the device.  A little tricky with locks to make sure
3621         * there are no races.
3622         */
3623        spin_lock_irqsave(&to_clean->si_lock, flags);
3624        while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
3625                spin_unlock_irqrestore(&to_clean->si_lock, flags);
3626                poll(to_clean);
3627                schedule_timeout_uninterruptible(1);
3628                spin_lock_irqsave(&to_clean->si_lock, flags);
3629        }
3630        disable_si_irq(to_clean);
3631        spin_unlock_irqrestore(&to_clean->si_lock, flags);
3632        while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
3633                poll(to_clean);
3634                schedule_timeout_uninterruptible(1);
3635        }
3636
3637        /* Clean up interrupts and make sure that everything is done. */
3638        if (to_clean->irq_cleanup)
3639                to_clean->irq_cleanup(to_clean);
3640        while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
3641                poll(to_clean);
3642                schedule_timeout_uninterruptible(1);
3643        }
3644
3645        if (to_clean->intf)
3646                rv = ipmi_unregister_smi(to_clean->intf);
3647
3648        if (rv) {
3649                printk(KERN_ERR PFX "Unable to unregister device: errno=%d\n",
3650                       rv);
3651        }
3652
3653        if (to_clean->handlers)
3654                to_clean->handlers->cleanup(to_clean->si_sm);
3655
3656        kfree(to_clean->si_sm);
3657
3658        if (to_clean->addr_source_cleanup)
3659                to_clean->addr_source_cleanup(to_clean);
3660        if (to_clean->io_cleanup)
3661                to_clean->io_cleanup(to_clean);
3662
3663        if (to_clean->dev_registered)
3664                platform_device_unregister(to_clean->pdev);
3665
3666        kfree(to_clean);
3667}
3668
3669static void cleanup_ipmi_si(void)
3670{
3671        struct smi_info *e, *tmp_e;
3672
3673        if (!initialized)
3674                return;
3675
3676#ifdef CONFIG_PCI
3677        if (pci_registered)
3678                pci_unregister_driver(&ipmi_pci_driver);
3679#endif
3680#ifdef CONFIG_ACPI
3681        if (pnp_registered)
3682                pnp_unregister_driver(&ipmi_pnp_driver);
3683#endif
3684#ifdef CONFIG_PARISC
3685        if (parisc_registered)
3686                unregister_parisc_driver(&ipmi_parisc_driver);
3687#endif
3688
3689        platform_driver_unregister(&ipmi_driver);
3690
3691        mutex_lock(&smi_infos_lock);
3692        list_for_each_entry_safe(e, tmp_e, &smi_infos, link)
3693                cleanup_one_si(e);
3694        mutex_unlock(&smi_infos_lock);
3695}
3696module_exit(cleanup_ipmi_si);
3697
3698MODULE_LICENSE("GPL");
3699MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
3700MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT"
3701                   " system interfaces.");
3702