linux/drivers/char/ipmi/ipmi_si_intf.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * ipmi_si.c
 *
 * The interface to the IPMI driver for the system interfaces (KCS, SMIC,
 * BT).
 *
 * Author: MontaVista Software, Inc.
 *         Corey Minyard <minyard@mvista.com>
 *         source@mvista.com
 *
 * Copyright 2002 MontaVista Software Inc.
 * Copyright 2006 IBM Corp., Christian Krafft <krafft@de.ibm.com>
 */

/*
 * This file holds the "policy" for the interface to the SMI state
 * machine.  It does the configuration, handles timers and interrupts,
 * and drives the real SMI state machine.
 */

#define pr_fmt(fmt) "ipmi_si: " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <asm/irq.h>
#include <linux/interrupt.h>
#include <linux/rcupdate.h>
#include <linux/ipmi.h>
#include <linux/ipmi_smi.h>
#include "ipmi_si.h"
#include "ipmi_si_sm.h"
#include <linux/string.h>
#include <linux/ctype.h>

/* Measure times between events in the driver. */
#undef DEBUG_TIMING

/* Call every 10 ms. */
#define SI_TIMEOUT_TIME_USEC    10000
#define SI_USEC_PER_JIFFY       (1000000/HZ)
#define SI_TIMEOUT_JIFFIES      (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
#define SI_SHORT_TIMEOUT_USEC  250 /* .25ms when the SM requests a
                                      short timeout */

enum si_intf_state {
        SI_NORMAL,
        SI_GETTING_FLAGS,
        SI_GETTING_EVENTS,
        SI_CLEARING_FLAGS,
        SI_GETTING_MESSAGES,
        SI_CHECKING_ENABLES,
        SI_SETTING_ENABLES
        /* FIXME - add watchdog stuff. */
};

/* Some BT-specific defines we need here. */
#define IPMI_BT_INTMASK_REG             2
#define IPMI_BT_INTMASK_CLEAR_IRQ_BIT   2
#define IPMI_BT_INTMASK_ENABLE_IRQ_BIT  1

static const char * const si_to_str[] = { "invalid", "kcs", "smic", "bt" };

static bool initialized;

/*
 * Indexes into stats[] in smi_info below.
 */
enum si_stat_indexes {
        /*
         * Number of times the driver requested a timer while an operation
         * was in progress.
         */
        SI_STAT_short_timeouts = 0,

        /*
         * Number of times the driver requested a timer while nothing was in
         * progress.
         */
        SI_STAT_long_timeouts,

        /* Number of times the interface was idle while being polled. */
        SI_STAT_idles,

        /* Number of interrupts the driver handled. */
        SI_STAT_interrupts,

        /* Number of times the driver got an ATTN from the hardware. */
        SI_STAT_attentions,

        /* Number of times the driver requested flags from the hardware. */
        SI_STAT_flag_fetches,

        /* Number of times the hardware didn't follow the state machine. */
        SI_STAT_hosed_count,

        /* Number of completed messages. */
        SI_STAT_complete_transactions,

        /* Number of IPMI events received from the hardware. */
        SI_STAT_events,

        /* Number of watchdog pretimeouts. */
        SI_STAT_watchdog_pretimeouts,

        /* Number of asynchronous messages received. */
        SI_STAT_incoming_messages,

        /* This *must* remain last, add new values above this. */
        SI_NUM_STATS
};

struct smi_info {
        int                    si_num;
        struct ipmi_smi        *intf;
        struct si_sm_data      *si_sm;
        const struct si_sm_handlers *handlers;
        spinlock_t             si_lock;
        struct ipmi_smi_msg    *waiting_msg;
        struct ipmi_smi_msg    *curr_msg;
        enum si_intf_state     si_state;

        /*
         * Used to handle the various types of I/O that can occur with
         * IPMI
         */
        struct si_sm_io io;

        /*
         * Per-OEM handler, called from handle_flags().  Returns 1
         * when handle_flags() needs to be re-run or 0 indicating it
         * set si_state itself.
         */
        int (*oem_data_avail_handler)(struct smi_info *smi_info);

        /*
         * Flags from the last GET_MSG_FLAGS command, used when an ATTN
         * is set to hold the flags until we are done handling everything
         * from the flags.
         */
#define RECEIVE_MSG_AVAIL       0x01
#define EVENT_MSG_BUFFER_FULL   0x02
#define WDT_PRE_TIMEOUT_INT     0x08
#define OEM0_DATA_AVAIL     0x20
#define OEM1_DATA_AVAIL     0x40
#define OEM2_DATA_AVAIL     0x80
#define OEM_DATA_AVAIL      (OEM0_DATA_AVAIL | \
                             OEM1_DATA_AVAIL | \
                             OEM2_DATA_AVAIL)
        unsigned char       msg_flags;

        /* Does the BMC have an event buffer? */
        bool                has_event_buffer;

        /*
         * If set to true, this will request events the next time the
         * state machine is idle.
         */
        atomic_t            req_events;

        /*
         * If true, run the state machine to completion on every send
         * call.  Generally used after a panic to make sure stuff goes
         * out.
         */
        bool                run_to_completion;

        /* The timer for this si. */
        struct timer_list   si_timer;

        /* This flag is set if the timer can be started. */
        bool                timer_can_start;

        /* This flag is set if the timer is running (timer_pending() isn't enough). */
        bool                timer_running;

        /* The time (in jiffies) the last timeout occurred at. */
        unsigned long       last_timeout_jiffies;

        /* Are we waiting for events, pretimeouts, or received messages? */
        atomic_t            need_watch;

        /*
         * The driver will disable interrupts when it gets into a
         * situation where it cannot handle messages due to lack of
         * memory.  Once that situation clears up, it will re-enable
         * interrupts.
         */
        bool interrupt_disabled;

        /*
         * Does the BMC support events?
         */
        bool supports_event_msg_buff;

        /*
         * Can we disable the receive irq bit in the global enables?
         * There are currently two forms of brokenness: some systems
         * cannot disable the bit (which is technically within the
         * spec but a bad idea), and some systems have the bit forced
         * to zero even though interrupts work (which is clearly
         * outside the spec).  The next bool tells which form of
         * brokenness is present.
         */
        bool cannot_disable_irq;

        /*
         * Some systems are broken and cannot set the irq enable
         * bit, even if they support interrupts.
         */
        bool irq_enable_broken;

        /* Is the driver in maintenance mode? */
        bool in_maintenance_mode;

        /*
         * Did we get an attention that we did not handle?
         */
        bool got_attn;

        /* From the get device id response... */
        struct ipmi_device_id device_id;

        /* Have we added the device group to the device? */
        bool dev_group_added;

        /* Counters and things for the proc filesystem. */
        atomic_t stats[SI_NUM_STATS];

        struct task_struct *thread;

        struct list_head link;
};

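/*
 * The stat name is token-pasted onto SI_STAT_, so, for example,
 * smi_inc_stat(smi_info, attentions) increments
 * smi_info->stats[SI_STAT_attentions].
 */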
#define smi_inc_stat(smi, stat) \
        atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
#define smi_get_stat(smi, stat) \
        ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))

#define IPMI_MAX_INTFS 4
static int force_kipmid[IPMI_MAX_INTFS];
static int num_force_kipmid;

static unsigned int kipmid_max_busy_us[IPMI_MAX_INTFS];
static int num_max_busy_us;

static bool unload_when_empty = true;

static int try_smi_init(struct smi_info *smi);
static void cleanup_one_si(struct smi_info *smi_info);
static void cleanup_ipmi_si(void);

#ifdef DEBUG_TIMING
void debug_timestamp(char *msg)
{
        struct timespec64 t;

        ktime_get_ts64(&t);
        pr_debug("**%s: %lld.%9.9ld\n", msg, (long long) t.tv_sec, t.tv_nsec);
}
#else
#define debug_timestamp(x)
#endif

static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
static int register_xaction_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_register(&xaction_notifier_list, nb);
}

static void deliver_recv_msg(struct smi_info *smi_info,
                             struct ipmi_smi_msg *msg)
{
        /* Deliver the message to the upper layer. */
        ipmi_smi_msg_received(smi_info->intf, msg);
}

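/*
 * Turn the current message into an error response carrying completion
 * code cCode and deliver it to the upper layer.  Note that data[0]
 * holds the request netfn already shifted left two bits, so ORing in
 * 4 (bit 2) converts it into the corresponding response netfn.
 */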
static void return_hosed_msg(struct smi_info *smi_info, int cCode)
{
        struct ipmi_smi_msg *msg = smi_info->curr_msg;

        if (cCode < 0 || cCode > IPMI_ERR_UNSPECIFIED)
                cCode = IPMI_ERR_UNSPECIFIED;
        /* else use it as is */

        /* Make it a response */
        msg->rsp[0] = msg->data[0] | 4;
        msg->rsp[1] = msg->data[1];
        msg->rsp[2] = cCode;
        msg->rsp_size = 3;

        smi_info->curr_msg = NULL;
        deliver_recv_msg(smi_info, msg);
}

static enum si_sm_result start_next_msg(struct smi_info *smi_info)
{
        int              rv;

        if (!smi_info->waiting_msg) {
                smi_info->curr_msg = NULL;
                rv = SI_SM_IDLE;
        } else {
                int err;

                smi_info->curr_msg = smi_info->waiting_msg;
                smi_info->waiting_msg = NULL;
                debug_timestamp("Start2");
                err = atomic_notifier_call_chain(&xaction_notifier_list,
                                0, smi_info);
                if (err & NOTIFY_STOP_MASK) {
                        rv = SI_SM_CALL_WITHOUT_DELAY;
                        goto out;
                }
                err = smi_info->handlers->start_transaction(
                        smi_info->si_sm,
                        smi_info->curr_msg->data,
                        smi_info->curr_msg->data_size);
                if (err)
                        return_hosed_msg(smi_info, err);

                rv = SI_SM_CALL_WITHOUT_DELAY;
        }
out:
        return rv;
}

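/*
 * (Re)arm the interface timer for new_val and record when it was
 * armed, unless timer starts are currently disallowed via
 * timer_can_start (presumably cleared while the interface is being
 * torn down; the shutdown path is not shown here).
 */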
static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
{
        if (!smi_info->timer_can_start)
                return;
        smi_info->last_timeout_jiffies = jiffies;
        mod_timer(&smi_info->si_timer, new_val);
        smi_info->timer_running = true;
}

/*
 * Start a new message and (re)start the timer and thread.
 */
static void start_new_msg(struct smi_info *smi_info, unsigned char *msg,
                          unsigned int size)
{
        smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);

        if (smi_info->thread)
                wake_up_process(smi_info->thread);

        smi_info->handlers->start_transaction(smi_info->si_sm, msg, size);
}

static void start_check_enables(struct smi_info *smi_info)
{
        unsigned char msg[2];

        msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
        msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;

        start_new_msg(smi_info, msg, 2);
        smi_info->si_state = SI_CHECKING_ENABLES;
}

static void start_clear_flags(struct smi_info *smi_info)
{
        unsigned char msg[3];

        /* Make sure the watchdog pre-timeout flag is not set at startup. */
        msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
        msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
        msg[2] = WDT_PRE_TIMEOUT_INT;

        start_new_msg(smi_info, msg, 3);
        smi_info->si_state = SI_CLEARING_FLAGS;
}

static void start_getting_msg_queue(struct smi_info *smi_info)
{
        smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
        smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
        smi_info->curr_msg->data_size = 2;

        start_new_msg(smi_info, smi_info->curr_msg->data,
                      smi_info->curr_msg->data_size);
        smi_info->si_state = SI_GETTING_MESSAGES;
}

static void start_getting_events(struct smi_info *smi_info)
{
        smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
        smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
        smi_info->curr_msg->data_size = 2;

        start_new_msg(smi_info, smi_info->curr_msg->data,
                      smi_info->curr_msg->data_size);
        smi_info->si_state = SI_GETTING_EVENTS;
}

/*
 * When we have a situation where we run out of memory and cannot
 * allocate messages, we just leave them in the BMC and run the system
 * polled until we can allocate some memory.  Once we have some
 * memory, we will re-enable the interrupt.
 *
 * Note that we cannot just use disable_irq(), since the interrupt may
 * be shared.
 */
static inline bool disable_si_irq(struct smi_info *smi_info)
{
        if ((smi_info->io.irq) && (!smi_info->interrupt_disabled)) {
                smi_info->interrupt_disabled = true;
                start_check_enables(smi_info);
                return true;
        }
        return false;
}

static inline bool enable_si_irq(struct smi_info *smi_info)
{
        if ((smi_info->io.irq) && (smi_info->interrupt_disabled)) {
                smi_info->interrupt_disabled = false;
                start_check_enables(smi_info);
                return true;
        }
        return false;
}

/*
 * Allocate a message.  If unable to allocate, start the interrupt
 * disable process and return NULL.  If able to allocate but
 * interrupts are disabled, free the message and return NULL after
 * starting the interrupt enable process.
 */
static struct ipmi_smi_msg *alloc_msg_handle_irq(struct smi_info *smi_info)
{
        struct ipmi_smi_msg *msg;

        msg = ipmi_alloc_smi_msg();
        if (!msg) {
                if (!disable_si_irq(smi_info))
                        smi_info->si_state = SI_NORMAL;
        } else if (enable_si_irq(smi_info)) {
                ipmi_free_smi_msg(msg);
                msg = NULL;
        }
        return msg;
}

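/*
 * Process the flags byte fetched with GET_MSG_FLAGS, in priority
 * order: watchdog pre-timeouts first, then queued messages, then
 * events, then any OEM-specific flags.  Each branch kicks off the
 * matching transaction; if nothing is pending, return to SI_NORMAL.
 */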
static void handle_flags(struct smi_info *smi_info)
{
retry:
        if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
                /* Watchdog pre-timeout */
                smi_inc_stat(smi_info, watchdog_pretimeouts);

                start_clear_flags(smi_info);
                smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
                ipmi_smi_watchdog_pretimeout(smi_info->intf);
        } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
                /* Messages available. */
                smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
                if (!smi_info->curr_msg)
                        return;

                start_getting_msg_queue(smi_info);
        } else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
                /* Events available. */
                smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
                if (!smi_info->curr_msg)
                        return;

                start_getting_events(smi_info);
        } else if (smi_info->msg_flags & OEM_DATA_AVAIL &&
                   smi_info->oem_data_avail_handler) {
                if (smi_info->oem_data_avail_handler(smi_info))
                        goto retry;
        } else
                smi_info->si_state = SI_NORMAL;
}

/*
 * Global enables we care about.
 */
#define GLOBAL_ENABLES_MASK (IPMI_BMC_EVT_MSG_BUFF | IPMI_BMC_RCV_MSG_INTR | \
                             IPMI_BMC_EVT_MSG_INTR)

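/*
 * Compute the global-enables bits this driver wants set, honoring the
 * interface's quirks (broken or undisableable irq enables).  *irq_on
 * reports whether any interrupt enable was requested, so BT can keep
 * its own interrupt mask in sync.
 */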
static u8 current_global_enables(struct smi_info *smi_info, u8 base,
                                 bool *irq_on)
{
        u8 enables = 0;

        if (smi_info->supports_event_msg_buff)
                enables |= IPMI_BMC_EVT_MSG_BUFF;

        if (((smi_info->io.irq && !smi_info->interrupt_disabled) ||
             smi_info->cannot_disable_irq) &&
            !smi_info->irq_enable_broken)
                enables |= IPMI_BMC_RCV_MSG_INTR;

        if (smi_info->supports_event_msg_buff &&
            smi_info->io.irq && !smi_info->interrupt_disabled &&
            !smi_info->irq_enable_broken)
                enables |= IPMI_BMC_EVT_MSG_INTR;

        *irq_on = enables & (IPMI_BMC_EVT_MSG_INTR | IPMI_BMC_RCV_MSG_INTR);

        return enables;
}

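/*
 * Make the BT interface's interrupt-enable mask agree with irq_on:
 * read IPMI_BT_INTMASK_REG and rewrite the enable bit only if it
 * differs from the requested state.
 */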
static void check_bt_irq(struct smi_info *smi_info, bool irq_on)
{
        u8 irqstate = smi_info->io.inputb(&smi_info->io, IPMI_BT_INTMASK_REG);

        irqstate &= IPMI_BT_INTMASK_ENABLE_IRQ_BIT;

        if ((bool)irqstate == irq_on)
                return;

        if (irq_on)
                smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
                                     IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
        else
                smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG, 0);
}

static void handle_transaction_done(struct smi_info *smi_info)
{
        struct ipmi_smi_msg *msg;

        debug_timestamp("Done");
        switch (smi_info->si_state) {
        case SI_NORMAL:
                if (!smi_info->curr_msg)
                        break;

                smi_info->curr_msg->rsp_size
                        = smi_info->handlers->get_result(
                                smi_info->si_sm,
                                smi_info->curr_msg->rsp,
                                IPMI_MAX_MSG_LENGTH);

                /*
                 * Do this here because deliver_recv_msg() releases the
                 * lock, and a new message can be put in during the
                 * time the lock is released.
                 */
                msg = smi_info->curr_msg;
                smi_info->curr_msg = NULL;
                deliver_recv_msg(smi_info, msg);
                break;

        case SI_GETTING_FLAGS:
        {
                unsigned char msg[4];
                unsigned int  len;

                /* We got the flags from the SMI, now handle them. */
                len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
                if (msg[2] != 0) {
                        /* Error fetching flags, just give up for now. */
                        smi_info->si_state = SI_NORMAL;
                } else if (len < 4) {
                        /*
                         * Hmm, no flags.  That's technically illegal, but
                         * don't use uninitialized data.
                         */
                        smi_info->si_state = SI_NORMAL;
                } else {
                        smi_info->msg_flags = msg[3];
                        handle_flags(smi_info);
                }
                break;
        }

        case SI_CLEARING_FLAGS:
        {
                unsigned char msg[3];

                /* We cleared the flags. */
                smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
                if (msg[2] != 0) {
                        /* Error clearing flags */
                        dev_warn(smi_info->io.dev,
                                 "Error clearing flags: %2.2x\n", msg[2]);
                }
                smi_info->si_state = SI_NORMAL;
                break;
        }

        case SI_GETTING_EVENTS:
        {
                smi_info->curr_msg->rsp_size
                        = smi_info->handlers->get_result(
                                smi_info->si_sm,
                                smi_info->curr_msg->rsp,
                                IPMI_MAX_MSG_LENGTH);

                /*
                 * Do this here because deliver_recv_msg() releases the
                 * lock, and a new message can be put in during the
                 * time the lock is released.
                 */
                msg = smi_info->curr_msg;
                smi_info->curr_msg = NULL;
                if (msg->rsp[2] != 0) {
                        /* Error getting event, probably done. */
                        msg->done(msg);

                        /* Take off the event flag. */
                        smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
                        handle_flags(smi_info);
                } else {
                        smi_inc_stat(smi_info, events);

                        /*
                         * Do this before we deliver the message
                         * because delivering the message releases the
                         * lock and something else can mess with the
                         * state.
                         */
                        handle_flags(smi_info);

                        deliver_recv_msg(smi_info, msg);
                }
                break;
        }

        case SI_GETTING_MESSAGES:
        {
                smi_info->curr_msg->rsp_size
                        = smi_info->handlers->get_result(
                                smi_info->si_sm,
                                smi_info->curr_msg->rsp,
                                IPMI_MAX_MSG_LENGTH);

                /*
                 * Do this here because deliver_recv_msg() releases the
                 * lock, and a new message can be put in during the
                 * time the lock is released.
                 */
                msg = smi_info->curr_msg;
                smi_info->curr_msg = NULL;
                if (msg->rsp[2] != 0) {
                        /* Error getting message, probably done. */
                        msg->done(msg);

                        /* Take off the msg flag. */
                        smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
                        handle_flags(smi_info);
                } else {
                        smi_inc_stat(smi_info, incoming_messages);

                        /*
                         * Do this before we deliver the message
                         * because delivering the message releases the
                         * lock and something else can mess with the
                         * state.
                         */
                        handle_flags(smi_info);

                        deliver_recv_msg(smi_info, msg);
                }
                break;
        }

        case SI_CHECKING_ENABLES:
        {
                unsigned char msg[4];
                u8 enables;
                bool irq_on;

                /* We got the flags from the SMI, now handle them. */
                smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
                if (msg[2] != 0) {
                        dev_warn(smi_info->io.dev,
                                 "Couldn't get irq info: %x.\n", msg[2]);
                        dev_warn(smi_info->io.dev,
                                 "Maybe ok, but ipmi might run very slowly.\n");
                        smi_info->si_state = SI_NORMAL;
                        break;
                }
                enables = current_global_enables(smi_info, 0, &irq_on);
                if (smi_info->io.si_type == SI_BT)
                        /* BT has its own interrupt enable bit. */
                        check_bt_irq(smi_info, irq_on);
                if (enables != (msg[3] & GLOBAL_ENABLES_MASK)) {
                        /* Enables are not correct, fix them. */
                        msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
                        msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
                        msg[2] = enables | (msg[3] & ~GLOBAL_ENABLES_MASK);
                        smi_info->handlers->start_transaction(
                                smi_info->si_sm, msg, 3);
                        smi_info->si_state = SI_SETTING_ENABLES;
                } else if (smi_info->supports_event_msg_buff) {
                        smi_info->curr_msg = ipmi_alloc_smi_msg();
                        if (!smi_info->curr_msg) {
                                smi_info->si_state = SI_NORMAL;
                                break;
                        }
                        start_getting_events(smi_info);
                } else {
                        smi_info->si_state = SI_NORMAL;
                }
                break;
        }

        case SI_SETTING_ENABLES:
        {
                unsigned char msg[4];

                smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
                if (msg[2] != 0)
                        dev_warn(smi_info->io.dev,
                                 "Could not set the global enables: 0x%x.\n",
                                 msg[2]);

                if (smi_info->supports_event_msg_buff) {
                        smi_info->curr_msg = ipmi_alloc_smi_msg();
                        if (!smi_info->curr_msg) {
                                smi_info->si_state = SI_NORMAL;
                                break;
                        }
                        start_getting_events(smi_info);
                } else {
                        smi_info->si_state = SI_NORMAL;
                }
                break;
        }
        }
}

/*
 * Called on timeouts and events.  Timeouts should pass the elapsed
 * time, interrupts should pass in zero.  Must be called with
 * si_lock held and interrupts disabled.
 */
static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
                                           int time)
{
        enum si_sm_result si_sm_result;

restart:
        /*
         * There used to be a loop here that waited a little while
         * (around 25us) before giving up.  That turned out to be
         * pointless, the minimum delays I was seeing were in the 300us
         * range, which is far too long to wait in an interrupt.  So
         * we just run until the state machine tells us something
         * happened or it needs a delay.
         */
        si_sm_result = smi_info->handlers->event(smi_info->si_sm, time);
        time = 0;
        while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
                si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);

        if (si_sm_result == SI_SM_TRANSACTION_COMPLETE) {
                smi_inc_stat(smi_info, complete_transactions);

                handle_transaction_done(smi_info);
                goto restart;
        } else if (si_sm_result == SI_SM_HOSED) {
                smi_inc_stat(smi_info, hosed_count);

                /*
                 * Do this before return_hosed_msg(), because that
                 * releases the lock.
                 */
                smi_info->si_state = SI_NORMAL;
                if (smi_info->curr_msg != NULL) {
                        /*
                         * If we were handling a user message, format
                         * a response to send to the upper layer to
                         * tell it about the error.
                         */
                        return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED);
                }
                goto restart;
        }

        /*
         * We prefer handling attn over new messages.  But don't do
         * this if there is not yet an upper layer to handle anything.
         */
        if (si_sm_result == SI_SM_ATTN || smi_info->got_attn) {
                unsigned char msg[2];

                if (smi_info->si_state != SI_NORMAL) {
                        /*
                         * We got an ATTN, but we are doing something else.
                         * Handle the ATTN later.
                         */
                        smi_info->got_attn = true;
                } else {
                        smi_info->got_attn = false;
                        smi_inc_stat(smi_info, attentions);

                        /*
                         * Got an attn, send down a get message flags to see
                         * what's causing it.  It would be better to handle
                         * this in the upper layer, but due to the way
                         * interrupts work with the SMI, that's not really
                         * possible.
                         */
                        msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
                        msg[1] = IPMI_GET_MSG_FLAGS_CMD;

                        start_new_msg(smi_info, msg, 2);
                        smi_info->si_state = SI_GETTING_FLAGS;
                        goto restart;
                }
        }

        /* If we are currently idle, try to start the next message. */
        if (si_sm_result == SI_SM_IDLE) {
                smi_inc_stat(smi_info, idles);

                si_sm_result = start_next_msg(smi_info);
                if (si_sm_result != SI_SM_IDLE)
                        goto restart;
        }

        if ((si_sm_result == SI_SM_IDLE)
            && (atomic_read(&smi_info->req_events))) {
                /*
                 * We are idle and the upper layer requested that we fetch
                 * events, so do so.
                 */
                atomic_set(&smi_info->req_events, 0);

                /*
                 * Take this opportunity to check the interrupt and
                 * message enable state for the BMC.  The BMC can be
                 * asynchronously reset, and may thus get interrupts
                 * disabled and messages disabled.
                 */
                if (smi_info->supports_event_msg_buff || smi_info->io.irq) {
                        start_check_enables(smi_info);
                } else {
                        smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
                        if (!smi_info->curr_msg)
                                goto out;

                        start_getting_events(smi_info);
                }
                goto restart;
        }

        if (si_sm_result == SI_SM_IDLE && smi_info->timer_running) {
                /* OK if this fails, the timer will just go off. */
                if (del_timer(&smi_info->si_timer))
                        smi_info->timer_running = false;
        }

out:
        return si_sm_result;
}

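/*
 * If the interface is idle (SI_NORMAL with no message in flight),
 * restart the timer and worker thread and try to launch the next
 * queued message.  Called whenever new work may have arrived.
 */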
static void check_start_timer_thread(struct smi_info *smi_info)
{
        if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) {
                smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);

                if (smi_info->thread)
                        wake_up_process(smi_info->thread);

                start_next_msg(smi_info);
                smi_event_handler(smi_info, 0);
        }
}

static void flush_messages(void *send_info)
{
        struct smi_info *smi_info = send_info;
        enum si_sm_result result;

        /*
         * Currently, this function is called only in run-to-completion
         * mode.  This means we are single-threaded, no need for locks.
         */
        result = smi_event_handler(smi_info, 0);
        while (result != SI_SM_IDLE) {
                udelay(SI_SHORT_TIMEOUT_USEC);
                result = smi_event_handler(smi_info, SI_SHORT_TIMEOUT_USEC);
        }
}

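/*
 * Queue a message for transmission.  Only one message may be pending
 * at a time (waiting_msg); the upper layer guarantees this, hence the
 * BUG_ON below.  In run-to-completion mode the message is instead
 * picked up synchronously by flush_messages().
 */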
static void sender(void                *send_info,
                   struct ipmi_smi_msg *msg)
{
        struct smi_info   *smi_info = send_info;
        unsigned long     flags;

        debug_timestamp("Enqueue");

        if (smi_info->run_to_completion) {
                /*
                 * If we are running to completion, start it.  Upper
                 * layer will call flush_messages to clear it out.
                 */
                smi_info->waiting_msg = msg;
                return;
        }

        spin_lock_irqsave(&smi_info->si_lock, flags);
        /*
         * The following two lines don't need to be under the lock for
         * the lock's sake, but they do need SMP memory barriers to
         * avoid getting things out of order.  We are already claiming
         * the lock, anyway, so just do it under the lock to avoid the
         * ordering problem.
         */
        BUG_ON(smi_info->waiting_msg);
        smi_info->waiting_msg = msg;
        check_start_timer_thread(smi_info);
        spin_unlock_irqrestore(&smi_info->si_lock, flags);
}

static void set_run_to_completion(void *send_info, bool i_run_to_completion)
{
        struct smi_info   *smi_info = send_info;

        smi_info->run_to_completion = i_run_to_completion;
        if (i_run_to_completion)
                flush_messages(smi_info);
}

/*
 * Use -1 in the nsec value of the busy-waiting timestamp to tell that
 * we are spinning in kipmid looking for something and not delaying
 * between checks.
 */
static inline void ipmi_si_set_not_busy(struct timespec64 *ts)
{
        ts->tv_nsec = -1;
}
static inline int ipmi_si_is_busy(struct timespec64 *ts)
{
        return ts->tv_nsec != -1;
}

static inline bool ipmi_thread_busy_wait(enum si_sm_result smi_result,
                                         const struct smi_info *smi_info,
                                         struct timespec64 *busy_until)
{
        unsigned int max_busy_us = 0;

        if (smi_info->si_num < num_max_busy_us)
                max_busy_us = kipmid_max_busy_us[smi_info->si_num];
        if (max_busy_us == 0 || smi_result != SI_SM_CALL_WITH_DELAY)
                ipmi_si_set_not_busy(busy_until);
        else if (!ipmi_si_is_busy(busy_until)) {
                ktime_get_ts64(busy_until);
                timespec64_add_ns(busy_until, max_busy_us * NSEC_PER_USEC);
        } else {
                struct timespec64 now;

                ktime_get_ts64(&now);
                if (unlikely(timespec64_compare(&now, busy_until) > 0)) {
                        ipmi_si_set_not_busy(busy_until);
                        return false;
                }
        }
        return true;
}

/*
 * A busy-waiting loop for speeding up IPMI operation.
 *
 * Lousy hardware makes this hard.  This is only enabled for systems
 * that are not BT and do not have interrupts.  It starts spinning
 * when an operation is complete or until max_busy tells it to stop
 * (if that is enabled).  See the paragraph on kipmid_max_busy_us in
 * Documentation/IPMI.txt for details.
 */
static int ipmi_thread(void *data)
{
        struct smi_info *smi_info = data;
        unsigned long flags;
        enum si_sm_result smi_result;
        struct timespec64 busy_until = { 0, 0 };

        ipmi_si_set_not_busy(&busy_until);
        set_user_nice(current, MAX_NICE);
        while (!kthread_should_stop()) {
                int busy_wait;

                spin_lock_irqsave(&(smi_info->si_lock), flags);
                smi_result = smi_event_handler(smi_info, 0);

                /*
                 * If the driver is doing something, there is a possible
                 * race with the timer.  If the timer handler sees idle,
                 * and the thread here sees something else, the timer
                 * handler won't restart the timer even though it is
                 * required.  So start it here if necessary.
                 */
                if (smi_result != SI_SM_IDLE && !smi_info->timer_running)
                        smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);

                spin_unlock_irqrestore(&(smi_info->si_lock), flags);
                busy_wait = ipmi_thread_busy_wait(smi_result, smi_info,
                                                  &busy_until);
                if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
                        ; /* do nothing */
                } else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait) {
                        /*
                         * In maintenance mode we run as fast as
                         * possible to allow firmware updates to
                         * complete as fast as possible, but normally
                         * don't bang on the scheduler.
                         */
                        if (smi_info->in_maintenance_mode)
                                schedule();
                        else
                                usleep_range(100, 200);
                } else if (smi_result == SI_SM_IDLE) {
                        if (atomic_read(&smi_info->need_watch)) {
                                schedule_timeout_interruptible(100);
                        } else {
                                /* Wait to be woken up when we are needed. */
                                __set_current_state(TASK_INTERRUPTIBLE);
                                schedule();
                        }
                } else {
                        schedule_timeout_interruptible(1);
                }
        }
        return 0;
}

static void poll(void *send_info)
{
        struct smi_info *smi_info = send_info;
        unsigned long flags = 0;
        bool run_to_completion = smi_info->run_to_completion;

        /*
         * Make sure there is some delay in the poll loop so we can
         * drive time forward and timeout things.
         */
        udelay(10);
        if (!run_to_completion)
                spin_lock_irqsave(&smi_info->si_lock, flags);
        smi_event_handler(smi_info, 10);
        if (!run_to_completion)
                spin_unlock_irqrestore(&smi_info->si_lock, flags);
}

static void request_events(void *send_info)
{
        struct smi_info *smi_info = send_info;

        if (!smi_info->has_event_buffer)
                return;

        atomic_set(&smi_info->req_events, 1);
}

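/*
 * The upper layer passes a mask of things it is watching for (events,
 * pretimeouts, messages); any nonzero mask means the timer and thread
 * must keep running, so record that and kick them if necessary.
 */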
static void set_need_watch(void *send_info, unsigned int watch_mask)
{
        struct smi_info *smi_info = send_info;
        unsigned long flags;
        int enable;

        enable = !!watch_mask;

        atomic_set(&smi_info->need_watch, enable);
        spin_lock_irqsave(&smi_info->si_lock, flags);
        check_start_timer_thread(smi_info);
        spin_unlock_irqrestore(&smi_info->si_lock, flags);
}

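/*
 * Timer body.  Convert the jiffies since the last (re)arm into
 * microseconds for the state machine, run it, then rearm for either a
 * short (one jiffy) or long (SI_TIMEOUT_JIFFIES) interval; with
 * working interrupts, only long timeouts are used.
 */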
static void smi_timeout(struct timer_list *t)
{
        struct smi_info   *smi_info = from_timer(smi_info, t, si_timer);
        enum si_sm_result smi_result;
        unsigned long     flags;
        unsigned long     jiffies_now;
        long              time_diff;
        long              timeout;

        spin_lock_irqsave(&(smi_info->si_lock), flags);
        debug_timestamp("Timer");

        jiffies_now = jiffies;
        time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
                     * SI_USEC_PER_JIFFY);
        smi_result = smi_event_handler(smi_info, time_diff);

        if ((smi_info->io.irq) && (!smi_info->interrupt_disabled)) {
                /* Running with interrupts, only do long timeouts. */
                timeout = jiffies + SI_TIMEOUT_JIFFIES;
                smi_inc_stat(smi_info, long_timeouts);
                goto do_mod_timer;
        }

        /*
         * If the state machine asks for a short delay, then shorten
         * the timer timeout.
         */
        if (smi_result == SI_SM_CALL_WITH_DELAY) {
                smi_inc_stat(smi_info, short_timeouts);
                timeout = jiffies + 1;
        } else {
                smi_inc_stat(smi_info, long_timeouts);
                timeout = jiffies + SI_TIMEOUT_JIFFIES;
        }

do_mod_timer:
        if (smi_result != SI_SM_IDLE)
                smi_mod_timer(smi_info, timeout);
        else
                smi_info->timer_running = false;
        spin_unlock_irqrestore(&(smi_info->si_lock), flags);
}

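/*
 * Shared interrupt handler.  For BT the interrupt must be explicitly
 * acknowledged by writing the clear bit (while keeping the enable bit
 * set) before the state machine is run.
 */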
irqreturn_t ipmi_si_irq_handler(int irq, void *data)
{
        struct smi_info *smi_info = data;
        unsigned long   flags;

        if (smi_info->io.si_type == SI_BT)
                /* We need to clear the IRQ flag for the BT interface. */
                smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
                                     IPMI_BT_INTMASK_CLEAR_IRQ_BIT
                                     | IPMI_BT_INTMASK_ENABLE_IRQ_BIT);

        spin_lock_irqsave(&(smi_info->si_lock), flags);

        smi_inc_stat(smi_info, interrupts);

        debug_timestamp("Interrupt");

        smi_event_handler(smi_info, 0);
        spin_unlock_irqrestore(&(smi_info->si_lock), flags);
        return IRQ_HANDLED;
}

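/*
 * Called when the upper layer registers this interface: start the
 * driving timer, claim the interrupt if one was configured, and
 * decide whether the kipmid kernel thread is needed (forced by the
 * force_kipmid parameter, or defaulted on for non-BT interfaces
 * without interrupts).
 */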
static int smi_start_processing(void            *send_info,
                                struct ipmi_smi *intf)
{
        struct smi_info *new_smi = send_info;
        int             enable = 0;

        new_smi->intf = intf;

        /* Set up the timer that drives the interface. */
        timer_setup(&new_smi->si_timer, smi_timeout, 0);
        new_smi->timer_can_start = true;
        smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES);

        /* Try to claim any interrupts. */
        if (new_smi->io.irq_setup) {
                new_smi->io.irq_handler_data = new_smi;
                new_smi->io.irq_setup(&new_smi->io);
        }

        /*
         * Check if the user forcefully enabled the daemon.
         */
        if (new_smi->si_num < num_force_kipmid)
                enable = force_kipmid[new_smi->si_num];
        /*
         * The BT interface is efficient enough to not need a thread,
         * and there is no need for a thread if we have interrupts.
         */
        else if ((new_smi->io.si_type != SI_BT) && (!new_smi->io.irq))
                enable = 1;

        if (enable) {
                new_smi->thread = kthread_run(ipmi_thread, new_smi,
                                              "kipmi%d", new_smi->si_num);
                if (IS_ERR(new_smi->thread)) {
                        dev_notice(new_smi->io.dev,
                                   "Could not start kernel thread due to error %ld, only using timers to drive the interface\n",
                                   PTR_ERR(new_smi->thread));
                        new_smi->thread = NULL;
                }
        }

        return 0;
}

static int get_smi_info(void *send_info, struct ipmi_smi_info *data)
{
        struct smi_info *smi = send_info;

        data->addr_src = smi->io.addr_source;
        data->dev = smi->io.dev;
        data->addr_info = smi->io.addr_info;
        get_device(smi->io.dev);

        return 0;
}

static void set_maintenance_mode(void *send_info, bool enable)
{
        struct smi_info   *smi_info = send_info;

        if (!enable)
                atomic_set(&smi_info->req_events, 0);
        smi_info->in_maintenance_mode = enable;
}

static void shutdown_smi(void *send_info);
static const struct ipmi_smi_handlers handlers = {
        .owner                  = THIS_MODULE,
        .start_processing       = smi_start_processing,
        .shutdown               = shutdown_smi,
        .get_smi_info           = get_smi_info,
        .sender                 = sender,
        .request_events         = request_events,
        .set_need_watch         = set_need_watch,
        .set_maintenance_mode   = set_maintenance_mode,
        .set_run_to_completion  = set_run_to_completion,
        .flush_messages         = flush_messages,
        .poll                   = poll,
};

static LIST_HEAD(smi_infos);
static DEFINE_MUTEX(smi_infos_lock);
static int smi_num; /* Used to sequence the SMIs */

static const char * const addr_space_to_str[] = { "i/o", "mem" };

module_param_array(force_kipmid, int, &num_force_kipmid, 0);
MODULE_PARM_DESC(force_kipmid, "Force the kipmi daemon to be enabled (1) or"
                 " disabled (0).  Normally the IPMI driver auto-detects"
                 " this, but the value may be overridden by this parm.");
module_param(unload_when_empty, bool, 0);
MODULE_PARM_DESC(unload_when_empty, "Unload the module if no interfaces are"
                 " specified or found, default is 1.  Setting to 0"
                 " is useful for hot add of devices using hotmod.");
module_param_array(kipmid_max_busy_us, uint, &num_max_busy_us, 0644);
MODULE_PARM_DESC(kipmid_max_busy_us,
                 "Max time (in microseconds) to busy-wait for IPMI data before"
                 " sleeping. 0 (default) means to wait forever. Set to 100-500"
                 " if kipmid is using up a lot of CPU time.");
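/*
 * The array parameters above are indexed by interface number and take
 * comma-separated values.  For example (a hypothetical invocation,
 * adjust to taste):
 *
 *     modprobe ipmi_si kipmid_max_busy_us=100,200
 *
 * would cap kipmid busy-waiting at 100us on interface 0 and 200us on
 * interface 1.
 */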

void ipmi_irq_finish_setup(struct si_sm_io *io)
{
        if (io->si_type == SI_BT)
                /* Enable the interrupt in the BT interface. */
                io->outputb(io, IPMI_BT_INTMASK_REG,
                            IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
}

void ipmi_irq_start_cleanup(struct si_sm_io *io)
{
        if (io->si_type == SI_BT)
                /* Disable the interrupt in the BT interface. */
                io->outputb(io, IPMI_BT_INTMASK_REG, 0);
}

static void std_irq_cleanup(struct si_sm_io *io)
{
        ipmi_irq_start_cleanup(io);
        free_irq(io->irq, io->irq_handler_data);
}

int ipmi_std_irq_setup(struct si_sm_io *io)
{
        int rv;

        if (!io->irq)
                return 0;

        rv = request_irq(io->irq,
                         ipmi_si_irq_handler,
                         IRQF_SHARED,
                         SI_DEVICE_NAME,
                         io->irq_handler_data);
        if (rv) {
                dev_warn(io->dev,
                         "%s unable to claim interrupt %d, running polled\n",
                         SI_DEVICE_NAME, io->irq);
                io->irq = 0;
        } else {
                io->irq_cleanup = std_irq_cleanup;
                ipmi_irq_finish_setup(io);
                dev_info(io->dev, "Using irq %d\n", io->irq);
        }

        return rv;
}

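/*
 * Synchronously crank the state machine until the current transaction
 * finishes, sleeping a jiffy whenever it asks for a delay.  Used on
 * the slow probe/init paths before interrupts and threads are set up.
 * Returns -ENODEV if the state machine wedges (whatever is at this
 * address is probably not an IPMI interface).
 */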
static int wait_for_msg_done(struct smi_info *smi_info)
{
        enum si_sm_result     smi_result;

        smi_result = smi_info->handlers->event(smi_info->si_sm, 0);
        for (;;) {
                if (smi_result == SI_SM_CALL_WITH_DELAY ||
                    smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
                        schedule_timeout_uninterruptible(1);
                        smi_result = smi_info->handlers->event(
                                smi_info->si_sm, jiffies_to_usecs(1));
                } else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
                        smi_result = smi_info->handlers->event(
                                smi_info->si_sm, 0);
                } else
                        break;
        }
        if (smi_result == SI_SM_HOSED)
                /*
                 * We couldn't get the state machine to run, so whatever's at
                 * the port is probably not an IPMI SMI interface.
                 */
                return -ENODEV;

        return 0;
}

static int try_get_dev_id(struct smi_info *smi_info)
{
        unsigned char         msg[2];
        unsigned char         *resp;
        unsigned long         resp_len;
        int                   rv = 0;

        resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
        if (!resp)
                return -ENOMEM;

        /*
         * Do a Get Device ID command, since it comes back with some
         * useful info.
         */
        msg[0] = IPMI_NETFN_APP_REQUEST << 2;
        msg[1] = IPMI_GET_DEVICE_ID_CMD;
        smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);

        rv = wait_for_msg_done(smi_info);
        if (rv)
                goto out;

        resp_len = smi_info->handlers->get_result(smi_info->si_sm,
                                                  resp, IPMI_MAX_MSG_LENGTH);

        /* Check and record info from the get device id, in case we need it. */
        rv = ipmi_demangle_device_id(resp[0] >> 2, resp[1],
                        resp + 2, resp_len - 2, &smi_info->device_id);

out:
        kfree(resp);
        return rv;
}

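/*
 * Read the BMC's global enables byte into *enables using a raw
 * Get BMC Global Enables transaction.  Returns 0 on success or a
 * negative errno on failure.
 */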
1362static int get_global_enables(struct smi_info *smi_info, u8 *enables)
1363{
1364        unsigned char         msg[3];
1365        unsigned char         *resp;
1366        unsigned long         resp_len;
1367        int                   rv;
1368
1369        resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
1370        if (!resp)
1371                return -ENOMEM;
1372
1373        msg[0] = IPMI_NETFN_APP_REQUEST << 2;
1374        msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
1375        smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
1376
1377        rv = wait_for_msg_done(smi_info);
1378        if (rv) {
1379                dev_warn(smi_info->io.dev,
1380                         "Error getting response from get global enables command: %d\n",
1381                         rv);
1382                goto out;
1383        }
1384
1385        resp_len = smi_info->handlers->get_result(smi_info->si_sm,
1386                                                  resp, IPMI_MAX_MSG_LENGTH);
1387
1388        if (resp_len < 4 ||
1389                        resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
1390                        resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD   ||
1391                        resp[2] != 0) {
1392                dev_warn(smi_info->io.dev,
1393                         "Invalid return from get global enables command: %ld %x %x %x\n",
1394                         resp_len, resp[0], resp[1], resp[2]);
1395                rv = -EINVAL;
1396                goto out;
1397        } else {
1398                *enables = resp[3];
1399        }
1400
1401out:
1402        kfree(resp);
1403        return rv;
1404}
1405
1406/*
1407 * Returns 1 if it gets an error from the command.
1408 */
1409static int set_global_enables(struct smi_info *smi_info, u8 enables)
1410{
1411        unsigned char         msg[3];
1412        unsigned char         *resp;
1413        unsigned long         resp_len;
1414        int                   rv;
1415
1416        resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
1417        if (!resp)
1418                return -ENOMEM;
1419
1420        msg[0] = IPMI_NETFN_APP_REQUEST << 2;
1421        msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
1422        msg[2] = enables;
1423        smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
1424
1425        rv = wait_for_msg_done(smi_info);
1426        if (rv) {
1427                dev_warn(smi_info->io.dev,
1428                         "Error getting response from set global enables command: %d\n",
1429                         rv);
1430                goto out;
1431        }
1432
1433        resp_len = smi_info->handlers->get_result(smi_info->si_sm,
1434                                                  resp, IPMI_MAX_MSG_LENGTH);
1435
1436        if (resp_len < 3 ||
1437                        resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
1438                        resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) {
1439                dev_warn(smi_info->io.dev,
1440                         "Invalid return from set global enables command: %ld %x %x\n",
1441                         resp_len, resp[0], resp[1]);
1442                rv = -EINVAL;
1443                goto out;
1444        }
1445
1446        if (resp[2] != 0)
1447                rv = 1;
1448
1449out:
1450        kfree(resp);
1451        return rv;
1452}
1453
1454/*
1455 * Some BMCs do not support clearing the receive irq bit in the global
1456 * enables (even if they don't support interrupts on the BMC).  Check
1457 * for this and handle it properly.
1458 */
1459static void check_clr_rcv_irq(struct smi_info *smi_info)
1460{
1461        u8 enables = 0;
1462        int rv;
1463
1464        rv = get_global_enables(smi_info, &enables);
1465        if (!rv) {
1466                if ((enables & IPMI_BMC_RCV_MSG_INTR) == 0)
1467                        /* Already clear, should work ok. */
1468                        return;
1469
1470                enables &= ~IPMI_BMC_RCV_MSG_INTR;
1471                rv = set_global_enables(smi_info, enables);
1472        }
1473
1474        if (rv < 0) {
1475                dev_err(smi_info->io.dev,
1476                        "Cannot check clearing the rcv irq: %d\n", rv);
1477                return;
1478        }
1479
1480        if (rv) {
1481                /*
1482                 * An error when setting the event buffer bit means
1483                 * clearing the bit is not supported.
1484                 */
1485                dev_warn(smi_info->io.dev,
1486                         "The BMC does not support clearing the recv irq bit, compensating, but the BMC needs to be fixed.\n");
1487                smi_info->cannot_disable_irq = true;
1488        }
1489}
1490
1491/*
1492 * Some BMCs do not support setting the interrupt bits in the global
1493 * enables even if they support interrupts.  Clearly bad, but we can
1494 * compensate.
1495 */
1496static void check_set_rcv_irq(struct smi_info *smi_info)
1497{
1498        u8 enables = 0;
1499        int rv;
1500
1501        if (!smi_info->io.irq)
1502                return;
1503
1504        rv = get_global_enables(smi_info, &enables);
1505        if (!rv) {
1506                enables |= IPMI_BMC_RCV_MSG_INTR;
1507                rv = set_global_enables(smi_info, enables);
1508        }
1509
1510        if (rv < 0) {
1511                dev_err(smi_info->io.dev,
1512                        "Cannot check setting the rcv irq: %d\n", rv);
1513                return;
1514        }
1515
1516        if (rv) {
1517                /*
1518                 * An error when setting the receive message interrupt
1519                 * bit means the BMC does not support setting it.
1520                 */
1521                dev_warn(smi_info->io.dev,
1522                         "The BMC does not support setting the recv irq bit, compensating, but the BMC needs to be fixed.\n");
1523                smi_info->cannot_disable_irq = true;
1524                smi_info->irq_enable_broken = true;
1525        }
1526}
1527
1528static int try_enable_event_buffer(struct smi_info *smi_info)
1529{
1530        unsigned char         msg[3];
1531        unsigned char         *resp;
1532        unsigned long         resp_len;
1533        int                   rv = 0;
1534
1535        resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
1536        if (!resp)
1537                return -ENOMEM;
1538
1539        msg[0] = IPMI_NETFN_APP_REQUEST << 2;
1540        msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
1541        smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
1542
1543        rv = wait_for_msg_done(smi_info);
1544        if (rv) {
1545                pr_warn("Error getting response from get global enables command, the event buffer is not enabled\n");
1546                goto out;
1547        }
1548
1549        resp_len = smi_info->handlers->get_result(smi_info->si_sm,
1550                                                  resp, IPMI_MAX_MSG_LENGTH);
1551
1552        if (resp_len < 4 ||
1553                        resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
1554                        resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD   ||
1555                        resp[2] != 0) {
1556                pr_warn("Invalid return from get global enables command, cannot enable the event buffer\n");
1557                rv = -EINVAL;
1558                goto out;
1559        }
1560
1561        if (resp[3] & IPMI_BMC_EVT_MSG_BUFF) {
1562                /* buffer is already enabled, nothing to do. */
1563                smi_info->supports_event_msg_buff = true;
1564                goto out;
1565        }
1566
1567        msg[0] = IPMI_NETFN_APP_REQUEST << 2;
1568        msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
1569        msg[2] = resp[3] | IPMI_BMC_EVT_MSG_BUFF;
1570        smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
1571
1572        rv = wait_for_msg_done(smi_info);
1573        if (rv) {
1574                pr_warn("Error getting response from set global enables command, the event buffer is not enabled\n");
1575                goto out;
1576        }
1577
1578        resp_len = smi_info->handlers->get_result(smi_info->si_sm,
1579                                                  resp, IPMI_MAX_MSG_LENGTH);
1580
1581        if (resp_len < 3 ||
1582                        resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
1583                        resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) {
1584                pr_warn("Invalid return from set global enables command, cannot enable the event buffer\n");
1585                rv = -EINVAL;
1586                goto out;
1587        }
1588
1589        if (resp[2] != 0)
1590                /*
1591                 * An error when setting the event buffer bit means
1592                 * that the event buffer is not supported.
1593                 */
1594                rv = -ENOENT;
1595        else
1596                smi_info->supports_event_msg_buff = true;
1597
1598out:
1599        kfree(resp);
1600        return rv;
1601}
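
/*
 * Editor's note: the response checks above all follow the same IPMI
 * convention - byte 0 is the request netfn with the response bit set,
 * shifted into bits 7..2; byte 1 echoes the command; byte 2 is the
 * completion code (0 on success).  A hedged sketch of that check as a
 * helper (hypothetical, not used by the driver):
 */
static int __maybe_unused example_check_resp(const unsigned char *resp,
                                             unsigned long resp_len,
                                             unsigned char netfn,
                                             unsigned char cmd,
                                             unsigned long min_len)
{
        if (resp_len < min_len ||
            resp[0] != ((netfn | 1) << 2) ||
            resp[1] != cmd)
                return -EINVAL; /* malformed or mismatched response */
        if (resp[2] != 0)
                return -EIO;    /* BMC reported an error completion code */
        return 0;
}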
1602
1603#define IPMI_SI_ATTR(name) \
1604static ssize_t name##_show(struct device *dev,                  \
1605                           struct device_attribute *attr,               \
1606                           char *buf)                                   \
1607{                                                                       \
1608        struct smi_info *smi_info = dev_get_drvdata(dev);               \
1609                                                                        \
1610        return snprintf(buf, PAGE_SIZE, "%u\n", smi_get_stat(smi_info, name)); \
1611}                                                                       \
1612static DEVICE_ATTR(name, 0444, name##_show, NULL)
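
/*
 * Editor's note: for reference, IPMI_SI_ATTR(short_timeouts) expands
 * roughly to
 *
 *      static ssize_t short_timeouts_show(struct device *dev,
 *                                         struct device_attribute *attr,
 *                                         char *buf)
 *      {
 *              struct smi_info *smi_info = dev_get_drvdata(dev);
 *
 *              return snprintf(buf, PAGE_SIZE, "%u\n",
 *                              smi_get_stat(smi_info, short_timeouts));
 *      }
 *      static DEVICE_ATTR(short_timeouts, 0444, short_timeouts_show, NULL);
 *
 * i.e. one read-only (0444) sysfs file per statistic.
 */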
1613
1614static ssize_t type_show(struct device *dev,
1615                         struct device_attribute *attr,
1616                         char *buf)
1617{
1618        struct smi_info *smi_info = dev_get_drvdata(dev);
1619
1620        return snprintf(buf, 10, "%s\n", si_to_str[smi_info->io.si_type]);
1621}
1622static DEVICE_ATTR(type, 0444, type_show, NULL);
1623
1624static ssize_t interrupts_enabled_show(struct device *dev,
1625                                       struct device_attribute *attr,
1626                                       char *buf)
1627{
1628        struct smi_info *smi_info = dev_get_drvdata(dev);
1629        int enabled = smi_info->io.irq && !smi_info->interrupt_disabled;
1630
1631        return snprintf(buf, 10, "%d\n", enabled);
1632}
1633static DEVICE_ATTR(interrupts_enabled, 0444,
1634                   interrupts_enabled_show, NULL);
1635
1636IPMI_SI_ATTR(short_timeouts);
1637IPMI_SI_ATTR(long_timeouts);
1638IPMI_SI_ATTR(idles);
1639IPMI_SI_ATTR(interrupts);
1640IPMI_SI_ATTR(attentions);
1641IPMI_SI_ATTR(flag_fetches);
1642IPMI_SI_ATTR(hosed_count);
1643IPMI_SI_ATTR(complete_transactions);
1644IPMI_SI_ATTR(events);
1645IPMI_SI_ATTR(watchdog_pretimeouts);
1646IPMI_SI_ATTR(incoming_messages);
1647
1648static ssize_t params_show(struct device *dev,
1649                           struct device_attribute *attr,
1650                           char *buf)
1651{
1652        struct smi_info *smi_info = dev_get_drvdata(dev);
1653
1654        return snprintf(buf, 200,
1655                        "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
1656                        si_to_str[smi_info->io.si_type],
1657                        addr_space_to_str[smi_info->io.addr_space],
1658                        smi_info->io.addr_data,
1659                        smi_info->io.regspacing,
1660                        smi_info->io.regsize,
1661                        smi_info->io.regshift,
1662                        smi_info->io.irq,
1663                        smi_info->io.slave_addr);
1664}
1665static DEVICE_ATTR(params, 0444, params_show, NULL);
1666
1667static struct attribute *ipmi_si_dev_attrs[] = {
1668        &dev_attr_type.attr,
1669        &dev_attr_interrupts_enabled.attr,
1670        &dev_attr_short_timeouts.attr,
1671        &dev_attr_long_timeouts.attr,
1672        &dev_attr_idles.attr,
1673        &dev_attr_interrupts.attr,
1674        &dev_attr_attentions.attr,
1675        &dev_attr_flag_fetches.attr,
1676        &dev_attr_hosed_count.attr,
1677        &dev_attr_complete_transactions.attr,
1678        &dev_attr_events.attr,
1679        &dev_attr_watchdog_pretimeouts.attr,
1680        &dev_attr_incoming_messages.attr,
1681        &dev_attr_params.attr,
1682        NULL
1683};
1684
1685static const struct attribute_group ipmi_si_dev_attr_group = {
1686        .attrs          = ipmi_si_dev_attrs,
1687};
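
/*
 * Editor's note: once this group is registered (see try_smi_init()
 * below), the files appear in the interface device's sysfs directory.
 * Illustrative only - the exact path depends on how the interface was
 * discovered:
 *
 *      # cat /sys/devices/platform/ipmi_si.0/type
 *      kcs
 *      # cat /sys/devices/platform/ipmi_si.0/interrupts_enabled
 *      1
 */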
1688
1689/*
1690 * oem_data_avail_to_receive_msg_avail
1691 * @info - smi_info structure with msg_flags set
1692 *
1693 * Converts flags from OEM_DATA_AVAIL to RECEIVE_MSG_AVAIL
1694 * Returns 1 indicating need to re-run handle_flags().
1695 */
1696static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info)
1697{
1698        smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) |
1699                               RECEIVE_MSG_AVAIL);
1700        return 1;
1701}
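
/*
 * Editor's note: e.g. with an OEM0_DATA_AVAIL bit set in msg_flags,
 * the line above clears all of OEM_DATA_AVAIL and sets
 * RECEIVE_MSG_AVAIL instead, so the re-run of handle_flags() takes
 * the ordinary receive-message path.
 */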
1702
1703/*
1704 * setup_dell_poweredge_oem_data_handler
1705 * @info - smi_info.device_id must be populated
1706 *
1707 * Systems that match but have firmware version < 1.40 may assert
1708 * OEM0_DATA_AVAIL on their own, without being told via Set Flags that
1709 * it's safe to do so.  Such systems will de-assert OEM1_DATA_AVAIL
1710 * upon receipt of IPMI_GET_MSG_CMD, so we should treat these flags
1711 * as RECEIVE_MSG_AVAIL instead.
1712 *
1713 * As Dell has no plans to release IPMI 1.5 firmware that *ever*
1714 * asserts the OEM[012] bits, and if it did, the driver would have to
1715 * change to handle that properly, we don't actually check for the
1716 * firmware version.
1717 * Device ID = 0x20                BMC on PowerEdge 8G servers
1718 * Device Revision = 0x80
1719 * Firmware Revision1 = 0x01       BMC version 1.40
1720 * Firmware Revision2 = 0x40       BCD encoded
1721 * IPMI Version = 0x51             IPMI 1.5
1722 * Manufacturer ID = A2 02 00      Dell IANA
1723 *
1724 * Additionally, PowerEdge systems with IPMI < 1.5 may also assert
1725 * OEM0_DATA_AVAIL and need to be treated as RECEIVE_MSG_AVAIL.
1726 *
1727 */
1728#define DELL_POWEREDGE_8G_BMC_DEVICE_ID  0x20
1729#define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80
1730#define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51
1731#define DELL_IANA_MFR_ID 0x0002a2
1732static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info)
1733{
1734        struct ipmi_device_id *id = &smi_info->device_id;
1735        if (id->manufacturer_id == DELL_IANA_MFR_ID) {
1736                if (id->device_id       == DELL_POWEREDGE_8G_BMC_DEVICE_ID  &&
1737                    id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV &&
1738                    id->ipmi_version   == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) {
1739                        smi_info->oem_data_avail_handler =
1740                                oem_data_avail_to_receive_msg_avail;
1741                } else if (ipmi_version_major(id) < 1 ||
1742                           (ipmi_version_major(id) == 1 &&
1743                            ipmi_version_minor(id) < 5)) {
1744                        smi_info->oem_data_avail_handler =
1745                                oem_data_avail_to_receive_msg_avail;
1746                }
1747        }
1748}
1749
1750#define CANNOT_RETURN_REQUESTED_LENGTH 0xCA
1751static void return_hosed_msg_badsize(struct smi_info *smi_info)
1752{
1753        struct ipmi_smi_msg *msg = smi_info->curr_msg;
1754
1755        /* Make it a response */
1756        msg->rsp[0] = msg->data[0] | 4;
1757        msg->rsp[1] = msg->data[1];
1758        msg->rsp[2] = CANNOT_RETURN_REQUESTED_LENGTH;
1759        msg->rsp_size = 3;
1760        smi_info->curr_msg = NULL;
1761        deliver_recv_msg(smi_info, msg);
1762}
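
/*
 * Editor's note on the "| 4" above: the netfn occupies bits 7..2 of
 * the first message byte and a response netfn is the request netfn
 * with bit 0 set, so (req_netfn << 2) | (1 << 2) == (req_netfn | 1) << 2.
 * A minimal statement of that arithmetic (example value in the comment):
 */
static inline u8 __maybe_unused example_resp_byte0(u8 req_byte0)
{
        /* e.g. storage netfn 0x0a: 0x28 -> 0x2c, i.e. netfn 0x0b */
        return req_byte0 | 4;
}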
1763
1764/*
1765 * dell_poweredge_bt_xaction_handler
1766 * @info - smi_info.device_id must be populated
1767 *
1768 * Dell PowerEdge servers with the BT interface (x6xx and 1750) will
1769 * not respond to a Get SDR command if the length of the data
1770 * requested is exactly 0x3A, which leads to command timeouts and no
1771 * data returned.  This intercepts such commands, and causes userspace
1772 * callers to try again with a different-sized buffer, which succeeds.
1773 */
1774
1775#define STORAGE_NETFN 0x0A
1776#define STORAGE_CMD_GET_SDR 0x23
1777static int dell_poweredge_bt_xaction_handler(struct notifier_block *self,
1778                                             unsigned long unused,
1779                                             void *in)
1780{
1781        struct smi_info *smi_info = in;
1782        unsigned char *data = smi_info->curr_msg->data;
1783        unsigned int size   = smi_info->curr_msg->data_size;
1784        if (size >= 8 &&
1785            (data[0]>>2) == STORAGE_NETFN &&
1786            data[1] == STORAGE_CMD_GET_SDR &&
1787            data[7] == 0x3A) {
1788                return_hosed_msg_badsize(smi_info);
1789                return NOTIFY_STOP;
1790        }
1791        return NOTIFY_DONE;
1792}
1793
1794static struct notifier_block dell_poweredge_bt_xaction_notifier = {
1795        .notifier_call  = dell_poweredge_bt_xaction_handler,
1796};
1797
1798/*
1799 * setup_dell_poweredge_bt_xaction_handler
1800 * @info - smi_info.device_id must be filled in already
1801 *
1802 * Registers the BT transaction notifier above when the system is
1803 * one that is known to need it.
1804 */
1805static void
1806setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info)
1807{
1808        struct ipmi_device_id *id = &smi_info->device_id;
1809        if (id->manufacturer_id == DELL_IANA_MFR_ID &&
1810            smi_info->io.si_type == SI_BT)
1811                register_xaction_notifier(&dell_poweredge_bt_xaction_notifier);
1812}
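
/*
 * Editor's illustrative sketch: another quirk would follow the same
 * pattern - a notifier_block whose callback inspects the pending
 * message and returns NOTIFY_STOP to swallow it (as the Dell handler
 * above does) or NOTIFY_DONE to let the transaction proceed.  The
 * names below are hypothetical.
 */
static int __maybe_unused example_xaction_handler(struct notifier_block *self,
                                                  unsigned long unused,
                                                  void *in)
{
        return NOTIFY_DONE;     /* this example never intercepts anything */
}

static struct notifier_block __maybe_unused example_xaction_notifier = {
        .notifier_call  = example_xaction_handler,
};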
1813
1814/*
1815 * setup_oem_data_handler
1816 * @info - smi_info.device_id must be filled in already
1817 *
1818 * Fills in smi_info.oem_data_avail_handler
1819 * when we know what function to use there.
1820 */
1821
1822static void setup_oem_data_handler(struct smi_info *smi_info)
1823{
1824        setup_dell_poweredge_oem_data_handler(smi_info);
1825}
1826
1827static void setup_xaction_handlers(struct smi_info *smi_info)
1828{
1829        setup_dell_poweredge_bt_xaction_handler(smi_info);
1830}
1831
1832static void check_for_broken_irqs(struct smi_info *smi_info)
1833{
1834        check_clr_rcv_irq(smi_info);
1835        check_set_rcv_irq(smi_info);
1836}
1837
1838static inline void stop_timer_and_thread(struct smi_info *smi_info)
1839{
1840        if (smi_info->thread != NULL) {
1841                kthread_stop(smi_info->thread);
1842                smi_info->thread = NULL;
1843        }
1844
1845        smi_info->timer_can_start = false;
1846        del_timer_sync(&smi_info->si_timer);
1847}
1848
1849static struct smi_info *find_dup_si(struct smi_info *info)
1850{
1851        struct smi_info *e;
1852
1853        list_for_each_entry(e, &smi_infos, link) {
1854                if (e->io.addr_space != info->io.addr_space)
1855                        continue;
1856                if (e->io.addr_data == info->io.addr_data) {
1857                        /*
1858                         * This is a cheap hack, ACPI doesn't have a defined
1859                         * slave address but SMBIOS does.  Pick it up from
1860                         * any source that has it available.
1861                         */
1862                        if (info->io.slave_addr && !e->io.slave_addr)
1863                                e->io.slave_addr = info->io.slave_addr;
1864                        return e;
1865                }
1866        }
1867
1868        return NULL;
1869}
1870
1871int ipmi_si_add_smi(struct si_sm_io *io)
1872{
1873        int rv = 0;
1874        struct smi_info *new_smi, *dup;
1875
1876        /*
1877         * If the user gave us a hard-coded device at the same
1878         * address, they presumably want us to use it and not what is
1879         * in the firmware.
1880         */
1881        if (io->addr_source != SI_HARDCODED && io->addr_source != SI_HOTMOD &&
1882            ipmi_si_hardcode_match(io->addr_space, io->addr_data)) {
1883                dev_info(io->dev,
1884                         "Hard-coded device at this address already exists\n");
1885                return -ENODEV;
1886        }
1887
1888        if (!io->io_setup) {
1889                if (io->addr_space == IPMI_IO_ADDR_SPACE) {
1890                        io->io_setup = ipmi_si_port_setup;
1891                } else if (io->addr_space == IPMI_MEM_ADDR_SPACE) {
1892                        io->io_setup = ipmi_si_mem_setup;
1893                } else {
1894                        return -EINVAL;
1895                }
1896        }
1897
1898        new_smi = kzalloc(sizeof(*new_smi), GFP_KERNEL);
1899        if (!new_smi)
1900                return -ENOMEM;
1901        spin_lock_init(&new_smi->si_lock);
1902
1903        new_smi->io = *io;
1904
1905        mutex_lock(&smi_infos_lock);
1906        dup = find_dup_si(new_smi);
1907        if (dup) {
1908                if (new_smi->io.addr_source == SI_ACPI &&
1909                    dup->io.addr_source == SI_SMBIOS) {
1910                        /* We prefer ACPI over SMBIOS. */
1911                        dev_info(dup->io.dev,
1912                                 "Removing SMBIOS-specified %s state machine in favor of ACPI\n",
1913                                 si_to_str[new_smi->io.si_type]);
1914                        cleanup_one_si(dup);
1915                } else {
1916                        dev_info(new_smi->io.dev,
1917                                 "%s-specified %s state machine: duplicate\n",
1918                                 ipmi_addr_src_to_str(new_smi->io.addr_source),
1919                                 si_to_str[new_smi->io.si_type]);
1920                        rv = -EBUSY;
1921                        kfree(new_smi);
1922                        goto out_err;
1923                }
1924        }
1925
1926        pr_info("Adding %s-specified %s state machine\n",
1927                ipmi_addr_src_to_str(new_smi->io.addr_source),
1928                si_to_str[new_smi->io.si_type]);
1929
1930        list_add_tail(&new_smi->link, &smi_infos);
1931
1932        if (initialized)
1933                rv = try_smi_init(new_smi);
1934out_err:
1935        mutex_unlock(&smi_infos_lock);
1936        return rv;
1937}
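
/*
 * Editor's illustrative sketch of a caller: a discovery method fills
 * in a si_sm_io and hands it to ipmi_si_add_smi().  Only fields this
 * file actually reads are shown; the function and its device argument
 * are hypothetical.
 */
static int __maybe_unused example_add_kcs_interface(struct device *dev)
{
        struct si_sm_io io;

        memset(&io, 0, sizeof(io));
        io.addr_source = SI_HARDCODED;          /* who discovered it */
        io.si_type = SI_KCS;
        io.addr_space = IPMI_IO_ADDR_SPACE;     /* I/O ports, not memory */
        io.addr_data = 0xca2;                   /* the traditional KCS port */
        io.regspacing = 1;
        io.regsize = 1;
        io.dev = dev;
        /* io.io_setup left NULL: ipmi_si_add_smi() picks the default. */

        return ipmi_si_add_smi(&io);
}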
1938
1939/*
1940 * Try to start up an interface.  Must be called with smi_infos_lock
1941 * held, primarily to keep smi_num consistent; we only want to do
1942 * these one at a time.
1943 */
1944static int try_smi_init(struct smi_info *new_smi)
1945{
1946        int rv = 0;
1947        int i;
1948
1949        pr_info("Trying %s-specified %s state machine at %s address 0x%lx, slave address 0x%x, irq %d\n",
1950                ipmi_addr_src_to_str(new_smi->io.addr_source),
1951                si_to_str[new_smi->io.si_type],
1952                addr_space_to_str[new_smi->io.addr_space],
1953                new_smi->io.addr_data,
1954                new_smi->io.slave_addr, new_smi->io.irq);
1955
1956        switch (new_smi->io.si_type) {
1957        case SI_KCS:
1958                new_smi->handlers = &kcs_smi_handlers;
1959                break;
1960
1961        case SI_SMIC:
1962                new_smi->handlers = &smic_smi_handlers;
1963                break;
1964
1965        case SI_BT:
1966                new_smi->handlers = &bt_smi_handlers;
1967                break;
1968
1969        default:
1970                /* No support for anything else yet. */
1971                rv = -EIO;
1972                goto out_err;
1973        }
1974
1975        new_smi->si_num = smi_num;
1976
1977        /* Do this early so it's available for logs. */
1978        if (!new_smi->io.dev) {
1979                pr_err("IPMI interface added with no device\n");
1980                rv = -EIO;
1981                goto out_err;
1982        }
1983
1984        /* Allocate the state machine's data and initialize it. */
1985        new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
1986        if (!new_smi->si_sm) {
1987                rv = -ENOMEM;
1988                goto out_err;
1989        }
1990        new_smi->io.io_size = new_smi->handlers->init_data(new_smi->si_sm,
1991                                                           &new_smi->io);
1992
1993        /* Now that we know the I/O size, we can set up the I/O. */
1994        rv = new_smi->io.io_setup(&new_smi->io);
1995        if (rv) {
1996                dev_err(new_smi->io.dev, "Could not set up I/O space\n");
1997                goto out_err;
1998        }
1999
2000        /* Do low-level detection first. */
2001        if (new_smi->handlers->detect(new_smi->si_sm)) {
2002                if (new_smi->io.addr_source)
2003                        dev_err(new_smi->io.dev,
2004                                "Interface detection failed\n");
2005                rv = -ENODEV;
2006                goto out_err;
2007        }
2008
2009        /*
2010         * Attempt a get device id command.  If it fails, we probably
2011         * don't have a BMC here.
2012         */
2013        rv = try_get_dev_id(new_smi);
2014        if (rv) {
2015                if (new_smi->io.addr_source)
2016                        dev_err(new_smi->io.dev,
2017                               "There appears to be no BMC at this location\n");
2018                goto out_err;
2019        }
2020
2021        setup_oem_data_handler(new_smi);
2022        setup_xaction_handlers(new_smi);
2023        check_for_broken_irqs(new_smi);
2024
2025        new_smi->waiting_msg = NULL;
2026        new_smi->curr_msg = NULL;
2027        atomic_set(&new_smi->req_events, 0);
2028        new_smi->run_to_completion = false;
2029        for (i = 0; i < SI_NUM_STATS; i++)
2030                atomic_set(&new_smi->stats[i], 0);
2031
2032        new_smi->interrupt_disabled = true;
2033        atomic_set(&new_smi->need_watch, 0);
2034
2035        rv = try_enable_event_buffer(new_smi);
2036        if (rv == 0)
2037                new_smi->has_event_buffer = true;
2038
2039        /*
2040         * Start clearing the flags before we enable interrupts or the
2041         * timer to avoid racing with the timer.
2042         */
2043        start_clear_flags(new_smi);
2044
2045        /*
2046         * IRQ is defined to be set when non-zero.  req_events will
2047         * cause a global flags check that will enable interrupts.
2048         */
2049        if (new_smi->io.irq) {
2050                new_smi->interrupt_disabled = false;
2051                atomic_set(&new_smi->req_events, 1);
2052        }
2053
2054        dev_set_drvdata(new_smi->io.dev, new_smi);
2055        rv = device_add_group(new_smi->io.dev, &ipmi_si_dev_attr_group);
2056        if (rv) {
2057                dev_err(new_smi->io.dev,
2058                        "Unable to add device attributes: error %d\n",
2059                        rv);
2060                goto out_err;
2061        }
2062        new_smi->dev_group_added = true;
2063
2064        rv = ipmi_register_smi(&handlers,
2065                               new_smi,
2066                               new_smi->io.dev,
2067                               new_smi->io.slave_addr);
2068        if (rv) {
2069                dev_err(new_smi->io.dev,
2070                        "Unable to register device: error %d\n",
2071                        rv);
2072                goto out_err;
2073        }
2074
2075        /* Don't increment till we know we have succeeded. */
2076        smi_num++;
2077
2078        dev_info(new_smi->io.dev, "IPMI %s interface initialized\n",
2079                 si_to_str[new_smi->io.si_type]);
2080
2081        WARN_ON(new_smi->io.dev->init_name != NULL);
2082
2083 out_err:
2084        if (rv && new_smi->io.io_cleanup) {
2085                new_smi->io.io_cleanup(&new_smi->io);
2086                new_smi->io.io_cleanup = NULL;
2087        }
2088
2089        return rv;
2090}
2091
2092static int __init init_ipmi_si(void)
2093{
2094        struct smi_info *e;
2095        enum ipmi_addr_src type = SI_INVALID;
2096
2097        if (initialized)
2098                return 0;
2099
2100        ipmi_hardcode_init();
2101
2102        pr_info("IPMI System Interface driver\n");
2103
2104        ipmi_si_platform_init();
2105
2106        ipmi_si_pci_init();
2107
2108        ipmi_si_parisc_init();
2109
2110        /* We prefer devices with interrupts, but in the case of a machine
2111         * with multiple BMCs we assume that there will be several instances
2112         * of a given type, so if we succeed in registering a type then also
2113         * try to register everything else of the same type. */
2114        mutex_lock(&smi_infos_lock);
2115        list_for_each_entry(e, &smi_infos, link) {
2116                /* Try to register a device if it has an IRQ and we either
2117                 * haven't successfully registered a device yet or this
2118                 * device has the same type as one we successfully registered. */
2119                if (e->io.irq && (!type || e->io.addr_source == type)) {
2120                        if (!try_smi_init(e)) {
2121                                type = e->io.addr_source;
2122                        }
2123                }
2124        }
2125
2126        /* type will only have been set if we successfully registered an si */
2127        if (type)
2128                goto skip_fallback_noirq;
2129
2130        /* Fall back to the devices without an IRQ. */
2131
2132        list_for_each_entry(e, &smi_infos, link) {
2133                if (!e->io.irq && (!type || e->io.addr_source == type)) {
2134                        if (!try_smi_init(e)) {
2135                                type = e->io.addr_source;
2136                        }
2137                }
2138        }
2139
2140skip_fallback_noirq:
2141        initialized = true;
2142        mutex_unlock(&smi_infos_lock);
2143
2144        if (type)
2145                return 0;
2146
2147        mutex_lock(&smi_infos_lock);
2148        if (unload_when_empty && list_empty(&smi_infos)) {
2149                mutex_unlock(&smi_infos_lock);
2150                cleanup_ipmi_si();
2151                pr_warn("Unable to find any System Interface(s)\n");
2152                return -ENODEV;
2153        } else {
2154                mutex_unlock(&smi_infos_lock);
2155                return 0;
2156        }
2157}
2158module_init(init_ipmi_si);
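
/*
 * Editor's note: a typical successful load, stitched together from the
 * pr_info()/dev_info() calls above (addresses and device names are
 * illustrative):
 *
 *      # modprobe ipmi_si
 *      ipmi_si: IPMI System Interface driver
 *      ipmi_si: Adding ACPI-specified kcs state machine
 *      ipmi_si: Trying ACPI-specified kcs state machine at i/o address
 *               0xca2, slave address 0x20, irq 0
 *      ipmi_si <device>: IPMI kcs interface initialized
 */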
2159
2160static void shutdown_smi(void *send_info)
2161{
2162        struct smi_info *smi_info = send_info;
2163
2164        if (smi_info->dev_group_added) {
2165                device_remove_group(smi_info->io.dev, &ipmi_si_dev_attr_group);
2166                smi_info->dev_group_added = false;
2167        }
2168        if (smi_info->io.dev)
2169                dev_set_drvdata(smi_info->io.dev, NULL);
2170
2171        /*
2172         * Make sure that interrupts, the timer and the thread are
2173         * stopped and will not run again.
2174         */
2175        smi_info->interrupt_disabled = true;
2176        if (smi_info->io.irq_cleanup) {
2177                smi_info->io.irq_cleanup(&smi_info->io);
2178                smi_info->io.irq_cleanup = NULL;
2179        }
2180        stop_timer_and_thread(smi_info);
2181
2182        /*
2183         * Wait until we know that we are out of any interrupt
2184         * handlers that might have been running before we freed
2185         * the interrupt.
2186         */
2187        synchronize_rcu();
2188
2189        /*
2190         * Timeouts are stopped, now make sure the interrupts are off
2191         * in the BMC.  Note that timers and CPU interrupts are off,
2192         * so no need for locks.
2193         */
2194        while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) {
2195                poll(smi_info);
2196                schedule_timeout_uninterruptible(1);
2197        }
2198        if (smi_info->handlers)
2199                disable_si_irq(smi_info);
2200        while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) {
2201                poll(smi_info);
2202                schedule_timeout_uninterruptible(1);
2203        }
2204        if (smi_info->handlers)
2205                smi_info->handlers->cleanup(smi_info->si_sm);
2206
2207        if (smi_info->io.addr_source_cleanup) {
2208                smi_info->io.addr_source_cleanup(&smi_info->io);
2209                smi_info->io.addr_source_cleanup = NULL;
2210        }
2211        if (smi_info->io.io_cleanup) {
2212                smi_info->io.io_cleanup(&smi_info->io);
2213                smi_info->io.io_cleanup = NULL;
2214        }
2215
2216        kfree(smi_info->si_sm);
2217        smi_info->si_sm = NULL;
2218
2219        smi_info->intf = NULL;
2220}
2221
2222/*
2223 * Must be called with smi_infos_lock held, to serialize the
2224 * smi_info->intf check.
2225 */
2226static void cleanup_one_si(struct smi_info *smi_info)
2227{
2228        if (!smi_info)
2229                return;
2230
2231        list_del(&smi_info->link);
2232
2233        if (smi_info->intf)
2234                ipmi_unregister_smi(smi_info->intf);
2235
2236        kfree(smi_info);
2237}
2238
2239int ipmi_si_remove_by_dev(struct device *dev)
2240{
2241        struct smi_info *e;
2242        int rv = -ENOENT;
2243
2244        mutex_lock(&smi_infos_lock);
2245        list_for_each_entry(e, &smi_infos, link) {
2246                if (e->io.dev == dev) {
2247                        cleanup_one_si(e);
2248                        rv = 0;
2249                        break;
2250                }
2251        }
2252        mutex_unlock(&smi_infos_lock);
2253
2254        return rv;
2255}
2256
2257struct device *ipmi_si_remove_by_data(int addr_space, enum si_type si_type,
2258                                      unsigned long addr)
2259{
2261        struct smi_info *e, *tmp_e;
2262        struct device *dev = NULL;
2263
2264        mutex_lock(&smi_infos_lock);
2265        list_for_each_entry_safe(e, tmp_e, &smi_infos, link) {
2266                if (e->io.addr_space != addr_space)
2267                        continue;
2268                if (e->io.si_type != si_type)
2269                        continue;
2270                if (e->io.addr_data == addr) {
2271                        dev = get_device(e->io.dev);
2272                        cleanup_one_si(e);
2273                }
2274        }
2275        mutex_unlock(&smi_infos_lock);
2276
2277        return dev;
2278}
2279
2280static void cleanup_ipmi_si(void)
2281{
2282        struct smi_info *e, *tmp_e;
2283
2284        if (!initialized)
2285                return;
2286
2287        ipmi_si_pci_shutdown();
2288
2289        ipmi_si_parisc_shutdown();
2290
2291        ipmi_si_platform_shutdown();
2292
2293        mutex_lock(&smi_infos_lock);
2294        list_for_each_entry_safe(e, tmp_e, &smi_infos, link)
2295                cleanup_one_si(e);
2296        mutex_unlock(&smi_infos_lock);
2297
2298        ipmi_si_hardcode_exit();
2299        ipmi_si_hotmod_exit();
2300}
2301module_exit(cleanup_ipmi_si);
2302
2303MODULE_ALIAS("platform:dmi-ipmi-si");
2304MODULE_LICENSE("GPL");
2305MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
2306MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT system interfaces.");
2308